ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
94c058e2-6312-47f4-a0e8-a7bfebcf8345 | cpp | google/cel-cpp | namespace_generator | checker/internal/namespace_generator.cc | checker/internal/namespace_generator_test.cc | #include "checker/internal/namespace_generator.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "internal/lexis.h"
namespace cel::checker_internal {
namespace {
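// Invokes `callback` for each dotted interpretation of
// `partly_qualified_name` under `prefix`, from the longest candidate (all
// segments) down to the shortest (first segment only), passing the index of
// the last segment used. Returns false if the callback asks to stop early.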
bool FieldSelectInterpretationCandidates(
absl::string_view prefix,
absl::Span<const std::string> partly_qualified_name,
absl::FunctionRef<bool(absl::string_view, int)> callback) {
for (int i = 0; i < partly_qualified_name.size(); ++i) {
std::string buf;
int count = partly_qualified_name.size() - i;
auto end_idx = count - 1;
auto ident = absl::StrJoin(partly_qualified_name.subspan(0, count), ".");
absl::string_view candidate = ident;
if (absl::StartsWith(candidate, ".")) {
candidate = candidate.substr(1);
}
if (!prefix.empty()) {
buf = absl::StrCat(prefix, ".", candidate);
candidate = buf;
}
if (!callback(candidate, end_idx)) {
return false;
}
}
return true;
}
}  // namespace
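// Validates `container` and precomputes the candidate namespace prefixes,
// ordered from most to least qualified (e.g. "a.b" yields "a.b", then "a").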
absl::StatusOr<NamespaceGenerator> NamespaceGenerator::Create(
absl::string_view container) {
std::vector<std::string> candidates;
if (container.empty()) {
return NamespaceGenerator(std::move(candidates));
}
if (absl::StartsWith(container, ".")) {
return absl::InvalidArgumentError("container must not start with a '.'");
}
std::string prefix;
for (auto segment : absl::StrSplit(container, '.')) {
if (!internal::LexisIsIdentifier(segment)) {
return absl::InvalidArgumentError(
"container must only contain valid identifier segments");
}
if (prefix.empty()) {
prefix = segment;
} else {
absl::StrAppend(&prefix, ".", segment);
}
candidates.push_back(prefix);
}
std::reverse(candidates.begin(), candidates.end());
return NamespaceGenerator(std::move(candidates));
}
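// Generates resolution candidates for a single unqualified name. A leading
// '.' forces root-namespace resolution; otherwise each container prefix is
// tried before the bare name itself.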
void NamespaceGenerator::GenerateCandidates(
absl::string_view unqualified_name,
absl::FunctionRef<bool(absl::string_view)> callback) {
if (absl::StartsWith(unqualified_name, ".")) {
callback(unqualified_name.substr(1));
return;
}
for (const auto& prefix : candidates_) {
std::string candidate = absl::StrCat(prefix, ".", unqualified_name);
if (!callback(candidate)) {
return;
}
}
callback(unqualified_name);
}
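// Generates resolution candidates for a partly qualified name, trying every
// split between namespace qualification and field selection under each
// container prefix and finally under the root namespace.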
void NamespaceGenerator::GenerateCandidates(
absl::Span<const std::string> partly_qualified_name,
absl::FunctionRef<bool(absl::string_view, int)> callback) {
if (!partly_qualified_name.empty() &&
absl::StartsWith(partly_qualified_name[0], ".")) {
FieldSelectInterpretationCandidates("", partly_qualified_name, callback);
return;
}
for (const auto& prefix : candidates_) {
if (!FieldSelectInterpretationCandidates(prefix, partly_qualified_name,
callback)) {
return;
}
}
FieldSelectInterpretationCandidates("", partly_qualified_name, callback);
}
} | #include "checker/internal/namespace_generator.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "internal/testing.h"
namespace cel::checker_internal {
namespace {
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Pair;
TEST(NamespaceGeneratorTest, EmptyContainer) {
ASSERT_OK_AND_ASSIGN(auto generator, NamespaceGenerator::Create(""));
std::vector<std::string> candidates;
generator.GenerateCandidates("foo", [&](absl::string_view candidate) {
candidates.push_back(std::string(candidate));
return true;
});
EXPECT_THAT(candidates, ElementsAre("foo"));
}
TEST(NamespaceGeneratorTest, MultipleSegments) {
ASSERT_OK_AND_ASSIGN(auto generator,
NamespaceGenerator::Create("com.example"));
std::vector<std::string> candidates;
generator.GenerateCandidates("foo", [&](absl::string_view candidate) {
candidates.push_back(std::string(candidate));
return true;
});
EXPECT_THAT(candidates, ElementsAre("com.example.foo", "com.foo", "foo"));
}
TEST(NamespaceGeneratorTest, MultipleSegmentsRootNamespace) {
ASSERT_OK_AND_ASSIGN(auto generator,
NamespaceGenerator::Create("com.example"));
std::vector<std::string> candidates;
generator.GenerateCandidates(".foo", [&](absl::string_view candidate) {
candidates.push_back(std::string(candidate));
return true;
});
EXPECT_THAT(candidates, ElementsAre("foo"));
}
TEST(NamespaceGeneratorTest, InvalidContainers) {
EXPECT_THAT(NamespaceGenerator::Create(".com.example"),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(NamespaceGenerator::Create("com..example"),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(NamespaceGenerator::Create("com.$example"),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(NamespaceGeneratorTest, MultipleSegmentsSelectInterpretation) {
ASSERT_OK_AND_ASSIGN(auto generator,
NamespaceGenerator::Create("com.example"));
std::vector<std::string> qualified_ident = {"foo", "Bar"};
std::vector<std::pair<std::string, int>> candidates;
generator.GenerateCandidates(
qualified_ident, [&](absl::string_view candidate, int segment_index) {
candidates.push_back(std::pair(std::string(candidate), segment_index));
return true;
});
EXPECT_THAT(
candidates,
ElementsAre(Pair("com.example.foo.Bar", 1), Pair("com.example.foo", 0),
Pair("com.foo.Bar", 1), Pair("com.foo", 0),
Pair("foo.Bar", 1), Pair("foo", 0)));
}
TEST(NamespaceGeneratorTest,
MultipleSegmentsSelectInterpretationRootNamespace) {
ASSERT_OK_AND_ASSIGN(auto generator,
NamespaceGenerator::Create("com.example"));
std::vector<std::string> qualified_ident = {".foo", "Bar"};
std::vector<std::pair<std::string, int>> candidates;
generator.GenerateCandidates(
qualified_ident, [&](absl::string_view candidate, int segment_index) {
candidates.push_back(std::pair(std::string(candidate), segment_index));
return true;
});
EXPECT_THAT(candidates, ElementsAre(Pair("foo.Bar", 1), Pair("foo", 0)));
}
}  // namespace
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/internal/namespace_generator.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/internal/namespace_generator_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
a98d044e-9cf9-4514-bd1e-db7503f03570 | cpp | tensorflow/tensorflow | range_sampler | tensorflow/core/kernels/range_sampler.cc | tensorflow/core/kernels/range_sampler_test.cc | #include "tensorflow/core/kernels/range_sampler.h"
#include <cmath>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/io/inputbuffer.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
using gtl::ArraySlice;
using gtl::MutableArraySlice;
RangeSampler::~RangeSampler() {}
void RangeSampler::SampleBatch(random::SimplePhilox* rnd, bool unique,
absl::Span<int64_t> batch) const {
SampleBatchGetExpectedCount(rnd, unique, batch, absl::Span<float>(),
absl::Span<const int64_t>(), absl::Span<float>());
}
void RangeSampler::SampleBatchGetExpectedCount(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count) const {
SampleBatchGetExpectedCountAvoid(rnd, unique, batch, batch_expected_count,
extras, extras_expected_count,
absl::Span<const int64_t>());
}
namespace {
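// Expected number of occurrences of a value with probability `p` in a batch
// that took `num_tries` independent draws to fill: 1 - (1 - p)^num_tries,
// computed as -expm1(num_tries * log1p(-p)) for numerical stability. Without
// uniqueness, num_tries == batch_size and the expectation is p * batch_size.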
static float ExpectedCountHelper(float p, int batch_size, int num_tries) {
if (num_tries == batch_size) {
return p * batch_size;
}
return -std::expm1(num_tries * std::log1p(-p));
}
}  // namespace
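// Samples a batch, optionally rejecting duplicates and `avoided_values` (the
// latter only in unique mode), then fills in expected counts for the batch
// and any extra values based on the number of tries actually taken.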
void RangeSampler::SampleBatchGetExpectedCountAvoid(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count,
absl::Span<const int64_t> avoided_values) const {
const int batch_size = batch.size();
int num_tries;
if (unique) {
CHECK_LE(static_cast<int64_t>(batch_size + avoided_values.size()), range_);
std::unordered_set<int64_t> used(batch_size);
used.insert(avoided_values.begin(), avoided_values.end());
int num_picked = 0;
num_tries = 0;
while (num_picked < batch_size) {
num_tries++;
CHECK_LT(num_tries, kint32max);
int64_t value = Sample(rnd);
if (gtl::InsertIfNotPresent(&used, value)) {
batch[num_picked++] = value;
}
}
} else {
CHECK_EQ(avoided_values.size(), size_t{0})
<< "avoided_values only supported with unique=true";
for (int i = 0; i < batch_size; i++) {
batch[i] = Sample(rnd);
}
num_tries = batch_size;
}
if (!batch_expected_count.empty()) {
CHECK_EQ(batch_size, batch_expected_count.size());
for (int i = 0; i < batch_size; i++) {
batch_expected_count[i] =
ExpectedCountHelper(Probability(batch[i]), batch_size, num_tries);
}
}
CHECK_EQ(extras.size(), extras_expected_count.size());
for (size_t i = 0; i < extras.size(); i++) {
extras_expected_count[i] =
ExpectedCountHelper(Probability(extras[i]), batch_size, num_tries);
}
}
AllSampler::AllSampler(int64_t range) : RangeSampler(range) {}
void AllSampler::SampleBatchGetExpectedCountAvoid(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count,
absl::Span<const int64_t> avoided_values) const {
const int batch_size = batch.size();
CHECK_EQ(range_, batch_size);
for (int i = 0; i < batch_size; i++) {
batch[i] = i;
}
if (!batch_expected_count.empty()) {
CHECK_EQ(batch_size, batch_expected_count.size());
for (int i = 0; i < batch_size; i++) {
batch_expected_count[i] = 1;
}
}
CHECK_EQ(size_t{0}, avoided_values.size());
CHECK_EQ(extras.size(), extras_expected_count.size());
for (size_t i = 0; i < extras.size(); i++) {
extras_expected_count[i] = 1;
}
}
UniformSampler::UniformSampler(int64_t range)
: RangeSampler(range), inv_range_(1.0 / range) {}
int64_t UniformSampler::Sample(random::SimplePhilox* rnd) const {
return rnd->Uniform64(range_);
}
float UniformSampler::Probability(int64_t value) const { return inv_range_; }
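// Log-uniform ("Zipfian") sampler: values are drawn as floor(exp(U)) - 1
// with U uniform on [0, log(range + 1)), so
// P(value) = log((value + 2) / (value + 1)) / log(range + 1).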
LogUniformSampler::LogUniformSampler(int64_t range)
: RangeSampler(range), log_range_(log1p(range)) {}
int64_t LogUniformSampler::Sample(random::SimplePhilox* rnd) const {
const int64_t value =
static_cast<int64_t>(exp(rnd->RandDouble() * log_range_)) - 1;
DCHECK_GE(value, 0);
return value % range_;
}
float LogUniformSampler::Probability(int64_t value) const {
return (log((value + 2.0) / (value + 1.0))) / log_range_;
}
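// Unigram sampler backed by a weighted picker. Every weight starts at 1 and
// Update() increments the weights of the given values. Not thread-safe; see
// UnigramSampler below for the mutex-guarded wrapper.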
ThreadUnsafeUnigramSampler::ThreadUnsafeUnigramSampler(int64_t range)
: RangeSampler(range), picker_(range) {
CHECK_LT(range, kint32max);
}
int64_t ThreadUnsafeUnigramSampler::Sample(random::SimplePhilox* rnd) const {
return picker_.Pick(rnd);
}
float ThreadUnsafeUnigramSampler::Probability(int64_t value) const {
return static_cast<float>(picker_.get_weight(value)) / picker_.total_weight();
}
void ThreadUnsafeUnigramSampler::Update(absl::Span<const int64_t> values) {
int num_updates = std::min(static_cast<int>(values.size()),
kint32max - picker_.total_weight());
for (int i = 0; i < num_updates; i++) {
const int64_t value = values[i];
picker_.set_weight(value, picker_.get_weight(value) + 1);
}
}
UnigramSampler::UnigramSampler(int64_t range)
: RangeSampler(range), unsafe_sampler_(range) {
CHECK_LT(range, kint32max);
}
int64_t UnigramSampler::Sample(random::SimplePhilox* rnd) const {
tf_shared_lock lock(mu_);
return unsafe_sampler_.Sample(rnd);
}
float UnigramSampler::Probability(int64_t value) const {
tf_shared_lock lock(mu_);
return unsafe_sampler_.Probability(value);
}
void UnigramSampler::SampleBatchGetExpectedCountAvoid(
random::SimplePhilox* rnd, bool unique, absl::Span<int64_t> batch,
absl::Span<float> batch_expected_count, absl::Span<const int64_t> extras,
absl::Span<float> extras_expected_count,
absl::Span<const int64_t> avoided_values) const {
tf_shared_lock lock(mu_);
unsafe_sampler_.SampleBatchGetExpectedCountAvoid(
rnd, unique, batch, batch_expected_count, extras, extras_expected_count,
avoided_values);
}
void UnigramSampler::Update(absl::Span<const int64_t> values) {
mutex_lock lock(mu_);
unsafe_sampler_.Update(values);
}
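// Fixed unigram sampler: the distribution is loaded once, either from a CSV
// vocabulary file or from an in-memory vector, with each weight raised to
// the power `distortion`. When sharded, an instance keeps only the words
// with word_id % num_shards_ == shard_.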
FixedUnigramSampler::FixedUnigramSampler(int64_t range, float distortion,
int32_t num_reserved_ids,
int32_t num_shards, int32_t shard)
: RangeSampler(range),
total_weight_(0.0),
num_shards_(num_shards),
shard_(shard),
distortion_(distortion) {
FillReservedIds(num_reserved_ids);
}
Status FixedUnigramSampler::SetDistributionSampler(Env* env,
const string& vocab_file) {
TF_RETURN_IF_ERROR(LoadFromFile(env, vocab_file, distortion_));
if (!TF_PREDICT_TRUE(FixedUnigramSampler::range() == weights_.size()))
return (errors::InvalidArgument("range is ", FixedUnigramSampler::range(),
" must be equal to weights size ",
weights_.size()));
dist_sampler_.reset(new random::DistributionSampler(weights_));
return absl::OkStatus();
}
Status FixedUnigramSampler::SetDistributionSampler(
const std::vector<float>& unigrams) {
LoadFromUnigrams(unigrams, distortion_);
if (!TF_PREDICT_TRUE(FixedUnigramSampler::range() == weights_.size()))
return (errors::InvalidArgument("range is ", FixedUnigramSampler::range(),
" must be equal to weights size ",
weights_.size()));
dist_sampler_.reset(new random::DistributionSampler(weights_));
return absl::OkStatus();
}
float FixedUnigramSampler::Probability(int64_t value) const {
if (value < 0 || static_cast<size_t>(value) >= weights_.size()) {
return 0.0;
}
return weights_.at(value) / total_weight_;
}
int64_t FixedUnigramSampler::Sample(random::SimplePhilox* rnd) const {
return dist_sampler_->Sample(rnd);
}
void FixedUnigramSampler::FillReservedIds(int32_t num_reserved_ids) {
for (int32_t word_id = 0; word_id < num_reserved_ids; ++word_id) {
if (word_id % num_shards_ == shard_) weights_.push_back(0.0);
}
}
Status FixedUnigramSampler::LoadFromFile(Env* env, const string& vocab_file,
float distortion) {
std::unique_ptr<RandomAccessFile> file;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(vocab_file, &file));
io::InputBuffer in(file.get(), 262144 /* bytes */);
string line;
int32_t word_id = weights_.size();
while (in.ReadLine(&line).ok()) {
std::vector<string> cols = str_util::Split(line, ',');
if (cols.empty()) continue;
if (word_id % num_shards_ == shard_) {
float w = 0.0;
if (!strings::safe_strtof(cols.at(cols.size() - 1), &w)) {
return errors::InvalidArgument("Wrong vocabulary format at line: ",
line);
}
w = std::pow(w, distortion);
total_weight_ += w;
weights_.push_back(w);
}
++word_id;
}
return absl::OkStatus();
}
void FixedUnigramSampler::LoadFromUnigrams(const std::vector<float>& unigrams,
float distortion) {
int32_t word_id = weights_.size();
for (float w : unigrams) {
if (word_id % num_shards_ == shard_) {
w = std::pow(w, distortion);
total_weight_ += w;
weights_.push_back(w);
}
++word_id;
}
}
} | #include "tensorflow/core/kernels/range_sampler.h"
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using gtl::ArraySlice;
using gtl::MutableArraySlice;
class RangeSamplerTest : public ::testing::Test {
protected:
void CheckProbabilitiesSumToOne() {
double sum = 0;
for (int i = 0; i < sampler_->range(); i++) {
sum += sampler_->Probability(i);
}
EXPECT_NEAR(sum, 1.0, 1e-4);
}
void CheckHistogram(int num_samples, float tolerance) {
const int range = sampler_->range();
std::vector<int> h(range);
std::vector<int64_t> a(num_samples);
random::PhiloxRandom philox(123, 17);
random::SimplePhilox rnd(&philox);
sampler_->SampleBatch(&rnd, false, absl::MakeSpan(a));
for (int i = 0; i < num_samples; i++) {
int64_t val = a[i];
ASSERT_GE(val, 0);
ASSERT_LT(val, range);
h[val]++;
}
for (int val = 0; val < range; val++) {
EXPECT_NEAR((h[val] + 0.0) / num_samples, sampler_->Probability(val),
tolerance);
}
}
void Update1() {
std::vector<int64_t> a(10);
for (int i = 0; i < 10; i++) {
a[i] = 3;
}
sampler_->Update(a);
}
void Update2() {
int64_t a[10];
for (int i = 0; i < 10; i++) {
a[i] = i;
}
for (int64_t i = 1; i < 10; i++) {
sampler_->Update(absl::Span<const int64_t>(a + i, 10 - i));
}
}
std::unique_ptr<RangeSampler> sampler_;
};
TEST_F(RangeSamplerTest, UniformProbabilities) {
sampler_.reset(new UniformSampler(10));
for (int i = 0; i < 10; i++) {
CHECK_EQ(sampler_->Probability(i), sampler_->Probability(0));
}
}
TEST_F(RangeSamplerTest, UniformChecksum) {
sampler_.reset(new UniformSampler(10));
CheckProbabilitiesSumToOne();
}
TEST_F(RangeSamplerTest, UniformHistogram) {
sampler_.reset(new UniformSampler(10));
CheckHistogram(1000, 0.05);
}
TEST_F(RangeSamplerTest, LogUniformProbabilities) {
int range = 1000000;
sampler_.reset(new LogUniformSampler(range));
for (int i = 100; i < range; i *= 2) {
float ratio = sampler_->Probability(i) / sampler_->Probability(i / 2);
EXPECT_NEAR(ratio, 0.5, 0.1);
}
}
TEST_F(RangeSamplerTest, LogUniformChecksum) {
sampler_.reset(new LogUniformSampler(10));
CheckProbabilitiesSumToOne();
}
TEST_F(RangeSamplerTest, LogUniformHistogram) {
sampler_.reset(new LogUniformSampler(10));
CheckHistogram(1000, 0.05);
}
TEST_F(RangeSamplerTest, UnigramProbabilities1) {
sampler_.reset(new UnigramSampler(10));
Update1();
EXPECT_NEAR(sampler_->Probability(3), 0.55, 1e-4);
for (int i = 0; i < 10; i++) {
if (i != 3) {
ASSERT_NEAR(sampler_->Probability(i), 0.05, 1e-4);
}
}
}
TEST_F(RangeSamplerTest, UnigramProbabilities2) {
sampler_.reset(new UnigramSampler(10));
Update2();
for (int i = 0; i < 10; i++) {
ASSERT_NEAR(sampler_->Probability(i), (i + 1) / 55.0, 1e-4);
}
}
TEST_F(RangeSamplerTest, UnigramChecksum) {
sampler_.reset(new UnigramSampler(10));
Update1();
CheckProbabilitiesSumToOne();
}
TEST_F(RangeSamplerTest, UnigramHistogram) {
sampler_.reset(new UnigramSampler(10));
Update1();
CheckHistogram(1000, 0.05);
}
static const char kVocabContent[] =
"w1,1\n"
"w2,2\n"
"w3,4\n"
"w4,8\n"
"w5,16\n"
"w6,32\n"
"w7,64\n"
"w8,128\n"
"w9,256";
TEST_F(RangeSamplerTest, FixedUnigramProbabilities) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname));
sampler_.reset(test_sampler);
for (int i = 0; i < 9; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, i * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, FixedUnigramNoExistingFilename) {
Env* env = Env::Default();
string fname = "NoExistingFile";
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
Status s = test_sampler->SetDistributionSampler(env, fname);
sampler_.reset(test_sampler);
EXPECT_TRUE(absl::IsNotFound(s)) << s;
}
TEST_F(RangeSamplerTest, FixedUnigramNoMatchingRangeWeights) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(8, 0.8, 0, 1, 0);
Status s = test_sampler->SetDistributionSampler(env, fname);
sampler_.reset(test_sampler);
EXPECT_TRUE(absl::IsInvalidArgument(s)) << s;
}
TEST_F(RangeSamplerTest, FixedUnigramChecksum) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname));
sampler_.reset(test_sampler);
CheckProbabilitiesSumToOne();
}
TEST_F(RangeSamplerTest, FixedUnigramHistogram) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname));
sampler_.reset(test_sampler);
CheckHistogram(1000, 0.05);
}
TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve1) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(10, 0.8, 1, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname));
sampler_.reset(test_sampler);
ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4);
for (int i = 1; i < 10; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 1) * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve2) {
Env* env = Env::Default();
string fname = io::JoinPath(testing::TmpDir(), "vocab_file");
TF_CHECK_OK(WriteStringToFile(env, fname, kVocabContent));
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(11, 0.8, 2, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(env, fname));
sampler_.reset(test_sampler);
ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4);
ASSERT_NEAR(sampler_->Probability(1), 0, 1e-4);
for (int i = 2; i < 11; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 2) * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesFromVector) {
std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256};
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(weights));
sampler_.reset(test_sampler);
for (int i = 0; i < 9; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, i * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, FixedUnigramChecksumFromVector) {
std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256};
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(weights));
sampler_.reset(test_sampler);
CheckProbabilitiesSumToOne();
}
TEST_F(RangeSamplerTest, FixedUnigramHistogramFromVector) {
std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256};
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(9, 0.8, 0, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(weights));
sampler_.reset(test_sampler);
CheckHistogram(1000, 0.05);
}
TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve1FromVector) {
std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256};
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(10, 0.8, 1, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(weights));
sampler_.reset(test_sampler);
ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4);
for (int i = 1; i < 10; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 1) * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, FixedUnigramProbabilitiesReserve2FromVector) {
std::vector<float> weights = {1, 2, 4, 8, 16, 32, 64, 128, 256};
FixedUnigramSampler* test_sampler = new FixedUnigramSampler(11, 0.8, 2, 1, 0);
TF_CHECK_OK(test_sampler->SetDistributionSampler(weights));
sampler_.reset(test_sampler);
ASSERT_NEAR(sampler_->Probability(0), 0, 1e-4);
ASSERT_NEAR(sampler_->Probability(1), 0, 1e-4);
for (int i = 2; i < 11; i++) {
ASSERT_NEAR(sampler_->Probability(i), pow(2, (i - 2) * 0.8) / 197.05, 1e-4);
}
}
TEST_F(RangeSamplerTest, All) {
int batch_size = 10;
sampler_.reset(new AllSampler(10));
std::vector<int64_t> batch(batch_size);
std::vector<float> batch_expected(batch_size);
std::vector<int64_t> extras(2);
std::vector<float> extras_expected(2);
extras[0] = 0;
extras[1] = batch_size - 1;
sampler_->SampleBatchGetExpectedCount(nullptr,
false, absl::MakeSpan(batch),
absl::MakeSpan(batch_expected), extras,
absl::MakeSpan(extras_expected));
for (int i = 0; i < batch_size; i++) {
EXPECT_EQ(i, batch[i]);
EXPECT_EQ(1, batch_expected[i]);
}
EXPECT_EQ(1, extras_expected[0]);
EXPECT_EQ(1, extras_expected[1]);
}
TEST_F(RangeSamplerTest, Unique) {
random::PhiloxRandom philox(123, 17);
random::SimplePhilox rnd(&philox);
const int range = 100;
const int batch_size = 50;
const int num_batches = 100;
sampler_.reset(new LogUniformSampler(range));
std::vector<int> histogram(range);
std::vector<int64_t> batch(batch_size);
std::vector<int64_t> all_values(range);
for (int i = 0; i < range; i++) {
all_values[i] = i;
}
std::vector<float> expected(range);
sampler_->SampleBatchGetExpectedCount(&rnd, true, absl::MakeSpan(batch),
absl::Span<float>(), all_values,
absl::MakeSpan(expected));
std::set<int64_t> s(batch.begin(), batch.end());
CHECK_EQ(batch_size, s.size());
for (int trial = 0; trial < num_batches; trial++) {
std::vector<float> trial_expected(range);
sampler_->SampleBatchGetExpectedCount(&rnd, true, absl::MakeSpan(batch),
absl::Span<float>(), all_values,
absl::MakeSpan(trial_expected));
for (int i = 0; i < range; i++) {
EXPECT_NEAR(expected[i], trial_expected[i], expected[i] * 0.5);
}
for (int i = 0; i < batch_size; i++) {
histogram[batch[i]]++;
}
}
for (int i = 0; i < range; i++) {
const float average_count = static_cast<float>(histogram[i]) / num_batches;
EXPECT_NEAR(expected[i], average_count, 0.2);
}
}
TEST_F(RangeSamplerTest, Avoid) {
random::PhiloxRandom philox(123, 17);
random::SimplePhilox rnd(&philox);
sampler_.reset(new LogUniformSampler(100));
std::vector<int64_t> avoided(2);
avoided[0] = 17;
avoided[1] = 23;
std::vector<int64_t> batch(98);
sampler_->SampleBatchGetExpectedCountAvoid(
&rnd, true, absl::MakeSpan(batch), absl::Span<float>(),
absl::Span<const int64_t>(), absl::Span<float>(), avoided);
int sum = 0;
for (auto val : batch) {
sum += val;
}
const int expected_sum = 100 * 99 / 2 - avoided[0] - avoided[1];
EXPECT_EQ(expected_sum, sum);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/range_sampler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/range_sampler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c2d5cdab-c5af-4a62-b278-95f45b56ecde | cpp | tensorflow/tensorflow | sig_node | tensorflow/core/grappler/graph_analyzer/sig_node.cc | tensorflow/core/grappler/graph_analyzer/sig_node_test.cc | #include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
#include <algorithm>
#include "absl/strings/str_format.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
static constexpr bool debug = false;
SigNode::SigNode(const NodeDef* node) : node_(node) {}
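// Copies the links from the GenNode, keeping only the peers that are present
// in the translation map (i.e. inside the subgraph).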
void SigNode::CopyLinks(const GenNode& from, const TranslationMap& map) {
hash_to_link_.clear();
hashed_peers_.clear();
std::map<LinkTag, Link> link_map;
CopyLinksPass1(from, map, &link_map);
CopyLinksPass2(&link_map);
}
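// Pass 1: groups the kept links by tag (local port, remote port) and
// computes one hash per tag.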
void SigNode::CopyLinksPass1(const GenNode& from, const TranslationMap& map,
std::map<LinkTag, Link>* link_map) {
LinkTag::Hasher link_hasher;
for (const auto& entry : from.links()) {
for (const auto& target : entry.second) {
auto nodeit = map.find(target.node);
if (nodeit == map.end()) {
continue;
}
LinkTag tag(entry.first, target.port);
size_t hval = link_hasher(tag);
Link& map_entry = (*link_map)[tag];
if (map_entry.peers.empty()) {
map_entry.tag = tag;
map_entry.unique_hash = hval;
}
map_entry.peers.push_back(nodeit->second);
}
}
}
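// Pass 2: if two distinct link tags collide on the same hash value, the
// later tag is re-hashed until the collision disappears, so every tag ends
// up with a unique hash; the peers are then recorded per link hash.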
void SigNode::CopyLinksPass2(std::map<LinkTag, Link>* link_map) {
for (auto& entry : *link_map) {
Link* hl_entry_ptr = &hash_to_link_[entry.second.unique_hash];
while (!hl_entry_ptr->peers.empty()) {
CombineHash(1, &entry.second.unique_hash);
hl_entry_ptr = &hash_to_link_[entry.second.unique_hash];
}
for (const auto& peer : entry.second.peers) {
hashed_peers_.emplace_back(HashedPeer(entry.second.unique_hash, peer));
}
hl_entry_ptr->tag = entry.second.tag;
hl_entry_ptr->unique_hash = entry.second.unique_hash;
hl_entry_ptr->peers.swap(entry.second.peers);
}
}
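// Computes the distance-0 topology hash: the node's own opcode combined with
// its link hashes, before any neighbor information is mixed in.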
void SigNode::ComputeTopoHash0() {
topo_hash_.clear();
last_hashed_nodes_ = next_hashed_nodes_ = node_mask_;
size_t hval = std::hash<string>()(opcode());
for (const auto& entry : hashed_peers_) {
CombineHash(entry.link_hash, &hval);
}
topo_hash_.push_back(hval);
}
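// Extends the topology hash to `distance` by mixing in the peers' hashes at
// distance - 1, commutatively within each group of links that share a link
// hash, and accumulates the bitmask of nodes reached so far.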
void SigNode::ComputeTopoHash(int distance) {
next_hashed_nodes_ = last_hashed_nodes_;
if (debug) {
LOG(INFO) << "DEBUG node " << name() << " mask=" << std::hex
<< next_hashed_nodes_;
}
if (hash_is_final_) {
return;
}
const int64_t topo_hash_size = topo_hash_.size();
CHECK(topo_hash_size == distance);
int prev = distance - 1;
size_t hval = topo_hash_[0];
if (!hashed_peers_.empty()) {
size_t last_link_hash = hashed_peers_[0].link_hash;
size_t comm_hash = 0;
for (const auto& entry : hashed_peers_) {
if (entry.link_hash != last_link_hash) {
CombineHash(last_link_hash, &hval);
CombineHash(comm_hash, &hval);
comm_hash = 0;
last_link_hash = entry.link_hash;
}
CombineHashCommutative(entry.peer->GetTopoHash(prev), &comm_hash);
next_hashed_nodes_ |= entry.peer->last_hashed_nodes_;
if (debug) {
LOG(INFO) << "DEBUG node " << name() << " += " << entry.peer->name()
<< " mask=" << std::hex << next_hashed_nodes_;
}
}
CombineHash(last_link_hash, &hval);
CombineHash(comm_hash, &hval);
}
topo_hash_.push_back(hval);
}
size_t SigNode::GetTopoHash(int distance) const {
CHECK(!topo_hash_.empty());
const int64_t topo_hash_size = topo_hash_.size();
if (distance >= topo_hash_size) {
CHECK(hash_is_final_);
return topo_hash_.back();
} else {
return topo_hash_[distance];
}
}
bool SigNode::operator==(const SigNode& other) const {
if (opcode() != other.opcode()) {
return false;
}
if (unique_rank_ != other.unique_rank_) {
return false;
}
if (hashed_peers_.size() != other.hashed_peers_.size()) {
return false;
}
for (auto it1 = hashed_peers_.begin(), it2 = other.hashed_peers_.begin();
it1 != hashed_peers_.end(); ++it1, ++it2) {
if (it1->link_hash != it2->link_hash) {
return false;
}
if (it1->peer->unique_rank_ != it2->peer->unique_rank_) {
return false;
}
}
return true;
}
constexpr int Signature::kMaxGraphSize;
string Signature::ToString() const {
string result;
for (size_t n = 0; n < nodes.size(); ++n) {
result += absl::StrFormat("%d:%s", n, nodes[n]->opcode());
for (const auto& entry : nodes[n]->hashed_peers_) {
const auto& link = nodes[n]->hash_to_link_[entry.link_hash];
if (link.tag.local.IsInbound()) {
result +=
absl::StrFormat("[%s:%s:%d]", string(link.tag.local),
string(link.tag.remote), entry.peer->unique_rank_);
}
}
result.push_back(',');
}
return result;
}
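// Computes the graph signature by repeatedly refining topology hashes and
// peeling off the nodes whose hashes have become unique, until every node is
// ranked. Limited to kMaxGraphSize nodes because each node is tracked by one
// bit of the 64-bit hashed-nodes mask.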
Status Signature::Compute() {
if (map.size() > kMaxGraphSize) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat(
"A graph of %d nodes is too big for signature computation, "
"the maximal supported node count is %d.",
map.size(), kMaxGraphSize));
}
size_t next_node_id = 0;
sig_short = 0;
sig_full.resize(0);
PrepareNodes();
FindUniqueHashes(&next_node_id);
while (next_node_id < map.size()) {
ComputeOneRound(next_node_id);
FindUniqueHashes(&next_node_id);
}
OrderLinks();
return absl::OkStatus();
}
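// Resets the per-run state and assigns each node a one-bit mask; hash values
// up to the node count are reserved for the final unique ranks, so any
// initial hash that small gets re-hashed.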
void Signature::PrepareNodes() {
nodes.resize(0);
int64_t mask = 1;
for (const auto& entry : map) {
SigNode* node = entry.second.get();
node->last_hashed_nodes_ = node->node_mask_ = mask;
mask <<= 1;
node->unique_rank_ = ~0;
node->hash_is_final_ = false;
node->ComputeTopoHash0();
if (node->GetHighTopoHash() <= map.size()) {
node->ReHighTopoHash();
}
nodes.emplace_back(node);
}
}
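// Sorts the not-yet-ranked nodes by topology hash and assigns the next
// unique ranks to those whose hash is unshared; if no unique hash exists,
// the tie is broken by taking the last node of the final duplicate run.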
void Signature::FindUniqueHashes(size_t* next_node_id_p) {
std::stable_sort(nodes.begin() + *next_node_id_p, nodes.end(),
SigNode::NodeOrderLess());
bool found_unique = false;
for (size_t n = *next_node_id_p; n < nodes.size(); ++n) {
size_t cur_hash = nodes[n]->GetHighTopoHash();
if (n + 1 < nodes.size() && nodes[n + 1]->GetHighTopoHash() == cur_hash) {
for (++n;
n + 1 < nodes.size() && nodes[n + 1]->GetHighTopoHash() == cur_hash;
++n) {
}
if (found_unique || n != nodes.size() - 1) {
continue;
}
}
found_unique = true;
size_t id = (*next_node_id_p)++;
nodes[n]->unique_rank_ = id;
size_t last_hash = nodes[n]->GetHighTopoHash();
CombineHash(last_hash, &sig_short);
sig_full.push_back(last_hash);
nodes[n]->topo_hash_.resize(1);
nodes[n]->topo_hash_[0] = id + 1;
nodes[n]->hash_is_final_ = true;
nodes[n]->last_hashed_nodes_ = nodes[n]->node_mask_;
if (n != id) {
std::swap(nodes[id], nodes[n]);
}
}
}
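// Hashes the not-yet-final nodes at increasing distances until the set of
// nodes absorbed into each node's hash stops growing, at which point that
// hash is final.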
void Signature::ComputeOneRound(size_t next_node_id) {
int debug_i = 0;
for (auto it = nodes.begin() + next_node_id; it != nodes.end(); ++it) {
auto node = *it;
node->topo_hash_.resize(1);
node->last_hashed_nodes_ = node->node_mask_;
node->hash_is_final_ = false;
if (debug) {
LOG(INFO) << "DEBUG distance=" << 0 << " node " << debug_i++ << " "
<< node->name() << " mask=" << std::hex
<< node->last_hashed_nodes_;
}
}
bool stop = false;
for (int distance = 1; !stop; ++distance) {
for (auto it = nodes.begin() + next_node_id; it != nodes.end(); ++it) {
auto node = *it;
if (node->hash_is_final_) {
continue;
}
node->ComputeTopoHash(distance);
if (node->GetHighTopoHash() <= nodes.size()) {
node->ReHighTopoHash();
}
}
stop = true;
debug_i = 0;
for (auto it = nodes.begin() + next_node_id; it != nodes.end(); ++it) {
auto node = *it;
if (debug) {
LOG(INFO) << "DEBUG distance=" << distance << " node " << debug_i++
<< " " << node->name() << " oldmask=" << std::hex
<< node->last_hashed_nodes_ << " mask=" << std::hex
<< node->next_hashed_nodes_;
}
if (node->last_hashed_nodes_ == node->next_hashed_nodes_) {
node->hash_is_final_ = true;
} else {
node->last_hashed_nodes_ = node->next_hashed_nodes_;
stop = false;
}
}
}
}
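// Orders the peers within each group of equal link hashes by the peers'
// unique ranks, making the link order (and thus ToString()) deterministic.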
void Signature::OrderLinks() {
for (const auto& node : nodes) {
if (node->hashed_peers_.empty()) {
continue;
}
size_t cur_link_hash = node->hashed_peers_[0].link_hash + 1;
int first_idx = -1;
int idx;
for (idx = 0; idx < static_cast<int64_t>(node->hashed_peers_.size());
++idx) {
auto& entry = node->hashed_peers_[idx];
if (entry.link_hash == cur_link_hash) {
continue;
}
if (idx - first_idx > 1) {
std::sort(node->hashed_peers_.begin() + first_idx,
node->hashed_peers_.begin() + idx,
SigNode::HashedPeer::LessByRank());
}
cur_link_hash = entry.link_hash;
first_idx = idx;
}
if (idx - first_idx > 1) {
std::sort(node->hashed_peers_.begin() + first_idx,
node->hashed_peers_.begin() + idx,
SigNode::HashedPeer::LessByRank());
}
}
}
bool Signature::operator==(const Signature& other) const {
if (sig_short != other.sig_short) {
return false;
}
if (sig_full.size() != other.sig_full.size()) {
return false;
}
for (auto it1 = sig_full.begin(), it2 = other.sig_full.begin();
it1 != sig_full.end(); ++it1, ++it2) {
if (*it1 != *it2) {
return false;
}
}
if (nodes.size() != other.nodes.size()) {
return false;
}
for (auto it1 = nodes.begin(), it2 = other.nodes.begin(); it1 != nodes.end();
++it1, ++it2) {
if (**it1 != **it2) {
return false;
}
}
return true;
}
}  // namespace graph_analyzer
}  // namespace grappler
} | #include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/grappler/graph_analyzer/subgraph.h"
#include "tensorflow/core/grappler/graph_analyzer/test_tools.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Gt;
using ::testing::Ne;
using ::testing::SizeIs;
TEST(SigNodeLinkTag, Compare) {
SigNode::LinkTag a(GenNode::Port(false, 1), GenNode::Port(false, 2));
SigNode::LinkTag b(GenNode::Port(false, 1), GenNode::Port(false, 2));
SigNode::LinkTag c(GenNode::Port(false, 2), GenNode::Port(false, 1));
SigNode::LinkTag d(GenNode::Port(false, 1), GenNode::Port(false, 3));
SigNode::LinkTag e(GenNode::Port(false, 2), GenNode::Port(false, 2));
EXPECT_TRUE(a == b);
EXPECT_FALSE(a == c);
EXPECT_FALSE(a == e);
EXPECT_FALSE(a < b);
EXPECT_FALSE(b < a);
EXPECT_TRUE(a < c);
EXPECT_FALSE(c < a);
EXPECT_TRUE(a < d);
EXPECT_FALSE(d < a);
}
class SigBaseTest : public ::testing::Test, protected TestGraphs {
protected:
void BuildSigMap(const GraphDef& graph) {
gen_map_.clear();
sig_.map.clear();
CHECK(GenNode::BuildGraphInMap(graph, &gen_map_).ok());
Subgraph::Identity id;
for (const auto& entry : gen_map_) {
id.insert(entry.second.get());
}
Subgraph sg(id);
sg.ExtractForSignature(&sig_.map);
}
static void CopyLinksPass2(
std::map<SigNode::LinkTag, SigNode::Link>* link_map, SigNode* node) {
node->CopyLinksPass2(link_map);
}
static void ComputeTopoHash0(SigNode* node) { node->ComputeTopoHash0(); }
static void ComputeTopoHash(int distance, SigNode* node) {
node->ComputeTopoHash(distance);
}
static size_t GetTopoHash(int distance, SigNode* node) {
return node->GetTopoHash(distance);
}
static size_t GetHighTopoHash(SigNode* node) {
return node->GetHighTopoHash();
}
static void ReHighTopoHash(SigNode* node) { node->ReHighTopoHash(); }
static SigNode::HashedPeerVector& RefHashedPeers(SigNode* node) {
return node->hashed_peers_;
}
static size_t& RefUniqueRank(SigNode* node) { return node->unique_rank_; }
static bool& RefHashIsFinal(SigNode* node) { return node->hash_is_final_; }
static std::vector<size_t>& RefTopoHash(SigNode* node) {
return node->topo_hash_;
}
static uint64_t& RefNodeMask(SigNode* node) { return node->node_mask_; }
static uint64_t& RefLastHashedNodes(SigNode* node) {
return node->last_hashed_nodes_;
}
static uint64_t& RefNextHashedNodes(SigNode* node) {
return node->next_hashed_nodes_;
}
static void PrepareNodes(Signature* signature) { signature->PrepareNodes(); }
static void FindUniqueHashes(size_t* next_node_id_p, Signature* signature) {
signature->FindUniqueHashes(next_node_id_p);
}
static void ComputeOneRound(size_t next_node_id, Signature* signature) {
signature->ComputeOneRound(next_node_id);
}
static void OrderLinks(Signature* signature) { signature->OrderLinks(); }
GenNodeMap gen_map_;
Signature sig_;
};
class SigNodeTest : public SigBaseTest {};
TEST_F(SigNodeTest, DuplicateHash) {
NodeDef node1 = MakeNodeConst("node1");
NodeDef node2 = MakeNodeConst("node2");
NodeDef node3 = MakeNodeShapeN("node3", "node1", "node2");
SigNode sn1(&node1);
SigNode sn2(&node2);
SigNode sn3(&node3);
constexpr size_t kSameHash = 999;
SigNode::Link link1;
link1.tag = SigNode::LinkTag(GenNode::Port(true, 0), GenNode::Port(false, 0));
link1.unique_hash = kSameHash;
link1.peers.emplace_back(&sn1);
SigNode::Link link2;
link2.tag = SigNode::LinkTag(GenNode::Port(true, 1), GenNode::Port(false, 0));
link2.unique_hash = kSameHash;
link2.peers.emplace_back(&sn2);
SigNode::Link link3;
link3.tag = SigNode::LinkTag(GenNode::Port(true, 2), GenNode::Port(false, 0));
link3.unique_hash = kSameHash;
link3.peers.emplace_back(&sn3);
std::map<SigNode::LinkTag, SigNode::Link> link_map;
link_map[link1.tag] = link1;
link_map[link2.tag] = link2;
link_map[link3.tag] = link3;
CopyLinksPass2(&link_map, &sn3);
auto& hl = sn3.hash_to_link();
EXPECT_THAT(hl, SizeIs(3));
std::map<SigNode::LinkTag, SigNode::Link> rehashed;
auto hlit = hl.begin();
ASSERT_THAT(hlit, Ne(hl.end()));
EXPECT_THAT(hlit->second.unique_hash, Eq(hlit->first));
rehashed[hlit->second.tag] = hlit->second;
++hlit;
ASSERT_THAT(hlit, Ne(hl.end()));
EXPECT_THAT(hlit->second.unique_hash, Eq(hlit->first));
rehashed[hlit->second.tag] = hlit->second;
++hlit;
ASSERT_THAT(hlit, Ne(hl.end()));
EXPECT_THAT(hlit->second.unique_hash, Eq(hlit->first));
rehashed[hlit->second.tag] = hlit->second;
ASSERT_THAT(rehashed, SizeIs(3));
auto rhit = rehashed.begin();
ASSERT_THAT(rhit, Ne(rehashed.end()));
EXPECT_TRUE(rhit->second.tag == link1.tag);
EXPECT_THAT(rhit->second.unique_hash, Eq(kSameHash));
EXPECT_THAT(rhit->second.peers, ElementsAre(&sn1));
++rhit;
ASSERT_THAT(rhit, Ne(rehashed.end()));
EXPECT_TRUE(rhit->second.tag == link2.tag);
EXPECT_THAT(rhit->second.unique_hash, Ne(kSameHash));
size_t hash2 = rhit->second.unique_hash;
EXPECT_THAT(rhit->second.peers, ElementsAre(&sn2));
++rhit;
ASSERT_THAT(rhit, Ne(rehashed.end()));
EXPECT_TRUE(rhit->second.tag == link3.tag);
EXPECT_THAT(rhit->second.unique_hash, Ne(kSameHash));
EXPECT_THAT(rhit->second.unique_hash, Ne(hash2));
size_t hash3 = rhit->second.unique_hash;
EXPECT_THAT(rhit->second.peers, ElementsAre(&sn3));
auto& peers = sn3.hashed_peers();
EXPECT_THAT(peers, SizeIs(3));
auto peerit = peers.begin();
ASSERT_THAT(peerit, Ne(peers.end()));
EXPECT_THAT(peerit->link_hash, Eq(kSameHash));
EXPECT_THAT(peerit->peer, Eq(&sn1));
++peerit;
ASSERT_THAT(peerit, Ne(peers.end()));
EXPECT_THAT(peerit->link_hash, Eq(hash2));
EXPECT_THAT(peerit->peer, Eq(&sn2));
++peerit;
ASSERT_THAT(peerit, Ne(peers.end()));
EXPECT_THAT(peerit->link_hash, Eq(hash3));
EXPECT_THAT(peerit->peer, Eq(&sn3));
}
TEST_F(SigNodeTest, GetTopoHash) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(456);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(456));
RefHashIsFinal(&sn1) = true;
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(456));
EXPECT_THAT(GetTopoHash(2, &sn1), Eq(456));
EXPECT_THAT(GetHighTopoHash(&sn1), Eq(456));
}
TEST_F(SigNodeTest, ReTopoHash) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(456);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(456));
ReHighTopoHash(&sn1);
size_t expected_hash = 456;
CombineHash(1, &expected_hash);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(123));
EXPECT_THAT(GetTopoHash(1, &sn1), Eq(expected_hash));
}
TEST_F(SigNodeTest, ComputeTopoHash0) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
RefUniqueRank(&sn1) = 10;
RefNodeMask(&sn1) = 0x02;
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(456);
RefLastHashedNodes(&sn1) = 0xFF;
RefNextHashedNodes(&sn1) = 0xFF;
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(1, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(1, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(2, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(3, nullptr));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(3, nullptr));
ComputeTopoHash0(&sn1);
EXPECT_THAT(RefLastHashedNodes(&sn1), Eq(0x02));
EXPECT_THAT(RefNextHashedNodes(&sn1), Eq(0x02));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(1));
size_t exp_hval = std::hash<string>()(sn1.opcode());
CombineHash(1, &exp_hval);
CombineHash(1, &exp_hval);
CombineHash(2, &exp_hval);
CombineHash(3, &exp_hval);
CombineHash(3, &exp_hval);
EXPECT_THAT(GetTopoHash(0, &sn1), Eq(exp_hval));
}
TEST_F(SigNodeTest, ComputeTopoHashNotFinal) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
RefUniqueRank(&sn1) = 0;
RefNodeMask(&sn1) = 0x01;
RefUniqueRank(&sn2) = 0;
RefNodeMask(&sn2) = 0x02;
RefUniqueRank(&sn3) = 0;
RefNodeMask(&sn3) = 0x04;
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(20, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn2));
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(321);
RefTopoHash(&sn2).emplace_back(456);
RefTopoHash(&sn2).emplace_back(654);
RefTopoHash(&sn3).emplace_back(789);
RefTopoHash(&sn3).emplace_back(987);
RefLastHashedNodes(&sn1) = 0x8;
RefLastHashedNodes(&sn2) = 0x10;
RefLastHashedNodes(&sn3) = 0x20;
RefNextHashedNodes(&sn1) = 0x100;
ComputeTopoHash(2, &sn1);
EXPECT_THAT(RefLastHashedNodes(&sn1), Eq(0x8));
EXPECT_THAT(RefNextHashedNodes(&sn1), Eq(0x38));
size_t exp_hash = 123;
size_t comm_hash;
comm_hash = 0;
CombineHashCommutative(654, &comm_hash);
CombineHashCommutative(987, &comm_hash);
CombineHash(10, &exp_hash);
CombineHash(comm_hash, &exp_hash);
comm_hash = 0;
CombineHashCommutative(654, &comm_hash);
CombineHash(20, &exp_hash);
CombineHash(comm_hash, &exp_hash);
comm_hash = 0;
CombineHashCommutative(654, &comm_hash);
CombineHashCommutative(987, &comm_hash);
CombineHash(30, &exp_hash);
CombineHash(comm_hash, &exp_hash);
EXPECT_THAT(GetTopoHash(2, &sn1), Eq(exp_hash));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(3));
}
TEST_F(SigNodeTest, ComputeTopoHashFinal) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
RefUniqueRank(&sn1) = 0;
RefNodeMask(&sn1) = 0x01;
RefUniqueRank(&sn2) = 0;
RefNodeMask(&sn2) = 0x02;
RefUniqueRank(&sn3) = 0;
RefNodeMask(&sn3) = 0x04;
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(10, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(20, &sn2));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn3));
RefHashedPeers(&sn1).emplace_back(SigNode::HashedPeer(30, &sn2));
RefTopoHash(&sn1).emplace_back(123);
RefTopoHash(&sn1).emplace_back(321);
RefTopoHash(&sn2).emplace_back(456);
RefTopoHash(&sn2).emplace_back(654);
RefTopoHash(&sn3).emplace_back(789);
RefTopoHash(&sn3).emplace_back(987);
RefLastHashedNodes(&sn1) = 0x8;
RefLastHashedNodes(&sn2) = 0x10;
RefLastHashedNodes(&sn3) = 0x20;
RefNextHashedNodes(&sn1) = 0x100;
RefHashIsFinal(&sn1) = true;
ComputeTopoHash(2, &sn1);
EXPECT_THAT(RefLastHashedNodes(&sn1), Eq(0x8));
EXPECT_THAT(RefNextHashedNodes(&sn1), Eq(0x8));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
EXPECT_THAT(GetTopoHash(2, &sn1), Eq(321));
}
TEST_F(SigNodeTest, EqualsOpcode) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
EXPECT_TRUE(sn1 == sn2);
EXPECT_FALSE(sn1 != sn2);
node2.set_op("Mul");
EXPECT_TRUE(sn1 != sn2);
EXPECT_FALSE(sn1 == sn2);
}
TEST_F(SigNodeTest, EqualsRank) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
EXPECT_TRUE(sn1 == sn2);
EXPECT_FALSE(sn1 != sn2);
RefUniqueRank(&sn1) = 1;
RefUniqueRank(&sn2) = 2;
EXPECT_TRUE(sn1 != sn2);
EXPECT_FALSE(sn1 == sn2);
}
TEST_F(SigNodeTest, EqualsLinkSize) {
GraphDef graph1;
(*graph1.add_node()) = MakeNodeConst("node1");
(*graph1.add_node()) = MakeNodeMul("node2", "node1", "node1");
GenNodeMap gen_map1;
ASSERT_THAT(GenNode::BuildGraphInMap(graph1, &gen_map1),
Eq(absl::OkStatus()));
Subgraph::Identity id1;
id1.insert(gen_map1["node1"].get());
id1.insert(gen_map1["node2"].get());
Subgraph sg1(id1);
SigNodeMap sig_map1;
sg1.ExtractForSignature(&sig_map1);
GraphDef graph2;
(*graph2.add_node()) = MakeNodeConst("node1");
auto node22 = graph2.add_node();
*node22 = MakeNodeMul("node2", "node1", "node1");
node22->add_input("node2");
GenNodeMap gen_map2;
ASSERT_THAT(GenNode::BuildGraphInMap(graph2, &gen_map2),
Eq(absl::OkStatus()));
Subgraph::Identity id2;
id2.insert(gen_map2["node1"].get());
id2.insert(gen_map2["node2"].get());
Subgraph sg2(id2);
SigNodeMap sig_map2;
sg2.ExtractForSignature(&sig_map2);
EXPECT_TRUE(*sig_map1["node1"] == *sig_map2["node1"]);
EXPECT_FALSE(*sig_map1["node2"] == *sig_map2["node2"]);
EXPECT_FALSE(*sig_map2["node2"] == *sig_map1["node2"]);
}
TEST_F(SigNodeTest, EqualsLinks) {
GraphDef graph1;
(*graph1.add_node()) = MakeNodeConst("node1");
(*graph1.add_node()) = MakeNodeMul("node2", "node1", "node1");
GenNodeMap gen_map1;
ASSERT_THAT(GenNode::BuildGraphInMap(graph1, &gen_map1),
Eq(absl::OkStatus()));
Subgraph::Identity id1;
id1.insert(gen_map1["node1"].get());
id1.insert(gen_map1["node2"].get());
Subgraph sg1(id1);
SigNodeMap sig_map1;
sg1.ExtractForSignature(&sig_map1);
GenNodeMap gen_map2;
ASSERT_THAT(GenNode::BuildGraphInMap(graph1, &gen_map2),
Eq(absl::OkStatus()));
Subgraph::Identity id2;
id2.insert(gen_map2["node1"].get());
id2.insert(gen_map2["node2"].get());
Subgraph sg2(id2);
SigNodeMap sig_map2;
sg2.ExtractForSignature(&sig_map2);
EXPECT_TRUE(*sig_map1["node1"] == *sig_map2["node1"]);
EXPECT_TRUE(*sig_map1["node2"] == *sig_map2["node2"]);
SigNode* sn2 = sig_map2["node2"].get();
++RefHashedPeers(sn2)[0].link_hash;
EXPECT_FALSE(*sig_map1["node2"] == *sig_map2["node2"]);
--RefHashedPeers(sn2)[0].link_hash;
EXPECT_TRUE(*sig_map1["node2"] == *sig_map2["node2"]);
++RefUniqueRank(sig_map2["node1"].get());
EXPECT_FALSE(*sig_map1["node2"] == *sig_map2["node2"]);
}
class SignatureTest : public SigBaseTest {
protected:
static void InitPermutation(size_t size,
std::vector<size_t>* plain_permutation,
std::vector<size_t>* countdown) {
plain_permutation->clear();
countdown->clear();
for (size_t i = 0; i < size; ++i) {
plain_permutation->emplace_back(i);
countdown->emplace_back(size - 1 - i);
}
}
static void BuildPermutation(const std::vector<size_t>& plain_permutation,
const std::vector<size_t>& countdown,
std::vector<size_t>* result) {
*result = plain_permutation;
for (int i = 0; i < result->size(); ++i) {
std::swap((*result)[i], (*result)[i + countdown[i]]);
}
}
static bool CountDown(std::vector<size_t>* countdown) {
int pos;
for (pos = countdown->size() - 2; pos >= 0; --pos) {
if ((*countdown)[pos] > 0) {
--(*countdown)[pos];
break;
}
(*countdown)[pos] = (countdown->size() - 1 - pos);
}
return pos >= 0;
}
void TestGraphEveryWay(const GraphDef& graph) {
size_t graph_size = graph.node_size();
gen_map_.clear();
sig_.map.clear();
Status result = GenNode::BuildGraphInMap(graph, &gen_map_);
ASSERT_THAT(result, Eq(absl::OkStatus()));
Subgraph::Identity id;
for (const auto& entry : gen_map_) {
id.insert(entry.second.get());
}
Subgraph sg(id);
sg.ExtractForSignature(&sig_.map);
std::vector<size_t> plain_permutation;
std::vector<size_t> countdown;
InitPermutation(graph_size, &plain_permutation, &countdown);
std::set<string> signatures;
std::vector<size_t> permutation;
do {
BuildPermutation(plain_permutation, countdown, &permutation);
constexpr bool kDebugPermutation = false;
if (kDebugPermutation) {
string p;
for (int i = 0; i < permutation.size(); ++i) {
p.push_back('0' + permutation[i]);
}
LOG(INFO) << "Permutation: " << p;
}
std::vector<std::unique_ptr<SigNode>> hold(graph_size);
int idx;
sig_.nodes.clear();
idx = 0;
if (kDebugPermutation) {
LOG(INFO) << " nodes before permutation:";
}
for (auto& entry : sig_.map) {
if (kDebugPermutation) {
LOG(INFO) << " " << entry.second.get();
}
hold[idx++] = std::move(entry.second);
}
idx = 0;
if (kDebugPermutation) {
LOG(INFO) << " nodes after permutation:";
}
for (auto& entry : sig_.map) {
entry.second = std::move(hold[permutation[idx++]]);
if (kDebugPermutation) {
LOG(INFO) << " " << entry.second.get();
}
sig_.nodes.emplace_back(entry.second.get());
RefUniqueRank(entry.second.get()) = idx;
}
OrderLinks(&sig_);
ASSERT_THAT(sig_.Compute(), Eq(absl::OkStatus()));
signatures.insert(sig_.ToString());
EXPECT_THAT(sig_.sig_full, SizeIs(graph_size));
size_t hval = 0;
for (size_t ih : sig_.sig_full) {
EXPECT_THAT(ih, Gt(graph_size));
CombineHash(ih, &hval);
}
EXPECT_THAT(sig_.sig_short, Eq(hval));
idx = 0;
for (auto& entry : sig_.map) {
hold[permutation[idx++]] = std::move(entry.second);
}
idx = 0;
if (kDebugPermutation) {
LOG(INFO) << " nodes after un-permutation:";
}
for (auto& entry : sig_.map) {
entry.second = std::move(hold[idx++]);
if (kDebugPermutation) {
LOG(INFO) << " " << entry.second.get();
}
}
} while (CountDown(&countdown));
for (const auto& s : signatures) {
LOG(INFO) << "Signature: " << s;
}
EXPECT_THAT(signatures, SizeIs(1));
}
};
TEST_F(SignatureTest, PrepareNodes) {
NodeDef node1 = MakeNodeConst("node1");
sig_.map["node1"] = std::make_unique<SigNode>(&node1);
NodeDef node2 = MakeNodeConst("node2");
sig_.map["node2"] = std::make_unique<SigNode>(&node2);
NodeDef node3 = MakeNodeConst("node3");
sig_.map["node3"] = std::make_unique<SigNode>(&node3);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(3));
int idx = 0;
for (const auto& entry : sig_.map) {
EXPECT_THAT(RefNodeMask(entry.second.get()), Eq(1 << idx))
<< " at index " << idx;
EXPECT_THAT(RefUniqueRank(entry.second.get()), Eq(static_cast<size_t>(~0)))
<< " at index " << idx;
EXPECT_THAT(RefHashIsFinal(entry.second.get()), false)
<< " at index " << idx;
EXPECT_THAT(RefTopoHash(entry.second.get()), SizeIs(1))
<< " at index " << idx;
++idx;
}
}
TEST_F(SignatureTest, FindUniqueHashesAllDifferent) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
NodeDef node4 = MakeNodeConst("node4");
SigNode sn4(&node4);
RefTopoHash(&sn1).emplace_back(100);
RefTopoHash(&sn1).emplace_back(900);
RefTopoHash(&sn2).emplace_back(200);
RefTopoHash(&sn2).emplace_back(800);
RefTopoHash(&sn3).emplace_back(300);
RefTopoHash(&sn3).emplace_back(700);
RefTopoHash(&sn4).emplace_back(400);
RefTopoHash(&sn4).emplace_back(600);
sig_.nodes.emplace_back(&sn1);
sig_.nodes.emplace_back(&sn2);
sig_.nodes.emplace_back(&sn3);
sig_.nodes.emplace_back(&sn4);
size_t next = 1;
FindUniqueHashes(&next, &sig_);
EXPECT_THAT(next, Eq(4));
EXPECT_THAT(sig_.nodes[0], Eq(&sn1));
EXPECT_THAT(sig_.nodes[1], Eq(&sn4));
EXPECT_THAT(sig_.nodes[2], Eq(&sn3));
EXPECT_THAT(sig_.nodes[3], Eq(&sn2));
EXPECT_THAT(RefHashIsFinal(&sn1), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn2), Eq(true));
EXPECT_THAT(RefHashIsFinal(&sn3), Eq(true));
EXPECT_THAT(RefHashIsFinal(&sn4), Eq(true));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
ASSERT_THAT(RefTopoHash(&sn2), SizeIs(1));
ASSERT_THAT(RefTopoHash(&sn3), SizeIs(1));
ASSERT_THAT(RefTopoHash(&sn4), SizeIs(1));
EXPECT_THAT(RefTopoHash(&sn2)[0], Eq(4));
EXPECT_THAT(RefTopoHash(&sn3)[0], Eq(3));
EXPECT_THAT(RefTopoHash(&sn4)[0], Eq(2));
EXPECT_THAT(sig_.sig_full, ElementsAre(600, 700, 800));
size_t exp_short_hash = 0;
CombineHash(600, &exp_short_hash);
CombineHash(700, &exp_short_hash);
CombineHash(800, &exp_short_hash);
EXPECT_THAT(sig_.sig_short, Eq(exp_short_hash));
}
TEST_F(SignatureTest, FindUniqueHashesDuplicatesExceptOne) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
NodeDef node4 = MakeNodeConst("node4");
SigNode sn4(&node4);
NodeDef node5 = MakeNodeConst("node5");
SigNode sn5(&node5);
RefTopoHash(&sn1).emplace_back(100);
RefTopoHash(&sn1).emplace_back(600);
RefTopoHash(&sn2).emplace_back(200);
RefTopoHash(&sn2).emplace_back(600);
RefTopoHash(&sn3).emplace_back(300);
RefTopoHash(&sn3).emplace_back(700);
RefTopoHash(&sn4).emplace_back(400);
RefTopoHash(&sn4).emplace_back(800);
RefTopoHash(&sn5).emplace_back(500);
RefTopoHash(&sn5).emplace_back(800);
sig_.nodes.emplace_back(&sn1);
sig_.nodes.emplace_back(&sn2);
sig_.nodes.emplace_back(&sn3);
sig_.nodes.emplace_back(&sn4);
sig_.nodes.emplace_back(&sn5);
size_t next = 0;
FindUniqueHashes(&next, &sig_);
EXPECT_THAT(next, Eq(1));
EXPECT_THAT(sig_.nodes[0], Eq(&sn3));
EXPECT_THAT(sig_.nodes[1], Eq(&sn2));
EXPECT_THAT(sig_.nodes[2], Eq(&sn1));
EXPECT_THAT(sig_.nodes[3], Eq(&sn4));
EXPECT_THAT(sig_.nodes[4], Eq(&sn5));
EXPECT_THAT(RefHashIsFinal(&sn1), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn2), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn3), Eq(true));
EXPECT_THAT(RefHashIsFinal(&sn4), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn5), Eq(false));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn2), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn3), SizeIs(1));
EXPECT_THAT(RefTopoHash(&sn4), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn5), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn3)[0], Eq(1));
}
TEST_F(SignatureTest, FindUniqueHashesDuplicates) {
NodeDef node1 = MakeNodeConst("node1");
SigNode sn1(&node1);
NodeDef node2 = MakeNodeConst("node2");
SigNode sn2(&node2);
NodeDef node3 = MakeNodeConst("node3");
SigNode sn3(&node3);
NodeDef node4 = MakeNodeConst("node4");
SigNode sn4(&node4);
NodeDef node5 = MakeNodeConst("node5");
SigNode sn5(&node5);
RefTopoHash(&sn1).emplace_back(100);
RefTopoHash(&sn1).emplace_back(600);
RefTopoHash(&sn2).emplace_back(200);
RefTopoHash(&sn2).emplace_back(600);
RefTopoHash(&sn3).emplace_back(300);
RefTopoHash(&sn3).emplace_back(700);
RefTopoHash(&sn4).emplace_back(400);
RefTopoHash(&sn4).emplace_back(700);
RefTopoHash(&sn5).emplace_back(500);
RefTopoHash(&sn5).emplace_back(700);
sig_.nodes.emplace_back(&sn1);
sig_.nodes.emplace_back(&sn2);
sig_.nodes.emplace_back(&sn3);
sig_.nodes.emplace_back(&sn4);
sig_.nodes.emplace_back(&sn5);
size_t next = 0;
FindUniqueHashes(&next, &sig_);
EXPECT_THAT(next, Eq(1));
EXPECT_THAT(sig_.nodes[0], Eq(&sn5));
EXPECT_THAT(sig_.nodes[1], Eq(&sn2));
EXPECT_THAT(sig_.nodes[2], Eq(&sn3));
EXPECT_THAT(sig_.nodes[3], Eq(&sn4));
EXPECT_THAT(sig_.nodes[4], Eq(&sn1));
EXPECT_THAT(RefHashIsFinal(&sn1), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn2), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn3), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn4), Eq(false));
EXPECT_THAT(RefHashIsFinal(&sn5), Eq(true));
EXPECT_THAT(RefTopoHash(&sn1), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn2), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn3), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn4), SizeIs(2));
EXPECT_THAT(RefTopoHash(&sn5), SizeIs(1));
EXPECT_THAT(RefTopoHash(&sn5)[0], Eq(1));
}
TEST_F(SignatureTest, ComputeOneRoundCircular) {
BuildSigMap(graph_circular_onedir_);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(5));
ComputeOneRound(0, &sig_);
size_t hval = GetHighTopoHash(sig_.nodes[0]);
for (int i = 0; i < 5; ++i) {
EXPECT_THAT(GetHighTopoHash(sig_.nodes[i]), Eq(hval)) << " at index " << i;
EXPECT_THAT(RefHashIsFinal(sig_.nodes[i]), Eq(true)) << " at index " << i;
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
EXPECT_THAT(RefTopoHash(sig_.nodes[i]), SizeIs(4)) << " at index " << i;
}
}
TEST_F(SignatureTest, ComputeOneRoundLinear) {
BuildSigMap(graph_linear_);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(5));
ComputeOneRound(0, &sig_);
std::vector<size_t> hash_size;
for (int i = 0; i < 5; ++i) {
EXPECT_THAT(RefHashIsFinal(sig_.nodes[i]), Eq(true)) << " at index " << i;
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[i]), Eq(0x1F))
<< " at index " << i;
hash_size.emplace_back(RefTopoHash(sig_.nodes[i]).size());
}
std::sort(hash_size.begin(), hash_size.end());
EXPECT_THAT(hash_size, ElementsAre(4, 5, 5, 6, 6));
}
TEST_F(SignatureTest, ComputeOneRoundSplitLinear) {
BuildSigMap(graph_linear_);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(5));
std::swap(sig_.nodes[0], sig_.nodes[2]);
ASSERT_THAT(RefNodeMask(sig_.nodes[0]), Eq(0x04));
ASSERT_THAT(RefLastHashedNodes(sig_.nodes[0]), Eq(0x04));
ASSERT_THAT(RefNextHashedNodes(sig_.nodes[0]), Eq(0x04));
RefHashIsFinal(sig_.nodes[0]) = true;
ComputeOneRound(1, &sig_);
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[0]), Eq(0x04));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[0]), Eq(0x04));
std::vector<size_t> hash_size;
for (int i = 1; i < 5; ++i) {
EXPECT_THAT(RefHashIsFinal(sig_.nodes[i]), Eq(true)) << " at index " << i;
hash_size.emplace_back(RefTopoHash(sig_.nodes[i]).size());
}
std::sort(hash_size.begin(), hash_size.end());
EXPECT_THAT(hash_size, ElementsAre(3, 3, 4, 4));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[1]), Eq(0x07));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[1]), Eq(0x07));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[2]), Eq(0x07));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[2]), Eq(0x07));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[3]), Eq(0x1C));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[3]), Eq(0x1C));
EXPECT_THAT(RefLastHashedNodes(sig_.nodes[4]), Eq(0x1C));
EXPECT_THAT(RefNextHashedNodes(sig_.nodes[4]), Eq(0x1C));
}
TEST_F(SignatureTest, OrderLinks) {
gen_map_.clear();
sig_.map.clear();
Status result = GenNode::BuildGraphInMap(graph_for_link_order_, &gen_map_);
ASSERT_THAT(result, Eq(absl::OkStatus()));
Subgraph::Identity id;
for (const auto& entry : gen_map_) {
id.insert(entry.second.get());
}
Subgraph sg(id);
sg.ExtractForSignature(&sig_.map);
for (auto it = sig_.map.rbegin(); it != sig_.map.rend(); ++it) {
auto& entry = *it;
RefUniqueRank(entry.second.get()) = sig_.nodes.size();
sig_.nodes.emplace_back(entry.second.get());
}
string before = sig_.ToString();
EXPECT_THAT(before, Eq(
"0:Mul[i0:o0:5][i0:o0:4][i0:o1:4][i0:o2:3][i0:o2:2][i0:o3:2],"
"1:Mul[i0:o0:5][i0:o0:4][i0:o0:3][i0:o0:2],"
"2:Const,"
"3:Const,"
"4:Const,"
"5:Const,"
));
OrderLinks(&sig_);
string after = sig_.ToString();
EXPECT_THAT(after, Eq(
"0:Mul[i0:o0:4][i0:o0:5][i0:o1:4][i0:o2:2][i0:o2:3][i0:o3:2],"
"1:Mul[i0:o0:2][i0:o0:3][i0:o0:4][i0:o0:5],"
"2:Const,"
"3:Const,"
"4:Const,"
"5:Const,"
));
}
TEST_F(SignatureTest, GraphTooBig) {
GraphDef graph;
for (int i = 0; i <= Signature::kMaxGraphSize; ++i) {
(*graph.add_node()) = MakeNodeConst(absl::StrFormat("node%d", i));
}
ASSERT_THAT(GenNode::BuildGraphInMap(graph, &gen_map_), Eq(absl::OkStatus()));
Subgraph::Identity id;
for (const auto& entry : gen_map_) {
id.insert(entry.second.get());
}
Subgraph sg(id);
sg.ExtractForSignature(&sig_.map);
ASSERT_THAT(sig_.Compute(),
Eq(Status(absl::StatusCode::kInvalidArgument,
"A graph of 65 nodes is too big for signature "
"computation, the maximal supported node count is "
"64.")));
}
TEST_F(SignatureTest, ToString) {
BuildSigMap(graph_circular_onedir_);
PrepareNodes(&sig_);
ASSERT_THAT(sig_.nodes, SizeIs(5));
for (int i = 0; i < 5; ++i) {
RefUniqueRank(sig_.nodes[i]) = i;
RefHashIsFinal(sig_.nodes[i]) = true;
}
string result = sig_.ToString();
ASSERT_THAT(result, Eq(
"0:Mul[i0:o0:4][i0:o0:4],"
"1:Mul[i0:o0:0][i0:o0:0],"
"2:Mul[i0:o0:1][i0:o0:1],"
"3:Mul[i0:o0:2][i0:o0:2],"
"4:Mul[i0:o0:3][i0:o0:3],"
));
}
TEST_F(SignatureTest, Permutation) {
std::vector<size_t> plain_permutation;
std::vector<size_t> countdown;
InitPermutation(5, &plain_permutation, &countdown);
std::set<string> results;
std::vector<size_t> permutation;
do {
BuildPermutation(plain_permutation, countdown, &permutation);
EXPECT_THAT(permutation, SizeIs(5));
string p;
for (int i = 0; i < permutation.size(); ++i) {
p.push_back('0' + permutation[i]);
}
LOG(INFO) << "Permutation: " << p;
results.insert(p);
} while (CountDown(&countdown));
EXPECT_THAT(results, SizeIs(5 * 4 * 3 * 2 * 1));
}
TEST_F(SignatureTest, ComputeCircularOneDir) {
TestGraphEveryWay(graph_circular_onedir_);
}
TEST_F(SignatureTest, ComputeCircularBiDir) {
TestGraphEveryWay(graph_circular_bidir_);
}
TEST_F(SignatureTest, ComputeLinear) { TestGraphEveryWay(graph_linear_); }
TEST_F(SignatureTest, ComputeMultiInput) {
TestGraphEveryWay(graph_multi_input_);
}
TEST_F(SignatureTest, ComputeAllOrNone) {
TestGraphEveryWay(graph_all_or_none_);
}
TEST_F(SignatureTest, ComputeCross) { TestGraphEveryWay(graph_small_cross_); }
TEST_F(SignatureTest, Equals) {
GenNodeMap gen_map1;
ASSERT_THAT(GenNode::BuildGraphInMap(graph_circular_bidir_, &gen_map1),
Eq(absl::OkStatus()));
Subgraph::Identity id1;
id1.insert(gen_map1["node1"].get());
id1.insert(gen_map1["node2"].get());
Subgraph sg1(id1);
Signature sig1;
sg1.ExtractForSignature(&sig1.map);
ASSERT_THAT(sig1.Compute(), Eq(absl::OkStatus()));
GenNodeMap gen_map2;
ASSERT_THAT(GenNode::BuildGraphInMap(graph_circular_bidir_, &gen_map2),
Eq(absl::OkStatus()));
Subgraph::Identity id2;
id2.insert(gen_map2["node1"].get());
id2.insert(gen_map2["node2"].get());
Subgraph sg2(id2);
Signature sig2;
sg2.ExtractForSignature(&sig2.map);
ASSERT_THAT(sig2.Compute(), Eq(absl::OkStatus()));
EXPECT_TRUE(sig1 == sig2);
++sig2.sig_short;
EXPECT_FALSE(sig1 == sig2);
--sig2.sig_short;
EXPECT_TRUE(sig1 == sig2);
++sig2.sig_full[0];
EXPECT_FALSE(sig1 == sig2);
--sig2.sig_full[0];
EXPECT_TRUE(sig1 == sig2);
std::swap(sig2.nodes[0], sig2.nodes[1]);
EXPECT_FALSE(sig1 == sig2);
std::swap(sig2.nodes[0], sig2.nodes[1]);
EXPECT_TRUE(sig1 == sig2);
sig2.nodes.emplace_back(sig2.nodes[0]);
EXPECT_FALSE(sig1 == sig2);
EXPECT_FALSE(sig2 == sig1);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/sig_node.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/sig_node_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b4d56199-05c4-4fc6-ac2b-3999333517b7 | cpp | tensorflow/tensorflow | tensor_slice_set | tensorflow/core/util/tensor_slice_set.cc | tensorflow/core/util/tensor_slice_set_test.cc | #include "tensorflow/core/util/tensor_slice_set.h"
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/tensor_slice_util.h"
namespace tensorflow {
namespace checkpoint {
TensorSliceSet::TensorSliceSet(const TensorShape& shape, DataType type)
: shape_(shape), type_(type) {}
TensorSliceSet::~TensorSliceSet() = default;
Status TensorSliceSet::Register(const TensorSlice& slice, const string& tag) {
TensorShape result_shape;
TF_RETURN_IF_ERROR(slice.SliceTensorShape(shape_, &result_shape));
string str = slice.DebugString();
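  // Keep a bounding hull of all registered slices as a cheap rejection test;
  // the per-slice overlap scan only runs when the hull intersects the new
  // slice.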
if (slices_.empty()) {
slices_hull_ = slice;
} else {
if (slices_hull_.Overlaps(slice)) {
for (const auto& x : slices_) {
if (slice.Overlaps(x.second.slice)) {
return errors::Internal("Overlapping slices: existing slice = ",
x.first, ", new slice = ", str);
}
}
}
slices_hull_.UpdateToCover(slice);
}
TensorSliceSet::SliceInfo info = {slice, tag, result_shape.num_elements()};
slices_.insert(std::make_pair(str, info));
return absl::OkStatus();
}
bool TensorSliceSet::QueryMeta(
const TensorSlice& slice,
std::vector<std::pair<TensorSlice, string>>* results) const {
results->clear();
Status s;
string str = slice.DebugString();
const TensorSliceSet::SliceInfo* info = gtl::FindOrNull(slices_, str);
if (info) {
results->emplace_back(std::make_pair(info->slice, info->tag));
return true;
} else {
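    // No exact match: the query succeeds only if the registered slices
    // together cover every element of the requested slice, i.e. the summed
    // intersection sizes equal the slice's own element count.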
TensorShape target_shape;
Status s;
s = slice.SliceTensorShape(shape_, &target_shape);
if (!s.ok()) {
LOG(WARNING) << s;
return false;
}
int64_t total_size = target_shape.num_elements();
int64_t overlap_size = 0;
TensorSlice intersection;
TensorShape inter_shape;
for (const auto& x : slices_) {
if (slice.Intersect(x.second.slice, &intersection)) {
s = intersection.SliceTensorShape(shape_, &inter_shape);
if (!s.ok()) {
LOG(WARNING) << s;
return false;
}
overlap_size += inter_shape.num_elements();
results->emplace_back(std::make_pair(x.second.slice, x.second.tag));
}
}
if (total_size == overlap_size) {
return true;
} else {
results->clear();
return false;
}
}
}
Status RegisterTensorSlice(
const string& name, const TensorShape& shape, DataType type,
const string& tag, const TensorSlice& slice,
std::unordered_map<string, TensorSliceSet*>* tensor_slices) {
DCHECK_NE(tensor_slices, nullptr);
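  // Create the per-tensor slice set lazily on first registration; later
  // registrations must agree with the recorded shape and dtype.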
TensorSliceSet* tss = gtl::FindPtrOrNull(*tensor_slices, name);
if (!tss) {
tss = new TensorSliceSet(shape, type);
tensor_slices->insert(std::make_pair(name, tss));
} else {
const TensorShape& tss_shape(tss->shape());
if (!shape.IsSameSize(tss_shape)) {
return errors::Internal("Incompatible tensor shapes detected for tensor ",
name, ": existing = ", tss_shape.DebugString(),
", new = ", shape.DebugString());
}
if (type != tss->type()) {
return errors::Internal("Incompatible tensor types detected for tensor ",
name,
": existing = ", DataTypeString(tss->type()),
", new = ", DataTypeString(type));
}
}
return tss->Register(slice, tag);
}
}
} | #include "tensorflow/core/util/tensor_slice_set.h"
#include <utility>
#include <vector>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace checkpoint {
namespace {
TEST(TensorSliceSetTest, QueryMetaTwoD) {
TensorShape shape({4, 5});
TensorSliceSet tss(shape, DT_INT32);
TensorSlice slice_1 = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(tss.Register(slice_1, "slice_1"));
TensorSlice slice_2 = TensorSlice::ParseOrDie("2,2:0,3");
TF_CHECK_OK(tss.Register(slice_2, "slice_2"));
TensorSlice slice_3 = TensorSlice::ParseOrDie("3,1:3,2");
TF_CHECK_OK(tss.Register(slice_3, "slice_3"));
{
TensorSlice s = TensorSlice::ParseOrDie("0,2:-");
std::vector<std::pair<TensorSlice, string>> results;
EXPECT_TRUE(tss.QueryMeta(s, &results));
EXPECT_EQ(1, results.size());
EXPECT_EQ("0,2:-", results[0].first.DebugString());
EXPECT_EQ("slice_1", results[0].second);
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,1:-");
std::vector<std::pair<TensorSlice, string>> results;
EXPECT_TRUE(tss.QueryMeta(s, &results));
EXPECT_EQ(1, results.size());
EXPECT_EQ("0,2:-", results[0].first.DebugString());
EXPECT_EQ("slice_1", results[0].second);
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,2:0,3");
std::vector<std::pair<TensorSlice, string>> results;
EXPECT_TRUE(tss.QueryMeta(s, &results));
EXPECT_EQ(2, results.size());
if (results[0].second == "slice_2") {
EXPECT_EQ("2,2:0,3", results[0].first.DebugString());
EXPECT_EQ("slice_2", results[0].second);
EXPECT_EQ("0,2:-", results[1].first.DebugString());
EXPECT_EQ("slice_1", results[1].second);
} else {
EXPECT_EQ("0,2:-", results[0].first.DebugString());
EXPECT_EQ("slice_1", results[0].second);
EXPECT_EQ("2,2:0,3", results[1].first.DebugString());
EXPECT_EQ("slice_2", results[1].second);
}
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,2:2,3");
std::vector<std::pair<TensorSlice, string>> results;
EXPECT_FALSE(tss.QueryMeta(s, &results));
EXPECT_EQ(0, results.size());
}
}
static void BM_RegisterOneByOne(::testing::benchmark::State& state) {
TensorShape shape({static_cast<int>(state.max_iterations), 41});
TensorSliceSet slice_set(shape, DT_INT32);
int i = 0;
for (auto s : state) {
TensorSlice part({{i, 1}, {0, -1}});
TF_CHECK_OK(slice_set.Register(part, part.DebugString()));
++i;
}
}
BENCHMARK(BM_RegisterOneByOne);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_slice_set.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_slice_set_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b9f15842-1826-48a4-bfa0-e2fbdeeca04f | cpp | tensorflow/tensorflow | convert_async_collectives_to_sync | third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync.cc | third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync_test.cc | #include "xla/service/gpu/transforms/convert_async_collectives_to_sync.h"
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::Status GpuConvertAsyncCollectivesToSync::ConvertAsyncInstructionsToSync(
HloComputation* computation,
absl::Span<const std::pair<HloInstruction*, HloInstruction*>> async_pairs)
const {
absl::flat_hash_map<HloInstruction*, HloInstruction*> replaced_ops;
CollectiveBackendConfig sync_config;
sync_config.set_is_sync(true);
for (auto& [async_start, async_done] : async_pairs) {
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
async_start->backend_config<GpuBackendConfig>());
*gpu_config.mutable_collective_backend_config() = sync_config;
TF_RETURN_IF_ERROR(async_start->set_backend_config(gpu_config));
replaced_ops[async_start] = nullptr;
replaced_ops[async_done] = async_start;
}
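  // Rebuild the scheduled sequence: each async-start is dropped from its old
  // slot (mapped to nullptr above) and re-emitted directly before its
  // matching done.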
HloModule* module = computation->parent();
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
std::vector<HloInstruction*> new_sequence;
new_sequence.reserve(sequence.size());
for (HloInstruction* instr : sequence.instructions()) {
auto it = replaced_ops.find(instr);
if (it == replaced_ops.end()) {
new_sequence.push_back(instr);
continue;
}
if (it->second == nullptr) {
continue;
}
new_sequence.push_back(it->second);
new_sequence.push_back(instr);
}
module->schedule().set_sequence(computation, new_sequence);
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/transforms/convert_async_collectives_to_sync.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::IsFalse;
using ::testing::IsTrue;
class GpuConvertAsyncCollectivesToSyncTest : public HloTestBase {
public:
absl::Status RunPass(HloModule *module, bool expect_change,
HloPredicate is_nop = {}) {
TF_ASSIGN_OR_RETURN(bool changed,
GpuConvertAsyncCollectivesToSync{is_nop}.Run(module));
EXPECT_EQ(changed, expect_change);
return absl::OkStatus();
}
bool IsSync(HloModule *module, std::string_view name) {
const HloInstruction *inst = FindInstruction(module, name);
if (inst == nullptr) {
return false;
}
auto backend_config = inst->backend_config<GpuBackendConfig>()
.value()
.collective_backend_config();
return backend_config.is_sync();
}
HloPredicate is_nop_simple_ =
HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kGetTupleElement,
HloOpcode::kParameter>;
};
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduce) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNop) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3, replica_groups={{0,1}, {2,3}}
id2 = f32[] bitcast(id)
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true, is_nop_simple_));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectiveBroadcast) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
collective_broadcast {
p0 = u32[8] parameter(0)
ROOT result = u32[8] collective-broadcast(p0), replica_groups={{0,1}, {2,3}}
}
ENTRY main {
data = u32[8] parameter(0)
cb-start = ((u32[8]{0}), u32[8]{0}) async-start(u32[8]{0} %data), calls=collective_broadcast
ROOT %ars = u32[8]{0} async-done(((u32[8]{0}), u32[8]{0}) %cb-start), calls=collective_broadcast
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "cb-start"), IsTrue());
}
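// Descriptive note (added): a real computation (`id2 = add(...)`) between
// start and done means work was meant to overlap with the collective, so the
// pass must leave the pair async and report no change.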
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNonNop) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
id2 = u32[] add(id, id)
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), false));
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllGather) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
a1 = u32[1, 2] parameter(0)
ags = (u32[1, 2], u32[2, 2]) all-gather-start(a1), dimensions={0}, channel_id=3
ROOT allgather = u32[2,2] all-gather-done(ags)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "ags"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectivePermute) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
p = u32[2] parameter(0)
start = (u32[2], u32[2], u32[], u32[]) collective-permute-start(p), source_target_pairs={{0,1}, {1,0}}
ROOT done = u32[2] collective-permute-done(start)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleReduceScatter) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
add {
lhs = u32[] parameter(0)
rhs = u32[] parameter(1)
ROOT add = u32[] add(lhs, rhs)
}
reduce_scatter {
p0 = u32[8] parameter(0)
ROOT result = u32[4] reduce-scatter(p0), replica_groups={{0,3}, {1,2}},
dimensions={0}, to_apply=add
}
ENTRY main {
data = u32[8] parameter(0)
rs-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} %data), calls=reduce_scatter
ROOT %ars = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) %rs-start), calls=reduce_scatter
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "rs-start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllToAll) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
all_to_all {
p0 = u32[2] parameter(0)
ROOT result = u32[2] all-to-all(p0), dimensions={0}, replica_groups={{0,1},{2,3}}
}
ENTRY test_computation {
a1 = u32[2] parameter(0)
a2a-start = ((u32[2]), u32[2]) async-start(u32[2] a1), calls=all_to_all
ROOT a2s = u32[2] async-done(a2a-start), calls=all_to_all
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "a2a-start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, ControlDeps) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
done1 = u32[] all-reduce-done(start1)
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4, control-predecessors={done1}
done2 = u32[] all-reduce-done(start2)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightStreaming) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done1 = u32[] all-reduce-done(start1)
done2 = u32[] all-reduce-done(start2)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNested) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done2 = u32[] all-reduce-done(start2)
done1 = u32[] all-reduce-done(start1)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
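// Descriptive note (added): only the inner pair is convertible here — the add
// on `done2` executes before `done1`, so start2/done2 collapse to sync while
// start1 stays async.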
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNestedPartial) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done2 = u32[] all-reduce-done(start2)
id2 = u32[] add(done2, done2)
done1 = u32[] all-reduce-done(start1)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsFalse());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/convert_async_collectives_to_sync_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6685bdc8-3ebc-494e-b6e0-36ecd722d50c | cpp | tensorflow/tensorflow | map_util | tensorflow/core/lib/gtl/map_util.h | third_party/xla/xla/tsl/lib/gtl/map_util_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_MAP_UTIL_H_
#define TENSORFLOW_CORE_LIB_GTL_MAP_UTIL_H_
#include "xla/tsl/lib/gtl/map_util.h"
namespace tensorflow {
namespace gtl {
using ::tsl::gtl::EraseKeyReturnValuePtr;
using ::tsl::gtl::FindOrNull;
using ::tsl::gtl::FindPtrOrNull;
using ::tsl::gtl::FindWithDefault;
using ::tsl::gtl::InsertIfNotPresent;
using ::tsl::gtl::InsertOrUpdate;
using ::tsl::gtl::LookupOrInsert;
using ::tsl::gtl::ReverseMap;
}
}
#endif | #include "xla/tsl/lib/gtl/map_util.h"
#include <map>
#include <set>
#include <string>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
TEST(MapUtil, Find) {
typedef std::map<string, string> Map;
Map m;
EXPECT_EQ("", gtl::FindWithDefault(m, "foo", ""));
m["foo"] = "bar";
EXPECT_EQ("bar", gtl::FindWithDefault(m, "foo", ""));
EXPECT_EQ("bar", *gtl::FindOrNull(m, "foo"));
EXPECT_TRUE(m.count("foo") > 0);
EXPECT_EQ(m["foo"], "bar");
}
TEST(MapUtil, LookupOrInsert) {
typedef std::map<string, string> Map;
Map m;
EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "xyz"));
EXPECT_EQ("xyz", gtl::LookupOrInsert(&m, "foo", "abc"));
}
TEST(MapUtil, InsertIfNotPresent) {
typedef std::set<int> Set;
Set s;
EXPECT_TRUE(gtl::InsertIfNotPresent(&s, 0));
EXPECT_EQ(s.count(0), 1);
EXPECT_FALSE(gtl::InsertIfNotPresent(&s, 0));
EXPECT_EQ(s.count(0), 1);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/map_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/gtl/map_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe4ae289-cef5-456c-802a-cfe0bf6dd346 | cpp | google/libaddressinput | preload_supplier | cpp/src/preload_supplier.cc | cpp/test/preload_supplier_test.cc | #include <libaddressinput/preload_supplier.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/supplier.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <map>
#include <memory>
#include <set>
#include <stack>
#include <string>
#include <vector>
#include "lookup_key.h"
#include "region_data_constants.h"
#include "retriever.h"
#include "rule.h"
#include "util/json.h"
#include "util/size.h"
#include "util/string_compare.h"
namespace i18n {
namespace addressinput {
namespace {
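// Index keys are compared with a loose, locale-aware ordering (see
// StringCompare::NaturalLess), so lookups can tolerate differences such as
// letter case in user-supplied region names.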
class IndexLess {
public:
bool operator()(const std::string& a, const std::string& b) const {
static const StringCompare kStringCompare;
return kStringCompare.NaturalLess(a, b);
}
};
}
class IndexMap : public std::map<std::string, const Rule*, IndexLess> {};
namespace {
class Helper {
public:
Helper(const Helper&) = delete;
Helper& operator=(const Helper&) = delete;
Helper(const std::string& region_code, const std::string& key,
const PreloadSupplier::Callback& loaded, const Retriever& retriever,
std::set<std::string>* pending, IndexMap* rule_index,
IndexMap* language_rule_index, std::vector<const Rule*>* rule_storage,
std::map<std::string, const Rule*>* region_rules)
: region_code_(region_code),
loaded_(loaded),
pending_(pending),
rule_index_(rule_index),
language_rule_index_(language_rule_index),
rule_storage_(rule_storage),
region_rules_(region_rules),
retrieved_(BuildCallback(this, &Helper::OnRetrieved)) {
assert(pending_ != nullptr);
assert(rule_index_ != nullptr);
assert(rule_storage_ != nullptr);
assert(region_rules_ != nullptr);
assert(retrieved_ != nullptr);
pending_->insert(key);
retriever.Retrieve(key, *retrieved_);
}
private:
~Helper() = default;
void OnRetrieved(bool success, const std::string& key,
const std::string& data) {
int rule_count = 0;
size_t status = pending_->erase(key);
assert(status == 1);
(void)status;
Json json;
std::string id;
std::vector<const Rule*> sub_rules;
auto last_index_it = rule_index_->end();
auto last_latin_it = rule_index_->end();
auto language_index_it = language_rule_index_->end();
auto last_region_it = region_rules_->end();
IndexMap::const_iterator hints[size(LookupKey::kHierarchy) - 1];
std::fill(hints, hints + size(hints), rule_index_->end());
if (!success) {
goto callback;
}
if (!json.ParseObject(data)) {
success = false;
goto callback;
}
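    // First pass: build a Rule for every sub-dictionary in the response and
    // index it by its canonical id ("data/XX/..."); COUNTRY-level rules start
    // from the library defaults before parsing.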
for (auto ptr : json.GetSubDictionaries()) {
assert(ptr != nullptr);
if (!ptr->GetStringValueForKey("id", &id)) {
success = false;
goto callback;
}
assert(!id.empty());
size_t depth = std::count(id.begin(), id.end(), '/') - 1;
assert(depth < size(LookupKey::kHierarchy));
AddressField field = LookupKey::kHierarchy[depth];
auto* rule = new Rule;
if (field == COUNTRY) {
rule->CopyFrom(Rule::GetDefault());
}
rule->ParseJsonRule(*ptr);
assert(id == rule->GetId());
rule_storage_->push_back(rule);
if (depth > 0) {
sub_rules.push_back(rule);
}
last_index_it = rule_index_->emplace_hint(last_index_it, id, rule);
last_region_it = region_rules_->emplace_hint(last_region_it, id, rule);
++rule_count;
}
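    // Second pass: for every sub-rule, climb the id hierarchy to synthesize
    // human-readable and Latin-script index keys, so lookups by display name
    // (and by language-tagged name) resolve to the same Rule.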
for (auto ptr : sub_rules) {
assert(ptr != nullptr);
std::stack<const Rule*> hierarchy;
hierarchy.push(ptr);
for (std::string parent_id(ptr->GetId());;) {
std::string::size_type pos = parent_id.rfind('/');
if (pos == sizeof "data/ZZ" - 1) {
break;
}
parent_id.resize(pos);
IndexMap::const_iterator* const hint = &hints[hierarchy.size() - 1];
if (*hint == rule_index_->end() || (*hint)->first != parent_id) {
*hint = rule_index_->find(parent_id);
}
assert(*hint != rule_index_->end());
hierarchy.push((*hint)->second);
}
std::string human_id(ptr->GetId().substr(0, sizeof "data/ZZ" - 1));
std::string latin_id(human_id);
for (; !hierarchy.empty(); hierarchy.pop()) {
const Rule* rule = hierarchy.top();
human_id.push_back('/');
if (!rule->GetName().empty()) {
human_id.append(rule->GetName());
} else {
const std::string& id = rule->GetId();
std::string::size_type pos = id.rfind('/');
assert(pos != std::string::npos);
human_id.append(id.substr(pos + 1));
}
if (!rule->GetLatinName().empty()) {
latin_id.push_back('/');
latin_id.append(rule->GetLatinName());
}
}
{
const std::string& id = ptr->GetId();
std::string::size_type pos = id.rfind("--");
if (pos != std::string::npos) {
language_index_it = language_rule_index_->emplace_hint(
language_index_it, human_id, ptr);
human_id.append(id, pos, id.size() - pos);
}
}
last_index_it = rule_index_->emplace_hint(last_index_it, human_id, ptr);
if (std::count(human_id.begin(), human_id.end(), '/') ==
std::count(latin_id.begin(), latin_id.end(), '/')) {
last_latin_it = rule_index_->emplace_hint(last_latin_it, latin_id, ptr);
}
}
callback:
loaded_(success, region_code_, rule_count);
delete this;
}
const std::string region_code_;
const PreloadSupplier::Callback& loaded_;
std::set<std::string>* const pending_;
IndexMap* const rule_index_;
IndexMap* const language_rule_index_;
std::vector<const Rule*>* const rule_storage_;
std::map<std::string, const Rule*>* const region_rules_;
const std::unique_ptr<const Retriever::Callback> retrieved_;
};
std::string KeyFromRegionCode(const std::string& region_code) {
AddressData address;
address.region_code = region_code;
LookupKey lookup_key;
lookup_key.FromAddress(address);
return lookup_key.ToKeyString(0);
}
}
PreloadSupplier::PreloadSupplier(const Source* source, Storage* storage)
: retriever_(new Retriever(source, storage)),
pending_(),
rule_index_(new IndexMap),
language_rule_index_(new IndexMap),
rule_storage_(),
region_rules_() {}
PreloadSupplier::~PreloadSupplier() {
for (auto ptr : rule_storage_) {
delete ptr;
}
}
void PreloadSupplier::Supply(const LookupKey& lookup_key,
const Supplier::Callback& supplied) {
Supplier::RuleHierarchy hierarchy;
bool success = GetRuleHierarchy(lookup_key, &hierarchy, false);
supplied(success, lookup_key, hierarchy);
}
void PreloadSupplier::SupplyGlobally(const LookupKey& lookup_key,
const Supplier::Callback& supplied) {
Supplier::RuleHierarchy hierarchy;
bool success = GetRuleHierarchy(lookup_key, &hierarchy, true);
supplied(success, lookup_key, hierarchy);
}
const Rule* PreloadSupplier::GetRule(const LookupKey& lookup_key) const {
assert(IsLoaded(lookup_key.GetRegionCode()));
Supplier::RuleHierarchy hierarchy;
if (!GetRuleHierarchy(lookup_key, &hierarchy, false)) {
return nullptr;
}
return hierarchy.rule[lookup_key.GetDepth()];
}
void PreloadSupplier::LoadRules(const std::string& region_code,
const Callback& loaded) {
const std::string key = KeyFromRegionCode(region_code);
if (IsLoadedKey(key)) {
loaded(true, region_code, 0);
return;
}
if (IsPendingKey(key)) {
return;
}
new Helper(region_code, key, loaded, *retriever_, &pending_,
rule_index_.get(), language_rule_index_.get(), &rule_storage_,
®ion_rules_[region_code]);
}
const std::map<std::string, const Rule*>& PreloadSupplier::GetRulesForRegion(
const std::string& region_code) const {
assert(IsLoaded(region_code));
return region_rules_.find(region_code)->second;
}
bool PreloadSupplier::IsLoaded(const std::string& region_code) const {
return IsLoadedKey(KeyFromRegionCode(region_code));
}
bool PreloadSupplier::IsPending(const std::string& region_code) const {
return IsPendingKey(KeyFromRegionCode(region_code));
}
bool PreloadSupplier::GetRuleHierarchy(const LookupKey& lookup_key,
RuleHierarchy* hierarchy,
const bool search_globally) const {
assert(hierarchy != nullptr);
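  // Fill the hierarchy one depth level at a time; a miss below the country
  // level still counts as success and yields a partially filled hierarchy.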
if (RegionDataConstants::IsSupported(lookup_key.GetRegionCode())) {
size_t max_depth = std::min(
lookup_key.GetDepth(),
RegionDataConstants::GetMaxLookupKeyDepth(lookup_key.GetRegionCode()));
for (size_t depth = 0; depth <= max_depth; ++depth) {
const std::string key = lookup_key.ToKeyString(depth);
const Rule* rule = nullptr;
auto it = rule_index_->find(key);
if (it != rule_index_->end()) {
rule = it->second;
} else if (search_globally && depth > 0 &&
!hierarchy->rule[0]->GetLanguages().empty()) {
it = language_rule_index_->find(key);
if (it != language_rule_index_->end()) {
rule = it->second;
}
}
if (rule == nullptr) {
return depth > 0;
}
hierarchy->rule[depth] = rule;
}
}
return true;
}
size_t PreloadSupplier::GetLoadedRuleDepth(
const std::string& region_code) const {
const size_t code_size = 7;
std::string full_code = region_code.substr(0, code_size);
size_t depth = 0;
auto it = rule_index_->find(full_code);
while (it != rule_index_->end()) {
const Rule* rule = it->second;
depth++;
if (rule->GetSubKeys().empty()) return depth;
full_code += "/" + rule->GetSubKeys()[0];
it = rule_index_->find(full_code);
}
return depth;
}
bool PreloadSupplier::IsLoadedKey(const std::string& key) const {
return rule_index_->find(key) != rule_index_->end();
}
bool PreloadSupplier::IsPendingKey(const std::string& key) const {
return pending_.find(key) != pending_.end();
}
}
} | #include <libaddressinput/preload_supplier.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/null_storage.h>
#include <libaddressinput/supplier.h>
#include <cstddef>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "lookup_key.h"
#include "rule.h"
#include "testdata_source.h"
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::BuildCallback;
using i18n::addressinput::LookupKey;
using i18n::addressinput::NullStorage;
using i18n::addressinput::PreloadSupplier;
using i18n::addressinput::Rule;
using i18n::addressinput::Supplier;
using i18n::addressinput::TestdataSource;
class PreloadSupplierTest : public testing::Test {
public:
PreloadSupplierTest(const PreloadSupplierTest&) = delete;
PreloadSupplierTest& operator=(const PreloadSupplierTest&) = delete;
protected:
PreloadSupplierTest()
: supplier_(new TestdataSource(true), new NullStorage),
loaded_callback_(BuildCallback(this, &PreloadSupplierTest::OnLoaded)),
supplied_callback_(
BuildCallback(this, &PreloadSupplierTest::OnSupplied)) {}
PreloadSupplier supplier_;
const std::unique_ptr<const PreloadSupplier::Callback> loaded_callback_;
const std::unique_ptr<const Supplier::Callback> supplied_callback_;
Supplier::RuleHierarchy hierarchy_;
private:
void OnLoaded(bool success, const std::string& region_code, int num_rules) {
ASSERT_TRUE(success);
ASSERT_FALSE(region_code.empty());
ASSERT_LT(0, num_rules);
ASSERT_TRUE(supplier_.IsLoaded(region_code));
}
void OnSupplied(bool success, const LookupKey& lookup_key,
const Supplier::RuleHierarchy& hierarchy) {
ASSERT_TRUE(success);
hierarchy_ = hierarchy;
}
};
TEST_F(PreloadSupplierTest, GetUsRule) {
supplier_.LoadRules("US", *loaded_callback_);
LookupKey us_key;
const AddressData us_address{.region_code = "US"};
us_key.FromAddress(us_address);
const Rule* rule = supplier_.GetRule(us_key);
ASSERT_TRUE(rule != nullptr);
EXPECT_EQ("data/US", rule->GetId());
}
TEST_F(PreloadSupplierTest, GetUsCaRule) {
supplier_.LoadRules("US", *loaded_callback_);
LookupKey ca_key;
const AddressData ca_address{
.region_code = "US",
.administrative_area = "CA",
};
ca_key.FromAddress(ca_address);
const Rule* rule = supplier_.GetRule(ca_key);
ASSERT_TRUE(rule != nullptr);
EXPECT_EQ("data/US/CA", rule->GetId());
}
TEST_F(PreloadSupplierTest, GetUsCaliforniaRule) {
supplier_.LoadRules("US", *loaded_callback_);
LookupKey ca_key;
const AddressData ca_address{
.region_code = "US",
.administrative_area = "California",
};
ca_key.FromAddress(ca_address);
const Rule* rule = supplier_.GetRule(ca_key);
ASSERT_TRUE(rule != nullptr);
EXPECT_EQ("data/US/CA", rule->GetId());
}
TEST_F(PreloadSupplierTest, GetZwRule) {
supplier_.LoadRules("ZW", *loaded_callback_);
LookupKey zw_key;
const AddressData zw_address{.region_code = "ZW"};
zw_key.FromAddress(zw_address);
const Rule* rule = supplier_.GetRule(zw_key);
ASSERT_TRUE(rule != nullptr);
EXPECT_EQ("data/ZW", rule->GetId());
}
TEST_F(PreloadSupplierTest, GetUnknownRule) {
supplier_.LoadRules("US", *loaded_callback_);
LookupKey unknown_key;
const AddressData unknown_address{
.region_code = "US",
.administrative_area = "ZZ",
};
unknown_key.FromAddress(unknown_address);
const Rule* rule = supplier_.GetRule(unknown_key);
EXPECT_TRUE(rule == nullptr);
}
TEST_F(PreloadSupplierTest, GetTooPreciseRule) {
supplier_.LoadRules("US", *loaded_callback_);
LookupKey precise_key;
const AddressData precise_address{
.region_code = "US",
.administrative_area = "CA",
.locality = "Mountain View",
};
precise_key.FromAddress(precise_address);
const Rule* rule = supplier_.GetRule(precise_key);
EXPECT_TRUE(rule == nullptr);
}
TEST_F(PreloadSupplierTest, GetRulesForRegion) {
supplier_.LoadRules("CN", *loaded_callback_);
const auto& rules = supplier_.GetRulesForRegion("CN");
EXPECT_TRUE(rules.find("data/CN") != rules.end());
EXPECT_LT(1U, rules.size());
}
TEST_F(PreloadSupplierTest, SupplyRegionCode) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "NB",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyGloballyRegionCode) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "NB",
};
key.FromAddress(address);
supplier_.SupplyGlobally(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyRegionName) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "New Brunswick",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyGloballyRegionName) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "New Brunswick",
};
key.FromAddress(address);
supplier_.SupplyGlobally(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
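// Descriptive note (added): Supply() without a matching language code cannot
// resolve the French region name, while SupplyGlobally() (further below) also
// consults the language-tagged index.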
TEST_F(PreloadSupplierTest, SupplyRegionNameLanguage) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "Nouveau-Brunswick",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
EXPECT_TRUE(hierarchy_.rule[1] == nullptr);
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyRegionNameLanguageSet) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "Nouveau-Brunswick",
.language_code = "fr",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB--fr", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyGloballyRegionNameLanguage) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "Nouveau-Brunswick",
};
key.FromAddress(address);
supplier_.SupplyGlobally(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB--fr", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyRegionNameHK) {
supplier_.LoadRules("HK", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "HK",
.administrative_area = "新界",
.locality = "大嶼山石壁",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/HK/新界", hierarchy_.rule[1]->GetId());
ASSERT_TRUE(hierarchy_.rule[2] != nullptr);
EXPECT_EQ("data/HK/新界/大嶼山石壁", hierarchy_.rule[2]->GetId());
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyGloballyRegionNameHKEnglish) {
supplier_.LoadRules("HK", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "HK",
.administrative_area = "New Territories",
.locality = "Tsing Yi",
};
key.FromAddress(address);
supplier_.SupplyGlobally(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/HK/New Territories--en", hierarchy_.rule[1]->GetId());
ASSERT_TRUE(hierarchy_.rule[2] != nullptr);
EXPECT_EQ("data/HK/New Territories/Tsing Yi--en",
hierarchy_.rule[2]->GetId());
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyRegionNameAllLevels) {
supplier_.LoadRules("CN", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CN",
.administrative_area = "云南省",
.locality = "临沧市",
.dependent_locality = "临翔区",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CN/云南省", hierarchy_.rule[1]->GetId());
ASSERT_TRUE(hierarchy_.rule[2] != nullptr);
EXPECT_EQ("data/CN/云南省/临沧市", hierarchy_.rule[2]->GetId());
ASSERT_TRUE(hierarchy_.rule[3] != nullptr);
EXPECT_EQ("data/CN/云南省/临沧市/临翔区", hierarchy_.rule[3]->GetId());
}
TEST_F(PreloadSupplierTest, GetLoadedRuleDepth) {
supplier_.LoadRules("CA", *loaded_callback_);
  EXPECT_EQ(2, supplier_.GetLoadedRuleDepth("data/CA"));
  EXPECT_EQ(0, supplier_.GetLoadedRuleDepth("data/CN"));
  supplier_.LoadRules("CN", *loaded_callback_);
  EXPECT_EQ(4, supplier_.GetLoadedRuleDepth("data/CN"));
  EXPECT_EQ(0, supplier_.GetLoadedRuleDepth("data/PP"));
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/preload_supplier.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/preload_supplier_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
d79cc799-2095-4818-baf9-bbf5f8ee9e8a | cpp | google/quiche | oblivious_http_header_key_config | quiche/oblivious_http/common/oblivious_http_header_key_config.cc | quiche/oblivious_http/common/oblivious_http_header_key_config_test.cc | #include "quiche/oblivious_http/common/oblivious_http_header_key_config.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "openssl/base.h"
#include "openssl/hpke.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_data_writer.h"
#include "quiche/common/quiche_endian.h"
namespace quiche {
namespace {
constexpr size_t kSizeOfHpkeKemId = 2;
constexpr size_t kSizeOfSymmetricAlgorithmHpkeKdfId = 2;
constexpr size_t kSizeOfSymmetricAlgorithmHpkeAeadId = 2;
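// Map OHTTP wire identifiers onto BoringSSL's HPKE primitives; anything not
// listed is rejected as unimplemented.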
absl::StatusOr<const EVP_HPKE_KEM*> CheckKemId(uint16_t kem_id) {
switch (kem_id) {
case EVP_HPKE_DHKEM_X25519_HKDF_SHA256:
return EVP_hpke_x25519_hkdf_sha256();
default:
return absl::UnimplementedError("No support for this KEM ID.");
}
}
absl::StatusOr<const EVP_HPKE_KDF*> CheckKdfId(uint16_t kdf_id) {
switch (kdf_id) {
case EVP_HPKE_HKDF_SHA256:
return EVP_hpke_hkdf_sha256();
default:
return absl::UnimplementedError("No support for this KDF ID.");
}
}
absl::StatusOr<const EVP_HPKE_AEAD*> CheckAeadId(uint16_t aead_id) {
switch (aead_id) {
case EVP_HPKE_AES_128_GCM:
return EVP_hpke_aes_128_gcm();
case EVP_HPKE_AES_256_GCM:
return EVP_hpke_aes_256_gcm();
case EVP_HPKE_CHACHA20_POLY1305:
return EVP_hpke_chacha20_poly1305();
default:
return absl::UnimplementedError("No support for this AEAD ID.");
}
}
}
ObliviousHttpHeaderKeyConfig::ObliviousHttpHeaderKeyConfig(uint8_t key_id,
uint16_t kem_id,
uint16_t kdf_id,
uint16_t aead_id)
: key_id_(key_id), kem_id_(kem_id), kdf_id_(kdf_id), aead_id_(aead_id) {}
absl::StatusOr<ObliviousHttpHeaderKeyConfig>
ObliviousHttpHeaderKeyConfig::Create(uint8_t key_id, uint16_t kem_id,
uint16_t kdf_id, uint16_t aead_id) {
ObliviousHttpHeaderKeyConfig instance(key_id, kem_id, kdf_id, aead_id);
auto is_config_ok = instance.ValidateKeyConfig();
if (!is_config_ok.ok()) {
return is_config_ok;
}
return instance;
}
absl::Status ObliviousHttpHeaderKeyConfig::ValidateKeyConfig() const {
auto supported_kem = CheckKemId(kem_id_);
if (!supported_kem.ok()) {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported KEM ID:", kem_id_));
}
auto supported_kdf = CheckKdfId(kdf_id_);
if (!supported_kdf.ok()) {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported KDF ID:", kdf_id_));
}
auto supported_aead = CheckAeadId(aead_id_);
if (!supported_aead.ok()) {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported AEAD ID:", aead_id_));
}
return absl::OkStatus();
}
const EVP_HPKE_KEM* ObliviousHttpHeaderKeyConfig::GetHpkeKem() const {
auto kem = CheckKemId(kem_id_);
QUICHE_CHECK_OK(kem.status());
return kem.value();
}
const EVP_HPKE_KDF* ObliviousHttpHeaderKeyConfig::GetHpkeKdf() const {
auto kdf = CheckKdfId(kdf_id_);
QUICHE_CHECK_OK(kdf.status());
return kdf.value();
}
const EVP_HPKE_AEAD* ObliviousHttpHeaderKeyConfig::GetHpkeAead() const {
auto aead = CheckAeadId(aead_id_);
QUICHE_CHECK_OK(aead.status());
return aead.value();
}
std::string ObliviousHttpHeaderKeyConfig::SerializeRecipientContextInfo(
absl::string_view request_label) const {
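  // The HPKE info string is the request label, a zero byte, then the key
  // configuration header (key id, KEM, KDF, and AEAD ids) in network byte
  // order.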
uint8_t zero_byte = 0x00;
int buf_len = request_label.size() + kHeaderLength + sizeof(zero_byte);
std::string info(buf_len, '\0');
QuicheDataWriter writer(info.size(), info.data());
QUICHE_CHECK(writer.WriteStringPiece(request_label));
QUICHE_CHECK(writer.WriteUInt8(zero_byte));
QUICHE_CHECK(writer.WriteUInt8(key_id_));
QUICHE_CHECK(writer.WriteUInt16(kem_id_));
QUICHE_CHECK(writer.WriteUInt16(kdf_id_));
QUICHE_CHECK(writer.WriteUInt16(aead_id_));
return info;
}
absl::Status ObliviousHttpHeaderKeyConfig::ParseOhttpPayloadHeader(
absl::string_view payload_bytes) const {
if (payload_bytes.empty()) {
return absl::InvalidArgumentError("Empty request payload.");
}
QuicheDataReader reader(payload_bytes);
return ParseOhttpPayloadHeader(reader);
}
absl::Status ObliviousHttpHeaderKeyConfig::ParseOhttpPayloadHeader(
QuicheDataReader& reader) const {
uint8_t key_id;
if (!reader.ReadUInt8(&key_id)) {
return absl::InvalidArgumentError("Failed to read key_id from header.");
}
if (key_id != key_id_) {
return absl::InvalidArgumentError(
absl::StrCat("KeyID in request:", static_cast<uint16_t>(key_id),
" doesn't match with server's public key "
"configuration KeyID:",
static_cast<uint16_t>(key_id_)));
}
uint16_t kem_id;
if (!reader.ReadUInt16(&kem_id)) {
return absl::InvalidArgumentError("Failed to read kem_id from header.");
}
if (kem_id != kem_id_) {
return absl::InvalidArgumentError(
absl::StrCat("Received Invalid kemID:", kem_id, " Expected:", kem_id_));
}
uint16_t kdf_id;
if (!reader.ReadUInt16(&kdf_id)) {
return absl::InvalidArgumentError("Failed to read kdf_id from header.");
}
if (kdf_id != kdf_id_) {
return absl::InvalidArgumentError(
absl::StrCat("Received Invalid kdfID:", kdf_id, " Expected:", kdf_id_));
}
uint16_t aead_id;
if (!reader.ReadUInt16(&aead_id)) {
return absl::InvalidArgumentError("Failed to read aead_id from header.");
}
if (aead_id != aead_id_) {
return absl::InvalidArgumentError(absl::StrCat(
"Received Invalid aeadID:", aead_id, " Expected:", aead_id_));
}
return absl::OkStatus();
}
absl::StatusOr<uint8_t>
ObliviousHttpHeaderKeyConfig::ParseKeyIdFromObliviousHttpRequestPayload(
absl::string_view payload_bytes) {
if (payload_bytes.empty()) {
return absl::InvalidArgumentError("Empty request payload.");
}
QuicheDataReader reader(payload_bytes);
uint8_t key_id;
if (!reader.ReadUInt8(&key_id)) {
return absl::InvalidArgumentError("Failed to read key_id from payload.");
}
return key_id;
}
std::string ObliviousHttpHeaderKeyConfig::SerializeOhttpPayloadHeader() const {
int buf_len =
sizeof(key_id_) + sizeof(kem_id_) + sizeof(kdf_id_) + sizeof(aead_id_);
std::string hdr(buf_len, '\0');
QuicheDataWriter writer(hdr.size(), hdr.data());
QUICHE_CHECK(writer.WriteUInt8(key_id_));
QUICHE_CHECK(writer.WriteUInt16(kem_id_));
QUICHE_CHECK(writer.WriteUInt16(kdf_id_));
QUICHE_CHECK(writer.WriteUInt16(aead_id_));
return hdr;
}
namespace {
absl::StatusOr<uint16_t> KeyLength(uint16_t kem_id) {
auto supported_kem = CheckKemId(kem_id);
if (!supported_kem.ok()) {
return absl::InvalidArgumentError(absl::StrCat(
"Unsupported KEM ID:", kem_id, ". public key length is unknown."));
}
return EVP_HPKE_KEM_public_key_len(supported_kem.value());
}
absl::StatusOr<std::string> SerializeOhttpKeyWithPublicKey(
uint8_t key_id, absl::string_view public_key,
const std::vector<ObliviousHttpHeaderKeyConfig>& ohttp_configs) {
auto ohttp_config = ohttp_configs[0];
static_assert(sizeof(ohttp_config.GetHpkeKemId()) == kSizeOfHpkeKemId &&
sizeof(ohttp_config.GetHpkeKdfId()) ==
kSizeOfSymmetricAlgorithmHpkeKdfId &&
sizeof(ohttp_config.GetHpkeAeadId()) ==
kSizeOfSymmetricAlgorithmHpkeAeadId,
"Size of HPKE IDs should match RFC specification.");
uint16_t symmetric_algs_length =
ohttp_configs.size() * (kSizeOfSymmetricAlgorithmHpkeKdfId +
kSizeOfSymmetricAlgorithmHpkeAeadId);
int buf_len = sizeof(key_id) + kSizeOfHpkeKemId + public_key.size() +
sizeof(symmetric_algs_length) + symmetric_algs_length;
std::string ohttp_key_configuration(buf_len, '\0');
QuicheDataWriter writer(ohttp_key_configuration.size(),
ohttp_key_configuration.data());
if (!writer.WriteUInt8(key_id)) {
return absl::InternalError("Failed to serialize OHTTP key.[key_id]");
}
if (!writer.WriteUInt16(ohttp_config.GetHpkeKemId())) {
return absl::InternalError(
"Failed to serialize OHTTP key.[kem_id]");
}
if (!writer.WriteStringPiece(public_key)) {
return absl::InternalError(
"Failed to serialize OHTTP key.[public_key]");
}
if (!writer.WriteUInt16(symmetric_algs_length)) {
return absl::InternalError(
"Failed to serialize OHTTP key.[symmetric_algs_length]");
}
for (const auto& item : ohttp_configs) {
if (item.GetHpkeKemId() != ohttp_config.GetHpkeKemId()) {
QUICHE_BUG(ohttp_key_configs_builder_parser)
<< "ObliviousHttpKeyConfigs object cannot hold ConfigMap of "
"different KEM IDs:[ "
<< item.GetHpkeKemId() << "," << ohttp_config.GetHpkeKemId()
<< " ]for a given key_id:" << static_cast<uint16_t>(key_id);
}
if (!writer.WriteUInt16(item.GetHpkeKdfId())) {
return absl::InternalError(
"Failed to serialize OHTTP key.[kdf_id]");
}
if (!writer.WriteUInt16(item.GetHpkeAeadId())) {
return absl::InternalError(
"Failed to serialize OHTTP key.[aead_id]");
}
}
QUICHE_DCHECK_EQ(writer.remaining(), 0u);
return ohttp_key_configuration;
}
std::string GetDebugStringForFailedKeyConfig(
const ObliviousHttpKeyConfigs::OhttpKeyConfig& failed_key_config) {
std::string debug_string = "[ ";
absl::StrAppend(&debug_string,
"key_id:", static_cast<uint16_t>(failed_key_config.key_id),
" , kem_id:", failed_key_config.kem_id,
". Printing HEX formatted public_key:",
absl::BytesToHexString(failed_key_config.public_key));
absl::StrAppend(&debug_string, ", symmetric_algorithms: { ");
for (const auto& symmetric_config : failed_key_config.symmetric_algorithms) {
absl::StrAppend(&debug_string, "{kdf_id: ", symmetric_config.kdf_id,
", aead_id:", symmetric_config.aead_id, " }");
}
absl::StrAppend(&debug_string, " } ]");
return debug_string;
}
absl::Status StoreKeyConfigIfValid(
ObliviousHttpKeyConfigs::OhttpKeyConfig key_config,
absl::btree_map<uint8_t, std::vector<ObliviousHttpHeaderKeyConfig>,
std::greater<uint8_t>>& configs,
absl::flat_hash_map<uint8_t, std::string>& keys) {
if (!CheckKemId(key_config.kem_id).ok() ||
key_config.public_key.size() != KeyLength(key_config.kem_id).value()) {
QUICHE_LOG(ERROR) << "Failed to process: "
<< GetDebugStringForFailedKeyConfig(key_config);
return absl::InvalidArgumentError(
absl::StrCat("Invalid key_config! [KEM ID:", key_config.kem_id, "]"));
}
for (const auto& symmetric_config : key_config.symmetric_algorithms) {
if (!CheckKdfId(symmetric_config.kdf_id).ok() ||
!CheckAeadId(symmetric_config.aead_id).ok()) {
QUICHE_LOG(ERROR) << "Failed to process: "
<< GetDebugStringForFailedKeyConfig(key_config);
return absl::InvalidArgumentError(
absl::StrCat("Invalid key_config! [KDF ID:", symmetric_config.kdf_id,
", AEAD ID:", symmetric_config.aead_id, "]"));
}
auto ohttp_config = ObliviousHttpHeaderKeyConfig::Create(
key_config.key_id, key_config.kem_id, symmetric_config.kdf_id,
symmetric_config.aead_id);
if (ohttp_config.ok()) {
configs[key_config.key_id].emplace_back(std::move(ohttp_config.value()));
}
}
keys.emplace(key_config.key_id, std::move(key_config.public_key));
return absl::OkStatus();
}
}
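// Parses a concatenation of serialized key configs, reading one
// length-delimited config at a time until the input is exhausted.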
absl::StatusOr<ObliviousHttpKeyConfigs>
ObliviousHttpKeyConfigs::ParseConcatenatedKeys(absl::string_view key_config) {
ConfigMap configs;
PublicKeyMap keys;
auto reader = QuicheDataReader(key_config);
while (!reader.IsDoneReading()) {
absl::Status status = ReadSingleKeyConfig(reader, configs, keys);
if (!status.ok()) return status;
}
return ObliviousHttpKeyConfigs(std::move(configs), std::move(keys));
}
absl::StatusOr<ObliviousHttpKeyConfigs> ObliviousHttpKeyConfigs::Create(
absl::flat_hash_set<ObliviousHttpKeyConfigs::OhttpKeyConfig>
ohttp_key_configs) {
if (ohttp_key_configs.empty()) {
return absl::InvalidArgumentError("Empty input.");
}
ConfigMap configs_map;
PublicKeyMap keys_map;
for (auto& ohttp_key_config : ohttp_key_configs) {
auto result = StoreKeyConfigIfValid(std::move(ohttp_key_config),
configs_map, keys_map);
if (!result.ok()) {
return result;
}
}
auto oblivious_configs =
ObliviousHttpKeyConfigs(std::move(configs_map), std::move(keys_map));
return oblivious_configs;
}
absl::StatusOr<ObliviousHttpKeyConfigs> ObliviousHttpKeyConfigs::Create(
const ObliviousHttpHeaderKeyConfig& single_key_config,
absl::string_view public_key) {
if (public_key.empty()) {
return absl::InvalidArgumentError("Empty input.");
}
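  // The KEM ID was validated when single_key_config was created, so
  // KeyLength() cannot fail here.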
if (auto key_length = KeyLength(single_key_config.GetHpkeKemId());
public_key.size() != key_length.value()) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid key. Key size mismatch. Expected:", key_length.value(),
" Actual:", public_key.size()));
}
ConfigMap configs;
PublicKeyMap keys;
uint8_t key_id = single_key_config.GetKeyId();
keys.emplace(key_id, public_key);
configs[key_id].emplace_back(std::move(single_key_config));
return ObliviousHttpKeyConfigs(std::move(configs), std::move(keys));
}
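// Serializes all stored key configs back into the concatenated wire format.
// The config map iterates in descending key_id order, so the highest key_id
// is emitted first.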
absl::StatusOr<std::string> ObliviousHttpKeyConfigs::GenerateConcatenatedKeys()
const {
std::string concatenated_keys;
for (const auto& [key_id, ohttp_configs] : configs_) {
auto key = public_keys_.find(key_id);
if (key == public_keys_.end()) {
return absl::InternalError(
"Failed to serialize. No public key found for key_id");
}
auto serialized =
SerializeOhttpKeyWithPublicKey(key_id, key->second, ohttp_configs);
if (!serialized.ok()) {
return absl::InternalError("Failed to serialize OHTTP key configs.");
}
absl::StrAppend(&concatenated_keys, serialized.value());
}
return concatenated_keys;
}
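// configs_ is ordered by descending key_id, so begin() holds the config with
// the highest key_id.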
ObliviousHttpHeaderKeyConfig ObliviousHttpKeyConfigs::PreferredConfig() const {
return configs_.begin()->second.front();
}
absl::StatusOr<absl::string_view> ObliviousHttpKeyConfigs::GetPublicKeyForId(
uint8_t key_id) const {
auto key = public_keys_.find(key_id);
if (key == public_keys_.end()) {
return absl::NotFoundError("No public key found for key_id");
}
return key->second;
}
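// Reads one key config from the reader: key_id, kem_id, a fixed-length public
// key, and a 16-bit length-prefixed list of (kdf_id, aead_id) pairs.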
absl::Status ObliviousHttpKeyConfigs::ReadSingleKeyConfig(
QuicheDataReader& reader, ConfigMap& configs, PublicKeyMap& keys) {
uint8_t key_id;
uint16_t kem_id;
if (!reader.ReadUInt8(&key_id) || !reader.ReadUInt16(&kem_id)) {
return absl::InvalidArgumentError("Invalid key_config!");
}
auto maybe_key_length = KeyLength(kem_id);
if (!maybe_key_length.ok()) {
return maybe_key_length.status();
}
const int key_length = maybe_key_length.value();
std::string key_str(key_length, '\0');
if (!reader.ReadBytes(key_str.data(), key_length)) {
return absl::InvalidArgumentError("Invalid key_config!");
}
if (!keys.insert({key_id, std::move(key_str)}).second) {
return absl::InvalidArgumentError("Duplicate key_id's in key_config!");
}
absl::string_view alg_bytes;
if (!reader.ReadStringPiece16(&alg_bytes)) {
return absl::InvalidArgumentError("Invalid key_config!");
}
QuicheDataReader sub_reader(alg_bytes);
while (!sub_reader.IsDoneReading()) {
uint16_t kdf_id;
uint16_t aead_id;
if (!sub_reader.ReadUInt16(&kdf_id) || !sub_reader.ReadUInt16(&aead_id)) {
return absl::InvalidArgumentError("Invalid key_config!");
}
absl::StatusOr<ObliviousHttpHeaderKeyConfig> maybe_cfg =
ObliviousHttpHeaderKeyConfig::Create(key_id, kem_id, kdf_id, aead_id);
if (!maybe_cfg.ok()) {
return maybe_cfg.status();
}
configs[key_id].emplace_back(std::move(maybe_cfg.value()));
}
return absl::OkStatus();
}
} | #include "quiche/oblivious_http/common/oblivious_http_header_key_config.h"
#include <cstdint>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "openssl/hpke.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_data_writer.h"
namespace quiche {
namespace {
using ::testing::AllOf;
using ::testing::Property;
using ::testing::StrEq;
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
std::string BuildHeader(uint8_t key_id, uint16_t kem_id, uint16_t kdf_id,
uint16_t aead_id) {
int buf_len =
sizeof(key_id) + sizeof(kem_id) + sizeof(kdf_id) + sizeof(aead_id);
std::string hdr(buf_len, '\0');
QuicheDataWriter writer(hdr.size(), hdr.data());
EXPECT_TRUE(writer.WriteUInt8(key_id));
EXPECT_TRUE(writer.WriteUInt16(kem_id));
EXPECT_TRUE(writer.WriteUInt16(kdf_id));
EXPECT_TRUE(writer.WriteUInt16(aead_id));
return hdr;
}
std::string GetSerializedKeyConfig(
ObliviousHttpKeyConfigs::OhttpKeyConfig& key_config) {
uint16_t symmetric_algs_length =
key_config.symmetric_algorithms.size() *
(sizeof(key_config.symmetric_algorithms.cbegin()->kdf_id) +
sizeof(key_config.symmetric_algorithms.cbegin()->aead_id));
int buf_len = sizeof(key_config.key_id) + sizeof(key_config.kem_id) +
key_config.public_key.size() + sizeof(symmetric_algs_length) +
symmetric_algs_length;
std::string ohttp_key(buf_len, '\0');
QuicheDataWriter writer(ohttp_key.size(), ohttp_key.data());
EXPECT_TRUE(writer.WriteUInt8(key_config.key_id));
EXPECT_TRUE(writer.WriteUInt16(key_config.kem_id));
EXPECT_TRUE(writer.WriteStringPiece(key_config.public_key));
EXPECT_TRUE(writer.WriteUInt16(symmetric_algs_length));
for (const auto& symmetric_alg : key_config.symmetric_algorithms) {
EXPECT_TRUE(writer.WriteUInt16(symmetric_alg.kdf_id));
EXPECT_TRUE(writer.WriteUInt16(symmetric_alg.aead_id));
}
return ohttp_key;
}
TEST(ObliviousHttpHeaderKeyConfig, TestSerializeRecipientContextInfo) {
uint8_t key_id = 3;
uint16_t kem_id = EVP_HPKE_DHKEM_X25519_HKDF_SHA256;
uint16_t kdf_id = EVP_HPKE_HKDF_SHA256;
uint16_t aead_id = EVP_HPKE_AES_256_GCM;
absl::string_view ohttp_req_label = "message/bhttp request";
std::string expected(ohttp_req_label);
uint8_t zero_byte = 0x00;
int buf_len = ohttp_req_label.size() + sizeof(zero_byte) + sizeof(key_id) +
sizeof(kem_id) + sizeof(kdf_id) + sizeof(aead_id);
expected.reserve(buf_len);
expected.push_back(zero_byte);
std::string ohttp_cfg(BuildHeader(key_id, kem_id, kdf_id, aead_id));
expected.insert(expected.end(), ohttp_cfg.begin(), ohttp_cfg.end());
auto instance =
ObliviousHttpHeaderKeyConfig::Create(key_id, kem_id, kdf_id, aead_id);
ASSERT_TRUE(instance.ok());
EXPECT_EQ(instance.value().SerializeRecipientContextInfo(), expected);
}
TEST(ObliviousHttpHeaderKeyConfig, TestValidKeyConfig) {
auto valid_key_config = ObliviousHttpHeaderKeyConfig::Create(
2, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, EVP_HPKE_HKDF_SHA256,
EVP_HPKE_AES_256_GCM);
ASSERT_TRUE(valid_key_config.ok());
}
TEST(ObliviousHttpHeaderKeyConfig, TestInvalidKeyConfig) {
auto invalid_kem = ObliviousHttpHeaderKeyConfig::Create(
3, 0, EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM);
EXPECT_EQ(invalid_kem.status().code(), absl::StatusCode::kInvalidArgument);
auto invalid_kdf = ObliviousHttpHeaderKeyConfig::Create(
3, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, 0, EVP_HPKE_AES_256_GCM);
EXPECT_EQ(invalid_kdf.status().code(), absl::StatusCode::kInvalidArgument);
auto invalid_aead = ObliviousHttpHeaderKeyConfig::Create(
3, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, EVP_HPKE_HKDF_SHA256, 0);
  EXPECT_EQ(invalid_aead.status().code(), absl::StatusCode::kInvalidArgument);
}
TEST(ObliviousHttpHeaderKeyConfig, TestParsingValidHeader) {
auto instance = ObliviousHttpHeaderKeyConfig::Create(
5, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, EVP_HPKE_HKDF_SHA256,
EVP_HPKE_AES_256_GCM);
ASSERT_TRUE(instance.ok());
std::string good_hdr(BuildHeader(5, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM));
ASSERT_TRUE(instance.value().ParseOhttpPayloadHeader(good_hdr).ok());
}
TEST(ObliviousHttpHeaderKeyConfig, TestParsingInvalidHeader) {
auto instance = ObliviousHttpHeaderKeyConfig::Create(
8, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, EVP_HPKE_HKDF_SHA256,
EVP_HPKE_AES_256_GCM);
ASSERT_TRUE(instance.ok());
std::string keyid_mismatch_hdr(
BuildHeader(0, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, EVP_HPKE_HKDF_SHA256,
EVP_HPKE_AES_256_GCM));
EXPECT_EQ(instance.value().ParseOhttpPayloadHeader(keyid_mismatch_hdr).code(),
absl::StatusCode::kInvalidArgument);
std::string invalid_hpke_hdr(BuildHeader(8, 0, 0, 0));
EXPECT_EQ(instance.value().ParseOhttpPayloadHeader(invalid_hpke_hdr).code(),
absl::StatusCode::kInvalidArgument);
}
TEST(ObliviousHttpHeaderKeyConfig, TestParsingKeyIdFromObliviousHttpRequest) {
std::string key_id(sizeof(uint8_t), '\0');
QuicheDataWriter writer(key_id.size(), key_id.data());
EXPECT_TRUE(writer.WriteUInt8(99));
auto parsed_key_id =
ObliviousHttpHeaderKeyConfig::ParseKeyIdFromObliviousHttpRequestPayload(
key_id);
ASSERT_TRUE(parsed_key_id.ok());
EXPECT_EQ(parsed_key_id.value(), 99);
}
TEST(ObliviousHttpHeaderKeyConfig, TestCopyable) {
auto obj1 = ObliviousHttpHeaderKeyConfig::Create(
4, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, EVP_HPKE_HKDF_SHA256,
EVP_HPKE_AES_256_GCM);
ASSERT_TRUE(obj1.ok());
auto copy_obj1_to_obj2 = obj1.value();
EXPECT_EQ(copy_obj1_to_obj2.kHeaderLength, obj1->kHeaderLength);
EXPECT_EQ(copy_obj1_to_obj2.SerializeRecipientContextInfo(),
obj1->SerializeRecipientContextInfo());
}
TEST(ObliviousHttpHeaderKeyConfig, TestSerializeOhttpPayloadHeader) {
auto instance = ObliviousHttpHeaderKeyConfig::Create(
7, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, EVP_HPKE_HKDF_SHA256,
EVP_HPKE_AES_128_GCM);
ASSERT_TRUE(instance.ok());
EXPECT_EQ(instance->SerializeOhttpPayloadHeader(),
BuildHeader(7, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_128_GCM));
}
MATCHER_P(HasKeyId, id, "") {
*result_listener << "has key_id=" << arg.GetKeyId();
return arg.GetKeyId() == id;
}
MATCHER_P(HasKemId, id, "") {
*result_listener << "has kem_id=" << arg.GetHpkeKemId();
return arg.GetHpkeKemId() == id;
}
MATCHER_P(HasKdfId, id, "") {
*result_listener << "has kdf_id=" << arg.GetHpkeKdfId();
return arg.GetHpkeKdfId() == id;
}
MATCHER_P(HasAeadId, id, "") {
*result_listener << "has aead_id=" << arg.GetHpkeAeadId();
return arg.GetHpkeAeadId() == id;
}
TEST(ObliviousHttpKeyConfigs, SingleKeyConfig) {
std::string key;
ASSERT_TRUE(absl::HexStringToBytes(
"4b0020f83e0a17cbdb18d2684dd2a9b087a43e5f3fa3fa27a049bc746a6e97a1e0244b00"
"0400010002",
&key));
auto configs = ObliviousHttpKeyConfigs::ParseConcatenatedKeys(key).value();
EXPECT_THAT(configs, Property(&ObliviousHttpKeyConfigs::NumKeys, 1));
EXPECT_THAT(
configs.PreferredConfig(),
AllOf(HasKeyId(0x4b), HasKemId(EVP_HPKE_DHKEM_X25519_HKDF_SHA256),
HasKdfId(EVP_HPKE_HKDF_SHA256), HasAeadId(EVP_HPKE_AES_256_GCM)));
std::string expected_public_key;
ASSERT_TRUE(absl::HexStringToBytes(
"f83e0a17cbdb18d2684dd2a9b087a43e5f3fa3fa27a049bc746a6e97a1e0244b",
&expected_public_key));
EXPECT_THAT(
configs.GetPublicKeyForId(configs.PreferredConfig().GetKeyId()).value(),
StrEq(expected_public_key));
}
TEST(ObliviousHttpKeyConfigs, TwoSimilarKeyConfigs) {
std::string key;
ASSERT_TRUE(absl::HexStringToBytes(
"4b0020f83e0a17cbdb18d2684dd2a9b087a43e5f3fa3fa27a049bc746a6e97a1e0244b00"
"0400010002"
"4f0020f83e0a17cbdb18d2684dd2a9b087a43e5f3fa3fa27a049bc746a6e97a1e0244b00"
"0400010001",
&key));
EXPECT_THAT(ObliviousHttpKeyConfigs::ParseConcatenatedKeys(key).value(),
Property(&ObliviousHttpKeyConfigs::NumKeys, 2));
EXPECT_THAT(
ObliviousHttpKeyConfigs::ParseConcatenatedKeys(key)->PreferredConfig(),
AllOf(HasKeyId(0x4f), HasKemId(EVP_HPKE_DHKEM_X25519_HKDF_SHA256),
HasKdfId(EVP_HPKE_HKDF_SHA256), HasAeadId(EVP_HPKE_AES_128_GCM)));
}
TEST(ObliviousHttpKeyConfigs, RFCExample) {
std::string key;
ASSERT_TRUE(absl::HexStringToBytes(
"01002031e1f05a740102115220e9af918f738674aec95f54db6e04eb705aae8e79815500"
"080001000100010003",
&key));
auto configs = ObliviousHttpKeyConfigs::ParseConcatenatedKeys(key).value();
EXPECT_THAT(configs, Property(&ObliviousHttpKeyConfigs::NumKeys, 1));
EXPECT_THAT(
configs.PreferredConfig(),
AllOf(HasKeyId(0x01), HasKemId(EVP_HPKE_DHKEM_X25519_HKDF_SHA256),
HasKdfId(EVP_HPKE_HKDF_SHA256), HasAeadId(EVP_HPKE_AES_128_GCM)));
std::string expected_public_key;
ASSERT_TRUE(absl::HexStringToBytes(
"31e1f05a740102115220e9af918f738674aec95f54db6e04eb705aae8e798155",
&expected_public_key));
EXPECT_THAT(
configs.GetPublicKeyForId(configs.PreferredConfig().GetKeyId()).value(),
StrEq(expected_public_key));
}
TEST(ObliviousHttpKeyConfigs, DuplicateKeyId) {
std::string key;
ASSERT_TRUE(absl::HexStringToBytes(
"4b0020f83e0a17cbdb18d2684dd2a9b087a43e5f3fa3fa27a049bc746a6e97a1e0244b00"
"0400010002"
"4b0020f83e0a17cbdb18d2684dd2a9b087a43e5f3fa3fb27a049bc746a6e97a1e0244b00"
"0400010001",
&key));
EXPECT_FALSE(ObliviousHttpKeyConfigs::ParseConcatenatedKeys(key).ok());
}
TEST(ObliviousHttpHeaderKeyConfigs, TestCreateWithSingleKeyConfig) {
auto instance = ObliviousHttpHeaderKeyConfig::Create(
123, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, EVP_HPKE_HKDF_SHA256,
EVP_HPKE_CHACHA20_POLY1305);
EXPECT_TRUE(instance.ok());
std::string test_public_key(
EVP_HPKE_KEM_public_key_len(instance->GetHpkeKem()), 'a');
auto configs =
ObliviousHttpKeyConfigs::Create(instance.value(), test_public_key);
EXPECT_TRUE(configs.ok());
auto serialized_key = configs->GenerateConcatenatedKeys();
EXPECT_TRUE(serialized_key.ok());
auto ohttp_configs =
ObliviousHttpKeyConfigs::ParseConcatenatedKeys(serialized_key.value());
EXPECT_TRUE(ohttp_configs.ok());
ASSERT_EQ(ohttp_configs->PreferredConfig().GetKeyId(), 123);
auto parsed_public_key = ohttp_configs->GetPublicKeyForId(123);
EXPECT_TRUE(parsed_public_key.ok());
EXPECT_EQ(parsed_public_key.value(), test_public_key);
}
TEST(ObliviousHttpHeaderKeyConfigs, TestCreateWithMultipleKeys) {
std::string expected_preferred_public_key(32, 'b');
ObliviousHttpKeyConfigs::OhttpKeyConfig config1 = {
100,
EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
std::string(32, 'a'),
{{EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM}}};
ObliviousHttpKeyConfigs::OhttpKeyConfig config2 = {
200,
EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
expected_preferred_public_key,
{{EVP_HPKE_HKDF_SHA256, EVP_HPKE_CHACHA20_POLY1305}}};
auto configs = ObliviousHttpKeyConfigs::Create({config1, config2});
EXPECT_TRUE(configs.ok());
auto serialized_key = configs->GenerateConcatenatedKeys();
EXPECT_TRUE(serialized_key.ok());
ASSERT_EQ(serialized_key.value(),
absl::StrCat(GetSerializedKeyConfig(config2),
GetSerializedKeyConfig(config1)));
auto ohttp_configs =
ObliviousHttpKeyConfigs::ParseConcatenatedKeys(serialized_key.value());
EXPECT_TRUE(ohttp_configs.ok());
ASSERT_EQ(ohttp_configs->NumKeys(), 2);
EXPECT_THAT(configs->PreferredConfig(),
AllOf(HasKeyId(200), HasKemId(EVP_HPKE_DHKEM_X25519_HKDF_SHA256),
HasKdfId(EVP_HPKE_HKDF_SHA256),
HasAeadId(EVP_HPKE_CHACHA20_POLY1305)));
auto parsed_preferred_public_key = ohttp_configs->GetPublicKeyForId(
ohttp_configs->PreferredConfig().GetKeyId());
EXPECT_TRUE(parsed_preferred_public_key.ok());
EXPECT_EQ(parsed_preferred_public_key.value(), expected_preferred_public_key);
}
TEST(ObliviousHttpHeaderKeyConfigs, TestCreateWithInvalidConfigs) {
ASSERT_EQ(ObliviousHttpKeyConfigs::Create({}).status().code(),
absl::StatusCode::kInvalidArgument);
ASSERT_EQ(ObliviousHttpKeyConfigs::Create(
{{100, 2, std::string(32, 'a'), {{2, 3}, {4, 5}}},
{200, 6, std::string(32, 'b'), {{7, 8}, {9, 10}}}})
.status()
.code(),
absl::StatusCode::kInvalidArgument);
EXPECT_EQ(
ObliviousHttpKeyConfigs::Create(
{{123,
EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
"invalid key length" ,
{{EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_128_GCM}}}})
.status()
.code(),
absl::StatusCode::kInvalidArgument);
}
TEST(ObliviousHttpHeaderKeyConfigs,
TestCreateSingleKeyConfigWithInvalidConfig) {
const auto sample_ohttp_hdr_config = ObliviousHttpHeaderKeyConfig::Create(
123, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, EVP_HPKE_HKDF_SHA256,
EVP_HPKE_AES_128_GCM);
ASSERT_TRUE(sample_ohttp_hdr_config.ok());
ASSERT_EQ(ObliviousHttpKeyConfigs::Create(sample_ohttp_hdr_config.value(),
"" )
.status()
.code(),
absl::StatusCode::kInvalidArgument);
EXPECT_EQ(ObliviousHttpKeyConfigs::Create(
sample_ohttp_hdr_config.value(),
"invalid key length" )
.status()
.code(),
absl::StatusCode::kInvalidArgument);
}
TEST(ObliviousHttpHeaderKeyConfigs, TestHashImplWithObliviousStruct) {
absl::flat_hash_set<ObliviousHttpKeyConfigs::SymmetricAlgorithmsConfig>
symmetric_algs_set;
for (int i = 0; i < 50; ++i) {
symmetric_algs_set.insert({EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_128_GCM});
symmetric_algs_set.insert({EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM});
symmetric_algs_set.insert(
{EVP_HPKE_HKDF_SHA256, EVP_HPKE_CHACHA20_POLY1305});
}
ASSERT_EQ(symmetric_algs_set.size(), 3);
EXPECT_THAT(symmetric_algs_set,
UnorderedElementsAreArray<
ObliviousHttpKeyConfigs::SymmetricAlgorithmsConfig>({
{EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_128_GCM},
{EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM},
{EVP_HPKE_HKDF_SHA256, EVP_HPKE_CHACHA20_POLY1305},
}));
absl::flat_hash_set<ObliviousHttpKeyConfigs::OhttpKeyConfig>
ohttp_key_configs_set;
ObliviousHttpKeyConfigs::OhttpKeyConfig expected_key_config{
100,
EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
std::string(32, 'c'),
{{EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_128_GCM},
{EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM}}};
for (int i = 0; i < 50; ++i) {
ohttp_key_configs_set.insert(expected_key_config);
}
ASSERT_EQ(ohttp_key_configs_set.size(), 1);
EXPECT_THAT(ohttp_key_configs_set, UnorderedElementsAre(expected_key_config));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/oblivious_http/common/oblivious_http_header_key_config.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/oblivious_http/common/oblivious_http_header_key_config_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
46bc99e4-502f-4dcb-8ff3-7a075b7f1f98 | cpp | tensorflow/tensorflow | elemental_ir_emitter | third_party/xla/xla/service/gpu/elemental_ir_emitter.cc | third_party/xla/xla/service/elemental_ir_emitter_test.cc | #include "xla/service/gpu/elemental_ir_emitter.h"
#include <cstdint>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/ModRef.h"
#include "llvm/TargetParser/Triple.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/ir_emitter_nested.h"
#include "xla/service/gpu/target_util.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/llvm_ir/math_ops.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
GpuElementalIrEmitter::GpuElementalIrEmitter(
IrEmitterContext& ir_emitter_context, llvm::IRBuilder<>* b)
: ElementalIrEmitter(ir_emitter_context.llvm_module(), b),
ir_emitter_context_(ir_emitter_context) {}
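// Emits a call into the device math library. F16 is handled by promoting
// operands to F32 and casting the result back, since only F32 and F64
// variants are emitted here.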
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitDeviceMathCall(
TargetDeviceFunctionID funcid, absl::Span<llvm::Value* const> operands,
absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
absl::string_view name) {
bool cast_result_to_fp16 = false;
std::vector<llvm::Value*> converted_operands(operands.begin(),
operands.end());
std::vector<PrimitiveType> converted_input_types(input_types.begin(),
input_types.end());
switch (output_type) {
case F16:
cast_result_to_fp16 = true;
for (int64_t i = 0; i < operands.size(); ++i) {
if (input_types[i] == F16) {
converted_operands[i] =
FPCast(converted_operands[i], b()->getFloatTy());
converted_input_types[i] = F32;
}
}
output_type = F32;
[[fallthrough]];
case F32:
break;
case F64:
break;
default:
return Unimplemented("Bad type for device math call: %s",
PrimitiveType_Name(output_type));
}
const std::string& munged_callee = ObtainDeviceFunctionName(
funcid, output_type,
llvm::Triple(b()->GetInsertBlock()->getModule()->getTargetTriple()));
llvm::Value* result = EmitMathCall(munged_callee, converted_operands,
converted_input_types, output_type, name)
.value();
if (cast_result_to_fp16) {
result = FPCast(result, b()->getHalfTy());
}
return result;
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitMathCall(
const std::string& callee_name, absl::Span<llvm::Value* const> operands,
absl::Span<const PrimitiveType> input_types, PrimitiveType output_type,
absl::string_view name) {
for (PrimitiveType input_type : input_types) {
if (output_type != input_type) {
return Unimplemented("Input type != output type: %s != %s",
PrimitiveType_Name(input_type),
PrimitiveType_Name(output_type));
}
}
return EmitDeviceFunctionCall(callee_name, operands, input_types, output_type,
llvm::AttrBuilder(b()->getContext())
.addMemoryAttr(llvm::MemoryEffects::none())
.addAttribute(llvm::Attribute::NoUnwind),
b(), name);
}
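// Recovers the source index of a bitcast, applying any explicit source/result
// layouts recorded in the instruction's BitcastBackendConfig.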
llvm_ir::IrArray::Index GpuElementalIrEmitter::GetSourceIndexOfBitcast(
const llvm_ir::IrArray::Index& index, const HloInstruction* hlo) {
Shape shape = hlo->shape();
Shape operand_shape = hlo->operand(0)->shape();
auto gpu_config = hlo->backend_config<GpuBackendConfig>();
CHECK_OK(gpu_config);
const BitcastBackendConfig& bitcast_config =
gpu_config.value().bitcast_backend_config();
if (!bitcast_config.result_layout().minor_to_major().empty()) {
*shape.mutable_layout() =
xla::Layout::CreateFromProto(bitcast_config.result_layout());
}
if (!bitcast_config.source_layout().minor_to_major().empty()) {
*operand_shape.mutable_layout() =
xla::Layout::CreateFromProto(bitcast_config.source_layout());
}
return index.SourceIndexOfBitcast(shape, operand_shape, b());
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitFloatBinaryOp(
const HloInstruction* op, llvm::Value* lhs_value, llvm::Value* rhs_value) {
PrimitiveType lhs_input_type = op->operand(0)->shape().element_type();
PrimitiveType rhs_input_type = op->operand(1)->shape().element_type();
PrimitiveType output_type = op->shape().element_type();
HloOpcode opcode = op->opcode();
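  // With fast min/max enabled, lower directly to llvm.maxnum/llvm.minnum,
  // which may propagate NaNs differently from the default HLO lowering.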
if (ir_emitter_context_.debug_options().xla_gpu_enable_fast_min_max() &&
(opcode == HloOpcode::kMaximum || opcode == HloOpcode::kMinimum)) {
return llvm_ir::EmitCallToIntrinsic(
opcode == HloOpcode::kMaximum ? llvm::Intrinsic::maxnum
: llvm::Intrinsic::minnum,
{lhs_value, rhs_value}, {lhs_value->getType()}, b());
}
switch (op->opcode()) {
case HloOpcode::kRemainder: {
return EmitDeviceMathCall(TargetDeviceFunctionID::kFmod,
{lhs_value, rhs_value},
{lhs_input_type, rhs_input_type}, output_type);
}
case HloOpcode::kPower: {
return EmitPowerOp(op, lhs_value, rhs_value);
}
default:
return ElementalIrEmitter::EmitFloatBinaryOp(op, lhs_value, rhs_value);
}
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitPowerOp(
const HloInstruction* op, llvm::Value* lhs_value, llvm::Value* rhs_value) {
CHECK_EQ(op->opcode(), HloOpcode::kPower);
PrimitiveType lhs_input_type = op->operand(0)->shape().element_type();
PrimitiveType rhs_input_type = op->operand(1)->shape().element_type();
PrimitiveType output_type = op->shape().element_type();
return EmitDeviceMathCall(TargetDeviceFunctionID::kPow,
{lhs_value, rhs_value},
{lhs_input_type, rhs_input_type}, output_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLog(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kLog, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLog1p(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kLog1p, {value},
{prim_type}, prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitSin(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kSin, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitCos(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kCos, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitTan(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kTan, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitExp(
    PrimitiveType prim_type, llvm::Value* value, absl::string_view /*name*/) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kExp, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitExpm1(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kExpm1, {value},
{prim_type}, prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitPow(
PrimitiveType prim_type, llvm::Value* lhs, llvm::Value* rhs,
absl::string_view name) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kPow, {lhs, rhs},
{prim_type, prim_type}, prim_type, name);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitSqrt(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kSqrt, {value}, {prim_type},
prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitRsqrt(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kRsqrt, {value},
{prim_type}, prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitAtan2(
PrimitiveType prim_type, llvm::Value* lhs, llvm::Value* rhs,
absl::string_view name) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kAtan2, {lhs, rhs},
{prim_type, prim_type}, prim_type, name);
}
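// F64 tanh goes to the device library. F16/F32 use a fast polynomial
// approximation evaluated in F32, clamped to copysign(1, x) once |x| >= 20,
// where tanh has saturated in single precision.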
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitTanh(
PrimitiveType prim_type, llvm::Value* value) {
if (prim_type == F64) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kTanh, {value},
{prim_type}, prim_type);
}
llvm::Type* type = prim_type == F16 ? b()->getFloatTy() : value->getType();
llvm::Value* input = FPCast(value, type);
constexpr double kMaxValue = 20.0;
auto max_value = llvm::ConstantFP::get(type, kMaxValue);
llvm::Value* abs_value =
llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {input}, {type}, b());
llvm::Value* fast_tanh = llvm_ir::EmitFastTanh(b(), input);
auto one = llvm::ConstantFP::get(type, 1.0);
auto one_with_sign = llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::copysign,
{one, input}, {type}, b());
return FPCast(Select(FCmpULT(abs_value, max_value), fast_tanh, one_with_sign),
value->getType(), "tanh");
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitErf(
PrimitiveType prim_type, llvm::Value* value) {
if (prim_type == F64) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kErf, {value},
{prim_type}, prim_type);
}
llvm::Type* type = prim_type == F16 ? b()->getFloatTy() : value->getType();
if (type == b()->getFloatTy()) {
llvm::Value* x = FPCast(value, type);
auto* result = llvm_ir::EmitErfF32(b(), x);
return FPCast(result, value->getType());
}
return Unimplemented("erf");
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitComplexAbs(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kHypot,
{EmitExtractReal(value), EmitExtractImag(value)},
{prim_type, prim_type}, prim_type);
}
absl::StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitCbrt(
PrimitiveType prim_type, llvm::Value* value) {
return EmitDeviceMathCall(TargetDeviceFunctionID::kCbrt, {value}, {prim_type},
prim_type);
}
absl::StatusOr<std::vector<llvm::Value*>>
GpuElementalIrEmitter::EmitThreadLocalCall(
const HloComputation& callee, absl::Span<llvm::Value* const> parameters,
    absl::string_view, bool /*is_reducer*/) {
return CallNestedComputationWithScalars(b(), ir_emitter_context_, callee,
parameters);
}
}
} | #include "xla/service/elemental_ir_emitter.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using std::nullopt;
class ElementalIrEmitterExecutionTest : public HloTestBase {
protected:
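  // Runs the module with HLO passes disabled so the emitter sees the graph
  // exactly as written.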
void RunTest(const std::string& hlo_text, absl::Span<Literal* const> args) {
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsForTest());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), args, nullopt));
}
void RunTypeConversionTest(absl::string_view hlo_text) {
HloModuleConfig config;
auto debug_options = GetDebugOptionsForTest();
debug_options.set_xla_cpu_fast_math_honor_nans(true);
debug_options.set_xla_cpu_fast_math_honor_infs(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
    EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.}));
}
};
class ElementalIrEmitterExecutionTestWithoutFastMinMax
: public ElementalIrEmitterExecutionTest {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
ElementalIrEmitterExecutionTest::GetDebugOptionsForTest();
debug_options.set_xla_cpu_enable_fast_min_max(false);
debug_options.set_xla_gpu_enable_fast_min_max(false);
return debug_options;
}
};
template <typename T>
class ElementalIrEmitterExecutionTypedTest
: public ElementalIrEmitterExecutionTest {
protected:
const std::string& TypeName() {
return primitive_util::LowercasePrimitiveTypeName(
primitive_util::NativeToPrimitiveType<T>());
}
};
using FloatTypes =
::testing::Types<bfloat16, tsl::float8_e5m2, tsl::float8_e5m2fnuz,
tsl::float8_e4m3, tsl::float8_e4m3fn, tsl::float8_e4m3fnuz,
tsl::float8_e4m3b11fnuz, tsl::float8_e3m4>;
TYPED_TEST_SUITE(ElementalIrEmitterExecutionTypedTest, FloatTypes);
XLA_TEST_F(ElementalIrEmitterExecutionTest, DotFusion) {
const std::string hlo_text = R"(
HloModule FusedDot
fused_computation {
arg0 = s32[1,2,1]{2,1,0} parameter(0)
reshape.lhs = s32[2,1]{1,0} reshape(arg0)
arg1 = s32[1,2,1]{2,1,0} parameter(1)
reshape.rhs = s32[2,1]{1,0} reshape(arg1)
ROOT dot = s32[1,1]{1,0} dot(reshape.lhs, reshape.rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY main {
entry_arg0 = s32[1,2,1]{2,1,0} parameter(0)
entry_arg1 = s32[1,2,1]{2,1,0} parameter(1)
ROOT fusion = s32[1,1]{1,0} fusion(entry_arg0, entry_arg1), kind=kLoop, calls=fused_computation
}
)";
Literal lhs = LiteralUtil::CreateR3<int32_t>({{{1}, {2}}});
Literal rhs = LiteralUtil::CreateR3<int32_t>({{{3}, {4}}});
RunTest(hlo_text, {&lhs, &rhs});
}
XLA_TEST_F(ElementalIrEmitterExecutionTest, ScalarDotFusion) {
const char* hlo_text = R"(
HloModule ScalarDotFusion
fused_computation {
arg0 = s32[2,2]{1,0} parameter(0)
reshape.lhs = s32[4]{0} reshape(arg0)
arg1 = s32[2,2]{1,0} parameter(1)
reshape.rhs = s32[4]{0} reshape(arg1)
ROOT dot = s32[] dot(reshape.lhs, reshape.rhs), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY main {
entry_arg0 = s32[2,2]{1,0} parameter(0)
entry_arg1 = s32[2,2]{1,0} parameter(1)
ROOT fusion = s32[] fusion(entry_arg0, entry_arg1), kind=kLoop, calls=fused_computation
}
)";
Literal lhs = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}});
Literal rhs = LiteralUtil::CreateR2<int32_t>({{10, 20}, {30, 40}});
RunTest(hlo_text, {&lhs, &rhs});
}
XLA_TEST_F(ElementalIrEmitterExecutionTest, BatchDot) {
const char* hlo_text = R"(
HloModule BatchDot
fused_computation.1 {
param_0 = f64[1,1,8]{2,1,0} parameter(0)
r.1 = f64[2,4]{1,0} reshape(param_0)
param_1 = f64[1,2,2,2,1]{4,3,2,1,0} parameter(1)
r.2 = f64[2,4,1]{2,1,0} reshape(param_1)
ROOT dot = f64[2,1]{1,0} dot(r.1, r.2), lhs_batch_dims={0},
lhs_contracting_dims={1},
rhs_batch_dims={0},
rhs_contracting_dims={1}
}
ENTRY resampler_Resampler.49 {
p0 = f64[1,1,8]{2,1,0} parameter(0)
p1 = f64[1,2,2,2,1]{4,3,2,1,0} parameter(1)
ROOT f = f64[2,1]{1,0} fusion(p0, p1), kind=kLoop, calls=fused_computation.1
}
)";
HloModuleConfig config;
auto debug_options = GetDebugOptionsForTest();
debug_options.add_xla_disable_hlo_passes("layout-assignment");
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{4e-3, 4e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTest,
DivideComplexNumbersWithInfiniteNormRhs) {
constexpr char hlo_text[] = R"(
HloModule DivideComplexNumbers
ENTRY DivideComplexNumbers {
constant.1 = c64[8]{0} constant({
(1, 1), (1, inf), (1, inf), (nan, 1),
(inf, inf), (inf, nan), (nan, nan), (1, 2)})
real = f32[8]{0} constant({nan, nan, inf, inf, inf, 1, inf, 3})
imag = f32[8]{0} constant({inf, inf, inf, inf, 1, inf, inf, 4})
complex.2 = c64[8]{0} complex(real, imag)
ROOT divide.1 = c64[8]{0} divide(constant.1, complex.2)
}
)";
HloModuleConfig config;
auto debug_options = GetDebugOptionsForTest();
debug_options.set_xla_cpu_fast_math_honor_nans(true);
debug_options.set_xla_cpu_fast_math_honor_infs(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
  EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTest,
DivideComplexNumbersWithFiniteNormRhs) {
constexpr char hlo_text[] = R"(
HloModule DivideComplexNumbers
ENTRY DivideComplexNumbers {
constant.1 = c64[5]{0} constant({
(1, inf), (inf, 1), (inf, nan), (inf, inf), (nan, inf)})
real = f32[5]{0} constant({1, 1, 1, 1, 1})
imag = f32[5]{0} constant({1, 1, 1, 1, 1})
complex.2 = c64[5]{0} complex(real, imag)
ROOT divide.1 = c64[5]{0} divide(constant.1, complex.2)
}
)";
HloModuleConfig config;
auto debug_options = GetDebugOptionsForTest();
debug_options.set_xla_cpu_fast_math_honor_nans(true);
debug_options.set_xla_cpu_fast_math_honor_infs(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
  EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTest,
DivideComplexNumbersWithZeroNormRhs) {
constexpr char hlo_text[] = R"(
HloModule DivideComplexNumbers
ENTRY DivideComplexNumbers {
constant.1 = c64[9]{0} constant({
(1, 1), (1, nan), (1, inf), (inf, inf), (inf, 1),
(inf, nan), (nan, 1), (nan, inf), (nan, nan)})
real = f32[9]{0} constant({0, 0, 0, 0, 0, 0, 0, 0, 0})
imag = f32[9]{0} constant({0, 0, 0, 0, 0, 0, 0, 0, 0})
complex.2 = c64[9]{0} complex(real, imag)
ROOT divide.1 = c64[9]{0} divide(constant.1, complex.2)
}
)";
HloModuleConfig config;
auto debug_options = GetDebugOptionsForTest();
debug_options.set_xla_cpu_fast_math_honor_nans(true);
debug_options.set_xla_cpu_fast_math_honor_infs(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text, config));
  EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.}));
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatsToFloat) {
auto tname = this->TypeName();
if (std::is_same<TypeParam, tsl::float8_e4m3>() ||
std::is_same<TypeParam, tsl::float8_e4m3fn>() ||
std::is_same<TypeParam, tsl::float8_e4m3b11fnuz>() ||
std::is_same<TypeParam, tsl::float8_e3m4>()) {
GTEST_SKIP() << "Skipping test for type " << tname;
}
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
f16_ = f16[] parameter(0)
f32_ = f32[] parameter(1)
f64_ = f64[] parameter(2)
bf16_ = bf16[] parameter(3)
converted_f16 = ${tname}[] convert(f16_)
converted_f32 = ${tname}[] convert(f32_)
converted_f64 = ${tname}[] convert(f64_)
converted_bf16 = ${tname}[] convert(bf16_)
ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple(
converted_f16, converted_f32, converted_f64, converted_bf16)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertSignedToFloat) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
s8_ = s8[] parameter(0)
s16_ = s16[] parameter(1)
s32_ = s32[] parameter(2)
s64_ = s64[] parameter(3)
converted_s8 = ${tname}[] convert(s8_)
converted_s16 = ${tname}[] convert(s16_)
converted_s32 = ${tname}[] convert(s32_)
converted_s64 = ${tname}[] convert(s64_)
ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple(
converted_s8, converted_s16, converted_s32, converted_s64)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertUnsignedToFloat) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
u8_ = u8[] parameter(0)
u16_ = u16[] parameter(1)
u32_ = u32[] parameter(2)
u64_ = u64[] parameter(3)
converted_u8 = ${tname}[] convert(u8_)
converted_u16 = ${tname}[] convert(u16_)
converted_u32 = ${tname}[] convert(u32_)
converted_u64 = ${tname}[] convert(u64_)
ROOT tuple = (${tname}[], ${tname}[], ${tname}[], ${tname}[]) tuple(
converted_u8, converted_u16, converted_u32, converted_u64)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToFloats) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
to_f16 = ${tname}[] parameter(0)
to_f32 = ${tname}[] parameter(1)
to_f64 = ${tname}[] parameter(2)
to_bf16 = ${tname}[] parameter(3)
f16_ = f16[] convert(to_f16)
f32_ = f32[] convert(to_f32)
f64_ = f64[] convert(to_f64)
  bf16_ = bf16[] convert(to_bf16)
ROOT tuple = (f16[], f32[], f64[], bf16[]) tuple(f16_, f32_, f64_, bf16_)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToSigned) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
to_s8 = ${tname}[] parameter(0)
to_s16 = ${tname}[] parameter(1)
to_s32 = ${tname}[] parameter(2)
to_s64 = ${tname}[] parameter(3)
s8_ = s8[] convert(to_s8)
s16_ = s16[] convert(to_s16)
s32_ = s32[] convert(to_s32)
s64_ = s64[] convert(to_s64)
ROOT tuple = (s8[], s16[], s32[], s64[]) tuple(s8_, s16_, s32_, s64_)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToUnsigned) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
to_u8 = ${tname}[] parameter(0)
to_u16 = ${tname}[] parameter(1)
to_u32 = ${tname}[] parameter(2)
to_u64 = ${tname}[] parameter(3)
u8_ = u8[] convert(to_u8)
u16_ = u16[] convert(to_u16)
u32_ = u32[] convert(to_u32)
u64_ = u64[] convert(to_u64)
ROOT tuple = (u8[], u16[], u32[], u64[]) tuple(u8_, u16_, u32_, u64_)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, ConvertFloatToComplex) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
to_c64 = ${tname}[] parameter(0)
to_c128 = ${tname}[] parameter(1)
c64_ = c64[] convert(to_c64)
c128_ = c128[] convert(to_c128)
ROOT tuple = (c64[], c128[]) tuple(c64_, c128_)
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTypeConversionTest(hlo_text);
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, CompareFloat) {
auto tname = this->TypeName();
if (std::is_same<TypeParam, tsl::float8_e4m3b11fnuz>()) {
GTEST_SKIP() << "Skipping test for type " << tname;
}
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
p0 = ${tname}[4] parameter(0)
p1 = ${tname}[4] parameter(1)
ROOT cmp = pred[4] compare(p0, p1), direction=LT
})",
{{"${tname}", tname}});
Literal lhs = LiteralUtil::CreateR1<TypeParam>(
{TypeParam(1.), TypeParam(2.), TypeParam(3.), TypeParam(4.)});
Literal rhs = LiteralUtil::CreateR1<TypeParam>(
{TypeParam(4.), TypeParam(4.), TypeParam(2.), TypeParam(1.)});
ElementalIrEmitterExecutionTest::RunTest(hlo_text, {&lhs, &rhs});
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, IotaFloat) {
auto tname = this->TypeName();
if (std::is_same<TypeParam, tsl::float8_e5m2>() ||
std::is_same<TypeParam, tsl::float8_e4m3>() ||
std::is_same<TypeParam, tsl::float8_e4m3fn>() ||
std::is_same<TypeParam, tsl::float8_e4m3b11fnuz>() ||
std::is_same<TypeParam, tsl::float8_e3m4>()) {
GTEST_SKIP() << "Skipping test for type " << tname;
}
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule m
ENTRY main {
ROOT iota_ = ${tname}[4] iota(), iota_dimension=0
}
)",
{{"${tname}", tname}});
ElementalIrEmitterExecutionTest::RunTest(hlo_text, {});
}
TYPED_TEST(ElementalIrEmitterExecutionTypedTest, BatchDotFloat) {
auto tname = this->TypeName();
const auto hlo_text = absl::StrReplaceAll(R"(
HloModule matmul
ENTRY main {
x = ${tname}[8,16] parameter(0)
y = ${tname}[8,16,32] parameter(1)
ROOT dot = ${tname}[8,32] dot(x, y), lhs_batch_dims={0},
rhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)",
{{"${tname}", tname}});
HloModuleConfig config;
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
HloTestBase::ParseAndReturnVerifiedModule(hlo_text, config));
EXPECT_TRUE(
HloTestBase::RunAndCompare(std::move(module), ErrorSpec{1e-5, 1e-5}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MinimumHandlesNaNsOnTheLeft) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT min = f32[5,5] minimum(nans, neg1s)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
DISABLED_MinimumHandlesNaNsOnTheRight) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT min = f32[5,5] minimum(neg1s, nans)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MaximumHandlesNaNsOnTheLeft) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT max = f32[5,5] maximum(nans, neg1s)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MaximumHandlesNaNsOnTheRight) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
neg1 = f32[] constant(-1)
neg1s = f32[5,5] broadcast(neg1), dimensions={}
nans = f32[5,5] sqrt(neg1s)
ROOT max = f32[5,5] maximum(neg1s, nans)
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MinimumReturnsLHS) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT min = f32[5,5] minimum(zeros, ones)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MinimumReturnsRHS) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT min = f32[5,5] minimum(ones, zeros)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MaximumReturnsLHS) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT max = f32[5,5] maximum(ones, zeros)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
XLA_TEST_F(ElementalIrEmitterExecutionTestWithoutFastMinMax,
MaximumReturnsRHS) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
zero = f32[] constant(0)
zeros = f32[5,5] broadcast(zero), dimensions={}
one = f32[] constant(1)
ones = f32[5,5] broadcast(one), dimensions={}
ROOT max = f32[5,5] maximum(zeros, ones)
})";
  EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
class ElementalIrEmitterInternalTest : public HloTestBase {};
XLA_TEST_F(ElementalIrEmitterInternalTest, SparseDotIsUnsupported) {
constexpr absl::string_view kHloText = R"(
HloModule test
ENTRY main {
lhs = f16[5,16] parameter(0)
rhs = f16[32,10] parameter(1)
meta = u16[5,2] parameter(2)
ROOT dot = f32[5,10] dot(lhs, rhs, meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloText));
HloInstruction* root = module->entry_computation()->root_instruction();
llvm::LLVMContext llvm_context;
llvm::Module llvm_module("", llvm_context);
llvm::IRBuilder<> builder(llvm_context);
ElementalIrEmitterForTests emitter(&llvm_module, &builder);
llvm_ir::IrArray::Index test_index{builder.getInt64Ty()};
auto result = emitter.TestElementalDot(root, test_index);
EXPECT_FALSE(result.ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/elemental_ir_emitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/elemental_ir_emitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
05bd909d-de74-41fe-9f9a-bc1cf9746d43 | cpp | tensorflow/tensorflow | depthwise_conv | tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.cc | tensorflow/lite/delegates/gpu/cl/kernels/depthwise_conv_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
#include "tensorflow/lite/delegates/gpu/gl/workgroups/ideal_workgroup_picker.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class DepthwiseConvolution : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() != 1) {
return absl::UnimplementedError(
"DepthWise Convolution does not support more than 1 runtime tensor");
}
const auto& attr =
std::any_cast<const DepthwiseConvolution2DAttributes&>(ctx.op_attr);
auto weights = attr.weights.shape;
const int offsets_count = weights.h * weights.w;
const bool offsets_count_too_large = offsets_count > kMaxConstArraySize;
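    // Kernels with more taps than kMaxConstArraySize compute their offsets
    // inside the shader loop; smaller kernels get a precomputed constant
    // offset array.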
std::vector<Variable> parameters;
if (offsets_count_too_large) {
parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"padding_w", attr.padding.prepended.w},
{"padding_h", attr.padding.prepended.h},
{"dilation_w", attr.dilations.w},
{"dilation_h", attr.dilations.h},
{"kernel_w", weights.w},
{"kernel_h", weights.h},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"channel_multiplier", weights.o},
{"stride", int2(attr.strides.w, attr.strides.h)},
};
} else {
std::vector<int2> offsets;
for (int h = 0; h < weights.h; ++h) {
for (int w = 0; w < weights.w; ++w) {
offsets.emplace_back(w * attr.dilations.w - attr.padding.prepended.w,
h * attr.dilations.h - attr.padding.prepended.h);
}
}
parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"offsets_count", offsets_count},
{"offsets", offsets},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"channel_multiplier", weights.o},
{"stride", int2(attr.strides.w, attr.strides.h)},
};
}
bool non_empty_padding =
attr.padding.appended.h != 0 || attr.padding.appended.w != 0 ||
attr.padding.prepended.h != 0 || attr.padding.prepended.w != 0;
std::vector<std::pair<std::string, Object>> objects = {
{"weights", MakeReadonlyObject(ConvertToPIOHW4(attr.weights))}};
std::string source;
if (offsets_count_too_large) {
source = R"(
int offsets_count = $kernel_w$ * $kernel_h$;
int src_layer_offset = (gid.z % $channel_multiplier$) * 4;
int i = 0;
for (int ky = 0; ky < $kernel_h$; ky++) {
for (int kx = 0; kx < $kernel_w$; kx++, i++) {
ivec2 coord = gid.xy * $stride$ + ivec2(kx * $dilation_w$ - $padding_w$, ky * $dilation_h$ - $padding_h$);)";
} else {
source = R"(
int offsets_count = $offsets_count$;
int src_layer_offset = (gid.z % $channel_multiplier$) * 4;
for (int i = 0; i < offsets_count; ++i) {
ivec2 coord = gid.xy * $stride$ + $offsets[i]$;)";
}
if (non_empty_padding) {
source += R"(
if (coord.x < 0 || coord.y < 0 ||
coord.x >= $input_data_0_w$ || coord.y >= $input_data_0_h$) {
continue;
})";
}
source += R"(
int src_layer = gid.z / $channel_multiplier$;
vec4 input_ = $input_data_0[coord.x, coord.y, src_layer]$;
vec4 input_shifted = vec4(
input_[(src_layer_offset + 0) / $channel_multiplier$],
input_[(src_layer_offset + 1) / $channel_multiplier$],
input_[(src_layer_offset + 2) / $channel_multiplier$],
input_[(src_layer_offset + 3) / $channel_multiplier$]
);
value_0 += input_shifted * $weights[gid.z * offsets_count + i]$;
}
)";
if (offsets_count_too_large) {
source += R"(
}
)";
}
if (!attr.bias.data.empty()) {
source += "value_0 += $bias[gid.z]$;\n";
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
}
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(),
GetIdealWorkgroupIfPossible(
*ctx.gpu_info, OperationType::DEPTHWISE_CONVOLUTION,
HW(attr.weights.shape.h, attr.weights.shape.w), attr.strides,
OHWI(attr.weights.shape.o, ctx.input_shapes[0][1],
ctx.input_shapes[0][2], ctx.input_shapes[0][3])),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewDepthwiseConvolutionNodeShader() {
return std::make_unique<DepthwiseConvolution>();
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, DepthwiseConvSimpleWeights) {
auto status = DepthwiseConvSimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, DepthwiseConvNoMultiplier) {
auto status = DepthwiseConvNoMultiplierTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, DepthwiseConvMultiplier2) {
auto status = DepthwiseConvMultiplier2Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/depthwise_conv_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
724bea7e-edc7-4246-b9c8-b4da79bb7cdb | cpp | abseil/abseil-cpp | bits | absl/numeric/internal/bits.h | absl/numeric/bits_test.cc | #ifndef ABSL_NUMERIC_INTERNAL_BITS_H_
#define ABSL_NUMERIC_INTERNAL_BITS_H_
#include <cstdint>
#include <limits>
#include <type_traits>
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
#endif
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#if defined(__GNUC__) && !defined(__clang__)
#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) 1
#else
#define ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(x) ABSL_HAVE_BUILTIN(x)
#endif
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountl) && \
ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT constexpr
#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 1
#else
#define ABSL_INTERNAL_CONSTEXPR_POPCOUNT
#define ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT 0
#endif
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz) && \
ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
#define ABSL_INTERNAL_CONSTEXPR_CLZ constexpr
#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 1
#else
#define ABSL_INTERNAL_CONSTEXPR_CLZ
#define ABSL_INTERNAL_HAS_CONSTEXPR_CLZ 0
#endif
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz) && \
ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
#define ABSL_INTERNAL_CONSTEXPR_CTZ constexpr
#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 1
#else
#define ABSL_INTERNAL_CONSTEXPR_CTZ
#define ABSL_INTERNAL_HAS_CONSTEXPR_CTZ 0
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace numeric_internal {
constexpr bool IsPowerOf2(unsigned int x) noexcept {
return x != 0 && (x & (x - 1)) == 0;
}
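// Rotates x right (or left, below) by s bits. Masking the shift amount with
// (digits - 1) keeps both shifts in range, so the result is well defined for
// any s, including negative values and multiples of the width.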
template <class T>
ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateRight(
T x, int s) noexcept {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
return static_cast<T>(x >> (s & (std::numeric_limits<T>::digits - 1))) |
static_cast<T>(x << ((-s) & (std::numeric_limits<T>::digits - 1)));
}
template <class T>
ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_ALWAYS_INLINE constexpr T RotateLeft(
T x, int s) noexcept {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
Popcount32(uint32_t x) noexcept {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcount)
static_assert(sizeof(unsigned int) == sizeof(x),
"__builtin_popcount does not take 32-bit arg");
return __builtin_popcount(x);
#else
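  // SWAR popcount: sum bits in 2-bit, then 4-bit groups, then accumulate the
  // per-byte counts with a multiply and take the high byte.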
x -= ((x >> 1) & 0x55555555);
x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
return static_cast<int>((((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24);
#endif
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
Popcount64(uint64_t x) noexcept {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_popcountll)
static_assert(sizeof(unsigned long long) == sizeof(x),
"__builtin_popcount does not take 64-bit arg");
return __builtin_popcountll(x);
#else
x -= (x >> 1) & 0x5555555555555555ULL;
x = ((x >> 2) & 0x3333333333333333ULL) + (x & 0x3333333333333333ULL);
return static_cast<int>(
(((x + (x >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56);
#endif
}
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_POPCOUNT inline int
Popcount(T x) noexcept {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
static_assert(sizeof(x) <= sizeof(uint64_t), "T is too large");
return sizeof(x) <= sizeof(uint32_t) ? Popcount32(x) : Popcount64(x);
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes32(uint32_t x) {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clz)
static_assert(sizeof(unsigned int) == sizeof(x),
"__builtin_clz does not take 32-bit arg");
return x == 0 ? 32 : __builtin_clz(x);
#elif defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0;
if (_BitScanReverse(&result, x)) {
return 31 - result;
}
return 32;
#else
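  // Portable fallback: binary-search away the high 16/8/4 bits, then finish
  // with a 4-bit lookup table encoded as a string literal.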
int zeroes = 28;
if (x >> 16) {
zeroes -= 16;
x >>= 16;
}
if (x >> 8) {
zeroes -= 8;
x >>= 8;
}
if (x >> 4) {
zeroes -= 4;
x >>= 4;
}
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
#endif
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes16(uint16_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_clzg)
return x == 0 ? 16 : __builtin_clzg(x);
#elif ABSL_HAVE_BUILTIN(__builtin_clzs)
static_assert(sizeof(unsigned short) == sizeof(x),
"__builtin_clzs does not take 16-bit arg");
return x == 0 ? 16 : __builtin_clzs(x);
#else
return CountLeadingZeroes32(x) - 16;
#endif
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes64(uint64_t x) {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_clzll)
static_assert(sizeof(unsigned long long) == sizeof(x),
"__builtin_clzll does not take 64-bit arg");
return x == 0 ? 64 : __builtin_clzll(x);
#elif defined(_MSC_VER) && !defined(__clang__) && \
(defined(_M_X64) || defined(_M_ARM64))
unsigned long result = 0;
if (_BitScanReverse64(&result, x)) {
return 63 - result;
}
return 64;
#elif defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0;
if ((x >> 32) &&
_BitScanReverse(&result, static_cast<unsigned long>(x >> 32))) {
return 31 - result;
}
if (_BitScanReverse(&result, static_cast<unsigned long>(x))) {
return 63 - result;
}
return 64;
#else
int zeroes = 60;
if (x >> 32) {
zeroes -= 32;
x >>= 32;
}
if (x >> 16) {
zeroes -= 16;
x >>= 16;
}
if (x >> 8) {
zeroes -= 8;
x >>= 8;
}
if (x >> 4) {
zeroes -= 4;
x >>= 4;
}
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[x] + zeroes;
#endif
}
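// Dispatches on the width of T. For types narrower than the helper actually
// used, the extra leading zeros introduced by widening the argument are
// subtracted back out.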
template <typename T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline int
CountLeadingZeroes(T x) {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
return sizeof(T) <= sizeof(uint16_t)
? CountLeadingZeroes16(static_cast<uint16_t>(x)) -
(std::numeric_limits<uint16_t>::digits -
std::numeric_limits<T>::digits)
: (sizeof(T) <= sizeof(uint32_t)
? CountLeadingZeroes32(static_cast<uint32_t>(x)) -
(std::numeric_limits<uint32_t>::digits -
std::numeric_limits<T>::digits)
: CountLeadingZeroes64(x));
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroesNonzero32(uint32_t x) {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctz)
static_assert(sizeof(unsigned int) == sizeof(x),
"__builtin_ctz does not take 32-bit arg");
return __builtin_ctz(x);
#elif defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0;
_BitScanForward(&result, x);
return result;
#else
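  // Portable fallback: `x & (~x + 1)` isolates the lowest set bit, and the
  // masks then subtract that bit's position from 31.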
int c = 31;
x &= ~x + 1;
if (x & 0x0000FFFF) c -= 16;
if (x & 0x00FF00FF) c -= 8;
if (x & 0x0F0F0F0F) c -= 4;
if (x & 0x33333333) c -= 2;
if (x & 0x55555555) c -= 1;
return c;
#endif
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroesNonzero64(uint64_t x) {
#if ABSL_NUMERIC_INTERNAL_HAVE_BUILTIN_OR_GCC(__builtin_ctzll)
static_assert(sizeof(unsigned long long) == sizeof(x),
"__builtin_ctzll does not take 64-bit arg");
return __builtin_ctzll(x);
#elif defined(_MSC_VER) && !defined(__clang__) && \
(defined(_M_X64) || defined(_M_ARM64))
unsigned long result = 0;
_BitScanForward64(&result, x);
return result;
#elif defined(_MSC_VER) && !defined(__clang__)
unsigned long result = 0;
if (static_cast<uint32_t>(x) == 0) {
_BitScanForward(&result, static_cast<unsigned long>(x >> 32));
return result + 32;
}
_BitScanForward(&result, static_cast<unsigned long>(x));
return result;
#else
int c = 63;
x &= ~x + 1;
if (x & 0x00000000FFFFFFFF) c -= 32;
if (x & 0x0000FFFF0000FFFF) c -= 16;
if (x & 0x00FF00FF00FF00FF) c -= 8;
if (x & 0x0F0F0F0F0F0F0F0F) c -= 4;
if (x & 0x3333333333333333) c -= 2;
if (x & 0x5555555555555555) c -= 1;
return c;
#endif
}
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroesNonzero16(uint16_t x) {
#if ABSL_HAVE_BUILTIN(__builtin_ctzg)
return __builtin_ctzg(x);
#elif ABSL_HAVE_BUILTIN(__builtin_ctzs)
static_assert(sizeof(unsigned short) == sizeof(x),
"__builtin_ctzs does not take 16-bit arg");
return __builtin_ctzs(x);
#else
return CountTrailingZeroesNonzero32(x);
#endif
}
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CTZ inline int
CountTrailingZeroes(T x) noexcept {
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(IsPowerOf2(std::numeric_limits<T>::digits),
"T must have a power-of-2 size");
static_assert(sizeof(T) <= sizeof(uint64_t), "T too large");
return x == 0 ? std::numeric_limits<T>::digits
: (sizeof(T) <= sizeof(uint16_t)
? CountTrailingZeroesNonzero16(static_cast<uint16_t>(x))
: (sizeof(T) <= sizeof(uint32_t)
? CountTrailingZeroesNonzero32(
static_cast<uint32_t>(x))
: CountTrailingZeroesNonzero64(x)));
}
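// Helper for bit_ceil. The `promotion` term widens the shift for types
// narrower than `unsigned` so that a result that cannot fit in T overflows
// the shift itself; in a constant expression that overflow becomes a
// compile-time error rather than a silently truncated value.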
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
typename std::enable_if<std::is_unsigned<T>::value, T>::type
BitCeilPromotionHelper(T x, T promotion) {
return (T{1} << (x + promotion)) >> promotion;
}
template <class T>
ABSL_ATTRIBUTE_ALWAYS_INLINE ABSL_INTERNAL_CONSTEXPR_CLZ inline
typename std::enable_if<std::is_unsigned<T>::value, T>::type
BitCeilNonPowerOf2(T x) {
return BitCeilPromotionHelper(
static_cast<T>(std::numeric_limits<T>::digits - CountLeadingZeroes(x)),
T{sizeof(T) >= sizeof(unsigned) ? 0
: std::numeric_limits<unsigned>::digits -
std::numeric_limits<T>::digits});
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/numeric/bits.h"
#include <limits>
#include <type_traits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/random.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
template <typename IntT>
class IntegerTypesTest : public ::testing::Test {};
using OneByteIntegerTypes = ::testing::Types<unsigned char, uint8_t>;
TYPED_TEST_SUITE(IntegerTypesTest, OneByteIntegerTypes);
TYPED_TEST(IntegerTypesTest, HandlesTypes) {
using UIntType = TypeParam;
EXPECT_EQ(rotl(UIntType{0x12}, 0), uint8_t{0x12});
EXPECT_EQ(rotr(UIntType{0x12}, -4), uint8_t{0x21});
static_assert(rotl(UIntType{0x12}, 0) == uint8_t{0x12}, "");
static_assert(rotr(UIntType{0x12}, 0) == uint8_t{0x12}, "");
EXPECT_EQ(rotr(UIntType{0x12}, 0), uint8_t{0x12});
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(countl_zero(UIntType{}) == 8, "");
static_assert(countl_zero(static_cast<UIntType>(-1)) == 0, "");
static_assert(countl_one(UIntType{}) == 0, "");
static_assert(countl_one(static_cast<UIntType>(-1)) == 8, "");
static_assert(countr_zero(UIntType{}) == 8, "");
static_assert(countr_zero(static_cast<UIntType>(-1)) == 0, "");
static_assert(countr_one(UIntType{}) == 0, "");
static_assert(countr_one(static_cast<UIntType>(-1)) == 8, "");
static_assert(popcount(UIntType{}) == 0, "");
static_assert(popcount(UIntType{1}) == 1, "");
static_assert(popcount(static_cast<UIntType>(-1)) == 8, "");
static_assert(bit_width(UIntType{}) == 0, "");
static_assert(bit_width(UIntType{1}) == 1, "");
static_assert(bit_width(UIntType{3}) == 2, "");
static_assert(bit_width(static_cast<UIntType>(-1)) == 8, "");
#endif
EXPECT_EQ(countl_zero(UIntType{}), 8);
EXPECT_EQ(countl_zero(static_cast<UIntType>(-1)), 0);
EXPECT_EQ(countl_one(UIntType{}), 0);
EXPECT_EQ(countl_one(static_cast<UIntType>(-1)), 8);
EXPECT_EQ(countr_zero(UIntType{}), 8);
EXPECT_EQ(countr_zero(static_cast<UIntType>(-1)), 0);
EXPECT_EQ(countr_one(UIntType{}), 0);
EXPECT_EQ(countr_one(static_cast<UIntType>(-1)), 8);
EXPECT_EQ(popcount(UIntType{}), 0);
EXPECT_EQ(popcount(UIntType{1}), 1);
EXPECT_FALSE(has_single_bit(UIntType{}));
EXPECT_FALSE(has_single_bit(static_cast<UIntType>(-1)));
EXPECT_EQ(bit_width(UIntType{}), 0);
EXPECT_EQ(bit_width(UIntType{1}), 1);
EXPECT_EQ(bit_width(UIntType{3}), 2);
EXPECT_EQ(bit_width(static_cast<UIntType>(-1)), 8);
}
TEST(Rotate, Left) {
static_assert(rotl(uint8_t{0x12}, 0) == uint8_t{0x12}, "");
static_assert(rotl(uint16_t{0x1234}, 0) == uint16_t{0x1234}, "");
static_assert(rotl(uint32_t{0x12345678UL}, 0) == uint32_t{0x12345678UL}, "");
static_assert(rotl(uint64_t{0x12345678ABCDEF01ULL}, 0) ==
uint64_t{0x12345678ABCDEF01ULL},
"");
EXPECT_EQ(rotl(uint8_t{0x12}, 0), uint8_t{0x12});
EXPECT_EQ(rotl(uint16_t{0x1234}, 0), uint16_t{0x1234});
EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 0), uint32_t{0x12345678UL});
EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 0),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotl(uint8_t{0x12}, 8), uint8_t{0x12});
EXPECT_EQ(rotl(uint16_t{0x1234}, 16), uint16_t{0x1234});
EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 32), uint32_t{0x12345678UL});
EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 64),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotl(uint8_t{0x12}, -8), uint8_t{0x12});
EXPECT_EQ(rotl(uint16_t{0x1234}, -16), uint16_t{0x1234});
EXPECT_EQ(rotl(uint32_t{0x12345678UL}, -32), uint32_t{0x12345678UL});
EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, -64),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotl(uint8_t{0x12}, 4), uint8_t{0x21});
EXPECT_EQ(rotl(uint16_t{0x1234}, 4), uint16_t{0x2341});
EXPECT_EQ(rotl(uint32_t{0x12345678UL}, 4), uint32_t{0x23456781UL});
EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, 4),
uint64_t{0x2345678ABCDEF011ULL});
EXPECT_EQ(rotl(uint8_t{0x12}, -4), uint8_t{0x21});
EXPECT_EQ(rotl(uint16_t{0x1234}, -4), uint16_t{0x4123});
EXPECT_EQ(rotl(uint32_t{0x12345678UL}, -4), uint32_t{0x81234567UL});
EXPECT_EQ(rotl(uint64_t{0x12345678ABCDEF01ULL}, -4),
uint64_t{0x112345678ABCDEF0ULL});
}
TEST(Rotate, Right) {
static_assert(rotr(uint8_t{0x12}, 0) == uint8_t{0x12}, "");
static_assert(rotr(uint16_t{0x1234}, 0) == uint16_t{0x1234}, "");
static_assert(rotr(uint32_t{0x12345678UL}, 0) == uint32_t{0x12345678UL}, "");
static_assert(rotr(uint64_t{0x12345678ABCDEF01ULL}, 0) ==
uint64_t{0x12345678ABCDEF01ULL},
"");
EXPECT_EQ(rotr(uint8_t{0x12}, 0), uint8_t{0x12});
EXPECT_EQ(rotr(uint16_t{0x1234}, 0), uint16_t{0x1234});
EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 0), uint32_t{0x12345678UL});
EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 0),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotr(uint8_t{0x12}, 8), uint8_t{0x12});
EXPECT_EQ(rotr(uint16_t{0x1234}, 16), uint16_t{0x1234});
EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 32), uint32_t{0x12345678UL});
EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 64),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotr(uint8_t{0x12}, -8), uint8_t{0x12});
EXPECT_EQ(rotr(uint16_t{0x1234}, -16), uint16_t{0x1234});
EXPECT_EQ(rotr(uint32_t{0x12345678UL}, -32), uint32_t{0x12345678UL});
EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, -64),
uint64_t{0x12345678ABCDEF01ULL});
EXPECT_EQ(rotr(uint8_t{0x12}, 4), uint8_t{0x21});
EXPECT_EQ(rotr(uint16_t{0x1234}, 4), uint16_t{0x4123});
EXPECT_EQ(rotr(uint32_t{0x12345678UL}, 4), uint32_t{0x81234567UL});
EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, 4),
uint64_t{0x112345678ABCDEF0ULL});
EXPECT_EQ(rotr(uint8_t{0x12}, -4), uint8_t{0x21});
EXPECT_EQ(rotr(uint16_t{0x1234}, -4), uint16_t{0x2341});
EXPECT_EQ(rotr(uint32_t{0x12345678UL}, -4), uint32_t{0x23456781UL});
EXPECT_EQ(rotr(uint64_t{0x12345678ABCDEF01ULL}, -4),
uint64_t{0x2345678ABCDEF011ULL});
}
TEST(Rotate, Symmetry) {
absl::BitGen rng;
constexpr int kTrials = 100;
for (int i = 0; i < kTrials; ++i) {
uint8_t value = absl::Uniform(rng, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint8_t>::digits,
2 * std::numeric_limits<uint8_t>::digits);
EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
}
for (int i = 0; i < kTrials; ++i) {
uint16_t value = absl::Uniform(rng, std::numeric_limits<uint16_t>::min(),
std::numeric_limits<uint16_t>::max());
int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint16_t>::digits,
2 * std::numeric_limits<uint16_t>::digits);
EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
}
for (int i = 0; i < kTrials; ++i) {
uint32_t value = absl::Uniform(rng, std::numeric_limits<uint32_t>::min(),
std::numeric_limits<uint32_t>::max());
int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint32_t>::digits,
2 * std::numeric_limits<uint32_t>::digits);
EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
}
for (int i = 0; i < kTrials; ++i) {
uint64_t value = absl::Uniform(rng, std::numeric_limits<uint64_t>::min(),
std::numeric_limits<uint64_t>::max());
int shift = absl::Uniform(rng, -2 * std::numeric_limits<uint64_t>::digits,
2 * std::numeric_limits<uint64_t>::digits);
EXPECT_EQ(rotl(value, shift), rotr(value, -shift));
}
}
TEST(Counting, LeadingZeroes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(countl_zero(uint8_t{}) == 8, "");
static_assert(countl_zero(static_cast<uint8_t>(-1)) == 0, "");
static_assert(countl_zero(uint16_t{}) == 16, "");
static_assert(countl_zero(static_cast<uint16_t>(-1)) == 0, "");
static_assert(countl_zero(uint32_t{}) == 32, "");
static_assert(countl_zero(~uint32_t{}) == 0, "");
static_assert(countl_zero(uint64_t{}) == 64, "");
static_assert(countl_zero(~uint64_t{}) == 0, "");
#endif
EXPECT_EQ(countl_zero(uint8_t{}), 8);
EXPECT_EQ(countl_zero(static_cast<uint8_t>(-1)), 0);
EXPECT_EQ(countl_zero(uint16_t{}), 16);
EXPECT_EQ(countl_zero(static_cast<uint16_t>(-1)), 0);
EXPECT_EQ(countl_zero(uint32_t{}), 32);
EXPECT_EQ(countl_zero(~uint32_t{}), 0);
EXPECT_EQ(countl_zero(uint64_t{}), 64);
EXPECT_EQ(countl_zero(~uint64_t{}), 0);
for (int i = 0; i < 8; i++) {
EXPECT_EQ(countl_zero(static_cast<uint8_t>(1u << i)), 7 - i);
}
for (int i = 0; i < 16; i++) {
EXPECT_EQ(countl_zero(static_cast<uint16_t>(1u << i)), 15 - i);
}
for (int i = 0; i < 32; i++) {
EXPECT_EQ(countl_zero(uint32_t{1} << i), 31 - i);
}
for (int i = 0; i < 64; i++) {
EXPECT_EQ(countl_zero(uint64_t{1} << i), 63 - i);
}
}
TEST(Counting, LeadingOnes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(countl_one(uint8_t{}) == 0, "");
static_assert(countl_one(static_cast<uint8_t>(-1)) == 8, "");
static_assert(countl_one(uint16_t{}) == 0, "");
static_assert(countl_one(static_cast<uint16_t>(-1)) == 16, "");
static_assert(countl_one(uint32_t{}) == 0, "");
static_assert(countl_one(~uint32_t{}) == 32, "");
static_assert(countl_one(uint64_t{}) == 0, "");
static_assert(countl_one(~uint64_t{}) == 64, "");
#endif
EXPECT_EQ(countl_one(uint8_t{}), 0);
EXPECT_EQ(countl_one(static_cast<uint8_t>(-1)), 8);
EXPECT_EQ(countl_one(uint16_t{}), 0);
EXPECT_EQ(countl_one(static_cast<uint16_t>(-1)), 16);
EXPECT_EQ(countl_one(uint32_t{}), 0);
EXPECT_EQ(countl_one(~uint32_t{}), 32);
EXPECT_EQ(countl_one(uint64_t{}), 0);
EXPECT_EQ(countl_one(~uint64_t{}), 64);
}
TEST(Counting, TrailingZeroes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CTZ
static_assert(countr_zero(uint8_t{}) == 8, "");
static_assert(countr_zero(static_cast<uint8_t>(-1)) == 0, "");
static_assert(countr_zero(uint16_t{}) == 16, "");
static_assert(countr_zero(static_cast<uint16_t>(-1)) == 0, "");
static_assert(countr_zero(uint32_t{}) == 32, "");
static_assert(countr_zero(~uint32_t{}) == 0, "");
static_assert(countr_zero(uint64_t{}) == 64, "");
static_assert(countr_zero(~uint64_t{}) == 0, "");
#endif
EXPECT_EQ(countr_zero(uint8_t{}), 8);
EXPECT_EQ(countr_zero(static_cast<uint8_t>(-1)), 0);
EXPECT_EQ(countr_zero(uint16_t{}), 16);
EXPECT_EQ(countr_zero(static_cast<uint16_t>(-1)), 0);
EXPECT_EQ(countr_zero(uint32_t{}), 32);
EXPECT_EQ(countr_zero(~uint32_t{}), 0);
EXPECT_EQ(countr_zero(uint64_t{}), 64);
EXPECT_EQ(countr_zero(~uint64_t{}), 0);
}
TEST(Counting, TrailingOnes) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CTZ
static_assert(countr_one(uint8_t{}) == 0, "");
static_assert(countr_one(static_cast<uint8_t>(-1)) == 8, "");
static_assert(countr_one(uint16_t{}) == 0, "");
static_assert(countr_one(static_cast<uint16_t>(-1)) == 16, "");
static_assert(countr_one(uint32_t{}) == 0, "");
static_assert(countr_one(~uint32_t{}) == 32, "");
static_assert(countr_one(uint64_t{}) == 0, "");
static_assert(countr_one(~uint64_t{}) == 64, "");
#endif
EXPECT_EQ(countr_one(uint8_t{}), 0);
EXPECT_EQ(countr_one(static_cast<uint8_t>(-1)), 8);
EXPECT_EQ(countr_one(uint16_t{}), 0);
EXPECT_EQ(countr_one(static_cast<uint16_t>(-1)), 16);
EXPECT_EQ(countr_one(uint32_t{}), 0);
EXPECT_EQ(countr_one(~uint32_t{}), 32);
EXPECT_EQ(countr_one(uint64_t{}), 0);
EXPECT_EQ(countr_one(~uint64_t{}), 64);
}
TEST(Counting, Popcount) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT
static_assert(popcount(uint8_t{}) == 0, "");
static_assert(popcount(uint8_t{1}) == 1, "");
static_assert(popcount(static_cast<uint8_t>(-1)) == 8, "");
static_assert(popcount(uint16_t{}) == 0, "");
static_assert(popcount(uint16_t{1}) == 1, "");
static_assert(popcount(static_cast<uint16_t>(-1)) == 16, "");
static_assert(popcount(uint32_t{}) == 0, "");
static_assert(popcount(uint32_t{1}) == 1, "");
static_assert(popcount(~uint32_t{}) == 32, "");
static_assert(popcount(uint64_t{}) == 0, "");
static_assert(popcount(uint64_t{1}) == 1, "");
static_assert(popcount(~uint64_t{}) == 64, "");
#endif
EXPECT_EQ(popcount(uint8_t{}), 0);
EXPECT_EQ(popcount(uint8_t{1}), 1);
EXPECT_EQ(popcount(static_cast<uint8_t>(-1)), 8);
EXPECT_EQ(popcount(uint16_t{}), 0);
EXPECT_EQ(popcount(uint16_t{1}), 1);
EXPECT_EQ(popcount(static_cast<uint16_t>(-1)), 16);
EXPECT_EQ(popcount(uint32_t{}), 0);
EXPECT_EQ(popcount(uint32_t{1}), 1);
EXPECT_EQ(popcount(~uint32_t{}), 32);
EXPECT_EQ(popcount(uint64_t{}), 0);
EXPECT_EQ(popcount(uint64_t{1}), 1);
EXPECT_EQ(popcount(~uint64_t{}), 64);
for (int i = 0; i < 8; i++) {
EXPECT_EQ(popcount(static_cast<uint8_t>(uint8_t{1} << i)), 1);
EXPECT_EQ(popcount(static_cast<uint8_t>(static_cast<uint8_t>(-1) ^
(uint8_t{1} << i))),
7);
}
for (int i = 0; i < 16; i++) {
EXPECT_EQ(popcount(static_cast<uint16_t>(uint16_t{1} << i)), 1);
EXPECT_EQ(popcount(static_cast<uint16_t>(static_cast<uint16_t>(-1) ^
(uint16_t{1} << i))),
15);
}
for (int i = 0; i < 32; i++) {
EXPECT_EQ(popcount(uint32_t{1} << i), 1);
EXPECT_EQ(popcount(static_cast<uint32_t>(-1) ^ (uint32_t{1} << i)), 31);
}
for (int i = 0; i < 64; i++) {
EXPECT_EQ(popcount(uint64_t{1} << i), 1);
EXPECT_EQ(popcount(static_cast<uint64_t>(-1) ^ (uint64_t{1} << i)), 63);
}
}
template <typename T>
struct PopcountInput {
T value = 0;
int expected = 0;
};
template <typename T>
PopcountInput<T> GeneratePopcountInput(absl::BitGen& gen) {
PopcountInput<T> ret;
for (int i = 0; i < std::numeric_limits<T>::digits; i++) {
bool coin = absl::Bernoulli(gen, 0.2);
if (coin) {
ret.value |= T{1} << i;
ret.expected++;
}
}
return ret;
}
TEST(Counting, PopcountFuzz) {
absl::BitGen rng;
constexpr int kTrials = 100;
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint8_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint16_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint32_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
for (int i = 0; i < kTrials; ++i) {
auto input = GeneratePopcountInput<uint64_t>(rng);
EXPECT_EQ(popcount(input.value), input.expected);
}
}
TEST(IntegralPowersOfTwo, SingleBit) {
EXPECT_FALSE(has_single_bit(uint8_t{}));
EXPECT_FALSE(has_single_bit(static_cast<uint8_t>(-1)));
EXPECT_FALSE(has_single_bit(uint16_t{}));
EXPECT_FALSE(has_single_bit(static_cast<uint16_t>(-1)));
EXPECT_FALSE(has_single_bit(uint32_t{}));
EXPECT_FALSE(has_single_bit(~uint32_t{}));
EXPECT_FALSE(has_single_bit(uint64_t{}));
EXPECT_FALSE(has_single_bit(~uint64_t{}));
static_assert(!has_single_bit(0u), "");
static_assert(has_single_bit(1u), "");
static_assert(has_single_bit(2u), "");
static_assert(!has_single_bit(3u), "");
static_assert(has_single_bit(4u), "");
static_assert(!has_single_bit(1337u), "");
static_assert(has_single_bit(65536u), "");
static_assert(has_single_bit(uint32_t{1} << 30), "");
static_assert(has_single_bit(uint64_t{1} << 42), "");
EXPECT_FALSE(has_single_bit(0u));
EXPECT_TRUE(has_single_bit(1u));
EXPECT_TRUE(has_single_bit(2u));
EXPECT_FALSE(has_single_bit(3u));
EXPECT_TRUE(has_single_bit(4u));
EXPECT_FALSE(has_single_bit(1337u));
EXPECT_TRUE(has_single_bit(65536u));
EXPECT_TRUE(has_single_bit(uint32_t{1} << 30));
EXPECT_TRUE(has_single_bit(uint64_t{1} << 42));
EXPECT_TRUE(has_single_bit(
static_cast<uint8_t>(std::numeric_limits<uint8_t>::max() / 2 + 1)));
EXPECT_TRUE(has_single_bit(
static_cast<uint16_t>(std::numeric_limits<uint16_t>::max() / 2 + 1)));
EXPECT_TRUE(has_single_bit(
static_cast<uint32_t>(std::numeric_limits<uint32_t>::max() / 2 + 1)));
EXPECT_TRUE(has_single_bit(
static_cast<uint64_t>(std::numeric_limits<uint64_t>::max() / 2 + 1)));
}
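// SFINAE probe: overload resolution prefers the `int` overload, but it is
// only viable when `bit_ceil(arg)` can be evaluated as a constant expression.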
template <typename T, T arg, T = bit_ceil(arg)>
bool IsBitCeilConstantExpression(int) {
return true;
}
template <typename T, T arg>
bool IsBitCeilConstantExpression(char) {
return false;
}
TEST(IntegralPowersOfTwo, Ceiling) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(bit_ceil(0u) == 1, "");
static_assert(bit_ceil(1u) == 1, "");
static_assert(bit_ceil(2u) == 2, "");
static_assert(bit_ceil(3u) == 4, "");
static_assert(bit_ceil(4u) == 4, "");
static_assert(bit_ceil(1337u) == 2048, "");
static_assert(bit_ceil(65536u) == 65536, "");
static_assert(bit_ceil(65536u - 1337u) == 65536, "");
static_assert(bit_ceil(uint32_t{0x80000000}) == uint32_t{0x80000000}, "");
static_assert(bit_ceil(uint64_t{0x40000000000}) == uint64_t{0x40000000000},
"");
static_assert(
bit_ceil(uint64_t{0x8000000000000000}) == uint64_t{0x8000000000000000},
"");
EXPECT_TRUE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x0}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x80}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint8_t, uint8_t{0x81}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint8_t, uint8_t{0xff}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x0}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x8000}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint16_t, uint16_t{0x8001}>(0)));
EXPECT_FALSE((IsBitCeilConstantExpression<uint16_t, uint16_t{0xffff}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint32_t, uint32_t{0x0}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint32_t, uint32_t{0x80000000}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint32_t, uint32_t{0x80000001}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint32_t, uint32_t{0xffffffff}>(0)));
EXPECT_TRUE((IsBitCeilConstantExpression<uint64_t, uint64_t{0x0}>(0)));
EXPECT_TRUE(
(IsBitCeilConstantExpression<uint64_t, uint64_t{0x8000000000000000}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint64_t, uint64_t{0x8000000000000001}>(0)));
EXPECT_FALSE(
(IsBitCeilConstantExpression<uint64_t, uint64_t{0xffffffffffffffff}>(0)));
#endif
EXPECT_EQ(bit_ceil(0u), 1);
EXPECT_EQ(bit_ceil(1u), 1);
EXPECT_EQ(bit_ceil(2u), 2);
EXPECT_EQ(bit_ceil(3u), 4);
EXPECT_EQ(bit_ceil(4u), 4);
EXPECT_EQ(bit_ceil(1337u), 2048);
EXPECT_EQ(bit_ceil(65536u), 65536);
EXPECT_EQ(bit_ceil(65536u - 1337u), 65536);
EXPECT_EQ(bit_ceil(uint64_t{0x40000000000}), uint64_t{0x40000000000});
}
TEST(IntegralPowersOfTwo, Floor) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(bit_floor(0u) == 0, "");
static_assert(bit_floor(1u) == 1, "");
static_assert(bit_floor(2u) == 2, "");
static_assert(bit_floor(3u) == 2, "");
static_assert(bit_floor(4u) == 4, "");
static_assert(bit_floor(1337u) == 1024, "");
static_assert(bit_floor(65536u) == 65536, "");
static_assert(bit_floor(65536u - 1337u) == 32768, "");
static_assert(bit_floor(uint64_t{0x40000000000}) == uint64_t{0x40000000000},
"");
#endif
EXPECT_EQ(bit_floor(0u), 0);
EXPECT_EQ(bit_floor(1u), 1);
EXPECT_EQ(bit_floor(2u), 2);
EXPECT_EQ(bit_floor(3u), 2);
EXPECT_EQ(bit_floor(4u), 4);
EXPECT_EQ(bit_floor(1337u), 1024);
EXPECT_EQ(bit_floor(65536u), 65536);
EXPECT_EQ(bit_floor(65536u - 1337u), 32768);
EXPECT_EQ(bit_floor(uint64_t{0x40000000000}), uint64_t{0x40000000000});
for (int i = 0; i < 8; i++) {
uint8_t input = uint8_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(static_cast<uint8_t>(input + 1)), input);
}
}
for (int i = 0; i < 16; i++) {
uint16_t input = uint16_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(static_cast<uint16_t>(input + 1)), input);
}
}
for (int i = 0; i < 32; i++) {
uint32_t input = uint32_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(input + 1), input);
}
}
for (int i = 0; i < 64; i++) {
uint64_t input = uint64_t{1} << i;
EXPECT_EQ(bit_floor(input), input);
if (i > 0) {
EXPECT_EQ(bit_floor(input + 1), input);
}
}
}
TEST(IntegralPowersOfTwo, Width) {
#if ABSL_INTERNAL_HAS_CONSTEXPR_CLZ
static_assert(bit_width(uint8_t{}) == 0, "");
static_assert(bit_width(uint8_t{1}) == 1, "");
static_assert(bit_width(uint8_t{3}) == 2, "");
static_assert(bit_width(static_cast<uint8_t>(-1)) == 8, "");
static_assert(bit_width(uint16_t{}) == 0, "");
static_assert(bit_width(uint16_t{1}) == 1, "");
static_assert(bit_width(uint16_t{3}) == 2, "");
static_assert(bit_width(static_cast<uint16_t>(-1)) == 16, "");
static_assert(bit_width(uint32_t{}) == 0, "");
static_assert(bit_width(uint32_t{1}) == 1, "");
static_assert(bit_width(uint32_t{3}) == 2, "");
static_assert(bit_width(~uint32_t{}) == 32, "");
static_assert(bit_width(uint64_t{}) == 0, "");
static_assert(bit_width(uint64_t{1}) == 1, "");
static_assert(bit_width(uint64_t{3}) == 2, "");
static_assert(bit_width(~uint64_t{}) == 64, "");
#endif
EXPECT_EQ(bit_width(uint8_t{}), 0);
EXPECT_EQ(bit_width(uint8_t{1}), 1);
EXPECT_EQ(bit_width(uint8_t{3}), 2);
EXPECT_EQ(bit_width(static_cast<uint8_t>(-1)), 8);
EXPECT_EQ(bit_width(uint16_t{}), 0);
EXPECT_EQ(bit_width(uint16_t{1}), 1);
EXPECT_EQ(bit_width(uint16_t{3}), 2);
EXPECT_EQ(bit_width(static_cast<uint16_t>(-1)), 16);
EXPECT_EQ(bit_width(uint32_t{}), 0);
EXPECT_EQ(bit_width(uint32_t{1}), 1);
EXPECT_EQ(bit_width(uint32_t{3}), 2);
EXPECT_EQ(bit_width(~uint32_t{}), 32);
EXPECT_EQ(bit_width(uint64_t{}), 0);
EXPECT_EQ(bit_width(uint64_t{1}), 1);
EXPECT_EQ(bit_width(uint64_t{3}), 2);
EXPECT_EQ(bit_width(~uint64_t{}), 64);
for (int i = 0; i < 8; i++) {
EXPECT_EQ(bit_width(static_cast<uint8_t>(uint8_t{1} << i)), i + 1);
}
for (int i = 0; i < 16; i++) {
EXPECT_EQ(bit_width(static_cast<uint16_t>(uint16_t{1} << i)), i + 1);
}
for (int i = 0; i < 32; i++) {
EXPECT_EQ(bit_width(uint32_t{1} << i), i + 1);
}
for (int i = 0; i < 64; i++) {
EXPECT_EQ(bit_width(uint64_t{1} << i), i + 1);
}
}
#if defined(__GNUC__)
static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_POPCOUNT,
"popcount should be constexpr");
static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_CLZ, "clz should be constexpr");
static_assert(ABSL_INTERNAL_HAS_CONSTEXPR_CTZ, "ctz should be constexpr");
#endif
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/numeric/internal/bits.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/numeric/bits_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
4142408e-4266-428e-ac32-9ee5cf8ea20c | cpp | tensorflow/tensorflow | arg_min_max | tensorflow/lite/kernels/arg_min_max.cc | tensorflow/lite/delegates/hexagon/builders/tests/arg_min_max_test.cc | #include "tensorflow/lite/kernels/internal/reference/arg_min_max.h"
#include <stdint.h>
#include <functional>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace arg_min_max {
constexpr int kInputTensor = 0;
constexpr int kAxis = 1;
constexpr int kOutputTensor = 0;
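// Computes the output shape: the input shape with the reduced axis removed.
// A negative axis counts from the back, as in NumPy.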
TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* axis, TfLiteTensor* output) {
int axis_value;
if (axis->type == kTfLiteInt64) {
axis_value = static_cast<int>(*GetTensorData<int64_t>(axis));
} else {
axis_value = *GetTensorData<int>(axis);
}
if (axis_value < 0) {
axis_value += NumDimensions(input);
}
TF_LITE_ENSURE(context, axis_value >= 0);
TF_LITE_ENSURE(context, axis_value < NumDimensions(input));
TfLiteIntArray* output_dims = TfLiteIntArrayCreate(NumDimensions(input) - 1);
int j = 0;
for (int i = 0; i < NumDimensions(input); ++i) {
if (i != axis_value) {
output_dims->data[j] = SizeOfDimension(input, i);
++j;
}
}
return context->ResizeTensor(context, output, output_dims);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis));
TF_LITE_ENSURE_EQ(context, NumElements(axis), 1);
TF_LITE_ENSURE(context,
axis->type == kTfLiteInt32 || axis->type == kTfLiteInt64);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
auto* params = reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data);
switch (params->output_type) {
case kTfLiteInt32:
output->type = kTfLiteInt32;
break;
case kTfLiteInt64:
output->type = kTfLiteInt64;
break;
default:
TF_LITE_KERNEL_LOG(context, "Unknown index output data type: %d",
params->output_type);
return kTfLiteError;
}
switch (input->type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt32:
case kTfLiteBool:
break;
default:
TF_LITE_KERNEL_LOG(context,
"Unknown input type: %d, only float32, int types "
"and bool are supported",
input->type);
return kTfLiteError;
}
TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
if (IsConstantOrPersistentTensor(axis)) {
TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output));
} else {
SetTensorToDynamic(output);
}
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* axis;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output));
}
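// Dispatches to the templated optimized_ops::ArgMinMax kernel for a concrete
// (input, axis, output) type combination.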
#define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \
optimized_ops::ArgMinMax( \
GetTensorShape(input), GetTensorData<data_type>(input), \
GetTensorData<axis_type>(axis), GetTensorShape(output), \
GetTensorData<output_type>(output), is_arg_max)
if (axis->type == kTfLiteInt32) {
switch (output->type) {
case kTfLiteInt32: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int32_t, int32_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
case kTfLiteInt64: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int64_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int32_t, int64_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only int32 and int64 are supported currently, got %s.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
} else {
switch (output->type) {
case kTfLiteInt32: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int64_t, int32_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int32_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int32_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int32_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int64_t, int32_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
case kTfLiteInt64: {
switch (input->type) {
case kTfLiteFloat32:
TF_LITE_ARG_MIN_MAX(float, int64_t, int64_t);
break;
case kTfLiteUInt8:
TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int64_t);
break;
case kTfLiteInt8:
TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int64_t);
break;
case kTfLiteInt32:
TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int64_t);
break;
case kTfLiteBool:
TF_LITE_ARG_MIN_MAX(bool, int64_t, int64_t);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8, int8, int32 and bool are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only int32 and int64 are supported currently, got %s.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
#undef TF_LITE_ARG_MIN_MAX
return kTfLiteOk;
}
TfLiteStatus ArgMinEval(TfLiteContext* context, TfLiteNode* node) {
return Eval(context, node, false);
}
TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) {
return Eval(context, node, true);
}
}
TfLiteRegistration* Register_ARG_MAX() {
static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare,
arg_min_max::ArgMaxEval};
return &r;
}
TfLiteRegistration* Register_ARG_MIN() {
static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare,
arg_min_max::ArgMinEval};
return &r;
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
class ArgBaseOpModel : public SingleOpModelWithHexagon {
public:
explicit ArgBaseOpModel(TensorType input_type) {
input_ = AddInput(input_type);
output_ = AddOutput(TensorType_INT32);
}
int input() const { return input_; }
std::vector<int> GetInt32Output() const {
return ExtractVector<int>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
using SingleOpModelWithHexagon::builder_;
int input_;
int output_;
};
class ArgMinOpModel : public ArgBaseOpModel {
public:
ArgMinOpModel(std::initializer_list<int> input_shape, TensorType input_type)
      : ArgBaseOpModel(input_type), input_shape_(input_shape) {}
void Build() {
SetBuiltinOp(BuiltinOperator_ARG_MIN, BuiltinOptions_ArgMinOptions,
                 CreateArgMinOptions(builder_, TensorType_INT32).Union());
BuildInterpreter({input_shape_, {1}});
}
private:
std::vector<int> input_shape_;
};
class ArgMaxOpModel : public ArgBaseOpModel {
public:
ArgMaxOpModel(std::initializer_list<int> input_shape, TensorType input_type)
      : ArgBaseOpModel(input_type), input_shape_(input_shape) {}
void Build() {
SetBuiltinOp(BuiltinOperator_ARG_MAX, BuiltinOptions_ArgMaxOptions,
                 CreateArgMaxOptions(builder_, TensorType_INT32).Union());
BuildInterpreter({input_shape_, {1}});
}
private:
std::vector<int> input_shape_;
};
template <typename integer_type, TensorType tensor_dtype>
void ArgMinTestImpl() {
ArgMinOpModel model({1, 1, 1, 4}, tensor_dtype);
model.AddConstInput(TensorType_INT32, {3}, {1});
model.Build();
if (tensor_dtype == TensorType_UINT8) {
model.SymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
} else {
model.SignedSymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
}
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetInt32Output(), ElementsAreArray({2}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1}));
}
template <typename integer_type, TensorType tensor_dtype>
void ArgMinNegativeTestImpl() {
ArgMinOpModel model({1, 1, 2, 4}, tensor_dtype);
model.AddConstInput(TensorType_INT32, {-2}, {1});
model.Build();
if (tensor_dtype == TensorType_UINT8) {
model.SymmetricQuantizeAndPopulate(model.input(), {1, 2, 7, 8, 1, 9, 7, 3});
} else {
model.SignedSymmetricQuantizeAndPopulate(model.input(),
{1, 2, 7, 8, 1, 9, 7, 3});
}
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetInt32Output(), ElementsAreArray({0, 0, 0, 1}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 4}));
}
template <typename integer_type, TensorType tensor_dtype>
void ArgMaxTestImpl() {
ArgMaxOpModel model({1, 1, 1, 4}, tensor_dtype);
model.AddConstInput(TensorType_INT32, {3}, {1});
model.Build();
if (tensor_dtype == TensorType_UINT8) {
model.SymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
} else {
model.SignedSymmetricQuantizeAndPopulate(model.input(), {1, 5, 0, 7});
}
model.ApplyDelegateAndInvoke();
EXPECT_THAT(model.GetInt32Output(), ElementsAreArray({3}));
}
TEST(ArgMinTest, GetArgMin_UInt8) {
ArgMinTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ArgMinTest, GetArgMin_Int8) { ArgMinTestImpl<int8_t, TensorType_INT8>(); }
TEST(ArgMinTest, GetArgMinNegative_UInt8) {
ArgMinNegativeTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ArgMinTest, GetArgMinNegative_Int8) {
ArgMinNegativeTestImpl<int8_t, TensorType_INT8>();
}
TEST(ArgMaxTest, GetArgMax_UInt8) {
ArgMaxTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ArgMaxTest, GetArgMax_Int8) { ArgMaxTestImpl<int8_t, TensorType_INT8>(); }
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/arg_min_max.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/arg_min_max_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
856f08f5-e960-4ae4-90cc-7b4516bd8de3 | cpp | tensorflow/tensorflow | fingerprint_op | tensorflow/core/kernels/fingerprint_op.cc | tensorflow/core/kernels/fingerprint_op_test.cc | #include <cstddef>
#include <string>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/fingerprint.h"
namespace tensorflow {
namespace {
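// Copies `value` into `output` in little-endian byte order, reversing the
// bytes on big-endian platforms so fingerprints match across architectures.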
template <typename T>
inline void CopyToBuffer(const T& value, uint8* output) {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
static_assert(port::kLittleEndian, "");
std::memcpy(output, &value, sizeof(value));
#else
static_assert(!port::kLittleEndian, "");
std::reverse_copy(reinterpret_cast<const uint8*>(&value),
reinterpret_cast<const uint8*>(&value + 1), output);
#endif
}
void FarmhashFingerprint64(TTypes<uint8, 2>::ConstTensor input,
TTypes<uint8, 2>::Matrix output) {
DCHECK_EQ(output.dimension(0), input.dimension(0));
DCHECK_EQ(output.dimension(1), sizeof(uint64));
for (int64_t i = 0; i < output.dimension(0); ++i) {
const uint64 fingerprint =
Fingerprint64({reinterpret_cast<const char*>(&input(i, 0)),
static_cast<std::size_t>(input.dimension(1))});
CopyToBuffer(fingerprint, &output(i, 0));
}
}
void FarmhashFingerprint64(TTypes<tstring>::ConstFlat input,
TTypes<uint8, 2>::Matrix output) {
DCHECK_EQ(output.dimension(0), input.dimension(0));
DCHECK_EQ(output.dimension(1), sizeof(uint64));
for (int64_t i = 0; i < input.dimension(0); ++i) {
const uint64 fingerprint =
Fingerprint64({input(i).data(), input(i).size()});
CopyToBuffer(fingerprint, &output(i, 0));
}
}
class FingerprintOp : public OpKernel {
public:
explicit FingerprintOp(OpKernelConstruction* context) : OpKernel(context) {
DataType dtype;
OP_REQUIRES_OK(context, context->GetAttr("T", &dtype));
OP_REQUIRES(context, DataTypeCanUseMemcpy(dtype) || dtype == DT_STRING,
errors::InvalidArgument("Data type not supported: ",
DataTypeString(dtype)));
}
void Compute(tensorflow::OpKernelContext* context) override {
const Tensor& method_tensor = context->input(1);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(method_tensor.shape()),
errors::InvalidArgument("`method` should be a scalar string: ",
method_tensor.shape()));
const tstring& method = method_tensor.scalar<tstring>()();
OP_REQUIRES(
context, method == "farmhash64",
errors::InvalidArgument("Unsupported fingerprint method: ", method));
const Tensor& input = context->input(0);
OP_REQUIRES(
context, TensorShapeUtils::IsVectorOrHigher(input.shape()),
errors::InvalidArgument("`data` should have at least one dimension: ",
input.shape()));
const int64_t dim0 = input.shape().dim_size(0);
int64_t dim1;
if (dim0 == 0) {
dim1 = 0;
} else {
dim1 = input.shape().num_elements() / dim0;
}
Tensor* output;
OP_REQUIRES_OK(context,
context->allocate_output(
0, TensorShape{dim0, kFingerprintSize}, &output));
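    // For string inputs with more than one string per batch entry, fingerprint
    // each string individually and then fingerprint the concatenated per-entry
    // fingerprints, so the output keeps the fixed {dim0, 8} shape.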
if (input.dtype() == DT_STRING) {
if (dim1 > 1) {
Tensor temp;
OP_REQUIRES_OK(context, context->allocate_temp(
DT_UINT8,
TensorShape{input.shape().num_elements(),
kFingerprintSize},
&temp));
FarmhashFingerprint64(input.flat<tstring>(), temp.tensor<uint8, 2>());
FarmhashFingerprint64(static_cast<const Tensor&>(temp).shaped<uint8, 2>(
{dim0, dim1 * kFingerprintSize}),
output->matrix<uint8>());
} else {
FarmhashFingerprint64(input.flat<tstring>(), output->matrix<uint8>());
}
} else {
auto data = input.bit_casted_shaped<uint8, 2>(
{dim0, dim1 * DataTypeSize(input.dtype())});
FarmhashFingerprint64(data, output->matrix<uint8>());
}
}
private:
static constexpr int kFingerprintSize = sizeof(uint64);
};
REGISTER_KERNEL_BUILDER(Name("Fingerprint").Device(tensorflow::DEVICE_CPU),
FingerprintOp);
}
} | #include <memory>
#include <numeric>
#include <vector>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
Status MakeNodeDef(DataType dtype, NodeDef* node_def) {
return NodeDefBuilder("fingerprint", "Fingerprint")
.Input(FakeInput(dtype))
.Input(FakeInput(DT_STRING))
.Finalize(node_def);
}
class FingerprintOpTest : public OpsTestBase {
protected:
Status MakeFingerprintOp(Tensor* tensor) {
return MakeFingerprintOp(tensor, "farmhash64");
}
Status MakeFingerprintOp(Tensor* data, const string& method) {
TF_RETURN_IF_ERROR(MakeNodeDef(data->dtype(), node_def()));
TF_RETURN_IF_ERROR(InitOp());
inputs_.clear();
inputs_.push_back(TensorValue(data));
method_ = Tensor(DT_STRING, TensorShape{});
method_.scalar<tstring>()() = method;
inputs_.push_back(TensorValue(&method_));
return absl::OkStatus();
}
Tensor batch_dims_;
Tensor method_;
};
TEST_F(FingerprintOpTest, Empty) {
Tensor tensor(DT_UINT8, {0});
TF_ASSERT_OK(MakeFingerprintOp(&tensor));
TF_ASSERT_OK(RunOpKernel());
EXPECT_EQ(GetOutput(0)->shape(), (TensorShape{0, 8}));
EXPECT_EQ(GetOutput(0)->tensor_data(), "");
}
TEST_F(FingerprintOpTest, GoldenValue) {
Tensor tensor(DT_UINT8, {1, 3, 4, 5, 6, 7});
auto buffer = tensor.flat<uint8>();
std::iota(buffer.data(), buffer.data() + buffer.size(),
static_cast<uint8>(47));
TF_ASSERT_OK(MakeFingerprintOp(&tensor));
TF_ASSERT_OK(RunOpKernel());
EXPECT_EQ(GetOutput(0)->shape(), (TensorShape{1, 8}));
EXPECT_EQ(GetOutput(0)->tensor_data(), "\x2d\x90\xdf\x03\x79\x36\x3c\x43");
}
TEST_F(FingerprintOpTest, StringGoldenValue) {
Tensor data(DT_STRING, {1, 2, 2});
auto buffer = data.flat<tstring>();
buffer(0).resize(10);
buffer(1).resize(7);
buffer(2).resize(0);
buffer(3).resize(19);
std::iota(&buffer(0)[0], &buffer(0)[0] + buffer(0).size(), 0);
std::iota(&buffer(1)[0], &buffer(1)[0] + buffer(1).size(), 7);
std::iota(&buffer(2)[0], &buffer(2)[0] + buffer(2).size(), 71);
std::iota(&buffer(3)[0], &buffer(3)[0] + buffer(3).size(), 41);
TF_ASSERT_OK(MakeFingerprintOp(&data));
TF_ASSERT_OK(RunOpKernel());
ASSERT_EQ(GetOutput(0)->shape(), (TensorShape{1, 8}));
EXPECT_EQ(GetOutput(0)->tensor_data(), "\x92\x43\x28\x52\xa3\x7c\x48\x18");
ASSERT_TRUE(data.CopyFrom(data, TensorShape{4}));
TF_ASSERT_OK(MakeFingerprintOp(&data));
TF_ASSERT_OK(RunOpKernel());
ASSERT_EQ(GetOutput(0)->shape(), (TensorShape{4, 8}));
EXPECT_EQ(GetOutput(0)->tensor_data(),
"\xea\xff\xd6\xb2\xb2\x4d\x70\x9b"
"\x6e\x9d\xed\x21\xc6\x4a\x61\x52"
"\x4f\x40\x90\x2f\x3b\x6a\xe1\x9a"
"\x0d\x9b\x7f\x63\x23\x14\x1c\xb8");
}
TEST_F(FingerprintOpTest, Collision) {
const TensorShape shape = {1, 2, 4, 6};
for (DataType dtype : kRealNumberTypes) {
const int64_t size = shape.num_elements() * DataTypeSize(dtype);
Tensor tensor(dtype, shape);
auto buffer = tensor.bit_casted_shaped<uint8, 1>({size});
buffer.setRandom();
TF_ASSERT_OK(MakeFingerprintOp(&tensor));
TF_ASSERT_OK(RunOpKernel());
const Tensor fingerprint0 = *GetOutput(0);
const int offset = buffer(0) % buffer.size();
buffer(offset) = ~buffer(offset);
TF_ASSERT_OK(MakeFingerprintOp(&tensor));
TF_ASSERT_OK(RunOpKernel());
const Tensor fingerprint1 = *GetOutput(0);
EXPECT_NE(fingerprint0.tensor_data(), fingerprint1.tensor_data());
}
}
TEST_F(FingerprintOpTest, CollisionString) {
constexpr int64_t size = 256;
Tensor tensor(DT_STRING, {1});
auto& input = tensor.vec<tstring>()(0);
input.resize(size);
TTypes<uint8>::UnalignedFlat buffer(reinterpret_cast<uint8*>(&input[0]),
input.size());
buffer.setRandom();
TF_ASSERT_OK(MakeFingerprintOp(&tensor));
TF_ASSERT_OK(RunOpKernel());
const Tensor fingerprint0 = *GetOutput(0);
const int offset = buffer(0) % buffer.size();
buffer(offset) = ~buffer(offset);
TF_ASSERT_OK(MakeFingerprintOp(&tensor));
TF_ASSERT_OK(RunOpKernel());
const Tensor fingerprint1 = *GetOutput(0);
EXPECT_NE(fingerprint0.tensor_data(), fingerprint1.tensor_data());
}
TEST_F(FingerprintOpTest, CompareBytesAndString) {
Tensor pods_tensor(DT_FLOAT, {4, 64});
Tensor strings_tensor(DT_STRING, {4});
auto pods = pods_tensor.matrix<float>();
pods.setRandom();
auto strings = strings_tensor.vec<tstring>();
for (int64_t i = 0; i < strings.size(); ++i) {
strings(i).assign(reinterpret_cast<const char*>(&pods(i, 0)),
pods.dimension(1) * sizeof(pods(i, 0)));
}
TF_ASSERT_OK(MakeFingerprintOp(&pods_tensor));
TF_ASSERT_OK(RunOpKernel());
Tensor pods_fingerprints = *GetOutput(0);
TF_ASSERT_OK(MakeFingerprintOp(&strings_tensor));
TF_ASSERT_OK(RunOpKernel());
Tensor strings_fingerprints = *GetOutput(0);
EXPECT_EQ(pods_fingerprints.tensor_data(),
strings_fingerprints.tensor_data());
}
TEST_F(FingerprintOpTest, SupportedMethods) {
Tensor tensor(DT_STRING, TensorShape{1});
TF_ASSERT_OK(MakeFingerprintOp(&tensor, "unsupported_method"));
const Status status = RunOpKernel();
EXPECT_FALSE(status.ok());
EXPECT_NE(status.message().find("unsupported_method"), string::npos);
}
TEST_F(FingerprintOpTest, SupportedTypes) {
Tensor input(DT_RESOURCE, TensorShape{1});
EXPECT_FALSE(MakeFingerprintOp(&input).ok());
}
TEST(FingerprintOpShapeFnTest, MethodKnownStatically) {
ShapeInferenceTestOp op("Fingerprint");
Tensor method(DT_STRING, TensorShape{});
method.scalar<tstring>()() = "farmhash64";
op.input_tensors.assign({nullptr, &method});
TF_ASSERT_OK(MakeNodeDef(DT_UINT8, &op.node_def));
INFER_OK(op, "?;?", "[?,8]");
INFER_ERROR("must be at least rank 1", op, "[];?");
INFER_OK(op, "[?];?", "[d0_0,8]");
INFER_OK(op, "[1,?];?", "[d0_0,8]");
INFER_OK(op, "[?,2,3];?", "[d0_0,8]");
}
TEST(FingerprintOpShapeFnTest, MethodUnknownStatically) {
ShapeInferenceTestOp op("Fingerprint");
TF_ASSERT_OK(MakeNodeDef(DT_FLOAT, &op.node_def));
INFER_OK(op, "?;?", "[?,?]");
INFER_ERROR("must be at least rank 1", op, "[];?");
INFER_OK(op, "[?];?", "[d0_0,?]");
INFER_OK(op, "[1,?];?", "[d0_0,?]");
INFER_OK(op, "[?,2,3];?", "[d0_0,?]");
}
TEST(FingerprintOpShapeFnTest, InvalidMethod) {
ShapeInferenceTestOp op("Fingerprint");
INFER_ERROR("must be rank 0", op, "[1];[1]");
Tensor method(DT_STRING, TensorShape{1});
method.vec<tstring>()(0) = "farmhash64";
op.input_tensors.assign({nullptr, &method});
INFER_ERROR("must be rank 0", op, "?;?");
method = Tensor(DT_STRING, TensorShape{});
method.scalar<tstring>()() = "unsupported_method";
op.input_tensors.assign({nullptr, &method});
INFER_ERROR("unsupported_method", op, "?;?");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/fingerprint_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/fingerprint_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d9deaec4-1a4f-4ebb-9b2a-34857a5e3323 | cpp | tensorflow/tensorflow | phwc4_to_bhwc | tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.cc | tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc_test.cc | #include "tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/converters/util.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_program.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
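// Builds a compute shader that converts PHWC4 (channels padded to a multiple
// of four and stored as vec4 slices) back to plain BHWC, writing one float
// per invocation.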
absl::Status ConverterPhwc4ToBhwc::Create(ConverterPhwc4ToBhwc* converter) {
uint3 workgroup_size = uint3(4, 4, 4);
std::string shader_source = GetShaderHeader(workgroup_size) + R"(
layout(std430) buffer;
precision highp float;
layout(binding = 0) readonly buffer B0 {
vec4 elements[];
} input_data;
layout(binding = 1) writeonly buffer B1 {
float elements[];
} output_data;
uniform ivec4 sizes_;
void main() {
ivec3 gid = ivec3(gl_GlobalInvocationID.xyz);
if (gid.x >= sizes_.x || gid.y >= sizes_.y || gid.z >= sizes_.z) {
return;
}
output_data.elements[(gid.y * sizes_.x + gid.x) * sizes_.z + gid.z] = input_data.elements[(gid.z / 4 * sizes_.y + gid.y) * sizes_.x + gid.x][gid.z % 4];
})";
GlShader shader;
RETURN_IF_ERROR(
GlShader::CompileShader(GL_COMPUTE_SHADER, shader_source, &shader));
GlProgram program;
RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &program));
*converter = ConverterPhwc4ToBhwc(std::move(program), workgroup_size);
return absl::OkStatus();
}
absl::Status ConverterPhwc4ToBhwc::Convert(const BHWC& shape,
const GlBuffer& source,
CommandQueue* command_queue,
GlBuffer* destination) {
if (source.bytes_size() < BytesForPHWC4(shape)) {
return absl::InvalidArgumentError(
"Phwc4ToBhwc: Input data size does not match expected size.");
}
if (destination->bytes_size() < BytesForBHWC(shape)) {
return absl::InvalidArgumentError(
"Phwc4ToBhwc: output data size does not match expected size.");
}
if (shape.b != 1) {
return absl::UnimplementedError(
"Phwc4ToBhwc: Batch size is not equal to 1.");
}
uint3 workload = uint3(shape.w, shape.h, shape.c);
uint3 num_workgroups = DivideRoundUp(workload, workgroup_size_);
RETURN_IF_ERROR(program_.SetParameter(
{"sizes_",
int4(static_cast<int32_t>(workload.x), static_cast<int32_t>(workload.y),
static_cast<int32_t>(workload.z), 0)}));
RETURN_IF_ERROR(source.BindToIndex(0));
RETURN_IF_ERROR(destination->BindToIndex(1));
if (command_queue) {
return command_queue->Dispatch(program_, num_workgroups);
}
return program_.Dispatch(num_workgroups);
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.h"
#include <algorithm>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
#include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
inline std::vector<float> GenerateFloats(float multiplier, int size) {
std::vector<float> v(size);
for (int i = 0; i < size; ++i) {
v[i] = multiplier * i * (i % 2 == 0 ? -1 : 1);
}
return v;
}
absl::Status RunTest(const BHWC& shape) {
std::vector<float> input =
GenerateFloats(0.01, GetElementsSizeForPHWC4(shape));
std::vector<float> output(shape.DimensionsProduct(), 0);
RETURN_IF_ERROR(
ConvertFromPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape,
absl::MakeSpan(output.data(), output.size())));
std::unique_ptr<EglEnvironment> env;
RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
GlBuffer input_buffer;
RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer(
absl::MakeConstSpan(input.data(), input.size()), &input_buffer));
GlBuffer output_buffer;
RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>(
shape.DimensionsProduct(), &output_buffer));
ConverterPhwc4ToBhwc converter;
RETURN_IF_ERROR(ConverterPhwc4ToBhwc::Create(&converter));
RETURN_IF_ERROR(
converter.Convert(shape, input_buffer, nullptr, &output_buffer));
std::vector<float> converted_output(output.size(), 0);
RETURN_IF_ERROR(output_buffer.Read(
absl::MakeSpan(converted_output.data(), converted_output.size())));
if (output != converted_output) {
return absl::InternalError("Outputs don't match");
}
return absl::OkStatus();
}
TEST(Phwc4ToHwc, Smoke) {
for (int32_t h : {1, 2, 3, 7, 20}) {
for (int32_t w : {1, 2, 4, 5, 11}) {
for (int32_t c : {1, 2, 4, 5, 8, 9}) {
BHWC shape(1, h, w, c);
EXPECT_TRUE(RunTest(shape).ok())
<< shape.h << " " << shape.w << " " << shape.c;
}
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e5e2b254-879d-48d9-939b-833510f208ac | cpp | tensorflow/tensorflow | io_ops | tensorflow/c/experimental/ops/io_ops.cc | tensorflow/core/ops/io_ops_test.cc | #include "tensorflow/c/experimental/ops/io_ops.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/tracing_utils.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
using tensorflow::tracing::MaybeSetOpName;
namespace tensorflow {
namespace ops {
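// Thin wrappers that build and execute the RestoreV2 / SaveV2 ops through the
// abstract eager/tracing C API.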
Status RestoreV2(AbstractContext* ctx, AbstractTensorHandle* const prefix,
AbstractTensorHandle* const tensor_names,
AbstractTensorHandle* const shape_and_slices,
absl::Span<AbstractTensorHandle*> tensors,
absl::Span<DataType> dtypes, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("RestoreV2", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(prefix));
TF_RETURN_IF_ERROR(op_ptr->AddInput(tensor_names));
TF_RETURN_IF_ERROR(op_ptr->AddInput(shape_and_slices));
TF_RETURN_IF_ERROR(
op_ptr->SetAttrTypeList("dtypes", dtypes.data(), dtypes.length()));
int num_retvals = tensors.size();
return op_ptr->Execute(tensors, &num_retvals);
}
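// Counterpart wrapper for SaveV2. The op has no outputs, so Execute is invoked
// with an empty span purely for its side effect of writing the checkpoint.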
Status SaveV2(AbstractContext* ctx, AbstractTensorHandle* const prefix,
AbstractTensorHandle* const tensor_names,
AbstractTensorHandle* const shape_and_slices,
absl::Span<AbstractTensorHandle* const> tensors, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("SaveV2", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(prefix));
TF_RETURN_IF_ERROR(op_ptr->AddInput(tensor_names));
TF_RETURN_IF_ERROR(op_ptr->AddInput(shape_and_slices));
TF_RETURN_IF_ERROR(op_ptr->AddInputList(tensors));
int num_retvals = 0;
std::vector<AbstractTensorHandle*> dummy_outputs;
return op_ptr->Execute(absl::MakeSpan(dummy_outputs), &num_retvals);
}
}
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(IoOpsTest, Save_ShapeFn) {
ShapeInferenceTestOp op("Save");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"a", 0, DT_STRING})
.Input({"b", 0, DT_STRING})
.Input({{"c", 0, DT_FLOAT}, {"d", 0, DT_INT64}})
.Attr("T", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?", "");
INFER_OK(op, "[];[2];?;?", "");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2,3];?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];?;?");
}
TEST(IoOpsTest, SaveSlices_ShapeFn) {
ShapeInferenceTestOp op("SaveSlices");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"a", 0, DT_STRING})
.Input({"b", 0, DT_STRING})
.Input({"c", 0, DT_STRING})
.Input({{"d", 0, DT_FLOAT}, {"e", 0, DT_INT64}})
.Attr("T", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?;?", "");
INFER_OK(op, "[];[2];[2];?;?", "");
INFER_OK(op, "[];[2];[2];[100,200,300];[4,5]", "");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2,3];?;?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2];[2,3];?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[2];[3];?;?");
}
TEST(IoOpsTest, Restore_ShapeFn) {
ShapeInferenceTestOp op("Restore");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[];[]", "?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?]");
}
TEST(IoOpsTest, RestoreV2_ShapeFn) {
ShapeInferenceTestOp op("RestoreV2");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"prefix", 0, DT_STRING})
.Input({"tensor_names", 0, DT_STRING})
.Input({"shapes_and_slices", 0, DT_STRING})
.Attr("dtypes", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?", "?;?");
INFER_OK(op, "[];[10];[10]", "?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[?,?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[?];[?,?]");
INFER_ERROR("in both shapes must be equal", op, "[];[10];[20]");
}
TEST(IoOpsTest, RestoreSlice_ShapeFn) {
ShapeInferenceTestOp op("RestoreSlice");
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[];[];[]", "?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[];[?]");
}
TEST(IoOpsTest, ShardedFilename_ShapeFn) {
ShapeInferenceTestOp op("ShardedFilename");
INFER_OK(op, "?;?;?", "[]");
INFER_OK(op, "[];[];[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[];[?]");
}
TEST(IoOpsTest, ShardedFilespec_ShapeFn) {
ShapeInferenceTestOp op("ShardedFilespec");
INFER_OK(op, "?;?", "[]");
INFER_OK(op, "[];[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?]");
}
TEST(IoOpsTest, SingleScalarInputAndOutput_ShapeFns) {
for (const char* op_name : {"ReadFile"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "[]");
INFER_OK(op, "[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?]");
}
}
TEST(IoOpsTest, TwoElementVectorInputsAndScalarOutput_ShapeFns) {
for (const char* op_name :
{"ReaderNumRecordsProduced", "ReaderNumWorkUnitsCompleted",
"ReaderSerializeState"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "[]");
INFER_OK(op, "[2]", "[]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
INFER_ERROR("Dimension must be 2 but is 3", op, "[3]");
}
}
TEST(IoOpsTest, ReaderRead_ShapeFn) {
ShapeInferenceTestOp op("ReaderRead");
INFER_OK(op, "?;?", "[];[]");
INFER_OK(op, "[2];[?]", "[];[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[?,?];[2]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2];[]");
}
TEST(IoOpsTest, ReaderReadUpTo_ShapeFn) {
ShapeInferenceTestOp op("ReaderReadUpTo");
INFER_OK(op, "[2];[2];[]", "[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[2];[]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[2];[2];[?]");
}
TEST(IoOpsTest, ReaderReset_ShapeFn) {
ShapeInferenceTestOp op("ReaderReset");
INFER_OK(op, "[2]", "");
INFER_OK(op, "[?]", "");
INFER_OK(op, "?", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
}
TEST(IoOpsTest, ReaderRestoreState_ShapeFn) {
ShapeInferenceTestOp op("ReaderRestoreState");
INFER_OK(op, "?;?", "");
INFER_OK(op, "[2];[]", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[?]");
}
TEST(IoOpsTest, MatchingFiles_ShapeFn) {
ShapeInferenceTestOp op("MatchingFiles");
INFER_OK(op, "?", "[?]");
INFER_OK(op, "[]", "[?]");
INFER_OK(op, "[42]", "[?]");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op, "[?,?]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/io_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/io_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c3d39c28-8d42-4581-a701-ed09abead87c | cpp | tensorflow/tensorflow | zeros_like | tensorflow/lite/kernels/zeros_like.cc | tensorflow/lite/kernels/zeros_like_test.cc | #include <stdint.h>
#include <string.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace zeros_like {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
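// Prepare only propagates metadata: the output adopts the input's element type
// and a copy of its shape.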
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = input->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
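// Eval zero-fills the output with memset, which is correct for the supported
// types because an all-zero bit pattern encodes 0 for int32/int64 and +0.0f
// for IEEE-754 float32.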
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const int num_elements = NumElements(input);
switch (input->type) {
case kTfLiteInt64:
memset(GetTensorData<int64_t>(output), 0, num_elements * sizeof(int64_t));
break;
case kTfLiteInt32:
memset(GetTensorData<int32_t>(output), 0, num_elements * sizeof(int32_t));
break;
case kTfLiteFloat32:
memset(GetTensorData<float>(output), 0, num_elements * sizeof(float));
break;
default:
TF_LITE_KERNEL_LOG(context,
"ZerosLike only currently supports int64, int32, "
"and float32, got %d.",
input->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_ZEROS_LIKE() {
static TfLiteRegistration r = {nullptr, nullptr,
zeros_like::Prepare, zeros_like::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::Pointwise;
class ZerosLikeOpModel : public SingleOpModel {
public:
explicit ZerosLikeOpModel(const TensorData& input) {
input_ = AddInput(input);
output_ = AddOutput(input);
SetBuiltinOp(BuiltinOperator_ZEROS_LIKE, BuiltinOptions_ZerosLikeOptions,
CreateZerosLikeOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
int output() { return output_; }
protected:
int input_;
int output_;
};
TEST(ZerosLikeOpModel, ZerosLikeFloat) {
ZerosLikeOpModel m({TensorType_FLOAT32, {2, 3}});
m.PopulateTensor<float>(m.input(), {-2.0, -1.0, 0.0, 1.0, 2.0, 3.0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
Pointwise(FloatingPointEq(), {0.0, 0.0, 0.0, 0.0, 0.0, 0.0}));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({2, 3}));
}
TEST(ZerosLikeOpModel, ZerosLikeInt32) {
ZerosLikeOpModel m({TensorType_INT32, {1, 2, 2, 1}});
m.PopulateTensor<int32_t>(m.input(), {-2, -1, 0, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<int32_t>(m.output()),
ElementsAreArray({0, 0, 0, 0}));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 2, 2, 1}));
}
TEST(ZerosLikeOpModel, ZerosLikeInt64) {
ZerosLikeOpModel m({TensorType_INT64, {1, 2, 2, 1}});
m.PopulateTensor<int64_t>(m.input(), {-2, -1, 0, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<int64_t>(m.output()),
ElementsAreArray({0, 0, 0, 0}));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 2, 2, 1}));
}
TEST(ZerosLikeOpModel, InvalidTypeTest) {
ZerosLikeOpModel m_uint8({TensorType_UINT8, {1, 1}});
ASSERT_NE(m_uint8.Invoke(), kTfLiteOk)
<< "ZerosLike only currently supports int64, int32, and float32";
ZerosLikeOpModel m_int16({TensorType_INT16, {1, 1}});
ASSERT_NE(m_int16.Invoke(), kTfLiteOk)
<< "ZerosLike only currently supports int64, int32, and float32";
ZerosLikeOpModel m_complex({TensorType_COMPLEX64, {1, 1}});
ASSERT_NE(m_complex.Invoke(), kTfLiteOk)
<< "ZerosLike only currently supports int64, int32, and float32";
ZerosLikeOpModel m_int8({TensorType_INT8, {1, 1}});
ASSERT_NE(m_int8.Invoke(), kTfLiteOk)
<< "ZerosLike only currently supports int64, int32, and float32";
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/zeros_like.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/zeros_like_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6916f821-7c75-4936-99fa-2bea3e19eb07 | cpp | tensorflow/tensorflow | toco_cmdline_flags | tensorflow/lite/toco/toco_cmdline_flags.cc | tensorflow/lite/toco/toco_cmdline_flags_test.cc | #include "tensorflow/lite/toco/toco_cmdline_flags.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/strip.h"
#include "absl/types/optional.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/toco/toco_port.h"
namespace toco {
bool ParseTocoFlagsFromCommandLineFlags(
int* argc, char* argv[], std::string* msg,
ParsedTocoFlags* parsed_toco_flags_ptr) {
using tensorflow::Flag;
ParsedTocoFlags& parsed_flags = *parsed_toco_flags_ptr;
std::vector<tensorflow::Flag> flags = {
Flag("input_file", parsed_flags.input_file.bind(),
parsed_flags.input_file.default_value(),
"Input file (model of any supported format). For Protobuf "
"formats, both text and binary are supported regardless of file "
"extension."),
Flag("savedmodel_directory", parsed_flags.savedmodel_directory.bind(),
parsed_flags.savedmodel_directory.default_value(),
"Deprecated. Full path to the directory containing the SavedModel."),
Flag("output_file", parsed_flags.output_file.bind(),
parsed_flags.output_file.default_value(),
"Output file. "
"For Protobuf formats, the binary format will be used."),
Flag("input_format", parsed_flags.input_format.bind(),
parsed_flags.input_format.default_value(),
"Input file format. One of: TENSORFLOW_GRAPHDEF, TFLITE."),
Flag("output_format", parsed_flags.output_format.bind(),
parsed_flags.output_format.default_value(),
"Output file format. "
"One of TENSORFLOW_GRAPHDEF, TFLITE, GRAPHVIZ_DOT."),
Flag("savedmodel_tagset", parsed_flags.savedmodel_tagset.bind(),
parsed_flags.savedmodel_tagset.default_value(),
"Deprecated. Comma-separated set of tags identifying the "
"MetaGraphDef within the SavedModel to analyze. All tags in the tag "
"set must be specified."),
Flag("default_ranges_min", parsed_flags.default_ranges_min.bind(),
parsed_flags.default_ranges_min.default_value(),
"If defined, will be used as the default value for the min bound "
"of min/max ranges used for quantization of uint8 arrays."),
Flag("default_ranges_max", parsed_flags.default_ranges_max.bind(),
parsed_flags.default_ranges_max.default_value(),
"If defined, will be used as the default value for the max bound "
"of min/max ranges used for quantization of uint8 arrays."),
Flag("default_int16_ranges_min",
parsed_flags.default_int16_ranges_min.bind(),
parsed_flags.default_int16_ranges_min.default_value(),
"If defined, will be used as the default value for the min bound "
"of min/max ranges used for quantization of int16 arrays."),
Flag("default_int16_ranges_max",
parsed_flags.default_int16_ranges_max.bind(),
parsed_flags.default_int16_ranges_max.default_value(),
"If defined, will be used as the default value for the max bound "
"of min/max ranges used for quantization of int16 arrays."),
Flag("inference_type", parsed_flags.inference_type.bind(),
parsed_flags.inference_type.default_value(),
"Target data type of arrays in the output file (for input_arrays, "
"this may be overridden by inference_input_type). "
"One of FLOAT, QUANTIZED_UINT8."),
Flag("inference_input_type", parsed_flags.inference_input_type.bind(),
parsed_flags.inference_input_type.default_value(),
"Target data type of input arrays. "
"If not specified, inference_type is used. "
"One of FLOAT, QUANTIZED_UINT8."),
Flag("input_type", parsed_flags.input_type.bind(),
parsed_flags.input_type.default_value(),
"Deprecated ambiguous flag that set both --input_data_types and "
"--inference_input_type."),
Flag("input_types", parsed_flags.input_types.bind(),
parsed_flags.input_types.default_value(),
"Deprecated ambiguous flag that set both --input_data_types and "
"--inference_input_type. Was meant to be a "
"comma-separated list, but this was deprecated before "
"multiple-input-types was ever properly supported."),
Flag("drop_fake_quant", parsed_flags.drop_fake_quant.bind(),
parsed_flags.drop_fake_quant.default_value(),
"Ignore and discard FakeQuant nodes. For instance, to "
"generate plain float code without fake-quantization from a "
"quantized graph."),
Flag(
"reorder_across_fake_quant",
parsed_flags.reorder_across_fake_quant.bind(),
parsed_flags.reorder_across_fake_quant.default_value(),
"Normally, FakeQuant nodes must be strict boundaries for graph "
"transformations, in order to ensure that quantized inference has "
"the exact same arithmetic behavior as quantized training --- which "
"is the whole point of quantized training and of FakeQuant nodes in "
"the first place. "
"However, that entails subtle requirements on where exactly "
"FakeQuant nodes must be placed in the graph. Some quantized graphs "
"have FakeQuant nodes at unexpected locations, that prevent graph "
"transformations that are necessary in order to generate inference "
"code for these graphs. Such graphs should be fixed, but as a "
"temporary work-around, setting this reorder_across_fake_quant flag "
"allows TOCO to perform necessary graph transformaitons on them, "
"at the cost of no longer faithfully matching inference and training "
"arithmetic."),
Flag("allow_custom_ops", parsed_flags.allow_custom_ops.bind(),
parsed_flags.allow_custom_ops.default_value(),
"If true, allow TOCO to create TF Lite Custom operators for all the "
"unsupported TensorFlow ops."),
Flag("custom_opdefs", parsed_flags.custom_opdefs.bind(),
parsed_flags.custom_opdefs.default_value(),
"List of strings representing custom ops OpDefs that are included "
"in the GraphDef."),
Flag("allow_dynamic_tensors", parsed_flags.allow_dynamic_tensors.bind(),
parsed_flags.allow_dynamic_tensors.default_value(),
"Boolean flag indicating whether the converter should allow models "
"with dynamic Tensor shape. When set to False, the converter will "
"generate runtime memory offsets for activation Tensors (with 128 "
"bits alignment) and error out on models with undetermined Tensor "
"shape. (Default: True)"),
Flag(
"drop_control_dependency",
parsed_flags.drop_control_dependency.bind(),
parsed_flags.drop_control_dependency.default_value(),
"If true, ignore control dependency requirements in input TensorFlow "
"GraphDef. Otherwise an error will be raised upon control dependency "
"inputs."),
Flag("debug_disable_recurrent_cell_fusion",
parsed_flags.debug_disable_recurrent_cell_fusion.bind(),
parsed_flags.debug_disable_recurrent_cell_fusion.default_value(),
"If true, disable fusion of known identifiable cell subgraphs into "
"cells. This includes, for example, specific forms of LSTM cell."),
Flag("propagate_fake_quant_num_bits",
parsed_flags.propagate_fake_quant_num_bits.bind(),
parsed_flags.propagate_fake_quant_num_bits.default_value(),
"If true, use FakeQuant* operator num_bits attributes to adjust "
"array data_types."),
Flag("allow_nudging_weights_to_use_fast_gemm_kernel",
parsed_flags.allow_nudging_weights_to_use_fast_gemm_kernel.bind(),
parsed_flags.allow_nudging_weights_to_use_fast_gemm_kernel
.default_value(),
"Some fast uint8 GEMM kernels require uint8 weights to avoid the "
"value 0. This flag allows nudging them to 1 to allow proceeding, "
"with moderate inaccuracy."),
Flag("dedupe_array_min_size_bytes",
parsed_flags.dedupe_array_min_size_bytes.bind(),
parsed_flags.dedupe_array_min_size_bytes.default_value(),
"Minimum size of constant arrays to deduplicate; arrays smaller "
"will not be deduplicated."),
Flag("split_tflite_lstm_inputs",
parsed_flags.split_tflite_lstm_inputs.bind(),
parsed_flags.split_tflite_lstm_inputs.default_value(),
"Split the LSTM inputs from 5 tensors to 18 tensors for TFLite. "
"Ignored if the output format is not TFLite."),
Flag("quantize_to_float16", parsed_flags.quantize_to_float16.bind(),
parsed_flags.quantize_to_float16.default_value(),
"Used in conjunction with post_training_quantize. Specifies that "
"the weights should be quantized to fp16 instead of the default "
"(int8)"),
Flag("quantize_weights", parsed_flags.quantize_weights.bind(),
parsed_flags.quantize_weights.default_value(),
"Deprecated. Please use --post_training_quantize instead."),
Flag("post_training_quantize", parsed_flags.post_training_quantize.bind(),
parsed_flags.post_training_quantize.default_value(),
"Boolean indicating whether to quantize the weights of the "
"converted float model. Model size will be reduced and there will "
"be latency improvements (at the cost of accuracy)."),
Flag("enable_select_tf_ops", parsed_flags.enable_select_tf_ops.bind(),
parsed_flags.enable_select_tf_ops.default_value(), ""),
Flag("force_select_tf_ops", parsed_flags.force_select_tf_ops.bind(),
parsed_flags.force_select_tf_ops.default_value(), ""),
Flag("unfold_batchmatmul", parsed_flags.unfold_batchmatmul.bind(),
parsed_flags.unfold_batchmatmul.default_value(), ""),
Flag("accumulation_type", parsed_flags.accumulation_type.bind(),
parsed_flags.accumulation_type.default_value(),
"Accumulation type to use with quantize_to_float16"),
Flag("allow_bfloat16", parsed_flags.allow_bfloat16.bind(),
parsed_flags.allow_bfloat16.default_value(), "")};
bool asked_for_help =
*argc == 2 && (!strcmp(argv[1], "--help") || !strcmp(argv[1], "-help"));
if (asked_for_help) {
*msg += tensorflow::Flags::Usage(argv[0], flags);
return false;
} else {
return tensorflow::Flags::Parse(argc, argv, flags);
}
}
namespace {
enum class FlagRequirement {
kNone,
kMustBeSpecified,
kMustNotBeSpecified,
kUseDefault,
};
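// QCHECK-enforces that a flag's presence matches `requirement`; a violation is
// a usage error, so the process aborts with a message naming the flag.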
template <typename T>
void EnforceFlagRequirement(const T& flag, const std::string& flag_name,
FlagRequirement requirement) {
if (requirement == FlagRequirement::kMustBeSpecified) {
QCHECK(flag.specified()) << "Missing required flag " << flag_name;
}
if (requirement == FlagRequirement::kMustNotBeSpecified) {
QCHECK(!flag.specified())
<< "Given other flags, this flag should not have been specified: "
<< flag_name;
}
}
template <typename T>
std::optional<T> GetFlagValue(const Arg<T>& flag, FlagRequirement requirement) {
if (flag.specified()) return flag.value();
if (requirement == FlagRequirement::kUseDefault) return flag.default_value();
return std::optional<T>();
}
}
void ReadTocoFlagsFromCommandLineFlags(const ParsedTocoFlags& parsed_toco_flags,
TocoFlags* toco_flags) {
namespace port = toco::port;
port::CheckInitGoogleIsDone("InitGoogle is not done yet");
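// READ_TOCO_FLAG copies a command-line flag into the TocoFlags proto whenever
// a value is available (explicitly specified, or the default under
// kUseDefault). PARSE_TOCO_FLAG does the same but first runs the string
// through the proto-generated Type##_Parse for enum-valued flags.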
#define READ_TOCO_FLAG(name, requirement) \
do { \
EnforceFlagRequirement(parsed_toco_flags.name, #name, requirement); \
auto flag_value = GetFlagValue(parsed_toco_flags.name, requirement); \
if (flag_value.has_value()) { \
toco_flags->set_##name(flag_value.value()); \
} \
} while (false)
#define PARSE_TOCO_FLAG(Type, name, requirement) \
do { \
EnforceFlagRequirement(parsed_toco_flags.name, #name, requirement); \
auto flag_value = GetFlagValue(parsed_toco_flags.name, requirement); \
if (flag_value.has_value()) { \
Type x; \
QCHECK(Type##_Parse(flag_value.value(), &x)) \
<< "Unrecognized " << #Type << " value " \
<< parsed_toco_flags.name.value(); \
toco_flags->set_##name(x); \
} \
} while (false)
PARSE_TOCO_FLAG(FileFormat, input_format, FlagRequirement::kUseDefault);
PARSE_TOCO_FLAG(FileFormat, output_format, FlagRequirement::kUseDefault);
PARSE_TOCO_FLAG(IODataType, inference_type, FlagRequirement::kNone);
PARSE_TOCO_FLAG(IODataType, inference_input_type, FlagRequirement::kNone);
READ_TOCO_FLAG(default_ranges_min, FlagRequirement::kNone);
READ_TOCO_FLAG(default_ranges_max, FlagRequirement::kNone);
READ_TOCO_FLAG(default_int16_ranges_min, FlagRequirement::kNone);
READ_TOCO_FLAG(default_int16_ranges_max, FlagRequirement::kNone);
READ_TOCO_FLAG(drop_fake_quant, FlagRequirement::kNone);
READ_TOCO_FLAG(reorder_across_fake_quant, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_custom_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(drop_control_dependency, FlagRequirement::kNone);
READ_TOCO_FLAG(debug_disable_recurrent_cell_fusion, FlagRequirement::kNone);
READ_TOCO_FLAG(propagate_fake_quant_num_bits, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_nudging_weights_to_use_fast_gemm_kernel,
FlagRequirement::kNone);
READ_TOCO_FLAG(dedupe_array_min_size_bytes, FlagRequirement::kNone);
READ_TOCO_FLAG(split_tflite_lstm_inputs, FlagRequirement::kNone);
READ_TOCO_FLAG(quantize_weights, FlagRequirement::kNone);
READ_TOCO_FLAG(quantize_to_float16, FlagRequirement::kNone);
READ_TOCO_FLAG(post_training_quantize, FlagRequirement::kNone);
READ_TOCO_FLAG(enable_select_tf_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(force_select_tf_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(unfold_batchmatmul, FlagRequirement::kNone);
PARSE_TOCO_FLAG(IODataType, accumulation_type, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_bfloat16, FlagRequirement::kNone);
if (parsed_toco_flags.force_select_tf_ops.value() &&
!parsed_toco_flags.enable_select_tf_ops.value()) {
LOG(WARNING) << "--force_select_tf_ops should always be used with "
"--enable_select_tf_ops.";
}
if (parsed_toco_flags.input_type.specified()) {
LOG(WARNING)
<< "--input_type is deprecated. It was an ambiguous flag that set both "
"--input_data_types and --inference_input_type. If you are trying "
"to complement the input file with information about the type of "
"input arrays, use --input_data_type. If you are trying to control "
"the quantization/dequantization of real-numbers input arrays in "
"the output file, use --inference_input_type.";
toco::IODataType input_type;
QCHECK(toco::IODataType_Parse(parsed_toco_flags.input_type.value(),
&input_type));
toco_flags->set_inference_input_type(input_type);
}
if (parsed_toco_flags.input_types.specified()) {
LOG(WARNING)
<< "--input_types is deprecated. It was an ambiguous flag that set "
"both --input_data_types and --inference_input_type. If you are "
"trying to complement the input file with information about the "
"type of input arrays, use --input_data_type. If you are trying to "
"control the quantization/dequantization of real-numbers input "
"arrays in the output file, use --inference_input_type.";
std::vector<std::string> input_types =
absl::StrSplit(parsed_toco_flags.input_types.value(), ',');
QCHECK(!input_types.empty());
for (size_t i = 1; i < input_types.size(); i++) {
QCHECK_EQ(input_types[i], input_types[0]);
}
toco::IODataType input_type;
QCHECK(toco::IODataType_Parse(input_types[0], &input_type));
toco_flags->set_inference_input_type(input_type);
}
if (parsed_toco_flags.quantize_weights.value()) {
LOG(WARNING)
<< "--quantize_weights is deprecated. Falling back to "
"--post_training_quantize. Please switch --post_training_quantize.";
toco_flags->set_post_training_quantize(
parsed_toco_flags.quantize_weights.value());
}
if (parsed_toco_flags.quantize_weights.value()) {
if (toco_flags->inference_type() == IODataType::QUANTIZED_UINT8) {
LOG(WARNING)
<< "--post_training_quantize quantizes a graph of inference_type "
"FLOAT. Overriding inference type QUANTIZED_UINT8 to FLOAT.";
toco_flags->set_inference_type(IODataType::FLOAT);
}
}
#undef READ_TOCO_FLAG
#undef PARSE_TOCO_FLAG
}
} | #include "tensorflow/lite/toco/toco_cmdline_flags.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
namespace toco {
namespace {
TEST(TocoCmdlineFlagsTest, DefaultValue) {
int argc = 1;
const char* args[] = {"toco", nullptr};
std::string message;
ParsedTocoFlags result_flags;
EXPECT_TRUE(ParseTocoFlagsFromCommandLineFlags(
&argc, const_cast<char**>(args), &message, &result_flags));
EXPECT_EQ(result_flags.allow_dynamic_tensors.value(), true);
}
TEST(TocoCmdlineFlagsTest, ParseFlags) {
int argc = 2;
const char* args[] = {"toco", "--allow_dynamic_tensors=false", nullptr};
std::string message;
ParsedTocoFlags result_flags;
EXPECT_TRUE(ParseTocoFlagsFromCommandLineFlags(
&argc, const_cast<char**>(args), &message, &result_flags));
EXPECT_EQ(result_flags.allow_dynamic_tensors.value(), false);
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_cmdline_flags.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_cmdline_flags_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
67d9e88d-80b3-4696-a5ac-0ca0737cf6fa | cpp | tensorflow/tensorflow | cuda_collectives | third_party/xla/xla/stream_executor/cuda/cuda_collectives.cc | third_party/xla/xla/stream_executor/cuda/cuda_collectives_test.cc | #include "xla/stream_executor/cuda/cuda_collectives.h"
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "third_party/nccl/nccl.h"
#include "xla/stream_executor/gpu/context.h"
#include "xla/stream_executor/gpu/scoped_activate_context.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
namespace stream_executor::gpu {
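// Allocates device memory for collectives via ncclMemAlloc under the given
// context. A zero-byte request deliberately short-circuits to a null pointer
// without touching NCCL.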
absl::StatusOr<void*> CudaCollectives::CollectiveMemoryAllocate(
Context* context, uint64_t bytes) {
if (bytes == 0) return nullptr;
ScopedActivateContext activated(context);
void* ptr = nullptr;
ncclResult_t res = ncclMemAlloc(&ptr, bytes);
if (res != ncclSuccess) {
return absl::InternalError(absl::StrFormat(
"failed to allocate %s (%llu bytes) from device collective memory: %s, "
"Last NCCL warning(error) log entry (may be unrelated): %s",
tsl::strings::HumanReadableNumBytes(bytes), bytes,
ncclGetErrorString(res), ncclGetLastError(nullptr)));
}
VLOG(2) << "Allocated collective memory " << ptr << " for context " << context
<< " of " << bytes << " bytes";
return ptr;
}
absl::Status CudaCollectives::CollectiveMemoryDeallocate(
Context* context, void* location) {
ScopedActivateContext activation(context);
ncclResult_t res = ncclMemFree(location);
if (res != ncclSuccess) {
return absl::InternalError(absl::StrFormat(
"failed to free device collective memory at %p; result: %s, Last NCCL "
"warning(error) log entry (may be unrelated): %s",
location, ncclGetErrorString(res), ncclGetLastError(nullptr)));
}
VLOG(2) << "Deallocated collective memory " << location << " for context "
<< context;
return absl::OkStatus();
}
} | #include "xla/stream_executor/cuda/cuda_collectives.h"
#include <cstddef>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/gpu/runtime/nccl_api.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
TEST(CudaCollectivesTest, CollectiveMemoryAllocation) {
if (!xla::gpu::NcclApi::HasNcclSupport()) {
GTEST_SKIP() << "Compiled without NCCL support";
}
TF_ASSERT_OK_AND_ASSIGN(Platform * platform,
PlatformManager::PlatformWithName("CUDA"));
TF_ASSERT_OK_AND_ASSIGN(StreamExecutor * executor,
platform->ExecutorForDevice(0));
GpuExecutor* gpu_executor = ExtractGpuExecutor(executor);
constexpr size_t kAllocateSize = 1024;
TF_ASSERT_OK_AND_ASSIGN(void* memory,
CudaCollectives::CollectiveMemoryAllocate(
gpu_executor->gpu_context(), kAllocateSize));
EXPECT_THAT(gpu_executor->GetPointerMemorySpace(memory),
IsOkAndHolds(MemoryType::kDevice));
EXPECT_THAT(CudaCollectives::CollectiveMemoryDeallocate(
gpu_executor->gpu_context(), memory),
IsOk());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_collectives.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_collectives_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b441f45-3775-471c-b737-9ebb2a54c496 | cpp | google/cel-cpp | type_checker_builder | checker/type_checker_builder.cc | checker/type_checker_builder_test.cc | #include "checker/type_checker_builder.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "checker/internal/type_check_env.h"
#include "checker/internal/type_checker_impl.h"
#include "checker/type_checker.h"
#include "common/decl.h"
#include "common/type_introspector.h"
namespace cel {
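// Finalizes the builder into a TypeCheckerImpl. When neither this environment
// nor a parent configures a type provider, a plain TypeIntrospector is
// installed so there is always a provider to consult.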
absl::StatusOr<std::unique_ptr<TypeChecker>> TypeCheckerBuilder::Build() && {
if (env_.type_providers().empty() && env_.parent() == nullptr) {
env_.AddTypeProvider(std::make_unique<TypeIntrospector>());
}
return std::make_unique<checker_internal::TypeCheckerImpl>(std::move(env_));
}
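// A library with a non-empty id may be registered only once. Note that the
// library is recorded even when its options callback fails; the callback's
// status is simply propagated to the caller.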
absl::Status TypeCheckerBuilder::AddLibrary(CheckerLibrary library) {
if (!library.id.empty() && !library_ids_.insert(library.id).second) {
return absl::AlreadyExistsError(
absl::StrCat("library '", library.id, "' already exists"));
}
absl::Status status = library.options(*this);
libraries_.push_back(std::move(library));
return status;
}
absl::Status TypeCheckerBuilder::AddVariable(const VariableDecl& decl) {
bool inserted = env_.InsertVariableIfAbsent(decl);
if (!inserted) {
return absl::AlreadyExistsError(
absl::StrCat("variable '", decl.name(), "' already exists"));
}
return absl::OkStatus();
}
absl::Status TypeCheckerBuilder::AddFunction(const FunctionDecl& decl) {
bool inserted = env_.InsertFunctionIfAbsent(decl);
if (!inserted) {
return absl::AlreadyExistsError(
absl::StrCat("function '", decl.name(), "' already exists"));
}
return absl::OkStatus();
}
void TypeCheckerBuilder::AddTypeProvider(
std::unique_ptr<TypeIntrospector> provider) {
env_.AddTypeProvider(std::move(provider));
}
void TypeCheckerBuilder::set_container(absl::string_view container) {
env_.set_container(std::string(container));
}
} | #include "checker/type_checker_builder.h"
#include <utility>
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "checker/internal/test_ast_helpers.h"
#include "checker/validation_result.h"
#include "common/decl.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::cel::checker_internal::MakeTestParsedAst;
using ::testing::HasSubstr;
TEST(TypeCheckerBuilderTest, AddVariable) {
TypeCheckerBuilder builder;
ASSERT_THAT(builder.AddVariable(MakeVariableDecl("x", IntType())), IsOk());
ASSERT_OK_AND_ASSIGN(auto checker, std::move(builder).Build());
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("x"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, checker->Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
}
TEST(TypeCheckerBuilderTest, AddVariableRedeclaredError) {
TypeCheckerBuilder builder;
ASSERT_THAT(builder.AddVariable(MakeVariableDecl("x", IntType())), IsOk());
EXPECT_THAT(builder.AddVariable(MakeVariableDecl("x", IntType())),
StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(TypeCheckerBuilderTest, AddFunction) {
TypeCheckerBuilder builder;
ASSERT_OK_AND_ASSIGN(
auto fn_decl,
MakeFunctionDecl(
"add", MakeOverloadDecl("add_int", IntType(), IntType(), IntType())));
ASSERT_THAT(builder.AddFunction(fn_decl), IsOk());
ASSERT_OK_AND_ASSIGN(auto checker, std::move(builder).Build());
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("add(1, 2)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, checker->Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
}
TEST(TypeCheckerBuilderTest, AddFunctionRedeclaredError) {
TypeCheckerBuilder builder;
ASSERT_OK_AND_ASSIGN(
auto fn_decl,
MakeFunctionDecl(
"add", MakeOverloadDecl("add_int", IntType(), IntType(), IntType())));
ASSERT_THAT(builder.AddFunction(fn_decl), IsOk());
EXPECT_THAT(builder.AddFunction(fn_decl),
StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(TypeCheckerBuilderTest, AddLibrary) {
TypeCheckerBuilder builder;
ASSERT_OK_AND_ASSIGN(
auto fn_decl,
MakeFunctionDecl(
"add", MakeOverloadDecl("add_int", IntType(), IntType(), IntType())));
ASSERT_THAT(builder.AddLibrary({"",
[&](TypeCheckerBuilder& b) {
return b.AddFunction(fn_decl);
}}),
IsOk());
ASSERT_OK_AND_ASSIGN(auto checker, std::move(builder).Build());
ASSERT_OK_AND_ASSIGN(auto ast, MakeTestParsedAst("add(1, 2)"));
ASSERT_OK_AND_ASSIGN(ValidationResult result, checker->Check(std::move(ast)));
EXPECT_TRUE(result.IsValid());
}
TEST(TypeCheckerBuilderTest, AddLibraryRedeclaredError) {
TypeCheckerBuilder builder;
ASSERT_OK_AND_ASSIGN(
auto fn_decl,
MakeFunctionDecl(
"add", MakeOverloadDecl("add_int", IntType(), IntType(), IntType())));
ASSERT_THAT(builder.AddLibrary({"testlib",
[&](TypeCheckerBuilder& b) {
return b.AddFunction(fn_decl);
}}),
IsOk());
EXPECT_THAT(builder.AddLibrary({"testlib",
[&](TypeCheckerBuilder& b) {
return b.AddFunction(fn_decl);
}}),
StatusIs(absl::StatusCode::kAlreadyExists, HasSubstr("testlib")));
}
TEST(TypeCheckerBuilderTest, AddLibraryForwardsErrors) {
TypeCheckerBuilder builder;
ASSERT_OK_AND_ASSIGN(
auto fn_decl,
MakeFunctionDecl(
"add", MakeOverloadDecl("add_int", IntType(), IntType(), IntType())));
ASSERT_THAT(builder.AddLibrary({"",
[&](TypeCheckerBuilder& b) {
return b.AddFunction(fn_decl);
}}),
IsOk());
EXPECT_THAT(builder.AddLibrary({"",
[](TypeCheckerBuilder& b) {
return absl::InternalError("test error");
}}),
StatusIs(absl::StatusCode::kInternal, HasSubstr("test error")));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/type_checker_builder.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/type_checker_builder_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
857b14e6-046b-4b50-97f4-1f1c2b59df7d | cpp | abseil/abseil-cpp | cord_rep_btree_reader | absl/strings/internal/cord_rep_btree_reader.cc | absl/strings/internal/cord_rep_btree_reader_test.cc | #include "absl/strings/internal/cord_rep_btree_reader.h"
#include <cassert>
#include "absl/base/config.h"
#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_btree_navigator.h"
#include "absl/strings/internal/cord_rep_flat.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
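// Reads the next `n` bytes of the cord into `tree`, where `chunk_size` is the
// number of still-unconsumed bytes from the chunk most recently returned by
// this reader. The return value is the unread remainder of the edge on which
// the read ended (empty once the cord is exhausted), and `tree` is left null
// for a zero-length read. For example, the unit test below reads n=3 against a
// fresh 5-byte chunk "abcde" and gets tree "abc" with "de" returned.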
absl::string_view CordRepBtreeReader::Read(size_t n, size_t chunk_size,
CordRep*& tree) {
assert(chunk_size <= navigator_.Current()->length);
CordRep* edge = chunk_size ? navigator_.Current() : navigator_.Next();
const size_t offset = chunk_size ? edge->length - chunk_size : 0;
ReadResult result = navigator_.Read(offset, n);
tree = result.tree;
if (n < chunk_size) return EdgeData(edge).substr(result.n);
const size_t consumed_by_read = n - chunk_size - result.n;
if (consumed_by_read >= remaining_) {
remaining_ = 0;
return {};
}
edge = navigator_.Current();
remaining_ -= consumed_by_read + edge->length;
return EdgeData(edge).substr(result.n);
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/cord_rep_btree_reader.h"
#include <algorithm>
#include <iostream>
#include <random>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/strings/cord.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_test_util.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::Ne;
using ::testing::Not;
using ::absl::cordrep_testing::CordRepBtreeFromFlats;
using ::absl::cordrep_testing::MakeFlat;
using ::absl::cordrep_testing::CordToString;
using ::absl::cordrep_testing::CreateFlatsFromString;
using ::absl::cordrep_testing::CreateRandomString;
using ReadResult = CordRepBtreeReader::ReadResult;
TEST(CordRepBtreeReaderTest, Next) {
constexpr size_t kChars = 3;
const size_t cap = CordRepBtree::kMaxCapacity;
size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17};
for (size_t count : counts) {
std::string data = CreateRandomString(count * kChars);
std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
CordRepBtree* node = CordRepBtreeFromFlats(flats);
CordRepBtreeReader reader;
size_t remaining = data.length();
absl::string_view chunk = reader.Init(node);
EXPECT_THAT(chunk, Eq(data.substr(0, chunk.length())));
remaining -= chunk.length();
EXPECT_THAT(reader.remaining(), Eq(remaining));
while (remaining > 0) {
const size_t offset = data.length() - remaining;
chunk = reader.Next();
EXPECT_THAT(chunk, Eq(data.substr(offset, chunk.length())));
remaining -= chunk.length();
EXPECT_THAT(reader.remaining(), Eq(remaining));
}
EXPECT_THAT(reader.remaining(), Eq(0u));
EXPECT_THAT(reader.Next(), testing::IsEmpty());
CordRep::Unref(node);
}
}
TEST(CordRepBtreeReaderTest, Skip) {
constexpr size_t kChars = 3;
const size_t cap = CordRepBtree::kMaxCapacity;
size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17};
for (size_t count : counts) {
std::string data = CreateRandomString(count * kChars);
std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
CordRepBtree* node = CordRepBtreeFromFlats(flats);
for (size_t skip1 = 0; skip1 < data.length() - kChars; ++skip1) {
for (size_t skip2 = 0; skip2 < data.length() - kChars; ++skip2) {
CordRepBtreeReader reader;
size_t remaining = data.length();
absl::string_view chunk = reader.Init(node);
remaining -= chunk.length();
chunk = reader.Skip(skip1);
size_t offset = data.length() - remaining;
ASSERT_THAT(chunk, Eq(data.substr(offset + skip1, chunk.length())));
remaining -= chunk.length() + skip1;
ASSERT_THAT(reader.remaining(), Eq(remaining));
if (remaining == 0) continue;
size_t skip = std::min(remaining - 1, skip2);
chunk = reader.Skip(skip);
offset = data.length() - remaining;
ASSERT_THAT(chunk, Eq(data.substr(offset + skip, chunk.length())));
}
}
CordRep::Unref(node);
}
}
TEST(CordRepBtreeReaderTest, SkipBeyondLength) {
CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
tree = CordRepBtree::Append(tree, MakeFlat("def"));
CordRepBtreeReader reader;
reader.Init(tree);
EXPECT_THAT(reader.Skip(100), IsEmpty());
EXPECT_THAT(reader.remaining(), Eq(0u));
CordRep::Unref(tree);
}
TEST(CordRepBtreeReaderTest, Seek) {
constexpr size_t kChars = 3;
const size_t cap = CordRepBtree::kMaxCapacity;
size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17};
for (size_t count : counts) {
std::string data = CreateRandomString(count * kChars);
std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
CordRepBtree* node = CordRepBtreeFromFlats(flats);
for (size_t seek = 0; seek < data.length() - 1; ++seek) {
CordRepBtreeReader reader;
reader.Init(node);
absl::string_view chunk = reader.Seek(seek);
ASSERT_THAT(chunk, Not(IsEmpty()));
ASSERT_THAT(chunk, Eq(data.substr(seek, chunk.length())));
ASSERT_THAT(reader.remaining(),
Eq(data.length() - seek - chunk.length()));
}
CordRep::Unref(node);
}
}
TEST(CordRepBtreeReaderTest, SeekBeyondLength) {
CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
tree = CordRepBtree::Append(tree, MakeFlat("def"));
CordRepBtreeReader reader;
reader.Init(tree);
EXPECT_THAT(reader.Seek(6), IsEmpty());
EXPECT_THAT(reader.remaining(), Eq(0u));
EXPECT_THAT(reader.Seek(100), IsEmpty());
EXPECT_THAT(reader.remaining(), Eq(0u));
CordRep::Unref(tree);
}
TEST(CordRepBtreeReaderTest, Read) {
std::string data = "abcdefghijklmno";
std::vector<CordRep*> flats = CreateFlatsFromString(data, 5);
CordRepBtree* node = CordRepBtreeFromFlats(flats);
CordRep* tree;
CordRepBtreeReader reader;
absl::string_view chunk;
chunk = reader.Init(node);
chunk = reader.Read(0, chunk.length(), tree);
EXPECT_THAT(tree, Eq(nullptr));
EXPECT_THAT(chunk, Eq("abcde"));
EXPECT_THAT(reader.remaining(), Eq(10u));
EXPECT_THAT(reader.Next(), Eq("fghij"));
chunk = reader.Init(node);
chunk = reader.Read(15, chunk.length(), tree);
EXPECT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("abcdefghijklmno"));
EXPECT_THAT(chunk, Eq(""));
EXPECT_THAT(reader.remaining(), Eq(0u));
CordRep::Unref(tree);
chunk = reader.Init(node);
chunk = reader.Read(3, chunk.length(), tree);
ASSERT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("abc"));
EXPECT_THAT(chunk, Eq("de"));
EXPECT_THAT(reader.remaining(), Eq(10u));
EXPECT_THAT(reader.Next(), Eq("fghij"));
CordRep::Unref(tree);
chunk = reader.Init(node);
chunk = reader.Read(2, chunk.length() - 2, tree);
ASSERT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("cd"));
EXPECT_THAT(chunk, Eq("e"));
EXPECT_THAT(reader.remaining(), Eq(10u));
EXPECT_THAT(reader.Next(), Eq("fghij"));
CordRep::Unref(tree);
chunk = reader.Init(node);
chunk = reader.Read(3, 0, tree);
ASSERT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("fgh"));
EXPECT_THAT(chunk, Eq("ij"));
EXPECT_THAT(reader.remaining(), Eq(5u));
EXPECT_THAT(reader.Next(), Eq("klmno"));
CordRep::Unref(tree);
chunk = reader.Init(node);
chunk = reader.Read(12, chunk.length() - 2, tree);
ASSERT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("cdefghijklmn"));
EXPECT_THAT(chunk, Eq("o"));
EXPECT_THAT(reader.remaining(), Eq(0u));
CordRep::Unref(tree);
chunk = reader.Init(node);
chunk = reader.Read(10 - 2, chunk.length() - 2, tree);
ASSERT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("cdefghij"));
EXPECT_THAT(chunk, Eq("klmno"));
EXPECT_THAT(reader.remaining(), Eq(0u));
CordRep::Unref(tree);
CordRep::Unref(node);
}
TEST(CordRepBtreeReaderTest, ReadExhaustive) {
constexpr size_t kChars = 3;
const size_t cap = CordRepBtree::kMaxCapacity;
size_t counts[] = {1, 2, cap, cap * cap + 1, cap * cap * cap * 2 + 17};
for (size_t count : counts) {
std::string data = CreateRandomString(count * kChars);
std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
CordRepBtree* node = CordRepBtreeFromFlats(flats);
for (size_t read_size : {kChars - 1, kChars, kChars + 7, cap * cap}) {
CordRepBtreeReader reader;
absl::string_view chunk = reader.Init(node);
size_t consumed = 0;
size_t remaining = data.length();
while (remaining > 0) {
CordRep* tree;
size_t n = (std::min)(remaining, read_size);
chunk = reader.Read(n, chunk.length(), tree);
EXPECT_THAT(tree, Ne(nullptr));
if (tree) {
EXPECT_THAT(CordToString(tree), Eq(data.substr(consumed, n)));
CordRep::Unref(tree);
}
consumed += n;
remaining -= n;
EXPECT_THAT(reader.remaining(), Eq(remaining - chunk.length()));
if (remaining > 0) {
ASSERT_FALSE(chunk.empty());
ASSERT_THAT(chunk, Eq(data.substr(consumed, chunk.length())));
} else {
ASSERT_TRUE(chunk.empty()) << chunk;
}
}
}
CordRep::Unref(node);
}
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cord_rep_btree_reader.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cord_rep_btree_reader_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
9b61f2fd-dae2-47fa-a877-b61df3b15a36 | cpp | tensorflow/tensorflow | libc_handle | tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.h"
#ifdef __ANDROID__
#include <dlfcn.h>
#endif
#include <stdio.h>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_status.h"
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
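// On Android, fmemopen is resolved at runtime via dlopen/dlsym instead of
// being linked directly, presumably because it is absent from the libc of
// older Android API levels. Other POSIX platforms use the symbol as-is, and
// Windows is reported as unsupported.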
LibCHandle LibCHandle::Create(Status &status) {
#ifndef __ANDROID__
#ifndef _WIN32
return LibCHandle(nullptr, ::fmemopen);
#else
status = {kTfLiteError, "Windows not supported."};
return LibCHandle(nullptr, nullptr);
#endif
#else
void *libc = nullptr;
FmemopenPtr fmemopen_ptr = nullptr;
if (!(libc = dlopen("libc.so", RTLD_NOW | RTLD_LOCAL))) {
status = {kTfLiteError,
"Failed to load the libc dynamic shared object library."};
return LibCHandle(nullptr, nullptr);
}
if (!(fmemopen_ptr =
reinterpret_cast<FmemopenPtr>(dlsym(libc, "fmemopen")))) {
status = {kTfLiteError, "Failed to dynamically load the method: fmemopen"};
return LibCHandle(nullptr, nullptr);
}
status = {kTfLiteOk, ""};
return LibCHandle(libc, fmemopen_ptr);
#endif
}
FILE *LibCHandle::fmemopen(void *buf, size_t size, const char *mode) const {
return fmemopen_(buf, size, mode);
}
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace acceleration {
namespace decode_jpeg_kernel {
namespace {
TEST(LibCHandleTest, LoadingSucceedsAndroidPlatforms) {
Status status;
LibCHandle handle = LibCHandle::Create(status);
EXPECT_EQ(status.error_message, "");
EXPECT_EQ(status.code, kTfLiteOk);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/libc_handle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
60f20ad5-af5f-44be-868d-f9539a658eb7 | cpp | google/quiche | quic_path_validator | quiche/quic/core/quic_path_validator.cc | quiche/quic/core/quic_path_validator_test.cc | #include "quiche/quic/core/quic_path_validator.h"
#include <memory>
#include <ostream>
#include <utility>
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
namespace quic {
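// Alarm delegate that drives retransmission: each timeout calls back into the
// validator, which either sends another PATH_CHALLENGE or gives up.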
class RetryAlarmDelegate : public QuicAlarm::DelegateWithContext {
public:
explicit RetryAlarmDelegate(QuicPathValidator* path_validator,
QuicConnectionContext* context)
: QuicAlarm::DelegateWithContext(context),
path_validator_(path_validator) {}
RetryAlarmDelegate(const RetryAlarmDelegate&) = delete;
RetryAlarmDelegate& operator=(const RetryAlarmDelegate&) = delete;
void OnAlarm() override { path_validator_->OnRetryTimeout(); }
private:
QuicPathValidator* path_validator_;
};
std::ostream& operator<<(std::ostream& os,
const QuicPathValidationContext& context) {
return os << " from " << context.self_address_ << " to "
<< context.peer_address_;
}
QuicPathValidator::QuicPathValidator(QuicAlarmFactory* alarm_factory,
QuicConnectionArena* arena,
SendDelegate* send_delegate,
QuicRandom* random, const QuicClock* clock,
QuicConnectionContext* context)
: send_delegate_(send_delegate),
random_(random),
clock_(clock),
retry_timer_(alarm_factory->CreateAlarm(
arena->New<RetryAlarmDelegate>(this, context), arena)),
retry_count_(0u) {}
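// A PATH_RESPONSE counts only if it arrives on the self address under
// validation and its payload matches one of the challenges sent for this
// attempt; anything else is logged and ignored.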
void QuicPathValidator::OnPathResponse(const QuicPathFrameBuffer& probing_data,
QuicSocketAddress self_address) {
if (!HasPendingPathValidation()) {
return;
}
QUIC_DVLOG(1) << "Match PATH_RESPONSE received on " << self_address;
QUIC_BUG_IF(quic_bug_12402_1, !path_context_->self_address().IsInitialized())
<< "Self address should have been known by now";
if (self_address != path_context_->self_address()) {
QUIC_DVLOG(1) << "Expect the response to be received on "
<< path_context_->self_address();
return;
}
for (auto it = probing_data_.begin(); it != probing_data_.end(); ++it) {
if (it->frame_buffer == probing_data) {
result_delegate_->OnPathValidationSuccess(std::move(path_context_),
it->send_time);
ResetPathValidation();
return;
}
}
QUIC_DVLOG(1) << "PATH_RESPONSE with payload " << probing_data.data()
<< " doesn't match the probing data.";
}
void QuicPathValidator::StartPathValidation(
std::unique_ptr<QuicPathValidationContext> context,
std::unique_ptr<ResultDelegate> result_delegate,
PathValidationReason reason) {
QUICHE_DCHECK(context);
QUIC_DLOG(INFO) << "Start validating path " << *context
<< " via writer: " << context->WriterToUse();
if (path_context_ != nullptr) {
QUIC_BUG(quic_bug_10876_1)
<< "There is an on-going validation on path " << *path_context_;
ResetPathValidation();
}
reason_ = reason;
path_context_ = std::move(context);
result_delegate_ = std::move(result_delegate);
SendPathChallengeAndSetAlarm();
}
void QuicPathValidator::ResetPathValidation() {
path_context_ = nullptr;
result_delegate_ = nullptr;
retry_timer_->Cancel();
retry_count_ = 0;
reason_ = PathValidationReason::kReasonUnknown;
}
void QuicPathValidator::CancelPathValidation() {
if (path_context_ == nullptr) {
return;
}
QUIC_DVLOG(1) << "Cancel validation on path" << *path_context_;
result_delegate_->OnPathValidationFailure(std::move(path_context_));
ResetPathValidation();
}
bool QuicPathValidator::HasPendingPathValidation() const {
return path_context_ != nullptr;
}
QuicPathValidationContext* QuicPathValidator::GetContext() const {
return path_context_.get();
}
std::unique_ptr<QuicPathValidationContext> QuicPathValidator::ReleaseContext() {
auto ret = std::move(path_context_);
ResetPathValidation();
return ret;
}
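// Records the send time and a fresh random payload for the next challenge; the
// send time is reported back to the result delegate on success.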
const QuicPathFrameBuffer& QuicPathValidator::GeneratePathChallengePayload() {
probing_data_.emplace_back(clock_->Now());
random_->RandBytes(probing_data_.back().frame_buffer.data(),
sizeof(QuicPathFrameBuffer));
return probing_data_.back().frame_buffer;
}
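// Retries with a new payload on each timeout; once kMaxRetryTimes retries have
// gone unanswered, the validation is cancelled and the delegate is notified of
// failure.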
void QuicPathValidator::OnRetryTimeout() {
++retry_count_;
if (retry_count_ > kMaxRetryTimes) {
CancelPathValidation();
return;
}
QUIC_DVLOG(1) << "Send another PATH_CHALLENGE on path " << *path_context_;
SendPathChallengeAndSetAlarm();
}
void QuicPathValidator::SendPathChallengeAndSetAlarm() {
bool should_continue = send_delegate_->SendPathChallenge(
GeneratePathChallengePayload(), path_context_->self_address(),
path_context_->peer_address(), path_context_->effective_peer_address(),
path_context_->WriterToUse());
if (!should_continue) {
CancelPathValidation();
return;
}
retry_timer_->Set(send_delegate_->GetRetryTimeout(
path_context_->peer_address(), path_context_->WriterToUse()));
}
bool QuicPathValidator::IsValidatingPeerAddress(
const QuicSocketAddress& effective_peer_address) {
return path_context_ != nullptr &&
path_context_->effective_peer_address() == effective_peer_address;
}
void QuicPathValidator::MaybeWritePacketToAddress(
const char* buffer, size_t buf_len, const QuicSocketAddress& peer_address) {
if (!HasPendingPathValidation() ||
path_context_->peer_address() != peer_address) {
return;
}
QUIC_DVLOG(1) << "Path validator is sending packet of size " << buf_len
<< " from " << path_context_->self_address() << " to "
<< path_context_->peer_address();
path_context_->WriterToUse()->WritePacket(
buffer, buf_len, path_context_->self_address().host(),
path_context_->peer_address(), nullptr, QuicPacketWriterParams());
}
} | #include "quiche/quic/core/quic_path_validator.h"
#include <cstring>
#include <memory>
#include "quiche/quic/core/frames/quic_path_challenge_frame.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/quic/test_tools/mock_random.h"
#include "quiche/quic/test_tools/quic_path_validator_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
using testing::_;
using testing::Invoke;
using testing::Return;
namespace quic {
namespace test {
class MockSendDelegate : public QuicPathValidator::SendDelegate {
public:
MOCK_METHOD(bool, SendPathChallenge,
(const QuicPathFrameBuffer&, const QuicSocketAddress&,
const QuicSocketAddress&, const QuicSocketAddress&,
QuicPacketWriter*),
(override));
MOCK_METHOD(QuicTime, GetRetryTimeout,
(const QuicSocketAddress&, QuicPacketWriter*), (const, override));
};
class QuicPathValidatorTest : public QuicTest {
public:
QuicPathValidatorTest()
: path_validator_(&alarm_factory_, &arena_, &send_delegate_, &random_,
&clock_,
nullptr),
context_(new MockQuicPathValidationContext(
self_address_, peer_address_, effective_peer_address_, &writer_)),
result_delegate_(
new testing::StrictMock<MockQuicPathValidationResultDelegate>()) {
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
ON_CALL(send_delegate_, GetRetryTimeout(_, _))
.WillByDefault(
Return(clock_.ApproximateNow() +
3 * QuicTime::Delta::FromMilliseconds(kInitialRttMs)));
}
protected:
quic::test::MockAlarmFactory alarm_factory_;
MockSendDelegate send_delegate_;
MockRandom random_;
MockClock clock_;
QuicConnectionArena arena_;
QuicPathValidator path_validator_;
QuicSocketAddress self_address_{QuicIpAddress::Any4(), 443};
QuicSocketAddress peer_address_{QuicIpAddress::Loopback4(), 443};
QuicSocketAddress effective_peer_address_{QuicIpAddress::Loopback4(), 12345};
MockPacketWriter writer_;
MockQuicPathValidationContext* context_;
MockQuicPathValidationResultDelegate* result_delegate_;
};
TEST_F(QuicPathValidatorTest, PathValidationSuccessOnFirstRound) {
QuicPathFrameBuffer challenge_data;
EXPECT_CALL(send_delegate_,
SendPathChallenge(_, self_address_, peer_address_,
effective_peer_address_, &writer_))
.WillOnce(Invoke([&](const QuicPathFrameBuffer& payload,
const QuicSocketAddress&, const QuicSocketAddress&,
const QuicSocketAddress&, QuicPacketWriter*) {
memcpy(challenge_data.data(), payload.data(), payload.size());
return true;
}));
EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_));
const QuicTime expected_start_time = clock_.Now();
path_validator_.StartPathValidation(
std::unique_ptr<QuicPathValidationContext>(context_),
std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
PathValidationReason::kMultiPort);
EXPECT_TRUE(path_validator_.HasPendingPathValidation());
EXPECT_EQ(PathValidationReason::kMultiPort,
path_validator_.GetPathValidationReason());
EXPECT_TRUE(path_validator_.IsValidatingPeerAddress(effective_peer_address_));
EXPECT_CALL(*result_delegate_, OnPathValidationSuccess(_, _))
.WillOnce(
Invoke([=, this](std::unique_ptr<QuicPathValidationContext> context,
QuicTime start_time) {
EXPECT_EQ(context.get(), context_);
EXPECT_EQ(start_time, expected_start_time);
}));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(kInitialRttMs));
path_validator_.OnPathResponse(challenge_data, self_address_);
EXPECT_FALSE(path_validator_.HasPendingPathValidation());
EXPECT_EQ(PathValidationReason::kReasonUnknown,
path_validator_.GetPathValidationReason());
}
TEST_F(QuicPathValidatorTest, RespondWithDifferentSelfAddress) {
QuicPathFrameBuffer challenge_data;
EXPECT_CALL(send_delegate_,
SendPathChallenge(_, self_address_, peer_address_,
effective_peer_address_, &writer_))
      .WillOnce(Invoke([&](const QuicPathFrameBuffer& payload,
const QuicSocketAddress&, const QuicSocketAddress&,
const QuicSocketAddress&, QuicPacketWriter*) {
memcpy(challenge_data.data(), payload.data(), payload.size());
return true;
}));
EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_));
const QuicTime expected_start_time = clock_.Now();
path_validator_.StartPathValidation(
std::unique_ptr<QuicPathValidationContext>(context_),
std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
PathValidationReason::kMultiPort);
const QuicSocketAddress kAlternativeSelfAddress(QuicIpAddress::Any6(), 54321);
EXPECT_NE(kAlternativeSelfAddress, self_address_);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(kInitialRttMs));
path_validator_.OnPathResponse(challenge_data, kAlternativeSelfAddress);
EXPECT_CALL(*result_delegate_, OnPathValidationSuccess(_, _))
.WillOnce(
Invoke([=, this](std::unique_ptr<QuicPathValidationContext> context,
QuicTime start_time) {
EXPECT_EQ(context->self_address(), self_address_);
EXPECT_EQ(start_time, expected_start_time);
}));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(kInitialRttMs));
path_validator_.OnPathResponse(challenge_data, self_address_);
EXPECT_EQ(PathValidationReason::kReasonUnknown,
path_validator_.GetPathValidationReason());
}
TEST_F(QuicPathValidatorTest, RespondAfter1stRetry) {
QuicPathFrameBuffer challenge_data;
EXPECT_CALL(send_delegate_,
SendPathChallenge(_, self_address_, peer_address_,
effective_peer_address_, &writer_))
.WillOnce(Invoke([&](const QuicPathFrameBuffer& payload,
const QuicSocketAddress&, const QuicSocketAddress&,
const QuicSocketAddress&, QuicPacketWriter*) {
memcpy(challenge_data.data(), payload.data(), payload.size());
return true;
}))
.WillOnce(Invoke([&](const QuicPathFrameBuffer& payload,
const QuicSocketAddress&, const QuicSocketAddress&,
const QuicSocketAddress&, QuicPacketWriter*) {
EXPECT_NE(payload, challenge_data);
return true;
}));
EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_))
.Times(2u);
const QuicTime start_time = clock_.Now();
path_validator_.StartPathValidation(
std::unique_ptr<QuicPathValidationContext>(context_),
std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
PathValidationReason::kMultiPort);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
random_.ChangeValue();
alarm_factory_.FireAlarm(
QuicPathValidatorPeer::retry_timer(&path_validator_));
EXPECT_CALL(*result_delegate_, OnPathValidationSuccess(_, start_time));
path_validator_.OnPathResponse(challenge_data, self_address_);
EXPECT_FALSE(path_validator_.HasPendingPathValidation());
}
TEST_F(QuicPathValidatorTest, RespondToRetryChallenge) {
QuicPathFrameBuffer challenge_data;
EXPECT_CALL(send_delegate_,
SendPathChallenge(_, self_address_, peer_address_,
effective_peer_address_, &writer_))
.WillOnce(Invoke([&](const QuicPathFrameBuffer& payload,
const QuicSocketAddress&, const QuicSocketAddress&,
const QuicSocketAddress&, QuicPacketWriter*) {
memcpy(challenge_data.data(), payload.data(), payload.size());
return true;
}))
.WillOnce(Invoke([&](const QuicPathFrameBuffer& payload,
const QuicSocketAddress&, const QuicSocketAddress&,
const QuicSocketAddress&, QuicPacketWriter*) {
EXPECT_NE(challenge_data, payload);
memcpy(challenge_data.data(), payload.data(), payload.size());
return true;
}));
EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_))
.Times(2u);
path_validator_.StartPathValidation(
std::unique_ptr<QuicPathValidationContext>(context_),
std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
PathValidationReason::kMultiPort);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
const QuicTime start_time = clock_.Now();
random_.ChangeValue();
alarm_factory_.FireAlarm(
QuicPathValidatorPeer::retry_timer(&path_validator_));
EXPECT_CALL(*result_delegate_, OnPathValidationSuccess(_, start_time));
path_validator_.OnPathResponse(challenge_data, self_address_);
EXPECT_FALSE(path_validator_.HasPendingPathValidation());
}
TEST_F(QuicPathValidatorTest, ValidationTimeOut) {
EXPECT_CALL(send_delegate_,
SendPathChallenge(_, self_address_, peer_address_,
effective_peer_address_, &writer_))
.Times(3u)
.WillRepeatedly(Return(true));
EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_))
.Times(3u);
path_validator_.StartPathValidation(
std::unique_ptr<QuicPathValidationContext>(context_),
std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
PathValidationReason::kMultiPort);
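  // A PATH_RESPONSE whose payload does not match any outstanding challenge
  // must be ignored rather than treated as validation success.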
QuicPathFrameBuffer challenge_data;
memset(challenge_data.data(), 'a', challenge_data.size());
path_validator_.OnPathResponse(challenge_data, self_address_);
EXPECT_CALL(*result_delegate_, OnPathValidationFailure(_))
.WillOnce(
Invoke([=, this](std::unique_ptr<QuicPathValidationContext> context) {
EXPECT_EQ(context_, context.get());
}));
for (size_t i = 0; i <= QuicPathValidator::kMaxRetryTimes; ++i) {
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
alarm_factory_.FireAlarm(
QuicPathValidatorPeer::retry_timer(&path_validator_));
}
EXPECT_EQ(PathValidationReason::kReasonUnknown,
path_validator_.GetPathValidationReason());
}
TEST_F(QuicPathValidatorTest, SendPathChallengeError) {
EXPECT_CALL(send_delegate_,
SendPathChallenge(_, self_address_, peer_address_,
effective_peer_address_, &writer_))
.WillOnce(Invoke([&](const QuicPathFrameBuffer&, const QuicSocketAddress&,
const QuicSocketAddress&, const QuicSocketAddress&,
QuicPacketWriter*) {
path_validator_.CancelPathValidation();
return false;
}));
EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_))
.Times(0u);
EXPECT_CALL(*result_delegate_, OnPathValidationFailure(_));
path_validator_.StartPathValidation(
std::unique_ptr<QuicPathValidationContext>(context_),
std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
PathValidationReason::kMultiPort);
EXPECT_FALSE(path_validator_.HasPendingPathValidation());
EXPECT_FALSE(QuicPathValidatorPeer::retry_timer(&path_validator_)->IsSet());
EXPECT_EQ(PathValidationReason::kReasonUnknown,
path_validator_.GetPathValidationReason());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_path_validator.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_path_validator_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
7a29b9d1-1578-444f-adeb-b1e9b5c2583e | cpp | tensorflow/tensorflow | flexbuffers_util | tensorflow/lite/delegates/xnnpack/flexbuffers_util.h | tensorflow/lite/delegates/xnnpack/flexbuffers_util_test.cc | #ifndef TENSORFLOW_LITE_DELEGATES_XNNPACK_FLEXBUFFERS_UTIL_H_
#define TENSORFLOW_LITE_DELEGATES_XNNPACK_FLEXBUFFERS_UTIL_H_
#include "flatbuffers/base.h"
#include "flatbuffers/flexbuffers.h"
namespace tflite::xnnpack {
struct FloatPointer {
const float* ptr = nullptr;
};
}
namespace flexbuffers {
template <>
inline tflite::xnnpack::FloatPointer flexbuffers::Reference::As<
    tflite::xnnpack::FloatPointer>() const {
#if !FLATBUFFERS_LITTLEENDIAN
  // Flexbuffer scalars are stored little-endian, so reinterpreting the raw
  // bytes as a float is only valid on little-endian hosts.
  return {};
#else
  return {IsFloat() ? reinterpret_cast<const float*>(data_) : nullptr};
#endif
}
}
#endif | #include "tensorflow/lite/delegates/xnnpack/flexbuffers_util.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
namespace tflite::xnnpack {
namespace {
using ::testing::Pointee;
TEST(FlexbuffersUtilTest, FloatPointer) {
constexpr float kAValue = 3.14;
constexpr float kBValue = 56;
flexbuffers::Builder fbb;
fbb.Map([&] {
fbb.Float("a", kAValue);
fbb.Float("b", kBValue);
});
fbb.Finish();
const flexbuffers::Map map = flexbuffers::GetRoot(fbb.GetBuffer()).AsMap();
const flexbuffers::Reference a = map["a"];
EXPECT_TRUE(a.IsFloat());
EXPECT_THAT(a.As<FloatPointer>().ptr, Pointee(kAValue));
const flexbuffers::Reference b = map["b"];
EXPECT_TRUE(b.IsFloat());
EXPECT_THAT(b.As<FloatPointer>().ptr, Pointee(kBValue));
const flexbuffers::Reference c = map["c"];
ASSERT_TRUE(c.IsNull());
EXPECT_EQ(c.As<FloatPointer>().ptr, nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/flexbuffers_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/flexbuffers_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
acb6e25d-5035-4121-96bc-58bf96d1e10c | cpp | tensorflow/tensorflow | graph_executor | tensorflow/core/tfrt/graph_executor/graph_executor.cc | tensorflow/core/tfrt/graph_executor/graph_executor_test.cc | #include "tensorflow/core/tfrt/graph_executor/graph_executor.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/mlrt/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.h"
#include "tensorflow/compiler/mlir/tfrt/translate/import_model.h"
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/lib/monitoring/sampler.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_utils.h"
#include "tensorflow/core/tfrt/common/metrics.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include "tensorflow/core/tfrt/graph_executor/executable_context.h"
#include "tensorflow/core/tfrt/graph_executor/export_mlir.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/graph_executor/sync_resource_state.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/function.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/execute.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tensorflow/core/tfrt/mlrt/kernel/context.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/runtime/step_id.h"
#include "tensorflow/core/tfrt/runtime/stream.h"
#include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include "tensorflow/core/tfrt/stubs/tfrt_native_lowering_stub.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tensorflow/core/tfrt/utils/tfrt_graph_execution_state.h"
#include "tensorflow/core/tfrt/utils/utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
#include "tfrt/bef/bef_buffer.h"
#include "tfrt/bef_converter/mlir_to_bef.h"
#include "tfrt/core_runtime/core_runtime.h"
#include "tfrt/host_context/async_dispatch.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/async_value_ref.h"
#include "tfrt/host_context/chain.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/function.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/host_context/request_deadline_tracker.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/support/forward_decls.h"
#include "tfrt/support/ref_count.h"
#include "tfrt/support/string_util.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
constexpr char kDeadlineExceededMessage[] = "Deadline exceeded.";
constexpr char kTensorNameJoiningDelimiter[] = "-";
constexpr char kArgumentTypeJoiningDelimiter[] = "^";
constexpr char kFallbackInitFunction[] = "_tfrt_fallback_init";
constexpr char kResourceInitFunction[] = "_tfrt_resource_init";
StepId GetNextStepId() {
static StepIdGenerator gen;
return gen.GetNextStepId();
}
auto* graph_executor_mode = monitoring::Gauge<std::string, 2>::New(
"/tfrt/graph_executor/mode",
"Record the total number of imported savedmodel using different graph "
"executor modes (BEF vs MLRT interpreter)",
"model_name", "model_version");
}
tensorflow::Status RunMlrtFunction(
mlrt::bc::Function function,
const mlrt::LoadedExecutable& loaded_executable,
const tsl::RCReference<tfrt::RequestContext>& request_context,
tfrt::ConcurrentWorkQueue& work_queue,
absl::Span<const tensorflow::Tensor> inputs,
std::vector<tensorflow::Tensor>* outputs,
SyncResourceState* sync_resource_state) {
DCHECK(function);
const auto* fallback_request_state =
request_context->GetDataIfExists<tfd::KernelFallbackCompatRequestState>();
DCHECK(fallback_request_state);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(&work_queue);
tfrt::ExecutionContext exec_ctx(request_context);
AddSyncContext(execution_context, *request_context->host(),
sync_resource_state);
execution_context.AddUserContext(std::make_unique<tf_mlrt::Context>(
fallback_request_state, request_context->resource_context(),
request_context->cancellation_context().get()));
execution_context.AddUserErrorLogger(
[fallback_request_state](absl::Status status) {
if (fallback_request_state) {
LOG(ERROR) << "Model "
<< fallback_request_state->session_metadata().name()
<< " version "
<< fallback_request_state->session_metadata().version()
<< " has error: " << status;
}
});
absl::InlinedVector<mlrt::Value, 4> mlrt_inputs;
mlrt_inputs.reserve(inputs.size());
for (const auto& input : inputs) {
mlrt_inputs.emplace_back(FallbackTensor(input));
}
absl::InlinedVector<mlrt::Value, 4> mlrt_outputs(
function.output_regs().size());
tsl::RCReference<tsl::AsyncValue> chain =
tsl::MakeConstructedAsyncValueRef<tsl::Chain>();
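  // Completion is signaled through this chain: the interpreter's exit handler
  // marks it concrete, and the work queue blocks on it below.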
execution_context.set_exit_handler(
[chain = chain.get()]() { chain->SetStateConcrete(); });
execution_context.CallByMove(function, absl::MakeSpan(mlrt_inputs),
absl::MakeSpan(mlrt_outputs));
work_queue.AddTask(
[&execution_context]() { mlrt::Execute(execution_context); });
work_queue.Await(chain);
if (!execution_context.status().ok()) {
outputs->resize(mlrt_outputs.size(), tensorflow::Tensor());
return execution_context.status();
}
for (auto& mlrt_output : mlrt_outputs) {
DCHECK(mlrt_output.HasValue());
outputs->push_back(std::move(mlrt_output.Get<FallbackTensor>().tensor()));
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<RequestInfo>> CreateRequestInfo(
const GraphExecutionOptions& options,
const GraphExecutionRunOptions& run_options,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue,
tfrt::ResourceContext* resource_context,
tfrt::ResourceContext* client_graph_resource_context,
OpKernelRunnerTable* runner_table,
tfd::FallbackResourceArray* resource_array,
tensorflow::tfrt_stub::FallbackState& fallback_state,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
CostRecorder* cost_recorder) {
auto request_info = std::make_unique<RequestInfo>();
DCHECK(options.runtime);
const Runtime& runtime = *options.runtime;
int64_t request_id = 0;
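  // Reuse the work queue's id as the request id when one is supplied;
  // otherwise fall back to the process-wide step id generator.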
if (work_queue != nullptr) {
request_id = work_queue->id();
if (request_id == 0) request_id = GetNextStepId().id;
request_info->request_queue = work_queue;
} else {
request_id = GetNextStepId().id;
TF_ASSIGN_OR_RETURN(request_info->request_queue_owner,
runtime.CreateRequestQueue(request_id));
request_info->request_queue = request_info->request_queue_owner.get();
}
auto* request_queue = request_info->request_queue;
request_info->runner = [request_queue](std::function<void()> f) {
request_queue->AddTask(std::move(f));
};
tfrt::RequestContextBuilder request_context_builder(
runtime.core_runtime()->GetHostContext(), resource_context, request_id);
DCHECK(runner_table);
DCHECK(resource_array);
auto& fallback_request_state =
request_context_builder.context_data()
.emplace<tfd::KernelFallbackCompatRequestState>(
&request_info->runner, &fallback_state.device_manager(),
request_context_builder.id(), runner_table, resource_array,
request_queue->GetIntraOpThreadPool(), options.model_metadata,
&process_function_library_runtime);
fallback_request_state.set_cost_recorder(cost_recorder);
fallback_request_state.set_client_graph_resource_context(
client_graph_resource_context);
fallback_request_state.set_runtime_config(&options.runtime_config);
fallback_request_state.set_cancellation_manager(
&request_info->cancellation_manager);
tfrt::RequestOptions request_options;
request_options.priority = run_options.priority;
request_context_builder.set_request_options(request_options);
auto expected_req_ctx = std::move(request_context_builder).build();
if (!expected_req_ctx) {
return tensorflow::errors::Internal(
tfrt::StrCat(expected_req_ctx.takeError()));
}
request_info->tfrt_request_context = std::move(expected_req_ctx.get());
return request_info;
}
tensorflow::Status GraphExecutionRunOnFunction(
const GraphExecutionOptions& options,
const GraphExecutionRunOptions& run_options,
absl::string_view signature_name, const SymbolUids& symbol_uids,
const tfrt::Function* func, const mlrt::LoadedExecutable* loaded_executable,
absl::Span<const tensorflow::Tensor> inputs,
std::vector<tensorflow::Tensor>* outputs,
tfrt::ResourceContext* resource_context,
tfrt::ResourceContext* client_graph_resource_context,
OpKernelRunnerTable* runner_table,
tfd::FallbackResourceArray* resource_array, const Runtime& runtime,
FallbackState& fallback_state,
const tensorflow::ProcessFunctionLibraryRuntime&
process_function_library_runtime,
tfrt::RequestDeadlineTracker* req_deadline_tracker,
std::optional<StreamCallbackId> stream_callback_id,
CostRecorder* cost_recorder) {
TF_ASSIGN_OR_RETURN(
auto request_info,
CreateRequestInfo(options, run_options, run_options.work_queue,
resource_context, client_graph_resource_context,
runner_table, resource_array, fallback_state,
process_function_library_runtime, cost_recorder));
int64_t request_id = request_info->tfrt_request_context->id();
tsl::profiler::TraceMe traceme(
[request_id, signature_name, &options, symbol_uids] {
return tsl::profiler::TraceMeEncode(
"TfrtModelRun",
{{"_r", 1},
{"id", request_id},
{"signature", signature_name},
{"model_id", absl::StrCat(options.model_metadata.name(), ":",
options.model_metadata.version())},
{"tf_symbol_uid", symbol_uids.tf_symbol_uid},
{"tfrt_symbol_uid", symbol_uids.tfrt_symbol_uid}});
});
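  // Enforce the caller-supplied deadline: fail fast if it has already passed,
  // and otherwise arrange for the request to be cancelled when it expires.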
if (run_options.deadline.has_value()) {
auto deadline = run_options.deadline.value();
if (absl::ToChronoTime(absl::Now()) > deadline) {
return tensorflow::errors::DeadlineExceeded(kDeadlineExceededMessage);
}
if (req_deadline_tracker == nullptr) {
return tensorflow::errors::InvalidArgument(
"req_deadline_tracker must be non-null");
}
req_deadline_tracker->CancelRequestOnDeadline(
deadline, request_info->tfrt_request_context);
}
  ScopedStreamCallback scoped_stream_callback;
  // Streaming requires both a streamed_output_callback from the caller and a
  // stream callback id compiled into the signature; reject either mismatch.
  if (run_options.streamed_output_callback && !stream_callback_id.has_value()) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Signature '", signature_name, "' does not support streaming."));
  }
  if (stream_callback_id.has_value() && !run_options.streamed_output_callback) {
    return absl::InvalidArgumentError(
        absl::StrCat("Signature '", signature_name,
                     "' contains streaming ops but is called using Predict "
                     "without the streamed callback."));
  }
  if (run_options.streamed_output_callback) {
    auto streamed_output_callback = run_options.streamed_output_callback;
    TF_ASSIGN_OR_RETURN(
        scoped_stream_callback,
        GetGlobalStreamCallbackRegistry().Register(
            options.model_metadata.name(), *stream_callback_id,
            StepId(request_id), std::move(streamed_output_callback)));
  }
if (loaded_executable) {
auto function = loaded_executable->GetFunction(signature_name);
if (!function) {
return errors::InvalidArgument(absl::StrCat(
"Function not found in MLRT executable: ", signature_name));
}
return RunMlrtFunction(function, *loaded_executable,
request_info->tfrt_request_context,
*request_info->request_queue, inputs, outputs,
nullptr);
}
DCHECK(func);
tfrt::ExecutionContext exec_ctx{request_info->tfrt_request_context};
if (run_options.work_queue) {
exec_ctx.set_work_queue(run_options.work_queue);
} else if (request_info->request_queue) {
exec_ctx.set_work_queue(request_info->request_queue);
} else {
exec_ctx.set_work_queue(runtime.work_queue());
}
llvm::SmallVector<tfrt::AsyncValue*, 4> arguments;
auto cleanup = tensorflow::gtl::MakeCleanup([&]() {
for (auto* argument : arguments) argument->DropRef();
});
arguments.push_back(tfrt::GetReadyChain().release());
for (const auto& input : inputs) {
arguments.push_back(
tfrt::MakeAvailableAsyncValueRef<FallbackTensor>(input).release());
}
if (arguments.size() != func->argument_types().size())
return tensorflow::errors::Internal("incorrect number of inputs.");
llvm::SmallVector<tfrt::RCReference<tfrt::AsyncValue>, 4> chain_and_results;
chain_and_results.resize(func->result_types().size());
std::array<tfrt::RCReference<tfrt::AsyncValue>, 1> executed = {
EnqueueWork(exec_ctx, [&]() -> tfrt::Chain {
func->Execute(exec_ctx, arguments, chain_and_results);
return {};
})};
exec_ctx.work_queue().Await(executed);
exec_ctx.work_queue().Await(chain_and_results);
DCHECK(!chain_and_results.empty());
tfrt::RCReference<tfrt::AsyncValue>& chain = chain_and_results[0];
auto results = llvm::drop_begin(chain_and_results, 1);
tensorflow::StatusGroup status_group;
if (chain->IsError()) {
status_group.Update(chain->GetError());
}
for (tfrt::RCReference<tfrt::AsyncValue>& result : results) {
DCHECK(result->IsAvailable());
if (result->IsError()) {
status_group.Update(result->GetError());
outputs->push_back(tensorflow::Tensor());
continue;
}
DCHECK(result->IsType<FallbackTensor>());
const auto& host_tensor = result->get<FallbackTensor>().tensor();
outputs->push_back(host_tensor);
}
if (request_info->tfrt_request_context->IsCancelled()) {
return tensorflow::errors::DeadlineExceeded(kDeadlineExceededMessage);
}
return status_group.as_summary_status();
}
GraphExecutor::GraphExecutor(
Options options, std::unique_ptr<FallbackState> fallback_state,
std::unique_ptr<tfrt::ResourceContext> resource_context,
std::unique_ptr<tensorflow::tfrt_stub::TfrtGraphExecutionState>
graph_execution_state,
std::unique_ptr<mlrt::KernelRegistry> kernel_registry)
: options_(std::move(options)),
fallback_state_(std::move(fallback_state)),
graph_execution_state_(std::move(graph_execution_state)),
req_deadline_tracker_(options_.runtime->core_runtime()->GetHostContext()),
kernel_registry_(std::move(kernel_registry)),
resource_context_(std::move(resource_context)) {
DCHECK(resource_context_);
SetSessionCreatedMetric();
}
absl::StatusOr<std::unique_ptr<GraphExecutor>> GraphExecutor::Create(
Options options, std::unique_ptr<FallbackState> fallback_state,
std::unique_ptr<tfrt::ResourceContext> resource_context,
tensorflow::GraphDef graph_def,
std::unique_ptr<mlrt::KernelRegistry> kernel_registry) {
if (options.runtime == nullptr) {
return errors::InvalidArgument("options.runtime must be non-null ");
}
if (options.enable_online_cost_analysis) {
options.cost_analysis_options.version = Options::CostAnalysisOptions::kOnce;
}
TfrtGraphExecutionState::Options graph_execution_state_options;
graph_execution_state_options.run_placer_grappler_on_functions =
options.run_placer_grappler_on_functions;
options.compile_options.fuse_get_resource_ops_in_hoisting =
!options.enable_mlrt;
graph_executor_mode
->GetCell(options.model_metadata.name(),
absl::StrCat(options.model_metadata.version()))
->Set(options.enable_mlrt ? "mlrt" : "bef");
TF_ASSIGN_OR_RETURN(
auto graph_execution_state,
TfrtGraphExecutionState::Create(graph_execution_state_options,
std::move(graph_def), *fallback_state));
return std::make_unique<GraphExecutor>(
std::move(options), std::move(fallback_state),
std::move(resource_context), std::move(graph_execution_state),
std::move(kernel_registry));
}
namespace {
void CreateSortedNamesAndOriginalIndices(absl::Span<const std::string> names,
std::vector<std::string>& sorted_names,
std::vector<int>& original_indices) {
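  // Produces the names in lexicographic order together with the permutation
  // mapping each sorted position back to its original index.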
DCHECK(sorted_names.empty());
DCHECK(original_indices.empty());
original_indices.resize(names.size());
std::iota(original_indices.begin(), original_indices.end(), 0);
std::sort(original_indices.begin(), original_indices.end(),
[&](int x, int y) { return names[x] < names[y]; });
sorted_names.reserve(names.size());
for (int original_index : original_indices) {
DCHECK_LT(original_index, names.size());
sorted_names.push_back(names[original_index]);
}
}
}
tensorflow::Status GraphExecutor::Run(
const RunOptions& run_options,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names,
std::vector<tensorflow::Tensor>* outputs) {
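  // Sort the input/output names so that the same signature always maps to the
  // same cached client graph regardless of argument order.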
std::vector<std::string> input_names;
input_names.reserve(inputs.size());
for (const auto& p : inputs) input_names.push_back(p.first);
std::vector<std::string> sorted_input_names;
std::vector<int> input_original_indices;
CreateSortedNamesAndOriginalIndices(input_names, sorted_input_names,
input_original_indices);
std::vector<tensorflow::DataType> sorted_input_dtypes;
sorted_input_dtypes.reserve(inputs.size());
for (int original_index : input_original_indices) {
sorted_input_dtypes.push_back(inputs.at(original_index).second.dtype());
}
std::vector<std::string> sorted_output_names;
std::vector<int> output_original_indices;
CreateSortedNamesAndOriginalIndices(output_tensor_names, sorted_output_names,
output_original_indices);
std::vector<std::string> sorted_target_node_names(target_tensor_names.begin(),
target_tensor_names.end());
std::sort(sorted_target_node_names.begin(), sorted_target_node_names.end());
TF_ASSIGN_OR_RETURN(
LoadedClientGraph & loaded_client_graph,
GetOrCreateLoadedClientGraph(
run_options, sorted_input_names, sorted_input_dtypes,
sorted_output_names, sorted_target_node_names, run_options.work_queue,
{}, inputs));
auto executable_context = loaded_client_graph.executable_context();
const mlrt::LoadedExecutable* loaded_executable = nullptr;
const tfrt::Function* func = nullptr;
if (executable_context->IsForMlrt()) {
loaded_executable = executable_context->bytecode_executable.get();
} else {
func =
executable_context->bef_file->GetFunction(loaded_client_graph.name());
}
DCHECK(func || loaded_executable);
std::vector<tensorflow::Tensor> flat_inputs;
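  // Restore graphs consume the checkpoint path at compile time, so no runtime
  // inputs are forwarded to them here.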
if (!loaded_client_graph.is_restore()) {
flat_inputs.reserve(inputs.size());
for (int original_index : input_original_indices) {
flat_inputs.push_back(inputs.at(original_index).second);
}
}
auto now = absl::Now() + simulated_duration_;
bool do_recompilation;
CostRecorder* cost_recorder =
loaded_client_graph.MaybeGetCostRecorder(now, &do_recompilation);
std::vector<tensorflow::Tensor> flat_outputs;
TF_RETURN_IF_ERROR(GraphExecutionRunOnFunction(
options_, run_options, loaded_client_graph.name(),
loaded_client_graph.symbol_uids(), func, loaded_executable, flat_inputs,
&flat_outputs, resource_context_.get(),
&executable_context->resource_context,
&loaded_client_graph.runner_table(),
&loaded_client_graph.resource_array(), runtime(), fallback_state(),
loaded_client_graph.process_function_library_runtime(),
&req_deadline_tracker_, loaded_client_graph.stream_callback_id(),
cost_recorder));
if (do_recompilation) {
TF_RETURN_IF_ERROR(
loaded_client_graph.UpdateCost(*cost_recorder, runtime()));
tensorflow::mutex_lock l(num_recompilations_mu_);
num_recompilations_ += 1;
}
if (cost_recorder != nullptr) {
loaded_client_graph.UpdateCostAnalysisData(now, do_recompilation);
}
auto flat_output_iter = flat_outputs.begin();
outputs->resize(flat_outputs.size());
for (int original_index : output_original_indices) {
(*outputs)[original_index] = std::move(*flat_output_iter);
++flat_output_iter;
}
absl::Time end = absl::Now() + simulated_duration_;
absl::Duration elapsed_duration = end - now;
loaded_client_graph.latency_sampler()->Add(
absl::ToDoubleMicroseconds(elapsed_duration));
return absl::OkStatus();
}
tensorflow::Status GraphExecutor::Extend(const GraphDef& graph) {
return graph_execution_state_->Extend(graph);
}
absl::StatusOr<std::unique_ptr<GraphExecutor::LoadedClientGraph>>
GraphExecutor::ImportAndCompileClientGraph(
const GraphExecutor::ClientGraph& client_graph,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs) {
auto import_start_time = absl::Now();
mlir::DialectRegistry registry;
RegisterMlirDialect(registry, options_.compile_options.backend_compiler);
auto context = std::make_unique<mlir::MLIRContext>(
registry, mlir::MLIRContext::Threading::DISABLED);
context->loadAllAvailableDialects();
ASSIGN_OR_RETURN_IN_IMPORT(
auto flib_def_and_module,
ImportClientGraphToMlirModule(client_graph, context.get()));
auto& [flib_def, module] = flib_def_and_module;
std::string checkpoint_path;
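  // With a backend compiler, a restore graph carries exactly one string input:
  // the checkpoint path, which is extracted here and handed to compilation.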
if (options_.compile_options.backend_compiler &&
mlir::tf_saved_model::IsRestoreGraph(module.get())) {
if (inputs.size() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected 1 input for restore graph, but got ", inputs.size(), "."));
}
const tensorflow::Tensor& input = inputs[0].second;
if (input.dtype() != tensorflow::DT_STRING) {
return absl::InvalidArgumentError(
absl::StrCat("Expected string input for restore graph, but got ",
input.dtype(), "."));
}
checkpoint_path = input.scalar<tstring>()();
}
TF_ASSIGN_OR_RETURN(
auto stream_callback_id,
CreateStreamCallbackId(options().model_metadata.name(), module.get()));
SymbolUids symbol_uids;
symbol_uids.tf_symbol_uid = MaybeUploadMlirToXsymbol(module.get());
auto import_duration = absl::Now() - import_start_time;
LOG(INFO) << "TFRT finished importing client graph (" << &client_graph
<< "). Took " << absl::ToInt64Milliseconds(import_duration)
<< " ms. Client graph name: " << client_graph.name;
auto compile_start_time = absl::Now();
mlir::OwningOpRef<mlir::ModuleOp> module_with_op_keys;
std::shared_ptr<ExecutableContext> executable_context = nullptr;
ModelRuntimeContext model_context(&options_,
options_.compile_options.saved_model_dir,
resource_context_.get());
if (checkpoint_path.empty()) {
model_context.set_function_library_definition(&flib_def);
}
model_context.set_checkpoint_path(checkpoint_path);
if (options_.compile_options.compile_to_sync_tfrt_dialect) {
if (kernel_registry_ == nullptr) {
return tensorflow::errors::Internal("Missing kernel registry in MLRT.");
}
ASSIGN_OR_RETURN_IN_COMPILE(
executable_context,
tfrt::BuildExecutableContext(module.get(), *kernel_registry_));
} else if (options_.enable_mlrt) {
if (kernel_registry_ == nullptr) {
return tensorflow::errors::Internal("Missing kernel registry in MLRT.");
}
ASSIGN_OR_RETURN_IN_COMPILE(
auto bytecode_buffer,
tensorflow::mlrt_compiler::ConvertTfMlirToBytecode(
options_.compile_options, fallback_state(), module.get(),
model_context, &module_with_op_keys));
mlrt::bc::Executable executable(bytecode_buffer.data());
auto bytecode_executable =
std::make_unique<mlrt::LoadedExecutable>(executable, *kernel_registry_);
executable_context = std::make_shared<ExecutableContext>(
std::move(bytecode_buffer), std::move(bytecode_executable));
} else {
tfrt::BefBuffer bef;
TF_RETURN_IF_ERROR(
tensorflow::ConvertTfMlirToBef(options_.compile_options, module.get(),
&bef, model_context, &fallback_state()));
ASSIGN_OR_RETURN_IN_COMPILE(
auto bef_file, tfrt::CreateBefFileFromBefBuffer(runtime(), bef));
executable_context = std::make_shared<ExecutableContext>(
std::move(bef), std::move(bef_file));
}
symbol_uids.tfrt_symbol_uid = MaybeUploadMlirToXsymbol(module.get());
auto compile_duration = absl::Now() - compile_start_time;
LOG(INFO) << "TFRT finished compiling client graph (" << &client_graph
<< "). Took " << absl::ToInt64Milliseconds(compile_duration)
<< " ms. Client graph name: " << client_graph.name;
auto* latency_sampler =
tensorflow::tfrt_metrics::GetTfrtGraphExecutorLatencySampler(
options_.model_metadata.name(), options_.model_metadata.version(),
client_graph.name);
return std::make_unique<LoadedClientGraph>(
client_graph.name, std::move(symbol_uids), this, std::move(context),
std::move(module_with_op_keys), std::move(module),
std::move(executable_context), stream_callback_id,
!checkpoint_path.empty(), std::move(flib_def), latency_sampler);
}
absl::StatusOr<std::unique_ptr<GraphExecutor::LoadedClientGraph>>
GraphExecutor::LoadClientGraph(
const GraphExecutor::ClientGraph& client_graph,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs) {
LOG(INFO) << "TFRT loading client graph (" << &client_graph << ") "
<< client_graph.name;
TF_ASSIGN_OR_RETURN(auto loaded_client_graph,
ImportAndCompileClientGraph(client_graph, inputs));
auto init_start_time = absl::Now();
if (loaded_client_graph->executable_context()->IsForMlrt()) {
RETURN_IF_ERROR_IN_INIT(InitBytecode(loaded_client_graph.get()));
} else {
RETURN_IF_ERROR_IN_INIT(InitBef(loaded_client_graph.get(), work_queue));
}
auto init_duration = absl::Now() - init_start_time;
LOG(INFO) << "TFRT finished initializing client graph (" << &client_graph
<< "). Took " << absl::ToInt64Milliseconds(init_duration)
<< " ms. Client graph name: " << client_graph.name;
return loaded_client_graph;
}
absl::StatusOr<
std::pair<FunctionLibraryDefinition, mlir::OwningOpRef<mlir::ModuleOp>>>
GraphExecutor::ImportClientGraphToMlirModule(
const GraphExecutor::ClientGraph& client_graph,
mlir::MLIRContext* context) const {
tensorflow::GraphImportConfig graph_import_config;
graph_import_config.graph_func_name = client_graph.name;
graph_import_config.prune_unused_nodes = true;
graph_import_config.enable_shape_inference = false;
graph_import_config.inputs = client_graph.input_nodes;
graph_import_config.outputs = client_graph.output_nodes;
graph_import_config.control_outputs = client_graph.target_nodes;
graph_import_config.set_original_tf_func_name = true;
TF_ASSIGN_OR_RETURN(
auto optimized_graph,
graph_execution_state_->CreateOptimizedGraph(graph_import_config));
LOG(INFO) << "TFRT import client graph (" << &client_graph
<< "): Functionalization took "
<< absl::ToInt64Milliseconds(
optimized_graph.functionalization_duration)
<< " ms. Client graph name: " << client_graph.name;
LOG(INFO) << "TFRT import client graph (" << &client_graph
<< "): Grappler took "
<< absl::ToInt64Milliseconds(optimized_graph.grappler_duration)
<< " ms. Client graph name: " << client_graph.name;
TF_ASSIGN_OR_RETURN(
auto module,
tensorflow::ConvertGraphToMlir(*optimized_graph.graph, {},
optimized_graph.graph->flib_def(),
graph_import_config, context));
return std::make_pair(std::move(*optimized_graph.graph->mutable_flib_def()),
std::move(module));
}
tensorflow::Status GraphExecutor::InitBef(
LoadedClientGraph* loaded_client_graph,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue) {
auto* bef_file = loaded_client_graph->executable_context()->bef_file.get();
TF_ASSIGN_OR_RETURN(
auto request_info,
CreateRequestInfo(
options_, {}, work_queue, resource_context_.get(),
nullptr,
&loaded_client_graph->runner_table(),
&loaded_client_graph->resource_array(), fallback_state(),
loaded_client_graph->process_function_library_runtime()));
tfrt::ExecutionContext exec_ctx(request_info->tfrt_request_context);
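  // Run the compiler-emitted init functions to set up fallback kernels and
  // resources before the graph serves any requests.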
TF_RETURN_IF_ERROR(
RunRuntimeInitializer(exec_ctx, bef_file, kFallbackInitFunction));
TF_RETURN_IF_ERROR(
RunRuntimeInitializer(exec_ctx, bef_file, kResourceInitFunction));
return absl::OkStatus();
}
tensorflow::Status GraphExecutor::InitBytecode(
LoadedClientGraph* loaded_graph) {
TF_ASSIGN_OR_RETURN(
auto request_info,
CreateRequestInfo(options_, {},
options_.runtime->work_queue(), resource_context_.get(),
nullptr,
&loaded_graph->runner_table(),
&loaded_graph->resource_array(), fallback_state(),
loaded_graph->process_function_library_runtime()));
const auto* loaded_executable =
loaded_graph->executable_context()->bytecode_executable.get();
DCHECK(loaded_executable);
std::vector<tensorflow::Tensor> outputs;
if (auto function = loaded_executable->GetFunction(kFallbackInitFunction)) {
TF_RETURN_IF_ERROR(RunMlrtFunction(
function, *loaded_executable, request_info->tfrt_request_context,
*request_info->request_queue, {}, &outputs,
&loaded_graph->sync_resource_state()));
}
if (auto function = loaded_executable->GetFunction(kResourceInitFunction)) {
TF_RETURN_IF_ERROR(RunMlrtFunction(
function, *loaded_executable, request_info->tfrt_request_context,
*request_info->request_queue, {}, &outputs,
&loaded_graph->sync_resource_state()));
}
return absl::OkStatus();
}
absl::StatusOr<std::reference_wrapper<GraphExecutor::LoadedClientGraph>>
GraphExecutor::GetOrCreateLoadedClientGraph(
const RunOptions& run_options,
absl::Span<const std::string> input_tensor_names,
absl::Span<const tensorflow::DataType> input_tensor_dtypes,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names,
tensorflow::tfrt_stub::WorkQueueInterface* work_queue,
absl::string_view graph_name,
absl::Span<const std::pair<std::string, tensorflow::Tensor>> inputs) {
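  // The cache key is the explicit graph name when given; otherwise it is built
  // by joining the sorted input, output, and target tensor names.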
const std::string joined_name =
!graph_name.empty()
? std::string(graph_name)
: absl::StrCat(
absl::StrJoin(input_tensor_names, kTensorNameJoiningDelimiter),
kArgumentTypeJoiningDelimiter,
absl::StrJoin(output_tensor_names, kTensorNameJoiningDelimiter),
kArgumentTypeJoiningDelimiter,
absl::StrJoin(target_tensor_names,
kTensorNameJoiningDelimiter));
tensorflow::mutex_lock l(loaded_client_graphs_mu_);
const auto iter = loaded_client_graphs_.find(joined_name);
if (iter != loaded_client_graphs_.end()) return {*iter->second};
if (run_options.disable_compilation) {
return tensorflow::errors::InvalidArgument(
absl::StrCat("GraphExecutor: compilation is disabled in execution but "
"the compiled graph is not found for ",
joined_name));
}
tensorflow::GraphImportConfig::InputArrays input_nodes;
DCHECK_EQ(input_tensor_names.size(), input_tensor_dtypes.size());
for (int i = 0; i < input_tensor_names.size(); ++i) {
const auto& input_name = input_tensor_names[i];
auto input_dtype = input_tensor_dtypes[i];
tensorflow::ArrayInfo array_info;
array_info.imported_dtype = input_dtype;
array_info.shape.set_unknown_rank(true);
input_nodes[input_name] = array_info;
}
ClientGraph client_graph{
run_options.name.empty() ? joined_name : run_options.name,
std::move(input_nodes),
{output_tensor_names.begin(), output_tensor_names.end()},
{target_tensor_names.begin(), target_tensor_names.end()}};
TF_ASSIGN_OR_RETURN(auto loaded_client_graph,
LoadClientGraph(client_graph, work_queue, inputs));
auto* loaded_client_graph_ptr = loaded_client_graph.get();
loaded_client_graphs_[joined_name] = std::move(loaded_client_graph);
return {*loaded_client_graph_ptr};
}
tensorflow::Status GraphExecutor::RunWithSyncInterpreter(
const std::string& graph_name, absl::Span<mlrt::Value> input_values,
absl::Span<const std::string> input_names,
absl::Span<const tensorflow::DataType> input_dtypes,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names,
absl::Span<mlrt::Value> outputs) {
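  // Runs the graph synchronously on the MLRT interpreter, bypassing the
  // request queue used by the asynchronous path.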
TF_ASSIGN_OR_RETURN(
LoadedClientGraph & loaded_client_graph,
GetOrCreateLoadedClientGraph(
{}, input_names, input_dtypes, output_tensor_names,
target_tensor_names,
nullptr,
graph_name.empty() ? output_tensor_names[0] : graph_name));
auto executable_context = loaded_client_graph.executable_context();
mlrt::ExecutionContext execution_context(
executable_context->bytecode_executable.get());
AddSyncContext(execution_context,
*options_.runtime->core_runtime()->GetHostContext(),
&loaded_client_graph.sync_resource_state());
tensorflow::tfd::KernelFallbackCompatRequestState kernel_fallback_state(
tfd::GetDefaultRunner(), &fallback_state().device_manager(),
0, &loaded_client_graph.runner_table(),
&loaded_client_graph.resource_array(),
nullptr, std::nullopt,
&loaded_client_graph.process_function_library_runtime());
auto tf_context = std::make_unique<tensorflow::tf_mlrt::Context>(
&kernel_fallback_state, resource_context_.get());
execution_context.AddUserContext(std::move(tf_context));
auto serving_function = executable_context->bytecode_executable->GetFunction(
loaded_client_graph.name());
DCHECK(serving_function);
execution_context.CallByMove(serving_function, input_values, outputs);
mlrt::Execute(execution_context);
return execution_context.status();
}
CostRecorder* GraphExecutor::LoadedClientGraph::MaybeGetCostRecorder(
absl::Time now, bool* do_recompilation) {
*do_recompilation = false;
tensorflow::mutex_lock l(cost_analysis_data_.mu);
if (!cost_analysis_data_.is_available) {
return nullptr;
}
const auto& options = graph_executor_->options().cost_analysis_options;
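  // Pace cost measurements evenly across the reset interval; the recorder is
  // handed out only when enough time has elapsed for another update.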
absl::Duration elapsed_duration = now - cost_analysis_data_.start_time;
double intended_num_updates = absl::ToDoubleSeconds(elapsed_duration) /
absl::ToDoubleSeconds(options.reset_interval) *
options.updates_per_interval;
if (intended_num_updates - cost_analysis_data_.num_cost_updates >= 1) {
cost_analysis_data_.is_available = false;
*do_recompilation = 1 + cost_analysis_data_.num_cost_updates >=
options.updates_per_interval;
return cost_analysis_data_.cost_recorder.get();
}
return nullptr;
}
Status GraphExecutor::LoadedClientGraph::UpdateCost(
const CostRecorder& cost_recorder, const Runtime& runtime) {
LOG(INFO) << "TFRT updating op costs of loaded client graph (" << this << ") "
<< name_;
std::shared_ptr<ExecutableContext> new_executable_context = nullptr;
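  // Recompile from the retained MLIR using the measured op costs, then swap
  // the new executable in under the lock below.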
if (executable_context()->IsForMlrt()) {
auto tf_mlir_with_op_keys = ::mlir::OwningOpRef<mlir::ModuleOp>(
cost_analysis_data_.tf_mlir_with_op_keys.get().clone());
TF_ASSIGN_OR_RETURN(
auto bytecode_buffer,
tensorflow::mlrt_compiler::ConvertTfMlirWithOpKeysToBytecode(
graph_executor_->options().compile_options,
graph_executor_->fallback_state(), tf_mlir_with_op_keys.get(),
cost_recorder));
mlrt::bc::Executable executable(bytecode_buffer.data());
auto bytecode_executable = std::make_unique<mlrt::LoadedExecutable>(
executable, *graph_executor_->kernel_registry_);
new_executable_context = std::make_shared<ExecutableContext>(
std::move(bytecode_buffer), std::move(bytecode_executable));
} else {
auto tfrt_mlir = ::mlir::OwningOpRef<mlir::ModuleOp>(
cost_analysis_data_.tfrt_mlir.get().clone());
mlir::StatusScopedDiagnosticHandler diag_handler(
tfrt_mlir.get().getContext());
tfrt_compiler::UpdateOpCostInTfrtMlir(tfrt_mlir.get(), cost_recorder);
auto bef = tfrt::ConvertMLIRToBEF(tfrt_mlir.get(),
true);
if (bef.empty()) {
return diag_handler.Combine(
tensorflow::errors::Internal("failed to convert MLIR to BEF."));
}
bef.shrink_to_fit();
TF_ASSIGN_OR_RETURN(auto bef_file,
tfrt::CreateBefFileFromBefBuffer(runtime, bef));
new_executable_context = std::make_shared<ExecutableContext>(
std::move(bef), std::move(bef_file));
}
{
tensorflow::mutex_lock lock(executable_context_mu_);
executable_context_ = std::move(new_executable_context);
}
return absl::OkStatus();
}
GraphExecutor::LoadedClientGraph::LoadedClientGraph(
std::string name, SymbolUids symbol_uids, GraphExecutor* graph_executor,
std::unique_ptr<mlir::MLIRContext> mlir_context,
mlir::OwningOpRef<mlir::ModuleOp> tf_mlir_with_op_keys,
mlir::OwningOpRef<mlir::ModuleOp> tfrt_mlir,
std::shared_ptr<ExecutableContext> executable_context,
std::optional<StreamCallbackId> stream_callback_id, bool is_restore,
FunctionLibraryDefinition flib_def,
tsl::monitoring::SamplerCell* latency_sampler)
: name_(std::move(name)),
symbol_uids_(std::move(symbol_uids)),
graph_executor_(graph_executor),
mlir_context_(std::move(mlir_context)),
executable_context_(std::move(executable_context)),
stream_callback_id_(stream_callback_id),
is_restore_(is_restore),
flib_def_(std::move(flib_def)),
pflr_(&graph_executor->fallback_state().device_manager(),
graph_executor->fallback_state().session_options().env,
&graph_executor->fallback_state().session_options().config,
TF_GRAPH_DEF_VERSION, &flib_def_,
graph_executor->fallback_state()
.session_options()
.config.graph_options()
.optimizer_options(),
nullptr, nullptr,
nullptr,
Rendezvous::Factory{[](int64_t, const DeviceMgr* device_mgr,
tsl::core::RefCountPtr<Rendezvous>* r) {
*r = tsl::core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_mgr));
return absl::OkStatus();
}}),
latency_sampler_(latency_sampler) {
const auto& options = graph_executor_->options().cost_analysis_options;
if (options.version != Options::CostAnalysisOptions::kDisabled) {
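    // Backdate the start time and pre-count the updates so that the very first
    // Run() triggers a cost measurement and a recompilation.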
cost_analysis_data_.start_time = absl::Now() - options.reset_interval;
cost_analysis_data_.is_available = true;
cost_analysis_data_.num_cost_updates = options.updates_per_interval - 1;
cost_analysis_data_.cost_recorder = std::make_unique<CostRecorder>();
if (executable_context_->IsForMlrt()) {
cost_analysis_data_.tf_mlir_with_op_keys =
std::move(tf_mlir_with_op_keys);
} else {
cost_analysis_data_.tfrt_mlir = std::move(tfrt_mlir);
}
}
}
void GraphExecutor::LoadedClientGraph::UpdateCostAnalysisData(
absl::Time now, bool do_recompilation) {
tensorflow::mutex_lock lock(cost_analysis_data_.mu);
if (!do_recompilation) {
cost_analysis_data_.num_cost_updates += 1;
cost_analysis_data_.is_available = true;
return;
}
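  // kOnce drops all cost-analysis state after the single recompilation, while
  // kPeriodic resets the recorder and starts a new measurement interval.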
if (graph_executor_->options().cost_analysis_options.version ==
Options::CostAnalysisOptions::kOnce) {
cost_analysis_data_.is_available = false;
cost_analysis_data_.tfrt_mlir = nullptr;
cost_analysis_data_.tf_mlir_with_op_keys = nullptr;
cost_analysis_data_.cost_recorder = nullptr;
} else {
cost_analysis_data_.cost_recorder = std::make_unique<CostRecorder>();
cost_analysis_data_.is_available = true;
cost_analysis_data_.start_time = now;
cost_analysis_data_.num_cost_updates = 0;
}
}
tensorflow::Status GraphExecutor::CompileGraph(
const std::string& graph_name,
absl::Span<const std::string> input_tensor_names,
absl::Span<const tensorflow::DataType> input_tensor_dtypes,
absl::Span<const std::string> output_tensor_names,
absl::Span<const std::string> target_tensor_names) {
return GetOrCreateLoadedClientGraph(
{}, input_tensor_names, input_tensor_dtypes,
output_tensor_names, target_tensor_names,
nullptr, graph_name)
.status();
}
void RegisterMlirDialect(mlir::DialectRegistry& registry,
tensorflow::BackendCompiler* backend_compiler) {
registry.insert<mlir::BuiltinDialect, mlir::func::FuncDialect>();
mlir::RegisterAllTensorFlowDialects(registry);
if (backend_compiler) {
backend_compiler->GetDependentDialects(registry);
}
}
}
} | #include "tensorflow/core/tfrt/graph_executor/graph_executor.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "learning/brain/experimental/tfrt/native_lowering/kernels/math_kernels.h"
#include "learning/brain/experimental/tfrt/native_lowering/kernels/sync_fallback_kernels.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tsl/platform/statusor.h"
#include "tfrt/cpp_tests/test_util.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/tensor/dense_host_tensor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::testing::status::StatusIs;
class GraphExecutorForTestingCostAnalysis : public GraphExecutor {
public:
int num_recompilations() {
tensorflow::mutex_lock lock(num_recompilations_mu_);
return num_recompilations_;
}
void AdvanceTime(absl::Duration duration) {
simulated_duration_ = simulated_duration_ + duration;
}
};
class GraphExecutorTest : public ::testing::TestWithParam<bool> {};
tensorflow::Status GetSimpleGraphDef(GraphDef& graph_def) {
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
return scope.ToGraphDef(&graph_def);
}
std::unique_ptr<mlrt::KernelRegistry> GetKernelRegistry() {
auto kernel_registry = std::make_unique<mlrt::KernelRegistry>();
tensorflow::tf_mlrt::RegisterTfMlrtKernels(*kernel_registry);
tfrt::cpu::RegisterMlrtMathKernels(kernel_registry.get());
tfrt::cpu::RegisterMlrtFallbackCompatKernels(kernel_registry.get());
return kernel_registry;
}
TEST_P(GraphExecutorTest, Vanilla) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
          CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_P(GraphExecutorTest, OnlineCostAnalysisOptionsOverrideToOnce) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.enable_online_cost_analysis = true;
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kDisabled;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
EXPECT_EQ(graph_executor->num_recompilations(), 0);
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
}
TEST_P(GraphExecutorTest, OnlineCostAnalysisEveryTime) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kPeriodic;
options.cost_analysis_options.reset_interval = absl::ZeroDuration();
options.cost_analysis_options.updates_per_interval = 1;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
for (int i = 0; i < 10; ++i) {
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
EXPECT_EQ(graph_executor->num_recompilations(), i + 1);
}
}
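// kDisabled wins when the legacy override flag is not set: no recompilation
// ever happens, regardless of the other cost analysis options.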
TEST_P(GraphExecutorTest, OnlineCostAnalysisDisabled) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kDisabled;
options.cost_analysis_options.reset_interval = absl::ZeroDuration();
options.cost_analysis_options.updates_per_interval = 1;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 0);
}
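// Periodic mode spreads `updates_per_interval` recompilations across each
// `reset_interval` (10 minutes / 5 updates = one every 2 simulated minutes
// here); the later 1000-minute jumps exercise how the schedule adapts once an
// interval has fully elapsed.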
TEST_P(GraphExecutorTest, OnlineCostAnalysisPeriodic) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.cost_analysis_options.version =
GraphExecutionOptions::CostAnalysisOptions::kPeriodic;
options.cost_analysis_options.reset_interval = absl::Minutes(10);
options.cost_analysis_options.updates_per_interval = 5;
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor_base,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
auto graph_executor = std::unique_ptr<GraphExecutorForTestingCostAnalysis>(
static_cast<GraphExecutorForTestingCostAnalysis*>(
graph_executor_base.release()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
for (int i = 0; i < 10; ++i) {
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
}
for (int i = 0; i < 4; ++i) {
graph_executor->AdvanceTime(absl::Minutes(2));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 1);
}
graph_executor->AdvanceTime(absl::Minutes(2));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 2);
for (int i = 0; i < 4; ++i) {
graph_executor->AdvanceTime(absl::Minutes(1000));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 2);
}
graph_executor->AdvanceTime(absl::Minutes(1000));
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
EXPECT_EQ(graph_executor->num_recompilations(), 3);
}
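// Test-only ops: TestCancel requests cancellation from inside a kernel, and
// TestIsCancelled reports whether the current step's cancellation manager has
// been cancelled. Together they verify that cancellation does not leak across
// Run() calls.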
REGISTER_OP("TestCancel")
.Input("x: T")
.Output("z: T")
.Attr("T: {int32}")
.SetShapeFn(::tensorflow::shape_inference::UnchangedShape);
class TestCancelKernel : public OpKernel {
public:
explicit TestCancelKernel(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* ctx) override {
auto status = absl::CancelledError();
ctx->cancellation_manager()->StartCancelWithStatus(status);
ctx->SetStatus(status);
}
};
REGISTER_KERNEL_BUILDER(Name("TestCancel").Device(DEVICE_CPU),
TestCancelKernel);
REGISTER_OP("TestIsCancelled").Output("z: T").Attr("T: {bool}").SetIsStateful();
class TestIsCancelledKernel : public OpKernel {
public:
explicit TestIsCancelledKernel(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* ctx) override {
ctx->set_output(
0, tensorflow::Tensor(ctx->cancellation_manager()->IsCancelled()));
}
};
REGISTER_KERNEL_BUILDER(Name("TestIsCancelled").Device(DEVICE_CPU),
TestIsCancelledKernel);
TEST_P(GraphExecutorTest, Cancellation) {
GraphDef graph_def;
tensorflow::GraphDefBuilder builder(
tensorflow::GraphDefBuilder::kFailImmediately);
const tensorflow::TensorShape tensor_shape({10, 9});
tensorflow::Node* input = tensorflow::ops::SourceOp(
"Placeholder", builder.opts()
.WithName("input")
.WithAttr("dtype", tensorflow::DT_INT32)
.WithAttr("shape", tensor_shape));
tensorflow::ops::SourceOp("TestIsCancelled",
builder.opts()
.WithName("is_cancelled")
.WithAttr("T", tensorflow::DT_BOOL));
tensorflow::ops::UnaryOp("TestCancel", input,
builder.opts()
.WithName("test_cancel")
.WithAttr("T", tensorflow::DT_INT32));
TF_ASSERT_OK(builder.ToGraphDef(&graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.enable_mlrt = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
          CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
{
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
EXPECT_THAT(graph_executor->Run({}, inputs,
{"test_cancel:0"},
{}, &outputs),
StatusIs(absl::StatusCode::kCancelled));
}
{
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, {},
{"is_cancelled:0"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<bool>(outputs[0]),
::testing::ElementsAreArray({false}));
}
}
INSTANTIATE_TEST_SUITE_P(GraphExecutorTestSuite, GraphExecutorTest,
::testing::Bool());
TEST_F(GraphExecutorTest, Extend) {
GraphDef graph_def;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 0.0f, {10, 10});
Output b = ops::Const(scope.WithControlDependencies(a).WithOpName("b"),
0.0f, {10, 10});
Output c = ops::Identity(scope.WithOpName("c"), b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
}
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
auto session_options = CreateDefaultSessionOptions(options);
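  // Extend() needs access to the original (unoptimized) graph, so static
  // graph optimization must be disabled in the session options.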
session_options.config.mutable_experimental()
->set_disable_optimize_for_static_graph(true);
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
session_options, graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
GraphDef extension;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice("/device:CPU:0");
auto input = ops::Placeholder(scope.WithOpName("input"), DT_INT32);
auto rank = ops::Rank(scope.WithOpName("rank"), input);
TF_ASSERT_OK(scope.ToGraphDef(&extension));
}
TF_ASSERT_OK(graph_executor->Extend(extension));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
TF_ASSERT_OK(graph_executor->Run({}, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_F(GraphExecutorTest, DisableCompilation) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
std::vector<std::pair<std::string, tensorflow::Tensor>> inputs;
inputs.push_back({"input", CreateTfTensor<int32_t>(
{1, 3}, {1, 1, 1})});
std::vector<tensorflow::Tensor> outputs;
GraphExecutor::RunOptions run_options;
run_options.disable_compilation = true;
auto status = graph_executor->Run(run_options, inputs,
{"rank"},
{}, &outputs);
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.ToString(),
::testing::HasSubstr("GraphExecutor: compilation is disabled in "
"execution but the compiled graph is not found"));
run_options.disable_compilation = false;
TF_ASSERT_OK(graph_executor->Run(run_options, inputs,
{"rank"},
{}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]),
::testing::ElementsAreArray({2}));
}
TEST_F(GraphExecutorTest, SyncExecute) {
GraphDef graph_def;
TF_ASSERT_OK(GetSimpleGraphDef(graph_def));
auto runtime = DefaultTfrtRuntime(1);
GraphExecutor::Options options(runtime.get());
options.compile_options.compile_to_sync_tfrt_dialect = true;
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tensorflow::tfrt_stub::FallbackState::Create(
CreateDefaultSessionOptions(options), graph_def.library()));
auto resource_context = std::make_unique<tfrt::ResourceContext>();
TF_ASSERT_OK_AND_ASSIGN(
auto graph_executor,
GraphExecutor::Create(std::move(options), std::move(fallback_state),
std::move(resource_context), graph_def,
GetKernelRegistry()));
std::vector<mlrt::Value> inputs;
tfrt::DenseHostTensor dht =
tfrt::CreateTensorFromValues<int32_t>({1, 3}, {1, 1, 1});
inputs.emplace_back(std::move(dht));
std::vector<mlrt::Value> results;
results.resize(1);
TF_ASSERT_OK(graph_executor->RunWithSyncInterpreter(
"test_graph", absl::Span<mlrt::Value>(inputs),
{"input"}, {DT_INT32},
{"rank"},
{}, absl::Span<mlrt::Value>(results)));
tfrt::DenseHostTensor expected =
tfrt::CreateTensorFromValues<int32_t>({}, {2});
EXPECT_EQ(expected, results[0].Get<tfrt::DenseHostTensor>());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/graph_executor/graph_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/graph_executor/graph_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
967a66dc-1c0b-4779-ba8f-04e0591f8900 | cpp | abseil/abseil-cpp | flat_hash_set | absl/container/flat_hash_set.h | absl/container/flat_hash_set_test.cc | #ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_
#define ABSL_CONTAINER_FLAT_HASH_SET_H_
#include <cstddef>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/container/hash_container_defaults.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/raw_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
template <typename T>
struct FlatHashSetPolicy;
}
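// An unordered associative container of unique keys, implemented as a flat
// (open-addressing) hash table on top of absl's raw_hash_set. Elements are
// stored inline in the backing array, so pointers and iterators into the
// container are invalidated on rehash.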
template <class T, class Hash = DefaultHashContainerHash<T>,
class Eq = DefaultHashContainerEq<T>,
class Allocator = std::allocator<T>>
class ABSL_ATTRIBUTE_OWNER flat_hash_set
: public absl::container_internal::raw_hash_set<
absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Allocator> {
using Base = typename flat_hash_set::raw_hash_set;
public:
flat_hash_set() {}
using Base::Base;
using Base::begin;
using Base::cbegin;
using Base::cend;
using Base::end;
using Base::capacity;
using Base::empty;
using Base::max_size;
using Base::size;
using Base::clear;
using Base::erase;
using Base::insert;
using Base::emplace;
using Base::emplace_hint;
using Base::extract;
using Base::merge;
using Base::swap;
using Base::rehash;
using Base::reserve;
using Base::contains;
using Base::count;
using Base::equal_range;
using Base::find;
using Base::bucket_count;
using Base::load_factor;
using Base::max_load_factor;
using Base::get_allocator;
using Base::hash_function;
using Base::key_eq;
};
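// Erases all elements of `c` that satisfy `pred` and returns the number of
// erased elements.
//
// Illustrative usage (hypothetical values):
//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
//   absl::erase_if(s, [](int v) { return v % 2 == 0; });  // s == {1, 3}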
template <typename T, typename H, typename E, typename A, typename Predicate>
typename flat_hash_set<T, H, E, A>::size_type erase_if(
flat_hash_set<T, H, E, A>& c, Predicate pred) {
return container_internal::EraseIf(pred, &c);
}
template <typename T, typename H, typename E, typename A>
void swap(flat_hash_set<T, H, E, A>& x,
flat_hash_set<T, H, E, A>& y) noexcept(noexcept(x.swap(y))) {
return x.swap(y);
}
namespace container_internal {
template <typename T, typename H, typename E, typename A, typename Function>
decay_t<Function> c_for_each_fast(const flat_hash_set<T, H, E, A>& c,
Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
template <typename T, typename H, typename E, typename A, typename Function>
decay_t<Function> c_for_each_fast(flat_hash_set<T, H, E, A>& c, Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
template <typename T, typename H, typename E, typename A, typename Function>
decay_t<Function> c_for_each_fast(flat_hash_set<T, H, E, A>&& c, Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
}
namespace container_internal {
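// Hash table policy for flat_hash_set: keys are stored directly in the
// slots, so slot_type, key_type, and init_type are all T, and iteration only
// ever exposes const references (constant_iterators).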
template <class T>
struct FlatHashSetPolicy {
using slot_type = T;
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;
template <class Allocator, class... Args>
static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
absl::allocator_traits<Allocator>::construct(*alloc, slot,
std::forward<Args>(args)...);
}
template <class Allocator>
static auto destroy(Allocator* alloc, slot_type* slot) {
absl::allocator_traits<Allocator>::destroy(*alloc, slot);
return IsDestructionTrivial<Allocator, slot_type>();
}
static T& element(slot_type* slot) { return *slot; }
template <class F, class... Args>
static decltype(absl::container_internal::DecomposeValue(
std::declval<F>(), std::declval<Args>()...))
apply(F&& f, Args&&... args) {
return absl::container_internal::DecomposeValue(
std::forward<F>(f), std::forward<Args>(args)...);
}
static size_t space_used(const T*) { return 0; }
template <class Hash>
static constexpr HashSlotFn get_hash_slot_fn() {
return &TypeErasedApplyToSlotFn<Hash, T>;
}
};
}
namespace container_algorithm_internal {
template <class Key, class Hash, class KeyEqual, class Allocator>
struct IsUnorderedContainer<absl::flat_hash_set<Key, Hash, KeyEqual, Allocator>>
: std::true_type {};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/container/flat_hash_set.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/container/hash_container_defaults.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/test_allocator.h"
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
#include "absl/container/internal/unordered_set_members_test.h"
#include "absl/container/internal/unordered_set_modifiers_test.h"
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
using ::absl::container_internal::hash_internal::Enum;
using ::absl::container_internal::hash_internal::EnumClass;
using ::testing::IsEmpty;
using ::testing::Pointee;
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
struct BeforeMain {
BeforeMain() {
absl::flat_hash_set<int> x;
x.insert(1);
CHECK(!x.contains(0)) << "x should not contain 0";
CHECK(x.contains(1)) << "x should contain 1";
}
};
const BeforeMain before_main;
template <class T>
using Set =
absl::flat_hash_set<T, StatefulTestingHash, StatefulTestingEqual, Alloc<T>>;
using SetTypes =
::testing::Types<Set<int>, Set<std::string>, Set<Enum>, Set<EnumClass>>;
INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ConstructorTest, SetTypes);
INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, LookupTest, SetTypes);
INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, MembersTest, SetTypes);
INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ModifiersTest, SetTypes);
TEST(FlatHashSet, EmplaceString) {
std::vector<std::string> v = {"a", "b"};
absl::flat_hash_set<absl::string_view> hs(v.begin(), v.end());
EXPECT_THAT(hs, UnorderedElementsAreArray(v));
}
TEST(FlatHashSet, BitfieldArgument) {
union {
int n : 1;
};
n = 0;
absl::flat_hash_set<int> s = {n};
s.insert(n);
s.insert(s.end(), n);
s.insert({n});
s.erase(n);
s.count(n);
s.prefetch(n);
s.find(n);
s.contains(n);
s.equal_range(n);
}
TEST(FlatHashSet, MergeExtractInsert) {
struct Hash {
size_t operator()(const std::unique_ptr<int>& p) const { return *p; }
};
struct Eq {
bool operator()(const std::unique_ptr<int>& a,
const std::unique_ptr<int>& b) const {
return *a == *b;
}
};
absl::flat_hash_set<std::unique_ptr<int>, Hash, Eq> set1, set2;
set1.insert(absl::make_unique<int>(7));
set1.insert(absl::make_unique<int>(17));
set2.insert(absl::make_unique<int>(7));
set2.insert(absl::make_unique<int>(19));
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17)));
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(19)));
set1.merge(set2);
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17), Pointee(19)));
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
auto node = set1.extract(absl::make_unique<int>(7));
EXPECT_TRUE(node);
EXPECT_THAT(node.value(), Pointee(7));
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(17), Pointee(19)));
auto insert_result = set2.insert(std::move(node));
EXPECT_FALSE(node);
EXPECT_FALSE(insert_result.inserted);
EXPECT_TRUE(insert_result.node);
EXPECT_THAT(insert_result.node.value(), Pointee(7));
EXPECT_EQ(**insert_result.position, 7);
EXPECT_NE(insert_result.position->get(), insert_result.node.value().get());
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
node = set1.extract(absl::make_unique<int>(17));
EXPECT_TRUE(node);
EXPECT_THAT(node.value(), Pointee(17));
EXPECT_THAT(set1, UnorderedElementsAre(Pointee(19)));
node.value() = absl::make_unique<int>(23);
insert_result = set2.insert(std::move(node));
EXPECT_FALSE(node);
EXPECT_TRUE(insert_result.inserted);
EXPECT_FALSE(insert_result.node);
EXPECT_EQ(**insert_result.position, 23);
EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23)));
}
bool IsEven(int k) { return k % 2 == 0; }
TEST(FlatHashSet, EraseIf) {
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
EXPECT_EQ(erase_if(s, [](int) { return true; }), 5);
EXPECT_THAT(s, IsEmpty());
}
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
EXPECT_EQ(erase_if(s, [](int) { return false; }), 0);
EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5));
}
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3);
EXPECT_THAT(s, UnorderedElementsAre(2, 4));
}
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
EXPECT_EQ(erase_if(s, IsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
}
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
EXPECT_EQ(erase_if(s, &IsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
}
}
TEST(FlatHashSet, CForEach) {
using ValueType = std::pair<int, int>;
flat_hash_set<ValueType> s;
std::vector<ValueType> expected;
for (int i = 0; i < 100; ++i) {
{
SCOPED_TRACE("mutable object iteration");
std::vector<ValueType> v;
absl::container_internal::c_for_each_fast(
s, [&v](const ValueType& p) { v.push_back(p); });
ASSERT_THAT(v, UnorderedElementsAreArray(expected));
}
{
SCOPED_TRACE("const object iteration");
std::vector<ValueType> v;
const flat_hash_set<ValueType>& cs = s;
absl::container_internal::c_for_each_fast(
cs, [&v](const ValueType& p) { v.push_back(p); });
ASSERT_THAT(v, UnorderedElementsAreArray(expected));
}
{
SCOPED_TRACE("temporary object iteration");
std::vector<ValueType> v;
absl::container_internal::c_for_each_fast(
flat_hash_set<ValueType>(s),
[&v](const ValueType& p) { v.push_back(p); });
ASSERT_THAT(v, UnorderedElementsAreArray(expected));
}
s.emplace(i, i);
expected.emplace_back(i, i);
}
}
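// An element type whose payload is sanitizer-poisoned except during explicit
// reads through operator*, so any table code path that touches slot memory
// directly (e.g. during small-object-optimization moves) trips ASan.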
class PoisonSoo {
int64_t data_;
public:
explicit PoisonSoo(int64_t d) : data_(d) { SanitizerPoisonObject(&data_); }
PoisonSoo(const PoisonSoo& that) : PoisonSoo(*that) {}
~PoisonSoo() { SanitizerUnpoisonObject(&data_); }
int64_t operator*() const {
SanitizerUnpoisonObject(&data_);
const int64_t ret = data_;
SanitizerPoisonObject(&data_);
return ret;
}
template <typename H>
friend H AbslHashValue(H h, const PoisonSoo& pi) {
return H::combine(std::move(h), *pi);
}
bool operator==(const PoisonSoo& rhs) const { return **this == *rhs; }
};
TEST(FlatHashSet, PoisonSooBasic) {
PoisonSoo a(0), b(1);
flat_hash_set<PoisonSoo> set;
set.insert(a);
EXPECT_THAT(set, UnorderedElementsAre(a));
set.insert(b);
EXPECT_THAT(set, UnorderedElementsAre(a, b));
set.erase(a);
EXPECT_THAT(set, UnorderedElementsAre(b));
set.rehash(0);
EXPECT_THAT(set, UnorderedElementsAre(b));
}
TEST(FlatHashSet, PoisonSooMoveConstructSooToSoo) {
PoisonSoo a(0);
flat_hash_set<PoisonSoo> set;
set.insert(a);
flat_hash_set<PoisonSoo> set2(std::move(set));
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, PoisonSooAllocMoveConstructSooToSoo) {
PoisonSoo a(0);
flat_hash_set<PoisonSoo> set;
set.insert(a);
flat_hash_set<PoisonSoo> set2(std::move(set), std::allocator<PoisonSoo>());
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, PoisonSooMoveAssignFullSooToEmptySoo) {
PoisonSoo a(0);
flat_hash_set<PoisonSoo> set, set2;
set.insert(a);
set2 = std::move(set);
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, PoisonSooMoveAssignFullSooToFullSoo) {
PoisonSoo a(0), b(1);
flat_hash_set<PoisonSoo> set, set2;
set.insert(a);
set2.insert(b);
set2 = std::move(set);
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, FlatHashSetPolicyDestroyReturnsTrue) {
EXPECT_TRUE((decltype(FlatHashSetPolicy<int>::destroy<std::allocator<int>>(
nullptr, nullptr))()));
EXPECT_FALSE(
(decltype(FlatHashSetPolicy<int>::destroy<CountingAllocator<int>>(
nullptr, nullptr))()));
EXPECT_FALSE((decltype(FlatHashSetPolicy<std::unique_ptr<int>>::destroy<
std::allocator<int>>(nullptr, nullptr))()));
}
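// A combined hasher/equality functor that CHECK-fails once it has been moved
// from. The tests below verify that a moved-from set still holds functors
// that are safe to invoke after clear() and reinsertion.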
struct HashEqInvalidOnMove {
HashEqInvalidOnMove() = default;
HashEqInvalidOnMove(const HashEqInvalidOnMove& rhs) = default;
HashEqInvalidOnMove(HashEqInvalidOnMove&& rhs) { rhs.moved = true; }
HashEqInvalidOnMove& operator=(const HashEqInvalidOnMove& rhs) = default;
HashEqInvalidOnMove& operator=(HashEqInvalidOnMove&& rhs) {
rhs.moved = true;
return *this;
}
size_t operator()(int x) const {
CHECK(!moved);
return absl::HashOf(x);
}
bool operator()(int x, int y) const {
CHECK(!moved);
return x == y;
}
bool moved = false;
};
TEST(FlatHashSet, MovedFromCleared_HashMustBeValid) {
flat_hash_set<int, HashEqInvalidOnMove> s1, s2;
s2 = std::move(s1);
s1.clear();
s1.insert(2);
EXPECT_THAT(s1, UnorderedElementsAre(2));
}
TEST(FlatHashSet, MovedFromCleared_EqMustBeValid) {
flat_hash_set<int, DefaultHashContainerHash<int>, HashEqInvalidOnMove> s1, s2;
s2 = std::move(s1);
s1.clear();
s1.insert(2);
EXPECT_THAT(s1, UnorderedElementsAre(2));
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/flat_hash_set.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/flat_hash_set_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
f83f267d-434a-4484-9e69-3ad4689bb579 | cpp | tensorflow/tensorflow | stream_ops_util | tensorflow/core/tfrt/kernels/stream_ops_util.cc | tensorflow/core/tfrt/kernels/stream_ops_util_test.cc | #include "tensorflow/core/tfrt/kernels/stream_ops_util.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/tfrt/kernels/stream_ops_util_constants.h"
namespace tensorflow {
namespace tfrt_stub {
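// Splits the results of a batched PwStreamResults call back into per-request
// responses. `step_ids` is either a scalar (unbatched case) or a 1-D int64
// tensor whose entries carry the originating step id in their high bits; the
// input tensors are sliced along dimension 0 into contiguous per-step runs.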
absl::StatusOr<std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>>>
UnbatchStreamResults(const tensorflow::Tensor& step_ids,
absl::Span<const tensorflow::Tensor> tensors) {
std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>> responses;
if (step_ids.dims() > 0) {
if (step_ids.dtype() != tensorflow::DT_INT64 || step_ids.dims() != 1) {
return absl::InvalidArgumentError(absl::StrCat(
"Expected a 1-D int64 tensor for batched step ids but got dtype=",
tensorflow::DataTypeString(step_ids.dtype()),
" shape=", step_ids.shape().DebugString()));
}
const int batch_size = step_ids.dim_size(0);
for (int i = 0; i < tensors.size(); ++i) {
const tensorflow::TensorShape& shape = tensors[i].shape();
if (shape.dims() < 1 || shape.dim_size(0) != batch_size) {
return absl::InvalidArgumentError(absl::StrCat(
"All inputs to PwStreamResults inside tf.batch_function are "
"required to be batched (batch_size=",
batch_size, ") but input #", i, " has shape ",
shape.DebugString()));
}
}
std::vector<int> sizes;
absl::flat_hash_set<int64_t> unique_step_ids;
for (int i = 0; i < step_ids.NumElements(); ++i) {
const int64_t request_id = step_ids.flat<int64_t>()(i);
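      // The step id occupies the top kStepIdBitSize bits of the request id.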
const int64_t step_id =
static_cast<uint64_t>(request_id) >> (64 - kStepIdBitSize);
VLOG(1) << "PwStreamResults op is unbatching request_id=" << request_id
<< ", step_id=" << step_id;
if (step_id <= 0) {
return absl::InternalError(
absl::StrCat("Invalid step id=", step_id,
"; this usually indicates that `PwStreamResults` "
"was called from an unsupported nested context"));
}
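      // tf.batch_function pads an incomplete batch by repeating the first
      // example, so seeing the first request id again marks the start of
      // padding and the end of real data.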
if (i != 0 && request_id == step_ids.flat<int64_t>()(0)) {
break;
}
if (!responses.empty() && responses.back().first == step_id) {
sizes.back()++;
} else {
responses.push_back({step_id, {}});
sizes.push_back(1);
const bool inserted = unique_step_ids.insert(step_id).second;
if (!inserted) {
return absl::InternalError(absl::StrCat(
"Non-contiguous step ids found in the step id batch: ",
step_ids.DebugString(batch_size)));
}
}
}
int offset = 0;
for (int i = 0; i < responses.size(); ++i) {
auto& outputs = responses[i].second;
outputs.resize(tensors.size());
const int limit = offset + sizes[i];
for (int j = 0; j < tensors.size(); ++j) {
outputs[j] = tensors[j].Slice(offset, limit);
}
offset = limit;
}
} else {
const int64_t step_id = step_ids.flat<int64_t>()(0);
if (step_id <= 0) {
return absl::InternalError(
"Invalid step id; this usually indicates that `PwStreamResults` was "
"called from an unsupported nested context");
}
responses.push_back({step_id, std::vector<tensorflow::Tensor>(
tensors.begin(), tensors.end())});
}
return responses;
}
}
} | #include "tensorflow/core/tfrt/kernels/stream_ops_util.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/tfrt/kernels/stream_ops_util_constants.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::tensorflow::test::AsScalar;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::TensorEq;
using ::testing::ElementsAre;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using ::testing::status::IsOkAndHolds;
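// Packs a step id into the high bits of a request id, mirroring the layout
// that UnbatchStreamResults expects; `id` distinguishes examples belonging to
// the same step.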
int64_t RequestId(int64_t step_id, uint32_t id) {
return (step_id << kStepIdBitSize) | id;
}
TEST(UnbatchStreamResultsTest, ScalarStepId) {
const tensorflow::Tensor step_ids = AsScalar<int64_t>(1);
const std::vector<tensorflow::Tensor> tensors = {
AsScalar<int32_t>(1),
AsTensor<int32_t>({2, 3}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsScalar<int32_t>(1)),
TensorEq(AsTensor<int32_t>({2, 3})))))));
}
TEST(UnbatchStreamResultsTest, Batched) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(1, 0), RequestId(1, 1), RequestId(2, 0), RequestId(3, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({1, 2, 3, 4}),
AsTensor<int32_t>({5, 6, 7, 8}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({1, 2})),
TensorEq(AsTensor<int32_t>({5, 6})))),
Pair(2, ElementsAre(TensorEq(AsTensor<int32_t>({3})),
TensorEq(AsTensor<int32_t>({7})))),
Pair(3, ElementsAre(TensorEq(AsTensor<int32_t>({4})),
TensorEq(AsTensor<int32_t>({8})))))));
}
TEST(UnbatchStreamResultsTest, BatchedUnordered) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(2, 0), RequestId(1, 0), RequestId(1, 1), RequestId(3, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({20, 10, 10, 30}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({10, 10})))),
Pair(2, ElementsAre(TensorEq(AsTensor<int32_t>({20})))),
Pair(3, ElementsAre(TensorEq(AsTensor<int32_t>({30})))))));
}
TEST(UnbatchStreamResultsTest, PaddingOneExample) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(1, 0), RequestId(1, 0), RequestId(1, 0), RequestId(1, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({10, 10, 10, 10}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({10})))))));
}
TEST(UnbatchStreamResultsTest, PaddingMultipleExamples) {
const tensorflow::Tensor step_ids = AsTensor<int64_t>(
{RequestId(1, 0), RequestId(1, 1), RequestId(2, 0), RequestId(1, 0)});
const std::vector<tensorflow::Tensor> tensors = {
AsTensor<int32_t>({10, 20, 30, 10}),
};
EXPECT_THAT(UnbatchStreamResults(step_ids, tensors),
IsOkAndHolds(UnorderedElementsAre(
Pair(1, ElementsAre(TensorEq(AsTensor<int32_t>({10, 20})))),
Pair(2, ElementsAre(TensorEq(AsTensor<int32_t>({30})))))));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/kernels/stream_ops_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/kernels/stream_ops_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
08a67a1f-f0f3-4b9c-a899-9e6205fa1417 | cpp | google/cel-cpp | container_access_step | eval/eval/container_access_step.cc | eval/eval/container_access_step_test.cc | #include "eval/eval/container_access_step.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "base/ast_internal/expr.h"
#include "base/attribute.h"
#include "base/kind.h"
#include "common/casting.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_kind.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/attribute_utility.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/expression_step_base.h"
#include "eval/internal/errors.h"
#include "internal/casts.h"
#include "internal/number.h"
#include "internal/status_macros.h"
#include "runtime/internal/errors.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::AttributeQualifier;
using ::cel::BoolValue;
using ::cel::Cast;
using ::cel::DoubleValue;
using ::cel::ErrorValue;
using ::cel::InstanceOf;
using ::cel::IntValue;
using ::cel::ListValue;
using ::cel::MapValue;
using ::cel::StringValue;
using ::cel::UintValue;
using ::cel::Value;
using ::cel::ValueKind;
using ::cel::ValueKindToString;
using ::cel::internal::Number;
using ::cel::runtime_internal::CreateNoSuchKeyError;
inline constexpr int kNumContainerAccessArguments = 2;
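// Converts a numeric CEL value (int, uint, or double) to a Number so that
// heterogeneous-equality lookups can compare across numeric types.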
absl::optional<Number> CelNumberFromValue(const Value& value) {
switch (value->kind()) {
case ValueKind::kInt64:
return Number::FromInt64(value.GetInt().NativeValue());
case ValueKind::kUint64:
return Number::FromUint64(value.GetUint().NativeValue());
case ValueKind::kDouble:
return Number::FromDouble(value.GetDouble().NativeValue());
default:
return absl::nullopt;
}
}
absl::Status CheckMapKeyType(const Value& key) {
ValueKind kind = key->kind();
switch (kind) {
case ValueKind::kString:
case ValueKind::kInt64:
case ValueKind::kUint64:
case ValueKind::kBool:
return absl::OkStatus();
default:
return absl::InvalidArgumentError(absl::StrCat(
"Invalid map key type: '", ValueKindToString(kind), "'"));
}
}
AttributeQualifier AttributeQualifierFromValue(const Value& v) {
switch (v->kind()) {
case ValueKind::kString:
return AttributeQualifier::OfString(v.GetString().ToString());
case ValueKind::kInt64:
return AttributeQualifier::OfInt(v.GetInt().NativeValue());
case ValueKind::kUint64:
return AttributeQualifier::OfUint(v.GetUint().NativeValue());
case ValueKind::kBool:
return AttributeQualifier::OfBool(v.GetBool().NativeValue());
default:
return AttributeQualifier();
}
}
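// Looks up `key` in `cel_map`. Under heterogeneous equality, a numeric key is
// tried under each losslessly convertible representation (the original uint,
// then int, then uint) before reporting a missing key; otherwise the key type
// is validated and a single exact lookup is performed.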
void LookupInMap(const MapValue& cel_map, const Value& key,
ExecutionFrameBase& frame, Value& result) {
if (frame.options().enable_heterogeneous_equality) {
absl::optional<Number> number = CelNumberFromValue(key);
if (number.has_value()) {
if (key->Is<UintValue>()) {
auto lookup = cel_map.Find(frame.value_manager(), key, result);
if (!lookup.ok()) {
result = frame.value_manager().CreateErrorValue(
std::move(lookup).status());
return;
}
if (*lookup) {
return;
}
}
if (number->LosslessConvertibleToInt()) {
auto lookup = cel_map.Find(
frame.value_manager(),
frame.value_manager().CreateIntValue(number->AsInt()), result);
if (!lookup.ok()) {
result = frame.value_manager().CreateErrorValue(
std::move(lookup).status());
return;
}
if (*lookup) {
return;
}
}
if (number->LosslessConvertibleToUint()) {
auto lookup = cel_map.Find(
frame.value_manager(),
frame.value_manager().CreateUintValue(number->AsUint()), result);
if (!lookup.ok()) {
result = frame.value_manager().CreateErrorValue(
std::move(lookup).status());
return;
}
if (*lookup) {
return;
}
}
result = frame.value_manager().CreateErrorValue(
CreateNoSuchKeyError(key->DebugString()));
return;
}
}
absl::Status status = CheckMapKeyType(key);
if (!status.ok()) {
result = frame.value_manager().CreateErrorValue(std::move(status));
return;
}
absl::Status lookup = cel_map.Get(frame.value_manager(), key, result);
if (!lookup.ok()) {
result = frame.value_manager().CreateErrorValue(std::move(lookup));
}
}
void LookupInList(const ListValue& cel_list, const Value& key,
ExecutionFrameBase& frame, Value& result) {
absl::optional<int64_t> maybe_idx;
if (frame.options().enable_heterogeneous_equality) {
auto number = CelNumberFromValue(key);
if (number.has_value() && number->LosslessConvertibleToInt()) {
maybe_idx = number->AsInt();
}
} else if (InstanceOf<IntValue>(key)) {
maybe_idx = key.GetInt().NativeValue();
}
if (!maybe_idx.has_value()) {
result = frame.value_manager().CreateErrorValue(absl::UnknownError(
absl::StrCat("Index error: expected integer type, got ",
cel::KindToString(ValueKindToKind(key->kind())))));
return;
}
int64_t idx = *maybe_idx;
auto size = cel_list.Size();
if (!size.ok()) {
result = frame.value_manager().CreateErrorValue(size.status());
return;
}
if (idx < 0 || idx >= *size) {
result = frame.value_manager().CreateErrorValue(absl::UnknownError(
absl::StrCat("Index error: index=", idx, " size=", *size)));
return;
}
absl::Status lookup = cel_list.Get(frame.value_manager(), idx, result);
if (!lookup.ok()) {
result = frame.value_manager().CreateErrorValue(std::move(lookup));
}
}
void LookupInContainer(const Value& container, const Value& key,
ExecutionFrameBase& frame, Value& result) {
switch (container.kind()) {
case ValueKind::kMap: {
LookupInMap(Cast<MapValue>(container), key, frame, result);
return;
}
case ValueKind::kList: {
LookupInList(Cast<ListValue>(container), key, frame, result);
return;
}
default:
result =
frame.value_manager().CreateErrorValue(absl::InvalidArgumentError(
absl::StrCat("Invalid container type: '",
ValueKindToString(container->kind()), "'")));
return;
}
}
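// Shared lookup routine for both the stack-based and recursive steps:
// accumulates unknowns from the container and key, propagates errors, unwraps
// optional containers (mapping a missing key to optional.none), and otherwise
// dispatches to the map or list lookup.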
void PerformLookup(ExecutionFrameBase& frame, const Value& container,
const Value& key, const AttributeTrail& container_trail,
bool enable_optional_types, Value& result,
AttributeTrail& trail) {
if (frame.unknown_processing_enabled()) {
AttributeUtility::Accumulator unknowns =
frame.attribute_utility().CreateAccumulator();
unknowns.MaybeAdd(container);
unknowns.MaybeAdd(key);
if (!unknowns.IsEmpty()) {
result = std::move(unknowns).Build();
return;
}
trail = container_trail.Step(AttributeQualifierFromValue(key));
if (frame.attribute_utility().CheckForUnknownExact(trail)) {
result = frame.attribute_utility().CreateUnknownSet(trail.attribute());
return;
}
}
if (InstanceOf<ErrorValue>(container)) {
result = container;
return;
}
if (InstanceOf<ErrorValue>(key)) {
result = key;
return;
}
if (enable_optional_types &&
cel::NativeTypeId::Of(container) ==
cel::NativeTypeId::For<cel::OptionalValueInterface>()) {
const auto& optional_value =
*cel::internal::down_cast<const cel::OptionalValueInterface*>(
cel::Cast<cel::OpaqueValue>(container).operator->());
if (!optional_value.HasValue()) {
result = cel::OptionalValue::None();
return;
}
LookupInContainer(optional_value.Value(), key, frame, result);
if (auto error_value = cel::As<cel::ErrorValue>(result);
error_value && cel::IsNoSuchKey(*error_value)) {
result = cel::OptionalValue::None();
return;
}
result = cel::OptionalValue::Of(frame.value_manager().GetMemoryManager(),
std::move(result));
return;
}
LookupInContainer(container, key, frame, result);
}
class ContainerAccessStep : public ExpressionStepBase {
public:
ContainerAccessStep(int64_t expr_id, bool enable_optional_types)
: ExpressionStepBase(expr_id),
enable_optional_types_(enable_optional_types) {}
absl::Status Evaluate(ExecutionFrame* frame) const override;
private:
bool enable_optional_types_;
};
absl::Status ContainerAccessStep::Evaluate(ExecutionFrame* frame) const {
if (!frame->value_stack().HasEnough(kNumContainerAccessArguments)) {
return absl::Status(
absl::StatusCode::kInternal,
"Insufficient arguments supplied for ContainerAccess-type expression");
}
Value result;
AttributeTrail result_trail;
auto args = frame->value_stack().GetSpan(kNumContainerAccessArguments);
const AttributeTrail& container_trail =
frame->value_stack().GetAttributeSpan(kNumContainerAccessArguments)[0];
PerformLookup(*frame, args[0], args[1], container_trail,
enable_optional_types_, result, result_trail);
frame->value_stack().PopAndPush(kNumContainerAccessArguments,
std::move(result), std::move(result_trail));
return absl::OkStatus();
}
class DirectContainerAccessStep : public DirectExpressionStep {
public:
DirectContainerAccessStep(
std::unique_ptr<DirectExpressionStep> container_step,
std::unique_ptr<DirectExpressionStep> key_step,
bool enable_optional_types, int64_t expr_id)
: DirectExpressionStep(expr_id),
container_step_(std::move(container_step)),
key_step_(std::move(key_step)),
enable_optional_types_(enable_optional_types) {}
absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
AttributeTrail& trail) const override;
private:
std::unique_ptr<DirectExpressionStep> container_step_;
std::unique_ptr<DirectExpressionStep> key_step_;
bool enable_optional_types_;
};
absl::Status DirectContainerAccessStep::Evaluate(ExecutionFrameBase& frame,
Value& result,
AttributeTrail& trail) const {
Value container;
Value key;
AttributeTrail container_trail;
AttributeTrail key_trail;
CEL_RETURN_IF_ERROR(
container_step_->Evaluate(frame, container, container_trail));
CEL_RETURN_IF_ERROR(key_step_->Evaluate(frame, key, key_trail));
PerformLookup(frame, container, key, container_trail, enable_optional_types_,
result, trail);
return absl::OkStatus();
}
}
std::unique_ptr<DirectExpressionStep> CreateDirectContainerAccessStep(
std::unique_ptr<DirectExpressionStep> container_step,
std::unique_ptr<DirectExpressionStep> key_step, bool enable_optional_types,
int64_t expr_id) {
return std::make_unique<DirectContainerAccessStep>(
std::move(container_step), std::move(key_step), enable_optional_types,
expr_id);
}
absl::StatusOr<std::unique_ptr<ExpressionStep>> CreateContainerAccessStep(
const cel::ast_internal::Call& call, int64_t expr_id,
bool enable_optional_types) {
int arg_count = call.args().size() + (call.has_target() ? 1 : 0);
if (arg_count != kNumContainerAccessArguments) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid argument count for index operation: ", arg_count));
}
return std::make_unique<ContainerAccessStep>(expr_id, enable_optional_types);
}
} | #include "eval/eval/container_access_step.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "google/protobuf/struct.pb.h"
#include "absl/status/status.h"
#include "base/builtins.h"
#include "base/type_provider.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/ident_step.h"
#include "eval/public/activation.h"
#include "eval/public/cel_attribute.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/public/testing/matchers.h"
#include "eval/public/unknown_set.h"
#include "internal/testing.h"
#include "parser/parser.h"
#include "google/protobuf/arena.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using ::cel::TypeProvider;
using ::cel::ast_internal::Expr;
using ::cel::ast_internal::SourceInfo;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::protobuf::Struct;
using ::testing::_;
using ::testing::AllOf;
using ::testing::HasSubstr;
using TestParamType = std::tuple<bool, bool, bool>;
CelValue EvaluateAttributeHelper(
google::protobuf::Arena* arena, CelValue container, CelValue key,
    bool receiver_style, bool enable_unknown, bool use_recursive_impl,
const std::vector<CelAttributePattern>& patterns) {
ExecutionPath path;
Expr expr;
SourceInfo source_info;
auto& call = expr.mutable_call_expr();
call.set_function(cel::builtin::kIndex);
call.mutable_args().reserve(2);
Expr& container_expr = (receiver_style) ? call.mutable_target()
: call.mutable_args().emplace_back();
Expr& key_expr = call.mutable_args().emplace_back();
container_expr.mutable_ident_expr().set_name("container");
key_expr.mutable_ident_expr().set_name("key");
if (use_recursive_impl) {
path.push_back(std::make_unique<WrappedDirectStep>(
CreateDirectContainerAccessStep(CreateDirectIdentStep("container", 1),
CreateDirectIdentStep("key", 2),
false, 3),
3));
} else {
path.push_back(
std::move(CreateIdentStep(container_expr.ident_expr(), 1).value()));
path.push_back(
std::move(CreateIdentStep(key_expr.ident_expr(), 2).value()));
path.push_back(std::move(CreateContainerAccessStep(call, 3).value()));
}
cel::RuntimeOptions options;
options.unknown_processing = cel::UnknownProcessingOptions::kAttributeOnly;
options.enable_heterogeneous_equality = false;
CelExpressionFlatImpl cel_expr(
      FlatExpression(std::move(path), /*comprehension_slot_count=*/0,
TypeProvider::Builtin(), options));
Activation activation;
activation.InsertValue("container", container);
activation.InsertValue("key", key);
activation.set_unknown_attribute_patterns(patterns);
auto result = cel_expr.Evaluate(activation, arena);
return *result;
}
class ContainerAccessStepTest : public ::testing::Test {
protected:
ContainerAccessStepTest() = default;
void SetUp() override {}
CelValue EvaluateAttribute(
CelValue container, CelValue key, bool receiver_style,
bool enable_unknown, bool use_recursive_impl = false,
const std::vector<CelAttributePattern>& patterns = {}) {
return EvaluateAttributeHelper(&arena_, container, key, receiver_style,
enable_unknown, use_recursive_impl,
patterns);
}
google::protobuf::Arena arena_;
};
class ContainerAccessStepUniformityTest
: public ::testing::TestWithParam<TestParamType> {
protected:
ContainerAccessStepUniformityTest() = default;
void SetUp() override {}
bool receiver_style() {
TestParamType params = GetParam();
return std::get<0>(params);
}
bool enable_unknown() {
TestParamType params = GetParam();
return std::get<1>(params);
}
bool use_recursive_impl() {
TestParamType params = GetParam();
return std::get<2>(params);
}
CelValue EvaluateAttribute(
CelValue container, CelValue key, bool receiver_style,
bool enable_unknown, bool use_recursive_impl = false,
const std::vector<CelAttributePattern>& patterns = {}) {
return EvaluateAttributeHelper(&arena_, container, key, receiver_style,
enable_unknown, use_recursive_impl,
patterns);
}
google::protobuf::Arena arena_;
};
TEST_P(ContainerAccessStepUniformityTest, TestListIndexAccess) {
ContainerBackedListImpl cel_list({CelValue::CreateInt64(1),
CelValue::CreateInt64(2),
CelValue::CreateInt64(3)});
CelValue result = EvaluateAttribute(CelValue::CreateList(&cel_list),
CelValue::CreateInt64(1),
receiver_style(), enable_unknown());
ASSERT_TRUE(result.IsInt64());
ASSERT_EQ(result.Int64OrDie(), 2);
}
TEST_P(ContainerAccessStepUniformityTest, TestListIndexAccessOutOfBounds) {
ContainerBackedListImpl cel_list({CelValue::CreateInt64(1),
CelValue::CreateInt64(2),
CelValue::CreateInt64(3)});
CelValue result = EvaluateAttribute(CelValue::CreateList(&cel_list),
CelValue::CreateInt64(0),
receiver_style(), enable_unknown());
ASSERT_TRUE(result.IsInt64());
result = EvaluateAttribute(CelValue::CreateList(&cel_list),
CelValue::CreateInt64(2), receiver_style(),
enable_unknown());
ASSERT_TRUE(result.IsInt64());
result = EvaluateAttribute(CelValue::CreateList(&cel_list),
CelValue::CreateInt64(-1), receiver_style(),
enable_unknown());
ASSERT_TRUE(result.IsError());
result = EvaluateAttribute(CelValue::CreateList(&cel_list),
CelValue::CreateInt64(3), receiver_style(),
enable_unknown());
ASSERT_TRUE(result.IsError());
}
TEST_P(ContainerAccessStepUniformityTest, TestListIndexAccessNotAnInt) {
ContainerBackedListImpl cel_list({CelValue::CreateInt64(1),
CelValue::CreateInt64(2),
CelValue::CreateInt64(3)});
CelValue result = EvaluateAttribute(CelValue::CreateList(&cel_list),
CelValue::CreateUint64(1),
receiver_style(), enable_unknown());
ASSERT_TRUE(result.IsError());
}
TEST_P(ContainerAccessStepUniformityTest, TestMapKeyAccess) {
const std::string kKey0 = "testkey0";
const std::string kKey1 = "testkey1";
const std::string kKey2 = "testkey2";
Struct cel_struct;
(*cel_struct.mutable_fields())[kKey0].set_string_value("value0");
(*cel_struct.mutable_fields())[kKey1].set_string_value("value1");
(*cel_struct.mutable_fields())[kKey2].set_string_value("value2");
CelValue result = EvaluateAttribute(
CelProtoWrapper::CreateMessage(&cel_struct, &arena_),
CelValue::CreateString(&kKey0), receiver_style(), enable_unknown());
ASSERT_TRUE(result.IsString());
ASSERT_EQ(result.StringOrDie().value(), "value0");
}
TEST_P(ContainerAccessStepUniformityTest, TestBoolKeyType) {
CelMapBuilder cel_map;
ASSERT_OK(cel_map.Add(CelValue::CreateBool(true),
CelValue::CreateStringView("value_true")));
CelValue result = EvaluateAttribute(CelValue::CreateMap(&cel_map),
CelValue::CreateBool(true),
receiver_style(), enable_unknown());
ASSERT_THAT(result, test::IsCelString("value_true"));
}
TEST_P(ContainerAccessStepUniformityTest, TestMapKeyAccessNotFound) {
const std::string kKey0 = "testkey0";
const std::string kKey1 = "testkey1";
Struct cel_struct;
(*cel_struct.mutable_fields())[kKey0].set_string_value("value0");
CelValue result = EvaluateAttribute(
CelProtoWrapper::CreateMessage(&cel_struct, &arena_),
CelValue::CreateString(&kKey1), receiver_style(), enable_unknown());
ASSERT_TRUE(result.IsError());
EXPECT_THAT(*result.ErrorOrDie(),
StatusIs(absl::StatusCode::kNotFound,
AllOf(HasSubstr("Key not found in map : "),
HasSubstr("testkey1"))));
}
TEST_F(ContainerAccessStepTest, TestInvalidReceiverCreateContainerAccessStep) {
Expr expr;
auto& call = expr.mutable_call_expr();
call.set_function(cel::builtin::kIndex);
Expr& container_expr = call.mutable_target();
container_expr.mutable_ident_expr().set_name("container");
call.mutable_args().reserve(2);
Expr& key_expr = call.mutable_args().emplace_back();
key_expr.mutable_ident_expr().set_name("key");
Expr& extra_arg = call.mutable_args().emplace_back();
extra_arg.mutable_const_expr().set_bool_value(true);
EXPECT_THAT(CreateContainerAccessStep(call, 0).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid argument count")));
}
TEST_F(ContainerAccessStepTest, TestInvalidGlobalCreateContainerAccessStep) {
Expr expr;
auto& call = expr.mutable_call_expr();
call.set_function(cel::builtin::kIndex);
call.mutable_args().reserve(3);
Expr& container_expr = call.mutable_args().emplace_back();
container_expr.mutable_ident_expr().set_name("container");
Expr& key_expr = call.mutable_args().emplace_back();
key_expr.mutable_ident_expr().set_name("key");
Expr& extra_arg = call.mutable_args().emplace_back();
extra_arg.mutable_const_expr().set_bool_value(true);
EXPECT_THAT(CreateContainerAccessStep(call, 0).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid argument count")));
}
TEST_F(ContainerAccessStepTest, TestListIndexAccessUnknown) {
ContainerBackedListImpl cel_list({CelValue::CreateInt64(1),
CelValue::CreateInt64(2),
CelValue::CreateInt64(3)});
CelValue result = EvaluateAttribute(CelValue::CreateList(&cel_list),
CelValue::CreateInt64(1), true, true, {});
ASSERT_TRUE(result.IsInt64());
ASSERT_EQ(result.Int64OrDie(), 2);
std::vector<CelAttributePattern> patterns = {CelAttributePattern(
"container",
{CreateCelAttributeQualifierPattern(CelValue::CreateInt64(1))})};
result =
EvaluateAttribute(CelValue::CreateList(&cel_list),
CelValue::CreateInt64(1), true, true, false, patterns);
ASSERT_TRUE(result.IsUnknownSet());
}
TEST_F(ContainerAccessStepTest, TestListUnknownKey) {
ContainerBackedListImpl cel_list({CelValue::CreateInt64(1),
CelValue::CreateInt64(2),
CelValue::CreateInt64(3)});
UnknownSet unknown_set;
CelValue result =
EvaluateAttribute(CelValue::CreateList(&cel_list),
CelValue::CreateUnknownSet(&unknown_set), true, true);
ASSERT_TRUE(result.IsUnknownSet());
}
TEST_F(ContainerAccessStepTest, TestMapInvalidKey) {
const std::string kKey0 = "testkey0";
const std::string kKey1 = "testkey1";
const std::string kKey2 = "testkey2";
Struct cel_struct;
(*cel_struct.mutable_fields())[kKey0].set_string_value("value0");
(*cel_struct.mutable_fields())[kKey1].set_string_value("value1");
(*cel_struct.mutable_fields())[kKey2].set_string_value("value2");
CelValue result =
EvaluateAttribute(CelProtoWrapper::CreateMessage(&cel_struct, &arena_),
CelValue::CreateDouble(1.0), true, true);
ASSERT_TRUE(result.IsError());
EXPECT_THAT(*result.ErrorOrDie(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid map key type: 'double'")));
}
TEST_F(ContainerAccessStepTest, TestMapUnknownKey) {
const std::string kKey0 = "testkey0";
const std::string kKey1 = "testkey1";
const std::string kKey2 = "testkey2";
Struct cel_struct;
(*cel_struct.mutable_fields())[kKey0].set_string_value("value0");
(*cel_struct.mutable_fields())[kKey1].set_string_value("value1");
(*cel_struct.mutable_fields())[kKey2].set_string_value("value2");
UnknownSet unknown_set;
CelValue result =
EvaluateAttribute(CelProtoWrapper::CreateMessage(&cel_struct, &arena_),
CelValue::CreateUnknownSet(&unknown_set), true, true);
ASSERT_TRUE(result.IsUnknownSet());
}
TEST_F(ContainerAccessStepTest, TestUnknownContainer) {
UnknownSet unknown_set;
CelValue result = EvaluateAttribute(CelValue::CreateUnknownSet(&unknown_set),
CelValue::CreateInt64(1), true, true);
ASSERT_TRUE(result.IsUnknownSet());
}
TEST_F(ContainerAccessStepTest, TestInvalidContainerType) {
CelValue result = EvaluateAttribute(CelValue::CreateInt64(1),
CelValue::CreateInt64(0), true, true);
ASSERT_TRUE(result.IsError());
EXPECT_THAT(*result.ErrorOrDie(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid container type: 'int")));
}
INSTANTIATE_TEST_SUITE_P(
CombinedContainerTest, ContainerAccessStepUniformityTest,
    testing::Combine(/*receiver_style=*/testing::Bool(),
                     /*enable_unknown=*/testing::Bool(),
                     /*use_recursive_impl=*/testing::Bool()));
class ContainerAccessHeterogeneousLookupsTest : public testing::Test {
public:
ContainerAccessHeterogeneousLookupsTest() {
options_.enable_heterogeneous_equality = true;
builder_ = CreateCelExpressionBuilder(options_);
}
protected:
InterpreterOptions options_;
std::unique_ptr<CelExpressionBuilder> builder_;
google::protobuf::Arena arena_;
Activation activation_;
};
TEST_F(ContainerAccessHeterogeneousLookupsTest, DoubleMapKeyInt) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1: 2}[1.0]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelInt64(2));
}
TEST_F(ContainerAccessHeterogeneousLookupsTest, DoubleMapKeyNotAnInt) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1: 2}[1.1]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelError(_));
}
TEST_F(ContainerAccessHeterogeneousLookupsTest, DoubleMapKeyUint) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1u: 2u}[1.0]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelUint64(2));
}
TEST_F(ContainerAccessHeterogeneousLookupsTest, DoubleListIndex) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("[1, 2, 3][1.0]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelInt64(2));
}
TEST_F(ContainerAccessHeterogeneousLookupsTest, DoubleListIndexNotAnInt) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("[1, 2, 3][1.1]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelError(_));
}
TEST_F(ContainerAccessHeterogeneousLookupsTest, UintKeyAsUint) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1u: 2u, 1: 2}[1u]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelUint64(2));
}
TEST_F(ContainerAccessHeterogeneousLookupsTest, UintKeyAsInt) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1: 2}[1u]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelInt64(2));
}
TEST_F(ContainerAccessHeterogeneousLookupsTest, IntKeyAsUint) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1u: 2u}[1]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelUint64(2));
}
TEST_F(ContainerAccessHeterogeneousLookupsTest, UintListIndex) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("[1, 2, 3][2u]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelInt64(3));
}
TEST_F(ContainerAccessHeterogeneousLookupsTest, StringKeyUnaffected) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1: 2, '1': 3}['1']"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelInt64(3));
}
class ContainerAccessHeterogeneousLookupsDisabledTest : public testing::Test {
public:
ContainerAccessHeterogeneousLookupsDisabledTest() {
options_.enable_heterogeneous_equality = false;
builder_ = CreateCelExpressionBuilder(options_);
}
protected:
InterpreterOptions options_;
std::unique_ptr<CelExpressionBuilder> builder_;
google::protobuf::Arena arena_;
Activation activation_;
};
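// With heterogeneous equality disabled, lookups require an exact key-type
// match, so every cross-type numeric lookup below evaluates to an error.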
TEST_F(ContainerAccessHeterogeneousLookupsDisabledTest, DoubleMapKeyInt) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1: 2}[1.0]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelError(_));
}
TEST_F(ContainerAccessHeterogeneousLookupsDisabledTest, DoubleMapKeyNotAnInt) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1: 2}[1.1]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelError(_));
}
TEST_F(ContainerAccessHeterogeneousLookupsDisabledTest, DoubleMapKeyUint) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1u: 2u}[1.0]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelError(_));
}
TEST_F(ContainerAccessHeterogeneousLookupsDisabledTest, DoubleListIndex) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("[1, 2, 3][1.0]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelError(_));
}
TEST_F(ContainerAccessHeterogeneousLookupsDisabledTest,
DoubleListIndexNotAnInt) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("[1, 2, 3][1.1]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelError(_));
}
TEST_F(ContainerAccessHeterogeneousLookupsDisabledTest, UintKeyAsUint) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1u: 2u, 1: 2}[1u]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelUint64(2));
}
TEST_F(ContainerAccessHeterogeneousLookupsDisabledTest, UintKeyAsInt) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1: 2}[1u]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelError(_));
}
TEST_F(ContainerAccessHeterogeneousLookupsDisabledTest, IntKeyAsUint) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1u: 2u}[1]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelError(_));
}
TEST_F(ContainerAccessHeterogeneousLookupsDisabledTest, UintListIndex) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("[1, 2, 3][2u]"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelError(_));
}
TEST_F(ContainerAccessHeterogeneousLookupsDisabledTest, StringKeyUnaffected) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("{1: 2, '1': 3}['1']"));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder_->CreateExpression(
&expr.expr(), &expr.source_info()));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation_, &arena_));
EXPECT_THAT(result, test::IsCelInt64(3));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/container_access_step.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/container_access_step_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
ea72a9d2-860f-466d-ac49-7b322bc3a955 | cpp | google/quiche | hpack_block_decoder | quiche/http2/hpack/decoder/hpack_block_decoder.cc | quiche/http2/hpack/decoder/hpack_block_decoder_test.cc | #include "quiche/http2/hpack/decoder/hpack_block_decoder.h"
#include <cstdint>
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_flag_utils.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
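// Decodes as much of the HPACK block as `db` contains.  If the previous call
// stopped in the middle of an entry, that entry is resumed first; thereafter
// entries are decoded one at a time until the buffer is exhausted, an entry
// straddles the buffer boundary (kDecodeInProgress), or the input is
// malformed (kDecodeError).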
DecodeStatus HpackBlockDecoder::Decode(DecodeBuffer* db) {
if (!before_entry_) {
QUICHE_DVLOG(2) << "HpackBlockDecoder::Decode resume entry, db->Remaining="
<< db->Remaining();
DecodeStatus status = entry_decoder_.Resume(db, listener_);
switch (status) {
case DecodeStatus::kDecodeDone:
before_entry_ = true;
break;
case DecodeStatus::kDecodeInProgress:
QUICHE_DCHECK_EQ(0u, db->Remaining());
return DecodeStatus::kDecodeInProgress;
case DecodeStatus::kDecodeError:
QUICHE_CODE_COUNT_N(decompress_failure_3, 1, 23);
return DecodeStatus::kDecodeError;
}
}
QUICHE_DCHECK(before_entry_);
while (db->HasData()) {
QUICHE_DVLOG(2) << "HpackBlockDecoder::Decode start entry, db->Remaining="
<< db->Remaining();
DecodeStatus status = entry_decoder_.Start(db, listener_);
switch (status) {
case DecodeStatus::kDecodeDone:
continue;
case DecodeStatus::kDecodeInProgress:
QUICHE_DCHECK_EQ(0u, db->Remaining());
before_entry_ = false;
return DecodeStatus::kDecodeInProgress;
case DecodeStatus::kDecodeError:
QUICHE_CODE_COUNT_N(decompress_failure_3, 2, 23);
return DecodeStatus::kDecodeError;
}
QUICHE_DCHECK(false);
}
QUICHE_DCHECK(before_entry_);
return DecodeStatus::kDecodeDone;
}
std::string HpackBlockDecoder::DebugString() const {
return absl::StrCat(
"HpackBlockDecoder(", entry_decoder_.DebugString(), ", listener@",
absl::Hex(reinterpret_cast<intptr_t>(listener_)),
(before_entry_ ? ", between entries)" : ", in an entry)"));
}
std::ostream& operator<<(std::ostream& out, const HpackBlockDecoder& v) {
return out << v.DebugString();
}
} | #include "quiche/http2/hpack/decoder/hpack_block_decoder.h"
#include <cstdint>
#include <sstream>
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/hpack/http2_hpack_constants.h"
#include "quiche/http2/test_tools/hpack_block_builder.h"
#include "quiche/http2/test_tools/hpack_block_collector.h"
#include "quiche/http2/test_tools/hpack_example.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
class HpackBlockDecoderTest : public RandomDecoderTest {
protected:
HpackBlockDecoderTest() : listener_(&collector_), decoder_(&listener_) {
stop_decode_on_done_ = false;
decoder_.Reset();
std::ostringstream strm;
strm << decoder_;
}
DecodeStatus StartDecoding(DecodeBuffer* db) override {
collector_.Clear();
decoder_.Reset();
return ResumeDecoding(db);
}
DecodeStatus ResumeDecoding(DecodeBuffer* db) override {
DecodeStatus status = decoder_.Decode(db);
std::ostringstream strm;
strm << decoder_;
return status;
}
AssertionResult DecodeAndValidateSeveralWays(DecodeBuffer* db,
const Validator& validator) {
bool return_non_zero_on_first = false;
return RandomDecoderTest::DecodeAndValidateSeveralWays(
db, return_non_zero_on_first, validator);
}
AssertionResult DecodeAndValidateSeveralWays(const HpackBlockBuilder& hbb,
const Validator& validator) {
DecodeBuffer db(hbb.buffer());
return DecodeAndValidateSeveralWays(&db, validator);
}
AssertionResult DecodeHpackExampleAndValidateSeveralWays(
absl::string_view hpack_example, Validator validator) {
std::string input = HpackExampleToStringOrDie(hpack_example);
DecodeBuffer db(input);
return DecodeAndValidateSeveralWays(&db, validator);
}
uint8_t Rand8() { return Random().Rand8(); }
std::string Rand8String() { return Random().RandString(Rand8()); }
HpackBlockCollector collector_;
HpackEntryDecoderVLoggingListener listener_;
HpackBlockDecoder decoder_;
};
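// The SpecExample_* tests replay the HPACK header block examples from
// RFC 7541 Appendix C and verify the decoded entries.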
TEST_F(HpackBlockDecoderTest, SpecExample_C_2_1) {
auto do_check = [this]() {
return collector_.ValidateSoleLiteralNameValueHeader(
HpackEntryType::kIndexedLiteralHeader, false, "custom-key", false,
"custom-header");
};
const char hpack_example[] = R"(
40 | == Literal indexed ==
0a | Literal name (len = 10)
6375 7374 6f6d 2d6b 6579 | custom-key
0d | Literal value (len = 13)
6375 7374 6f6d 2d68 6561 6465 72 | custom-header
| -> custom-key:
| custom-header
)";
EXPECT_TRUE(DecodeHpackExampleAndValidateSeveralWays(
hpack_example, ValidateDoneAndEmpty(do_check)));
EXPECT_TRUE(do_check());
}
TEST_F(HpackBlockDecoderTest, SpecExample_C_2_2) {
auto do_check = [this]() {
return collector_.ValidateSoleLiteralValueHeader(
HpackEntryType::kUnindexedLiteralHeader, 4, false, "/sample/path");
};
const char hpack_example[] = R"(
04 | == Literal not indexed ==
| Indexed name (idx = 4)
| :path
0c | Literal value (len = 12)
2f73 616d 706c 652f 7061 7468 | /sample/path
| -> :path: /sample/path
)";
EXPECT_TRUE(DecodeHpackExampleAndValidateSeveralWays(
hpack_example, ValidateDoneAndEmpty(do_check)));
EXPECT_TRUE(do_check());
}
TEST_F(HpackBlockDecoderTest, SpecExample_C_2_3) {
auto do_check = [this]() {
return collector_.ValidateSoleLiteralNameValueHeader(
HpackEntryType::kNeverIndexedLiteralHeader, false, "password", false,
"secret");
};
const char hpack_example[] = R"(
10 | == Literal never indexed ==
08 | Literal name (len = 8)
7061 7373 776f 7264 | password
06 | Literal value (len = 6)
7365 6372 6574 | secret
| -> password: secret
)";
EXPECT_TRUE(DecodeHpackExampleAndValidateSeveralWays(
hpack_example, ValidateDoneAndEmpty(do_check)));
EXPECT_TRUE(do_check());
}
TEST_F(HpackBlockDecoderTest, SpecExample_C_2_4) {
auto do_check = [this]() { return collector_.ValidateSoleIndexedHeader(2); };
const char hpack_example[] = R"(
82 | == Indexed - Add ==
| idx = 2
| -> :method: GET
)";
EXPECT_TRUE(DecodeHpackExampleAndValidateSeveralWays(
hpack_example, ValidateDoneAndEmpty(do_check)));
EXPECT_TRUE(do_check());
}
TEST_F(HpackBlockDecoderTest, SpecExample_C_3_1) {
std::string example = R"(
82 | == Indexed - Add ==
| idx = 2
| -> :method: GET
86 | == Indexed - Add ==
| idx = 6
| -> :scheme: http
84 | == Indexed - Add ==
| idx = 4
| -> :path: /
41 | == Literal indexed ==
| Indexed name (idx = 1)
| :authority
0f | Literal value (len = 15)
7777 772e 6578 616d 706c 652e 636f 6d | www.example.com
| -> :authority:
| www.example.com
)";
HpackBlockCollector expected;
expected.ExpectIndexedHeader(2);
expected.ExpectIndexedHeader(6);
expected.ExpectIndexedHeader(4);
expected.ExpectNameIndexAndLiteralValue(HpackEntryType::kIndexedLiteralHeader,
1, false, "www.example.com");
EXPECT_TRUE(DecodeHpackExampleAndValidateSeveralWays(
example,
ValidateDoneAndEmpty([&] { return collector_.VerifyEq(expected); })));
EXPECT_TRUE(collector_.VerifyEq(expected));
}
TEST_F(HpackBlockDecoderTest, SpecExample_C_5_1) {
std::string example = R"(
48 | == Literal indexed ==
| Indexed name (idx = 8)
| :status
03 | Literal value (len = 3)
3330 32 | 302
| -> :status: 302
58 | == Literal indexed ==
| Indexed name (idx = 24)
| cache-control
07 | Literal value (len = 7)
7072 6976 6174 65 | private
| -> cache-control: private
61 | == Literal indexed ==
| Indexed name (idx = 33)
| date
1d | Literal value (len = 29)
4d6f 6e2c 2032 3120 4f63 7420 3230 3133 | Mon, 21 Oct 2013
2032 303a 3133 3a32 3120 474d 54 | 20:13:21 GMT
| -> date: Mon, 21 Oct 2013
| 20:13:21 GMT
6e | == Literal indexed ==
| Indexed name (idx = 46)
| location
17 | Literal value (len = 23)
  6874 7470 733a 2f2f 7777 772e 6578 616d | https://www.exam
  706c 652e 636f 6d                       | ple.com
                                          | -> location:
                                          |   https://www.example.com
)";
HpackBlockCollector expected;
expected.ExpectNameIndexAndLiteralValue(HpackEntryType::kIndexedLiteralHeader,
8, false, "302");
expected.ExpectNameIndexAndLiteralValue(HpackEntryType::kIndexedLiteralHeader,
24, false, "private");
expected.ExpectNameIndexAndLiteralValue(HpackEntryType::kIndexedLiteralHeader,
33, false,
"Mon, 21 Oct 2013 20:13:21 GMT");
expected.ExpectNameIndexAndLiteralValue(HpackEntryType::kIndexedLiteralHeader,
46, false, "https:
EXPECT_TRUE(DecodeHpackExampleAndValidateSeveralWays(
example,
ValidateDoneAndEmpty([&] { return collector_.VerifyEq(expected); })));
EXPECT_TRUE(collector_.VerifyEq(expected));
}
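// Round-trips a randomly generated, shuffled mix of entry kinds through
// HpackBlockBuilder and verifies the decoder reproduces them.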
TEST_F(HpackBlockDecoderTest, Computed) {
HpackBlockCollector expected;
expected.ExpectIndexedHeader(0);
expected.ExpectIndexedHeader(1);
expected.ExpectIndexedHeader(126);
expected.ExpectIndexedHeader(127);
expected.ExpectIndexedHeader(128);
expected.ExpectDynamicTableSizeUpdate(0);
expected.ExpectDynamicTableSizeUpdate(1);
expected.ExpectDynamicTableSizeUpdate(14);
expected.ExpectDynamicTableSizeUpdate(15);
expected.ExpectDynamicTableSizeUpdate(30);
expected.ExpectDynamicTableSizeUpdate(31);
expected.ExpectDynamicTableSizeUpdate(4095);
expected.ExpectDynamicTableSizeUpdate(4096);
expected.ExpectDynamicTableSizeUpdate(8192);
for (auto type : {HpackEntryType::kIndexedLiteralHeader,
HpackEntryType::kUnindexedLiteralHeader,
HpackEntryType::kNeverIndexedLiteralHeader}) {
for (bool value_huffman : {false, true}) {
expected.ExpectNameIndexAndLiteralValue(type, Rand8() + 1, value_huffman,
Rand8String());
expected.ExpectLiteralNameAndValue(type, false, Rand8String(),
value_huffman, Rand8String());
expected.ExpectLiteralNameAndValue(type, true, Rand8String(),
value_huffman, Rand8String());
}
}
expected.ShuffleEntries(RandomPtr());
HpackBlockBuilder hbb;
expected.AppendToHpackBlockBuilder(&hbb);
EXPECT_TRUE(DecodeAndValidateSeveralWays(
hbb,
ValidateDoneAndEmpty([&] { return collector_.VerifyEq(expected); })));
EXPECT_TRUE(collector_.VerifyEq(expected));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_block_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_block_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
1f1ddcd6-b16e-4419-83c4-75e3931b8b8d | cpp | tensorflow/tensorflow | save_op | tensorflow/core/kernels/save_op.cc | tensorflow/core/kernels/save_op_test.cc | #include "tensorflow/core/kernels/save_restore_tensor.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/tensor_slice_writer.h"
namespace tensorflow {
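// Saves the input tensors to a checkpoint file using the table-based
// TensorSliceWriter; SaveSlicesOp (below) additionally accepts per-tensor
// shape-and-slice specifications so that only part of a tensor is written.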
class SaveOp : public OpKernel {
public:
explicit SaveOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
SaveTensors(context, &checkpoint::CreateTableTensorSliceBuilder, false);
}
};
REGISTER_KERNEL_BUILDER(Name("Save").Device(DEVICE_CPU), SaveOp);
class SaveSlicesOp : public OpKernel {
public:
explicit SaveSlicesOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
SaveTensors(context, &checkpoint::CreateTableTensorSliceBuilder, true);
}
};
REGISTER_KERNEL_BUILDER(Name("SaveSlices").Device(DEVICE_CPU), SaveSlicesOp);
class ShardedFilenameOp : public OpKernel {
public:
explicit ShardedFilenameOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
static const char* input_names[3] = {"basename", "shard", "num_shards"};
for (int i = 0; i < ctx->num_inputs(); ++i) {
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(ctx->input(i).shape()),
errors::InvalidArgument(input_names[i],
" must be a scalar, got shape ",
ctx->input(i).shape().DebugString()));
}
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &out));
out->scalar<tstring>()() = strings::Printf(
"%s-%05d-of-%05d", ctx->input(0).scalar<tstring>()().c_str(),
ctx->input(1).scalar<int32>()(), ctx->input(2).scalar<int32>()());
}
};
REGISTER_KERNEL_BUILDER(Name("ShardedFilename").Device(DEVICE_CPU),
ShardedFilenameOp);
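// Produces a glob pattern matching every shard of a sharded checkpoint:
// "<basename>-?????-of-<num_shards padded to 5 digits>".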
class ShardedFilespecOp : public OpKernel {
public:
explicit ShardedFilespecOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
static const char* input_names[2] = {"basename", "num_shards"};
for (int i = 0; i < ctx->num_inputs(); ++i) {
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(ctx->input(i).shape()),
errors::InvalidArgument(input_names[i],
" must be a scalar, got shape ",
ctx->input(i).shape().DebugString()));
}
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &out));
out->scalar<tstring>()() = strings::Printf(
"%s-\?\?\?\?\?-of-%05d", ctx->input(0).scalar<tstring>()().c_str(),
ctx->input(1).scalar<int32>()());
}
};
REGISTER_KERNEL_BUILDER(Name("ShardedFilespec").Device(DEVICE_CPU),
ShardedFilespecOp);
} | #include <functional>
#include <memory>
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/io_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/tensor_slice_reader.h"
namespace tensorflow {
namespace {
class SaveOpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_ASSERT_OK(
NodeDefBuilder("myop", "Save")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput({DT_BOOL, DT_INT32, DT_FLOAT, DT_DOUBLE, DT_QINT8,
DT_QINT32, DT_UINT8, DT_INT8, DT_INT16, DT_INT64,
DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_HALF}))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(SaveOpTest, Simple) {
const string filename = io::JoinPath(testing::TmpDir(), "tensor_simple");
const string tensornames[] = {
"tensor_bool", "tensor_int", "tensor_float", "tensor_double",
"tensor_qint8", "tensor_qint32", "tensor_uint8", "tensor_int8",
"tensor_int16", "tensor_int64", "tensor_string", "tensor_complex64",
"tensor_complex128", "tensor_half"};
MakeOp();
AddInput<tstring>(TensorShape({}),
[&filename](int x) -> tstring { return filename; });
AddInput<tstring>(TensorShape({14}), [&tensornames](int x) -> tstring {
return tensornames[x];
});
AddInput<bool>(TensorShape({2}), [](int x) -> bool { return x != 0; });
AddInput<int32>(TensorShape({10}), [](int x) -> int32 { return x + 1; });
AddInput<float>(TensorShape({2, 4}),
[](int x) -> float { return static_cast<float>(x) / 10; });
AddInput<double>(TensorShape({2, 4}),
[](int x) -> double { return static_cast<double>(x) / 20; });
AddInput<qint8>(TensorShape({3, 2}),
[](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); });
AddInput<qint32>(TensorShape({2, 3}), [](int x) -> qint32 {
return *reinterpret_cast<qint32*>(&x) * qint8(2);
});
AddInput<uint8>(TensorShape({11}), [](int x) -> uint8 { return x + 1; });
AddInput<int8>(TensorShape({7}), [](int x) -> int8 { return x - 7; });
AddInput<int16>(TensorShape({7}), [](int x) -> int16 { return x - 8; });
AddInput<int64_t>(TensorShape({9}), [](int x) -> int64 { return x - 9; });
AddInput<tstring>(TensorShape({2}),
[](int x) -> tstring { return x ? "yes" : "no"; });
AddInput<complex64>(TensorShape({2, 3}), [](int x) -> complex64 {
return complex64(100 + x, 200 + x);
});
AddInput<complex128>(TensorShape({2, 3}), [](int x) -> complex128 {
return complex128(100 + x, 200 + x);
});
AddInput<Eigen::half>(TensorShape({2, 4}), [](int x) -> Eigen::half {
return static_cast<Eigen::half>(x) / Eigen::half(2);
});
TF_ASSERT_OK(RunOpKernel());
checkpoint::TensorSliceReader reader(filename,
checkpoint::OpenTableTensorSliceReader);
TF_EXPECT_OK(reader.status());
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_bool", &shape, &type));
TensorShape expected({2});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_BOOL, type);
TensorSlice s = TensorSlice::ParseOrDie("-");
bool data[2];
std::fill_n(data, 2, false);
EXPECT_TRUE(reader.CopySliceData("tensor_bool", s, data));
for (int i = 0; i < 2; ++i) {
EXPECT_EQ((i != 0), data[i]);
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_int", &shape, &type));
TensorShape expected({10});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_INT32, type);
TensorSlice s = TensorSlice::ParseOrDie("-");
int data[10];
std::fill_n(data, 10, 0);
EXPECT_TRUE(reader.CopySliceData("tensor_int", s, data));
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(i + 1, data[i]);
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_float", &shape, &type));
TensorShape expected({2, 4});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_FLOAT, type);
TensorSlice s = TensorSlice::ParseOrDie("-:-");
float data[8];
std::fill_n(data, 8, 0);
EXPECT_TRUE(reader.CopySliceData("tensor_float", s, data));
for (int i = 0; i < 8; ++i) {
EXPECT_EQ(static_cast<float>(i) / 10, data[i]);
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_double", &shape, &type));
TensorShape expected({2, 4});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_DOUBLE, type);
TensorSlice s = TensorSlice::ParseOrDie("-:-");
double data[8];
std::fill_n(data, 8, 0);
EXPECT_TRUE(reader.CopySliceData("tensor_double", s, data));
for (int i = 0; i < 8; ++i) {
EXPECT_EQ(static_cast<double>(i) / 20, data[i]);
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_qint8", &shape, &type));
TensorShape expected({3, 2});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_QINT8, type);
TensorSlice s = TensorSlice::ParseOrDie("-:-");
qint8 data[6];
EXPECT_TRUE(reader.CopySliceData("tensor_qint8", s, data));
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(*reinterpret_cast<qint8*>(&i), data[i]);
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_qint32", &shape, &type));
TensorShape expected({2, 3});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_QINT32, type);
TensorSlice s = TensorSlice::ParseOrDie("-:-");
qint32 data[6];
EXPECT_TRUE(reader.CopySliceData("tensor_qint32", s, data));
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(*reinterpret_cast<qint32*>(&i) * qint8(2), data[i]);
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_uint8", &shape, &type));
TensorShape expected({11});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_UINT8, type);
TensorSlice s = TensorSlice::ParseOrDie("-");
uint8 data[11];
EXPECT_TRUE(reader.CopySliceData("tensor_uint8", s, data));
for (int i = 0; i < 11; ++i) {
EXPECT_EQ(i + 1, data[i]);
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_int8", &shape, &type));
TensorShape expected({7});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_INT8, type);
TensorSlice s = TensorSlice::ParseOrDie("-");
int8 data[7];
EXPECT_TRUE(reader.CopySliceData("tensor_int8", s, data));
for (int i = 0; i < 7; ++i) {
EXPECT_EQ(i - 7, data[i]);
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_int16", &shape, &type));
TensorShape expected({7});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_INT16, type);
TensorSlice s = TensorSlice::ParseOrDie("-");
int16 data[7];
EXPECT_TRUE(reader.CopySliceData("tensor_int16", s, data));
for (int i = 0; i < 7; ++i) {
EXPECT_EQ(i - 8, data[i]);
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_int64", &shape, &type));
TensorShape expected({9});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_INT64, type);
TensorSlice s = TensorSlice::ParseOrDie("-");
int64_t data[9];
EXPECT_TRUE(reader.CopySliceData("tensor_int64", s, data));
for (int i = 0; i < 9; ++i) {
EXPECT_EQ(i - 9, data[i]);
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_string", &shape, &type));
TensorShape expected({2});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_STRING, type);
TensorSlice s = TensorSlice::ParseOrDie("-");
tstring data[2];
EXPECT_TRUE(reader.CopySliceData("tensor_string", s, data));
EXPECT_EQ("no", data[0]);
EXPECT_EQ("yes", data[1]);
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_complex64", &shape, &type));
TensorShape expected({2, 3});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_COMPLEX64, type);
TensorSlice s = TensorSlice::ParseOrDie("-:-");
complex64 data[6];
EXPECT_TRUE(reader.CopySliceData("tensor_complex64", s, data));
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(100 + i, data[i].real());
EXPECT_EQ(200 + i, data[i].imag());
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_complex128", &shape, &type));
TensorShape expected({2, 3});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_COMPLEX128, type);
TensorSlice s = TensorSlice::ParseOrDie("-:-");
complex128 data[6];
EXPECT_TRUE(reader.CopySliceData("tensor_complex128", s, data));
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(100 + i, data[i].real());
EXPECT_EQ(200 + i, data[i].imag());
}
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_half", &shape, &type));
TensorShape expected({2, 4});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_HALF, type);
TensorSlice s = TensorSlice::ParseOrDie("-:-");
Eigen::half data[8];
std::fill_n(data, 8, Eigen::half(0));
EXPECT_TRUE(reader.CopySliceData("tensor_half", s, data));
for (int i = 0; i < 8; ++i) {
EXPECT_EQ(static_cast<Eigen::half>(i) / Eigen::half(2), data[i]);
}
}
}
class SaveSlicesOpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_ASSERT_OK(NodeDefBuilder("myop", "SaveSlices")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput(
{DT_INT32, DT_FLOAT, DT_DOUBLE, DT_QINT8, DT_QINT32}))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(SaveSlicesOpTest, Slices) {
const string filename = io::JoinPath(testing::TmpDir(), "tensor_slices");
const string tensornames[] = {"tensor_int", "tensor_float", "tensor_double",
"tensor_qint8", "tensor_qint32"};
const string tensorshapes[] = {
"10 -",
"2 4 -:0,2",
"2 4 0,1:2,2",
"3 2 -:-",
"2 3 1,1:2,1"
};
MakeOp();
AddInput<tstring>(TensorShape({}),
[&filename](int x) -> tstring { return filename; });
AddInput<tstring>(TensorShape({5}), [&tensornames](int x) -> tstring {
return tensornames[x];
});
AddInput<tstring>(TensorShape({5}), [&tensorshapes](int x) -> tstring {
return tensorshapes[x];
});
AddInput<int32>(TensorShape({10}), [](int x) -> int32 { return x + 1; });
AddInput<float>(TensorShape({2, 2}),
[](int x) -> float { return static_cast<float>(x) / 10; });
AddInput<double>(TensorShape({1, 2}),
[](int x) -> double { return static_cast<double>(x) / 20; });
AddInput<qint8>(TensorShape({3, 2}),
[](int x) -> qint8 { return *reinterpret_cast<qint8*>(&x); });
AddInput<qint32>(TensorShape({1, 1}), [](int x) -> qint32 {
return *reinterpret_cast<qint32*>(&x) * qint8(2);
});
TF_ASSERT_OK(RunOpKernel());
checkpoint::TensorSliceReader reader(filename,
checkpoint::OpenTableTensorSliceReader);
TF_EXPECT_OK(reader.status());
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_int", &shape, &type));
TensorShape expected({10});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_INT32, type);
TensorSlice s = TensorSlice::ParseOrDie("-");
int data[10];
EXPECT_TRUE(reader.CopySliceData("tensor_int", s, data));
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_float", &shape, &type));
TensorShape expected({2, 4});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_FLOAT, type);
TensorSlice full_slice = TensorSlice::ParseOrDie("-:-");
TensorSlice saved_slice = TensorSlice::ParseOrDie("-:0,2");
float data[8];
EXPECT_FALSE(reader.CopySliceData("tensor_float", full_slice, data));
EXPECT_TRUE(reader.CopySliceData("tensor_float", saved_slice, data));
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_double", &shape, &type));
TensorShape expected({2, 4});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_DOUBLE, type);
TensorSlice full_slice = TensorSlice::ParseOrDie("-:-");
TensorSlice saved_slice = TensorSlice::ParseOrDie("0,1:2,2");
double data[8];
EXPECT_FALSE(reader.CopySliceData("tensor_double", full_slice, data));
EXPECT_TRUE(reader.CopySliceData("tensor_double", saved_slice, data));
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_qint8", &shape, &type));
TensorShape expected({3, 2});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_QINT8, type);
TensorSlice s = TensorSlice::ParseOrDie("-:-");
qint8 data[6];
EXPECT_TRUE(reader.CopySliceData("tensor_qint8", s, data));
}
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("tensor_qint32", &shape, &type));
TensorShape expected({2, 3});
EXPECT_TRUE(shape.IsSameSize(expected));
EXPECT_EQ(DT_QINT32, type);
TensorSlice s = TensorSlice::ParseOrDie("1,1:2,1");
TensorSlice full_slice = TensorSlice::ParseOrDie("-:-");
TensorSlice saved_slice = TensorSlice::ParseOrDie("1,1:2,1");
qint32 data[6];
EXPECT_FALSE(reader.CopySliceData("tensor_qint32", full_slice, data));
EXPECT_TRUE(reader.CopySliceData("tensor_qint32", saved_slice, data));
}
}
class SaveOpSlices2Test : public OpsTestBase {
protected:
void MakeOp() {
TF_ASSERT_OK(NodeDefBuilder("myop", "SaveSlices")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput({DT_INT32, DT_INT32, DT_FLOAT}))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(SaveOpSlices2Test, TwoSlices) {
const string filename = io::JoinPath(testing::TmpDir(), "three_slices");
const string tensornames[] = {"four_by_sixteen", "four_by_sixteen", "small"};
const string tensorshapes[] = {
"4 16 0,2:-",
"4 16 2,2:-",
""
};
MakeOp();
AddInput<tstring>(TensorShape({}),
[&filename](int x) -> tstring { return filename; });
AddInput<tstring>(TensorShape({3}), [&tensornames](int x) -> tstring {
return tensornames[x];
});
AddInput<tstring>(TensorShape({3}), [&tensorshapes](int x) -> tstring {
return tensorshapes[x];
});
AddInput<int32>(TensorShape({2, 16}), [](int x) -> int32 { return x + 1; });
AddInput<int32>(TensorShape({2, 16}),
[](int x) -> int32 { return 10 * (x + 1); });
AddInput<float>(TensorShape({2, 4}),
[](int x) -> float { return static_cast<float>(x) / 10; });
TF_ASSERT_OK(RunOpKernel());
checkpoint::TensorSliceReader reader(filename,
checkpoint::OpenTableTensorSliceReader);
TF_EXPECT_OK(reader.status());
{
Tensor reloaded(DT_INT32, {4, 16});
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("four_by_sixteen", &shape, &type));
EXPECT_TRUE(shape.IsSameSize(reloaded.shape()));
EXPECT_EQ(type, reloaded.dtype());
EXPECT_TRUE(reader.CopySliceData("four_by_sixteen",
TensorSlice(reloaded.dims()),
reloaded.flat<int>().data()));
{
auto slice = reloaded.Slice(0, 2).flat<int>();
for (int i = 0; i < slice.size(); ++i) {
EXPECT_EQ(i + 1, slice(i));
}
}
{
auto slice = reloaded.Slice(2, 4).flat<int>();
for (int i = 0; i < slice.size(); ++i) {
EXPECT_EQ(10 * (i + 1), slice(i));
}
}
}
{
Tensor reloaded(DT_FLOAT, {2, 4});
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("small", &shape, &type));
EXPECT_TRUE(shape.IsSameSize(reloaded.shape()));
EXPECT_EQ(DT_FLOAT, reloaded.dtype());
EXPECT_TRUE(reader.CopySliceData("small", TensorSlice(reloaded.dims()),
reloaded.flat<float>().data()));
for (int64_t i = 0; i < reloaded.NumElements(); ++i) {
EXPECT_EQ(static_cast<float>(i) / 10, reloaded.flat<float>().data()[i]);
}
}
}
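// Benchmarks the Save op writing a single large float tensor ((1 << 30) / 4
// elements, i.e. 1 GiB of data), with graph optimization turned down to L0
// so the checkpoint write dominates the measurement.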
void BM_LargeTensorWrite(::testing::benchmark::State& state) {
const int num_elements = state.range(0);
Tensor tensor(DT_FLOAT, TensorShape({num_elements}));
tensor.flat<float>().setZero();
const tstring temp_filename =
io::JoinPath(testing::TmpDir(), "benchmark_checkpoint");
auto root = Scope::NewRootScope().ExitOnError();
const tstring tensor_name = "my_tensor";
ops::Save give_me_a_name(root, temp_filename, {tensor_name}, {{tensor}});
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(tensorflow::OptimizerOptions::L0);
TF_CHECK_OK(root.status());
Graph* g = new Graph(OpRegistry::Global());
TF_CHECK_OK(root.ToGraph(g));
VLOG(1) << "Save op's output path: " << temp_filename;
VLOG(1) << "# nodes in Graph: " << g->num_nodes();
test::Benchmark("cpu", g, &session_options, nullptr, nullptr, "",
false)
.Run(state);
}
BENCHMARK(BM_LargeTensorWrite)->Arg((1 << 30) / 4);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/save_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/save_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dc72981d-1b15-4aa9-9f6b-e243a84e6ac8 | cpp | google/tensorstore | nditerable_array | tensorstore/internal/nditerable_array.cc | tensorstore/internal/nditerable_array_test.cc | #include "tensorstore/internal/nditerable_array.h"
#include <stddef.h>
#include <array>
#include <cassert>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array_util.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
namespace {
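// Computes the byte offset of the first iterated element (accounting for
// dimensions traversed in reverse) and fills `byte_strides` with the signed
// stride for each iteration dimension, using 0 for skipped dimensions.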
Index ComputeIteratorBaseOffsetAndByteStrides(
NDIterable::IterationLayoutView layout,
tensorstore::span<const Index> orig_byte_strides, Index* byte_strides) {
assert(layout.full_rank() == orig_byte_strides.size());
Index base_offset = 0;
for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) {
const int dir = layout.directions[dim];
if (dir == -1) {
base_offset = wrap_on_overflow::Add(
base_offset, wrap_on_overflow::Multiply(layout.shape[dim] - 1,
orig_byte_strides[dim]));
}
}
for (DimensionIndex i = 0; i < layout.iteration_rank(); ++i) {
const DimensionIndex dim = layout.iteration_dimensions[i];
if (dim == -1) {
byte_strides[i] = 0;
} else {
byte_strides[i] = orig_byte_strides[dim] * layout.directions[dim];
}
}
return base_offset;
}
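// Iterator for arrays with strided layouts.  `Rank` may be a compile-time
// constant (the 2-d and 3-d specializations selected in GetIterator) so the
// byte-stride array is stored inline, or -1 for dynamic rank, in which case
// the strides live in an arena-allocated vector.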
template <DimensionIndex Rank>
class StridedIteratorImpl;
template <DimensionIndex Rank = -1>
class StridedIteratorImplBase
: public NDIterator::Base<StridedIteratorImpl<Rank>> {
public:
explicit StridedIteratorImplBase(DimensionIndex rank,
ArenaAllocator<> allocator)
: allocator_(allocator) {}
ArenaAllocator<> get_allocator() const override { return allocator_; }
protected:
ArenaAllocator<> allocator_;
std::array<Index, Rank> byte_strides_;
};
template <>
class StridedIteratorImplBase<-1>
: public NDIterator::Base<StridedIteratorImpl<-1>> {
public:
explicit StridedIteratorImplBase(DimensionIndex rank,
ArenaAllocator<> allocator)
: byte_strides_(rank, allocator) {}
ArenaAllocator<> get_allocator() const override {
return byte_strides_.get_allocator();
}
protected:
std::vector<Index, ArenaAllocator<Index>> byte_strides_;
};
template <DimensionIndex Rank = -1>
class StridedIteratorImpl : public StridedIteratorImplBase<Rank> {
using Base = StridedIteratorImplBase<Rank>;
using Base::byte_strides_;
public:
StridedIteratorImpl(ByteStridedPointer<void> data,
tensorstore::span<const Index> orig_byte_strides,
NDIterable::IterationLayoutView layout,
ArenaAllocator<> allocator)
: Base(layout.iteration_rank(), allocator) {
data_ = data + ComputeIteratorBaseOffsetAndByteStrides(
layout, orig_byte_strides, byte_strides_.data());
}
bool GetBlock(tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferPointer* pointer,
absl::Status* status) override {
Index offset;
if constexpr (Rank == -1) {
offset = IndexInnerProduct(indices.size(), byte_strides_.data(),
indices.data());
} else {
offset = IndexInnerProduct<Rank>(byte_strides_.data(), indices.data());
}
*pointer = IterationBufferPointer{data_ + offset,
byte_strides_[byte_strides_.size() - 2],
byte_strides_[byte_strides_.size() - 1]};
return true;
}
private:
ByteStridedPointer<void> data_;
};
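// Iterator used when the consumer requires IterationBufferKind::kIndexed:
// it precomputes a table of per-element byte offsets for one block.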
class IndexedIteratorImpl : public NDIterator::Base<IndexedIteratorImpl> {
public:
IndexedIteratorImpl(ByteStridedPointer<void> data,
tensorstore::span<const Index> orig_byte_strides,
NDIterable::IterationBufferLayoutView layout,
ArenaAllocator<> allocator)
: block_inner_size_(layout.block_shape[1]),
buffer_(layout.iteration_rank() +
layout.block_shape[0] * layout.block_shape[1],
allocator) {
data_ = data + ComputeIteratorBaseOffsetAndByteStrides(
layout, orig_byte_strides, buffer_.data());
FillOffsetsArrayFromStride(buffer_[layout.iteration_rank() - 2],
buffer_[layout.iteration_rank() - 1],
layout.block_shape[0], layout.block_shape[1],
buffer_.data() + layout.iteration_rank());
}
ArenaAllocator<> get_allocator() const override {
return buffer_.get_allocator();
}
bool GetBlock(tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferPointer* pointer,
absl::Status* status) override {
*pointer = IterationBufferPointer{
data_ +
IndexInnerProduct(indices.size(), buffer_.data(), indices.data()),
block_inner_size_, buffer_.data() + indices.size()};
return true;
}
private:
ByteStridedPointer<void> data_;
Index block_inner_size_;
std::vector<Index, ArenaAllocator<Index>> buffer_;
};
class ArrayIterableImpl : public NDIterable::Base<ArrayIterableImpl> {
public:
ArrayIterableImpl(SharedOffsetArrayView<const void> array,
ArenaAllocator<> allocator)
: dtype_(array.dtype()),
byte_strides_(array.byte_strides().begin(), array.byte_strides().end(),
allocator) {
void* origin_pointer =
const_cast<void*>(array.byte_strided_origin_pointer().get());
data_ = std::shared_ptr<void>(std::move(array.pointer()), origin_pointer);
}
ArenaAllocator<> get_allocator() const override {
return byte_strides_.get_allocator();
}
int GetDimensionOrder(DimensionIndex dim_i,
DimensionIndex dim_j) const override {
return GetDimensionOrderFromByteStrides(byte_strides_[dim_i],
byte_strides_[dim_j]);
}
void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override {
UpdateDirectionPrefsFromByteStrides(byte_strides_, prefs);
}
bool CanCombineDimensions(DimensionIndex dim_i, int dir_i,
DimensionIndex dim_j, int dir_j,
Index size_j) const override {
return CanCombineStridedArrayDimensions(
byte_strides_[dim_i], dir_i, byte_strides_[dim_j], dir_j, size_j);
}
DataType dtype() const override { return dtype_; }
IterationBufferConstraint GetIterationBufferConstraint(
IterationLayoutView layout) const override {
const DimensionIndex last_dim = layout.iteration_dimensions.back();
return {(last_dim == -1 ||
(byte_strides_[last_dim] * layout.directions[last_dim] ==
dtype_->size))
? IterationBufferKind::kContiguous
: IterationBufferKind::kStrided,
false};
}
std::ptrdiff_t GetWorkingMemoryBytesPerElement(
IterationLayoutView layout,
IterationBufferKind buffer_kind) const override {
return buffer_kind == IterationBufferKind::kIndexed ? sizeof(Index) : 0;
}
NDIterator::Ptr GetIterator(
IterationBufferKindLayoutView layout) const override {
if (layout.buffer_kind == IterationBufferKind::kIndexed) {
return MakeUniqueWithVirtualIntrusiveAllocator<IndexedIteratorImpl>(
get_allocator(), data_.get(), byte_strides_, layout);
}
const auto make_strided_iterator = [&](auto rank) {
return MakeUniqueWithVirtualIntrusiveAllocator<
StridedIteratorImpl<decltype(rank)::value>>(
get_allocator(), data_.get(), byte_strides_, layout);
};
switch (layout.iteration_rank()) {
#ifndef TENSORSTORE_NDITERABLE_DISABLE_ARRAY_OPTIMIZE
case 2:
return make_strided_iterator(
std::integral_constant<DimensionIndex, 2>{});
case 3:
return make_strided_iterator(
std::integral_constant<DimensionIndex, 3>{});
#endif
default:
assert(layout.iteration_rank() > 1);
return make_strided_iterator(
std::integral_constant<DimensionIndex, -1>{});
}
}
private:
std::shared_ptr<void> data_;
DataType dtype_;
std::vector<Index, ArenaAllocator<Index>> byte_strides_;
};
}
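// Returns an NDIterable view of `array`, allocated on `arena`; the iterable
// shares ownership of the array's data.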
NDIterable::Ptr GetArrayNDIterable(SharedOffsetArrayView<const void> array,
Arena* arena) {
return MakeUniqueWithVirtualIntrusiveAllocator<ArrayIterableImpl>(
ArenaAllocator<>(arena), std::move(array));
}
}
} | #include "tensorstore/internal/nditerable_array.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Array;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::StridedLayout;
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::GetArrayNDIterable;
using ::tensorstore::internal::IterationBufferKind;
using ::tensorstore::internal::IterationBufferPointer;
using ::tensorstore::internal::MultiNDIterator;
using ::tensorstore::internal::NDIterable;
using DirectionPref = NDIterable::DirectionPref;
TEST(NDIterableArrayTest, Direct) {
uint8_t data[1000];
Array<uint8_t> array(data + 500,
StridedLayout<>({6, 3, 4, 5}, {-1, -6, 0, 3}));
Arena arena;
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
{
std::vector<DirectionPref> direction_prefs(4, DirectionPref::kCanSkip);
iterable->UpdateDirectionPrefs(direction_prefs.data());
EXPECT_THAT(direction_prefs,
::testing::ElementsAre(
DirectionPref::kBackward, DirectionPref::kBackward,
DirectionPref::kCanSkip, DirectionPref::kForward));
}
EXPECT_GT(iterable->GetDimensionOrder(0, 1), 0);
EXPECT_LT(iterable->GetDimensionOrder(0, 2), 0);
EXPECT_GT(iterable->GetDimensionOrder(0, 3), 0);
EXPECT_LT(iterable->GetDimensionOrder(1, 0), 0);
EXPECT_LT(iterable->GetDimensionOrder(1, 2), 0);
EXPECT_LT(iterable->GetDimensionOrder(1, 3), 0);
EXPECT_GT(iterable->GetDimensionOrder(2, 0), 0);
EXPECT_GT(iterable->GetDimensionOrder(2, 1), 0);
EXPECT_LT(iterable->GetDimensionOrder(1, 3), 0);
EXPECT_LT(iterable->GetDimensionOrder(3, 0), 0);
EXPECT_GT(iterable->GetDimensionOrder(3, 1), 0);
EXPECT_LT(iterable->GetDimensionOrder(3, 2), 0);
EXPECT_TRUE(iterable->CanCombineDimensions(1, 1,
0, 1,
6));
EXPECT_TRUE(iterable->CanCombineDimensions(1, -1,
0, -1,
6));
EXPECT_FALSE(iterable->CanCombineDimensions(1, 1,
0, -1,
6));
EXPECT_FALSE(iterable->CanCombineDimensions(1, 1,
0, 1,
5));
EXPECT_TRUE(iterable->CanCombineDimensions(3, 1,
0, -1,
3));
EXPECT_TRUE(iterable->CanCombineDimensions(3, -1,
0, 1,
3));
EXPECT_TRUE(iterable->CanCombineDimensions(1, -1,
3, 1,
2));
{
auto c = iterable->GetIterationBufferConstraint(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({1, 1, 1, 1}),
tensorstore::span<const DimensionIndex>({0, 1, 2, 3}),
tensorstore::span<const Index>({6, 3, 4, 5})});
EXPECT_EQ(IterationBufferKind::kStrided, c.min_buffer_kind);
EXPECT_FALSE(c.external);
}
{
auto c = iterable->GetIterationBufferConstraint(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({1, 1, 1, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})});
EXPECT_EQ(IterationBufferKind::kStrided, c.min_buffer_kind);
EXPECT_FALSE(c.external);
}
{
auto c = iterable->GetIterationBufferConstraint(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})});
EXPECT_EQ(IterationBufferKind::kContiguous, c.min_buffer_kind);
EXPECT_FALSE(c.external);
}
EXPECT_EQ(
0, iterable->GetWorkingMemoryBytesPerElement(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})},
IterationBufferKind::kContiguous));
EXPECT_EQ(
0, iterable->GetWorkingMemoryBytesPerElement(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})},
IterationBufferKind::kStrided));
EXPECT_EQ(
sizeof(Index),
iterable->GetWorkingMemoryBytesPerElement(
{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})},
IterationBufferKind::kIndexed));
{
auto iterator = iterable->GetIterator(
{{{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})},
{1, 3}},
IterationBufferKind::kContiguous});
IterationBufferPointer pointer;
absl::Status status;
EXPECT_TRUE(iterator->GetBlock(tensorstore::span<const Index>({2, 3, 1}),
{1, 3}, &pointer, &status));
EXPECT_EQ(&array((6 - 1) - 1, (3 - 1) - 2, 0, 3), pointer.pointer.get());
EXPECT_EQ(1, pointer.inner_byte_stride);
EXPECT_EQ(absl::OkStatus(), status);
}
{
auto iterator = iterable->GetIterator(
{{{tensorstore::span<const Index>({6, 3, 4, 5}),
tensorstore::span<const int>({-1, -1, 0, 1}),
tensorstore::span<const DimensionIndex>({1, 3, 0}),
tensorstore::span<const Index>({3, 5, 6})},
{1, 3}},
IterationBufferKind::kIndexed});
IterationBufferPointer pointer;
absl::Status status;
EXPECT_TRUE(iterator->GetBlock(tensorstore::span<const Index>({2, 3, 1}),
{1, 3}, &pointer, &status));
EXPECT_EQ(&array((6 - 1) - 1, (3 - 1) - 2, 0, 3), pointer.pointer.get());
EXPECT_THAT(tensorstore::span<const Index>(pointer.byte_offsets, 3),
::testing::ElementsAre(0, 1, 2));
EXPECT_EQ(absl::OkStatus(), status);
}
}
TEST(NDIterableArrayTest, RankZero) {
auto array = tensorstore::MakeScalarArray<int>(5);
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(
tensorstore::span<const Index>{}, {}, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, -1));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre());
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre());
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre());
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.ResetAtBeginning(), ::testing::ElementsAre(1, 1));
absl::Status status;
EXPECT_TRUE(multi_iterator.GetBlock({1, 1}, &status));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
TENSORSTORE_EXPECT_OK(status);
EXPECT_EQ(array.data(), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(0, multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({1, 1}), ::testing::ElementsAre(0, 1));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(1, 0));
}
#ifndef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
constexpr Index ExpectedBlockSize(Index block_size) { return block_size; }
#else
constexpr Index ExpectedBlockSize(Index block_size) { return 1; }
#endif
TEST(NDIterableArrayTest, RankOne) {
auto array = tensorstore::MakeArray<int>({1, 2, 3, 4, 5});
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(
tensorstore::span<const Index>({5}), {}, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(5));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 5));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(0));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape,
::testing::ElementsAre(1, ExpectedBlockSize(5)));
EXPECT_THAT(multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(1, ExpectedBlockSize(5)));
absl::Status status;
EXPECT_TRUE(multi_iterator.GetBlock({1, ExpectedBlockSize(5)}, &status));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(array.data(), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({1, 5}), ::testing::ElementsAre(0, 5));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, RankTwoContiguous) {
auto array = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(array.shape(), {},
{{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 1));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 6));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(0, 1));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape,
::testing::ElementsAre(1, ExpectedBlockSize(6)));
EXPECT_THAT(multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(1, ExpectedBlockSize(6)));
absl::Status status;
EXPECT_TRUE(multi_iterator.GetBlock({1, ExpectedBlockSize(6)}, &status));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(array.data(), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({1, 6}), ::testing::ElementsAre(0, 6));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, RankTwoTranspose) {
auto array = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::fortran_order, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(3, 2));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 0));
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(
multi_iterator.block_shape,
::testing::ElementsAre(ExpectedBlockSize(3), ExpectedBlockSize(2)));
EXPECT_THAT(
multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(ExpectedBlockSize(3), ExpectedBlockSize(2)));
absl::Status status;
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
#ifdef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
GTEST_SKIP();
#endif
EXPECT_TRUE(multi_iterator.GetBlock({3, 2}, &status));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(&array(0, 0), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int) * 3,
multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({3, 2}), ::testing::ElementsAre(0, 2));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(3, 0));
}
TEST(NDIterableArrayTest, SkipSize1Dimension) {
unsigned char data[300];
Arena arena;
Array<unsigned char> array = {&data[150],
StridedLayout<>({2, 1, 3}, {5, 10, -20})};
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
MultiNDIterator<1, true> multi_iterator(array.shape(), {},
{{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 1, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(2, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 0, -1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(3, 2));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 2, 0));
}
TEST(NDIterableArrayTest, SkipZeroByteStride) {
unsigned char data[300];
Arena arena;
Array<unsigned char> array = {&data[150], StridedLayout<>({2, 3}, {5, 0})};
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::skip_repeated_elements, {{iterable.get()}},
&arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 2));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, FortranOrderArray) {
auto array =
tensorstore::AllocateArray<int>({2, 3}, tensorstore::fortran_order);
Arena arena;
auto iterable = GetArrayNDIterable(array, &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::skip_repeated_elements, {{iterable.get()}},
&arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(1, 6));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 0));
}
TEST(NDIterableArrayTest, ReversedDimensions) {
auto orig_array = tensorstore::AllocateArray<int>({3, 4, 5});
auto orig_shape = orig_array.shape();
auto orig_strides = orig_array.byte_strides();
Array<int> array(
&orig_array(0, 4 - 1, 5 - 1),
StridedLayout<>({orig_shape[2], orig_shape[0], orig_shape[1]},
{-orig_strides[2], orig_strides[0], -orig_strides[1]}));
Arena arena;
auto iterable = GetArrayNDIterable(UnownedToShared(array), &arena);
MultiNDIterator<1, true> multi_iterator(
array.shape(), tensorstore::skip_repeated_elements, {{iterable.get()}},
&arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(5, 3, 4));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(-1, 0));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(-1, 1, -1));
EXPECT_THAT(multi_iterator.iteration_shape,
::testing::ElementsAre(1, 3 * 4 * 5));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(1, 2, 0));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(multi_iterator.block_shape,
::testing::ElementsAre(1, ExpectedBlockSize(3 * 4 * 5)));
EXPECT_THAT(multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(1, ExpectedBlockSize(3 * 4 * 5)));
absl::Status status;
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
EXPECT_TRUE(
multi_iterator.GetBlock({1, ExpectedBlockSize(3 * 4 * 5)}, &status));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(orig_array.byte_strided_pointer(),
multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
}
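// Iterating a C-order and a Fortran-order array together: no shared contiguous
// layout exists, so the iterator falls back to strided buffers and cannot
// coalesce dimensions.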
TEST(NDIterableArrayTest, MultipleArrays) {
auto array_a = tensorstore::AllocateArray<int>({2, 3}, tensorstore::c_order);
auto array_b =
tensorstore::AllocateArray<int>({2, 3}, tensorstore::fortran_order);
Arena arena;
auto iterable_a = GetArrayNDIterable(array_a, &arena);
auto iterable_b = GetArrayNDIterable(array_b, &arena);
MultiNDIterator<2, true> multi_iterator(
array_a.shape(), tensorstore::skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.iteration_dimensions,
::testing::ElementsAre(0, 1));
EXPECT_THAT(multi_iterator.directions, ::testing::ElementsAre(1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ::testing::ElementsAre(2, 3));
EXPECT_THAT(multi_iterator.full_iteration_dimensions,
::testing::ElementsAre(0, 1));
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_EQ(false, multi_iterator.empty);
EXPECT_THAT(
multi_iterator.block_shape,
::testing::ElementsAre(ExpectedBlockSize(2), ExpectedBlockSize(3)));
EXPECT_THAT(
multi_iterator.ResetAtBeginning(),
::testing::ElementsAre(ExpectedBlockSize(2), ExpectedBlockSize(3)));
absl::Status status;
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(0, 0));
#ifdef TENSORSTORE_INTERNAL_NDITERABLE_TEST_UNIT_BLOCK_SIZE
GTEST_SKIP();
#endif
EXPECT_TRUE(multi_iterator.GetBlock({2, 3}, &status));
EXPECT_EQ(absl::OkStatus(), status);
EXPECT_EQ(&array_a(0, 0), multi_iterator.block_pointers()[0].pointer);
EXPECT_EQ(&array_b(0, 0), multi_iterator.block_pointers()[1].pointer);
EXPECT_EQ(sizeof(int), multi_iterator.block_pointers()[0].inner_byte_stride);
EXPECT_EQ(sizeof(int) * 2,
multi_iterator.block_pointers()[1].inner_byte_stride);
EXPECT_THAT(multi_iterator.StepForward({2, 3}), ::testing::ElementsAre(0, 3));
EXPECT_THAT(multi_iterator.position(), ::testing::ElementsAre(2, 0));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7b28c983-e40f-40d6-8d0d-bb13ca2bdef9 | cpp | google/arolla | oblivious | arolla/decision_forest/pointwise_evaluation/oblivious.cc | arolla/decision_forest/pointwise_evaluation/oblivious_test.cc | #include "arolla/decision_forest/pointwise_evaluation/oblivious.h"
#include <cstddef>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_condition.h"
namespace arolla {
namespace {
// Note: also true for x == 0; the caller's depth computation handles that case.
bool IsPowerOf2(size_t x) { return (x & (x - 1)) == 0; }
struct StackEntry {
DecisionTreeNodeId node_id;
int depth;
};
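// Iterative pre-order traversal. Invokes callback(node_id, depth) for every
// node; the false branch is visited before the true branch (pushed last,
// popped first). Stops and returns false as soon as the callback rejects a
// node; returns true if the whole tree was accepted.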
template <typename CallbackFn>
bool TraverseTree(const DecisionTree& tree, CallbackFn callback) {
std::vector<StackEntry> stack;
stack.reserve(32);
stack.push_back(StackEntry{GetTreeRootId(tree), 0});
while (!stack.empty()) {
auto [node_id, depth] = stack.back();
stack.pop_back();
if (!callback(node_id, depth)) {
return false;
}
if (!node_id.is_leaf()) {
const auto& node = tree.split_nodes[node_id.split_node_index()];
stack.push_back(StackEntry{node.child_if_true, depth + 1});
stack.push_back(StackEntry{node.child_if_false, depth + 1});
}
}
return true;
}
}
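// Converts `tree` to an oblivious representation. The conversion succeeds only
// if the tree is perfectly balanced (all leaves at the same depth, so the
// region count is a power of two) and every level reuses a single shared split
// condition; otherwise std::nullopt is returned. Depth is recovered as
// log2(region_count) via __builtin_ctz, and each leaf adjustment is
// pre-multiplied by the tree weight.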
std::optional<ObliviousDecisionTree> ToObliviousTree(const DecisionTree& tree) {
size_t region_count = tree.adjustments.size();
if (!IsPowerOf2(region_count)) {
return std::nullopt;
}
size_t depth = region_count ? __builtin_ctz(region_count) : 0;
std::vector<std::shared_ptr<const SplitCondition>> layer_splits;
layer_splits.reserve(depth);
std::vector<float> adjustments;
adjustments.reserve(region_count);
auto process_node = [&](DecisionTreeNodeId node_id, int current_depth) {
if (node_id.is_leaf()) {
if (current_depth != depth) {
return false;
}
adjustments.push_back(tree.adjustments[node_id.adjustment_index()] *
tree.weight);
} else {
if (current_depth >= depth) {
return false;
}
const auto& node = tree.split_nodes[node_id.split_node_index()];
if (layer_splits.size() == current_depth) {
layer_splits.push_back(node.condition);
} else {
DCHECK_LT(current_depth, layer_splits.size());
if (*layer_splits[current_depth] != *node.condition) {
return false;
}
}
}
return true;
};
if (!TraverseTree(tree, process_node)) {
return std::nullopt;
}
return ObliviousDecisionTree{tree.tag, std::move(layer_splits),
std::move(adjustments)};
}
} | #include "arolla/decision_forest/pointwise_evaluation/oblivious.h"
#include <limits>
#include <memory>
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_condition.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
namespace arolla {
namespace {
using ::testing::ElementsAre;
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
constexpr float inf = std::numeric_limits<float>::infinity();
std::shared_ptr<SplitCondition> Cond(int input_id, float left, float right) {
return std::make_shared<IntervalSplitCondition>(input_id, left, right);
}
TEST(ObliviousTest, Errors) {
{
DecisionTree tree;
tree.split_nodes = {{A(0), S(1), Cond(0, -inf, 1.0)},
{A(1), A(2), Cond(0, -1.0, inf)}};
tree.adjustments = {0.0, 1.0, 2.0};
EXPECT_EQ(ToObliviousTree(tree), std::nullopt);
}
{
DecisionTree tree;
tree.split_nodes = {{A(0), S(1), Cond(0, -inf, 1.0)},
{S(2), A(2), Cond(0, -1.0, inf)},
{A(1), A(3), Cond(0, -1.0, inf)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
EXPECT_EQ(ToObliviousTree(tree), std::nullopt);
}
{
DecisionTree tree;
tree.split_nodes = {{S(2), S(1), Cond(0, -inf, 1.0)},
{A(1), A(2), Cond(0, -1.0, inf)},
{A(0), A(3), Cond(0, 1.0, inf)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
EXPECT_EQ(ToObliviousTree(tree), std::nullopt);
}
}
TEST(ObliviousTest, Ok) {
{
DecisionTree tree;
tree.adjustments = {2.0};
tree.weight = 0.5;
auto oblivious_tree = ToObliviousTree(tree);
ASSERT_TRUE(oblivious_tree.has_value());
EXPECT_THAT(oblivious_tree->layer_splits, ElementsAre());
EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(1.0));
}
{
DecisionTree tree;
tree.split_nodes = {{A(0), A(1), Cond(0, -inf, 1.0)}};
tree.adjustments = {7.0, 3.0};
tree.weight = 2.0;
auto oblivious_tree = ToObliviousTree(tree);
ASSERT_TRUE(oblivious_tree.has_value());
EXPECT_EQ(oblivious_tree->layer_splits.size(), 1);
EXPECT_EQ(*oblivious_tree->layer_splits[0], *Cond(0, -inf, 1.0));
EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(14.0, 6.0));
}
{
DecisionTree tree;
tree.split_nodes = {{S(2), S(1), Cond(0, -inf, 1.0)},
{A(1), A(2), Cond(0, -1.0, inf)},
{A(0), A(3), Cond(0, -1.0, inf)}};
tree.adjustments = {0.0, 1.0, 2.0, 3.0};
auto oblivious_tree = ToObliviousTree(tree);
ASSERT_TRUE(oblivious_tree.has_value());
EXPECT_EQ(oblivious_tree->layer_splits.size(), 2);
EXPECT_EQ(*oblivious_tree->layer_splits[0], *Cond(0, -inf, 1.0));
EXPECT_EQ(*oblivious_tree->layer_splits[1], *Cond(0, -1.0, inf));
EXPECT_THAT(oblivious_tree->adjustments, ElementsAre(0.0, 3.0, 1.0, 2.0));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/pointwise_evaluation/oblivious.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/pointwise_evaluation/oblivious_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
0cabf035-1484-418d-8410-4cd1d9607b33 | cpp | google/quiche | qpack_send_stream | quiche/quic/core/qpack/qpack_send_stream.cc | quiche/quic/core/qpack/qpack_send_stream_test.cc | #include "quiche/quic/core/qpack/qpack_send_stream.h"
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_session.h"
namespace quic {
QpackSendStream::QpackSendStream(QuicStreamId id, QuicSession* session,
uint64_t http3_stream_type)
: QuicStream(id, session, true, WRITE_UNIDIRECTIONAL),
http3_stream_type_(http3_stream_type),
stream_type_sent_(false) {}
void QpackSendStream::OnStreamReset(const QuicRstStreamFrame& /*frame*/) {
QUIC_BUG(quic_bug_10805_1)
<< "OnStreamReset() called for write unidirectional stream.";
}
bool QpackSendStream::OnStopSending(QuicResetStreamError /*code*/) {
stream_delegate()->OnStreamError(
QUIC_HTTP_CLOSED_CRITICAL_STREAM,
"STOP_SENDING received for QPACK send stream");
return false;
}
void QpackSendStream::WriteStreamData(absl::string_view data) {
QuicConnection::ScopedPacketFlusher flusher(session()->connection());
MaybeSendStreamType();
WriteOrBufferData(data, false, nullptr);
}
uint64_t QpackSendStream::NumBytesBuffered() const {
return QuicStream::BufferedDataBytes();
}
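// Lazily writes the HTTP/3 unidirectional stream type as a single varint the
// first time data is sent on this stream; subsequent calls are no-ops.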
void QpackSendStream::MaybeSendStreamType() {
if (!stream_type_sent_) {
char type[sizeof(http3_stream_type_)];
QuicDataWriter writer(ABSL_ARRAYSIZE(type), type);
writer.WriteVarInt62(http3_stream_type_);
WriteOrBufferData(absl::string_view(writer.data(), writer.length()), false,
nullptr);
stream_type_sent_ = true;
}
}
} | #include "quiche/quic/core/qpack/qpack_send_stream.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/http/http_constants.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_spdy_session_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
namespace {
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Invoke;
using ::testing::StrictMock;
struct TestParams {
TestParams(const ParsedQuicVersion& version, Perspective perspective)
: version(version), perspective(perspective) {
QUIC_LOG(INFO) << "TestParams: version: "
<< ParsedQuicVersionToString(version)
<< ", perspective: " << perspective;
}
TestParams(const TestParams& other)
: version(other.version), perspective(other.perspective) {}
ParsedQuicVersion version;
Perspective perspective;
};
std::string PrintToString(const TestParams& tp) {
return absl::StrCat(
ParsedQuicVersionToString(tp.version), "_",
(tp.perspective == Perspective::IS_CLIENT ? "client" : "server"));
}
std::vector<TestParams> GetTestParams() {
std::vector<TestParams> params;
  ParsedQuicVersionVector all_supported_versions = AllSupportedVersions();
  for (const auto& version : all_supported_versions) {
if (!VersionUsesHttp3(version.transport_version)) {
continue;
}
for (Perspective p : {Perspective::IS_SERVER, Perspective::IS_CLIENT}) {
params.emplace_back(version, p);
}
}
return params;
}
class QpackSendStreamTest : public QuicTestWithParam<TestParams> {
public:
QpackSendStreamTest()
: connection_(new StrictMock<MockQuicConnection>(
&helper_, &alarm_factory_, perspective(),
SupportedVersions(GetParam().version))),
session_(connection_) {
EXPECT_CALL(session_, OnCongestionWindowChange(_)).Times(AnyNumber());
session_.Initialize();
connection_->SetEncrypter(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<NullEncrypter>(connection_->perspective()));
if (connection_->version().SupportsAntiAmplificationLimit()) {
QuicConnectionPeer::SetAddressValidated(connection_);
}
QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow(
session_.config(), kMinimumFlowControlSendWindow);
QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesUnidirectional(
session_.config(), kMinimumFlowControlSendWindow);
QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(session_.config(), 3);
session_.OnConfigNegotiated();
qpack_send_stream_ =
QuicSpdySessionPeer::GetQpackDecoderSendStream(&session_);
ON_CALL(session_, WritevData(_, _, _, _, _, _))
.WillByDefault(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
}
Perspective perspective() const { return GetParam().perspective; }
MockQuicConnectionHelper helper_;
MockAlarmFactory alarm_factory_;
StrictMock<MockQuicConnection>* connection_;
StrictMock<MockQuicSpdySession> session_;
QpackSendStream* qpack_send_stream_;
};
INSTANTIATE_TEST_SUITE_P(Tests, QpackSendStreamTest,
::testing::ValuesIn(GetTestParams()),
::testing::PrintToStringParamName());
TEST_P(QpackSendStreamTest, WriteStreamTypeOnlyFirstTime) {
std::string data = "data";
EXPECT_CALL(session_, WritevData(_, 1, _, _, _, _));
EXPECT_CALL(session_, WritevData(_, data.length(), _, _, _, _));
qpack_send_stream_->WriteStreamData(absl::string_view(data));
EXPECT_CALL(session_, WritevData(_, data.length(), _, _, _, _));
qpack_send_stream_->WriteStreamData(absl::string_view(data));
EXPECT_CALL(session_, WritevData(_, _, _, _, _, _)).Times(0);
qpack_send_stream_->MaybeSendStreamType();
}
TEST_P(QpackSendStreamTest, StopSendingQpackStream) {
EXPECT_CALL(*connection_,
CloseConnection(QUIC_HTTP_CLOSED_CRITICAL_STREAM, _, _));
qpack_send_stream_->OnStopSending(
QuicResetStreamError::FromInternal(QUIC_STREAM_CANCELLED));
}
TEST_P(QpackSendStreamTest, ReceiveDataOnSendStream) {
QuicStreamFrame frame(qpack_send_stream_->id(), false, 0, "test");
EXPECT_CALL(
*connection_,
CloseConnection(QUIC_DATA_RECEIVED_ON_WRITE_UNIDIRECTIONAL_STREAM, _, _));
qpack_send_stream_->OnStreamFrame(frame);
}
TEST_P(QpackSendStreamTest, GetSendWindowSizeFromSession) {
EXPECT_NE(session_.GetFlowControlSendWindowSize(qpack_send_stream_->id()),
std::numeric_limits<QuicByteCount>::max());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_send_stream.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_send_stream_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
2bc2b96f-d502-4c20-9eae-a838f751e478 | cpp | google/libphonenumber | logger | cpp/src/phonenumbers/logger.cc | cpp/test/phonenumbers/logger_test.cc | #include "phonenumbers/logger.h"
#include <cstddef>
namespace i18n {
namespace phonenumbers {
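// Process-wide logger instance; remains null until an implementation is
// installed via Logger::set_logger_impl() (see default_logger.h).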
Logger* Logger::impl_ = NULL;
}
} | #include <string>
#include <gtest/gtest.h>
#include "phonenumbers/base/memory/scoped_ptr.h"
#include "phonenumbers/default_logger.h"
#include "phonenumbers/logger.h"
namespace i18n {
namespace phonenumbers {
class StringLogger : public Logger {
public:
virtual ~StringLogger() {}
const string& message() const {
return msg_;
}
virtual void WriteMessage(const string& msg) {
msg_ += msg;
}
private:
string msg_;
};
class LoggerTest : public ::testing::Test {
protected:
virtual void SetUp() {
test_logger_.reset(new StringLogger());
test_logger_->set_level(LOG_INFO);
old_logger_ = Logger::mutable_logger_impl();
Logger::set_logger_impl(test_logger_.get());
}
virtual void TearDown() {
Logger::set_logger_impl(old_logger_);
}
scoped_ptr<StringLogger> test_logger_;
Logger* old_logger_;
};
TEST_F(LoggerTest, LoggerIgnoresHigherVerbosity) {
LOG(LOG_DEBUG) << "Hello";
EXPECT_EQ("", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsNewline) {
LOG(LOG_INFO) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerLogsEqualVerbosity) {
LOG(LOG_INFO) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerLogsMoreSeriousMessages) {
LOG(LOG_WARNING) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerConcatenatesMessages) {
LOG(LOG_INFO) << "Hello";
ASSERT_EQ("Hello\n", test_logger_->message());
LOG(LOG_INFO) << " World";
EXPECT_EQ("Hello\n World\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerHandlesDifferentTypes) {
LOG(LOG_INFO) << "Hello " << 42;
EXPECT_EQ("Hello 42\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerIgnoresVerboseLogs) {
VLOG(1) << "Hello";
EXPECT_EQ("", test_logger_->message());
VLOG(0) << "Hello";
EXPECT_EQ("", test_logger_->message());
test_logger_->set_level(LOG_DEBUG);
VLOG(1) << "Hello";
EXPECT_EQ("", test_logger_->message());
VLOG(0) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerShowsDebugLogsAtDebugLevel) {
test_logger_->set_level(LOG_DEBUG);
LOG(LOG_DEBUG) << "Debug hello";
EXPECT_EQ("Debug hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsDebugLogsWhenVerbositySet) {
int verbose_log_level = 2;
test_logger_->set_verbosity_level(verbose_log_level);
LOG(LOG_DEBUG) << "Debug hello";
EXPECT_EQ("Debug hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsErrorLogsWhenVerbositySet) {
int verbose_log_level = 2;
test_logger_->set_verbosity_level(verbose_log_level);
LOG(ERROR) << "Error hello";
EXPECT_EQ("Error hello\n", test_logger_->message());
}
TEST_F(LoggerTest, LoggerOutputsLogsAccordingToVerbosity) {
int verbose_log_level = 2;
test_logger_->set_verbosity_level(verbose_log_level);
VLOG(verbose_log_level + 1) << "Hello 3";
EXPECT_EQ("", test_logger_->message());
VLOG(verbose_log_level - 1) << "Hello";
EXPECT_EQ("Hello\n", test_logger_->message());
VLOG(verbose_log_level) << "Hello 2";
EXPECT_EQ("Hello\nHello 2\n", test_logger_->message());
}
}
} | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/src/phonenumbers/logger.cc | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/test/phonenumbers/logger_test.cc | 9aa9aaa39ad8098aef56071d2df4f6f8d251c98b |
c75468e1-4f67-47b9-82b2-6d1dc12cb6bb | cpp | google/glog | logging | src/logging.cc | src/logging_unittest.cc | #define _GNU_SOURCE 1
#include "glog/logging.h"
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <iomanip>
#include <iterator>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <string>
#include <thread>
#include <tuple>
#include <type_traits>
#include <utility>
#include "config.h"
#include "glog/platform.h"
#include "glog/raw_logging.h"
#include "stacktrace.h"
#include "utilities.h"
#ifdef GLOG_OS_WINDOWS
# include "windows/dirent.h"
#else
# include <dirent.h>
#endif
#include <fcntl.h>
#include <sys/stat.h>
#include <cctype>
#include <cerrno>
#include <climits>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <regex>
#include <sstream>
#include <vector>
#ifdef HAVE__CHSIZE_S
# include <io.h>
#endif
#ifdef HAVE_PWD_H
# include <pwd.h>
#endif
#ifdef HAVE_SYS_UTSNAME_H
# include <sys/utsname.h>
#endif
#ifdef HAVE_SYSLOG_H
# include <syslog.h>
#endif
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifndef HAVE_MODE_T
typedef int mode_t;
#endif
using std::dec;
using std::hex;
using std::min;
using std::ostream;
using std::ostringstream;
using std::setfill;
using std::setw;
using std::string;
using std::vector;
using std::fclose;
using std::fflush;
using std::FILE;
using std::fprintf;
using std::fwrite;
using std::perror;
#ifdef __QNX__
using std::fdopen;
#endif
#define EXCLUSIVE_LOCKS_REQUIRED(mu)
enum { PATH_SEPARATOR = '/' };
#ifndef HAVE_PREAD
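// Emulates pread() with lseek(); unlike the real call this is not atomic with
// respect to the file offset, so it is only safe without concurrent access.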
static ssize_t pread(int fd, void* buf, size_t count, off_t offset) {
off_t orig_offset = lseek(fd, 0, SEEK_CUR);
if (orig_offset == (off_t)-1) return -1;
if (lseek(fd, offset, SEEK_CUR) == (off_t)-1) return -1;
ssize_t len = read(fd, buf, count);
if (len < 0) return len;
if (lseek(fd, orig_offset, SEEK_SET) == (off_t)-1) return -1;
return len;
}
#endif
#ifndef HAVE_PWRITE
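// Emulates pwrite(); same caveats as the pread() emulation above.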
static ssize_t pwrite(int fd, void* buf, size_t count, off_t offset) {
off_t orig_offset = lseek(fd, 0, SEEK_CUR);
if (orig_offset == (off_t)-1) return -1;
if (lseek(fd, offset, SEEK_CUR) == (off_t)-1) return -1;
ssize_t len = write(fd, buf, count);
if (len < 0) return len;
if (lseek(fd, orig_offset, SEEK_SET) == (off_t)-1) return -1;
return len;
}
#endif
static void GetHostName(string* hostname) {
#if defined(HAVE_SYS_UTSNAME_H)
struct utsname buf;
  if (uname(&buf) < 0) {
    // Ignore the failure and fall back to an empty node name.
    *buf.nodename = '\0';
  }
  *hostname = buf.nodename;
#elif defined(GLOG_OS_WINDOWS)
char buf[MAX_COMPUTERNAME_LENGTH + 1];
DWORD len = MAX_COMPUTERNAME_LENGTH + 1;
if (GetComputerNameA(buf, &len)) {
*hostname = buf;
} else {
hostname->clear();
}
#else
# warning There is no way to retrieve the host name.
*hostname = "(unknown)";
#endif
}
static bool TerminalSupportsColor() {
bool term_supports_color = false;
#ifdef GLOG_OS_WINDOWS
term_supports_color = true;
#else
const char* const term = getenv("TERM");
if (term != nullptr && term[0] != '\0') {
term_supports_color =
!strcmp(term, "xterm") || !strcmp(term, "xterm-color") ||
!strcmp(term, "xterm-256color") || !strcmp(term, "screen-256color") ||
!strcmp(term, "konsole") || !strcmp(term, "konsole-16color") ||
!strcmp(term, "konsole-256color") || !strcmp(term, "screen") ||
!strcmp(term, "linux") || !strcmp(term, "cygwin");
}
#endif
return term_supports_color;
}
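// GLOG_UNREACHABLE marks code paths that cannot be reached: std::unreachable()
// when available (C++23), assert(false) in debug builds, otherwise the best
// compiler-specific hint (__assume(false) / __builtin_unreachable()), and a
// no-op as the last resort.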
#if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
# define GLOG_UNREACHABLE std::unreachable()
#elif !defined(NDEBUG)
# define GLOG_UNREACHABLE assert(false)
#else
# if defined(_MSC_VER)
# define GLOG_UNREACHABLE __assume(false)
# elif defined(__has_builtin)
# if __has_builtin(unreachable)
# define GLOG_UNREACHABLE __builtin_unreachable()
# endif
# endif
# if !defined(GLOG_UNREACHABLE) && defined(__GNUG__)
# define GLOG_UNREACHABLE __builtin_unreachable()
# endif
# if !defined(GLOG_UNREACHABLE)
# define GLOG_UNREACHABLE
# endif
#endif
namespace google {
GLOG_NO_EXPORT
std::string StrError(int err);
enum GLogColor { COLOR_DEFAULT, COLOR_RED, COLOR_GREEN, COLOR_YELLOW };
static GLogColor SeverityToColor(LogSeverity severity) {
switch (severity) {
case GLOG_INFO:
return COLOR_DEFAULT;
case GLOG_WARNING:
return COLOR_YELLOW;
case GLOG_ERROR:
case GLOG_FATAL:
return COLOR_RED;
}
GLOG_UNREACHABLE;
}
#ifdef GLOG_OS_WINDOWS
static WORD GetColorAttribute(GLogColor color) {
switch (color) {
case COLOR_RED:
return FOREGROUND_RED;
case COLOR_GREEN:
return FOREGROUND_GREEN;
case COLOR_YELLOW:
return FOREGROUND_RED | FOREGROUND_GREEN;
case COLOR_DEFAULT:
break;
}
return 0;
}
#else
static const char* GetAnsiColorCode(GLogColor color) {
switch (color) {
case COLOR_RED:
return "1";
case COLOR_GREEN:
return "2";
case COLOR_YELLOW:
return "3";
case COLOR_DEFAULT:
return "";
};
return nullptr;
}
#endif
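// Returns the maximum log file size in MB; values of FLAGS_max_log_size
// outside (0, 4096) fall back to 1 MB.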
static uint32 MaxLogSize() {
return (FLAGS_max_log_size > 0 && FLAGS_max_log_size < 4096
? FLAGS_max_log_size
: 1);
}
const size_t LogMessage::kMaxLogMessageLen = 30000;
namespace logging {
namespace internal {
struct LogMessageData {
LogMessageData();
int preserved_errno_;
char message_text_[LogMessage::kMaxLogMessageLen + 1];
LogMessage::LogStream stream_;
LogSeverity severity_;
int line_;
void (LogMessage::*send_method_)();
  union {
    LogSink* sink_;                     // used by SendToSink/SendToSinkAndLog
    std::vector<std::string>* outvec_;  // used by SaveOrSendToLog
    std::string* message_;              // used by WriteToStringAndLog
  };
size_t num_prefix_chars_;
size_t num_chars_to_log_;
size_t num_chars_to_syslog_;
const char* basename_;
const char* fullname_;
bool has_been_flushed_;
bool first_fatal_;
std::thread::id thread_id_;
LogMessageData(const LogMessageData&) = delete;
LogMessageData& operator=(const LogMessageData&) = delete;
};
}
}
static std::mutex log_mutex;
int64 LogMessage::num_messages_[NUM_SEVERITIES] = {0, 0, 0, 0};
static bool stop_writing = false;
const char* const LogSeverityNames[] = {"INFO", "WARNING", "ERROR", "FATAL"};
static bool exit_on_dfatal = true;
const char* GetLogSeverityName(LogSeverity severity) {
return LogSeverityNames[severity];
}
static bool SendEmailInternal(const char* dest, const char* subject,
const char* body, bool use_logging);
base::Logger::~Logger() = default;
namespace {
constexpr std::intmax_t kSecondsInDay = 60 * 60 * 24;
constexpr std::intmax_t kSecondsInWeek = kSecondsInDay * 7;
class PrefixFormatter {
public:
PrefixFormatter(PrefixFormatterCallback callback, void* data) noexcept
: version{V2}, callback_v2{callback}, data{data} {}
void operator()(std::ostream& s, const LogMessage& message) const {
switch (version) {
case V2:
callback_v2(s, message, data);
break;
}
}
PrefixFormatter(const PrefixFormatter& other) = delete;
PrefixFormatter& operator=(const PrefixFormatter& other) = delete;
private:
enum Version { V2 } version;
union {
PrefixFormatterCallback callback_v2;
};
void* data;
};
std::unique_ptr<PrefixFormatter> g_prefix_formatter;
class LogFileObject : public base::Logger {
public:
LogFileObject(LogSeverity severity, const char* base_filename);
~LogFileObject() override;
  void Write(bool force_flush,
             const std::chrono::system_clock::time_point& timestamp,
             const char* message, size_t message_len) override;
void SetBasename(const char* basename);
void SetExtension(const char* ext);
void SetSymlinkBasename(const char* symlink_basename);
void Flush() override;
uint32 LogSize() override {
std::lock_guard<std::mutex> l{mutex_};
return file_length_;
}
void FlushUnlocked(const std::chrono::system_clock::time_point& now);
private:
static const uint32 kRolloverAttemptFrequency = 0x20;
std::mutex mutex_;
bool base_filename_selected_;
string base_filename_;
string symlink_basename_;
string filename_extension_;
std::unique_ptr<FILE> file_;
LogSeverity severity_;
uint32 bytes_since_flush_{0};
uint32 dropped_mem_length_{0};
uint32 file_length_{0};
unsigned int rollover_attempt_;
  std::chrono::system_clock::time_point next_flush_time_;
std::chrono::system_clock::time_point start_time_;
bool CreateLogfile(const string& time_pid_string);
};
class LogCleaner {
public:
LogCleaner();
void Enable(const std::chrono::minutes& overdue);
void Disable();
void Run(const std::chrono::system_clock::time_point& current_time,
bool base_filename_selected, const string& base_filename,
const string& filename_extension);
bool enabled() const { return enabled_; }
private:
vector<string> GetOverdueLogNames(
string log_directory,
const std::chrono::system_clock::time_point& current_time,
const string& base_filename, const string& filename_extension) const;
bool IsLogFromCurrentProject(const string& filepath,
const string& base_filename,
const string& filename_extension) const;
bool IsLogLastModifiedOver(
const string& filepath,
const std::chrono::system_clock::time_point& current_time) const;
bool enabled_{false};
std::chrono::minutes overdue_{
std::chrono::duration<int, std::ratio<kSecondsInWeek>>{1}};
  std::chrono::system_clock::time_point next_cleanup_time_;
};
LogCleaner log_cleaner;
}
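// Owns all logging state for one severity level: the backing LogFileObject
// (or a user-supplied base::Logger) plus the static registry of LogSinks and
// the stderr/email thresholds shared across severities. Instances are created
// lazily, one per severity.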
class LogDestination {
public:
friend class LogMessage;
friend void ReprintFatalMessage();
friend base::Logger* base::GetLogger(LogSeverity);
friend void base::SetLogger(LogSeverity, base::Logger*);
static void SetLogDestination(LogSeverity severity,
const char* base_filename);
static void SetLogSymlink(LogSeverity severity, const char* symlink_basename);
static void AddLogSink(LogSink* destination);
static void RemoveLogSink(LogSink* destination);
static void SetLogFilenameExtension(const char* filename_extension);
static void SetStderrLogging(LogSeverity min_severity);
static void SetEmailLogging(LogSeverity min_severity, const char* addresses);
static void LogToStderr();
static void FlushLogFiles(int min_severity);
static void FlushLogFilesUnsafe(int min_severity);
static const int kNetworkBytes = 1400;
static const string& hostname();
static const bool& terminal_supports_color() {
return terminal_supports_color_;
}
static void DeleteLogDestinations();
LogDestination(LogSeverity severity, const char* base_filename);
private:
#if defined(__cpp_lib_shared_mutex) && (__cpp_lib_shared_mutex >= 201505L)
using SinkMutex = std::shared_mutex;
using SinkLock = std::lock_guard<SinkMutex>;
#else
using SinkMutex = std::shared_timed_mutex;
using SinkLock = std::unique_lock<SinkMutex>;
#endif
friend std::default_delete<LogDestination>;
~LogDestination();
static void MaybeLogToStderr(LogSeverity severity, const char* message,
size_t message_len, size_t prefix_len);
static void MaybeLogToEmail(LogSeverity severity, const char* message,
size_t len);
static void MaybeLogToLogfile(
LogSeverity severity,
const std::chrono::system_clock::time_point& timestamp,
const char* message, size_t len);
static void LogToAllLogfiles(
LogSeverity severity,
const std::chrono::system_clock::time_point& timestamp,
const char* message, size_t len);
static void LogToSinks(LogSeverity severity, const char* full_filename,
const char* base_filename, int line,
const LogMessageTime& time, const char* message,
size_t message_len);
static void WaitForSinks(logging::internal::LogMessageData* data);
static LogDestination* log_destination(LogSeverity severity);
base::Logger* GetLoggerImpl() const { return logger_; }
void SetLoggerImpl(base::Logger* logger);
void ResetLoggerImpl() { SetLoggerImpl(&fileobject_); }
LogFileObject fileobject_;
base::Logger* logger_;
static std::unique_ptr<LogDestination> log_destinations_[NUM_SEVERITIES];
static std::underlying_type_t<LogSeverity> email_logging_severity_;
static string addresses_;
static string hostname_;
static bool terminal_supports_color_;
static std::unique_ptr<vector<LogSink*>> sinks_;
static SinkMutex sink_mutex_;
LogDestination(const LogDestination&) = delete;
LogDestination& operator=(const LogDestination&) = delete;
};
std::underlying_type_t<LogSeverity> LogDestination::email_logging_severity_ =
99999;
string LogDestination::addresses_;
string LogDestination::hostname_;
std::unique_ptr<vector<LogSink*>> LogDestination::sinks_;
LogDestination::SinkMutex LogDestination::sink_mutex_;
bool LogDestination::terminal_supports_color_ = TerminalSupportsColor();
const string& LogDestination::hostname() {
if (hostname_.empty()) {
GetHostName(&hostname_);
if (hostname_.empty()) {
hostname_ = "(unknown)";
}
}
return hostname_;
}
LogDestination::LogDestination(LogSeverity severity, const char* base_filename)
: fileobject_(severity, base_filename), logger_(&fileobject_) {}
LogDestination::~LogDestination() { ResetLoggerImpl(); }
void LogDestination::SetLoggerImpl(base::Logger* logger) {
if (logger_ == logger) {
return;
}
if (logger_ && logger_ != &fileobject_) {
delete logger_;
}
logger_ = logger;
}
inline void LogDestination::FlushLogFilesUnsafe(int min_severity) {
std::for_each(std::next(std::begin(log_destinations_), min_severity),
std::end(log_destinations_),
[now = std::chrono::system_clock::now()](
std::unique_ptr<LogDestination>& log) {
if (log != nullptr) {
log->fileobject_.FlushUnlocked(now);
}
});
}
inline void LogDestination::FlushLogFiles(int min_severity) {
std::lock_guard<std::mutex> l{log_mutex};
for (int i = min_severity; i < NUM_SEVERITIES; i++) {
LogDestination* log = log_destination(static_cast<LogSeverity>(i));
if (log != nullptr) {
log->logger_->Flush();
}
}
}
inline void LogDestination::SetLogDestination(LogSeverity severity,
const char* base_filename) {
std::lock_guard<std::mutex> l{log_mutex};
log_destination(severity)->fileobject_.SetBasename(base_filename);
}
inline void LogDestination::SetLogSymlink(LogSeverity severity,
const char* symlink_basename) {
CHECK_GE(severity, 0);
CHECK_LT(severity, NUM_SEVERITIES);
std::lock_guard<std::mutex> l{log_mutex};
log_destination(severity)->fileobject_.SetSymlinkBasename(symlink_basename);
}
inline void LogDestination::AddLogSink(LogSink* destination) {
SinkLock l{sink_mutex_};
if (sinks_ == nullptr) sinks_ = std::make_unique<std::vector<LogSink*>>();
sinks_->push_back(destination);
}
inline void LogDestination::RemoveLogSink(LogSink* destination) {
SinkLock l{sink_mutex_};
if (sinks_) {
sinks_->erase(std::remove(sinks_->begin(), sinks_->end(), destination),
sinks_->end());
}
}
inline void LogDestination::SetLogFilenameExtension(const char* ext) {
std::lock_guard<std::mutex> l{log_mutex};
for (int severity = 0; severity < NUM_SEVERITIES; ++severity) {
log_destination(static_cast<LogSeverity>(severity))
->fileobject_.SetExtension(ext);
}
}
inline void LogDestination::SetStderrLogging(LogSeverity min_severity) {
std::lock_guard<std::mutex> l{log_mutex};
FLAGS_stderrthreshold = min_severity;
}
inline void LogDestination::LogToStderr() {
SetStderrLogging(GLOG_INFO);
for (int i = 0; i < NUM_SEVERITIES; ++i) {
SetLogDestination(static_cast<LogSeverity>(i),
"");
}
}
inline void LogDestination::SetEmailLogging(LogSeverity min_severity,
const char* addresses) {
std::lock_guard<std::mutex> l{log_mutex};
LogDestination::email_logging_severity_ = min_severity;
LogDestination::addresses_ = addresses;
}
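// Writes the message to `output` (stdout or stderr), wrapping it in a color
// matching its severity when the corresponding --colorlogtostderr /
// --colorlogtostdout flag is set and the terminal supports color: console
// text attributes on Windows, ANSI escape sequences elsewhere.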
static void ColoredWriteToStderrOrStdout(FILE* output, LogSeverity severity,
const char* message, size_t len) {
bool is_stdout = (output == stdout);
const GLogColor color = (LogDestination::terminal_supports_color() &&
((!is_stdout && FLAGS_colorlogtostderr) ||
(is_stdout && FLAGS_colorlogtostdout)))
? SeverityToColor(severity)
: COLOR_DEFAULT;
if (COLOR_DEFAULT == color) {
fwrite(message, len, 1, output);
return;
}
#ifdef GLOG_OS_WINDOWS
const HANDLE output_handle =
GetStdHandle(is_stdout ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
CONSOLE_SCREEN_BUFFER_INFO buffer_info;
GetConsoleScreenBufferInfo(output_handle, &buffer_info);
const WORD old_color_attrs = buffer_info.wAttributes;
fflush(output);
SetConsoleTextAttribute(output_handle,
GetColorAttribute(color) | FOREGROUND_INTENSITY);
fwrite(message, len, 1, output);
fflush(output);
SetConsoleTextAttribute(output_handle, old_color_attrs);
#else
fprintf(output, "\033[0;3%sm", GetAnsiColorCode(color));
fwrite(message, len, 1, output);
fprintf(output, "\033[m");
#endif
}
static void ColoredWriteToStdout(LogSeverity severity, const char* message,
size_t len) {
FILE* output = stdout;
if (severity >= FLAGS_stderrthreshold) {
output = stderr;
}
ColoredWriteToStderrOrStdout(output, severity, message, len);
}
static void ColoredWriteToStderr(LogSeverity severity, const char* message,
size_t len) {
ColoredWriteToStderrOrStdout(stderr, severity, message, len);
}
static void WriteToStderr(const char* message, size_t len) {
fwrite(message, len, 1, stderr);
}
inline void LogDestination::MaybeLogToStderr(LogSeverity severity,
const char* message,
size_t message_len,
size_t prefix_len) {
if ((severity >= FLAGS_stderrthreshold) || FLAGS_alsologtostderr) {
ColoredWriteToStderr(severity, message, message_len);
AlsoErrorWrite(severity,
glog_internal_namespace_::ProgramInvocationShortName(),
message + prefix_len);
}
}
inline void LogDestination::MaybeLogToEmail(LogSeverity severity,
const char* message, size_t len) {
if (severity >= email_logging_severity_ || severity >= FLAGS_logemaillevel) {
string to(FLAGS_alsologtoemail);
if (!addresses_.empty()) {
if (!to.empty()) {
to += ",";
}
to += addresses_;
}
const string subject(
string("[LOG] ") + LogSeverityNames[severity] + ": " +
glog_internal_namespace_::ProgramInvocationShortName());
string body(hostname());
body += "\n\n";
body.append(message, len);
SendEmailInternal(to.c_str(), subject.c_str(), body.c_str(), false);
}
}
inline void LogDestination::MaybeLogToLogfile(
LogSeverity severity,
const std::chrono::system_clock::time_point& timestamp, const char* message,
size_t len) {
const bool should_flush = severity > FLAGS_logbuflevel;
LogDestination* destination = log_destination(severity);
destination->logger_->Write(should_flush, timestamp, message, len);
}
inline void LogDestination::LogToAllLogfiles(
LogSeverity severity,
const std::chrono::system_clock::time_point& timestamp, const char* message,
size_t len) {
if (FLAGS_logtostdout) {
ColoredWriteToStdout(severity, message, len);
} else if (FLAGS_logtostderr) {
ColoredWriteToStderr(severity, message, len);
} else {
for (int i = severity; i >= 0; --i) {
LogDestination::MaybeLogToLogfile(static_cast<LogSeverity>(i), timestamp,
message, len);
}
}
}
inline void LogDestination::LogToSinks(LogSeverity severity,
const char* full_filename,
const char* base_filename, int line,
const LogMessageTime& time,
const char* message,
size_t message_len) {
std::shared_lock<SinkMutex> l{sink_mutex_};
if (sinks_) {
for (size_t i = sinks_->size(); i-- > 0;) {
(*sinks_)[i]->send(severity, full_filename, base_filename, line, time,
message, message_len);
}
}
}
inline void LogDestination::WaitForSinks(
logging::internal::LogMessageData* data) {
std::shared_lock<SinkMutex> l{sink_mutex_};
if (sinks_) {
for (size_t i = sinks_->size(); i-- > 0;) {
(*sinks_)[i]->WaitTillSent();
}
}
const bool send_to_sink =
(data->send_method_ == &LogMessage::SendToSink) ||
(data->send_method_ == &LogMessage::SendToSinkAndLog);
if (send_to_sink && data->sink_ != nullptr) {
data->sink_->WaitTillSent();
}
}
std::unique_ptr<LogDestination>
LogDestination::log_destinations_[NUM_SEVERITIES];
inline LogDestination* LogDestination::log_destination(LogSeverity severity) {
if (log_destinations_[severity] == nullptr) {
log_destinations_[severity] =
std::make_unique<LogDestination>(severity, nullptr);
}
return log_destinations_[severity].get();
}
void LogDestination::DeleteLogDestinations() {
for (auto& log_destination : log_destinations_) {
log_destination.reset();
}
SinkLock l{sink_mutex_};
sinks_.reset();
}
namespace {
std::string g_application_fingerprint;
}
void SetApplicationFingerprint(const std::string& fingerprint) {
g_application_fingerprint = fingerprint;
}
namespace {
#ifdef GLOG_OS_WINDOWS
const char possible_dir_delim[] = {'\\', '/'};
#else
const char possible_dir_delim[] = {'/'};
#endif
string PrettyDuration(const std::chrono::duration<int>& secs) {
std::stringstream result;
int mins = secs.count() / 60;
int hours = mins / 60;
mins = mins % 60;
int s = secs.count() % 60;
result.fill('0');
result << hours << ':' << setw(2) << mins << ':' << setw(2) << s;
return result.str();
}
LogFileObject::LogFileObject(LogSeverity severity, const char* base_filename)
: base_filename_selected_(base_filename != nullptr),
base_filename_((base_filename != nullptr) ? base_filename : ""),
symlink_basename_(glog_internal_namespace_::ProgramInvocationShortName()),
filename_extension_(),
severity_(severity),
rollover_attempt_(kRolloverAttemptFrequency - 1),
start_time_(std::chrono::system_clock::now()) {}
LogFileObject::~LogFileObject() {
std::lock_guard<std::mutex> l{mutex_};
file_ = nullptr;
}
void LogFileObject::SetBasename(const char* basename) {
std::lock_guard<std::mutex> l{mutex_};
base_filename_selected_ = true;
if (base_filename_ != basename) {
if (file_ != nullptr) {
file_ = nullptr;
rollover_attempt_ = kRolloverAttemptFrequency - 1;
}
base_filename_ = basename;
}
}
void LogFileObject::SetExtension(const char* ext) {
std::lock_guard<std::mutex> l{mutex_};
if (filename_extension_ != ext) {
if (file_ != nullptr) {
file_ = nullptr;
rollover_attempt_ = kRolloverAttemptFrequency - 1;
}
filename_extension_ = ext;
}
}
void LogFileObject::SetSymlinkBasename(const char* symlink_basename) {
std::lock_guard<std::mutex> l{mutex_};
symlink_basename_ = symlink_basename;
}
void LogFileObject::Flush() {
std::lock_guard<std::mutex> l{mutex_};
FlushUnlocked(std::chrono::system_clock::now());
}
void LogFileObject::FlushUnlocked(
const std::chrono::system_clock::time_point& now) {
if (file_ != nullptr) {
fflush(file_.get());
bytes_since_flush_ = 0;
}
next_flush_time_ =
now + std::chrono::duration_cast<std::chrono::system_clock::duration>(
std::chrono::duration<int32>{FLAGS_logbufsecs});
}
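// Creates the log file, appending the time/pid suffix when
// --timestamp_in_logfile_name is set (and opening with O_EXCL so an existing
// file is never clobbered), acquires a write lock where fcntl() is available,
// and re-points the "<symlink_basename>.<SEVERITY>" symlink at the new file.
// Returns false if the file cannot be created or locked.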
bool LogFileObject::CreateLogfile(const string& time_pid_string) {
string string_filename = base_filename_;
if (FLAGS_timestamp_in_logfile_name) {
string_filename += time_pid_string;
}
string_filename += filename_extension_;
const char* filename = string_filename.c_str();
int flags = O_WRONLY | O_CREAT;
if (FLAGS_timestamp_in_logfile_name) {
flags = flags | O_EXCL;
}
FileDescriptor fd{
open(filename, flags, static_cast<mode_t>(FLAGS_logfile_mode))};
if (!fd) return false;
#ifdef HAVE_FCNTL
fcntl(fd.get(), F_SETFD, FD_CLOEXEC);
static struct flock w_lock;
w_lock.l_type = F_WRLCK;
w_lock.l_start = 0;
w_lock.l_whence = SEEK_SET;
w_lock.l_len = 0;
int wlock_ret = fcntl(fd.get(), F_SETLK, &w_lock);
if (wlock_ret == -1) {
return false;
}
#endif
file_.reset(fdopen(fd.release(), "a"));
if (file_ == nullptr) {
if (FLAGS_timestamp_in_logfile_name) {
unlink(filename);
}
return false;
}
#ifdef GLOG_OS_WINDOWS
if (!FLAGS_timestamp_in_logfile_name) {
if (fseek(file_.get(), 0, SEEK_END) != 0) {
return false;
}
}
#endif
if (!symlink_basename_.empty()) {
const char* slash = strrchr(filename, PATH_SEPARATOR);
const string linkname =
symlink_basename_ + '.' + LogSeverityNames[severity_];
string linkpath;
if (slash)
linkpath = string(
filename, static_cast<size_t>(slash - filename + 1));
linkpath += linkname;
unlink(linkpath.c_str());
#if defined(GLOG_OS_WINDOWS)
    // Symlinks are not created on Windows.
#elif defined(HAVE_UNISTD_H)
const char* linkdest = slash ? (slash + 1) : filename;
    if (symlink(linkdest, linkpath.c_str()) != 0) {
      // Silently ignore failures.
    }
if (!FLAGS_log_link.empty()) {
linkpath = FLAGS_log_link + "/" + linkname;
unlink(linkpath.c_str());
      if (symlink(filename, linkpath.c_str()) != 0) {
        // Silently ignore failures.
      }
}
#endif
}
return true;
}
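// Appends the message to the log file: rolls the file over when it exceeds
// --max_log_size MB or the PID changes, (re)creates it on demand with an
// optional header, honors --stop_logging_if_full_disk, and flushes when
// forced, after ~1 MB of unflushed data, or every --logbufsecs seconds.
// Overdue-log cleanup runs on the way out when the cleaner is enabled.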
void LogFileObject::Write(
bool force_flush, const std::chrono::system_clock::time_point& timestamp,
const char* message, size_t message_len) {
std::lock_guard<std::mutex> l{mutex_};
if (base_filename_selected_ && base_filename_.empty()) {
return;
}
auto cleanupLogs = [this, current_time = timestamp] {
if (log_cleaner.enabled()) {
log_cleaner.Run(current_time, base_filename_selected_, base_filename_,
filename_extension_);
}
};
ScopedExit<decltype(cleanupLogs)> cleanupAtEnd{cleanupLogs};
if (file_length_ >> 20U >= MaxLogSize() || PidHasChanged()) {
file_ = nullptr;
file_length_ = bytes_since_flush_ = dropped_mem_length_ = 0;
rollover_attempt_ = kRolloverAttemptFrequency - 1;
}
if (file_ == nullptr) {
if (++rollover_attempt_ != kRolloverAttemptFrequency) return;
rollover_attempt_ = 0;
struct ::tm tm_time;
std::time_t t = std::chrono::system_clock::to_time_t(timestamp);
if (FLAGS_log_utc_time) {
gmtime_r(&t, &tm_time);
} else {
localtime_r(&t, &tm_time);
}
ostringstream time_pid_stream;
time_pid_stream.fill('0');
time_pid_stream << 1900 + tm_time.tm_year << setw(2) << 1 + tm_time.tm_mon
<< setw(2) << tm_time.tm_mday << '-' << setw(2)
<< tm_time.tm_hour << setw(2) << tm_time.tm_min << setw(2)
<< tm_time.tm_sec << '.' << GetMainThreadPid();
const string& time_pid_string = time_pid_stream.str();
if (base_filename_selected_) {
if (!CreateLogfile(time_pid_string)) {
perror("Could not create log file");
fprintf(stderr, "COULD NOT CREATE LOGFILE '%s'!\n",
time_pid_string.c_str());
return;
}
} else {
string stripped_filename(
glog_internal_namespace_::ProgramInvocationShortName());
string hostname;
GetHostName(&hostname);
string uidname = MyUserName();
if (uidname.empty()) uidname = "invalid-user";
stripped_filename = stripped_filename + '.' + hostname + '.' + uidname +
".log." + LogSeverityNames[severity_] + '.';
const vector<string>& log_dirs = GetLoggingDirectories();
bool success = false;
for (const auto& log_dir : log_dirs) {
base_filename_ = log_dir + "/" + stripped_filename;
if (CreateLogfile(time_pid_string)) {
success = true;
break;
}
}
if (success == false) {
perror("Could not create logging file");
fprintf(stderr, "COULD NOT CREATE A LOGGINGFILE %s!",
time_pid_string.c_str());
return;
}
}
if (FLAGS_log_file_header) {
ostringstream file_header_stream;
file_header_stream.fill('0');
file_header_stream << "Log file created at: " << 1900 + tm_time.tm_year
<< '/' << setw(2) << 1 + tm_time.tm_mon << '/'
<< setw(2) << tm_time.tm_mday << ' ' << setw(2)
<< tm_time.tm_hour << ':' << setw(2) << tm_time.tm_min
<< ':' << setw(2) << tm_time.tm_sec
<< (FLAGS_log_utc_time ? " UTC\n" : "\n")
<< "Running on machine: " << LogDestination::hostname()
<< '\n';
if (!g_application_fingerprint.empty()) {
file_header_stream << "Application fingerprint: "
<< g_application_fingerprint << '\n';
}
const char* const date_time_format = FLAGS_log_year_in_prefix
? "yyyymmdd hh:mm:ss.uuuuuu"
: "mmdd hh:mm:ss.uuuuuu";
file_header_stream
<< "Running duration (h:mm:ss): "
<< PrettyDuration(
std::chrono::duration_cast<std::chrono::duration<int>>(
timestamp - start_time_))
<< '\n'
<< "Log line format: [IWEF]" << date_time_format << " "
<< "threadid file:line] msg" << '\n';
const string& file_header_string = file_header_stream.str();
const size_t header_len = file_header_string.size();
fwrite(file_header_string.data(), 1, header_len, file_.get());
file_length_ += header_len;
bytes_since_flush_ += header_len;
}
}
if (!stop_writing) {
errno = 0;
fwrite(message, 1, message_len, file_.get());
if (FLAGS_stop_logging_if_full_disk &&
errno == ENOSPC) {
stop_writing = true;
return;
} else {
file_length_ += message_len;
bytes_since_flush_ += message_len;
}
} else {
if (timestamp >= next_flush_time_) {
stop_writing = false;
}
return;
}
if (force_flush || (bytes_since_flush_ >= 1000000) ||
(timestamp >= next_flush_time_)) {
FlushUnlocked(timestamp);
#ifdef GLOG_OS_LINUX
if (FLAGS_drop_log_memory && file_length_ >= (3U << 20U)) {
uint32 total_drop_length =
(file_length_ & ~((1U << 20U) - 1U)) - (1U << 20U);
uint32 this_drop_length = total_drop_length - dropped_mem_length_;
if (this_drop_length >= (2U << 20U)) {
# if defined(HAVE_POSIX_FADVISE)
posix_fadvise(
fileno(file_.get()), static_cast<off_t>(dropped_mem_length_),
static_cast<off_t>(this_drop_length), POSIX_FADV_DONTNEED);
# endif
dropped_mem_length_ = total_drop_length;
}
}
#endif
}
}
LogCleaner::LogCleaner() = default;
void LogCleaner::Enable(const std::chrono::minutes& overdue) {
enabled_ = true;
overdue_ = overdue;
}
void LogCleaner::Disable() { enabled_ = false; }
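// Runs at most once every --logcleansecs seconds: scans the relevant log
// directories and unlinks files from this project whose last-modified time is
// older than the configured overdue threshold.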
void LogCleaner::Run(const std::chrono::system_clock::time_point& current_time,
bool base_filename_selected, const string& base_filename,
const string& filename_extension) {
assert(enabled_);
assert(!base_filename_selected || !base_filename.empty());
if (current_time < next_cleanup_time_) {
return;
}
next_cleanup_time_ =
current_time +
std::chrono::duration_cast<std::chrono::system_clock::duration>(
std::chrono::duration<int32>{FLAGS_logcleansecs});
vector<string> dirs;
if (!base_filename_selected) {
dirs = GetLoggingDirectories();
} else {
size_t pos = base_filename.find_last_of(possible_dir_delim, string::npos,
sizeof(possible_dir_delim));
if (pos != string::npos) {
string dir = base_filename.substr(0, pos + 1);
dirs.push_back(dir);
} else {
dirs.emplace_back(".");
}
}
for (const std::string& dir : dirs) {
vector<string> logs = GetOverdueLogNames(dir, current_time, base_filename,
filename_extension);
for (const std::string& log : logs) {
int result = unlink(log.c_str());
if (result != 0) {
perror(("Could not remove overdue log " + log).c_str());
}
}
}
}
vector<string> LogCleaner::GetOverdueLogNames(
string log_directory,
const std::chrono::system_clock::time_point& current_time,
const string& base_filename, const string& filename_extension) const {
vector<string> overdue_log_names;
DIR* dir;
struct dirent* ent;
if ((dir = opendir(log_directory.c_str()))) {
while ((ent = readdir(dir))) {
if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0) {
continue;
}
string filepath = ent->d_name;
const char* const dir_delim_end =
possible_dir_delim + sizeof(possible_dir_delim);
if (!log_directory.empty() &&
std::find(possible_dir_delim, dir_delim_end,
log_directory[log_directory.size() - 1]) != dir_delim_end) {
filepath = log_directory + filepath;
}
if (IsLogFromCurrentProject(filepath, base_filename,
filename_extension) &&
IsLogLastModifiedOver(filepath, current_time)) {
overdue_log_names.push_back(filepath);
}
}
closedir(dir);
}
return overdue_log_names;
}
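// Returns true if `filepath` matches this process's log naming scheme: the
// (delimiter-normalized) base filename, optionally followed by the extension,
// then a suffix of the exact form YYYYMMDD-HHMMSS.<pid>, with digits, '-',
// and '.' at the expected offsets.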
bool LogCleaner::IsLogFromCurrentProject(
const string& filepath, const string& base_filename,
const string& filename_extension) const {
string cleaned_base_filename;
const char* const dir_delim_end =
possible_dir_delim + sizeof(possible_dir_delim);
size_t real_filepath_size = filepath.size();
for (char c : base_filename) {
if (cleaned_base_filename.empty()) {
cleaned_base_filename += c;
} else if (std::find(possible_dir_delim, dir_delim_end, c) ==
dir_delim_end ||
(!cleaned_base_filename.empty() &&
c != cleaned_base_filename[cleaned_base_filename.size() - 1])) {
cleaned_base_filename += c;
}
}
if (filepath.find(cleaned_base_filename) != 0) {
return false;
}
if (!filename_extension.empty()) {
if (cleaned_base_filename.size() >= real_filepath_size) {
return false;
}
string ext = filepath.substr(cleaned_base_filename.size(),
filename_extension.size());
if (ext == filename_extension) {
cleaned_base_filename += filename_extension;
} else {
if (filename_extension.size() >= real_filepath_size) {
return false;
}
real_filepath_size = filepath.size() - filename_extension.size();
if (filepath.substr(real_filepath_size) != filename_extension) {
return false;
}
}
}
for (size_t i = cleaned_base_filename.size(); i < real_filepath_size; i++) {
const char& c = filepath[i];
if (i <= cleaned_base_filename.size() + 7) {
if (c < '0' || c > '9') {
return false;
}
} else if (i == cleaned_base_filename.size() + 8) {
if (c != '-') {
return false;
}
} else if (i <= cleaned_base_filename.size() + 14) {
if (c < '0' || c > '9') {
return false;
}
} else if (i == cleaned_base_filename.size() + 15) {
if (c != '.') {
return false;
}
} else if (i >= cleaned_base_filename.size() + 16) {
if (c < '0' || c > '9') {
return false;
}
}
}
return true;
}
bool LogCleaner::IsLogLastModifiedOver(
const string& filepath,
const std::chrono::system_clock::time_point& current_time) const {
struct stat file_stat;
if (stat(filepath.c_str(), &file_stat) == 0) {
const auto last_modified_time =
std::chrono::system_clock::from_time_t(file_stat.st_mtime);
const auto diff = current_time - last_modified_time;
return diff >= overdue_;
}
return false;
}
}
static std::mutex fatal_msg_lock;
static logging::internal::CrashReason crash_reason;
static bool fatal_msg_exclusive = true;
static logging::internal::LogMessageData fatal_msg_data_exclusive;
static logging::internal::LogMessageData fatal_msg_data_shared;
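// With thread-local storage, each thread keeps one statically allocated
// LogMessageData slot so the common case of a single in-flight LogMessage per
// thread needs no heap allocation; a nested message on the same thread falls
// back to operator new. FATAL messages instead use the two static buffers
// above so the first fatal message survives for the crash report.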
#ifdef GLOG_THREAD_LOCAL_STORAGE
static thread_local bool thread_data_available = true;
# if defined(__cpp_lib_byte) && __cpp_lib_byte >= 201603L
alignas(logging::internal::LogMessageData) static thread_local std::byte
thread_msg_data[sizeof(logging::internal::LogMessageData)];
# else
static thread_local std::aligned_storage<
sizeof(logging::internal::LogMessageData),
alignof(logging::internal::LogMessageData)>::type thread_msg_data;
# endif
#endif
logging::internal::LogMessageData::LogMessageData()
: stream_(message_text_, LogMessage::kMaxLogMessageLen, 0) {}
LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
int64 ctr, void (LogMessage::*send_method)())
: allocated_(nullptr) {
Init(file, line, severity, send_method);
data_->stream_.set_ctr(ctr);
}
LogMessage::LogMessage(const char* file, int line,
const logging::internal::CheckOpString& result)
: allocated_(nullptr) {
Init(file, line, GLOG_FATAL, &LogMessage::SendToLog);
stream() << "Check failed: " << (*result.str_) << " ";
}
LogMessage::LogMessage(const char* file, int line) : allocated_(nullptr) {
Init(file, line, GLOG_INFO, &LogMessage::SendToLog);
}
LogMessage::LogMessage(const char* file, int line, LogSeverity severity)
: allocated_(nullptr) {
Init(file, line, severity, &LogMessage::SendToLog);
}
LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
LogSink* sink, bool also_send_to_log)
: allocated_(nullptr) {
Init(file, line, severity,
also_send_to_log ? &LogMessage::SendToSinkAndLog
: &LogMessage::SendToSink);
data_->sink_ = sink;
}
LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
vector<string>* outvec)
: allocated_(nullptr) {
Init(file, line, severity, &LogMessage::SaveOrSendToLog);
data_->outvec_ = outvec;
}
LogMessage::LogMessage(const char* file, int line, LogSeverity severity,
string* message)
: allocated_(nullptr) {
Init(file, line, severity, &LogMessage::WriteToStringAndLog);
data_->message_ = message;
}
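// Shared constructor body: selects storage for LogMessageData (thread-local
// slot, heap, or the exclusive/shared FATAL buffers), records errno, the
// timestamp, and the caller's thread id, then writes the standard
// "[IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] " prefix (year included
// with --log_year_in_prefix) unless --log_prefix is off or a custom prefix
// formatter is installed.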
void LogMessage::Init(const char* file, int line, LogSeverity severity,
void (LogMessage::*send_method)()) {
allocated_ = nullptr;
if (severity != GLOG_FATAL || !exit_on_dfatal) {
#ifdef GLOG_THREAD_LOCAL_STORAGE
if (thread_data_available) {
thread_data_available = false;
data_ = new (&thread_msg_data) logging::internal::LogMessageData;
} else {
allocated_ = new logging::internal::LogMessageData();
data_ = allocated_;
}
#else
allocated_ = new logging::internal::LogMessageData();
data_ = allocated_;
#endif
data_->first_fatal_ = false;
} else {
std::lock_guard<std::mutex> l{fatal_msg_lock};
if (fatal_msg_exclusive) {
fatal_msg_exclusive = false;
data_ = &fatal_msg_data_exclusive;
data_->first_fatal_ = true;
} else {
data_ = &fatal_msg_data_shared;
data_->first_fatal_ = false;
}
}
data_->preserved_errno_ = errno;
data_->severity_ = severity;
data_->line_ = line;
data_->send_method_ = send_method;
data_->sink_ = nullptr;
data_->outvec_ = nullptr;
const auto now = std::chrono::system_clock::now();
time_ = LogMessageTime(now);
data_->num_chars_to_log_ = 0;
data_->num_chars_to_syslog_ = 0;
data_->basename_ = const_basename(file);
data_->fullname_ = file;
data_->has_been_flushed_ = false;
data_->thread_id_ = std::this_thread::get_id();
if (FLAGS_log_prefix && (line != kNoLogPrefix)) {
std::ios saved_fmt(nullptr);
saved_fmt.copyfmt(stream());
stream().fill('0');
if (g_prefix_formatter == nullptr) {
stream() << LogSeverityNames[severity][0];
if (FLAGS_log_year_in_prefix) {
stream() << setw(4) << 1900 + time_.year();
}
stream() << setw(2) << 1 + time_.month() << setw(2) << time_.day() << ' '
<< setw(2) << time_.hour() << ':' << setw(2) << time_.min()
<< ':' << setw(2) << time_.sec() << "." << setw(6)
<< time_.usec() << ' ' << setfill(' ') << setw(5)
<< data_->thread_id_ << setfill('0') << ' ' << data_->basename_
<< ':' << data_->line_ << "] ";
} else {
(*g_prefix_formatter)(stream(), *this);
stream() << " ";
}
stream().copyfmt(saved_fmt);
}
data_->num_prefix_chars_ = data_->stream_.pcount();
if (!FLAGS_log_backtrace_at.empty()) {
char fileline[128];
std::snprintf(fileline, sizeof(fileline), "%s:%d", data_->basename_, line);
#ifdef HAVE_STACKTRACE
if (FLAGS_log_backtrace_at == fileline) {
string stacktrace = GetStackTrace();
stream() << " (stacktrace:\n" << stacktrace << ") ";
}
#endif
}
}
LogSeverity LogMessage::severity() const noexcept { return data_->severity_; }
int LogMessage::line() const noexcept { return data_->line_; }
const std::thread::id& LogMessage::thread_id() const noexcept {
return data_->thread_id_;
}
const char* LogMessage::fullname() const noexcept { return data_->fullname_; }
const char* LogMessage::basename() const noexcept { return data_->basename_; }
const LogMessageTime& LogMessage::time() const noexcept { return time_; }
LogMessage::~LogMessage() noexcept(false) {
Flush();
bool fail = data_->severity_ == GLOG_FATAL && exit_on_dfatal;
#ifdef GLOG_THREAD_LOCAL_STORAGE
if (data_ == static_cast<void*>(&thread_msg_data)) {
data_->~LogMessageData();
thread_data_available = true;
} else {
delete allocated_;
}
#else
delete allocated_;
#endif
if (fail) {
const char* message = "*** Check failure stack trace: ***\n";
    if (write(fileno(stderr), message, strlen(message)) < 0) {
      // Ignore errors.
    }
AlsoErrorWrite(GLOG_FATAL,
glog_internal_namespace_::ProgramInvocationShortName(),
message);
#if defined(__cpp_lib_uncaught_exceptions) && \
(__cpp_lib_uncaught_exceptions >= 201411L)
if (std::uncaught_exceptions() == 0)
#else
if (!std::uncaught_exception())
#endif
{
Fail();
}
}
}
int LogMessage::preserved_errno() const { return data_->preserved_errno_; }
ostream& LogMessage::stream() { return data_->stream_; }
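// Flushes the buffered message exactly once: appends a trailing newline if
// missing (restoring the clobbered byte afterwards, since callers may keep
// streaming), dispatches via the configured send method under log_mutex,
// waits for sinks, and reinstates the preserved errno.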
void LogMessage::Flush() {
if (data_->has_been_flushed_ || data_->severity_ < FLAGS_minloglevel) {
return;
}
data_->num_chars_to_log_ = data_->stream_.pcount();
data_->num_chars_to_syslog_ =
data_->num_chars_to_log_ - data_->num_prefix_chars_;
bool append_newline =
(data_->message_text_[data_->num_chars_to_log_ - 1] != '\n');
char original_final_char = '\0';
if (append_newline) {
original_final_char = data_->message_text_[data_->num_chars_to_log_];
data_->message_text_[data_->num_chars_to_log_++] = '\n';
}
data_->message_text_[data_->num_chars_to_log_] = '\0';
{
std::lock_guard<std::mutex> l{log_mutex};
(this->*(data_->send_method_))();
++num_messages_[static_cast<int>(data_->severity_)];
}
LogDestination::WaitForSinks(data_);
if (append_newline) {
data_->message_text_[data_->num_chars_to_log_ - 1] = original_final_char;
}
if (data_->preserved_errno_ != 0) {
errno = data_->preserved_errno_;
}
data_->has_been_flushed_ = true;
}
static std::chrono::system_clock::time_point fatal_time;
static char fatal_message[256];
void ReprintFatalMessage() {
if (fatal_message[0]) {
const size_t n = strlen(fatal_message);
if (!FLAGS_logtostderr) {
WriteToStderr(fatal_message, n);
}
LogDestination::LogToAllLogfiles(GLOG_ERROR, fatal_time, fatal_message, n);
}
}
void LogMessage::SendToLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
static bool already_warned_before_initgoogle = false;
RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
data_->message_text_[data_->num_chars_to_log_ - 1] == '\n',
"");
if (!already_warned_before_initgoogle && !IsGoogleLoggingInitialized()) {
const char w[] =
"WARNING: Logging before InitGoogleLogging() is "
"written to STDERR\n";
WriteToStderr(w, strlen(w));
already_warned_before_initgoogle = true;
}
if (FLAGS_logtostderr || FLAGS_logtostdout || !IsGoogleLoggingInitialized()) {
if (FLAGS_logtostdout) {
ColoredWriteToStdout(data_->severity_, data_->message_text_,
data_->num_chars_to_log_);
} else {
ColoredWriteToStderr(data_->severity_, data_->message_text_,
data_->num_chars_to_log_);
}
LogDestination::LogToSinks(
data_->severity_, data_->fullname_, data_->basename_, data_->line_,
time_, data_->message_text_ + data_->num_prefix_chars_,
(data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1));
} else {
LogDestination::LogToAllLogfiles(data_->severity_, time_.when(),
data_->message_text_,
data_->num_chars_to_log_);
LogDestination::MaybeLogToStderr(data_->severity_, data_->message_text_,
data_->num_chars_to_log_,
data_->num_prefix_chars_);
LogDestination::MaybeLogToEmail(data_->severity_, data_->message_text_,
data_->num_chars_to_log_);
LogDestination::LogToSinks(
data_->severity_, data_->fullname_, data_->basename_, data_->line_,
time_, data_->message_text_ + data_->num_prefix_chars_,
(data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1));
}
if (data_->severity_ == GLOG_FATAL && exit_on_dfatal) {
if (data_->first_fatal_) {
RecordCrashReason(&crash_reason);
SetCrashReason(&crash_reason);
const size_t copy =
min(data_->num_chars_to_log_, sizeof(fatal_message) - 1);
memcpy(fatal_message, data_->message_text_, copy);
fatal_message[copy] = '\0';
fatal_time = time_.when();
}
if (!FLAGS_logtostderr && !FLAGS_logtostdout) {
for (auto& log_destination : LogDestination::log_destinations_) {
if (log_destination) {
log_destination->logger_->Write(
true, std::chrono::system_clock::time_point{}, "", 0);
}
}
}
LogDestination::WaitForSinks(data_);
}
}
void LogMessage::RecordCrashReason(logging::internal::CrashReason* reason) {
reason->filename = fatal_msg_data_exclusive.fullname_;
reason->line_number = fatal_msg_data_exclusive.line_;
reason->message = fatal_msg_data_exclusive.message_text_ +
fatal_msg_data_exclusive.num_prefix_chars_;
#ifdef HAVE_STACKTRACE
reason->depth = GetStackTrace(reason->stack, ARRAYSIZE(reason->stack), 4);
#else
reason->depth = 0;
#endif
}
GLOG_NO_EXPORT logging_fail_func_t g_logging_fail_func =
reinterpret_cast<logging_fail_func_t>(&abort);
NullStream::NullStream() : LogMessage::LogStream(message_buffer_, 2, 0) {}
NullStream::NullStream(const char* /* file */, int /* line */,
const logging::internal::CheckOpString& /* result */)
: LogMessage::LogStream(message_buffer_, 2, 0) {}
NullStream& NullStream::stream() { return *this; }
NullStreamFatal::~NullStreamFatal() {
std::abort();
}
logging_fail_func_t InstallFailureFunction(logging_fail_func_t fail_func) {
return std::exchange(g_logging_fail_func, fail_func);
}
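// Usage sketch (illustration only; the handler body is an assumption, not
// library code — it mirrors the pattern used in logging_unittest.cc):
//   logging_fail_func_t prev = InstallFailureFunction(
//       +[]() { throw std::logic_error{"fatal log"}; });
//   // ... run code whose LOG(FATAL) should throw instead of aborting ...
//   InstallFailureFunction(prev);  // restore the previous handler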
void LogMessage::Fail() { g_logging_fail_func(); }
void LogMessage::SendToSink() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
if (data_->sink_ != nullptr) {
RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
data_->message_text_[data_->num_chars_to_log_ - 1] == '\n',
"");
data_->sink_->send(
data_->severity_, data_->fullname_, data_->basename_, data_->line_,
time_, data_->message_text_ + data_->num_prefix_chars_,
(data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1));
}
}
void LogMessage::SendToSinkAndLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
SendToSink();
SendToLog();
}
void LogMessage::SaveOrSendToLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
if (data_->outvec_ != nullptr) {
RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
data_->message_text_[data_->num_chars_to_log_ - 1] == '\n',
"");
const char* start = data_->message_text_ + data_->num_prefix_chars_;
size_t len = data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1;
data_->outvec_->push_back(string(start, len));
} else {
SendToLog();
}
}
void LogMessage::WriteToStringAndLog() EXCLUSIVE_LOCKS_REQUIRED(log_mutex) {
if (data_->message_ != nullptr) {
RAW_DCHECK(data_->num_chars_to_log_ > 0 &&
data_->message_text_[data_->num_chars_to_log_ - 1] == '\n',
"");
const char* start = data_->message_text_ + data_->num_prefix_chars_;
size_t len = data_->num_chars_to_log_ - data_->num_prefix_chars_ - 1;
data_->message_->assign(start, len);
}
SendToLog();
}
void LogMessage::SendToSyslogAndLog() {
#ifdef HAVE_SYSLOG_H
static bool openlog_already_called = false;
if (!openlog_already_called) {
openlog(glog_internal_namespace_::ProgramInvocationShortName(),
LOG_CONS | LOG_NDELAY | LOG_PID, LOG_USER);
openlog_already_called = true;
}
const int SEVERITY_TO_LEVEL[] = {LOG_INFO, LOG_WARNING, LOG_ERR, LOG_EMERG};
syslog(LOG_USER | SEVERITY_TO_LEVEL[static_cast<int>(data_->severity_)],
"%.*s", static_cast<int>(data_->num_chars_to_syslog_),
data_->message_text_ + data_->num_prefix_chars_);
SendToLog();
#else
LOG(ERROR) << "No syslog support: message=" << data_->message_text_;
#endif
}
base::Logger* base::GetLogger(LogSeverity severity) {
std::lock_guard<std::mutex> l{log_mutex};
return LogDestination::log_destination(severity)->GetLoggerImpl();
}
void base::SetLogger(LogSeverity severity, base::Logger* logger) {
std::lock_guard<std::mutex> l{log_mutex};
LogDestination::log_destination(severity)->SetLoggerImpl(logger);
}
int64 LogMessage::num_messages(int severity) {
std::lock_guard<std::mutex> l{log_mutex};
return num_messages_[severity];
}
ostream& operator<<(ostream& os, const Counter_t&) {
#ifdef DISABLE_RTTI
LogMessage::LogStream* log = static_cast<LogMessage::LogStream*>(&os);
#else
auto* log = dynamic_cast<LogMessage::LogStream*>(&os);
#endif
CHECK(log && log == log->self())
<< "You must not use COUNTER with non-glog ostream";
os << log->ctr();
return os;
}
ErrnoLogMessage::ErrnoLogMessage(const char* file, int line,
LogSeverity severity, int64 ctr,
void (LogMessage::*send_method)())
: LogMessage(file, line, severity, ctr, send_method) {}
ErrnoLogMessage::~ErrnoLogMessage() {
stream() << ": " << StrError(preserved_errno()) << " [" << preserved_errno()
<< "]";
}
void FlushLogFiles(LogSeverity min_severity) {
LogDestination::FlushLogFiles(min_severity);
}
void FlushLogFilesUnsafe(LogSeverity min_severity) {
LogDestination::FlushLogFilesUnsafe(min_severity);
}
void SetLogDestination(LogSeverity severity, const char* base_filename) {
LogDestination::SetLogDestination(severity, base_filename);
}
void SetLogSymlink(LogSeverity severity, const char* symlink_basename) {
LogDestination::SetLogSymlink(severity, symlink_basename);
}
LogSink::~LogSink() = default;
void LogSink::WaitTillSent() {
}
string LogSink::ToString(LogSeverity severity, const char* file, int line,
const LogMessageTime& time, const char* message,
size_t message_len) {
ostringstream stream;
stream.fill('0');
stream << LogSeverityNames[severity][0];
if (FLAGS_log_year_in_prefix) {
stream << setw(4) << 1900 + time.year();
}
stream << setw(2) << 1 + time.month() << setw(2) << time.day() << ' '
<< setw(2) << time.hour() << ':' << setw(2) << time.min() << ':'
<< setw(2) << time.sec() << '.' << setw(6) << time.usec() << ' '
<< setfill(' ') << setw(5) << std::this_thread::get_id()
<< setfill('0') << ' ' << file << ':' << line << "] ";
(stream.write)(message, static_cast<std::streamsize>(message_len));
return stream.str();
}
void AddLogSink(LogSink* destination) {
LogDestination::AddLogSink(destination);
}
void RemoveLogSink(LogSink* destination) {
LogDestination::RemoveLogSink(destination);
}
void SetLogFilenameExtension(const char* ext) {
LogDestination::SetLogFilenameExtension(ext);
}
void SetStderrLogging(LogSeverity min_severity) {
LogDestination::SetStderrLogging(min_severity);
}
void SetEmailLogging(LogSeverity min_severity, const char* addresses) {
LogDestination::SetEmailLogging(min_severity, addresses);
}
void LogToStderr() { LogDestination::LogToStderr(); }
namespace base {
namespace internal {
bool GetExitOnDFatal();
bool GetExitOnDFatal() {
std::lock_guard<std::mutex> l{log_mutex};
return exit_on_dfatal;
}
void SetExitOnDFatal(bool value);
void SetExitOnDFatal(bool value) {
std::lock_guard<std::mutex> l{log_mutex};
exit_on_dfatal = value;
}
}
}
#ifndef GLOG_OS_EMSCRIPTEN
static const char kDontNeedShellEscapeChars[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789+-_.=/:,@";
static string ShellEscape(const string& src) {
string result;
if (!src.empty() &&
src.find_first_not_of(kDontNeedShellEscapeChars) == string::npos) {
result.assign(src);
} else if (src.find_first_of('\'') == string::npos) {
result.assign("'");
result.append(src);
result.append("'");
} else {
result.assign("\"");
for (size_t i = 0; i < src.size(); ++i) {
switch (src[i]) {
case '\\':
case '$':
case '"':
case '`':
result.append("\\");
}
result.append(src, i, 1);
}
result.append("\"");
}
return result;
}
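// Behavior sketch (example values assumed, derived from the branches above):
//   ShellEscape("user@host.com") -> user@host.com     (safe chars only)
//   ShellEscape("two words")     -> 'two words'       (no single quote)
//   ShellEscape("it's $HOME")    -> "it's \$HOME"     (escapes \ $ " `)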
static inline void trim(std::string& s) {
// Cast to unsigned char first: std::isspace on a plain (possibly negative)
// char is undefined behavior.
const auto not_space = [](char ch) {
return std::isspace(static_cast<unsigned char>(ch)) == 0;
};
s.erase(s.begin(), std::find_if(s.begin(), s.end(), not_space));
s.erase(std::find_if(s.rbegin(), s.rend(), not_space).base(), s.end());
}
#endif
static bool SendEmailInternal(const char* dest, const char* subject,
const char* body, bool use_logging) {
#ifndef GLOG_OS_EMSCRIPTEN
if (dest && *dest) {
std::istringstream ss(dest);
std::ostringstream sanitized_dests;
std::string s;
while (std::getline(ss, s, ',')) {
trim(s);
if (s.empty()) {
continue;
}
if (!std::regex_match(
s,
std::regex("^[a-zA-Z0-9]"
"[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]*@[a-zA-Z0-9]"
"(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9]"
"(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$"))) {
if (use_logging) {
VLOG(1) << "Invalid destination email address:" << s;
} else {
fprintf(stderr, "Invalid destination email address: %s\n", s.c_str());
}
return false;
}
if (!sanitized_dests.str().empty()) {
sanitized_dests << ",";
}
sanitized_dests << s;
}
const std::string& tmp = sanitized_dests.str();
dest = tmp.c_str();
if (use_logging) {
VLOG(1) << "Trying to send TITLE:" << subject << " BODY:" << body
<< " to " << dest;
} else {
fprintf(stderr, "Trying to send TITLE: %s BODY: %s to %s\n", subject,
body, dest);
}
string logmailer;
if (FLAGS_logmailer.empty()) {
logmailer = "/bin/mail";
} else {
logmailer = ShellEscape(FLAGS_logmailer);
}
string cmd =
logmailer + " -s" + ShellEscape(subject) + " " + ShellEscape(dest);
if (use_logging) {
VLOG(4) << "Mailing command: " << cmd;
}
FILE* pipe = popen(cmd.c_str(), "w");
if (pipe != nullptr) {
if (body) {
fwrite(body, sizeof(char), strlen(body), pipe);
}
bool ok = pclose(pipe) != -1;
if (!ok) {
if (use_logging) {
LOG(ERROR) << "Problems sending mail to " << dest << ": "
<< StrError(errno);
} else {
fprintf(stderr, "Problems sending mail to %s: %s\n", dest,
StrError(errno).c_str());
}
}
return ok;
} else {
if (use_logging) {
LOG(ERROR) << "Unable to send mail to " << dest;
} else {
fprintf(stderr, "Unable to send mail to %s\n", dest);
}
}
}
#else
(void)dest;
(void)subject;
(void)body;
(void)use_logging;
LOG(WARNING) << "Email support not available; not sending message";
#endif
return false;
}
bool SendEmail(const char* dest, const char* subject, const char* body) {
return SendEmailInternal(dest, subject, body, true);
}
static void GetTempDirectories(vector<string>& list) {
list.clear();
#ifdef GLOG_OS_WINDOWS
char tmp[MAX_PATH];
if (GetTempPathA(MAX_PATH, tmp)) list.push_back(tmp);
list.push_back("C:\\TMP\\");
list.push_back("C:\\TEMP\\");
#else
const char* candidates[] = {
getenv("TEST_TMPDIR"),
getenv("TMPDIR"),
getenv("TMP"),
"/tmp",
};
for (auto d : candidates) {
if (!d) continue;
string dstr = d;
if (dstr[dstr.size() - 1] != '/') {
dstr += "/";
}
list.push_back(dstr);
struct stat statbuf;
if (!stat(d, &statbuf) && S_ISDIR(statbuf.st_mode)) {
return;
}
}
#endif
}
static std::unique_ptr<std::vector<std::string>> logging_directories_list;
const vector<string>& GetLoggingDirectories() {
if (logging_directories_list == nullptr) {
logging_directories_list = std::make_unique<std::vector<std::string>>();
if (!FLAGS_log_dir.empty()) {
if (std::find(std::begin(possible_dir_delim),
std::end(possible_dir_delim),
FLAGS_log_dir.back()) == std::end(possible_dir_delim)) {
logging_directories_list->push_back(FLAGS_log_dir + "/");
} else {
logging_directories_list->push_back(FLAGS_log_dir);
}
} else {
GetTempDirectories(*logging_directories_list);
#ifdef GLOG_OS_WINDOWS
char tmp[MAX_PATH];
if (GetWindowsDirectoryA(tmp, MAX_PATH))
logging_directories_list->push_back(tmp);
logging_directories_list->push_back(".\\");
#else
logging_directories_list->push_back("./");
#endif
}
}
return *logging_directories_list;
}
GLOG_NO_EXPORT
void GetExistingTempDirectories(vector<string>& list) {
GetTempDirectories(list);
auto i_dir = list.begin();
while (i_dir != list.end()) {
if (access(i_dir->c_str(), 0)) {
i_dir = list.erase(i_dir);
} else {
++i_dir;
}
}
}
void TruncateLogFile(const char* path, uint64 limit, uint64 keep) {
#if defined(HAVE_UNISTD_H) || defined(HAVE__CHSIZE_S)
struct stat statbuf;
const int kCopyBlockSize = 8 << 10;
char copybuf[kCopyBlockSize];
off_t read_offset, write_offset;
int flags = O_RDWR;
# ifdef GLOG_OS_LINUX
const char* procfd_prefix = "/proc/self/fd/";
if (strncmp(procfd_prefix, path, strlen(procfd_prefix))) flags |= O_NOFOLLOW;
# endif
FileDescriptor fd{open(path, flags)};
if (!fd) {
if (errno == EFBIG) {
# ifdef HAVE__CHSIZE_S
if (_chsize_s(fd.get(), 0) != 0) {
# else
if (truncate(path, 0) == -1) {
# endif
PLOG(ERROR) << "Unable to truncate " << path;
} else {
LOG(ERROR) << "Truncated " << path << " due to EFBIG error";
}
} else {
PLOG(ERROR) << "Unable to open " << path;
}
return;
}
if (fstat(fd.get(), &statbuf) == -1) {
PLOG(ERROR) << "Unable to fstat()";
return;
}
if (!S_ISREG(statbuf.st_mode)) return;
if (statbuf.st_size <= static_cast<off_t>(limit)) return;
if (statbuf.st_size <= static_cast<off_t>(keep)) return;
LOG(INFO) << "Truncating " << path << " to " << keep << " bytes";
read_offset = statbuf.st_size - static_cast<off_t>(keep);
write_offset = 0;
ssize_t bytesin, bytesout;
while ((bytesin = pread(fd.get(), copybuf, sizeof(copybuf), read_offset)) >
0) {
bytesout =
pwrite(fd.get(), copybuf, static_cast<size_t>(bytesin), write_offset);
if (bytesout == -1) {
PLOG(ERROR) << "Unable to write to " << path;
break;
} else if (bytesout != bytesin) {
LOG(ERROR) << "Expected to write " << bytesin << ", wrote " << bytesout;
}
read_offset += bytesin;
write_offset += bytesout;
}
if (bytesin == -1) PLOG(ERROR) << "Unable to read from " << path;
# ifdef HAVE__CHSIZE_S
if (_chsize_s(fd.get(), write_offset) != 0) {
# else
if (ftruncate(fd.get(), write_offset) == -1) {
# endif
PLOG(ERROR) << "Unable to truncate " << path;
}
#else
LOG(ERROR) << "No log truncation support.";
#endif
}
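// Effect sketch (sizes assumed for illustration): with limit = 1 MiB and
// keep = 4 KiB, a file is left untouched until it exceeds 1 MiB; after that
// its final 4 KiB are copied to the front and the file is truncated to that
// length, so the most recent log lines survive.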
void TruncateStdoutStderr() {
#ifdef HAVE_UNISTD_H
uint64 limit = MaxLogSize() << 20U;
uint64 keep = 1U << 20U;
TruncateLogFile("/proc/self/fd/1", limit, keep);
TruncateLogFile("/proc/self/fd/2", limit, keep);
#else
LOG(ERROR) << "No log truncation support.";
#endif
}
namespace logging {
namespace internal {
#define DEFINE_CHECK_STROP_IMPL(name, func, expected) \
std::unique_ptr<string> Check##func##expected##Impl( \
const char* s1, const char* s2, const char* names) { \
bool equal = s1 == s2 || (s1 && s2 && !func(s1, s2)); \
if (equal == (expected)) \
return nullptr; \
else { \
ostringstream ss; \
if (!s1) s1 = ""; \
if (!s2) s2 = ""; \
ss << #name " failed: " << names << " (" << s1 << " vs. " << s2 << ")"; \
return std::make_unique<std::string>(ss.str()); \
} \
}
DEFINE_CHECK_STROP_IMPL(CHECK_STREQ, strcmp, true)
DEFINE_CHECK_STROP_IMPL(CHECK_STRNE, strcmp, false)
DEFINE_CHECK_STROP_IMPL(CHECK_STRCASEEQ, strcasecmp, true)
DEFINE_CHECK_STROP_IMPL(CHECK_STRCASENE, strcasecmp, false)
#undef DEFINE_CHECK_STROP_IMPL
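// For reference, the first invocation above expands to a function named
// CheckstrcmptrueImpl(s1, s2, names): it returns nullptr when the strings
// compare equal (or are the same pointer), and otherwise a message of the
// form "CHECK_STREQ failed: names (s1 vs. s2)".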
}
}
GLOG_NO_EXPORT
int posix_strerror_r(int err, char* buf, size_t len) {
if (buf == nullptr || len <= 0) {
errno = EINVAL;
return -1;
}
buf[0] = '\000';
int old_errno = errno;
errno = 0;
char* rc = reinterpret_cast<char*>(strerror_r(err, buf, len));
if (errno) {
buf[0] = '\000';
return -1;
}
errno = old_errno;
buf[len - 1] = '\000';
if (!rc) {
return 0;
} else {
if (rc == buf) {
return 0;
} else {
buf[0] = '\000';
#if defined(GLOG_OS_MACOSX) || defined(GLOG_OS_FREEBSD) || \
defined(GLOG_OS_OPENBSD)
if (reinterpret_cast<intptr_t>(rc) < sys_nerr) {
return -1;
}
#endif
strncat(buf, rc, len - 1);
return 0;
}
}
}
string StrError(int err) {
char buf[100];
int rc = posix_strerror_r(err, buf, sizeof(buf));
if ((rc < 0) || (buf[0] == '\000')) {
std::snprintf(buf, sizeof(buf), "Error number %d", err);
}
return buf;
}
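// Usage sketch (exact wording is platform- and locale-dependent):
//   StrError(ENOENT) -> "No such file or directory"
//   StrError(123456) -> "Error number 123456"   (fallback path above)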
LogMessageFatal::LogMessageFatal(const char* file, int line)
: LogMessage(file, line, GLOG_FATAL) {}
LogMessageFatal::LogMessageFatal(const char* file, int line,
const logging::internal::CheckOpString& result)
: LogMessage(file, line, result) {}
LogMessageFatal::~LogMessageFatal() noexcept(false) {
Flush();
LogMessage::Fail();
}
namespace logging {
namespace internal {
CheckOpMessageBuilder::CheckOpMessageBuilder(const char* exprtext)
: stream_(new ostringstream) {
*stream_ << exprtext << " (";
}
CheckOpMessageBuilder::~CheckOpMessageBuilder() { delete stream_; }
ostream* CheckOpMessageBuilder::ForVar2() {
*stream_ << " vs. ";
return stream_;
}
std::unique_ptr<string> CheckOpMessageBuilder::NewString() {
*stream_ << ")";
return std::make_unique<std::string>(stream_->str());
}
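// Illustration (values assumed): for a failing CHECK_EQ(a, b) with a == 1
// and b == 2, the builder is seeded with exprtext "a == b", ForVar2() is
// called between streaming the operands, and NewString() yields
// "a == b (1 vs. 2)".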
template <>
void MakeCheckOpValueString(std::ostream* os, const char& v) {
if (v >= 32 && v <= 126) {
(*os) << "'" << v << "'";
} else {
(*os) << "char value " << static_cast<short>(v);
}
}
template <>
void MakeCheckOpValueString(std::ostream* os, const signed char& v) {
if (v >= 32 && v <= 126) {
(*os) << "'" << v << "'";
} else {
(*os) << "signed char value " << static_cast<short>(v);
}
}
template <>
void MakeCheckOpValueString(std::ostream* os, const unsigned char& v) {
if (v >= 32 && v <= 126) {
(*os) << "'" << v << "'";
} else {
(*os) << "unsigned char value " << static_cast<unsigned short>(v);
}
}
template <>
void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& /* v */) {
(*os) << "nullptr";
}
}
}
void InitGoogleLogging(const char* argv0) { InitGoogleLoggingUtilities(argv0); }
void InstallPrefixFormatter(PrefixFormatterCallback callback, void* data) {
if (callback != nullptr) {
g_prefix_formatter = std::make_unique<PrefixFormatter>(callback, data);
} else {
g_prefix_formatter = nullptr;
}
}
void ShutdownGoogleLogging() {
ShutdownGoogleLoggingUtilities();
LogDestination::DeleteLogDestinations();
logging_directories_list = nullptr;
g_prefix_formatter = nullptr;
}
void EnableLogCleaner(unsigned int overdue_days) {
log_cleaner.Enable(std::chrono::duration_cast<std::chrono::minutes>(
std::chrono::duration<unsigned, std::ratio<kSecondsInDay>>{
overdue_days}));
}
void EnableLogCleaner(const std::chrono::minutes& overdue) {
log_cleaner.Enable(overdue);
}
void DisableLogCleaner() { log_cleaner.Disable(); }
LogMessageTime::LogMessageTime() = default;
namespace {
template <class... Args>
struct void_impl {
using type = void;
};
template <class... Args>
using void_t = typename void_impl<Args...>::type;
template <class T, class E = void>
struct has_member_tm_gmtoff : std::false_type {};
template <class T>
struct has_member_tm_gmtoff<T, void_t<decltype(&T::tm_gmtoff)>>
: std::true_type {};
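// Detection idiom sketch (types hypothetical): given
//   struct WithOff { long tm_gmtoff; };  struct Without {};
// has_member_tm_gmtoff<WithOff>::value is true and
// has_member_tm_gmtoff<Without>::value is false, because the partial
// specialization is viable only when decltype(&T::tm_gmtoff) is well-formed.
// This selects between the two Breakdown() overloads that follow.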
template <class T = std::tm>
auto Breakdown(const std::chrono::system_clock::time_point& now)
-> std::enable_if_t<!has_member_tm_gmtoff<T>::value,
std::tuple<std::tm, std::time_t, std::chrono::hours>> {
std::time_t timestamp = std::chrono::system_clock::to_time_t(now);
std::tm tm_local;
std::tm tm_utc;
int isdst = 0;
if (FLAGS_log_utc_time) {
gmtime_r(&timestamp, &tm_local);
localtime_r(&timestamp, &tm_utc);
isdst = tm_utc.tm_isdst;
tm_utc = tm_local;
} else {
localtime_r(&timestamp, &tm_local);
isdst = tm_local.tm_isdst;
gmtime_r(&timestamp, &tm_utc);
}
std::time_t gmt_sec = std::mktime(&tm_utc);
using namespace std::chrono_literals;
const auto gmtoffset = std::chrono::duration_cast<std::chrono::hours>(
now - std::chrono::system_clock::from_time_t(gmt_sec) +
(isdst ? 1h : 0h));
return std::make_tuple(tm_local, timestamp, gmtoffset);
}
template <class T = std::tm>
auto Breakdown(const std::chrono::system_clock::time_point& now)
-> std::enable_if_t<has_member_tm_gmtoff<T>::value,
std::tuple<std::tm, std::time_t, std::chrono::hours>> {
std::time_t timestamp = std::chrono::system_clock::to_time_t(now);
T tm;
if (FLAGS_log_utc_time) {
gmtime_r(&timestamp, &tm);
} else {
localtime_r(&timestamp, &tm);
}
const auto gmtoffset = std::chrono::duration_cast<std::chrono::hours>(
std::chrono::seconds{tm.tm_gmtoff});
return std::make_tuple(tm, timestamp, gmtoffset);
}
}
LogMessageTime::LogMessageTime(std::chrono::system_clock::time_point now)
: timestamp_{now} {
std::time_t timestamp;
std::tie(tm_, timestamp, gmtoffset_) = Breakdown(now);
usecs_ = std::chrono::duration_cast<std::chrono::microseconds>(
now - std::chrono::system_clock::from_time_t(timestamp));
}
} | #include <fcntl.h>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <sstream>
#include <stdexcept>
#include <string>
#include <thread>
#include <vector>
#include "config.h"
#ifdef HAVE_GLOB_H
# include <glob.h>
#endif
#include <sys/stat.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_SYS_WAIT_H
# include <sys/wait.h>
#endif
#include "base/commandlineflags.h"
#include "glog/logging.h"
#include "glog/raw_logging.h"
#include "googletest.h"
#include "stacktrace.h"
#include "utilities.h"
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
#ifdef HAVE_LIB_GMOCK
# include <gmock/gmock.h>
# include "mock-log.h"
using google::glog_testing::ScopedMockLog;
using testing::_;
using testing::AllOf;
using testing::AnyNumber;
using testing::HasSubstr;
using testing::InitGoogleMock;
using testing::StrictMock;
using testing::StrNe;
#endif
using namespace std;
using namespace google;
namespace google {
namespace base {
namespace internal {
bool GetExitOnDFatal();
void SetExitOnDFatal(bool value);
}
}
}
static void TestLogging(bool check_counts);
static void TestRawLogging();
static void LogWithLevels(int v, int severity, bool err, bool alsoerr);
static void TestLoggingLevels();
static void TestVLogModule();
static void TestLogString();
static void TestLogSink();
static void TestLogToString();
static void TestLogSinkWaitTillSent();
static void TestCHECK();
static void TestDCHECK();
static void TestSTREQ();
static void TestBasename();
static void TestBasenameAppendWhenNoTimestamp();
static void TestTwoProcessesWrite();
static void TestSymlink();
static void TestExtension();
static void TestWrapper();
static void TestErrno();
static void TestTruncate();
static void TestCustomLoggerDeletionOnShutdown();
static void TestLogPeriodically();
static int x = -1;
static void BM_Check1(int n) {
while (n-- > 0) {
CHECK_GE(n, x);
CHECK_GE(n, x);
CHECK_GE(n, x);
CHECK_GE(n, x);
CHECK_GE(n, x);
CHECK_GE(n, x);
CHECK_GE(n, x);
CHECK_GE(n, x);
}
}
BENCHMARK(BM_Check1)
static void CheckFailure(int a, int b, const char* file, int line,
const char* msg);
static void BM_Check3(int n) {
while (n-- > 0) {
if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
if (n < x) CheckFailure(n, x, __FILE__, __LINE__, "n < x");
}
}
BENCHMARK(BM_Check3)
static void BM_Check2(int n) {
if (n == 17) {
x = 5;
}
while (n-- > 0) {
CHECK(n >= x);
CHECK(n >= x);
CHECK(n >= x);
CHECK(n >= x);
CHECK(n >= x);
CHECK(n >= x);
CHECK(n >= x);
CHECK(n >= x);
}
}
BENCHMARK(BM_Check2)
static void CheckFailure(int /* a */, int /* b */, const char* /* file */,
int /* line */, const char* /* msg */) {}
static void BM_logspeed(int n) {
while (n-- > 0) {
LOG(INFO) << "test message";
}
}
BENCHMARK(BM_logspeed)
static void BM_vlog(int n) {
while (n-- > 0) {
VLOG(1) << "test message";
}
}
BENCHMARK(BM_vlog)
namespace {
void PrefixAttacher(std::ostream& s, const LogMessage& m, void* data) {
if (data == nullptr || *static_cast<string*>(data) != "good data") {
return;
}
s << GetLogSeverityName(m.severity())[0] << setw(4) << 1900 + m.time().year()
<< setw(2) << 1 + m.time().month() << setw(2) << m.time().day() << ' '
<< setw(2) << m.time().hour() << ':' << setw(2) << m.time().min() << ':'
<< setw(2) << m.time().sec() << "." << setw(6) << m.time().usec() << ' '
<< setfill(' ') << setw(5) << m.thread_id() << setfill('0') << ' '
<< m.basename() << ':' << m.line() << "]";
}
}
int main(int argc, char** argv) {
FLAGS_colorlogtostderr = false;
FLAGS_timestamp_in_logfile_name = true;
setbuf(stderr, nullptr);
CaptureTestStderr();
LogWithLevels(FLAGS_v, FLAGS_stderrthreshold, FLAGS_logtostderr,
FLAGS_alsologtostderr);
LogWithLevels(0, 0, false, false);
const string early_stderr = GetCapturedTestStderr();
EXPECT_FALSE(IsGoogleLoggingInitialized());
string prefix_attacher_data = "good data";
InitGoogleLogging(argv[0]);
InstallPrefixFormatter(&PrefixAttacher, &prefix_attacher_data);
EXPECT_TRUE(IsGoogleLoggingInitialized());
RunSpecifiedBenchmarks();
FLAGS_logtostderr = true;
InitGoogleTest(&argc, argv);
#ifdef HAVE_LIB_GMOCK
InitGoogleMock(&argc, argv);
#endif
#ifdef GLOG_USE_GFLAGS
ParseCommandLineFlags(&argc, &argv, true);
#endif
CHECK_EQ(RUN_ALL_TESTS(), 0);
CaptureTestStderr();
LogMessage("dummy", LogMessage::kNoLogPrefix, GLOG_INFO).stream()
<< early_stderr;
TestLogging(true);
TestRawLogging();
TestLoggingLevels();
TestVLogModule();
TestLogString();
TestLogSink();
TestLogToString();
TestLogSinkWaitTillSent();
TestCHECK();
TestDCHECK();
TestSTREQ();
EXPECT_TRUE(
MungeAndDiffTestStderr(FLAGS_test_srcdir + "/src/logging_unittest.err"));
FLAGS_logtostderr = false;
FLAGS_logtostdout = true;
FLAGS_stderrthreshold = NUM_SEVERITIES;
CaptureTestStdout();
TestRawLogging();
TestLoggingLevels();
TestLogString();
TestLogSink();
TestLogToString();
TestLogSinkWaitTillSent();
TestCHECK();
TestDCHECK();
TestSTREQ();
EXPECT_TRUE(
MungeAndDiffTestStdout(FLAGS_test_srcdir + "/src/logging_unittest.out"));
FLAGS_logtostdout = false;
TestBasename();
TestBasenameAppendWhenNoTimestamp();
TestTwoProcessesWrite();
TestSymlink();
TestExtension();
TestWrapper();
TestErrno();
TestTruncate();
TestCustomLoggerDeletionOnShutdown();
TestLogPeriodically();
fprintf(stdout, "PASS\n");
return 0;
}
void TestLogging(bool check_counts) {
int64 base_num_infos = LogMessage::num_messages(GLOG_INFO);
int64 base_num_warning = LogMessage::num_messages(GLOG_WARNING);
int64 base_num_errors = LogMessage::num_messages(GLOG_ERROR);
LOG(INFO) << string("foo ") << "bar " << 10 << ' ' << 3.4;
for (int i = 0; i < 10; ++i) {
int old_errno = errno;
errno = i;
PLOG_EVERY_N(ERROR, 2) << "Plog every 2, iteration " << COUNTER;
errno = old_errno;
LOG_EVERY_N(ERROR, 3) << "Log every 3, iteration " << COUNTER << endl;
LOG_EVERY_N(ERROR, 4) << "Log every 4, iteration " << COUNTER << endl;
LOG_IF_EVERY_N(WARNING, true, 5) << "Log if every 5, iteration " << COUNTER;
LOG_IF_EVERY_N(WARNING, false, 3)
<< "Log if every 3, iteration " << COUNTER;
LOG_IF_EVERY_N(INFO, true, 1) << "Log if every 1, iteration " << COUNTER;
LOG_IF_EVERY_N(ERROR, (i < 3), 2)
<< "Log if less than 3 every 2, iteration " << COUNTER;
}
LOG_IF(WARNING, true) << "log_if this";
LOG_IF(WARNING, false) << "don't log_if this";
char s[] = "array";
LOG(INFO) << s;
const char const_s[] = "const array";
LOG(INFO) << const_s;
int j = 1000;
LOG(ERROR) << string("foo") << ' ' << j << ' ' << setw(10) << j << " "
<< setw(1) << hex << j;
LOG(INFO) << "foo " << std::setw(10) << 1.0;
{
google::LogMessage outer(__FILE__, __LINE__, GLOG_ERROR);
outer.stream() << "outer";
LOG(ERROR) << "inner";
}
LogMessage("foo", LogMessage::kNoLogPrefix, GLOG_INFO).stream()
<< "no prefix";
if (check_counts) {
CHECK_EQ(base_num_infos + 15, LogMessage::num_messages(GLOG_INFO));
CHECK_EQ(base_num_warning + 3, LogMessage::num_messages(GLOG_WARNING));
CHECK_EQ(base_num_errors + 17, LogMessage::num_messages(GLOG_ERROR));
}
}
static void NoAllocNewHook() { LOG(FATAL) << "unexpected new"; }
struct NewHook {
NewHook() { g_new_hook = &NoAllocNewHook; }
~NewHook() { g_new_hook = nullptr; }
};
namespace {
int* allocInt() { return new int; }
}
TEST(DeathNoAllocNewHook, logging) {
NewHook new_hook;
(void)&allocInt;
ASSERT_DEATH({ allocInt(); }, "unexpected new");
}
void TestRawLogging() {
auto* foo = new string("foo ");
string huge_str(50000, 'a');
FlagSaver saver;
NewHook new_hook;
RAW_LOG(INFO, "%s%s%d%c%f", foo->c_str(), "bar ", 10, ' ', 3.4);
char s[] = "array";
RAW_LOG(WARNING, "%s", s);
const char const_s[] = "const array";
RAW_LOG(INFO, "%s", const_s);
void* p = reinterpret_cast<void*>(PTR_TEST_VALUE);
RAW_LOG(INFO, "ptr %p", p);
p = nullptr;
RAW_LOG(INFO, "ptr %p", p);
int j = 1000;
RAW_LOG(ERROR, "%s%d%c%010d%s%1x", foo->c_str(), j, ' ', j, " ", j);
RAW_VLOG(0, "foo %d", j);
#if defined(NDEBUG)
RAW_LOG(INFO, "foo %d", j);
#else
RAW_DLOG(INFO, "foo %d", j);
#endif
RAW_LOG(WARNING, "Huge string: %s", huge_str.c_str());
RAW_VLOG(0, "Huge string: %s", huge_str.c_str());
FLAGS_v = 0;
RAW_LOG(INFO, "log");
RAW_VLOG(0, "vlog 0 on");
RAW_VLOG(1, "vlog 1 off");
RAW_VLOG(2, "vlog 2 off");
RAW_VLOG(3, "vlog 3 off");
FLAGS_v = 2;
RAW_LOG(INFO, "log");
RAW_VLOG(1, "vlog 1 on");
RAW_VLOG(2, "vlog 2 on");
RAW_VLOG(3, "vlog 3 off");
#if defined(NDEBUG)
RAW_DCHECK(1 == 2, " RAW_DCHECK's shouldn't be compiled in normal mode");
#endif
RAW_CHECK(1 == 1, "should be ok");
RAW_DCHECK(true, "should be ok");
delete foo;
}
void LogWithLevels(int v, int severity, bool err, bool alsoerr) {
RAW_LOG(INFO,
"Test: v=%d stderrthreshold=%d logtostderr=%d alsologtostderr=%d", v,
severity, err, alsoerr);
FlagSaver saver;
FLAGS_v = v;
FLAGS_stderrthreshold = severity;
FLAGS_logtostderr = err;
FLAGS_alsologtostderr = alsoerr;
RAW_VLOG(-1, "vlog -1");
RAW_VLOG(0, "vlog 0");
RAW_VLOG(1, "vlog 1");
RAW_LOG(INFO, "log info");
RAW_LOG(WARNING, "log warning");
RAW_LOG(ERROR, "log error");
VLOG(-1) << "vlog -1";
VLOG(0) << "vlog 0";
VLOG(1) << "vlog 1";
LOG(INFO) << "log info";
LOG(WARNING) << "log warning";
LOG(ERROR) << "log error";
VLOG_IF(-1, true) << "vlog_if -1";
VLOG_IF(-1, false) << "don't vlog_if -1";
VLOG_IF(0, true) << "vlog_if 0";
VLOG_IF(0, false) << "don't vlog_if 0";
VLOG_IF(1, true) << "vlog_if 1";
VLOG_IF(1, false) << "don't vlog_if 1";
LOG_IF(INFO, true) << "log_if info";
LOG_IF(INFO, false) << "don't log_if info";
LOG_IF(WARNING, true) << "log_if warning";
LOG_IF(WARNING, false) << "don't log_if warning";
LOG_IF(ERROR, true) << "log_if error";
LOG_IF(ERROR, false) << "don't log_if error";
int c;
c = 1;
VLOG_IF(100, c -= 2) << "vlog_if 100 expr";
EXPECT_EQ(c, -1);
c = 1;
VLOG_IF(0, c -= 2) << "vlog_if 0 expr";
EXPECT_EQ(c, -1);
c = 1;
LOG_IF(INFO, c -= 2) << "log_if info expr";
EXPECT_EQ(c, -1);
c = 1;
LOG_IF(ERROR, c -= 2) << "log_if error expr";
EXPECT_EQ(c, -1);
c = 2;
VLOG_IF(0, c -= 2) << "don't vlog_if 0 expr";
EXPECT_EQ(c, 0);
c = 2;
LOG_IF(ERROR, c -= 2) << "don't log_if error expr";
EXPECT_EQ(c, 0);
c = 3;
LOG_IF_EVERY_N(INFO, c -= 4, 1) << "log_if info every 1 expr";
EXPECT_EQ(c, -1);
c = 3;
LOG_IF_EVERY_N(ERROR, c -= 4, 1) << "log_if error every 1 expr";
EXPECT_EQ(c, -1);
c = 4;
LOG_IF_EVERY_N(ERROR, c -= 4, 3) << "don't log_if info every 3 expr";
EXPECT_EQ(c, 0);
c = 4;
LOG_IF_EVERY_N(ERROR, c -= 4, 3) << "don't log_if error every 3 expr";
EXPECT_EQ(c, 0);
c = 5;
VLOG_IF_EVERY_N(0, c -= 4, 1) << "vlog_if 0 every 1 expr";
EXPECT_EQ(c, 1);
c = 5;
VLOG_IF_EVERY_N(100, c -= 4, 3) << "vlog_if 100 every 3 expr";
EXPECT_EQ(c, 1);
c = 6;
VLOG_IF_EVERY_N(0, c -= 6, 1) << "don't vlog_if 0 every 1 expr";
EXPECT_EQ(c, 0);
c = 6;
VLOG_IF_EVERY_N(100, c -= 6, 3) << "don't vlog_if 100 every 1 expr";
EXPECT_EQ(c, 0);
}
void TestLoggingLevels() {
LogWithLevels(0, GLOG_INFO, false, false);
LogWithLevels(1, GLOG_INFO, false, false);
LogWithLevels(-1, GLOG_INFO, false, false);
LogWithLevels(0, GLOG_WARNING, false, false);
LogWithLevels(0, GLOG_ERROR, false, false);
LogWithLevels(0, GLOG_FATAL, false, false);
LogWithLevels(0, GLOG_FATAL, true, false);
LogWithLevels(0, GLOG_FATAL, false, true);
LogWithLevels(1, GLOG_WARNING, false, false);
LogWithLevels(1, GLOG_FATAL, false, true);
}
int TestVlogHelper() {
if (VLOG_IS_ON(1)) {
return 1;
}
return 0;
}
void TestVLogModule() {
int c = TestVlogHelper();
EXPECT_EQ(0, c);
#if defined(__GNUC__)
EXPECT_EQ(0, SetVLOGLevel("logging_unittest", 1));
c = TestVlogHelper();
EXPECT_EQ(1, c);
#endif
}
TEST(DeathRawCHECK, logging) {
ASSERT_DEATH(RAW_CHECK(false, "failure 1"),
"RAW: Check false failed: failure 1");
ASSERT_DEBUG_DEATH(RAW_DCHECK(1 == 2, "failure 2"),
"RAW: Check 1 == 2 failed: failure 2");
}
void TestLogString() {
vector<string> errors;
vector<string>* no_errors = nullptr;
LOG_STRING(INFO, &errors) << "LOG_STRING: "
<< "collected info";
LOG_STRING(WARNING, &errors) << "LOG_STRING: "
<< "collected warning";
LOG_STRING(ERROR, &errors) << "LOG_STRING: "
<< "collected error";
LOG_STRING(INFO, no_errors) << "LOG_STRING: "
<< "reported info";
LOG_STRING(WARNING, no_errors) << "LOG_STRING: "
<< "reported warning";
LOG_STRING(ERROR, nullptr) << "LOG_STRING: "
<< "reported error";
for (auto& error : errors) {
LOG(INFO) << "Captured by LOG_STRING: " << error;
}
}
void TestLogToString() {
string error;
string* no_error = nullptr;
LOG_TO_STRING(INFO, &error) << "LOG_TO_STRING: "
<< "collected info";
LOG(INFO) << "Captured by LOG_TO_STRING: " << error;
LOG_TO_STRING(WARNING, &error) << "LOG_TO_STRING: "
<< "collected warning";
LOG(INFO) << "Captured by LOG_TO_STRING: " << error;
LOG_TO_STRING(ERROR, &error) << "LOG_TO_STRING: "
<< "collected error";
LOG(INFO) << "Captured by LOG_TO_STRING: " << error;
LOG_TO_STRING(INFO, no_error) << "LOG_TO_STRING: "
<< "reported info";
LOG_TO_STRING(WARNING, no_error) << "LOG_TO_STRING: "
<< "reported warning";
LOG_TO_STRING(ERROR, nullptr) << "LOG_TO_STRING: "
<< "reported error";
}
class TestLogSinkImpl : public LogSink {
public:
vector<string> errors;
void send(LogSeverity severity, const char* /* full_filename */,
const char* base_filename, int line,
const LogMessageTime& logmsgtime, const char* message,
size_t message_len) override {
errors.push_back(ToString(severity, base_filename, line, logmsgtime,
message, message_len));
}
};
void TestLogSink() {
TestLogSinkImpl sink;
LogSink* no_sink = nullptr;
LOG_TO_SINK(&sink, INFO) << "LOG_TO_SINK: "
<< "collected info";
LOG_TO_SINK(&sink, WARNING) << "LOG_TO_SINK: "
<< "collected warning";
LOG_TO_SINK(&sink, ERROR) << "LOG_TO_SINK: "
<< "collected error";
LOG_TO_SINK(no_sink, INFO) << "LOG_TO_SINK: "
<< "reported info";
LOG_TO_SINK(no_sink, WARNING) << "LOG_TO_SINK: "
<< "reported warning";
LOG_TO_SINK(nullptr, ERROR) << "LOG_TO_SINK: "
<< "reported error";
LOG_TO_SINK_BUT_NOT_TO_LOGFILE(&sink, INFO)
<< "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
<< "collected info";
LOG_TO_SINK_BUT_NOT_TO_LOGFILE(&sink, WARNING)
<< "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
<< "collected warning";
LOG_TO_SINK_BUT_NOT_TO_LOGFILE(&sink, ERROR)
<< "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
<< "collected error";
LOG_TO_SINK_BUT_NOT_TO_LOGFILE(no_sink, INFO)
<< "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
<< "thrashed info";
LOG_TO_SINK_BUT_NOT_TO_LOGFILE(no_sink, WARNING)
<< "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
<< "thrashed warning";
LOG_TO_SINK_BUT_NOT_TO_LOGFILE(nullptr, ERROR)
<< "LOG_TO_SINK_BUT_NOT_TO_LOGFILE: "
<< "thrashed error";
LOG(INFO) << "Captured by LOG_TO_SINK:";
for (auto& error : sink.errors) {
LogMessage("foo", LogMessage::kNoLogPrefix, GLOG_INFO).stream() << error;
}
}
enum { CASE_A, CASE_B };
void TestCHECK() {
CHECK(1 == 1);
CHECK_EQ(1, 1);
CHECK_NE(1, 2);
CHECK_GE(1, 1);
CHECK_GE(2, 1);
CHECK_LE(1, 1);
CHECK_LE(1, 2);
CHECK_GT(2, 1);
CHECK_LT(1, 2);
#if !defined(GLOG_OS_MACOSX)
CHECK_EQ(CASE_A, CASE_A);
CHECK_NE(CASE_A, CASE_B);
CHECK_GE(CASE_A, CASE_A);
CHECK_GE(CASE_B, CASE_A);
CHECK_LE(CASE_A, CASE_A);
CHECK_LE(CASE_A, CASE_B);
CHECK_GT(CASE_B, CASE_A);
CHECK_LT(CASE_A, CASE_B);
#endif
}
void TestDCHECK() {
#if defined(NDEBUG)
DCHECK(1 == 2) << " DCHECK's shouldn't be compiled in normal mode";
#endif
DCHECK(1 == 1);
DCHECK_EQ(1, 1);
DCHECK_NE(1, 2);
DCHECK_GE(1, 1);
DCHECK_GE(2, 1);
DCHECK_LE(1, 1);
DCHECK_LE(1, 2);
DCHECK_GT(2, 1);
DCHECK_LT(1, 2);
auto* orig_ptr = new int64;
int64* ptr = DCHECK_NOTNULL(orig_ptr);
CHECK_EQ(ptr, orig_ptr);
delete orig_ptr;
}
void TestSTREQ() {
CHECK_STREQ("this", "this");
CHECK_STREQ(nullptr, nullptr);
CHECK_STRCASEEQ("this", "tHiS");
CHECK_STRCASEEQ(nullptr, nullptr);
CHECK_STRNE("this", "tHiS");
CHECK_STRNE("this", nullptr);
CHECK_STRCASENE("this", "that");
CHECK_STRCASENE(nullptr, "that");
CHECK_STREQ((string("a") + "b").c_str(), "ab");
CHECK_STREQ(string("test").c_str(), (string("te") + string("st")).c_str());
}
TEST(DeathSTREQ, logging) {
ASSERT_DEATH(CHECK_STREQ(nullptr, "this"), "");
ASSERT_DEATH(CHECK_STREQ("this", "siht"), "");
ASSERT_DEATH(CHECK_STRCASEEQ(nullptr, "siht"), "");
ASSERT_DEATH(CHECK_STRCASEEQ("this", "siht"), "");
ASSERT_DEATH(CHECK_STRNE(nullptr, nullptr), "");
ASSERT_DEATH(CHECK_STRNE("this", "this"), "");
ASSERT_DEATH(CHECK_STREQ((string("a") + "b").c_str(), "abc"), "");
}
TEST(CheckNOTNULL, Simple) {
int64 t;
void* ptr = static_cast<void*>(&t);
void* ref = CHECK_NOTNULL(ptr);
EXPECT_EQ(ptr, ref);
CHECK_NOTNULL(reinterpret_cast<char*>(ptr));
CHECK_NOTNULL(reinterpret_cast<unsigned char*>(ptr));
CHECK_NOTNULL(reinterpret_cast<int*>(ptr));
CHECK_NOTNULL(reinterpret_cast<int64*>(ptr));
}
TEST(DeathCheckNN, Simple) {
ASSERT_DEATH(CHECK_NOTNULL(static_cast<void*>(nullptr)), "");
}
static void GetFiles(const string& pattern, vector<string>* files) {
files->clear();
#if defined(HAVE_GLOB_H)
glob_t g;
const int r = glob(pattern.c_str(), 0, nullptr, &g);
CHECK((r == 0) || (r == GLOB_NOMATCH)) << ": error matching " << pattern;
for (size_t i = 0; i < g.gl_pathc; i++) {
files->push_back(string(g.gl_pathv[i]));
}
globfree(&g);
#elif defined(GLOG_OS_WINDOWS)
WIN32_FIND_DATAA data;
HANDLE handle = FindFirstFileA(pattern.c_str(), &data);
size_t index = pattern.rfind('\\');
if (index == string::npos) {
LOG(FATAL) << "No directory separator.";
}
const string dirname = pattern.substr(0, index + 1);
if (handle == INVALID_HANDLE_VALUE) {
return;
}
do {
files->push_back(dirname + data.cFileName);
} while (FindNextFileA(handle, &data));
if (!FindClose(handle)) {
LOG_SYSRESULT(GetLastError());
}
#else
# error There is no way to do glob.
#endif
}
static void DeleteFiles(const string& pattern) {
vector<string> files;
GetFiles(pattern, &files);
for (auto& file : files) {
CHECK(unlink(file.c_str()) == 0) << ": " << strerror(errno);
}
}
static void CheckFile(const string& name, const string& expected_string,
const bool checkInFileOrNot = true) {
vector<string> files;
GetFiles(name + "*", &files);
CHECK_EQ(files.size(), 1UL);
std::unique_ptr<std::FILE> file{fopen(files[0].c_str(), "r")};
CHECK(file != nullptr) << ": could not open " << files[0];
char buf[1000];
while (fgets(buf, sizeof(buf), file.get()) != nullptr) {
char* first = strstr(buf, expected_string.c_str());
if (checkInFileOrNot != (first == nullptr)) {
return;
}
}
LOG(FATAL) << "Did " << (checkInFileOrNot ? "not " : "") << "find "
<< expected_string << " in " << files[0];
}
static void TestBasename() {
fprintf(stderr, "==== Test setting log file basename\n");
const string dest = FLAGS_test_tmpdir + "/logging_test_basename";
DeleteFiles(dest + "*");
SetLogDestination(GLOG_INFO, dest.c_str());
LOG(INFO) << "message to new base";
FlushLogFiles(GLOG_INFO);
CheckFile(dest, "message to new base");
LogToStderr();
DeleteFiles(dest + "*");
}
static void TestBasenameAppendWhenNoTimestamp() {
fprintf(stderr,
"==== Test setting log file basename without timestamp and appending "
"properly\n");
const string dest =
FLAGS_test_tmpdir + "/logging_test_basename_append_when_no_timestamp";
DeleteFiles(dest + "*");
ofstream out(dest.c_str());
out << "test preexisting content" << endl;
out.close();
CheckFile(dest, "test preexisting content");
FLAGS_timestamp_in_logfile_name = false;
SetLogDestination(GLOG_INFO, dest.c_str());
LOG(INFO) << "message to new base, appending to preexisting file";
FlushLogFiles(GLOG_INFO);
FLAGS_timestamp_in_logfile_name = true;
CheckFile(dest, "test preexisting content");
CheckFile(dest, "message to new base, appending to preexisting file");
LogToStderr();
DeleteFiles(dest + "*");
}
static void TestTwoProcessesWrite() {
#if defined(HAVE_SYS_WAIT_H) && defined(HAVE_UNISTD_H) && defined(HAVE_FCNTL)
fprintf(stderr,
"==== Test setting log file basename and two processes writing - "
"second should fail\n");
const string dest =
FLAGS_test_tmpdir + "/logging_test_basename_two_processes_writing";
DeleteFiles(dest + "*");
FLAGS_timestamp_in_logfile_name = false;
SetLogDestination(GLOG_INFO, dest.c_str());
LOG(INFO) << "message to new base, parent";
FlushLogFiles(GLOG_INFO);
pid_t pid = fork();
CHECK_ERR(pid);
if (pid == 0) {
LOG(INFO) << "message to new base, child - should only appear on STDERR "
"not on the file";
ShutdownGoogleLogging();
exit(EXIT_SUCCESS);
} else if (pid > 0) {
wait(nullptr);
}
FLAGS_timestamp_in_logfile_name = true;
CheckFile(dest, "message to new base, parent");
CheckFile(dest,
"message to new base, child - should only appear on STDERR not on "
"the file",
false);
LogToStderr();
DeleteFiles(dest + "*");
#endif
}
static void TestSymlink() {
#ifndef GLOG_OS_WINDOWS
fprintf(stderr, "==== Test setting log file symlink\n");
string dest = FLAGS_test_tmpdir + "/logging_test_symlink";
string sym = FLAGS_test_tmpdir + "/symlinkbase";
DeleteFiles(dest + "*");
DeleteFiles(sym + "*");
SetLogSymlink(GLOG_INFO, "symlinkbase");
SetLogDestination(GLOG_INFO, dest.c_str());
LOG(INFO) << "message to new symlink";
FlushLogFiles(GLOG_INFO);
CheckFile(sym, "message to new symlink");
DeleteFiles(dest + "*");
DeleteFiles(sym + "*");
#endif
}
static void TestExtension() {
fprintf(stderr, "==== Test setting log file extension\n");
string dest = FLAGS_test_tmpdir + "/logging_test_extension";
DeleteFiles(dest + "*");
SetLogDestination(GLOG_INFO, dest.c_str());
SetLogFilenameExtension("specialextension");
LOG(INFO) << "message to new extension";
FlushLogFiles(GLOG_INFO);
CheckFile(dest, "message to new extension");
vector<string> filenames;
GetFiles(dest + "*", &filenames);
CHECK_EQ(filenames.size(), 1UL);
CHECK(strstr(filenames[0].c_str(), "specialextension") != nullptr);
LogToStderr();
DeleteFiles(dest + "*");
}
struct MyLogger : public base::Logger {
string data;
explicit MyLogger(bool* set_on_destruction)
: set_on_destruction_(set_on_destruction) {}
~MyLogger() override { *set_on_destruction_ = true; }
void Write(bool /* force_flush */,
const std::chrono::system_clock::time_point& /* timestamp */,
const char* message, size_t length) override {
data.append(message, length);
}
void Flush() override {}
uint32 LogSize() override { return static_cast<uint32>(data.length()); }
private:
bool* set_on_destruction_;
};
static void TestWrapper() {
fprintf(stderr, "==== Test log wrapper\n");
bool custom_logger_deleted = false;
auto* my_logger = new MyLogger(&custom_logger_deleted);
base::Logger* old_logger = base::GetLogger(GLOG_INFO);
base::SetLogger(GLOG_INFO, my_logger);
LOG(INFO) << "Send to wrapped logger";
CHECK(strstr(my_logger->data.c_str(), "Send to wrapped logger") != nullptr);
FlushLogFiles(GLOG_INFO);
EXPECT_FALSE(custom_logger_deleted);
base::SetLogger(GLOG_INFO, old_logger);
EXPECT_TRUE(custom_logger_deleted);
}
static void TestErrno() {
fprintf(stderr, "==== Test errno preservation\n");
errno = ENOENT;
TestLogging(false);
CHECK_EQ(errno, ENOENT);
}
static void TestOneTruncate(const char* path, uint64 limit, uint64 keep,
size_t dsize, size_t ksize, size_t expect) {
FileDescriptor fd{open(path, O_RDWR | O_CREAT | O_TRUNC, 0600)};
CHECK_ERR(fd);
const char *discardstr = "DISCARDME!", *keepstr = "KEEPME!";
const size_t discard_size = strlen(discardstr), keep_size = strlen(keepstr);
size_t written = 0;
while (written < dsize) {
size_t bytes = min(dsize - written, discard_size);
CHECK_ERR(write(fd.get(), discardstr, bytes));
written += bytes;
}
written = 0;
while (written < ksize) {
size_t bytes = min(ksize - written, keep_size);
CHECK_ERR(write(fd.get(), keepstr, bytes));
written += bytes;
}
TruncateLogFile(path, limit, keep);
struct stat statbuf;
CHECK_ERR(fstat(fd.get(), &statbuf));
CHECK_EQ(static_cast<size_t>(statbuf.st_size), expect);
CHECK_ERR(lseek(fd.get(), 0, SEEK_SET));
const size_t buf_size = static_cast<size_t>(statbuf.st_size) + 1;
std::vector<char> buf(buf_size);
CHECK_ERR(read(fd.get(), buf.data(), buf_size));
const char* p = buf.data();
size_t checked = 0;
while (checked < expect) {
size_t bytes = min(expect - checked, keep_size);
CHECK(!memcmp(p, keepstr, bytes));
checked += bytes;
}
}
static void TestTruncate() {
#ifdef HAVE_UNISTD_H
fprintf(stderr, "==== Test log truncation\n");
string path = FLAGS_test_tmpdir + "/truncatefile";
TestOneTruncate(path.c_str(), 10, 10, 10, 10, 10);
TestOneTruncate(path.c_str(), 2U << 20U, 4U << 10U, 3U << 20U, 4U << 10U,
4U << 10U);
TestOneTruncate(path.c_str(), 10, 20, 0, 20, 20);
TestOneTruncate(path.c_str(), 10, 0, 0, 0, 0);
TestOneTruncate(path.c_str(), 10, 50, 0, 10, 10);
TestOneTruncate(path.c_str(), 50, 100, 0, 30, 30);
# if !defined(GLOG_OS_MACOSX) && !defined(GLOG_OS_WINDOWS)
string linkname = path + ".link";
unlink(linkname.c_str());
CHECK_ERR(symlink(path.c_str(), linkname.c_str()));
TestOneTruncate(linkname.c_str(), 10, 10, 0, 30, 30);
# endif
# if defined(GLOG_OS_LINUX)
int fd;
CHECK_ERR(fd = open(path.c_str(), O_APPEND | O_WRONLY));
char fdpath[64];
std::snprintf(fdpath, sizeof(fdpath), "/proc/self/fd/%d", fd);
TestOneTruncate(fdpath, 10, 10, 10, 10, 10);
# endif
#endif
}
struct RecordDeletionLogger : public base::Logger {
RecordDeletionLogger(bool* set_on_destruction, base::Logger* wrapped_logger)
: set_on_destruction_(set_on_destruction),
wrapped_logger_(wrapped_logger) {
*set_on_destruction_ = false;
}
~RecordDeletionLogger() override { *set_on_destruction_ = true; }
void Write(bool force_flush,
const std::chrono::system_clock::time_point& timestamp,
const char* message, size_t length) override {
wrapped_logger_->Write(force_flush, timestamp, message, length);
}
void Flush() override { wrapped_logger_->Flush(); }
uint32 LogSize() override { return wrapped_logger_->LogSize(); }
private:
bool* set_on_destruction_;
base::Logger* wrapped_logger_;
};
static void TestCustomLoggerDeletionOnShutdown() {
bool custom_logger_deleted = false;
base::SetLogger(GLOG_INFO,
new RecordDeletionLogger(&custom_logger_deleted,
base::GetLogger(GLOG_INFO)));
EXPECT_TRUE(IsGoogleLoggingInitialized());
ShutdownGoogleLogging();
EXPECT_TRUE(custom_logger_deleted);
EXPECT_FALSE(IsGoogleLoggingInitialized());
}
namespace LogTimes {
constexpr int64_t LOG_PERIOD_NS = 10000000;
constexpr int64_t LOG_PERIOD_TOL_NS = 500000;
constexpr size_t MAX_CALLS = 10;
}
struct LogTimeRecorder {
LogTimeRecorder() = default;
size_t m_streamTimes{0};
std::chrono::steady_clock::time_point m_callTimes[LogTimes::MAX_CALLS];
};
std::ostream& operator<<(std::ostream& stream, LogTimeRecorder& t) {
t.m_callTimes[t.m_streamTimes++] = std::chrono::steady_clock::now();
return stream;
}
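// The recorder is streamed as the first operand of LOG_EVERY_T below, so a
// steady_clock timestamp is captured exactly once per message that is
// actually emitted, letting the test measure the spacing between emissions.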
int64 elapsedTime_ns(const std::chrono::steady_clock::time_point& begin,
const std::chrono::steady_clock::time_point& end) {
return std::chrono::duration_cast<std::chrono::nanoseconds>((end - begin))
.count();
}
static void TestLogPeriodically() {
fprintf(stderr, "==== Test log periodically\n");
LogTimeRecorder timeLogger;
constexpr double LOG_PERIOD_SEC = LogTimes::LOG_PERIOD_NS * 1e-9;
while (timeLogger.m_streamTimes < LogTimes::MAX_CALLS) {
LOG_EVERY_T(INFO, LOG_PERIOD_SEC)
<< timeLogger << "Timed Message #" << timeLogger.m_streamTimes;
}
int64 nsBetweenCalls[LogTimes::MAX_CALLS - 1];
for (size_t i = 1; i < LogTimes::MAX_CALLS; ++i) {
nsBetweenCalls[i - 1] = elapsedTime_ns(timeLogger.m_callTimes[i - 1],
timeLogger.m_callTimes[i]);
}
for (int64 time_ns : nsBetweenCalls) {
EXPECT_NEAR(time_ns, LogTimes::LOG_PERIOD_NS, LogTimes::LOG_PERIOD_TOL_NS);
}
}
namespace google {
inline namespace glog_internal_namespace_ {
extern bool SafeFNMatch_(const char* pattern, size_t patt_len, const char* str,
size_t str_len);
}
}
static bool WrapSafeFNMatch(string pattern, string str) {
pattern += "abc";
str += "defgh";
return SafeFNMatch_(pattern.data(), pattern.size() - 3, str.data(),
str.size() - 5);
}
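// WrapSafeFNMatch appends extra characters to both arguments but passes the
// original lengths, so a passing test also proves SafeFNMatch_ honors
// patt_len/str_len rather than relying on NUL termination.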
TEST(SafeFNMatch, logging) {
CHECK(WrapSafeFNMatch("foo", "foo"));
CHECK(!WrapSafeFNMatch("foo", "bar"));
CHECK(!WrapSafeFNMatch("foo", "fo"));
CHECK(!WrapSafeFNMatch("foo", "foo2"));
CHECK(WrapSafeFNMatch("bar/foo.ext", "bar/foo.ext"));
CHECK(WrapSafeFNMatch("*ba*r/fo*o.ext*", "bar/foo.ext"));
CHECK(!WrapSafeFNMatch("bar/foo.ext", "bar/baz.ext"));
CHECK(!WrapSafeFNMatch("bar/foo.ext", "bar/foo"));
CHECK(!WrapSafeFNMatch("bar/foo.ext", "bar/foo.ext.zip"));
CHECK(WrapSafeFNMatch("ba?,
const char* base_filename, int line,
const LogMessageTime& logmsgtime, const char* message,
size_t message_len) override {
if (tid_ == std::this_thread::get_id()) {
writer_.Buffer(ToString(severity, base_filename, line, logmsgtime,
message, message_len));
}
}
void WaitTillSent() override {
if (tid_ == std::this_thread::get_id()) writer_.Wait();
}
private:
std::thread::id tid_;
TestLogSinkWriter writer_;
};
static void TestLogSinkWaitTillSent() {
global_messages.clear();
{
using namespace std::chrono_literals;
TestWaitingLogSink sink;
LOG(INFO) << "Message 1";
std::this_thread::sleep_for(60ms);
LOG(ERROR) << "Message 2";
std::this_thread::sleep_for(60ms);
LOG(WARNING) << "Message 3";
std::this_thread::sleep_for(60ms);
}
for (auto& global_message : global_messages) {
LOG(INFO) << "Sink capture: " << global_message;
}
CHECK_EQ(global_messages.size(), 3UL);
}
TEST(Strerror, logging) {
int errcode = EINTR;
std::string msg = strerror(errcode);
const size_t buf_size = msg.size() + 1;
std::vector<char> buf(buf_size);
CHECK_EQ(posix_strerror_r(errcode, nullptr, 0), -1);
buf[0] = 'A';
CHECK_EQ(posix_strerror_r(errcode, buf.data(), 0), -1);
CHECK_EQ(buf[0], 'A');
CHECK_EQ(posix_strerror_r(errcode, nullptr, buf_size), -1);
#if defined(GLOG_OS_MACOSX) || defined(GLOG_OS_FREEBSD) || \
defined(GLOG_OS_OPENBSD)
CHECK_EQ(posix_strerror_r(errcode, buf.data(), 1), -1);
#else
CHECK_EQ(posix_strerror_r(errcode, buf.data(), 1), 0);
#endif
CHECK_STREQ(buf.data(), "");
CHECK_EQ(posix_strerror_r(errcode, buf.data(), buf_size), 0);
CHECK_STREQ(buf.data(), msg.c_str());
CHECK_EQ(msg, StrError(errcode));
}
#ifdef HAVE_LIB_GMOCK
TEST(DVLog, Basic) {
ScopedMockLog log;
# if defined(NDEBUG)
EXPECT_CALL(log, Log(_, _, _)).Times(0);
# else
EXPECT_CALL(log, Log(GLOG_INFO, __FILE__, "debug log"));
# endif
FLAGS_v = 1;
DVLOG(1) << "debug log";
}
TEST(DVLog, V0) {
ScopedMockLog log;
EXPECT_CALL(log, Log(_, _, _)).Times(0);
FLAGS_v = 0;
DVLOG(1) << "debug log";
}
TEST(LogAtLevel, Basic) {
ScopedMockLog log;
EXPECT_CALL(log, Log(GLOG_WARNING, StrNe(__FILE__), "function version"));
EXPECT_CALL(log, Log(GLOG_INFO, __FILE__, "macro version"));
LogSeverity severity = GLOG_WARNING;
LogAtLevel(severity, "function version");
severity = GLOG_INFO;
LOG_AT_LEVEL(severity) << "macro" << ' ' << "version";
}
TEST(TestExitOnDFatal, ToBeOrNotToBe) {
EXPECT_TRUE(base::internal::GetExitOnDFatal());
base::internal::SetExitOnDFatal(false);
EXPECT_FALSE(base::internal::GetExitOnDFatal());
{
ScopedMockLog log;
const LogSeverity severity =
# if defined(NDEBUG)
GLOG_ERROR;
# else
GLOG_FATAL;
# endif
EXPECT_CALL(log, Log(severity, __FILE__, "This should not be fatal"));
LOG(DFATAL) << "This should not be fatal";
}
base::internal::SetExitOnDFatal(true);
EXPECT_TRUE(base::internal::GetExitOnDFatal());
# ifdef GTEST_HAS_DEATH_TEST
EXPECT_DEBUG_DEATH({ LOG(DFATAL) << "This should be fatal in debug mode"; },
"This should be fatal in debug mode");
# endif
}
# ifdef HAVE_STACKTRACE
static void BacktraceAtHelper() {
LOG(INFO) << "Not me";
LOG(INFO) << "Backtrace me";
}
static int kBacktraceAtLine = __LINE__ - 2;
TEST(LogBacktraceAt, DoesNotBacktraceWhenDisabled) {
StrictMock<ScopedMockLog> log;
FLAGS_log_backtrace_at = "";
EXPECT_CALL(log, Log(_, _, "Backtrace me"));
EXPECT_CALL(log, Log(_, _, "Not me"));
BacktraceAtHelper();
}
TEST(LogBacktraceAt, DoesBacktraceAtRightLineWhenEnabled) {
StrictMock<ScopedMockLog> log;
char where[100];
std::snprintf(where, 100, "%s:%d", const_basename(__FILE__),
kBacktraceAtLine);
FLAGS_log_backtrace_at = where;
EXPECT_CALL(
log, Log(_, _,
AllOf(HasSubstr("stacktrace:"), HasSubstr("BacktraceAtHelper"),
HasSubstr("main"), HasSubstr("Backtrace me"))));
EXPECT_CALL(log, Log(_, _, "Not me"));
BacktraceAtHelper();
}
# endif
#endif
struct UserDefinedClass {
bool operator==(const UserDefinedClass&) const { return true; }
};
inline ostream& operator<<(ostream& out, const UserDefinedClass&) {
out << "OK";
return out;
}
TEST(UserDefinedClass, logging) {
UserDefinedClass u;
vector<string> buf;
LOG_STRING(INFO, &buf) << u;
CHECK_EQ(1UL, buf.size());
CHECK(buf[0].find("OK") != string::npos);
CHECK_EQ(u, u);
}
TEST(LogMsgTime, gmtoff) {
google::LogMessage log_obj(__FILE__, __LINE__);
std::chrono::seconds gmtoff = log_obj.time().gmtoffset();
using namespace std::chrono_literals;
constexpr std::chrono::hours utc_min_offset = -12h;
constexpr std::chrono::hours utc_max_offset = +14h;
EXPECT_TRUE((gmtoff >= utc_min_offset) && (gmtoff <= utc_max_offset));
}
TEST(EmailLogging, ValidAddress) {
FlagSaver saver;
FLAGS_logmailer = "/usr/bin/true";
EXPECT_TRUE(
SendEmail("[email protected]", "Example subject", "Example body"));
}
TEST(EmailLogging, MultipleAddresses) {
FlagSaver saver;
FLAGS_logmailer = "/usr/bin/true";
EXPECT_TRUE(SendEmail("[email protected],[email protected]", "Example subject",
"Example body"));
}
TEST(EmailLogging, InvalidAddress) {
FlagSaver saver;
FLAGS_logmailer = "/usr/bin/true";
EXPECT_FALSE(SendEmail("hello world@foo", "Example subject", "Example body"));
}
TEST(EmailLogging, MaliciousAddress) {
FlagSaver saver;
FLAGS_logmailer = "/usr/bin/true";
EXPECT_FALSE(
SendEmail("!/bin/[email protected]", "Example subject", "Example body"));
}
TEST(Logging, FatalThrow) {
auto const fail_func =
InstallFailureFunction(+[]()
#if defined(__has_attribute)
# if __has_attribute(noreturn)
__attribute__((noreturn))
# endif
#endif
{ throw std::logic_error{"fail"}; });
auto restore_fail = [fail_func] { InstallFailureFunction(fail_func); };
ScopedExit<decltype(restore_fail)> restore{restore_fail};
EXPECT_THROW({ LOG(FATAL) << "must throw to fail"; }, std::logic_error);
} | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/logging.cc | https://github.com/google/glog/blob/de309c08c05382fee0792380de7df1bd65332da2/src/logging_unittest.cc | de309c08c05382fee0792380de7df1bd65332da2 |
f41932d8-4e64-4b72-807e-e6ff75eaddc7 | cpp | tensorflow/tensorflow | colorspace_op | tensorflow/core/kernels/image/colorspace_op.cc | tensorflow/core/kernels/image/colorspace_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/colorspace_op.h"
#include <algorithm>
#include <cmath>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class RGBToHSVOp : public OpKernel {
public:
explicit RGBToHSVOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() >= 1,
errors::InvalidArgument("input must be at least 1D",
input.shape().DebugString()));
auto channels = input.dim_size(input.dims() - 1);
OP_REQUIRES(context, channels == 3,
errors::FailedPrecondition(
"input must have 3 channels but input only has ", channels,
" channels."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
typename TTypes<T, 2>::ConstTensor input_data = input.flat_inner_dims<T>();
typename TTypes<T, 2>::Tensor output_data = output->flat_inner_dims<T>();
Tensor trange;
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<T>::value,
TensorShape({input_data.dimension(0)}),
&trange));
typename TTypes<T, 1>::Tensor range(trange.tensor<T, 1>());
functor::RGBToHSV<Device, T>()(context->eigen_device<Device>(), input_data,
range, output_data);
}
};
template <typename Device, typename T>
class HSVToRGBOp : public OpKernel {
public:
explicit HSVToRGBOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() >= 1,
errors::InvalidArgument("input must be at least 1D",
input.shape().DebugString()));
auto channels = input.dim_size(input.dims() - 1);
OP_REQUIRES(context, channels == 3,
errors::FailedPrecondition(
"input must have 3 channels but input only has ", channels,
" channels."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
typename TTypes<T, 2>::ConstTensor input_data = input.flat_inner_dims<T>();
typename TTypes<T, 2>::Tensor output_data = output->flat_inner_dims<T>();
functor::HSVToRGB<Device, T>()(context->eigen_device<Device>(), input_data,
output_data);
}
};
#define REGISTER_CPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("RGBToHSV").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
RGBToHSVOp<CPUDevice, T>); \
template class RGBToHSVOp<CPUDevice, T>; \
REGISTER_KERNEL_BUILDER( \
Name("HSVToRGB").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
HSVToRGBOp<CPUDevice, T>); \
template class HSVToRGBOp<CPUDevice, T>;
TF_CALL_float(REGISTER_CPU);
TF_CALL_double(REGISTER_CPU);
TF_CALL_half(REGISTER_CPU);
TF_CALL_bfloat16(REGISTER_CPU);
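// GPU support: the extern template declarations below reference functor
// specializations instantiated elsewhere (typically a separately compiled
// .cu.cc translation unit); the kernels themselves are registered after.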
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
namespace functor {
#define DECLARE_GPU(T) \
template <> \
void RGBToHSV<GPUDevice, T>::operator()( \
const GPUDevice& d, TTypes<T, 2>::ConstTensor input_data, \
TTypes<T, 1>::Tensor range, TTypes<T, 2>::Tensor output_data); \
extern template struct RGBToHSV<GPUDevice, T>; \
template <> \
void HSVToRGB<GPUDevice, T>::operator()( \
const GPUDevice& d, TTypes<T, 2>::ConstTensor input_data, \
TTypes<T, 2>::Tensor output_data); \
extern template struct HSVToRGB<GPUDevice, T>;
TF_CALL_float(DECLARE_GPU);
TF_CALL_double(DECLARE_GPU);
}
#define REGISTER_GPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("RGBToHSV").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
RGBToHSVOp<GPUDevice, T>); \
REGISTER_KERNEL_BUILDER( \
Name("HSVToRGB").Device(DEVICE_GPU).TypeConstraint<T>("T"), \
HSVToRGBOp<GPUDevice, T>);
TF_CALL_float(REGISTER_GPU);
TF_CALL_double(REGISTER_GPU);
#endif
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
template <typename T>
class RGBToHSVOpTest : public OpsTestBase {
protected:
void MakeOp(DataType data_type) {
TF_EXPECT_OK(NodeDefBuilder("rgb_to_hsv_op", "RGBToHSV")
.Input(FakeInput(data_type))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
void CheckBlack(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0.0, 0.0, 0.0});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckGray(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.5, .5, .5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0.0, 0.0, .5});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckWhite(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {1, 1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0.0, 0.0, 1.0});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckRedMax(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.8f, .4f, .2f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * .2 / .6;
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckGreenMax(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.2f, .8f, .4f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * (2.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckBlueMax(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {.4f, .2f, .8f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * (4.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckNegativeDifference(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0, .1f, .2f});
TF_ASSERT_OK(RunOpKernel());
T expected_h = 1. / 6. * (4.0 + (-.1 / .2));
T expected_s = .2 / .2;
T expected_v = .2 / 1.;
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {expected_h, expected_s, expected_v});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
};
template <typename T>
class HSVToRGBOpTest : public OpsTestBase {
protected:
void MakeOp(DataType data_type) {
TF_EXPECT_OK(NodeDefBuilder("hsv_to_rgb_op", "HSVToRGB")
.Input(FakeInput(data_type))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
void CheckBlack(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0.0, 0.0, 0.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0, 0, 0});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckGray(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0.0, 0.0, .5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.5, .5, .5});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckWhite(DataType data_type) {
AddInputFromArray<T>(TensorShape({3}), {0.0, 0.0, 1.0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {1, 1, 1});
test::ExpectTensorEqual<T>(expected, *GetOutput(0));
}
void CheckRedMax(DataType data_type) {
T expected_h = 1. / 6. * .2 / .6;
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.8, .4, .2});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckGreenMax(DataType data_type) {
T expected_h = 1. / 6. * (2.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.2, .8, .4});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckBlueMax(DataType data_type) {
T expected_h = 1. / 6. * (4.0 + (.2 / .6));
T expected_s = .6 / .8;
T expected_v = .8 / 1.0;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {.4, .2, .8});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
void CheckNegativeDifference(DataType data_type) {
T expected_h = 1. / 6. * (4.0 + (-.1 / .2));
T expected_s = .2 / .2;
T expected_v = .2 / 1.;
AddInputFromArray<T>(TensorShape({3}),
{expected_h, expected_s, expected_v});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), data_type, TensorShape({3}));
test::FillValues<T>(&expected, {0, .1f, .2f});
test::ExpectTensorNear<T>(expected, *GetOutput(0), 1e-6);
}
};
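// Stamps out the same battery of color-space checks for each fixture and
// data-type pairing.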
#define TEST_COLORSPACE(test, dt) \
TEST_F(test, CheckBlack) { \
MakeOp(dt); \
CheckBlack(dt); \
} \
TEST_F(test, CheckGray) { \
MakeOp(dt); \
CheckGray(dt); \
} \
TEST_F(test, CheckWhite) { \
MakeOp(dt); \
CheckWhite(dt); \
} \
TEST_F(test, CheckRedMax) { \
MakeOp(dt); \
CheckRedMax(dt); \
} \
TEST_F(test, CheckGreenMax) { \
MakeOp(dt); \
CheckGreenMax(dt); \
} \
TEST_F(test, CheckBlueMax) { \
MakeOp(dt); \
CheckBlueMax(dt); \
} \
TEST_F(test, CheckNegativeDifference) { \
MakeOp(dt); \
CheckNegativeDifference(dt); \
}
typedef RGBToHSVOpTest<float> rgb_to_hsv_float;
typedef RGBToHSVOpTest<double> rgb_to_hsv_double;
TEST_COLORSPACE(rgb_to_hsv_float, DT_FLOAT);
TEST_COLORSPACE(rgb_to_hsv_double, DT_DOUBLE);
typedef HSVToRGBOpTest<float> hsv_to_rgb_float;
typedef HSVToRGBOpTest<double> hsv_to_rgb_double;
TEST_COLORSPACE(hsv_to_rgb_float, DT_FLOAT);
TEST_COLORSPACE(hsv_to_rgb_double, DT_DOUBLE);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/colorspace_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/colorspace_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0bf94df6-92bc-41e0-9aff-4d9ce0fb735c | cpp | tensorflow/tensorflow | sparse_xent_op | tensorflow/core/kernels/sparse_xent_op.cc | tensorflow/core/kernels/sparse_xent_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/sparse_xent_op.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
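// Returns InvalidArgument if any label falls outside [0, max_index); a single
// minmax_element pass over the labels finds an offending value to report.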
template <typename Index>
Status CheckInvalidLabelIndex(const Tensor& labels, int64_t max_index) {
if (labels.NumElements() == 0) return absl::OkStatus();
const auto label_values = labels.vec<Index>();
int64_t bad_index;
auto min_max_dim_value = std::minmax_element(
label_values.data(), label_values.data() + label_values.size());
if (*min_max_dim_value.first < 0 || *min_max_dim_value.second >= max_index) {
bad_index = (*min_max_dim_value.first < 0) ? *min_max_dim_value.first
: *min_max_dim_value.second;
return errors::InvalidArgument(
"Received a label value of ", bad_index,
" which is outside the valid range of [0, ", max_index,
"). Label values: ", labels.SummarizeValue(labels.NumElements()));
}
return absl::OkStatus();
}
template <typename Device, typename T, typename Index>
class SparseSoftmaxXentWithLogitsOp : public OpKernel {
public:
explicit SparseSoftmaxXentWithLogitsOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& logits = context->input(0);
const Tensor& labels = context->input(1);
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(logits.shape()),
errors::InvalidArgument("logits must be 2-D, but got shape ",
logits.shape().DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsVector(labels.shape()),
errors::InvalidArgument("labels must be 1-D, but got shape ",
labels.shape().DebugString()));
OP_REQUIRES(context, logits.dim_size(0) == labels.dim_size(0),
errors::InvalidArgument(
"logits and labels must have the same first dimension, "
"got logits shape ",
logits.shape().DebugString(), " and labels shape ",
labels.shape().DebugString()));
OP_REQUIRES(context, logits.dim_size(1) > 0,
errors::InvalidArgument(
"Must have at least one class, but got logits shape ",
logits.shape().DebugString()));
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"The GPU implementation of SparseSoftmaxCrossEntropyWithLogits"
" that would have been executed is not deterministic. Note that"
" the Python API uses an alternative, deterministic,"
" GPU-accelerated path when determinsim is enabled."));
}
Tensor scratch;
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
labels.shape(), &scratch));
Tensor* loss_out = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{1}, 0, labels.shape(), &loss_out));
Tensor* back_out = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0}, 1, logits.shape(), &back_out));
if (logits.dim_size(0) > 0) {
if (std::is_same<Device, CPUDevice>::value) {
OP_REQUIRES_OK(
context, CheckInvalidLabelIndex<Index>(labels, logits.dim_size(1)));
}
functor::SparseXentFunctor<Device, T, Index> functor;
functor(context, logits.matrix<T>(), labels.vec<Index>(),
scratch.vec<T>(), loss_out->vec<T>(), back_out->matrix<T>());
}
}
};
namespace functor {
template <typename T, typename Index>
struct SparseXentFunctor<CPUDevice, T, Index> {
void operator()(OpKernelContext* ctx, typename TTypes<T>::ConstMatrix logits,
typename TTypes<Index>::ConstVec labels,
typename TTypes<T>::Vec scratch, typename TTypes<T>::Vec loss,
typename TTypes<T>::Matrix backprop) {
SparseXentEigenImpl<CPUDevice, T, Index>::Compute(ctx, logits, labels,
scratch, loss, backprop);
}
};
}
#define REGISTER(Dev, T, Index) \
REGISTER_KERNEL_BUILDER( \
Name("SparseSoftmaxCrossEntropyWithLogits") \
.Device(DEVICE_##Dev) \
.TypeConstraint<T>("T") \
.TypeConstraint<Index>("Tlabels"), \
SparseSoftmaxXentWithLogitsOp<Dev##Device, T, Index>);
REGISTER(CPU, float, int32)
REGISTER(CPU, float, int64_t)
REGISTER(CPU, double, int32)
REGISTER(CPU, double, int64_t)
REGISTER(CPU, Eigen::half, int32)
REGISTER(CPU, Eigen::half, int64_t)
REGISTER(CPU, bfloat16, int32)
REGISTER(CPU, bfloat16, int64_t)
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
REGISTER(GPU, float, int32)
REGISTER(GPU, float, int64_t)
REGISTER(GPU, Eigen::half, int32)
REGISTER(GPU, Eigen::half, int64_t)
REGISTER(GPU, Eigen::bfloat16, int32)
REGISTER(GPU, Eigen::bfloat16, int64_t)
#endif
#undef REGISTER
} | #include <random>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/xent_op.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
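// Builds a benchmark graph that feeds random logits and uniformly sampled
// labels into SparseSoftmaxCrossEntropyWithLogits.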
template <class T>
static Graph* SparseXent(int batch_size, int num_classes, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor logits(type, TensorShape({batch_size, num_classes}));
logits.flat<T>().setRandom();
Tensor labels(DT_INT64, TensorShape({batch_size}));
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dist(0, num_classes - 1);
auto labels_t = labels.flat<int64_t>();
for (int i = 0; i < batch_size; ++i) {
labels_t(i) = dist(gen);
}
test::graph::Binary(g, "SparseSoftmaxCrossEntropyWithLogits",
test::graph::Constant(g, logits),
test::graph::Constant(g, labels));
return g;
}
#define BM_SparseXentDev(BATCH, CLASS, DEVICE, C_TYPE, TF_TYPE) \
static void BM_SparseXent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, SparseXent<C_TYPE>(BATCH, CLASS, TF_TYPE), \
false) \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * BATCH * CLASS; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(C_TYPE)); \
} \
BENCHMARK(BM_SparseXent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
BM_SparseXentDev(8, 1000000, gpu, float, DT_FLOAT);
BM_SparseXentDev(16, 10000, gpu, float, DT_FLOAT);
BM_SparseXentDev(16, 30000, gpu, float, DT_FLOAT);
BM_SparseXentDev(16, 100000, gpu, float, DT_FLOAT);
BM_SparseXentDev(32, 10000, gpu, float, DT_FLOAT);
BM_SparseXentDev(32, 30000, gpu, float, DT_FLOAT);
BM_SparseXentDev(32, 100000, gpu, float, DT_FLOAT);
BM_SparseXentDev(64, 10000, gpu, float, DT_FLOAT);
BM_SparseXentDev(64, 30000, gpu, float, DT_FLOAT);
BM_SparseXentDev(64, 100000, gpu, float, DT_FLOAT);
#endif
#define BM_SparseXentDev_CPU(C_TYPE, TF_TYPE) \
BM_SparseXentDev(8, 1000000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(16, 10000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(16, 100000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(32, 10000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(32, 100000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(64, 10000, cpu, C_TYPE, TF_TYPE); \
BM_SparseXentDev(64, 100000, cpu, C_TYPE, TF_TYPE);
BM_SparseXentDev_CPU(float, DT_FLOAT);
BM_SparseXentDev_CPU(bfloat16, DT_BFLOAT16);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_xent_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_xent_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eab9cf0e-5b44-4116-a994-bad172501059 | cpp | google/tensorstore | arena | tensorstore/internal/arena.h | tensorstore/internal/arena_test.cc | #ifndef TENSORSTORE_INTERNAL_ARENA_H_
#define TENSORSTORE_INTERNAL_ARENA_H_
#include <stddef.h>
#include <memory>
#include <new>
#include <utility>
#include "tensorstore/internal/exception_macros.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
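// Arena is a simple bump allocator: requests are carved from the
// caller-supplied initial buffer while space remains, and overflow to
// ::operator new otherwise. deallocate() is a no-op for buffer-backed
// pointers and releases heap-backed ones.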
class Arena {
public:
Arena() : remaining_bytes_(0) {}
explicit Arena(tensorstore::span<unsigned char> initial_buffer)
: initial_buffer_(initial_buffer),
remaining_bytes_(initial_buffer.size()) {}
template <typename T = unsigned char>
T* allocate(size_t n, size_t alignment = alignof(T)) {
size_t num_bytes;
if (MulOverflow(n, sizeof(T), &num_bytes)) {
TENSORSTORE_THROW_BAD_ALLOC;
}
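// std::align adjusts ptr and remaining_bytes_ in place when the tail of the
// initial buffer can satisfy the aligned request; otherwise fall back to the
// heap.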
void* ptr = static_cast<void*>(initial_buffer_.end() - remaining_bytes_);
if (std::align(alignment, num_bytes, ptr, remaining_bytes_)) {
remaining_bytes_ -= num_bytes;
} else {
ptr = ::operator new(num_bytes, std::align_val_t(alignment));
}
return static_cast<T*>(ptr);
}
template <typename T>
void deallocate(T* p, size_t n, size_t alignment = alignof(T)) {
if (static_cast<void*>(p) >= static_cast<void*>(initial_buffer_.data()) &&
static_cast<void*>(p + n) <=
static_cast<void*>(initial_buffer_.data() +
initial_buffer_.size())) {
return;
}
::operator delete(static_cast<void*>(p), n * sizeof(T),
std::align_val_t(alignment));
}
private:
tensorstore::span<unsigned char> initial_buffer_;
size_t remaining_bytes_;
};
template <typename T = unsigned char>
class ArenaAllocator {
public:
using value_type = T;
using pointer = T*;
using void_pointer = void*;
using const_void_pointer = const void*;
using reference = T&;
using const_pointer = const T*;
using const_reference = const T&;
using size_type = size_t;
using difference_type = ptrdiff_t;
template <typename U>
struct rebind {
using other = ArenaAllocator<U>;
};
ArenaAllocator(Arena* arena) : arena_(arena) {}
template <typename U>
ArenaAllocator(ArenaAllocator<U> other) : arena_(other.arena()) {}
T* allocate(size_t n) const { return arena_->allocate<T>(n); }
void deallocate(T* p, size_t n) const { arena_->deallocate(p, n); }
template <typename... Arg>
void construct(T* p, Arg&&... arg) {
new (p) T(std::forward<Arg>(arg)...);
}
void destroy(T* p) { p->~T(); }
Arena* arena() const { return arena_; }
friend bool operator==(ArenaAllocator a, ArenaAllocator b) {
return a.arena_ == b.arena_;
}
friend bool operator!=(ArenaAllocator a, ArenaAllocator b) {
return a.arena_ != b.arena_;
}
Arena* arena_;
};
}
}
#endif | #include "tensorstore/internal/arena.h"
#include <algorithm>
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::ArenaAllocator;
bool Contains(tensorstore::span<const unsigned char> buffer, void* ptr) {
return ptr >= buffer.data() && ptr < buffer.data() + buffer.size();
}
TEST(ArenaTest, Small) {
unsigned char buffer[1024];
Arena arena(buffer);
std::vector<int, ArenaAllocator<int>> vec(100, &arena);
EXPECT_EQ(&arena, vec.get_allocator().arena());
std::fill(vec.begin(), vec.end(), 5);
EXPECT_TRUE(Contains(buffer, vec.data()));
}
TEST(ArenaTest, Alignment) {
alignas(16) unsigned char buffer[1024];
for (int x = 1; x <= 16; x *= 2) {
Arena arena(buffer);
unsigned char* ptr1 = arena.allocate(1, 1);
EXPECT_EQ(&buffer[0], ptr1);
unsigned char* ptr2 = arena.allocate(1, x);
EXPECT_EQ(0u, reinterpret_cast<std::uintptr_t>(ptr2) % x);
EXPECT_EQ(&buffer[x], ptr2);
arena.deallocate(ptr1, 1, 1);
arena.deallocate(ptr2, 1, x);
}
{
Arena arena(buffer);
unsigned char* ptr = arena.allocate(2000, 16);
EXPECT_EQ(0u, reinterpret_cast<std::uintptr_t>(ptr) % 16);
arena.deallocate(ptr, 2000, 16);
}
}
TEST(ArenaTest, Large) {
unsigned char buffer[1024];
Arena arena(buffer);
std::vector<int, ArenaAllocator<int>> vec(&arena);
vec.resize(2000);
std::fill(vec.begin(), vec.end(), 7);
EXPECT_FALSE(Contains(buffer, vec.data()));
}
TEST(ArenaTest, MultipleSmall) {
unsigned char buffer[1024];
Arena arena(buffer);
std::vector<std::int32_t, ArenaAllocator<int>> vec(100, &arena);
EXPECT_EQ(&arena, vec.get_allocator().arena());
std::fill(vec.begin(), vec.end(), 5);
EXPECT_TRUE(Contains(buffer, vec.data()));
std::vector<std::int32_t, ArenaAllocator<int>> vec2(100, &arena);
std::fill(vec2.begin(), vec2.end(), 6);
EXPECT_TRUE(Contains(buffer, vec2.data()));
std::vector<std::int32_t, ArenaAllocator<int>> vec3(100, &arena);
std::fill(vec3.begin(), vec3.end(), 7);
EXPECT_FALSE(Contains(buffer, vec3.data()));
std::vector<std::int32_t, ArenaAllocator<int>> vec4(5, &arena);
std::fill(vec4.begin(), vec4.end(), 8);
EXPECT_TRUE(Contains(buffer, vec4.data()));
EXPECT_THAT(vec,
::testing::ElementsAreArray(std::vector<std::int32_t>(100, 5)));
EXPECT_THAT(vec2,
::testing::ElementsAreArray(std::vector<std::int32_t>(100, 6)));
EXPECT_THAT(vec3,
::testing::ElementsAreArray(std::vector<std::int32_t>(100, 7)));
EXPECT_THAT(vec4,
::testing::ElementsAreArray(std::vector<std::int32_t>(5, 8)));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/arena.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/arena_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
21b3e2ff-cbf9-4303-875a-b4942003e6eb | cpp | abseil/abseil-cpp | chi_square | absl/random/internal/chi_square.cc | absl/random/internal/chi_square_test.cc | #include "absl/random/internal/chi_square.h"
#include <cmath>
#include "absl/random/internal/distribution_test_util.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
namespace {
#if defined(__EMSCRIPTEN__)
inline double fma(double x, double y, double z) {
return (x * y) + z;
}
#endif
template <typename T, unsigned N>
inline T EvaluatePolynomial(T x, const T (&poly)[N]) {
#if !defined(__EMSCRIPTEN__)
using std::fma;
#endif
T p = poly[N - 1];
for (unsigned i = 2; i <= N; i++) {
p = fma(p, x, poly[N - i]);
}
return p;
}
static constexpr int kLargeDOF = 150;
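// POZ approximates the standard normal CDF Phi(z) with two polynomial fits
// (one for |z| < 2, one for 2 <= |z| < 6), saturating to 0 or 1 once
// |z| >= kZMax.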
double POZ(double z) {
static constexpr double kP1[] = {
0.797884560593, -0.531923007300, 0.319152932694,
-0.151968751364, 0.059054035642, -0.019198292004,
0.005198775019, -0.001075204047, 0.000124818987,
};
static constexpr double kP2[] = {
0.999936657524, 0.000535310849, -0.002141268741, 0.005353579108,
-0.009279453341, 0.011630447319, -0.010557625006, 0.006549791214,
-0.002034254874, -0.000794620820, 0.001390604284, -0.000676904986,
-0.000019538132, 0.000152529290, -0.000045255659,
};
const double kZMax = 6.0;
if (z == 0.0) {
return 0.5;
}
double x;
double y = 0.5 * std::fabs(z);
if (y >= (kZMax * 0.5)) {
x = 1.0;
} else if (y < 1.0) {
double w = y * y;
x = EvaluatePolynomial(w, kP1) * y * 2.0;
} else {
y -= 2.0;
x = EvaluatePolynomial(y, kP2);
}
return z > 0.0 ? ((x + 1.0) * 0.5) : ((1.0 - x) * 0.5);
}
double normal_survival(double z) {
static constexpr double kR[] = {
1.0, 0.196854, 0.115194, 0.000344, 0.019527,
};
double r = EvaluatePolynomial(z, kR);
r *= r;
return 0.5 / (r * r);
}
}
double ChiSquareValue(int dof, double p) {
static constexpr double kChiEpsilon =
0.000001;
static constexpr double kChiMax =
99999.0;
const double p_value = 1.0 - p;
if (dof < 1 || p_value > 1.0) {
return 0.0;
}
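// For dof > kLargeDOF, use the Wilson-Hilferty approximation: (X/dof)^(1/3)
// is approximately normal with mean 1 - 2/(9*dof) and variance 2/(9*dof).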
if (dof > kLargeDOF) {
const double z = InverseNormalSurvival(p_value);
const double mean = 1 - 2.0 / (9 * dof);
const double variance = 2.0 / (9 * dof);
if (variance != 0) {
double term = z * std::sqrt(variance) + mean;
return dof * (term * term * term);
}
}
if (p_value <= 0.0) return kChiMax;
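// ChiSquarePValue decreases monotonically in the statistic, so bisect
// [0, kChiMax] until the bracket is narrower than kChiEpsilon.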
double min_chisq = 0.0;
double max_chisq = kChiMax;
double current = dof / std::sqrt(p_value);
while ((max_chisq - min_chisq) > kChiEpsilon) {
if (ChiSquarePValue(current, dof) < p_value) {
max_chisq = current;
} else {
min_chisq = current;
}
current = (max_chisq + min_chisq) * 0.5;
}
return current;
}
double ChiSquarePValue(double chi_square, int dof) {
static constexpr double kLogSqrtPi =
0.5723649429247000870717135;
static constexpr double kInverseSqrtPi =
0.5641895835477562869480795;
if (dof > kLargeDOF) {
const double chi_square_scaled = std::pow(chi_square / dof, 1.0 / 3);
const double mean = 1 - 2.0 / (9 * dof);
const double variance = 2.0 / (9 * dof);
if (variance != 0) {
const double z = (chi_square_scaled - mean) / std::sqrt(variance);
if (z > 0) {
return normal_survival(z);
} else if (z < 0) {
return 1.0 - normal_survival(-z);
} else {
return 0.5;
}
}
}
if (chi_square <= 0.0) return 1.0;
if (dof < 1) return 0;
auto capped_exp = [](double x) { return x < -20 ? 0.0 : std::exp(x); };
static constexpr double kBigX = 20;
double a = 0.5 * chi_square;
const bool even = !(dof & 1);
const double y = capped_exp(-a);
double s = even ? y : (2.0 * POZ(-std::sqrt(chi_square)));
if (dof <= 2) {
return s;
}
chi_square = 0.5 * (dof - 1.0);
double z = (even ? 1.0 : 0.5);
if (a > kBigX) {
double e = (even ? 0.0 : kLogSqrtPi);
double c = std::log(a);
while (z <= chi_square) {
e = std::log(z) + e;
s += capped_exp(c * z - a - e);
z += 1.0;
}
return s;
}
double e = (even ? 1.0 : (kInverseSqrtPi / std::sqrt(a)));
double c = 0.0;
while (z <= chi_square) {
e = e * (a / z);
c = c + e;
z += 1.0;
}
return c * y + s;
}
}
ABSL_NAMESPACE_END
} | #include "absl/random/internal/chi_square.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <numeric>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/macros.h"
using absl::random_internal::ChiSquare;
using absl::random_internal::ChiSquarePValue;
using absl::random_internal::ChiSquareValue;
using absl::random_internal::ChiSquareWithExpected;
namespace {
TEST(ChiSquare, Value) {
struct {
int line;
double chi_square;
int df;
double confidence;
} const specs[] = {
{__LINE__, 0, 0, 0.01},
{__LINE__, 0.00016, 1, 0.01},
{__LINE__, 1.64650, 8, 0.01},
{__LINE__, 5.81221, 16, 0.01},
{__LINE__, 156.4319, 200, 0.01},
{__LINE__, 1121.3784, 1234, 0.01},
{__LINE__, 53557.1629, 54321, 0.01},
{__LINE__, 651662.6647, 654321, 0.01},
{__LINE__, 0, 0, 0.99},
{__LINE__, 6.635, 1, 0.99},
{__LINE__, 20.090, 8, 0.99},
{__LINE__, 32.000, 16, 0.99},
{__LINE__, 249.4456, 200, 0.99},
{__LINE__, 1131.1573, 1023, 0.99},
{__LINE__, 1352.5038, 1234, 0.99},
{__LINE__, 55090.7356, 54321, 0.99},
{__LINE__, 656985.1514, 654321, 0.99},
{__LINE__, 16.2659, 3, 0.999},
{__LINE__, 22.4580, 6, 0.999},
{__LINE__, 267.5409, 200, 0.999},
{__LINE__, 1168.5033, 1023, 0.999},
{__LINE__, 55345.1741, 54321, 0.999},
{__LINE__, 657861.7284, 654321, 0.999},
{__LINE__, 51.1772, 24, 0.999},
{__LINE__, 59.7003, 30, 0.999},
{__LINE__, 37.6984, 15, 0.999},
{__LINE__, 29.5898, 10, 0.999},
{__LINE__, 27.8776, 9, 0.999},
{__LINE__, 0.000157088, 1, 0.01},
{__LINE__, 5.31852, 2, 0.93},
{__LINE__, 1.92256, 4, 0.25},
{__LINE__, 10.7709, 13, 0.37},
{__LINE__, 26.2514, 17, 0.93},
{__LINE__, 36.4799, 29, 0.84},
{__LINE__, 25.818, 31, 0.27},
{__LINE__, 63.3346, 64, 0.50},
{__LINE__, 196.211, 128, 0.9999},
{__LINE__, 215.21, 243, 0.10},
{__LINE__, 285.393, 256, 0.90},
{__LINE__, 984.504, 1024, 0.1923},
{__LINE__, 2043.85, 2048, 0.4783},
{__LINE__, 48004.6, 48273, 0.194},
};
for (const auto& spec : specs) {
SCOPED_TRACE(spec.line);
const double val = ChiSquareValue(spec.df, spec.confidence);
const double err = std::max(5e-6, spec.chi_square / 5e3);
EXPECT_NEAR(spec.chi_square, val, err) << spec.line;
}
EXPECT_NEAR(49.2680, ChiSquareValue(100, 1e-6), 5);
EXPECT_NEAR(123.499, ChiSquareValue(200, 1e-6), 5);
EXPECT_NEAR(149.449, ChiSquareValue(100, 0.999), 0.01);
EXPECT_NEAR(161.318, ChiSquareValue(100, 0.9999), 0.01);
EXPECT_NEAR(172.098, ChiSquareValue(100, 0.99999), 0.01);
EXPECT_NEAR(381.426, ChiSquareValue(300, 0.999), 0.05);
EXPECT_NEAR(399.756, ChiSquareValue(300, 0.9999), 0.1);
EXPECT_NEAR(416.126, ChiSquareValue(300, 0.99999), 0.2);
}
TEST(ChiSquareTest, PValue) {
struct {
int line;
double pval;
double chi_square;
int df;
} static const specs[] = {
{__LINE__, 1, 0, 0},
{__LINE__, 0, 0.001, 0},
{__LINE__, 1.000, 0, 453},
{__LINE__, 0.134471, 7972.52, 7834},
{__LINE__, 0.203922, 28.32, 23},
{__LINE__, 0.737171, 48274, 48472},
{__LINE__, 0.444146, 583.1234, 579},
{__LINE__, 0.294814, 138.2, 130},
{__LINE__, 0.0816532, 12.63, 7},
{__LINE__, 0, 682.32, 67},
{__LINE__, 0.49405, 999, 999},
{__LINE__, 1.000, 0, 9999},
{__LINE__, 0.997477, 0.00001, 1},
{__LINE__, 0, 5823.21, 5040},
};
for (const auto& spec : specs) {
SCOPED_TRACE(spec.line);
const double pval = ChiSquarePValue(spec.chi_square, spec.df);
EXPECT_NEAR(spec.pval, pval, 1e-3);
}
}
TEST(ChiSquareTest, CalcChiSquare) {
struct {
int line;
std::vector<int> expected;
std::vector<int> actual;
} const specs[] = {
{__LINE__,
{56, 234, 76, 1, 546, 1, 87, 345, 1, 234},
{2, 132, 4, 43, 234, 8, 345, 8, 236, 56}},
{__LINE__,
{123, 36, 234, 367, 345, 2, 456, 567, 234, 567},
{123, 56, 2345, 8, 345, 8, 2345, 23, 48, 267}},
{__LINE__,
{123, 234, 345, 456, 567, 678, 789, 890, 98, 76},
{123, 234, 345, 456, 567, 678, 789, 890, 98, 76}},
{__LINE__, {3, 675, 23, 86, 2, 8, 2}, {456, 675, 23, 86, 23, 65, 2}},
{__LINE__, {1}, {23}},
};
for (const auto& spec : specs) {
SCOPED_TRACE(spec.line);
double chi_square = 0;
for (int i = 0; i < spec.expected.size(); ++i) {
const double diff = spec.actual[i] - spec.expected[i];
chi_square += (diff * diff) / spec.expected[i];
}
EXPECT_NEAR(chi_square,
ChiSquare(std::begin(spec.actual), std::end(spec.actual),
std::begin(spec.expected), std::end(spec.expected)),
1e-5);
}
}
TEST(ChiSquareTest, CalcChiSquareInt64) {
const int64_t data[3] = {910293487, 910292491, 910216780};
double sum = std::accumulate(std::begin(data), std::end(data), double{0});
size_t n = std::distance(std::begin(data), std::end(data));
double a = ChiSquareWithExpected(std::begin(data), std::end(data), sum / n);
EXPECT_NEAR(4.254101, a, 1e-6);
double b =
ChiSquareWithExpected(std::begin(data), std::end(data), 910267586.0);
EXPECT_NEAR(4.254101, b, 1e-6);
}
TEST(ChiSquareTest, TableData) {
const double data[100][5] = {
{2.706, 3.841, 5.024, 6.635, 10.828},
{4.605, 5.991, 7.378, 9.210, 13.816},
{6.251, 7.815, 9.348, 11.345, 16.266},
{7.779, 9.488, 11.143, 13.277, 18.467},
{9.236, 11.070, 12.833, 15.086, 20.515},
{10.645, 12.592, 14.449, 16.812, 22.458},
{12.017, 14.067, 16.013, 18.475, 24.322},
{13.362, 15.507, 17.535, 20.090, 26.125},
{14.684, 16.919, 19.023, 21.666, 27.877},
{15.987, 18.307, 20.483, 23.209, 29.588},
{17.275, 19.675, 21.920, 24.725, 31.264},
{18.549, 21.026, 23.337, 26.217, 32.910},
{19.812, 22.362, 24.736, 27.688, 34.528},
{21.064, 23.685, 26.119, 29.141, 36.123},
{22.307, 24.996, 27.488, 30.578, 37.697},
{23.542, 26.296, 28.845, 32.000, 39.252},
{24.769, 27.587, 30.191, 33.409, 40.790},
{25.989, 28.869, 31.526, 34.805, 42.312},
{27.204, 30.144, 32.852, 36.191, 43.820},
{28.412, 31.410, 34.170, 37.566, 45.315},
{29.615, 32.671, 35.479, 38.932, 46.797},
{30.813, 33.924, 36.781, 40.289, 48.268},
{32.007, 35.172, 38.076, 41.638, 49.728},
{33.196, 36.415, 39.364, 42.980, 51.179},
{34.382, 37.652, 40.646, 44.314, 52.620},
{35.563, 38.885, 41.923, 45.642, 54.052},
{36.741, 40.113, 43.195, 46.963, 55.476},
{37.916, 41.337, 44.461, 48.278, 56.892},
{39.087, 42.557, 45.722, 49.588, 58.301},
{40.256, 43.773, 46.979, 50.892, 59.703},
{41.422, 44.985, 48.232, 52.191, 61.098},
{42.585, 46.194, 49.480, 53.486, 62.487},
{43.745, 47.400, 50.725, 54.776, 63.870},
{44.903, 48.602, 51.966, 56.061, 65.247},
{46.059, 49.802, 53.203, 57.342, 66.619},
{47.212, 50.998, 54.437, 58.619, 67.985},
{48.363, 52.192, 55.668, 59.893, 69.347},
{49.513, 53.384, 56.896, 61.162, 70.703},
{50.660, 54.572, 58.120, 62.428, 72.055},
{51.805, 55.758, 59.342, 63.691, 73.402},
{52.949, 56.942, 60.561, 64.950, 74.745},
{54.090, 58.124, 61.777, 66.206, 76.084},
{55.230, 59.304, 62.990, 67.459, 77.419},
{56.369, 60.481, 64.201, 68.710, 78.750},
{57.505, 61.656, 65.410, 69.957, 80.077},
{58.641, 62.830, 66.617, 71.201, 81.400},
{59.774, 64.001, 67.821, 72.443, 82.720},
{60.907, 65.171, 69.023, 73.683, 84.037},
{62.038, 66.339, 70.222, 74.919, 85.351},
{63.167, 67.505, 71.420, 76.154, 86.661},
{64.295, 68.669, 72.616, 77.386, 87.968},
{65.422, 69.832, 73.810, 78.616, 89.272},
{66.548, 70.993, 75.002, 79.843, 90.573},
{67.673, 72.153, 76.192, 81.069, 91.872},
{68.796, 73.311, 77.380, 82.292, 93.168},
{69.919, 74.468, 78.567, 83.513, 94.461},
{71.040, 75.624, 79.752, 84.733, 95.751},
{72.160, 76.778, 80.936, 85.950, 97.039},
{73.279, 77.931, 82.117, 87.166, 98.324},
{74.397, 79.082, 83.298, 88.379, 99.607},
{75.514, 80.232, 84.476, 89.591, 100.888},
{76.630, 81.381, 85.654, 90.802, 102.166},
{77.745, 82.529, 86.830, 92.010, 103.442},
{78.860, 83.675, 88.004, 93.217, 104.716},
{79.973, 84.821, 89.177, 94.422, 105.988},
{81.085, 85.965, 90.349, 95.626, 107.258},
{82.197, 87.108, 91.519, 96.828, 108.526},
{83.308, 88.250, 92.689, 98.028, 109.791},
{84.418, 89.391, 93.856, 99.228, 111.055},
{85.527, 90.531, 95.023, 100.425, 112.317},
{86.635, 91.670, 96.189, 101.621, 113.577},
{87.743, 92.808, 97.353, 102.816, 114.835},
{88.850, 93.945, 98.516, 104.010, 116.092},
{89.956, 95.081, 99.678, 105.202, 117.346},
{91.061, 96.217, 100.839, 106.393, 118.599},
{92.166, 97.351, 101.999, 107.583, 119.850},
{93.270, 98.484, 103.158, 108.771, 121.100},
{94.374, 99.617, 104.316, 109.958, 122.348},
{95.476, 100.749, 105.473, 111.144, 123.594},
{96.578, 101.879, 106.629, 112.329, 124.839},
{97.680, 103.010, 107.783, 113.512, 126.083},
{98.780, 104.139, 108.937, 114.695, 127.324},
{99.880, 105.267, 110.090, 115.876, 128.565},
{100.980, 106.395, 111.242, 117.057, 129.804},
{102.079, 107.522, 112.393, 118.236, 131.041},
{103.177, 108.648, 113.544, 119.414, 132.277},
{104.275, 109.773, 114.693, 120.591, 133.512},
{105.372, 110.898, 115.841, 121.767, 134.746},
{106.469, 112.022, 116.989, 122.942, 135.978},
{107.565, 113.145, 118.136, 124.116, 137.208},
{108.661, 114.268, 119.282, 125.289, 138.438},
{109.756, 115.390, 120.427, 126.462, 139.666},
{110.850, 116.511, 121.571, 127.633, 140.893},
{111.944, 117.632, 122.715, 128.803, 142.119},
{113.038, 118.752, 123.858, 129.973, 143.344},
{114.131, 119.871, 125.000, 131.141, 144.567},
{115.223, 120.990, 126.141, 132.309, 145.789},
{116.315, 122.108, 127.282, 133.476, 147.010},
{117.407, 123.225, 128.422, 134.642, 148.230},
{118.498, 124.342, 129.561, 135.807, 149.449}
};
for (int i = 0; i < ABSL_ARRAYSIZE(data); i++) {
const double E = 0.0001;
EXPECT_NEAR(ChiSquarePValue(data[i][0], i + 1), 0.10, E)
<< i << " " << data[i][0];
EXPECT_NEAR(ChiSquarePValue(data[i][1], i + 1), 0.05, E)
<< i << " " << data[i][1];
EXPECT_NEAR(ChiSquarePValue(data[i][2], i + 1), 0.025, E)
<< i << " " << data[i][2];
EXPECT_NEAR(ChiSquarePValue(data[i][3], i + 1), 0.01, E)
<< i << " " << data[i][3];
EXPECT_NEAR(ChiSquarePValue(data[i][4], i + 1), 0.001, E)
<< i << " " << data[i][4];
const double F = 0.1;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.90), data[i][0], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.95), data[i][1], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.975), data[i][2], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.99), data[i][3], F) << i;
EXPECT_NEAR(ChiSquareValue(i + 1, 0.999), data[i][4], F) << i;
}
}
TEST(ChiSquareTest, ChiSquareTwoIterator) {
const int counts[10] = {6, 6, 18, 33, 38, 38, 28, 21, 9, 3};
const double expected[10] = {4.6, 8.8, 18.4, 30.0, 38.2,
38.2, 30.0, 18.4, 8.8, 4.6};
double chi_square = ChiSquare(std::begin(counts), std::end(counts),
std::begin(expected), std::end(expected));
EXPECT_NEAR(chi_square, 2.69, 0.001);
const int dof = 7;
double p_value_05 = ChiSquarePValue(14.067, dof);
EXPECT_NEAR(p_value_05, 0.05, 0.001);
double p_actual = ChiSquarePValue(chi_square, dof);
EXPECT_GT(p_actual, 0.05);
}
TEST(ChiSquareTest, DiceRolls) {
const int rolls[6] = {22, 11, 17, 14, 20, 18};
double sum = std::accumulate(std::begin(rolls), std::end(rolls), double{0});
size_t n = std::distance(std::begin(rolls), std::end(rolls));
double a = ChiSquareWithExpected(std::begin(rolls), std::end(rolls), sum / n);
EXPECT_NEAR(a, 4.70588, 1e-5);
EXPECT_LT(a, ChiSquareValue(4, 0.95));
double p_a = ChiSquarePValue(a, 4);
EXPECT_NEAR(p_a, 0.318828, 1e-5);
double b = ChiSquareWithExpected(std::begin(rolls), std::end(rolls), 17.0);
EXPECT_NEAR(b, 4.70588, 1e-5);
EXPECT_LT(b, ChiSquareValue(5, 0.95));
double p_b = ChiSquarePValue(b, 5);
EXPECT_NEAR(p_b, 0.4528180, 1e-5);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/chi_square.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/chi_square_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
b607fa56-9dba-4632-8c78-0e876e514ba2 | cpp | tensorflow/tensorflow | random_ops | tensorflow/compiler/tf2xla/kernels/random_ops.cc | tensorflow/lite/kernels/random_ops_test.cc | #include <vector>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "tensorflow/compiler/tf2xla/lib/broadcast.h"
#include "tensorflow/compiler/tf2xla/lib/random.h"
#include "tensorflow/compiler/tf2xla/mlir_xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/dynamic_shaped_ops.h"
#include "xla/hlo/builder/value_inference.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
class RandomUniformOp : public XlaOpKernel {
public:
explicit RandomUniformOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
TensorShape shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(
0, &shape, xla::ValueInferenceMode::kUpperBound));
const DataType dtype = output_type(0);
xla::Shape xla_shape;
OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, shape, &xla_shape));
xla::XlaBuilder* b = ctx->builder();
LOG_FIRST_N(WARNING, 1)
<< "Warning: Using tf.random.uniform with XLA compilation will ignore "
"seeds; consider using tf.random.stateless_uniform instead if "
"reproducible behavior is desired. "
<< name();
xla::XlaOp result = xla::RngUniform(XlaHelpers::Zero(b, dtype),
XlaHelpers::One(b, dtype), xla_shape);
auto result_status_or =
SetAllDimensionSizes(&ctx->value_inference(), result, ctx->Input(0));
OP_REQUIRES_OK(ctx, result_status_or.status());
result = result_status_or.value();
ctx->SetOutput(0, result);
}
private:
RandomUniformOp(const RandomUniformOp&) = delete;
void operator=(const RandomUniformOp&) = delete;
};
REGISTER_XLA_OP(Name("RandomUniform").CompileTimeConstantInput("shape"),
RandomUniformOp);
REGISTER_XLA_OP(Name("RandomShuffle"), MlirXlaOpKernel);
class RandomUniformIntOp : public XlaOpKernel {
public:
explicit RandomUniformIntOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
TensorShape shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(0, &shape));
xla::Shape xla_shape;
OP_REQUIRES_OK(ctx,
TensorShapeToXLAShape(input_type(1), shape, &xla_shape));
const TensorShape minval_shape = ctx->InputShape(1);
const TensorShape maxval_shape = ctx->InputShape(2);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(minval_shape),
errors::InvalidArgument("minval must be 0-D, got shape ",
minval_shape.DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(maxval_shape),
errors::InvalidArgument("maxval must be 0-D, got shape ",
maxval_shape.DebugString()));
auto minval = ctx->Input(1);
auto maxval = ctx->Input(2);
LOG_FIRST_N(WARNING, 1)
<< "Warning: Using tf.random.uniform with XLA compilation will ignore "
"seeds; consider using tf.random.stateless_uniform instead if "
"reproducible behavior is desired. "
<< name();
ctx->SetOutput(0, xla::RngUniform(minval, maxval, xla_shape));
}
private:
RandomUniformIntOp(const RandomUniformIntOp&) = delete;
void operator=(const RandomUniformIntOp&) = delete;
};
REGISTER_XLA_OP(Name("RandomUniformInt").CompileTimeConstantInput("shape"),
RandomUniformIntOp);
class RandomStandardNormalOp : public XlaOpKernel {
public:
explicit RandomStandardNormalOp(OpKernelConstruction* ctx)
: XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
const DataType dtype = output_type(0);
TensorShape shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(
0, &shape, xla::ValueInferenceMode::kUpperBound));
xla::Shape xla_shape;
OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, shape, &xla_shape));
xla::XlaBuilder* b = ctx->builder();
xla::XlaOp result = xla::RngNormal(XlaHelpers::Zero(b, dtype),
XlaHelpers::One(b, dtype), xla_shape);
auto result_status_or =
SetAllDimensionSizes(&ctx->value_inference(), result, ctx->Input(0));
OP_REQUIRES_OK(ctx, result_status_or.status());
result = result_status_or.value();
ctx->SetOutput(0, result);
}
private:
RandomStandardNormalOp(const RandomStandardNormalOp&) = delete;
void operator=(const RandomStandardNormalOp&) = delete;
};
REGISTER_XLA_OP(Name("RandomStandardNormal").CompileTimeConstantInput("shape"),
RandomStandardNormalOp);
class TruncatedNormalOp : public XlaOpKernel {
public:
explicit TruncatedNormalOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
const DataType dtype = output_type(0);
TensorShape shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(0, &shape));
xla::Shape xla_shape;
OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, shape, &xla_shape));
xla::XlaBuilder* b = ctx->builder();
xla::XlaOp one = xla::One(b, xla_shape.element_type());
xla::XlaOp min_positive =
xla::MinPositiveNormalValue(b, xla_shape.element_type());
LOG_FIRST_N(WARNING, 1)
<< "Warning: Using tf.random.truncated_normal with XLA "
"compilation will ignore seeds; consider using "
"tf.random.stateless_truncated_normal instead if "
"reproducible behavior is desired. "
<< name();
auto uniform = xla::RngUniform(min_positive, one, xla_shape);
ctx->SetOutput(0, TruncatedNormal(uniform));
}
};
REGISTER_XLA_OP(Name("TruncatedNormal")
.CompileTimeConstantInput("shape")
.TypeConstraint("dtype", {DT_FLOAT, DT_DOUBLE}),
TruncatedNormalOp);
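// Broadcasts a per-batch parameter vector to the full output shape: broadcast
// against the trailing dims with the batch dim moved last, then transpose the
// batch dimension back to the front.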
static absl::StatusOr<xla::XlaOp> BroadcastParameters(
xla::XlaOp params, TensorShape& output_shape) {
int rank = output_shape.dims();
std::vector<int64_t> bcast_shape;
for (int i = 1; i < rank; ++i) {
bcast_shape.push_back(output_shape.dim_size(i));
}
bcast_shape.push_back(output_shape.dim_size(0));
TF_ASSIGN_OR_RETURN(xla::XlaOp bcast_params,
BroadcastTo(params, bcast_shape));
std::vector<int64_t> permutation;
permutation.push_back(rank - 1);
for (int i = 0; i < rank - 1; ++i) {
permutation.push_back(i);
}
return xla::Transpose(bcast_params, permutation);
}
class ParameterizedTruncatedNormalOp : public XlaOpKernel {
public:
explicit ParameterizedTruncatedNormalOp(OpKernelConstruction* ctx)
: XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
const DataType dtype = output_type(0);
TensorShape shape;
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsShape(0, &shape));
xla::Shape xla_shape;
OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(dtype, shape, &xla_shape));
OP_REQUIRES(ctx, xla_shape.rank() >= 1,
errors::InvalidArgument(
"shape parameter must have rank >= 1, received (",
xla::ShapeUtil::HumanString(xla_shape), ")"));
xla::XlaBuilder* b = ctx->builder();
xla::XlaOp one = xla::One(b, xla_shape.element_type());
xla::XlaOp min_positive =
xla::MinPositiveNormalValue(b, xla_shape.element_type());
LOG_FIRST_N(WARNING, 1)
<< "Warning: Using tf.random.truncated_normal with XLA "
"compilation will ignore seeds; consider using "
"tf.random.stateless_truncated_normal instead if "
"reproducible behavior is desired. "
<< name();
xla::XlaOp uniform = xla::RngUniform(min_positive, one, xla_shape);
auto result = b->ReportErrorOrReturn([&]() -> absl::StatusOr<xla::XlaOp> {
TF_ASSIGN_OR_RETURN(xla::XlaOp means,
BroadcastParameters(ctx->Input(1), shape));
TF_ASSIGN_OR_RETURN(xla::XlaOp stddevs,
BroadcastParameters(ctx->Input(2), shape));
TF_ASSIGN_OR_RETURN(xla::XlaOp minvals,
BroadcastParameters(ctx->Input(3), shape));
TF_ASSIGN_OR_RETURN(xla::XlaOp maxvals,
BroadcastParameters(ctx->Input(4), shape));
return ParameterizedTruncatedNormal(uniform, means, stddevs, minvals,
maxvals);
});
ctx->SetOutput(0, result);
}
};
REGISTER_XLA_OP(Name("ParameterizedTruncatedNormal")
.CompileTimeConstantInput("shape")
.TypeConstraint("dtype", {DT_FLOAT, DT_DOUBLE}),
ParameterizedTruncatedNormalOp);
}
} | #include <algorithm>
#include <cmath>
#include <initializer_list>
#include <limits>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
enum class InputType {
kConst = 0,
kDynamic = 1,
};
class RandomOpModel : public SingleOpModel {
public:
RandomOpModel(BuiltinOperator op_code, InputType input_type,
const std::initializer_list<int32_t>& shape,
int32_t seed = 0, int32_t seed2 = 0) {
bool is_input_const = (input_type == InputType::kConst);
if (is_input_const) {
input_ = AddConstInput(TensorType_INT32, shape,
{static_cast<int32_t>(shape.size())});
} else {
input_ =
AddInput({TensorType_INT32, {static_cast<int32_t>(shape.size())}});
}
output_ = AddOutput({TensorType_FLOAT32, {}});
SetBuiltinOp(op_code, BuiltinOptions_RandomOptions,
CreateRandomOptions(builder_, seed, seed2).Union());
BuildInterpreter({GetShape(input_)});
if (!is_input_const) {
PopulateTensor<int32_t>(input_, std::vector<int32_t>(shape));
}
}
int input() { return input_; }
int output() { return output_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
};
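// Wraps the MULTINOMIAL op: logits have shape [num_batches, num_classes] and
// each invocation draws num_samples class indices per batch row.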
class MultinomialOpModel : public SingleOpModel {
public:
MultinomialOpModel(InputType input_type,
const std::initializer_list<float>& logits,
int num_batches, int num_classes, int num_samples,
int32_t seed = 0, int32_t seed2 = 0,
tflite::TensorType output_type = TensorType_INT64) {
bool is_input_const = (input_type == InputType::kConst);
auto logits_shape = {num_batches, num_classes};
if (is_input_const) {
logits_ = AddConstInput(TensorType_FLOAT32, logits, logits_shape);
} else {
logits_ = AddInput({TensorType_FLOAT32, logits_shape});
}
num_samples_ = AddConstInput(TensorType_INT32, {num_samples}, {});
output_ = AddOutput({output_type, {}});
SetBuiltinOp(BuiltinOperator_MULTINOMIAL, BuiltinOptions_RandomOptions,
CreateRandomOptions(builder_, seed, seed2).Union());
BuildInterpreter({GetShape(logits_), GetShape(num_samples_)});
if (!is_input_const) {
PopulateTensor<float>(logits_, std::vector<float>(logits));
}
}
int logits() { return logits_; }
int num_samples() { return num_samples_; }
int output() { return output_; }
std::vector<int64_t> GetOutput() { return ExtractVector<int64_t>(output_); }
std::vector<int32_t> GetInt32Output() {
return ExtractVector<int32_t>(output_);
}
private:
int logits_;
int num_samples_;
int output_;
};
class TestSuite : public testing::TestWithParam<std::tuple<
BuiltinOperator, InputType>> {
};
TEST_P(TestSuite, NonDeterministicOutputWithSeedsEqualToZero)
{
BuiltinOperator op_code = std::get<0>(GetParam());
InputType input_type = std::get<1>(GetParam());
RandomOpModel m1(op_code, input_type,
{100, 50, 5}, 0, 0);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<float> output1a = m1.GetOutput();
EXPECT_EQ(output1a.size(), 100 * 50 * 5);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<float> output1b = m1.GetOutput();
EXPECT_NE(output1a, output1b);
RandomOpModel m2(op_code, input_type,
{100, 50, 5}, 0, 0);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<float> output2a = m2.GetOutput();
EXPECT_EQ(output2a.size(), 100 * 50 * 5);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<float> output2b = m2.GetOutput();
EXPECT_NE(output2a, output2b);
EXPECT_NE(output1a, output2a);
EXPECT_NE(output1b, output2b);
}
TEST_P(TestSuite, DeterministicOutputWithNonZeroSeeds) {
BuiltinOperator op_code = std::get<0>(GetParam());
InputType input_type = std::get<1>(GetParam());
RandomOpModel m1(op_code, input_type, {100, 50, 5},
1234, 5678);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<float> output1a = m1.GetOutput();
EXPECT_EQ(output1a.size(), 100 * 50 * 5);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<float> output1b = m1.GetOutput();
EXPECT_NE(output1a, output1b);
RandomOpModel m2(op_code, input_type, {100, 50, 5},
1234, 5678);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<float> output2a = m2.GetOutput();
EXPECT_EQ(output2a.size(), 100 * 50 * 5);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<float> output2b = m2.GetOutput();
EXPECT_NE(output2a, output2b);
EXPECT_EQ(output1a, output2a);
EXPECT_EQ(output1b, output2b);
}
INSTANTIATE_TEST_SUITE_P(
RandomOpTest, TestSuite,
testing::Combine(
testing::Values(BuiltinOperator_RANDOM_UNIFORM,
BuiltinOperator_RANDOM_STANDARD_NORMAL),
testing::Values(InputType::kConst, InputType::kDynamic)),
[](const testing::TestParamInfo<TestSuite::ParamType>& info) {
std::string name = absl::StrCat(
std::get<0>(info.param) == BuiltinOperator_RANDOM_UNIFORM ?
"_RandomUniformOp" : "_RandomStandardNormalOp",
std::get<1>(info.param) == InputType::kConst ?
"_ConstInput" : "_DynamicInput");
return name;
}
);
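// Statistical sanity checks: a Uniform[0, 1) sample has mean 1/2 and variance
// 1/12, and the standard-normal sampler should match mean 0 and variance 1;
// both tests allow 0.05 absolute slack.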
TEST(RandomUniformOpTest, OutputMeanAndVariance) {
RandomOpModel m(BuiltinOperator_RANDOM_UNIFORM,
InputType::kConst,
{100, 50, 5}, 1234, 5678);
const std::vector<float> output_data(100 * 50 * 5,
std::numeric_limits<float>::infinity());
m.PopulateTensor(m.output(), output_data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), 100 * 50 * 5);
double sum = 0;
for (const auto r : output) {
sum += r;
}
double mean = sum / output.size();
ASSERT_LT(std::abs(mean - 0.5), 0.05);
double sum_squared = 0;
for (const auto r : output) {
sum_squared += std::pow(r - mean, 2);
}
double var = sum_squared / output.size();
EXPECT_LT(std::abs(1. / 12 - var), 0.05);
}
TEST(RandomStandardNormalOpTest, OutputMeanAndVariance) {
RandomOpModel m(BuiltinOperator_RANDOM_STANDARD_NORMAL,
InputType::kConst,
{100, 50, 5}, 1234, 5678);
const std::vector<float> output_data(100 * 50 * 5,
std::numeric_limits<float>::infinity());
m.PopulateTensor(m.output(), output_data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), 100 * 50 * 5);
double sum = 0;
for (const auto r : output) {
sum += r;
}
double mean = sum / output.size();
ASSERT_LT(std::abs(mean), 0.05);
double sum_squared = 0;
for (const auto r : output) {
sum_squared += std::pow(r - mean, 2);
}
double var = sum_squared / output.size();
EXPECT_LT(std::abs(1.0 - var), 0.05);
}
class MultinomialOpTestSuite : public testing::TestWithParam<InputType> {};
TEST_P(MultinomialOpTestSuite, NonDeterministicOutputWithSeedsEqualToZero) {
const std::initializer_list<float> kLogits = {logf(0.3f), logf(0.7f)};
const int kNumBatches = 1;
const int kNumClasses = 2;
const int kNumSamples = 30;
MultinomialOpModel m1(GetParam(), kLogits, kNumBatches, kNumClasses,
kNumSamples, 0, 0);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<int64_t> output1a = m1.GetOutput();
EXPECT_EQ(output1a.size(), kNumSamples);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<int64_t> output1b = m1.GetOutput();
EXPECT_NE(output1a, output1b);
MultinomialOpModel m2(GetParam(), kLogits, kNumBatches, kNumClasses,
kNumSamples, 0, 0);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<int64_t> output2a = m2.GetOutput();
EXPECT_EQ(output2a.size(), kNumSamples);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<int64_t> output2b = m2.GetOutput();
EXPECT_NE(output2a, output2b);
EXPECT_NE(output1a, output2a);
EXPECT_NE(output1b, output2b);
}
TEST_P(MultinomialOpTestSuite, DeterministicOutputWithNonZeroSeeds) {
const std::initializer_list<float> kLogits = {logf(0.3f), logf(0.7f)};
const int kNumBatches = 1;
const int kNumClasses = 2;
const int kNumSamples = 30;
MultinomialOpModel m1(GetParam(), kLogits, kNumBatches, kNumClasses,
kNumSamples, 123, 456);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<int64_t> output1a = m1.GetOutput();
EXPECT_EQ(output1a.size(), kNumBatches * kNumSamples);
ASSERT_EQ(m1.Invoke(), kTfLiteOk);
std::vector<int64_t> output1b = m1.GetOutput();
EXPECT_EQ(output1b.size(), kNumBatches * kNumSamples);
EXPECT_NE(output1a, output1b);
MultinomialOpModel m2(GetParam(), kLogits, kNumBatches, kNumClasses,
kNumSamples, 123, 456);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<int64_t> output2a = m2.GetOutput();
EXPECT_EQ(output2a.size(), kNumBatches * kNumSamples);
ASSERT_EQ(m2.Invoke(), kTfLiteOk);
std::vector<int64_t> output2b = m2.GetOutput();
EXPECT_EQ(output2b.size(), kNumBatches * kNumSamples);
EXPECT_NE(output2a, output2b);
EXPECT_EQ(output1a, output2a);
EXPECT_EQ(output1b, output2b);
}
INSTANTIATE_TEST_SUITE_P(
RandomOpTest2, MultinomialOpTestSuite,
testing::Values(InputType::kConst, InputType::kDynamic),
[](const testing::TestParamInfo<MultinomialOpTestSuite::ParamType>& info) {
std::string name = absl::StrCat(
"_MultinomialOp",
info.param == InputType::kConst ? "_ConstInput" : "_DynamicInput");
return name;
});
TEST(MultinomialTest, ValidateTFLiteOutputisTheSameAsTFOutput_OutputTypeInt32) {
const std::initializer_list<float> kLogits = {-1.2039728, -0.35667497};
const int kNumBatches = 1;
const int kNumClasses = 2;
const int kNumSamples = 10;
MultinomialOpModel m(InputType::kConst, kLogits, kNumBatches,
kNumClasses, kNumSamples, 1234, 5678,
TensorType_INT32);
const std::vector<std::vector<int32_t>> expected_outputs = {
{1, 0, 1, 0, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 0, 1, 1, 0, 0, 0, 1},
{0, 1, 1, 0, 1, 1, 1, 1, 0, 1},
{1, 1, 1, 0, 1, 0, 0, 0, 1, 0}};
for (int i = 0; i < expected_outputs.size(); i++) {
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetInt32Output();
EXPECT_EQ(output.size(), kNumBatches * kNumSamples);
EXPECT_EQ(expected_outputs[i], output);
}
}
TEST(MultinomialTest, ValidateTFLiteOutputisTheSameAsTFOutput) {
const std::initializer_list<float> kLogits = {-1.609438, -1.2039728,
-0.6931472};
const int kNumBatches = 1;
const int kNumClasses = 3;
const int kNumSamples = 15;
MultinomialOpModel m(InputType::kConst, kLogits, kNumBatches,
kNumClasses, kNumSamples, 5678, 1234);
const std::vector<std::vector<int64_t>> expected_outputs = {
{1, 2, 1, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2},
{1, 2, 0, 0, 2, 1, 2, 0, 1, 0, 2, 2, 0, 2, 2},
{1, 1, 2, 2, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2, 2},
{0, 1, 1, 1, 2, 0, 1, 2, 1, 1, 2, 2, 1, 2, 2},
{0, 2, 2, 0, 2, 0, 2, 0, 1, 1, 2, 2, 0, 0, 1}};
for (int i = 0; i < expected_outputs.size(); i++) {
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), kNumBatches * kNumSamples);
EXPECT_EQ(expected_outputs[i], output);
}
}
TEST(MultinomialTest,
ValidateTFLiteOutputisTheSameAsTFOutput_MultiBatchMultiInvoke) {
const std::vector<float> kProb = {0.1f, 0.2f, 0.7f, 0.2f, 0.3f,
0.5f, 0.1f, 0.1f, 0.8f};
const std::initializer_list<float> kLogits = {
logf(0.1f), logf(0.2f), logf(0.7f), logf(0.2f), logf(0.3f),
logf(0.5f), logf(0.1f), logf(0.1f), logf(0.8f)};
const int kNumBatches = 3;
const int kNumClasses = 3;
const int kNumSamples = 10;
MultinomialOpModel m(InputType::kConst, kLogits, kNumBatches,
kNumClasses, kNumSamples, 1234, 5678);
const std::vector<std::vector<int64_t>> expected_output = {
{2, 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2,
2, 2, 1, 1, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2},
{2, 2, 2, 0, 2, 1, 0, 0, 2, 0, 2, 0, 2, 1, 2,
2, 0, 0, 2, 2, 2, 2, 2, 2, 1, 2, 1, 1, 2, 2},
{2, 0, 0, 0, 1, 2, 1, 2, 0, 0, 2, 2, 2, 2, 0,
2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 1, 2, 2, 2}};
for (int i = 0; i < 3; i++) {
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), kNumBatches * kNumSamples);
EXPECT_EQ(expected_output[i], output);
}
}
TEST(MultinomialTest, ValidateClassProbabilities) {
const std::vector<float> kProb = {0.1f, 0.9f, 0.2f, 0.8f, 0.3f,
0.7f, 0.4f, 0.6f, 0.5f, 0.5f};
const std::initializer_list<float> kLogits = {
logf(0.1f), logf(0.9f), logf(0.2f), logf(0.8f), logf(0.3f),
logf(0.7f), logf(0.4f), logf(0.6f), logf(0.5f), logf(0.5f)};
const int kNumBatches = 5;
const int kNumClasses = 2;
const int kNumSamples = 10000;
MultinomialOpModel m(InputType::kConst, kLogits, kNumBatches,
kNumClasses, kNumSamples, 1234, 5678);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), kNumBatches * kNumSamples);
int total_count = 0;
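  // Each per-class sample count should land within 5% (expected_count / 20)
  // of its expected value p * kNumSamples.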
for (int i = 0; i < kNumBatches; i++) {
for (int j = 0; j < kNumClasses; j++) {
int idx = i * kNumClasses + j;
const int expected_count = static_cast<int>(kProb[idx] * kNumSamples);
const int allowed_misses = static_cast<int>(expected_count / 20);
int actual_count = std::count(output.begin() + i * kNumSamples,
output.begin() + (i + 1) * kNumSamples, j);
EXPECT_LE(abs(actual_count - expected_count), allowed_misses);
total_count += actual_count;
}
}
EXPECT_EQ(total_count, kNumBatches * kNumSamples);
}
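// With logits {1000.0, 1001.0}, softmax gives P(class 0) = 1 / (1 + e) ~= 0.26894;
// the large logit magnitudes also exercise numerical stability.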
TEST(MultinomialTest, ValidatePreciseOutput) {
const std::initializer_list<float> kLogits = {1000.0f, 1001.0f};
const int kNumBatches = 1;
const int kNumClasses = 2;
const int kNumSamples = 1000;
MultinomialOpModel m(InputType::kConst, kLogits, kNumBatches,
kNumClasses, kNumSamples, 1234, 5678);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput();
EXPECT_EQ(output.size(), kNumBatches * kNumSamples);
int c0 = std::count(output.begin(), output.end(), 0);
int c1 = std::count(output.begin(), output.end(), 1);
double p0 = static_cast<double>(c0) / (c0 + c1);
EXPECT_LT(std::abs(p0 - 0.26894142137), 0.01);
}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/random_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/random_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
31b643bb-7a3b-4dc4-9c36-efa9bd166423 | cpp | abseil/abseil-cpp | nonsecure_base | absl/random/internal/nonsecure_base.h | absl/random/internal/nonsecure_base_test.cc
#ifndef ABSL_RANDOM_INTERNAL_NONSECURE_BASE_H_
#define ABSL_RANDOM_INTERNAL_NONSECURE_BASE_H_
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/pool_urbg.h"
#include "absl/random/internal/salted_seed_seq.h"
#include "absl/random/internal/seed_material.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
class RandenPoolSeedSeq {
private:
struct ContiguousTag {};
struct BufferTag {};
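  // ContiguousTag: the output range is contiguous memory, so the pool can
  // fill it directly as raw bytes. BufferTag: fill a temporary uint32_t
  // buffer and copy the values out element-wise.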
template <typename Contiguous>
void generate_impl(ContiguousTag, Contiguous begin, Contiguous end) {
const size_t n = static_cast<size_t>(std::distance(begin, end));
auto* a = &(*begin);
RandenPool<uint8_t>::Fill(
absl::MakeSpan(reinterpret_cast<uint8_t*>(a), sizeof(*a) * n));
}
template <typename RandomAccessIterator>
void generate_impl(BufferTag, RandomAccessIterator begin,
RandomAccessIterator end) {
const size_t n = std::distance(begin, end);
absl::InlinedVector<uint32_t, 8> data(n, 0);
RandenPool<uint32_t>::Fill(absl::MakeSpan(data.begin(), data.end()));
std::copy(std::begin(data), std::end(data), begin);
}
public:
using result_type = uint32_t;
size_t size() { return 0; }
template <typename OutIterator>
void param(OutIterator) const {}
template <typename RandomAccessIterator>
void generate(RandomAccessIterator begin, RandomAccessIterator end) {
if (begin != end) {
using U = typename std::iterator_traits<RandomAccessIterator>::value_type;
using TagType = absl::conditional_t<
(std::is_pointer<RandomAccessIterator>::value ||
std::is_same<RandomAccessIterator,
typename std::vector<U>::iterator>::value),
ContiguousTag, BufferTag>;
generate_impl(TagType{}, begin, end);
}
}
};
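// Wraps a URBG so that default construction seeds it from Seeder
// (RandenPoolSeedSeq by default); copying is deleted, moving is allowed.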
template <typename URBG, typename Seeder = RandenPoolSeedSeq>
class NonsecureURBGBase {
public:
using result_type = typename URBG::result_type;
NonsecureURBGBase() : urbg_(ConstructURBG()) {}
NonsecureURBGBase(const NonsecureURBGBase&) = delete;
NonsecureURBGBase& operator=(const NonsecureURBGBase&) = delete;
NonsecureURBGBase(NonsecureURBGBase&&) = default;
NonsecureURBGBase& operator=(NonsecureURBGBase&&) = default;
template <class SSeq, typename = typename absl::enable_if_t<
!std::is_same<SSeq, NonsecureURBGBase>::value>>
explicit NonsecureURBGBase(SSeq&& seq)
: urbg_(ConstructURBG(std::forward<SSeq>(seq))) {}
static constexpr result_type(min)() { return (URBG::min)(); }
static constexpr result_type(max)() { return (URBG::max)(); }
result_type operator()() { return urbg_(); }
void discard(unsigned long long values) {
urbg_.discard(values);
}
bool operator==(const NonsecureURBGBase& other) const {
return urbg_ == other.urbg_;
}
bool operator!=(const NonsecureURBGBase& other) const {
return !(urbg_ == other.urbg_);
}
private:
static URBG ConstructURBG() {
Seeder seeder;
return URBG(seeder);
}
template <typename SSeq>
static URBG ConstructURBG(SSeq&& seq) {
auto salted_seq =
random_internal::MakeSaltedSeedSeq(std::forward<SSeq>(seq));
return URBG(salted_seq);
}
URBG urbg_;
};
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/random/internal/nonsecure_base.h"
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <memory>
#include <random>
#include <sstream>
#include "gtest/gtest.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/strings/str_cat.h"
namespace {
using ExampleNonsecureURBG =
absl::random_internal::NonsecureURBGBase<std::mt19937>;
template <typename T>
void Use(const T&) {}
}
TEST(NonsecureURBGBase, DefaultConstructorIsValid) {
ExampleNonsecureURBG urbg;
}
TEST(RecommendedTemplates, CanBeConstructed) {
absl::BitGen default_generator;
absl::InsecureBitGen insecure_generator;
}
TEST(RecommendedTemplates, CanDiscardValues) {
absl::BitGen default_generator;
absl::InsecureBitGen insecure_generator;
default_generator.discard(5);
insecure_generator.discard(5);
}
TEST(NonsecureURBGBase, StandardInterface) {
using E = absl::random_internal::NonsecureURBGBase<std::minstd_rand>;
using T = typename E::result_type;
static_assert(!std::is_copy_constructible<E>::value,
"NonsecureURBGBase should not be copy constructible");
static_assert(!absl::is_copy_assignable<E>::value,
"NonsecureURBGBase should not be copy assignable");
static_assert(std::is_move_constructible<E>::value,
"NonsecureURBGBase should be move constructible");
static_assert(absl::is_move_assignable<E>::value,
"NonsecureURBGBase should be move assignable");
static_assert(std::is_same<decltype(std::declval<E>()()), T>::value,
"return type of operator() must be result_type");
{
const E x, y;
Use(x);
Use(y);
static_assert(std::is_same<decltype(x == y), bool>::value,
"return type of operator== must be bool");
static_assert(std::is_same<decltype(x != y), bool>::value,
"return type of operator== must be bool");
}
E e;
std::seed_seq q{1, 2, 3};
E{};
E{q};
{
E tmp(q);
E m = std::move(tmp);
E n(std::move(m));
EXPECT_TRUE(e != n);
}
{
E a(q);
E b(q);
EXPECT_TRUE(a != e);
EXPECT_TRUE(a == b);
a();
EXPECT_TRUE(a != b);
}
unsigned long long z = 1;
e.discard(z);
}
TEST(NonsecureURBGBase, SeedSeqConstructorIsValid) {
std::seed_seq seq;
ExampleNonsecureURBG rbg(seq);
}
TEST(NonsecureURBGBase, CompatibleWithDistributionUtils) {
ExampleNonsecureURBG rbg;
absl::Uniform(rbg, 0, 100);
absl::Uniform(rbg, 0.5, 0.7);
absl::Poisson<uint32_t>(rbg);
absl::Exponential<float>(rbg);
}
TEST(NonsecureURBGBase, CompatibleWithStdDistributions) {
ExampleNonsecureURBG rbg;
static_cast<void>(std::uniform_int_distribution<uint32_t>(0, 100)(rbg));
static_cast<void>(std::uniform_real_distribution<float>()(rbg));
static_cast<void>(std::bernoulli_distribution(0.2)(rbg));
}
TEST(NonsecureURBGBase, ConsecutiveDefaultInstancesYieldUniqueVariates) {
const size_t kNumSamples = 128;
ExampleNonsecureURBG rbg1;
ExampleNonsecureURBG rbg2;
for (size_t i = 0; i < kNumSamples; i++) {
EXPECT_NE(rbg1(), rbg2());
}
}
TEST(NonsecureURBGBase, EqualSeedSequencesYieldEqualVariates) {
std::seed_seq seq;
ExampleNonsecureURBG rbg1(seq);
ExampleNonsecureURBG rbg2(seq);
for (uint32_t i = 0; i < 1000; i++) {
EXPECT_EQ(rbg1(), rbg2());
}
rbg1.discard(100);
rbg2.discard(100);
for (uint32_t i = 0; i < 1000; i++) {
EXPECT_EQ(rbg1(), rbg2());
}
}
TEST(RandenPoolSeedSeqTest, SeederWorksForU32) {
absl::random_internal::RandenPoolSeedSeq seeder;
uint32_t state[2] = {0, 0};
seeder.generate(std::begin(state), std::end(state));
EXPECT_FALSE(state[0] == 0 && state[1] == 0);
}
TEST(RandenPoolSeedSeqTest, SeederWorksForU64) {
absl::random_internal::RandenPoolSeedSeq seeder;
uint64_t state[2] = {0, 0};
seeder.generate(std::begin(state), std::end(state));
EXPECT_FALSE(state[0] == 0 && state[1] == 0);
EXPECT_FALSE((state[0] >> 32) == 0 && (state[1] >> 32) == 0);
}
TEST(RandenPoolSeedSeqTest, SeederWorksForS32) {
absl::random_internal::RandenPoolSeedSeq seeder;
int32_t state[2] = {0, 0};
seeder.generate(std::begin(state), std::end(state));
EXPECT_FALSE(state[0] == 0 && state[1] == 0);
}
TEST(RandenPoolSeedSeqTest, SeederWorksForVector) {
absl::random_internal::RandenPoolSeedSeq seeder;
std::vector<uint32_t> state(2);
seeder.generate(std::begin(state), std::end(state));
EXPECT_FALSE(state[0] == 0 && state[1] == 0);
}
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/nonsecure_base.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/nonsecure_base_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
282aa471-5c12-4468-8a0c-38230ff0f653 | cpp | tensorflow/tensorflow | broadcast_in_dim | tensorflow/lite/experimental/shlo/legacy/src/broadcast_in_dim.cc | tensorflow/lite/experimental/shlo/legacy/test/broadcast_in_dim_test.cc
#include <algorithm>
#include <iterator>
#include <type_traits>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/dispatch.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/util.h"
namespace stablehlo {
namespace {
bool IsUnique(absl::Span<const DimensionSize> span) {
std::vector<DimensionSize> temp(span.begin(), span.end());
  // std::unique only collapses adjacent duplicates, so sort first to catch
  // non-adjacent repeats.
  std::sort(temp.begin(), temp.end());
  auto i = std::unique(temp.begin(), temp.end());
return std::distance(temp.begin(), i) == span.size();
}
template <typename Value>
absl::Status CheckParameters(
const Value& operand, absl::Span<const DimensionSize> broadcast_dimensions,
Value& result) {
if (!operand.is_per_axis_quantized()) {
if (!(result.element_type() == operand.element_type())) {
return absl::InvalidArgumentError(
"Constraint violation: element_type(result) = element_type(operand) "
"if !is_per_axis_quantized(operand)");
}
}
if (!(broadcast_dimensions.size() == operand.rank())) {
return absl::InvalidArgumentError(
"Constraint violation: size(broadcast_dimensions) = rank(operand)");
} else if (!(*std::min_element(broadcast_dimensions.begin(),
broadcast_dimensions.end()) >= 0 and
*std::max_element(broadcast_dimensions.begin(),
broadcast_dimensions.end()) < result.rank())) {
return absl::InvalidArgumentError(
"Constraint violation: 0 <= broadcast_dimensions < rank(result)");
} else if (!(IsUnique(broadcast_dimensions))) {
return absl::InvalidArgumentError(
"Constraint violation: is_unique(broadcast_dimensions)");
} else {
for (auto d : operand.axes()) {
if (!(operand.dim(d) == 1 or
operand.dim(d) == result.dim(broadcast_dimensions[d]))) {
return absl::InvalidArgumentError(
"Constraint violation: dim(operand, d) = 1 or dim(operand, d) = "
"dim(result, broadcast_dimensions[d])");
}
}
}
if constexpr (std::is_same_v<Value, QuantizedTensor>) {
if (operand.is_per_axis_quantized()) {
if (!(operand.is_per_axis_quantized() and
result.storage_type() == operand.storage_type() and
result.expressed_type() == operand.expressed_type() and
result.storage_min() == operand.storage_min() and
result.storage_max() == operand.storage_max())) {
return absl::InvalidArgumentError(
"Constraint violation: element_type(result) = "
"element_type(operand) with exceptions if "
"is_per_axis_quantized(operand)");
}
}
if (result.is_per_axis_quantized()) {
if (!(*result.quantized_dimension() ==
broadcast_dimensions[*operand.quantized_dimension()])) {
return absl::InvalidArgumentError(
"quantization_dimension(result) = "
"broadcast_dimensions[quantization_dimension(operand)]");
}
if (operand.dim(*operand.quantized_dimension()) == 1) {
auto n = result.dim(*result.quantized_dimension());
for (auto i = 0; i < n; ++i) {
if (!(result.scales(i) == operand.scales(0) and
result.zero_points(i) == operand.zero_points(0))) {
return absl::InvalidArgumentError(
"If dim(operand, quantization_dimension(operand)) = 1, then "
"scales(result)[i] = scales(operand)[0] and "
"zero_points(result)[i] = zero_points(operand)[0] for i in "
"range(dim(result, quantization_dimension(result)))");
}
}
}
}
}
if (operand.layout().has_strides() || result.layout().has_strides()) {
    return absl::InvalidArgumentError("Strides not supported yet");
}
return absl::OkStatus();
}
template <ElementType storage_type, ElementType expressed_type, typename Value>
absl::Status BroadcastInDim(
const Value& operand, absl::Span<const DimensionSize> broadcast_dimensions,
Value& result) {
if (auto check = CheckParameters(operand, broadcast_dimensions, result);
!check.ok()) {
return check;
}
using S = Storage<storage_type>;
auto operand_buffer = operand.buffer();
auto result_buffer = result.buffer();
if constexpr (std::is_same_v<Value, Tensor>) {
if (storage_type != operand.element_type()) {
return absl::InvalidArgumentError("Unexpected tensor element type");
}
TensorIndex operand_index(operand.shape());
for (TensorIndexIterator result_index_iter{result.shape()};
result_index_iter.has_next(); ++result_index_iter) {
for (auto d = 0; d < operand.rank(); ++d) {
if (operand.dim(d) == 1) {
operand_index.set(d, 0);
} else {
auto b = broadcast_dimensions[d];
operand_index.set(d, (*result_index_iter)[b]);
}
}
auto linearized_operand_index = operand_index.linearize();
auto linearized_result_index = result_index_iter->linearize();
auto value = S::Get(operand_buffer, linearized_operand_index);
S::Set(result_buffer, linearized_result_index, value);
}
} else if constexpr (std::is_same_v<Value, QuantizedTensor>) {
if (storage_type != result.storage_type()) {
return absl::InvalidArgumentError("Unexpected storage type");
} else if (expressed_type != result.expressed_type()) {
return absl::InvalidArgumentError("Unexpected expressed type");
}
if (!(operand.is_per_tensor_quantized() and
result.is_per_tensor_quantized())) {
return absl::InvalidArgumentError(
"Only per-tensor quantization is currently supported");
}
using ET = typename Storage<expressed_type>::Type;
const QuantizedParameter& operand_quant_param =
operand.type().element_type().parameters(0);
const QuantizedParameter& result_quant_param =
result.type().element_type().parameters(0);
ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale);
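    // For each result element, dequantize the operand value and requantize it
    // with the result parameters (an identity op in expressed space).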
TensorIndex operand_index(operand.shape());
for (TensorIndexIterator result_index_iter{result.shape()};
result_index_iter.has_next(); ++result_index_iter) {
for (auto d = 0; d < operand.rank(); ++d) {
if (operand.dim(d) == 1) {
operand_index.set(d, 0);
} else {
auto b = broadcast_dimensions[d];
operand_index.set(d, (*result_index_iter)[b]);
}
}
auto linearized_operand_index = operand_index.linearize();
auto linearized_result_index = result_index_iter->linearize();
auto operand_storage = S::Get(operand_buffer, linearized_operand_index);
auto result_storage =
DequantizeOpQuantizePartial<storage_type, expressed_type>(
operand_storage, operand_quant_param, result_scale_inv,
result_quant_param.zero_point, [](auto x) { return x; });
S::Set(result_buffer, linearized_result_index, result_storage);
}
if (auto status = CompleteQuantization<storage_type>(result);
!status.ok()) {
return status;
}
}
return absl::OkStatus();
}
}
absl::Status BroadcastInDim(
const Tensor& operand, absl::Span<const DimensionSize> broadcast_dimensions,
Tensor& result) {
DISPATCH_BOOL_INT_FLOAT(BroadcastInDim, result.element_type(), operand,
broadcast_dimensions, result);
}
absl::Status BroadcastInDim(
const QuantizedTensor& operand,
absl::Span<const DimensionSize> broadcast_dimensions,
QuantizedTensor& result) {
DISPATCH_QUANTIZED(BroadcastInDim, result.storage_type(),
result.expressed_type(), operand, broadcast_dimensions,
result);
}
}
#include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"
namespace stablehlo {
namespace testing {
template <ElementType element_type>
void test(std::initializer_list<DimensionSize>&& operand_shape,
std::vector<typename Storage<element_type>::Type>&& operand_values,
std::initializer_list<DimensionSize>&& broadcast_dimensions_values,
std::initializer_list<DimensionSize>&& result_shape,
std::vector<typename Storage<element_type>::Type>&& expected_values) {
Tensor operand(TensorType(Shape(operand_shape), element_type),
operand_values.data());
Tensor expected(TensorType(Shape(result_shape), element_type),
expected_values.data());
std::vector<typename Storage<element_type>::Type> result_values(
expected_values.size());
Tensor result(TensorType(Shape(result_shape), element_type),
result_values.data());
absl::Span<const DimensionSize> broadcast_dimensions(
broadcast_dimensions_values);
ASSERT_OK(BroadcastInDim(operand, broadcast_dimensions, result));
EXPECT_EQ(result, expected)
<< "operand: " << operand
<< "\nbroadcast_dimensions: " << ToString(broadcast_dimensions);
}
template <ElementType storage_type, ElementType expressed_type>
void test(
QuantizedParameter&& quantized_parameter,
std::initializer_list<DimensionSize>&& operand_shape,
std::vector<typename Storage<expressed_type>::Type>&& operand_values,
std::initializer_list<DimensionSize>&& broadcast_dimensions_values,
std::initializer_list<DimensionSize>&& result_shape,
std::vector<typename Storage<expressed_type>::Type>&& expected_values) {
auto operand_quant_values = QuantizeVector<storage_type, expressed_type>(
operand_values, quantized_parameter);
auto expected_quant_values = QuantizeVector<storage_type, expressed_type>(
expected_values, quantized_parameter);
std::vector<typename Storage<storage_type>::Type> result_quant_values(
expected_quant_values.size());
QuantizedTensorElementType element_type(storage_type, expressed_type,
std::move(quantized_parameter));
QuantizedTensor operand(
QuantizedTensorType(Shape(operand_shape),
QuantizedTensorElementType(element_type)),
operand_quant_values.data());
QuantizedTensor expected(
QuantizedTensorType(Shape(result_shape),
QuantizedTensorElementType(element_type)),
expected_quant_values.data());
QuantizedTensor result(
QuantizedTensorType(Shape(result_shape),
QuantizedTensorElementType(element_type)),
result_quant_values.data());
absl::Span<const DimensionSize> broadcast_dimensions(
broadcast_dimensions_values);
  ASSERT_OK(BroadcastInDim(operand, broadcast_dimensions, result));
EXPECT_EQ(result, expected)
<< "operand: " << operand
<< "\nbroadcast_dimensions: " << ToString(broadcast_dimensions);
}
TEST(BroadcastInDim, Unquantized) {
test<ElementType::kI1>({1, 3}, {true, false, true}, {2, 1}, {2, 3, 2},
{true, true, false, false, true, true, true, true,
false, false, true, true});
test<ElementType::kSI8>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI16>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI32>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kBF16>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kF16>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kF32>({1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
}
TEST(BroadcastInDim, Quantized) {
test<ElementType::kSI8, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI8, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI8, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI16, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI16, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI16, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI32, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI32, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
test<ElementType::kSI32, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {1, 3}, {1, 2, 3}, {2, 1}, {2, 3, 2},
{1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3});
}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/broadcast_in_dim.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/broadcast_in_dim_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
70594a59-cbf4-48c5-b398-788d2ee64d20 | cpp | google/leveldb | arena | util/arena.cc | util/arena_test.cc
#include "util/arena.h"
namespace leveldb {
static const int kBlockSize = 4096;
Arena::Arena()
: alloc_ptr_(nullptr), alloc_bytes_remaining_(0), memory_usage_(0) {}
Arena::~Arena() {
for (size_t i = 0; i < blocks_.size(); i++) {
delete[] blocks_[i];
}
}
char* Arena::AllocateFallback(size_t bytes) {
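  // Objects larger than a quarter of the block size get a dedicated
  // allocation so the unused remainder of a standard block stays bounded.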
if (bytes > kBlockSize / 4) {
char* result = AllocateNewBlock(bytes);
return result;
}
alloc_ptr_ = AllocateNewBlock(kBlockSize);
alloc_bytes_remaining_ = kBlockSize;
char* result = alloc_ptr_;
alloc_ptr_ += bytes;
alloc_bytes_remaining_ -= bytes;
return result;
}
char* Arena::AllocateAligned(size_t bytes) {
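  // Round the bump pointer up to max(sizeof(void*), 8)-byte alignment;
  // "slop" is the padding needed to reach the next aligned address.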
const int align = (sizeof(void*) > 8) ? sizeof(void*) : 8;
static_assert((align & (align - 1)) == 0,
"Pointer size should be a power of 2");
size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align - 1);
size_t slop = (current_mod == 0 ? 0 : align - current_mod);
size_t needed = bytes + slop;
char* result;
if (needed <= alloc_bytes_remaining_) {
result = alloc_ptr_ + slop;
alloc_ptr_ += needed;
alloc_bytes_remaining_ -= needed;
} else {
result = AllocateFallback(bytes);
}
assert((reinterpret_cast<uintptr_t>(result) & (align - 1)) == 0);
return result;
}
char* Arena::AllocateNewBlock(size_t block_bytes) {
char* result = new char[block_bytes];
blocks_.push_back(result);
memory_usage_.fetch_add(block_bytes + sizeof(char*),
std::memory_order_relaxed);
return result;
}
}
#include "util/arena.h"
#include "gtest/gtest.h"
#include "util/random.h"
namespace leveldb {
TEST(ArenaTest, Empty) { Arena arena; }
TEST(ArenaTest, Simple) {
std::vector<std::pair<size_t, char*>> allocated;
Arena arena;
const int N = 100000;
size_t bytes = 0;
Random rnd(301);
for (int i = 0; i < N; i++) {
size_t s;
if (i % (N / 10) == 0) {
s = i;
} else {
s = rnd.OneIn(4000)
? rnd.Uniform(6000)
: (rnd.OneIn(10) ? rnd.Uniform(100) : rnd.Uniform(20));
}
if (s == 0) {
s = 1;
}
char* r;
if (rnd.OneIn(10)) {
r = arena.AllocateAligned(s);
} else {
r = arena.Allocate(s);
}
for (size_t b = 0; b < s; b++) {
r[b] = i % 256;
}
bytes += s;
allocated.push_back(std::make_pair(s, r));
ASSERT_GE(arena.MemoryUsage(), bytes);
if (i > N / 10) {
ASSERT_LE(arena.MemoryUsage(), bytes * 1.10);
}
}
for (size_t i = 0; i < allocated.size(); i++) {
size_t num_bytes = allocated[i].first;
const char* p = allocated[i].second;
for (size_t b = 0; b < num_bytes; b++) {
ASSERT_EQ(int(p[b]) & 0xff, i % 256);
}
}
}
}
https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/arena.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/arena_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65
c7cec2c6-4c63-4839-b12d-e64a94d8516d | cpp | tensorflow/tensorflow | depthwiseconv_float | tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h | tensorflow/lite/kernels/internal/depthwiseconv_float_test.cc
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
inline void DepthwiseConv(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const float* input_data, const RuntimeShape& filter_shape,
const float* filter_data, const RuntimeShape& bias_shape,
const float* bias_data, const RuntimeShape& output_shape,
float* output_data) {
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
const int dilation_width_factor = params.dilation_width_factor;
const int dilation_height_factor = params.dilation_height_factor;
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int depth_multiplier = params.depth_multiplier;
const float output_activation_min = params.float_activation_min;
const float output_activation_max = params.float_activation_max;
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int input_depth = input_shape.Dims(3);
const int filter_height = filter_shape.Dims(1);
const int filter_width = filter_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
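  // Output channel oc = ic * depth_multiplier + m; each output element
  // accumulates input * filter over the dilated receptive field, skipping
  // out-of-bounds (implicitly zero-padded) taps.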
for (int b = 0; b < batches; ++b) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
for (int ic = 0; ic < input_depth; ++ic) {
for (int m = 0; m < depth_multiplier; m++) {
const int oc = m + ic * depth_multiplier;
const int in_x_origin = (out_x * stride_width) - pad_width;
const int in_y_origin = (out_y * stride_height) - pad_height;
float total = 0.f;
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int in_x = in_x_origin + dilation_width_factor * filter_x;
const int in_y =
in_y_origin + dilation_height_factor * filter_y;
if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
(in_y < input_height)) {
float input_value =
input_data[Offset(input_shape, b, in_y, in_x, ic)];
float filter_value = filter_data[Offset(
filter_shape, 0, filter_y, filter_x, oc)];
total += (input_value * filter_value);
}
}
}
float bias_value = 0.0f;
if (bias_data) {
bias_value = bias_data[oc];
}
output_data[Offset(output_shape, b, out_y, out_x, oc)] =
ActivationFunctionWithMinMax(total + bias_value,
output_activation_min,
output_activation_max);
}
}
}
}
}
}
}
}
#endif
#include <algorithm>
#include <cmath>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/test_util.h"
#include "tensorflow/lite/kernels/internal/types.h"
#define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_float.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h"
namespace tflite {
namespace {
void TestOneDepthwiseConv(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const float* input_data, const RuntimeShape& filter_shape,
const float* filter_data, const RuntimeShape& bias_shape,
const float* bias_data, const RuntimeShape& output_shape) {
const int output_buffer_size = output_shape.FlatSize();
std::vector<float> output_data(output_buffer_size);
std::vector<float> reference_output_data(output_buffer_size);
reference_ops::DepthwiseConv(params, input_shape, input_data, filter_shape,
filter_data, bias_shape, bias_data, output_shape,
reference_output_data.data());
optimized_ops::DepthwiseConvImpl(
params, input_shape, input_data, filter_shape, filter_data, bias_shape,
bias_data, output_shape, output_data.data(), CpuFlags(),
0,
output_shape.Dims(1), 1);
double sum_abs_diff = 0;
float max_abs_val = 0;
for (int i = 0; i < output_buffer_size; i++) {
sum_abs_diff += std::abs(output_data[i] - reference_output_data[i]);
max_abs_val = std::max(max_abs_val, std::abs(reference_output_data[i]));
}
if (sum_abs_diff != 0.f) {
const float mean_diff =
static_cast<float>(sum_abs_diff / output_buffer_size);
const float relative_error = std::abs(mean_diff) / max_abs_val;
ASSERT_LT(relative_error, 1e-5f);
}
}
bool TryTestOneDepthwiseConv() {
const int batch = UniformRandomInt(1, 2);
const int input_depth = ExponentialRandomPositiveInt(0.9f, 6, 50);
const int input_width = ExponentialRandomPositiveInt(0.9f, 20, 200);
const int input_height = ExponentialRandomPositiveInt(0.9f, 20, 200);
const int filter_width = ExponentialRandomPositiveInt(0.9f, 4, 10);
const int filter_height = ExponentialRandomPositiveInt(0.9f, 4, 10);
const int depth_multiplier = ExponentialRandomPositiveInt(0.8f, 6, 50);
const int stride = ExponentialRandomPositiveInt(0.9f, 3, 8);
const int output_depth = input_depth * depth_multiplier;
const int dilation_width_factor = RandomElement(std::vector<int>({1, 2, 4}));
const int dilation_height_factor = RandomElement(std::vector<int>({1, 2, 4}));
float output_activation_min, output_activation_max;
FusedActivationFunctionType ac =
RandomElement(std::vector<FusedActivationFunctionType>(
{FusedActivationFunctionType::kNone,
FusedActivationFunctionType::kRelu,
FusedActivationFunctionType::kRelu1,
FusedActivationFunctionType::kRelu6}));
GetActivationMinMax(ac, &output_activation_min, &output_activation_max);
const int kMaxSupportedOutputDepth = 1024;
if (output_depth > kMaxSupportedOutputDepth) {
return false;
}
RuntimeShape input_shape_inference(
{batch, input_height, input_width, input_depth});
RuntimeShape output_shape_inference;
int pad_width, pad_height;
const auto padding_type =
UniformRandomInt(0, 1) ? PaddingType::kSame : PaddingType::kValid;
if (!ComputeConvSizes(input_shape_inference, output_depth, filter_width,
filter_height, stride, dilation_width_factor,
dilation_height_factor, padding_type,
&output_shape_inference, &pad_width, &pad_height)) {
return false;
}
RuntimeShape filter_shape_inference(
{1, filter_height, filter_width, output_depth});
RuntimeShape bias_shape_inference({1, 1, 1, output_depth});
const int input_buffer_size = input_shape_inference.FlatSize();
const int filter_buffer_size = filter_shape_inference.FlatSize();
std::vector<float> input_data(input_buffer_size);
std::vector<float> filter_data(filter_buffer_size);
std::vector<float> bias_data(output_depth);
const float input_amplitude = 1.f;
const float filter_amplitude = 1.f;
const float bias_amplitude =
filter_width * filter_height * input_amplitude * filter_amplitude;
FillRandom(&input_data, -input_amplitude, input_amplitude);
FillRandom(&filter_data, -filter_amplitude, filter_amplitude);
FillRandom(&bias_data, -bias_amplitude, bias_amplitude);
DepthwiseParams op_params;
op_params.padding_type = PaddingType::kSame;
op_params.padding_values.width = pad_width;
op_params.padding_values.height = pad_height;
op_params.stride_width = stride;
op_params.stride_height = stride;
op_params.dilation_width_factor = dilation_width_factor;
op_params.dilation_height_factor = dilation_height_factor;
op_params.depth_multiplier = depth_multiplier;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
TestOneDepthwiseConv(op_params, input_shape_inference, input_data.data(),
filter_shape_inference, filter_data.data(),
bias_shape_inference, bias_data.data(),
output_shape_inference);
return true;
}
void TestOneDepthwiseConv() {
while (!TryTestOneDepthwiseConv()) {
}
}
TEST(TestDepthwiseConv, TestDepthwiseConv) {
const int kTestsToRun = 10 * 1000;
for (int i = 0; i < kTestsToRun; i++) {
TestOneDepthwiseConv();
}
}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/depthwiseconv_float_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
e91a7dc9-1c90-4876-9a47-8825aed4fb9a | cpp | tensorflow/tensorflow | cpp_generator | tensorflow/c/experimental/ops/gen/cpp/cpp_generator.cc | tensorflow/c/experimental/ops/gen/cpp/cpp_generator_test.cc
#include "tensorflow/c/experimental/ops/gen/cpp/cpp_generator.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/cpp_file_renderer.h"
#include "tensorflow/core/lib/io/path.h"
namespace tensorflow {
namespace generator {
CppGenerator::CppGenerator(cpp::CppConfig cpp_config, PathConfig path_config)
: controller_(path_config),
cpp_config_(cpp_config),
path_config_(path_config) {}
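// Renders every op in the model into one file; `mode` selects whether the
// header or the source variant is produced.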
SourceCode CppGenerator::GenerateOneFile(
cpp::RendererContext::Mode mode) const {
SourceCode generated_code;
const std::vector<OpSpec> ops(controller_.GetModelOps());
std::vector<cpp::OpView> views(ops.begin(), ops.end());
cpp::RendererContext context{mode, generated_code, cpp_config_, path_config_};
cpp::CppFileRenderer(context, views).Render();
return generated_code;
}
SourceCode CppGenerator::HeaderFileContents() const {
return GenerateOneFile(cpp::RendererContext::kHeader);
}
SourceCode CppGenerator::SourceFileContents() const {
return GenerateOneFile(cpp::RendererContext::kSource);
}
string CppGenerator::HeaderFileName() const {
return io::JoinPath(path_config_.output_path, cpp_config_.unit + "_ops.h");
}
string CppGenerator::SourceFileName() const {
return io::JoinPath(path_config_.output_path, cpp_config_.unit + "_ops.cc");
}
void CppGenerator::WriteHeaderFile() const {
controller_.WriteFile(HeaderFileName(), HeaderFileContents());
}
void CppGenerator::WriteSourceFile() const {
controller_.WriteFile(SourceFileName(), SourceFileContents());
}
}
}
#include "tensorflow/c/experimental/ops/gen/cpp/cpp_generator.h"
#include <algorithm>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace generator {
namespace {
TEST(CppGeneratorTest, typical_usage) {
string category = "testing";
string name_space = "tensorflow::ops";
string output_dir = "tensorflow/c/experimental/ops/gen/cpp/golden";
string source_dir = "tensorflow";
string api_dirs = "";
std::vector<string> ops = {
"Neg",
"MatMul",
"IdentityN",
"SparseSoftmaxCrossEntropyWithLogits",
"AccumulatorApplyGradient",
"VarHandleOp",
"RestoreV2",
};
cpp::CppConfig cpp_config(category, name_space);
PathConfig controller_config(output_dir, source_dir, api_dirs, ops);
CppGenerator generator(cpp_config, controller_config);
Env *env = Env::Default();
string golden_dir = io::JoinPath(testing::TensorFlowSrcRoot(),
controller_config.tf_output_dir);
string generated_header = generator.HeaderFileContents().Render();
string generated_source = generator.SourceFileContents().Render();
string expected_header;
string header_file_name = io::JoinPath(golden_dir, "testing_ops.h.golden");
TF_CHECK_OK(ReadFileToString(env, header_file_name, &expected_header));
string expected_source;
string source_file_name = io::JoinPath(golden_dir, "testing_ops.cc.golden");
TF_CHECK_OK(ReadFileToString(env, source_file_name, &expected_source));
expected_header.erase(
std::remove(expected_header.begin(), expected_header.end(), '\r'),
expected_header.end());
expected_source.erase(
std::remove(expected_source.begin(), expected_source.end(), '\r'),
expected_source.end());
EXPECT_EQ(expected_header, generated_header);
EXPECT_EQ(expected_source, generated_source);
}
}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/cpp/cpp_generator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/cpp/cpp_generator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1ea8ed3d-c026-443e-b379-84a13dec9008 | cpp | google/quiche | qbone_packet_exchanger | quiche/quic/qbone/qbone_packet_exchanger.cc | quiche/quic/qbone/qbone_packet_exchanger_test.cc
#include "quiche/quic/qbone/qbone_packet_exchanger.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
namespace quic {
bool QbonePacketExchanger::ReadAndDeliverPacket(
QboneClientInterface* qbone_client) {
bool blocked = false;
std::string error;
std::unique_ptr<QuicData> packet = ReadPacket(&blocked, &error);
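  // A null packet with blocked == false signals a read error, which is
  // reported to the visitor; blocked reads fail silently.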
if (packet == nullptr) {
if (!blocked && visitor_) {
visitor_->OnReadError(error);
}
return false;
}
qbone_client->ProcessPacketFromNetwork(packet->AsStringPiece());
return true;
}
void QbonePacketExchanger::WritePacketToNetwork(const char* packet,
size_t size) {
if (visitor_) {
absl::Status status = visitor_->OnWrite(absl::string_view(packet, size));
if (!status.ok()) {
QUIC_LOG_EVERY_N_SEC(ERROR, 60) << status;
}
}
bool blocked = false;
std::string error;
if (packet_queue_.empty() && !write_blocked_) {
if (WritePacket(packet, size, &blocked, &error)) {
return;
}
if (blocked) {
write_blocked_ = true;
} else {
QUIC_LOG_EVERY_N_SEC(ERROR, 60) << "Packet write failed: " << error;
if (visitor_) {
visitor_->OnWriteError(error);
}
}
}
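  // The queue is bounded: if it is already full, the packet is dropped.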
if (packet_queue_.size() >= max_pending_packets_) {
return;
}
auto data_copy = new char[size];
memcpy(data_copy, packet, size);
packet_queue_.push_back(
std::make_unique<QuicData>(data_copy, size, true));
}
void QbonePacketExchanger::SetWritable() {
write_blocked_ = false;
while (!packet_queue_.empty()) {
bool blocked = false;
std::string error;
if (WritePacket(packet_queue_.front()->data(),
packet_queue_.front()->length(), &blocked, &error)) {
packet_queue_.pop_front();
} else {
if (!blocked && visitor_) {
visitor_->OnWriteError(error);
}
write_blocked_ = blocked;
return;
}
}
}
}
#include "quiche/quic/qbone/qbone_packet_exchanger.h"
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/qbone/mock_qbone_client.h"
namespace quic {
namespace {
using ::testing::StrEq;
using ::testing::StrictMock;
const size_t kMaxPendingPackets = 2;
class MockVisitor : public QbonePacketExchanger::Visitor {
public:
MOCK_METHOD(void, OnReadError, (const std::string&), (override));
MOCK_METHOD(void, OnWriteError, (const std::string&), (override));
MOCK_METHOD(absl::Status, OnWrite, (absl::string_view), (override));
};
class FakeQbonePacketExchanger : public QbonePacketExchanger {
public:
using QbonePacketExchanger::QbonePacketExchanger;
void AddPacketToBeRead(std::unique_ptr<QuicData> packet) {
packets_to_be_read_.push_back(std::move(packet));
}
void SetReadError(const std::string& error) { read_error_ = error; }
void ForceWriteFailure(bool blocked, const std::string& error) {
write_blocked_ = blocked;
write_error_ = error;
}
const std::vector<std::string>& packets_written() const {
return packets_written_;
}
private:
std::unique_ptr<QuicData> ReadPacket(bool* blocked,
std::string* error) override {
*blocked = false;
if (packets_to_be_read_.empty()) {
*blocked = read_error_.empty();
*error = read_error_;
return nullptr;
}
std::unique_ptr<QuicData> packet = std::move(packets_to_be_read_.front());
packets_to_be_read_.pop_front();
return packet;
}
bool WritePacket(const char* packet, size_t size, bool* blocked,
std::string* error) override {
*blocked = false;
if (write_blocked_ || !write_error_.empty()) {
*blocked = write_blocked_;
*error = write_error_;
return false;
}
packets_written_.push_back(std::string(packet, size));
return true;
}
std::string read_error_;
std::list<std::unique_ptr<QuicData>> packets_to_be_read_;
std::string write_error_;
bool write_blocked_ = false;
std::vector<std::string> packets_written_;
};
TEST(QbonePacketExchangerTest,
ReadAndDeliverPacketDeliversPacketToQboneClient) {
StrictMock<MockVisitor> visitor;
FakeQbonePacketExchanger exchanger(&visitor, kMaxPendingPackets);
StrictMock<MockQboneClient> client;
std::string packet = "data";
exchanger.AddPacketToBeRead(
std::make_unique<QuicData>(packet.data(), packet.length()));
EXPECT_CALL(client, ProcessPacketFromNetwork(StrEq("data")));
EXPECT_TRUE(exchanger.ReadAndDeliverPacket(&client));
}
TEST(QbonePacketExchangerTest,
ReadAndDeliverPacketNotifiesVisitorOnReadFailure) {
MockVisitor visitor;
FakeQbonePacketExchanger exchanger(&visitor, kMaxPendingPackets);
MockQboneClient client;
std::string io_error = "I/O error";
exchanger.SetReadError(io_error);
EXPECT_CALL(visitor, OnReadError(StrEq(io_error))).Times(1);
EXPECT_FALSE(exchanger.ReadAndDeliverPacket(&client));
}
TEST(QbonePacketExchangerTest,
ReadAndDeliverPacketDoesNotNotifyVisitorOnBlockedIO) {
MockVisitor visitor;
FakeQbonePacketExchanger exchanger(&visitor, kMaxPendingPackets);
MockQboneClient client;
EXPECT_FALSE(exchanger.ReadAndDeliverPacket(&client));
}
TEST(QbonePacketExchangerTest,
WritePacketToNetworkWritesDirectlyToNetworkWhenNotBlocked) {
MockVisitor visitor;
FakeQbonePacketExchanger exchanger(&visitor, kMaxPendingPackets);
MockQboneClient client;
std::string packet = "data";
exchanger.WritePacketToNetwork(packet.data(), packet.length());
ASSERT_EQ(exchanger.packets_written().size(), 1);
EXPECT_THAT(exchanger.packets_written()[0], StrEq(packet));
}
TEST(QbonePacketExchangerTest,
WritePacketToNetworkQueuesPacketsAndProcessThemLater) {
MockVisitor visitor;
FakeQbonePacketExchanger exchanger(&visitor, kMaxPendingPackets);
MockQboneClient client;
exchanger.ForceWriteFailure(true, "");
std::vector<std::string> packets = {"packet0", "packet1"};
for (int i = 0; i < packets.size(); i++) {
exchanger.WritePacketToNetwork(packets[i].data(), packets[i].length());
}
ASSERT_TRUE(exchanger.packets_written().empty());
exchanger.ForceWriteFailure(false, "");
exchanger.SetWritable();
ASSERT_EQ(exchanger.packets_written().size(), 2);
for (int i = 0; i < packets.size(); i++) {
EXPECT_THAT(exchanger.packets_written()[i], StrEq(packets[i]));
}
}
TEST(QbonePacketExchangerTest,
SetWritableContinuesProcessingPacketIfPreviousCallBlocked) {
MockVisitor visitor;
FakeQbonePacketExchanger exchanger(&visitor, kMaxPendingPackets);
MockQboneClient client;
exchanger.ForceWriteFailure(true, "");
std::vector<std::string> packets = {"packet0", "packet1"};
for (int i = 0; i < packets.size(); i++) {
exchanger.WritePacketToNetwork(packets[i].data(), packets[i].length());
}
ASSERT_TRUE(exchanger.packets_written().empty());
exchanger.SetWritable();
ASSERT_TRUE(exchanger.packets_written().empty());
exchanger.ForceWriteFailure(false, "");
exchanger.SetWritable();
ASSERT_EQ(exchanger.packets_written().size(), 2);
for (int i = 0; i < packets.size(); i++) {
EXPECT_THAT(exchanger.packets_written()[i], StrEq(packets[i]));
}
}
TEST(QbonePacketExchangerTest, WritePacketToNetworkDropsPacketIfQueueIfFull) {
std::vector<std::string> packets = {"packet0", "packet1", "packet2"};
size_t queue_size = packets.size() - 1;
MockVisitor visitor;
FakeQbonePacketExchanger exchanger(&visitor, queue_size);
MockQboneClient client;
exchanger.ForceWriteFailure(true, "");
for (int i = 0; i < packets.size(); i++) {
exchanger.WritePacketToNetwork(packets[i].data(), packets[i].length());
}
ASSERT_TRUE(exchanger.packets_written().empty());
exchanger.ForceWriteFailure(false, "");
exchanger.SetWritable();
ASSERT_EQ(exchanger.packets_written().size(), queue_size);
for (int i = 0; i < queue_size; i++) {
EXPECT_THAT(exchanger.packets_written()[i], StrEq(packets[i]));
}
}
TEST(QbonePacketExchangerTest, WriteErrorsGetNotified) {
MockVisitor visitor;
FakeQbonePacketExchanger exchanger(&visitor, kMaxPendingPackets);
MockQboneClient client;
std::string packet = "data";
std::string io_error = "I/O error";
exchanger.ForceWriteFailure(false, io_error);
EXPECT_CALL(visitor, OnWriteError(StrEq(io_error))).Times(1);
exchanger.WritePacketToNetwork(packet.data(), packet.length());
ASSERT_TRUE(exchanger.packets_written().empty());
exchanger.ForceWriteFailure(true, "");
exchanger.WritePacketToNetwork(packet.data(), packet.length());
std::string sys_error = "sys error";
exchanger.ForceWriteFailure(false, sys_error);
EXPECT_CALL(visitor, OnWriteError(StrEq(sys_error))).Times(1);
exchanger.SetWritable();
ASSERT_TRUE(exchanger.packets_written().empty());
}
TEST(QbonePacketExchangerTest, NullVisitorDoesntCrash) {
FakeQbonePacketExchanger exchanger(nullptr, kMaxPendingPackets);
MockQboneClient client;
std::string packet = "data";
std::string io_error = "I/O error";
exchanger.SetReadError(io_error);
EXPECT_FALSE(exchanger.ReadAndDeliverPacket(&client));
exchanger.ForceWriteFailure(false, io_error);
exchanger.WritePacketToNetwork(packet.data(), packet.length());
EXPECT_TRUE(exchanger.packets_written().empty());
}
}
}
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/qbone_packet_exchanger.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/qbone_packet_exchanger_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
46384a61-f663-42db-afe4-785b596082c0 | cpp | tensorflow/tensorflow | platform_strings | tensorflow/core/platform/platform_strings.cc | tensorflow/core/platform/platform_strings_test.cc
#include "tensorflow/core/platform/platform_strings.h"
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>
namespace tensorflow {
int GetPlatformStrings(const std::string& path,
std::vector<std::string>* found) {
int result;
FILE* ifp = fopen(path.c_str(), "rb");
if (ifp != nullptr) {
static const char prefix[] = TF_PLAT_STR_MAGIC_PREFIX_;
int first_char = prefix[1];
int last_char = -1;
int c;
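    // The magic prefix starts with a NUL byte, so a candidate match is
    // prefix[1] seen immediately after a 0 byte; the remaining prefix bytes
    // are then matched in order, followed by a NUL-terminated payload string.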
while ((c = getc(ifp)) != EOF) {
if (c == first_char && last_char == 0) {
int i = 2;
while (prefix[i] != 0 && (c = getc(ifp)) == prefix[i]) {
i++;
}
if (prefix[i] == 0) {
std::string str;
while ((c = getc(ifp)) != EOF && c != 0) {
str.push_back(c);
}
if (!str.empty()) {
found->push_back(str);
}
}
}
last_char = c;
}
result = (ferror(ifp) == 0) ? 0 : errno;
if (fclose(ifp) != 0) {
result = errno;
}
} else {
result = errno;
}
return result;
}
}
#include "tensorflow/core/platform/platform_strings.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef _WIN32
#include <unistd.h>
#endif
#include <string>
#include <vector>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/str_util.h"
TF_PLATFORM_STRINGS()
typedef std::vector<std::string> string_vec;
static int PrintStrings(const std::string file_name) {
int rc = 0;
string_vec str;
if (!tensorflow::GetPlatformStrings(file_name, &str)) {
for (int i = 0; i != str.size(); i++) {
printf("%s\n", str[i].c_str());
}
} else {
perror(file_name.c_str());
rc = 2;
}
return rc;
}
static bool GetValue(const string_vec &str, const std::string ¯o_name,
std::string *pvalue) {
std::string nam_eq = macro_name + "=";
int i = 0;
while (i != str.size() && !absl::StartsWith(str[i], nam_eq)) {
i++;
}
bool found = (i != str.size());
if (found) {
*pvalue = str[i].substr(nam_eq.size());
}
return found;
}
static void CheckStr(const string_vec &str, const std::string ¯o_name,
const std::string &value) {
std::string value_from_str;
if (GetValue(str, macro_name, &value_from_str)) {
if (value != value_from_str) {
LOG(ERROR) << "===== value=" << value
<< " value_from_str=" << value_from_str;
for (int i = 0; i != str.size(); i++) {
LOG(ERROR) << "% " << str[i];
}
LOG(ERROR) << "=====";
}
CHECK_EQ(value, value_from_str) << " " << macro_name << ": bad value";
} else {
if (value != macro_name) {
LOG(ERROR) << "===== value=" << value << " macro_name=" << macro_name;
for (int i = 0; i != str.size(); i++) {
LOG(ERROR) << "% " << str[i];
}
LOG(ERROR) << "=====";
}
CHECK_EQ(value, macro_name) << " " << macro_name << ": not found in binary";
}
}
#define AS_STR_1_(x) #x
#define AS_STR(x) AS_STR_1_(x)
static int RunTest(const std::string &binary_name) {
int rc = 0;
string_vec str;
if (!tensorflow::GetPlatformStrings(binary_name, &str)) {
CheckStr(str, "__linux__", AS_STR(__linux__));
CheckStr(str, "_WIN32", AS_STR(_WIN32));
CheckStr(str, "__APPLE__", AS_STR(__APPLE__));
CheckStr(str, "__x86_64__", AS_STR(__x86_64__));
CheckStr(str, "__aarch64__", AS_STR(__aarch64__));
CheckStr(str, "__powerpc64__", AS_STR(__powerpc64__));
CheckStr(str, "TF_PLAT_STR_VERSION", TF_PLAT_STR_VERSION_);
} else {
perror(binary_name.c_str());
rc = 2;
}
return rc;
}
int main(int argc, char *argv[]) {
tensorflow::Env *env = tensorflow::Env::Default();
static const char usage[] = "usage: platform_strings_test [file...]";
int rc = 0;
tensorflow::port::InitMain(usage, &argc, &argv);
if (argc == 1) {
printf("rc=%d\n", PrintStrings(env->GetExecutablePath()));
rc = RunTest(env->GetExecutablePath());
} else {
for (int argn = 1; argn != argc; argn++) {
rc |= PrintStrings(argv[argn]);
}
}
return rc;
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/platform_strings.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/platform_strings_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
df17418c-8f4a-4b2b-acdf-84b34d9ea22d | cpp | google/cel-cpp | arena_string_pool | common/arena_string_pool.h | common/arena_string_pool_test.cc
#ifndef THIRD_PARTY_CEL_CPP_COMMON_ARENA_STRING_POOL_H_
#define THIRD_PARTY_CEL_CPP_COMMON_ARENA_STRING_POOL_H_
#include <memory>
#include "absl/base/attributes.h"
#include "absl/base/nullability.h"
#include "absl/strings/string_view.h"
#include "common/arena_string.h"
#include "internal/string_pool.h"
#include "google/protobuf/arena.h"
namespace cel {
class ArenaStringPool;
absl::Nonnull<std::unique_ptr<ArenaStringPool>> NewArenaStringPool(
absl::Nonnull<google::protobuf::Arena*> arena ABSL_ATTRIBUTE_LIFETIME_BOUND);
class ArenaStringPool final {
public:
ArenaStringPool(const ArenaStringPool&) = delete;
ArenaStringPool(ArenaStringPool&&) = delete;
ArenaStringPool& operator=(const ArenaStringPool&) = delete;
ArenaStringPool& operator=(ArenaStringPool&&) = delete;
ArenaString InternString(absl::string_view string) {
return ArenaString(strings_.InternString(string));
}
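  // Deleted overload: guards against accidentally re-interning a string that
  // is already backed by an arena.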
ArenaString InternString(ArenaString) = delete;
private:
friend absl::Nonnull<std::unique_ptr<ArenaStringPool>> NewArenaStringPool(
absl::Nonnull<google::protobuf::Arena*>);
explicit ArenaStringPool(absl::Nonnull<google::protobuf::Arena*> arena)
: strings_(arena) {}
internal::StringPool strings_;
};
inline absl::Nonnull<std::unique_ptr<ArenaStringPool>> NewArenaStringPool(
absl::Nonnull<google::protobuf::Arena*> arena ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return std::unique_ptr<ArenaStringPool>(new ArenaStringPool(arena));
}
}
#endif
#include "common/arena_string_pool.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace {
TEST(ArenaStringPool, InternString) {
google::protobuf::Arena arena;
auto string_pool = NewArenaStringPool(&arena);
auto expected = string_pool->InternString("Hello World!");
auto got = string_pool->InternString("Hello World!");
EXPECT_EQ(expected.data(), got.data());
}
}
}
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/arena_string_pool.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/arena_string_pool_test.cc | 4552db5798fb0853b131b783d8875794334fae7f
e6e6d503-beb5-4872-bfe4-475bd4b96eb3 | cpp | tensorflow/tensorflow | broadcast_canonicalizer | third_party/xla/xla/service/broadcast_canonicalizer.cc | third_party/xla/xla/service/broadcast_canonicalizer_test.cc
#include "xla/service/broadcast_canonicalizer.h"
#include <cstdint>
#include <iterator>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
BroadcastCanonicalizer::BroadcastCanonicalizer() {}
absl::StatusOr<bool> BroadcastCanonicalizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (const auto& computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* hlo : computation->MakeInstructionPostOrder()) {
if (hlo->opcode() != HloOpcode::kBroadcast) {
continue;
}
if (absl::c_is_sorted(hlo->dimensions())) {
continue;
}
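      // Canonicalize: emit a broadcast with sorted dimensions, then a
      // transpose that restores the original dimension order.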
std::vector<int64_t> new_dims(hlo->dimensions().begin(),
hlo->dimensions().end());
std::vector<int64_t> original_dims(hlo->dimensions().begin(),
hlo->dimensions().end());
std::vector<int64_t> new_broadcast_dims(hlo->shape().dimensions().begin(),
hlo->shape().dimensions().end());
absl::c_sort(new_dims);
const int64_t rank = hlo->shape().rank();
for (int i = 0; i < new_dims.size(); ++i) {
new_broadcast_dims[new_dims[i]] =
hlo->operand(0)->shape().dimensions(i);
}
auto new_broadcast = MakeBroadcastHlo(hlo->mutable_operand(0), new_dims,
new_broadcast_dims);
std::vector<int64_t> transpose_dims(rank);
absl::c_iota(transpose_dims, 0);
for (int i = 0; i < new_dims.size(); ++i) {
transpose_dims[new_dims[i]] = new_dims[std::distance(
original_dims.begin(), absl::c_find(original_dims, new_dims[i]))];
}
TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction(
hlo, HloInstruction::CreateTranspose(hlo->shape(), new_broadcast,
transpose_dims)));
changed = true;
}
}
return changed;
}
}
#include "xla/service/broadcast_canonicalizer.h"
#include <functional>
#include <memory>
#include <optional>
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class BroadcastCanonicalizerTest : public HloTestBase {};
TEST_F(BroadcastCanonicalizerTest, ReshapeBroadcast) {
const char* hlo = R"(
HloModule fusion.1644
ENTRY fusion.1644 {
parameter.2 = f32[2,3,2]{2,1,0} parameter(0)
%broadcast.399 = f32[3,2,8,2]{3,2,1,0} broadcast(%parameter.2), dimensions={1,0,3}
ROOT %reshape.43 = f32[3,16,1,2]{3,2,1,0} reshape(f32[3,2,8,2]{3,2,1,0} %broadcast.399)
}
)";
RunAndFilecheckHloRewrite(hlo, BroadcastCanonicalizer{}, R"(
)");
}
TEST_F(BroadcastCanonicalizerTest, ReshapeBroadcast22) {
const char* hlo = R"(
HloModule fusion.1644
ENTRY fusion.1644 {
parameter.2 = f32[5,6,7]{2,1,0} parameter(0)
%broadcast.399 = f32[8,7,9,5,6]{4,3,2,1,0} broadcast(%parameter.2), dimensions={3,4,1}
ROOT %reshape.43 = f32[8,7,45,1,6]{4,3,2,1,0} reshape(%broadcast.399)
}
)";
RunAndFilecheckHloRewrite(hlo, BroadcastCanonicalizer{}, R"(
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/broadcast_canonicalizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/broadcast_canonicalizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8476d0e3-b0ac-4493-9140-47533d5d00cd | cpp | tensorflow/tensorflow | dynamic_slice_thunk | third_party/xla/xla/service/gpu/runtime/dynamic_slice_thunk.cc | third_party/xla/xla/service/gpu/runtime/dynamic_slice_thunk_test.cc | #include "xla/service/gpu/runtime/dynamic_slice_thunk.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "llvm/ADT/STLExtras.h"
#include "xla/literal.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/runtime/while_thunk.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/stream.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
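// DynamicSliceThunk wraps an embedded thunk sequence and, before running it,
// re-points selected arguments at byte slices whose offsets are only known at
// execution time. Arguments without offsets are forwarded unchanged.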
DynamicSliceThunk::DynamicSliceThunk(
ThunkInfo thunk_info, std::unique_ptr<ThunkSequence> embedded_thunk,
std::vector<std::optional<BufferAllocation::Slice>> arguments,
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations,
std::vector<std::optional<std::vector<Offset>>> offsets,
std::vector<std::optional<Shape>> orig_shapes,
std::vector<std::optional<Shape>> sliced_shapes,
std::vector<std::optional<uint64_t>> offset_byte_sizes)
: Thunk(Kind::kDynamicSlice, thunk_info),
embedded_thunk_(std::make_unique<SequentialThunk>(
ThunkInfo(), std::move(*embedded_thunk))),
fake_allocations_(std::move(fake_allocations)) {
for (auto [arg, offsets, orig_shape, sliced_shape, offset_byte_size] :
llvm::zip_equal(arguments, offsets, orig_shapes, sliced_shapes,
offset_byte_sizes)) {
slices_.push_back(SliceDef{
std::move(arg),
std::move(offsets),
std::move(orig_shape),
std::move(sliced_shape),
std::move(offset_byte_size),
});
}
for (SliceDef& slice : slices_) {
offsets_allocs_base_.push_back(offsets_allocs_size_);
if (slice.sliced_shape.has_value()) {
offsets_allocs_size_ += slice.sliced_shape->rank() * sizeof(int64_t);
}
}
}
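// Flattens an integer array literal (S32, S64, U32 or U64) into int64_t
// values; ExecuteOnStream consumes one entry per while-loop iteration.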
DynamicSliceThunk::OffsetArray::OffsetArray(const Literal& l) {
CHECK(l.shape().IsArray()) << "Expected array literal, got " << l.ToString();
for (int i = 0; i < l.element_count(); i++) {
switch (l.shape().element_type()) {
case S32:
values.push_back(l.data<int32_t>()[i]);
break;
case S64:
values.push_back(l.data<int64_t>()[i]);
break;
case U32:
values.push_back(l.data<uint32_t>()[i]);
break;
case U64:
CHECK(l.data<uint64_t>()[i] <
static_cast<uint64_t>(std::numeric_limits<int64_t>::max()))
<< "Offset value: " << l.data<uint64_t>()[i]
<< " cannot fit in int64_t";
values.push_back(l.data<uint64_t>()[i]);
break;
default:
CHECK(false) << "Offset array must be of a supported integer type "
"(S32, S64, U32, U64), found: "
<< l.shape().element_type();
}
}
}
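// Checks that every sliced argument is fully and consistently described
// (array shapes, equal ranks, one offset per dimension) before preparing the
// embedded thunks.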
absl::Status DynamicSliceThunk::Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
for (SliceDef& slice : slices_) {
if (slice.offsets.has_value()) {
TF_RET_CHECK(slice.embedded_thunk_argument.has_value());
TF_RET_CHECK(slice.orig_shape.has_value());
TF_RET_CHECK(slice.sliced_shape.has_value());
TF_RET_CHECK(slice.offset_byte_size.has_value());
TF_RET_CHECK(slice.orig_shape->IsArray());
TF_RET_CHECK(slice.sliced_shape->IsArray());
TF_RET_CHECK(slice.offsets->size() == slice.orig_shape->rank());
TF_RET_CHECK(slice.sliced_shape->rank() == slice.orig_shape->rank());
}
}
TF_RETURN_IF_ERROR(embedded_thunk_->Prepare(params, resource_requests));
return absl::OkStatus();
}
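// Lazily allocates, once per executor, a host buffer used to stage offset
// values copied back from the device.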
absl::Status DynamicSliceThunk::Initialize(const InitializeParams& params) {
TF_RETURN_IF_ERROR(embedded_thunk_->Initialize(params));
absl::MutexLock lock(&mutex_);
if (offsets_allocs_.contains(params.executor)) return absl::OkStatus();
VLOG(2) << "Allocate " << offsets_allocs_size_
<< " bytes for transferring offsets on executor: " << params.executor;
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::MemoryAllocation> allocation,
params.executor->HostMemoryAllocate(offsets_allocs_size_));
offsets_allocs_.emplace(params.executor, std::move(allocation));
return absl::OkStatus();
}
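// Resolves each offset from one of four sources (a constant, the current
// while-loop iteration, a per-iteration offset array, or a device buffer
// fetched with Memcpy and awaited below), then computes the byte slice for
// every sliced argument and runs the embedded thunks on those slices.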
absl::Status DynamicSliceThunk::ExecuteOnStream(const ExecuteParams& params) {
se::Stream& stream = *params.stream;
const BufferAllocations& orig_allocations = *params.buffer_allocations;
absl::InlinedVector<se::DeviceMemoryBase, 8> slice_buffers(
slices_.size(), se::DeviceMemoryBase());
int64_t* offsets_alloc = [&] {
absl::MutexLock lock(&mutex_);
return reinterpret_cast<int64_t*>(
offsets_allocs_.at(stream.parent())->opaque());
}();
auto offset_value = [&](int64_t arg_idx, int64_t offset_idx) -> int64_t& {
return offsets_alloc[offsets_allocs_base_.at(arg_idx) + offset_idx];
};
VLOG(2) << "Execute address computation thunk: slices=" << slices_.size();
for (auto [argument_idx, slice] : llvm::enumerate(slices_)) {
if (!slice.embedded_thunk_argument.has_value()) {
continue;
}
se::DeviceMemoryBase argument_buffer =
orig_allocations.GetDeviceAddress(*slice.embedded_thunk_argument);
if (!slice.offsets.has_value()) {
slice_buffers[argument_idx] = argument_buffer;
continue;
}
const Shape& src_shape = *slice.orig_shape;
const Shape& dst_shape = *slice.sliced_shape;
absl::InlinedVector<int64_t, 4> slice_starts;
slice_starts.reserve(dst_shape.rank());
int64_t num_transfers = 0;
for (auto [offset_idx, values] : llvm::enumerate(llvm::zip(
*slice.offsets, src_shape.dimensions(), dst_shape.dimensions()))) {
auto [offset, src_dim, dst_dim] = values;
if (uint64_t* const_offset = std::get_if<uint64_t>(&offset)) {
VLOG(2) << " - arg " << argument_idx << "[" << offset_idx
<< "]: constant offset = " << *const_offset;
offset_value(argument_idx, offset_idx) = *const_offset;
} else if (std::holds_alternative<LoopIter>(offset)) {
TF_ASSIGN_OR_RETURN(int64_t iter, WhileThunk::CurrentLoopIteration());
VLOG(2) << " - arg " << argument_idx << "[" << offset_idx
<< "]: loop iteration offset = " << iter;
offset_value(argument_idx, offset_idx) = iter;
} else if (OffsetArray* offset_array =
std::get_if<OffsetArray>(&offset)) {
TF_ASSIGN_OR_RETURN(int64_t iter, WhileThunk::CurrentLoopIteration());
VLOG(2) << " - arg " << argument_idx << "[" << offset_idx
<< "]: offset array offset = " << offset_array->values[iter];
offset_value(argument_idx, offset_idx) = offset_array->values[iter];
} else {
auto alloc_slice = std::get<BufferAllocation::Slice>(offset);
VLOG(2) << " - arg " << argument_idx << "[" << offset_idx
<< "]: transfer offset from device " << alloc_slice.ToString();
se::DeviceMemoryBase offset_src =
orig_allocations.GetDeviceAddress(alloc_slice);
int64_t* offset_dst = &offset_value(argument_idx, offset_idx);
TF_RETURN_IF_ERROR(
stream.Memcpy(offset_dst, offset_src, *slice.offset_byte_size));
++num_transfers;
}
}
if (num_transfers > 0) {
VLOG(2) << "Wait for completion of " << num_transfers << " transfer";
TF_RETURN_IF_ERROR(stream.BlockHostUntilDone());
}
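// Clamp each start index into [0, src_dim - dst_dim], matching HLO
// dynamic-slice semantics for out-of-bounds offsets.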
for (auto [offset_idx, values] : llvm::enumerate(
llvm::zip(src_shape.dimensions(), dst_shape.dimensions()))) {
auto [src_dim, dst_dim] = values;
int64_t start_index =
std::min(std::max(offset_value(argument_idx, offset_idx), int64_t{0}),
src_dim - dst_dim);
slice_starts.push_back(start_index);
}
int64_t new_size = ShapeUtil::ByteSizeOf(dst_shape);
int64_t new_offset = 0;
for (auto [start, stride] :
llvm::zip(slice_starts, *ShapeUtil::ByteStrides(src_shape))) {
new_offset += start * stride;
}
VLOG(2) << "Create sliced argument " << argument_idx << " of shape "
<< slice.sliced_shape->ToString()
<< " by slicing argument of shape " << slice.orig_shape->ToString()
<< " at offset " << new_offset << " with " << new_size;
slice_buffers[argument_idx] =
argument_buffer.GetByteSlice(new_offset, new_size);
}
BufferAllocations slice_allocations(slice_buffers,
orig_allocations.device_ordinal(),
orig_allocations.memory_allocator());
Thunk::ExecuteParams new_params =
Thunk::ExecuteParams::CloneWithNewAllocations(params, slice_allocations);
TF_RETURN_IF_ERROR(embedded_thunk_->ExecuteOnStream(new_params));
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/runtime/dynamic_slice_thunk.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/runtime/custom_call_thunk.h"
#include "xla/service/gpu/runtime/gemm_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/platform_util.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#if GOOGLE_CUDA
#define PLATFORM "CUDA"
#elif TENSORFLOW_USE_ROCM
#define PLATFORM "ROCM"
#endif
namespace xla::gpu {
namespace {
static se::StreamExecutor* GpuExecutor() {
auto name =
absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value());
auto* platform = se::PlatformManager::PlatformWithName(name).value();
return platform->ExecutorForDevice(0).value();
}
}
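// Slices a f32[2,4] lhs down to f32[1,3] at device-resident offsets {0, 1}
// and multiplies it with a ones vector; the selected window {2, 3, 4} dotted
// with ones yields 9.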
TEST(DynamicSliceThunkTest, SlicedGemm) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t lhs_length = sizeof(float) * 2 * 4;
int64_t rhs_length = sizeof(float) * 3 * 1;
int64_t out_length = sizeof(float) * 1 * 1;
int64_t offset_length = sizeof(int64_t);
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(0, rhs_length, 0));
BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0,
rhs_length);
BufferAllocation alloc_lhs(0, lhs_length, 0);
BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, lhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(1, rhs_length, 0));
BufferAllocation::Slice slice_rhs(fake_allocations.back().get(), 0,
rhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(2, out_length, 0));
BufferAllocation::Slice slice_out(fake_allocations.back().get(), 0,
out_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
3, 1024 * 1024, 0));
BufferAllocation::Slice slice_workspace(fake_allocations.back().get(), 0,
1024 * 1024);
BufferAllocation alloc_lhs_offset_0(4, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0,
offset_length);
BufferAllocation alloc_lhs_offset_1(5, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0,
offset_length);
auto config =
GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1},
ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0},
ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0,
0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt,
se::blas::kDefaultComputePrecision, false, false);
ASSERT_TRUE(config.ok());
ThunkSequence seq;
seq.emplace_back(std::make_unique<GemmThunk>(
Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs, slice_out,
slice_workspace, true));
std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0,
slice_lhs_offset_1};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_lhs, slice_rhs, slice_out, slice_workspace},
std::move(fake_allocations),
{lhs_offsets, std::nullopt, std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), std::nullopt,
std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), std::nullopt,
std::nullopt, std::nullopt},
{sizeof(int64_t), std::nullopt, std::nullopt, std::nullopt});
se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4);
std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8};
TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length));
se::DeviceMemory<float> rhs = executor->AllocateArray<float>(3 * 1);
std::vector<float> rhs_arr(3, 1);
TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length));
se::DeviceMemory<float> out = executor->AllocateArray<float>(1 * 1);
TF_ASSERT_OK(stream->MemZero(&out, out_length));
se::DeviceMemory<float> workspace =
executor->AllocateArray<float>(1024 * 1024);
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024));
se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> lhs_offset_arr{0, 1};
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations(
{lhs, rhs, out, workspace, lhs_offset_0, lhs_offset_1}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<float> dst(1, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length));
ASSERT_EQ(dst, std::vector<float>({9}));
}
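// Slices both GEMM operands from device buffers: the lhs window {2, 3, 4}
// against rhs rows {3, 4, 5} (row offset 2 of {1..8}) gives
// 2*3 + 3*4 + 4*5 = 38.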
TEST(DynamicSliceThunkTest, MulipleSlicedOperandsGemm) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = sizeof(float) * 2 * 4;
int64_t out_length = sizeof(float) * 1;
int64_t offset_length = sizeof(int64_t);
int64_t slice_length = sizeof(float) * 3;
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
0, slice_length, 0));
BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0,
slice_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
1, slice_length, 0));
BufferAllocation::Slice slice_rhs_fake(fake_allocations.back().get(), 0,
slice_length);
BufferAllocation alloc_lhs(0, length, 0);
BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, length);
BufferAllocation alloc_rhs(1, length, 0);
BufferAllocation::Slice slice_rhs(&alloc_rhs, 0, length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(2, out_length, 0));
BufferAllocation::Slice slice_out(fake_allocations.back().get(), 0,
out_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
3, 1024 * 1024, 0));
BufferAllocation::Slice slice_workspace(fake_allocations.back().get(), 0,
1024 * 1024);
BufferAllocation alloc_lhs_offset_0(4, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0,
offset_length);
BufferAllocation alloc_lhs_offset_1(5, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0,
offset_length);
BufferAllocation alloc_rhs_offset_0(6, offset_length,
0);
BufferAllocation::Slice slice_rhs_offset_0(&alloc_rhs_offset_0, 0,
offset_length);
BufferAllocation alloc_rhs_offset_1(7, offset_length,
0);
BufferAllocation::Slice slice_rhs_offset_1(&alloc_rhs_offset_1, 0,
offset_length);
auto config =
GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1},
ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0},
ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0,
0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt,
se::blas::kDefaultComputePrecision, false, false);
ASSERT_TRUE(config.ok());
ThunkSequence seq;
seq.emplace_back(std::make_unique<GemmThunk>(
Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs_fake,
slice_out, slice_workspace, true));
std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0,
slice_lhs_offset_1};
std::vector<DynamicSliceThunk::Offset> rhs_offsets{slice_rhs_offset_0,
slice_rhs_offset_1};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_lhs, slice_rhs, slice_out, slice_workspace},
std::move(fake_allocations),
{lhs_offsets, rhs_offsets, std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}),
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 1}), std::nullopt,
std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}),
ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), std::nullopt,
std::nullopt},
{sizeof(int64_t), sizeof(int64_t), std::nullopt, std::nullopt});
std::vector<float> arr{1, 2, 3, 4, 5, 6, 7, 8};
se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4);
TF_ASSERT_OK(stream->Memcpy(&lhs, arr.data(), length));
se::DeviceMemory<float> rhs = executor->AllocateArray<float>(8);
std::vector<float> rhs_arr(8, 1);
TF_ASSERT_OK(stream->Memcpy(&rhs, arr.data(), length));
se::DeviceMemory<float> out = executor->AllocateArray<float>(1);
TF_ASSERT_OK(stream->MemZero(&out, out_length));
se::DeviceMemory<float> workspace =
executor->AllocateArray<float>(1024 * 1024);
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024));
se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> lhs_offset_arr{0, 1};
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length));
se::DeviceMemory<int64_t> rhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> rhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> rhs_offset_arr{2, 0};
TF_ASSERT_OK(
stream->Memcpy(&rhs_offset_0, &rhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&rhs_offset_1, &rhs_offset_arr[1], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({lhs, rhs, out, workspace, lhs_offset_0,
lhs_offset_1, rhs_offset_0, rhs_offset_1},
0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<float> dst(1, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length));
ASSERT_EQ(dst, std::vector<float>({2 * 3 + 3 * 4 + 4 * 5}));
}
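// FFI custom-call handler that performs a plain device-to-device copy; it is
// registered as __xla_test$$memcpy and used by the sliced-memcpy tests below.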
static absl::Status Memcpy(se::Stream* stream, ffi::AnyBuffer src,
ffi::Result<ffi::AnyBuffer> dst) {
se::DeviceMemoryBase dst_mem = dst->device_memory();
se::DeviceMemoryBase src_mem = src.device_memory();
return stream->MemcpyD2D(&dst_mem, src_mem, src_mem.size());
}
XLA_FFI_DEFINE_HANDLER(kMemcpy, Memcpy,
ffi::Ffi::Bind()
.Ctx<ffi::Stream>()
.Arg<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$memcpy", PLATFORM,
kMemcpy);
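// Slices s32[8,8,10,8] down to a [1,1,8,8] window at offsets {3, 5, 2, 0} and
// copies it through the custom call; the reference is read from the same
// linearized offset of the source array.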
TEST(DynamicSliceThunkTest, SlicedMemcpy) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t src_count = 8 * 8 * 10 * 8;
int64_t dst_count = 8 * 8;
int64_t src_length = sizeof(int32_t) * src_count;
int64_t dst_length = sizeof(int32_t) * dst_count;
int64_t offset_length = sizeof(int64_t);
int64_t slice_length = sizeof(int32_t) * dst_count;
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(2);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
0, slice_length, 0));
BufferAllocation::Slice slice_src_fake(fake_allocations.back().get(), 0,
slice_length);
BufferAllocation alloc_src(0, src_length, 0);
BufferAllocation::Slice slice_src(&alloc_src, 0, src_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(1, dst_length, 0));
BufferAllocation::Slice slice_dst(fake_allocations.back().get(), 0,
dst_length);
BufferAllocation alloc_offset_0(2, offset_length, 0);
BufferAllocation::Slice slice_offset_0(&alloc_offset_0, 0, offset_length);
BufferAllocation alloc_offset_1(3, offset_length, 0);
BufferAllocation::Slice slice_offset_1(&alloc_offset_1, 0, offset_length);
BufferAllocation alloc_offset_2(4, offset_length, 0);
BufferAllocation::Slice slice_offset_2(&alloc_offset_2, 0, offset_length);
BufferAllocation alloc_offset_3(5, offset_length, 0);
BufferAllocation::Slice slice_offset_3(&alloc_offset_3, 0, offset_length);
auto registration = xla::ffi::FindHandler("__xla_test$$memcpy", PLATFORM);
ASSERT_TRUE(registration.ok());
std::vector<std::optional<CustomCallThunk::Slice>> operands{
CustomCallThunk::Slice{slice_src_fake,
ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8})}};
std::vector<std::optional<CustomCallThunk::Slice>> results{
CustomCallThunk::Slice{slice_dst,
ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8})}};
ThunkSequence seq;
TF_ASSERT_OK_AND_ASSIGN(
seq.emplace_back(),
CustomCallThunk::Create(Thunk::ThunkInfo(), registration->bundle,
operands, results,
CustomCallThunk::AttributesMap(),
nullptr));
std::vector<DynamicSliceThunk::Offset> slice_offsets{
slice_offset_0, slice_offset_1, slice_offset_2, slice_offset_3};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_src, slice_dst}, std::move(fake_allocations),
{slice_offsets, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8, 10, 8}), std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::S32, {1, 1, 8, 8}), std::nullopt},
{sizeof(int64_t), std::nullopt});
se::DeviceMemory<int32_t> src = executor->AllocateArray<int32_t>(src_count);
std::vector<int32_t> src_arr(src_count, 0);
for (unsigned i = 0; i < src_count; ++i) src_arr[i] = i;
TF_ASSERT_OK(stream->Memcpy(&src, src_arr.data(), src_length));
se::DeviceMemory<int32_t> dst = executor->AllocateArray<int32_t>(dst_count);
TF_ASSERT_OK(stream->MemZero(&dst, dst_length));
se::DeviceMemory<int64_t> offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> offset_1 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> offset_2 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> offset_3 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> offset_arr{3, 5, 2, 0};
TF_ASSERT_OK(stream->Memcpy(&offset_0, &offset_arr[0], offset_length));
TF_ASSERT_OK(stream->Memcpy(&offset_1, &offset_arr[1], offset_length));
TF_ASSERT_OK(stream->Memcpy(&offset_2, &offset_arr[2], offset_length));
TF_ASSERT_OK(stream->Memcpy(&offset_3, &offset_arr[3], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations(
{src, dst, offset_0, offset_1, offset_2, offset_3}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> out(dst_count, 0);
TF_ASSERT_OK(stream->Memcpy(out.data(), dst, dst_length));
std::vector<int32_t> ref(dst_count, 0);
int64_t offset_val =
offset_arr[3] +
8 * (offset_arr[2] + 10 * (offset_arr[1] + 8 * offset_arr[0]));
std::copy(src_arr.begin() + offset_val,
src_arr.begin() + offset_val + dst_count, ref.begin());
ASSERT_EQ(out, ref);
}
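// Slices on both sides of the copy: src offsets select a [1,1,2,2] window of
// s32[8,8,10,2], and dst offsets select where that window lands inside the
// s32[2,2,2,2] output.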
TEST(DynamicSliceThunkTest, SlicedOutputMemcpy) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t src_count = 8 * 8 * 10 * 2;
int64_t dst_count = 2 * 2 * 2 * 2;
int64_t slice_count = 2 * 2;
int64_t src_length = sizeof(int32_t) * src_count;
int64_t dst_length = sizeof(int32_t) * dst_count;
int64_t offset_length = sizeof(int64_t);
int64_t slice_length = sizeof(int32_t) * slice_count;
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(2);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
0, slice_length, 0));
BufferAllocation::Slice slice_src_fake(fake_allocations.back().get(), 0,
slice_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
1, slice_length, 0));
BufferAllocation::Slice slice_dst_fake(fake_allocations.back().get(), 0,
slice_length);
BufferAllocation alloc_src(0, src_length, 0);
BufferAllocation::Slice slice_src(&alloc_src, 0, src_length);
BufferAllocation alloc_dst(1, dst_length, 0);
BufferAllocation::Slice slice_dst(&alloc_dst, 0, dst_length);
BufferAllocation alloc_src_offset_0(2, offset_length, 0);
BufferAllocation::Slice slice_src_offset_0(&alloc_src_offset_0, 0,
offset_length);
BufferAllocation alloc_src_offset_1(3, offset_length, 0);
BufferAllocation::Slice slice_src_offset_1(&alloc_src_offset_1, 0,
offset_length);
BufferAllocation alloc_src_offset_2(4, offset_length, 0);
BufferAllocation::Slice slice_src_offset_2(&alloc_src_offset_2, 0,
offset_length);
BufferAllocation alloc_src_offset_3(5, offset_length, 0);
BufferAllocation::Slice slice_src_offset_3(&alloc_src_offset_3, 0,
offset_length);
BufferAllocation alloc_dst_offset_0(6, offset_length, 0);
BufferAllocation::Slice slice_dst_offset_0(&alloc_dst_offset_0, 0,
offset_length);
BufferAllocation alloc_dst_offset_1(7, offset_length, 0);
BufferAllocation::Slice slice_dst_offset_1(&alloc_dst_offset_1, 0,
offset_length);
BufferAllocation alloc_dst_offset_2(8, offset_length, 0);
BufferAllocation::Slice slice_dst_offset_2(&alloc_dst_offset_2, 0,
offset_length);
BufferAllocation alloc_dst_offset_3(9, offset_length, 0);
BufferAllocation::Slice slice_dst_offset_3(&alloc_dst_offset_3, 0,
offset_length);
auto registration = xla::ffi::FindHandler("__xla_test$$memcpy", PLATFORM);
ASSERT_TRUE(registration.ok());
std::vector<std::optional<CustomCallThunk::Slice>> operands{
CustomCallThunk::Slice{slice_src_fake,
ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2})}};
std::vector<std::optional<CustomCallThunk::Slice>> results{
CustomCallThunk::Slice{slice_dst_fake,
ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2})}};
ThunkSequence seq;
TF_ASSERT_OK_AND_ASSIGN(
seq.emplace_back(),
CustomCallThunk::Create(Thunk::ThunkInfo(), registration->bundle,
operands, results,
CustomCallThunk::AttributesMap(),
nullptr));
std::vector<DynamicSliceThunk::Offset> slice_src_offsets{
slice_src_offset_0, slice_src_offset_1, slice_src_offset_2,
slice_src_offset_3};
std::vector<DynamicSliceThunk::Offset> slice_dst_offsets{
slice_dst_offset_0, slice_dst_offset_1, slice_dst_offset_2,
slice_dst_offset_3};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_src, slice_dst}, std::move(fake_allocations),
{slice_src_offsets, slice_dst_offsets},
{ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8, 10, 2}),
ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2, 2, 2})},
{ShapeUtil::MakeShape(PrimitiveType::S32, {1, 1, 2, 2}),
ShapeUtil::MakeShape(PrimitiveType::S32, {1, 1, 2, 2})},
{sizeof(int64_t), sizeof(int64_t)});
se::DeviceMemory<int32_t> src = executor->AllocateArray<int32_t>(src_count);
std::vector<int32_t> src_arr(src_count, 0);
for (unsigned i = 0; i < src_count; ++i) src_arr[i] = i;
TF_ASSERT_OK(stream->Memcpy(&src, src_arr.data(), src_length));
se::DeviceMemory<int32_t> dst = executor->AllocateArray<int32_t>(dst_count);
TF_ASSERT_OK(stream->MemZero(&dst, dst_length));
se::DeviceMemory<int64_t> src_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> src_offset_1 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> src_offset_2 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> src_offset_3 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> src_offset_arr{3, 5, 2, 0};
TF_ASSERT_OK(
stream->Memcpy(&src_offset_0, &src_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&src_offset_1, &src_offset_arr[1], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&src_offset_2, &src_offset_arr[2], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&src_offset_3, &src_offset_arr[3], offset_length));
se::DeviceMemory<int64_t> dst_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> dst_offset_1 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> dst_offset_2 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> dst_offset_3 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> dst_offset_arr{1, 1, 0, 0};
TF_ASSERT_OK(
stream->Memcpy(&dst_offset_0, &dst_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&dst_offset_1, &dst_offset_arr[1], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&dst_offset_2, &dst_offset_arr[2], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&dst_offset_3, &dst_offset_arr[3], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations(
{src, dst, src_offset_0, src_offset_1, src_offset_2, src_offset_3,
dst_offset_0, dst_offset_1, dst_offset_2, dst_offset_3},
0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> out(dst_count, 0);
TF_ASSERT_OK(stream->Memcpy(out.data(), dst, dst_length));
std::vector<int32_t> ref(dst_count, 0);
int64_t src_offset_val =
src_offset_arr[3] +
2 * (src_offset_arr[2] +
10 * (src_offset_arr[1] + 8 * src_offset_arr[0]));
int64_t dst_offset_val =
dst_offset_arr[3] +
2 * (dst_offset_arr[2] + 2 * (dst_offset_arr[1] + 2 * dst_offset_arr[0]));
std::copy(src_arr.begin() + src_offset_val,
src_arr.begin() + src_offset_val + slice_count,
ref.begin() + dst_offset_val);
ASSERT_EQ(out, ref);
}
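// Same sliced GEMM, but the real buffers are handed to BufferAllocations in
// an order unrelated to the fake allocation indices the embedded thunk uses.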
TEST(DynamicSliceThunkTest, SlicedGemmArbitraryArgumentOrder) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t lhs_length = sizeof(float) * 2 * 4;
int64_t rhs_length = sizeof(float) * 3 * 1;
int64_t out_length = sizeof(float) * 1 * 1;
int64_t offset_length = sizeof(int64_t);
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(0, rhs_length, 0));
BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0,
rhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(1, rhs_length, 0));
BufferAllocation::Slice slice_rhs_fake(fake_allocations.back().get(), 0,
rhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(2, out_length, 0));
BufferAllocation::Slice slice_out_fake(fake_allocations.back().get(), 0,
out_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
3, 1024 * 1024, 0));
BufferAllocation::Slice slice_workspace_fake(fake_allocations.back().get(), 0,
1024 * 1024);
BufferAllocation alloc_lhs(1, lhs_length, 0);
BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, lhs_length);
BufferAllocation alloc_rhs(3, rhs_length, 0);
BufferAllocation::Slice slice_rhs(&alloc_rhs, 0, rhs_length);
BufferAllocation alloc_out(2, out_length, 0);
BufferAllocation::Slice slice_out(&alloc_out, 0, out_length);
BufferAllocation alloc_workspace(0, 1024 * 1024, 0);
BufferAllocation::Slice slice_workspace(&alloc_workspace, 0, 1024 * 1024);
BufferAllocation alloc_lhs_offset_0(4, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0,
offset_length);
BufferAllocation alloc_lhs_offset_1(5, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0,
offset_length);
auto config =
GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1},
ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0},
ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0,
0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt,
se::blas::kDefaultComputePrecision, false, false);
ASSERT_TRUE(config.ok());
ThunkSequence seq;
seq.emplace_back(std::make_unique<GemmThunk>(
Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs_fake,
slice_out_fake, slice_workspace_fake, true));
std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0,
slice_lhs_offset_1};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_lhs, slice_rhs, slice_out, slice_workspace},
std::move(fake_allocations),
{lhs_offsets, std::nullopt, std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), std::nullopt,
std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), std::nullopt,
std::nullopt, std::nullopt},
{sizeof(int64_t), std::nullopt, std::nullopt, std::nullopt});
se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4);
std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8};
TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length));
se::DeviceMemory<float> rhs = executor->AllocateArray<float>(3 * 1);
std::vector<float> rhs_arr(3, 1);
TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length));
se::DeviceMemory<float> out = executor->AllocateArray<float>(1 * 1);
TF_ASSERT_OK(stream->MemZero(&out, out_length));
se::DeviceMemory<float> workspace =
executor->AllocateArray<float>(1024 * 1024);
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024));
se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> lhs_offset_arr{0, 1};
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations(
{workspace, lhs, out, rhs, lhs_offset_0, lhs_offset_1}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<float> dst(1, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length));
ASSERT_EQ(dst, std::vector<float>({9}));
}
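// The BufferAllocations may also contain extra, unused entries (including an
// empty se::DeviceMemoryBase); only the slices the thunk references matter.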
TEST(DynamicSliceThunkTest, SlicedGemmArbitraryNumberOfArguments) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t lhs_length = sizeof(float) * 2 * 4;
int64_t rhs_length = sizeof(float) * 3 * 1;
int64_t out_length = sizeof(float) * 1 * 1;
int64_t offset_length = sizeof(int64_t);
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(0, rhs_length, 0));
BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0,
rhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(1, rhs_length, 0));
BufferAllocation::Slice slice_rhs_fake(fake_allocations.back().get(), 0,
rhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(2, out_length, 0));
BufferAllocation::Slice slice_out_fake(fake_allocations.back().get(), 0,
out_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
3, 1024 * 1024, 0));
BufferAllocation::Slice slice_workspace_fake(fake_allocations.back().get(), 0,
1024 * 1024);
BufferAllocation alloc_lhs(7, lhs_length, 0);
BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, lhs_length);
BufferAllocation alloc_rhs(3, rhs_length, 0);
BufferAllocation::Slice slice_rhs(&alloc_rhs, 0, rhs_length);
BufferAllocation alloc_out(2, out_length, 0);
BufferAllocation::Slice slice_out(&alloc_out, 0, out_length);
BufferAllocation alloc_workspace(0, 1024 * 1024, 0);
BufferAllocation::Slice slice_workspace(&alloc_workspace, 0, 1024 * 1024);
BufferAllocation alloc_lhs_offset_0(4, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0,
offset_length);
BufferAllocation alloc_lhs_offset_1(5, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0,
offset_length);
auto config =
GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1},
ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0},
ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0,
0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt,
se::blas::kDefaultComputePrecision, false, false);
ASSERT_TRUE(config.ok());
ThunkSequence seq;
seq.emplace_back(std::make_unique<GemmThunk>(
Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs_fake,
slice_out_fake, slice_workspace_fake, true));
std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0,
slice_lhs_offset_1};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_lhs, slice_rhs, slice_out, slice_workspace},
std::move(fake_allocations),
{lhs_offsets, std::nullopt, std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), std::nullopt,
std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), std::nullopt,
std::nullopt, std::nullopt},
{sizeof(int64_t), std::nullopt, std::nullopt, std::nullopt});
se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4);
std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8};
TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length));
se::DeviceMemory<float> rhs = executor->AllocateArray<float>(3 * 1);
std::vector<float> rhs_arr(3, 1);
TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length));
se::DeviceMemory<float> out = executor->AllocateArray<float>(1 * 1);
TF_ASSERT_OK(stream->MemZero(&out, out_length));
se::DeviceMemory<float> workspace =
executor->AllocateArray<float>(1024 * 1024);
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024));
se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> lhs_offset_arr{0, 1};
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations(
{workspace, se::DeviceMemoryBase(), out, rhs,
lhs_offset_0, lhs_offset_1, rhs, lhs},
0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<float> dst(1, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length));
ASSERT_EQ(dst, std::vector<float>({9}));
}
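// The lhs lives at a non-zero byte offset inside a larger allocation, as if
// it were one element of a tuple.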
TEST(DynamicSliceThunkTest, SlicedTupledOperandGemm) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t lhs_length = sizeof(float) * 2 * 4;
int64_t rhs_length = sizeof(float) * 3 * 1;
int64_t out_length = sizeof(float) * 1 * 1;
int64_t offset_length = sizeof(int64_t);
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(0, rhs_length, 0));
BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0,
rhs_length);
BufferAllocation alloc_lhs(0, 3 * lhs_length, 0);
BufferAllocation::Slice slice_lhs(&alloc_lhs, lhs_length, lhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(1, rhs_length, 0));
BufferAllocation::Slice slice_rhs(fake_allocations.back().get(), 0,
rhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(2, out_length, 0));
BufferAllocation::Slice slice_out(fake_allocations.back().get(), 0,
out_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
3, 1024 * 1024, 0));
BufferAllocation::Slice slice_workspace(fake_allocations.back().get(), 0,
1024 * 1024);
BufferAllocation alloc_lhs_offset_0(4, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0,
offset_length);
BufferAllocation alloc_lhs_offset_1(5, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0,
offset_length);
auto config =
GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1},
ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0},
ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0,
0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt,
se::blas::kDefaultComputePrecision, false, false);
ASSERT_TRUE(config.ok());
ThunkSequence seq;
seq.emplace_back(std::make_unique<GemmThunk>(
Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs, slice_out,
slice_workspace, true));
std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0,
slice_lhs_offset_1};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_lhs, slice_rhs, slice_out, slice_workspace},
std::move(fake_allocations),
{lhs_offsets, std::nullopt, std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), std::nullopt,
std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), std::nullopt,
std::nullopt, std::nullopt},
{sizeof(int64_t), std::nullopt, std::nullopt, std::nullopt});
se::DeviceMemory<float> lhs_whole_buffer =
executor->AllocateArray<float>(2 * 4 * 3);
TF_ASSERT_OK(stream->MemZero(&lhs_whole_buffer, 2 * 4 * 3));
std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8};
se::DeviceMemoryBase lhs =
lhs_whole_buffer.GetByteSlice(lhs_length, lhs_length);
TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length));
se::DeviceMemory<float> rhs = executor->AllocateArray<float>(3 * 1);
std::vector<float> rhs_arr(3, 1);
TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length));
se::DeviceMemory<float> out = executor->AllocateArray<float>(1 * 1);
TF_ASSERT_OK(stream->MemZero(&out, out_length));
se::DeviceMemory<float> workspace =
executor->AllocateArray<float>(1024 * 1024);
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024));
se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> lhs_offset_arr{0, 1};
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations(
{lhs_whole_buffer, rhs, out, workspace, lhs_offset_0, lhs_offset_1}, 0,
&allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<float> dst(1, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length));
ASSERT_EQ(dst, std::vector<float>({9}));
}
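// Offsets that fall outside the tensor are clamped (negative values to 0,
// oversized values to dim - slice_dim), so the output matches a reference
// built from the clamped offsets.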
TEST(DynamicSliceThunkTest, SlicedMemcpyOOB) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t src_count = 8 * 8 * 10 * 2;
int64_t dst_count = 2 * 2 * 2 * 2;
int64_t slice_count = 2 * 2;
int64_t src_length = sizeof(int32_t) * src_count;
int64_t dst_length = sizeof(int32_t) * dst_count;
int64_t offset_length = sizeof(int64_t);
int64_t slice_length = sizeof(int32_t) * slice_count;
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(2);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
0, slice_length, 0));
BufferAllocation::Slice slice_src_fake(fake_allocations.back().get(), 0,
slice_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
1, slice_length, 0));
BufferAllocation::Slice slice_dst_fake(fake_allocations.back().get(), 0,
slice_length);
BufferAllocation alloc_src(0, src_length, 0);
BufferAllocation::Slice slice_src(&alloc_src, 0, src_length);
BufferAllocation alloc_dst(1, dst_length, 0);
BufferAllocation::Slice slice_dst(&alloc_dst, 0, dst_length);
BufferAllocation alloc_src_offset_0(2, offset_length, 0);
BufferAllocation::Slice slice_src_offset_0(&alloc_src_offset_0, 0,
offset_length);
BufferAllocation alloc_src_offset_1(3, offset_length, 0);
BufferAllocation::Slice slice_src_offset_1(&alloc_src_offset_1, 0,
offset_length);
BufferAllocation alloc_src_offset_2(4, offset_length, 0);
BufferAllocation::Slice slice_src_offset_2(&alloc_src_offset_2, 0,
offset_length);
BufferAllocation alloc_src_offset_3(5, offset_length, 0);
BufferAllocation::Slice slice_src_offset_3(&alloc_src_offset_3, 0,
offset_length);
BufferAllocation alloc_dst_offset_0(6, offset_length, 0);
BufferAllocation::Slice slice_dst_offset_0(&alloc_dst_offset_0, 0,
offset_length);
BufferAllocation alloc_dst_offset_1(7, offset_length, 0);
BufferAllocation::Slice slice_dst_offset_1(&alloc_dst_offset_1, 0,
offset_length);
BufferAllocation alloc_dst_offset_2(8, offset_length, 0);
BufferAllocation::Slice slice_dst_offset_2(&alloc_dst_offset_2, 0,
offset_length);
BufferAllocation alloc_dst_offset_3(9, offset_length, 0);
BufferAllocation::Slice slice_dst_offset_3(&alloc_dst_offset_3, 0,
offset_length);
auto registration = xla::ffi::FindHandler("__xla_test$$memcpy", PLATFORM);
ASSERT_TRUE(registration.ok());
std::vector<std::optional<CustomCallThunk::Slice>> operands{
CustomCallThunk::Slice{slice_src_fake,
ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2})}};
std::vector<std::optional<CustomCallThunk::Slice>> results{
CustomCallThunk::Slice{slice_dst_fake,
ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2})}};
ThunkSequence seq;
TF_ASSERT_OK_AND_ASSIGN(
seq.emplace_back(),
CustomCallThunk::Create(Thunk::ThunkInfo(), registration->bundle,
operands, results,
CustomCallThunk::AttributesMap(),
nullptr));
std::vector<DynamicSliceThunk::Offset> slice_src_offsets{
slice_src_offset_0, slice_src_offset_1, slice_src_offset_2,
slice_src_offset_3};
std::vector<DynamicSliceThunk::Offset> slice_dst_offsets{
slice_dst_offset_0, slice_dst_offset_1, slice_dst_offset_2,
slice_dst_offset_3};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_src, slice_dst}, std::move(fake_allocations),
{slice_src_offsets, slice_dst_offsets},
{ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8, 10, 2}),
ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2, 2, 2})},
{ShapeUtil::MakeShape(PrimitiveType::S32, {1, 1, 2, 2}),
ShapeUtil::MakeShape(PrimitiveType::S32, {1, 1, 2, 2})},
{sizeof(int64_t), sizeof(int64_t)});
se::DeviceMemory<int32_t> src = executor->AllocateArray<int32_t>(src_count);
std::vector<int32_t> src_arr(src_count, 0);
for (unsigned i = 0; i < src_count; ++i) src_arr[i] = i;
TF_ASSERT_OK(stream->Memcpy(&src, src_arr.data(), src_length));
se::DeviceMemory<int32_t> dst = executor->AllocateArray<int32_t>(dst_count);
TF_ASSERT_OK(stream->MemZero(&dst, dst_length));
se::DeviceMemory<int64_t> src_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> src_offset_1 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> src_offset_2 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> src_offset_3 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> src_ref_offset_arr{3, 5, 2, 0};
std::vector<int64_t> src_offset_arr{3, 5, 2, -3};
TF_ASSERT_OK(
stream->Memcpy(&src_offset_0, &src_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&src_offset_1, &src_offset_arr[1], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&src_offset_2, &src_offset_arr[2], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&src_offset_3, &src_offset_arr[3], offset_length));
se::DeviceMemory<int64_t> dst_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> dst_offset_1 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> dst_offset_2 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> dst_offset_3 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> dst_ref_offset_arr{1, 1, 0, 0};
std::vector<int64_t> dst_offset_arr{3, 2, 5, -4};
TF_ASSERT_OK(
stream->Memcpy(&dst_offset_0, &dst_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&dst_offset_1, &dst_offset_arr[1], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&dst_offset_2, &dst_offset_arr[2], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&dst_offset_3, &dst_offset_arr[3], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations(
{src, dst, src_offset_0, src_offset_1, src_offset_2, src_offset_3,
dst_offset_0, dst_offset_1, dst_offset_2, dst_offset_3},
0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> out(dst_count, 0);
TF_ASSERT_OK(stream->Memcpy(out.data(), dst, dst_length));
std::vector<int32_t> ref(dst_count, 0);
int64_t src_offset_val =
src_ref_offset_arr[3] +
2 * (src_ref_offset_arr[2] +
10 * (src_ref_offset_arr[1] + 8 * src_ref_offset_arr[0]));
int64_t dst_offset_val =
dst_ref_offset_arr[3] +
2 * (dst_ref_offset_arr[2] +
2 * (dst_ref_offset_arr[1] + 2 * dst_ref_offset_arr[0]));
std::copy(src_arr.begin() + src_offset_val,
src_arr.begin() + src_offset_val + slice_count,
ref.begin() + dst_offset_val);
ASSERT_EQ(out, ref);
}
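// lhs, rhs and out are disjoint byte ranges of one allocation; slicing must
// honor each slice's offset within the shared buffer.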
TEST(DynamicSliceThunkTest, SlicedOperandsSameBufferGemm) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t lhs_length = sizeof(float) * 2 * 4;
int64_t rhs_length = sizeof(float) * 3 * 1;
int64_t out_length = sizeof(float) * 1 * 1;
int64_t offset_length = sizeof(int64_t);
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(0, rhs_length, 0));
BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0,
rhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(1, rhs_length, 0));
BufferAllocation::Slice slice_rhs_fake(fake_allocations.back().get(), 0,
rhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(2, out_length, 0));
BufferAllocation::Slice slice_out_fake(fake_allocations.back().get(), 0,
out_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
3, 1024 * 1024, 0));
BufferAllocation::Slice slice_workspace_fake(fake_allocations.back().get(), 0,
1024 * 1024);
BufferAllocation alloc(0, lhs_length + rhs_length + out_length,
0);
BufferAllocation::Slice slice_lhs(&alloc, 0, lhs_length);
BufferAllocation::Slice slice_rhs(&alloc, lhs_length, rhs_length);
BufferAllocation::Slice slice_out(&alloc, lhs_length + rhs_length,
out_length);
BufferAllocation alloc_workspace(1, 1024 * 1024, 0);
BufferAllocation::Slice slice_workspace(&alloc_workspace, 0, 1024 * 1024);
BufferAllocation alloc_lhs_offset_0(2, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0,
offset_length);
BufferAllocation alloc_lhs_offset_1(3, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0,
offset_length);
auto config =
GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1},
ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0},
ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0,
0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt,
se::blas::kDefaultComputePrecision, false, false);
ASSERT_TRUE(config.ok());
ThunkSequence seq;
seq.emplace_back(std::make_unique<GemmThunk>(
Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs_fake,
slice_out_fake, slice_workspace_fake, true));
std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0,
slice_lhs_offset_1};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_lhs, slice_rhs, slice_out, slice_workspace},
std::move(fake_allocations),
{lhs_offsets, std::nullopt, std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), std::nullopt,
std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), std::nullopt,
std::nullopt, std::nullopt},
{sizeof(int64_t), std::nullopt, std::nullopt, std::nullopt});
se::DeviceMemory<float> buffer =
executor->AllocateArray<float>(lhs_length + rhs_length + out_length);
TF_ASSERT_OK(stream->MemZero(&buffer, lhs_length + rhs_length + out_length));
se::DeviceMemoryBase lhs = buffer.GetByteSlice(0, lhs_length);
std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8};
TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length));
se::DeviceMemoryBase rhs = buffer.GetByteSlice(lhs_length, rhs_length);
std::vector<float> rhs_arr(3, 1);
TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length));
se::DeviceMemoryBase out =
buffer.GetByteSlice(lhs_length + rhs_length, out_length);
se::DeviceMemory<float> workspace =
executor->AllocateArray<float>(1024 * 1024);
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024));
se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> lhs_offset_arr{0, 1};
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({buffer, workspace, lhs_offset_0, lhs_offset_1},
0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<float> dst(1, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length));
ASSERT_EQ(dst, std::vector<float>({9}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/dynamic_slice_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/dynamic_slice_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
70211bbd-e67e-484e-a711-d1383b5f6173 | cpp | google/quiche | blind_sign_auth | quiche/blind_sign_auth/blind_sign_auth.cc | quiche/blind_sign_auth/blind_sign_auth_test.cc | #include "quiche/blind_sign_auth/blind_sign_auth.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/functional/bind_front.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "anonymous_tokens/cpp/crypto/crypto_utils.h"
#include "anonymous_tokens/cpp/privacy_pass/rsa_bssa_public_metadata_client.h"
#include "anonymous_tokens/cpp/privacy_pass/token_encodings.h"
#include "anonymous_tokens/cpp/shared/proto_utils.h"
#include "quiche/blind_sign_auth/blind_sign_auth_interface.h"
#include "quiche/blind_sign_auth/blind_sign_auth_protos.h"
#include "quiche/blind_sign_auth/blind_sign_message_interface.h"
#include "quiche/blind_sign_auth/blind_sign_message_response.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_random.h"
namespace quiche {
namespace {
template <typename T>
std::string OmitDefault(T value) {
return value == 0 ? "" : absl::StrCat(value);
}
constexpr absl::string_view kIssuerHostname =
    "https://ipprotection-ppissuer.googleapis.com";
}
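// Entry point: fetches `num_tokens` blind-signed tokens via two RPCs. The
// first, GetInitialData, returns the issuer's public key and public metadata;
// its callback then builds blinded token requests and issues the AuthAndSign
// RPC.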
void BlindSignAuth::GetTokens(std::optional<std::string> oauth_token,
int num_tokens, ProxyLayer proxy_layer,
BlindSignAuthServiceType service_type,
SignedTokenCallback callback) {
privacy::ppn::GetInitialDataRequest request;
request.set_use_attestation(false);
request.set_service_type(BlindSignAuthServiceTypeToString(service_type));
request.set_location_granularity(
privacy::ppn::GetInitialDataRequest_LocationGranularity_CITY_GEOS);
request.set_validation_version(2);
request.set_proxy_layer(QuicheProxyLayerToPpnProxyLayer(proxy_layer));
std::string body = request.SerializeAsString();
BlindSignMessageCallback initial_data_callback = absl::bind_front(
&BlindSignAuth::GetInitialDataCallback, this, oauth_token, num_tokens,
proxy_layer, service_type, std::move(callback));
fetcher_->DoRequest(BlindSignMessageRequestType::kGetInitialData, oauth_token,
body, std::move(initial_data_callback));
}
void BlindSignAuth::GetInitialDataCallback(
std::optional<std::string> oauth_token, int num_tokens,
ProxyLayer proxy_layer, BlindSignAuthServiceType service_type,
SignedTokenCallback callback,
absl::StatusOr<BlindSignMessageResponse> response) {
if (!response.ok()) {
QUICHE_LOG(WARNING) << "GetInitialDataRequest failed: "
<< response.status();
std::move(callback)(absl::InvalidArgumentError(
"GetInitialDataRequest failed: invalid response"));
return;
}
absl::StatusCode code = response->status_code();
if (code != absl::StatusCode::kOk) {
std::string message =
absl::StrCat("GetInitialDataRequest failed with code: ", code);
QUICHE_LOG(WARNING) << message;
std::move(callback)(
absl::InvalidArgumentError("GetInitialDataRequest failed"));
return;
}
privacy::ppn::GetInitialDataResponse initial_data_response;
if (!initial_data_response.ParseFromString(response->body())) {
QUICHE_LOG(WARNING) << "Failed to parse GetInitialDataResponse";
std::move(callback)(
absl::InternalError("Failed to parse GetInitialDataResponse"));
return;
}
bool use_privacy_pass_client =
initial_data_response.has_privacy_pass_data() &&
auth_options_.enable_privacy_pass();
if (use_privacy_pass_client) {
QUICHE_DVLOG(1) << "Using Privacy Pass client";
GeneratePrivacyPassTokens(initial_data_response, std::move(oauth_token),
num_tokens, proxy_layer, service_type,
std::move(callback));
} else {
QUICHE_LOG(ERROR) << "Non-Privacy Pass tokens are no longer supported";
std::move(callback)(absl::UnimplementedError(
"Non-Privacy Pass tokens are no longer supported"));
}
}
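// Builds the Privacy Pass token requests: parses the RSA public key, decodes
// and validates the public metadata extensions, creates one blinded token
// request per requested token, and sends the AuthAndSign RPC with
// PrivacyPassAuthAndSignCallback as the response handler.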
void BlindSignAuth::GeneratePrivacyPassTokens(
privacy::ppn::GetInitialDataResponse initial_data_response,
std::optional<std::string> oauth_token, int num_tokens,
ProxyLayer proxy_layer, BlindSignAuthServiceType service_type,
SignedTokenCallback callback) {
anonymous_tokens::RSAPublicKey public_key_proto;
if (!public_key_proto.ParseFromString(
initial_data_response.at_public_metadata_public_key()
.serialized_public_key())) {
std::move(callback)(
absl::InvalidArgumentError("Failed to parse Privacy Pass public key"));
return;
}
absl::StatusOr<bssl::UniquePtr<RSA>> bssl_rsa_key =
anonymous_tokens::CreatePublicKeyRSA(
public_key_proto.n(), public_key_proto.e());
if (!bssl_rsa_key.ok()) {
QUICHE_LOG(ERROR) << "Failed to create RSA public key: "
<< bssl_rsa_key.status();
std::move(callback)(absl::InternalError("Failed to create RSA public key"));
return;
}
absl::StatusOr<anonymous_tokens::Extensions> extensions =
anonymous_tokens::DecodeExtensions(
initial_data_response.privacy_pass_data()
.public_metadata_extensions());
if (!extensions.ok()) {
QUICHE_LOG(WARNING) << "Failed to decode extensions: "
<< extensions.status();
std::move(callback)(
absl::InvalidArgumentError("Failed to decode extensions"));
return;
}
std::vector<uint16_t> kExpectedExtensionTypes = {
    /*ExpirationTimestamp=*/0x0001, /*GeoHint=*/0x0002,
    /*ServiceType=*/0xF001, /*DebugMode=*/0xF002, /*ProxyLayer=*/0xF003};
absl::Status result =
anonymous_tokens::ValidateExtensionsOrderAndValues(
*extensions, absl::MakeSpan(kExpectedExtensionTypes), absl::Now());
if (!result.ok()) {
QUICHE_LOG(WARNING) << "Failed to validate extensions: " << result;
std::move(callback)(
absl::InvalidArgumentError("Failed to validate extensions"));
return;
}
absl::StatusOr<anonymous_tokens::ExpirationTimestamp>
expiration_timestamp = anonymous_tokens::
ExpirationTimestamp::FromExtension(extensions->extensions.at(0));
if (!expiration_timestamp.ok()) {
QUICHE_LOG(WARNING) << "Failed to parse expiration timestamp: "
<< expiration_timestamp.status();
std::move(callback)(
absl::InvalidArgumentError("Failed to parse expiration timestamp"));
return;
}
absl::Time public_metadata_expiry_time =
absl::FromUnixSeconds(expiration_timestamp->timestamp);
absl::StatusOr<anonymous_tokens::GeoHint> geo_hint =
anonymous_tokens::GeoHint::FromExtension(
extensions->extensions.at(1));
QUICHE_CHECK(geo_hint.ok());
anonymous_tokens::TokenChallenge challenge;
challenge.issuer_name = kIssuerHostname;
absl::StatusOr<std::string> token_challenge =
anonymous_tokens::MarshalTokenChallenge(challenge);
if (!token_challenge.ok()) {
QUICHE_LOG(WARNING) << "Failed to marshal token challenge: "
<< token_challenge.status();
std::move(callback)(
absl::InvalidArgumentError("Failed to marshal token challenge"));
return;
}
QuicheRandom* random = QuicheRandom::GetInstance();
std::vector<anonymous_tokens::ExtendedTokenRequest>
extended_token_requests;
std::vector<std::unique_ptr<anonymous_tokens::
PrivacyPassRsaBssaPublicMetadataClient>>
privacy_pass_clients;
std::vector<std::string> privacy_pass_blinded_tokens;
for (int i = 0; i < num_tokens; i++) {
auto client = anonymous_tokens::
PrivacyPassRsaBssaPublicMetadataClient::Create(*bssl_rsa_key.value());
if (!client.ok()) {
QUICHE_LOG(WARNING) << "Failed to create Privacy Pass client: "
<< client.status();
std::move(callback)(
absl::InternalError("Failed to create Privacy Pass client"));
return;
}
std::string nonce_rand(32, '\0');
random->RandBytes(nonce_rand.data(), nonce_rand.size());
absl::StatusOr<anonymous_tokens::ExtendedTokenRequest>
extended_token_request = client.value()->CreateTokenRequest(
*token_challenge, nonce_rand,
initial_data_response.privacy_pass_data().token_key_id(),
*extensions);
if (!extended_token_request.ok()) {
QUICHE_LOG(WARNING) << "Failed to create ExtendedTokenRequest: "
<< extended_token_request.status();
std::move(callback)(
absl::InternalError("Failed to create ExtendedTokenRequest"));
return;
}
privacy_pass_clients.push_back(*std::move(client));
extended_token_requests.push_back(*extended_token_request);
privacy_pass_blinded_tokens.push_back(absl::Base64Escape(
extended_token_request->request.blinded_token_request));
}
privacy::ppn::AuthAndSignRequest sign_request;
sign_request.set_service_type(BlindSignAuthServiceTypeToString(service_type));
sign_request.set_key_type(privacy::ppn::AT_PUBLIC_METADATA_KEY_TYPE);
sign_request.set_key_version(
initial_data_response.at_public_metadata_public_key().key_version());
sign_request.mutable_blinded_token()->Assign(
privacy_pass_blinded_tokens.begin(), privacy_pass_blinded_tokens.end());
sign_request.mutable_public_metadata_extensions()->assign(
initial_data_response.privacy_pass_data().public_metadata_extensions());
sign_request.set_do_not_use_rsa_public_exponent(true);
sign_request.set_proxy_layer(QuicheProxyLayerToPpnProxyLayer(proxy_layer));
absl::StatusOr<anonymous_tokens::AnonymousTokensUseCase>
use_case = anonymous_tokens::ParseUseCase(
initial_data_response.at_public_metadata_public_key().use_case());
if (!use_case.ok()) {
QUICHE_LOG(WARNING) << "Failed to parse use case: " << use_case.status();
std::move(callback)(absl::InvalidArgumentError("Failed to parse use case"));
return;
}
BlindSignMessageCallback auth_and_sign_callback =
absl::bind_front(&BlindSignAuth::PrivacyPassAuthAndSignCallback, this,
std::move(initial_data_response.privacy_pass_data()
.public_metadata_extensions()),
public_metadata_expiry_time, *geo_hint, *use_case,
std::move(privacy_pass_clients), std::move(callback));
fetcher_->DoRequest(BlindSignMessageRequestType::kAuthAndSign, oauth_token,
sign_request.SerializeAsString(),
std::move(auth_and_sign_callback));
}
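// Handles the AuthAndSign response: each blinded signature is finalized by
// the Privacy Pass client that produced the matching request, then the token
// and extensions are marshaled into a PrivacyPassTokenData proto using
// web-safe base64 before being handed to the caller.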
void BlindSignAuth::PrivacyPassAuthAndSignCallback(
std::string encoded_extensions, absl::Time public_key_expiry_time,
anonymous_tokens::GeoHint geo_hint,
anonymous_tokens::AnonymousTokensUseCase use_case,
std::vector<std::unique_ptr<anonymous_tokens::
PrivacyPassRsaBssaPublicMetadataClient>>
privacy_pass_clients,
SignedTokenCallback callback,
absl::StatusOr<BlindSignMessageResponse> response) {
if (!response.ok()) {
QUICHE_LOG(WARNING) << "AuthAndSign failed: " << response.status();
std::move(callback)(
absl::InvalidArgumentError("AuthAndSign failed: invalid response"));
return;
}
absl::StatusCode code = response->status_code();
if (code != absl::StatusCode::kOk) {
std::string message = absl::StrCat("AuthAndSign failed with code: ", code);
QUICHE_LOG(WARNING) << message;
std::move(callback)(absl::InvalidArgumentError("AuthAndSign failed"));
return;
}
privacy::ppn::AuthAndSignResponse sign_response;
if (!sign_response.ParseFromString(response->body())) {
QUICHE_LOG(WARNING) << "Failed to parse AuthAndSignResponse";
std::move(callback)(
absl::InternalError("Failed to parse AuthAndSignResponse"));
return;
}
if (static_cast<size_t>(sign_response.blinded_token_signature_size()) !=
privacy_pass_clients.size()) {
QUICHE_LOG(WARNING) << "Number of signatures does not equal number of "
"Privacy Pass tokens sent";
std::move(callback)(
absl::InternalError("Number of signatures does not equal number of "
"Privacy Pass tokens sent"));
return;
}
std::vector<BlindSignToken> tokens_vec;
for (int i = 0; i < sign_response.blinded_token_signature_size(); i++) {
std::string unescaped_blinded_sig;
if (!absl::Base64Unescape(sign_response.blinded_token_signature()[i],
&unescaped_blinded_sig)) {
QUICHE_LOG(WARNING) << "Failed to unescape blinded signature";
std::move(callback)(
absl::InternalError("Failed to unescape blinded signature"));
return;
}
absl::StatusOr<anonymous_tokens::Token> token =
privacy_pass_clients[i]->FinalizeToken(unescaped_blinded_sig);
if (!token.ok()) {
QUICHE_LOG(WARNING) << "Failed to finalize token: " << token.status();
std::move(callback)(absl::InternalError("Failed to finalize token"));
return;
}
absl::StatusOr<std::string> marshaled_token =
anonymous_tokens::MarshalToken(*token);
if (!marshaled_token.ok()) {
QUICHE_LOG(WARNING) << "Failed to marshal token: "
<< marshaled_token.status();
std::move(callback)(absl::InternalError("Failed to marshal token"));
return;
}
privacy::ppn::PrivacyPassTokenData privacy_pass_token_data;
privacy_pass_token_data.mutable_token()->assign(
ConvertBase64ToWebSafeBase64(absl::Base64Escape(*marshaled_token)));
privacy_pass_token_data.mutable_encoded_extensions()->assign(
ConvertBase64ToWebSafeBase64(absl::Base64Escape(encoded_extensions)));
privacy_pass_token_data.set_use_case_override(use_case);
tokens_vec.push_back(
BlindSignToken{privacy_pass_token_data.SerializeAsString(),
public_key_expiry_time, geo_hint});
}
std::move(callback)(absl::Span<BlindSignToken>(tokens_vec));
}
privacy::ppn::ProxyLayer BlindSignAuth::QuicheProxyLayerToPpnProxyLayer(
quiche::ProxyLayer proxy_layer) {
switch (proxy_layer) {
case ProxyLayer::kProxyA: {
return privacy::ppn::ProxyLayer::PROXY_A;
}
case ProxyLayer::kProxyB: {
return privacy::ppn::ProxyLayer::PROXY_B;
}
}
}
std::string BlindSignAuth::ConvertBase64ToWebSafeBase64(
std::string base64_string) {
absl::c_replace(base64_string, '+', '-');
absl::c_replace(base64_string, '/', '_');
return base64_string;
}
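// Note: kWebviewIpBlinding maps to the same "chromeipblinding" service string
// as kChromeIpBlinding.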
std::string BlindSignAuthServiceTypeToString(
quiche::BlindSignAuthServiceType service_type) {
switch (service_type) {
case BlindSignAuthServiceType::kChromeIpBlinding: {
return "chromeipblinding";
}
case BlindSignAuthServiceType::kCronetIpBlinding: {
return "cronetipblinding";
}
case BlindSignAuthServiceType::kWebviewIpBlinding: {
return "chromeipblinding";
}
}
}
} | #include "quiche/blind_sign_auth/blind_sign_auth.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "anonymous_tokens/cpp/crypto/crypto_utils.h"
#include "anonymous_tokens/cpp/privacy_pass/token_encodings.h"
#include "anonymous_tokens/cpp/testing/utils.h"
#include "openssl/base.h"
#include "openssl/digest.h"
#include "quiche/blind_sign_auth/blind_sign_auth_interface.h"
#include "quiche/blind_sign_auth/blind_sign_auth_protos.h"
#include "quiche/blind_sign_auth/blind_sign_message_interface.h"
#include "quiche/blind_sign_auth/blind_sign_message_response.h"
#include "quiche/blind_sign_auth/test_tools/mock_blind_sign_message_interface.h"
#include "quiche/common/platform/api/quiche_mutex.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quiche {
namespace test {
namespace {
using ::testing::_;
using ::testing::Eq;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::StartsWith;
using ::testing::Unused;
class BlindSignAuthTest : public QuicheTest {
protected:
void SetUp() override {
auto [test_rsa_public_key, test_rsa_private_key] =
anonymous_tokens::GetStrongTestRsaKeyPair2048();
ANON_TOKENS_ASSERT_OK_AND_ASSIGN(
rsa_public_key_,
anonymous_tokens::CreatePublicKeyRSA(
test_rsa_public_key.n, test_rsa_public_key.e));
ANON_TOKENS_ASSERT_OK_AND_ASSIGN(
rsa_private_key_,
anonymous_tokens::CreatePrivateKeyRSA(
test_rsa_private_key.n, test_rsa_private_key.e,
test_rsa_private_key.d, test_rsa_private_key.p,
test_rsa_private_key.q, test_rsa_private_key.dp,
test_rsa_private_key.dq, test_rsa_private_key.crt));
anonymous_tokens::RSAPublicKey public_key;
public_key.set_n(test_rsa_public_key.n);
public_key.set_e(test_rsa_public_key.e);
public_key_proto_.set_key_version(1);
public_key_proto_.set_use_case("TEST_USE_CASE");
public_key_proto_.set_serialized_public_key(public_key.SerializeAsString());
public_key_proto_.set_sig_hash_type(
anonymous_tokens::AT_HASH_TYPE_SHA384);
public_key_proto_.set_mask_gen_function(
anonymous_tokens::AT_MGF_SHA384);
public_key_proto_.set_salt_length(48);
public_key_proto_.set_key_size(256);
public_key_proto_.set_message_mask_type(
anonymous_tokens::AT_MESSAGE_MASK_NO_MASK);
public_key_proto_.set_message_mask_size(0);
expected_get_initial_data_request_.set_use_attestation(false);
expected_get_initial_data_request_.set_service_type("chromeipblinding");
expected_get_initial_data_request_.set_location_granularity(
privacy::ppn::GetInitialDataRequest_LocationGranularity_CITY_GEOS);
expected_get_initial_data_request_.set_validation_version(2);
expected_get_initial_data_request_.set_proxy_layer(privacy::ppn::PROXY_A);
privacy::ppn::GetInitialDataResponse fake_get_initial_data_response;
*fake_get_initial_data_response.mutable_at_public_metadata_public_key() =
public_key_proto_;
fake_get_initial_data_response_ = fake_get_initial_data_response;
privacy::ppn::GetInitialDataResponse::PrivacyPassData privacy_pass_data;
ANON_TOKENS_ASSERT_OK_AND_ASSIGN(
std::string public_key_der,
anonymous_tokens::RsaSsaPssPublicKeyToDerEncoding(
rsa_public_key_.get()));
const EVP_MD* sha256 = EVP_sha256();
ANON_TOKENS_ASSERT_OK_AND_ASSIGN(
token_key_id_, anonymous_tokens::ComputeHash(
public_key_der, *sha256));
anonymous_tokens::ExpirationTimestamp
expiration_timestamp;
int64_t one_hour_away = absl::ToUnixSeconds(absl::Now() + absl::Hours(1));
expiration_timestamp.timestamp = one_hour_away - (one_hour_away % 900);
expiration_timestamp.timestamp_precision = 900;
absl::StatusOr<anonymous_tokens::Extension>
expiration_extension = expiration_timestamp.AsExtension();
QUICHE_EXPECT_OK(expiration_extension);
extensions_.extensions.push_back(*expiration_extension);
anonymous_tokens::GeoHint geo_hint;
geo_hint.geo_hint = "US,US-AL,ALABASTER";
absl::StatusOr<anonymous_tokens::Extension>
geo_hint_extension = geo_hint.AsExtension();
QUICHE_EXPECT_OK(geo_hint_extension);
extensions_.extensions.push_back(*geo_hint_extension);
anonymous_tokens::ServiceType service_type;
service_type.service_type_id =
anonymous_tokens::ServiceType::kChromeIpBlinding;
absl::StatusOr<anonymous_tokens::Extension>
service_type_extension = service_type.AsExtension();
QUICHE_EXPECT_OK(service_type_extension);
extensions_.extensions.push_back(*service_type_extension);
anonymous_tokens::DebugMode debug_mode;
debug_mode.mode = anonymous_tokens::DebugMode::kDebug;
absl::StatusOr<anonymous_tokens::Extension>
debug_mode_extension = debug_mode.AsExtension();
QUICHE_EXPECT_OK(debug_mode_extension);
extensions_.extensions.push_back(*debug_mode_extension);
anonymous_tokens::ProxyLayer proxy_layer;
proxy_layer.layer =
anonymous_tokens::ProxyLayer::kProxyA;
absl::StatusOr<anonymous_tokens::Extension>
proxy_layer_extension = proxy_layer.AsExtension();
QUICHE_EXPECT_OK(proxy_layer_extension);
extensions_.extensions.push_back(*proxy_layer_extension);
absl::StatusOr<std::string> serialized_extensions =
anonymous_tokens::EncodeExtensions(extensions_);
QUICHE_EXPECT_OK(serialized_extensions);
privacy_pass_data.set_token_key_id(token_key_id_);
privacy_pass_data.set_public_metadata_extensions(*serialized_extensions);
*fake_get_initial_data_response.mutable_public_metadata_info() =
public_metadata_info_;
*fake_get_initial_data_response.mutable_privacy_pass_data() =
privacy_pass_data;
fake_get_initial_data_response_ = fake_get_initial_data_response;
privacy::ppn::BlindSignAuthOptions options;
options.set_enable_privacy_pass(true);
blind_sign_auth_ =
std::make_unique<BlindSignAuth>(&mock_message_interface_, options);
}
void TearDown() override { blind_sign_auth_.reset(nullptr); }
public:
void CreateSignResponse(const std::string& body, bool use_privacy_pass) {
privacy::ppn::AuthAndSignRequest request;
ASSERT_TRUE(request.ParseFromString(body));
EXPECT_EQ(request.service_type(), "chromeipblinding");
EXPECT_EQ(request.key_type(), privacy::ppn::AT_PUBLIC_METADATA_KEY_TYPE);
EXPECT_EQ(request.public_key_hash(), "");
EXPECT_EQ(request.key_version(), public_key_proto_.key_version());
EXPECT_EQ(request.do_not_use_rsa_public_exponent(), true);
EXPECT_NE(request.blinded_token().size(), 0);
if (use_privacy_pass) {
EXPECT_EQ(request.public_metadata_extensions(),
fake_get_initial_data_response_.privacy_pass_data()
.public_metadata_extensions());
} else {
EXPECT_EQ(request.public_metadata_info().SerializeAsString(),
public_metadata_info_.SerializeAsString());
}
privacy::ppn::AuthAndSignResponse response;
for (const auto& request_token : request.blinded_token()) {
std::string decoded_blinded_token;
ASSERT_TRUE(absl::Base64Unescape(request_token, &decoded_blinded_token));
if (use_privacy_pass) {
absl::StatusOr<std::string> signature =
anonymous_tokens::TestSignWithPublicMetadata(
decoded_blinded_token, request.public_metadata_extensions(),
*rsa_private_key_, false);
QUICHE_EXPECT_OK(signature);
response.add_blinded_token_signature(absl::Base64Escape(*signature));
} else {
absl::StatusOr<std::string> serialized_token =
anonymous_tokens::TestSign(
decoded_blinded_token, rsa_private_key_.get());
QUICHE_EXPECT_OK(serialized_token);
response.add_blinded_token_signature(
absl::Base64Escape(*serialized_token));
}
}
sign_response_ = response;
}
void ValidatePrivacyPassTokensOutput(absl::Span<BlindSignToken> tokens) {
for (const auto& token : tokens) {
privacy::ppn::PrivacyPassTokenData privacy_pass_token_data;
ASSERT_TRUE(privacy_pass_token_data.ParseFromString(token.token));
std::string decoded_token;
ASSERT_TRUE(absl::WebSafeBase64Unescape(privacy_pass_token_data.token(),
&decoded_token));
EXPECT_EQ(privacy_pass_token_data.encoded_extensions().back(), '=');
std::string decoded_extensions;
ASSERT_TRUE(absl::WebSafeBase64Unescape(
privacy_pass_token_data.encoded_extensions(), &decoded_extensions));
EXPECT_EQ(token.geo_hint.geo_hint, "US,US-AL,ALABASTER");
EXPECT_EQ(token.geo_hint.country_code, "US");
EXPECT_EQ(token.geo_hint.region, "US-AL");
EXPECT_EQ(token.geo_hint.city, "ALABASTER");
}
}
MockBlindSignMessageInterface mock_message_interface_;
std::unique_ptr<BlindSignAuth> blind_sign_auth_;
anonymous_tokens::RSABlindSignaturePublicKey
public_key_proto_;
bssl::UniquePtr<RSA> rsa_public_key_;
bssl::UniquePtr<RSA> rsa_private_key_;
std::string token_key_id_;
anonymous_tokens::Extensions extensions_;
privacy::ppn::PublicMetadataInfo public_metadata_info_;
privacy::ppn::AuthAndSignResponse sign_response_;
privacy::ppn::GetInitialDataResponse fake_get_initial_data_response_;
std::string oauth_token_ = "oauth_token";
privacy::ppn::GetInitialDataRequest expected_get_initial_data_request_;
};
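// These tests drive the full GetTokens flow against a mocked message
// interface. When an AuthAndSign response is needed, CreateSignResponse()
// signs the blinded tokens locally with the test RSA private key, so token
// finalization is exercised end to end.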
TEST_F(BlindSignAuthTest, TestGetTokensFailedNetworkError) {
EXPECT_CALL(mock_message_interface_,
DoRequest(Eq(BlindSignMessageRequestType::kGetInitialData),
Eq(oauth_token_), _, _))
.Times(1)
.WillOnce([=](auto&&, auto&&, auto&&, auto get_initial_data_cb) {
std::move(get_initial_data_cb)(
absl::InternalError("Failed to create socket"));
});
EXPECT_CALL(mock_message_interface_,
DoRequest(Eq(BlindSignMessageRequestType::kAuthAndSign), _, _, _))
.Times(0);
int num_tokens = 1;
QuicheNotification done;
SignedTokenCallback callback =
[&done](absl::StatusOr<absl::Span<BlindSignToken>> tokens) {
EXPECT_THAT(tokens.status().code(), absl::StatusCode::kInvalidArgument);
done.Notify();
};
blind_sign_auth_->GetTokens(oauth_token_, num_tokens, ProxyLayer::kProxyA,
BlindSignAuthServiceType::kChromeIpBlinding,
std::move(callback));
done.WaitForNotification();
}
TEST_F(BlindSignAuthTest, TestGetTokensFailedBadGetInitialDataResponse) {
*fake_get_initial_data_response_.mutable_at_public_metadata_public_key()
->mutable_use_case() = "SPAM";
BlindSignMessageResponse fake_public_key_response(
absl::StatusCode::kOk,
fake_get_initial_data_response_.SerializeAsString());
EXPECT_CALL(
mock_message_interface_,
DoRequest(Eq(BlindSignMessageRequestType::kGetInitialData),
Eq(oauth_token_),
Eq(expected_get_initial_data_request_.SerializeAsString()), _))
.Times(1)
.WillOnce([=](auto&&, auto&&, auto&&, auto get_initial_data_cb) {
std::move(get_initial_data_cb)(fake_public_key_response);
});
EXPECT_CALL(mock_message_interface_,
DoRequest(Eq(BlindSignMessageRequestType::kAuthAndSign), _, _, _))
.Times(0);
int num_tokens = 1;
QuicheNotification done;
SignedTokenCallback callback =
[&done](absl::StatusOr<absl::Span<BlindSignToken>> tokens) {
EXPECT_THAT(tokens.status().code(), absl::StatusCode::kInvalidArgument);
done.Notify();
};
blind_sign_auth_->GetTokens(oauth_token_, num_tokens, ProxyLayer::kProxyA,
BlindSignAuthServiceType::kChromeIpBlinding,
std::move(callback));
done.WaitForNotification();
}
TEST_F(BlindSignAuthTest, TestGetTokensFailedBadAuthAndSignResponse) {
BlindSignMessageResponse fake_public_key_response(
absl::StatusCode::kOk,
fake_get_initial_data_response_.SerializeAsString());
{
InSequence seq;
EXPECT_CALL(
mock_message_interface_,
DoRequest(
Eq(BlindSignMessageRequestType::kGetInitialData), Eq(oauth_token_),
Eq(expected_get_initial_data_request_.SerializeAsString()), _))
.Times(1)
.WillOnce([=](auto&&, auto&&, auto&&, auto get_initial_data_cb) {
std::move(get_initial_data_cb)(fake_public_key_response);
});
EXPECT_CALL(mock_message_interface_,
DoRequest(Eq(BlindSignMessageRequestType::kAuthAndSign),
Eq(oauth_token_), _, _))
.Times(1)
.WillOnce(Invoke([this](Unused, Unused, const std::string& body,
BlindSignMessageCallback callback) {
CreateSignResponse(body, false);
sign_response_.add_blinded_token_signature("invalid_signature%");
BlindSignMessageResponse response(absl::StatusCode::kOk,
sign_response_.SerializeAsString());
std::move(callback)(response);
}));
}
int num_tokens = 1;
QuicheNotification done;
SignedTokenCallback callback =
[&done](absl::StatusOr<absl::Span<BlindSignToken>> tokens) {
EXPECT_THAT(tokens.status().code(), absl::StatusCode::kInternal);
done.Notify();
};
blind_sign_auth_->GetTokens(oauth_token_, num_tokens, ProxyLayer::kProxyA,
BlindSignAuthServiceType::kChromeIpBlinding,
std::move(callback));
done.WaitForNotification();
}
TEST_F(BlindSignAuthTest, TestPrivacyPassGetTokensSucceeds) {
BlindSignMessageResponse fake_public_key_response(
absl::StatusCode::kOk,
fake_get_initial_data_response_.SerializeAsString());
{
InSequence seq;
EXPECT_CALL(
mock_message_interface_,
DoRequest(
Eq(BlindSignMessageRequestType::kGetInitialData), Eq(oauth_token_),
Eq(expected_get_initial_data_request_.SerializeAsString()), _))
.Times(1)
.WillOnce([=](auto&&, auto&&, auto&&, auto get_initial_data_cb) {
std::move(get_initial_data_cb)(fake_public_key_response);
});
EXPECT_CALL(mock_message_interface_,
DoRequest(Eq(BlindSignMessageRequestType::kAuthAndSign),
Eq(oauth_token_), _, _))
.Times(1)
.WillOnce(Invoke([this](Unused, Unused, const std::string& body,
BlindSignMessageCallback callback) {
CreateSignResponse(body, true);
BlindSignMessageResponse response(absl::StatusCode::kOk,
sign_response_.SerializeAsString());
std::move(callback)(response);
}));
}
int num_tokens = 1;
QuicheNotification done;
SignedTokenCallback callback =
[this, &done](absl::StatusOr<absl::Span<BlindSignToken>> tokens) {
QUICHE_EXPECT_OK(tokens);
ValidatePrivacyPassTokensOutput(*tokens);
done.Notify();
};
blind_sign_auth_->GetTokens(oauth_token_, num_tokens, ProxyLayer::kProxyA,
BlindSignAuthServiceType::kChromeIpBlinding,
std::move(callback));
done.WaitForNotification();
}
TEST_F(BlindSignAuthTest, TestPrivacyPassGetTokensFailsWithBadExtensions) {
privacy::ppn::BlindSignAuthOptions options;
options.set_enable_privacy_pass(true);
blind_sign_auth_ =
std::make_unique<BlindSignAuth>(&mock_message_interface_, options);
public_key_proto_.set_message_mask_type(
anonymous_tokens::AT_MESSAGE_MASK_NO_MASK);
public_key_proto_.set_message_mask_size(0);
*fake_get_initial_data_response_.mutable_at_public_metadata_public_key() =
public_key_proto_;
fake_get_initial_data_response_.mutable_privacy_pass_data()
->set_public_metadata_extensions("spam");
BlindSignMessageResponse fake_public_key_response(
absl::StatusCode::kOk,
fake_get_initial_data_response_.SerializeAsString());
EXPECT_CALL(
mock_message_interface_,
DoRequest(Eq(BlindSignMessageRequestType::kGetInitialData),
Eq(oauth_token_),
Eq(expected_get_initial_data_request_.SerializeAsString()), _))
.Times(1)
.WillOnce([=](auto&&, auto&&, auto&&, auto get_initial_data_cb) {
std::move(get_initial_data_cb)(fake_public_key_response);
});
int num_tokens = 1;
QuicheNotification done;
SignedTokenCallback callback =
[&done](absl::StatusOr<absl::Span<BlindSignToken>> tokens) {
EXPECT_THAT(tokens.status().code(), absl::StatusCode::kInvalidArgument);
done.Notify();
};
blind_sign_auth_->GetTokens(oauth_token_, num_tokens, ProxyLayer::kProxyA,
BlindSignAuthServiceType::kChromeIpBlinding,
std::move(callback));
done.WaitForNotification();
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/blind_sign_auth/blind_sign_auth.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/blind_sign_auth/blind_sign_auth_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
040cc8ee-f53e-4ba5-b169-1de71bba8616 | cpp | tensorflow/tensorflow | parallel_interleave_dataset_op | tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.cc | tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.h"
#include <atomic>
#include <deque>
#include <functional>
#include <string>
#include <utility>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const
ParallelInterleaveDatasetOp::kDatasetType;
constexpr const char* const
ParallelInterleaveDatasetOp::kInputDataset;
constexpr const char* const
ParallelInterleaveDatasetOp::kOtherArguments;
constexpr const char* const
ParallelInterleaveDatasetOp::kCycleLength;
constexpr const char* const
ParallelInterleaveDatasetOp::kBlockLength;
constexpr const char* const
ParallelInterleaveDatasetOp::kDeterministic;
constexpr const char* const ParallelInterleaveDatasetOp::kSloppy;
constexpr const char* const
ParallelInterleaveDatasetOp::kBufferOutputElements;
constexpr const char* const
ParallelInterleaveDatasetOp::kPrefetchInputElements;
constexpr const char* const ParallelInterleaveDatasetOp::kFunc;
constexpr const char* const
ParallelInterleaveDatasetOp::kTarguments;
constexpr const char* const
ParallelInterleaveDatasetOp::kOutputTypes;
constexpr const char* const
ParallelInterleaveDatasetOp::kOutputShapes;
constexpr char kInputExhausted[] = "input_exhausted";
constexpr char kNextIndex[] = "next_index";
constexpr char kBlockCount[] = "block_count";
constexpr char kWorkersSize[] = "workers_size";
constexpr char kInterleaveSize[] = "interleave_size";
constexpr char kInterleaveIndices[] = "interleave_indices";
constexpr char kStagingSize[] = "staging_size";
constexpr char kStagingIndices[] = "staging_indices";
constexpr char kWorkerThreadsRunning[] = "worker_threads_running";
constexpr char kDataParallelInterleaveWorker[] =
"data_parallel_interleave_worker";
constexpr char kWorker[] = "worker";
constexpr char kInputSize[] = "input_size";
constexpr char kInput[] = "input";
constexpr char kOutputsSize[] = "outputs_size";
constexpr char kOutputs[] = "outputs";
constexpr char kIsProducing[] = "is_producing";
constexpr char kWorkerThread[] = "worker_thread";
constexpr char kIteratorExhausted[] = "iterator_exhausted";
constexpr char kIteratorCreationStatus[] = "iterator_creation_status";
constexpr char kOutput[] = "output";
constexpr char kEndOfSequence[] = "end_of_sequence";
constexpr char kStatus[] = "status";
constexpr char kOutputSize[] = "output_size";
constexpr char kCode[] = "code";
constexpr char KMessage[] = "msg";
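// The Dataset owns the input dataset and the captured map function. Its
// iterator spawns cycle_length + prefetch_input_elements worker threads; each
// worker turns one input element into a sub-iterator and fills a bounded
// buffer of at most buffer_output_elements output elements.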
class ParallelInterleaveDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func, int64_t cycle_length,
int64_t block_length, DeterminismPolicy deterministic,
int64_t buffer_output_elements, int64_t prefetch_input_elements,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes, int op_version)
: DatasetBase(DatasetContext(ctx)),
input_(input),
captured_func_(std::move(captured_func)),
cycle_length_(cycle_length),
block_length_(block_length),
deterministic_(deterministic),
buffer_output_elements_(buffer_output_elements),
prefetch_input_elements_(prefetch_input_elements),
output_types_(output_types),
output_shapes_(output_shapes),
traceme_metadata_(
{{"block_length",
strings::Printf("%lld", static_cast<long long>(block_length))},
{"cycle_length",
strings::Printf("%lld", static_cast<long long>(cycle_length))},
{"deterministic",
deterministic.IsDeterministic() || deterministic.IsDefault()
? "true"
: "false"}}),
op_version_(op_version) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
bool deterministic =
deterministic_.IsDeterministic() || deterministic_.IsDefault();
return std::make_unique<Iterator>(
Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)},
deterministic);
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
return name_utils::DatasetDebugString(kDatasetType, params);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<std::pair<size_t, Node*>> inputs;
std::vector<std::pair<size_t, absl::Span<Node* const>>> list_inputs;
int input_index = 0;
Node* input_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
inputs.emplace_back(input_index++, input_node);
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
list_inputs.emplace_back(input_index++, other_arguments);
Node* cycle_length_node;
TF_RETURN_IF_ERROR(b->AddScalar(cycle_length_, &cycle_length_node));
inputs.emplace_back(input_index++, cycle_length_node);
Node* block_length_node;
TF_RETURN_IF_ERROR(b->AddScalar(block_length_, &block_length_node));
inputs.emplace_back(input_index++, block_length_node);
if (op_version_ == 1) {
Node* sloppy_node;
TF_RETURN_IF_ERROR(
b->AddScalar(deterministic_.IsNondeterministic(), &sloppy_node));
inputs.emplace_back(input_index++, sloppy_node);
}
Node* buffer_output_elements_node;
TF_RETURN_IF_ERROR(
b->AddScalar(buffer_output_elements_, &buffer_output_elements_node));
inputs.emplace_back(input_index++, buffer_output_elements_node);
Node* prefetch_input_elements_node;
TF_RETURN_IF_ERROR(
b->AddScalar(prefetch_input_elements_, &prefetch_input_elements_node));
inputs.emplace_back(input_index++, prefetch_input_elements_node);
std::vector<std::pair<StringPiece, AttrValue>> attrs;
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
attrs.emplace_back(kFunc, f);
if (op_version_ == 2) {
AttrValue deterministic_attr;
b->BuildAttrValue(deterministic_.String(), &deterministic_attr);
attrs.emplace_back(kDeterministic, deterministic_attr);
}
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
attrs.emplace_back(kTarguments, other_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(this, inputs, list_inputs, attrs, output));
return absl::OkStatus();
}
private:
int64_t num_threads() const {
return cycle_length_ + prefetch_input_elements_;
}
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params, bool deterministic)
: DatasetIterator<Dataset>(params),
deterministic_(deterministic),
workers_(dataset()->num_threads()),
worker_thread_states_(dataset()->num_threads()) {}
~Iterator() override {
CancelThreads();
if (deregister_fn_) deregister_fn_();
}
Status Initialize(IteratorContext* ctx) override {
cancellation_manager_ = std::make_unique<CancellationManager>();
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
IteratorContext(params), this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
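    // Consumer side of the interleave: scan the cycle starting at
    // next_index_, taking up to block_length_ elements from a worker before
    // advancing. In deterministic mode the consumer blocks on the current
    // worker's condition variable; in sloppy mode it takes the first
    // available output, waiting on any_element_available_cond_var_ only when
    // nothing is ready. Workers that exhaust their input element are swapped
    // out for ones from staging_indices_.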
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(EnsureWorkerThreadsStarted(ctx));
while (!cancelled_) {
bool can_produce_elements = false;
bool must_wait_for_input = true;
for (int64_t i = 0; i < interleave_indices_.size(); ++i) {
int64_t index = (next_index_ + i) % interleave_indices_.size();
int64_t current_worker_index = interleave_indices_[index];
if (current_worker_index < 0) {
continue;
}
WorkerState* current_worker = &workers_[current_worker_index];
can_produce_elements |= current_worker->MayHaveElements();
if (!current_worker->outputs.empty()) {
next_index_ = index;
const bool element_acquired_sloppily = !deterministic_ && i > 1;
if (!element_acquired_sloppily) {
block_count_++;
if (block_count_ == dataset()->block_length_) {
next_index_ = (index + 1) % interleave_indices_.size();
block_count_ = 0;
}
} else {
block_count_ = 0;
}
*end_of_sequence = false;
Status s = current_worker->outputs.front().status;
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode(
"ParallelInterleaveConsume",
{{"element_id", current_worker->outputs.front().id}});
});
current_worker->outputs.front().output.swap(*out_tensors);
current_worker->outputs.pop_front();
current_worker->cond_var.notify_one();
return s;
} else if (current_worker->is_producing && deterministic_) {
if (next_index_ != index) {
next_index_ = index;
block_count_ = 0;
}
break;
} else if (!current_worker->is_producing) {
interleave_indices_[index] = -1;
if (input_impl_) {
std::vector<Tensor> args;
bool end_of_input = false;
Status s = input_impl_->GetNext(ctx, &args, &end_of_input);
if (end_of_input) {
input_impl_.reset();
} else {
current_worker->SetInputs(s, std::move(args));
staging_indices_.emplace_back(current_worker_index);
}
}
if (!staging_indices_.empty()) {
interleave_indices_[index] = staging_indices_.front();
staging_indices_.pop_front();
next_index_ = (index + 1) % interleave_indices_.size();
block_count_ = 0;
can_produce_elements = true;
must_wait_for_input = false;
break;
}
}
}
if (!can_produce_elements && !input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (must_wait_for_input) {
RecordStop(ctx);
if (deterministic_) {
workers_[interleave_indices_[next_index_]].cond_var.wait(l);
} else {
any_element_available_cond_var_.wait(l);
}
RecordStart(ctx);
}
}
return errors::Cancelled(
"ParallelInterleaveDatasetOp::Dataset::Iterator::GetNext");
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeAsyncInterleaveManyNode(
std::move(args), {model::MakeNonTunableParameter(
kCycleLength, dataset()->cycle_length_),
model::MakeNonTunableParameter(
kDeterministic, deterministic_ ? 1.0 : 0.0)});
}
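    // Checkpointing writes both the consumer-visible WorkerStates and the
    // per-thread WorkerThreadStates under mu_ and ckpt_mu_, plus a marker
    // when worker threads are running so that RestoreInternal restarts them
    // after reloading the per-thread iterators.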
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
} else {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kInputExhausted, ""));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kNextIndex, next_index_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kBlockCount, block_count_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kWorkersSize, workers_.size()));
for (int i = 0; i < workers_.size(); ++i) {
TF_RETURN_IF_ERROR(WriteWorkerStateLocked(writer, i));
}
for (int i = 0; i < worker_thread_states_.size(); ++i) {
TF_RETURN_IF_ERROR(WriteWorkerThreadStateLocked(ctx, writer, i));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kInterleaveSize,
interleave_indices_.size()));
for (int i = 0; i < interleave_indices_.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kInterleaveIndices, "_", i),
interleave_indices_[i]));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kStagingSize, staging_indices_.size()));
for (int i = 0; i < staging_indices_.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), strings::StrCat(kStagingIndices, "_", i),
staging_indices_[i]));
}
if (!worker_threads_.empty()) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kWorkerThreadsRunning, ""));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
{
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
if (!reader->Contains(prefix(), kInputExhausted)) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNextIndex, &temp));
next_index_ = size_t(temp);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kBlockCount, &temp));
block_count_ = size_t(temp);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kWorkersSize, &temp));
if (temp != dataset()->num_threads()) {
return errors::Internal("Expected ", dataset()->num_threads(),
" worker states but found ", temp, ".");
}
for (size_t i = 0; i < dataset()->num_threads(); ++i) {
TF_RETURN_IF_ERROR(ReadWorkerStateLocked(ctx, reader, i));
}
}
std::unique_ptr<thread::ThreadPool> threadpool = ctx->CreateThreadPool(
"read_worker_thread_state", dataset()->num_threads());
Status s = absl::OkStatus();
BlockingCounter counter(dataset()->num_threads());
for (size_t i = 0; i < dataset()->num_threads(); ++i) {
threadpool->Schedule([this, i, ctx, reader, &s, &counter] {
WorkerThreadState state;
Status result = ReadWorkerThreadStateLocked(ctx, reader, i, &state);
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
if (!result.ok()) {
s.Update(result);
counter.DecrementCount();
return;
}
worker_thread_states_[i] = std::move(state);
counter.DecrementCount();
});
}
counter.Wait();
if (!s.ok()) {
return s;
}
mutex_lock l(mu_);
mutex_lock ckpt_l(ckpt_mu_);
std::set<int64_t> all_indices;
{
int64_t interleave_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInterleaveSize, &interleave_size));
interleave_indices_.reserve(interleave_size);
for (int64_t i = 0; i < interleave_size; ++i) {
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(kInterleaveIndices, "_", i), &temp));
if (temp >= 0 && all_indices.find(temp) != all_indices.end()) {
return errors::Internal(
"Duplicate entry for ", temp,
" found when reading interleave and staging indices.");
}
if (temp >= 0) {
all_indices.insert(temp);
}
interleave_indices_.emplace_back(temp);
}
}
{
int64_t staging_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kStagingSize, &staging_size));
for (int i = 0; i < staging_size; ++i) {
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), strings::StrCat(kStagingIndices, "_", i), &temp));
if (all_indices.find(temp) != all_indices.end()) {
return errors::Internal(
"Duplicate entry for ", temp,
" found when reading interleave and staging indices.");
}
if (temp >= 0) {
all_indices.insert(temp);
}
staging_indices_.emplace_back(temp);
}
}
if (reader->Contains(prefix(), kWorkerThreadsRunning)) {
worker_threads_.reserve(dataset()->num_threads());
for (size_t i = 0; i < dataset()->num_threads(); ++i) {
std::shared_ptr<IteratorContext> new_ctx(new IteratorContext(*ctx));
worker_threads_.emplace_back(ctx->StartThread(
strings::StrCat(kDataParallelInterleaveWorker, "_", i),
[this, new_ctx, i]() { WorkerThread(new_ctx, i); }));
}
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
return dataset()->traceme_metadata_;
}
private:
struct OutputElem {
Status status;
std::vector<Tensor> output;
int64_t id = -1;
explicit OutputElem(const Status& s) : status(s) {}
OutputElem(const Status& s, int64_t id) : status(s), id(id) {}
};
struct WorkerState {
std::vector<Tensor> input;
std::deque<OutputElem> outputs;
bool is_producing = false;
condition_variable cond_var;
inline bool MayHaveElements() const {
return is_producing || !outputs.empty();
}
void SetInputs(const Status& s, std::vector<Tensor> input_arguments) {
if (s.ok()) {
DCHECK(!MayHaveElements())
<< "Tried to start inputs, despite already producing!";
input = std::move(input_arguments);
is_producing = true;
cond_var.notify_one();
} else {
outputs.emplace_back(s);
}
}
};
struct WorkerThreadState {
OutputElem output_elem;
bool end_of_sequence = false;
Status iterator_creation_status;
std::vector<Tensor> input;
std::unique_ptr<IteratorBase> iterator;
WorkerThreadState() : output_elem(absl::OkStatus()) {}
};
void CancelThreads() TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(mu_);
cancelled_ = true;
for (auto& worker : workers_) {
worker.cond_var.notify_all();
}
}
Status EnsureWorkerThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (worker_threads_.empty() && input_impl_) {
worker_threads_.reserve(dataset()->num_threads());
for (int64_t i = 0; i < dataset()->num_threads(); ++i) {
std::vector<Tensor> args;
bool end_of_input = false;
Status s = input_impl_->GetNext(ctx, &args, &end_of_input);
if (end_of_input) {
input_impl_.reset();
return absl::OkStatus();
}
if (i < dataset()->cycle_length_) {
interleave_indices_.push_back(i);
} else {
staging_indices_.push_back(i);
}
workers_[i].SetInputs(s, std::move(args));
std::shared_ptr<IteratorContext> new_ctx(new IteratorContext(*ctx));
worker_threads_.push_back(ctx->StartThread(
strings::StrCat(kDataParallelInterleaveWorker, "_", i),
[this, new_ctx, i]() { WorkerThread(new_ctx, i); }));
}
DCHECK(interleave_indices_.size() == dataset()->cycle_length_);
DCHECK(staging_indices_.size() == dataset()->prefetch_input_elements_);
}
return absl::OkStatus();
}
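    // Body of one worker thread: wait for an input element, build a
    // sub-iterator for it via the captured function, then pump elements into
    // workers_[thread_index].outputs, blocking whenever the buffer already
    // holds buffer_output_elements_ items. While a thread's index is still in
    // staging_indices_ its sub-iterator runs with autotune disabled; it is
    // re-enabled once the thread joins the interleave cycle.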
void WorkerThread(const std::shared_ptr<IteratorContext>& ctx,
const int64_t thread_index) {
RecordStart(ctx.get());
auto cleanup = gtl::MakeCleanup([this, thread_index, ctx] {
mutex_lock l(mu_);
workers_[thread_index].cond_var.notify_all();
RecordStop(ctx.get());
});
bool make_new_iterator;
{
tf_shared_lock l(ckpt_mu_);
make_new_iterator =
worker_thread_states_[thread_index].iterator == nullptr &&
worker_thread_states_[thread_index].iterator_creation_status.ok();
}
bool thread_potentially_in_staging = true;
while (true) {
Status iterator_creation_status;
if (make_new_iterator) {
bool read_new_input;
{
tf_shared_lock l(ckpt_mu_);
read_new_input = worker_thread_states_[thread_index].input.empty();
}
if (read_new_input) {
mutex_lock l(mu_);
while (!cancelled_ && !workers_[thread_index].is_producing) {
RecordStop(ctx.get());
workers_[thread_index].cond_var.wait(l);
RecordStart(ctx.get());
}
if (cancelled_) return;
tf_shared_lock ckpt_l(ckpt_mu_);
worker_thread_states_[thread_index].input.swap(
workers_[thread_index].input);
}
{
mutex_lock l(mu_);
thread_potentially_in_staging =
absl::c_find(staging_indices_, thread_index) !=
staging_indices_.end();
}
{
tf_shared_lock l(ckpt_mu_);
worker_thread_states_[thread_index].iterator_creation_status =
MakeIteratorFromInputElement(
ctx.get(), this, worker_thread_states_[thread_index].input,
thread_index, *instantiated_captured_func_, prefix(),
&worker_thread_states_[thread_index].iterator,
model_node());
iterator_creation_status =
worker_thread_states_[thread_index].iterator_creation_status;
if (!iterator_creation_status.ok()) {
worker_thread_states_[thread_index].input.clear();
} else if (thread_potentially_in_staging) {
DisableAutotune(
ctx.get(),
worker_thread_states_[thread_index].iterator.get());
}
}
} else {
tf_shared_lock l(ckpt_mu_);
iterator_creation_status =
worker_thread_states_[thread_index].iterator_creation_status;
make_new_iterator = true;
}
if (!iterator_creation_status.ok()) {
mutex_lock l(mu_);
while (!cancelled_ && workers_[thread_index].outputs.size() ==
dataset()->buffer_output_elements_) {
RecordStop(ctx.get());
workers_[thread_index].cond_var.wait(l);
RecordStart(ctx.get());
}
if (cancelled_) return;
tf_shared_lock ckpt_l(ckpt_mu_);
workers_[thread_index].outputs.emplace_back(iterator_creation_status);
workers_[thread_index].is_producing = false;
worker_thread_states_[thread_index].iterator_creation_status =
absl::OkStatus();
if (deterministic_) {
workers_[thread_index].cond_var.notify_one();
} else {
any_element_available_cond_var_.notify_one();
}
} else {
bool end_of_sequence = false;
while (!end_of_sequence) {
if (thread_potentially_in_staging) {
mutex_lock l(mu_);
thread_potentially_in_staging =
absl::c_find(staging_indices_, thread_index) !=
staging_indices_.end();
if (!thread_potentially_in_staging) {
tf_shared_lock l(ckpt_mu_);
EnableAutotune(
ctx.get(),
worker_thread_states_[thread_index].iterator.get());
}
}
{
tf_shared_lock ckpt_l(ckpt_mu_);
if (worker_thread_states_[thread_index].output_elem.status.ok() &&
worker_thread_states_[thread_index]
.output_elem.output.empty() &&
!worker_thread_states_[thread_index].end_of_sequence) {
int64_t& id =
worker_thread_states_[thread_index].output_elem.id;
tsl::profiler::TraceMe traceme(
[&] {
id = tsl::profiler::TraceMe::NewActivityId();
return tsl::profiler::TraceMeEncode(
"ParallelInterleaveProduce", {{"element_id", id}});
},
profiler::kInfo);
worker_thread_states_[thread_index].output_elem.status =
worker_thread_states_[thread_index].iterator->GetNext(
ctx.get(),
&worker_thread_states_[thread_index].output_elem.output,
&worker_thread_states_[thread_index].end_of_sequence);
end_of_sequence =
worker_thread_states_[thread_index].end_of_sequence;
} else {
end_of_sequence =
worker_thread_states_[thread_index].end_of_sequence;
}
}
{
mutex_lock l(mu_);
while (!cancelled_ && workers_[thread_index].outputs.size() ==
dataset()->buffer_output_elements_) {
RecordStop(ctx.get());
workers_[thread_index].cond_var.wait(l);
RecordStart(ctx.get());
}
if (cancelled_) return;
tf_shared_lock ckpt_l(ckpt_mu_);
workers_[thread_index].is_producing = !end_of_sequence;
if (end_of_sequence) {
worker_thread_states_[thread_index].iterator.reset();
worker_thread_states_[thread_index].input.clear();
worker_thread_states_[thread_index].end_of_sequence = false;
} else {
workers_[thread_index].outputs.emplace_back(
worker_thread_states_[thread_index].output_elem.status,
worker_thread_states_[thread_index].output_elem.id);
workers_[thread_index].outputs.back().output.swap(
worker_thread_states_[thread_index].output_elem.output);
}
worker_thread_states_[thread_index].output_elem.status =
absl::OkStatus();
if (deterministic_) {
workers_[thread_index].cond_var.notify_one();
} else {
any_element_available_cond_var_.notify_one();
}
}
}
}
}
}
Status WriteWorkerStateLocked(IteratorStateWriter* writer, int index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_, ckpt_mu_) {
string iterator_name =
strings::StrCat(prefix(), "::", kWorker, "_", index);
TF_RETURN_IF_ERROR(writer->WriteScalar(iterator_name, kInputSize,
workers_[index].input.size()));
for (int i = 0; i < workers_[index].input.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteTensor(iterator_name,
strings::StrCat(kInput, "_", i),
workers_[index].input[i]));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(iterator_name, kOutputsSize,
workers_[index].outputs.size()));
for (int i = 0; i < workers_[index].outputs.size(); ++i) {
TF_RETURN_IF_ERROR(WriteOutputElemLocked(
writer, workers_[index].outputs[i], iterator_name,
strings::StrCat(kOutputs, "_", i)));
}
if (workers_[index].is_producing) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(iterator_name, kIsProducing, ""));
}
return absl::OkStatus();
}
Status ReadWorkerStateLocked(IteratorContext* ctx,
IteratorStateReader* reader, int index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_, ckpt_mu_) {
string worker_prefix =
strings::StrCat(prefix(), "::", kWorker, "_", index);
int64_t input_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(worker_prefix, kInputSize, &input_size));
workers_[index].input.reserve(input_size);
for (int i = 0; i < input_size; ++i) {
workers_[index].input.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(ctx->flr(), worker_prefix,
strings::StrCat(kInput, "_", i),
&workers_[index].input.back()));
}
int64_t outputs_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(worker_prefix, kOutputsSize, &outputs_size));
for (int i = 0; i < outputs_size; ++i) {
workers_[index].outputs.emplace_back(absl::OkStatus());
TF_RETURN_IF_ERROR(ReadOutputElemLocked(
ctx, reader, &workers_[index].outputs.back(), worker_prefix,
strings::StrCat(kOutputs, "_", i)));
}
if (reader->Contains(worker_prefix, kIsProducing)) {
workers_[index].is_producing = true;
} else {
workers_[index].is_producing = false;
}
return absl::OkStatus();
}
Status WriteWorkerThreadStateLocked(SerializationContext* ctx,
IteratorStateWriter* writer, int index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_, ckpt_mu_) {
string iterator_name =
strings::StrCat(prefix(), "::", kWorkerThread, "_", index);
if (worker_thread_states_[index].iterator != nullptr) {
TF_RETURN_IF_ERROR(
SaveInput(ctx, writer, worker_thread_states_[index].iterator));
} else {
TF_RETURN_IF_ERROR(
writer->WriteScalar(iterator_name, kIteratorExhausted, ""));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(iterator_name, kInputSize,
worker_thread_states_[index].input.size()));
for (int i = 0; i < worker_thread_states_[index].input.size(); ++i) {
TF_RETURN_IF_ERROR(
writer->WriteTensor(iterator_name, strings::StrCat(kInput, "_", i),
worker_thread_states_[index].input[i]));
}
TF_RETURN_IF_ERROR(WriteStatusLocked(
writer, iterator_name, kIteratorCreationStatus,
worker_thread_states_[index].iterator_creation_status));
TF_RETURN_IF_ERROR(WriteOutputElemLocked(
writer, worker_thread_states_[index].output_elem, iterator_name,
kOutput));
if (worker_thread_states_[index].end_of_sequence) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(iterator_name, kEndOfSequence, ""));
}
return absl::OkStatus();
}
Status ReadWorkerThreadStateLocked(IteratorContext* ctx,
IteratorStateReader* reader, int index,
WorkerThreadState* state) {
string worker_prefix =
strings::StrCat(prefix(), "::", kWorkerThread, "_", index);
int64_t input_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(worker_prefix, kInputSize, &input_size));
state->input.reserve(input_size);
for (int i = 0; i < input_size; ++i) {
state->input.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(ctx->flr(), worker_prefix,
strings::StrCat(kInput, "_", i),
&state->input.back()));
}
if (reader->Contains(worker_prefix, kIteratorExhausted)) {
state->iterator.reset();
} else {
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(MakeIteratorFromInputElement(
ctx, this, state->input, index, *instantiated_captured_func_,
prefix(), &iterator, nullptr));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, iterator));
state->iterator.swap(iterator);
}
TF_RETURN_IF_ERROR(ReadStatusLocked(reader, worker_prefix,
kIteratorCreationStatus,
&state->iterator_creation_status));
TF_RETURN_IF_ERROR(ReadOutputElemLocked(ctx, reader, &state->output_elem,
worker_prefix, kOutput));
if (reader->Contains(worker_prefix, kEndOfSequence)) {
state->end_of_sequence = true;
} else {
state->end_of_sequence = false;
}
return absl::OkStatus();
}
Status WriteOutputElemLocked(IteratorStateWriter* writer,
const OutputElem& output_elem,
const string& iterator_name,
const string& prefix)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_, ckpt_mu_) {
TF_RETURN_IF_ERROR(WriteStatusLocked(
writer, iterator_name, strings::StrCat(prefix, "_", kStatus),
output_elem.status));
TF_RETURN_IF_ERROR(writer->WriteScalar(
iterator_name, strings::StrCat(prefix, "_", kOutputSize),
output_elem.output.size()));
for (int i = 0; i < output_elem.output.size(); ++i) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
iterator_name, strings::StrCat(prefix, "_", kOutput, "_", i),
output_elem.output[i]));
}
return absl::OkStatus();
}
Status ReadOutputElemLocked(IteratorContext* ctx,
IteratorStateReader* reader,
OutputElem* output_elem,
const string& iterator_name,
const string& prefix) {
TF_RETURN_IF_ERROR(ReadStatusLocked(reader, iterator_name,
strings::StrCat(prefix, "_", kStatus),
&output_elem->status));
int64_t output_size;
TF_RETURN_IF_ERROR(reader->ReadScalar(
iterator_name, strings::StrCat(prefix, "_", kOutputSize),
&output_size));
output_elem->output.reserve(output_size);
for (int i = 0; i < output_size; ++i) {
output_elem->output.emplace_back();
TF_RETURN_IF_ERROR(
reader->ReadTensor(ctx->flr(), iterator_name,
strings::StrCat(prefix, "_", kOutput, "_", i),
&output_elem->output.back()));
}
return absl::OkStatus();
}
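// A Status is checkpointed as its integer code plus, for non-OK codes, the
// error message.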
Status WriteStatusLocked(IteratorStateWriter* writer,
const string& iterator_name, const string& prefix,
const Status& status)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_, ckpt_mu_) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
iterator_name, strings::StrCat(prefix, "_", kCode),
static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
iterator_name, strings::StrCat(prefix, "_", KMessage),
std::string(status.message())));
}
return absl::OkStatus();
}
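// Inverse of WriteStatusLocked.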
Status ReadStatusLocked(IteratorStateReader* reader,
const string& iterator_name, const string& prefix,
Status* status) {
int64_t code_int;
TF_RETURN_IF_ERROR(reader->ReadScalar(
iterator_name, strings::StrCat(prefix, "_", kCode), &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(reader->ReadScalar(
iterator_name, strings::StrCat(prefix, "_", KMessage),
&error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
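// Lock order: mu_ (runtime state) must be acquired before ckpt_mu_, which
// serializes checkpointing against the worker threads.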
mutex mu_ TF_ACQUIRED_BEFORE(ckpt_mu_);
condition_variable any_element_available_cond_var_;
const bool deterministic_;
mutex ckpt_mu_;
std::unique_ptr<CancellationManager> cancellation_manager_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
std::vector<WorkerState> workers_ TF_GUARDED_BY(mu_);
std::vector<WorkerThreadState> worker_thread_states_
TF_GUARDED_BY(ckpt_mu_);
std::vector<int64_t> interleave_indices_ TF_GUARDED_BY(mu_);
std::deque<int64_t> staging_indices_ TF_GUARDED_BY(mu_);
size_t next_index_ TF_GUARDED_BY(mu_) = 0;
size_t block_count_ TF_GUARDED_BY(mu_) = 0;
bool cancelled_ TF_GUARDED_BY(mu_) = false;
std::vector<std::unique_ptr<Thread>> worker_threads_ TF_GUARDED_BY(mu_);
std::function<void()> deregister_fn_;
};
const DatasetBase* const input_;
const std::unique_ptr<CapturedFunction> captured_func_;
const int64_t cycle_length_;
const int64_t block_length_;
const DeterminismPolicy deterministic_;
const int64_t buffer_output_elements_;
const int64_t prefetch_input_elements_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
const TraceMeMetadata traceme_metadata_;
const int op_version_;
};
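// The presence of the `deterministic` attr selects op version 2; version 1
// is the legacy kernel configured through a boolean `sloppy` input.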
ParallelInterleaveDatasetOp::ParallelInterleaveDatasetOp(
OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx),
op_version_(ctx->HasAttr(kDeterministic) ? 2 : 1) {
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, {},
&func_metadata_));
if (op_version_ == 2) {
std::string deterministic;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kDeterministic, &deterministic));
OP_REQUIRES_OK(
ctx, DeterminismPolicy::FromString(deterministic, &deterministic_));
}
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void ParallelInterleaveDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase* input,
DatasetBase** output) {
int64_t cycle_length = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kCycleLength, &cycle_length));
OP_REQUIRES(ctx, cycle_length > 0,
errors::InvalidArgument("`cycle_length` must be > 0"));
int64_t block_length = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kBlockLength, &block_length));
OP_REQUIRES(ctx, block_length > 0,
errors::InvalidArgument("`block_length` must be > 0"));
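// V1 has no `deterministic` attr; translate its `sloppy` input into the
// equivalent determinism policy.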
if (op_version_ == 1) {
bool sloppy = false;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kSloppy, &sloppy));
if (sloppy) {
deterministic_ =
DeterminismPolicy(DeterminismPolicy::Type::kNondeterministic);
} else {
deterministic_ =
DeterminismPolicy(DeterminismPolicy::Type::kDeterministic);
}
}
int64_t buffer_output_elements = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kBufferOutputElements,
&buffer_output_elements));
OP_REQUIRES(ctx, buffer_output_elements > 0,
errors::InvalidArgument("`buffer_output_elements` must be > 0"));
int64_t prefetch_input_elements = 0;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kPrefetchInputElements,
&prefetch_input_elements));
OP_REQUIRES(
ctx, prefetch_input_elements >= 0,
errors::InvalidArgument("`prefetch_input_elements` must be >= 0"));
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
*output = new Dataset(ctx, input, std::move(captured_func), cycle_length,
block_length, deterministic_, buffer_output_elements,
prefetch_input_elements, output_types_, output_shapes_,
op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ParallelInterleaveDataset").Device(DEVICE_CPU),
ParallelInterleaveDatasetOp);
REGISTER_KERNEL_BUILDER(
Name("ExperimentalParallelInterleaveDataset").Device(DEVICE_CPU),
ParallelInterleaveDatasetOp);
REGISTER_KERNEL_BUILDER(
Name("LegacyParallelInterleaveDatasetV2").Device(DEVICE_CPU),
ParallelInterleaveDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("ParallelInterleaveDataset");
REGISTER_INPUT_COLOCATION_EXEMPTION("ExperimentalParallelInterleaveDataset");
REGISTER_INPUT_COLOCATION_EXEMPTION("LegacyParallelInterleaveDatasetV2");
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "parallel_interleave_dataset";
constexpr int kOpVersion = 2;
class ParallelInterleaveDatasetParams : public DatasetParams {
public:
template <typename T>
ParallelInterleaveDatasetParams(
T input_dataset_params, std::vector<Tensor> other_arguments,
int64_t cycle_length, int64_t block_length,
const std::string& deterministic, int64_t buffer_output_elements,
int64_t prefetch_input_elements, FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
const DataTypeVector& output_dtypes,
const std::vector<PartialTensorShape>& output_shapes, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
cycle_length_(cycle_length),
block_length_(block_length),
deterministic_(deterministic),
buffer_output_elements_(buffer_output_elements),
prefetch_input_elements_(prefetch_input_elements),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
op_version_ = kOpVersion;
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
iterator_prefix_ = name_utils::IteratorPrefix(
input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix(), params);
}
std::vector<Tensor> GetInputTensors() const override {
auto input_tensors = other_arguments_;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {cycle_length_}));
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {block_length_}));
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {buffer_output_elements_}));
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {prefetch_input_elements_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(ParallelInterleaveDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(ParallelInterleaveDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(ParallelInterleaveDatasetOp::kCycleLength);
input_names->emplace_back(ParallelInterleaveDatasetOp::kBlockLength);
input_names->emplace_back(
ParallelInterleaveDatasetOp::kBufferOutputElements);
input_names->emplace_back(
ParallelInterleaveDatasetOp::kPrefetchInputElements);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"deterministic", deterministic_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return ParallelInterleaveDatasetOp::kDatasetType;
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> other_arguments_;
int64_t cycle_length_;
int64_t block_length_;
std::string deterministic_;
int64_t buffer_output_elements_;
int64_t prefetch_input_elements_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class ParallelInterleaveDatasetOpTest : public DatasetOpsTestBase {};
FunctionDefHelper::AttrValueWrapper MakeTensorSliceDatasetFunc(
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes) {
return FunctionDefHelper::FunctionRef(
"MakeTensorSliceDataset",
{{TensorSliceDatasetOp::kToutputTypes, output_types},
{TensorSliceDatasetOp::kOutputShapes, output_shapes}});
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
1,
DeterminismPolicy::kDeterministic,
1,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
2,
1,
DeterminismPolicy::kDeterministic,
1,
0,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
3,
1,
DeterminismPolicy::kNondeterministic,
3,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
5,
1,
DeterminismPolicy::kNondeterministic,
1,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams ParallelInterleaveDatasetParams5() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<tstring>(
TensorShape{3, 3, 1}, {"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
2,
2,
DeterminismPolicy::kDeterministic,
2,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_STRING}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_STRING},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams EmptyInputParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{Tensor{}},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
2,
2,
DeterminismPolicy::kNondeterministic,
2,
2,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_FLOAT}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_FLOAT},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidCycleLengthParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
0,
1,
DeterminismPolicy::kDeterministic,
1,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidBlockLengthParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
-1,
DeterminismPolicy::kDeterministic,
1,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidBufferOutputElementsParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
1,
DeterminismPolicy::kDeterministic,
0,
1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
ParallelInterleaveDatasetParams InvalidPrefetchInputElementsParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
return ParallelInterleaveDatasetParams(
tensor_slice_dataset_params,
{},
1,
1,
DeterminismPolicy::kDeterministic,
1,
-1,
MakeTensorSliceDatasetFunc(
DataTypeVector({DT_INT64}),
std::vector<PartialTensorShape>({PartialTensorShape({1})})),
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
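// The trailing bool in each case is whether the harness should also compare
// element order; it is set only for the deterministic configurations.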
std::vector<GetNextTestCase<ParallelInterleaveDatasetParams>>
GetNextTestCases() {
return {{ParallelInterleaveDatasetParams1(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams2(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams3(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams4(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams5(),
CreateTensors<tstring>(
TensorShape{1},
{{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}}),
false},
{EmptyInputParams(),
CreateTensors<tstring>(TensorShape{1}, {}),
true}};
}
ITERATOR_GET_NEXT_TEST_P(ParallelInterleaveDatasetOpTest,
ParallelInterleaveDatasetParams, GetNextTestCases())
TEST_F(ParallelInterleaveDatasetOpTest, DatasetNodeName) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ParallelInterleaveDatasetOpTest, DatasetTypeString) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ParallelInterleaveDatasetOp::kDatasetType, params)));
}
TEST_F(ParallelInterleaveDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ParallelInterleaveDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({1})}));
}
std::vector<CardinalityTestCase<ParallelInterleaveDatasetParams>>
CardinalityTestCases() {
return {{ParallelInterleaveDatasetParams1(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams2(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams3(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams4(),
kUnknownCardinality},
{ParallelInterleaveDatasetParams5(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(ParallelInterleaveDatasetOpTest,
ParallelInterleaveDatasetParams,
CardinalityTestCases())
TEST_F(ParallelInterleaveDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ParallelInterleaveDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({1})}));
}
TEST_F(ParallelInterleaveDatasetOpTest, IteratorPrefix) {
auto dataset_params = ParallelInterleaveDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(ParallelInterleaveDatasetOp::kDatasetType,
dataset_params.iterator_prefix(), params)));
}
std::vector<IteratorSaveAndRestoreTestCase<ParallelInterleaveDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ParallelInterleaveDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}}),
true},
{ParallelInterleaveDatasetParams3(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams4(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}}),
false},
{ParallelInterleaveDatasetParams5(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape{1},
{{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}}),
false}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelInterleaveDatasetOpTest,
ParallelInterleaveDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(ParallelInterleaveDatasetOpTest, InvalidArguments) {
std::vector<ParallelInterleaveDatasetParams> invalid_params = {
InvalidCycleLengthParams(), InvalidBlockLengthParams(),
InvalidBufferOutputElementsParams(),
InvalidPrefetchInputElementsParams()};
for (auto& dataset_params : invalid_params) {
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/parallel_interleave_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1624f2bd-0f8b-4ecc-937f-e20454f99463 | cpp | tensorflow/tensorflow | xplane_to_op_metrics_db | tensorflow/core/profiler/convert/xplane_to_op_metrics_db.cc | tensorflow/core/profiler/convert/xplane_to_op_metrics_db_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_op_metrics_db.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/tf_op_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/convert/op_metrics_db_combiner.h"
#include "tensorflow/core/profiler/convert/op_stack.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/cost_utils.h"
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include "tensorflow/core/profiler/utils/op_utils.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
constexpr uint64_t kRootSymbolId = 0;
enum TfActivityType { kTfOpBegin, kTfOpEnd };
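// Each TF op execution is recorded as a begin/end pair of activities so that
// nested ops can later be replayed with a stack.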
struct TfActivity {
uint64 timestamp_ps;
uint32 tf_op_id;
TfActivityType activity_type;
tsl::profiler::TfOp tf_op;
bool is_eager;
};
struct TfOpInfo {
explicit TfOpInfo(uint64 ts) : start_timestamp_ps(ts) {}
uint64 start_timestamp_ps;
uint64 children_duration_ps = 0;
};
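// Replays a single activity against the op stack. On an end event the op's
// total and children durations are recorded, and its duration is credited to
// the parent's children time so self-time can be derived.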
void ProcessOneTfActivity(const TfActivity& activity,
OpStack<TfOpInfo>* tf_op_stack,
TfMetricsDbData* tf_metrics_data) {
uint32 tf_op_id = activity.tf_op_id;
switch (activity.activity_type) {
case kTfOpBegin: {
tf_op_stack->Push(tf_op_id,
std::make_unique<TfOpInfo>(activity.timestamp_ps));
break;
}
case kTfOpEnd: {
std::unique_ptr<TfOpInfo> info = tf_op_stack->Pop(tf_op_id);
if (info == nullptr) {
VLOG(1) << "No begin event found for TF activity id=" << tf_op_id
<< " name=" << activity.tf_op.name
<< " type=" << activity.tf_op.type;
break;
}
tsl::profiler::Timespan tf_op_span = tsl::profiler::PicoSpan(
info->start_timestamp_ps, activity.timestamp_ps);
tf_metrics_data->tf_metrics_db_builder.EnterOp(
activity.tf_op.name, activity.tf_op.type, activity.is_eager,
tf_op_span.duration_ps(), info->children_duration_ps);
TfOpInfo* parent_info = tf_op_stack->Top();
if (parent_info != nullptr) {
parent_info->children_duration_ps += tf_op_span.duration_ps();
}
if (tsl::profiler::IsInfeedEnqueueOp(activity.tf_op.type)) {
tf_metrics_data->tf_metrics_db_builder.EnterHostInfeedEnqueue(
tf_op_span);
}
break;
}
}
}
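// Sorts all activities by timestamp and replays them in order; the DB's total
// time is the span from the first to the last activity.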
void ProcessTfActivities(std::vector<TfActivity>* tf_activities,
TfMetricsDbData* tf_metrics_db_data) {
if (tf_activities->empty()) return;
absl::c_stable_sort(*tf_activities,
[](const TfActivity& a, const TfActivity& b) {
return a.timestamp_ps < b.timestamp_ps;
});
OpStack<TfOpInfo> tf_op_stack;
for (const auto& tf_activity : *tf_activities) {
ProcessOneTfActivity(tf_activity, &tf_op_stack, tf_metrics_db_data);
}
SetTotalTimePs(
tf_metrics_db_data->tf_metrics_db,
tf_activities->back().timestamp_ps - tf_activities->front().timestamp_ps);
}
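// Converts each XEvent on a (non-derived) line into begin/end activities,
// recognizing ops either through the event-metadata id map or through an
// explicit kTfOp stat on the event.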
void CollectTfActivities(
const XLineVisitor& line,
const absl::flat_hash_map<int64_t, tsl::profiler::TfOp>& tf_ops,
std::vector<TfActivity>* tf_activities) {
uint32 tf_op_id = 0;
if (IsDerivedThreadId(line.Id())) return;
tf_activities->reserve(line.NumEvents() * 2);
line.ForEachEvent(
[&tf_ops, &tf_op_id, &tf_activities](const XEventVisitor& event) {
const tsl::profiler::TfOp* tf_op = gtl::FindOrNull(tf_ops, event.Id());
if (tf_op != nullptr) {
++tf_op_id;
bool is_eager = false;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kIsEager)) {
is_eager = stat->IntValue();
}
tsl::profiler::Timespan span = event.GetTimespan();
tf_activities->push_back(
{span.begin_ps(), tf_op_id, kTfOpBegin, *tf_op, is_eager});
tf_activities->push_back(
{span.end_ps(), tf_op_id, kTfOpEnd, *tf_op, is_eager});
}
if (auto tf_op_stat = event.GetStat(StatType::kTfOp);
tf_op_stat.has_value()) {
++tf_op_id;
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(tf_op_stat->StrOrRefValue());
tsl::profiler::Timespan span = event.GetTimespan();
tf_activities->push_back(
{span.begin_ps(), tf_op_id, kTfOpBegin, tf_op, false});
tf_activities->push_back(
{span.end_ps(), tf_op_id, kTfOpEnd, tf_op, false});
}
});
}
}
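// Maps each event-metadata id in the host trace to its parsed TF op, keeping
// only names that parse to a known op category.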
absl::flat_hash_map<int64_t, tsl::profiler::TfOp>
CollectTfOpsFromHostThreadsXPlane(const XPlane& host_trace) {
absl::flat_hash_map<int64_t, tsl::profiler::TfOp> tf_ops;
for (const auto& id_metadata : host_trace.event_metadata()) {
const XEventMetadata& metadata = id_metadata.second;
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(metadata.name());
if (tf_op.category != tsl::profiler::Category::kUnknown) {
tf_ops.try_emplace(metadata.id(), tf_op);
}
}
return tf_ops;
}
TfMetricsDbData ConvertHostThreadsXLineToTfMetricsDbData(
const XLineVisitor& line,
const absl::flat_hash_map<int64_t, tsl::profiler::TfOp>& tf_ops) {
TfMetricsDbData tf_metrics_db_data;
std::vector<TfActivity> tf_activities;
CollectTfActivities(line, tf_ops, &tf_activities);
ProcessTfActivities(&tf_activities, &tf_metrics_db_data);
return tf_metrics_db_data;
}
void ConsumeTfMetricsDbData(TfMetricsDbData src, OpMetricsDbCombiner* dst) {
AddIdleOp(src.tf_metrics_db);
dst->Combine(src.tf_metrics_db, /*update_num_cores=*/false);
src.tf_metrics_db.Clear();
}
OpMetricsDb ConvertHostThreadsXPlaneToOpMetricsDb(const XPlane& host_trace) {
absl::flat_hash_map<int64_t, tsl::profiler::TfOp> tf_ops =
CollectTfOpsFromHostThreadsXPlane(host_trace);
OpMetricsDb result;
OpMetricsDbCombiner combiner(&result);
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&host_trace);
plane.ForEachLine([&tf_ops, &combiner](const XLineVisitor& line) {
ConsumeTfMetricsDbData(
ConvertHostThreadsXLineToTfMetricsDbData(line, tf_ops), &combiner);
});
return result;
}
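// TPU device traces already carry per-event op metrics stats, so aggregation
// is delegated to XEventsOpMetricsDbBuilder and finalized with the total
// profile duration.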
OpMetricsDb ConvertTpuDeviceTraceXPlaneToOpMetricsDb(
const XPlane& device_trace) {
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&device_trace);
using OpMetricBySymbol =
absl::flat_hash_map<uint64_t, OpMetrics>;
XEventsOpMetricsDbBuilder builder;
plane.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent(
[&](const XEventVisitor& event) { builder.AddOpMetric(event); });
});
return builder.Finalize(
plane.GetStat(StatType::kTotalProfileDurationPs)->IntOrUintValue());
}
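// For GPU device traces, ops are reconstructed from the kTfOp stat on each
// kernel event, and FLOPs/bytes are estimated with a roofline cost model.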
OpMetricsDb ConvertDeviceTraceXPlaneToOpMetricsDb(const XPlane& device_trace) {
OpMetricsDb result;
DeviceOpMetricsDbBuilder device_op_metrics_db_builder(&result);
int64_t first_op_offset_ps = kint64max;
int64_t last_op_offset_ps = 0;
TfOpRoofLineCostEstimator op_level_cost_estimator;
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&device_trace);
plane.ForEachLine([&](const XLineVisitor& line) {
if (IsDerivedThreadId(line.Id())) return;
line.ForEachEvent([&](const XEventVisitor& event) {
first_op_offset_ps = std::min(first_op_offset_ps, event.OffsetPs());
last_op_offset_ps = std::max(last_op_offset_ps, event.EndOffsetPs());
absl::string_view tf_op_full_name;
bool is_eager = false;
int64_t program_id = 0;
absl::string_view deduplicated_name = "";
event.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Type() == StatType::kTfOp) {
tf_op_full_name = stat.StrOrRefValue();
} else if (stat.Type() == StatType::kIsEager) {
is_eager = stat.IntValue();
} else if (stat.Type() == StatType::kProgramId) {
program_id = stat.IntOrUintValue();
} else if (stat.Type() == StatType::kDeduplicatedName) {
deduplicated_name = stat.StrOrRefValue();
}
});
if (tf_op_full_name.empty()) return;
tsl::profiler::TfOp tf_op =
tsl::profiler::ParseTfOpFullname(tf_op_full_name);
TfOpRoofLineCostEstimator::OpRoofLineStats costs;
if (tf_op.category != tsl::profiler::Category::kUnknown) {
costs = op_level_cost_estimator.Predict(event);
}
// Note: the argument-name comments below reflect a reading of the builder's
// signature, which is not shown in this file.
device_op_metrics_db_builder.EnterOp(
program_id,
absl::StrCat(tf_op.name, "/", event.Name()),
tf_op.type,
tf_op_full_name, deduplicated_name, is_eager,
/*occurrences=*/1, event.DurationPs(),
/*children_time_ps=*/0, costs.flops, costs.bytes_accessed);
});
});
SetTotalTimePs(
result, last_op_offset_ps ? last_op_offset_ps - first_op_offset_ps : 0);
AddIdleOp(result);
return result;
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_op_metrics_db.h"
#include <cstdint>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/op_metrics.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
#if defined(PLATFORM_GOOGLE)
using ::testing::EqualsProto;
#endif
void AddTensorFlowTpuOpEvent(std::string&& name, std::string&& tf_op_fullname,
int64_t start_timestamp_ns, int64_t duration_ns,
std::string&& hlo_category, uint64 flops,
uint64 bytes_accessed, int64_t occurrences,
int64_t self_duration, int64_t program_id,
int64_t symbol_id, XPlaneBuilder* plane,
XLineBuilder* line) {
XEventBuilder event = line->AddEvent(*plane->GetOrCreateEventMetadata(name));
event.SetTimestampNs(start_timestamp_ns);
event.SetDurationNs(duration_ns);
event.SetNumOccurrences(occurrences);
XStatsBuilder<XEventMetadata> event_metadata(
plane->GetOrCreateEventMetadata(name), plane);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
tf_op_fullname);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kHloCategory)),
hlo_category);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kFlops)), flops);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kSymbolId)),
symbol_id);
event_metadata.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kProgramId)),
program_id);
}
void AddTensorFlowOpEvent(std::string&& tf_op_fullname,
int64_t start_timestamp_ns, int64_t duration_ns,
bool on_device, absl::string_view kernel_name,
XPlaneBuilder* plane, XLineBuilder* line) {
absl::string_view name = on_device ? kernel_name : tf_op_fullname;
XEventBuilder event = line->AddEvent(*plane->GetOrCreateEventMetadata(name));
event.SetTimestampNs(start_timestamp_ns);
event.SetDurationNs(duration_ns);
if (!on_device) return;
event.AddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
*plane->GetOrCreateStatMetadata(std::move(tf_op_fullname)));
}
void AddXlaCpuOpEvent(std::string&& hlo_op_name, std::string&& tf_op,
int64_t start_timestamp_ns, int64_t duration_ns,
XPlaneBuilder* plane, XLineBuilder* line) {
XEventBuilder event =
line->AddEvent(*plane->GetOrCreateEventMetadata(hlo_op_name));
event.SetTimestampNs(start_timestamp_ns);
event.SetDurationNs(duration_ns);
event.ParseAndAddStatValue(
*plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)), tf_op);
}
TEST(ConvertXPlaneToOpMetricsDb, HostOpMetricsDb) {
static constexpr char kTfOp1[] = "TfOp1";
static constexpr char kTfOp2[] = "TfOp2";
constexpr int64_t kTfOp1StartNs = 100000;
constexpr int64_t kTfOp1DurationNs = 8000;
constexpr int64_t kTfOp2StartNs = 110000;
constexpr int64_t kTfOp2DurationNs = 10000;
XSpace xspace;
XPlane* xplane = GetOrCreateHostXPlane(&xspace);
XPlaneBuilder host_plane(xplane);
XLineBuilder thread1 = host_plane.GetOrCreateLine(10);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kTfOp1StartNs,
kTfOp1DurationNs, /*on_device=*/false,
/*kernel_name=*/"", &host_plane, &thread1);
XLineBuilder thread2 = host_plane.GetOrCreateLine(20);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kTfOp1StartNs,
kTfOp1DurationNs, /*on_device=*/false,
/*kernel_name=*/"", &host_plane, &thread2);
AddTensorFlowOpEvent(absl::StrCat(kTfOp2, ":", kTfOp2), kTfOp2StartNs,
kTfOp2DurationNs, /*on_device=*/false,
/*kernel_name=*/"", &host_plane, &thread2);
OpMetricsDb op_metrics = ConvertHostThreadsXPlaneToOpMetricsDb(*xplane);
EXPECT_EQ(3, op_metrics.metrics_db_size());
uint64 total_op_duration =
tsl::profiler::NanoToPico(kTfOp1DurationNs * 2 + kTfOp2DurationNs);
EXPECT_EQ(total_op_duration, op_metrics.total_op_time_ps());
uint64 total_duration = tsl::profiler::NanoToPico(
kTfOp2StartNs - kTfOp1StartNs + kTfOp2DurationNs + kTfOp1DurationNs);
EXPECT_EQ(total_duration, op_metrics.total_time_ps());
const OpMetrics& op_1 = op_metrics.metrics_db().at(0);
EXPECT_EQ(kTfOp1, op_1.name());
EXPECT_EQ(kTfOp1, op_1.category());
EXPECT_EQ(2, op_1.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kTfOp1DurationNs) * 2, op_1.time_ps());
const OpMetrics& idle = op_metrics.metrics_db().at(1);
EXPECT_EQ(kIdle, idle.name());
EXPECT_EQ(kIdle, idle.category());
EXPECT_EQ(tsl::profiler::NanoToPico(2000), idle.time_ps());
const OpMetrics& op_2 = op_metrics.metrics_db().at(2);
EXPECT_EQ(kTfOp2, op_2.name());
EXPECT_EQ(kTfOp2, op_2.category());
EXPECT_EQ(1, op_2.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kTfOp2DurationNs), op_2.time_ps());
}
TEST(ConvertXPlaneToOpMetricsDb, DeviceOpMetricsDb) {
static constexpr char kTfOp1[] = "TfOp1";
static constexpr char kTfOp2[] = "TfOp2";
static constexpr char kKernel1[] = "kernel1";
static constexpr char kKernel2[] = "kernel2";
static constexpr char kKernel3[] = "kernel3";
constexpr int64_t kKernel1StartNs = 100000;
constexpr int64_t kKernel1DurationNs = 8000;
constexpr int64_t kKernel2StartNs = 110000;
constexpr int64_t kKernel2DurationNs = 10000;
constexpr int64_t kKernel3StartNs = 120000;
constexpr int64_t kKernel3DurationNs = 10000;
XSpace xspace;
XPlane* xplane = GetOrCreateGpuXPlane(&xspace, 0);
XPlaneBuilder device_plane(xplane);
XLineBuilder stream1 = device_plane.GetOrCreateLine(10);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel1StartNs,
kKernel1DurationNs, /*on_device=*/true, /*kernel_name=*/kKernel1,
&device_plane, &stream1);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel2StartNs,
kKernel2DurationNs, /*on_device=*/true, /*kernel_name=*/kKernel2,
&device_plane, &stream1);
XLineBuilder stream2 = device_plane.GetOrCreateLine(20);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel1StartNs,
kKernel1DurationNs, /*on_device=*/true, /*kernel_name=*/kKernel1,
&device_plane, &stream2);
AddTensorFlowOpEvent(absl::StrCat(kTfOp1, ":", kTfOp1), kKernel2StartNs,
kKernel2DurationNs, /*on_device=*/true, /*kernel_name=*/kKernel2,
&device_plane, &stream2);
AddTensorFlowOpEvent(absl::StrCat(kTfOp2, ":", kTfOp2), kKernel3StartNs,
kKernel3DurationNs, /*on_device=*/true, /*kernel_name=*/kKernel3,
&device_plane, &stream2);
OpMetricsDb op_metrics = ConvertDeviceTraceXPlaneToOpMetricsDb(*xplane);
EXPECT_EQ(4, op_metrics.metrics_db_size());
uint64 total_op_duration = tsl::profiler::NanoToPico(
kKernel1DurationNs * 2 + kKernel2DurationNs * 2 + kKernel3DurationNs);
EXPECT_EQ(total_op_duration, op_metrics.total_op_time_ps());
uint64 total_duration = tsl::profiler::NanoToPico(
kKernel3StartNs + kKernel3DurationNs - kKernel1StartNs);
EXPECT_EQ(std::max(total_duration, total_op_duration),
op_metrics.total_time_ps());
const OpMetrics& op_1 = op_metrics.metrics_db().at(0);
EXPECT_EQ(absl::StrCat(kTfOp1, "/", kKernel1), op_1.name());
EXPECT_EQ(kTfOp1, op_1.category());
EXPECT_EQ(2, op_1.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kKernel1DurationNs) * 2, op_1.time_ps());
const OpMetrics& op_2 = op_metrics.metrics_db().at(1);
EXPECT_EQ(absl::StrCat(kTfOp1, "/", kKernel2), op_2.name());
EXPECT_EQ(kTfOp1, op_2.category());
EXPECT_EQ(2, op_2.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kKernel2DurationNs) * 2, op_2.time_ps());
const OpMetrics& op_3 = op_metrics.metrics_db().at(2);
EXPECT_EQ(absl::StrCat(kTfOp2, "/", kKernel3), op_3.name());
EXPECT_EQ(kTfOp2, op_3.category());
EXPECT_EQ(1, op_3.occurrences());
EXPECT_EQ(tsl::profiler::NanoToPico(kKernel3DurationNs), op_3.time_ps());
const OpMetrics& idle = op_metrics.metrics_db().at(3);
EXPECT_EQ(kIdle, idle.name());
EXPECT_EQ(kIdle, idle.category());
EXPECT_EQ(tsl::profiler::NanoToPico(0), idle.time_ps());
}
TEST(ConvertXPlaneToOpMetricsDb, TpuDeviceOpMetricsDb) {
XSpace xspace;
XPlane* xplane = GetOrCreateTpuXPlane(&xspace, 0, "TPU V4",
0,
0);
XPlaneBuilder device_plane(xplane);
device_plane.AddStatValue(
*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kTotalProfileDurationPs)),
1000);
XLineBuilder stream1 = device_plane.GetOrCreateLine(10);
AddTensorFlowTpuOpEvent("MatMul", "while:MatMul", 0, 10, "MatMul", 34, 45, 2,
5, 1, 1, &device_plane, &stream1);
OpMetricsDb op_metrics = ConvertTpuDeviceTraceXPlaneToOpMetricsDb(*xplane);
#if defined(PLATFORM_GOOGLE)
EXPECT_THAT(op_metrics,
EqualsProto(R"pb(metrics_db {
hlo_module_id: 1
self_time_ps: 10000
flops: 68
occurrences: 2
name: "MatMul"
time_ps: 10000
category: "MatMul"
provenance: "while:MatMul"
min_time_ps: 10000
}
metrics_db { name: "IDLE" category: "IDLE" }
total_time_ps: 10000
total_op_time_ps: 10000
)pb"));
#endif
}
TEST(ConvertXPlaneToOpMetricsDb, HostXPlaneWithXlaOps) {
XPlane xplane;
XPlaneBuilder plane(&xplane);
XLineBuilder line = plane.GetOrCreateLine(10);
AddXlaCpuOpEvent("xla_op", "tf_op", 100000, 8000, &plane, &line);
AddXlaCpuOpEvent("xla_op2", "tf_op2", 110000, 10000, &plane, &line);
OpMetricsDb op_metrics = ConvertHostThreadsXPlaneToOpMetricsDb(xplane);
#if defined(PLATFORM_GOOGLE)
EXPECT_THAT(op_metrics, EqualsProto(R"pb(metrics_db {
self_time_ps: 8000000
occurrences: 1
name: "tf_op"
time_ps: 8000000
}
metrics_db {
self_time_ps: 10000000
occurrences: 1
name: "tf_op2"
time_ps: 10000000
}
metrics_db {
self_time_ps: 2000000
name: "IDLE"
time_ps: 2000000
category: "IDLE"
}
total_time_ps: 20000000
total_op_time_ps: 18000000
precision_stats {}
)pb"));
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_op_metrics_db.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_op_metrics_db_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d7587362-ad42-4b12-b651-20d6bd9f85a2 | cpp | tensorflow/tensorflow | rename_node | tensorflow/tools/graph_transforms/rename_node.cc | tensorflow/tools/graph_transforms/rename_node_test.cc | #include <string>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
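// Graph transform that renames one node and rewrites every reference to it,
// preserving control-input prefixes and output-port suffixes.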
Status RenameNode(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
if (!context.params.count("old_node_name") ||
(context.params.at("old_node_name").size() != 1) ||
!context.params.count("new_node_name") ||
(context.params.at("new_node_name").size() != 1)) {
return errors::InvalidArgument(
"rename_node expects exactly one 'old_node_name' and one "
"'new_node_name' argument, e.g. "
"rename_node(old_attribute_name=super/deep/output, "
"new_attribute_name=output)");
}
const std::string old_node_name = context.params.at("old_node_name")[0];
const std::string new_node_name = context.params.at("new_node_name")[0];
output_graph_def->Clear();
for (const NodeDef& input_node : input_graph_def.node()) {
NodeDef* node = output_graph_def->mutable_node()->Add();
*node = input_node;
if (node->name() == new_node_name) {
return Status(absl::StatusCode::kInvalidArgument,
"A node is alreading using " + new_node_name + "as name.");
}
if (node->name() == old_node_name) {
node->set_name(new_node_name);
}
for (std::string& input_name : *node->mutable_input()) {
std::string prefix;
std::string input_node_name;
std::string suffix;
NodeNamePartsFromInput(input_name, &prefix, &input_node_name, &suffix);
if (input_node_name == old_node_name) {
std::string new_input_name = prefix + new_node_name + suffix;
input_name = new_input_name;
}
}
}
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("rename_node", RenameNode);
}
} | #include <string>
#include <utility>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status RenameNode(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
TEST(RenameNodeTest, Rename) {
GraphDef in_graph;
NodeDef* node = in_graph.add_node();
node->set_name("input");
node->set_op("Placeholder");
NodeDef* node_splitter = in_graph.add_node();
node_splitter->set_name("splitter");
node_splitter->set_op("Split");
NodeDef* node_adder = in_graph.add_node();
node_adder->set_op("Add");
node_adder->set_name("adder");
node_adder->add_input("splitter");
node_adder->add_input("splitter:1");
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"adder"};
context.params.insert(std::pair<string, std::vector<string>>(
{"old_node_name", {std::string("splitter")}}));
context.params.insert(std::pair<string, std::vector<string>>(
{"new_node_name", {string("demux")}}));
TF_ASSERT_OK(RenameNode(in_graph, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("demux"));
EXPECT_EQ(1, node_lookup.count("adder"));
EXPECT_EQ(2, node_lookup["adder"]->input().size());
EXPECT_EQ("demux", node_lookup["adder"]->input()[0]);
EXPECT_EQ("demux:1", node_lookup["adder"]->input()[1]);
}
TEST(RenameNodeTest, FailWhenNameAlreadyExists) {
GraphDef in_graph;
NodeDef* node = in_graph.add_node();
node->set_name("input");
node->set_op("Placeholder");
NodeDef* node_splitter = in_graph.add_node();
node_splitter->set_name("splitter");
node_splitter->set_op("Split");
NodeDef* node_adder = in_graph.add_node();
node_adder->set_op("Add");
node_adder->set_name("adder");
node_adder->add_input("splitter");
node_adder->add_input("splitter:1");
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"adder"};
context.params.insert(std::pair<string, std::vector<string>>(
{"old_node_name", {std::string("splitter")}}));
context.params.insert(std::pair<string, std::vector<string>>(
{"new_node_name", {string("adder")}}));
EXPECT_FALSE(RenameNode(in_graph, context, &result).ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_node.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_node_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
38a634d6-0df3-4df7-8bf8-cc745ad43372 | cpp | tensorflow/tensorflow | abi | third_party/xla/third_party/tsl/tsl/platform/abi.cc | third_party/xla/third_party/tsl/tsl/platform/abi_test.cc | #include "tsl/platform/abi.h"
#include "tsl/platform/types.h"
#if defined(_MSC_VER)
#include <windows.h>
#include <cstring>
#else
#include <cxxabi.h>
#include <cstdlib>
#endif
#include <memory>
#include <string>
#if defined(_MSC_VER)
extern "C" char* __unDName(char* output_string, const char* name,
int max_string_length, void* (*p_alloc)(std::size_t),
void (*p_free)(void*), unsigned short disable_flags);
#endif
namespace tsl {
namespace port {
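// Demangles a type name where the platform supports it (__unDName on MSVC,
// abi::__cxa_demangle elsewhere); returns the input unchanged on failure.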
string MaybeAbiDemangle(const char* name) {
#if defined(_MSC_VER)
// __unDName allocates the result with the allocator passed in (std::malloc),
// so it must be released with std::free rather than the default deleter.
std::unique_ptr<char, void (*)(void*)> demangled{
__unDName(nullptr, name, 0, std::malloc, std::free,
static_cast<unsigned short>(0)),
std::free};
return string(demangled.get() != nullptr ? demangled.get() : name);
#else
int status = 0;
std::unique_ptr<char, void (*)(void*)> res{
abi::__cxa_demangle(name, nullptr, nullptr, &status), std::free};
return (status == 0) ? res.get() : name;
#endif
}
}
} | #include "tsl/platform/abi.h"
#include <typeinfo>
#include "tsl/platform/test.h"
namespace tsl {
struct MyRandomPODType {};
TEST(AbiTest, AbiDemangleTest) {
EXPECT_EQ(port::MaybeAbiDemangle(typeid(int).name()), "int");
#ifdef PLATFORM_WINDOWS
const char pod_type_name[] = "struct tsl::MyRandomPODType";
#else
const char pod_type_name[] = "tsl::MyRandomPODType";
#endif
EXPECT_EQ(port::MaybeAbiDemangle(typeid(MyRandomPODType).name()),
pod_type_name);
EXPECT_EQ(
port::MaybeAbiDemangle("help! i'm caught in a C++ mangle factoryasdf"),
"help! i'm caught in a C++ mangle factoryasdf");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/abi.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/abi_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d0c9aec6-77ab-4a35-8e60-bb07389258da | cpp | tensorflow/tensorflow | hlo_program_serdes | third_party/xla/xla/python/ifrt/hlo/hlo_program_serdes.cc | third_party/xla/xla/python/ifrt/hlo/hlo_program_serdes_test.cc | #include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/Serialization.h"
#include "xla/mlir/utils/error_util.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/python/ifrt/hlo/hlo_program.h"
#include "xla/python/ifrt/serdes.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
class HloProgramSerDes : public llvm::RTTIExtends<HloProgramSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::XlaProgram";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const auto& program = llvm::cast<HloProgram>(serializable);
if (program.mlir_module == nullptr) {
return absl::InvalidArgumentError("Unable to serialize null MLIR module");
}
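// Clone the module so that running the serialization pipeline cannot mutate
// the caller's copy.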
mlir::OwningOpRef<mlir::ModuleOp> module(
llvm::cast<mlir::ModuleOp>(program.mlir_module->clone()));
TF_ASSIGN_OR_RETURN(std::string serialized,
xla::SerializeUsingVersionedStablehlo(
*module, xla::GetDefaultStablehloVersion()));
return serialized;
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions>) override {
auto context = std::make_unique<mlir::MLIRContext>(
mlir::MLIRContext::Threading::DISABLED);
mlir::BaseScopedDiagnosticHandler diagnostic_handler(context.get());
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::stablehlo::deserializePortableArtifact(serialized, context.get());
if (!module) {
const absl::Status status = diagnostic_handler.ConsumeStatus();
return absl::InvalidArgumentError(
absl::StrCat("Failed to deserialize StableHLO module;\n\nDetailed "
"error from MLIR: ",
status.message()));
}
mlir::PassManager pm(context.get());
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
if (!mlir::succeeded(pm.run(*module))) {
const absl::Status status = diagnostic_handler.ConsumeStatus();
return absl::InvalidArgumentError(absl::StrCat(
"Failed to legalize StableHLO to MHLO;\n\nDetailed error from MLIR: ",
status.message()));
}
return std::make_unique<HloProgram>(std::move(context), std::move(module));
}
static char ID;
};
char HloProgramSerDes::ID = 0;
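// File-local initializer: registers this SerDes for HloProgram at load time.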
bool register_xla_program_serdes = ([]() {
RegisterSerDes<HloProgram>(std::make_unique<HloProgramSerDes>());
}(), true);
}
}
} | #include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/DebugStringHelper.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/python/ifrt/hlo/hlo_program.h"
#include "xla/python/ifrt/serdes.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::HasSubstr;
using ::testing::Not;
using ::tsl::testing::StatusIs;
TEST(HloProgramSerDesTest, RoundTrip) {
static constexpr absl::string_view kMlirModuleStr = R"(
module {
func.func @main(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
%0 = "mhlo.copy"(%arg0) : (tensor<2x3xf32>) -> tensor<2x3xf32>
%1 = mhlo.constant dense<1.000000e+00> : tensor<f32>
%2 = "mhlo.broadcast"(%1) {broadcast_sizes = dense<[2, 3]> : tensor<2xi64>} : (tensor<f32>) -> tensor<2x3xf32>
%3 = mhlo.add %0, %2 : tensor<2x3xf32>
return %3 : tensor<2x3xf32>
}
})";
Serialized serialized;
{
auto context = std::make_unique<mlir::MLIRContext>();
TF_ASSERT_OK_AND_ASSIGN(
mlir::OwningOpRef<mlir::ModuleOp> module,
xla::ParseMlirModuleString(kMlirModuleStr, *context));
auto program =
std::make_unique<HloProgram>(std::move(context), std::move(module));
TF_ASSERT_OK_AND_ASSIGN(serialized, Serialize(*program));
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloProgram> xla_program,
Deserialize<HloProgram>(serialized, nullptr));
bool has_unsupported_dialect = false;
xla_program->mlir_module->walk([&](mlir::Operation *op) {
if (!llvm::isa<mlir::BuiltinDialect, mlir::func::FuncDialect,
mlir::mhlo::MhloDialect>(op->getDialect())) {
LOG(ERROR) << "Found an op with an unsupported dialect: "
<< mlir::debugString(op);
has_unsupported_dialect = true;
}
});
EXPECT_FALSE(has_unsupported_dialect);
}
TEST(HloProgramSerDesTest, SerializationError) {
static constexpr absl::string_view kMlirModuleStr = R"(
module {
func.func @main(%arg0: tensor<f32>) -> tensor<f32> {
%0 = "UnknownOp"(%arg0) : (tensor<f32>) -> tensor<f32>
return %0 : tensor<f32>
}
})";
Serialized serialized;
{
auto context = std::make_unique<mlir::MLIRContext>();
context->allowUnregisteredDialects();
TF_ASSERT_OK_AND_ASSIGN(
mlir::OwningOpRef<mlir::ModuleOp> module,
xla::ParseMlirModuleString(kMlirModuleStr, *context));
auto program =
std::make_unique<HloProgram>(std::move(context), std::move(module));
EXPECT_THAT(Serialize(*program),
StatusIs(Not(absl::StatusCode::kOk),
HasSubstr("Failed to serialize StableHLO")));
}
}
TEST(HloProgramSerDesTest, DeserializationError) {
static constexpr absl::string_view kMlirModuleStr = R"(
module {
func.func @main(%arg0: tensor<f32>) -> tensor<f32> {
return %arg0 : tensor<f32>
}
})";
Serialized serialized;
{
auto context = std::make_unique<mlir::MLIRContext>();
TF_ASSERT_OK_AND_ASSIGN(
mlir::OwningOpRef<mlir::ModuleOp> module,
xla::ParseMlirModuleString(kMlirModuleStr, *context));
auto program =
std::make_unique<HloProgram>(std::move(context), std::move(module));
TF_ASSERT_OK_AND_ASSIGN(serialized, Serialize(*program));
}
serialized.set_data("invalid data");
EXPECT_THAT(Deserialize<HloProgram>(serialized, nullptr),
StatusIs(Not(absl::StatusCode::kOk),
HasSubstr("Failed to deserialize StableHLO module")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/hlo/hlo_program_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/hlo/hlo_program_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
66f3c55a-dbb9-4df7-90ab-dabb19c6b4bc | cpp | tensorflow/tensorflow | topk_accuracy_eval_stage | tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.cc | tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage_test.cc | #include "tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.h"
#include <stdint.h>
#include <algorithm>
#include <numeric>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
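// Returns the indices of the k largest values, ties broken by lower index;
// e.g. values = {0.1, 0.7, 0.2} with k = 2 yields {1, 2}.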
std::vector<int> GetTopKIndices(const std::vector<float>& values, int k) {
std::vector<int> indices(values.size());
std::iota(indices.begin(), indices.end(), 0);
std::stable_sort(indices.begin(), indices.end(),
[&values](int a, int b) { return values[a] > values[b]; });
indices.resize(k);
return indices;
}
}
TfLiteStatus TopkAccuracyEvalStage::Init() {
num_runs_ = 0;
auto& params = config_.specification().topk_accuracy_eval_params();
if (!params.has_k()) {
LOG(ERROR) << "Value of k not provided for TopkAccuracyEvalStage";
return kTfLiteError;
}
accuracy_counts_ = std::vector<int>(params.k(), 0);
if (ground_truth_labels_.empty()) {
LOG(ERROR) << "Ground-truth labels are empty";
return kTfLiteError;
}
num_total_labels_ = ground_truth_labels_.size();
if (params.k() > num_total_labels_) {
LOG(ERROR) << "k is too large";
return kTfLiteError;
}
if (!model_output_shape_) {
LOG(ERROR) << "Model output details not correctly set";
return kTfLiteError;
}
if (model_output_shape_->size != 2 || model_output_shape_->data[0] != 1 ||
model_output_shape_->data[1] != num_total_labels_) {
LOG(ERROR) << "Invalid model_output_shape_";
return kTfLiteError;
}
if (model_output_type_ != kTfLiteFloat32 &&
model_output_type_ != kTfLiteUInt8 && model_output_type_ != kTfLiteInt8) {
LOG(ERROR) << "model_output_type_ not supported";
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus TopkAccuracyEvalStage::Run() {
if (!model_output_) {
LOG(ERROR) << "model_output_ not set correctly";
return kTfLiteError;
}
if (!ground_truth_label_) {
LOG(ERROR) << "ground_truth_label_ not provided";
return kTfLiteError;
}
auto& params = config_.specification().topk_accuracy_eval_params();
std::vector<float> probabilities;
probabilities.reserve(num_total_labels_);
if (model_output_type_ == kTfLiteFloat32) {
auto probs = static_cast<float*>(model_output_);
for (size_t i = 0; i < num_total_labels_; i++) {
probabilities.push_back(probs[i]);
}
} else if (model_output_type_ == kTfLiteUInt8) {
auto probs = static_cast<uint8_t*>(model_output_);
for (size_t i = 0; i < num_total_labels_; i++) {
probabilities.push_back(probs[i]);
}
} else if (model_output_type_ == kTfLiteInt8) {
auto probs = static_cast<int8_t*>(model_output_);
for (size_t i = 0; i < num_total_labels_; i++) {
probabilities.push_back(probs[i]);
}
}
std::vector<int> top_k = GetTopKIndices(probabilities, params.k());
UpdateCounts(top_k);
return kTfLiteOk;
}
EvaluationStageMetrics TopkAccuracyEvalStage::LatestMetrics() {
EvaluationStageMetrics metrics;
if (num_runs_ == 0) return metrics;
metrics.set_num_runs(num_runs_);
auto* topk_metrics =
metrics.mutable_process_metrics()->mutable_topk_accuracy_metrics();
for (const auto& count : accuracy_counts_) {
topk_metrics->add_topk_accuracies(static_cast<float>(count) / num_runs_);
}
return metrics;
}
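// A ground truth found at (0-based) rank i is a top-k hit for every k > i,
// so every count from rank i upward is incremented.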
void TopkAccuracyEvalStage::UpdateCounts(const std::vector<int>& topk_indices) {
for (size_t i = 0; i < topk_indices.size(); ++i) {
if (*ground_truth_label_ == ground_truth_labels_[topk_indices[i]]) {
for (size_t j = i; j < topk_indices.size(); j++) {
accuracy_counts_[j] += 1;
}
break;
}
}
num_runs_++;
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kTopkAccuracyEvalStageName[] = "topk_accuracy_eval_stage";
constexpr int kNumCategories = 1001;
EvaluationStageConfig GetTopkAccuracyEvalStageConfig() {
EvaluationStageConfig config;
config.set_name(kTopkAccuracyEvalStageName);
auto* params =
config.mutable_specification()->mutable_topk_accuracy_eval_params();
params->set_k(5);
return config;
}
template <typename T>
T* ResetOutputArray(T array[]) {
for (int i = 0; i < kNumCategories; i++) {
array[i] = 0;
}
return array;
}
std::vector<std::string> CreateGroundTruthLabels() {
std::vector<std::string> ground_truth_labels;
ground_truth_labels.reserve(kNumCategories);
for (int i = 0; i < kNumCategories; i++) {
ground_truth_labels.push_back(std::to_string(i));
}
return ground_truth_labels;
}
TEST(TopkAccuracyEvalStage, NoInitializers) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TopkAccuracyEvalStage, NoK) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
config.mutable_specification()
->mutable_topk_accuracy_eval_params()
->clear_k();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, NoGroundTruthLabels) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = {};
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, KTooLarge) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
config.mutable_specification()->mutable_topk_accuracy_eval_params()->set_k(
10000);
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, WeirdModelOutputShape) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories + 1;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, UnsupportedModelOutputType) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories + 1;
TfLiteType model_output_type = kTfLiteComplex64;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteError);
TfLiteIntArrayFree(model_output_shape);
}
TEST(TopkAccuracyEvalStage, NoInputs) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
EXPECT_EQ(stage.Run(), kTfLiteError);
}
TEST(TopkAccuracyEvalStage, InvalidGroundTruth) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
float array[kNumCategories];
float* tensor = ResetOutputArray(array);
tensor[0] = 0.8;
stage.SetEvalInputs(tensor, nullptr);
EXPECT_EQ(stage.Run(), kTfLiteError);
}
TEST(TopkAccuracyEvalStage, FloatTest_CorrectLabelsAtLastIndices) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
TfLiteType model_output_type = kTfLiteFloat32;
stage.SetTaskInfo(ground_truth_labels, model_output_type, model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
float array[kNumCategories];
float* tensor = ResetOutputArray(array);
tensor[4] = 0.9;
tensor[3] = 0.8;
tensor[2] = 0.7;
tensor[1] = 0.6;
tensor[0] = 0.5;
std::string ground_truth = "0";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_EQ(1, metrics.num_runs());
auto accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(4));
for (int i = 0; i < 4; ++i) {
EXPECT_FLOAT_EQ(0.0, accuracy_metrics.topk_accuracies(i));
}
ground_truth = "1";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(2, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(4));
EXPECT_FLOAT_EQ(0.5, accuracy_metrics.topk_accuracies(3));
for (int i = 0; i < 3; ++i) {
EXPECT_FLOAT_EQ(0.0, accuracy_metrics.topk_accuracies(i));
}
}
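// When the top-1 prediction always matches the ground truth, every top-k
// accuracy must stay at 1.0; verified below for each supported output type.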
class CorrectTopkAccuracyEvalTest : public ::testing::Test {
protected:
template <typename T>
void VerifyCorrectBehaviorForType(T ground_truth_0_value,
T ground_truth_1_value,
TfLiteType model_output_type) {
EvaluationStageConfig config = GetTopkAccuracyEvalStageConfig();
TopkAccuracyEvalStage stage = TopkAccuracyEvalStage(config);
std::vector<std::string> ground_truth_labels = CreateGroundTruthLabels();
TfLiteIntArray* model_output_shape = TfLiteIntArrayCreate(2);
model_output_shape->data[0] = 1;
model_output_shape->data[1] = kNumCategories;
stage.SetTaskInfo(ground_truth_labels, model_output_type,
model_output_shape);
EXPECT_EQ(stage.Init(), kTfLiteOk);
TfLiteIntArrayFree(model_output_shape);
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_EQ(0, metrics.num_runs());
auto accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
EXPECT_EQ(0, accuracy_metrics.topk_accuracies_size());
T array[kNumCategories];
T* tensor = ResetOutputArray(array);
tensor[0] = ground_truth_0_value;
std::string ground_truth = "0";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(1, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
for (int i = 0; i < accuracy_metrics.topk_accuracies_size(); ++i) {
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(i));
}
tensor[1] = ground_truth_1_value;
ground_truth = "1";
stage.SetEvalInputs(tensor, &ground_truth);
EXPECT_EQ(stage.Run(), kTfLiteOk);
metrics = stage.LatestMetrics();
EXPECT_EQ(2, metrics.num_runs());
accuracy_metrics = metrics.process_metrics().topk_accuracy_metrics();
for (int i = 0; i < accuracy_metrics.topk_accuracies_size(); ++i) {
EXPECT_FLOAT_EQ(1.0, accuracy_metrics.topk_accuracies(i));
}
}
};
TEST_F(CorrectTopkAccuracyEvalTest, FloatTest) {
VerifyCorrectBehaviorForType(static_cast<float>(0.8), static_cast<float>(0.9),
kTfLiteFloat32);
}
TEST_F(CorrectTopkAccuracyEvalTest, Int8Test) {
VerifyCorrectBehaviorForType(static_cast<int8_t>(1), static_cast<int8_t>(2),
kTfLiteInt8);
}
TEST_F(CorrectTopkAccuracyEvalTest, UInt8Test) {
VerifyCorrectBehaviorForType(static_cast<uint8_t>(1), static_cast<uint8_t>(2),
kTfLiteUInt8);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/topk_accuracy_eval_stage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b03d42a1-41b2-4114-b009-33cfe164365f | cpp | tensorflow/tensorflow | mkl_relu_op | tensorflow/core/kernels/mkl/mkl_relu_op.cc | tensorflow/core/kernels/mkl/mkl_relu_op_test.cc | #if defined(INTEL_MKL) && !defined(ENABLE_ONEDNN_V3)
#include <unordered_map>
#include "unsupported/Eigen/CXX11/Tensor"
#include "dnnl.hpp"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/mkl_util.h"
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
#include "tensorflow/core/platform/mutex.h"
#endif
using dnnl::algorithm;
using dnnl::eltwise_forward;
using dnnl::memory;
using dnnl::prop_kind;
using dnnl::stream;
using EltwiseFwdPd = dnnl::eltwise_forward::primitive_desc;
using EltwiseBwdPd = dnnl::eltwise_backward::primitive_desc;
namespace tensorflow {
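// Identifies one forward eltwise primitive: source dims/layout, the oneDNN
// algorithm kind, and alpha/beta (alpha is the upper bound for bounded ReLU
// and the negative-input slope for leaky ReLU; unused algorithms pass 0).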
template <typename T>
class MklEltwiseFwdParams {
public:
memory::dims src_dims;
memory::desc src_md;
algorithm alg_kind;
float alpha;
float beta;
MklEltwiseFwdParams(memory::dims src_dims, memory::desc src_md,
algorithm alg_kind, float alpha, float beta)
: src_dims(src_dims),
src_md(src_md),
alg_kind(alg_kind),
alpha(alpha),
beta(beta) {}
};
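// Caches a built oneDNN eltwise_forward primitive with placeholder-bound
// src/dst memory; Execute() points the handles at the caller's buffers, runs
// the primitive on the given stream, then resets the handles to DummyData.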
template <typename T>
class MklEltwiseFwdPrimitive : public MklPrimitive {
public:
explicit MklEltwiseFwdPrimitive(const MklEltwiseFwdParams<T>& fwdParams)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
if (context_.eltwise_fwd == nullptr) {
Setup(fwdParams);
}
}
~MklEltwiseFwdPrimitive() {}
void Execute(const T* src_data, T* dst_data,
std::shared_ptr<stream> fwd_stream) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
#ifndef ENABLE_ONEDNN_OPENMP
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)), *fwd_stream);
context_.dst_mem->set_data_handle(static_cast<void*>(dst_data),
*fwd_stream);
#else
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)));
context_.dst_mem->set_data_handle(static_cast<void*>(dst_data));
#endif
DCHECK_EQ(context_.fwd_primitives.size(),
context_.fwd_primitives_args.size());
execute_primitives(context_.fwd_primitives, fwd_stream,
context_.fwd_primitives_args);
context_.src_mem->set_data_handle(DummyData);
context_.dst_mem->set_data_handle(DummyData);
}
std::shared_ptr<EltwiseFwdPd> GetEltwiseFwdPd() { return context_.fwd_pd; }
private:
struct EltwiseFwdContext {
std::shared_ptr<memory> src_mem;
std::shared_ptr<memory> dst_mem;
std::shared_ptr<dnnl::eltwise_forward::desc> fwd_desc;
std::shared_ptr<EltwiseFwdPd> fwd_pd;
std::shared_ptr<memory::desc> src_md;
std::shared_ptr<memory::desc> dst_md;
std::shared_ptr<dnnl::primitive> eltwise_fwd;
std::vector<dnnl::primitive> fwd_primitives;
std::vector<std::unordered_map<int, memory>> fwd_primitives_args;
EltwiseFwdContext()
: src_mem(nullptr),
dst_mem(nullptr),
fwd_desc(nullptr),
fwd_pd(nullptr),
src_md(nullptr),
dst_md(nullptr),
eltwise_fwd(nullptr) {}
};
void Setup(const MklEltwiseFwdParams<T>& fwdParams) {
context_.src_md.reset(new memory::desc(fwdParams.src_md.data));
context_.fwd_desc.reset(new eltwise_forward::desc(
prop_kind::forward, fwdParams.alg_kind, *context_.src_md,
fwdParams.alpha, fwdParams.beta));
context_.fwd_pd.reset(new EltwiseFwdPd(*context_.fwd_desc, cpu_engine_));
auto fwd_pd = context_.fwd_pd.get();
context_.src_mem.reset(
new memory(fwd_pd->src_desc(), cpu_engine_, DummyData));
context_.dst_mem.reset(
new memory(fwd_pd->dst_desc(), cpu_engine_, DummyData));
context_.eltwise_fwd.reset(new eltwise_forward(*context_.fwd_pd));
context_.fwd_primitives_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem}, {DNNL_ARG_DST, *context_.dst_mem}});
context_.fwd_primitives.push_back(*context_.eltwise_fwd);
}
struct EltwiseFwdContext context_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
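// Process-wide cache of forward primitives, keyed on source dims, algorithm
// kind, alpha and beta, so identical activations share one primitive.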
template <typename T>
class MklEltwiseFwdPrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklEltwiseFwdPrimitive<T>* Get(
const MklEltwiseFwdParams<T>& fwdParams) {
MklEltwiseFwdPrimitive<T>* eltwise_forward = nullptr;
eltwise_forward = static_cast<MklEltwiseFwdPrimitive<T>*>(
MklEltwiseFwdPrimitiveFactory<T>::GetInstance().GetEltwiseFwd(
fwdParams));
if (eltwise_forward == nullptr) {
eltwise_forward = new MklEltwiseFwdPrimitive<T>(fwdParams);
MklEltwiseFwdPrimitiveFactory<T>::GetInstance().SetEltwiseFwd(
fwdParams, eltwise_forward);
}
return eltwise_forward;
}
static MklEltwiseFwdPrimitiveFactory& GetInstance() {
static MklEltwiseFwdPrimitiveFactory instance_;
return instance_;
}
private:
MklEltwiseFwdPrimitiveFactory() {}
~MklEltwiseFwdPrimitiveFactory() {}
static string CreateKey(const MklEltwiseFwdParams<T>& fwdParams) {
string prefix = "eltwise_fwd";
FactoryKeyCreator key_creator;
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(fwdParams.src_dims);
key_creator.AddAsKey<int>(static_cast<int>(fwdParams.alg_kind));
key_creator.AddAsKey<float>(static_cast<float>(fwdParams.alpha));
key_creator.AddAsKey<float>(static_cast<float>(fwdParams.beta));
return key_creator.GetKey();
}
MklPrimitive* GetEltwiseFwd(const MklEltwiseFwdParams<T>& fwdParams) {
string key = CreateKey(fwdParams);
return this->GetOp(key);
}
void SetEltwiseFwd(const MklEltwiseFwdParams<T>& fwdParams,
MklPrimitive* op) {
string key = CreateKey(fwdParams);
this->SetOp(key, op);
}
};
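// forward_input_type records which forward-op tensor the backward primitive
// consumes: DNNL_ARG_SRC for most activations, DNNL_ARG_DST for tanh, whose
// gradient is computed from the forward output.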
template <typename T>
class MklEltwiseBwdParams {
public:
memory::dims src_dims;
memory::desc common_md;
algorithm alg_kind;
float alpha;
float beta;
int forward_input_type;
MklEltwiseBwdParams(const memory::dims& src_dims,
const memory::desc& common_md, algorithm alg_kind,
float alpha, float beta, int forward_input_type = -1)
: src_dims(src_dims),
common_md(common_md),
alg_kind(alg_kind),
alpha(alpha),
beta(beta),
forward_input_type(forward_input_type) {}
};
template <typename T>
class MklEltwiseBwdPrimitive : public MklPrimitive {
public:
explicit MklEltwiseBwdPrimitive(const MklEltwiseBwdParams<T>& bwdParams)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
if (context_.eltwise_bwd == nullptr) {
Setup(bwdParams);
}
}
~MklEltwiseBwdPrimitive() {}
void Execute(const T* src_data, const T* diff_dst_data, T* diff_src_data,
std::shared_ptr<stream> bwd_stream) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
#ifndef ENABLE_ONEDNN_OPENMP
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)), *bwd_stream);
context_.diff_dst_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(diff_dst_data)), *bwd_stream);
context_.diff_src_mem->set_data_handle(static_cast<void*>(diff_src_data),
*bwd_stream);
#else
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)));
context_.diff_dst_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(diff_dst_data)));
context_.diff_src_mem->set_data_handle(static_cast<void*>(diff_src_data));
#endif
DCHECK_EQ(context_.bwd_primitives.size(),
context_.bwd_primitives_args.size());
execute_primitives(context_.bwd_primitives, bwd_stream,
context_.bwd_primitives_args);
context_.src_mem->set_data_handle(DummyData);
context_.diff_dst_mem->set_data_handle(DummyData);
context_.diff_src_mem->set_data_handle(DummyData);
}
std::shared_ptr<EltwiseBwdPd> GetEltwiseBwdPd() { return context_.bwd_pd; }
private:
struct EltwiseBwdContext {
std::shared_ptr<memory> src_mem;
std::shared_ptr<memory> diff_dst_mem;
std::shared_ptr<memory> diff_src_mem;
std::shared_ptr<dnnl::eltwise_backward::desc> bwd_desc;
std::shared_ptr<memory::desc> src_md;
std::shared_ptr<memory::desc> diff_dst_md;
std::shared_ptr<memory::desc> common_md;
std::shared_ptr<dnnl::eltwise_forward::desc> fwd_desc;
std::shared_ptr<EltwiseFwdPd> fwd_pd;
std::shared_ptr<EltwiseBwdPd> bwd_pd;
std::shared_ptr<dnnl::primitive> eltwise_bwd;
std::vector<dnnl::primitive> bwd_primitives;
std::vector<MemoryArgsMap> bwd_primitives_args;
EltwiseBwdContext()
: src_mem(nullptr),
diff_dst_mem(nullptr),
diff_src_mem(nullptr),
src_md(nullptr),
diff_dst_md(nullptr),
common_md(nullptr),
fwd_desc(nullptr),
fwd_pd(nullptr),
bwd_pd(nullptr),
eltwise_bwd(nullptr) {}
};
void Setup(const MklEltwiseBwdParams<T>& bwdParams) {
context_.src_md.reset(new memory::desc(bwdParams.common_md.data));
context_.diff_dst_md.reset(new memory::desc(bwdParams.common_md.data));
context_.fwd_desc.reset(new dnnl::eltwise_forward::desc(
prop_kind::forward_training, bwdParams.alg_kind, *context_.src_md,
bwdParams.alpha, bwdParams.beta));
context_.fwd_pd.reset(new EltwiseFwdPd(*context_.fwd_desc, cpu_engine_));
context_.bwd_desc.reset(new dnnl::eltwise_backward::desc(
bwdParams.alg_kind, *context_.diff_dst_md, *context_.src_md,
bwdParams.alpha, bwdParams.beta));
context_.bwd_pd.reset(
new EltwiseBwdPd(*context_.bwd_desc, cpu_engine_, *context_.fwd_pd));
auto bwd_pd = context_.bwd_pd.get();
context_.src_mem.reset(
new memory(bwd_pd->src_desc(), cpu_engine_, DummyData));
context_.diff_dst_mem.reset(
new memory(bwd_pd->diff_dst_desc(), cpu_engine_, DummyData));
context_.diff_src_mem.reset(
new memory(bwd_pd->diff_src_desc(), cpu_engine_, DummyData));
context_.eltwise_bwd.reset(new dnnl::eltwise_backward(*context_.bwd_pd));
context_.bwd_primitives_args.push_back(
{{bwdParams.forward_input_type, *context_.src_mem},
{DNNL_ARG_DIFF_DST, *context_.diff_dst_mem},
{DNNL_ARG_DIFF_SRC, *context_.diff_src_mem}});
context_.bwd_primitives.push_back(*context_.eltwise_bwd);
}
struct EltwiseBwdContext context_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
template <typename T>
class MklEltwiseBwdPrimitiveFactory : public MklPrimitiveFactory<T> {
private:
MklEltwiseBwdPrimitiveFactory() {}
~MklEltwiseBwdPrimitiveFactory() {}
public:
static MklEltwiseBwdPrimitive<T>* Get(
const MklEltwiseBwdParams<T>& bwdParams) {
MklEltwiseBwdPrimitive<T>* eltwise_backward = nullptr;
eltwise_backward = static_cast<MklEltwiseBwdPrimitive<T>*>(
MklEltwiseBwdPrimitiveFactory<T>::GetInstance().GetEltwiseBwd(
bwdParams));
if (eltwise_backward == nullptr) {
eltwise_backward = new MklEltwiseBwdPrimitive<T>(bwdParams);
MklEltwiseBwdPrimitiveFactory<T>::GetInstance().SetEltwiseBwd(
bwdParams, eltwise_backward);
}
return eltwise_backward;
}
static MklEltwiseBwdPrimitiveFactory& GetInstance() {
static MklEltwiseBwdPrimitiveFactory instance_;
return instance_;
}
private:
static string CreateKey(const MklEltwiseBwdParams<T>& bwdParams) {
string prefix = "eltwise_bwd";
FactoryKeyCreator key_creator;
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(bwdParams.src_dims);
key_creator.AddAsKey(static_cast<int>(bwdParams.alg_kind));
key_creator.AddAsKey(static_cast<float>(bwdParams.alpha));
key_creator.AddAsKey(static_cast<float>(bwdParams.beta));
return key_creator.GetKey();
}
MklPrimitive* GetEltwiseBwd(const MklEltwiseBwdParams<T>& bwdParams) {
string key = CreateKey(bwdParams);
return this->GetOp(key);
}
void SetEltwiseBwd(const MklEltwiseBwdParams<T>& bwdParams,
MklPrimitive* op) {
string key = CreateKey(bwdParams);
this->SetOp(key, op);
}
};
typedef Eigen::ThreadPoolDevice CPUDevice;
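// Shared forward kernel: scalar inputs take the Compute_Scalar fast path,
// empty tensors short-circuit, and the input is reordered only when its
// layout differs from the one the primitive expects; otherwise the input
// buffer may be forwarded to the output in place.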
template <typename Device, typename T, algorithm alg_kind>
class MklReluOpBase : public OpKernel {
public:
~MklReluOpBase() {}
explicit MklReluOpBase(OpKernelConstruction* context, float alpha, float beta)
: OpKernel(context), alpha_(alpha), beta_(beta) {}
virtual void Compute_Scalar(OpKernelContext* context) = 0;
void Compute(OpKernelContext* context) override {
try {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
if (src_tensor.dims() == 0) {
Compute_Scalar(context);
return;
}
MklDnnShape dnn_shape_dst;
TensorShape tf_shape_dst;
Tensor* dst_tensor = nullptr;
if (src_tensor.shape().num_elements() == 0) {
dnn_shape_dst.SetMklTensor(false);
tf_shape_dst = MklGetInput(context, src_index).shape();
AllocateOutputSetMklShape(context, dst_index, &dst_tensor, tf_shape_dst,
dnn_shape_dst);
return;
}
MklDnnData<T> src(&cpu_engine);
memory::dims src_dims;
memory::desc src_md({}, memory::data_type::undef,
memory::format_tag::undef);
if (dnn_shape_src.IsMklTensor()) {
src_md = dnn_shape_src.GetMklLayout();
src_dims = dnn_shape_src.GetSizesAsMklDnnDims();
} else {
src_dims = TFShapeToMklDnnDims(src_tensor.shape());
auto src_strides = CalculateTFStrides(src_dims);
src_md = MklDnnData<T>::CreateBlockedMemDesc(src_dims, src_strides);
}
MklEltwiseFwdParams<T> fwdParams(src_dims, src_md, alg_kind, alpha_,
beta_);
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
MklEltwiseFwdPrimitive<T>* eltwise_fwd =
MklEltwiseFwdPrimitiveFactory<T>::Get(fwdParams);
auto eltwise_fwd_pd = eltwise_fwd->GetEltwiseFwdPd();
std::shared_ptr<stream> fwd_cpu_stream;
fwd_cpu_stream.reset(CreateStream(&eigen_tp, eltwise_fwd->GetEngine()));
bool is_src_reordered = false;
const T* src_data = src_tensor.flat<T>().data();
if (src_md != eltwise_fwd_pd->src_desc()) {
src.SetUsrMem(src_md, &src_tensor);
src.CheckReorderToOpMem(eltwise_fwd_pd->src_desc(), cpu_engine,
context);
src_data = const_cast<T*>(
reinterpret_cast<T*>(src.GetOpMem().get_data_handle()));
is_src_reordered = true;
}
if (is_src_reordered || dnn_shape_src.IsMklTensor()) {
dnn_shape_dst.SetMklTensor(true);
auto dst_pd = eltwise_fwd_pd->dst_desc();
dnn_shape_dst.SetMklLayout(&dst_pd);
dnn_shape_dst.SetElemType(MklDnnType<T>());
if (dnn_shape_src.IsMklTensor()) {
dnn_shape_dst.SetTfLayout(dnn_shape_src.GetDimension(),
dnn_shape_src.GetSizesAsMklDnnDims(),
dnn_shape_src.GetTfDataFormat());
} else {
dnn_shape_dst.SetTfLayout(src_tensor.dims(),
TFShapeToMklDnnDims(src_tensor.shape()),
MklTensorFormat::FORMAT_BLOCKED);
}
tf_shape_dst.AddDim(dst_pd.get_size() / sizeof(T));
} else {
dnn_shape_dst.SetMklTensor(false);
tf_shape_dst = src_tensor.shape();
}
if (is_src_reordered) {
AllocateOutputSetMklShape(context, dst_index, &dst_tensor, tf_shape_dst,
dnn_shape_dst);
} else {
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{static_cast<const int>(src_index)},
static_cast<const int>(dst_index),
tf_shape_dst, &dst_tensor));
AllocateOutputSetMklShape(context, dst_index, dnn_shape_dst);
}
T* dst_data = dst_tensor->flat<T>().data();
eltwise_fwd->Execute(src_data, dst_data, fwd_cpu_stream);
} catch (dnnl::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
OP_REQUIRES_OK(
context,
errors::Aborted("Operation received an exception:", error_msg));
}
}
private:
engine cpu_engine = engine(engine::kind::cpu, 0);
std::shared_ptr<EltwiseFwdPd> relu_fwd_pd;
protected:
float alpha_;
float beta_;
};
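// Shared backward kernel. The four src/diff_dst layout combinations below
// (plain/plain, MKL/plain, plain/MKL, MKL/MKL) all reduce to picking one
// common memory descriptor before building the backward primitive.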
template <typename Device, typename T, algorithm alg_kind>
class MklReluGradOpBase : public OpKernel {
public:
~MklReluGradOpBase() {}
explicit MklReluGradOpBase(OpKernelConstruction* context, float alpha,
float beta)
: OpKernel(context), alpha_(alpha), beta_(beta) {}
virtual void Compute_Scalar(OpKernelContext* context) = 0;
virtual int GetDiffDstIndex() const { return 0; }
virtual int GetSrcIndex() const { return 1; }
virtual int GetDiffSrcIndex() const { return 0; }
virtual int GetTypeOfInputTensorFromFwdOp() const { return DNNL_ARG_SRC; }
  void Compute(OpKernelContext* context) override {
try {
MklDnnData<T> src(&cpu_engine);
MklDnnData<T> diff_dst(&cpu_engine);
size_t diff_dst_index = GetDiffDstIndex();
size_t src_index = GetSrcIndex();
const size_t diff_src_index = GetDiffSrcIndex();
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_src, dnn_shape_diff_dst;
GetMklShape(context, src_index, &dnn_shape_src);
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
int src_dims_size = src_tensor.dims();
if (src_dims_size == 0) {
Compute_Scalar(context);
return;
}
TensorShape tf_shape_diff_src;
MklDnnShape dnn_shape_diff_src;
if (src_tensor.shape().num_elements() == 0) {
dnn_shape_diff_src.SetMklTensor(false);
tf_shape_diff_src = MklGetInput(context, diff_src_index).shape();
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
tf_shape_diff_src, dnn_shape_diff_src);
return;
}
memory::dims src_dims = {};
memory::desc src_md({}, memory::data_type::undef,
memory::format_tag::undef);
memory::desc diff_dst_md({}, memory::data_type::undef,
memory::format_tag::undef);
if (!dnn_shape_src.IsMklTensor() && !dnn_shape_diff_dst.IsMklTensor()) {
src_dims = TFShapeToMklDnnDims(src_tensor.shape());
auto src_strides = CalculateTFStrides(src_dims);
src_md = MklDnnData<T>::CreateBlockedMemDesc(src_dims, src_strides);
diff_dst_md = src_md;
} else if (dnn_shape_src.IsMklTensor() &&
!dnn_shape_diff_dst.IsMklTensor()) {
src_md = dnn_shape_src.GetMklLayout();
src_dims = dnn_shape_src.GetSizesAsMklDnnDims();
MklTensorFormat src_mkl_data_format = dnn_shape_src.GetTfDataFormat();
auto src_tf_data_format =
MklDnnDataFormatToTFDataFormat(src_mkl_data_format);
auto diff_dst_dims = TFShapeToMklDnnDimsInNCHW(diff_dst_tensor.shape(),
src_tf_data_format);
diff_dst_md = memory::desc(
diff_dst_dims, MklDnnType<T>(),
MklTensorFormatToMklDnnDataFormat(src_mkl_data_format));
} else if (!dnn_shape_src.IsMklTensor() &&
dnn_shape_diff_dst.IsMklTensor()) {
diff_dst_md = dnn_shape_diff_dst.GetMklLayout();
MklTensorFormat diff_dst_mkl_data_format =
dnn_shape_diff_dst.GetTfDataFormat();
auto diff_dst_tf_data_format =
MklDnnDataFormatToTFDataFormat(diff_dst_mkl_data_format);
src_dims = (src_tensor.dims() == 4)
? TFShapeToMklDnnDimsInNCHW(src_tensor.shape(),
diff_dst_tf_data_format)
: TFShapeToMklDnnDimsInNCDHW(src_tensor.shape(),
diff_dst_tf_data_format);
src_md = memory::desc(
src_dims, MklDnnType<T>(),
MklTensorFormatToMklDnnDataFormat(diff_dst_mkl_data_format));
} else {
src_md = dnn_shape_src.GetMklLayout();
diff_dst_md = dnn_shape_diff_dst.GetMklLayout();
src_dims = dnn_shape_src.GetSizesAsMklDnnDims();
}
memory::desc common_md({}, memory::data_type::undef,
memory::format_tag::undef);
if (dnn_shape_src.IsMklTensor() || dnn_shape_diff_dst.IsMklTensor()) {
common_md = dnn_shape_src.IsMklTensor() ? src_md : diff_dst_md;
} else {
common_md = src_md;
}
MklEltwiseBwdParams<T> bwdParams(src_dims, common_md, alg_kind, alpha_,
beta_, GetTypeOfInputTensorFromFwdOp());
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
MklEltwiseBwdPrimitive<T>* eltwise_bwd =
MklEltwiseBwdPrimitiveFactory<T>::Get(bwdParams);
auto eltwise_bwd_pd = eltwise_bwd->GetEltwiseBwdPd();
std::shared_ptr<stream> bwd_cpu_stream;
bwd_cpu_stream.reset(CreateStream(&eigen_tp, eltwise_bwd->GetEngine()));
const T* src_data = src_tensor.flat<T>().data();
if (src_md != eltwise_bwd_pd->src_desc()) {
src.SetUsrMem(src_md, &src_tensor);
src.CheckReorderToOpMem(eltwise_bwd_pd.get()->diff_src_desc(),
cpu_engine, context);
src_data = const_cast<T*>(
reinterpret_cast<T*>(src.GetOpMem().get_data_handle()));
}
const T* diff_dst_data = diff_dst_tensor.flat<T>().data();
if (diff_dst_md != eltwise_bwd_pd->diff_dst_desc()) {
diff_dst.SetUsrMem(diff_dst_md, &diff_dst_tensor);
diff_dst.CheckReorderToOpMem(eltwise_bwd_pd.get()->diff_src_desc(),
cpu_engine, context);
diff_dst_data = const_cast<T*>(
reinterpret_cast<T*>(diff_dst.GetOpMem().get_data_handle()));
}
if (dnn_shape_src.IsMklTensor() || dnn_shape_diff_dst.IsMklTensor()) {
auto diff_src_pd = eltwise_bwd_pd->diff_src_desc();
dnn_shape_diff_src.SetMklTensor(true);
dnn_shape_diff_src.SetMklLayout(&diff_src_pd);
dnn_shape_diff_src.SetElemType(MklDnnType<T>());
if (dnn_shape_src.IsMklTensor()) {
dnn_shape_diff_src.SetTfLayout(dnn_shape_src.GetDimension(),
dnn_shape_src.GetSizesAsMklDnnDims(),
dnn_shape_src.GetTfDataFormat());
} else {
dnn_shape_diff_src.SetTfLayout(
dnn_shape_diff_dst.GetDimension(),
dnn_shape_diff_dst.GetSizesAsMklDnnDims(),
dnn_shape_diff_dst.GetTfDataFormat());
}
tf_shape_diff_src.AddDim(diff_src_pd.get_size() / sizeof(T));
} else {
dnn_shape_diff_src.SetMklTensor(false);
tf_shape_diff_src = src_tensor.shape();
}
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{static_cast<const int>(diff_dst_index)},
static_cast<const int>(diff_src_index),
tf_shape_diff_src, &diff_src_tensor));
AllocateOutputSetMklShape(context, diff_src_index, dnn_shape_diff_src);
T* diff_src_data = diff_src_tensor->flat<T>().data();
eltwise_bwd->Execute(src_data, diff_dst_data, diff_src_data,
bwd_cpu_stream);
} catch (dnnl::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
OP_REQUIRES_OK(
context,
errors::Aborted("Operation received an exception:", error_msg));
}
}
private:
engine cpu_engine = engine(engine::kind::cpu, 0);
std::shared_ptr<EltwiseFwdPd> relu_fwd_pd;
protected:
float alpha_;
float beta_;
};
template <typename Device, typename T>
class MklReluOp
: public MklReluOpBase<Device, T, dnnl::algorithm::eltwise_relu> {
public:
~MklReluOp() {}
explicit MklReluOp(OpKernelConstruction* context)
: MklReluOpBase<Device, T, dnnl::algorithm::eltwise_relu>(context, 0.0f,
0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
Tensor* dst_tensor = nullptr;
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, dst_index, &dst_tensor,
src_tensor.shape(), dnn_shape_dst);
void* out_o = static_cast<void*>(dst_tensor->flat<T>().data());
(static_cast<T*>(out_o))[0] =
std::max((static_cast<T*>(user_i))[0], static_cast<T>(0));
return;
}
};
template <typename Device, typename T>
class MklReluGradOp
: public MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_relu> {
public:
~MklReluGradOp() {}
explicit MklReluGradOp(OpKernelConstruction* context)
: MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_relu>(
context, 0.0f, 0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t diff_dst_index = 0;
const size_t src_index = 1;
const size_t diff_src_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_diff_dst;
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
diff_dst_tensor.shape(), dnn_shape_diff_src);
void* out_o = static_cast<void*>(diff_src_tensor->flat<T>().data());
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
void* user_g =
static_cast<void*>(const_cast<T*>(diff_dst_tensor.flat<T>().data()));
(static_cast<T*>(out_o))[0] =
(static_cast<T*>(user_g))[0] *
(static_cast<T>((static_cast<T*>(user_i))[0] > static_cast<T>(0)));
return;
}
};
template <typename Device, typename T>
class MklEluOp : public MklReluOpBase<Device, T, dnnl::algorithm::eltwise_elu> {
public:
~MklEluOp() {}
explicit MklEluOp(OpKernelConstruction* context)
: MklReluOpBase<Device, T, dnnl::algorithm::eltwise_elu>(context, 0.0f,
0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
Tensor* dst_tensor = nullptr;
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, dst_index, &dst_tensor,
src_tensor.shape(), dnn_shape_dst);
void* out_o = static_cast<void*>(dst_tensor->flat<T>().data());
T feature = (static_cast<T*>(user_i))[0];
if (feature < static_cast<T>(0))
(static_cast<T*>(out_o))[0] = Eigen::numext::exp(feature);
else
(static_cast<T*>(out_o))[0] = feature;
return;
}
};
template <typename Device, typename T>
class MklEluGradOp
: public MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_elu> {
public:
~MklEluGradOp() {}
explicit MklEluGradOp(OpKernelConstruction* context)
: MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_elu>(
context, 0.0f, 0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t diff_dst_index = 0;
const size_t src_index = 1;
const size_t diff_src_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_diff_dst;
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
diff_dst_tensor.shape(), dnn_shape_diff_src);
void* out_o = static_cast<void*>(diff_src_tensor->flat<T>().data());
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
void* user_g =
static_cast<void*>(const_cast<T*>(diff_dst_tensor.flat<T>().data()));
T feature = (static_cast<T*>(user_i))[0];
if (feature > static_cast<T>(0)) {
(static_cast<T*>(out_o))[0] = (static_cast<T*>(user_g))[0];
} else {
T elu = Eigen::numext::exp(feature) - static_cast<T>(1);
(static_cast<T*>(out_o))[0] =
(static_cast<T*>(user_g))[0] * (elu + static_cast<T>(1));
}
}
};
template <typename Device, typename T>
class MklTanhOp
: public MklReluOpBase<Device, T, dnnl::algorithm::eltwise_tanh> {
public:
~MklTanhOp() {}
explicit MklTanhOp(OpKernelConstruction* context)
: MklReluOpBase<Device, T, dnnl::algorithm::eltwise_tanh>(context, 0.0f,
0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
Tensor* dst_tensor = nullptr;
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, dst_index, &dst_tensor,
src_tensor.shape(), dnn_shape_dst);
void* out_o = static_cast<void*>(dst_tensor->flat<T>().data());
T feature = (static_cast<T*>(user_i))[0];
T e1 = Eigen::numext::exp(feature);
T e2 = Eigen::numext::exp(-feature);
(static_cast<T*>(out_o))[0] = (e1 - e2) / (e1 + e2);
return;
}
};
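// TanhGrad consumes the forward *output* y rather than the input
// (eltwise_tanh_use_dst_for_bwd), so the input indices are swapped relative
// to ReluGrad and the primitive is fed y via DNNL_ARG_DST: dx = dy * (1 - y^2).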
template <typename Device, typename T>
class MklTanhGradOp
: public MklReluGradOpBase<Device, T,
dnnl::algorithm::eltwise_tanh_use_dst_for_bwd> {
public:
~MklTanhGradOp() {}
explicit MklTanhGradOp(OpKernelConstruction* context)
: MklReluGradOpBase<Device, T,
dnnl::algorithm::eltwise_tanh_use_dst_for_bwd>(
context, 0.0f, 0.0f) {}
virtual int GetDiffDstIndex() const { return 1; }
virtual int GetSrcIndex() const { return 0; }
virtual int GetDiffSrcIndex() const { return 0; }
virtual int GetTypeOfInputTensorFromFwdOp() const { return DNNL_ARG_DST; }
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t diff_dst_index = GetDiffDstIndex();
const size_t src_index = GetSrcIndex();
const size_t diff_src_index = GetDiffSrcIndex();
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_diff_dst;
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
diff_dst_tensor.shape(), dnn_shape_diff_src);
void* out_o = static_cast<void*>(diff_src_tensor->flat<T>().data());
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
T tanh = (static_cast<T*>(user_i))[0];
void* user_g =
static_cast<void*>(const_cast<T*>(diff_dst_tensor.flat<T>().data()));
(static_cast<T*>(out_o))[0] =
(static_cast<T*>(user_g))[0] * (static_cast<T>(1) - tanh * tanh);
}
};
#define RELU6_UPPER_BOUND 6.0f
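// Relu6 maps onto oneDNN bounded ReLU with alpha as the upper bound.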
template <typename Device, typename T>
class MklRelu6Op
: public MklReluOpBase<Device, T, dnnl::algorithm::eltwise_bounded_relu> {
public:
~MklRelu6Op() {}
explicit MklRelu6Op(OpKernelConstruction* context)
: MklReluOpBase<Device, T, dnnl::algorithm::eltwise_bounded_relu>(
context, RELU6_UPPER_BOUND, 0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
Tensor* dst_tensor = nullptr;
T* user_i = const_cast<T*>(src_tensor.flat<T>().data());
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, dst_index, &dst_tensor,
src_tensor.shape(), dnn_shape_dst);
T* out_o = dst_tensor->flat<T>().data();
out_o[0] = std::min(std::max(user_i[0], static_cast<T>(0)),
static_cast<T>(RELU6_UPPER_BOUND));
return;
}
};
template <typename Device, typename T>
class MklRelu6GradOp
: public MklReluGradOpBase<Device, T,
dnnl::algorithm::eltwise_bounded_relu> {
public:
~MklRelu6GradOp() {}
explicit MklRelu6GradOp(OpKernelConstruction* context)
: MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_bounded_relu>(
context, RELU6_UPPER_BOUND, 0.0f) {}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t diff_dst_index = 0;
const size_t src_index = 1;
const size_t diff_src_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_diff_dst;
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
diff_dst_tensor.shape(), dnn_shape_diff_src);
T* out_o = diff_src_tensor->flat<T>().data();
T* user_i = const_cast<T*>(src_tensor.flat<T>().data());
T* user_g = const_cast<T*>(diff_dst_tensor.flat<T>().data());
out_o[0] = user_g[0] *
static_cast<T>(user_i[0] > static_cast<T>(0) &&
(user_i[0] < static_cast<T>(RELU6_UPPER_BOUND)));
return;
}
};
template <typename Device, typename T>
class MklLeakyReluOp
: public MklReluOpBase<Device, T, dnnl::algorithm::eltwise_relu> {
public:
~MklLeakyReluOp() {}
explicit MklLeakyReluOp(OpKernelConstruction* context)
: MklReluOpBase<Device, T, dnnl::algorithm::eltwise_relu>(context, 0.0f,
0.0f) {
float alpha;
OP_REQUIRES_OK(context, context->GetAttr("alpha", &alpha));
OP_REQUIRES(
context, alpha <= 1,
errors::InvalidArgument("MKL LeakyRelu only supports alpha <= 1. "
"alpha is: ",
alpha));
this->alpha_ = alpha;
}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t src_index = 0;
const size_t dst_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
MklDnnShape dnn_shape_src;
GetMklShape(context, src_index, &dnn_shape_src);
Tensor* dst_tensor = nullptr;
T* user_i = const_cast<T*>(src_tensor.flat<T>().data());
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, dst_index, &dst_tensor,
src_tensor.shape(), dnn_shape_dst);
T* out_o = dst_tensor->flat<T>().data();
out_o[0] = user_i[0] >= T(0) ? user_i[0] : user_i[0] * T(this->alpha_);
return;
}
};
template <typename Device, typename T>
class MklLeakyReluGradOp
: public MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_relu> {
public:
~MklLeakyReluGradOp() {}
explicit MklLeakyReluGradOp(OpKernelConstruction* context)
: MklReluGradOpBase<Device, T, dnnl::algorithm::eltwise_relu>(
context, 0.0f, 0.0f) {
float alpha;
OP_REQUIRES_OK(context, context->GetAttr("alpha", &alpha));
OP_REQUIRES(
context, alpha <= 1,
errors::InvalidArgument("MKL LeakyRelu only supports alpha <= 1. "
"alpha is: ",
alpha));
this->alpha_ = alpha;
}
virtual void Compute_Scalar(OpKernelContext* context) {
const size_t diff_dst_index = 0;
const size_t src_index = 1;
const size_t diff_src_index = 0;
const Tensor& src_tensor = MklGetInput(context, src_index);
const Tensor& diff_dst_tensor = MklGetInput(context, diff_dst_index);
Tensor* diff_src_tensor = nullptr;
MklDnnShape dnn_shape_diff_dst;
GetMklShape(context, diff_dst_index, &dnn_shape_diff_dst);
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, diff_src_index, &diff_src_tensor,
diff_dst_tensor.shape(), dnn_shape_diff_src);
T* out_o = diff_src_tensor->flat<T>().data();
T* user_i = const_cast<T*>(src_tensor.flat<T>().data());
T* user_g = const_cast<T*>(diff_dst_tensor.flat<T>().data());
out_o[0] = user_i[0] >= static_cast<T>(0)
? user_g[0]
: user_g[0] * static_cast<T>(this->alpha_);
return;
}
};
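// Register the forward/grad kernel pairs as MKL layout-dependent CPU ops for
// float and bfloat16.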
#define REGISTER_RELU_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklRelu") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklReluOp<CPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklReluGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklReluGradOp<CPUDevice, type>);
TF_CALL_float(REGISTER_RELU_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_RELU_MKL_SUPPORTED_KERNELS_TYPES);
#define REGISTER_ELU_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklElu") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklEluOp<CPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklEluGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklEluGradOp<CPUDevice, type>);
TF_CALL_float(REGISTER_ELU_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_ELU_MKL_SUPPORTED_KERNELS_TYPES);
#define REGISTER_TANH_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklTanh") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklTanhOp<CPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklTanhGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklTanhGradOp<CPUDevice, type>);
TF_CALL_float(REGISTER_TANH_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_TANH_MKL_SUPPORTED_KERNELS_TYPES);
#define REGISTER_RELU6_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklRelu6") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklRelu6Op<CPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklRelu6Grad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklRelu6GradOp<CPUDevice, type>);
TF_CALL_float(REGISTER_RELU6_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_RELU6_MKL_SUPPORTED_KERNELS_TYPES);
#define REGISTER_LeakyRelu_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklLeakyRelu") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklLeakyReluOp<CPUDevice, type>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklLeakyReluGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklLeakyReluGradOp<CPUDevice, type>);
TF_CALL_float(REGISTER_LeakyRelu_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_LeakyRelu_MKL_SUPPORTED_KERNELS_TYPES);
}
#endif | #if defined(INTEL_MKL) && !defined(ENABLE_ONEDNN_V3) && defined(ENABLE_MKL)
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/stacktrace_handler.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/mkl_util.h"
namespace tensorflow {
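// Builds a benchmark graph for either the stock kernel or its _Mkl variant;
// MKL layout-dependent ops take extra dummy MKL-shape metadata inputs.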
static Graph* Activation(const string& op_name, const string& kind,
const TensorShape& shape) {
auto* graph = new Graph(OpRegistry::Global());
const string node_name = kind + "_" + op_name;
const bool isForwardOp = !tensorflow::str_util::EndsWith(op_name, "Grad");
const bool isDefault = (kind == "Default");
Tensor input_t(DT_FLOAT, shape);
input_t.flat<float>().setRandom();
Node* input = test::graph::Constant(graph, input_t, "input");
Node* not_mkl_shape =
test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl");
if (isForwardOp) {
if (isDefault) {
TF_CHECK_OK(NodeBuilder(graph->NewName(node_name), op_name)
.Input(input)
.Attr("T", DT_FLOAT)
.Finalize(graph, nullptr));
return graph;
}
TF_CHECK_OK(NodeBuilder(graph->NewName(node_name), "_Mkl" + op_name)
.Input(input)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("_kernel", "MklLayoutDependentOp")
.Finalize(graph, nullptr));
return graph;
}
Tensor grad_t(DT_FLOAT, shape);
grad_t.flat<float>().setRandom();
Node* grad = test::graph::Constant(graph, grad_t, "grad");
if (isDefault) {
TF_CHECK_OK(NodeBuilder(graph->NewName(node_name), op_name)
.Input(grad)
.Input(input)
.Attr("T", DT_FLOAT)
.Finalize(graph, nullptr));
return graph;
}
TF_CHECK_OK(NodeBuilder(graph->NewName(node_name), "_Mkl" + op_name)
.Input(grad)
.Input(input)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("_kernel", "MklLayoutDependentOp")
.Finalize(graph, nullptr));
return graph;
}
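// Benchmark each activation over a grid of 4-D shapes, comparing the Default
// and Mkl kernels; items processed = elements touched per iteration.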
#define BM_Activation(op, kind, A, B, C, D, type) \
static void BM_##op##_##kind##_##type##_##A##_##B##_##C##_##D( \
::testing::benchmark::State& state) { \
int64 num_computed_elements = (A) * (B) * (C) * (D); \
int64 flops_per_iter = num_computed_elements; \
\
    test::Benchmark(#type, Activation(#op, #kind, {A, B, C, D}),          \
                    /*old_benchmark_api=*/false)                           \
        .Run(state);                                                       \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_##op##_##kind##_##type##_##A##_##B##_##C##_##D)
#define BM(op, A, B, C, D, type) \
BM_Activation(op, Default, A, B, C, D, type); \
BM_Activation(op, Mkl, A, B, C, D, type);
#define TEST_ALL_SIZES(OP) \
BM(OP, 2, 4, 8, 16, cpu); \
BM(OP, 3, 5, 9, 17, cpu); \
BM(OP, 32, 64, 128, 256, cpu); \
BM(OP, 33, 65, 129, 257, cpu);
TEST_ALL_SIZES(Tanh)
TEST_ALL_SIZES(TanhGrad)
TEST_ALL_SIZES(Relu)
TEST_ALL_SIZES(ReluGrad)
TEST_ALL_SIZES(Elu)
TEST_ALL_SIZES(EluGrad)
TEST_ALL_SIZES(Relu6)
TEST_ALL_SIZES(Relu6Grad)
TEST_ALL_SIZES(LeakyRelu)
TEST_ALL_SIZES(LeakyReluGrad)
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_relu_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_relu_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
89bd881f-5e83-40d0-a29d-07fe18039cf9 | cpp | tensorflow/tensorflow | type_id_registry | third_party/xla/xla/ffi/type_id_registry.cc | third_party/xla/xla/ffi/type_id_registry_test.cc | #include "xla/ffi/type_id_registry.h"
#include <atomic>
#include <cstdint>
#include <string>
#include <string_view>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/util.h"
namespace xla::ffi {
ABSL_CONST_INIT absl::Mutex type_registry_mutex(absl::kConstInit);
using ExternalTypeIdRegistry =
absl::flat_hash_map<std::string, TypeIdRegistry::TypeId>;
static ExternalTypeIdRegistry& StaticExternalTypeIdRegistry() {
static auto* registry = new ExternalTypeIdRegistry();
return *registry;
}
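// Type ids come from one process-wide counter starting at 1, leaving 0 free
// to act as the "not yet assigned" placeholder used during registration.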
TypeIdRegistry::TypeId TypeIdRegistry::GetNextTypeId() {
static auto* counter = new std::atomic<int64_t>(1);
return TypeId(counter->fetch_add(1));
}
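// Emplace-then-assign: a TypeId(0) placeholder reserves the name under the
// lock; a real id is minted only the first time a name is seen.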
absl::StatusOr<TypeIdRegistry::TypeId> TypeIdRegistry::RegisterExternalTypeId(
std::string_view name) {
absl::MutexLock lock(&type_registry_mutex);
auto& registry = StaticExternalTypeIdRegistry();
auto emplaced = registry.emplace(name, TypeId(0));
if (!emplaced.second) {
return Internal("Type id %d already registered for type name %s",
emplaced.first->second.value(), name);
}
return emplaced.first->second = GetNextTypeId();
}
} | #include "xla/ffi/type_id_registry.h"
#include <cstdint>
#include "absl/status/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::ffi {
namespace {
using ::testing::HasSubstr;
TEST(TypeIdRegistryTest, RegisterExternalTypeId) {
TF_ASSERT_OK_AND_ASSIGN(auto type_id,
TypeIdRegistry::RegisterExternalTypeId("foo"));
EXPECT_GE(type_id.value(), 0);
auto duplicate_type_id = TypeIdRegistry::RegisterExternalTypeId("foo");
EXPECT_THAT(duplicate_type_id.status().message(),
HasSubstr("already registered for type name foo"));
}
TEST(TypeIdRegistryTest, RegisterInternalTypeId) {
auto int32_type_id = TypeIdRegistry::GetTypeId<int32_t>();
auto int64_type_id = TypeIdRegistry::GetTypeId<int64_t>();
EXPECT_NE(int32_type_id, int64_type_id);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/type_id_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/type_id_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e4078ac1-cb85-470a-adbf-9698468e662b | cpp | google/quiche | quic_linux_socket_utils | quiche/quic/core/quic_linux_socket_utils.cc | quiche/quic/core/quic_linux_socket_utils_test.cc | #include "quiche/quic/core/quic_linux_socket_utils.h"
#include <linux/net_tstamp.h>
#include <netinet/in.h>
#include <cstddef>
#include <cstdint>
#include <string>
#include "quiche/quic/core/quic_syscall_wrapper.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quic {
QuicMsgHdr::QuicMsgHdr(iovec* iov, size_t iov_len, char* cbuf, size_t cbuf_size)
: cbuf_(cbuf), cbuf_size_(cbuf_size), cmsg_(nullptr) {
hdr_.msg_name = nullptr;
hdr_.msg_namelen = 0;
hdr_.msg_iov = iov;
hdr_.msg_iovlen = iov_len;
hdr_.msg_flags = 0;
hdr_.msg_control = nullptr;
hdr_.msg_controllen = 0;
}
void QuicMsgHdr::SetPeerAddress(const QuicSocketAddress& peer_address) {
QUICHE_DCHECK(peer_address.IsInitialized());
raw_peer_address_ = peer_address.generic_address();
hdr_.msg_name = &raw_peer_address_;
hdr_.msg_namelen = raw_peer_address_.ss_family == AF_INET
? sizeof(sockaddr_in)
: sizeof(sockaddr_in6);
}
void QuicMsgHdr::SetIpInNextCmsg(const QuicIpAddress& self_address) {
if (!self_address.IsInitialized()) {
return;
}
if (self_address.IsIPv4()) {
QuicLinuxSocketUtils::SetIpInfoInCmsgData(
self_address, GetNextCmsgData<in_pktinfo>(IPPROTO_IP, IP_PKTINFO));
} else {
QuicLinuxSocketUtils::SetIpInfoInCmsgData(
self_address, GetNextCmsgData<in6_pktinfo>(IPPROTO_IPV6, IPV6_PKTINFO));
}
}
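// Appends one cmsg to the control buffer: the buffer is zeroed lazily on
// first use, then walked with CMSG_FIRSTHDR/CMSG_NXTHDR. The caller must have
// sized cbuf with CMSG_SPACE for every cmsg it intends to add.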
void* QuicMsgHdr::GetNextCmsgDataInternal(int cmsg_level, int cmsg_type,
size_t data_size) {
hdr_.msg_controllen += CMSG_SPACE(data_size);
QUICHE_DCHECK_LE(hdr_.msg_controllen, cbuf_size_);
if (cmsg_ == nullptr) {
QUICHE_DCHECK_EQ(nullptr, hdr_.msg_control);
memset(cbuf_, 0, cbuf_size_);
hdr_.msg_control = cbuf_;
cmsg_ = CMSG_FIRSTHDR(&hdr_);
} else {
QUICHE_DCHECK_NE(nullptr, hdr_.msg_control);
cmsg_ = CMSG_NXTHDR(&hdr_, cmsg_);
}
QUICHE_DCHECK_NE(nullptr, cmsg_) << "Insufficient control buffer space";
cmsg_->cmsg_len = CMSG_LEN(data_size);
cmsg_->cmsg_level = cmsg_level;
cmsg_->cmsg_type = cmsg_type;
return CMSG_DATA(cmsg_);
}
void QuicMMsgHdr::InitOneHeader(int i, const BufferedWrite& buffered_write) {
mmsghdr* mhdr = GetMMsgHdr(i);
msghdr* hdr = &mhdr->msg_hdr;
iovec* iov = GetIov(i);
iov->iov_base = const_cast<char*>(buffered_write.buffer);
iov->iov_len = buffered_write.buf_len;
hdr->msg_iov = iov;
hdr->msg_iovlen = 1;
hdr->msg_control = nullptr;
hdr->msg_controllen = 0;
QUICHE_DCHECK(buffered_write.peer_address.IsInitialized());
sockaddr_storage* peer_address_storage = GetPeerAddressStorage(i);
*peer_address_storage = buffered_write.peer_address.generic_address();
hdr->msg_name = peer_address_storage;
hdr->msg_namelen = peer_address_storage->ss_family == AF_INET
? sizeof(sockaddr_in)
: sizeof(sockaddr_in6);
}
void QuicMMsgHdr::SetIpInNextCmsg(int i, const QuicIpAddress& self_address) {
if (!self_address.IsInitialized()) {
return;
}
if (self_address.IsIPv4()) {
QuicLinuxSocketUtils::SetIpInfoInCmsgData(
self_address, GetNextCmsgData<in_pktinfo>(i, IPPROTO_IP, IP_PKTINFO));
} else {
QuicLinuxSocketUtils::SetIpInfoInCmsgData(
self_address,
GetNextCmsgData<in6_pktinfo>(i, IPPROTO_IPV6, IPV6_PKTINFO));
}
}
void* QuicMMsgHdr::GetNextCmsgDataInternal(int i, int cmsg_level, int cmsg_type,
size_t data_size) {
mmsghdr* mhdr = GetMMsgHdr(i);
msghdr* hdr = &mhdr->msg_hdr;
cmsghdr*& cmsg = *GetCmsgHdr(i);
hdr->msg_controllen += CMSG_SPACE(data_size);
QUICHE_DCHECK_LE(hdr->msg_controllen, cbuf_size_);
if (cmsg == nullptr) {
QUICHE_DCHECK_EQ(nullptr, hdr->msg_control);
hdr->msg_control = GetCbuf(i);
cmsg = CMSG_FIRSTHDR(hdr);
} else {
QUICHE_DCHECK_NE(nullptr, hdr->msg_control);
cmsg = CMSG_NXTHDR(hdr, cmsg);
}
QUICHE_DCHECK_NE(nullptr, cmsg) << "Insufficient control buffer space";
cmsg->cmsg_len = CMSG_LEN(data_size);
cmsg->cmsg_level = cmsg_level;
cmsg->cmsg_type = cmsg_type;
return CMSG_DATA(cmsg);
}
int QuicMMsgHdr::num_bytes_sent(int num_packets_sent) {
QUICHE_DCHECK_LE(0, num_packets_sent);
QUICHE_DCHECK_LE(num_packets_sent, num_msgs_);
int bytes_sent = 0;
iovec* iov = GetIov(0);
for (int i = 0; i < num_packets_sent; ++i) {
bytes_sent += iov[i].iov_len;
}
return bytes_sent;
}
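// Reads back the UDP_SEGMENT (GSO) size configured on the socket, or -1 if
// segmentation offload is unavailable.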
int QuicLinuxSocketUtils::GetUDPSegmentSize(int fd) {
int optval;
socklen_t optlen = sizeof(optval);
int rc = getsockopt(fd, SOL_UDP, UDP_SEGMENT, &optval, &optlen);
if (rc < 0) {
QUIC_LOG_EVERY_N_SEC(INFO, 10)
<< "getsockopt(UDP_SEGMENT) failed: " << strerror(errno);
return -1;
}
QUIC_LOG_EVERY_N_SEC(INFO, 10)
<< "getsockopt(UDP_SEGMENT) returned segment size: " << optval;
return optval;
}
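// Enables SO_TXTIME so the kernel can pace packets using per-packet transmit
// timestamps supplied later via SCM_TXTIME control messages.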
bool QuicLinuxSocketUtils::EnableReleaseTime(int fd, clockid_t clockid) {
struct LinuxSockTxTime {
clockid_t clockid;
uint32_t flags;
};
LinuxSockTxTime so_txtime_val{clockid, 0};
if (setsockopt(fd, SOL_SOCKET, SO_TXTIME, &so_txtime_val,
sizeof(so_txtime_val)) != 0) {
QUIC_LOG_EVERY_N_SEC(INFO, 10)
<< "setsockopt(SOL_SOCKET,SO_TXTIME) failed: " << strerror(errno);
return false;
}
return true;
}
bool QuicLinuxSocketUtils::GetTtlFromMsghdr(struct msghdr* hdr, int* ttl) {
if (hdr->msg_controllen > 0) {
struct cmsghdr* cmsg;
for (cmsg = CMSG_FIRSTHDR(hdr); cmsg != nullptr;
cmsg = CMSG_NXTHDR(hdr, cmsg)) {
if ((cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_TTL) ||
(cmsg->cmsg_level == IPPROTO_IPV6 &&
cmsg->cmsg_type == IPV6_HOPLIMIT)) {
*ttl = *(reinterpret_cast<int*>(CMSG_DATA(cmsg)));
return true;
}
}
}
return false;
}
void QuicLinuxSocketUtils::SetIpInfoInCmsgData(
const QuicIpAddress& self_address, void* cmsg_data) {
QUICHE_DCHECK(self_address.IsInitialized());
const std::string& address_str = self_address.ToPackedString();
if (self_address.IsIPv4()) {
in_pktinfo* pktinfo = static_cast<in_pktinfo*>(cmsg_data);
pktinfo->ipi_ifindex = 0;
memcpy(&pktinfo->ipi_spec_dst, address_str.c_str(), address_str.length());
} else if (self_address.IsIPv6()) {
in6_pktinfo* pktinfo = static_cast<in6_pktinfo*>(cmsg_data);
memcpy(&pktinfo->ipi6_addr, address_str.c_str(), address_str.length());
} else {
QUIC_BUG(quic_bug_10598_1) << "Unrecognized IPAddress";
}
}
size_t QuicLinuxSocketUtils::SetIpInfoInCmsg(const QuicIpAddress& self_address,
cmsghdr* cmsg) {
std::string address_string;
if (self_address.IsIPv4()) {
cmsg->cmsg_len = CMSG_LEN(sizeof(in_pktinfo));
cmsg->cmsg_level = IPPROTO_IP;
cmsg->cmsg_type = IP_PKTINFO;
in_pktinfo* pktinfo = reinterpret_cast<in_pktinfo*>(CMSG_DATA(cmsg));
memset(pktinfo, 0, sizeof(in_pktinfo));
pktinfo->ipi_ifindex = 0;
address_string = self_address.ToPackedString();
memcpy(&pktinfo->ipi_spec_dst, address_string.c_str(),
address_string.length());
return sizeof(in_pktinfo);
} else if (self_address.IsIPv6()) {
cmsg->cmsg_len = CMSG_LEN(sizeof(in6_pktinfo));
cmsg->cmsg_level = IPPROTO_IPV6;
cmsg->cmsg_type = IPV6_PKTINFO;
in6_pktinfo* pktinfo = reinterpret_cast<in6_pktinfo*>(CMSG_DATA(cmsg));
memset(pktinfo, 0, sizeof(in6_pktinfo));
address_string = self_address.ToPackedString();
memcpy(&pktinfo->ipi6_addr, address_string.c_str(),
address_string.length());
return sizeof(in6_pktinfo);
} else {
QUIC_BUG(quic_bug_10598_2) << "Unrecognized IPAddress";
return 0;
}
}
WriteResult QuicLinuxSocketUtils::WritePacket(int fd, const QuicMsgHdr& hdr) {
int rc;
do {
rc = GetGlobalSyscallWrapper()->Sendmsg(fd, hdr.hdr(), 0);
} while (rc < 0 && errno == EINTR);
if (rc >= 0) {
return WriteResult(WRITE_STATUS_OK, rc);
}
return WriteResult((errno == EAGAIN || errno == EWOULDBLOCK)
? WRITE_STATUS_BLOCKED
: WRITE_STATUS_ERROR,
errno);
}
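// Illustrative sketch (not part of the original file): interpreting the
// result of WritePacket. WRITE_STATUS_BLOCKED means the caller should wait
// for the socket to become writable and retry; WRITE_STATUS_ERROR carries
// the errno value.
//
//   WriteResult result = QuicLinuxSocketUtils::WritePacket(fd, hdr);
//   if (result.status == WRITE_STATUS_BLOCKED) {
//     // Register for a writable event and retry the same packet later.
//   }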
WriteResult QuicLinuxSocketUtils::WriteMultiplePackets(int fd,
QuicMMsgHdr* mhdr,
int* num_packets_sent) {
*num_packets_sent = 0;
if (mhdr->num_msgs() <= 0) {
return WriteResult(WRITE_STATUS_ERROR, EINVAL);
}
int rc;
do {
rc = GetGlobalSyscallWrapper()->Sendmmsg(fd, mhdr->mhdr(), mhdr->num_msgs(),
0);
} while (rc < 0 && errno == EINTR);
if (rc > 0) {
*num_packets_sent = rc;
return WriteResult(WRITE_STATUS_OK, mhdr->num_bytes_sent(rc));
} else if (rc == 0) {
QUIC_BUG(quic_bug_10598_3)
<< "sendmmsg returned 0, returning WRITE_STATUS_ERROR. errno: "
<< errno;
errno = EIO;
}
return WriteResult((errno == EAGAIN || errno == EWOULDBLOCK)
? WRITE_STATUS_BLOCKED
: WRITE_STATUS_ERROR,
errno);
}
} | #include "quiche/quic/core/quic_linux_socket_utils.h"
#include <netinet/in.h>
#include <stdint.h>
#include <cstddef>
#include <sstream>
#include <string>
#include <vector>
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_mock_syscall_wrapper.h"
#include "quiche/common/quiche_circular_deque.h"
using testing::_;
using testing::InSequence;
using testing::Invoke;
namespace quic {
namespace test {
namespace {
class QuicLinuxSocketUtilsTest : public QuicTest {
protected:
WriteResult TestWriteMultiplePackets(
int fd,
const quiche::QuicheCircularDeque<BufferedWrite>::const_iterator& first,
const quiche::QuicheCircularDeque<BufferedWrite>::const_iterator& last,
int* num_packets_sent) {
QuicMMsgHdr mhdr(
first, last, kCmsgSpaceForIp,
[](QuicMMsgHdr* mhdr, int i, const BufferedWrite& buffered_write) {
mhdr->SetIpInNextCmsg(i, buffered_write.self_address);
});
WriteResult res =
QuicLinuxSocketUtils::WriteMultiplePackets(fd, &mhdr, num_packets_sent);
return res;
}
MockQuicSyscallWrapper mock_syscalls_;
ScopedGlobalSyscallWrapperOverride syscall_override_{&mock_syscalls_};
};
void CheckIpAndTtlInCbuf(msghdr* hdr, const void* cbuf,
const QuicIpAddress& self_addr, int ttl) {
const bool is_ipv4 = self_addr.IsIPv4();
const size_t ip_cmsg_space = is_ipv4 ? kCmsgSpaceForIpv4 : kCmsgSpaceForIpv6;
EXPECT_EQ(cbuf, hdr->msg_control);
EXPECT_EQ(ip_cmsg_space + CMSG_SPACE(sizeof(uint16_t)), hdr->msg_controllen);
cmsghdr* cmsg = CMSG_FIRSTHDR(hdr);
EXPECT_EQ(cmsg->cmsg_len, is_ipv4 ? CMSG_LEN(sizeof(in_pktinfo))
: CMSG_LEN(sizeof(in6_pktinfo)));
EXPECT_EQ(cmsg->cmsg_level, is_ipv4 ? IPPROTO_IP : IPPROTO_IPV6);
EXPECT_EQ(cmsg->cmsg_type, is_ipv4 ? IP_PKTINFO : IPV6_PKTINFO);
const std::string& self_addr_str = self_addr.ToPackedString();
if (is_ipv4) {
in_pktinfo* pktinfo = reinterpret_cast<in_pktinfo*>(CMSG_DATA(cmsg));
EXPECT_EQ(0, memcmp(&pktinfo->ipi_spec_dst, self_addr_str.c_str(),
self_addr_str.length()));
} else {
in6_pktinfo* pktinfo = reinterpret_cast<in6_pktinfo*>(CMSG_DATA(cmsg));
EXPECT_EQ(0, memcmp(&pktinfo->ipi6_addr, self_addr_str.c_str(),
self_addr_str.length()));
}
cmsg = CMSG_NXTHDR(hdr, cmsg);
EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(int)));
EXPECT_EQ(cmsg->cmsg_level, is_ipv4 ? IPPROTO_IP : IPPROTO_IPV6);
EXPECT_EQ(cmsg->cmsg_type, is_ipv4 ? IP_TTL : IPV6_HOPLIMIT);
EXPECT_EQ(ttl, *reinterpret_cast<int*>(CMSG_DATA(cmsg)));
EXPECT_EQ(nullptr, CMSG_NXTHDR(hdr, cmsg));
}
void CheckMsghdrWithoutCbuf(const msghdr* hdr, const void* buffer,
size_t buf_len,
const QuicSocketAddress& peer_addr) {
EXPECT_EQ(
peer_addr.host().IsIPv4() ? sizeof(sockaddr_in) : sizeof(sockaddr_in6),
hdr->msg_namelen);
sockaddr_storage peer_generic_addr = peer_addr.generic_address();
EXPECT_EQ(0, memcmp(hdr->msg_name, &peer_generic_addr, hdr->msg_namelen));
EXPECT_EQ(1u, hdr->msg_iovlen);
EXPECT_EQ(buffer, hdr->msg_iov->iov_base);
EXPECT_EQ(buf_len, hdr->msg_iov->iov_len);
EXPECT_EQ(0, hdr->msg_flags);
EXPECT_EQ(nullptr, hdr->msg_control);
EXPECT_EQ(0u, hdr->msg_controllen);
}
void CheckIpAndGsoSizeInCbuf(msghdr* hdr, const void* cbuf,
const QuicIpAddress& self_addr,
uint16_t gso_size) {
const bool is_ipv4 = self_addr.IsIPv4();
const size_t ip_cmsg_space = is_ipv4 ? kCmsgSpaceForIpv4 : kCmsgSpaceForIpv6;
EXPECT_EQ(cbuf, hdr->msg_control);
EXPECT_EQ(ip_cmsg_space + CMSG_SPACE(sizeof(uint16_t)), hdr->msg_controllen);
cmsghdr* cmsg = CMSG_FIRSTHDR(hdr);
EXPECT_EQ(cmsg->cmsg_len, is_ipv4 ? CMSG_LEN(sizeof(in_pktinfo))
: CMSG_LEN(sizeof(in6_pktinfo)));
EXPECT_EQ(cmsg->cmsg_level, is_ipv4 ? IPPROTO_IP : IPPROTO_IPV6);
EXPECT_EQ(cmsg->cmsg_type, is_ipv4 ? IP_PKTINFO : IPV6_PKTINFO);
const std::string& self_addr_str = self_addr.ToPackedString();
if (is_ipv4) {
in_pktinfo* pktinfo = reinterpret_cast<in_pktinfo*>(CMSG_DATA(cmsg));
EXPECT_EQ(0, memcmp(&pktinfo->ipi_spec_dst, self_addr_str.c_str(),
self_addr_str.length()));
} else {
in6_pktinfo* pktinfo = reinterpret_cast<in6_pktinfo*>(CMSG_DATA(cmsg));
EXPECT_EQ(0, memcmp(&pktinfo->ipi6_addr, self_addr_str.c_str(),
self_addr_str.length()));
}
cmsg = CMSG_NXTHDR(hdr, cmsg);
EXPECT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(uint16_t)));
EXPECT_EQ(cmsg->cmsg_level, SOL_UDP);
EXPECT_EQ(cmsg->cmsg_type, UDP_SEGMENT);
EXPECT_EQ(gso_size, *reinterpret_cast<uint16_t*>(CMSG_DATA(cmsg)));
EXPECT_EQ(nullptr, CMSG_NXTHDR(hdr, cmsg));
}
TEST_F(QuicLinuxSocketUtilsTest, QuicMsgHdr) {
QuicSocketAddress peer_addr(QuicIpAddress::Loopback4(), 1234);
char packet_buf[1024];
iovec iov{packet_buf, sizeof(packet_buf)};
{
QuicMsgHdr quic_hdr(&iov, 1, nullptr, 0);
quic_hdr.SetPeerAddress(peer_addr);
CheckMsghdrWithoutCbuf(quic_hdr.hdr(), packet_buf, sizeof(packet_buf),
peer_addr);
}
for (bool is_ipv4 : {true, false}) {
QuicIpAddress self_addr =
is_ipv4 ? QuicIpAddress::Loopback4() : QuicIpAddress::Loopback6();
alignas(cmsghdr) char cbuf[kCmsgSpaceForIp + kCmsgSpaceForTTL];
QuicMsgHdr quic_hdr(&iov, 1, cbuf, sizeof(cbuf));
quic_hdr.SetPeerAddress(peer_addr);
msghdr* hdr = const_cast<msghdr*>(quic_hdr.hdr());
EXPECT_EQ(nullptr, hdr->msg_control);
EXPECT_EQ(0u, hdr->msg_controllen);
quic_hdr.SetIpInNextCmsg(self_addr);
EXPECT_EQ(cbuf, hdr->msg_control);
const size_t ip_cmsg_space =
is_ipv4 ? kCmsgSpaceForIpv4 : kCmsgSpaceForIpv6;
EXPECT_EQ(ip_cmsg_space, hdr->msg_controllen);
if (is_ipv4) {
*quic_hdr.GetNextCmsgData<int>(IPPROTO_IP, IP_TTL) = 32;
} else {
*quic_hdr.GetNextCmsgData<int>(IPPROTO_IPV6, IPV6_HOPLIMIT) = 32;
}
CheckIpAndTtlInCbuf(hdr, cbuf, self_addr, 32);
}
}
TEST_F(QuicLinuxSocketUtilsTest, QuicMMsgHdr) {
quiche::QuicheCircularDeque<BufferedWrite> buffered_writes;
char packet_buf1[1024];
char packet_buf2[512];
buffered_writes.emplace_back(
packet_buf1, sizeof(packet_buf1), QuicIpAddress::Loopback4(),
QuicSocketAddress(QuicIpAddress::Loopback4(), 4));
buffered_writes.emplace_back(
packet_buf2, sizeof(packet_buf2), QuicIpAddress::Loopback6(),
QuicSocketAddress(QuicIpAddress::Loopback6(), 6));
QuicMMsgHdr quic_mhdr_without_cbuf(buffered_writes.begin(),
buffered_writes.end(), 0);
for (size_t i = 0; i < buffered_writes.size(); ++i) {
const BufferedWrite& bw = buffered_writes[i];
CheckMsghdrWithoutCbuf(&quic_mhdr_without_cbuf.mhdr()[i].msg_hdr, bw.buffer,
bw.buf_len, bw.peer_address);
}
QuicMMsgHdr quic_mhdr_with_cbuf(
buffered_writes.begin(), buffered_writes.end(),
kCmsgSpaceForIp + kCmsgSpaceForSegmentSize,
[](QuicMMsgHdr* mhdr, int i, const BufferedWrite& buffered_write) {
mhdr->SetIpInNextCmsg(i, buffered_write.self_address);
*mhdr->GetNextCmsgData<uint16_t>(i, SOL_UDP, UDP_SEGMENT) = 1300;
});
for (size_t i = 0; i < buffered_writes.size(); ++i) {
const BufferedWrite& bw = buffered_writes[i];
msghdr* hdr = &quic_mhdr_with_cbuf.mhdr()[i].msg_hdr;
CheckIpAndGsoSizeInCbuf(hdr, hdr->msg_control, bw.self_address, 1300);
}
}
TEST_F(QuicLinuxSocketUtilsTest, WriteMultiplePackets_NoPacketsToSend) {
int num_packets_sent;
quiche::QuicheCircularDeque<BufferedWrite> buffered_writes;
EXPECT_CALL(mock_syscalls_, Sendmmsg(_, _, _, _)).Times(0);
EXPECT_EQ(WriteResult(WRITE_STATUS_ERROR, EINVAL),
TestWriteMultiplePackets(1, buffered_writes.begin(),
buffered_writes.end(), &num_packets_sent));
}
TEST_F(QuicLinuxSocketUtilsTest, WriteMultiplePackets_WriteBlocked) {
int num_packets_sent;
quiche::QuicheCircularDeque<BufferedWrite> buffered_writes;
buffered_writes.emplace_back(nullptr, 0, QuicIpAddress(),
QuicSocketAddress(QuicIpAddress::Any4(), 0));
EXPECT_CALL(mock_syscalls_, Sendmmsg(_, _, _, _))
      .WillOnce(Invoke([](int /*fd*/, mmsghdr* /*msgvec*/,
                          unsigned int /*vlen*/, int /*flags*/) {
errno = EWOULDBLOCK;
return -1;
}));
EXPECT_EQ(WriteResult(WRITE_STATUS_BLOCKED, EWOULDBLOCK),
TestWriteMultiplePackets(1, buffered_writes.begin(),
buffered_writes.end(), &num_packets_sent));
EXPECT_EQ(0, num_packets_sent);
}
TEST_F(QuicLinuxSocketUtilsTest, WriteMultiplePackets_WriteError) {
int num_packets_sent;
quiche::QuicheCircularDeque<BufferedWrite> buffered_writes;
buffered_writes.emplace_back(nullptr, 0, QuicIpAddress(),
QuicSocketAddress(QuicIpAddress::Any4(), 0));
EXPECT_CALL(mock_syscalls_, Sendmmsg(_, _, _, _))
      .WillOnce(Invoke([](int /*fd*/, mmsghdr* /*msgvec*/,
                          unsigned int /*vlen*/, int /*flags*/) {
errno = EPERM;
return -1;
}));
EXPECT_EQ(WriteResult(WRITE_STATUS_ERROR, EPERM),
TestWriteMultiplePackets(1, buffered_writes.begin(),
buffered_writes.end(), &num_packets_sent));
EXPECT_EQ(0, num_packets_sent);
}
TEST_F(QuicLinuxSocketUtilsTest, WriteMultiplePackets_WriteSuccess) {
int num_packets_sent;
quiche::QuicheCircularDeque<BufferedWrite> buffered_writes;
const int kNumBufferedWrites = 10;
static_assert(kNumBufferedWrites < 256, "Must be less than 256");
std::vector<std::string> buffer_holder;
for (int i = 0; i < kNumBufferedWrites; ++i) {
size_t buf_len = (i + 1) * 2;
std::ostringstream buffer_ostream;
while (buffer_ostream.str().length() < buf_len) {
buffer_ostream << i;
}
buffer_holder.push_back(buffer_ostream.str().substr(0, buf_len - 1) + '$');
buffered_writes.emplace_back(buffer_holder.back().data(), buf_len,
QuicIpAddress(),
QuicSocketAddress(QuicIpAddress::Any4(), 0));
if (i != 0) {
ASSERT_TRUE(buffered_writes.back().self_address.FromString("127.0.0.1"));
}
std::ostringstream peer_ip_ostream;
QuicIpAddress peer_ip_address;
peer_ip_ostream << "127.0.1." << i + 1;
ASSERT_TRUE(peer_ip_address.FromString(peer_ip_ostream.str()));
buffered_writes.back().peer_address =
QuicSocketAddress(peer_ip_address, i + 1);
}
InSequence s;
for (int expected_num_packets_sent : {1, 2, 3, 10}) {
SCOPED_TRACE(testing::Message()
<< "expected_num_packets_sent=" << expected_num_packets_sent);
EXPECT_CALL(mock_syscalls_, Sendmmsg(_, _, _, _))
        .WillOnce(Invoke([&](int /*fd*/, mmsghdr* msgvec, unsigned int vlen,
                             int /*flags*/) {
EXPECT_LE(static_cast<unsigned int>(expected_num_packets_sent), vlen);
for (unsigned int i = 0; i < vlen; ++i) {
const BufferedWrite& buffered_write = buffered_writes[i];
const msghdr& hdr = msgvec[i].msg_hdr;
EXPECT_EQ(1u, hdr.msg_iovlen);
EXPECT_EQ(buffered_write.buffer, hdr.msg_iov->iov_base);
EXPECT_EQ(buffered_write.buf_len, hdr.msg_iov->iov_len);
sockaddr_storage expected_peer_address =
buffered_write.peer_address.generic_address();
EXPECT_EQ(0, memcmp(&expected_peer_address, hdr.msg_name,
sizeof(sockaddr_storage)));
EXPECT_EQ(buffered_write.self_address.IsInitialized(),
hdr.msg_control != nullptr);
}
return expected_num_packets_sent;
}))
.RetiresOnSaturation();
int expected_bytes_written = 0;
for (auto it = buffered_writes.cbegin();
it != buffered_writes.cbegin() + expected_num_packets_sent; ++it) {
expected_bytes_written += it->buf_len;
}
EXPECT_EQ(
WriteResult(WRITE_STATUS_OK, expected_bytes_written),
TestWriteMultiplePackets(1, buffered_writes.cbegin(),
buffered_writes.cend(), &num_packets_sent));
EXPECT_EQ(expected_num_packets_sent, num_packets_sent);
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_linux_socket_utils.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_linux_socket_utils_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
d2752f1d-79d1-488c-96f5-04c7a961ad96 | cpp | google/arolla | fingerprint | arolla/util/fingerprint.cc | arolla/util/fingerprint_test.cc | #include "arolla/util/fingerprint.h"
#include <cstddef>
#include <cstdint>
#include <ostream>
#include <string>
#include "absl/hash/hash.h"
#include "absl/numeric/int128.h"
#include "absl/random/random.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "cityhash/city.h"
#include "arolla/util/types.h"
namespace arolla {
namespace {
// Seed computed once per process and mixed into every hasher, making
// fingerprints stable within a process but not necessarily across processes.
uint32_t RuntimeSeed() {
static uint32_t result = absl::Hash<int>{}(501816262);
return result;
}
}
std::string Fingerprint::AsString() const {
return absl::StrFormat("%032x", value);
}
signed_size_t Fingerprint::PythonHash() const {
return absl::Hash<Fingerprint>()(*this);
}
std::ostream& operator<<(std::ostream& ostream,
const Fingerprint& fingerprint) {
return ostream << absl::StreamFormat("%032x", fingerprint.value);
}
Fingerprint RandomFingerprint() {
absl::BitGen bitgen;
return Fingerprint{absl::MakeUint128(absl::Uniform<uint64_t>(bitgen),
absl::Uniform<uint64_t>(bitgen))};
}
FingerprintHasher::FingerprintHasher(absl::string_view salt)
    : state_{3102879407, 2758948377} {
Combine(RuntimeSeed(), salt);
}
Fingerprint FingerprintHasher::Finish() && {
return Fingerprint{absl::MakeUint128(state_.second, state_.first)};
}
void FingerprintHasher::CombineRawBytes(const void* data, size_t size) {
state_ = cityhash::CityHash128WithSeed(
static_cast<const char*>(data), size, state_);
}
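// Illustrative sketch (not part of the original file): the typical way to
// build a fingerprint is to combine a salt with a sequence of hashable values
// and finalize the hasher, mirroring the pattern used in the unit tests:
//
//   Fingerprint fgpt =
//       FingerprintHasher("my-salt").Combine(1, 2.0, std::string("x")).Finish();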
} | #include "arolla/util/fingerprint.h"
#include <limits>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "gtest/gtest.h"
#include "absl/container/flat_hash_set.h"
#include "arolla/util/struct_field.h"
namespace arolla {
namespace {
static_assert(
std::is_trivially_constructible_v<Fingerprint>,
"Make sure that fingerprint is trivially constructed, so that adding it to "
"a struct does not slow down the struct's initialization time.");
struct A {};
static_assert(!std::is_default_constructible_v<FingerprintHasherTraits<A>>);
struct AWithFingerPrintMethod {
void ArollaFingerprint(FingerprintHasher* hasher) const {
hasher->Combine(19);
}
};
struct AWithStructFields {
int a;
double b;
constexpr static auto ArollaStructFields() {
using CppType = AWithStructFields;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(a),
AROLLA_DECLARE_STRUCT_FIELD(b),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
template <typename... Ts>
Fingerprint MakeDummyFingerprint(const Ts&... values) {
return FingerprintHasher("dummy-salt").Combine(values...).Finish();
}
TEST(FingerprintTest, Empty) {
Fingerprint fgpt{};
EXPECT_EQ(fgpt.AsString(), "00000000000000000000000000000000");
}
TEST(FingerprintTest, RandomFingerprint) {
constexpr int N = 1024;
absl::flat_hash_set<Fingerprint> set;
set.reserve(N);
for (int i = 0; i < N; ++i) {
set.insert(RandomFingerprint());
}
EXPECT_EQ(set.size(), N);
}
TEST(FingerprintTest, AWithFingerPrintMethod) {
EXPECT_EQ(MakeDummyFingerprint(AWithFingerPrintMethod()),
MakeDummyFingerprint(19));
}
TEST(FingerprintTest, AWithStructFields) {
EXPECT_EQ(MakeDummyFingerprint(AWithStructFields{.a = 5, .b = 7.}),
MakeDummyFingerprint(5, 7.));
}
TEST(FingerprintTest, TestPrimitives) {
EXPECT_NE(MakeDummyFingerprint(5), MakeDummyFingerprint(6));
EXPECT_NE(MakeDummyFingerprint<std::string>("5"),
MakeDummyFingerprint<std::string>("6"));
}
TEST(FingerprintTest, FloatingPointZero) {
EXPECT_NE(MakeDummyFingerprint(0.0).PythonHash(),
MakeDummyFingerprint(-0.0).PythonHash());
EXPECT_NE(MakeDummyFingerprint(0.f).PythonHash(),
MakeDummyFingerprint(-0.f).PythonHash());
}
TEST(FingerprintTest, FloatingPointNAN) {
EXPECT_NE(MakeDummyFingerprint(std::numeric_limits<float>::quiet_NaN())
.PythonHash(),
MakeDummyFingerprint(-std::numeric_limits<float>::quiet_NaN())
.PythonHash());
EXPECT_NE(MakeDummyFingerprint(std::numeric_limits<double>::quiet_NaN())
.PythonHash(),
MakeDummyFingerprint(-std::numeric_limits<double>::quiet_NaN())
.PythonHash());
}
TEST(FingerprintTest, PythonHash) {
EXPECT_EQ(MakeDummyFingerprint(4).PythonHash(),
MakeDummyFingerprint(4).PythonHash());
EXPECT_NE(MakeDummyFingerprint(5).PythonHash(),
MakeDummyFingerprint(6).PythonHash());
}
TEST(FingerprintTest, Less) {
EXPECT_LT(Fingerprint{27}, Fingerprint{37});
EXPECT_FALSE(Fingerprint{27} < Fingerprint{27});
}
TEST(FingerprintTest, CombineRawBytes) {
{
FingerprintHasher h1("dummy-salt");
FingerprintHasher h2("dummy-salt");
h1.CombineRawBytes("foobar", 6);
h2.CombineRawBytes("foobar", 6);
EXPECT_EQ(std::move(h1).Finish(), std::move(h2).Finish());
}
{
FingerprintHasher h1("dummy-salt");
FingerprintHasher h2("dummy-salt");
h1.CombineRawBytes("foobar", 6);
h2.CombineRawBytes("barfoo", 6);
EXPECT_NE(std::move(h1).Finish(), std::move(h2).Finish());
}
}
class Circle {
public:
Circle(int x, int y, int r) : center_(x, y), radius_(r) {
FingerprintHasher hasher("arolla::TestCircle");
hasher.Combine(center_.first, center_.second, radius_);
fingerprint_ = std::move(hasher).Finish();
}
const Fingerprint& fingerprint() { return fingerprint_; }
private:
std::pair<int, int> center_;
int radius_;
Fingerprint fingerprint_;
};
TEST(FingerprintTest, UserDefined) {
EXPECT_NE(Circle(0, 0, 1).fingerprint(), Circle(0, 0, 2).fingerprint());
EXPECT_NE(Circle(1, 1, 1).fingerprint(), Circle(0, 0, 1).fingerprint());
}
TEST(FingerprintTest, HasArollaFingerprintMethodRegression) {
struct OverloadedType {
int ArollaFingerprint() const { return 0; }
void ArollaFingerprint(FingerprintHasher*) const {}
};
EXPECT_TRUE(
fingerprint_impl::HasArollaFingerprintMethod<OverloadedType>::value);
struct WrongType {
int ArollaFingerprint() const { return 0; }
};
EXPECT_FALSE(fingerprint_impl::HasArollaFingerprintMethod<WrongType>::value);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/fingerprint.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/fingerprint_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
9336a6ec-7fe8-48a5-a72c-958db037a159 | cpp | google/quiche | simple_session_notifier | quiche/quic/test_tools/simple_session_notifier.cc | quiche/quic/test_tools/simple_session_notifier_test.cc | #include "quiche/quic/test_tools/simple_session_notifier.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
SimpleSessionNotifier::SimpleSessionNotifier(QuicConnection* connection)
: last_control_frame_id_(kInvalidControlFrameId),
least_unacked_(1),
least_unsent_(1),
connection_(connection) {}
SimpleSessionNotifier::~SimpleSessionNotifier() {
while (!control_frames_.empty()) {
DeleteFrame(&control_frames_.front());
control_frames_.pop_front();
}
}
SimpleSessionNotifier::StreamState::StreamState()
: bytes_total(0),
bytes_sent(0),
fin_buffered(false),
fin_sent(false),
fin_outstanding(false),
fin_lost(false) {}
SimpleSessionNotifier::StreamState::~StreamState() {}
QuicConsumedData SimpleSessionNotifier::WriteOrBufferData(
QuicStreamId id, QuicByteCount data_length, StreamSendingState state) {
return WriteOrBufferData(id, data_length, state, NOT_RETRANSMISSION);
}
QuicConsumedData SimpleSessionNotifier::WriteOrBufferData(
QuicStreamId id, QuicByteCount data_length, StreamSendingState state,
TransmissionType transmission_type) {
if (!stream_map_.contains(id)) {
stream_map_[id] = StreamState();
}
StreamState& stream_state = stream_map_.find(id)->second;
const bool had_buffered_data =
HasBufferedStreamData() || HasBufferedControlFrames();
QuicStreamOffset offset = stream_state.bytes_sent;
QUIC_DVLOG(1) << "WriteOrBuffer stream_id: " << id << " [" << offset << ", "
<< offset + data_length << "), fin: " << (state != NO_FIN);
stream_state.bytes_total += data_length;
stream_state.fin_buffered = state != NO_FIN;
if (had_buffered_data) {
QUIC_DLOG(WARNING) << "Connection is write blocked";
return {0, false};
}
const size_t length = stream_state.bytes_total - stream_state.bytes_sent;
connection_->SetTransmissionType(transmission_type);
QuicConsumedData consumed =
connection_->SendStreamData(id, length, stream_state.bytes_sent, state);
QUIC_DVLOG(1) << "consumed: " << consumed;
OnStreamDataConsumed(id, stream_state.bytes_sent, consumed.bytes_consumed,
consumed.fin_consumed);
return consumed;
}
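// Illustrative sketch (not part of the original file): if the connection is
// already write blocked, WriteOrBufferData only records the data and returns
// a zero-byte QuicConsumedData; a later OnCanWrite() call drains the buffered
// bytes.
//
//   notifier.WriteOrBufferData(/*id=*/5, /*data_length=*/512, FIN);
//   ...
//   notifier.OnCanWrite();  // Retries buffered stream data and control frames.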
void SimpleSessionNotifier::OnStreamDataConsumed(QuicStreamId id,
QuicStreamOffset offset,
QuicByteCount data_length,
bool fin) {
StreamState& state = stream_map_.find(id)->second;
if (QuicUtils::IsCryptoStreamId(connection_->transport_version(), id) &&
data_length > 0) {
crypto_bytes_transferred_[connection_->encryption_level()].Add(
offset, offset + data_length);
}
state.bytes_sent += data_length;
state.fin_sent = fin;
state.fin_outstanding = fin;
}
size_t SimpleSessionNotifier::WriteCryptoData(EncryptionLevel level,
QuicByteCount data_length,
QuicStreamOffset offset) {
crypto_state_[level].bytes_total += data_length;
size_t bytes_written =
connection_->SendCryptoData(level, data_length, offset);
crypto_state_[level].bytes_sent += bytes_written;
crypto_bytes_transferred_[level].Add(offset, offset + bytes_written);
return bytes_written;
}
void SimpleSessionNotifier::WriteOrBufferRstStream(
QuicStreamId id, QuicRstStreamErrorCode error,
QuicStreamOffset bytes_written) {
QUIC_DVLOG(1) << "Writing RST_STREAM_FRAME";
const bool had_buffered_data =
HasBufferedStreamData() || HasBufferedControlFrames();
control_frames_.emplace_back((QuicFrame(new QuicRstStreamFrame(
++last_control_frame_id_, id, error, bytes_written))));
if (error != QUIC_STREAM_NO_ERROR) {
stream_map_.erase(id);
}
if (had_buffered_data) {
QUIC_DLOG(WARNING) << "Connection is write blocked";
return;
}
WriteBufferedControlFrames();
}
void SimpleSessionNotifier::WriteOrBufferWindowUpate(
QuicStreamId id, QuicStreamOffset byte_offset) {
QUIC_DVLOG(1) << "Writing WINDOW_UPDATE";
const bool had_buffered_data =
HasBufferedStreamData() || HasBufferedControlFrames();
QuicControlFrameId control_frame_id = ++last_control_frame_id_;
control_frames_.emplace_back(
(QuicFrame(QuicWindowUpdateFrame(control_frame_id, id, byte_offset))));
if (had_buffered_data) {
QUIC_DLOG(WARNING) << "Connection is write blocked";
return;
}
WriteBufferedControlFrames();
}
void SimpleSessionNotifier::WriteOrBufferPing() {
QUIC_DVLOG(1) << "Writing PING_FRAME";
const bool had_buffered_data =
HasBufferedStreamData() || HasBufferedControlFrames();
control_frames_.emplace_back(
(QuicFrame(QuicPingFrame(++last_control_frame_id_))));
if (had_buffered_data) {
QUIC_DLOG(WARNING) << "Connection is write blocked";
return;
}
WriteBufferedControlFrames();
}
void SimpleSessionNotifier::WriteOrBufferAckFrequency(
const QuicAckFrequencyFrame& ack_frequency_frame) {
QUIC_DVLOG(1) << "Writing ACK_FREQUENCY";
const bool had_buffered_data =
HasBufferedStreamData() || HasBufferedControlFrames();
QuicControlFrameId control_frame_id = ++last_control_frame_id_;
control_frames_.emplace_back((
QuicFrame(new QuicAckFrequencyFrame(control_frame_id,
control_frame_id,
ack_frequency_frame.packet_tolerance,
ack_frequency_frame.max_ack_delay))));
if (had_buffered_data) {
QUIC_DLOG(WARNING) << "Connection is write blocked";
return;
}
WriteBufferedControlFrames();
}
// Marks all crypto data sent at the initial (unencrypted) encryption level as
// acked, so it is no longer tracked as outstanding or retransmittable.
void SimpleSessionNotifier::NeuterUnencryptedData() {
if (QuicVersionUsesCryptoFrames(connection_->transport_version())) {
for (const auto& interval : crypto_bytes_transferred_[ENCRYPTION_INITIAL]) {
QuicCryptoFrame crypto_frame(ENCRYPTION_INITIAL, interval.min(),
interval.max() - interval.min());
OnFrameAcked(QuicFrame(&crypto_frame), QuicTime::Delta::Zero(),
QuicTime::Zero());
}
return;
}
for (const auto& interval : crypto_bytes_transferred_[ENCRYPTION_INITIAL]) {
QuicStreamFrame stream_frame(
QuicUtils::GetCryptoStreamId(connection_->transport_version()), false,
interval.min(), interval.max() - interval.min());
OnFrameAcked(QuicFrame(stream_frame), QuicTime::Delta::Zero(),
QuicTime::Zero());
}
}
void SimpleSessionNotifier::OnCanWrite() {
if (connection_->framer().is_processing_packet()) {
QUIC_BUG(simple_notifier_write_mid_packet_processing)
<< "Try to write mid packet processing.";
return;
}
if (!RetransmitLostCryptoData() || !RetransmitLostControlFrames() ||
!RetransmitLostStreamData()) {
return;
}
if (!WriteBufferedCryptoData() || !WriteBufferedControlFrames()) {
return;
}
for (const auto& pair : stream_map_) {
const auto& state = pair.second;
if (!StreamHasBufferedData(pair.first)) {
continue;
}
const size_t length = state.bytes_total - state.bytes_sent;
const bool can_bundle_fin =
state.fin_buffered && (state.bytes_sent + length == state.bytes_total);
connection_->SetTransmissionType(NOT_RETRANSMISSION);
QuicConnection::ScopedEncryptionLevelContext context(
connection_,
connection_->framer().GetEncryptionLevelToSendApplicationData());
QuicConsumedData consumed = connection_->SendStreamData(
pair.first, length, state.bytes_sent, can_bundle_fin ? FIN : NO_FIN);
QUIC_DVLOG(1) << "Tries to write stream_id: " << pair.first << " ["
<< state.bytes_sent << ", " << state.bytes_sent + length
<< "), fin: " << can_bundle_fin
<< ", and consumed: " << consumed;
OnStreamDataConsumed(pair.first, state.bytes_sent, consumed.bytes_consumed,
consumed.fin_consumed);
if (length != consumed.bytes_consumed ||
(can_bundle_fin && !consumed.fin_consumed)) {
break;
}
}
}
void SimpleSessionNotifier::OnStreamReset(QuicStreamId id,
QuicRstStreamErrorCode error) {
if (error != QUIC_STREAM_NO_ERROR) {
stream_map_.erase(id);
}
}
bool SimpleSessionNotifier::WillingToWrite() const {
QUIC_DVLOG(1) << "has_buffered_control_frames: " << HasBufferedControlFrames()
<< " as_lost_control_frames: " << !lost_control_frames_.empty()
<< " has_buffered_stream_data: " << HasBufferedStreamData()
<< " has_lost_stream_data: " << HasLostStreamData();
return HasBufferedControlFrames() || !lost_control_frames_.empty() ||
HasBufferedStreamData() || HasLostStreamData();
}
QuicByteCount SimpleSessionNotifier::StreamBytesSent() const {
QuicByteCount bytes_sent = 0;
for (const auto& pair : stream_map_) {
const auto& state = pair.second;
bytes_sent += state.bytes_sent;
}
return bytes_sent;
}
QuicByteCount SimpleSessionNotifier::StreamBytesToSend() const {
QuicByteCount bytes_to_send = 0;
for (const auto& pair : stream_map_) {
const auto& state = pair.second;
bytes_to_send += (state.bytes_total - state.bytes_sent);
}
return bytes_to_send;
}
bool SimpleSessionNotifier::OnFrameAcked(const QuicFrame& frame,
                                         QuicTime::Delta /*ack_delay_time*/,
                                         QuicTime /*receive_timestamp*/) {
QUIC_DVLOG(1) << "Acking " << frame;
if (frame.type == CRYPTO_FRAME) {
StreamState* state = &crypto_state_[frame.crypto_frame->level];
QuicStreamOffset offset = frame.crypto_frame->offset;
QuicByteCount data_length = frame.crypto_frame->data_length;
QuicIntervalSet<QuicStreamOffset> newly_acked(offset, offset + data_length);
newly_acked.Difference(state->bytes_acked);
if (newly_acked.Empty()) {
return false;
}
state->bytes_acked.Add(offset, offset + data_length);
state->pending_retransmissions.Difference(offset, offset + data_length);
return true;
}
if (frame.type != STREAM_FRAME) {
return OnControlFrameAcked(frame);
}
if (!stream_map_.contains(frame.stream_frame.stream_id)) {
return false;
}
auto* state = &stream_map_.find(frame.stream_frame.stream_id)->second;
QuicStreamOffset offset = frame.stream_frame.offset;
QuicByteCount data_length = frame.stream_frame.data_length;
QuicIntervalSet<QuicStreamOffset> newly_acked(offset, offset + data_length);
newly_acked.Difference(state->bytes_acked);
const bool fin_newly_acked = frame.stream_frame.fin && state->fin_outstanding;
if (newly_acked.Empty() && !fin_newly_acked) {
return false;
}
state->bytes_acked.Add(offset, offset + data_length);
if (fin_newly_acked) {
state->fin_outstanding = false;
state->fin_lost = false;
}
state->pending_retransmissions.Difference(offset, offset + data_length);
return true;
}
void SimpleSessionNotifier::OnFrameLost(const QuicFrame& frame) {
QUIC_DVLOG(1) << "Losting " << frame;
if (frame.type == CRYPTO_FRAME) {
StreamState* state = &crypto_state_[frame.crypto_frame->level];
QuicStreamOffset offset = frame.crypto_frame->offset;
QuicByteCount data_length = frame.crypto_frame->data_length;
QuicIntervalSet<QuicStreamOffset> bytes_lost(offset, offset + data_length);
bytes_lost.Difference(state->bytes_acked);
if (bytes_lost.Empty()) {
return;
}
for (const auto& lost : bytes_lost) {
state->pending_retransmissions.Add(lost.min(), lost.max());
}
return;
}
if (frame.type != STREAM_FRAME) {
OnControlFrameLost(frame);
return;
}
if (!stream_map_.contains(frame.stream_frame.stream_id)) {
return;
}
auto* state = &stream_map_.find(frame.stream_frame.stream_id)->second;
QuicStreamOffset offset = frame.stream_frame.offset;
QuicByteCount data_length = frame.stream_frame.data_length;
QuicIntervalSet<QuicStreamOffset> bytes_lost(offset, offset + data_length);
bytes_lost.Difference(state->bytes_acked);
const bool fin_lost = state->fin_outstanding && frame.stream_frame.fin;
if (bytes_lost.Empty() && !fin_lost) {
return;
}
for (const auto& lost : bytes_lost) {
state->pending_retransmissions.Add(lost.min(), lost.max());
}
state->fin_lost = fin_lost;
}
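// Illustrative sketch (not part of the original file): lost frames are only
// recorded here; the actual retransmission happens when the connection later
// calls OnCanWrite(), which drains pending_retransmissions before new data.
//
//   notifier.OnFrameLost(QuicFrame(stream_frame));
//   notifier.OnCanWrite();  // Resends the lost byte ranges (and a lost fin).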
bool SimpleSessionNotifier::RetransmitFrames(const QuicFrames& frames,
TransmissionType type) {
QuicConnection::ScopedPacketFlusher retransmission_flusher(connection_);
connection_->SetTransmissionType(type);
for (const QuicFrame& frame : frames) {
if (frame.type == CRYPTO_FRAME) {
const StreamState& state = crypto_state_[frame.crypto_frame->level];
const EncryptionLevel current_encryption_level =
connection_->encryption_level();
QuicIntervalSet<QuicStreamOffset> retransmission(
frame.crypto_frame->offset,
frame.crypto_frame->offset + frame.crypto_frame->data_length);
retransmission.Difference(state.bytes_acked);
for (const auto& interval : retransmission) {
QuicStreamOffset offset = interval.min();
QuicByteCount length = interval.max() - interval.min();
connection_->SetDefaultEncryptionLevel(frame.crypto_frame->level);
size_t consumed = connection_->SendCryptoData(frame.crypto_frame->level,
length, offset);
if (consumed < length) {
return false;
}
}
connection_->SetDefaultEncryptionLevel(current_encryption_level);
}
if (frame.type != STREAM_FRAME) {
if (GetControlFrameId(frame) == kInvalidControlFrameId) {
continue;
}
QuicFrame copy = CopyRetransmittableControlFrame(frame);
if (!connection_->SendControlFrame(copy)) {
DeleteFrame(©);
return false;
}
continue;
}
if (!stream_map_.contains(frame.stream_frame.stream_id)) {
continue;
}
const auto& state = stream_map_.find(frame.stream_frame.stream_id)->second;
QuicIntervalSet<QuicStreamOffset> retransmission(
frame.stream_frame.offset,
frame.stream_frame.offset + frame.stream_frame.data_length);
EncryptionLevel retransmission_encryption_level =
connection_->encryption_level();
if (QuicUtils::IsCryptoStreamId(connection_->transport_version(),
frame.stream_frame.stream_id)) {
for (size_t i = 0; i < NUM_ENCRYPTION_LEVELS; ++i) {
if (retransmission.Intersects(crypto_bytes_transferred_[i])) {
retransmission_encryption_level = static_cast<EncryptionLevel>(i);
retransmission.Intersection(crypto_bytes_transferred_[i]);
break;
}
}
}
retransmission.Difference(state.bytes_acked);
bool retransmit_fin = frame.stream_frame.fin && state.fin_outstanding;
QuicConsumedData consumed(0, false);
for (const auto& interval : retransmission) {
QuicStreamOffset retransmission_offset = interval.min();
QuicByteCount retransmission_length = interval.max() - interval.min();
const bool can_bundle_fin =
retransmit_fin &&
(retransmission_offset + retransmission_length == state.bytes_sent);
QuicConnection::ScopedEncryptionLevelContext context(
connection_,
QuicUtils::IsCryptoStreamId(connection_->transport_version(),
frame.stream_frame.stream_id)
? retransmission_encryption_level
: connection_->framer()
.GetEncryptionLevelToSendApplicationData());
consumed = connection_->SendStreamData(
frame.stream_frame.stream_id, retransmission_length,
retransmission_offset, can_bundle_fin ? FIN : NO_FIN);
QUIC_DVLOG(1) << "stream " << frame.stream_frame.stream_id
<< " is forced to retransmit stream data ["
<< retransmission_offset << ", "
<< retransmission_offset + retransmission_length
<< ") and fin: " << can_bundle_fin
<< ", consumed: " << consumed;
if (can_bundle_fin) {
retransmit_fin = !consumed.fin_consumed;
}
if (consumed.bytes_consumed < retransmission_length ||
(can_bundle_fin && !consumed.fin_consumed)) {
return false;
}
}
if (retransmit_fin) {
QUIC_DVLOG(1) << "stream " << frame.stream_frame.stream_id
<< " retransmits fin only frame.";
consumed = connection_->SendStreamData(frame.stream_frame.stream_id, 0,
state.bytes_sent, FIN);
if (!consumed.fin_consumed) {
return false;
}
}
}
return true;
}
bool SimpleSessionNotifier::IsFrameOutstanding(const QuicFrame& frame) const {
if (frame.type == CRYPTO_FRAME) {
QuicStreamOffset offset = frame.crypto_frame->offset;
QuicByteCount data_length = frame.crypto_frame->data_length;
bool ret = data_length > 0 &&
!crypto_state_[frame.crypto_frame->level].bytes_acked.Contains(
offset, offset + data_length);
return ret;
}
if (frame.type != STREAM_FRAME) {
return IsControlFrameOutstanding(frame);
}
if (!stream_map_.contains(frame.stream_frame.stream_id)) {
return false;
}
const auto& state = stream_map_.find(frame.stream_frame.stream_id)->second;
QuicStreamOffset offset = frame.stream_frame.offset;
QuicByteCount data_length = frame.stream_frame.data_length;
return (data_length > 0 &&
!state.bytes_acked.Contains(offset, offset + data_length)) ||
(frame.stream_frame.fin && state.fin_outstanding);
}
bool SimpleSessionNotifier::HasUnackedCryptoData() const {
if (QuicVersionUsesCryptoFrames(connection_->transport_version())) {
for (size_t i = 0; i < NUM_ENCRYPTION_LEVELS; ++i) {
const StreamState& state = crypto_state_[i];
if (state.bytes_total > state.bytes_sent) {
return true;
}
QuicIntervalSet<QuicStreamOffset> bytes_to_ack(0, state.bytes_total);
bytes_to_ack.Difference(state.bytes_acked);
if (!bytes_to_ack.Empty()) {
return true;
}
}
return false;
}
if (!stream_map_.contains(
QuicUtils::GetCryptoStreamId(connection_->transport_version()))) {
return false;
}
const auto& state =
stream_map_
.find(QuicUtils::GetCryptoStreamId(connection_->transport_version()))
->second;
if (state.bytes_total > state.bytes_sent) {
return true;
}
QuicIntervalSet<QuicStreamOffset> bytes_to_ack(0, state.bytes_total);
bytes_to_ack.Difference(state.bytes_acked);
return !bytes_to_ack.Empty();
}
bool SimpleSessionNotifier::HasUnackedStreamData() const {
for (const auto& it : stream_map_) {
if (StreamIsWaitingForAcks(it.first)) return true;
}
return false;
}
bool SimpleSessionNotifier::OnControlFrameAcked(const QuicFrame& frame) {
QuicControlFrameId id = GetControlFrameId(frame);
if (id == kInvalidControlFrameId) {
return false;
}
QUICHE_DCHECK(id < least_unacked_ + control_frames_.size());
if (id < least_unacked_ ||
GetControlFrameId(control_frames_.at(id - least_unacked_)) ==
kInvalidControlFrameId) {
return false;
}
SetControlFrameId(kInvalidControlFrameId,
&control_frames_.at(id - least_unacked_));
lost_control_frames_.erase(id);
while (!control_frames_.empty() &&
GetControlFrameId(control_frames_.front()) == kInvalidControlFrameId) {
DeleteFrame(&control_frames_.front());
control_frames_.pop_front();
++least_unacked_;
}
return true;
}
void SimpleSessionNotifier::OnControlFrameLost(const QuicFrame& frame) {
QuicControlFrameId id = GetControlFrameId(frame);
if (id == kInvalidControlFrameId) {
return;
}
QUICHE_DCHECK(id < least_unacked_ + control_frames_.size());
if (id < least_unacked_ ||
GetControlFrameId(control_frames_.at(id - least_unacked_)) ==
kInvalidControlFrameId) {
return;
}
if (!lost_control_frames_.contains(id)) {
lost_control_frames_[id] = true;
}
}
bool SimpleSessionNotifier::IsControlFrameOutstanding(
const QuicFrame& frame) const {
QuicControlFrameId id = GetControlFrameId(frame);
if (id == kInvalidControlFrameId) {
return false;
}
return id < least_unacked_ + control_frames_.size() && id >= least_unacked_ &&
GetControlFrameId(control_frames_.at(id - least_unacked_)) !=
kInvalidControlFrameId;
}
bool SimpleSessionNotifier::RetransmitLostControlFrames() {
while (!lost_control_frames_.empty()) {
QuicFrame pending = control_frames_.at(lost_control_frames_.begin()->first -
least_unacked_);
QuicFrame copy = CopyRetransmittableControlFrame(pending);
connection_->SetTransmissionType(LOSS_RETRANSMISSION);
if (!connection_->SendControlFrame(copy)) {
DeleteFrame(©);
break;
}
lost_control_frames_.pop_front();
}
return lost_control_frames_.empty();
}
bool SimpleSessionNotifier::RetransmitLostCryptoData() {
if (QuicVersionUsesCryptoFrames(connection_->transport_version())) {
for (EncryptionLevel level :
{ENCRYPTION_INITIAL, ENCRYPTION_HANDSHAKE, ENCRYPTION_ZERO_RTT,
ENCRYPTION_FORWARD_SECURE}) {
auto& state = crypto_state_[level];
while (!state.pending_retransmissions.Empty()) {
connection_->SetTransmissionType(HANDSHAKE_RETRANSMISSION);
EncryptionLevel current_encryption_level =
connection_->encryption_level();
connection_->SetDefaultEncryptionLevel(level);
QuicIntervalSet<QuicStreamOffset> retransmission(
state.pending_retransmissions.begin()->min(),
state.pending_retransmissions.begin()->max());
retransmission.Intersection(crypto_bytes_transferred_[level]);
QuicStreamOffset retransmission_offset = retransmission.begin()->min();
QuicByteCount retransmission_length =
retransmission.begin()->max() - retransmission.begin()->min();
size_t bytes_consumed = connection_->SendCryptoData(
level, retransmission_length, retransmission_offset);
connection_->SetDefaultEncryptionLevel(current_encryption_level);
state.pending_retransmissions.Difference(
retransmission_offset, retransmission_offset + bytes_consumed);
if (bytes_consumed < retransmission_length) {
return false;
}
}
}
return true;
}
if (!stream_map_.contains(
QuicUtils::GetCryptoStreamId(connection_->transport_version()))) {
return true;
}
auto& state =
stream_map_
.find(QuicUtils::GetCryptoStreamId(connection_->transport_version()))
->second;
while (!state.pending_retransmissions.Empty()) {
connection_->SetTransmissionType(HANDSHAKE_RETRANSMISSION);
QuicIntervalSet<QuicStreamOffset> retransmission(
state.pending_retransmissions.begin()->min(),
state.pending_retransmissions.begin()->max());
EncryptionLevel retransmission_encryption_level = ENCRYPTION_INITIAL;
for (size_t i = 0; i < NUM_ENCRYPTION_LEVELS; ++i) {
if (retransmission.Intersects(crypto_bytes_transferred_[i])) {
retransmission_encryption_level = static_cast<EncryptionLevel>(i);
retransmission.Intersection(crypto_bytes_transferred_[i]);
break;
}
}
QuicStreamOffset retransmission_offset = retransmission.begin()->min();
QuicByteCount retransmission_length =
retransmission.begin()->max() - retransmission.begin()->min();
EncryptionLevel current_encryption_level = connection_->encryption_level();
connection_->SetDefaultEncryptionLevel(retransmission_encryption_level);
QuicConsumedData consumed = connection_->SendStreamData(
QuicUtils::GetCryptoStreamId(connection_->transport_version()),
retransmission_length, retransmission_offset, NO_FIN);
connection_->SetDefaultEncryptionLevel(current_encryption_level);
state.pending_retransmissions.Difference(
retransmission_offset, retransmission_offset + consumed.bytes_consumed);
if (consumed.bytes_consumed < retransmission_length) {
break;
}
}
return state.pending_retransmissions.Empty();
}
bool SimpleSessionNotifier::RetransmitLostStreamData() {
for (auto& pair : stream_map_) {
StreamState& state = pair.second;
QuicConsumedData consumed(0, false);
while (!state.pending_retransmissions.Empty() || state.fin_lost) {
connection_->SetTransmissionType(LOSS_RETRANSMISSION);
if (state.pending_retransmissions.Empty()) {
QUIC_DVLOG(1) << "stream " << pair.first
<< " retransmits fin only frame.";
consumed =
connection_->SendStreamData(pair.first, 0, state.bytes_sent, FIN);
state.fin_lost = !consumed.fin_consumed;
if (state.fin_lost) {
QUIC_DLOG(INFO) << "Connection is write blocked";
return false;
}
} else {
QuicStreamOffset offset = state.pending_retransmissions.begin()->min();
QuicByteCount length = state.pending_retransmissions.begin()->max() -
state.pending_retransmissions.begin()->min();
const bool can_bundle_fin =
state.fin_lost && (offset + length == state.bytes_sent);
consumed = connection_->SendStreamData(pair.first, length, offset,
can_bundle_fin ? FIN : NO_FIN);
QUIC_DVLOG(1) << "stream " << pair.first
<< " tries to retransmit stream data [" << offset << ", "
<< offset + length << ") and fin: " << can_bundle_fin
<< ", consumed: " << consumed;
state.pending_retransmissions.Difference(
offset, offset + consumed.bytes_consumed);
if (consumed.fin_consumed) {
state.fin_lost = false;
}
if (length > consumed.bytes_consumed ||
(can_bundle_fin && !consumed.fin_consumed)) {
QUIC_DVLOG(1) << "Connection is write blocked";
break;
}
}
}
}
return !HasLostStreamData();
}
bool SimpleSessionNotifier::WriteBufferedCryptoData() {
for (size_t i = 0; i < NUM_ENCRYPTION_LEVELS; ++i) {
const StreamState& state = crypto_state_[i];
QuicIntervalSet<QuicStreamOffset> buffered_crypto_data(0,
state.bytes_total);
buffered_crypto_data.Difference(crypto_bytes_transferred_[i]);
for (const auto& interval : buffered_crypto_data) {
size_t bytes_written = connection_->SendCryptoData(
static_cast<EncryptionLevel>(i), interval.Length(), interval.min());
crypto_state_[i].bytes_sent += bytes_written;
crypto_bytes_transferred_[i].Add(interval.min(),
interval.min() + bytes_written);
if (bytes_written < interval.Length()) {
return false;
}
}
}
return true;
}
bool SimpleSessionNotifier::WriteBufferedControlFrames() {
while (HasBufferedControlFrames()) {
QuicFrame frame_to_send =
control_frames_.at(least_unsent_ - least_unacked_);
QuicFrame copy = CopyRetransmittableControlFrame(frame_to_send);
connection_->SetTransmissionType(NOT_RETRANSMISSION);
if (!connection_->SendControlFrame(copy)) {
DeleteFrame(©);
break;
}
++least_unsent_;
}
return !HasBufferedControlFrames();
}
bool SimpleSessionNotifier::HasBufferedControlFrames() const {
return least_unsent_ < least_unacked_ + control_frames_.size();
}
bool SimpleSessionNotifier::HasBufferedStreamData() const {
for (const auto& pair : stream_map_) {
const auto& state = pair.second;
if (state.bytes_total > state.bytes_sent ||
(state.fin_buffered && !state.fin_sent)) {
return true;
}
}
return false;
}
bool SimpleSessionNotifier::StreamIsWaitingForAcks(QuicStreamId id) const {
if (!stream_map_.contains(id)) {
return false;
}
const StreamState& state = stream_map_.find(id)->second;
return !state.bytes_acked.Contains(0, state.bytes_sent) ||
state.fin_outstanding;
}
bool SimpleSessionNotifier::StreamHasBufferedData(QuicStreamId id) const {
if (!stream_map_.contains(id)) {
return false;
}
const StreamState& state = stream_map_.find(id)->second;
return state.bytes_total > state.bytes_sent ||
(state.fin_buffered && !state.fin_sent);
}
bool SimpleSessionNotifier::HasLostStreamData() const {
for (const auto& pair : stream_map_) {
const auto& state = pair.second;
if (!state.pending_retransmissions.Empty() || state.fin_lost) {
return true;
}
}
return false;
}
}
} | #include "quiche/quic/test_tools/simple_session_notifier.h"
#include <memory>
#include <string>
#include <utility>
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simple_data_producer.h"
using testing::_;
using testing::InSequence;
using testing::Return;
using testing::StrictMock;
namespace quic {
namespace test {
namespace {
class MockQuicConnectionWithSendStreamData : public MockQuicConnection {
public:
MockQuicConnectionWithSendStreamData(MockQuicConnectionHelper* helper,
MockAlarmFactory* alarm_factory,
Perspective perspective)
: MockQuicConnection(helper, alarm_factory, perspective) {}
MOCK_METHOD(QuicConsumedData, SendStreamData,
(QuicStreamId id, size_t write_length, QuicStreamOffset offset,
StreamSendingState state),
(override));
};
class SimpleSessionNotifierTest : public QuicTest {
public:
SimpleSessionNotifierTest()
: connection_(&helper_, &alarm_factory_, Perspective::IS_CLIENT),
notifier_(&connection_) {
connection_.set_visitor(&visitor_);
connection_.SetSessionNotifier(¬ifier_);
EXPECT_FALSE(notifier_.WillingToWrite());
EXPECT_EQ(0u, notifier_.StreamBytesSent());
EXPECT_FALSE(notifier_.HasBufferedStreamData());
}
bool ControlFrameConsumed(const QuicFrame& frame) {
DeleteFrame(&const_cast<QuicFrame&>(frame));
return true;
}
MockQuicConnectionHelper helper_;
MockAlarmFactory alarm_factory_;
MockQuicConnectionVisitor visitor_;
StrictMock<MockQuicConnectionWithSendStreamData> connection_;
SimpleSessionNotifier notifier_;
};
TEST_F(SimpleSessionNotifierTest, WriteOrBufferData) {
InSequence s;
EXPECT_CALL(connection_, SendStreamData(3, 1024, 0, NO_FIN))
.WillOnce(Return(QuicConsumedData(1024, false)));
notifier_.WriteOrBufferData(3, 1024, NO_FIN);
EXPECT_EQ(0u, notifier_.StreamBytesToSend());
EXPECT_CALL(connection_, SendStreamData(5, 512, 0, NO_FIN))
.WillOnce(Return(QuicConsumedData(512, false)));
notifier_.WriteOrBufferData(5, 512, NO_FIN);
EXPECT_FALSE(notifier_.WillingToWrite());
EXPECT_CALL(connection_, SendStreamData(5, 512, 512, FIN))
.WillOnce(Return(QuicConsumedData(256, false)));
notifier_.WriteOrBufferData(5, 512, FIN);
EXPECT_TRUE(notifier_.WillingToWrite());
EXPECT_EQ(1792u, notifier_.StreamBytesSent());
EXPECT_EQ(256u, notifier_.StreamBytesToSend());
EXPECT_TRUE(notifier_.HasBufferedStreamData());
EXPECT_CALL(connection_, SendStreamData(7, 1024, 0, FIN)).Times(0);
notifier_.WriteOrBufferData(7, 1024, FIN);
EXPECT_EQ(1792u, notifier_.StreamBytesSent());
}
TEST_F(SimpleSessionNotifierTest, WriteOrBufferRstStream) {
InSequence s;
EXPECT_CALL(connection_, SendStreamData(5, 1024, 0, FIN))
.WillOnce(Return(QuicConsumedData(1024, true)));
notifier_.WriteOrBufferData(5, 1024, FIN);
EXPECT_TRUE(notifier_.StreamIsWaitingForAcks(5));
EXPECT_TRUE(notifier_.HasUnackedStreamData());
EXPECT_CALL(connection_, SendControlFrame(_))
.WillRepeatedly(
Invoke(this, &SimpleSessionNotifierTest::ControlFrameConsumed));
notifier_.WriteOrBufferRstStream(5, QUIC_STREAM_NO_ERROR, 1024);
EXPECT_TRUE(notifier_.StreamIsWaitingForAcks(5));
EXPECT_TRUE(notifier_.HasUnackedStreamData());
notifier_.WriteOrBufferRstStream(5, QUIC_ERROR_PROCESSING_STREAM, 1024);
EXPECT_FALSE(notifier_.StreamIsWaitingForAcks(5));
EXPECT_FALSE(notifier_.HasUnackedStreamData());
}
TEST_F(SimpleSessionNotifierTest, WriteOrBufferPing) {
InSequence s;
EXPECT_CALL(connection_, SendControlFrame(_))
.WillRepeatedly(
Invoke(this, &SimpleSessionNotifierTest::ControlFrameConsumed));
notifier_.WriteOrBufferPing();
EXPECT_EQ(0u, notifier_.StreamBytesToSend());
EXPECT_FALSE(notifier_.WillingToWrite());
EXPECT_CALL(connection_, SendStreamData(3, 1024, 0, NO_FIN))
.WillOnce(Return(QuicConsumedData(1024, false)));
notifier_.WriteOrBufferData(3, 1024, NO_FIN);
EXPECT_EQ(0u, notifier_.StreamBytesToSend());
EXPECT_CALL(connection_, SendStreamData(5, 512, 0, NO_FIN))
.WillOnce(Return(QuicConsumedData(256, false)));
notifier_.WriteOrBufferData(5, 512, NO_FIN);
EXPECT_TRUE(notifier_.WillingToWrite());
EXPECT_CALL(connection_, SendControlFrame(_)).Times(0);
notifier_.WriteOrBufferPing();
}
TEST_F(SimpleSessionNotifierTest, NeuterUnencryptedData) {
if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
return;
}
InSequence s;
connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
EXPECT_CALL(connection_, SendStreamData(QuicUtils::GetCryptoStreamId(
connection_.transport_version()),
1024, 0, NO_FIN))
.WillOnce(Return(QuicConsumedData(1024, false)));
notifier_.WriteOrBufferData(
QuicUtils::GetCryptoStreamId(connection_.transport_version()), 1024,
NO_FIN);
connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
EXPECT_CALL(connection_, SendStreamData(QuicUtils::GetCryptoStreamId(
connection_.transport_version()),
1024, 1024, NO_FIN))
.WillOnce(Return(QuicConsumedData(1024, false)));
notifier_.WriteOrBufferData(
QuicUtils::GetCryptoStreamId(connection_.transport_version()), 1024,
NO_FIN);
QuicStreamFrame stream_frame(
QuicUtils::GetCryptoStreamId(connection_.transport_version()), false,
1024, 1024);
notifier_.OnFrameAcked(QuicFrame(stream_frame), QuicTime::Delta::Zero(),
QuicTime::Zero());
EXPECT_TRUE(notifier_.StreamIsWaitingForAcks(
QuicUtils::GetCryptoStreamId(connection_.transport_version())));
EXPECT_TRUE(notifier_.HasUnackedStreamData());
notifier_.NeuterUnencryptedData();
EXPECT_FALSE(notifier_.StreamIsWaitingForAcks(
QuicUtils::GetCryptoStreamId(connection_.transport_version())));
EXPECT_FALSE(notifier_.HasUnackedStreamData());
}
TEST_F(SimpleSessionNotifierTest, OnCanWrite) {
if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
return;
}
InSequence s;
connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
EXPECT_CALL(connection_, SendStreamData(QuicUtils::GetCryptoStreamId(
connection_.transport_version()),
1024, 0, NO_FIN))
.WillOnce(Return(QuicConsumedData(1024, false)));
notifier_.WriteOrBufferData(
QuicUtils::GetCryptoStreamId(connection_.transport_version()), 1024,
NO_FIN);
connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
EXPECT_CALL(connection_, SendStreamData(QuicUtils::GetCryptoStreamId(
connection_.transport_version()),
1024, 1024, NO_FIN))
.WillOnce(Return(QuicConsumedData(1024, false)));
notifier_.WriteOrBufferData(
QuicUtils::GetCryptoStreamId(connection_.transport_version()), 1024,
NO_FIN);
EXPECT_CALL(connection_, SendStreamData(3, 1024, 0, FIN))
.WillOnce(Return(QuicConsumedData(512, false)));
notifier_.WriteOrBufferData(3, 1024, FIN);
EXPECT_CALL(connection_, SendStreamData(5, _, _, _)).Times(0);
notifier_.WriteOrBufferData(5, 1024, NO_FIN);
EXPECT_CALL(connection_, SendControlFrame(_)).Times(0);
notifier_.WriteOrBufferRstStream(5, QUIC_ERROR_PROCESSING_STREAM, 1024);
QuicStreamFrame frame1(
QuicUtils::GetCryptoStreamId(connection_.transport_version()), false, 500,
1000);
QuicStreamFrame frame2(3, false, 0, 512);
notifier_.OnFrameLost(QuicFrame(frame1));
notifier_.OnFrameLost(QuicFrame(frame2));
EXPECT_CALL(connection_, SendStreamData(QuicUtils::GetCryptoStreamId(
connection_.transport_version()),
524, 500, NO_FIN))
.WillOnce(Return(QuicConsumedData(524, false)));
EXPECT_CALL(connection_, SendStreamData(QuicUtils::GetCryptoStreamId(
connection_.transport_version()),
476, 1024, NO_FIN))
.WillOnce(Return(QuicConsumedData(476, false)));
EXPECT_CALL(connection_, SendStreamData(3, 512, 0, NO_FIN))
.WillOnce(Return(QuicConsumedData(512, false)));
EXPECT_CALL(connection_, SendControlFrame(_))
.WillOnce(Invoke(this, &SimpleSessionNotifierTest::ControlFrameConsumed));
EXPECT_CALL(connection_, SendStreamData(3, 512, 512, FIN))
.WillOnce(Return(QuicConsumedData(512, true)));
notifier_.OnCanWrite();
EXPECT_FALSE(notifier_.WillingToWrite());
}
TEST_F(SimpleSessionNotifierTest, OnCanWriteCryptoFrames) {
if (!QuicVersionUsesCryptoFrames(connection_.transport_version())) {
return;
}
SimpleDataProducer producer;
connection_.SetDataProducer(&producer);
InSequence s;
connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
EXPECT_CALL(connection_, SendCryptoData(ENCRYPTION_INITIAL, 1024, 0))
.WillOnce(Invoke(&connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
EXPECT_CALL(connection_, CloseConnection(QUIC_PACKET_WRITE_ERROR, _, _));
std::string crypto_data1(1024, 'a');
producer.SaveCryptoData(ENCRYPTION_INITIAL, 0, crypto_data1);
std::string crypto_data2(524, 'a');
producer.SaveCryptoData(ENCRYPTION_INITIAL, 500, crypto_data2);
notifier_.WriteCryptoData(ENCRYPTION_INITIAL, 1024, 0);
connection_.SetEncrypter(ENCRYPTION_ZERO_RTT, std::make_unique<NullEncrypter>(
Perspective::IS_CLIENT));
connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
EXPECT_CALL(connection_, SendCryptoData(ENCRYPTION_ZERO_RTT, 1024, 0))
.WillOnce(Invoke(&connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
std::string crypto_data3(1024, 'a');
producer.SaveCryptoData(ENCRYPTION_ZERO_RTT, 0, crypto_data3);
notifier_.WriteCryptoData(ENCRYPTION_ZERO_RTT, 1024, 0);
EXPECT_CALL(connection_, SendStreamData(3, 1024, 0, FIN))
.WillOnce(Return(QuicConsumedData(512, false)));
notifier_.WriteOrBufferData(3, 1024, FIN);
EXPECT_CALL(connection_, SendStreamData(5, _, _, _)).Times(0);
notifier_.WriteOrBufferData(5, 1024, NO_FIN);
EXPECT_CALL(connection_, SendControlFrame(_)).Times(0);
notifier_.WriteOrBufferRstStream(5, QUIC_ERROR_PROCESSING_STREAM, 1024);
QuicCryptoFrame crypto_frame1(ENCRYPTION_INITIAL, 500, 524);
QuicCryptoFrame crypto_frame2(ENCRYPTION_ZERO_RTT, 0, 476);
QuicStreamFrame stream3_frame(3, false, 0, 512);
notifier_.OnFrameLost(QuicFrame(&crypto_frame1));
notifier_.OnFrameLost(QuicFrame(&crypto_frame2));
notifier_.OnFrameLost(QuicFrame(stream3_frame));
EXPECT_CALL(connection_, SendCryptoData(ENCRYPTION_INITIAL, 524, 500))
.WillOnce(Invoke(&connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
EXPECT_CALL(connection_, SendCryptoData(ENCRYPTION_ZERO_RTT, 476, 0))
.WillOnce(Invoke(&connection_,
&MockQuicConnection::QuicConnection_SendCryptoData));
EXPECT_CALL(connection_, SendStreamData(3, 512, 0, NO_FIN))
.WillOnce(Return(QuicConsumedData(512, false)));
EXPECT_CALL(connection_, SendControlFrame(_))
.WillOnce(Invoke(this, &SimpleSessionNotifierTest::ControlFrameConsumed));
EXPECT_CALL(connection_, SendStreamData(3, 512, 512, FIN))
.WillOnce(Return(QuicConsumedData(512, true)));
notifier_.OnCanWrite();
EXPECT_FALSE(notifier_.WillingToWrite());
}
TEST_F(SimpleSessionNotifierTest, RetransmitFrames) {
InSequence s;
connection_.SetEncrypter(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<NullEncrypter>(Perspective::IS_CLIENT));
EXPECT_CALL(connection_, SendStreamData(3, 10, 0, FIN))
.WillOnce(Return(QuicConsumedData(10, true)));
notifier_.WriteOrBufferData(3, 10, FIN);
QuicStreamFrame frame1(3, true, 0, 10);
EXPECT_CALL(connection_, SendStreamData(5, 10, 0, FIN))
.WillOnce(Return(QuicConsumedData(10, true)));
notifier_.WriteOrBufferData(5, 10, FIN);
QuicStreamFrame frame2(5, true, 0, 10);
EXPECT_CALL(connection_, SendControlFrame(_))
.WillOnce(Invoke(this, &SimpleSessionNotifierTest::ControlFrameConsumed));
notifier_.WriteOrBufferRstStream(5, QUIC_STREAM_NO_ERROR, 10);
QuicStreamFrame ack_frame1(3, false, 3, 4);
QuicStreamFrame ack_frame2(5, false, 8, 2);
notifier_.OnFrameAcked(QuicFrame(ack_frame1), QuicTime::Delta::Zero(),
QuicTime::Zero());
notifier_.OnFrameAcked(QuicFrame(ack_frame2), QuicTime::Delta::Zero(),
QuicTime::Zero());
EXPECT_FALSE(notifier_.WillingToWrite());
QuicRstStreamFrame rst_stream(1, 5, QUIC_STREAM_NO_ERROR, 10);
QuicFrames frames;
frames.push_back(QuicFrame(frame2));
frames.push_back(QuicFrame(&rst_stream));
frames.push_back(QuicFrame(frame1));
EXPECT_CALL(connection_, SendStreamData(5, 8, 0, NO_FIN))
.WillOnce(Return(QuicConsumedData(8, false)));
EXPECT_CALL(connection_, SendStreamData(5, 0, 10, FIN))
.WillOnce(Return(QuicConsumedData(0, true)));
EXPECT_CALL(connection_, SendControlFrame(_))
.WillOnce(Invoke(this, &SimpleSessionNotifierTest::ControlFrameConsumed));
EXPECT_CALL(connection_, SendStreamData(3, 3, 0, NO_FIN))
.WillOnce(Return(QuicConsumedData(2, false)));
notifier_.RetransmitFrames(frames, PTO_RETRANSMISSION);
EXPECT_FALSE(notifier_.WillingToWrite());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/test_tools/simple_session_notifier.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/test_tools/simple_session_notifier_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
85410ddd-a164-4604-bf44-65fbae760e54 | cpp | tensorflow/tensorflow | ragged_tensor_to_sparse_kernel | tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc | tensorflow/core/kernels/ragged_tensor_to_sparse_kernel_test.cc | #include <limits>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
using errors::InvalidArgument;
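// Converts a RaggedTensor, given as a list of nested row-splits vectors plus
// a dense values tensor, into the three SparseTensor components: indices,
// values, and dense shape.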
template <typename SPLITS_TYPE>
class RaggedTensorToSparseOp : public OpKernel {
public:
using OpKernel::OpKernel;
using ConstFlatSplits = typename TTypes<SPLITS_TYPE>::ConstFlat;
void Compute(OpKernelContext* context) override {
OpInputList rt_nested_splits_in;
OP_REQUIRES_OK(
context, context->input_list("rt_nested_splits", &rt_nested_splits_in));
const int rt_nested_splits_len = rt_nested_splits_in.size();
OP_REQUIRES(context, rt_nested_splits_len > 0,
errors::InvalidArgument("rt_nested_splits must be non empty"));
std::vector<ConstFlatSplits> rt_nested_splits;
rt_nested_splits.reserve(rt_nested_splits_len);
for (int i = 0; i < rt_nested_splits_len; ++i) {
rt_nested_splits.push_back(rt_nested_splits_in[i].flat<SPLITS_TYPE>());
}
const Tensor& rt_dense_values_in = context->input(rt_nested_splits_len);
OP_REQUIRES_OK(context,
ValidateInputs(rt_nested_splits, rt_dense_values_in));
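    // Each sparse index is the concatenation of an outer ragged prefix, the
    // offset within the innermost row, and a suffix over the dense value
    // dimensions; enumerate them in row-major order.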
std::vector<int64_t> index_prefix(rt_nested_splits_len);
std::vector<std::vector<int64_t>> index_suffixes =
MakeIndexSuffixes(rt_dense_values_in.shape());
const int64_t nvals =
(rt_nested_splits.back()(rt_nested_splits.back().size() - 1) *
index_suffixes.size());
const int64_t indices_len =
rt_nested_splits_len + rt_dense_values_in.dims();
Tensor* sparse_indices_out = nullptr;
OP_REQUIRES_OK(
context, context->allocate_output(0, TensorShape({nvals, indices_len}),
&sparse_indices_out));
auto sparse_indices = sparse_indices_out->tensor<int64_t, 2>();
std::vector<int64_t> pos(rt_nested_splits_len);
int64_t& final_pos = pos[rt_nested_splits_len - 1];
int next_index = 0;
int max_final_pos = rt_nested_splits.back().size() - 1;
for (; final_pos < max_final_pos; ++final_pos) {
for (int dim = rt_nested_splits_len - 2; dim >= 0; --dim) {
while (IsCompleted(pos, dim, rt_nested_splits)) {
pos[dim] += 1;
}
}
for (int dim = 0; dim < index_prefix.size(); ++dim) {
int start = dim > 0 ? rt_nested_splits[dim - 1](pos[dim - 1]) : 0;
index_prefix[dim] = pos[dim] - start;
}
const auto& final_splits = rt_nested_splits[rt_nested_splits_len - 1];
int64_t slice_len = final_splits(final_pos + 1) - final_splits(final_pos);
for (int64_t i = 0; i < slice_len; ++i) {
for (const auto& index_suffix : index_suffixes) {
int dim = 0;
for (int64_t index : index_prefix) {
sparse_indices(next_index, dim++) = index;
}
sparse_indices(next_index, dim++) = i;
for (int64_t index : index_suffix) {
sparse_indices(next_index, dim++) = index;
}
DCHECK_EQ(dim, indices_len);
++next_index;
}
}
}
DCHECK_EQ(next_index, nvals);
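    // Output 1 (sparse values): forward the dense values, flattened to 1-D
    // when they have more than one dimension.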
if (rt_dense_values_in.dims() == 1) {
context->set_output(1, rt_dense_values_in);
} else {
Tensor sparse_values_out(rt_dense_values_in.dtype());
bool shapes_match = sparse_values_out.CopyFrom(
rt_dense_values_in, {rt_dense_values_in.NumElements()});
DCHECK(shapes_match);
context->set_output(1, sparse_values_out);
}
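    // Output 2 (dense shape): the number of rows, the maximum row width at
    // each ragged level, then the trailing dense value dimensions.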
int64_t ndims = rt_nested_splits_len + rt_dense_values_in.dims();
Tensor* sparse_dense_shape_out = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(2, TensorShape({ndims}),
&sparse_dense_shape_out));
auto sparse_dense_shape = sparse_dense_shape_out->vec<int64_t>();
sparse_dense_shape(0) = rt_nested_splits_in[0].dim_size(0) - 1;
for (int dim = 0; dim < rt_nested_splits_len; ++dim) {
const auto& splits = rt_nested_splits[dim];
SPLITS_TYPE max_width = 0;
for (int i = 1; i < splits.size(); ++i) {
max_width = std::max(max_width, splits(i) - splits(i - 1));
}
sparse_dense_shape(dim + 1) = max_width;
}
for (int dim = 1; dim < rt_dense_values_in.dims(); ++dim) {
sparse_dense_shape(dim + rt_nested_splits_len) =
rt_dense_values_in.dim_size(dim);
}
}
private:
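  // Checks that every splits vector is non-empty, starts at 0, and is
  // non-decreasing, and that each level's length matches the next nesting
  // level (and, for the innermost level, the values tensor).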
static ::tensorflow::Status ValidateInputs(
std::vector<ConstFlatSplits> rt_nested_splits,
const Tensor& rt_dense_values_in) {
for (int i = 0; i < rt_nested_splits.size(); ++i) {
if (rt_nested_splits[i].size() == 0) {
return InvalidArgument("ragged splits may not be empty.");
}
if (rt_nested_splits[i](0) != 0) {
return InvalidArgument("First value of ragged splits must be 0.");
}
for (int j = 1; j < rt_nested_splits[i].size(); ++j) {
if (rt_nested_splits[i](j) < rt_nested_splits[i](j - 1)) {
return InvalidArgument(
"Ragged splits should be non decreasing, but we got ",
rt_nested_splits[i](j - 1), " followed by ",
rt_nested_splits[i](j));
}
}
if (i > 0) {
SPLITS_TYPE last_split =
rt_nested_splits[i - 1](rt_nested_splits[i - 1].size() - 1);
if (rt_nested_splits[i].size() != last_split + 1) {
return InvalidArgument(
"Final value of ragged splits must match the length "
"the corresponding ragged values.");
}
}
}
if (rt_dense_values_in.dim_size(0) !=
rt_nested_splits.back()(rt_nested_splits.back().size() - 1)) {
return InvalidArgument(
"Final value of ragged splits must match the length "
"the corresponding ragged values.");
}
return absl::OkStatus();
}
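  // Returns every index suffix over the trailing (dense) dimensions of the
  // values tensor in row-major order; a single empty suffix when the values
  // are 1-D.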
static std::vector<std::vector<int64_t>> MakeIndexSuffixes(
const TensorShape& values_shape) {
std::vector<std::vector<int64_t>> suffixes{{}};
for (int dim = 1; dim < values_shape.dims(); ++dim) {
std::vector<std::vector<int64_t>> new_suffixes;
for (const auto& suffix : suffixes) {
for (int i = 0; i < values_shape.dim_size(dim); ++i) {
new_suffixes.push_back(suffix);
new_suffixes.back().push_back(i);
}
}
suffixes.swap(new_suffixes);
}
return suffixes;
}
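  // Returns true once all children of pos[dim] at the next nesting level
  // have been consumed, i.e. pos[dim + 1] has reached the end of the row.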
static bool IsCompleted(
const std::vector<int64_t>& pos, int dim,
const std::vector<ConstFlatSplits>& rt_nested_splits) {
int64_t current_child = pos[dim + 1];
int64_t limit_child = rt_nested_splits[dim](pos[dim] + 1);
return current_child >= limit_child;
}
};
REGISTER_KERNEL_BUILDER(Name("RaggedTensorToSparse")
.Device(DEVICE_CPU)
.TypeConstraint<int32>("Tsplits"),
RaggedTensorToSparseOp<int32>);
REGISTER_KERNEL_BUILDER(Name("RaggedTensorToSparse")
.Device(DEVICE_CPU)
.TypeConstraint<int64_t>("Tsplits"),
RaggedTensorToSparseOp<int64_t>);
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class RaggedTensorToSparseTest : public ::tensorflow::OpsTestBase {
protected:
static constexpr int kSparseIndicesOutput = 0;
static constexpr int kSparseValuesOutput = 1;
static constexpr int kSparseDenseShapeOutput = 2;
template <typename T>
void BuildRaggedTensorToSparseGraph(
const std::vector<std::vector<int64_t>>& rt_nested_splits,
const TensorShape& rt_dense_values_shape,
const std::vector<T>& rt_dense_values) {
const auto& dtype = DataTypeToEnum<T>::v();
int64_t num_splits = rt_nested_splits.size();
TF_ASSERT_OK(NodeDefBuilder("tested_op", "RaggedTensorToSparse")
.Input(FakeInput(num_splits))
.Input(FakeInput(dtype))
.Attr("RAGGED_RANK", num_splits)
.Attr("T", dtype)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
for (const auto& splits : rt_nested_splits) {
int64_t splits_size = splits.size();
AddInputFromArray<int64_t>(TensorShape({splits_size}), splits);
}
AddInputFromArray<T>(rt_dense_values_shape, rt_dense_values);
}
};
TEST_F(RaggedTensorToSparseTest, OneSplits_Values1D) {
BuildRaggedTensorToSparseGraph<int>({{0, 3, 3, 5, 6}},
TensorShape({6}),
{1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*GetOutput(kSparseIndicesOutput),
test::AsTensor<int64_t>({0, 0, 0, 1, 0, 2, 2, 0, 2, 1, 3, 0}, {6, 2}));
test::ExpectTensorEqual<int>(*GetOutput(kSparseValuesOutput),
test::AsTensor<int>({1, 2, 3, 4, 5, 6}));
test::ExpectTensorEqual<int64_t>(*GetOutput(kSparseDenseShapeOutput),
test::AsTensor<int64_t>({4, 3}));
}
TEST_F(RaggedTensorToSparseTest, EmptyRows) {
BuildRaggedTensorToSparseGraph<int>({{0, 0, 4, 4, 6, 6}},
TensorShape({6}),
{1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*GetOutput(kSparseIndicesOutput),
test::AsTensor<int64_t>({1, 0, 1, 1, 1, 2, 1, 3, 3, 0, 3, 1}, {6, 2}));
test::ExpectTensorEqual<int>(*GetOutput(kSparseValuesOutput),
test::AsTensor<int>({1, 2, 3, 4, 5, 6}));
test::ExpectTensorEqual<int64_t>(*GetOutput(kSparseDenseShapeOutput),
test::AsTensor<int64_t>({5, 4}));
}
TEST_F(RaggedTensorToSparseTest, OneSplits_Values2D) {
BuildRaggedTensorToSparseGraph<int>(
{{0, 3, 3, 5, 6}},
TensorShape({6, 2}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
TF_ASSERT_OK(RunOpKernel());
std::vector<int64_t> expected_splits_12_3 = {
0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 2, 0, 0, 2, 1,
2, 0, 0, 2, 0, 1, 2, 1, 0, 2, 1, 1, 3, 0, 0, 3, 0, 1};
std::vector<int> expected_values = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
test::ExpectTensorEqual<int64_t>(
*GetOutput(kSparseIndicesOutput),
test::AsTensor<int64_t>(expected_splits_12_3, {12, 3}));
test::ExpectTensorEqual<int>(*GetOutput(kSparseValuesOutput),
test::AsTensor<int>(expected_values));
test::ExpectTensorEqual<int64_t>(*GetOutput(kSparseDenseShapeOutput),
test::AsTensor<int64_t>({4, 3, 2}));
}
TEST_F(RaggedTensorToSparseTest, TwoSplits_Values1D) {
BuildRaggedTensorToSparseGraph<int>(
{{0, 3, 3, 5, 7}, {0, 1, 3, 3, 8, 11, 11, 15}},
TensorShape({15}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15});
TF_ASSERT_OK(RunOpKernel());
std::vector<int64_t> expected_splits_15_3 = {
0, 0, 0, 0, 1, 0, 0, 1, 1, 2, 0, 0, 2, 0, 1, 2, 0, 2, 2, 0, 3, 2, 0,
4, 2, 1, 0, 2, 1, 1, 2, 1, 2, 3, 1, 0, 3, 1, 1, 3, 1, 2, 3, 1, 3};
std::vector<int> expected_values = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15};
test::ExpectTensorEqual<int>(*GetOutput(kSparseValuesOutput),
test::AsTensor<int>(expected_values));
test::ExpectTensorEqual<int64_t>(
*GetOutput(kSparseIndicesOutput),
test::AsTensor<int64_t>(expected_splits_15_3, {15, 3}));
test::ExpectTensorEqual<int64_t>(*GetOutput(kSparseDenseShapeOutput),
test::AsTensor<int64_t>({4, 3, 5}));
}
TEST_F(RaggedTensorToSparseTest, ShapeFn) {
ShapeInferenceTestOp op("RaggedTensorToSparse");
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(0);
INFER_ERROR("Requires RAGGED_RANK>0", op, "?");
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(1);
INFER_OK(op, "?;?", "[?,?];[?];[?]");
INFER_OK(op, "?;[?]", "[?,2];[?];[2]");
INFER_OK(op, "?;[?,?]", "[?,3];[?];[3]");
INFER_OK(op, "[?];[5]", "[5,2];[5];[2]");
INFER_OK(op, "[?];[5,2]", "[10,3];[10];[3]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[5,5];?");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "?;[]");
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(2);
INFER_OK(op, "?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "?;?;[?]", "[?,3];[?];[3]");
INFER_OK(op, "?;?;[?,?]", "[?,4];[?];[4]");
INFER_OK(op, "[?];[?];[5]", "[5,3];[5];[3]");
INFER_OK(op, "[?];[?];[5,2]", "[10,4];[10];[4]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[5,5];?");
(*op.node_def.mutable_attr())["RAGGED_RANK"].set_i(3);
INFER_OK(op, "?;?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "?;?;?;[?]", "[?,4];[?];[4]");
INFER_OK(op, "?;?;?;[5]", "[5,4];[5];[4]");
}
TEST_F(RaggedTensorToSparseTest, NoSplits) {
const auto& dtype = DataTypeToEnum<int>::v();
TF_ASSERT_OK(NodeDefBuilder("tested_op", "RaggedTensorToSparse")
.Input(FakeInput(0))
.Input(FakeInput(dtype))
.Attr("RAGGED_RANK", 0)
.Attr("T", dtype)
.Finalize(node_def()));
EXPECT_TRUE(absl::StartsWith(
InitOp().message(),
"Value for attr 'RAGGED_RANK' of 0 must be at least minimum 1"));
}
TEST_F(RaggedTensorToSparseTest, InvalidArg_BadSplitStart) {
BuildRaggedTensorToSparseGraph<int>({{5, 7, 10}},
TensorShape({0}),
{});
EXPECT_EQ("First value of ragged splits must be 0.", RunOpKernel().message());
}
TEST_F(RaggedTensorToSparseTest, InvalidArg_BadSplitLengths1) {
BuildRaggedTensorToSparseGraph<int>({{0, 5}, {0, 2, 4, 6}},
TensorShape({0}),
{});
EXPECT_EQ(
"Final value of ragged splits must match the length "
"the corresponding ragged values.",
RunOpKernel().message());
}
TEST_F(RaggedTensorToSparseTest, InvalidArg_BadSplitLengths2) {
BuildRaggedTensorToSparseGraph<int>({{0, 5}},
TensorShape({0}),
{});
EXPECT_EQ(
"Final value of ragged splits must match the length "
"the corresponding ragged values.",
RunOpKernel().message());
}
TEST_F(RaggedTensorToSparseTest, InvalidArg_EmptySplits) {
BuildRaggedTensorToSparseGraph<int>({{}},
TensorShape({0}),
{});
EXPECT_EQ("ragged splits may not be empty.", RunOpKernel().message());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_tensor_to_sparse_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
772ecdae-719a-4552-84ed-7c36b3f447ac | cpp | google/quiche | capsule | quiche/common/capsule.cc | quiche/common/capsule_test.cc | #include "quiche/common/capsule.h"
#include <cstddef>
#include <cstdint>
#include <limits>
#include <ostream>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_data_reader.h"
#include "quiche/common/quiche_data_writer.h"
#include "quiche/common/quiche_ip_address.h"
#include "quiche/common/quiche_status_utils.h"
#include "quiche/common/wire_serialization.h"
#include "quiche/web_transport/web_transport.h"
namespace quiche {
std::string CapsuleTypeToString(CapsuleType capsule_type) {
switch (capsule_type) {
case CapsuleType::DATAGRAM:
return "DATAGRAM";
case CapsuleType::LEGACY_DATAGRAM:
return "LEGACY_DATAGRAM";
case CapsuleType::LEGACY_DATAGRAM_WITHOUT_CONTEXT:
return "LEGACY_DATAGRAM_WITHOUT_CONTEXT";
case CapsuleType::CLOSE_WEBTRANSPORT_SESSION:
return "CLOSE_WEBTRANSPORT_SESSION";
case CapsuleType::DRAIN_WEBTRANSPORT_SESSION:
return "DRAIN_WEBTRANSPORT_SESSION";
case CapsuleType::ADDRESS_REQUEST:
return "ADDRESS_REQUEST";
case CapsuleType::ADDRESS_ASSIGN:
return "ADDRESS_ASSIGN";
case CapsuleType::ROUTE_ADVERTISEMENT:
return "ROUTE_ADVERTISEMENT";
case CapsuleType::WT_STREAM:
return "WT_STREAM";
case CapsuleType::WT_STREAM_WITH_FIN:
return "WT_STREAM_WITH_FIN";
case CapsuleType::WT_RESET_STREAM:
return "WT_RESET_STREAM";
case CapsuleType::WT_STOP_SENDING:
return "WT_STOP_SENDING";
case CapsuleType::WT_MAX_STREAM_DATA:
return "WT_MAX_STREAM_DATA";
case CapsuleType::WT_MAX_STREAMS_BIDI:
return "WT_MAX_STREAMS_BIDI";
case CapsuleType::WT_MAX_STREAMS_UNIDI:
return "WT_MAX_STREAMS_UNIDI";
}
return absl::StrCat("Unknown(", static_cast<uint64_t>(capsule_type), ")");
}
std::ostream& operator<<(std::ostream& os, const CapsuleType& capsule_type) {
os << CapsuleTypeToString(capsule_type);
return os;
}
Capsule Capsule::Datagram(absl::string_view http_datagram_payload) {
return Capsule(DatagramCapsule{http_datagram_payload});
}
Capsule Capsule::LegacyDatagram(absl::string_view http_datagram_payload) {
return Capsule(LegacyDatagramCapsule{http_datagram_payload});
}
Capsule Capsule::LegacyDatagramWithoutContext(
absl::string_view http_datagram_payload) {
return Capsule(LegacyDatagramWithoutContextCapsule{http_datagram_payload});
}
Capsule Capsule::CloseWebTransportSession(
webtransport::SessionErrorCode error_code,
absl::string_view error_message) {
return Capsule(CloseWebTransportSessionCapsule({error_code, error_message}));
}
Capsule Capsule::AddressRequest() { return Capsule(AddressRequestCapsule()); }
Capsule Capsule::AddressAssign() { return Capsule(AddressAssignCapsule()); }
Capsule Capsule::RouteAdvertisement() {
return Capsule(RouteAdvertisementCapsule());
}
Capsule Capsule::Unknown(uint64_t capsule_type,
absl::string_view unknown_capsule_data) {
return Capsule(UnknownCapsule{capsule_type, unknown_capsule_data});
}
bool Capsule::operator==(const Capsule& other) const {
return capsule_ == other.capsule_;
}
std::string DatagramCapsule::ToString() const {
return absl::StrCat("DATAGRAM[",
absl::BytesToHexString(http_datagram_payload), "]");
}
std::string LegacyDatagramCapsule::ToString() const {
return absl::StrCat("LEGACY_DATAGRAM[",
absl::BytesToHexString(http_datagram_payload), "]");
}
std::string LegacyDatagramWithoutContextCapsule::ToString() const {
return absl::StrCat("LEGACY_DATAGRAM_WITHOUT_CONTEXT[",
absl::BytesToHexString(http_datagram_payload), "]");
}
std::string CloseWebTransportSessionCapsule::ToString() const {
return absl::StrCat("CLOSE_WEBTRANSPORT_SESSION(error_code=", error_code,
",error_message=\"", error_message, "\")");
}
std::string DrainWebTransportSessionCapsule::ToString() const {
return "DRAIN_WEBTRANSPORT_SESSION()";
}
std::string AddressRequestCapsule::ToString() const {
std::string rv = "ADDRESS_REQUEST[";
for (auto requested_address : requested_addresses) {
absl::StrAppend(&rv, "(", requested_address.request_id, "-",
requested_address.ip_prefix.ToString(), ")");
}
absl::StrAppend(&rv, "]");
return rv;
}
std::string AddressAssignCapsule::ToString() const {
std::string rv = "ADDRESS_ASSIGN[";
for (auto assigned_address : assigned_addresses) {
absl::StrAppend(&rv, "(", assigned_address.request_id, "-",
assigned_address.ip_prefix.ToString(), ")");
}
absl::StrAppend(&rv, "]");
return rv;
}
std::string RouteAdvertisementCapsule::ToString() const {
std::string rv = "ROUTE_ADVERTISEMENT[";
for (auto ip_address_range : ip_address_ranges) {
absl::StrAppend(&rv, "(", ip_address_range.start_ip_address.ToString(), "-",
ip_address_range.end_ip_address.ToString(), "-",
static_cast<int>(ip_address_range.ip_protocol), ")");
}
absl::StrAppend(&rv, "]");
return rv;
}
std::string UnknownCapsule::ToString() const {
return absl::StrCat("Unknown(", type, ") [", absl::BytesToHexString(payload),
"]");
}
std::string WebTransportStreamDataCapsule::ToString() const {
return absl::StrCat(CapsuleTypeToString(capsule_type()),
" [stream_id=", stream_id,
", data=", absl::BytesToHexString(data), "]");
}
std::string WebTransportResetStreamCapsule::ToString() const {
return absl::StrCat("WT_RESET_STREAM(stream_id=", stream_id,
", error_code=", error_code, ")");
}
std::string WebTransportStopSendingCapsule::ToString() const {
return absl::StrCat("WT_STOP_SENDING(stream_id=", stream_id,
", error_code=", error_code, ")");
}
std::string WebTransportMaxStreamDataCapsule::ToString() const {
return absl::StrCat("WT_MAX_STREAM_DATA (stream_id=", stream_id,
", max_stream_data=", max_stream_data, ")");
}
std::string WebTransportMaxStreamsCapsule::ToString() const {
return absl::StrCat(CapsuleTypeToString(capsule_type()),
" (max_streams=", max_stream_count, ")");
}
std::string Capsule::ToString() const {
return absl::visit([](const auto& capsule) { return capsule.ToString(); },
capsule_);
}
std::ostream& operator<<(std::ostream& os, const Capsule& capsule) {
os << capsule.ToString();
return os;
}
CapsuleParser::CapsuleParser(Visitor* visitor) : visitor_(visitor) {
QUICHE_DCHECK_NE(visitor_, nullptr);
}
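// Wire-format adapter for PrefixWithId: a varint request ID, a one-byte IP
// version (4 or 6), the packed address bytes, and a one-byte prefix length.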
class WirePrefixWithId {
public:
using DataType = PrefixWithId;
WirePrefixWithId(const PrefixWithId& prefix) : prefix_(prefix) {}
size_t GetLengthOnWire() {
return ComputeLengthOnWire(
WireVarInt62(prefix_.request_id),
WireUint8(prefix_.ip_prefix.address().IsIPv4() ? 4 : 6),
WireBytes(prefix_.ip_prefix.address().ToPackedString()),
WireUint8(prefix_.ip_prefix.prefix_length()));
}
absl::Status SerializeIntoWriter(QuicheDataWriter& writer) {
return AppendToStatus(
quiche::SerializeIntoWriter(
writer, WireVarInt62(prefix_.request_id),
WireUint8(prefix_.ip_prefix.address().IsIPv4() ? 4 : 6),
WireBytes(prefix_.ip_prefix.address().ToPackedString()),
WireUint8(prefix_.ip_prefix.prefix_length())),
" while serializing a PrefixWithId");
}
private:
const PrefixWithId& prefix_;
};
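// Wire-format adapter for IpAddressRange: a one-byte IP version, the packed
// start and end addresses, and a one-byte IP protocol number.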
class WireIpAddressRange {
public:
using DataType = IpAddressRange;
explicit WireIpAddressRange(const IpAddressRange& range) : range_(range) {}
size_t GetLengthOnWire() {
return ComputeLengthOnWire(
WireUint8(range_.start_ip_address.IsIPv4() ? 4 : 6),
WireBytes(range_.start_ip_address.ToPackedString()),
WireBytes(range_.end_ip_address.ToPackedString()),
WireUint8(range_.ip_protocol));
}
absl::Status SerializeIntoWriter(QuicheDataWriter& writer) {
return AppendToStatus(
::quiche::SerializeIntoWriter(
writer, WireUint8(range_.start_ip_address.IsIPv4() ? 4 : 6),
WireBytes(range_.start_ip_address.ToPackedString()),
WireBytes(range_.end_ip_address.ToPackedString()),
WireUint8(range_.ip_protocol)),
" while serializing an IpAddressRange");
}
private:
const IpAddressRange& range_;
};
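// Serializes a capsule as its type varint, a varint payload length, and the
// payload fields themselves.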
template <typename... T>
absl::StatusOr<quiche::QuicheBuffer> SerializeCapsuleFields(
CapsuleType type, QuicheBufferAllocator* allocator, T... fields) {
size_t capsule_payload_size = ComputeLengthOnWire(fields...);
return SerializeIntoBuffer(allocator, WireVarInt62(type),
WireVarInt62(capsule_payload_size), fields...);
}
absl::StatusOr<quiche::QuicheBuffer> SerializeCapsuleWithStatus(
const Capsule& capsule, quiche::QuicheBufferAllocator* allocator) {
switch (capsule.capsule_type()) {
case CapsuleType::DATAGRAM:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireBytes(capsule.datagram_capsule().http_datagram_payload));
case CapsuleType::LEGACY_DATAGRAM:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireBytes(capsule.legacy_datagram_capsule().http_datagram_payload));
case CapsuleType::LEGACY_DATAGRAM_WITHOUT_CONTEXT:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireBytes(capsule.legacy_datagram_without_context_capsule()
.http_datagram_payload));
case CapsuleType::CLOSE_WEBTRANSPORT_SESSION:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireUint32(capsule.close_web_transport_session_capsule().error_code),
WireBytes(
capsule.close_web_transport_session_capsule().error_message));
case CapsuleType::DRAIN_WEBTRANSPORT_SESSION:
return SerializeCapsuleFields(capsule.capsule_type(), allocator);
case CapsuleType::ADDRESS_REQUEST:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireSpan<WirePrefixWithId>(absl::MakeConstSpan(
capsule.address_request_capsule().requested_addresses)));
case CapsuleType::ADDRESS_ASSIGN:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireSpan<WirePrefixWithId>(absl::MakeConstSpan(
capsule.address_assign_capsule().assigned_addresses)));
case CapsuleType::ROUTE_ADVERTISEMENT:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireSpan<WireIpAddressRange>(absl::MakeConstSpan(
capsule.route_advertisement_capsule().ip_address_ranges)));
case CapsuleType::WT_STREAM:
case CapsuleType::WT_STREAM_WITH_FIN:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireVarInt62(capsule.web_transport_stream_data().stream_id),
WireBytes(capsule.web_transport_stream_data().data));
case CapsuleType::WT_RESET_STREAM:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireVarInt62(capsule.web_transport_reset_stream().stream_id),
WireVarInt62(capsule.web_transport_reset_stream().error_code));
case CapsuleType::WT_STOP_SENDING:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireVarInt62(capsule.web_transport_stop_sending().stream_id),
WireVarInt62(capsule.web_transport_stop_sending().error_code));
case CapsuleType::WT_MAX_STREAM_DATA:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireVarInt62(capsule.web_transport_max_stream_data().stream_id),
WireVarInt62(
capsule.web_transport_max_stream_data().max_stream_data));
case CapsuleType::WT_MAX_STREAMS_BIDI:
case CapsuleType::WT_MAX_STREAMS_UNIDI:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireVarInt62(capsule.web_transport_max_streams().max_stream_count));
default:
return SerializeCapsuleFields(
capsule.capsule_type(), allocator,
WireBytes(capsule.unknown_capsule().payload));
}
}
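// Serializes only the DATAGRAM capsule type and payload length so the caller
// can append the datagram payload without an extra copy; returns an empty
// buffer on failure.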
QuicheBuffer SerializeDatagramCapsuleHeader(uint64_t datagram_size,
QuicheBufferAllocator* allocator) {
absl::StatusOr<QuicheBuffer> buffer =
SerializeIntoBuffer(allocator, WireVarInt62(CapsuleType::DATAGRAM),
WireVarInt62(datagram_size));
if (!buffer.ok()) {
return QuicheBuffer();
}
return *std::move(buffer);
}
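// Likewise serializes just the WT_STREAM / WT_STREAM_WITH_FIN header; the
// declared length covers the stream ID plus the payload the caller writes.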
QUICHE_EXPORT QuicheBuffer SerializeWebTransportStreamCapsuleHeader(
webtransport::StreamId stream_id, bool fin, uint64_t write_size,
QuicheBufferAllocator* allocator) {
absl::StatusOr<QuicheBuffer> buffer = SerializeIntoBuffer(
allocator,
WireVarInt62(fin ? CapsuleType::WT_STREAM_WITH_FIN
: CapsuleType::WT_STREAM),
WireVarInt62(write_size + QuicheDataWriter::GetVarInt62Len(stream_id)),
WireVarInt62(stream_id));
if (!buffer.ok()) {
return QuicheBuffer();
}
return *std::move(buffer);
}
QuicheBuffer SerializeCapsule(const Capsule& capsule,
quiche::QuicheBufferAllocator* allocator) {
absl::StatusOr<QuicheBuffer> serialized =
SerializeCapsuleWithStatus(capsule, allocator);
if (!serialized.ok()) {
QUICHE_BUG(capsule_serialization_failed)
<< "Failed to serialize the following capsule:\n"
<< capsule << "Serialization error: " << serialized.status();
return QuicheBuffer();
}
return *std::move(serialized);
}
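// Appends the fragment to the internal buffer and parses as many complete
// capsules as are available, reporting a failure (and returning false) on
// malformed input or when more than 1 MiB accumulates without a complete
// capsule.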
bool CapsuleParser::IngestCapsuleFragment(absl::string_view capsule_fragment) {
if (parsing_error_occurred_) {
return false;
}
absl::StrAppend(&buffered_data_, capsule_fragment);
while (true) {
const absl::StatusOr<size_t> buffered_data_read = AttemptParseCapsule();
if (!buffered_data_read.ok()) {
ReportParseFailure(buffered_data_read.status().message());
buffered_data_.clear();
return false;
}
if (*buffered_data_read == 0) {
break;
}
buffered_data_.erase(0, *buffered_data_read);
}
static constexpr size_t kMaxCapsuleBufferSize = 1024 * 1024;
if (buffered_data_.size() > kMaxCapsuleBufferSize) {
buffered_data_.clear();
ReportParseFailure("Refusing to buffer too much capsule data");
return false;
}
return true;
}
namespace {
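// Reads a varint stream ID and rejects values that do not fit in the 32-bit
// webtransport::StreamId type.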
absl::Status ReadWebTransportStreamId(QuicheDataReader& reader,
webtransport::StreamId& id) {
uint64_t raw_id;
if (!reader.ReadVarInt62(&raw_id)) {
return absl::InvalidArgumentError("Failed to read WebTransport Stream ID");
}
if (raw_id > std::numeric_limits<uint32_t>::max()) {
return absl::InvalidArgumentError("Stream ID does not fit into a uint32_t");
}
id = static_cast<webtransport::StreamId>(raw_id);
return absl::OkStatus();
}
absl::StatusOr<Capsule> ParseCapsulePayload(QuicheDataReader& reader,
CapsuleType type) {
switch (type) {
case CapsuleType::DATAGRAM:
return Capsule::Datagram(reader.ReadRemainingPayload());
case CapsuleType::LEGACY_DATAGRAM:
return Capsule::LegacyDatagram(reader.ReadRemainingPayload());
case CapsuleType::LEGACY_DATAGRAM_WITHOUT_CONTEXT:
return Capsule::LegacyDatagramWithoutContext(
reader.ReadRemainingPayload());
case CapsuleType::CLOSE_WEBTRANSPORT_SESSION: {
CloseWebTransportSessionCapsule capsule;
if (!reader.ReadUInt32(&capsule.error_code)) {
return absl::InvalidArgumentError(
"Unable to parse capsule CLOSE_WEBTRANSPORT_SESSION error code");
}
capsule.error_message = reader.ReadRemainingPayload();
return Capsule(std::move(capsule));
}
case CapsuleType::DRAIN_WEBTRANSPORT_SESSION:
return Capsule(DrainWebTransportSessionCapsule());
case CapsuleType::ADDRESS_REQUEST: {
AddressRequestCapsule capsule;
while (!reader.IsDoneReading()) {
PrefixWithId requested_address;
if (!reader.ReadVarInt62(&requested_address.request_id)) {
return absl::InvalidArgumentError(
"Unable to parse capsule ADDRESS_REQUEST request ID");
}
uint8_t address_family;
if (!reader.ReadUInt8(&address_family)) {
return absl::InvalidArgumentError(
"Unable to parse capsule ADDRESS_REQUEST family");
}
if (address_family != 4 && address_family != 6) {
return absl::InvalidArgumentError("Bad ADDRESS_REQUEST family");
}
absl::string_view ip_address_bytes;
if (!reader.ReadStringPiece(&ip_address_bytes,
address_family == 4
? QuicheIpAddress::kIPv4AddressSize
: QuicheIpAddress::kIPv6AddressSize)) {
return absl::InvalidArgumentError(
"Unable to read capsule ADDRESS_REQUEST address");
}
quiche::QuicheIpAddress ip_address;
if (!ip_address.FromPackedString(ip_address_bytes.data(),
ip_address_bytes.size())) {
return absl::InvalidArgumentError(
"Unable to parse capsule ADDRESS_REQUEST address");
}
uint8_t ip_prefix_length;
if (!reader.ReadUInt8(&ip_prefix_length)) {
return absl::InvalidArgumentError(
"Unable to parse capsule ADDRESS_REQUEST IP prefix length");
}
if (ip_prefix_length > QuicheIpPrefix(ip_address).prefix_length()) {
return absl::InvalidArgumentError("Invalid IP prefix length");
}
requested_address.ip_prefix =
QuicheIpPrefix(ip_address, ip_prefix_length);
capsule.requested_addresses.push_back(requested_address);
}
return Capsule(std::move(capsule));
}
case CapsuleType::ADDRESS_ASSIGN: {
AddressAssignCapsule capsule;
while (!reader.IsDoneReading()) {
PrefixWithId assigned_address;
if (!reader.ReadVarInt62(&assigned_address.request_id)) {
return absl::InvalidArgumentError(
"Unable to parse capsule ADDRESS_ASSIGN request ID");
}
uint8_t address_family;
if (!reader.ReadUInt8(&address_family)) {
return absl::InvalidArgumentError(
"Unable to parse capsule ADDRESS_ASSIGN family");
}
if (address_family != 4 && address_family != 6) {
return absl::InvalidArgumentError("Bad ADDRESS_ASSIGN family");
}
absl::string_view ip_address_bytes;
if (!reader.ReadStringPiece(&ip_address_bytes,
address_family == 4
? QuicheIpAddress::kIPv4AddressSize
: QuicheIpAddress::kIPv6AddressSize)) {
return absl::InvalidArgumentError(
"Unable to read capsule ADDRESS_ASSIGN address");
}
quiche::QuicheIpAddress ip_address;
if (!ip_address.FromPackedString(ip_address_bytes.data(),
ip_address_bytes.size())) {
return absl::InvalidArgumentError(
"Unable to parse capsule ADDRESS_ASSIGN address");
}
uint8_t ip_prefix_length;
if (!reader.ReadUInt8(&ip_prefix_length)) {
return absl::InvalidArgumentError(
"Unable to parse capsule ADDRESS_ASSIGN IP prefix length");
}
if (ip_prefix_length > QuicheIpPrefix(ip_address).prefix_length()) {
return absl::InvalidArgumentError("Invalid IP prefix length");
}
assigned_address.ip_prefix =
QuicheIpPrefix(ip_address, ip_prefix_length);
capsule.assigned_addresses.push_back(assigned_address);
}
return Capsule(std::move(capsule));
}
case CapsuleType::ROUTE_ADVERTISEMENT: {
RouteAdvertisementCapsule capsule;
while (!reader.IsDoneReading()) {
uint8_t address_family;
if (!reader.ReadUInt8(&address_family)) {
return absl::InvalidArgumentError(
"Unable to parse capsule ROUTE_ADVERTISEMENT family");
}
if (address_family != 4 && address_family != 6) {
return absl::InvalidArgumentError("Bad ROUTE_ADVERTISEMENT family");
}
IpAddressRange ip_address_range;
absl::string_view start_ip_address_bytes;
if (!reader.ReadStringPiece(&start_ip_address_bytes,
address_family == 4
? QuicheIpAddress::kIPv4AddressSize
: QuicheIpAddress::kIPv6AddressSize)) {
return absl::InvalidArgumentError(
"Unable to read capsule ROUTE_ADVERTISEMENT start address");
}
if (!ip_address_range.start_ip_address.FromPackedString(
start_ip_address_bytes.data(), start_ip_address_bytes.size())) {
return absl::InvalidArgumentError(
"Unable to parse capsule ROUTE_ADVERTISEMENT start address");
}
absl::string_view end_ip_address_bytes;
if (!reader.ReadStringPiece(&end_ip_address_bytes,
address_family == 4
? QuicheIpAddress::kIPv4AddressSize
: QuicheIpAddress::kIPv6AddressSize)) {
return absl::InvalidArgumentError(
"Unable to read capsule ROUTE_ADVERTISEMENT end address");
}
if (!ip_address_range.end_ip_address.FromPackedString(
end_ip_address_bytes.data(), end_ip_address_bytes.size())) {
return absl::InvalidArgumentError(
"Unable to parse capsule ROUTE_ADVERTISEMENT end address");
}
if (!reader.ReadUInt8(&ip_address_range.ip_protocol)) {
return absl::InvalidArgumentError(
"Unable to parse capsule ROUTE_ADVERTISEMENT IP protocol");
}
capsule.ip_address_ranges.push_back(ip_address_range);
}
return Capsule(std::move(capsule));
}
case CapsuleType::WT_STREAM:
case CapsuleType::WT_STREAM_WITH_FIN: {
WebTransportStreamDataCapsule capsule;
capsule.fin = (type == CapsuleType::WT_STREAM_WITH_FIN);
QUICHE_RETURN_IF_ERROR(
ReadWebTransportStreamId(reader, capsule.stream_id));
capsule.data = reader.ReadRemainingPayload();
return Capsule(std::move(capsule));
}
case CapsuleType::WT_RESET_STREAM: {
WebTransportResetStreamCapsule capsule;
QUICHE_RETURN_IF_ERROR(
ReadWebTransportStreamId(reader, capsule.stream_id));
if (!reader.ReadVarInt62(&capsule.error_code)) {
return absl::InvalidArgumentError(
"Failed to parse the RESET_STREAM error code");
}
return Capsule(std::move(capsule));
}
case CapsuleType::WT_STOP_SENDING: {
WebTransportStopSendingCapsule capsule;
QUICHE_RETURN_IF_ERROR(
ReadWebTransportStreamId(reader, capsule.stream_id));
if (!reader.ReadVarInt62(&capsule.error_code)) {
return absl::InvalidArgumentError(
"Failed to parse the STOP_SENDING error code");
}
return Capsule(std::move(capsule));
}
case CapsuleType::WT_MAX_STREAM_DATA: {
WebTransportMaxStreamDataCapsule capsule;
QUICHE_RETURN_IF_ERROR(
ReadWebTransportStreamId(reader, capsule.stream_id));
if (!reader.ReadVarInt62(&capsule.max_stream_data)) {
return absl::InvalidArgumentError(
"Failed to parse the max stream data field");
}
return Capsule(std::move(capsule));
}
case CapsuleType::WT_MAX_STREAMS_UNIDI:
case CapsuleType::WT_MAX_STREAMS_BIDI: {
WebTransportMaxStreamsCapsule capsule;
capsule.stream_type = type == CapsuleType::WT_MAX_STREAMS_UNIDI
? webtransport::StreamType::kUnidirectional
: webtransport::StreamType::kBidirectional;
if (!reader.ReadVarInt62(&capsule.max_stream_count)) {
return absl::InvalidArgumentError(
"Failed to parse the max streams field");
}
return Capsule(std::move(capsule));
}
default:
return Capsule(UnknownCapsule{static_cast<uint64_t>(type),
reader.ReadRemainingPayload()});
}
}
}
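// Tries to parse one complete capsule from buffered_data_. Returns the
// number of bytes consumed, 0 if more data is needed, or an error on
// malformed input or when the visitor rejects the capsule.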
absl::StatusOr<size_t> CapsuleParser::AttemptParseCapsule() {
QUICHE_DCHECK(!parsing_error_occurred_);
if (buffered_data_.empty()) {
return 0;
}
QuicheDataReader capsule_fragment_reader(buffered_data_);
uint64_t capsule_type64;
if (!capsule_fragment_reader.ReadVarInt62(&capsule_type64)) {
QUICHE_DVLOG(2) << "Partial read: not enough data to read capsule type";
return 0;
}
absl::string_view capsule_data;
if (!capsule_fragment_reader.ReadStringPieceVarInt62(&capsule_data)) {
QUICHE_DVLOG(2)
<< "Partial read: not enough data to read capsule length or "
"full capsule data";
return 0;
}
QuicheDataReader capsule_data_reader(capsule_data);
absl::StatusOr<Capsule> capsule = ParseCapsulePayload(
capsule_data_reader, static_cast<CapsuleType>(capsule_type64));
QUICHE_RETURN_IF_ERROR(capsule.status());
if (!visitor_->OnCapsule(*capsule)) {
return absl::AbortedError("Visitor failed to process capsule");
}
return capsule_fragment_reader.PreviouslyReadPayload().length();
}
void CapsuleParser::ReportParseFailure(absl::string_view error_message) {
if (parsing_error_occurred_) {
    QUICHE_BUG(multiple_parse_errors) << "Experienced multiple parse failures";
return;
}
parsing_error_occurred_ = true;
visitor_->OnCapsuleParseFailure(error_message);
}
void CapsuleParser::ErrorIfThereIsRemainingBufferedData() {
if (parsing_error_occurred_) {
return;
}
if (!buffered_data_.empty()) {
ReportParseFailure("Incomplete capsule left at the end of the stream");
}
}
bool PrefixWithId::operator==(const PrefixWithId& other) const {
return request_id == other.request_id && ip_prefix == other.ip_prefix;
}
bool IpAddressRange::operator==(const IpAddressRange& other) const {
return start_ip_address == other.start_ip_address &&
end_ip_address == other.end_ip_address &&
ip_protocol == other.ip_protocol;
}
bool AddressAssignCapsule::operator==(const AddressAssignCapsule& other) const {
return assigned_addresses == other.assigned_addresses;
}
bool AddressRequestCapsule::operator==(
const AddressRequestCapsule& other) const {
return requested_addresses == other.requested_addresses;
}
bool RouteAdvertisementCapsule::operator==(
const RouteAdvertisementCapsule& other) const {
return ip_address_ranges == other.ip_address_ranges;
}
bool WebTransportStreamDataCapsule::operator==(
const WebTransportStreamDataCapsule& other) const {
return stream_id == other.stream_id && data == other.data && fin == other.fin;
}
bool WebTransportResetStreamCapsule::operator==(
const WebTransportResetStreamCapsule& other) const {
return stream_id == other.stream_id && error_code == other.error_code;
}
bool WebTransportStopSendingCapsule::operator==(
const WebTransportStopSendingCapsule& other) const {
return stream_id == other.stream_id && error_code == other.error_code;
}
bool WebTransportMaxStreamDataCapsule::operator==(
const WebTransportMaxStreamDataCapsule& other) const {
return stream_id == other.stream_id &&
max_stream_data == other.max_stream_data;
}
bool WebTransportMaxStreamsCapsule::operator==(
const WebTransportMaxStreamsCapsule& other) const {
return stream_type == other.stream_type &&
max_stream_count == other.max_stream_count;
}
} | #include "quiche/common/capsule.h"
#include <cstddef>
#include <deque>
#include <string>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_ip_address.h"
#include "quiche/common/simple_buffer_allocator.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
#include "quiche/web_transport/web_transport.h"
using ::testing::_;
using ::testing::InSequence;
using ::testing::Return;
using ::webtransport::StreamType;
namespace quiche {
namespace test {
class CapsuleParserPeer {
public:
static std::string* buffered_data(CapsuleParser* capsule_parser) {
return &capsule_parser->buffered_data_;
}
};
namespace {
class MockCapsuleParserVisitor : public CapsuleParser::Visitor {
public:
MockCapsuleParserVisitor() {
ON_CALL(*this, OnCapsule(_)).WillByDefault(Return(true));
}
~MockCapsuleParserVisitor() override = default;
MOCK_METHOD(bool, OnCapsule, (const Capsule& capsule), (override));
MOCK_METHOD(void, OnCapsuleParseFailure, (absl::string_view error_message),
(override));
};
class CapsuleTest : public QuicheTest {
public:
CapsuleTest() : capsule_parser_(&visitor_) {}
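  // Asserts that the parser has fully consumed its input: no bytes remain
  // buffered and no further capsules or parse failures are reported.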
void ValidateParserIsEmpty() {
EXPECT_CALL(visitor_, OnCapsule(_)).Times(0);
EXPECT_CALL(visitor_, OnCapsuleParseFailure(_)).Times(0);
capsule_parser_.ErrorIfThereIsRemainingBufferedData();
EXPECT_TRUE(CapsuleParserPeer::buffered_data(&capsule_parser_)->empty());
}
void TestSerialization(const Capsule& capsule,
const std::string& expected_bytes) {
quiche::QuicheBuffer serialized_capsule =
SerializeCapsule(capsule, SimpleBufferAllocator::Get());
quiche::test::CompareCharArraysWithHexError(
"Serialized capsule", serialized_capsule.data(),
serialized_capsule.size(), expected_bytes.data(),
expected_bytes.size());
}
::testing::StrictMock<MockCapsuleParserVisitor> visitor_;
CapsuleParser capsule_parser_;
};
TEST_F(CapsuleTest, DatagramCapsule) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("00"
"08"
"a1a2a3a4a5a6a7a8",
&capsule_fragment));
std::string datagram_payload;
ASSERT_TRUE(absl::HexStringToBytes("a1a2a3a4a5a6a7a8", &datagram_payload));
Capsule expected_capsule = Capsule::Datagram(datagram_payload);
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, DatagramCapsuleViaHeader) {
std::string datagram_payload;
ASSERT_TRUE(absl::HexStringToBytes("a1a2a3a4a5a6a7a8", &datagram_payload));
quiche::QuicheBuffer expected_capsule = SerializeCapsule(
Capsule::Datagram(datagram_payload), SimpleBufferAllocator::Get());
quiche::QuicheBuffer actual_header = SerializeDatagramCapsuleHeader(
datagram_payload.size(), SimpleBufferAllocator::Get());
EXPECT_EQ(expected_capsule.AsStringView(),
absl::StrCat(actual_header.AsStringView(), datagram_payload));
}
TEST_F(CapsuleTest, LegacyDatagramCapsule) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("80ff37a0"
"08"
"a1a2a3a4a5a6a7a8",
&capsule_fragment));
std::string datagram_payload;
ASSERT_TRUE(absl::HexStringToBytes("a1a2a3a4a5a6a7a8", &datagram_payload));
Capsule expected_capsule = Capsule::LegacyDatagram(datagram_payload);
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, LegacyDatagramWithoutContextCapsule) {
std::string capsule_fragment;
ASSERT_TRUE(absl::HexStringToBytes(
"80ff37a5"
"08"
"a1a2a3a4a5a6a7a8",
&capsule_fragment));
std::string datagram_payload;
ASSERT_TRUE(absl::HexStringToBytes("a1a2a3a4a5a6a7a8", &datagram_payload));
Capsule expected_capsule =
Capsule::LegacyDatagramWithoutContext(datagram_payload);
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, CloseWebTransportStreamCapsule) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("6843"
"09"
"00001234"
"68656c6c6f",
&capsule_fragment));
Capsule expected_capsule = Capsule::CloseWebTransportSession(
0x1234, "hello");
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, DrainWebTransportStreamCapsule) {
std::string capsule_fragment;
ASSERT_TRUE(absl::HexStringToBytes(
"800078ae"
"00",
&capsule_fragment));
Capsule expected_capsule = Capsule(DrainWebTransportSessionCapsule());
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, AddressAssignCapsule) {
std::string capsule_fragment;
ASSERT_TRUE(absl::HexStringToBytes(
"9ECA6A00"
"1A"
"00"
"04"
"C000022A"
"1F"
"01"
"06"
"20010db8123456780000000000000000"
"40",
&capsule_fragment));
Capsule expected_capsule = Capsule::AddressAssign();
quiche::QuicheIpAddress ip_address1;
ip_address1.FromString("192.0.2.42");
PrefixWithId assigned_address1;
assigned_address1.request_id = 0;
assigned_address1.ip_prefix =
quiche::QuicheIpPrefix(ip_address1, 31);
expected_capsule.address_assign_capsule().assigned_addresses.push_back(
assigned_address1);
quiche::QuicheIpAddress ip_address2;
ip_address2.FromString("2001:db8:1234:5678::");
PrefixWithId assigned_address2;
assigned_address2.request_id = 1;
assigned_address2.ip_prefix =
quiche::QuicheIpPrefix(ip_address2, 64);
expected_capsule.address_assign_capsule().assigned_addresses.push_back(
assigned_address2);
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, AddressRequestCapsule) {
std::string capsule_fragment;
ASSERT_TRUE(absl::HexStringToBytes(
"9ECA6A01"
"1A"
"00"
"04"
"C000022A"
"1F"
"01"
"06"
"20010db8123456780000000000000000"
"40",
&capsule_fragment));
Capsule expected_capsule = Capsule::AddressRequest();
quiche::QuicheIpAddress ip_address1;
ip_address1.FromString("192.0.2.42");
PrefixWithId requested_address1;
requested_address1.request_id = 0;
requested_address1.ip_prefix =
quiche::QuicheIpPrefix(ip_address1, 31);
expected_capsule.address_request_capsule().requested_addresses.push_back(
requested_address1);
quiche::QuicheIpAddress ip_address2;
ip_address2.FromString("2001:db8:1234:5678::");
PrefixWithId requested_address2;
requested_address2.request_id = 1;
requested_address2.ip_prefix =
quiche::QuicheIpPrefix(ip_address2, 64);
expected_capsule.address_request_capsule().requested_addresses.push_back(
requested_address2);
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, RouteAdvertisementCapsule) {
std::string capsule_fragment;
ASSERT_TRUE(absl::HexStringToBytes(
"9ECA6A02"
"2C"
"04"
"C0000218"
"C000022A"
"00"
"06"
"00000000000000000000000000000000"
"ffffffffffffffffffffffffffffffff"
"01",
&capsule_fragment));
Capsule expected_capsule = Capsule::RouteAdvertisement();
IpAddressRange ip_address_range1;
ip_address_range1.start_ip_address.FromString("192.0.2.24");
ip_address_range1.end_ip_address.FromString("192.0.2.42");
ip_address_range1.ip_protocol = 0;
expected_capsule.route_advertisement_capsule().ip_address_ranges.push_back(
ip_address_range1);
IpAddressRange ip_address_range2;
ip_address_range2.start_ip_address.FromString("::");
ip_address_range2.end_ip_address.FromString(
"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
ip_address_range2.ip_protocol = 1;
expected_capsule.route_advertisement_capsule().ip_address_ranges.push_back(
ip_address_range2);
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, WebTransportStreamData) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("990b4d3b"
"04"
"17"
"abcdef",
&capsule_fragment));
Capsule expected_capsule = Capsule(WebTransportStreamDataCapsule());
expected_capsule.web_transport_stream_data().stream_id = 0x17;
expected_capsule.web_transport_stream_data().data = "\xab\xcd\xef";
expected_capsule.web_transport_stream_data().fin = false;
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, WebTransportStreamDataHeader) {
std::string capsule_fragment;
ASSERT_TRUE(absl::HexStringToBytes(
"990b4d3b"
"04"
"17",
&capsule_fragment));
QuicheBufferAllocator* allocator = SimpleBufferAllocator::Get();
QuicheBuffer capsule_header =
quiche::SerializeWebTransportStreamCapsuleHeader(0x17, false, 3,
allocator);
EXPECT_EQ(capsule_header.AsStringView(), capsule_fragment);
}
TEST_F(CapsuleTest, WebTransportStreamDataWithFin) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("990b4d3c"
"04"
"17"
"abcdef",
&capsule_fragment));
Capsule expected_capsule = Capsule(WebTransportStreamDataCapsule());
expected_capsule.web_transport_stream_data().stream_id = 0x17;
expected_capsule.web_transport_stream_data().data = "\xab\xcd\xef";
expected_capsule.web_transport_stream_data().fin = true;
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, WebTransportResetStream) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("990b4d39"
"02"
"17"
"07",
&capsule_fragment));
Capsule expected_capsule = Capsule(WebTransportResetStreamCapsule());
expected_capsule.web_transport_reset_stream().stream_id = 0x17;
expected_capsule.web_transport_reset_stream().error_code = 0x07;
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, WebTransportStopSending) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("990b4d3a"
"02"
"17"
"07",
&capsule_fragment));
Capsule expected_capsule = Capsule(WebTransportStopSendingCapsule());
expected_capsule.web_transport_stop_sending().stream_id = 0x17;
expected_capsule.web_transport_stop_sending().error_code = 0x07;
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, WebTransportMaxStreamData) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("990b4d3e"
"02"
"17"
"10",
&capsule_fragment));
Capsule expected_capsule = Capsule(WebTransportMaxStreamDataCapsule());
expected_capsule.web_transport_max_stream_data().stream_id = 0x17;
expected_capsule.web_transport_max_stream_data().max_stream_data = 0x10;
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, WebTransportMaxStreamsBi) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("990b4d3f"
"01"
"17",
&capsule_fragment));
Capsule expected_capsule = Capsule(WebTransportMaxStreamsCapsule());
expected_capsule.web_transport_max_streams().stream_type =
StreamType::kBidirectional;
expected_capsule.web_transport_max_streams().max_stream_count = 0x17;
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, WebTransportMaxStreamsUni) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("990b4d40"
"01"
"17",
&capsule_fragment));
Capsule expected_capsule = Capsule(WebTransportMaxStreamsCapsule());
expected_capsule.web_transport_max_streams().stream_type =
StreamType::kUnidirectional;
expected_capsule.web_transport_max_streams().max_stream_count = 0x17;
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, UnknownCapsule) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("17"
"08"
"a1a2a3a4a5a6a7a8",
&capsule_fragment));
std::string unknown_capsule_data;
ASSERT_TRUE(
absl::HexStringToBytes("a1a2a3a4a5a6a7a8", &unknown_capsule_data));
Capsule expected_capsule = Capsule::Unknown(0x17, unknown_capsule_data);
{
EXPECT_CALL(visitor_, OnCapsule(expected_capsule));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
TestSerialization(expected_capsule, capsule_fragment);
}
TEST_F(CapsuleTest, TwoCapsules) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("00"
"08"
"a1a2a3a4a5a6a7a8"
"00"
"08"
"b1b2b3b4b5b6b7b8",
&capsule_fragment));
std::string datagram_payload1;
ASSERT_TRUE(absl::HexStringToBytes("a1a2a3a4a5a6a7a8", &datagram_payload1));
std::string datagram_payload2;
ASSERT_TRUE(absl::HexStringToBytes("b1b2b3b4b5b6b7b8", &datagram_payload2));
Capsule expected_capsule1 = Capsule::Datagram(datagram_payload1);
Capsule expected_capsule2 = Capsule::Datagram(datagram_payload2);
{
InSequence s;
EXPECT_CALL(visitor_, OnCapsule(expected_capsule1));
EXPECT_CALL(visitor_, OnCapsule(expected_capsule2));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
ValidateParserIsEmpty();
}
TEST_F(CapsuleTest, TwoCapsulesPartialReads) {
std::string capsule_fragment1;
ASSERT_TRUE(absl::HexStringToBytes(
"00"
"08"
"a1a2a3a4",
&capsule_fragment1));
std::string capsule_fragment2;
ASSERT_TRUE(absl::HexStringToBytes(
"a5a6a7a8"
"00",
&capsule_fragment2));
std::string capsule_fragment3;
ASSERT_TRUE(absl::HexStringToBytes(
"08"
"b1b2b3b4b5b6b7b8",
&capsule_fragment3));
capsule_parser_.ErrorIfThereIsRemainingBufferedData();
std::string datagram_payload1;
ASSERT_TRUE(absl::HexStringToBytes("a1a2a3a4a5a6a7a8", &datagram_payload1));
std::string datagram_payload2;
ASSERT_TRUE(absl::HexStringToBytes("b1b2b3b4b5b6b7b8", &datagram_payload2));
Capsule expected_capsule1 = Capsule::Datagram(datagram_payload1);
Capsule expected_capsule2 = Capsule::Datagram(datagram_payload2);
{
InSequence s;
EXPECT_CALL(visitor_, OnCapsule(expected_capsule1));
EXPECT_CALL(visitor_, OnCapsule(expected_capsule2));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment1));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment2));
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment3));
}
ValidateParserIsEmpty();
}
TEST_F(CapsuleTest, TwoCapsulesOneByteAtATime) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("00"
"08"
"a1a2a3a4a5a6a7a8"
"00"
"08"
"b1b2b3b4b5b6b7b8",
&capsule_fragment));
std::string datagram_payload1;
ASSERT_TRUE(absl::HexStringToBytes("a1a2a3a4a5a6a7a8", &datagram_payload1));
std::string datagram_payload2;
ASSERT_TRUE(absl::HexStringToBytes("b1b2b3b4b5b6b7b8", &datagram_payload2));
Capsule expected_capsule1 = Capsule::Datagram(datagram_payload1);
Capsule expected_capsule2 = Capsule::Datagram(datagram_payload2);
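  // Feed the input one byte at a time: OnCapsule must fire exactly on the
  // byte that completes each capsule, leaving the buffer empty afterwards.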
for (size_t i = 0; i < capsule_fragment.size(); i++) {
if (i < capsule_fragment.size() / 2 - 1) {
EXPECT_CALL(visitor_, OnCapsule(_)).Times(0);
ASSERT_TRUE(
capsule_parser_.IngestCapsuleFragment(capsule_fragment.substr(i, 1)));
} else if (i == capsule_fragment.size() / 2 - 1) {
EXPECT_CALL(visitor_, OnCapsule(expected_capsule1));
ASSERT_TRUE(
capsule_parser_.IngestCapsuleFragment(capsule_fragment.substr(i, 1)));
EXPECT_TRUE(CapsuleParserPeer::buffered_data(&capsule_parser_)->empty());
} else if (i < capsule_fragment.size() - 1) {
EXPECT_CALL(visitor_, OnCapsule(_)).Times(0);
ASSERT_TRUE(
capsule_parser_.IngestCapsuleFragment(capsule_fragment.substr(i, 1)));
} else {
EXPECT_CALL(visitor_, OnCapsule(expected_capsule2));
ASSERT_TRUE(
capsule_parser_.IngestCapsuleFragment(capsule_fragment.substr(i, 1)));
EXPECT_TRUE(CapsuleParserPeer::buffered_data(&capsule_parser_)->empty());
}
}
capsule_parser_.ErrorIfThereIsRemainingBufferedData();
EXPECT_TRUE(CapsuleParserPeer::buffered_data(&capsule_parser_)->empty());
}
TEST_F(CapsuleTest, PartialCapsuleThenError) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("00"
"08"
"a1a2a3a4",
&capsule_fragment));
EXPECT_CALL(visitor_, OnCapsule(_)).Times(0);
{
EXPECT_CALL(visitor_, OnCapsuleParseFailure(_)).Times(0);
ASSERT_TRUE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
{
EXPECT_CALL(visitor_,
OnCapsuleParseFailure(
"Incomplete capsule left at the end of the stream"));
capsule_parser_.ErrorIfThereIsRemainingBufferedData();
}
}
TEST_F(CapsuleTest, RejectOverlyLongCapsule) {
std::string capsule_fragment;
ASSERT_TRUE(
absl::HexStringToBytes("17"
"80123456",
&capsule_fragment));
absl::StrAppend(&capsule_fragment, std::string(1111111, '?'));
EXPECT_CALL(visitor_, OnCapsuleParseFailure(
"Refusing to buffer too much capsule data"));
EXPECT_FALSE(capsule_parser_.IngestCapsuleFragment(capsule_fragment));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/capsule.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/capsule_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
aae77314-eb0b-4792-8519-6a254ba3f7fb | cpp | tensorflow/tensorflow | bits | tensorflow/lite/experimental/microfrontend/lib/bits.h | third_party/xla/xla/tsl/lib/core/bits_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
#ifdef __cplusplus
#include <cstdint>
extern "C" {
#endif
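// Portable fallback: counts leading zeros by shifting the value down one
// nibble at a time; the lookup string's terminating NUL doubles as the table
// entry for a remaining nibble of 15.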
static inline int CountLeadingZeros32Slow(uint64_t n) {
int zeroes = 28;
if (n >> 16) zeroes -= 16, n >>= 16;
if (n >> 8) zeroes -= 8, n >>= 8;
if (n >> 4) zeroes -= 4, n >>= 4;
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}
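// Leading-zero count of a 32-bit value: _BitScanReverse on MSVC,
// __builtin_clz on Clang/GCC, and the table-based fallback otherwise.
// Returns 32 when n == 0.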
static inline int CountLeadingZeros32(uint32_t n) {
#if !defined(__clang__) && defined(_MSC_VER)
unsigned long result = 0;
if (_BitScanReverse(&result, n)) {
return 31 - result;
}
return 32;
#elif defined(__clang__) || defined(__GNUC__)
if (n == 0) {
return 32;
}
return __builtin_clz(n);
#else
return CountLeadingZeros32Slow(n);
#endif
}
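// 1-based index of the highest set bit, e.g. MostSignificantBit32(1) == 1
// and MostSignificantBit32(0x80000000u) == 32; returns 0 when n == 0.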
static inline int MostSignificantBit32(uint32_t n) {
return 32 - CountLeadingZeros32(n);
}
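// 64-bit analogues of the routines above; the 32-bit MSVC path scans the
// value as two 32-bit halves.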
static inline int CountLeadingZeros64Slow(uint64_t n) {
int zeroes = 60;
if (n >> 32) zeroes -= 32, n >>= 32;
if (n >> 16) zeroes -= 16, n >>= 16;
if (n >> 8) zeroes -= 8, n >>= 8;
if (n >> 4) zeroes -= 4, n >>= 4;
return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}
static inline int CountLeadingZeros64(uint64_t n) {
#if !defined(__clang__) && defined(_MSC_VER) && defined(_M_X64)
unsigned long result = 0;
if (_BitScanReverse64(&result, n)) {
return 63 - result;
}
return 64;
#elif !defined(__clang__) && defined(_MSC_VER)
unsigned long result = 0;
if ((n >> 32) && _BitScanReverse(&result, n >> 32)) {
return 31 - result;
}
if (_BitScanReverse(&result, n)) {
return 63 - result;
}
return 64;
#elif defined(__clang__) || defined(__GNUC__)
if (n == 0) {
return 64;
}
return __builtin_clzll(n);
#else
return CountLeadingZeros64Slow(n);
#endif
}
static inline int MostSignificantBit64(uint64_t n) {
return 64 - CountLeadingZeros64(n);
}
#ifdef __cplusplus
}
#endif
#endif | #include "xla/tsl/lib/core/bits.h"
#include <cstdint>
#include "tsl/platform/test.h"
namespace tsl {
namespace {
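// 1 << 62 is the largest power of two representable as a positive int64_t,
// so the test pins NextPowerOfTwoS64 at that boundary.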
TEST(BitsTest, NextPowerOfTwoS64) {
constexpr int64_t kMaxRepresentablePowerOfTwo =
static_cast<int64_t>(uint64_t{1} << 62);
EXPECT_EQ(NextPowerOfTwoS64(0), 1);
EXPECT_EQ(NextPowerOfTwoS64(1), 1);
EXPECT_EQ(NextPowerOfTwoS64(2), 2);
EXPECT_EQ(NextPowerOfTwoS64(3), 4);
EXPECT_EQ(NextPowerOfTwoS64(kMaxRepresentablePowerOfTwo - 1),
kMaxRepresentablePowerOfTwo);
EXPECT_EQ(NextPowerOfTwoS64(kMaxRepresentablePowerOfTwo),
kMaxRepresentablePowerOfTwo);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/microfrontend/lib/bits.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/core/bits_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6824ada7-01ad-4c47-ba4e-358ca1192093 | cpp | tensorflow/tensorflow | gpu_delegate_compatibility_checker | tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker.cc | tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker_test.cc | #include "tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker.h"
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include "absl/status/status.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
#include "tensorflow/lite/tools/versioning/gpu_compatibility.h"
#include "tensorflow/lite/tools/versioning/op_signature.h"
namespace tflite {
namespace tools {
namespace {
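// Maps an absl::Status from the GPU compatibility check onto the proto
// failure taxonomy, recording the status message as the description.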
void convertToValidationFailureType(absl::Status status,
proto::OpCompatibilityResult* op_result) {
auto compatibility_failure = op_result->add_compatibility_failures();
compatibility_failure->set_description(std::string(status.message()));
switch (status.code()) {
case absl::StatusCode::kInvalidArgument:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INVALID_ARGUMENT);
break;
case absl::StatusCode::kUnimplemented:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_UNIMPLEMENTED_ERROR);
break;
case absl::StatusCode::kInternal:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INTERNAL_ERROR);
break;
case absl::StatusCode::kOutOfRange:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_OUT_OF_RANGE);
break;
default:
compatibility_failure->set_failure_type(
proto::CompatibilityFailureType::DCC_INTERNAL_ERROR);
compatibility_failure->set_description(
"Unknown validation failure type.");
}
}
}
std::unordered_map<std::string, std::string>
tools::GpuDelegateCompatibilityChecker::getDccConfigurations() {
return {};
}
absl::Status tools::GpuDelegateCompatibilityChecker::setDccConfigurations(
const std::unordered_map<std::string, std::string>& dcc_configs) {
return absl::OkStatus();
}
absl::Status
tools::GpuDelegateCompatibilityChecker::checkModelCompatibilityOnline(
tflite::FlatBufferModel* model_buffer,
tflite::proto::CompatibilityResult* result) {
return absl::UnimplementedError(
"Online mode is not supported on GPU delegate compatibility checker.");
}
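// Per-op offline check: delegates to CheckGpuDelegateCompatibility and
// records any failure on the op result proto.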
absl::Status tools::GpuDelegateCompatibilityChecker::checkOpSigCompatibility(
const OpSignature& op_sig,
tflite::proto::OpCompatibilityResult* op_result) {
auto status = CheckGpuDelegateCompatibility(op_sig);
if (!status.ok()) {
convertToValidationFailureType(status, op_result);
op_result->set_is_supported(false);
} else {
op_result->set_is_supported(true);
}
return absl::OkStatus();
}
}
} | #include "tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/delegates/compatibility/protos/compatibility_result.pb.h"
namespace tflite {
namespace tools {
#ifndef EXPECT_OK
#define EXPECT_OK(x) EXPECT_TRUE(x.ok());
#endif
namespace {
class AddOpModel : public SingleOpModel {
public:
AddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
protected:
int input1_;
int input2_;
int output_;
};
}
TEST(GpuDelegateCompatibilityCheckerTest, CheckOnlineMode) {
const std::string& full_path =
tensorflow::GetDataDependencyFilepath("tensorflow/lite/testdata/add.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
proto::CompatibilityResult compatibility_result;
GpuDelegateCompatibilityChecker gpu_dcc;
EXPECT_EQ(
gpu_dcc
.checkModelCompatibilityOnline(fb_model.get(), &compatibility_result)
.code(),
absl::StatusCode::kUnimplemented);
}
TEST(GpuDelegateCompatibilityCheckerTest, CompatibleModelOfflineMode) {
const std::string& full_path =
tensorflow::GetDataDependencyFilepath("tensorflow/lite/testdata/add.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
proto::CompatibilityResult compatibility_result;
GpuDelegateCompatibilityChecker gpu_dcc;
EXPECT_OK(gpu_dcc.checkModelCompatibilityOffline(fb_model.get(),
&compatibility_result));
for (auto op_compatibility_result :
compatibility_result.compatibility_results()) {
EXPECT_TRUE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result.compatibility_results_size(), 2);
}
TEST(GpuDelegateCompatibilityCheckerTest, IncompatibleModelOfflineMode) {
const std::string& full_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/lite/testdata/conv3d_huge_im2col.bin");
auto fb_model = FlatBufferModel::BuildFromFile(full_path.data());
ASSERT_TRUE(fb_model);
proto::CompatibilityResult compatibility_result;
GpuDelegateCompatibilityChecker gpu_dcc;
EXPECT_OK(gpu_dcc.checkModelCompatibilityOffline(fb_model.get(),
&compatibility_result));
for (auto op_compatibility_result :
compatibility_result.compatibility_results()) {
EXPECT_FALSE(op_compatibility_result.is_supported());
}
EXPECT_EQ(compatibility_result.compatibility_results_size(), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/compatibility/gpu/gpu_delegate_compatibility_checker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d5771585-ded8-4367-ad73-ce5eaccac290 | cpp | tensorflow/tensorflow | while_gradients | tensorflow/cc/framework/while_gradients.cc | tensorflow/cc/framework/while_gradients_test.cc | #include "tensorflow/cc/framework/while_gradients.h"
#include <string>
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/while_loop.h"
namespace tensorflow {
namespace {
using ops::BodyGraphBuilderFn;
using ops::BuildWhileLoop;
using ops::CondGraphBuilderFn;
Output ToOutput(OutputTensor output_tensor) {
return Output(const_cast<Node*>(output_tensor.node), output_tensor.index);
}
std::vector<Output> ToOutputVector(
const std::vector<OutputTensor>& output_tensors) {
const int n = output_tensors.size();
std::vector<Output> result;
result.reserve(n);
for (int i = 0; i < n; ++i) result.push_back(ToOutput(output_tensors[i]));
return result;
}
string BackPropFrameName(const string& forward_frame_name) {
return strings::StrCat(forward_frame_name, "_backprop");
}
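// Builds a counter loop in the forward loop's frame that reuses the forward
// condition output, so `count` ends up holding the forward trip count.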
Status AddForwardLoopCounter(WhileContext* while_ctx, const Scope& scope,
Output* count) {
Output zero = ops::Const(scope, 0, {});
CondGraphBuilderFn cond_fn = [while_ctx](const Scope& scope,
const std::vector<Output>& inputs,
Output* output) {
*output = ToOutput(while_ctx->cond_output());
return absl::OkStatus();
};
BodyGraphBuilderFn body_fn = [](const Scope& scope,
const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
DCHECK_EQ(inputs.size(), 1);
outputs->emplace_back(ops::Add(scope, inputs[0], 1));
return scope.status();
};
std::vector<Output> outputs;
TF_RETURN_IF_ERROR(BuildWhileLoop(scope, {zero}, cond_fn, body_fn,
while_ctx->frame_name(), &outputs,
false));
*count = outputs[0];
return absl::OkStatus();
}
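// Counts down from the forward trip count in the backprop frame; the
// condition it emits (backprop_execution_pred) gates each gradient step.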
Status AddBackPropLoopCounter(WhileContext* while_ctx, const Output& loop_count,
const Scope& scope,
Output* backprop_execution_pred) {
CondGraphBuilderFn cond_fn = [](const Scope& scope,
const std::vector<Output>& inputs,
Output* output) {
DCHECK_EQ(inputs.size(), 1);
*output = ops::Greater(scope, inputs[0], 0);
return scope.status();
};
BodyGraphBuilderFn body_fn = [](const Scope& scope,
const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
DCHECK_EQ(inputs.size(), 1);
outputs->emplace_back(ops::Subtract(scope, inputs[0], 1));
return scope.status();
};
string frame_name = BackPropFrameName(while_ctx->frame_name());
std::vector<Output> outputs;
TF_RETURN_IF_ERROR(BuildWhileLoop(
scope, {loop_count}, cond_fn, body_fn, frame_name, &outputs,
false, backprop_execution_pred));
return absl::OkStatus();
}
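// Builds the gradient while loop: it iterates while backprop_execution_pred
// holds, and each body step runs AddSymbolicGradients over the forward body.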
Status AddWhileGradientLoop(WhileContext* while_ctx,
const std::vector<Output>& grad_inputs,
const Output& backprop_execution_pred,
const Scope& parent_scope,
std::vector<Output>* grad_outputs) {
DCHECK_EQ(grad_inputs.size(), while_ctx->body_outputs().size());
DCHECK_EQ(while_ctx->body_inputs().size(), while_ctx->body_outputs().size());
Scope scope = parent_scope.NewSubScope("while");
CondGraphBuilderFn cond_fn = [backprop_execution_pred](
const Scope& scope,
const std::vector<Output>& inputs,
Output* output) {
*output = backprop_execution_pred;
return absl::OkStatus();
};
BodyGraphBuilderFn body_fn = [while_ctx](const Scope& scope,
const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
std::vector<Output> body_outputs =
ToOutputVector(while_ctx->body_outputs());
std::vector<Output> body_inputs = ToOutputVector(while_ctx->body_inputs());
return AddSymbolicGradients(scope, body_outputs, body_inputs, inputs,
outputs);
};
string frame_name = BackPropFrameName(while_ctx->frame_name());
TF_RETURN_IF_ERROR(BuildWhileLoop(scope, grad_inputs, cond_fn, body_fn,
frame_name, grad_outputs,
false));
return absl::OkStatus();
}
}
Status AddWhileLoopGradient(WhileContext* while_ctx, const Scope& scope,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
Output forward_loop_count;
TF_RETURN_IF_ERROR(AddForwardLoopCounter(
while_ctx, scope.NewSubScope("ForwardLoopCounter"), &forward_loop_count));
Output backprop_counter_cond;
TF_RETURN_IF_ERROR(AddBackPropLoopCounter(
while_ctx, forward_loop_count, scope.NewSubScope("BackPropLoopCounter"),
&backprop_counter_cond));
return AddWhileGradientLoop(while_ctx, grad_inputs, backprop_counter_cond,
scope, grad_outputs);
}
} | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class WhileGradientsTest : public ::testing::Test {
protected:
WhileGradientsTest() : scope_(Scope::NewRootScope()) {}
void Init(int num_inputs, DataType dtype = DT_INT32) {
for (int i = 0; i < num_inputs; ++i) {
inputs_.push_back(ops::Placeholder(scope_, dtype));
}
}
void CreateLoop(const ops::CondGraphBuilderFn& cond,
const ops::BodyGraphBuilderFn& body,
const std::vector<Output>* inputs = nullptr) {
if (inputs == nullptr) inputs = &inputs_;
TF_ASSERT_OK(ops::BuildWhileLoop(scope_, *inputs, cond, body, "test_loop",
&outputs_));
}
void CreateBackprop() {
TF_ASSERT_OK(
AddSymbolicGradients(scope_, outputs_, inputs_, &grad_outputs_));
ASSERT_EQ(grad_outputs_.size(), inputs_.size());
}
template <typename T>
void Run(const std::vector<Input::Initializer>& input_values,
const std::vector<T>& expected_grad_values) {
Run<T>(ClientSession(scope_), input_values, expected_grad_values);
}
template <typename T>
void Run(const ClientSession& session,
const std::vector<Input::Initializer>& input_values,
const std::vector<T>& expected_grad_values,
const RunOptions& run_options = RunOptions(),
RunMetadata* run_metadata = nullptr) {
DCHECK_EQ(input_values.size(), inputs_.size());
ClientSession::FeedType feeds;
for (int i = 0; i < inputs_.size(); ++i) {
feeds.emplace(inputs_[i], input_values[i]);
}
std::vector<Operation> run_outputs;
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(run_options, feeds, grad_outputs_, run_outputs,
&out_tensors, run_metadata));
ASSERT_EQ(out_tensors.size(), grad_outputs_.size());
DCHECK_EQ(expected_grad_values.size(), out_tensors.size());
for (int i = 0; i < out_tensors.size(); ++i) {
test::ExpectTensorEqual<T>(
out_tensors[i], test::AsTensor<T>({expected_grad_values[i]}, {}));
}
}
Scope scope_;
std::vector<Output> inputs_;
std::vector<Output> outputs_;
std::vector<Output> grad_outputs_;
};
TEST_F(WhileGradientsTest, Basic) {
Init(1);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::Less(s, inputs[0], 10);
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(s, {inputs[0], 1}));
return s.status();
});
CreateBackprop();
Run<int>({1}, {1});
Run<int>({11}, {1});
}
TEST_F(WhileGradientsTest, MultipleLoopVars) {
Init(3);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::Less(s, inputs[0], 10);
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(s, {inputs[0], inputs[1]}));
outputs->push_back(ops::AddN(s, {inputs[1], 1}));
outputs->push_back(inputs[2]);
return s.status();
});
CreateBackprop();
Run<int>({0, 1, 2}, {1, 5, 1});
Run<int>({1, 1, 0}, {1, 5, 1});
Run<int>({0, 0, 0}, {1, 6, 1});
}
TEST_F(WhileGradientsTest, Chaining) {
Init(2, DT_DOUBLE);
std::vector<Output> loop_inputs = {ops::Multiply(scope_, inputs_[0], 2.0),
ops::Multiply(scope_, inputs_[1], 2.0)};
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::LogicalAnd(s, ops::Greater(s, inputs[0], 0.0),
ops::Greater(s, inputs[1], 0.0));
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(s, {inputs[0], -1.0}));
outputs->push_back(inputs[1]);
return s.status();
},
&loop_inputs);
outputs_[0] = ops::Neg(scope_, outputs_[0]);
CreateBackprop();
Run<double>({1.0, 1.0}, {-2.0, 2.0});
Run<double>({0.0, 0.0}, {-2.0, 2.0});
}
TEST_F(WhileGradientsTest, MultipleDevices) {
scope_ = scope_.WithDevice("/cpu:0");
Init(2);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::Less(s, inputs[0], 10);
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
Scope cpu1_scope = s.WithDevice("/cpu:1");
outputs->push_back(ops::AddN(cpu1_scope, {inputs[0], inputs[1]}));
outputs->push_back(inputs[1]);
return cpu1_scope.status();
});
Scope cpu1_scope = scope_.WithDevice("/cpu:1");
TF_ASSERT_OK(
AddSymbolicGradients(cpu1_scope, outputs_, inputs_, &grad_outputs_));
ASSERT_EQ(grad_outputs_.size(), inputs_.size());
SessionOptions session_options;
(*session_options.config.mutable_device_count())["CPU"] = 2;
RunOptions run_options;
run_options.set_output_partition_graphs(true);
RunMetadata run_metadata;
Run<int>(ClientSession(scope_, session_options), {0, 1}, {1, 11}, run_options,
&run_metadata);
ASSERT_EQ(run_metadata.partition_graphs().size(), 2);
for (const GraphDef& partition_graph : run_metadata.partition_graphs()) {
EXPECT_GE(partition_graph.node().size(), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/while_gradients.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/framework/while_gradients_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8516a4c9-8e79-4fdf-822d-a8ebae3ce68e | cpp | tensorflow/tensorflow | structure_verifier | tensorflow/core/grappler/verifiers/structure_verifier.cc | tensorflow/core/grappler/verifiers/structure_verifier_test.cc | #include "tensorflow/core/grappler/verifiers/structure_verifier.h"
#include <string>
#include <vector>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/validate.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/verifiers/graph_verifier.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
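// Runs three structural checks (op-registry conformance, unique node names,
// and acyclicity via topological sort) and concatenates any failures.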
Status StructureVerifier::Verify(const GraphDef& graph) {
StatusGroup status_group;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
graph.library());
status_group.Update(tensorflow::graph::ValidateGraphDefAgainstOpRegistry(
graph, function_library));
status_group.Update(tensorflow::graph::VerifyNoDuplicateNodeNames(graph));
std::vector<const NodeDef*> topo_order;
status_group.Update(ComputeTopologicalOrder(graph, &topo_order));
return status_group.as_concatenated_status();
}
}
} | #include "tensorflow/core/grappler/verifiers/structure_verifier.h"
#include <memory>
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/parsing_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class StructureVerifierTest : public ::testing::Test {
protected:
StructureVerifierTest() { verifier_ = std::make_unique<StructureVerifier>(); }
void SetGraph(const string& gdef_ascii) {
CHECK(protobuf::TextFormat::ParseFromString(gdef_ascii, &graph_));
}
GraphDef graph_;
std::unique_ptr<StructureVerifier> verifier_;
};
Status Scalars(shape_inference::InferenceContext* c) {
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
REGISTER_OP("TestParams").Output("o: float").SetShapeFn(Scalars);
REGISTER_OP("TestInput")
.Output("a: float")
.Output("b: float")
.SetShapeFn(Scalars);
REGISTER_OP("TestMul")
.Input("a: float")
.Input("b: float")
.Output("o: float")
.SetShapeFn(Scalars);
TEST_F(StructureVerifierTest, ValidGraphs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10});
ops::ShapeN b(s.WithOpName("b"), {a, a, a});
GraphDef graph;
TF_CHECK_OK(s.ToGraphDef(&graph));
TF_EXPECT_OK(verifier_->Verify(graph));
SetGraph(
"node { name: 'W1' op: 'TestParams' }"
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'W1', 'input:1' ] }");
TF_EXPECT_OK(verifier_->Verify(graph_));
}
TEST_F(StructureVerifierTest, OpNotRegistered) {
SetGraph(
"node { name: 'input' op: 'OpNotRegistered' }"
"node { name: 't1' op: 'TestMul' input: [ 'input:0', 't2' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'input:1', 't1' ] }");
Status status = verifier_->Verify(graph_);
EXPECT_TRUE(errors::IsNotFound(status));
EXPECT_TRUE(absl::StrContains(status.message(), "Op type not registered"));
}
TEST_F(StructureVerifierTest, DuplicateNodeNames) {
SetGraph(
"node { name: 'A' op: 'TestParams' }"
"node { name: 'A' op: 'TestInput' }");
Status status = verifier_->Verify(graph_);
EXPECT_TRUE(errors::IsAlreadyExists(status));
EXPECT_TRUE(absl::StrContains(status.message(), "Node already exists:"));
}
TEST_F(StructureVerifierTest, GraphWithInvalidCycle) {
SetGraph(
"node { name: 'input' op: 'TestInput' }"
"node { name: 't1' op: 'TestMul' input: [ 'input:0', 't2' ] }"
"node { name: 't2' op: 'TestMul' input: [ 'input:1', 't1' ] }");
Status status = verifier_->Verify(graph_);
EXPECT_TRUE(errors::IsInvalidArgument(status));
EXPECT_TRUE(absl::StrContains(
status.message(), "The graph couldn't be sorted in topological order"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/verifiers/structure_verifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/verifiers/structure_verifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
142e80f4-e36b-41d7-9010-10ab919d054d | cpp | google/cel-cpp | map_value | common/values/map_value.cc | common/values/map_value_test.cc | #include <cstddef>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "common/casting.h"
#include "common/value.h"
#include "common/value_kind.h"
#include "internal/status_macros.h"
namespace cel {
namespace {
absl::Status InvalidMapKeyTypeError(ValueKind kind) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid map key type: '", ValueKindToString(kind), "'"));
}
}
absl::string_view MapValue::GetTypeName() const {
return absl::visit(
[](const auto& alternative) -> absl::string_view {
return alternative.GetTypeName();
},
variant_);
}
std::string MapValue::DebugString() const {
return absl::visit(
[](const auto& alternative) -> std::string {
return alternative.DebugString();
},
variant_);
}
absl::Status MapValue::SerializeTo(AnyToJsonConverter& converter,
absl::Cord& value) const {
return absl::visit(
[&converter, &value](const auto& alternative) -> absl::Status {
return alternative.SerializeTo(converter, value);
},
variant_);
}
absl::StatusOr<Json> MapValue::ConvertToJson(
AnyToJsonConverter& converter) const {
return absl::visit(
[&converter](const auto& alternative) -> absl::StatusOr<Json> {
return alternative.ConvertToJson(converter);
},
variant_);
}
absl::StatusOr<JsonObject> MapValue::ConvertToJsonObject(
AnyToJsonConverter& converter) const {
return absl::visit(
[&converter](const auto& alternative) -> absl::StatusOr<JsonObject> {
return alternative.ConvertToJsonObject(converter);
},
variant_);
}
bool MapValue::IsZeroValue() const {
return absl::visit(
[](const auto& alternative) -> bool { return alternative.IsZeroValue(); },
variant_);
}
absl::StatusOr<bool> MapValue::IsEmpty() const {
return absl::visit(
[](const auto& alternative) -> bool { return alternative.IsEmpty(); },
variant_);
}
absl::StatusOr<size_t> MapValue::Size() const {
return absl::visit(
[](const auto& alternative) -> size_t { return alternative.Size(); },
variant_);
}
namespace common_internal {
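// Deep map equality: sizes must match and every key of lhs must map to an
// equal value in rhs; short-circuits to false on the first mismatch.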
absl::Status MapValueEqual(ValueManager& value_manager, const MapValue& lhs,
const MapValue& rhs, Value& result) {
if (Is(lhs, rhs)) {
result = BoolValue{true};
return absl::OkStatus();
}
CEL_ASSIGN_OR_RETURN(auto lhs_size, lhs.Size());
CEL_ASSIGN_OR_RETURN(auto rhs_size, rhs.Size());
if (lhs_size != rhs_size) {
result = BoolValue{false};
return absl::OkStatus();
}
CEL_ASSIGN_OR_RETURN(auto lhs_iterator, lhs.NewIterator(value_manager));
Value lhs_key;
Value lhs_value;
Value rhs_value;
for (size_t index = 0; index < lhs_size; ++index) {
ABSL_CHECK(lhs_iterator->HasNext());
CEL_RETURN_IF_ERROR(lhs_iterator->Next(value_manager, lhs_key));
bool rhs_value_found;
CEL_ASSIGN_OR_RETURN(rhs_value_found,
rhs.Find(value_manager, lhs_key, rhs_value));
if (!rhs_value_found) {
result = BoolValue{false};
return absl::OkStatus();
}
CEL_RETURN_IF_ERROR(lhs.Get(value_manager, lhs_key, lhs_value));
CEL_RETURN_IF_ERROR(lhs_value.Equal(value_manager, rhs_value, result));
if (auto bool_value = As<BoolValue>(result);
bool_value.has_value() && !bool_value->NativeValue()) {
return absl::OkStatus();
}
}
ABSL_DCHECK(!lhs_iterator->HasNext());
result = BoolValue{true};
return absl::OkStatus();
}
absl::Status MapValueEqual(ValueManager& value_manager,
const ParsedMapValueInterface& lhs,
const MapValue& rhs, Value& result) {
auto lhs_size = lhs.Size();
CEL_ASSIGN_OR_RETURN(auto rhs_size, rhs.Size());
if (lhs_size != rhs_size) {
result = BoolValue{false};
return absl::OkStatus();
}
CEL_ASSIGN_OR_RETURN(auto lhs_iterator, lhs.NewIterator(value_manager));
Value lhs_key;
Value lhs_value;
Value rhs_value;
for (size_t index = 0; index < lhs_size; ++index) {
ABSL_CHECK(lhs_iterator->HasNext());
CEL_RETURN_IF_ERROR(lhs_iterator->Next(value_manager, lhs_key));
bool rhs_value_found;
CEL_ASSIGN_OR_RETURN(rhs_value_found,
rhs.Find(value_manager, lhs_key, rhs_value));
if (!rhs_value_found) {
result = BoolValue{false};
return absl::OkStatus();
}
CEL_RETURN_IF_ERROR(lhs.Get(value_manager, lhs_key, lhs_value));
CEL_RETURN_IF_ERROR(lhs_value.Equal(value_manager, rhs_value, result));
if (auto bool_value = As<BoolValue>(result);
bool_value.has_value() && !bool_value->NativeValue()) {
return absl::OkStatus();
}
}
ABSL_DCHECK(!lhs_iterator->HasNext());
result = BoolValue{true};
return absl::OkStatus();
}
}
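// CEL map keys must be bool, int, uint, or string; any other kind yields an
// invalid-argument error.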
absl::Status CheckMapKey(const Value& key) {
switch (key.kind()) {
case ValueKind::kBool:
ABSL_FALLTHROUGH_INTENDED;
case ValueKind::kInt:
ABSL_FALLTHROUGH_INTENDED;
case ValueKind::kUint:
ABSL_FALLTHROUGH_INTENDED;
case ValueKind::kString:
return absl::OkStatus();
default:
return InvalidMapKeyTypeError(key.kind());
}
}
common_internal::ValueVariant MapValue::ToValueVariant() const& {
return absl::visit(
[](const auto& alternative) -> common_internal::ValueVariant {
return alternative;
},
variant_);
}
common_internal::ValueVariant MapValue::ToValueVariant() && {
return absl::visit(
[](auto&& alternative) -> common_internal::ValueVariant {
return std::move(alternative);
},
std::move(variant_));
}
} | #include <cstdint>
#include <memory>
#include <sstream>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::TestParamInfo;
using ::testing::UnorderedElementsAreArray;
TEST(MapValue, CheckKey) {
EXPECT_THAT(CheckMapKey(BoolValue()), IsOk());
EXPECT_THAT(CheckMapKey(IntValue()), IsOk());
EXPECT_THAT(CheckMapKey(UintValue()), IsOk());
EXPECT_THAT(CheckMapKey(StringValue()), IsOk());
EXPECT_THAT(CheckMapKey(BytesValue()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
class MapValueTest : public common_internal::ThreadCompatibleValueTest<> {
public:
template <typename... Args>
absl::StatusOr<MapValue> NewIntDoubleMapValue(Args&&... args) {
CEL_ASSIGN_OR_RETURN(auto builder,
value_manager().NewMapValueBuilder(MapType()));
(static_cast<void>(builder->Put(std::forward<Args>(args).first,
std::forward<Args>(args).second)),
...);
return std::move(*builder).Build();
}
template <typename... Args>
absl::StatusOr<MapValue> NewJsonMapValue(Args&&... args) {
CEL_ASSIGN_OR_RETURN(auto builder,
value_manager().NewMapValueBuilder(JsonMapType()));
(static_cast<void>(builder->Put(std::forward<Args>(args).first,
std::forward<Args>(args).second)),
...);
return std::move(*builder).Build();
}
};
TEST_P(MapValueTest, Default) {
MapValue map_value;
EXPECT_THAT(map_value.IsEmpty(), IsOkAndHolds(true));
EXPECT_THAT(map_value.Size(), IsOkAndHolds(0));
EXPECT_EQ(map_value.DebugString(), "{}");
ASSERT_OK_AND_ASSIGN(auto list_value, map_value.ListKeys(value_manager()));
EXPECT_THAT(list_value.IsEmpty(), IsOkAndHolds(true));
EXPECT_THAT(list_value.Size(), IsOkAndHolds(0));
EXPECT_EQ(list_value.DebugString(), "[]");
ASSERT_OK_AND_ASSIGN(auto iterator, map_value.NewIterator(value_manager()));
EXPECT_FALSE(iterator->HasNext());
EXPECT_THAT(iterator->Next(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(MapValueTest, Kind) {
ASSERT_OK_AND_ASSIGN(
auto value,
NewIntDoubleMapValue(std::pair{IntValue(0), DoubleValue(3.0)},
std::pair{IntValue(1), DoubleValue(4.0)},
std::pair{IntValue(2), DoubleValue(5.0)}));
EXPECT_EQ(value.kind(), MapValue::kKind);
EXPECT_EQ(Value(value).kind(), MapValue::kKind);
}
TEST_P(MapValueTest, DebugString) {
ASSERT_OK_AND_ASSIGN(
auto value,
NewIntDoubleMapValue(std::pair{IntValue(0), DoubleValue(3.0)},
std::pair{IntValue(1), DoubleValue(4.0)},
std::pair{IntValue(2), DoubleValue(5.0)}));
{
std::ostringstream out;
out << value;
EXPECT_EQ(out.str(), "{0: 3.0, 1: 4.0, 2: 5.0}");
}
{
std::ostringstream out;
out << Value(value);
EXPECT_EQ(out.str(), "{0: 3.0, 1: 4.0, 2: 5.0}");
}
}
TEST_P(MapValueTest, IsEmpty) {
ASSERT_OK_AND_ASSIGN(
auto value,
NewIntDoubleMapValue(std::pair{IntValue(0), DoubleValue(3.0)},
std::pair{IntValue(1), DoubleValue(4.0)},
std::pair{IntValue(2), DoubleValue(5.0)}));
EXPECT_THAT(value.IsEmpty(), IsOkAndHolds(false));
}
TEST_P(MapValueTest, Size) {
ASSERT_OK_AND_ASSIGN(
auto value,
NewIntDoubleMapValue(std::pair{IntValue(0), DoubleValue(3.0)},
std::pair{IntValue(1), DoubleValue(4.0)},
std::pair{IntValue(2), DoubleValue(5.0)}));
EXPECT_THAT(value.Size(), IsOkAndHolds(3));
}
TEST_P(MapValueTest, Get) {
ASSERT_OK_AND_ASSIGN(
auto map_value,
NewIntDoubleMapValue(std::pair{IntValue(0), DoubleValue(3.0)},
std::pair{IntValue(1), DoubleValue(4.0)},
std::pair{IntValue(2), DoubleValue(5.0)}));
ASSERT_OK_AND_ASSIGN(auto value, map_value.Get(value_manager(), IntValue(0)));
ASSERT_TRUE(InstanceOf<DoubleValue>(value));
ASSERT_EQ(Cast<DoubleValue>(value).NativeValue(), 3.0);
ASSERT_OK_AND_ASSIGN(value, map_value.Get(value_manager(), IntValue(1)));
ASSERT_TRUE(InstanceOf<DoubleValue>(value));
ASSERT_EQ(Cast<DoubleValue>(value).NativeValue(), 4.0);
ASSERT_OK_AND_ASSIGN(value, map_value.Get(value_manager(), IntValue(2)));
ASSERT_TRUE(InstanceOf<DoubleValue>(value));
ASSERT_EQ(Cast<DoubleValue>(value).NativeValue(), 5.0);
EXPECT_THAT(map_value.Get(value_manager(), IntValue(3)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(MapValueTest, Find) {
ASSERT_OK_AND_ASSIGN(
auto map_value,
NewIntDoubleMapValue(std::pair{IntValue(0), DoubleValue(3.0)},
std::pair{IntValue(1), DoubleValue(4.0)},
std::pair{IntValue(2), DoubleValue(5.0)}));
Value value;
bool ok;
ASSERT_OK_AND_ASSIGN(std::tie(value, ok),
map_value.Find(value_manager(), IntValue(0)));
ASSERT_TRUE(ok);
ASSERT_TRUE(InstanceOf<DoubleValue>(value));
ASSERT_EQ(Cast<DoubleValue>(value).NativeValue(), 3.0);
ASSERT_OK_AND_ASSIGN(std::tie(value, ok),
map_value.Find(value_manager(), IntValue(1)));
ASSERT_TRUE(ok);
ASSERT_TRUE(InstanceOf<DoubleValue>(value));
ASSERT_EQ(Cast<DoubleValue>(value).NativeValue(), 4.0);
ASSERT_OK_AND_ASSIGN(std::tie(value, ok),
map_value.Find(value_manager(), IntValue(2)));
ASSERT_TRUE(ok);
ASSERT_TRUE(InstanceOf<DoubleValue>(value));
ASSERT_EQ(Cast<DoubleValue>(value).NativeValue(), 5.0);
ASSERT_OK_AND_ASSIGN(std::tie(value, ok),
map_value.Find(value_manager(), IntValue(3)));
ASSERT_FALSE(ok);
}
TEST_P(MapValueTest, Has) {
ASSERT_OK_AND_ASSIGN(
auto map_value,
NewIntDoubleMapValue(std::pair{IntValue(0), DoubleValue(3.0)},
std::pair{IntValue(1), DoubleValue(4.0)},
std::pair{IntValue(2), DoubleValue(5.0)}));
ASSERT_OK_AND_ASSIGN(auto value, map_value.Has(value_manager(), IntValue(0)));
ASSERT_TRUE(InstanceOf<BoolValue>(value));
ASSERT_TRUE(Cast<BoolValue>(value).NativeValue());
ASSERT_OK_AND_ASSIGN(value, map_value.Has(value_manager(), IntValue(1)));
ASSERT_TRUE(InstanceOf<BoolValue>(value));
ASSERT_TRUE(Cast<BoolValue>(value).NativeValue());
ASSERT_OK_AND_ASSIGN(value, map_value.Has(value_manager(), IntValue(2)));
ASSERT_TRUE(InstanceOf<BoolValue>(value));
ASSERT_TRUE(Cast<BoolValue>(value).NativeValue());
ASSERT_OK_AND_ASSIGN(value, map_value.Has(value_manager(), IntValue(3)));
ASSERT_TRUE(InstanceOf<BoolValue>(value));
ASSERT_FALSE(Cast<BoolValue>(value).NativeValue());
}
TEST_P(MapValueTest, ListKeys) {
ASSERT_OK_AND_ASSIGN(
auto map_value,
NewIntDoubleMapValue(std::pair{IntValue(0), DoubleValue(3.0)},
std::pair{IntValue(1), DoubleValue(4.0)},
std::pair{IntValue(2), DoubleValue(5.0)}));
ASSERT_OK_AND_ASSIGN(auto list_keys, map_value.ListKeys(value_manager()));
std::vector<int64_t> keys;
ASSERT_OK(
list_keys.ForEach(value_manager(), [&keys](const Value& element) -> bool {
keys.push_back(Cast<IntValue>(element).NativeValue());
return true;
}));
EXPECT_THAT(keys, UnorderedElementsAreArray({0, 1, 2}));
}
TEST_P(MapValueTest, ForEach) {
ASSERT_OK_AND_ASSIGN(
auto value,
NewIntDoubleMapValue(std::pair{IntValue(0), DoubleValue(3.0)},
std::pair{IntValue(1), DoubleValue(4.0)},
std::pair{IntValue(2), DoubleValue(5.0)}));
std::vector<std::pair<int64_t, double>> entries;
EXPECT_OK(value.ForEach(
value_manager(), [&entries](const Value& key, const Value& value) {
entries.push_back(std::pair{Cast<IntValue>(key).NativeValue(),
Cast<DoubleValue>(value).NativeValue()});
return true;
}));
EXPECT_THAT(entries,
UnorderedElementsAreArray(
{std::pair{0, 3.0}, std::pair{1, 4.0}, std::pair{2, 5.0}}));
}
TEST_P(MapValueTest, NewIterator) {
ASSERT_OK_AND_ASSIGN(
auto value,
NewIntDoubleMapValue(std::pair{IntValue(0), DoubleValue(3.0)},
std::pair{IntValue(1), DoubleValue(4.0)},
std::pair{IntValue(2), DoubleValue(5.0)}));
ASSERT_OK_AND_ASSIGN(auto iterator, value.NewIterator(value_manager()));
std::vector<int64_t> keys;
while (iterator->HasNext()) {
ASSERT_OK_AND_ASSIGN(auto element, iterator->Next(value_manager()));
ASSERT_TRUE(InstanceOf<IntValue>(element));
keys.push_back(Cast<IntValue>(element).NativeValue());
}
EXPECT_EQ(iterator->HasNext(), false);
EXPECT_THAT(iterator->Next(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
EXPECT_THAT(keys, UnorderedElementsAreArray({0, 1, 2}));
}
TEST_P(MapValueTest, ConvertToJson) {
ASSERT_OK_AND_ASSIGN(
auto value,
NewJsonMapValue(std::pair{StringValue("0"), DoubleValue(3.0)},
std::pair{StringValue("1"), DoubleValue(4.0)},
std::pair{StringValue("2"), DoubleValue(5.0)}));
EXPECT_THAT(value.ConvertToJson(value_manager()),
IsOkAndHolds(Json(MakeJsonObject({{JsonString("0"), 3.0},
{JsonString("1"), 4.0},
{JsonString("2"), 5.0}}))));
}
INSTANTIATE_TEST_SUITE_P(
MapValueTest, MapValueTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
MapValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/map_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/map_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3406a3e3-4682-478d-868d-9065687f1b4b | cpp | tensorflow/tensorflow | hlo_module_dce | third_party/xla/xla/service/hlo_module_dce.cc | third_party/xla/xla/service/hlo_module_dce_test.cc | #include "xla/service/hlo_module_dce.h"
#include <deque>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_liveness_analysis.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
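// For every while loop, rewires dead tuple elements of the body root into
// pass-throughs of the loop parameter, then DCEs the modified bodies.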
absl::StatusOr<bool> RunWhileDCE(
HloModule* module, HloLivenessAnalysis* liveness,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
std::vector<HloComputation*> while_body_comps_to_dce;
for (auto* computation : module->computations(execution_threads)) {
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kWhile) {
continue;
}
const auto* xla_while = instruction;
auto* while_body_comp = xla_while->while_body();
auto* while_body_param = while_body_comp->parameter_instruction(0);
auto* while_body_root = while_body_comp->root_instruction();
if (!xla_while->shape().IsTuple() ||
while_body_root->opcode() != HloOpcode::kTuple) {
VLOG(1) << "WhileDCE SKIP while: " << xla_while->ToString();
continue;
}
const int64_t tuple_element_count =
ShapeUtil::TupleElementCount(xla_while->shape());
bool modified_while_body_comp = false;
for (int64_t i = 0; i < tuple_element_count; ++i) {
if (liveness->IsLive(xla_while, {i})) {
continue;
}
VLOG(1) << "WhileDCE Dead while tuple element."
<< " while: " << xla_while->name() << " tuple_index: " << i;
HloInstruction* pass_thru_gte = while_body_comp->AddInstruction(
HloInstruction::CreateGetTupleElement(
while_body_param->shape().tuple_shapes(i), while_body_param,
i));
TF_RETURN_IF_ERROR(
while_body_root->ReplaceOperandWith(i, pass_thru_gte));
changed = true;
modified_while_body_comp = true;
}
if (modified_while_body_comp) {
while_body_comps_to_dce.push_back(while_body_comp);
}
}
}
for (auto* while_body_comp : while_body_comps_to_dce) {
TF_ASSIGN_OR_RETURN(bool changed_for_computation,
HloDCE::RunOnComputation(
while_body_comp,
false));
changed |= changed_for_computation;
}
return changed;
}
}
absl::StatusOr<bool> HloModuleDCE::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "Before HloModuleDCE:";
XLA_VLOG_LINES(3, module->ToString());
std::unique_ptr<HloLivenessAnalysis> liveness;
TF_ASSIGN_OR_RETURN(liveness, HloLivenessAnalysis::Run(*module));
TF_ASSIGN_OR_RETURN(bool hlo_module_dce_changed,
RunWhileDCE(module, liveness.get(), execution_threads));
WhileLoopSimplifier while_loop_simplifier;
TF_ASSIGN_OR_RETURN(bool while_loop_simplifier_changed,
while_loop_simplifier.Run(module, execution_threads));
TupleSimplifier tuple_simplifier;
TF_ASSIGN_OR_RETURN(bool tuple_simplifier_changed,
tuple_simplifier.Run(module, execution_threads));
HloDCE hlo_dce;
TF_ASSIGN_OR_RETURN(bool hlo_dce_changed,
hlo_dce.Run(module, execution_threads));
VLOG(2) << "After HloModuleDCE:";
XLA_VLOG_LINES(3, module->ToString());
return hlo_module_dce_changed | hlo_dce_changed | tuple_simplifier_changed |
while_loop_simplifier_changed;
}
} | #include "xla/service/hlo_module_dce.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
class HloModuleDceTest : public HloTestBase {
protected:
HloModuleDceTest() {}
bool HasInstruction(const HloComputation& computation,
const HloInstruction* instruction) {
return absl::c_linear_search(computation.instructions(), instruction);
}
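// True iff the named while loop's body root forwards tuple element
// `tuple_index` unchanged from the loop parameter, i.e. the shape the pass
// leaves behind for dead elements.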
bool WhileBodyHasPassThroughTupleElement(const HloComputation* computation,
const std::string& while_name,
const int64_t tuple_index) {
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile &&
instruction->name() == while_name) {
auto* while_body_comp = instruction->while_body();
auto* while_body_param = while_body_comp->parameter_instruction(0);
auto* while_body_root = while_body_comp->root_instruction();
if (while_body_root->opcode() != HloOpcode::kTuple) {
return false;
}
auto* operand = while_body_root->operand(tuple_index);
if (operand->opcode() == HloOpcode::kGetTupleElement &&
operand->tuple_index() == tuple_index &&
operand->operand(0) == while_body_param) {
return true;
}
return false;
}
}
return false;
}
std::vector<const HloInstruction*> GetWhileLoops(
const HloComputation* computation) {
std::vector<const HloInstruction*> while_loops;
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile) {
while_loops.push_back(instruction);
}
}
return while_loops;
}
};
TEST_F(HloModuleDceTest, WhileWithLiveOutputs) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
}
TEST_F(HloModuleDceTest, WhileWithUnusedSideEffectingTupleElement) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], f32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = f32[] get-tuple-element(loop_var.1), index=1
constant.2 = f32[] constant(1.0)
rng = f32[] rng(constant.2, get-tuple-element.2), distribution=rng_uniform
add.1 = f32[] add(get-tuple-element.2, constant.2)
ROOT tuple = (s32[], f32[]) tuple(add, add.1)
}
SimpleLoop.condition {
loop_var.2 = (s32[], f32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.3 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.3), direction=LT
}
ENTRY SimpleLoop {
constant.4 = s32[] constant(0)
constant.5 = f32[] constant(0.0)
tuple.1 = (s32[], f32[]) tuple(constant.4, constant.5)
while = (s32[], f32[]) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
}
TEST_F(HloModuleDceTest, OneWhileWithDeadTupleElement) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
while = (s32[], s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(1, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
}
TEST_F(HloModuleDceTest, OneWhileWithTupleElementUsedByCond) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
multiply = s32[] multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[]) tuple(add, multiply)
}
SimpleLoop.condition {
loop_var.2 = (s32[], s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=1
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
constant.4 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.3, constant.4)
while = (s32[], s32[]) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 1));
}
TEST_F(HloModuleDceTest, TwoWhilesWithDeadTupleElement) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body0 {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
SimpleLoop.condition0 {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body1 {
loop_var.3 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.3), index=0
constant.3 = s32[] constant(1)
add.1 = s32[] add(get-tuple-element.4, constant.3)
get-tuple-element.5 = s32[3]{0} get-tuple-element(loop_var.3), index=1
multiply.1 = s32[3]{0} multiply(get-tuple-element.5, get-tuple-element.5)
ROOT tuple.1 = (s32[], s32[3]{0}) tuple(add.1, multiply.1)
}
SimpleLoop.condition1 {
loop_var.4 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.6 = s32[] get-tuple-element(loop_var.4), index=0
constant.4 = s32[] constant(10)
ROOT less-than.1 = pred[] compare(get-tuple-element.6, constant.4), direction=LT
}
ENTRY SimpleLoop {
constant.5 = s32[] constant(0)
constant.6 = s32[3]{0} constant({0, 1, 2})
tuple.2 = (s32[], s32[3]{0}) tuple(constant.5, constant.6)
while.1 = (s32[], s32[3]{0}) while(tuple.2), condition=
SimpleLoop.condition0, body=SimpleLoop.body0
get-tuple-element.7 = s32[] get-tuple-element(while.1), index=0
tuple.3 = (s32[], s32[3]{0}) tuple(get-tuple-element.7, constant.6)
while.2 = (s32[], s32[3]{0}) while(tuple.3), condition=
SimpleLoop.condition1, body=SimpleLoop.body1
ROOT get-tuple-element.8 = s32[] get-tuple-element(while.2), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 1));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(2, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[1]->shape()));
}
TEST_F(HloModuleDceTest, TwoWhilesWithDeadTupleElementSwizzled) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule SimpleLoop
SimpleLoop.body0 {
loop_var.1 = (s32[3]{0}, s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=1
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=0
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[3]{0}, s32[]) tuple(multiply, add)
}
SimpleLoop.condition0 {
loop_var.2 = (s32[3]{0}, s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=1
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body1 {
loop_var.3 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.3), index=0
constant.3 = s32[] constant(1)
add.1 = s32[] add(get-tuple-element.4, constant.3)
get-tuple-element.5 = s32[3]{0} get-tuple-element(loop_var.3), index=1
multiply.1 = s32[3]{0} multiply(get-tuple-element.5, get-tuple-element.5)
ROOT tuple.1 = (s32[], s32[3]{0}) tuple(add.1, multiply.1)
}
SimpleLoop.condition1 {
loop_var.4 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.6 = s32[] get-tuple-element(loop_var.4), index=0
constant.4 = s32[] constant(10)
ROOT less-than.1 = pred[] compare(get-tuple-element.6, constant.4), direction=LT
}
ENTRY SimpleLoop {
constant.5 = s32[] constant(0)
constant.6 = s32[3]{0} constant({0, 1, 2})
tuple.2 = (s32[3]{0}, s32[]) tuple(constant.6, constant.5)
while.1 = (s32[3]{0}, s32[]) while(tuple.2), condition=
SimpleLoop.condition0, body=SimpleLoop.body0
get-tuple-element.7 = s32[] get-tuple-element(while.1), index=1
tuple.3 = (s32[], s32[3]{0}) tuple(get-tuple-element.7, constant.6)
while.2 = (s32[], s32[3]{0}) while(tuple.3), condition=
SimpleLoop.condition1, body=SimpleLoop.body1
ROOT get-tuple-element.8 = s32[] get-tuple-element(while.2), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 0));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 1));
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(2, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[1]->shape()));
}
TEST_F(HloModuleDceTest, WhileWithOutfeed) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule OutfeedLoop
WhileBody {
body_param = (s32[]) parameter(0)
token0 = token[] after-all()
constant.2 = s32[] constant(2)
outfeed_tuple = (s32[]) outfeed(constant.2, token0)
get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
ROOT tuple = (s32[]) tuple(add)
}
WhileCondition {
cond_param = (s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
constant.2 = s32[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[] constant(0)
tuple.1 = (s32[]) tuple(constant.3)
while = (s32[]) while(tuple.1), condition=WhileCondition,
body=WhileBody
ROOT rtuple = () tuple()
})")
.value();
HloModuleDCE dce;
EXPECT_FALSE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
}
TEST_F(HloModuleDceTest, WhileWithOnlyLoopVariableBumping) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule InfiniteLoop
WhileBody {
body_param = (s32[], s32[]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
get-tuple-element.2 = s32[] get-tuple-element(body_param), index=1
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
ROOT tuple = (s32[], s32[]) tuple(add, get-tuple-element.2)
}
WhileCondition {
cond_param = (s32[], s32[]) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
constant.2 = s32[] constant(10)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
p0 = (s32[]) parameter(0)
get-tuple-element.5 = s32[] get-tuple-element(p0), index=0
constant.3 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.3, get-tuple-element.5)
while = (s32[], s32[]) while(tuple.1), condition=WhileCondition,
body=WhileBody
ROOT get-tuple-element.4 = s32[] get-tuple-element(while), index=1
})")
.value();
HloModuleDCE dce;
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while", 0));
}
TEST_F(HloModuleDceTest, TwoWhilesWithDeadWhileLoop) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TwoWhilesWithDeadWhileLoop
SimpleLoop.body0 {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
ROOT tuple = (s32[], s32[3]{0}) tuple(add, get-tuple-element.2)
}
SimpleLoop.condition0 {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
SimpleLoop.body1 {
loop_var.3 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(loop_var.3), index=0
constant.3 = s32[] constant(1)
add.1 = s32[] add(get-tuple-element.4, constant.3)
get-tuple-element.5 = s32[3]{0} get-tuple-element(loop_var.3), index=1
ROOT tuple.1 = (s32[], s32[3]{0}) tuple(add.1, get-tuple-element.5)
}
SimpleLoop.condition1 {
loop_var.4 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.6 = s32[] get-tuple-element(loop_var.4), index=0
constant.4 = s32[] constant(5)
ROOT less-than.1 = pred[] compare(get-tuple-element.6, constant.4), direction=LT
}
ENTRY SimpleLoop {
constant.5 = s32[] constant(0)
constant.6 = s32[3]{0} constant({0, 1, 2})
tuple.2 = (s32[], s32[3]{0}) tuple(constant.5, constant.6)
while.1 = (s32[], s32[3]{0}) while(tuple.2), condition=
SimpleLoop.condition0, body=SimpleLoop.body0
get-tuple-element.7 = s32[3]{0} get-tuple-element(while.1), index=1
constant.7 = s32[] constant(0)
tuple.3 = (s32[], s32[3]{0}) tuple(constant.7, get-tuple-element.7)
while.2 = (s32[], s32[3]{0}) while(tuple.3), condition=
SimpleLoop.condition1, body=SimpleLoop.body1
ROOT get-tuple-element.8 = s32[] get-tuple-element(while.2), index=0
})")
.value();
HloModuleDCE dce;
EXPECT_TRUE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.1", 1));
EXPECT_TRUE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 1));
EXPECT_TRUE(dce.Run(module.get()).value());
EXPECT_FALSE(WhileBodyHasPassThroughTupleElement(module->entry_computation(),
"while.2", 0));
auto while_loops = GetWhileLoops(module->entry_computation());
EXPECT_EQ(1, while_loops.size());
EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));
}
}  // namespace
}  // namespace xla | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_dce.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_dce_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
5aa9fdf1-bc85-46d2-9996-2eebe164027e | cpp | tensorflow/tensorflow | broadcast_to | tensorflow/lite/kernels/broadcast_to.cc | tensorflow/lite/kernels/broadcast_to_test.cc | #include "tensorflow/lite/kernels/internal/reference/broadcast_to.h"
#include <string.h>
#include <cstdint>
#include <memory>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace broadcastto {
constexpr int kInputTensor = 0;
constexpr int kShapeTensor = 1;
constexpr int kOutputTensor = 0;
constexpr int kMaxDims = 8;
struct BroadcastToContext {
BroadcastToContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, kInputTensor);
shape = GetInput(context, node, kShapeTensor);
output = GetOutput(context, node, kOutputTensor);
}
const TfLiteTensor* input;
const TfLiteTensor* shape;
TfLiteTensor* output;
};
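// Computes the output shape from the 1-D `shape` tensor and resizes the
// output tensor accordingly. Checks that the requested rank is at most
// kMaxDims and that each input dimension is either 1 or equal to the
// corresponding (right-aligned) output dimension.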
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
BroadcastToContext* op_context) {
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->shape), 1);
int input_num_dims = NumDimensions(op_context->input);
int output_num_dims = SizeOfDimension(op_context->shape, 0);
TF_LITE_ENSURE_MSG(context, input_num_dims <= output_num_dims,
"Output shape must be broadcastable from input shape.");
TF_LITE_ENSURE_MSG(context, output_num_dims <= kMaxDims,
"BroadcastTo only supports 1-8D tensor.");
auto get_shape_data = [op_context](int i) -> int32_t {
if (op_context->shape->type == kTfLiteInt32) {
return GetTensorData<int32_t>(op_context->shape)[i];
} else {
return GetTensorData<int64_t>(op_context->shape)[i];
}
};
int extending_dims = output_num_dims - input_num_dims;
for (int idx = 0; idx < input_num_dims; ++idx) {
TF_LITE_ENSURE_MSG(context,
(SizeOfDimension(op_context->input, idx) == 1 ||
SizeOfDimension(op_context->input, idx) ==
get_shape_data(extending_dims + idx)),
"Output shape must be broadcastable from input shape.");
}
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_num_dims);
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)>
scoped_output_shape(output_shape, TfLiteIntArrayFree);
for (int idx = 0; idx < output_num_dims; ++idx) {
output_shape->data[idx] = get_shape_data(idx);
}
return context->ResizeTensor(context, op_context->output,
scoped_output_shape.release());
}
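// Validates operand counts and types. When the shape tensor is constant (or
// persistent) the output can be resized here; otherwise it is marked dynamic
// and resized during evaluation.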
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE(context, NumInputs(node) == 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TF_LITE_ENSURE_MSG(context,
(NumDimensions(GetInput(context, node, 0)) <= kMaxDims),
"BroadcastTo only supports 1-8D tensor.");
BroadcastToContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.shape->type == kTfLiteInt32 ||
op_context.shape->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
TF_LITE_ENSURE(context, op_context.input->type != kTfLiteString);
if (IsConstantOrPersistentTensor(op_context.shape)) {
return ResizeOutputTensor(context, &op_context);
}
SetTensorToDynamic(op_context.output);
return kTfLiteOk;
}
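// Resizes the output if it is still dynamic, then dispatches to the
// reference broadcast implementation, which copies raw bytes and therefore
// works for any fixed-width element type.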
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
BroadcastToContext op_context(context, node);
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
}
reference_ops::BroadcastTo<kMaxDims>(
GetTensorShape(op_context.input), op_context.input->data.raw,
GetTensorShape(op_context.output), op_context.output->data.raw,
op_context.input->type);
return kTfLiteOk;
}
}  // namespace broadcastto
TfLiteRegistration* Register_BROADCAST_TO() {
static TfLiteRegistration r = {nullptr, nullptr, broadcastto::Prepare,
broadcastto::Eval};
return &r;
}
}  // namespace builtin
}  // namespace ops
}  // namespace tflite | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/test_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
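// Single-op test harness for BROADCAST_TO. The three-argument constructor
// bakes the target shape in as a constant tensor, exercising the
// Prepare-time resize path; the two-argument form leaves the shape dynamic,
// so the output is resized during Invoke via SetShape().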
template <class InputType, class ShapeType = int32_t>
class BroadcastToOpModel : public SingleOpModel {
public:
BroadcastToOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> shape_shape) {
input_ = AddInput({GetTensorType<InputType>(), input_shape});
shape_ = AddInput({GetTensorType<ShapeType>(), shape_shape});
output_ = AddOutput(GetTensorType<InputType>());
SetBuiltinOp(BuiltinOperator_BROADCAST_TO,
BuiltinOptions_BroadcastToOptions,
CreateBroadcastToOptions(builder_).Union());
BuildInterpreter({input_shape, shape_shape});
}
BroadcastToOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> shape_shape,
std::initializer_list<ShapeType> shape_values) {
input_ = AddInput({GetTensorType<InputType>(), input_shape});
shape_ =
AddConstInput(GetTensorType<ShapeType>(), shape_values, shape_shape);
output_ = AddOutput(GetTensorType<InputType>());
SetBuiltinOp(BuiltinOperator_BROADCAST_TO,
BuiltinOptions_BroadcastToOptions,
CreateBroadcastToOptions(builder_).Union());
BuildInterpreter({input_shape, shape_shape});
}
void SetInput(std::initializer_list<InputType> data) {
PopulateTensor(input_, data);
}
void SetShape(std::initializer_list<ShapeType> data) {
PopulateTensor(shape_, data);
}
std::vector<InputType> GetOutput() {
return ExtractVector<InputType>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int shape_;
int output_;
};
template <typename T>
class BroadcastToOpTest : public ::testing::Test {};
using DataTypes = ::testing::Types<float, uint8_t, int8_t, int16_t, int32_t>;
TYPED_TEST_SUITE(BroadcastToOpTest, DataTypes);
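// The death tests below verify that malformed shape inputs are rejected,
// either at model-construction time (constant shape) or at Invoke time
// (dynamic shape).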
#if GTEST_HAS_DEATH_TEST
TYPED_TEST(BroadcastToOpTest, ShapeMustBe1D) {
EXPECT_DEATH(
BroadcastToOpModel<TypeParam>({2, 3, 4, 4}, {2, 2}, {2, 3, 4, 4}), "");
BroadcastToOpModel<TypeParam> m({2, 3, 4, 4}, {2, 2});
m.SetShape({2, 3, 4, 4});
EXPECT_THAT(m.Invoke(), kTfLiteError);
}
TYPED_TEST(BroadcastToOpTest, TooManyDimensions) {
EXPECT_DEATH(BroadcastToOpModel<TypeParam>({1, 2, 3, 4, 5, 6, 7, 8, 9}, {9},
{2, 2, 3, 4, 5, 6, 7, 8, 9}),
"BroadcastTo only supports 1-8D tensor.");
EXPECT_DEATH(BroadcastToOpModel<TypeParam>({1, 2, 3, 4, 5, 6, 7, 8, 9}, {9}),
"BroadcastTo only supports 1-8D tensor.");
}
TYPED_TEST(BroadcastToOpTest, MismatchDimension) {
EXPECT_DEATH(BroadcastToOpModel<TypeParam>({2, 4, 1, 2}, {4}, {2, 4, 1, 3}),
"Output shape must be broadcastable from input shape.");
EXPECT_DEATH(
BroadcastToOpModel<TypeParam>({2, 4, 1, 2, 3}, {4}, {2, 4, 1, 2}),
"Output shape must be broadcastable from input shape.");
BroadcastToOpModel<TypeParam> m1({2, 4, 1, 2}, {4});
m1.SetShape({2, 3, 4, 4});
EXPECT_THAT(m1.Invoke(), kTfLiteError);
BroadcastToOpModel<TypeParam> m2({2, 4, 1, 2}, {5});
m2.SetShape({1, 2, 3, 4, 4});
EXPECT_THAT(m2.Invoke(), kTfLiteError);
}
#endif
TYPED_TEST(BroadcastToOpTest, BroadcastTo1DConstTest) {
BroadcastToOpModel<TypeParam> m({1}, {1}, {4});
m.SetInput({3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 3, 3}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo4DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 2}, {4}, {1, 1, 2, 2});
m.SetInput({3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 4, 3, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo8DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 1, 1, 1, 2, 1}, {8},
{1, 1, 1, 1, 1, 1, 2, 2});
m.SetInput({3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo1DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1}, {1});
m.SetInput({3});
m.SetShape({4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 3, 3}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo4DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 2}, {4});
m.SetInput({3, 4});
m.SetShape({1, 1, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 4, 3, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastTo8DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 1, 1, 1, 1, 1, 2, 1}, {8});
m.SetInput({3, 4});
m.SetShape({1, 1, 1, 1, 1, 1, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast4DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 3, 1, 2}, {4}, {3, 3, 2, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 2, 2}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4,
3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast4DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 3, 1, 2}, {4});
m.SetInput({1, 2, 3, 4, 5, 6});
m.SetShape({3, 3, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 2, 2}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4,
3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast6DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 2, 1, 3, 1, 2}, {6}, {2, 2, 1, 3, 2, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 1, 3, 2, 2}));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12,
1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast6DDynamicTest) {
BroadcastToOpModel<TypeParam> m({1, 2, 1, 3, 1, 2}, {6});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
m.SetShape({2, 2, 1, 3, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 1, 3, 2, 2}));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12,
1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6,
7, 8, 7, 8, 9, 10, 9, 10, 11, 12, 11, 12}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast8DConstTest) {
BroadcastToOpModel<TypeParam> m({1, 3, 1, 2, 1, 4, 1, 1}, {8},
{2, 3, 1, 2, 2, 4, 1, 1});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 1, 2, 2, 4, 1, 1}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6,
7, 8, 9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16,
13, 14, 15, 16, 17, 18, 19, 20, 17, 18, 19, 20, 21, 22,
23, 24, 21, 22, 23, 24, 1, 2, 3, 4, 1, 2, 3, 4,
5, 6, 7, 8, 5, 6, 7, 8, 9, 10, 11, 12, 9, 10,
11, 12, 13, 14, 15, 16, 13, 14, 15, 16, 17, 18, 19, 20,
17, 18, 19, 20, 21, 22, 23, 24, 21, 22, 23, 24}));
}
TYPED_TEST(BroadcastToOpTest, ComplexBroadcast8DDynamicTest) {
BroadcastToOpModel<TypeParam> m({2, 1, 1, 2, 1, 4, 1, 1}, {8});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
m.SetShape({2, 3, 2, 2, 2, 4, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 2, 2, 2, 4, 1, 1}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(
{1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 5, 6, 7, 8,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16,
9, 10, 11, 12, 9, 10, 11, 12, 13, 14, 15, 16, 13, 14, 15, 16}));
}
TYPED_TEST(BroadcastToOpTest, ExtendingShape4DConstTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 2}, {4}, {3, 3, 2, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 2, 2}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4,
3, 4, 5, 6, 5, 6, 1, 2, 1, 2, 3, 4, 3, 4, 5, 6, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, NoBroadcastingConstTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 2}, {3}, {3, 1, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, NoBroadcasting8DConstTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 1, 1, 1, 1, 1, 2}, {8},
{3, 1, 1, 1, 1, 1, 1, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1, 1, 1, 1, 1, 1, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
TYPED_TEST(BroadcastToOpTest, Int64ShapeConstTest) {
BroadcastToOpModel<TypeParam, int64_t> m({1, 1, 1, 1, 1, 1, 2, 1}, {8},
{1, 1, 1, 1, 1, 1, 2, 2});
m.SetInput({3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, Int64ShapeDynamicTest) {
BroadcastToOpModel<TypeParam, int64_t> m({1, 1, 1, 1, 1, 1, 2, 1}, {8});
m.SetInput({3, 4});
m.SetShape({1, 1, 1, 1, 1, 1, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 1, 1, 1, 2, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 3, 4, 4}));
}
TYPED_TEST(BroadcastToOpTest, BroadcastToEmptyShapeTest) {
BroadcastToOpModel<TypeParam> m({3, 1, 2}, {3}, {3, 0, 2});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 0, 2}));
}
}  // namespace
}  // namespace tflite | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/broadcast_to.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/broadcast_to_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
83b1774b-22a8-4980-b94c-a9ffd854051b | cpp | tensorflow/tensorflow | tf_to_uniform_attribute_utils | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.h"
#include <array>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/uniform_op_quant_spec.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
namespace mlir::quant {
using QuantMethod = tensorflow::quantization::QuantizationMethod::PresetMethod;
enum class OpType {
kDynamicRangeOp,
kUnaryOp,
kBinaryOp,
kQuantizationOp,
};
constexpr std::array<absl::string_view, 3> kQuantizationAxisAttrs = {
"input_quantization_axis", "quantization_axis", "rhs_quantization_axis"};
constexpr std::array<absl::string_view, 2> kSuffixes = {"_min_val", "_max_val"};
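// The Get*Value helpers below convert attributes of the original TF op
// (NHWC-style strides, dilations, padding, etc.) into the encoding expected
// by the UniformQuantized* ops, dropping the batch and channel entries where
// only spatial values are wanted.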
Attribute GetWindowStridesValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
ArrayAttr stride = mlir::dyn_cast<ArrayAttr>(identifier_to_attr["strides"]);
const int stride_h = mlir::cast<IntegerAttr>(stride[1]).getInt();
const int stride_w = mlir::cast<IntegerAttr>(stride[2]).getInt();
return rewriter.getI64ArrayAttr({stride_h, stride_w});
}
Attribute GetLhsDilationValue(PatternRewriter& rewriter,
llvm::StringMap<Attribute>& identifier_to_attr) {
return rewriter.getI64ArrayAttr({1, 1});
}
Attribute GetRhsDilationValue(PatternRewriter& rewriter,
llvm::StringMap<Attribute>& identifier_to_attr) {
ArrayAttr dilations =
mlir::dyn_cast<ArrayAttr>(identifier_to_attr["dilations"]);
const int dilation_h = mlir::cast<IntegerAttr>(dilations[1]).getInt();
const int dilation_w = mlir::cast<IntegerAttr>(dilations[2]).getInt();
return rewriter.getI64ArrayAttr({dilation_h, dilation_w});
}
Attribute GetPaddingValue(PatternRewriter& rewriter,
llvm::StringMap<Attribute>& identifier_to_attr) {
llvm::StringRef padding =
mlir::dyn_cast<StringAttr>(identifier_to_attr["padding"]).getValue();
return rewriter.getStringAttr(padding);
}
Attribute GetExplicitPaddingValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
ArrayAttr explicit_padding =
mlir::dyn_cast<ArrayAttr>(identifier_to_attr["explicit_paddings"]);
return explicit_padding;
}
Attribute GetDimensionNumbersValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
tensorflow::UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
if (!tensorflow::protobuf::TextFormat::ParseFromString(
R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers)) {
return rewriter.getStringAttr("");
}
return rewriter.getStringAttr(dimension_numbers.SerializeAsString());
}
Attribute GetBatchGroupCountValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
return rewriter.getI64IntegerAttr(1);
}
Attribute GetQuantizationAxis(PatternRewriter& rewriter, Operation* op,
const int operand_index) {
auto* defining_op = op->getOperand(operand_index).getDefiningOp();
for (auto attr : kQuantizationAxisAttrs) {
if (defining_op->hasAttr(attr)) {
return defining_op->getAttr(attr);
}
}
return rewriter.getI64IntegerAttr(-1);
}
LogicalResult CheckIfAttrIs8Bit(const std::string& attr, Operation* op,
bool& is_8_bit) {
Type element_type;
if (attr == "lhs_quantization" || attr == "input_quantization" ||
attr == "quantization") {
if (op->getNumOperands() < 1) {
return failure();
}
element_type = getElementTypeOrSelf(op->getOperand(0).getType());
}
if (attr == "rhs_quantization") {
if (op->getNumOperands() < 2) {
return failure();
}
element_type = getElementTypeOrSelf(op->getOperand(1).getType());
}
if (attr == "output_quantization") {
if (op->getNumResults() < 1) {
return failure();
}
element_type = getElementTypeOrSelf(op->getOpResult(0).getType());
}
if (element_type) {
is_8_bit = mlir::isa<TF::Qint8Type>(element_type);
return success();
}
return failure();
}
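// Appends "<attr>_min_val"/"<attr>_max_val" integer attributes for each
// quantized operand or result implied by `op_type`, using the int8 range
// [-128, 127] when the corresponding tensor is qint8 and the full int32
// range otherwise.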
LogicalResult FillQuantizationAttributes(
PatternRewriter& rewriter, Operation* op, NamedAttrList& attrs,
llvm::StringMap<Attribute>& identifier_to_attr, OpType op_type) {
absl::flat_hash_map<std::string, int> min_max_scheme_for_8bit = {
{"min", -128}, {"max", 127}};
  absl::flat_hash_map<std::string, int> min_max_scheme_for_32bit = {
      {"min", -2147483648}, {"max", 2147483647}};
std::vector<std::string> quantization_attributes;
switch (op_type) {
case OpType::kDynamicRangeOp:
quantization_attributes = {"rhs_quantization"};
break;
case OpType::kUnaryOp:
quantization_attributes = {"quantization"};
break;
case OpType::kBinaryOp:
quantization_attributes = {"lhs_quantization", "rhs_quantization",
"output_quantization"};
break;
case OpType::kQuantizationOp:
quantization_attributes = {"input_quantization", "output_quantization"};
break;
default:
quantization_attributes = {};
break;
}
for (const auto& attr : quantization_attributes) {
bool attr_is_8_bit;
if (failed(CheckIfAttrIs8Bit(attr, op, attr_is_8_bit))) {
return failure();
}
for (int i = 0; i < kSuffixes.size(); i++) {
int64_t quant_val;
if (attr_is_8_bit) {
quant_val = i == 0 ? min_max_scheme_for_8bit["min"]
: min_max_scheme_for_8bit["max"];
} else {
        quant_val = i == 0 ? min_max_scheme_for_32bit["min"]
                           : min_max_scheme_for_32bit["max"];
}
std::string attr_minmax = absl::StrCat(attr, kSuffixes[i]);
attrs.push_back(rewriter.getNamedAttr(
attr_minmax, rewriter.getI64IntegerAttr(quant_val)));
}
}
return success();
}
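// Dot: dynamic-range quantization only fills the rhs range; static-range
// additionally fills lhs/output ranges and pins the lhs axis to -1. The
// rhs/output axes become per-channel only when per-channel quantization is
// enabled and the op spec names exactly one quantizable operand.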
LogicalResult FillAttributesForUniformQuantizedDotOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (quantization_method ==
tensorflow::quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8) {
if (failed(FillQuantizationAttributes(rewriter, op, attrs,
identifier_to_attr,
OpType::kDynamicRangeOp))) {
return failure();
}
} else {
if (failed(FillQuantizationAttributes(
rewriter, op, attrs, identifier_to_attr, OpType::kBinaryOp))) {
return failure();
}
attrs.push_back(rewriter.getNamedAttr("lhs_quantization_axis",
rewriter.getI64IntegerAttr(-1)));
}
std::unique_ptr<OpQuantSpec> spec = GetUniformOpQuantSpec(op);
absl::flat_hash_set<int> operands = spec->quantizable_operands;
int quant_dim = -1;
if (enable_per_channel_quantization && operands.size() == 1) {
quant_dim = spec->coeff_op_quant_dim[*(operands.begin())];
}
attrs.push_back(rewriter.getNamedAttr("rhs_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
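// Convolution: besides the quantization ranges, convolution-specific
// attributes (window strides, dilations, padding, dimension numbers, group
// counts) are translated, and feature_group_count is set to the input
// channel count for depthwise convolutions, detected via the enclosing
// function's name.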
LogicalResult FillAttributesForUniformQuantizedConvolutionOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
absl::flat_hash_map<std::string, Attribute (*)(PatternRewriter&,
llvm::StringMap<Attribute>&)>
attribute_getter_map;
attribute_getter_map = {{"window_strides", GetWindowStridesValue},
{"lhs_dilation", GetLhsDilationValue},
{"rhs_dilation", GetRhsDilationValue},
{"padding", GetPaddingValue},
{"explicit_padding", GetExplicitPaddingValue},
{"dimension_numbers", GetDimensionNumbersValue},
{"batch_group_count", GetBatchGroupCountValue}};
for (auto& attr : op->getAttrs()) {
llvm::StringRef attr_name = attr.getName().getValue();
if (attribute_getter_map.find(attr_name.str()) !=
attribute_getter_map.end()) {
auto attr_val =
(attribute_getter_map[attr_name.str()])(rewriter, identifier_to_attr);
attrs.push_back(rewriter.getNamedAttr(attr_name, attr_val));
}
}
auto feature_group_cnt_attr = llvm::StringRef("feature_group_count");
int feature_group_cnt = 1;
ShapedType input_shape =
mlir::dyn_cast<ShapedType>(op->getOperand(0).getType());
if (!input_shape) {
return op->emitError(
"Only input with known shape is supported for Uniform Quantized "
"opset.");
}
if (op->getParentOfType<func::FuncOp>().getName().contains("depthwise_")) {
feature_group_cnt = input_shape.getDimSize(3);
}
attrs.push_back(rewriter.getNamedAttr(
feature_group_cnt_attr, rewriter.getI64IntegerAttr(feature_group_cnt)));
if (quantization_method ==
tensorflow::quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8) {
if (failed(FillQuantizationAttributes(rewriter, op, attrs,
identifier_to_attr,
OpType::kDynamicRangeOp))) {
return failure();
}
} else {
if (failed(FillQuantizationAttributes(
rewriter, op, attrs, identifier_to_attr, OpType::kBinaryOp))) {
return failure();
}
}
if (quantization_method !=
tensorflow::quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8) {
attrs.push_back(rewriter.getNamedAttr("lhs_quantization_axis",
rewriter.getI64IntegerAttr(-1)));
}
std::unique_ptr<OpQuantSpec> spec = GetUniformOpQuantSpec(op);
absl::flat_hash_set<int> operands = spec->quantizable_operands;
int quant_dim = -1;
if (enable_per_channel_quantization && operands.size() == 1) {
quant_dim = spec->coeff_op_quant_dim[*(operands.begin())];
}
attrs.push_back(rewriter.getNamedAttr("rhs_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
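// Add: lhs, rhs and output must share one quantization axis, so the axis is
// taken from whichever input already carries one, falling back to
// per-tensor (-1).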
LogicalResult FillAttributesForUniformQuantizedAddOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
const QuantMethod quantization_method,
const bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kBinaryOp))) {
return failure();
}
Attribute activation_quantization_axis = rewriter.getI64IntegerAttr(-1);
if (enable_per_channel_quantization) {
    activation_quantization_axis =
        GetQuantizationAxis(rewriter, op, /*operand_index=*/0);
    if (activation_quantization_axis == rewriter.getI64IntegerAttr(-1)) {
      activation_quantization_axis =
          GetQuantizationAxis(rewriter, op, /*operand_index=*/1);
}
}
attrs.push_back(rewriter.getNamedAttr("lhs_quantization_axis",
activation_quantization_axis));
attrs.push_back(rewriter.getNamedAttr("rhs_quantization_axis",
activation_quantization_axis));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
activation_quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
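// ClipByValue is unary: a single quantization axis, inherited from the
// operand when per-channel quantization is enabled.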
LogicalResult FillAttributesForUniformQuantizedClipByValueOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kUnaryOp))) {
return failure();
}
Attribute activation_quantization_axis = rewriter.getI64IntegerAttr(-1);
if (enable_per_channel_quantization) {
    activation_quantization_axis =
        GetQuantizationAxis(rewriter, op, /*operand_index=*/0);
}
attrs.push_back(
rewriter.getNamedAttr("quantization_axis", activation_quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
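// Requantize: the output axis stays per-tensor unless the output scale
// operand has rank >= 1 (a per-channel scale), in which case it inherits the
// input axis.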
LogicalResult FillAttributesForUniformRequantizeOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kQuantizationOp))) {
return failure();
}
Attribute activation_quantization_axis = rewriter.getI64IntegerAttr(-1);
Attribute output_quantization_axis = rewriter.getI64IntegerAttr(-1);
if (enable_per_channel_quantization) {
    activation_quantization_axis =
        GetQuantizationAxis(rewriter, op, /*operand_index=*/0);
auto output_scale_type =
mlir::dyn_cast<ShapedType>(op->getOperand(3).getType());
if (!output_scale_type) {
return failure();
}
if (output_scale_type.hasRank() && 0 < output_scale_type.getRank()) {
output_quantization_axis = activation_quantization_axis;
}
}
attrs.push_back(rewriter.getNamedAttr("input_quantization_axis",
activation_quantization_axis));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
output_quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
LogicalResult FillAttributesForUniformQuantizeOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kUnaryOp))) {
return failure();
}
Attribute quantization_axis = rewriter.getI64IntegerAttr(-1);
if (enable_per_channel_quantization) {
quantization_axis = rewriter.getI64IntegerAttr(3);
}
attrs.push_back(
rewriter.getNamedAttr("quantization_axis", quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
}  // namespace mlir::quant | #include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.h"
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringMap.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir::quant {
namespace {
using QuantMethod = tensorflow::quantization::QuantizationMethod::PresetMethod;
class EmptyPatternRewriter : public mlir::PatternRewriter {
public:
explicit EmptyPatternRewriter(const OpBuilder& op_builder)
: mlir::PatternRewriter(op_builder) {}
~EmptyPatternRewriter() override = default;
};
class TfToUniformAttributeUtilsTestPeer {
public:
explicit TfToUniformAttributeUtilsTestPeer() = delete;
explicit TfToUniformAttributeUtilsTestPeer(MLIRContext* ctx)
: rewriter_(OpBuilder(ctx)) {}
EmptyPatternRewriter rewriter_;
};
class TfToUniformAttributeUtilsTest : public ::testing::Test {
protected:
TfToUniformAttributeUtilsTest() : ctx_() {
ctx_.loadDialect<TF::TensorFlowDialect>();
}
MLIRContext ctx_;
};
TF::UniformQuantizedAddOp ParseUniformQuantizedAddOp(
const absl::string_view add_op_str, Block& block, MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(add_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
auto uq_add_op = dyn_cast_or_null<TF::UniformQuantizedAddOp>(block.back());
EXPECT_TRUE(uq_add_op);
return uq_add_op;
}
TF::UniformRequantizeOp ParseUniformRequantizedOp(
const absl::string_view requant_op_str, Block& block, MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(requant_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
auto uq_requant_op = dyn_cast_or_null<TF::UniformRequantizeOp>(block.back());
EXPECT_TRUE(uq_requant_op);
return uq_requant_op;
}
TEST_F(TfToUniformAttributeUtilsTest, UniformQuantizedAddOpAttributes) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kAddOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<2x!tf_type.qint32>} : () -> tensor<2x!tf_type.qint32>
%2 = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
%3 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
%4 = "tf.UniformQuantizedAdd"(%0, %1, %2, %3, %2, %3, %2, %3) {device = "", lhs_quantization_axis = -1 : i64, lhs_quantization_max_val = 127 : i64, lhs_quantization_min_val = -127 : i64, output_quantization_axis = -1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64, rhs_quantization_axis = -1 : i64, rhs_quantization_max_val = 127 : i64, rhs_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2x!tf_type.qint32>, tensor<f32>, tensor<i32>, tensor<f32>, tensor<i32>, tensor<f32>, tensor<i32>) -> tensor<1x3x2x2x!tf_type.qint32>
)mlir";
Block block{};
TF::UniformQuantizedAddOp op =
ParseUniformQuantizedAddOp(kAddOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformQuantizedAddOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
      /*enable_per_channel_quantization=*/false);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getLhsQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getLhsQuantizationMinValAttr().getInt());
ASSERT_EQ(2147483647, op.getRhsQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getRhsQuantizationMinValAttr().getInt());
ASSERT_EQ(2147483647, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(-1, op.getLhsQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getRhsQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getOutputQuantizationAxisAttr().getInt());
}
TEST_F(TfToUniformAttributeUtilsTest, UniformQuantizedRequantizeOpAttributes) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kRequantOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>, quantization_axis = 3} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%2 = "tf.Const"() {value = dense<2> : tensor<2xi32>} : () -> tensor<2xi32>
%3 = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
%4 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
%5 = "tf.UniformRequantize"(%0, %1, %2, %3, %4) {device = "", input_quantization_axis = 3 : i64, input_quantization_max_val = 127 : i64, input_quantization_min_val = -127 : i64, output_quantization_axis = -1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2xf32>, tensor<2xi32>, tensor<f32>, tensor<i32>) -> tensor<1x3x2x2x!tf_type.qint8>
)mlir";
Block block{};
TF::UniformRequantizeOp op =
ParseUniformRequantizedOp(kRequantOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformRequantizeOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
      /*enable_per_channel_quantization=*/true);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getInputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getInputQuantizationMinValAttr().getInt());
ASSERT_EQ(127, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-128, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(3, op.getInputQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getOutputQuantizationAxisAttr().getInt());
}
TEST_F(TfToUniformAttributeUtilsTest,
UniformQuantizedRequantizeOpAttributes_OutputPerChannel) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kRequantOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>, quantization_axis = 3} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%2 = "tf.Const"() {value = dense<2> : tensor<2xi32>} : () -> tensor<2xi32>
%3 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%4 = "tf.Const"() {value = dense<0> : tensor<2xi32>} : () -> tensor<2xi32>
%5 = "tf.UniformRequantize"(%0, %1, %2, %3, %4) {device = "", input_quantization_axis = 3 : i64, input_quantization_max_val = 127 : i64, input_quantization_min_val = -127 : i64, output_quantization_axis = 1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2xf32>, tensor<2xi32>, tensor<2xf32>, tensor<2xi32>) -> tensor<1x3x2x2x!tf_type.qint8>
)mlir";
Block block{};
TF::UniformRequantizeOp op =
ParseUniformRequantizedOp(kRequantOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformRequantizeOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
      /*enable_per_channel_quantization=*/true);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getInputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getInputQuantizationMinValAttr().getInt());
ASSERT_EQ(127, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-128, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(3, op.getInputQuantizationAxisAttr().getInt());
ASSERT_EQ(3, op.getOutputQuantizationAxisAttr().getInt());
}
TEST_F(TfToUniformAttributeUtilsTest,
UniformQuantizedRequantizeOpAttributes_DisablePerChannelQuantization) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kRequantOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>, quantization_axis = 3} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%2 = "tf.Const"() {value = dense<2> : tensor<2xi32>} : () -> tensor<2xi32>
%3 = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
%4 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
%5 = "tf.UniformRequantize"(%0, %1, %2, %3, %4) {device = "", input_quantization_axis = 3 : i64, input_quantization_max_val = 127 : i64, input_quantization_min_val = -127 : i64, output_quantization_axis = -1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2xf32>, tensor<2xi32>, tensor<f32>, tensor<i32>) -> tensor<1x3x2x2x!tf_type.qint8>
)mlir";
Block block{};
TF::UniformRequantizeOp op =
ParseUniformRequantizedOp(kRequantOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformRequantizeOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
      /*enable_per_channel_quantization=*/false);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getInputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getInputQuantizationMinValAttr().getInt());
ASSERT_EQ(127, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-128, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(-1, op.getInputQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getOutputQuantizationAxisAttr().getInt());
}
}  // namespace
}  // namespace mlir::quant | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a3cdbd40-8d4c-4144-9518-6b84e52b8158 | cpp | tensorflow/tensorflow | svd | third_party/xla/xla/hlo/builder/lib/svd.cc | third_party/xla/xla/hlo/builder/lib/svd_test.cc | #include "xla/hlo/builder/lib/svd.h"
#include <memory>
#include <numeric>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/loops.h"
#include "xla/hlo/builder/lib/math.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
struct HouseHolderResult {
XlaOp v;
XlaOp beta;
XlaOp a;
};
struct JacobiRotation {
XlaOp c;
XlaOp s;
};
struct JacobiUpdate {
XlaOp v;
XlaOp w;
};
struct OneSidedJacobiRotation {
JacobiRotation rot_l;
JacobiRotation rot_r;
};
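// Householder reflection on the rows of `a`: computes a reflector v (with
// v[j] = 1) and scalar beta such that A(I - beta v v^T) zeroes out the
// entries of row i to the right of column j. The reflection is skipped
// (beta = 0) when the norm of the tail is below `eps`.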
absl::StatusOr<HouseHolderResult> HouseRow(
XlaOp a, XlaOp i, XlaOp j, XlaOp eps,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
const int64_t num_dims = a_shape.rank();
const int64_t n = ShapeUtil::GetDimension(a_shape, -1);
XlaOp zero = ScalarLike(i, 0);
XlaOp x = DynamicSliceInMinorDims(a, {i, zero}, {1, n});
const int64_t num_batch_dims = num_dims - 2;
std::vector<int64_t> batch_dims(num_batch_dims);
for (int k = 0; k < num_batch_dims; ++k) {
batch_dims[k] = ShapeUtil::GetDimension(a_shape, k);
}
TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
auto idx = Iota(builder, ShapeUtil::MakeShape(S32, x_shape.dimensions()),
num_dims - 1);
auto zeros = ZerosLike(x);
auto v = Select(Gt(idx, j), x, zeros);
auto one = ScalarLike(v, 1.0);
auto sigma =
Sqrt(Reduce(Square(v), ScalarLike(v, 0.0),
CreateScalarAddComputation(x_shape.element_type(), builder),
{num_dims - 1}));
std::vector<int64_t> broadcast_dims(num_dims - 1);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
auto x_0j = DynamicSliceInMinorDims(x, {zero, j}, {1, 1});
auto mu = Mul(sigma, Sqrt(Square(Div(x_0j, sigma, broadcast_dims)) + one),
broadcast_dims);
auto v_0j = Select(
Le(x_0j, ScalarLike(x_0j, 0.0)), Sub(x_0j, mu),
-Mul(sigma, Div(sigma, Add(x_0j, mu), broadcast_dims), broadcast_dims));
auto beta = Div(ScalarLike(v_0j, 2.0),
(Square(Div(sigma, v_0j, broadcast_dims)) + one));
v = Select(
BroadcastInDim(Lt(sigma, eps), x_shape.dimensions(), broadcast_dims), v,
v / v_0j);
v = Select(Eq(idx, j), zeros + one, v);
beta = Select(Lt(Add(sigma, ZerosLike(beta), broadcast_dims), eps),
ZerosLike(beta), beta);
HouseHolderResult result;
result.v = v;
result.beta = beta;
result.a = Sub(a, Mul(beta, BatchDot(BatchDot(a, false, v, true, precision),
v, precision)));
return result;
}
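// Column-wise analogue of HouseRow: computes v and beta so that
// (I - beta v v^T) A zeroes out the entries of column j below row i.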
absl::StatusOr<HouseHolderResult> HouseCol(
XlaOp a, XlaOp i, XlaOp j, XlaOp eps,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
const int64_t num_dims = a_shape.rank();
const int64_t m = ShapeUtil::GetDimension(a_shape, -2);
XlaOp zero = ScalarLike(i, 0);
XlaOp x = DynamicSliceInMinorDims(a, {zero, j}, {m, 1});
const int64_t num_batch_dims = num_dims - 2;
std::vector<int64_t> batch_dims(num_batch_dims);
for (int k = 0; k < num_batch_dims; ++k) {
batch_dims[k] = ShapeUtil::GetDimension(a_shape, k);
}
TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
auto idx = Iota(builder, ShapeUtil::MakeShape(S32, x_shape.dimensions()),
num_dims - 2);
auto zeros = ZerosLike(x);
auto v = Select(Gt(idx, i), x, zeros);
auto one = ScalarLike(v, 1.0);
auto sigma =
Sqrt(Reduce(Square(v), ScalarLike(v, 0.0),
CreateScalarAddComputation(x_shape.element_type(), builder),
{num_dims - 2}));
std::vector<int64_t> broadcast_dims(num_dims - 1);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims[num_dims - 2] = num_dims - 1;
auto x_0i = DynamicSliceInMinorDims(x, {i, zero}, {1, 1});
auto mu = Mul(sigma, Sqrt(Square(Div(x_0i, sigma, broadcast_dims)) + one),
broadcast_dims);
auto v_0i = Select(
Le(x_0i, ScalarLike(x_0i, 0.0)), Sub(x_0i, mu),
-Mul(sigma, Div(sigma, Add(x_0i, mu), broadcast_dims), broadcast_dims));
auto beta = Div(ScalarLike(v_0i, 2.0),
(Square(Div(sigma, v_0i, broadcast_dims)) + one));
v = Select(
BroadcastInDim(Lt(sigma, eps), x_shape.dimensions(), broadcast_dims), v,
v / v_0i);
v = Select(Eq(idx, i), zeros + one, v);
beta = Select(Lt(Add(sigma, ZerosLike(beta), broadcast_dims), eps),
ZerosLike(beta), beta);
HouseHolderResult result;
result.v = v;
result.beta = beta;
result.a = Sub(
a, Mul(beta, BatchDot(v, false, BatchDot(v, true, a, false, precision),
false, precision)));
return result;
}
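// Reduces the matrix to upper-bidiagonal form B = U^T A V by alternating
// column and row Householder reflections, accumulating the orthogonal
// factors U and V as it goes. The main loop stops at column n - 2; the last
// two columns only need column reflections and are handled after the loop.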
absl::StatusOr<SVDResult> HouseHolderBidiagonalization(
XlaOp a, XlaOp eps, PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
TF_ASSIGN_OR_RETURN(Shape a_shape, builder->GetShape(a));
const int64_t num_dims = a_shape.rank();
const int64_t num_batch_dims = num_dims - 2;
std::vector<int64_t> batch_dims(num_batch_dims);
for (int i = 0; i < num_batch_dims; ++i) {
batch_dims[i] = ShapeUtil::GetDimension(a_shape, i);
}
const int64_t m = ShapeUtil::GetDimension(a_shape, -2);
const int64_t n = ShapeUtil::GetDimension(a_shape, -1);
XlaOp u_init = Broadcast(
IdentityMatrix(builder, a_shape.element_type(), m, m), batch_dims);
XlaOp v_init = Broadcast(
IdentityMatrix(builder, a_shape.element_type(), n, n), batch_dims);
auto while_cond_fn = [&](absl::Span<const XlaOp> values,
XlaBuilder* cond_builder) -> absl::StatusOr<XlaOp> {
auto i = values[0];
return Lt(i, ScalarLike(i, n - 2));
};
auto while_body_fn =
[&](absl::Span<const XlaOp> values,
XlaBuilder* body_builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto i = values[0];
auto one = ScalarLike(i, 1);
auto u = values[1];
auto v = values[2];
auto a = values[3];
auto eps = values[4];
TF_ASSIGN_OR_RETURN(HouseHolderResult house_col,
HouseCol(a, i, i, eps, precision));
u = Sub(u,
Mul(house_col.beta, BatchDot(BatchDot(u, house_col.v, precision),
false, house_col.v, true, precision)));
a = house_col.a;
TF_ASSIGN_OR_RETURN(HouseHolderResult house_row,
HouseRow(a, i, i + one, eps, precision));
v = Sub(v, Mul(house_row.beta,
BatchDot(BatchDot(v, false, house_row.v, true, precision),
house_row.v, precision)));
a = house_row.a;
std::vector<XlaOp> updated_values;
updated_values.reserve(values.size());
updated_values.push_back(i + one);
updated_values.push_back(u);
updated_values.push_back(v);
updated_values.push_back(a);
updated_values.push_back(eps);
return updated_values;
};
std::vector<XlaOp> values(5);
values[0] = Zero(builder, S32);
values[1] = u_init;
values[2] = v_init;
values[3] = a;
values[4] = eps;
TF_ASSIGN_OR_RETURN(values,
WhileLoopHelper(while_cond_fn, while_body_fn, values,
"HouseHolderBidiagonalization", builder));
for (int k = 2; k > 0; --k) {
if (n - k >= 0) {
XlaOp index = ScalarLike(values[0], n - k);
TF_ASSIGN_OR_RETURN(HouseHolderResult house_col,
HouseCol(values[3], index, index, eps, precision));
values[1] = Sub(values[1],
Mul(house_col.beta,
BatchDot(BatchDot(values[1], house_col.v, precision),
false, house_col.v, true, precision)));
values[3] = house_col.a;
}
}
SVDResult result;
result.u = values[1];
result.v = values[2];
result.d = values[3];
return result;
}
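// Computes the classical symmetric Jacobi rotation (c, s) that diagonalizes
// the 2x2 symmetric block [[ps, pqs], [pqs, qs]], using the numerically
// stable root t of t^2 + 2*tau*t - 1 = 0 with tau = (qs - ps) / (2 * pqs).
// Falls back to the identity rotation when |pqs| < eps.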
absl::StatusOr<JacobiRotation> MakeJacobi(XlaOp ps, XlaOp qs, XlaOp pqs,
XlaOp eps) {
auto zero = ScalarLike(ps, 0.0);
auto one = ScalarLike(ps, 1.0);
auto two = ScalarLike(ps, 2.0);
auto tau = (qs - ps) / (pqs * two);
auto t_pos = one / (tau + Sqrt(one + Square(tau)));
auto t_neg = -one / (-tau + Sqrt(one + Square(tau)));
auto t = Select(Ge(tau, zero), t_pos, t_neg);
auto c_temp = Rsqrt(one + Square(t));
auto s_temp = t * c_temp;
auto c = Select(Ge(Abs(pqs), eps), c_temp, ZerosLike(c_temp) + one);
auto s = Select(Ge(Abs(pqs), eps), s_temp, ZerosLike(s_temp));
auto rnorm = Rsqrt(Square(c) + Square(s));
JacobiRotation rot;
rot.c = c * rnorm;
rot.s = s * rnorm;
return rot;
}
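// Builds the one-sided Jacobi rotations for the (p, q) block: a first
// rotation symmetrizes the 2x2 block [[a_pp, a_pq], [a_qp, a_qq]],
// MakeJacobi then diagonalizes the symmetrized block, and the left rotation
// is the composition of the two while the right rotation comes from
// MakeJacobi alone.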
absl::StatusOr<OneSidedJacobiRotation> GetOneSidedJacobiRotation(XlaOp a,
XlaOp p,
XlaOp q,
XlaOp eps) {
XlaOp a_pp = DynamicSliceInMinorDims(a, {p, p}, {1, 1});
XlaOp a_pq = DynamicSliceInMinorDims(a, {p, q}, {1, 1});
XlaOp a_qp = DynamicSliceInMinorDims(a, {q, p}, {1, 1});
XlaOp a_qq = DynamicSliceInMinorDims(a, {q, q}, {1, 1});
XlaOp one = ScalarLike(a, 1.0);
XlaOp t = a_pp + a_qq;
XlaOp d = a_qp - a_pq;
XlaOp u = Div(t, d);
XlaOp tmp = Rsqrt(one + Square(u));
JacobiRotation rot;
XlaOp zeros = ZerosLike(tmp);
XlaOp ones = zeros + one;
rot.s = Select(Lt(Abs(d), eps), zeros, -tmp);
rot.c = Select(Lt(Abs(d), eps), ones, Mul(u, tmp));
XlaOp a_pp_new = rot.c * a_pp - rot.s * a_qp;
XlaOp a_pq_new = rot.c * a_pq - rot.s * a_qq;
XlaOp a_qq_new = rot.s * a_pq + rot.c * a_qq;
OneSidedJacobiRotation rots;
TF_ASSIGN_OR_RETURN(rots.rot_r,
MakeJacobi(a_pp_new, a_qq_new, a_pq_new, eps));
rots.rot_l.c = rot.c * rots.rot_r.c - rot.s * rots.rot_r.s;
rots.rot_l.s = rot.s * rots.rot_r.c + rot.c * rots.rot_r.s;
return rots;
}
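// Applies the (p, q) rotations: the left rotation to rows p/q of D and
// columns p/q of U, the right rotation to columns p/q of D and V. The
// off-diagonal entries D[p, q] and D[q, p] are explicitly zeroed, and the
// touched columns of U and V are re-normalized to unit length to limit
// round-off drift.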
absl::StatusOr<SVDResult> OneSidedJacobiUpdate(SVDResult svd_result, XlaOp p,
XlaOp q, XlaOp eps) {
XlaOp u = svd_result.u;
XlaOp v = svd_result.v;
XlaOp d = svd_result.d;
XlaBuilder* builder = d.builder();
TF_ASSIGN_OR_RETURN(Shape d_shape, builder->GetShape(d));
const int64_t num_dims = d_shape.rank();
const int64_t num_batch_dims = num_dims - 2;
std::vector<int64_t> batch_dims(num_batch_dims);
for (int i = 0; i < num_batch_dims; ++i) {
batch_dims[i] = ShapeUtil::GetDimension(d_shape, i);
}
const int64_t m = ShapeUtil::GetDimension(d_shape, -2);
const int64_t n = ShapeUtil::GetDimension(d_shape, -1);
TF_ASSIGN_OR_RETURN(OneSidedJacobiRotation onesided_jacobi,
GetOneSidedJacobiRotation(d, p, q, eps));
auto zero = ScalarLike(p, 0);
std::vector<int64_t> pq_dims(batch_dims.begin(), batch_dims.end());
pq_dims.push_back(1);
pq_dims.push_back(1);
auto pq_zero = ScalarLike(d, 0.0);
auto pq_zeros = Broadcast(pq_zero, pq_dims);
std::vector<int64_t> broadcast_dims(batch_dims.size());
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims.push_back(num_dims - 1);
auto slice_p = DynamicSliceInMinorDims(d, {p, zero}, {1, n});
auto slice_q = DynamicSliceInMinorDims(d, {q, zero}, {1, n});
auto slice_p_new =
onesided_jacobi.rot_l.c * slice_p - onesided_jacobi.rot_l.s * slice_q;
auto slice_q_new =
onesided_jacobi.rot_l.s * slice_p + onesided_jacobi.rot_l.c * slice_q;
d = DynamicUpdateSliceInMinorDims(d, slice_p_new, {p, zero});
d = DynamicUpdateSliceInMinorDims(d, slice_q_new, {q, zero});
slice_p = DynamicSliceInMinorDims(d, {zero, p}, {m, 1});
slice_q = DynamicSliceInMinorDims(d, {zero, q}, {m, 1});
slice_p_new =
onesided_jacobi.rot_r.c * slice_p - onesided_jacobi.rot_r.s * slice_q;
slice_q_new =
onesided_jacobi.rot_r.s * slice_p + onesided_jacobi.rot_r.c * slice_q;
d = DynamicUpdateSliceInMinorDims(d, slice_p_new, {zero, p});
d = DynamicUpdateSliceInMinorDims(d, slice_q_new, {zero, q});
d = DynamicUpdateSliceInMinorDims(d, pq_zeros, {p, q});
d = DynamicUpdateSliceInMinorDims(d, pq_zeros, {q, p});
slice_p = DynamicSliceInMinorDims(u, {zero, p}, {m, 1});
slice_q = DynamicSliceInMinorDims(u, {zero, q}, {m, 1});
slice_p_new =
onesided_jacobi.rot_l.c * slice_p - onesided_jacobi.rot_l.s * slice_q;
slice_p_new = Mul(
slice_p_new,
Rsqrt(Reduce(Square(slice_p_new), pq_zero,
CreateScalarAddComputation(d_shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
slice_q_new =
onesided_jacobi.rot_l.s * slice_p + onesided_jacobi.rot_l.c * slice_q;
slice_q_new = Mul(
slice_q_new,
Rsqrt(Reduce(Square(slice_q_new), pq_zero,
CreateScalarAddComputation(d_shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
u = DynamicUpdateSliceInMinorDims(u, slice_p_new, {zero, p});
u = DynamicUpdateSliceInMinorDims(u, slice_q_new, {zero, q});
slice_p = DynamicSliceInMinorDims(v, {zero, p}, {n, 1});
slice_q = DynamicSliceInMinorDims(v, {zero, q}, {n, 1});
slice_p_new =
onesided_jacobi.rot_r.c * slice_p - onesided_jacobi.rot_r.s * slice_q;
slice_p_new = Mul(
slice_p_new,
Rsqrt(Reduce(Square(slice_p_new), pq_zero,
CreateScalarAddComputation(d_shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
slice_q_new =
onesided_jacobi.rot_r.s * slice_p + onesided_jacobi.rot_r.c * slice_q;
slice_q_new = Mul(
slice_q_new,
Rsqrt(Reduce(Square(slice_q_new), pq_zero,
CreateScalarAddComputation(d_shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
v = DynamicUpdateSliceInMinorDims(v, slice_p_new, {zero, p});
v = DynamicUpdateSliceInMinorDims(v, slice_q_new, {zero, q});
svd_result.d = d;
svd_result.u = u;
svd_result.v = v;
return svd_result;
}
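// Builds the per-entry convergence predicate for the Jacobi sweeps: an
// off-diagonal entry w[i, j] still needs work if w[i, j]^2 exceeds
// |d_i| * |d_j| * eps^2. For f16/bf16 the tolerance is computed in f32 to
// avoid underflow in the product.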
absl::StatusOr<XlaOp> ComputeToleranceComparison(XlaOp w, XlaOp epsilon) {
XlaBuilder* builder = w.builder();
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(w));
auto num_dims = static_cast<int32_t>(shape.rank());
int64_t n = shape.dimensions(num_dims - 1);
shape.set_dimensions(num_dims - 2, n);
auto w_sliced = SliceInMinorDims(w, {0, 0}, {n, n});
auto diag = GetMatrixDiagonal(w_sliced);
diag = Select(Lt(diag, ZerosLike(diag)), -diag, diag);
std::vector<int64_t> broadcasted_dims(num_dims - 1);
std::iota(broadcasted_dims.begin(), broadcasted_dims.end(), 0);
auto broadcast_to_rows =
BroadcastInDim(diag, shape.dimensions(), broadcasted_dims);
broadcasted_dims.back() = num_dims - 1;
auto broadcast_to_columns =
BroadcastInDim(diag, shape.dimensions(), broadcasted_dims);
XlaOp tolerance;
if (builder->GetShape(epsilon)->element_type() == BF16 ||
builder->GetShape(epsilon)->element_type() == F16) {
auto upscale_eps = ConvertElementType(epsilon, F32);
tolerance = ConvertElementType(broadcast_to_rows, F32) *
ConvertElementType(broadcast_to_columns, F32) * upscale_eps *
upscale_eps;
tolerance = ConvertElementType(tolerance,
builder->GetShape(epsilon)->element_type());
} else {
tolerance = broadcast_to_rows * broadcast_to_columns * epsilon * epsilon;
}
return Lt(tolerance, Square(Select(GetDiagonalMask(w_sliced),
ZerosLike(w_sliced), w_sliced)));
}
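// Runs the Jacobi sweeps: the outer loop executes at most max_sweep_updates
// sweeps and stops early once every off-diagonal entry passes the tolerance
// test; the two nested loops enumerate all pairs (p, q) with p < q and apply
// one one-sided Jacobi update per pair.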
absl::StatusOr<std::vector<XlaOp>> WhileLoopFn(
absl::Span<const XlaOp> initial_values,
int matrix_dimension,
int max_sweep_updates,
absl::string_view name,
XlaBuilder* builder) {
auto while_cond_fn = [&](absl::Span<const XlaOp> values,
XlaBuilder* cond_builder) -> absl::StatusOr<XlaOp> {
auto k = values[0];
auto max_sweeps = ScalarLike(k, max_sweep_updates);
auto sweep_update_cond = Gt(max_sweeps, k);
TF_ASSIGN_OR_RETURN(auto tolerance_comparison,
ComputeToleranceComparison(values[3], values[4]));
auto tolerance_cond = ReduceAll(
tolerance_comparison, xla::ConstantR0<bool>(cond_builder, false),
CreateScalarOrComputation(PRED, cond_builder));
return And(sweep_update_cond, tolerance_cond);
};
auto while_body_fn =
[&](absl::Span<const XlaOp> values,
XlaBuilder* body_builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto while_cond_fn_inner =
[&](absl::Span<const XlaOp> values_inner,
XlaBuilder* inner_cond_builder) -> absl::StatusOr<XlaOp> {
auto p = values_inner[0];
return Lt(p, ScalarLike(p, matrix_dimension - 1));
};
auto while_body_fn_inner = [&](absl::Span<const XlaOp> values_inner,
XlaBuilder* inner_body_builder)
-> absl::StatusOr<std::vector<XlaOp>> {
auto while_cond_fn_innermost =
[&](absl::Span<const XlaOp> values_innermost,
XlaBuilder* innermost_cond_builder) -> absl::StatusOr<XlaOp> {
auto q = values_innermost[1];
return Lt(q, ScalarLike(q, matrix_dimension));
};
auto while_body_fn_innermost =
[&](absl::Span<const XlaOp> values_innermost,
XlaBuilder* innermost_body_builder)
-> absl::StatusOr<std::vector<XlaOp>> {
auto p = values_innermost[0];
auto q = values_innermost[1];
SVDResult onesided_jacobi_update;
onesided_jacobi_update.u = values_innermost[2];
onesided_jacobi_update.v = values_innermost[3];
onesided_jacobi_update.d = values_innermost[4];
auto eps = values_innermost[5];
TF_ASSIGN_OR_RETURN(
onesided_jacobi_update,
OneSidedJacobiUpdate(onesided_jacobi_update, p, q, eps));
std::vector<XlaOp> updated_values_innermost;
updated_values_innermost.reserve(values_innermost.size());
updated_values_innermost.push_back(p);
updated_values_innermost.push_back(q + ScalarLike(q, 1));
updated_values_innermost.push_back(onesided_jacobi_update.u);
updated_values_innermost.push_back(onesided_jacobi_update.v);
updated_values_innermost.push_back(onesided_jacobi_update.d);
updated_values_innermost.push_back(eps);
return updated_values_innermost;
};
std::vector<XlaOp> values_innermost(6);
auto p = values_inner[0];
auto q = p + ScalarLike(p, 1);
values_innermost[0] = p;
values_innermost[1] = q;
values_innermost[2] = values_inner[1];
values_innermost[3] = values_inner[2];
values_innermost[4] = values_inner[3];
values_innermost[5] = values_inner[4];
TF_ASSIGN_OR_RETURN(
values_innermost,
WhileLoopHelper(while_cond_fn_innermost, while_body_fn_innermost,
values_innermost, absl::StrCat(name, "-Innermost"),
inner_body_builder));
std::vector<XlaOp> updated_values_inner;
updated_values_inner.reserve(values_inner.size());
updated_values_inner.push_back(p + ScalarLike(p, 1));
updated_values_inner.push_back(values_innermost[2]);
updated_values_inner.push_back(values_innermost[3]);
updated_values_inner.push_back(values_innermost[4]);
updated_values_inner.push_back(values_innermost[5]);
return updated_values_inner;
};
XlaOp k = values[0];
std::vector<XlaOp> values_inner(5);
values_inner[0] = ScalarLike(k, 0);
values_inner[1] = values[1];
values_inner[2] = values[2];
values_inner[3] = values[3];
values_inner[4] = values[4];
TF_ASSIGN_OR_RETURN(
values_inner,
WhileLoopHelper(while_cond_fn_inner, while_body_fn_inner, values_inner,
absl::StrCat(name, "-Inner"), body_builder));
std::vector<XlaOp> updated_values;
updated_values.reserve(values_inner.size());
updated_values.push_back(k + ScalarLike(k, 1));
updated_values.push_back(values_inner[1]);
updated_values.push_back(values_inner[2]);
updated_values.push_back(values_inner[3]);
updated_values.push_back(values_inner[4]);
return updated_values;
};
std::vector<XlaOp> values;
TF_ASSIGN_OR_RETURN(values, WhileLoopHelper(while_cond_fn, while_body_fn,
initial_values, name, builder));
return values;
}
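// Post-processes the Jacobi result: flips signs so every singular value is
// non-negative (absorbing the sign into V), sorts U, V, and D by singular
// value in descending order, and renormalizes the columns of U and V to unit
// length.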
absl::StatusOr<SVDResult> SortBySingularValuesAndPostProcessing(
SVDResult result) {
XlaBuilder* builder = result.d.builder();
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(result.d));
const int64_t num_dims = shape.rank();
auto dimensions = shape.dimensions();
const int64_t m = ShapeUtil::GetDimension(shape, -2);
const int64_t n = ShapeUtil::GetDimension(shape, -1);
std::vector<int64_t> broadcast_dims(num_dims - 1);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims[num_dims - 2] = num_dims - 1;
auto d = GetMatrixDiagonal(result.d);
auto zeros = ZerosLike(d);
auto one = ScalarLike(d, 1.0);
auto sign = Select(Ge(d, zeros), zeros + one, zeros - one);
d = Select(Ge(d, zeros), d, -d);
result.v = Mul(result.v, sign, broadcast_dims);
d = BroadcastInDim(d, dimensions, broadcast_dims);
XlaOp sort_u_result =
Sort({d, SliceInMinorDims(result.u, {0, 0}, {m, n})},
CreateScalarGtComputation(
{shape.element_type(), shape.element_type()}, builder),
num_dims - 1);
XlaOp sort_v_result =
Sort({SliceInMinorDims(d, {0, 0}, {n, n}), result.v},
CreateScalarGtComputation(
{shape.element_type(), shape.element_type()}, builder),
num_dims - 1);
result.d = GetMatrixDiagonal(GetTupleElement(sort_v_result, 0));
result.v = GetTupleElement(sort_v_result, 1);
result.v = Mul(
result.v,
Rsqrt(Reduce(Square(result.v), ScalarLike(d, 0.0),
CreateScalarAddComputation(shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
result.u = ConcatInDim(builder,
{GetTupleElement(sort_u_result, 1),
SliceInMinorDims(result.u, {0, n}, {m, m})},
num_dims - 1);
result.u = Mul(
result.u,
Rsqrt(Reduce(Square(result.u), ScalarLike(d, 0.0),
CreateScalarAddComputation(shape.element_type(), builder),
{num_dims - 2})),
broadcast_dims);
return result;
}
}
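// Computes the decomposition a = U * diag(D) * V^T via Householder
// bidiagonalization followed by the cyclic one-sided Jacobi loop above.
// Matrices with m < n are transposed up front (and U/V swapped on the way
// out) so the solver always sees m >= n.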
SVDResult SVD(XlaOp a, int64_t max_iter, float epsilon,
PrecisionConfig::Precision precision) {
XlaBuilder* builder = a.builder();
auto return_error = [&](const absl::Status& status) {
SVDResult result;
result.u = builder->ReportError(status);
result.v = builder->ReportError(status);
result.d = builder->ReportError(status);
return result;
};
auto shape_with_status = builder->GetShape(a);
if (!shape_with_status.status().ok()) {
return return_error(shape_with_status.status());
}
Shape a_shape = shape_with_status.value();
const int64_t num_dims = a_shape.rank();
const int64_t num_batch_dims = num_dims - 2;
std::vector<int64_t> batch_dims(num_batch_dims);
for (int i = 0; i < num_batch_dims; ++i) {
batch_dims[i] = ShapeUtil::GetDimension(a_shape, i);
}
int64_t m = ShapeUtil::GetDimension(a_shape, -2);
int64_t n = ShapeUtil::GetDimension(a_shape, -1);
bool maybe_transpose = m < n;
if (maybe_transpose) {
a = TransposeInMinorDims(a);
std::swap(m, n);
}
auto eps = ScalarLike(a, epsilon);
auto svd_result_or = HouseHolderBidiagonalization(a, eps, precision);
if (!svd_result_or.ok()) {
return return_error(svd_result_or.status());
}
SVDResult svd_result = svd_result_or.value();
auto output_with_status = WhileLoopFn(
{
Zero(builder, S32),
svd_result.u,
svd_result.v,
svd_result.d,
eps,
},
n,
max_iter,
"CyclicOneSidedJacobi",
builder);
if (!output_with_status.status().ok()) {
return return_error(output_with_status.status());
}
auto output = output_with_status.value();
svd_result.u = output[1];
svd_result.v = output[2];
svd_result.d = output[3];
svd_result_or = SortBySingularValuesAndPostProcessing(svd_result);
if (!svd_result_or.ok()) {
return return_error(svd_result_or.status());
}
svd_result = svd_result_or.value();
if (maybe_transpose) {
std::swap(svd_result.u, svd_result.v);
}
return svd_result;
}
} | #include "xla/hlo/builder/lib/svd.h"
#include <numeric>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
namespace xla {
class SVDTest : public ClientLibraryTestBase {
protected:
void SetUp() override {
ClientLibraryTestBase::SetUp();
batch_3d_4x5_ = Array3D<float>{
{
{4, 6, 8, 10, 1},
{6, 45, 54, 63, 1},
{8, 54, 146, 166, 1},
{10, 63, 166, 310, 1},
},
{
{16, 24, 8, 12, 6},
{24, 61, 82, 48, 5},
{8, 82, 100, 6, 4},
{12, 48, 6, 62, 3},
},
};
}
void TearDown() override { ClientLibraryTestBase::TearDown(); }
Array3D<float> GetUnitMatrix3D(int32_t batch_dim, int32_t mat_dim) {
Array3D<float> result(batch_dim, mat_dim, mat_dim, 0.0);
for (int i = 0; i < batch_dim; ++i) {
for (int j = 0; j < mat_dim; ++j) {
result({i, j, j}) = 1.0;
}
}
return result;
}
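// Reconstructs the original matrix as U * diag(D) * V^T so tests can compare
// it against the SVD input.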
XlaOp ComputeMatmulUDVT(SVDResult result, XlaBuilder* builder) {
Shape u_shape = builder->GetShape(result.u).value();
Shape v_shape = builder->GetShape(result.v).value();
int64_t m = ShapeUtil::GetDimension(u_shape, -1);
int64_t n = ShapeUtil::GetDimension(v_shape, -1);
auto v = result.v;
auto u = result.u;
auto d = result.d;
if (m > n) {
u = SliceInMinorDims(u, {0, 0}, {m, n});
} else if (m < n) {
v = SliceInMinorDims(v, {0, 0}, {n, m});
}
int num_dims = u_shape.rank();
std::vector<int64_t> broadcast_dims(num_dims - 1);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims[num_dims - 2] = num_dims - 1;
return BatchDot(Mul(u, d, broadcast_dims), TransposeInMinorDims(v),
PrecisionConfig::HIGHEST);
}
XlaOp GetAverageAbsoluteError(XlaOp m1, XlaOp m2, XlaBuilder* builder) {
Shape shape = builder->GetShape(m1).value();
int64_t size = 1;
for (auto d : shape.dimensions()) {
size *= d;
}
return ReduceAll(Abs(m1 - m2), ConstantR0WithType(builder, F32, 0),
CreateScalarAddComputation(F32, builder)) /
ConstantR0WithType(builder, F32, size);
}
Array2D<float> GenerateRandomMatrix(int xsize, int ysize) {
Array2D<float> result{xsize, ysize, 0.0};
result.FillRandom(10, 2);
return result;
}
Array3D<float> batch_3d_4x5_;
};
XLA_TEST_F(SVDTest, Simple2D) {
XlaBuilder builder(TestName());
Array2D<float> simple_2d_4x4_ = Array2D<float>{
{4, 6, 8, 10},
{6, 45, 54, 63},
{8, 54, 146, 166},
{10, 63, 166, 310},
};
XlaOp a;
auto a_data = CreateR2Parameter<float>(simple_2d_4x4_, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-6);
ComputeMatmulUDVT(result, &builder);
ComputeAndCompareR2<float>(&builder, simple_2d_4x4_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, Test_VWVt_EQ_A_2x4x5) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x5_, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-8);
ComputeMatmulUDVT(result, &builder);
ComputeAndCompareR3<float>(&builder, batch_3d_4x5_, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, Test_Orthogonality_U) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x5_, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-8);
ComputeMatmulUDVT(result, &builder);
BatchDot(result.u, TransposeInMinorDims(result.u));
ComputeAndCompareR3<float>(&builder, GetUnitMatrix3D(2, 4), {a_data.get()},
ErrorSpec(1e-2, 1e-2));
}
XLA_TEST_F(SVDTest, Test_Orthogonality_V) {
XlaBuilder builder(TestName());
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x5_, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-8);
BatchDot(result.v, TransposeInMinorDims(result.v), PrecisionConfig::HIGHEST);
ComputeAndCompareR3<float>(&builder, GetUnitMatrix3D(2, 5), {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, TestSingleValuesMatchNumpy) {
XlaBuilder builder(TestName());
auto singular_values = Array2D<float>{
{431.05153007, 49.88334164, 20.94464584, 3.24845468},
{179.73128591, 68.05162245, 21.77679503, 13.94319712},
};
XlaOp a;
auto a_data = CreateR3Parameter<float>(batch_3d_4x5_, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-8);
Add(result.d, ZerosLike(result.d));
ComputeAndCompareR2<float>(&builder, singular_values, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest,
DISABLED_ON_INTERPRETER(Various_Size_Random_Matrix_512x128)) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(512, 128);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, Various_Size_Random_Matrix_128x256) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(128, 256);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, Various_Size_Random_Matrix_256x128) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(256, 128);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest,
DISABLED_ON_INTERPRETER(Various_Size_Random_Matrix_128x512)) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(128, 512);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, DISABLED_ON_CPU(DISABLED_ON_INTERPRETER(
Various_Size_Random_Matrix_512x256))) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(512, 256);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
XLA_TEST_F(SVDTest, DISABLED_ON_GPU(DISABLED_ON_CPU(DISABLED_ON_INTERPRETER(
Various_Size_Random_Matrix_512x512)))) {
XlaBuilder builder(TestName());
Array2D<float> a_val = GenerateRandomMatrix(512, 512);
XlaOp a;
auto a_data = CreateR2Parameter<float>(a_val, 0, "a", &builder, &a);
auto result = SVD(a, 100, 1e-4);
GetAverageAbsoluteError(ComputeMatmulUDVT(result, &builder), a, &builder);
ComputeAndCompareR0<float>(&builder, 1e-3, {a_data.get()},
ErrorSpec(1e-3, 1e-3));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/svd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/svd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
716754ad-2df8-41c0-9550-49ab9b1acfa3 | cpp | google/quiche | quic_header_list | quiche/quic/core/http/quic_header_list.cc | quiche/quic/core/http/quic_header_list_test.cc | #include "quiche/quic/core/http/quic_header_list.h"
#include <limits>
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_header_table.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
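// Appends one decoded header field; pairs are kept in arrival order.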
void QuicHeaderList::OnHeader(absl::string_view name, absl::string_view value) {
header_list_.emplace_back(std::string(name), std::string(value));
}
void QuicHeaderList::OnHeaderBlockEnd(size_t uncompressed_header_bytes,
size_t compressed_header_bytes) {
uncompressed_header_bytes_ = uncompressed_header_bytes;
compressed_header_bytes_ = compressed_header_bytes;
}
void QuicHeaderList::Clear() {
header_list_.clear();
uncompressed_header_bytes_ = 0;
compressed_header_bytes_ = 0;
}
std::string QuicHeaderList::DebugString() const {
std::string s = "{ ";
for (const auto& p : *this) {
s.append(p.first + "=" + p.second + ", ");
}
s.append("}");
return s;
}
} | #include "quiche/quic/core/http/quic_header_list.h"
#include <string>
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
using ::testing::ElementsAre;
using ::testing::Pair;
namespace quic::test {
class QuicHeaderListTest : public QuicTest {};
TEST_F(QuicHeaderListTest, OnHeader) {
QuicHeaderList headers;
headers.OnHeader("foo", "bar");
headers.OnHeader("april", "fools");
headers.OnHeader("beep", "");
EXPECT_THAT(headers, ElementsAre(Pair("foo", "bar"), Pair("april", "fools"),
Pair("beep", "")));
}
TEST_F(QuicHeaderListTest, DebugString) {
QuicHeaderList headers;
headers.OnHeader("foo", "bar");
headers.OnHeader("april", "fools");
headers.OnHeader("beep", "");
EXPECT_EQ("{ foo=bar, april=fools, beep=, }", headers.DebugString());
}
TEST_F(QuicHeaderListTest, IsCopyableAndAssignable) {
QuicHeaderList headers;
headers.OnHeader("foo", "bar");
headers.OnHeader("april", "fools");
headers.OnHeader("beep", "");
QuicHeaderList headers2(headers);
QuicHeaderList headers3 = headers;
EXPECT_THAT(headers2, ElementsAre(Pair("foo", "bar"), Pair("april", "fools"),
Pair("beep", "")));
EXPECT_THAT(headers3, ElementsAre(Pair("foo", "bar"), Pair("april", "fools"),
Pair("beep", "")));
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_header_list.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_header_list_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
f811e098-b9eb-4721-a5ef-a33f172b9062 | cpp | tensorflow/tensorflow | basic_rnn | tensorflow/lite/kernels/basic_rnn.cc | tensorflow/lite/kernels/basic_rnn_test.cc | #include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace rnn {
namespace {
struct OpData {
int scratch_tensor_index;
bool compute_row_sums = false;
};
}
constexpr int kInputTensor = 0;
constexpr int kWeightsTensor = 1;
constexpr int kRecurrentWeightsTensor = 2;
constexpr int kBiasTensor = 3;
constexpr int kHiddenStateTensor = 4;
constexpr int kOutputTensor = 0;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
context->AddTensors(context, 6, &op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
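// Validates the five inputs (input, weights, recurrent weights, bias, hidden
// state), resizes the output to [batch_size, num_units], and for hybrid
// (quantized-weight) execution allocates the six temporaries consumed by
// EvalHybrid.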
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, node->inputs->size, 5);
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* input_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTensor, &input_weights));
const TfLiteTensor* recurrent_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, kRecurrentWeightsTensor, &recurrent_weights));
const TfLiteTensor* bias;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBiasTensor, &bias));
const TfLiteTensor* hidden_state;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kHiddenStateTensor, &hidden_state));
const int batch_size = input->dims->data[0];
const int num_units = input_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, input->dims->data[1],
input_weights->dims->data[1]);
TF_LITE_ENSURE_EQ(context, input_weights->dims->data[0], bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, recurrent_weights->dims->data[0],
bias->dims->data[0]);
TF_LITE_ENSURE_EQ(context, recurrent_weights->dims->data[1],
bias->dims->data[0]);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_TYPES_EQ(context, input_weights->type,
recurrent_weights->type);
TF_LITE_ENSURE_EQ(context, NumDimensions(hidden_state), 2);
TF_LITE_ENSURE_EQ(context, hidden_state->dims->data[0], batch_size);
TF_LITE_ENSURE_EQ(context, hidden_state->dims->data[1], num_units);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_size_array = TfLiteIntArrayCreate(2);
output_size_array->data[0] = batch_size;
output_size_array->data[1] = num_units;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size_array));
const bool is_hybrid = IsHybridOp(input, input_weights);
if (is_hybrid) {
auto* op_data = reinterpret_cast<OpData*>(node->user_data);
op_data->compute_row_sums = true;
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(6);
node->temporaries->data[0] = op_data->scratch_tensor_index;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &input_quantized));

input_quantized->type = input_weights->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
TfLiteTensor* hidden_state_quantized;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 1, &hidden_state_quantized));
hidden_state_quantized->type = input_weights->type;
hidden_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(hidden_state_quantized->dims,
hidden_state->dims)) {
TfLiteIntArray* hidden_state_quantized_size =
TfLiteIntArrayCopy(hidden_state->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, hidden_state_quantized,
hidden_state_quantized_size));
}
node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 2, &scaling_factors));
scaling_factors->type = kTfLiteFloat32;
scaling_factors->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) {
TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1);
scaling_factors_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors,
scaling_factors_size));
}
node->temporaries->data[3] = op_data->scratch_tensor_index + 3;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 3, &accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {num_units, batch_size};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_scratch_size = TfLiteIntArrayCreate(2);
accum_scratch_size->data[0] = accum_scratch_dims[0];
accum_scratch_size->data[1] = accum_scratch_dims[1];
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, accum_scratch,
accum_scratch_size));
}
node->temporaries->data[4] = op_data->scratch_tensor_index + 4;
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 4, &zero_points));
zero_points->type = kTfLiteInt32;
zero_points->allocation_type = kTfLiteArenaRw;
int zero_points_dims[1] = {batch_size};
if (!TfLiteIntArrayEqualsArray(zero_points->dims, 1, zero_points_dims)) {
TfLiteIntArray* zero_points_size = TfLiteIntArrayCreate(1);
zero_points_size->data[0] = batch_size;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, zero_points,
zero_points_size));
}
node->temporaries->data[5] = op_data->scratch_tensor_index + 5;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 5, &row_sums));
row_sums->type = kTfLiteInt32;
row_sums->name = "Rnn_row_sums";
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_dims[2] = {2, num_units};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(2);
row_sums_size->data[0] = row_sums_dims[0];
row_sums_size->data[1] = row_sums_dims[1];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
}
return kTfLiteOk;
}
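// Runs one RNN batch step entirely in float.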
TfLiteStatus EvalFloat(const TfLiteTensor* input,
const TfLiteTensor* input_weights,
const TfLiteTensor* recurrent_weights,
const TfLiteTensor* bias, const TfLiteRNNParams* params,
TfLiteTensor* hidden_state, TfLiteTensor* output) {
const int batch_size = input->dims->data[0];
const int num_units = input_weights->dims->data[0];
const int input_size = input->dims->data[1];
const int output_batch_leading_dim =
output->dims->data[output->dims->size - 1];
float* hidden_state_ptr_batch = GetTensorData<float>(hidden_state);
const float* input_ptr_batch = GetTensorData<float>(input);
float* output_ptr_batch = GetTensorData<float>(output);
const float* input_weights_ptr = GetTensorData<float>(input_weights);
const float* recurrent_weights_ptr = GetTensorData<float>(recurrent_weights);
const float* bias_ptr = GetTensorData<float>(bias);
kernel_utils::RnnBatchStep(
input_ptr_batch, input_weights_ptr, recurrent_weights_ptr, bias_ptr,
input_size, num_units, batch_size, output_batch_leading_dim,
params->activation, hidden_state_ptr_batch, output_ptr_batch);
return kTfLiteOk;
}
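// Runs one RNN batch step with float activations and quantized (int8/uint8)
// weights; the input and hidden state are quantized into the scratch tensors
// on the fly.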
TfLiteStatus EvalHybrid(const TfLiteTensor* input,
const TfLiteTensor* input_weights,
const TfLiteTensor* recurrent_weights,
const TfLiteTensor* bias, const TfLiteRNNParams* params,
TfLiteTensor* input_scratch,
TfLiteTensor* hidden_state_scratch,
TfLiteTensor* scaling_factors,
TfLiteTensor* hidden_state, TfLiteTensor* output,
TfLiteTensor* zero_points, TfLiteTensor* accum_scratch,
TfLiteTensor* row_sums, bool* compute_row_sums) {
const int batch_size = input->dims->data[0];
const int num_units = input_weights->dims->data[0];
const int input_size = input->dims->data[1];
const int output_batch_leading_dim =
output->dims->data[output->dims->size - 1];
float* hidden_state_ptr_batch = GetTensorData<float>(hidden_state);
const float* input_ptr_batch = GetTensorData<float>(input);
float* output_ptr_batch = GetTensorData<float>(output);
const int8_t* input_weights_ptr = GetTensorData<int8_t>(input_weights);
const int8_t* recurrent_weights_ptr =
GetTensorData<int8_t>(recurrent_weights);
const float* bias_ptr = GetTensorData<float>(bias);
float input_weights_scale = input_weights->params.scale;
float recurrent_weights_scale = recurrent_weights->params.scale;
int8_t* quantized_input_ptr = GetTensorData<int8_t>(input_scratch);
int8_t* quantized_hidden_state_ptr =
GetTensorData<int8_t>(hidden_state_scratch);
float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);
int32_t* accum_scratch_ptr = GetTensorData<int32_t>(accum_scratch);
int32_t* zero_points_ptr = nullptr;
int32_t* row_sums_ptr = nullptr;
if (params->asymmetric_quantize_inputs) {
zero_points_ptr = GetTensorData<int32_t>(zero_points);
row_sums_ptr = GetTensorData<int32_t>(row_sums);
}
kernel_utils::RnnBatchStep(
input_ptr_batch, input_weights_ptr, input_weights_scale,
recurrent_weights_ptr, recurrent_weights_scale, bias_ptr, input_size,
num_units, batch_size, output_batch_leading_dim, params->activation,
quantized_input_ptr, quantized_hidden_state_ptr, scaling_factors_ptr,
hidden_state_ptr_batch, output_ptr_batch,
params->asymmetric_quantize_inputs, zero_points_ptr, accum_scratch_ptr,
row_sums_ptr, compute_row_sums);
return kTfLiteOk;
}
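// Dispatches on the weight tensor type: float weights run the float kernel,
// uint8/int8 weights run the hybrid kernel with the temporaries set up in
// Prepare.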
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteRNNParams*>(node->builtin_data);
auto* op_data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* input_weights;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, kWeightsTensor, &input_weights));
const TfLiteTensor* recurrent_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, kRecurrentWeightsTensor, &recurrent_weights));
const TfLiteTensor* bias;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kBiasTensor, &bias));
TfLiteTensor* hidden_state =
GetVariableInput(context, node, kHiddenStateTensor);
TF_LITE_ENSURE(context, hidden_state != nullptr);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input_weights->type) {
case kTfLiteFloat32:
return EvalFloat(input, input_weights, recurrent_weights, bias, params,
hidden_state, output);
case kTfLiteUInt8:
case kTfLiteInt8: {
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &input_quantized));
TfLiteTensor* hidden_state_quantized;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, 1, &hidden_state_quantized));
TfLiteTensor* scaling_factors;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 2, &scaling_factors));
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 3, &accum_scratch));
TfLiteTensor* zero_points;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 4, &zero_points));
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, 5, &row_sums));
return EvalHybrid(input, input_weights, recurrent_weights, bias, params,
input_quantized, hidden_state_quantized,
scaling_factors, hidden_state, output, zero_points,
accum_scratch, row_sums, &op_data->compute_row_sums);
}
default:
TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
TfLiteTypeGetName(input_weights->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_RNN() {
static TfLiteRegistration r = {rnn::Init, rnn::Free, rnn::Prepare, rnn::Eval};
return &r;
}
}
}
} | #include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
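// Black-box test fixtures: an input sequence, golden outputs, and fixed
// weights for a 2-batch, 16-unit, 8-input RNN.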
static float rnn_input[] = {
0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133,
0.43773448, 0.60379338, 0.35562468, -0.69424844, -0.93421471,
-0.87287879, 0.37144363, -0.62476718, 0.23791671, 0.40060222,
0.1356622, -0.99774903, -0.98858172, -0.38952237, -0.47685933,
0.31073618, 0.71511042, -0.63767755, -0.31729108, 0.33468103,
0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
-0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007,
-0.61777675, -0.21095741, 0.41213346, 0.73784804, 0.094794154,
0.47791874, 0.86496925, -0.53376222, 0.85315156, 0.10288584,
0.86684, -0.011186242, 0.10513687, 0.87825835, 0.59929144,
0.62827742, 0.18899453, 0.31440187, 0.99059987, 0.87170351,
-0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567,
-0.66609079, 0.59098077, 0.73017097, 0.74604273, 0.32882881,
-0.17503482, 0.22396147, 0.19379807, 0.29120302, 0.077113032,
-0.70331609, 0.15804303, -0.93407321, 0.40182066, 0.036301374,
0.66521823, 0.0300982, -0.7747041, -0.02038002, 0.020698071,
-0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
-0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682,
0.43519354, 0.14744234, 0.62589407, 0.1653645, -0.10651493,
-0.045277178, 0.99032974, -0.88255352, -0.85147917, 0.28153265,
0.19455957, -0.55479527, -0.56042433, 0.26048636, 0.84702539,
0.47587705, -0.074295521, -0.12287641, 0.70117295, 0.90532446,
0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
-0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563,
0.93455386, -0.6324693, -0.083922029};
static float rnn_golden_output[] = {
0.496726, 0, 0.965996, 0, 0.0584254, 0,
0, 0.12315, 0, 0, 0.612266, 0.456601,
0, 0.52286, 1.16099, 0.0291232,
0, 0, 0.524901, 0, 0, 0,
0, 1.02116, 0, 1.35762, 0, 0.356909,
0.436415, 0.0355727, 0, 0,
0, 0, 0, 0.262335, 0, 0,
0, 1.33992, 0, 2.9739, 0, 0,
1.31914, 2.66147, 0, 0,
0.942568, 0, 0, 0, 0.025507, 0,
0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
0.8158, 1.21805, 0.586239, 0.25427,
1.04436, 0, 0.630725, 0, 0.133801, 0.210693,
0.363026, 0, 0.533426, 0, 1.25926, 0.722707,
0, 1.22031, 1.30117, 0.495867,
0.222187, 0, 0.72725, 0, 0.767003, 0,
0, 0.147835, 0, 0, 0, 0.608758,
0.469394, 0.00720298, 0.927537, 0,
0.856974, 0.424257, 0, 0, 0.937329, 0,
0, 0, 0.476425, 0, 0.566017, 0.418462,
0.141911, 0.996214, 1.13063, 0,
0.967899, 0, 0, 0, 0.0831304, 0,
0, 1.00378, 0, 0, 0, 1.44818,
1.01768, 0.943891, 0.502745, 0,
0.940135, 0, 0, 0, 0, 0,
0, 2.13243, 0, 0.71208, 0.123918, 1.53907,
1.30225, 1.59644, 0.70222, 0,
0.804329, 0, 0.430576, 0, 0.505872, 0.509603,
0.343448, 0, 0.107756, 0.614544, 1.44549, 1.52311,
0.0454298, 0.300267, 0.562784, 0.395095,
0.228154, 0, 0.675323, 0, 1.70536, 0.766217,
0, 0, 0, 0.735363, 0.0759267, 1.91017,
0.941888, 0, 0, 0,
0, 0, 1.5909, 0, 0, 0,
0, 0.5755, 0, 0.184687, 0, 1.56296,
0.625285, 0, 0, 0,
0, 0, 0.0857888, 0, 0, 0,
0, 0.488383, 0.252786, 0, 0, 0,
1.02817, 1.85665, 0, 0,
0.00981836, 0, 1.06371, 0, 0, 0,
0, 0, 0, 0.290445, 0.316406, 0,
0.304161, 1.25079, 0.0707152, 0,
0.986264, 0.309201, 0, 0, 0, 0,
0, 1.64896, 0.346248, 0, 0.918175, 0.78884,
0.524981, 1.92076, 2.07013, 0.333244,
0.415153, 0.210318, 0, 0, 0, 0,
0, 2.02616, 0, 0.728256, 0.84183, 0.0907453,
0.628881, 3.58099, 1.49974, 0};
static std::initializer_list<float> rnn_weights = {
0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346,
0.317493, 0.969689, -0.343251, 0.186423, 0.398151, 0.152399,
0.448504, 0.317662, 0.523556, -0.323514, 0.480877, 0.333113,
-0.757714, -0.674487, -0.643585, 0.217766, -0.0251462, 0.79512,
-0.595574, -0.422444, 0.371572, -0.452178, -0.556069, -0.482188,
-0.685456, -0.727851, 0.841829, 0.551535, -0.232336, 0.729158,
-0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183,
0.306261, -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303,
0.0354295, 0.566564, -0.485469, -0.620498, 0.832546, 0.697884,
-0.279115, 0.294415, -0.584313, 0.548772, 0.0648819, 0.968726,
0.723834, -0.0080452, -0.350386, -0.272803, 0.115121, -0.412644,
-0.824713, -0.992843, -0.592904, -0.417893, 0.863791, -0.423461,
-0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042,
0.0960841, 0.368357, 0.244191, -0.817703, -0.211223, 0.442012,
0.37225, -0.623598, -0.405423, 0.455101, 0.673656, -0.145345,
-0.511346, -0.901675, -0.81252, -0.127006, 0.809865, -0.721884,
0.636255, 0.868989, -0.347973, -0.10179, -0.777449, 0.917274,
0.819286, 0.206218, -0.00785118, 0.167141, 0.45872, 0.972934,
-0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
0.277308, 0.415818};
static std::initializer_list<float> rnn_recurrent_weights = {
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1};
static std::initializer_list<float> rnn_bias = {
0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
-0.389184, 0.47481549, -0.4791103, 0.29931796, 0.10463274, 0.83918178,
0.37197268, 0.61957061, 0.3956964, -0.37609905};
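// Single-op harness around the RNN kernel; the weight tensor types are
// parameters so the same model drives both the float and hybrid (uint8/int8)
// paths.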
class RNNOpModel : public SingleOpModel {
public:
RNNOpModel(int batches, int units, int size,
const TensorType& weights = TensorType_FLOAT32,
const TensorType& recurrent_weights = TensorType_FLOAT32,
bool asymmetric_quantize_inputs = false)
: batches_(batches), units_(units), input_size_(size) {
input_ = AddInput(TensorType_FLOAT32);
weights_ = AddInput(weights);
recurrent_weights_ = AddInput(recurrent_weights);
bias_ = AddInput(TensorType_FLOAT32);
hidden_state_ = AddVariableInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_RNN, BuiltinOptions_RNNOptions,
CreateRNNOptions(builder_, ActivationFunctionType_RELU,
asymmetric_quantize_inputs)
.Union());
BuildInterpreter({{batches_, input_size_},
{units_, input_size_},
{units_, units_},
{units_},
{batches_, units_}});
}
void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
void SetWeights(std::initializer_list<float> f) {
PopulateTensor(weights_, f);
}
void SetRecurrentWeights(std::initializer_list<float> f) {
PopulateTensor(recurrent_weights_, f);
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
protected:
int input_;
int weights_;
int recurrent_weights_;
int bias_;
int hidden_state_;
int output_;
int batches_;
int units_;
int input_size_;
};
class HybridRNNOpModel : public RNNOpModel {
public:
HybridRNNOpModel(int batches, int units, int size, TensorType tensor_type,
bool asymmetric_quantize_inputs)
: RNNOpModel(batches, units, size, tensor_type, tensor_type,
asymmetric_quantize_inputs) {
tensor_type_ = tensor_type;
}
TensorType tensor_type_;
void SetWeights(int weights_idx, const std::vector<float>& f) {
if (tensor_type_ == TensorType_UINT8) {
SymmetricQuantizeAndPopulate(weights_idx, f);
} else {
SignedSymmetricQuantizeAndPopulate(weights_idx, f);
}
}
void SetWeights(std::initializer_list<float> f) { SetWeights(weights_, f); }
void SetRecurrentWeights(std::initializer_list<float> f) {
SetWeights(recurrent_weights_, f);
}
};
TEST(RnnOpTest, BlackBoxTest) {
RNNOpModel rnn(2, 16, 8);
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
const int input_sequence_size = sizeof(rnn_input) / sizeof(float) /
(rnn.input_size() * rnn.num_batches());
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(rnn.input_size(), batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_start = rnn_golden_output + i * rnn.num_units();
float* golden_end = golden_start + rnn.num_units();
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
}
}
class HybridRnnOpTest : public ::testing::TestWithParam<bool> {};
TEST_P(HybridRnnOpTest, BlackBoxTestUint8) {
HybridRNNOpModel rnn(2, 16, 8, TensorType_UINT8, GetParam());
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
const int input_sequence_size = sizeof(rnn_input) / sizeof(float) /
(rnn.input_size() * rnn.num_batches());
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(rnn.input_size(), batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_start = rnn_golden_output + i * rnn.num_units();
float* golden_end = golden_start + rnn.num_units();
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(
expected, 0.0104)));
}
}
TEST_P(HybridRnnOpTest, BlackBoxTestInt8) {
HybridRNNOpModel rnn(2, 16, 8, TensorType_INT8, GetParam());
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
const int input_sequence_size = sizeof(rnn_input) / sizeof(float) /
(rnn.input_size() * rnn.num_batches());
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(rnn.input_size(), batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_start = rnn_golden_output + i * rnn.num_units();
float* golden_end = golden_start + rnn.num_units();
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(
expected, 0.0104)));
}
}
INSTANTIATE_TEST_SUITE_P(HybridRnnOpTest, HybridRnnOpTest,
::testing::ValuesIn({false, true}));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/basic_rnn.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/basic_rnn_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ae917258-0a9b-4bd7-b025-c4d9b3ac9c98 | cpp | tensorflow/tensorflow | ifrt_serving_core_selector | tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.cc | tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include <cstdint>
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/framework/serving_device_selector.h"
namespace tensorflow {
namespace ifrt_serving {
IfrtServingCoreSelector::IfrtServingCoreSelector(
tsl::ServingDeviceSelector* device_selector, int num_cores)
: device_selector_(device_selector), num_cores_(num_cores) {}
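// The first `num_cores_` runs of a given program are assigned to cores
// 0..num_cores_-1 in order (so each core sees the program once); subsequent
// runs defer to the wrapped ServingDeviceSelector.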
tsl::DeviceReservation IfrtServingCoreSelector::ReserveDevice(
int64_t program_id) {
absl::MutexLock lock(&mu_);
int64_t run_count = run_counter_[program_id]++;
if (run_count < num_cores_) {
return tsl::DeviceReservation(run_count, nullptr);
}
return device_selector_->ReserveDevice(absl::StrCat(program_id));
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
class IfrtServingCoreSelectorTest : public ::testing::Test {
protected:
explicit IfrtServingCoreSelectorTest() {
core_selector_ = std::make_unique<IfrtServingCoreSelector>(
&serving_device_selector_, num_cores_);
}
tsl::test_util::MockServingDeviceSelector serving_device_selector_;
std::unique_ptr<IfrtServingCoreSelector> core_selector_;
int num_cores_ = 2;
};
TEST_F(IfrtServingCoreSelectorTest, ReservedDevicesReturns) {
int64_t program_id1 = 111111;
EXPECT_CALL(serving_device_selector_,
ReserveDevice(absl::StrCat(program_id1)))
.WillOnce([this](::testing::Unused) {
return tsl::DeviceReservation(0, &serving_device_selector_);
});
for (int i = 0; i < num_cores_; ++i) {
EXPECT_THAT(core_selector_->ReserveDevice(program_id1).device_index(), i);
}
tsl::DeviceReservation reservation =
core_selector_->ReserveDevice(program_id1);
EXPECT_THAT(reservation.device_index(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
72404f3d-ecd2-4b7c-a0d4-46a6e8782029 | cpp | google/cel-cpp | proto_message_type_adapter | eval/public/structs/proto_message_type_adapter.cc | eval/public/structs/proto_message_type_adapter_test.cc | #include "eval/public/structs/proto_message_type_adapter.h"
#include <cstdint>
#include <limits>
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/util/message_differencer.h"
#include "absl/base/no_destructor.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "base/attribute.h"
#include "common/memory.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/internal_field_backed_list_impl.h"
#include "eval/public/containers/internal_field_backed_map_impl.h"
#include "eval/public/message_wrapper.h"
#include "eval/public/structs/cel_proto_wrap_util.h"
#include "eval/public/structs/field_access_impl.h"
#include "eval/public/structs/legacy_type_adapter.h"
#include "eval/public/structs/legacy_type_info_apis.h"
#include "extensions/protobuf/internal/qualify.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/casts.h"
#include "internal/status_macros.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/map_field.h"
#include "google/protobuf/message.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::extensions::ProtoMemoryManagerArena;
using ::cel::extensions::ProtoMemoryManagerRef;
using ::google::protobuf::FieldDescriptor;
using ::google::protobuf::Message;
using ::google::protobuf::Reflection;
using LegacyQualifyResult = LegacyTypeAccessApis::LegacyQualifyResult;
const std::string& UnsupportedTypeName() {
static absl::NoDestructor<std::string> kUnsupportedTypeName(
"<unknown message>");
return *kUnsupportedTypeName;
}
CelValue MessageCelValueFactory(const google::protobuf::Message* message);
inline absl::StatusOr<const google::protobuf::Message*> UnwrapMessage(
const MessageWrapper& value, absl::string_view op) {
if (!value.HasFullProto() || value.message_ptr() == nullptr) {
return absl::InternalError(
absl::StrCat(op, " called on non-message type."));
}
return static_cast<const google::protobuf::Message*>(value.message_ptr());
}
inline absl::StatusOr<google::protobuf::Message*> UnwrapMessage(
const MessageWrapper::Builder& value, absl::string_view op) {
if (!value.HasFullProto() || value.message_ptr() == nullptr) {
return absl::InternalError(
absl::StrCat(op, " called on non-message type."));
}
return static_cast<google::protobuf::Message*>(value.message_ptr());
}
bool ProtoEquals(const google::protobuf::Message& m1, const google::protobuf::Message& m2) {
if (m1.GetDescriptor() != m2.GetDescriptor()) {
return false;
}
return google::protobuf::util::MessageDifferencer::Equals(m1, m2);
}
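// CEL presence semantics: map and repeated fields are "present" when
// non-empty; singular fields defer to proto HasField.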
bool CelFieldIsPresent(const google::protobuf::Message* message,
const google::protobuf::FieldDescriptor* field_desc,
const google::protobuf::Reflection* reflection) {
if (field_desc->is_map()) {
return reflection->FieldSize(*message, field_desc) != 0;
}
if (field_desc->is_repeated()) {
return reflection->FieldSize(*message, field_desc) != 0;
}
return reflection->HasField(*message, field_desc);
}
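// Looks up `field_name` on the descriptor, falling back to registered proto
// extensions, and reports CEL presence for the resolved field.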
absl::StatusOr<bool> HasFieldImpl(const google::protobuf::Message* message,
const google::protobuf::Descriptor* descriptor,
absl::string_view field_name) {
ABSL_ASSERT(descriptor == message->GetDescriptor());
const Reflection* reflection = message->GetReflection();
const FieldDescriptor* field_desc = descriptor->FindFieldByName(field_name);
if (field_desc == nullptr && reflection != nullptr) {
field_desc = reflection->FindKnownExtensionByName(field_name);
}
if (field_desc == nullptr) {
return absl::NotFoundError(absl::StrCat("no_such_field : ", field_name));
}
if (reflection == nullptr) {
return absl::FailedPreconditionError(
"google::protobuf::Reflection unavailble in CEL field access.");
}
return CelFieldIsPresent(message, field_desc, reflection);
}
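// Adapts a proto field to a CelValue: map fields become arena-owned
// FieldBackedMapImpl wrappers, repeated fields become FieldBackedListImpl
// wrappers, and singular fields are converted directly.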
absl::StatusOr<CelValue> CreateCelValueFromField(
const google::protobuf::Message* message, const google::protobuf::FieldDescriptor* field_desc,
ProtoWrapperTypeOptions unboxing_option, google::protobuf::Arena* arena) {
if (field_desc->is_map()) {
auto* map = google::protobuf::Arena::Create<internal::FieldBackedMapImpl>(
arena, message, field_desc, &MessageCelValueFactory, arena);
return CelValue::CreateMap(map);
}
if (field_desc->is_repeated()) {
auto* list = google::protobuf::Arena::Create<internal::FieldBackedListImpl>(
arena, message, field_desc, &MessageCelValueFactory, arena);
return CelValue::CreateList(list);
}
CEL_ASSIGN_OR_RETURN(
CelValue result,
internal::CreateValueFromSingleField(message, field_desc, unboxing_option,
&MessageCelValueFactory, arena));
return result;
}
absl::StatusOr<CelValue> GetFieldImpl(const google::protobuf::Message* message,
const google::protobuf::Descriptor* descriptor,
absl::string_view field_name,
ProtoWrapperTypeOptions unboxing_option,
cel::MemoryManagerRef memory_manager) {
ABSL_ASSERT(descriptor == message->GetDescriptor());
const Reflection* reflection = message->GetReflection();
const FieldDescriptor* field_desc = descriptor->FindFieldByName(field_name);
if (field_desc == nullptr && reflection != nullptr) {
std::string ext_name(field_name);
field_desc = reflection->FindKnownExtensionByName(ext_name);
}
if (field_desc == nullptr) {
return CreateNoSuchFieldError(memory_manager, field_name);
}
google::protobuf::Arena* arena = ProtoMemoryManagerArena(memory_manager);
return CreateCelValueFromField(message, field_desc, unboxing_option, arena);
}
class LegacyQualifyState final
: public cel::extensions::protobuf_internal::ProtoQualifyState {
public:
using ProtoQualifyState::ProtoQualifyState;
LegacyQualifyState(const LegacyQualifyState&) = delete;
LegacyQualifyState& operator=(const LegacyQualifyState&) = delete;
absl::optional<CelValue>& result() { return result_; }
private:
void SetResultFromError(absl::Status status,
cel::MemoryManagerRef memory_manager) override {
result_ = CreateErrorValue(memory_manager, status);
}
void SetResultFromBool(bool value) override {
result_ = CelValue::CreateBool(value);
}
absl::Status SetResultFromField(
const google::protobuf::Message* message, const google::protobuf::FieldDescriptor* field,
ProtoWrapperTypeOptions unboxing_option,
cel::MemoryManagerRef memory_manager) override {
CEL_ASSIGN_OR_RETURN(result_, CreateCelValueFromField(
message, field, unboxing_option,
ProtoMemoryManagerArena(memory_manager)));
return absl::OkStatus();
}
absl::Status SetResultFromRepeatedField(
const google::protobuf::Message* message, const google::protobuf::FieldDescriptor* field,
int index, cel::MemoryManagerRef memory_manager) override {
CEL_ASSIGN_OR_RETURN(result_,
internal::CreateValueFromRepeatedField(
message, field, index, &MessageCelValueFactory,
ProtoMemoryManagerArena(memory_manager)));
return absl::OkStatus();
}
absl::Status SetResultFromMapField(
const google::protobuf::Message* message, const google::protobuf::FieldDescriptor* field,
const google::protobuf::MapValueConstRef& value,
cel::MemoryManagerRef memory_manager) override {
CEL_ASSIGN_OR_RETURN(result_,
internal::CreateValueFromMapValue(
message, field, &value, &MessageCelValueFactory,
ProtoMemoryManagerArena(memory_manager)));
return absl::OkStatus();
}
absl::optional<CelValue> result_;
};
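// Applies a pre-resolved select path (field / index / key qualifiers) in one
// pass. Stops early if an intermediate step resolves to a value (typically an
// error), recording how many qualifiers were consumed; the final qualifier is
// applied as either a has() test or a get.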
absl::StatusOr<LegacyQualifyResult> QualifyImpl(
const google::protobuf::Message* message, const google::protobuf::Descriptor* descriptor,
absl::Span<const cel::SelectQualifier> path, bool presence_test,
cel::MemoryManagerRef memory_manager) {
google::protobuf::Arena* arena = ProtoMemoryManagerArena(memory_manager);
ABSL_DCHECK(descriptor == message->GetDescriptor());
LegacyQualifyState qualify_state(message, descriptor,
message->GetReflection());
for (int i = 0; i < path.size() - 1; i++) {
const auto& qualifier = path.at(i);
CEL_RETURN_IF_ERROR(qualify_state.ApplySelectQualifier(
qualifier, ProtoMemoryManagerRef(arena)));
if (qualify_state.result().has_value()) {
LegacyQualifyResult result;
result.value = std::move(qualify_state.result()).value();
result.qualifier_count = result.value.IsError() ? -1 : i + 1;
return result;
}
}
const auto& last_qualifier = path.back();
LegacyQualifyResult result;
result.qualifier_count = -1;
if (presence_test) {
CEL_RETURN_IF_ERROR(qualify_state.ApplyLastQualifierHas(
last_qualifier, ProtoMemoryManagerRef(arena)));
} else {
CEL_RETURN_IF_ERROR(qualify_state.ApplyLastQualifierGet(
last_qualifier, ProtoMemoryManagerRef(arena)));
}
result.value = *qualify_state.result();
return result;
}
std::vector<absl::string_view> ListFieldsImpl(
const CelValue::MessageWrapper& instance) {
if (instance.message_ptr() == nullptr) {
return std::vector<absl::string_view>();
}
ABSL_ASSERT(instance.HasFullProto());
const auto* message =
static_cast<const google::protobuf::Message*>(instance.message_ptr());
const auto* reflect = message->GetReflection();
std::vector<const google::protobuf::FieldDescriptor*> fields;
reflect->ListFields(*message, &fields);
std::vector<absl::string_view> field_names;
field_names.reserve(fields.size());
for (const auto* field : fields) {
field_names.emplace_back(field->name());
}
return field_names;
}
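// Message adapter that serves any full google::protobuf::Message: it pulls
// the descriptor and reflection off the instance itself instead of binding to
// a single message type, and implements the access, mutation, and type-info
// APIs in one shared singleton.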
class DucktypedMessageAdapter : public LegacyTypeAccessApis,
public LegacyTypeMutationApis,
public LegacyTypeInfoApis {
public:
absl::StatusOr<bool> HasField(
absl::string_view field_name,
const CelValue::MessageWrapper& value) const override {
CEL_ASSIGN_OR_RETURN(const google::protobuf::Message* message,
UnwrapMessage(value, "HasField"));
return HasFieldImpl(message, message->GetDescriptor(), field_name);
}
absl::StatusOr<CelValue> GetField(
absl::string_view field_name, const CelValue::MessageWrapper& instance,
ProtoWrapperTypeOptions unboxing_option,
cel::MemoryManagerRef memory_manager) const override {
CEL_ASSIGN_OR_RETURN(const google::protobuf::Message* message,
UnwrapMessage(instance, "GetField"));
return GetFieldImpl(message, message->GetDescriptor(), field_name,
unboxing_option, memory_manager);
}
absl::StatusOr<LegacyTypeAccessApis::LegacyQualifyResult> Qualify(
absl::Span<const cel::SelectQualifier> qualifiers,
const CelValue::MessageWrapper& instance, bool presence_test,
cel::MemoryManagerRef memory_manager) const override {
CEL_ASSIGN_OR_RETURN(const google::protobuf::Message* message,
UnwrapMessage(instance, "Qualify"));
return QualifyImpl(message, message->GetDescriptor(), qualifiers,
presence_test, memory_manager);
}
bool IsEqualTo(
const CelValue::MessageWrapper& instance,
const CelValue::MessageWrapper& other_instance) const override {
absl::StatusOr<const google::protobuf::Message*> lhs =
UnwrapMessage(instance, "IsEqualTo");
absl::StatusOr<const google::protobuf::Message*> rhs =
UnwrapMessage(other_instance, "IsEqualTo");
if (!lhs.ok() || !rhs.ok()) {
return false;
}
return ProtoEquals(**lhs, **rhs);
}
absl::string_view GetTypename(
const MessageWrapper& wrapped_message) const override {
if (!wrapped_message.HasFullProto() ||
wrapped_message.message_ptr() == nullptr) {
return UnsupportedTypeName();
}
auto* message =
static_cast<const google::protobuf::Message*>(wrapped_message.message_ptr());
return message->GetDescriptor()->full_name();
}
std::string DebugString(
const MessageWrapper& wrapped_message) const override {
if (!wrapped_message.HasFullProto() ||
wrapped_message.message_ptr() == nullptr) {
return UnsupportedTypeName();
}
auto* message =
static_cast<const google::protobuf::Message*>(wrapped_message.message_ptr());
return message->ShortDebugString();
}
bool DefinesField(absl::string_view field_name) const override {
return true;
}
absl::StatusOr<CelValue::MessageWrapper::Builder> NewInstance(
cel::MemoryManagerRef memory_manager) const override {
return absl::UnimplementedError("NewInstance is not implemented");
}
absl::StatusOr<CelValue> AdaptFromWellKnownType(
cel::MemoryManagerRef memory_manager,
CelValue::MessageWrapper::Builder instance) const override {
if (!instance.HasFullProto() || instance.message_ptr() == nullptr) {
return absl::UnimplementedError(
"MessageLite is not supported, descriptor is required");
}
return ProtoMessageTypeAdapter(
static_cast<const google::protobuf::Message*>(instance.message_ptr())
->GetDescriptor(),
nullptr)
.AdaptFromWellKnownType(memory_manager, instance);
}
absl::Status SetField(
absl::string_view field_name, const CelValue& value,
cel::MemoryManagerRef memory_manager,
CelValue::MessageWrapper::Builder& instance) const override {
if (!instance.HasFullProto() || instance.message_ptr() == nullptr) {
return absl::UnimplementedError(
"MessageLite is not supported, descriptor is required");
}
return ProtoMessageTypeAdapter(
static_cast<const google::protobuf::Message*>(instance.message_ptr())
->GetDescriptor(),
nullptr)
.SetField(field_name, value, memory_manager, instance);
}
std::vector<absl::string_view> ListFields(
const CelValue::MessageWrapper& instance) const override {
return ListFieldsImpl(instance);
}
const LegacyTypeAccessApis* GetAccessApis(
const MessageWrapper& wrapped_message) const override {
return this;
}
const LegacyTypeMutationApis* GetMutationApis(
const MessageWrapper& wrapped_message) const override {
return this;
}
static const DucktypedMessageAdapter& GetSingleton() {
static absl::NoDestructor<DucktypedMessageAdapter> instance;
return *instance;
}
};
CelValue MessageCelValueFactory(const google::protobuf::Message* message) {
return CelValue::CreateMessageWrapper(
MessageWrapper(message, &DucktypedMessageAdapter::GetSingleton()));
}
}
std::string ProtoMessageTypeAdapter::DebugString(
const MessageWrapper& wrapped_message) const {
if (!wrapped_message.HasFullProto() ||
wrapped_message.message_ptr() == nullptr) {
return UnsupportedTypeName();
}
auto* message =
static_cast<const google::protobuf::Message*>(wrapped_message.message_ptr());
return message->ShortDebugString();
}
absl::string_view ProtoMessageTypeAdapter::GetTypename(
const MessageWrapper& wrapped_message) const {
return descriptor_->full_name();
}
const LegacyTypeMutationApis* ProtoMessageTypeAdapter::GetMutationApis(
const MessageWrapper& wrapped_message) const {
return this;
}
const LegacyTypeAccessApis* ProtoMessageTypeAdapter::GetAccessApis(
const MessageWrapper& wrapped_message) const {
return this;
}
absl::optional<LegacyTypeInfoApis::FieldDescription>
ProtoMessageTypeAdapter::FindFieldByName(absl::string_view field_name) const {
if (descriptor_ == nullptr) {
return absl::nullopt;
}
const google::protobuf::FieldDescriptor* field_descriptor =
descriptor_->FindFieldByName(field_name);
if (field_descriptor == nullptr) {
return absl::nullopt;
}
return LegacyTypeInfoApis::FieldDescription{field_descriptor->number(),
field_descriptor->name()};
}
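// Converts a failed SetField precondition into an InvalidArgumentError that
// names the message type, the field, and the reason.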
absl::Status ProtoMessageTypeAdapter::ValidateSetFieldOp(
bool assertion, absl::string_view field, absl::string_view detail) const {
if (!assertion) {
return absl::InvalidArgumentError(
absl::Substitute("SetField failed on message $0, field '$1': $2",
descriptor_->full_name(), field, detail));
}
return absl::OkStatus();
}
absl::StatusOr<CelValue::MessageWrapper::Builder>
ProtoMessageTypeAdapter::NewInstance(
cel::MemoryManagerRef memory_manager) const {
if (message_factory_ == nullptr) {
return absl::UnimplementedError(
absl::StrCat("Cannot create message ", descriptor_->name()));
}
google::protobuf::Arena* arena = ProtoMemoryManagerArena(memory_manager);
const Message* prototype = message_factory_->GetPrototype(descriptor_);
Message* msg = (prototype != nullptr) ? prototype->New(arena) : nullptr;
if (msg == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("Failed to create message ", descriptor_->name()));
}
return MessageWrapper::Builder(msg);
}
bool ProtoMessageTypeAdapter::DefinesField(absl::string_view field_name) const {
return descriptor_->FindFieldByName(field_name) != nullptr;
}
absl::StatusOr<bool> ProtoMessageTypeAdapter::HasField(
absl::string_view field_name, const CelValue::MessageWrapper& value) const {
CEL_ASSIGN_OR_RETURN(const google::protobuf::Message* message,
UnwrapMessage(value, "HasField"));
return HasFieldImpl(message, descriptor_, field_name);
}
absl::StatusOr<CelValue> ProtoMessageTypeAdapter::GetField(
absl::string_view field_name, const CelValue::MessageWrapper& instance,
ProtoWrapperTypeOptions unboxing_option,
cel::MemoryManagerRef memory_manager) const {
CEL_ASSIGN_OR_RETURN(const google::protobuf::Message* message,
UnwrapMessage(instance, "GetField"));
return GetFieldImpl(message, descriptor_, field_name, unboxing_option,
memory_manager);
}
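// Applies a chain of select qualifiers (typed field accesses plus map/list
// index lookups) to the message in a single traversal; `presence_test`
// requests a has()-style presence check for the final step.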
absl::StatusOr<LegacyTypeAccessApis::LegacyQualifyResult>
ProtoMessageTypeAdapter::Qualify(
absl::Span<const cel::SelectQualifier> qualifiers,
const CelValue::MessageWrapper& instance, bool presence_test,
cel::MemoryManagerRef memory_manager) const {
CEL_ASSIGN_OR_RETURN(const google::protobuf::Message* message,
UnwrapMessage(instance, "Qualify"));
return QualifyImpl(message, descriptor_, qualifiers, presence_test,
memory_manager);
}
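// Copies `value` into `field` of `message`. Map fields are rebuilt entry by
// entry (a proto map entry is a nested message whose key and value are always
// field numbers 1 and 2), repeated fields are appended element by element,
// and everything else is written as a single field.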
absl::Status ProtoMessageTypeAdapter::SetField(
const google::protobuf::FieldDescriptor* field, const CelValue& value,
google::protobuf::Arena* arena, google::protobuf::Message* message) const {
if (field->is_map()) {
constexpr int kKeyField = 1;
constexpr int kValueField = 2;
const CelMap* cel_map;
CEL_RETURN_IF_ERROR(ValidateSetFieldOp(
value.GetValue<const CelMap*>(&cel_map) && cel_map != nullptr,
field->name(), "value is not CelMap"));
auto entry_descriptor = field->message_type();
CEL_RETURN_IF_ERROR(
ValidateSetFieldOp(entry_descriptor != nullptr, field->name(),
"failed to find map entry descriptor"));
auto key_field_descriptor = entry_descriptor->FindFieldByNumber(kKeyField);
auto value_field_descriptor =
entry_descriptor->FindFieldByNumber(kValueField);
CEL_RETURN_IF_ERROR(
ValidateSetFieldOp(key_field_descriptor != nullptr, field->name(),
"failed to find key field descriptor"));
CEL_RETURN_IF_ERROR(
ValidateSetFieldOp(value_field_descriptor != nullptr, field->name(),
"failed to find value field descriptor"));
CEL_ASSIGN_OR_RETURN(const CelList* key_list, cel_map->ListKeys(arena));
for (int i = 0; i < key_list->size(); i++) {
CelValue key = (*key_list).Get(arena, i);
auto value = (*cel_map).Get(arena, key);
CEL_RETURN_IF_ERROR(ValidateSetFieldOp(value.has_value(), field->name(),
"error serializing CelMap"));
Message* entry_msg = message->GetReflection()->AddMessage(message, field);
CEL_RETURN_IF_ERROR(internal::SetValueToSingleField(
key, key_field_descriptor, entry_msg, arena));
CEL_RETURN_IF_ERROR(internal::SetValueToSingleField(
value.value(), value_field_descriptor, entry_msg, arena));
}
} else if (field->is_repeated()) {
const CelList* cel_list;
CEL_RETURN_IF_ERROR(ValidateSetFieldOp(
value.GetValue<const CelList*>(&cel_list) && cel_list != nullptr,
field->name(), "expected CelList value"));
for (int i = 0; i < cel_list->size(); i++) {
CEL_RETURN_IF_ERROR(internal::AddValueToRepeatedField(
(*cel_list).Get(arena, i), field, message, arena));
}
} else {
CEL_RETURN_IF_ERROR(
internal::SetValueToSingleField(value, field, message, arena));
}
return absl::OkStatus();
}
absl::Status ProtoMessageTypeAdapter::SetField(
absl::string_view field_name, const CelValue& value,
cel::MemoryManagerRef memory_manager,
CelValue::MessageWrapper::Builder& instance) const {
google::protobuf::Arena* arena =
cel::extensions::ProtoMemoryManagerArena(memory_manager);
CEL_ASSIGN_OR_RETURN(google::protobuf::Message * mutable_message,
UnwrapMessage(instance, "SetField"));
const google::protobuf::FieldDescriptor* field_descriptor =
descriptor_->FindFieldByName(field_name);
CEL_RETURN_IF_ERROR(
ValidateSetFieldOp(field_descriptor != nullptr, field_name, "not found"));
return SetField(field_descriptor, value, arena, mutable_message);
}
absl::Status ProtoMessageTypeAdapter::SetFieldByNumber(
int64_t field_number, const CelValue& value,
cel::MemoryManagerRef memory_manager,
CelValue::MessageWrapper::Builder& instance) const {
google::protobuf::Arena* arena =
cel::extensions::ProtoMemoryManagerArena(memory_manager);
CEL_ASSIGN_OR_RETURN(google::protobuf::Message * mutable_message,
UnwrapMessage(instance, "SetField"));
const google::protobuf::FieldDescriptor* field_descriptor =
descriptor_->FindFieldByNumber(field_number);
CEL_RETURN_IF_ERROR(ValidateSetFieldOp(
field_descriptor != nullptr, absl::StrCat(field_number), "not found"));
return SetField(field_descriptor, value, arena, mutable_message);
}
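// Unboxes well-known wrapper messages (e.g. google.protobuf.Int64Value) into
// the corresponding CEL primitive; messages with no special meaning are
// returned as ordinary message values.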
absl::StatusOr<CelValue> ProtoMessageTypeAdapter::AdaptFromWellKnownType(
cel::MemoryManagerRef memory_manager,
CelValue::MessageWrapper::Builder instance) const {
google::protobuf::Arena* arena =
cel::extensions::ProtoMemoryManagerArena(memory_manager);
CEL_ASSIGN_OR_RETURN(google::protobuf::Message * message,
UnwrapMessage(instance, "AdaptFromWellKnownType"));
return internal::UnwrapMessageToValue(message, &MessageCelValueFactory,
arena);
}
bool ProtoMessageTypeAdapter::IsEqualTo(
const CelValue::MessageWrapper& instance,
const CelValue::MessageWrapper& other_instance) const {
absl::StatusOr<const google::protobuf::Message*> lhs =
UnwrapMessage(instance, "IsEqualTo");
absl::StatusOr<const google::protobuf::Message*> rhs =
UnwrapMessage(other_instance, "IsEqualTo");
if (!lhs.ok() || !rhs.ok()) {
return false;
}
return ProtoEquals(**lhs, **rhs);
}
std::vector<absl::string_view> ProtoMessageTypeAdapter::ListFields(
const CelValue::MessageWrapper& instance) const {
return ListFieldsImpl(instance);
}
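// Shared, descriptor-agnostic type info for full proto messages, backed by
// the duck-typed adapter singleton above.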
const LegacyTypeInfoApis& GetGenericProtoTypeInfoInstance() {
return DucktypedMessageAdapter::GetSingleton();
}
} | #include "eval/public/structs/proto_message_type_adapter.h"
#include <vector>
#include "google/protobuf/wrappers.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/status/status.h"
#include "base/attribute.h"
#include "common/value.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/message_wrapper.h"
#include "eval/public/structs/legacy_type_adapter.h"
#include "eval/public/structs/legacy_type_info_apis.h"
#include "eval/public/testing/matchers.h"
#include "eval/testutil/test_message.pb.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/proto_matchers.h"
#include "internal/testing.h"
#include "runtime/runtime_options.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::cel::ProtoWrapperTypeOptions;
using ::cel::extensions::ProtoMemoryManagerRef;
using ::cel::internal::test::EqualsProto;
using ::google::protobuf::Int64Value;
using ::testing::_;
using ::testing::AllOf;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Field;
using ::testing::HasSubstr;
using ::testing::Optional;
using ::testing::Truly;
using LegacyQualifyResult = LegacyTypeAccessApis::LegacyQualifyResult;
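// Runs each accessor test twice: once against the generic duck-typed type
// info and once against a descriptor-specific ProtoMessageTypeAdapter, as
// selected by the bool test parameter (see GetAccessApis below).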
class ProtoMessageTypeAccessorTest : public testing::TestWithParam<bool> {
public:
ProtoMessageTypeAccessorTest()
: type_specific_instance_(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory()) {}
const LegacyTypeAccessApis& GetAccessApis() {
bool use_generic_instance = GetParam();
if (use_generic_instance) {
return *GetGenericProtoTypeInfoInstance().GetAccessApis(dummy_);
} else {
return type_specific_instance_;
}
}
private:
ProtoMessageTypeAdapter type_specific_instance_;
CelValue::MessageWrapper dummy_;
};
TEST_P(ProtoMessageTypeAccessorTest, HasFieldSingular) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
TestMessage example;
MessageWrapper value(&example, nullptr);
EXPECT_THAT(accessor.HasField("int64_value", value), IsOkAndHolds(false));
example.set_int64_value(10);
EXPECT_THAT(accessor.HasField("int64_value", value), IsOkAndHolds(true));
}
TEST_P(ProtoMessageTypeAccessorTest, HasFieldRepeated) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
TestMessage example;
MessageWrapper value(&example, nullptr);
EXPECT_THAT(accessor.HasField("int64_list", value), IsOkAndHolds(false));
example.add_int64_list(10);
EXPECT_THAT(accessor.HasField("int64_list", value), IsOkAndHolds(true));
}
TEST_P(ProtoMessageTypeAccessorTest, HasFieldMap) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
TestMessage example;
example.set_int64_value(10);
MessageWrapper value(&example, nullptr);
EXPECT_THAT(accessor.HasField("int64_int32_map", value), IsOkAndHolds(false));
(*example.mutable_int64_int32_map())[2] = 3;
EXPECT_THAT(accessor.HasField("int64_int32_map", value), IsOkAndHolds(true));
}
TEST_P(ProtoMessageTypeAccessorTest, HasFieldUnknownField) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
TestMessage example;
example.set_int64_value(10);
MessageWrapper value(&example, nullptr);
EXPECT_THAT(accessor.HasField("unknown_field", value),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(ProtoMessageTypeAccessorTest, HasFieldNonMessageType) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
MessageWrapper value(static_cast<const google::protobuf::MessageLite*>(nullptr),
nullptr);
EXPECT_THAT(accessor.HasField("unknown_field", value),
StatusIs(absl::StatusCode::kInternal));
}
TEST_P(ProtoMessageTypeAccessorTest, GetFieldSingular) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage example;
example.set_int64_value(10);
MessageWrapper value(&example, nullptr);
EXPECT_THAT(accessor.GetField("int64_value", value,
ProtoWrapperTypeOptions::kUnsetNull, manager),
IsOkAndHolds(test::IsCelInt64(10)));
}
TEST_P(ProtoMessageTypeAccessorTest, GetFieldNoSuchField) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage example;
example.set_int64_value(10);
MessageWrapper value(&example, nullptr);
EXPECT_THAT(accessor.GetField("unknown_field", value,
ProtoWrapperTypeOptions::kUnsetNull, manager),
IsOkAndHolds(test::IsCelError(StatusIs(
absl::StatusCode::kNotFound, HasSubstr("unknown_field")))));
}
TEST_P(ProtoMessageTypeAccessorTest, GetFieldNotAMessage) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
auto manager = ProtoMemoryManagerRef(&arena);
MessageWrapper value(static_cast<const google::protobuf::MessageLite*>(nullptr),
nullptr);
EXPECT_THAT(accessor.GetField("int64_value", value,
ProtoWrapperTypeOptions::kUnsetNull, manager),
StatusIs(absl::StatusCode::kInternal));
}
TEST_P(ProtoMessageTypeAccessorTest, GetFieldRepeated) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage example;
example.add_int64_list(10);
example.add_int64_list(20);
MessageWrapper value(&example, nullptr);
ASSERT_OK_AND_ASSIGN(
CelValue result,
accessor.GetField("int64_list", value,
ProtoWrapperTypeOptions::kUnsetNull, manager));
const CelList* held_value;
ASSERT_TRUE(result.GetValue(&held_value)) << result.DebugString();
EXPECT_EQ(held_value->size(), 2);
EXPECT_THAT((*held_value)[0], test::IsCelInt64(10));
EXPECT_THAT((*held_value)[1], test::IsCelInt64(20));
}
TEST_P(ProtoMessageTypeAccessorTest, GetFieldMap) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage example;
(*example.mutable_int64_int32_map())[10] = 20;
MessageWrapper value(&example, nullptr);
ASSERT_OK_AND_ASSIGN(
CelValue result,
accessor.GetField("int64_int32_map", value,
ProtoWrapperTypeOptions::kUnsetNull, manager));
const CelMap* held_value;
ASSERT_TRUE(result.GetValue(&held_value)) << result.DebugString();
EXPECT_EQ(held_value->size(), 1);
EXPECT_THAT((*held_value)[CelValue::CreateInt64(10)],
Optional(test::IsCelInt64(20)));
}
TEST_P(ProtoMessageTypeAccessorTest, GetFieldWrapperType) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage example;
example.mutable_int64_wrapper_value()->set_value(10);
MessageWrapper value(&example, nullptr);
EXPECT_THAT(accessor.GetField("int64_wrapper_value", value,
ProtoWrapperTypeOptions::kUnsetNull, manager),
IsOkAndHolds(test::IsCelInt64(10)));
}
TEST_P(ProtoMessageTypeAccessorTest, GetFieldWrapperTypeUnsetNullUnbox) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage example;
MessageWrapper value(&example, nullptr);
EXPECT_THAT(accessor.GetField("int64_wrapper_value", value,
ProtoWrapperTypeOptions::kUnsetNull, manager),
IsOkAndHolds(test::IsCelNull()));
example.mutable_int64_wrapper_value()->clear_value();
EXPECT_THAT(accessor.GetField("int64_wrapper_value", value,
ProtoWrapperTypeOptions::kUnsetNull, manager),
IsOkAndHolds(test::IsCelInt64(_)));
}
TEST_P(ProtoMessageTypeAccessorTest,
GetFieldWrapperTypeUnsetDefaultValueUnbox) {
google::protobuf::Arena arena;
const LegacyTypeAccessApis& accessor = GetAccessApis();
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage example;
MessageWrapper value(&example, nullptr);
EXPECT_THAT(
accessor.GetField("int64_wrapper_value", value,
ProtoWrapperTypeOptions::kUnsetProtoDefault, manager),
IsOkAndHolds(test::IsCelInt64(_)));
example.mutable_int64_wrapper_value()->clear_value();
EXPECT_THAT(
accessor.GetField("int64_wrapper_value", value,
ProtoWrapperTypeOptions::kUnsetProtoDefault, manager),
IsOkAndHolds(test::IsCelInt64(_)));
}
TEST_P(ProtoMessageTypeAccessorTest, IsEqualTo) {
const LegacyTypeAccessApis& accessor = GetAccessApis();
TestMessage example;
example.mutable_int64_wrapper_value()->set_value(10);
TestMessage example2;
example2.mutable_int64_wrapper_value()->set_value(10);
MessageWrapper value(&example, nullptr);
MessageWrapper value2(&example2, nullptr);
EXPECT_TRUE(accessor.IsEqualTo(value, value2));
EXPECT_TRUE(accessor.IsEqualTo(value2, value));
}
TEST_P(ProtoMessageTypeAccessorTest, IsEqualToSameTypeInequal) {
const LegacyTypeAccessApis& accessor = GetAccessApis();
TestMessage example;
example.mutable_int64_wrapper_value()->set_value(10);
TestMessage example2;
example2.mutable_int64_wrapper_value()->set_value(12);
MessageWrapper value(&example, nullptr);
MessageWrapper value2(&example2, nullptr);
EXPECT_FALSE(accessor.IsEqualTo(value, value2));
EXPECT_FALSE(accessor.IsEqualTo(value2, value));
}
TEST_P(ProtoMessageTypeAccessorTest, IsEqualToDifferentTypeInequal) {
const LegacyTypeAccessApis& accessor = GetAccessApis();
TestMessage example;
example.mutable_int64_wrapper_value()->set_value(10);
Int64Value example2;
example2.set_value(10);
MessageWrapper value(&example, nullptr);
MessageWrapper value2(&example2, nullptr);
EXPECT_FALSE(accessor.IsEqualTo(value, value2));
EXPECT_FALSE(accessor.IsEqualTo(value2, value));
}
TEST_P(ProtoMessageTypeAccessorTest, IsEqualToNonMessageInequal) {
const LegacyTypeAccessApis& accessor = GetAccessApis();
TestMessage example;
example.mutable_int64_wrapper_value()->set_value(10);
TestMessage example2;
example2.mutable_int64_wrapper_value()->set_value(10);
MessageWrapper value(&example, nullptr);
MessageWrapper value2(static_cast<const google::protobuf::MessageLite*>(&example2),
nullptr);
EXPECT_FALSE(accessor.IsEqualTo(value, value2));
EXPECT_FALSE(accessor.IsEqualTo(value2, value));
}
INSTANTIATE_TEST_SUITE_P(GenericAndSpecific, ProtoMessageTypeAccessorTest,
testing::Bool());
TEST(GetGenericProtoTypeInfoInstance, GetTypeName) {
const LegacyTypeInfoApis& info_api = GetGenericProtoTypeInfoInstance();
TestMessage test_message;
CelValue::MessageWrapper wrapped_message(&test_message, nullptr);
EXPECT_EQ(info_api.GetTypename(wrapped_message), test_message.GetTypeName());
}
TEST(GetGenericProtoTypeInfoInstance, DebugString) {
const LegacyTypeInfoApis& info_api = GetGenericProtoTypeInfoInstance();
TestMessage test_message;
test_message.set_string_value("abcd");
CelValue::MessageWrapper wrapped_message(&test_message, nullptr);
EXPECT_EQ(info_api.DebugString(wrapped_message),
test_message.ShortDebugString());
}
TEST(GetGenericProtoTypeInfoInstance, GetAccessApis) {
const LegacyTypeInfoApis& info_api = GetGenericProtoTypeInfoInstance();
TestMessage test_message;
test_message.set_string_value("abcd");
CelValue::MessageWrapper wrapped_message(&test_message, nullptr);
auto* accessor = info_api.GetAccessApis(wrapped_message);
google::protobuf::Arena arena;
auto manager = ProtoMemoryManagerRef(&arena);
ASSERT_OK_AND_ASSIGN(
CelValue result,
accessor->GetField("string_value", wrapped_message,
ProtoWrapperTypeOptions::kUnsetNull, manager));
EXPECT_THAT(result, test::IsCelString("abcd"));
}
TEST(GetGenericProtoTypeInfoInstance, FallbackForNonMessage) {
const LegacyTypeInfoApis& info_api = GetGenericProtoTypeInfoInstance();
TestMessage test_message;
test_message.set_string_value("abcd");
CelValue::MessageWrapper wrapped_message(
static_cast<const google::protobuf::MessageLite*>(&test_message), nullptr);
EXPECT_EQ(info_api.GetTypename(wrapped_message), "<unknown message>");
EXPECT_EQ(info_api.DebugString(wrapped_message), "<unknown message>");
CelValue::MessageWrapper null_message(
static_cast<const google::protobuf::Message*>(nullptr), nullptr);
EXPECT_EQ(info_api.GetTypename(null_message), "<unknown message>");
EXPECT_EQ(info_api.DebugString(null_message), "<unknown message>");
}
TEST(ProtoMessageTypeAdapter, NewInstance) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
ASSERT_OK_AND_ASSIGN(CelValue::MessageWrapper::Builder result,
adapter.NewInstance(manager));
EXPECT_EQ(result.message_ptr()->SerializeAsString(), "");
}
TEST(ProtoMessageTypeAdapter, NewInstanceUnsupportedDescriptor) {
google::protobuf::Arena arena;
google::protobuf::DescriptorPool pool;
google::protobuf::FileDescriptorProto faked_file;
faked_file.set_name("faked.proto");
faked_file.set_syntax("proto3");
faked_file.set_package("google.api.expr.runtime");
auto msg_descriptor = faked_file.add_message_type();
msg_descriptor->set_name("FakeMessage");
pool.BuildFile(faked_file);
ProtoMessageTypeAdapter adapter(
pool.FindMessageTypeByName("google.api.expr.runtime.FakeMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
EXPECT_THAT(
adapter.NewInstance(manager),
StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("FakeMessage")));
}
TEST(ProtoMessageTypeAdapter, DefinesField) {
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
EXPECT_TRUE(adapter.DefinesField("int64_value"));
EXPECT_FALSE(adapter.DefinesField("not_a_field"));
}
TEST(ProtoMessageTypeAdapter, SetFieldSingular) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
ASSERT_OK_AND_ASSIGN(CelValue::MessageWrapper::Builder value,
adapter.NewInstance(manager));
ASSERT_OK(adapter.SetField("int64_value", CelValue::CreateInt64(10), manager,
value));
TestMessage message;
message.set_int64_value(10);
EXPECT_EQ(value.message_ptr()->SerializeAsString(),
message.SerializeAsString());
ASSERT_THAT(adapter.SetField("not_a_field", CelValue::CreateInt64(10),
manager, value),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("field 'not_a_field': not found")));
}
TEST(ProtoMessageTypeAdapter, SetFieldRepeated) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
ContainerBackedListImpl list(
{CelValue::CreateInt64(1), CelValue::CreateInt64(2)});
CelValue value_to_set = CelValue::CreateList(&list);
ASSERT_OK_AND_ASSIGN(CelValue::MessageWrapper::Builder instance,
adapter.NewInstance(manager));
ASSERT_OK(adapter.SetField("int64_list", value_to_set, manager, instance));
TestMessage message;
message.add_int64_list(1);
message.add_int64_list(2);
EXPECT_EQ(instance.message_ptr()->SerializeAsString(),
message.SerializeAsString());
}
TEST(ProtoMessageTypeAdapter, SetFieldNotAField) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
ASSERT_OK_AND_ASSIGN(CelValue::MessageWrapper::Builder instance,
adapter.NewInstance(manager));
ASSERT_THAT(adapter.SetField("not_a_field", CelValue::CreateInt64(10),
manager, instance),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("field 'not_a_field': not found")));
}
TEST(ProtoMessageTypeAdapter, SetFieldWrongType) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
ContainerBackedListImpl list(
{CelValue::CreateInt64(1), CelValue::CreateInt64(2)});
CelValue list_value = CelValue::CreateList(&list);
CelMapBuilder builder;
ASSERT_OK(builder.Add(CelValue::CreateInt64(1), CelValue::CreateInt64(2)));
ASSERT_OK(builder.Add(CelValue::CreateInt64(2), CelValue::CreateInt64(4)));
CelValue map_value = CelValue::CreateMap(&builder);
CelValue int_value = CelValue::CreateInt64(42);
ASSERT_OK_AND_ASSIGN(CelValue::MessageWrapper::Builder instance,
adapter.NewInstance(manager));
EXPECT_THAT(adapter.SetField("int64_value", map_value, manager, instance),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(adapter.SetField("int64_value", list_value, manager, instance),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
adapter.SetField("int64_int32_map", list_value, manager, instance),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(adapter.SetField("int64_int32_map", int_value, manager, instance),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(adapter.SetField("int64_list", int_value, manager, instance),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(adapter.SetField("int64_list", map_value, manager, instance),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(ProtoMessageTypeAdapter, SetFieldNotAMessage) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
CelValue int_value = CelValue::CreateInt64(42);
CelValue::MessageWrapper::Builder instance(
static_cast<google::protobuf::MessageLite*>(nullptr));
EXPECT_THAT(adapter.SetField("int64_value", int_value, manager, instance),
StatusIs(absl::StatusCode::kInternal));
}
TEST(ProtoMessageTypeAdapter, SetFieldNullMessage) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
CelValue int_value = CelValue::CreateInt64(42);
CelValue::MessageWrapper::Builder instance(
static_cast<google::protobuf::Message*>(nullptr));
EXPECT_THAT(adapter.SetField("int64_value", int_value, manager, instance),
StatusIs(absl::StatusCode::kInternal));
}
TEST(ProtoMessageTypeAdapter, AdaptFromWellKnownType) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.protobuf.Int64Value"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
ASSERT_OK_AND_ASSIGN(CelValue::MessageWrapper::Builder instance,
adapter.NewInstance(manager));
ASSERT_OK(
adapter.SetField("value", CelValue::CreateInt64(42), manager, instance));
ASSERT_OK_AND_ASSIGN(CelValue value,
adapter.AdaptFromWellKnownType(manager, instance));
EXPECT_THAT(value, test::IsCelInt64(42));
}
TEST(ProtoMessageTypeAdapter, AdaptFromWellKnownTypeUnspecial) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
ASSERT_OK_AND_ASSIGN(CelValue::MessageWrapper::Builder instance,
adapter.NewInstance(manager));
ASSERT_OK(adapter.SetField("int64_value", CelValue::CreateInt64(42), manager,
instance));
ASSERT_OK_AND_ASSIGN(CelValue value,
adapter.AdaptFromWellKnownType(manager, instance));
EXPECT_THAT(value, test::IsCelMessage(EqualsProto("int64_value: 42")));
}
TEST(ProtoMessageTypeAdapter, AdaptFromWellKnownTypeNotAMessageError) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
CelValue::MessageWrapper::Builder instance(
static_cast<google::protobuf::MessageLite*>(nullptr));
EXPECT_THAT(adapter.AdaptFromWellKnownType(manager, instance),
StatusIs(absl::StatusCode::kInternal));
}
TEST(ProtoMessageTypeAdapter, TypeInfoDebug) {
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
TestMessage message;
message.set_int64_value(42);
EXPECT_THAT(adapter.DebugString(MessageWrapper(&message, &adapter)),
HasSubstr(message.ShortDebugString()));
EXPECT_THAT(adapter.DebugString(MessageWrapper()),
HasSubstr("<unknown message>"));
}
TEST(ProtoMessageTypeAdapter, TypeInfoName) {
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
EXPECT_EQ(adapter.GetTypename(MessageWrapper()),
"google.api.expr.runtime.TestMessage");
}
TEST(ProtoMessageTypeAdapter, FindFieldFound) {
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
EXPECT_THAT(
adapter.FindFieldByName("int64_value"),
Optional(Truly([](const LegacyTypeInfoApis::FieldDescription& desc) {
return desc.name == "int64_value" && desc.number == 2;
})))
<< "expected field int64_value: 2";
}
TEST(ProtoMessageTypeAdapter, FindFieldNotFound) {
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
EXPECT_EQ(adapter.FindFieldByName("foo_not_a_field"), absl::nullopt);
}
TEST(ProtoMessageTypeAdapter, TypeInfoMutator) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
const LegacyTypeMutationApis* api = adapter.GetMutationApis(MessageWrapper());
ASSERT_NE(api, nullptr);
ASSERT_OK_AND_ASSIGN(MessageWrapper::Builder builder,
api->NewInstance(manager));
EXPECT_NE(dynamic_cast<TestMessage*>(builder.message_ptr()), nullptr);
}
TEST(ProtoMessageTypeAdapter, TypeInfoAccessor) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
message.set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
EXPECT_THAT(api->GetField("int64_value", wrapped,
ProtoWrapperTypeOptions::kUnsetNull, manager),
IsOkAndHolds(test::IsCelInt64(42)));
}
TEST(ProtoMessageTypeAdapter, Qualify) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
message.mutable_message_value()->set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{12, "message_value"},
      cel::FieldSpecifier{2, "int64_value"}};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value, test::IsCelInt64(42))));
}
TEST(ProtoMessageTypeAdapter, QualifyDynamicFieldAccessUnsupported) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
message.mutable_message_value()->set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{12, "message_value"},
      cel::AttributeQualifier::OfString("int64_value")};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      StatusIs(absl::StatusCode::kUnimplemented));
}
TEST(ProtoMessageTypeAdapter, QualifyNoSuchField) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
message.mutable_message_value()->set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{12, "message_value"},
      cel::FieldSpecifier{99, "not_a_field"},
      cel::FieldSpecifier{2, "int64_value"}};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(
          &LegacyQualifyResult::value,
          test::IsCelError(StatusIs(absl::StatusCode::kNotFound,
                                    HasSubstr("no_such_field"))))));
}
TEST(ProtoMessageTypeAdapter, QualifyHasNoSuchField) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
message.mutable_message_value()->set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{12, "message_value"},
      cel::FieldSpecifier{99, "not_a_field"}};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/true, manager),
      IsOkAndHolds(Field(
          &LegacyQualifyResult::value,
          test::IsCelError(StatusIs(absl::StatusCode::kNotFound,
                                    HasSubstr("no_such_field"))))));
}
TEST(ProtoMessageTypeAdapter, QualifyNoSuchFieldLeaf) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
message.mutable_message_value()->set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{12, "message_value"},
      cel::FieldSpecifier{99, "not_a_field"}};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(
          &LegacyQualifyResult::value,
          test::IsCelError(StatusIs(absl::StatusCode::kNotFound,
                                    HasSubstr("no_such_field"))))));
}
TEST(ProtoMessageTypeAdapter, QualifyMapTraversalSupport) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
(*message.mutable_string_message_map())["@key"].set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{210, "string_message_map"},
      cel::AttributeQualifier::OfString("@key"),
      cel::FieldSpecifier{2, "int64_value"}};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value, test::IsCelInt64(42))));
}
TEST(ProtoMessageTypeAdapter, TypedFieldAccessOnMapUnsupported) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
(*message.mutable_string_message_map())["@key"].set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{210, "string_message_map"},
      cel::FieldSpecifier{2, "value"}, cel::FieldSpecifier{2, "int64_value"}};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      StatusIs(absl::StatusCode::kUnimplemented));
}
TEST(ProtoMessageTypeAdapter, QualifyMapTraversalWrongKeyType) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
(*message.mutable_string_message_map())["@key"].set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{210, "string_message_map"},
      cel::AttributeQualifier::OfInt(0), cel::FieldSpecifier{2, "int64_value"}};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value,
                         test::IsCelError(StatusIs(
                             absl::StatusCode::kInvalidArgument,
                             HasSubstr("Invalid map key type"))))));
}
TEST(ProtoMessageTypeAdapter, QualifyMapTraversalHasWrongKeyType) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
(*message.mutable_string_message_map())["@key"].set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{210, "string_message_map"},
      cel::AttributeQualifier::OfInt(0)};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/true, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value,
                         test::IsCelError(StatusIs(
                             absl::StatusCode::kUnknown,
                             HasSubstr("No matching overloads"))))));
}
TEST(ProtoMessageTypeAdapter, QualifyMapTraversalSupportNoSuchKey) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
(*message.mutable_string_message_map())["@key"].set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{210, "string_message_map"},
      cel::AttributeQualifier::OfString("bad_key"),
      cel::FieldSpecifier{2, "int64_value"}};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(
          &LegacyQualifyResult::value,
          test::IsCelError(StatusIs(absl::StatusCode::kNotFound,
                                    HasSubstr("Key not found"))))));
}
TEST(ProtoMessageTypeAdapter, QualifyMapTraversalInt32Key) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
(*message.mutable_int32_int32_map())[0] = 42;
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{205, "int32_int32_map"},
      cel::AttributeQualifier::OfInt(0)};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value, test::IsCelInt64(42))));
}
TEST(ProtoMessageTypeAdapter, QualifyMapTraversalIntOutOfRange) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
(*message.mutable_int32_int32_map())[0] = 42;
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{205, "int32_int32_map"},
      cel::AttributeQualifier::OfInt(1LL << 32)};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(
          &LegacyQualifyResult::value,
          test::IsCelError(StatusIs(absl::StatusCode::kOutOfRange,
                                    HasSubstr("integer overflow"))))));
}
TEST(ProtoMessageTypeAdapter, QualifyMapTraversalUint32Key) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
(*message.mutable_uint32_uint32_map())[0] = 42;
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{206, "uint32_uint32_map"},
      cel::AttributeQualifier::OfUint(0)};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value, test::IsCelUint64(42))));
}
TEST(ProtoMessageTypeAdapter, QualifyMapTraversalUintOutOfRange) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
(*message.mutable_uint32_uint32_map())[0] = 42;
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{206, "uint32_uint32_map"},
      cel::AttributeQualifier::OfUint(1LL << 32)};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(
          &LegacyQualifyResult::value,
          test::IsCelError(StatusIs(absl::StatusCode::kOutOfRange,
                                    HasSubstr("integer overflow"))))));
}
TEST(ProtoMessageTypeAdapter, QualifyMapTraversalUnexpectedFieldAccess) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
(*message.mutable_string_message_map())["@key"].set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{210, "string_message_map"},
      cel::FieldSpecifier{0, "field_like_key"}};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      StatusIs(absl::StatusCode::kUnimplemented, _));
}
TEST(ProtoMessageTypeAdapter, UntypedQualifiersNotYetSupported) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
(*message.mutable_string_message_map())["@key"].set_int64_value(42);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::AttributeQualifier::OfString("string_message_map"),
      cel::AttributeQualifier::OfString("@key"),
      cel::AttributeQualifier::OfString("int64_value")};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      StatusIs(absl::StatusCode::kUnimplemented, _));
}
TEST(ProtoMessageTypeAdapter, QualifyRepeatedIndexWrongType) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
message.add_message_list()->add_int64_list(1);
message.add_message_list()->add_int64_list(2);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{112, "message_list"},
      cel::AttributeQualifier::OfBool(false),
      cel::FieldSpecifier{102, "int64_list"},
      cel::AttributeQualifier::OfInt(0)};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value,
                         test::IsCelError(StatusIs(
                             absl::StatusCode::kUnknown,
                             HasSubstr("No matching overloads found"))))));
}
TEST(ProtoMessageTypeAdapter, QualifyRepeatedTypeCheckError) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
message.add_int64_list(1);
message.add_int64_list(2);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{102, "int64_list"}, cel::AttributeQualifier::OfInt(0),
      cel::AttributeQualifier::OfInt(1)};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      StatusIs(absl::StatusCode::kInternal,
               HasSubstr("Unexpected qualify intermediate type")));
}
TEST(ProtoMessageTypeAdapter, QualifyRepeatedLeaf) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
auto* nested = message.mutable_message_value();
nested->add_int64_list(1);
nested->add_int64_list(2);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{12, "message_value"},
      cel::FieldSpecifier{102, "int64_list"},
  };
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value,
                         test::IsCelList(ElementsAre(test::IsCelInt64(1),
                                                     test::IsCelInt64(2))))));
}
TEST(ProtoMessageTypeAdapter, QualifyRepeatedIndexLeaf) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
auto* nested = message.mutable_message_value();
nested->add_int64_list(1);
nested->add_int64_list(2);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{12, "message_value"},
      cel::FieldSpecifier{102, "int64_list"},
      cel::AttributeQualifier::OfInt(1)};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value, test::IsCelInt64(2))));
}
TEST(ProtoMessageTypeAdapter, QualifyRepeatedIndexLeafOutOfBounds) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
auto* nested = message.mutable_message_value();
nested->add_int64_list(1);
nested->add_int64_list(2);
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{12, "message_value"},
      cel::FieldSpecifier{102, "int64_list"},
      cel::AttributeQualifier::OfInt(2)};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value,
                         test::IsCelError(StatusIs(
                             absl::StatusCode::kInvalidArgument,
                             HasSubstr("index out of bounds"))))));
}
TEST(ProtoMessageTypeAdapter, QualifyMapLeaf) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
auto* nested_map =
message.mutable_message_value()->mutable_string_int32_map();
(*nested_map)["@key"] = 42;
(*nested_map)["@key2"] = -42;
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{12, "message_value"},
      cel::FieldSpecifier{203, "string_int32_map"},
  };
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(
          &LegacyQualifyResult::value, Truly([](const CelValue& v) {
            return v.IsMap() && v.MapOrDie()->size() == 2;
          }))));
}
TEST(ProtoMessageTypeAdapter, QualifyMapIndexLeaf) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
auto* nested_map =
message.mutable_message_value()->mutable_string_int32_map();
(*nested_map)["@key"] = 42;
(*nested_map)["@key2"] = -42;
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{12, "message_value"},
      cel::FieldSpecifier{203, "string_int32_map"},
      cel::AttributeQualifier::OfString("@key")};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value, test::IsCelInt64(42))));
}
TEST(ProtoMessageTypeAdapter, QualifyMapIndexLeafWrongType) {
google::protobuf::Arena arena;
ProtoMessageTypeAdapter adapter(
google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
"google.api.expr.runtime.TestMessage"),
google::protobuf::MessageFactory::generated_factory());
auto manager = ProtoMemoryManagerRef(&arena);
TestMessage message;
auto* nested_map =
message.mutable_message_value()->mutable_string_int32_map();
(*nested_map)["@key"] = 42;
(*nested_map)["@key2"] = -42;
CelValue::MessageWrapper wrapped(&message, &adapter);
const LegacyTypeAccessApis* api = adapter.GetAccessApis(MessageWrapper());
ASSERT_NE(api, nullptr);
  std::vector<cel::SelectQualifier> qualifiers{
      cel::FieldSpecifier{12, "message_value"},
      cel::FieldSpecifier{203, "string_int32_map"},
      cel::AttributeQualifier::OfInt(0)};
  EXPECT_THAT(
      api->Qualify(qualifiers, wrapped, /*presence_test=*/false, manager),
      IsOkAndHolds(Field(&LegacyQualifyResult::value,
                         test::IsCelError(StatusIs(
                             absl::StatusCode::kInvalidArgument,
                             HasSubstr("Invalid map key type"))))));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/proto_message_type_adapter.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/proto_message_type_adapter_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
1aab19fb-dac4-4d6e-935a-6fba6c4f96d3 | cpp | google/quiche | http2_constants | quiche/http2/http2_constants.cc | quiche/http2/http2_constants_test.cc | #include "quiche/http2/http2_constants.h"
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
std::string Http2FrameTypeToString(Http2FrameType v) {
switch (v) {
case Http2FrameType::DATA:
return "DATA";
case Http2FrameType::HEADERS:
return "HEADERS";
case Http2FrameType::PRIORITY:
return "PRIORITY";
case Http2FrameType::RST_STREAM:
return "RST_STREAM";
case Http2FrameType::SETTINGS:
return "SETTINGS";
case Http2FrameType::PUSH_PROMISE:
return "PUSH_PROMISE";
case Http2FrameType::PING:
return "PING";
case Http2FrameType::GOAWAY:
return "GOAWAY";
case Http2FrameType::WINDOW_UPDATE:
return "WINDOW_UPDATE";
case Http2FrameType::CONTINUATION:
return "CONTINUATION";
case Http2FrameType::ALTSVC:
return "ALTSVC";
case Http2FrameType::PRIORITY_UPDATE:
return "PRIORITY_UPDATE";
}
return absl::StrCat("UnknownFrameType(", static_cast<int>(v), ")");
}
std::string Http2FrameTypeToString(uint8_t v) {
return Http2FrameTypeToString(static_cast<Http2FrameType>(v));
}
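// Renders the flag bits defined for `type` as a '|'-separated list. The same
// bit can name different flags depending on the frame type (0x01 is
// END_STREAM on DATA/HEADERS but ACK on SETTINGS/PING); any bits that remain
// after the known flags are consumed are rendered in hex.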
std::string Http2FrameFlagsToString(Http2FrameType type, uint8_t flags) {
std::string s;
auto append_and_clear = [&s, &flags](absl::string_view v, uint8_t bit) {
if (!s.empty()) {
s.push_back('|');
}
absl::StrAppend(&s, v);
flags ^= bit;
};
if (flags & 0x01) {
if (type == Http2FrameType::DATA || type == Http2FrameType::HEADERS) {
append_and_clear("END_STREAM", Http2FrameFlag::END_STREAM);
} else if (type == Http2FrameType::SETTINGS ||
type == Http2FrameType::PING) {
append_and_clear("ACK", Http2FrameFlag::ACK);
}
}
if (flags & 0x04) {
if (type == Http2FrameType::HEADERS ||
type == Http2FrameType::PUSH_PROMISE ||
type == Http2FrameType::CONTINUATION) {
append_and_clear("END_HEADERS", Http2FrameFlag::END_HEADERS);
}
}
if (flags & 0x08) {
if (type == Http2FrameType::DATA || type == Http2FrameType::HEADERS ||
type == Http2FrameType::PUSH_PROMISE) {
append_and_clear("PADDED", Http2FrameFlag::PADDED);
}
}
if (flags & 0x20) {
if (type == Http2FrameType::HEADERS) {
append_and_clear("PRIORITY", Http2FrameFlag::PRIORITY);
}
}
if (flags != 0) {
append_and_clear(absl::StrFormat("0x%02x", flags), flags);
}
QUICHE_DCHECK_EQ(0, flags);
return s;
}
std::string Http2FrameFlagsToString(uint8_t type, uint8_t flags) {
return Http2FrameFlagsToString(static_cast<Http2FrameType>(type), flags);
}
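// Error code names as defined in RFC 7540 Section 7; unrecognized values fall
// through to a hex rendering.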
std::string Http2ErrorCodeToString(uint32_t v) {
switch (v) {
case 0x0:
return "NO_ERROR";
case 0x1:
return "PROTOCOL_ERROR";
case 0x2:
return "INTERNAL_ERROR";
case 0x3:
return "FLOW_CONTROL_ERROR";
case 0x4:
return "SETTINGS_TIMEOUT";
case 0x5:
return "STREAM_CLOSED";
case 0x6:
return "FRAME_SIZE_ERROR";
case 0x7:
return "REFUSED_STREAM";
case 0x8:
return "CANCEL";
case 0x9:
return "COMPRESSION_ERROR";
case 0xa:
return "CONNECT_ERROR";
case 0xb:
return "ENHANCE_YOUR_CALM";
case 0xc:
return "INADEQUATE_SECURITY";
case 0xd:
return "HTTP_1_1_REQUIRED";
}
return absl::StrCat("UnknownErrorCode(0x", absl::Hex(v), ")");
}
std::string Http2ErrorCodeToString(Http2ErrorCode v) {
return Http2ErrorCodeToString(static_cast<uint32_t>(v));
}
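// Settings identifiers as defined in RFC 7540 Section 6.5.2; unrecognized
// values fall through to a hex rendering.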
std::string Http2SettingsParameterToString(uint32_t v) {
switch (v) {
case 0x1:
return "HEADER_TABLE_SIZE";
case 0x2:
return "ENABLE_PUSH";
case 0x3:
return "MAX_CONCURRENT_STREAMS";
case 0x4:
return "INITIAL_WINDOW_SIZE";
case 0x5:
return "MAX_FRAME_SIZE";
case 0x6:
return "MAX_HEADER_LIST_SIZE";
}
return absl::StrCat("UnknownSettingsParameter(0x", absl::Hex(v), ")");
}
std::string Http2SettingsParameterToString(Http2SettingsParameter v) {
return Http2SettingsParameterToString(static_cast<uint32_t>(v));
}
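// Header names rejected in HTTP/2: the connection-specific HTTP/1 fields
// barred by RFC 7540 Section 8.1.2.2, "host" (whose role is taken by the
// ":authority" pseudo-header), and the empty name.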
constexpr char const* kHttp2InvalidHeaderNames[] = {
"connection", "host", "keep-alive", "proxy-connection",
"transfer-encoding", "",
};
const InvalidHeaderSet& GetInvalidHttp2HeaderSet() {
static const auto* invalid_header_set =
new InvalidHeaderSet(std::begin(http2::kHttp2InvalidHeaderNames),
std::end(http2::kHttp2InvalidHeaderNames));
return *invalid_header_set;
}
} | #include "quiche/http2/http2_constants.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
class Http2ConstantsTest : public quiche::test::QuicheTest {};
TEST(Http2ConstantsTest, Http2FrameType) {
EXPECT_EQ(Http2FrameType::DATA, static_cast<Http2FrameType>(0));
EXPECT_EQ(Http2FrameType::HEADERS, static_cast<Http2FrameType>(1));
EXPECT_EQ(Http2FrameType::PRIORITY, static_cast<Http2FrameType>(2));
EXPECT_EQ(Http2FrameType::RST_STREAM, static_cast<Http2FrameType>(3));
EXPECT_EQ(Http2FrameType::SETTINGS, static_cast<Http2FrameType>(4));
EXPECT_EQ(Http2FrameType::PUSH_PROMISE, static_cast<Http2FrameType>(5));
EXPECT_EQ(Http2FrameType::PING, static_cast<Http2FrameType>(6));
EXPECT_EQ(Http2FrameType::GOAWAY, static_cast<Http2FrameType>(7));
EXPECT_EQ(Http2FrameType::WINDOW_UPDATE, static_cast<Http2FrameType>(8));
EXPECT_EQ(Http2FrameType::CONTINUATION, static_cast<Http2FrameType>(9));
EXPECT_EQ(Http2FrameType::ALTSVC, static_cast<Http2FrameType>(10));
}
TEST(Http2ConstantsTest, Http2FrameTypeToString) {
EXPECT_EQ("DATA", Http2FrameTypeToString(Http2FrameType::DATA));
EXPECT_EQ("HEADERS", Http2FrameTypeToString(Http2FrameType::HEADERS));
EXPECT_EQ("PRIORITY", Http2FrameTypeToString(Http2FrameType::PRIORITY));
EXPECT_EQ("RST_STREAM", Http2FrameTypeToString(Http2FrameType::RST_STREAM));
EXPECT_EQ("SETTINGS", Http2FrameTypeToString(Http2FrameType::SETTINGS));
EXPECT_EQ("PUSH_PROMISE",
Http2FrameTypeToString(Http2FrameType::PUSH_PROMISE));
EXPECT_EQ("PING", Http2FrameTypeToString(Http2FrameType::PING));
EXPECT_EQ("GOAWAY", Http2FrameTypeToString(Http2FrameType::GOAWAY));
EXPECT_EQ("WINDOW_UPDATE",
Http2FrameTypeToString(Http2FrameType::WINDOW_UPDATE));
EXPECT_EQ("CONTINUATION",
Http2FrameTypeToString(Http2FrameType::CONTINUATION));
EXPECT_EQ("ALTSVC", Http2FrameTypeToString(Http2FrameType::ALTSVC));
EXPECT_EQ("DATA", Http2FrameTypeToString(0));
EXPECT_EQ("HEADERS", Http2FrameTypeToString(1));
EXPECT_EQ("PRIORITY", Http2FrameTypeToString(2));
EXPECT_EQ("RST_STREAM", Http2FrameTypeToString(3));
EXPECT_EQ("SETTINGS", Http2FrameTypeToString(4));
EXPECT_EQ("PUSH_PROMISE", Http2FrameTypeToString(5));
EXPECT_EQ("PING", Http2FrameTypeToString(6));
EXPECT_EQ("GOAWAY", Http2FrameTypeToString(7));
EXPECT_EQ("WINDOW_UPDATE", Http2FrameTypeToString(8));
EXPECT_EQ("CONTINUATION", Http2FrameTypeToString(9));
EXPECT_EQ("ALTSVC", Http2FrameTypeToString(10));
EXPECT_EQ("UnknownFrameType(99)", Http2FrameTypeToString(99));
}
TEST(Http2ConstantsTest, Http2FrameFlag) {
EXPECT_EQ(Http2FrameFlag::END_STREAM, static_cast<Http2FrameFlag>(0x01));
EXPECT_EQ(Http2FrameFlag::ACK, static_cast<Http2FrameFlag>(0x01));
EXPECT_EQ(Http2FrameFlag::END_HEADERS, static_cast<Http2FrameFlag>(0x04));
EXPECT_EQ(Http2FrameFlag::PADDED, static_cast<Http2FrameFlag>(0x08));
EXPECT_EQ(Http2FrameFlag::PRIORITY, static_cast<Http2FrameFlag>(0x20));
EXPECT_EQ(Http2FrameFlag::END_STREAM, 0x01);
EXPECT_EQ(Http2FrameFlag::ACK, 0x01);
EXPECT_EQ(Http2FrameFlag::END_HEADERS, 0x04);
EXPECT_EQ(Http2FrameFlag::PADDED, 0x08);
EXPECT_EQ(Http2FrameFlag::PRIORITY, 0x20);
}
TEST(Http2ConstantsTest, Http2FrameFlagsToString) {
EXPECT_EQ("END_STREAM", Http2FrameFlagsToString(Http2FrameType::DATA,
Http2FrameFlag::END_STREAM));
EXPECT_EQ("END_STREAM",
Http2FrameFlagsToString(Http2FrameType::HEADERS, 0x01));
EXPECT_EQ("ACK", Http2FrameFlagsToString(Http2FrameType::SETTINGS,
Http2FrameFlag::ACK));
EXPECT_EQ("ACK", Http2FrameFlagsToString(Http2FrameType::PING, 0x01));
EXPECT_EQ("0x02", Http2FrameFlagsToString(0xff, 0x02));
EXPECT_EQ("END_HEADERS",
Http2FrameFlagsToString(Http2FrameType::HEADERS,
Http2FrameFlag::END_HEADERS));
EXPECT_EQ("END_HEADERS",
Http2FrameFlagsToString(Http2FrameType::PUSH_PROMISE, 0x04));
EXPECT_EQ("END_HEADERS", Http2FrameFlagsToString(0x09, 0x04));
EXPECT_EQ("0x04", Http2FrameFlagsToString(0xff, 0x04));
EXPECT_EQ("PADDED", Http2FrameFlagsToString(Http2FrameType::DATA,
Http2FrameFlag::PADDED));
EXPECT_EQ("PADDED", Http2FrameFlagsToString(Http2FrameType::HEADERS, 0x08));
EXPECT_EQ("PADDED", Http2FrameFlagsToString(0x05, 0x08));
EXPECT_EQ("0x08", Http2FrameFlagsToString(0xff, Http2FrameFlag::PADDED));
EXPECT_EQ("0x10", Http2FrameFlagsToString(Http2FrameType::SETTINGS, 0x10));
EXPECT_EQ("PRIORITY", Http2FrameFlagsToString(Http2FrameType::HEADERS, 0x20));
EXPECT_EQ("0x20",
Http2FrameFlagsToString(Http2FrameType::PUSH_PROMISE, 0x20));
EXPECT_EQ("0x40", Http2FrameFlagsToString(0xff, 0x40));
EXPECT_EQ("0x80", Http2FrameFlagsToString(0xff, 0x80));
EXPECT_EQ("END_STREAM|PADDED|0xf6",
Http2FrameFlagsToString(Http2FrameType::DATA, 0xff));
EXPECT_EQ("END_STREAM|END_HEADERS|PADDED|PRIORITY|0xd2",
Http2FrameFlagsToString(Http2FrameType::HEADERS, 0xff));
EXPECT_EQ("0xff", Http2FrameFlagsToString(Http2FrameType::PRIORITY, 0xff));
EXPECT_EQ("0xff", Http2FrameFlagsToString(Http2FrameType::RST_STREAM, 0xff));
EXPECT_EQ("ACK|0xfe",
Http2FrameFlagsToString(Http2FrameType::SETTINGS, 0xff));
EXPECT_EQ("END_HEADERS|PADDED|0xf3",
Http2FrameFlagsToString(Http2FrameType::PUSH_PROMISE, 0xff));
EXPECT_EQ("ACK|0xfe", Http2FrameFlagsToString(Http2FrameType::PING, 0xff));
EXPECT_EQ("0xff", Http2FrameFlagsToString(Http2FrameType::GOAWAY, 0xff));
EXPECT_EQ("0xff",
Http2FrameFlagsToString(Http2FrameType::WINDOW_UPDATE, 0xff));
EXPECT_EQ("END_HEADERS|0xfb",
Http2FrameFlagsToString(Http2FrameType::CONTINUATION, 0xff));
EXPECT_EQ("0xff", Http2FrameFlagsToString(Http2FrameType::ALTSVC, 0xff));
EXPECT_EQ("0xff", Http2FrameFlagsToString(0xff, 0xff));
}
TEST(Http2ConstantsTest, Http2ErrorCode) {
EXPECT_EQ(Http2ErrorCode::HTTP2_NO_ERROR, static_cast<Http2ErrorCode>(0x0));
EXPECT_EQ(Http2ErrorCode::PROTOCOL_ERROR, static_cast<Http2ErrorCode>(0x1));
EXPECT_EQ(Http2ErrorCode::INTERNAL_ERROR, static_cast<Http2ErrorCode>(0x2));
EXPECT_EQ(Http2ErrorCode::FLOW_CONTROL_ERROR,
static_cast<Http2ErrorCode>(0x3));
EXPECT_EQ(Http2ErrorCode::SETTINGS_TIMEOUT, static_cast<Http2ErrorCode>(0x4));
EXPECT_EQ(Http2ErrorCode::STREAM_CLOSED, static_cast<Http2ErrorCode>(0x5));
EXPECT_EQ(Http2ErrorCode::FRAME_SIZE_ERROR, static_cast<Http2ErrorCode>(0x6));
EXPECT_EQ(Http2ErrorCode::REFUSED_STREAM, static_cast<Http2ErrorCode>(0x7));
EXPECT_EQ(Http2ErrorCode::CANCEL, static_cast<Http2ErrorCode>(0x8));
EXPECT_EQ(Http2ErrorCode::COMPRESSION_ERROR,
static_cast<Http2ErrorCode>(0x9));
EXPECT_EQ(Http2ErrorCode::CONNECT_ERROR, static_cast<Http2ErrorCode>(0xa));
EXPECT_EQ(Http2ErrorCode::ENHANCE_YOUR_CALM,
static_cast<Http2ErrorCode>(0xb));
EXPECT_EQ(Http2ErrorCode::INADEQUATE_SECURITY,
static_cast<Http2ErrorCode>(0xc));
EXPECT_EQ(Http2ErrorCode::HTTP_1_1_REQUIRED,
static_cast<Http2ErrorCode>(0xd));
}
TEST(Http2ConstantsTest, Http2ErrorCodeToString) {
EXPECT_EQ("NO_ERROR", Http2ErrorCodeToString(Http2ErrorCode::HTTP2_NO_ERROR));
EXPECT_EQ("NO_ERROR", Http2ErrorCodeToString(0x0));
EXPECT_EQ("PROTOCOL_ERROR",
Http2ErrorCodeToString(Http2ErrorCode::PROTOCOL_ERROR));
EXPECT_EQ("PROTOCOL_ERROR", Http2ErrorCodeToString(0x1));
EXPECT_EQ("INTERNAL_ERROR",
Http2ErrorCodeToString(Http2ErrorCode::INTERNAL_ERROR));
EXPECT_EQ("INTERNAL_ERROR", Http2ErrorCodeToString(0x2));
EXPECT_EQ("FLOW_CONTROL_ERROR",
Http2ErrorCodeToString(Http2ErrorCode::FLOW_CONTROL_ERROR));
EXPECT_EQ("FLOW_CONTROL_ERROR", Http2ErrorCodeToString(0x3));
EXPECT_EQ("SETTINGS_TIMEOUT",
Http2ErrorCodeToString(Http2ErrorCode::SETTINGS_TIMEOUT));
EXPECT_EQ("SETTINGS_TIMEOUT", Http2ErrorCodeToString(0x4));
EXPECT_EQ("STREAM_CLOSED",
Http2ErrorCodeToString(Http2ErrorCode::STREAM_CLOSED));
EXPECT_EQ("STREAM_CLOSED", Http2ErrorCodeToString(0x5));
EXPECT_EQ("FRAME_SIZE_ERROR",
Http2ErrorCodeToString(Http2ErrorCode::FRAME_SIZE_ERROR));
EXPECT_EQ("FRAME_SIZE_ERROR", Http2ErrorCodeToString(0x6));
EXPECT_EQ("REFUSED_STREAM",
Http2ErrorCodeToString(Http2ErrorCode::REFUSED_STREAM));
EXPECT_EQ("REFUSED_STREAM", Http2ErrorCodeToString(0x7));
EXPECT_EQ("CANCEL", Http2ErrorCodeToString(Http2ErrorCode::CANCEL));
EXPECT_EQ("CANCEL", Http2ErrorCodeToString(0x8));
EXPECT_EQ("COMPRESSION_ERROR",
Http2ErrorCodeToString(Http2ErrorCode::COMPRESSION_ERROR));
EXPECT_EQ("COMPRESSION_ERROR", Http2ErrorCodeToString(0x9));
EXPECT_EQ("CONNECT_ERROR",
Http2ErrorCodeToString(Http2ErrorCode::CONNECT_ERROR));
EXPECT_EQ("CONNECT_ERROR", Http2ErrorCodeToString(0xa));
EXPECT_EQ("ENHANCE_YOUR_CALM",
Http2ErrorCodeToString(Http2ErrorCode::ENHANCE_YOUR_CALM));
EXPECT_EQ("ENHANCE_YOUR_CALM", Http2ErrorCodeToString(0xb));
EXPECT_EQ("INADEQUATE_SECURITY",
Http2ErrorCodeToString(Http2ErrorCode::INADEQUATE_SECURITY));
EXPECT_EQ("INADEQUATE_SECURITY", Http2ErrorCodeToString(0xc));
EXPECT_EQ("HTTP_1_1_REQUIRED",
Http2ErrorCodeToString(Http2ErrorCode::HTTP_1_1_REQUIRED));
EXPECT_EQ("HTTP_1_1_REQUIRED", Http2ErrorCodeToString(0xd));
EXPECT_EQ("UnknownErrorCode(0x123)", Http2ErrorCodeToString(0x123));
}
TEST(Http2ConstantsTest, Http2SettingsParameter) {
EXPECT_EQ(Http2SettingsParameter::HEADER_TABLE_SIZE,
static_cast<Http2SettingsParameter>(0x1));
EXPECT_EQ(Http2SettingsParameter::ENABLE_PUSH,
static_cast<Http2SettingsParameter>(0x2));
EXPECT_EQ(Http2SettingsParameter::MAX_CONCURRENT_STREAMS,
static_cast<Http2SettingsParameter>(0x3));
EXPECT_EQ(Http2SettingsParameter::INITIAL_WINDOW_SIZE,
static_cast<Http2SettingsParameter>(0x4));
EXPECT_EQ(Http2SettingsParameter::MAX_FRAME_SIZE,
static_cast<Http2SettingsParameter>(0x5));
EXPECT_EQ(Http2SettingsParameter::MAX_HEADER_LIST_SIZE,
static_cast<Http2SettingsParameter>(0x6));
EXPECT_TRUE(IsSupportedHttp2SettingsParameter(
Http2SettingsParameter::HEADER_TABLE_SIZE));
EXPECT_TRUE(
IsSupportedHttp2SettingsParameter(Http2SettingsParameter::ENABLE_PUSH));
EXPECT_TRUE(IsSupportedHttp2SettingsParameter(
Http2SettingsParameter::MAX_CONCURRENT_STREAMS));
EXPECT_TRUE(IsSupportedHttp2SettingsParameter(
Http2SettingsParameter::INITIAL_WINDOW_SIZE));
EXPECT_TRUE(IsSupportedHttp2SettingsParameter(
Http2SettingsParameter::MAX_FRAME_SIZE));
EXPECT_TRUE(IsSupportedHttp2SettingsParameter(
Http2SettingsParameter::MAX_HEADER_LIST_SIZE));
EXPECT_FALSE(IsSupportedHttp2SettingsParameter(
static_cast<Http2SettingsParameter>(0)));
EXPECT_FALSE(IsSupportedHttp2SettingsParameter(
static_cast<Http2SettingsParameter>(7)));
}
TEST(Http2ConstantsTest, Http2SettingsParameterToString) {
EXPECT_EQ("HEADER_TABLE_SIZE",
Http2SettingsParameterToString(
Http2SettingsParameter::HEADER_TABLE_SIZE));
EXPECT_EQ("HEADER_TABLE_SIZE", Http2SettingsParameterToString(0x1));
EXPECT_EQ("ENABLE_PUSH", Http2SettingsParameterToString(
Http2SettingsParameter::ENABLE_PUSH));
EXPECT_EQ("ENABLE_PUSH", Http2SettingsParameterToString(0x2));
EXPECT_EQ("MAX_CONCURRENT_STREAMS",
Http2SettingsParameterToString(
Http2SettingsParameter::MAX_CONCURRENT_STREAMS));
EXPECT_EQ("MAX_CONCURRENT_STREAMS", Http2SettingsParameterToString(0x3));
EXPECT_EQ("INITIAL_WINDOW_SIZE",
Http2SettingsParameterToString(
Http2SettingsParameter::INITIAL_WINDOW_SIZE));
EXPECT_EQ("INITIAL_WINDOW_SIZE", Http2SettingsParameterToString(0x4));
EXPECT_EQ("MAX_FRAME_SIZE", Http2SettingsParameterToString(
Http2SettingsParameter::MAX_FRAME_SIZE));
EXPECT_EQ("MAX_FRAME_SIZE", Http2SettingsParameterToString(0x5));
EXPECT_EQ("MAX_HEADER_LIST_SIZE",
Http2SettingsParameterToString(
Http2SettingsParameter::MAX_HEADER_LIST_SIZE));
EXPECT_EQ("MAX_HEADER_LIST_SIZE", Http2SettingsParameterToString(0x6));
EXPECT_EQ("UnknownSettingsParameter(0x123)",
Http2SettingsParameterToString(0x123));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/http2_constants.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/http2_constants_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
277ee37f-d713-49c2-8335-47aaf3e46bf8 | cpp | tensorflow/tensorflow | topk_rewriter | third_party/xla/xla/service/topk_rewriter.cc | third_party/xla/xla/service/topk_rewriter_test.cc | #include "xla/service/topk_rewriter.h"
#include <array>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/primitive_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace m = match;
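// Converts an XlaComputation produced by XlaBuilder into an HloComputation
// owned by the same HloModule as `sibling_computation`.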
static absl::StatusOr<HloComputation*> BuilderToHloComputation(
XlaComputation& comp, HloComputation* sibling_computation) {
TF_ASSIGN_OR_RETURN(ProgramShape program_shape, comp.GetProgramShape());
HloModuleConfig config(program_shape);
TF_ASSIGN_OR_RETURN(auto new_module,
HloModule::CreateFromProto(comp.proto(), config));
HloModule* dest_module = sibling_computation->parent();
HloCloneContext context(dest_module);
return dest_module->DeepCloneComputation(new_module->entry_computation(),
&context);
}
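// Returns true if `comp` implements a NaN-safe, total-order "greater than".
// Clients typically obtain such an ordering by bitcasting floats to
// same-width integers and remapping the negative range; the matchers below
// cover the bitcast/subtract form, the XOR form, plain compares, and the
// select-based stable-sort form, for BF16/F32/S32/U32 inputs.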
static bool IsNanSafeGt(HloComputation* comp) {
namespace m = match;
auto match_bitcast_f32 = [](int64_t parameter_number) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
return m::Select(
m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(
m::Subtract(m::ConstantScalar(std::numeric_limits<int32_t>::max()),
param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
auto match_bitcast_f32_with_convert = [](int64_t parameter_number) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
auto max_u32 =
m::Convert(m::ConstantScalar(std::numeric_limits<int32_t>::max()))
.WithShape(m::Shape().WithElementType(U32));
return m::Select(m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(m::Subtract(max_u32, param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
auto match_bitcast_bf16 = [](int64_t parameter_number) {
auto param = m::Convert(m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(BF16)))
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
return m::Select(
m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(
m::Subtract(m::ConstantScalar(std::numeric_limits<int32_t>::max()),
param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
auto match_bitcast_bf16_with_convert = [](int64_t parameter_number) {
auto param = m::Convert(m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(BF16)))
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
auto max_u32 =
m::Convert(m::ConstantScalar(std::numeric_limits<int32_t>::max()))
.WithShape(m::Shape().WithElementType(U32));
return m::Select(m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(m::Subtract(max_u32, param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
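  // Generic IEC 559 form: bitcast the float to a same-width signed integer
  // and, for negative values, XOR with the all-ones mask below the sign bit
  // so that integer comparison induces a total order on the floats.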
auto match_generic_iec559 = [](int64_t parameter_number,
PrimitiveType fp_type,
PrimitiveType int_type) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(fp_type));
auto signed_value = m::BitcastConvert(param).WithShape(
m::Shape().WithElementType(int_type));
int64_t bit_width = primitive_util::BitWidth(fp_type);
auto max_value = m::ConstantScalar(LsbMask<uint64_t>(bit_width - 1));
auto flipped_value = m::XorAnyOrder(max_value, signed_value);
auto is_negative = m::Lt(signed_value, m::ConstantScalar(0));
return m::Select(is_negative, flipped_value, signed_value);
};
auto match_generic_iec559_with_convert =
[](int64_t parameter_number, PrimitiveType param_type,
PrimitiveType fp_type, PrimitiveType int_type) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(param_type));
auto convert =
m::Convert(param).WithShape(m::Shape().WithElementType(fp_type));
auto signed_value = m::BitcastConvert(convert).WithShape(
m::Shape().WithElementType(int_type));
int64_t bit_width = primitive_util::BitWidth(fp_type);
auto max_value = m::ConstantScalar(LsbMask<uint64_t>(bit_width - 1));
auto flipped_value = m::XorAnyOrder(max_value, signed_value);
auto is_negative = m::Lt(signed_value, m::ConstantScalar(0));
return m::Select(is_negative, flipped_value, signed_value);
};
auto match_s32 = [](int64_t parameter_number) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(S32));
return param;
};
auto match_compare = [](PrimitiveType type) {
auto param0 = m::Parameter(0).WithShape(m::Shape().WithElementType(type));
auto param1 = m::Parameter(1).WithShape(m::Shape().WithElementType(type));
return m::Gt(param0, param1);
};
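  // Matches the select-based comparator produced for stable sorts, e.g.
  // select(broadcast(true), lhs > rhs, broadcast(true)).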
auto match_default_compare = [](PrimitiveType type) {
auto params_with_type = [&](int i, PrimitiveType t) {
return m::Parameter(i).WithShape(m::Shape().WithElementType(t));
};
auto params =
std::vector({
params_with_type(0, type), params_with_type(1, type),
params_with_type(2, S32), params_with_type(3, S32)});
auto const_true = m::Broadcast(m::Constant());
auto values_gt = m::Gt(params[0], params[1]);
return m::Select(const_true, values_gt, const_true);
};
auto match_all_types = [](HloInstruction* root, auto callback) {
bool result = false;
for (auto type : {BF16, F32, S32, U32}) {
result = result || Match(root, callback(type));
}
return result;
};
return Match(comp->root_instruction(),
m::Gt(match_generic_iec559(0, F32, S32),
match_generic_iec559(1, F32, S32))) ||
Match(comp->root_instruction(),
m::Gt(match_generic_iec559(0, BF16, S16),
match_generic_iec559(1, BF16, S16))) ||
Match(comp->root_instruction(),
m::Gt(match_generic_iec559_with_convert(0, BF16, F32, S32),
match_generic_iec559_with_convert(1, BF16, F32, S32))) ||
Match(comp->root_instruction(),
m::Gt(match_bitcast_f32(0), match_bitcast_f32(1))) ||
Match(comp->root_instruction(),
m::Gt(match_bitcast_bf16(0), match_bitcast_bf16(1))) ||
Match(comp->root_instruction(),
m::Gt(match_bitcast_f32_with_convert(0),
match_bitcast_f32_with_convert(1))) ||
Match(comp->root_instruction(),
m::Gt(match_bitcast_bf16_with_convert(0),
match_bitcast_bf16_with_convert(1))) ||
Match(comp->root_instruction(), m::Gt(match_s32(0), match_s32(1))) ||
match_all_types(comp->root_instruction(), match_compare) ||
match_all_types(comp->root_instruction(), match_default_compare);
}
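// Returns true if the second operand of `sort` is an iota (possibly
// broadcast) over the sort dimension, i.e. it carries the original indices.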
static bool HasIota(HloSortInstruction* sort, HloInstruction* data) {
namespace m = match;
const std::array<int64_t, 1> sort_dims = {
data->shape().dimensions(sort->sort_dimension())};
auto match_iota = [](auto dims) {
return m::Iota().WithShape(m::Shape().WithElementType(S32).WithDims(dims));
};
return Match(sort->operand(1), match_iota(data->shape().dimensions())) ||
Match(sort->operand(1), m::Broadcast(match_iota(sort_dims)));
}
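// Returns k if `inst` is a sort (with an optional iota operand supplying
// indices) whose comparator is a NaN-safe greater-than and whose every user
// merely slices the leading k elements along the sort dimension; otherwise
// returns std::nullopt.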
std::optional<int64_t> TopkRewriter::SortIsInTopK(HloInstruction* inst) {
HloSortInstruction* sort = DynCast<HloSortInstruction>(inst);
if (sort == nullptr) {
return std::nullopt;
}
if (sort->operand_count() != 1 && sort->operand_count() != 2) {
return std::nullopt;
}
HloInstruction* data = sort->mutable_operand(0);
if (sort->operand_count() == 2 && !HasIota(sort, data)) {
return std::nullopt;
}
if (!IsNanSafeGt(sort->to_apply())) {
return std::nullopt;
}
const int64_t sort_dim = sort->sort_dimension();
bool supported = true;
std::optional<int64_t> k;
for (HloInstruction* user : sort->users()) {
const HloInstruction* slice = user;
if (sort->operand_count() == 2) {
if (user->opcode() != HloOpcode::kGetTupleElement ||
user->user_count() != 1) {
supported = false;
break;
}
slice = user->users()[0];
}
if (slice->opcode() != HloOpcode::kSlice) {
supported = false;
break;
}
if (absl::c_any_of(slice->slice_starts(), [](int x) { return x != 0; }) ||
absl::c_any_of(slice->slice_strides(), [](int x) { return x != 1; })) {
supported = false;
break;
}
for (int64_t i = 0; i < slice->slice_limits().size(); ++i) {
if (i != sort_dim &&
slice->slice_limits(i) != slice->operand(0)->shape().dimensions(i)) {
supported = false;
break;
}
}
if (!supported) {
break;
}
if (k == std::nullopt) {
k = slice->slice_limits(sort_dim);
} else if (k != slice->slice_limits(sort_dim)) {
supported = false;
break;
}
}
if (k == std::nullopt || !supported) {
return std::nullopt;
}
return k;
}
struct TopKCustomCall {
HloInstruction* topk;
HloInstruction* value_gte;
HloInstruction* index_gte;
};
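// Emits a "TopK" custom call over `input`. Inputs of rank > 2 are first
// reshaped to [batch, n], and a leading sort dimension is transposed to the
// minor-most position; the inverse transpose/reshape is applied to both
// outputs. E.g. f32[3,8,n] with sort_dim=2 and k=5 becomes a custom call on
// f32[24,n] whose outputs are reshaped back to f32[3,8,5] and s32[3,8,5].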
TopKCustomCall CreateTopKCustomCall(HloInstruction* input,
const int64_t sort_dim, const int64_t k,
HloComputation* comparator,
HloComputation* comp) {
Shape data_shape = input->shape();
PrimitiveType element_type = data_shape.element_type();
bool has_batch = data_shape.rank() >= 2;
int64_t input_size = data_shape.dimensions(sort_dim);
int64_t batch_size = 1;
Shape topk_input_shape;
if (has_batch) {
batch_size =
ShapeUtil::ElementsIn(data_shape) / data_shape.dimensions(sort_dim);
topk_input_shape =
ShapeUtil::MakeShape(element_type, {batch_size, input_size});
if (data_shape.rank() > 2) {
input = comp->AddInstruction(HloInstruction::CreateReshape(
sort_dim == 0
? ShapeUtil::MakeShape(element_type, {input_size, batch_size})
: ShapeUtil::MakeShape(element_type, {batch_size, input_size}),
input));
}
if (sort_dim == 0) {
input = comp->AddInstruction(
HloInstruction::CreateTranspose(topk_input_shape, input, {1, 0}));
}
} else {
topk_input_shape = data_shape;
}
Shape topk_shape =
has_batch
? ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(element_type, {batch_size, k}),
ShapeUtil::MakeShape(S32, {batch_size, k})})
: ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(element_type, {k}),
ShapeUtil::MakeShape(S32, {k})});
HloInstruction* topk = comp->AddInstruction(HloInstruction::CreateCustomCall(
topk_shape, {input}, comparator, "TopK"));
HloInstruction* value_gte =
comp->AddInstruction(HloInstruction::CreateGetTupleElement(
topk->shape().tuple_shapes(0), topk, 0));
HloInstruction* index_gte =
comp->AddInstruction(HloInstruction::CreateGetTupleElement(
topk->shape().tuple_shapes(1), topk, 1));
if (has_batch) {
if (sort_dim == 0) {
value_gte = comp->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(element_type, {k, batch_size}), value_gte,
{1, 0}));
index_gte = comp->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(S32, {k, batch_size}), index_gte, {1, 0}));
}
if (data_shape.rank() > 2) {
std::vector<int64_t> shape_dim(data_shape.dimensions().begin(),
data_shape.dimensions().end());
shape_dim[sort_dim] = k;
value_gte = comp->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(element_type, shape_dim), value_gte));
index_gte = comp->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(S32, shape_dim), index_gte));
}
}
return {topk, value_gte, index_gte};
}
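// If `inst` matches the sort-based top-k pattern, replaces its slice users
// with the outputs of a TopK custom call and returns the call; returns
// nullptr when the pattern does not apply (non-F32/BF16 input, interior
// sort dimension, or the profitability callback declines).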
absl::StatusOr<HloInstruction*> TopkRewriter::TransformPatternToCustomCall(
HloInstruction* inst) {
std::optional<int64_t> k = SortIsInTopK(inst);
if (!k) {
return nullptr;
}
HloSortInstruction* sort = DynCast<HloSortInstruction>(inst);
HloInstruction* data = sort->mutable_operand(0);
const PrimitiveType element_type = data->shape().element_type();
if (element_type != F32 && element_type != BF16) {
return nullptr;
}
const int64_t sort_dim = sort->sort_dimension();
if (sort_dim != 0 && sort_dim != data->shape().rank() - 1) {
return nullptr;
}
if (!is_profitable_to_convert_(sort, *k)) {
return nullptr;
}
TopKCustomCall topkcc = CreateTopKCustomCall(
data, sort_dim, k.value(), sort->to_apply(), inst->parent());
for (HloInstruction* user : sort->users()) {
if (sort->operand_count() == 2) {
HloInstruction* gte = user;
for (HloInstruction* slice : gte->users()) {
if (gte->tuple_index() == 0) {
TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(topkcc.value_gte));
} else if (gte->tuple_index() == 1) {
TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(topkcc.index_gte));
} else {
LOG(FATAL) << "Sort with more than 2 output isn't supported in "
"topk rewriter";
}
}
} else {
TF_RETURN_IF_ERROR(user->ReplaceAllUsesWith(topkcc.value_gte));
}
}
return topkcc.topk;
}
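// Walks every computation in post order and rewrites each matching sort
// into a TopK custom call.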
absl::StatusOr<bool> TopkRewriter::TransformToCustomCall(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp : module->computations(execution_threads)) {
for (HloInstruction* inst : comp->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(HloInstruction * topkcc,
TransformPatternToCustomCall(inst));
if (topkcc != nullptr) {
VLOG(2) << "Rewritten Topk: " << topkcc->ToString();
changed = true;
}
}
}
return changed;
}
absl::StatusOr<bool> TopkRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
TF_ASSIGN_OR_RETURN(auto transform_to_customcall_changed,
TransformToCustomCall(module, execution_threads));
changed |= transform_to_customcall_changed;
return changed;
}
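// Visitor that lowers TopK custom calls and native topk() instructions back
// into a stable sort followed by slices of the leading k elements.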
class TopkDecomposerVisitor : public DfsHloRewriteVisitor {
public:
explicit TopkDecomposerVisitor(HloPredicate should_decompose)
: should_decompose_(should_decompose) {}
absl::Status HandleCustomCall(HloInstruction* inst) override {
if (should_decompose_ && !should_decompose_(inst)) {
return absl::OkStatus();
}
HloCustomCallInstruction* call = DynCast<HloCustomCallInstruction>(inst);
if (call == nullptr || call->custom_call_target() != "TopK") {
return absl::OkStatus();
}
HloComputation* comparator = call->to_apply();
return DecomposeTopK(call, comparator);
}
absl::Status HandleTopK(HloInstruction* topk) override {
if (should_decompose_ && !should_decompose_(topk)) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(HloComputation * comparator,
CreateVariadicComparator(topk));
return DecomposeTopK(topk, comparator);
}
private:
bool HasSingleUserReadingOnlyTheValueOutput(HloInstruction* inst) {
return inst->user_count() == 1 && inst->users().front()->tuple_index() == 0;
}
absl::StatusOr<HloComputation*> CreateVariadicComparator(
HloInstruction* inst) {
HloTopKInstruction* topk = DynCast<HloTopKInstruction>(inst);
XlaBuilder b(absl::StrCat("comparator_", topk->name()));
std::vector<PrimitiveType> ptypes = {
topk->operand(0)->shape().element_type()};
if (!HasSingleUserReadingOnlyTheValueOutput(inst)) {
ptypes.emplace_back(PrimitiveType::S32);
}
XlaComputation comparison = topk->largest()
? CreateScalarGtComputation(ptypes, &b)
: CreateScalarLtComputation(ptypes, &b);
TF_ASSIGN_OR_RETURN(HloComputation * comparator,
BuilderToHloComputation(comparison, topk->parent()));
return comparator;
}
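  // Replaces `call` with a stable sort plus slices. When only the values
  // output is consumed and the comparator takes just the two value
  // parameters, the iota/index operand is omitted entirely.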
absl::Status DecomposeTopK(HloInstruction* call,
HloComputation* variadic_comparator) {
HloComputation* comp = call->parent();
HloInstruction* input = call->mutable_operand(0);
Shape iota_shape = input->shape();
iota_shape.set_element_type(S32);
size_t sort_dimension = input->shape().dimensions_size() - 1;
std::vector<int64_t> zeroes(iota_shape.rank(), 0);
std::vector<int64_t> ones(iota_shape.rank(), 1);
auto slice_tuple = [&](HloInstruction* sort, const size_t index) {
return comp->AddInstruction(HloInstruction::CreateSlice(
call->shape().tuple_shapes(index),
comp->AddInstruction(HloInstruction::CreateGetTupleElement(
sort->shape().tuple_shapes(index), sort, index)),
zeroes, call->shape().tuple_shapes(index).dimensions(), ones));
};
CHECK_NE(variadic_comparator, nullptr);
if (HasSingleUserReadingOnlyTheValueOutput(call) &&
variadic_comparator->num_parameters() == 2) {
HloInstruction* sort = comp->AddInstruction(HloInstruction::CreateSort(
{input->shape()}, sort_dimension, {input}, variadic_comparator,
          /*is_stable=*/true));
TF_RETURN_IF_ERROR(ReplaceInstruction(
call->users().front(),
comp->AddInstruction(HloInstruction::CreateSlice(
call->shape().tuple_shapes(0), sort, zeroes,
call->shape().tuple_shapes(0).dimensions(), ones))));
sort->set_metadata(call->metadata());
} else {
HloInstruction* iota = comp->AddInstruction(
HloInstruction::CreateIota(iota_shape, iota_shape.rank() - 1));
HloInstruction* sort = comp->AddInstruction(HloInstruction::CreateSort(
ShapeUtil::MakeTupleShape({input->shape(), iota_shape}),
sort_dimension, {input, iota}, variadic_comparator,
          /*is_stable=*/true));
TF_RETURN_IF_ERROR(ReplaceInstruction(
call, comp->AddInstruction(HloInstruction::CreateTuple(
{slice_tuple(sort, 0), slice_tuple(sort, 1)}))));
sort->set_metadata(call->metadata());
}
return absl::OkStatus();
}
private:
HloPredicate should_decompose_;
};
absl::StatusOr<bool> TopkDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return TopkDecomposerVisitor(should_decompose_)
.RunOnModule(module, execution_threads);
}
} | #include "xla/service/topk_rewriter.h"
#include <algorithm>
#include <memory>
#include <numeric>
#include <optional>
#include <utility>
#include <vector>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace m = ::xla::match;
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::tsl::testing::IsOkAndHolds;
using TopkRewriterTest = HloTestBase;
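// HLO text for a comparator that bitcasts floats to integers and remaps
// negatives (INT_MAX - bits) to obtain a total-order greater-than; one of
// the NaN-safe forms TopkRewriter recognizes.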
std::string getComparator() {
return R"(
%compare {
%p.1.lhs.8 = s32[] parameter(2)
%p.1.rhs.9 = s32[] parameter(3)
%p.0.lhs.6 = f32[] parameter(0)
%bitcast-convert.11 = s32[] bitcast-convert(%p.0.lhs.6)
%constant.15 = s32[] constant(0)
%compare.16 = pred[] compare(%bitcast-convert.11, %constant.15), direction=LT
%constant.10 = u32[] constant(2147483647)
%bitcast-convert.12 = u32[] bitcast-convert(%p.0.lhs.6)
%subtract.13 = u32[] subtract(%constant.10, %bitcast-convert.12)
%bitcast-convert.14 = s32[] bitcast-convert(%subtract.13)
%select.17 = s32[] select(%compare.16, %bitcast-convert.14,
%bitcast-convert.11)
%p.0.rhs.7 = f32[] parameter(1)
%bitcast-convert.19 = s32[] bitcast-convert(%p.0.rhs.7)
%constant.23 = s32[] constant(0)
%compare.24 = pred[] compare(%bitcast-convert.19, %constant.23), direction=LT
%constant.18 = u32[] constant(2147483647)
%bitcast-convert.20 = u32[] bitcast-convert(%p.0.rhs.7)
%subtract.21 = u32[] subtract(%constant.18, %bitcast-convert.20)
%bitcast-convert.22 = s32[] bitcast-convert(%subtract.21)
%select.25 = s32[] select(%compare.24, %bitcast-convert.22,
%bitcast-convert.19)
ROOT %compare.26 = pred[] compare(%select.17, %select.25), direction=GT
})";
}
std::string getConvertMaxComparator() {
return R"(
%compare {
%p.1.lhs.6 = s32[] parameter(2)
%p.1.rhs.7 = s32[] parameter(3)
%p.0.lhs.4 = f32[] parameter(0)
%bitcast-convert = s32[] bitcast-convert(f32[] %p.0.lhs.4)
%constant = s32[] constant(0)
%compare = pred[] compare(s32[] %bitcast-convert, s32[] %constant), direction=LT
%constant.1 = s32[] constant(2147483647)
%convert = u32[] convert(s32[] %constant.1)
%bitcast-convert.1 = u32[] bitcast-convert(f32[] %p.0.lhs.4)
%subtract = u32[] subtract(u32[] %convert, u32[] %bitcast-convert.1)
%bitcast-convert.2 = s32[] bitcast-convert(u32[] %subtract)
%select = s32[] select(pred[] %compare, s32[] %bitcast-convert.2, s32[] %bitcast-convert)
%p.0.rhs.5 = f32[] parameter(1)
%bitcast-convert.3 = s32[] bitcast-convert(f32[] %p.0.rhs.5)
%compare.1 = pred[] compare(s32[] %bitcast-convert.3, s32[] %constant), direction=LT
%bitcast-convert.4 = u32[] bitcast-convert(f32[] %p.0.rhs.5)
%subtract.1 = u32[] subtract(u32[] %convert, u32[] %bitcast-convert.4)
%bitcast-convert.5 = s32[] bitcast-convert(u32[] %subtract.1)
%select.1 = s32[] select(pred[] %compare.1, s32[] %bitcast-convert.5, s32[] %bitcast-convert.3)
ROOT %compare.2 = pred[] compare(s32[] %select, s32[] %select.1), direction=GT
})";
}
std::string getComparatorNoIota() {
return R"(
%compare {
%p.0.lhs.6 = f32[] parameter(0)
%bitcast-convert.11 = s32[] bitcast-convert(%p.0.lhs.6)
%constant.15 = s32[] constant(0)
%compare.16 = pred[] compare(%bitcast-convert.11, %constant.15), direction=LT
%constant.10 = u32[] constant(2147483647)
%bitcast-convert.12 = u32[] bitcast-convert(%p.0.lhs.6)
%subtract.13 = u32[] subtract(%constant.10, %bitcast-convert.12)
%bitcast-convert.14 = s32[] bitcast-convert(%subtract.13)
%select.17 = s32[] select(%compare.16, %bitcast-convert.14,
%bitcast-convert.11)
%p.0.rhs.7 = f32[] parameter(1)
%bitcast-convert.19 = s32[] bitcast-convert(%p.0.rhs.7)
%constant.23 = s32[] constant(0)
%compare.24 = pred[] compare(%bitcast-convert.19, %constant.23), direction=LT
%constant.18 = u32[] constant(2147483647)
%bitcast-convert.20 = u32[] bitcast-convert(%p.0.rhs.7)
%subtract.21 = u32[] subtract(%constant.18, %bitcast-convert.20)
%bitcast-convert.22 = s32[] bitcast-convert(%subtract.21)
%select.25 = s32[] select(%compare.24, %bitcast-convert.22,
%bitcast-convert.19)
ROOT %compare.26 = pred[] compare(%select.17, %select.25), direction=GT
})";
}
std::string getCompareComparator() {
return R"(
%compare {
%Arg_0.100 = f32[] parameter(0)
%Arg_1.101 = f32[] parameter(1)
%Arg_2.102 = s32[] parameter(2)
%Arg_3.103 = s32[] parameter(3)
ROOT %compare.56364 = pred[] compare(f32[] %Arg_0.100, f32[] %Arg_1.101), direction=GT, type=TOTALORDER
})";
}
std::string getStableComparator() {
return R"(
%compare {
%p.1.lhs.40628 = s32[] parameter(2)
%p.1.rhs.40629 = s32[] parameter(3)
%constant.40630 = pred[] constant(true)
%broadcast.40631 = pred[] broadcast(pred[] %constant.40630), dimensions={}
%p.0.lhs.40626 = f32[] parameter(0)
%p.0.rhs.40627 = f32[] parameter(1)
%compare.40632 = pred[] compare(f32[] %p.0.lhs.40626, f32[] %p.0.rhs.40627), direction=GT, type=TOTALORDER
ROOT %select.40633 = pred[] select(pred[] %broadcast.40631, pred[] %compare.40632, pred[] %broadcast.40631)
})";
}
bool IsStableSort(const HloInstruction* inst) {
auto* sort = DynCast<HloSortInstruction>(inst);
return sort != nullptr && sort->is_stable();
}
TEST_F(TopkRewriterTest, Rewrite) {
for (std::string comparator :
{getComparator(), getCompareComparator(), getStableComparator()}) {
const std::string hlo_string = R"(
HloModule module
)" + comparator + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
}
TEST_F(TopkRewriterTest, RewriteWithBroadcast) {
for (std::string comparator :
{getComparator(), getCompareComparator(), getStableComparator()}) {
const std::string hlo_string = R"(
HloModule module
)" + comparator + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[1234567]{0} iota(), iota_dimension=0
%broadcast.5 = s32[8,1234567]{1,0} broadcast(iota.4), dimensions={1}
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %broadcast.5),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
}
TEST_F(TopkRewriterTest, RewriteWithConvertMaxComparator) {
const std::string hlo_string = R"(
HloModule module
)" + getConvertMaxComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteUnbatched) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234567] parameter(0)
%iota.4 = s32[1234567] iota(), iota_dimension=0
%sort.27 = (f32[1234567], s32[1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteTranspose) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234567,8] parameter(0)
%iota.4 = s32[1234567,8] iota(), iota_dimension=0
%sort.27 = (f32[1234567,8], s32[1234567,8]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234567,8] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5,8] slice(%get-tuple-element.28), slice={[0:5], [0:8]}
%get-tuple-element.30 = s32[1234567,8] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5,8] slice(%get-tuple-element.30), slice={[0:5], [0:8]}
ROOT %tuple.32 = (f32[5,8], s32[5,8]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
LOG(INFO) << module->entry_computation()->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Transpose(m::GetTupleElement(
m::CustomCall(m::Transpose(m::Parameter(0))), 0)),
m::Transpose(m::GetTupleElement(
m::CustomCall(m::Transpose(m::Parameter(0))), 1)))));
const HloInstruction* cc = module->entry_computation()
->root_instruction()
->operand(0)
->operand(0)
->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteReshape) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[3,8,1234567] parameter(0)
%iota.4 = s32[3,8,1234567] iota(), iota_dimension=2
%sort.27 = (f32[3,8,1234567], s32[3,8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={2}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[3, 8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[3,8,5] slice(%get-tuple-element.28), slice={[0:3], [0:8], [0:5]}
%get-tuple-element.30 = s32[3,8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[3,8,5] slice(%get-tuple-element.30), slice={[0:3], [0:8], [0:5]}
ROOT %tuple.32 = (f32[3,8,5], s32[3,8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(m::Reshape(m::Parameter(0))), 0)),
m::Reshape(m::GetTupleElement(
m::CustomCall(m::Reshape(m::Parameter(0))), 1)))));
const HloInstruction* cc = module->entry_computation()
->root_instruction()
->operand(0)
->operand(0)
->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RewriteNoIota) {
const std::string hlo_string = R"(
HloModule module
)" + getComparatorNoIota() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%sort.27 = f32[8,1234567] sort(%arg_tuple.1), dimensions={1}, is_stable=true, to_apply=%compare
ROOT %slice.29 = f32[8,5] slice(%sort.27), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_THAT(cc->custom_call_target(), "TopK");
}
TEST_F(TopkRewriterTest, RoundTripNoIota) {
const std::string hlo_string = R"(
HloModule module
)" + getComparatorNoIota() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%sort.27 = f32[8,1234567] sort(%arg_tuple.1), dimensions={1}, is_stable=true, to_apply=%compare
ROOT %slice.29 = f32[8,5] slice(%sort.27), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Slice(
m::Sort(m::Parameter(0)).WithPredicate(IsStableSort))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, RoundTripOnlyIota) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[1234567]{0} iota(), iota_dimension=0
%broadcast.5 = s32[8,1234567]{1,0} broadcast(iota.4), dimensions={1}
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %broadcast.5),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = s32[8,1234567] get-tuple-element(%sort.27), index=1
ROOT %slice.29 = s32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Slice(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()).WithPredicate(IsStableSort),
1))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, RoundTrip) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
%slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
%get-tuple-element.30 = s32[8,1234567] get-tuple-element(%sort.27), index=1
%slice.31 = s32[8,5] slice(%get-tuple-element.30), slice={[0:8], [0:5]}
ROOT %tuple.32 = (f32[8,5], s32[8,5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0),
m::GetTupleElement(m::CustomCall(m::Parameter(0)), 1))));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
auto sort_matcher =
m::Sort(m::Parameter(0), m::Iota()).WithPredicate(IsStableSort);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Slice(m::GetTupleElement(sort_matcher, 0)),
m::Slice(m::GetTupleElement(sort_matcher, 1)))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, RoundTripValueOnly) {
const std::string hlo_string = R"(
HloModule module
)" + getComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[8,1234567] parameter(0)
%iota.4 = s32[8,1234567] iota(), iota_dimension=1
%sort.27 = (f32[8,1234567], s32[8,1234567]) sort(%arg_tuple.1, %iota.4),
dimensions={1}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[8,1234567] get-tuple-element(%sort.27), index=0
ROOT %slice.29 = f32[8,5] slice(%get-tuple-element.28), slice={[0:8], [0:5]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto run_topk_pass = [&] {
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
ASSERT_TRUE(changed);
ASSERT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(m::Parameter(0)), 0)));
const HloInstruction* cc =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_THAT(cc->custom_call_target(), "TopK");
};
run_topk_pass();
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
auto sort_matcher =
m::Sort(m::Parameter(0), m::Iota()).WithPredicate(IsStableSort);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Slice(m::GetTupleElement(sort_matcher, 0))));
run_topk_pass();
}
TEST_F(TopkRewriterTest, SanityCheckOutput) {
const std::string hlo_string = R"(
HloModule module
)" + getCompareComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234] parameter(0)
%iota.4 = s32[1234] iota(), iota_dimension=0
%sort.27 = (f32[1234], s32[1234]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto source_module,
ParseAndReturnVerifiedModule(hlo_string));
auto topk_module = source_module->Clone();
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(topk_module.get()),
IsOkAndHolds(true));
auto decomposed_module = topk_module->Clone();
EXPECT_THAT(TopkDecomposer().Run(decomposed_module.get()),
IsOkAndHolds(true));
const size_t source_size = 1234;
std::vector<float> source(source_size);
std::iota(source.begin(), source.end(), 80000);
auto input = LiteralUtil::CreateR1<float>(source);
std::vector<float> top_k({81233, 81232, 81231, 81230, 81229});
auto check_result = [&](std::unique_ptr<HloModule> module) {
TF_ASSERT_OK_AND_ASSIGN(auto result, Execute(std::move(module), {&input}));
LiteralTestUtil::ExpectR1Equal<float>(top_k, result.DecomposeTuple()[0]);
};
check_result(std::move(source_module));
check_result(std::move(decomposed_module));
}
TEST_F(TopkRewriterTest, Equivalent) {
const std::string hlo_string = R"(
HloModule module
)" + getCompareComparator() + R"(
ENTRY cluster {
%arg_tuple.1 = f32[1234] parameter(0)
%iota.4 = s32[1234] iota(), iota_dimension=0
%sort.27 = (f32[1234], s32[1234]) sort(%arg_tuple.1, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto source_module,
ParseAndReturnVerifiedModule(hlo_string));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
};
EXPECT_TRUE(
RunAndCompare(std::move(source_module), std::nullopt, round_trip));
}
TEST_F(TopkRewriterTest, DecomposerStability) {
const std::string hlo_string = R"(
HloModule module
)" + getCompareComparator() + R"(
ENTRY cluster {
%constant.1 = f32[] constant(42)
%broadcast.2= f32[1234] broadcast(f32[] %constant.1), dimensions={}
%iota.4 = s32[1234] iota(), iota_dimension=0
%sort.27 = (f32[1234], s32[1234]) sort(%broadcast.2, %iota.4),
dimensions={0}, is_stable=true, to_apply=%compare
%get-tuple-element.28 = f32[1234] get-tuple-element(%sort.27), index=0
%slice.29 = f32[5] slice(%get-tuple-element.28), slice={[0:5]}
%get-tuple-element.30 = s32[1234] get-tuple-element(%sort.27), index=1
%slice.31 = s32[5] slice(%get-tuple-element.30), slice={[0:5]}
ROOT %tuple.32 = (f32[5], s32[5]) tuple(%slice.29, %slice.31)
})";
TF_ASSERT_OK_AND_ASSIGN(auto source_module,
ParseAndReturnVerifiedModule(hlo_string));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
};
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(source_module), std::nullopt,
round_trip));
}
TEST_F(TopkRewriterTest, TopKDecomposition) {
const std::string hlo_string = R"(
HloModule topk
ENTRY TopK {
x = bf16[10,10]{0,1} parameter(0)
ROOT topk = (bf16[10,2]{0,1}, s32[10,2]{0,1}) topk(x), k=2, largest=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool decomposer_changed,
TopkDecomposer().Run(module.get()));
EXPECT_TRUE(decomposer_changed);
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
TF_ASSERT_OK(TupleSimplifier().Run(module.get()).status());
auto sort_matcher = op::Sort(op::Parameter(0), op::Iota());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::Slice(op::GetTupleElement(sort_matcher, 0)),
op::Slice(op::GetTupleElement(sort_matcher, 1))));
TopkRewriter rewriter(
[](const HloSortInstruction*, int64_t) { return true; });
TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get()));
TF_ASSERT_OK(HloDCE().Run(module.get()).status());
EXPECT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/topk_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/topk_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3a0f99c2-f130-42c7-848e-16e5e583a8ca | cpp | google/langsvr | decode | src/lsp/decode.cc | src/lsp/decode_test.cc | #include "langsvr/lsp/decode.h"
namespace langsvr::lsp {
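// Each overload below decodes a json::Value into the matching LSP primitive,
// forwarding the JSON accessor's failure when the value has a different type.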
Result<SuccessType> Decode(const json::Value& v, Null&) {
return v.Null();
}
Result<SuccessType> Decode(const json::Value& v, Boolean& out) {
auto res = v.Bool();
if (res == Success) [[likely]] {
out = res.Get();
return Success;
}
return res.Failure();
}
Result<SuccessType> Decode(const json::Value& v, Integer& out) {
auto res = v.I64();
if (res == Success) [[likely]] {
out = res.Get();
return Success;
}
return res.Failure();
}
Result<SuccessType> Decode(const json::Value& v, Uinteger& out) {
auto res = v.U64();
if (res == Success) [[likely]] {
out = res.Get();
return Success;
}
return res.Failure();
}
Result<SuccessType> Decode(const json::Value& v, Decimal& out) {
auto res = v.F64();
if (res == Success) [[likely]] {
out = res.Get();
return Success;
}
return res.Failure();
}
Result<SuccessType> Decode(const json::Value& v, String& out) {
auto res = v.String();
if (res == Success) [[likely]] {
out = res.Get();
return Success;
}
return res.Failure();
}
} | #include "langsvr/json/builder.h"
#include "langsvr/lsp/lsp.h"
#include "langsvr/lsp/printer.h"
#include "gmock/gmock.h"
namespace langsvr::lsp {
namespace {
TEST(DecodeTest, ShowDocumentParams) {
auto b = json::Builder::Create();
auto parse_res = b->Parse(
R"({"selection":{"end":{"character":4,"line":3},"start":{"character":2,"line":1}},"uri":"file.txt"})");
EXPECT_EQ(parse_res, Success);
ShowDocumentParams got;
auto decode_res = Decode(*parse_res.Get(), got);
EXPECT_EQ(decode_res, Success);
ShowDocumentParams expected;
expected.uri = "file.txt";
expected.selection = Range{{1, 2}, {3, 4}};
EXPECT_EQ(got, expected);
}
TEST(DecodeTest, ErrNullStruct) {
auto b = json::Builder::Create();
auto parse_res = b->Parse("null");
EXPECT_EQ(parse_res, Success);
SemanticTokensFullDelta got;
auto decode_res = Decode(*parse_res.Get(), got);
EXPECT_NE(decode_res, Success);
}
TEST(DecodeTest, ErrNumberStruct) {
auto b = json::Builder::Create();
auto parse_res = b->Parse("42");
EXPECT_EQ(parse_res, Success);
SemanticTokensFullDelta got;
auto decode_res = Decode(*parse_res.Get(), got);
EXPECT_NE(decode_res, Success);
}
}
} | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/lsp/decode.cc | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/lsp/decode_test.cc | 303c526231a90049a3e384549720f3fbd453cf66 |
c819c062-ecf7-46e5-a83c-b400d4ef4882 | cpp | tensorflow/tensorflow | xplane_to_memory_profile | tensorflow/core/profiler/convert/xplane_to_memory_profile.cc | tensorflow/core/profiler/convert/xplane_to_memory_profile_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_memory_profile.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
constexpr int64_t kInvalidStepId = -1;
using IndexMetaPair =
    std::pair<int64_t, const MemoryActivityMetadata*>;
bool IsMemoryAllocation(int64_t event_type) {
return event_type == HostEventType::kMemoryAllocation;
}
bool IsMemoryDeallocation(int64_t event_type) {
return event_type == HostEventType::kMemoryDeallocation;
}
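// Raises the summary's peak statistics whenever this snapshot's combined
// stack + heap usage meets or exceeds the recorded peak; memory capacity is
// stack reserved + heap allocated + free bytes at that moment.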
void UpdateProfileSummary(const MemoryAggregationStats& stats,
int64_t time_offset_ps,
MemoryProfileSummary* summary) {
summary->set_peak_bytes_usage_lifetime(stats.peak_bytes_in_use());
MemoryAggregationStats* peak_stats = summary->mutable_peak_stats();
if (stats.stack_reserved_bytes() + stats.heap_allocated_bytes() >=
peak_stats->peak_bytes_in_use()) {
*peak_stats = stats;
peak_stats->set_peak_bytes_in_use(stats.stack_reserved_bytes() +
stats.heap_allocated_bytes());
summary->set_peak_stats_time_ps(time_offset_ps);
summary->set_memory_capacity(stats.stack_reserved_bytes() +
stats.heap_allocated_bytes() +
stats.free_memory_bytes());
}
}
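// Converts MemoryAllocation/MemoryDeallocation events on the host plane into
// per-allocator snapshots, keyed by allocator name (or host/device index).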
MemoryProfile GenerateMemoryProfile(const XPlane* host_trace) {
XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace);
MemoryProfile memory_profile;
plane.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
int64_t event_type =
event.Type().value_or(HostEventType::kUnknownHostEventType);
if (!(IsMemoryAllocation(event_type) ||
IsMemoryDeallocation(event_type))) {
return;
}
MemoryAggregationStats stats;
MemoryActivityMetadata metadata;
if (IsMemoryAllocation(event_type)) {
metadata.set_memory_activity(ALLOCATION);
} else if (IsMemoryDeallocation(event_type)) {
metadata.set_memory_activity(DEALLOCATION);
}
metadata.set_step_id(kInvalidStepId);
std::string memory_id;
event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (stat.Type().value()) {
case StatType::kIndexOnHost:
case StatType::kDeviceOrdinal:
memory_id = absl::StrCat(stat.IntValue());
break;
case StatType::kAllocatorName:
memory_id = std::string(stat.StrOrRefValue());
break;
case StatType::kBytesReserved:
stats.set_stack_reserved_bytes(stat.IntValue());
break;
case StatType::kBytesAllocated:
stats.set_heap_allocated_bytes(stat.IntValue());
break;
case StatType::kBytesAvailable:
stats.set_free_memory_bytes(stat.IntValue());
break;
case StatType::kFragmentation:
stats.set_fragmentation(stat.DoubleValue());
break;
case StatType::kPeakBytesInUse:
stats.set_peak_bytes_in_use(stat.IntValue());
break;
case StatType::kRequestedBytes:
metadata.set_requested_bytes(stat.IntValue());
break;
case StatType::kAllocationBytes:
metadata.set_allocation_bytes(stat.IntValue());
break;
case StatType::kAddress:
metadata.set_address(stat.IntValue());
break;
case StatType::kTfOp:
metadata.set_tf_op_name(std::string(stat.StrOrRefValue()));
break;
case StatType::kGroupId:
metadata.set_step_id(stat.IntValue());
break;
case StatType::kRegionType:
metadata.set_region_type(std::string(stat.StrOrRefValue()));
break;
case StatType::kDataType:
metadata.set_data_type(tensorflow::DataTypeString(
static_cast<tensorflow::DataType>(stat.IntValue())));
break;
case StatType::kTensorShapes:
metadata.set_tensor_shape(std::string(stat.StrOrRefValue()));
break;
}
});
MemoryProfileSummary* summary =
(*memory_profile.mutable_memory_profile_per_allocator())[memory_id]
.mutable_profile_summary();
UpdateProfileSummary(stats, event.OffsetPs(), summary);
MemoryProfileSnapshot* snapshot =
(*memory_profile.mutable_memory_profile_per_allocator())[memory_id]
.add_memory_profile_snapshots();
snapshot->set_time_offset_ps(event.OffsetPs());
*snapshot->mutable_aggregation_stats() = std::move(stats);
*snapshot->mutable_activity_metadata() = std::move(metadata);
});
});
return memory_profile;
}
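// Snapshots without a group id carry kInvalidStepId; replace those with
// last_valid_step_id + 1 so every snapshot has a usable step id.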
void UpdateStepId(PerAllocatorMemoryProfile* memory_profile) {
int64_t last_valid_step_id = -1;
for (auto& snapshot : *memory_profile->mutable_memory_profile_snapshots()) {
DCHECK(snapshot.has_activity_metadata());
if (snapshot.mutable_activity_metadata()->step_id() == kInvalidStepId) {
snapshot.mutable_activity_metadata()->set_step_id(last_valid_step_id + 1);
} else {
last_valid_step_id = snapshot.mutable_activity_metadata()->step_id();
}
}
}
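// Pairs each deallocation with the earlier allocation at the same address and
// copies the allocation's op name, region type, data type, and tensor shape
// onto the deallocation snapshot.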
void UpdateDeallocation(PerAllocatorMemoryProfile* memory_profile) {
  absl::flat_hash_map<uint64, const MemoryActivityMetadata*>
addr_metadata_map;
for (auto& snapshot : *memory_profile->mutable_memory_profile_snapshots()) {
uint64 address = snapshot.activity_metadata().address();
if (snapshot.activity_metadata().memory_activity() == DEALLOCATION) {
if (addr_metadata_map.contains(address)) {
const MemoryActivityMetadata* alloc_meta = addr_metadata_map[address];
snapshot.mutable_activity_metadata()->set_tf_op_name(
alloc_meta->tf_op_name());
snapshot.mutable_activity_metadata()->set_region_type(
alloc_meta->region_type());
snapshot.mutable_activity_metadata()->set_data_type(
alloc_meta->data_type());
snapshot.mutable_activity_metadata()->set_tensor_shape(
alloc_meta->tensor_shape());
addr_metadata_map.erase(address);
} else {
VLOG(2)
<< "Can't find matching memory allocation for this deallocation: "
<< snapshot.DebugString();
}
} else if (!addr_metadata_map.contains(address)) {
addr_metadata_map[address] = &snapshot.activity_metadata();
} else {
VLOG(2) << "There are two allocations recorded for the same address: "
<< address
<< ". The later allocation event is: " << snapshot.DebugString();
}
}
VLOG(2) << "Number of allocations that cannot find matching dealloctions: "
<< addr_metadata_map.size();
}
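// Returns the step id of the (last) snapshot whose stack + heap usage equals
// the lifetime peak.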
int64_t GetPeakMemoryStep(int64_t peak_bytes_profile,
const PerAllocatorMemoryProfile* memory_profile) {
int64_t peak_bytes_profile_step_id = 0;
for (const auto& snapshot : memory_profile->memory_profile_snapshots()) {
if (peak_bytes_profile ==
snapshot.aggregation_stats().heap_allocated_bytes() +
snapshot.aggregation_stats().stack_reserved_bytes()) {
DCHECK(snapshot.has_activity_metadata());
peak_bytes_profile_step_id = snapshot.activity_metadata().step_id();
}
}
return peak_bytes_profile_step_id;
}
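// Orders active allocations by descending allocation/requested bytes, then
// lexicographically by op name, region type, data type, and tensor shape.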
struct MetadataComparator {
bool operator()(const IndexMetaPair& a, const IndexMetaPair& b) const {
const MemoryActivityMetadata* a_meta = a.second;
const MemoryActivityMetadata* b_meta = b.second;
DCHECK_NE(a_meta, nullptr);
DCHECK_NE(b_meta, nullptr);
auto lhs =
std::make_tuple(-a_meta->allocation_bytes(), -a_meta->requested_bytes(),
a_meta->tf_op_name(), a_meta->region_type(),
a_meta->data_type(), a_meta->tensor_shape());
auto rhs =
std::make_tuple(-b_meta->allocation_bytes(), -b_meta->requested_bytes(),
b_meta->tf_op_name(), b_meta->region_type(),
b_meta->data_type(), b_meta->tensor_shape());
return lhs < rhs;
}
};
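// Appends synthetic allocation entries for heap bytes not attributable to any
// snapshot ("unused preallocated device memory") and for reserved stack bytes.
// Negative indices mark them as entries in special_allocations.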
void InsertSpecialAllocations(int64_t unmapped_allocation_bytes,
int64_t step_id,
PerAllocatorMemoryProfile* memory_profile,
std::vector<IndexMetaPair>* active_allocs) {
int index = 0;
if (unmapped_allocation_bytes > 0) {
MemoryActivityMetadata* special_allocation =
memory_profile->add_special_allocations();
special_allocation->set_memory_activity(ALLOCATION);
special_allocation->set_requested_bytes(unmapped_allocation_bytes);
special_allocation->set_allocation_bytes(unmapped_allocation_bytes);
special_allocation->set_address(0);
special_allocation->set_tf_op_name("unused preallocated device memory");
special_allocation->set_step_id(step_id);
special_allocation->set_region_type("persist/dynamic");
special_allocation->set_data_type(
tensorflow::DataTypeString(static_cast<tensorflow::DataType>(0)));
special_allocation->set_tensor_shape("unknown");
active_allocs->push_back({--index, special_allocation});
}
int64_t stack_bytes =
memory_profile->profile_summary().peak_stats().stack_reserved_bytes();
if (stack_bytes > 0) {
MemoryActivityMetadata* special_allocation =
memory_profile->add_special_allocations();
special_allocation->set_memory_activity(ALLOCATION);
special_allocation->set_requested_bytes(stack_bytes);
special_allocation->set_allocation_bytes(stack_bytes);
special_allocation->set_address(0);
special_allocation->set_tf_op_name("stack");
special_allocation->set_step_id(step_id);
special_allocation->set_region_type("stack");
special_allocation->set_data_type(
tensorflow::DataTypeString(static_cast<tensorflow::DataType>(0)));
special_allocation->set_tensor_shape("unknown");
active_allocs->push_back({--index, special_allocation});
}
}
bool operator==(const IndexMetaPair& a, const IndexMetaPair& b) {
const MemoryActivityMetadata* a_meta = a.second;
const MemoryActivityMetadata* b_meta = b.second;
return a_meta->allocation_bytes() == b_meta->allocation_bytes() &&
a_meta->requested_bytes() == b_meta->requested_bytes() &&
a_meta->tf_op_name() == b_meta->tf_op_name() &&
a_meta->region_type() == b_meta->region_type() &&
a_meta->data_type() == b_meta->data_type() &&
a_meta->tensor_shape() == b_meta->tensor_shape();
}
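// Replays snapshots within the peak-memory step up to the peak time to
// reconstruct the allocations still live at the peak; identical allocations
// are collapsed into one entry with num_occurrences.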
void ProcessActiveAllocations(int64_t peak_bytes_profile_step_id,
PerAllocatorMemoryProfile* memory_profile) {
int64_t unmapped_allocation_bytes =
memory_profile->profile_summary().peak_stats().heap_allocated_bytes();
int64_t unmapped_deallocation_bytes = 0;
  absl::flat_hash_map<int64_t, IndexMetaPair> active_alloc_map;
for (int i = 0; i < memory_profile->memory_profile_snapshots_size(); i++) {
const auto& snapshot = memory_profile->memory_profile_snapshots().at(i);
DCHECK(snapshot.has_activity_metadata());
const MemoryActivityMetadata& metadata = snapshot.activity_metadata();
if (snapshot.time_offset_ps() >
memory_profile->profile_summary().peak_stats_time_ps())
break;
if (metadata.step_id() != peak_bytes_profile_step_id) continue;
if (metadata.memory_activity() == ALLOCATION) {
active_alloc_map[metadata.address()] = {i, &metadata};
unmapped_allocation_bytes -= metadata.allocation_bytes();
} else {
DCHECK_EQ(metadata.memory_activity(), DEALLOCATION);
if (active_alloc_map.contains(metadata.address())) {
active_alloc_map.erase(metadata.address());
} else {
unmapped_deallocation_bytes += metadata.allocation_bytes();
}
unmapped_allocation_bytes += metadata.allocation_bytes();
}
}
unmapped_allocation_bytes -= unmapped_deallocation_bytes;
VLOG(2) << "unmapped_allocation_bytes=" << unmapped_allocation_bytes
<< ", unmapped_deallocation_bytes=" << unmapped_deallocation_bytes;
std::vector<IndexMetaPair> active_allocs;
for (const auto& address_and_index_meta : active_alloc_map) {
active_allocs.push_back(address_and_index_meta.second);
}
InsertSpecialAllocations(unmapped_allocation_bytes,
peak_bytes_profile_step_id, memory_profile,
&active_allocs);
std::sort(active_allocs.begin(), active_allocs.end(), MetadataComparator());
for (int i = 0, end = active_allocs.size(); i < end; i++) {
ActiveAllocation* allocation = memory_profile->add_active_allocations();
allocation->set_snapshot_index(active_allocs[i].first);
if (active_allocs[i].first < 0) {
allocation->set_special_index(-active_allocs[i].first - 1);
} else {
allocation->set_special_index(-1);
}
allocation->set_num_occurrences(1);
const int last_alloc = active_allocs.size() - 1;
while (i < last_alloc && active_allocs[i] == active_allocs[i + 1]) {
allocation->set_num_occurrences(allocation->num_occurrences() + 1);
i++;
}
}
VLOG(2) << "Distinctive active allocation count="
<< memory_profile->active_allocations_size();
}
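// Keeps only the snapshots referenced by active allocations and rewrites
// their snapshot_index fields to the compacted positions.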
void SaveActiveAllocationSnapshots(
protobuf::RepeatedPtrField<MemoryProfileSnapshot>* snapshots,
protobuf::RepeatedPtrField<ActiveAllocation>* active_allocations) {
std::vector<MemoryProfileSnapshot*> samples;
for (const auto& allocation : *active_allocations) {
auto orig_index = allocation.snapshot_index();
if (orig_index < 0) continue;
samples.push_back(&(*snapshots)[orig_index]);
}
int new_index = 0;
for (auto& allocation : *active_allocations) {
int64_t origin_index = allocation.snapshot_index();
if (origin_index < 0) continue;
allocation.set_snapshot_index(new_index);
new_index++;
}
protobuf::RepeatedPtrField<MemoryProfileSnapshot> new_snapshots;
new_snapshots.Reserve(samples.size());
for (const auto& sample : samples) {
*new_snapshots.Add() = std::move(*sample);
}
*snapshots = std::move(new_snapshots);
}
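// Downsamples the timeline to at most max_num_snapshots using a max filter:
// count1 windows of width `width` followed by count2 windows of width
// `width + 1`, which together cover every original snapshot exactly once.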
void SampleMemoryProfileTimeline(int64_t max_num_snapshots,
PerAllocatorMemoryProfile* memory_profile) {
const protobuf::RepeatedPtrField<MemoryProfileSnapshot>& original_snapshots =
memory_profile->memory_profile_snapshots();
protobuf::RepeatedPtrField<MemoryProfileSnapshot>* timeline_snapshots =
memory_profile->mutable_sampled_timeline_snapshots();
int64_t snapshot_count = original_snapshots.size();
if (snapshot_count > max_num_snapshots) {
auto max_box_filter = [&](int filter_width, int count, int start) {
for (int i = 0; i < count; i++) {
const MemoryProfileSnapshot* max_snapshot =
&original_snapshots[start + filter_width * i];
int64_t max_bytes =
max_snapshot->aggregation_stats().heap_allocated_bytes() +
max_snapshot->aggregation_stats().stack_reserved_bytes();
for (int index = start + filter_width * i + 1;
index < start + filter_width * (i + 1); index++) {
int64_t bytes = original_snapshots[index]
.aggregation_stats()
.heap_allocated_bytes() +
original_snapshots[index]
.aggregation_stats()
.stack_reserved_bytes();
if (bytes > max_bytes) {
max_snapshot = &original_snapshots[index];
max_bytes = bytes;
}
}
*timeline_snapshots->Add() = *max_snapshot;
}
};
int width = snapshot_count / max_num_snapshots;
int count1 = max_num_snapshots * (width + 1) - snapshot_count;
int count2 = max_num_snapshots - count1;
max_box_filter(width, count1, 0);
max_box_filter(width + 1, count2, width * count1);
} else {
*timeline_snapshots = original_snapshots;
}
}
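// Per-allocator post-processing pipeline: sort snapshots by time, fix step
// ids, pair deallocations with allocations, sample the timeline, and compute
// the active allocations at the peak-memory step.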
void ProcessMemoryProfileProto(int64_t max_num_snapshots,
MemoryProfile* memory_profile) {
memory_profile->set_num_hosts(1);
for (const auto& id_and_allocator_profile :
memory_profile->memory_profile_per_allocator()) {
if (!id_and_allocator_profile.second.memory_profile_snapshots().empty()) {
memory_profile->add_memory_ids(id_and_allocator_profile.first);
}
}
absl::c_sort(*memory_profile->mutable_memory_ids());
for (auto& id_and_allocator_profile :
*memory_profile->mutable_memory_profile_per_allocator()) {
PerAllocatorMemoryProfile* allocator_memory_profile =
&id_and_allocator_profile.second;
protobuf::RepeatedPtrField<MemoryProfileSnapshot>* snapshots =
allocator_memory_profile->mutable_memory_profile_snapshots();
absl::c_sort(*snapshots, [](const MemoryProfileSnapshot& a,
const MemoryProfileSnapshot& b) {
return a.time_offset_ps() < b.time_offset_ps();
});
UpdateStepId(allocator_memory_profile);
UpdateDeallocation(allocator_memory_profile);
SampleMemoryProfileTimeline(max_num_snapshots, allocator_memory_profile);
int64_t peak_step_id =
GetPeakMemoryStep(allocator_memory_profile->profile_summary()
.peak_stats()
.peak_bytes_in_use(),
allocator_memory_profile);
ProcessActiveAllocations(peak_step_id, allocator_memory_profile);
SaveActiveAllocationSnapshots(
snapshots, allocator_memory_profile->mutable_active_allocations());
}
}
template <typename Proto>
Status ConvertProtoToJson(const Proto& proto_output, std::string* json_output) {
protobuf::util::JsonPrintOptions json_options;
json_options.always_print_primitive_fields = true;
auto status = protobuf::util::MessageToJsonString(proto_output, json_output,
json_options);
if (!status.ok()) {
auto error_msg = status.message();
return errors::Internal(
"Could not convert proto to JSON string: ",
absl::string_view(error_msg.data(), error_msg.length()));
}
return absl::OkStatus();
}
}
MemoryProfile ConvertXPlaneToMemoryProfile(const XPlane& host_plane,
int64_t max_num_snapshots) {
MemoryProfile memory_profile = GenerateMemoryProfile(&host_plane);
ProcessMemoryProfileProto(max_num_snapshots, &memory_profile);
memory_profile.set_version(1);
return memory_profile;
}
Status ConvertXSpaceToMemoryProfileJson(const XSpace& xspace,
std::string* json_output) {
if (const XPlane* host_plane =
FindPlaneWithName(xspace, kHostThreadsPlaneName)) {
MemoryProfile memory_profile = ConvertXPlaneToMemoryProfile(*host_plane);
TF_RETURN_IF_ERROR(ConvertProtoToJson(memory_profile, json_output));
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_memory_profile.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/group_events.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
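// Builds a host plane with two allocations and one deallocation on the
// GPU_0_bfc allocator, then checks the summary, sampled timeline, active
// allocations, and special allocations of the converted profile.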
TEST(ConvertXPlaneToMemoryProfile, OneAllocatorMultiActivitiesTest) {
XSpace space;
XPlane* host_plane = GetOrCreateHostXPlane(&space);
XPlaneBuilder host_plane_builder(host_plane);
host_plane_builder.ReserveLines(1);
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryAllocation",
40000, 1000,
{{StatType::kBytesReserved, int64_t{2000}},
{StatType::kBytesAllocated, int64_t{3000}},
{StatType::kBytesAvailable, int64_t{5000}},
{StatType::kPeakBytesInUse, int64_t{8500}},
{StatType::kRequestedBytes, int64_t{200}},
{StatType::kAllocationBytes, int64_t{256}},
{StatType::kAddress, int64_t{222333}},
{StatType::kStepId, int64_t{-93746}},
{StatType::kDataType, int64_t{1}},
{StatType::kAllocatorName, "GPU_0_bfc"},
{StatType::kTfOp, "foo/bar"},
{StatType::kRegionType, "output"},
{StatType::kTensorShapes, "[3, 3, 512, 512]"}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryDeallocation",
50000, 1000,
{{StatType::kBytesReserved, int64_t{2000}},
{StatType::kBytesAllocated, int64_t{2744}},
{StatType::kBytesAvailable, int64_t{5256}},
{StatType::kPeakBytesInUse, int64_t{8500}},
{StatType::kRequestedBytes, int64_t{200}},
{StatType::kAllocationBytes, int64_t{256}},
{StatType::kAddress, int64_t{222333}},
{StatType::kStepId, int64_t{0}},
{StatType::kDataType, int64_t{0}},
{StatType::kAllocatorName, "GPU_0_bfc"},
{StatType::kRegionType, ""},
{StatType::kTensorShapes, ""}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "MemoryAllocation",
70000, 1000,
{{StatType::kBytesReserved, int64_t{2000}},
{StatType::kBytesAllocated, int64_t{5000}},
{StatType::kBytesAvailable, int64_t{3000}},
{StatType::kPeakBytesInUse, int64_t{9500}},
{StatType::kRequestedBytes, int64_t{300}},
{StatType::kAllocationBytes, int64_t{300}},
{StatType::kAddress, int64_t{345678}},
{StatType::kStepId, int64_t{-93746}},
{StatType::kDataType, int64_t{9}},
{StatType::kAllocatorName, "GPU_0_bfc"},
{StatType::kTfOp, "mul_grad/Sum"},
{StatType::kRegionType, "temp"},
{StatType::kTensorShapes, "[1, 2]"}});
tsl::profiler::GroupTfEvents(&space);
MemoryProfile memory_profile = ConvertXPlaneToMemoryProfile(*host_plane);
EXPECT_EQ(memory_profile.memory_profile_per_allocator().size(), 1);
EXPECT_EQ(memory_profile.num_hosts(), 1);
EXPECT_EQ(memory_profile.memory_ids_size(), 1);
EXPECT_EQ(memory_profile.memory_profile_per_allocator().begin()->first,
"GPU_0_bfc");
EXPECT_EQ(memory_profile.version(), 1);
const auto& allocator_memory_profile =
memory_profile.memory_profile_per_allocator().begin()->second;
EXPECT_EQ(
allocator_memory_profile.profile_summary().peak_bytes_usage_lifetime(),
9500);
EXPECT_EQ(allocator_memory_profile.profile_summary()
.peak_stats()
.peak_bytes_in_use(),
7000);
EXPECT_EQ(allocator_memory_profile.profile_summary().peak_stats_time_ps(),
70000);
EXPECT_EQ(allocator_memory_profile.sampled_timeline_snapshots_size(), 3);
EXPECT_EQ(allocator_memory_profile.memory_profile_snapshots_size(), 1);
EXPECT_EQ(allocator_memory_profile.memory_profile_snapshots()
.at(0)
.activity_metadata()
.tf_op_name(),
"mul_grad/Sum");
EXPECT_EQ(allocator_memory_profile.active_allocations_size(), 3);
EXPECT_EQ(
allocator_memory_profile.active_allocations().at(2).snapshot_index(), 0);
EXPECT_EQ(allocator_memory_profile.special_allocations_size(), 2);
EXPECT_EQ(allocator_memory_profile.special_allocations().at(1).tf_op_name(),
"stack");
EXPECT_EQ(
allocator_memory_profile.special_allocations().at(1).allocation_bytes(),
2000);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_memory_profile.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_memory_profile_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe31defb-0fd1-4d9a-820f-8eb2460a1483 | cpp | tensorflow/tensorflow | object_accessor | tensorflow/lite/delegates/gpu/gl/compiler/object_accessor.cc | tensorflow/lite/delegates/gpu/gl/compiler/object_accessor_test.cc | #include "tensorflow/lite/delegates/gpu/gl/compiler/object_accessor.h"
#include <string>
#include <utility>
#include <variant>
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/access_type.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace object_accessor_internal {
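// Splits an indexed reference such as "obj[i, j, k]" into the object name and
// its index expressions; returns an empty element if no well-formed "[...]"
// suffix is present.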
IndexedElement ParseElement(absl::string_view input) {
auto i = input.find('[');
if (i == std::string::npos || input.back() != ']') {
return {};
}
return {input.substr(0, i),
absl::StrSplit(input.substr(i + 1, input.size() - i - 2), ',',
absl::SkipWhitespace())};
}
}
namespace {
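// FP16 buffers are stored as uvec2 of packed half pairs, so buffer reads and
// writes are wrapped in Vec4FromHalf/Vec4ToHalf conversions.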
void MaybeConvertToHalf(DataType data_type, absl::string_view value,
std::string* output) {
if (data_type == DataType::FLOAT16) {
absl::StrAppend(output, "Vec4ToHalf(", value, ")");
} else {
absl::StrAppend(output, value);
}
}
void MaybeConvertFromHalf(DataType data_type, absl::string_view value,
std::string* output) {
if (data_type == DataType::FLOAT16) {
absl::StrAppend(output, "Vec4FromHalf(", value, ")");
} else {
absl::StrAppend(output, value);
}
}
struct ReadFromTextureGenerator {
RewriteStatus operator()(size_t) const {
if (element.indices.size() != 1) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
if (sampler_textures) {
absl::StrAppend(result, "texelFetch(", element.object_name, ", ivec2(",
element.indices[0], ", 0), 0)");
} else {
absl::StrAppend(result, "imageLoad(", element.object_name, ", ivec2(",
element.indices[0], ", 0))");
}
return RewriteStatus::SUCCESS;
}
template <typename Shape>
RewriteStatus operator()(const Shape&) const {
if (element.indices.size() != Shape::size()) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
if (sampler_textures) {
absl::StrAppend(result, "texelFetch(", element.object_name, ", ivec",
Shape::size(), "(", absl::StrJoin(element.indices, ", "),
"), 0)");
} else {
absl::StrAppend(result, "imageLoad(", element.object_name, ", ivec",
Shape::size(), "(", absl::StrJoin(element.indices, ", "),
"))");
}
return RewriteStatus::SUCCESS;
}
const object_accessor_internal::IndexedElement& element;
const bool sampler_textures;
std::string* result;
};
struct ReadFromBufferGenerator {
RewriteStatus operator()(size_t) const {
if (element.indices.size() != 1) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
MaybeConvertFromHalf(
data_type,
absl::StrCat(element.object_name, ".data[", element.indices[0], "]"),
result);
return RewriteStatus::SUCCESS;
}
RewriteStatus operator()(const uint2& size) const {
if (element.indices.size() == 1) {
return (*this)(1U);
}
if (element.indices.size() != 2) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
MaybeConvertFromHalf(
data_type,
absl::StrCat(element.object_name, ".data[", element.indices[0], " + $",
element.object_name, "_w$ * (", element.indices[1], ")]"),
result);
*requires_sizes = true;
return RewriteStatus::SUCCESS;
}
RewriteStatus operator()(const uint3& size) const {
if (element.indices.size() == 1) {
return (*this)(1U);
}
if (element.indices.size() != 3) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
MaybeConvertFromHalf(
data_type,
absl::StrCat(element.object_name, ".data[", element.indices[0], " + $",
element.object_name, "_w$ * (", element.indices[1], " + $",
element.object_name, "_h$ * (", element.indices[2], "))]"),
result);
*requires_sizes = true;
return RewriteStatus::SUCCESS;
}
DataType data_type;
const object_accessor_internal::IndexedElement& element;
std::string* result;
bool* requires_sizes;
};
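// Dispatches on object type: buffer reads become .data[] accesses with
// linearized indices, texture reads become texelFetch or imageLoad.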
RewriteStatus GenerateReadAccessor(
const Object& object,
const object_accessor_internal::IndexedElement& element,
bool sampler_textures, std::string* result, bool* requires_sizes) {
switch (object.object_type) {
case ObjectType::BUFFER:
return std::visit(ReadFromBufferGenerator{object.data_type, element,
result, requires_sizes},
object.size);
case ObjectType::TEXTURE:
return std::visit(
ReadFromTextureGenerator{element, sampler_textures, result},
object.size);
case ObjectType::UNKNOWN:
return RewriteStatus::ERROR;
}
}
struct WriteToBufferGenerator {
RewriteStatus operator()(size_t) const {
if (element.indices.size() != 1) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
absl::StrAppend(result, element.object_name, ".data[", element.indices[0],
"] = ");
MaybeConvertToHalf(data_type, value, result);
return RewriteStatus::SUCCESS;
}
RewriteStatus operator()(const uint2& size) const {
if (element.indices.size() == 1) {
return (*this)(1U);
}
if (element.indices.size() != 2) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
absl::StrAppend(result, element.object_name, ".data[", element.indices[0],
" + $", element.object_name, "_w$ * (", element.indices[1],
")] = ");
MaybeConvertToHalf(data_type, value, result);
*requires_sizes = true;
return RewriteStatus::SUCCESS;
}
RewriteStatus operator()(const uint3& size) const {
if (element.indices.size() == 1) {
return (*this)(1U);
}
if (element.indices.size() != 3) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
absl::StrAppend(result, element.object_name, ".data[", element.indices[0],
" + $", element.object_name, "_w$ * (", element.indices[1],
" + $", element.object_name, "_h$ * (", element.indices[2],
"))] = ");
MaybeConvertToHalf(data_type, value, result);
*requires_sizes = true;
return RewriteStatus::SUCCESS;
}
DataType data_type;
const object_accessor_internal::IndexedElement& element;
absl::string_view value;
std::string* result;
bool* requires_sizes;
};
struct WriteToTextureGenerator {
RewriteStatus operator()(size_t) const {
if (element.indices.size() != 1) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
absl::StrAppend(result, "imageStore(", element.object_name, ", ivec2(",
element.indices[0], ", 0), ", value, ")");
return RewriteStatus::SUCCESS;
}
template <typename Shape>
RewriteStatus operator()(const Shape&) const {
if (element.indices.size() != Shape::size()) {
result->append("WRONG_NUMBER_OF_INDICES");
return RewriteStatus::ERROR;
}
absl::StrAppend(result, "imageStore(", element.object_name, ", ivec",
Shape::size(), "(", absl::StrJoin(element.indices, ", "),
"), ", value, ")");
return RewriteStatus::SUCCESS;
}
const object_accessor_internal::IndexedElement& element;
absl::string_view value;
std::string* result;
};
RewriteStatus GenerateWriteAccessor(
const Object& object,
const object_accessor_internal::IndexedElement& element,
absl::string_view value, std::string* result, bool* requires_sizes) {
switch (object.object_type) {
case ObjectType::BUFFER:
return std::visit(WriteToBufferGenerator{object.data_type, element, value,
result, requires_sizes},
object.size);
case ObjectType::TEXTURE:
return std::visit(WriteToTextureGenerator{element, value, result},
object.size);
case ObjectType::UNKNOWN:
return RewriteStatus::ERROR;
}
}
std::string ToAccessModifier(AccessType access, bool use_readonly_modifier) {
switch (access) {
case AccessType::READ:
return use_readonly_modifier ? " readonly" : "";
case AccessType::WRITE:
return " writeonly";
case AccessType::READ_WRITE:
return " restrict";
}
return " unknown_access";
}
std::string ToBufferType(DataType data_type) {
switch (data_type) {
case DataType::UINT8:
case DataType::UINT16:
case DataType::UINT32:
return "uvec4";
case DataType::UINT64:
return "u64vec4_not_available_in_glsl";
case DataType::INT8:
case DataType::INT16:
case DataType::INT32:
return "ivec4";
case DataType::INT64:
return "i64vec4_not_available_in_glsl";
case DataType::FLOAT16:
return "uvec2";
case DataType::BOOL:
case DataType::FLOAT32:
return "vec4";
case DataType::FLOAT64:
return "dvec4";
case DataType::UNKNOWN:
return "unknown_buffer_type";
}
}
struct TextureImageTypeGetter {
std::string operator()(size_t) const {
return (*this)(uint2());
}
std::string operator()(const uint2&) const {
switch (type) {
case DataType::UINT16:
case DataType::UINT32:
return "uimage2D";
case DataType::INT16:
case DataType::INT32:
return "iimage2D";
case DataType::FLOAT16:
case DataType::FLOAT32:
return "image2D";
default:
return "unknown_image_2d";
}
}
std::string operator()(const uint3&) const {
switch (type) {
case DataType::UINT16:
case DataType::UINT32:
return "uimage2DArray";
case DataType::INT16:
case DataType::INT32:
return "iimage2DArray";
case DataType::FLOAT16:
case DataType::FLOAT32:
return "image2DArray";
default:
return "unknown_image_2d_array";
}
}
DataType type;
};
struct TextureSamplerTypeGetter {
std::string operator()(size_t) const {
return (*this)(uint2());
}
std::string operator()(const uint2&) const {
switch (type) {
case DataType::FLOAT16:
case DataType::FLOAT32:
return "sampler2D";
case DataType::INT32:
case DataType::INT16:
return "isampler2D";
case DataType::UINT32:
case DataType::UINT16:
return "usampler2D";
default:
return "unknown_sampler2D";
}
}
std::string operator()(const uint3&) const {
switch (type) {
case DataType::FLOAT16:
case DataType::FLOAT32:
return "sampler2DArray";
case DataType::INT32:
case DataType::INT16:
return "isampler2DArray";
case DataType::UINT32:
case DataType::UINT16:
return "usampler2DArray";
default:
return "unknown_sampler2DArray";
}
}
DataType type;
};
std::string ToImageType(const Object& object, bool sampler_textures) {
if (sampler_textures && (object.access == AccessType::READ)) {
return std::visit(TextureSamplerTypeGetter{object.data_type}, object.size);
} else {
return std::visit(TextureImageTypeGetter{object.data_type}, object.size);
}
}
std::string ToImageLayoutQualifier(DataType type) {
switch (type) {
case DataType::UINT16:
return "rgba16ui";
case DataType::UINT32:
return "rgba32ui";
case DataType::INT16:
return "rgba16i";
case DataType::INT32:
return "rgba32i";
case DataType::FLOAT16:
return "rgba16f";
case DataType::FLOAT32:
return "rgba32f";
default:
return "unknown_image_layout";
}
}
std::string ToImagePrecision(DataType type) {
switch (type) {
case DataType::UINT16:
case DataType::INT16:
case DataType::FLOAT16:
return "mediump";
case DataType::UINT32:
case DataType::INT32:
case DataType::FLOAT32:
return "highp";
default:
return "unknown_image_precision";
}
}
struct SizeParametersAdder {
void operator()(size_t) const {}
void operator()(const uint2& size) const {
variable_accessor->AddUniformParameter(
{absl::StrCat(object_name, "_w"), static_cast<int32_t>(size.x)});
}
void operator()(const uint3& size) const {
variable_accessor->AddUniformParameter(
{absl::StrCat(object_name, "_w"), static_cast<int32_t>(size.x)});
variable_accessor->AddUniformParameter(
{absl::StrCat(object_name, "_h"), static_cast<int32_t>(size.y)});
}
absl::string_view object_name;
VariableAccessor* variable_accessor;
};
void AddSizeParameters(absl::string_view object_name, const Object& object,
VariableAccessor* parameters) {
std::visit(SizeParametersAdder{object_name, parameters}, object.size);
}
void GenerateObjectDeclaration(absl::string_view name, const Object& object,
std::string* declaration, bool is_mali,
bool sampler_textures) {
switch (object.object_type) {
case ObjectType::BUFFER:
absl::StrAppend(declaration, "layout(binding = ", object.binding, ")",
ToAccessModifier(object.access, !is_mali), " buffer B",
object.binding, " { ", ToBufferType(object.data_type),
" data[]; } ", name, ";\n");
break;
case ObjectType::TEXTURE:
if (sampler_textures && (object.access == AccessType::READ)) {
absl::StrAppend(declaration, "layout(binding = ", object.binding,
") uniform ", ToImagePrecision(object.data_type), " ",
ToImageType(object, sampler_textures), " ", name,
";\n");
} else {
absl::StrAppend(
declaration, "layout(", ToImageLayoutQualifier(object.data_type),
", binding = ", object.binding, ")",
ToAccessModifier(object.access, true), " uniform ",
ToImagePrecision(object.data_type), " ",
ToImageType(object, sampler_textures), " ", name, ";\n");
}
break;
case ObjectType::UNKNOWN:
break;
}
}
}
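// Rewrites an inline snippet of the form "obj[indices]" (read) or
// "obj[indices] = value" (write), splitting on the first '='.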
RewriteStatus ObjectAccessor::Rewrite(absl::string_view input,
std::string* output) {
std::pair<absl::string_view, absl::string_view> n =
absl::StrSplit(input, absl::MaxSplits('=', 1), absl::SkipWhitespace());
if (n.first.empty()) {
return RewriteStatus::NOT_RECOGNIZED;
}
if (n.second.empty()) {
return RewriteRead(absl::StripAsciiWhitespace(n.first), output);
}
return RewriteWrite(absl::StripAsciiWhitespace(n.first),
absl::StripAsciiWhitespace(n.second), output);
}
RewriteStatus ObjectAccessor::RewriteRead(absl::string_view location,
std::string* output) {
auto element = object_accessor_internal::ParseElement(location);
if (element.object_name.empty()) {
return RewriteStatus::NOT_RECOGNIZED;
}
auto it = name_to_object_.find(
std::string(element.object_name.data(), element.object_name.size()));
if (it == name_to_object_.end()) {
return RewriteStatus::NOT_RECOGNIZED;
}
bool requires_sizes = false;
auto status = GenerateReadAccessor(it->second, element, sampler_textures_,
output, &requires_sizes);
if (requires_sizes) {
AddSizeParameters(it->first, it->second, variable_accessor_);
}
return status;
}
RewriteStatus ObjectAccessor::RewriteWrite(absl::string_view location,
absl::string_view value,
std::string* output) {
auto element = object_accessor_internal::ParseElement(location);
if (element.object_name.empty()) {
return RewriteStatus::NOT_RECOGNIZED;
}
auto it = name_to_object_.find(
std::string(element.object_name.data(), element.object_name.size()));
if (it == name_to_object_.end()) {
return RewriteStatus::NOT_RECOGNIZED;
}
bool requires_sizes = false;
auto status = GenerateWriteAccessor(it->second, element, value, output,
&requires_sizes);
if (requires_sizes) {
AddSizeParameters(it->first, it->second, variable_accessor_);
}
return status;
}
bool ObjectAccessor::AddObject(const std::string& name, Object object) {
if (object.object_type == ObjectType::UNKNOWN) {
return false;
}
return name_to_object_.insert({name, std::move(object)}).second;
}
std::string ObjectAccessor::GetObjectDeclarations() const {
std::string declarations;
for (auto& o : name_to_object_) {
GenerateObjectDeclaration(o.first, o.second, &declarations, is_mali_,
sampler_textures_);
}
return declarations;
}
std::string ObjectAccessor::GetFunctionsDeclarations() const {
for (const auto& o : name_to_object_) {
if (o.second.data_type == DataType::FLOAT16 &&
o.second.object_type == ObjectType::BUFFER) {
return absl::StrCat(
"#define Vec4FromHalf(v) vec4(unpackHalf2x16(v.x), "
"unpackHalf2x16(v.y))\n",
"#define Vec4ToHalf(v) uvec2(packHalf2x16(v.xy), "
"packHalf2x16(v.zw))");
}
}
return "";
}
std::vector<Object> ObjectAccessor::GetObjects() const {
std::vector<Object> objects;
objects.reserve(name_to_object_.size());
for (auto& o : name_to_object_) {
objects.push_back(o.second);
}
return objects;
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/compiler/object_accessor.h"
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
struct ParameterComparator {
template <typename T>
bool operator()(const T& t) const {
const T* v = std::get_if<T>(&p.value);
return v && t == *v;
}
const Variable& p;
};
bool operator==(const Variable& l, const Variable& r) {
return l.name == r.name && std::visit(ParameterComparator{l}, r.value);
}
namespace {
TEST(Preprocessor, CornerCases) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
std::string result;
ASSERT_EQ(accessor.Rewrite("", &result), RewriteStatus::NOT_RECOGNIZED);
ASSERT_EQ(accessor.Rewrite("=", &result), RewriteStatus::NOT_RECOGNIZED);
}
TEST(Preprocessor, ReadFromBuffer) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i]", &result), RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "obj.data[i]");
}
TEST(Preprocessor, ReadFromBufferLinear) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyBuffer(uint3(1, 2, 3), std::vector<float>{1.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i]", &result), RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "obj.data[i]");
}
TEST(Preprocessor, ReadFromBufferByIndex) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyBuffer(uint3(1, 2, 3), std::vector<float>{1.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[x,y + 5,z]", &result),
RewriteStatus::SUCCESS);
EXPECT_THAT(variable_accessor.GetUniformParameters(),
testing::UnorderedElementsAre(Variable{"obj_w", 1},
Variable{"obj_h", 2}));
ASSERT_EQ(result, "obj.data[x + $obj_w$ * (y + 5 + $obj_h$ * (z))]");
}
TEST(Preprocessor, ReadFromTexture) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyTexture(uint3(1, 2, 3), {1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i,j,k]", &result), RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "imageLoad(obj, ivec3(i, j, k))");
}
TEST(Preprocessor, ReadFromTexture1D) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyTexture({1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i]", &result), RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "imageLoad(obj, ivec2(i, 0))");
}
TEST(Preprocessor, WriteToBuffer) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite(" obj[i] =value", &result),
RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "obj.data[i] = value");
}
TEST(Preprocessor, WriteToBufferByIndex) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyBuffer(uint3(1, 2, 3), {1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite(" obj[i,j,k] =value", &result),
RewriteStatus::SUCCESS);
EXPECT_THAT(variable_accessor.GetUniformParameters(),
testing::UnorderedElementsAre(Variable{"obj_w", 1},
Variable{"obj_h", 2}));
ASSERT_EQ(result, "obj.data[i + $obj_w$ * (j + $obj_h$ * (k))] = value");
}
TEST(Preprocessor, WriteToTexture) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyTexture(uint3(1, 1, 1), {1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i,j,k]= value ", &result),
RewriteStatus::SUCCESS);
ASSERT_EQ(result, "imageStore(obj, ivec3(i, j, k), value)");
}
TEST(Preprocessor, WriteToTexture1D) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyTexture({1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i]= value ", &result),
RewriteStatus::SUCCESS);
EXPECT_TRUE(variable_accessor.GetUniformParameters().empty());
ASSERT_EQ(result, "imageStore(obj, ivec2(i, 0), value)");
}
TEST(Preprocessor, FailedWriteToBuffer) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite(" obj[i,j] =value", &result),
RewriteStatus::ERROR);
ASSERT_EQ(result, "WRONG_NUMBER_OF_INDICES");
}
TEST(Preprocessor, FailedWriteToTexture) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyTexture(uint3(1, 1, 1), {1.0, 2.0, 3.0, 4.0})));
std::string result;
EXPECT_EQ(accessor.Rewrite("obj[i]= value ", &result), RewriteStatus::ERROR);
ASSERT_EQ(result, "WRONG_NUMBER_OF_INDICES");
}
TEST(Preprocessor, DeclareTexture) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(false, &variable_accessor);
ASSERT_TRUE(accessor.AddObject(
"obj", MakeReadonlyTexture(uint3(1, 1, 1), {1.0, 2.0, 3.0, 4.0})));
ASSERT_EQ(accessor.GetObjectDeclarations(),
"layout(rgba32f, binding = 0) readonly uniform highp image2DArray "
"obj;\n");
}
TEST(Preprocessor, DeclareBuffer) {
VariableAccessor variable_accessor(false);
ObjectAccessor accessor(true, &variable_accessor);
ASSERT_TRUE(
accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0})));
ASSERT_EQ(accessor.GetObjectDeclarations(),
"layout(binding = 0) buffer B0 { vec4 data[]; } obj;\n");
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/object_accessor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/object_accessor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f91e14b2-7735-43ce-8b22-6f38cce4d87a | cpp | tensorflow/tensorflow | debug_graph_utils | tensorflow/core/debug/debug_graph_utils.cc | tensorflow/core/debug/debug_graph_utils_test.cc | #include "tensorflow/core/debug/debug_graph_utils.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/memory_types.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/protobuf/debug.pb.h"
namespace tensorflow {
namespace {
Status ParseBoolString(const string& bool_str, bool* bool_val) {
const string lower_bool_str = absl::AsciiStrToLower(bool_str);
if (lower_bool_str == "false" || lower_bool_str == "f" ||
lower_bool_str == "0") {
*bool_val = false;
} else if (lower_bool_str == "true" || lower_bool_str == "t" ||
lower_bool_str == "1") {
*bool_val = true;
} else {
return absl::InvalidArgumentError(
absl::StrCat("Invalid string for bool value: ", bool_str));
}
return absl::OkStatus();
}
}
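// For every watched output slot, inserts a Copy/CopyHost node fed by that
// slot, attaches the requested debug ops to the copy, reroutes non-reference
// consumers through the copy, and adds control edges from the debug nodes so
// they complete before downstream consumers run.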
Status DebugNodeInserter::InsertNodes(
const protobuf::RepeatedPtrField<DebugTensorWatch>& watches, Graph* graph,
Device* device) {
if (watches.empty()) {
return absl::OkStatus();
}
std::vector<string> default_debug_ops;
std::vector<string> default_debug_urls;
std::unordered_map<string, std::vector<string>> tensor_watches;
std::unordered_map<string, std::vector<string>> tensor_watch_urls;
std::unordered_map<string, bool> tensor_tolerate_failures;
for (const DebugTensorWatch& watch : watches) {
if (watch.debug_ops().empty()) {
continue;
}
if (watch.debug_urls().empty()) {
continue;
}
if (watch.node_name() == "*") {
if (watch.output_slot() == -1) {
default_debug_ops.insert(default_debug_ops.end(),
watch.debug_ops().begin(),
watch.debug_ops().end());
default_debug_urls.insert(default_debug_urls.end(),
watch.debug_urls().begin(),
watch.debug_urls().end());
} else {
return Status(absl::StatusCode::kFailedPrecondition,
strings::StrCat(
"output_slot is expected to be -1 for wildcard ",
"node name (\"*\"), but got ", watch.output_slot()));
}
continue;
} else {
if (watch.output_slot() < 0) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("A negative output_slot in DebugTensorWatch is ",
"valid only for the wildcard node name (\"*\"), ",
"but got node name ", watch.node_name()));
}
}
string tensor_name =
strings::StrCat(watch.node_name(), ":", watch.output_slot());
std::vector<string> debug_ops;
for (const string& debug_op : watch.debug_ops()) {
debug_ops.push_back(debug_op);
}
tensor_watches[tensor_name] = debug_ops;
tensor_tolerate_failures[tensor_name] =
watch.tolerate_debug_op_creation_failures();
std::vector<string> urls;
for (const string& url : watch.debug_urls()) {
urls.push_back(url);
}
tensor_watch_urls[tensor_name] = urls;
}
if (tensor_watches.empty()) {
return absl::OkStatus();
}
DeviceType device_type = DeviceType{device->device_type()};
std::vector<const Edge*> edges_to_remove;
for (Node* src_node : graph->nodes()) {
std::unordered_map<int, std::vector<const Edge*>> output_slot_to_edges;
for (const Edge* edge : src_node->out_edges()) {
const int src_output = edge->src_output();
if (output_slot_to_edges.find(src_output) == output_slot_to_edges.end()) {
output_slot_to_edges[src_output] = {edge};
} else {
output_slot_to_edges[src_output].push_back(edge);
}
}
for (int src_output_slot = 0; src_output_slot < src_node->num_outputs();
++src_output_slot) {
const string tensor_name =
strings::StrCat(src_node->name(), ":", src_output_slot);
const bool explicit_tensor_match =
tensor_watches.find(tensor_name) != tensor_watches.end();
if (!explicit_tensor_match && default_debug_ops.empty()) {
continue;
}
const DataType src_dt = src_node->output_type(src_output_slot);
MemoryType memory_type;
TF_RETURN_IF_ERROR(MemoryTypeForOutput(device_type, graph, src_node,
src_output_slot, &memory_type));
const std::vector<string> debug_ops = explicit_tensor_match
? tensor_watches[tensor_name]
: default_debug_ops;
const std::vector<string> debug_urls =
explicit_tensor_match ? tensor_watch_urls[tensor_name]
: default_debug_urls;
Node* copy_node;
Status copy_s =
CreateCopyNode(graph, device_type, memory_type == HOST_MEMORY,
src_node->name(), src_output_slot, src_dt, tensor_name,
debug_ops, debug_urls, ©_node);
if (!copy_s.ok()) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create Copy/CopyHost node for tensor ",
tensor_name, ", due to: ", copy_s.message()));
}
graph->AddEdge(src_node, src_output_slot, copy_node, 0);
std::vector<Node*> debug_nodes;
for (size_t i = 0; i < debug_ops.size(); ++i) {
const string& debug_op_name = debug_ops[i];
Node* debug_node;
Status debug_s = CreateDebugNode(graph, *device, copy_node->name(),
src_dt, tensor_name, debug_urls, i,
debug_op_name, &debug_node);
if (debug_s.ok()) {
graph->AddEdge(copy_node, 0, debug_node, 0);
debug_nodes.push_back(debug_node);
} else {
if (tensor_tolerate_failures[tensor_name]) {
LOG(INFO) << "Tolerating failure to create debug node: "
<< "tensor name = " << tensor_name << "; "
<< "debug op name = " << debug_op_name;
} else {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create debug node ", debug_op_name,
" for tensor ", tensor_name,
", due to: ", debug_s.message()));
}
}
}
const bool is_ref = IsRefType(src_node->output_type(src_output_slot));
for (const Edge* edge : output_slot_to_edges[src_output_slot]) {
if (!is_ref) {
edges_to_remove.push_back(edge);
graph->AddEdge(copy_node, 0, edge->dst(), edge->dst_input());
}
for (Node* debug_node : debug_nodes) {
if (!src_node->IsEnter() && !src_node->IsNextIteration()) {
graph->AddEdge(debug_node, Graph::kControlSlot, edge->dst(),
Graph::kControlSlot);
}
}
}
}
}
for (const Edge* edge : edges_to_remove) {
graph->RemoveEdge(edge);
}
return absl::OkStatus();
}
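// Forces parallel_iterations = 1 on Enter/RefEnter nodes so while-loops run
// sequentially while debugging; the change affects only this graph instance.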
void DebugNodeInserter::DeparallelizeWhileLoops(Graph* graph, Device* device) {
bool deparallelized_a_loop = false;
for (Node* node : graph->nodes()) {
if (node->IsEnter()) {
const AttrValue* parallel_iterations =
node->attrs().Find("parallel_iterations");
if (parallel_iterations && parallel_iterations->i() > 1) {
deparallelized_a_loop = true;
VLOG(1) << "Changing the parallel_iterations attribute of the "
<< "Enter/RefEnter node \"" << node->name() << "\" on device \""
<< device->name() << "\" from " << parallel_iterations->i()
<< " to 1.";
node->AddAttr<int64_t>("parallel_iterations", 1);
}
}
}
if (deparallelized_a_loop) {
LOG(INFO) << "For debugging, tfdbg has set the parallel_iterations "
<< "attribute of all scheduled Enter/RefEnter nodes to 1. (This "
<< "does not affect subsequent non-debug runs.)";
}
}
const string DebugNodeInserter::GetCopyNodeName(const string& node_name,
const int output_slot) {
return strings::StrCat("__copy_", node_name, "_", output_slot);
}
const string DebugNodeInserter::GetDebugNodeName(const string& tensor_name,
const int debug_op_num,
const string& debug_op_name) {
return strings::StrCat("__dbg_", tensor_name, "_", debug_op_num, "_",
debug_op_name);
}
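// Builds the Copy/CopyHost node and encodes each (debug op, debug URL,
// gated_grpc) combination as a "name;url;0|1" entry in debug_ops_spec.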
Status DebugNodeInserter::CreateCopyNode(
Graph* graph, const DeviceType device_type, const bool is_host_memory,
const string& src_node_name, const int src_output, const DataType src_dt,
const string& tensor_name, const std::vector<string>& debug_ops,
const std::vector<string>& debug_urls, Node** copy_node) {
const string kGatedGrpcAttributeKey = "gated_grpc";
NodeDef node_def;
const KernelDef* kdef;
const string copy_op_name = is_host_memory ? "CopyHost" : "Copy";
const string copy_node_name = GetCopyNodeName(src_node_name, src_output);
std::vector<string> debug_ops_spec;
for (const string& debug_op : debug_ops) {
for (const string& debug_url : debug_urls) {
string debug_op_name_proper;
std::unordered_map<string, string> custom_attributes;
TF_RETURN_IF_ERROR(ParseDebugOpName(debug_op, &debug_op_name_proper,
&custom_attributes));
bool gated_grpc_value = false;
if (custom_attributes.find(kGatedGrpcAttributeKey) !=
custom_attributes.end()) {
TF_RETURN_IF_ERROR(ParseBoolString(
custom_attributes[kGatedGrpcAttributeKey], &gated_grpc_value));
}
debug_ops_spec.push_back(strings::StrCat(debug_op_name_proper, ";",
debug_url, ";",
gated_grpc_value ? "1" : "0"));
}
}
auto builder = NodeDefBuilder(copy_node_name, copy_op_name)
.Input(src_node_name, src_output, src_dt)
.Attr("debug_ops_spec", debug_ops_spec);
if (!builder.Finalize(&node_def).ok()) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create node definition ", "for copy op ",
copy_node_name, " on watched tensor ", tensor_name));
}
Status s = FindKernelDef(device_type, node_def, &kdef, nullptr);
if (!s.ok()) {
return Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to find kernel definition ", "for copy op ",
copy_node_name, " on watched tensor ", tensor_name));
}
if (!NodeBuilder(builder).Finalize(graph, copy_node).ok()) {
return Status(absl::StatusCode::kFailedPrecondition,
strings::StrCat("Failed to create copy node ", copy_node_name,
" on watched tensor ", tensor_name));
}
return absl::OkStatus();
}
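// Parses "OpName(attr1=v1;attr2=v2)" into the proper op name and an attribute
// map; a bare "OpName" yields an empty attribute map.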
Status DebugNodeInserter::ParseDebugOpName(
const string& debug_op_name, string* debug_op_name_proper,
std::unordered_map<string, string>* attributes) {
const size_t l_index = debug_op_name.find('(');
const size_t r_index = debug_op_name.find(')');
if (l_index == string::npos && r_index == string::npos) {
*debug_op_name_proper = debug_op_name;
} else {
if (l_index == string::npos || l_index == 0 ||
r_index != debug_op_name.size() - 1) {
return absl::InvalidArgumentError(
absl::StrCat("Malformed debug op name \"", debug_op_name, "\""));
}
*debug_op_name_proper = debug_op_name.substr(0, l_index);
string arguments = debug_op_name.substr(l_index + 1, r_index - l_index - 1);
std::vector<string> attribute_segs = str_util::Split(arguments, ";");
for (const string& attribute_seg : attribute_segs) {
StringPiece seg(attribute_seg);
str_util::RemoveWhitespaceContext(&seg);
if (seg.empty()) {
continue;
}
const size_t eq_index = seg.find('=');
if (eq_index == string::npos) {
return absl::InvalidArgumentError(absl::StrCat(
"Malformed attributes in debug op name \"", debug_op_name, "\""));
}
const string key(seg.substr(0, eq_index));
const string value(
seg.substr(eq_index + 1, attribute_seg.size() - eq_index - 1));
if (key.empty() || value.empty()) {
return absl::InvalidArgumentError(absl::StrCat(
"Malformed attributes in debug op name \"", debug_op_name, "\""));
}
if (attributes->find(key) == attributes->end()) {
(*attributes)[key] = value;
} else {
return absl::InvalidArgumentError(
absl::StrCat("Duplicate attribute name \"", key,
"\" found in the debug op: \"", debug_op_name, "\""));
}
}
}
return absl::OkStatus();
}
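// Applies the parsed custom attributes to the debug node, converting each
// value to the declared attr type; unknown attribute keys are an error.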
Status DebugNodeInserter::SetDebugNodeAttributes(
Node* debug_node, const std::unordered_map<string, string>& attributes) {
std::unordered_set<string> unfulfilled_keys;
for (const auto& item : attributes) {
unfulfilled_keys.insert(item.first);
}
for (const auto& attr : debug_node->op_def().attr()) {
if (attributes.find(attr.name()) != attributes.end()) {
const string& attr_value = attributes.at(attr.name());
if (attr.type() == "string") {
debug_node->AddAttr<string>(attr.name(), attr_value);
} else if (attr.type() == "float") {
float float_value = 0.0;
if (!::tensorflow::strings::safe_strtof(attr_value, &float_value)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid value string for float-type attribute ", attr.name(),
"of debug node ", debug_node->name(), ": \"", attr_value, "\""));
}
debug_node->AddAttr<float>(attr.name(), float_value);
} else if (attr.type() == "int") {
int64_t int_value = 0;
if (!::tensorflow::strings::safe_strto64(attr_value, &int_value)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid value string for int-type attribute ", attr.name(),
"of debug node ", debug_node->name(), ": \"", attr_value, "\""));
}
debug_node->AddAttr<int>(attr.name(), int_value);
} else if (attr.type() == "bool") {
bool bool_value;
if (!ParseBoolString(attr_value, &bool_value).ok()) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid value string for bool-type attribute ", attr.name(),
"of debug node ", debug_node->name(), ": \"", attr_value, "\""));
}
debug_node->AddAttr<bool>(attr.name(), bool_value);
} else {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported type of custom attribute for debug ops: ",
attr.type()));
}
unfulfilled_keys.erase(attr.name());
}
}
if (unfulfilled_keys.empty()) {
return absl::OkStatus();
} else {
return absl::InvalidArgumentError(absl::StrCat(
unfulfilled_keys.size(),
" attribute key(s) were not valid for debug node ", debug_node->name(),
": ", absl::StrJoin(unfulfilled_keys, ", ")));
}
}
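// Builds the debug node (e.g. DebugIdentity) that consumes the copy node's
// output, wiring up the standard attributes (device_name, tensor_name,
// debug_urls) plus any custom attributes parsed from the debug op name.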
Status DebugNodeInserter::CreateDebugNode(
Graph* graph, const Device& device, const string& src_copy_node_name,
const DataType src_dt, const string& tensor_name,
const std::vector<string>& debug_urls, const int debug_op_num,
const string& debug_op_name, Node** debug_node) {
NodeDef node_def;
const KernelDef* kdef;
string debug_op_name_proper;
std::unordered_map<string, string> custom_attributes;
TF_RETURN_IF_ERROR(ParseDebugOpName(debug_op_name, &debug_op_name_proper,
&custom_attributes));
const string debug_node_name =
GetDebugNodeName(tensor_name, debug_op_num, debug_op_name_proper);
auto builder = NodeDefBuilder(debug_node_name, debug_op_name_proper)
.Input(src_copy_node_name, 0, src_dt)
.Attr("device_name", device.name())
.Attr("tensor_name", tensor_name)
.Attr("debug_urls", debug_urls);
if (!builder.Finalize(&node_def).ok()) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to create node definition for debug op ",
debug_op_name_proper, " on watched tensor ", tensor_name));
}
if (!FindKernelDef(DeviceType(device.device_type()), node_def, &kdef, nullptr)
.ok()) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to find kernel definition for debug op ",
debug_op_name_proper, " on watched tensor ", tensor_name));
}
if (!NodeBuilder(builder).Finalize(graph, debug_node).ok()) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to create debug node ", debug_op_name_proper,
" on watched tensor ", tensor_name));
}
if (!custom_attributes.empty()) {
TF_RETURN_IF_ERROR(SetDebugNodeAttributes(*debug_node, custom_attributes));
}
return absl::OkStatus();
}
} | #include "tensorflow/core/debug/debug_graph_utils.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
class DebugGraphUtilsTest : public ::testing::Test {
protected:
Status ParseDebugOpName(const string& debug_op_name,
string* debug_op_name_proper,
std::unordered_map<string, string>* attributes) {
return DebugNodeInserter::ParseDebugOpName(
debug_op_name, debug_op_name_proper, attributes);
}
};
TEST_F(DebugGraphUtilsTest, TestParseNoAttributeDebugOpName) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(
ParseDebugOpName("DebugIdentity", &debug_op_name_proper, &attributes));
ASSERT_EQ("DebugIdentity", debug_op_name_proper);
ASSERT_EQ(0, attributes.size());
}
TEST_F(DebugGraphUtilsTest, TestMalformedDebugOpName) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
Status s = ParseDebugOpName("(mute_if_healthy=true)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
}
TEST_F(DebugGraphUtilsTest, TestDebugOpNameWithMalformedAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
Status s = ParseDebugOpName("DebugNumericSummary(=)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(mute_if_healthy=)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(=true)", &debug_op_name_proper,
&attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(mute_if_healthy:true)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName("DebugNumericSummary(mute_if_healthy=true;threshold=)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
s = ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true;threshold:300.0)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
}
TEST_F(DebugGraphUtilsTest, TestValidDebugOpNameWithSingleAttribute) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(ParseDebugOpName("DebugNumericSummary()", &debug_op_name_proper,
&attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(0, attributes.size());
attributes.clear();
TF_ASSERT_OK(ParseDebugOpName("DebugNumericSummary(mute_if_healthy=true)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(1, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
}
TEST_F(DebugGraphUtilsTest, TestValidDebugOpNameWithMoreThanOneAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true; threshold=300.0)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(2, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
attributes.clear();
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true;threshold=300.0;first_n=100)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(3, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
ASSERT_EQ("100", attributes["first_n"]);
}
TEST_F(DebugGraphUtilsTest, TestDebugOpNameWithDuplicateAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
Status s = ParseDebugOpName(
"DebugNumericSummary(mute_if_healthy=true; lower_bound=3; "
"mute_if_healthy=false;)",
&debug_op_name_proper, &attributes);
ASSERT_TRUE(errors::IsInvalidArgument(s));
}
TEST_F(DebugGraphUtilsTest, TestValidDebugOpNameWithWhitespaceInAttributes) {
string debug_op_name_proper;
std::unordered_map<string, string> attributes;
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary( mute_if_healthy=true; threshold=300.0 )",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(2, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
attributes.clear();
TF_ASSERT_OK(ParseDebugOpName(
"DebugNumericSummary(;;mute_if_healthy=true; threshold=300.0;;)",
&debug_op_name_proper, &attributes));
ASSERT_EQ("DebugNumericSummary", debug_op_name_proper);
ASSERT_EQ(2, attributes.size());
ASSERT_EQ("true", attributes["mute_if_healthy"]);
ASSERT_EQ("300.0", attributes["threshold"]);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/debug/debug_graph_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/debug/debug_graph_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d694b9e1-5657-4324-bf6e-f7b39fa399c0 | cpp | tensorflow/tensorflow | quantized_tensor_element_type | tensorflow/lite/experimental/shlo/quantized_tensor_element_type.cc | tensorflow/lite/experimental/shlo/quantized_tensor_element_type_test.cc | #include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include <sstream>
#include <string>
#include <type_traits>
#include <variant>
#include "tensorflow/lite/experimental/shlo/data_type.h"
namespace shlo_ref {
std::string ToString(const QuantizedElementTypePerTensor& t) {
std::stringstream sstr;
sstr << "QuantizedPerTensor[" << ToString(t.StorageType()) << ", "
<< ToString(t.ExpressedType()) << "]";
return sstr.str();
}
std::string ToString(const QuantizedElementTypePerAxis& t) {
std::stringstream sstr;
sstr << "QuantizedPerAxis[" << ToString(t.StorageType()) << ", "
<< ToString(t.ExpressedType()) << ", " << t.QuantizedDimension() << "]";
return sstr.str();
}
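// Returns a copy of the per-tensor type with its quantization parameters
// reset to the baseline values (scale = 1, zero point = 0) while keeping the
// storage and expressed types, e.g. (kSI8, zero_point=3, kF32, scale=0.5)
// becomes (kSI8, 0, kF32, 1).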
QuantizedElementTypePerTensor BaselineType(
const QuantizedElementTypePerTensor& type) {
QuantizedElementTypePerTensor baseline = type;
std::visit(
[](auto& scale) -> void {
scale = std::remove_reference_t<decltype(scale)>(1);
},
baseline.Scale());
std::visit(
[](auto& zero_point) -> void {
zero_point = std::remove_reference_t<decltype(zero_point)>(0);
},
baseline.ZeroPoint());
return baseline;
}
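// Per-axis variant: every element of the scales and zero-points vectors is
// reset to 1 and 0 respectively; the quantized dimension is preserved.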
QuantizedElementTypePerAxis BaselineType(
const QuantizedElementTypePerAxis& type) {
QuantizedElementTypePerAxis baseline = type;
std::visit(
[](auto& scales) -> void {
using T = std::remove_reference_t<decltype(scales[0])>;
absl::c_fill(scales, static_cast<T>(1));
},
baseline.Scales());
std::visit(
[](auto& zero_points) -> void {
using T = std::remove_reference_t<decltype(zero_points[0])>;
absl::c_fill(zero_points, static_cast<T>(0));
},
baseline.ZeroPoints());
return baseline;
}
} | #include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include <array>
#include <cstdint>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
namespace shlo_ref {
namespace {
using testing::Each;
using testing::ElementsAreArray;
using testing::FloatEq;
using testing::Pointwise;
TEST(Quantization, IsValidQuantizationTypePairWorks) {
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kI1, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kSI32));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kBF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI4, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kSI32));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kBF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI8, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kF16));
EXPECT_TRUE(IsValidQuantizationTypePair(DataType::kSI16, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kSI32, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kBF16, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF16, DataType::kF32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kI1));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI4));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI8));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kSI32));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kBF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kF16));
EXPECT_FALSE(IsValidQuantizationTypePair(DataType::kF32, DataType::kF32));
}
struct QuantizationPair {
DataType storage_type;
DataType expressed_type;
};
std::vector<QuantizationPair> ValidQuantizationTypePairs() {
return {QuantizationPair{.storage_type = DataType::kSI4,
.expressed_type = DataType::kBF16},
QuantizationPair{.storage_type = DataType::kSI4,
.expressed_type = DataType::kF16},
QuantizationPair{.storage_type = DataType::kSI4,
.expressed_type = DataType::kF32},
QuantizationPair{.storage_type = DataType::kSI8,
.expressed_type = DataType::kBF16},
QuantizationPair{.storage_type = DataType::kSI8,
.expressed_type = DataType::kF16},
QuantizationPair{.storage_type = DataType::kSI8,
.expressed_type = DataType::kF32},
QuantizationPair{.storage_type = DataType::kSI16,
.expressed_type = DataType::kF32}};
}
struct PerTensorTest : testing::TestWithParam<QuantizationPair> {
static constexpr auto ExtractValueAsInt = [](auto v) {
return static_cast<int32_t>(v);
};
static constexpr auto ExtractValueAsFloat = [](auto v) {
return static_cast<float>(v);
};
};
TEST_P(PerTensorTest, BuildPerTensorWorks) {
const QuantizationPair& config = GetParam();
QuantizedElementTypePerTensor type(config.storage_type, 1,
config.expressed_type, 2.5);
EXPECT_EQ(type.StorageType(), config.storage_type);
EXPECT_EQ(type.ExpressedType(), config.expressed_type);
EXPECT_EQ(std::visit(ExtractValueAsInt, type.ZeroPoint()), 1);
EXPECT_THAT(std::visit(ExtractValueAsFloat, type.Scale()), FloatEq(2.5));
}
TEST_P(PerTensorTest, BaselineTypeWorks) {
float scale = 0.5f;
int32_t zero_point = 3;
const QuantizationPair& config = GetParam();
QuantizedElementTypePerTensor element(config.storage_type, zero_point,
config.expressed_type, scale);
const auto baseline = BaselineType(element);
EXPECT_EQ(baseline.StorageType(), element.StorageType());
EXPECT_EQ(baseline.ExpressedType(), element.ExpressedType());
EXPECT_EQ(std::visit(ExtractValueAsInt, baseline.ZeroPoint()), 0);
EXPECT_THAT(std::visit(ExtractValueAsFloat, baseline.Scale()), FloatEq(1));
}
INSTANTIATE_TEST_SUITE_P(PerTensor, PerTensorTest,
testing::ValuesIn(ValidQuantizationTypePairs()));
struct PerAxisTest : testing::TestWithParam<QuantizationPair> {
static constexpr auto ExtractValueAsInt = [](auto v) {
return std::vector<int32_t>(v.begin(), v.end());
};
static constexpr auto ExtractValueAsFloat = [](auto v) {
return std::vector<float>(v.begin(), v.end());
};
};
TEST_P(PerAxisTest, BuildPerAxisWorks) {
const QuantizationPair& config = GetParam();
const std::vector<int32_t> ref_zero_points{1, 2, 3};
const std::vector<float> ref_scales{1.5, 2.5, 3.5};
QuantizedElementTypePerAxis type(config.storage_type, ref_zero_points,
config.expressed_type, ref_scales,
1);
EXPECT_EQ(type.StorageType(), config.storage_type);
EXPECT_EQ(type.ExpressedType(), config.expressed_type);
EXPECT_THAT(std::visit(ExtractValueAsInt, type.ZeroPoints()),
ElementsAreArray(ref_zero_points));
EXPECT_THAT(std::visit(ExtractValueAsFloat, type.Scales()),
Pointwise(FloatEq(), ref_scales));
}
TEST_P(PerAxisTest, BaselineTypeWorks) {
const QuantizationPair& config = GetParam();
float scales[3] = {0.5f, 0.6f, 0.2f};
int32_t zero_points[3] = {3, 1, 2};
const QuantizedElementTypePerAxis element(config.storage_type, scales,
config.expressed_type, zero_points,
3u);
const auto baseline = BaselineType(element);
const auto extracted_zero_points =
std::visit(ExtractValueAsInt, baseline.ZeroPoints());
const auto extracted_scales =
std::visit(ExtractValueAsFloat, baseline.Scales());
EXPECT_EQ(baseline.StorageType(), element.StorageType());
EXPECT_EQ(baseline.ExpressedType(), element.ExpressedType());
EXPECT_EQ(baseline.QuantizedDimension(), element.QuantizedDimension());
EXPECT_THAT(extracted_zero_points, Each(0));
EXPECT_THAT(extracted_zero_points.size(), std::size(zero_points));
EXPECT_THAT(extracted_scales, Each(FloatEq(1.0f)));
EXPECT_THAT(extracted_scales.size(), std::size(scales));
}
INSTANTIATE_TEST_SUITE_P(PerAxis, PerAxisTest,
testing::ValuesIn(ValidQuantizationTypePairs()));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/quantized_tensor_element_type.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/quantized_tensor_element_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b5b5cb72-a89f-46c4-99ab-2d6652fe6b51 | cpp | tensorflow/tensorflow | cutlass_gemm_custom_kernel | third_party/xla/xla/service/gpu/kernels/cutlass_gemm_custom_kernel.cc | third_party/xla/xla/service/gpu/kernels/cutlass_gemm_custom_kernel_test.cc | #include "xla/service/gpu/kernels/cutlass_gemm_custom_kernel.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/kernels/cutlass_gemm.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/xla_data.pb.h"
namespace xla::gpu::kernel::gemm_universal {
static constexpr auto Default = Arch::kDefault;
static constexpr auto Sm80 = Arch::kSm80;
static constexpr auto Sm90 = Arch::kSm90;
extern template struct Adaptor<F32xF32ToF32<Default>>;
extern template struct DeviceKernel<F32xF32ToF32<Default>>;
extern template struct Adaptor<Bf16xBf16ToBf16<Default>>;
extern template struct DeviceKernel<Bf16xBf16ToBf16<Default>>;
extern template struct Adaptor<Bf16xBf16ToBf16<Sm80>>;
extern template struct DeviceKernel<Bf16xBf16ToBf16<Sm80>>;
extern template struct Adaptor<Bf16xBf16ToBf16<Sm90>>;
extern template struct DeviceKernel<Bf16xBf16ToBf16<Sm90>>;
using KernelArgsPacking = se::MultiKernelLoaderSpec::KernelArgsPacking;
template <typename Dim>
static Dim As(Dim3 dim3) {
return Dim(dim3.x, dim3.y, dim3.z);
}
template <typename Dim>
static std::optional<Dim> As(std::optional<Dim3> dim3) {
if (dim3.has_value()) return Dim(dim3->x, dim3->y, dim3->z);
return std::nullopt;
}
static int32_t* SlicePtr(const se::KernelArgsDeviceMemoryArray* args,
int64_t index) {
const void* opaque = args->device_memory_ptr(index);
return static_cast<int32_t*>(const_cast<void*>(opaque));
}
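// Returns a packing callback invoked at launch time: it resolves the
// lhs/rhs/out (and optional workspace and dynamic-slice) pointers from the
// kernel arguments, checks that the adaptor can implement the problem size,
// and initializes the CUTLASS Params struct in place.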
template <typename Tag>
KernelArgsPacking ArgsPacking(GemmMode mode, int32_t batch_count, int32_t m,
int32_t n, int32_t k, const ArgsIndices& indices,
const DynamicSliceIndices& slices,
int32_t device_sms, Adaptor<Tag> adaptor) {
using Packed = absl::StatusOr<std::unique_ptr<se::KernelArgsPackedArrayBase>>;
struct Params {
#if defined(_MSC_VER)
alignas(64) std::byte storage[1024];
#else
alignas(128) std::byte storage[1024];
#endif
};
return [=](const se::Kernel& kernel, const se::KernelArgs& args) -> Packed {
auto* mem_args = se::Cast<se::KernelArgsDeviceMemoryArray>(&args);
Arguments arguments = {mode, batch_count, m, n, k};
arguments.lhs = const_cast<void*>(mem_args->device_memory_ptr(indices.lhs));
arguments.rhs = const_cast<void*>(mem_args->device_memory_ptr(indices.rhs));
arguments.out = const_cast<void*>(mem_args->device_memory_ptr(indices.out));
if (indices.has_workspace) {
size_t num_mem_args = mem_args->device_memory_args().size();
arguments.workspace =
const_cast<void*>(mem_args->device_memory_ptr(num_mem_args - 1));
} else {
arguments.workspace = nullptr;
}
if (slices.out.has_value()) {
arguments.slices.out = SlicePtr(mem_args, *slices.out);
}
if (!adaptor.CanImplement(arguments)) {
return absl::InternalError(absl::StrCat(
"CUTLASS kernel can not implement gemm for a given problem size",
": m=", m, ", n=", n, ", k=", k));
}
auto threads = As<se::ThreadDim>(adaptor.ThreadDim());
auto shmem_bytes = adaptor.SharedMemoryBytes();
static int32_t sm_occupancy =
kernel.GetMaxOccupiedBlocksPerCore(threads, shmem_bytes).value_or(1);
if (sm_occupancy == 0) {
LOG_FIRST_N(WARNING, 1)
<< "CUTLASS gemm kernel reported 0 occupancy: threads_per_block="
<< (threads.x * threads.y * threads.z)
<< ", dynamic_shared_memory_bytes=" << shmem_bytes;
}
Params params;
adaptor.Initialize(¶ms, arguments, device_sms, sm_occupancy);
return se::PackKernelArgs<Params, DynamicSliceArguments>(
args.number_of_shared_bytes(), params, arguments.slices);
};
}
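// Wraps a CUTLASS kernel identified by `Tag` in a CustomKernel, deriving
// cluster/block/thread dimensions and shared memory usage from the adaptor.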
template <typename Tag>
static CustomKernel Load(std::string name, GemmMode mode, int32_t batch_count,
int32_t m, int32_t n, int32_t k,
const ArgsIndices& indices,
const DynamicSliceIndices& slices,
const se::DeviceDescription& device,
Adaptor<Tag> adaptor = {},
DeviceKernel<Tag> kernel = {}) {
auto cluster_dim = As<se::ClusterDim>(adaptor.ClusterDim());
auto block_dim = As<se::BlockDim>(adaptor.BlockDim(m, n, k));
auto thread_dim = As<se::ThreadDim>(adaptor.ThreadDim());
auto shared_memory_bytes = adaptor.SharedMemoryBytes();
auto packing = ArgsPacking<Tag>(mode, batch_count, m, n, k, indices, slices,
device.core_count(), adaptor);
se::MultiKernelLoaderSpec spec(2, std::move(packing));
spec.AddInProcessSymbol(kernel.symbol(), name);
if (cluster_dim.has_value()) {
return CustomKernel(std::move(name), std::move(spec), block_dim, thread_dim,
*cluster_dim, shared_memory_bytes);
} else {
return CustomKernel(std::move(name), std::move(spec), block_dim, thread_dim,
shared_memory_bytes);
}
}
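// Returns the kernel candidates registered for the requested (lhs, rhs, dot)
// type triple; some entries also provide a split-K parallel variant
// alongside the plain gemm.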
absl::StatusOr<std::vector<CustomKernel>> GetCutlassGemmKernels(
std::string name, PrimitiveType dot_type, PrimitiveType lhs_type,
PrimitiveType rhs_type, int32_t m, int32_t n, int32_t k,
const ArgsIndices& indices, const DynamicSliceIndices& slices,
const se::DeviceDescription& device) {
absl::flat_hash_map<std::tuple<PrimitiveType, PrimitiveType, PrimitiveType>,
std::vector<CustomKernel>>
kernels = {
{{BF16, BF16, BF16},
{{Load<Bf16xBf16ToBf16<Default>>(name, GemmMode::kGemm, 1, m, n, k,
indices, slices, device)}}},
{{BF16, BF16, F32},
{{Load<Bf16xBf16ToF32<Default>>(name, GemmMode::kGemm, 1, m, n, k,
indices, slices, device)}}},
{{F32, BF16, F32},
{{Load<F32xBf16ToF32<Default>>(name, GemmMode::kGemm, 1, m, n, k,
indices, slices, device)},
{Load<F32xBf16ToF32<Default>>(name, GemmMode::kGemmSplitKParallel,
16, m, n, k, indices,
slices, device)}}},
{{BF16, S8, F32},
{{Load<Bf16xS8ToF32<Default>>(name, GemmMode::kGemm, 1, m, n, k,
indices, slices, device)},
{Load<Bf16xS8ToF32<Default>>(name, GemmMode::kGemmSplitKParallel,
16, m, n, k, indices,
slices, device)}}},
{{F32, F32, F32},
{{Load<F32xF32ToF32<Default>>(name, GemmMode::kGemm, 1, m, n, k,
indices, slices, device)}}}};
auto loaded_kernels = kernels.find({lhs_type, rhs_type, dot_type});
if (loaded_kernels != kernels.end()) {
return loaded_kernels->second;
} else {
std::string kernel_name = PrimitiveType_Name(lhs_type) + "x" +
PrimitiveType_Name(rhs_type) + "To" +
PrimitiveType_Name(dot_type);
return absl::InvalidArgumentError(absl::StrCat(
"Unsupported CUTLASS gemm data type for kernel: ", kernel_name));
}
}
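// Loads the adaptor and device kernel from a pre-compiled shared library
// (see DlOpenedKernel) and wraps them in a CustomKernel.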
absl::StatusOr<CustomKernel> LoadCutlassGemmKernel(
std::string name, const std::string& library_path, PrimitiveType dtype,
int32_t m, int32_t n, int32_t k, const ArgsIndices& indices,
const DynamicSliceIndices& slices, const se::DeviceDescription& device) {
auto adaptor = Adaptor<DlOpenedKernel>::Load(library_path);
if (!adaptor.has_value()) {
return absl::InternalError(
absl::StrCat("Failed to load CUTLASS adaptor from a shared library: ",
library_path));
}
auto kernel = DeviceKernel<DlOpenedKernel>::Load(library_path);
if (!kernel.has_value()) {
return absl::InternalError(absl::StrCat(
"Failed to load CUTLASS kernel from a shared library: ", library_path));
}
return Load<DlOpenedKernel>(std::move(name), GemmMode::kGemm,
1, m, n, k, indices, slices,
device, *adaptor, *kernel);
}
} | #include "xla/service/gpu/kernels/cutlass_gemm_custom_kernel.h"
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::gpu::kernel::gemm_universal {
TEST(CutlassGemmKernelTest, SimpleGemm) {
se::Platform* platform =
se::PlatformManager::PlatformWithName("CUDA").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
auto stream = executor->CreateStream().value();
TF_ASSERT_OK_AND_ASSIGN(
auto custom_kernels,
GetCutlassGemmKernels("cutlass_gemm", PrimitiveType::F32,
PrimitiveType::F32, PrimitiveType::F32, 4, 4, 4,
{0, 1, 2}, {},
executor->GetDeviceDescription()));
auto custom_kernel = custom_kernels[0];
TF_ASSERT_OK_AND_ASSIGN(auto gemm,
executor->LoadKernel(custom_kernel.kernel_spec()));
int64_t length = 4 * 4;
int64_t byte_length = sizeof(float) * length;
se::DeviceMemory<float> a = executor->AllocateArray<float>(length, 0);
se::DeviceMemory<float> b = executor->AllocateArray<float>(length, 0);
se::DeviceMemory<float> c = executor->AllocateArray<float>(length, 0);
float value = 2.0;
uint32_t pattern;
std::memcpy(&pattern, &value, sizeof(pattern));
TF_ASSERT_OK(stream->Memset32(&a, pattern, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, pattern, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
se::KernelArgsDeviceMemoryArray arr(
std::vector<se::DeviceMemoryBase>({a, b, c}),
custom_kernel.shared_memory_bytes());
TF_ASSERT_OK(stream->Launch(custom_kernel.thread_dims(),
custom_kernel.block_dims(), *gemm, arr));
std::vector<float> dst(length, -1.0f);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<float> expected(length, 16.0);
ASSERT_EQ(dst, expected);
}
TEST(CutlassGemmKernelTest, LoadFromSharedLibrary) {
std::string kernel_lib_path =
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu", "kernels",
"cutlass_gemm_kernel_f32xf32_to_f32.so");
se::Platform* platform =
se::PlatformManager::PlatformWithName("CUDA").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
auto stream = executor->CreateStream().value();
auto custom_kernel = LoadCutlassGemmKernel(
"cutlass_gemm", kernel_lib_path, PrimitiveType::F32, 4, 4, 4,
{0, 1, 2}, {}, executor->GetDeviceDescription());
TF_ASSERT_OK_AND_ASSIGN(auto gemm,
executor->LoadKernel(custom_kernel->kernel_spec()));
int64_t length = 4 * 4;
int64_t byte_length = sizeof(float) * length;
se::DeviceMemory<float> a = executor->AllocateArray<float>(length, 0);
se::DeviceMemory<float> b = executor->AllocateArray<float>(length, 0);
se::DeviceMemory<float> c = executor->AllocateArray<float>(length, 0);
float value = 2.0;
uint32_t pattern;
std::memcpy(&pattern, &value, sizeof(pattern));
TF_ASSERT_OK(stream->Memset32(&a, pattern, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, pattern, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
se::KernelArgsDeviceMemoryArray arr(
std::vector<se::DeviceMemoryBase>({a, b, c}),
custom_kernel->shared_memory_bytes());
TF_ASSERT_OK(stream->Launch(custom_kernel->thread_dims(),
custom_kernel->block_dims(), *gemm, arr));
std::vector<float> dst(length, -1.0f);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<float> expected(length, 16.0);
ASSERT_EQ(dst, expected);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/cutlass_gemm_custom_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/cutlass_gemm_custom_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bcd78a0f-f497-45e9-bed7-3f0cc8ce3d72 | cpp | tensorflow/tensorflow | input_colocation_exemption_registry | tensorflow/core/common_runtime/input_colocation_exemption_registry.cc | tensorflow/core/common_runtime/input_colocation_exemption_registry_test.cc | #include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include <set>
#include <string>
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
InputColocationExemptionRegistry* InputColocationExemptionRegistry::Global() {
static InputColocationExemptionRegistry* registry =
new InputColocationExemptionRegistry;
return registry;
}
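// Registers an op as exempt from input colocation; a duplicate registration
// is ignored with a warning.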
void InputColocationExemptionRegistry::Register(const string& op) {
auto it = ops_.find(op);
if (it != ops_.end()) {
LOG(WARNING) << "Input colocation exemption for op: " << op
<< " already registered";
} else {
ops_.insert(op);
}
}
} | #include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
REGISTER_INPUT_COLOCATION_EXEMPTION("op 1");
REGISTER_INPUT_COLOCATION_EXEMPTION("op 2");
}
TEST(RPCFactoryRegistryTest, TestBasic) {
auto exempt_ops = InputColocationExemptionRegistry::Global()->Get();
EXPECT_EQ(exempt_ops.size(), 2);
EXPECT_NE(exempt_ops.find("op 1"), exempt_ops.end());
EXPECT_NE(exempt_ops.find("op 2"), exempt_ops.end());
EXPECT_EQ(exempt_ops.find("op 3"), exempt_ops.end());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/input_colocation_exemption_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/input_colocation_exemption_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ae58849b-b731-44fd-832e-ff17424f19f3 | cpp | abseil/abseil-cpp | cord_rep_btree | absl/strings/internal/cord_rep_btree.cc | absl/strings/internal/cord_rep_btree_test.cc | #include "absl/strings/internal/cord_rep_btree.h"
#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>
#include <ostream>
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/optimization.h"
#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_consume.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t CordRepBtree::kMaxCapacity;
#endif
namespace {
using NodeStack = CordRepBtree * [CordRepBtree::kMaxDepth];
using EdgeType = CordRepBtree::EdgeType;
using OpResult = CordRepBtree::OpResult;
using CopyResult = CordRepBtree::CopyResult;
constexpr auto kFront = CordRepBtree::kFront;
constexpr auto kBack = CordRepBtree::kBack;
ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
void DumpAll(const CordRep* rep,
bool include_contents,
std::ostream& stream,
size_t depth = 0) {
assert(depth <= CordRepBtree::kMaxDepth + 2);
std::string sharing = const_cast<CordRep*>(rep)->refcount.IsOne()
? std::string("Private")
: absl::StrCat("Shared(", rep->refcount.Get(), ")");
std::string sptr = absl::StrCat("0x", absl::Hex(rep));
auto maybe_dump_data = [&stream, include_contents](const CordRep* r) {
if (include_contents) {
constexpr size_t kMaxDataLength = 60;
stream << ", data = \""
<< EdgeData(r).substr(0, kMaxDataLength)
<< (r->length > kMaxDataLength ? "\"..." : "\"");
}
stream << '\n';
};
stream << std::string(depth * 2, ' ') << sharing << " (" << sptr << ") ";
if (rep->IsBtree()) {
const CordRepBtree* node = rep->btree();
std::string label =
node->height() ? absl::StrCat("Node(", node->height(), ")") : "Leaf";
stream << label << ", len = " << node->length
<< ", begin = " << node->begin() << ", end = " << node->end()
<< "\n";
for (CordRep* edge : node->Edges()) {
DumpAll(edge, include_contents, stream, depth + 1);
}
} else if (rep->tag == SUBSTRING) {
const CordRepSubstring* substring = rep->substring();
stream << "Substring, len = " << rep->length
<< ", start = " << substring->start;
maybe_dump_data(rep);
DumpAll(substring->child, include_contents, stream, depth + 1);
} else if (rep->tag >= FLAT) {
stream << "Flat, len = " << rep->length
<< ", cap = " << rep->flat()->Capacity();
maybe_dump_data(rep);
} else if (rep->tag == EXTERNAL) {
stream << "Extn, len = " << rep->length;
maybe_dump_data(rep);
}
}
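// Creates a CordRepSubstring for the `n` bytes of `rep` starting at
// `offset`. Substrings of substrings are collapsed onto the underlying FLAT
// or EXTERNAL rep.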
CordRepSubstring* CreateSubstring(CordRep* rep, size_t offset, size_t n) {
assert(n != 0);
assert(offset + n <= rep->length);
assert(offset != 0 || n != rep->length);
if (rep->tag == SUBSTRING) {
CordRepSubstring* substring = rep->substring();
offset += substring->start;
rep = CordRep::Ref(substring->child);
CordRep::Unref(substring);
}
assert(rep->IsExternal() || rep->IsFlat());
CordRepSubstring* substring = new CordRepSubstring();
substring->length = n;
substring->tag = SUBSTRING;
substring->start = offset;
substring->child = rep;
return substring;
}
inline CordRep* MakeSubstring(CordRep* rep, size_t offset, size_t n) {
if (n == rep->length) return rep;
if (n == 0) return CordRep::Unref(rep), nullptr;
return CreateSubstring(rep, offset, n);
}
inline CordRep* MakeSubstring(CordRep* rep, size_t offset) {
if (offset == 0) return rep;
return CreateSubstring(rep, offset, rep->length - offset);
}
CordRep* ResizeEdge(CordRep* edge, size_t length, bool is_mutable) {
assert(length > 0);
assert(length <= edge->length);
assert(IsDataEdge(edge));
if (length >= edge->length) return edge;
if (is_mutable && (edge->tag >= FLAT || edge->tag == SUBSTRING)) {
edge->length = length;
return edge;
}
return CreateSubstring(edge, 0, length);
}
template <EdgeType edge_type>
inline absl::string_view Consume(absl::string_view s, size_t n) {
return edge_type == kBack ? s.substr(n) : s.substr(0, s.size() - n);
}
template <EdgeType edge_type>
inline absl::string_view Consume(char* dst, absl::string_view s, size_t n) {
if (edge_type == kBack) {
memcpy(dst, s.data(), n);
return s.substr(n);
} else {
const size_t offset = s.size() - n;
memcpy(dst, s.data() + offset, n);
return s.substr(0, offset);
}
}
template <typename R, typename Fn>
inline void FastUnref(R* r, Fn&& fn) {
if (r->refcount.IsOne()) {
fn(r);
} else if (!r->refcount.DecrementExpectHighRefcount()) {
fn(r);
}
}
void DeleteSubstring(CordRepSubstring* substring) {
CordRep* rep = substring->child;
if (!rep->refcount.Decrement()) {
if (rep->tag >= FLAT) {
CordRepFlat::Delete(rep->flat());
} else {
assert(rep->tag == EXTERNAL);
CordRepExternal::Delete(rep->external());
}
}
delete substring;
}
void DeleteLeafEdge(CordRep* rep) {
assert(IsDataEdge(rep));
if (rep->tag >= FLAT) {
CordRepFlat::Delete(rep->flat());
} else if (rep->tag == EXTERNAL) {
CordRepExternal::Delete(rep->external());
} else {
DeleteSubstring(rep->substring());
}
}
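// Records the path from the root to the edge being modified, tracking in
// `share_depth` how deep the nodes are privately owned (i.e. mutable in
// place), and unwinds AddEdge/SetEdge results back up the tree, creating a
// new root if an edge was popped.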
template <EdgeType edge_type>
struct StackOperations {
inline bool owned(int depth) const { return depth < share_depth; }
inline CordRepBtree* node(int depth) const { return stack[depth]; }
inline CordRepBtree* BuildStack(CordRepBtree* tree, int depth) {
assert(depth <= tree->height());
int current_depth = 0;
while (current_depth < depth && tree->refcount.IsOne()) {
stack[current_depth++] = tree;
tree = tree->Edge(edge_type)->btree();
}
share_depth = current_depth + (tree->refcount.IsOne() ? 1 : 0);
while (current_depth < depth) {
stack[current_depth++] = tree;
tree = tree->Edge(edge_type)->btree();
}
return tree;
}
inline void BuildOwnedStack(CordRepBtree* tree, int height) {
assert(height <= CordRepBtree::kMaxHeight);
int depth = 0;
while (depth < height) {
assert(tree->refcount.IsOne());
stack[depth++] = tree;
tree = tree->Edge(edge_type)->btree();
}
assert(tree->refcount.IsOne());
share_depth = depth + 1;
}
static inline CordRepBtree* Finalize(CordRepBtree* tree, OpResult result) {
switch (result.action) {
case CordRepBtree::kPopped:
tree = edge_type == kBack ? CordRepBtree::New(tree, result.tree)
: CordRepBtree::New(result.tree, tree);
if (ABSL_PREDICT_FALSE(tree->height() > CordRepBtree::kMaxHeight)) {
tree = CordRepBtree::Rebuild(tree);
ABSL_RAW_CHECK(tree->height() <= CordRepBtree::kMaxHeight,
"Max height exceeded");
}
return tree;
case CordRepBtree::kCopied:
CordRep::Unref(tree);
ABSL_FALLTHROUGH_INTENDED;
case CordRepBtree::kSelf:
return result.tree;
}
ABSL_UNREACHABLE();
return result.tree;
}
template <bool propagate = false>
inline CordRepBtree* Unwind(CordRepBtree* tree, int depth, size_t length,
OpResult result) {
if (depth != 0) {
do {
CordRepBtree* node = stack[--depth];
const bool owned = depth < share_depth;
switch (result.action) {
case CordRepBtree::kPopped:
assert(!propagate);
result = node->AddEdge<edge_type>(owned, result.tree, length);
break;
case CordRepBtree::kCopied:
result = node->SetEdge<edge_type>(owned, result.tree, length);
if (propagate) stack[depth] = result.tree;
break;
case CordRepBtree::kSelf:
node->length += length;
while (depth > 0) {
node = stack[--depth];
node->length += length;
}
return node;
}
} while (depth > 0);
}
return Finalize(tree, result);
}
inline CordRepBtree* Propagate(CordRepBtree* tree, int depth, size_t length,
OpResult result) {
return Unwind<true>(tree, depth, length, result);
}
int share_depth;
NodeStack stack;
};
}
void SetCordBtreeExhaustiveValidation(bool do_exhaustive_validation) {
cord_btree_exhaustive_validation.store(do_exhaustive_validation,
std::memory_order_relaxed);
}
bool IsCordBtreeExhaustiveValidationEnabled() {
return cord_btree_exhaustive_validation.load(std::memory_order_relaxed);
}
void CordRepBtree::Dump(const CordRep* rep, absl::string_view label,
bool include_contents, std::ostream& stream) {
stream << "===================================\n";
if (!label.empty()) {
stream << label << '\n';
stream << "-----------------------------------\n";
}
if (rep) {
DumpAll(rep, include_contents, stream);
} else {
stream << "NULL\n";
}
}
void CordRepBtree::Dump(const CordRep* rep, absl::string_view label,
std::ostream& stream) {
Dump(rep, label, false, stream);
}
void CordRepBtree::Dump(const CordRep* rep, std::ostream& stream) {
Dump(rep, absl::string_view(), false, stream);
}
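// Destroys a tree of height >= 1 two levels at a time: with `size` == 1 the
// grand-child edges are data edges, otherwise they are subtrees destroyed
// recursively.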
template <size_t size>
static void DestroyTree(CordRepBtree* tree) {
for (CordRep* node : tree->Edges()) {
if (node->refcount.Decrement()) continue;
for (CordRep* edge : node->btree()->Edges()) {
if (edge->refcount.Decrement()) continue;
if (size == 1) {
DeleteLeafEdge(edge);
} else {
CordRepBtree::Destroy(edge->btree());
}
}
CordRepBtree::Delete(node->btree());
}
CordRepBtree::Delete(tree);
}
void CordRepBtree::Destroy(CordRepBtree* tree) {
switch (tree->height()) {
case 0:
for (CordRep* edge : tree->Edges()) {
if (!edge->refcount.Decrement()) {
DeleteLeafEdge(edge);
}
}
return CordRepBtree::Delete(tree);
case 1:
return DestroyTree<1>(tree);
default:
return DestroyTree<2>(tree);
}
}
bool CordRepBtree::IsValid(const CordRepBtree* tree, bool shallow) {
#define NODE_CHECK_VALID(x) \
if (!(x)) { \
ABSL_RAW_LOG(ERROR, "CordRepBtree::CheckValid() FAILED: %s", #x); \
return false; \
}
#define NODE_CHECK_EQ(x, y) \
if ((x) != (y)) { \
ABSL_RAW_LOG(ERROR, \
"CordRepBtree::CheckValid() FAILED: %s != %s (%s vs %s)", #x, \
#y, absl::StrCat(x).c_str(), absl::StrCat(y).c_str()); \
return false; \
}
NODE_CHECK_VALID(tree != nullptr);
NODE_CHECK_VALID(tree->IsBtree());
NODE_CHECK_VALID(tree->height() <= kMaxHeight);
NODE_CHECK_VALID(tree->begin() < tree->capacity());
NODE_CHECK_VALID(tree->end() <= tree->capacity());
NODE_CHECK_VALID(tree->begin() <= tree->end());
size_t child_length = 0;
for (CordRep* edge : tree->Edges()) {
NODE_CHECK_VALID(edge != nullptr);
if (tree->height() > 0) {
NODE_CHECK_VALID(edge->IsBtree());
NODE_CHECK_VALID(edge->btree()->height() == tree->height() - 1);
} else {
NODE_CHECK_VALID(IsDataEdge(edge));
}
child_length += edge->length;
}
NODE_CHECK_EQ(child_length, tree->length);
if ((!shallow || IsCordBtreeExhaustiveValidationEnabled()) &&
tree->height() > 0) {
for (CordRep* edge : tree->Edges()) {
if (!IsValid(edge->btree(), shallow)) return false;
}
}
return true;
#undef NODE_CHECK_VALID
#undef NODE_CHECK_EQ
}
#ifndef NDEBUG
CordRepBtree* CordRepBtree::AssertValid(CordRepBtree* tree, bool shallow) {
if (!IsValid(tree, shallow)) {
Dump(tree, "CordRepBtree validation failed:", false, std::cout);
ABSL_RAW_LOG(FATAL, "CordRepBtree::CheckValid() FAILED");
}
return tree;
}
const CordRepBtree* CordRepBtree::AssertValid(const CordRepBtree* tree,
bool shallow) {
if (!IsValid(tree, shallow)) {
Dump(tree, "CordRepBtree validation failed:", false, std::cout);
ABSL_RAW_LOG(FATAL, "CordRepBtree::CheckValid() FAILED");
}
return tree;
}
#endif
template <EdgeType edge_type>
inline OpResult CordRepBtree::AddEdge(bool owned, CordRep* edge, size_t delta) {
if (size() >= kMaxCapacity) return {New(edge), kPopped};
OpResult result = ToOpResult(owned);
result.tree->Add<edge_type>(edge);
result.tree->length += delta;
return result;
}
template <EdgeType edge_type>
OpResult CordRepBtree::SetEdge(bool owned, CordRep* edge, size_t delta) {
OpResult result;
const size_t idx = index(edge_type);
if (owned) {
result = {this, kSelf};
CordRep::Unref(edges_[idx]);
} else {
result = {CopyRaw(length), kCopied};
constexpr int shift = edge_type == kFront ? 1 : 0;
for (CordRep* r : Edges(begin() + shift, back() + shift)) {
CordRep::Ref(r);
}
}
result.tree->edges_[idx] = edge;
result.tree->length += delta;
return result;
}
template <EdgeType edge_type>
CordRepBtree* CordRepBtree::AddCordRep(CordRepBtree* tree, CordRep* rep) {
const int depth = tree->height();
const size_t length = rep->length;
StackOperations<edge_type> ops;
CordRepBtree* leaf = ops.BuildStack(tree, depth);
const OpResult result =
leaf->AddEdge<edge_type>(ops.owned(depth), rep, length);
return ops.Unwind(tree, depth, length, result);
}
template <>
CordRepBtree* CordRepBtree::NewLeaf<kBack>(absl::string_view data,
size_t extra) {
CordRepBtree* leaf = CordRepBtree::New(0);
size_t length = 0;
size_t end = 0;
const size_t cap = leaf->capacity();
while (!data.empty() && end != cap) {
auto* flat = CordRepFlat::New(data.length() + extra);
flat->length = (std::min)(data.length(), flat->Capacity());
length += flat->length;
leaf->edges_[end++] = flat;
data = Consume<kBack>(flat->Data(), data, flat->length);
}
leaf->length = length;
leaf->set_end(end);
return leaf;
}
template <>
CordRepBtree* CordRepBtree::NewLeaf<kFront>(absl::string_view data,
size_t extra) {
CordRepBtree* leaf = CordRepBtree::New(0);
size_t length = 0;
size_t begin = leaf->capacity();
leaf->set_end(leaf->capacity());
while (!data.empty() && begin != 0) {
auto* flat = CordRepFlat::New(data.length() + extra);
flat->length = (std::min)(data.length(), flat->Capacity());
length += flat->length;
leaf->edges_[--begin] = flat;
data = Consume<kFront>(flat->Data(), data, flat->length);
}
leaf->length = length;
leaf->set_begin(begin);
return leaf;
}
template <>
absl::string_view CordRepBtree::AddData<kBack>(absl::string_view data,
size_t extra) {
assert(!data.empty());
assert(size() < capacity());
AlignBegin();
const size_t cap = capacity();
do {
CordRepFlat* flat = CordRepFlat::New(data.length() + extra);
const size_t n = (std::min)(data.length(), flat->Capacity());
flat->length = n;
edges_[fetch_add_end(1)] = flat;
data = Consume<kBack>(flat->Data(), data, n);
} while (!data.empty() && end() != cap);
return data;
}
template <>
absl::string_view CordRepBtree::AddData<kFront>(absl::string_view data,
size_t extra) {
assert(!data.empty());
assert(size() < capacity());
AlignEnd();
do {
CordRepFlat* flat = CordRepFlat::New(data.length() + extra);
const size_t n = (std::min)(data.length(), flat->Capacity());
flat->length = n;
edges_[sub_fetch_begin(1)] = flat;
data = Consume<kFront>(flat->Data(), data, n);
} while (!data.empty() && begin() != 0);
return data;
}
template <EdgeType edge_type>
CordRepBtree* CordRepBtree::AddData(CordRepBtree* tree, absl::string_view data,
size_t extra) {
if (ABSL_PREDICT_FALSE(data.empty())) return tree;
const size_t original_data_size = data.size();
int depth = tree->height();
StackOperations<edge_type> ops;
CordRepBtree* leaf = ops.BuildStack(tree, depth);
if (leaf->size() < leaf->capacity()) {
OpResult result = leaf->ToOpResult(ops.owned(depth));
data = result.tree->AddData<edge_type>(data, extra);
if (data.empty()) {
result.tree->length += original_data_size;
return ops.Unwind(tree, depth, original_data_size, result);
}
size_t delta = original_data_size - data.size();
assert(delta > 0);
result.tree->length += delta;
tree = ops.Propagate(tree, depth, delta, result);
ops.share_depth = depth + 1;
}
for (;;) {
OpResult result = {CordRepBtree::NewLeaf<edge_type>(data, extra), kPopped};
if (result.tree->length == data.size()) {
return ops.Unwind(tree, depth, result.tree->length, result);
}
data = Consume<edge_type>(data, result.tree->length);
tree = ops.Unwind(tree, depth, result.tree->length, result);
depth = tree->height();
ops.BuildOwnedStack(tree, depth);
}
}
template <EdgeType edge_type>
CordRepBtree* CordRepBtree::Merge(CordRepBtree* dst, CordRepBtree* src) {
assert(dst->height() >= src->height());
const size_t length = src->length;
const int depth = dst->height() - src->height();
StackOperations<edge_type> ops;
CordRepBtree* merge_node = ops.BuildStack(dst, depth);
OpResult result;
if (merge_node->size() + src->size() <= kMaxCapacity) {
result = merge_node->ToOpResult(ops.owned(depth));
result.tree->Add<edge_type>(src->Edges());
result.tree->length += src->length;
if (src->refcount.IsOne()) {
Delete(src);
} else {
for (CordRep* edge : src->Edges()) CordRep::Ref(edge);
CordRepBtree::Unref(src);
}
} else {
result = {src, kPopped};
}
if (depth) {
return ops.Unwind(dst, depth, length, result);
}
return ops.Finalize(dst, result);
}
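// Copies the suffix of this tree starting at `offset`. Descends along the
// back edges while the suffix is fully contained in them, then copies the
// partial nodes along the remaining path.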
CopyResult CordRepBtree::CopySuffix(size_t offset) {
assert(offset < this->length);
int height = this->height();
CordRepBtree* node = this;
size_t len = node->length - offset;
CordRep* back = node->Edge(kBack);
while (back->length >= len) {
offset = back->length - len;
if (--height < 0) {
return {MakeSubstring(CordRep::Ref(back), offset), height};
}
node = back->btree();
back = node->Edge(kBack);
}
if (offset == 0) return {CordRep::Ref(node), height};
Position pos = node->IndexBeyond(offset);
CordRepBtree* sub = node->CopyToEndFrom(pos.index, len);
const CopyResult result = {sub, height};
while (pos.n != 0) {
assert(pos.index >= 1);
const size_t begin = pos.index - 1;
sub->set_begin(begin);
CordRep* const edge = node->Edge(begin);
len = pos.n;
offset = edge->length - len;
if (--height < 0) {
sub->edges_[begin] = MakeSubstring(CordRep::Ref(edge), offset, len);
return result;
}
node = edge->btree();
pos = node->IndexBeyond(offset);
CordRepBtree* nsub = node->CopyToEndFrom(pos.index, len);
sub->edges_[begin] = nsub;
sub = nsub;
}
sub->set_begin(pos.index);
return result;
}
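// Copies the first `n` bytes of this tree. With `allow_folding` the result
// may collapse into a lower node or a single data edge (height -1) when the
// prefix is fully contained in the front edge.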
CopyResult CordRepBtree::CopyPrefix(size_t n, bool allow_folding) {
assert(n > 0);
assert(n <= this->length);
int height = this->height();
CordRepBtree* node = this;
CordRep* front = node->Edge(kFront);
if (allow_folding) {
while (front->length >= n) {
if (--height < 0) return {MakeSubstring(CordRep::Ref(front), 0, n), -1};
node = front->btree();
front = node->Edge(kFront);
}
}
if (node->length == n) return {CordRep::Ref(node), height};
Position pos = node->IndexOf(n);
CordRepBtree* sub = node->CopyBeginTo(pos.index, n);
const CopyResult result = {sub, height};
while (pos.n != 0) {
size_t end = pos.index;
n = pos.n;
CordRep* edge = node->Edge(pos.index);
if (--height < 0) {
sub->edges_[end++] = MakeSubstring(CordRep::Ref(edge), 0, n);
sub->set_end(end);
AssertValid(result.edge->btree());
return result;
}
node = edge->btree();
pos = node->IndexOf(n);
CordRepBtree* nsub = node->CopyBeginTo(pos.index, n);
sub->edges_[end++] = nsub;
sub->set_end(end);
sub = nsub;
}
sub->set_end(pos.index);
AssertValid(result.edge->btree());
return result;
}
CordRep* CordRepBtree::ExtractFront(CordRepBtree* tree) {
CordRep* front = tree->Edge(tree->begin());
if (tree->refcount.IsOne()) {
Unref(tree->Edges(tree->begin() + 1, tree->end()));
CordRepBtree::Delete(tree);
} else {
CordRep::Ref(front);
CordRep::Unref(tree);
}
return front;
}
CordRepBtree* CordRepBtree::ConsumeBeginTo(CordRepBtree* tree, size_t end,
size_t new_length) {
assert(end <= tree->end());
if (tree->refcount.IsOne()) {
Unref(tree->Edges(end, tree->end()));
tree->set_end(end);
tree->length = new_length;
} else {
CordRepBtree* old = tree;
tree = tree->CopyBeginTo(end, new_length);
CordRep::Unref(old);
}
return tree;
}
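// Removes the last `n` bytes from `tree`, trimming privately owned nodes in
// place and copying shared ones; the result may collapse to a single data
// edge.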
CordRep* CordRepBtree::RemoveSuffix(CordRepBtree* tree, size_t n) {
assert(tree != nullptr);
assert(n <= tree->length);
const size_t len = tree->length;
if (ABSL_PREDICT_FALSE(n == 0)) {
return tree;
}
if (ABSL_PREDICT_FALSE(n >= len)) {
CordRepBtree::Unref(tree);
return nullptr;
}
size_t length = len - n;
int height = tree->height();
bool is_mutable = tree->refcount.IsOne();
Position pos = tree->IndexOfLength(length);
while (pos.index == tree->begin()) {
CordRep* edge = ExtractFront(tree);
is_mutable &= edge->refcount.IsOne();
if (height-- == 0) return ResizeEdge(edge, length, is_mutable);
tree = edge->btree();
pos = tree->IndexOfLength(length);
}
CordRepBtree* top = tree = ConsumeBeginTo(tree, pos.index + 1, length);
CordRep* edge = tree->Edge(pos.index);
length = pos.n;
while (length != edge->length) {
assert(tree->refcount.IsOne());
const bool edge_is_mutable = edge->refcount.IsOne();
if (height-- == 0) {
tree->edges_[pos.index] = ResizeEdge(edge, length, edge_is_mutable);
return AssertValid(top);
}
if (!edge_is_mutable) {
tree->edges_[pos.index] = edge->btree()->CopyPrefix(length, false).edge;
CordRep::Unref(edge);
return AssertValid(top);
}
tree = edge->btree();
pos = tree->IndexOfLength(length);
tree = ConsumeBeginTo(edge->btree(), pos.index + 1, length);
edge = tree->Edge(pos.index);
length = pos.n;
}
return AssertValid(top);
}
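// Returns a rep for the `n` bytes starting at `offset`: a (substring of a)
// data edge or a shared subtree when the range folds into a single edge,
// otherwise a new node stitching a copied prefix and suffix around the
// shared middle edges.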
CordRep* CordRepBtree::SubTree(size_t offset, size_t n) {
assert(n <= this->length);
assert(offset <= this->length - n);
if (ABSL_PREDICT_FALSE(n == 0)) return nullptr;
CordRepBtree* node = this;
int height = node->height();
Position front = node->IndexOf(offset);
CordRep* left = node->edges_[front.index];
while (front.n + n <= left->length) {
if (--height < 0) return MakeSubstring(CordRep::Ref(left), front.n, n);
node = left->btree();
front = node->IndexOf(front.n);
left = node->edges_[front.index];
}
const Position back = node->IndexBefore(front, n);
CordRep* const right = node->edges_[back.index];
assert(back.index > front.index);
CopyResult prefix;
CopyResult suffix;
if (height > 0) {
prefix = left->btree()->CopySuffix(front.n);
suffix = right->btree()->CopyPrefix(back.n);
if (front.index + 1 == back.index) {
height = (std::max)(prefix.height, suffix.height) + 1;
}
for (int h = prefix.height + 1; h < height; ++h) {
prefix.edge = CordRepBtree::New(prefix.edge);
}
for (int h = suffix.height + 1; h < height; ++h) {
suffix.edge = CordRepBtree::New(suffix.edge);
}
} else {
prefix = CopyResult{MakeSubstring(CordRep::Ref(left), front.n), -1};
suffix = CopyResult{MakeSubstring(CordRep::Ref(right), 0, back.n), -1};
}
CordRepBtree* sub = CordRepBtree::New(height);
size_t end = 0;
sub->edges_[end++] = prefix.edge;
for (CordRep* r : node->Edges(front.index + 1, back.index)) {
sub->edges_[end++] = CordRep::Ref(r);
}
sub->edges_[end++] = suffix.edge;
sub->set_end(end);
sub->length = n;
return AssertValid(sub);
}
CordRepBtree* CordRepBtree::MergeTrees(CordRepBtree* left,
CordRepBtree* right) {
return left->height() >= right->height() ? Merge<kBack>(left, right)
: Merge<kFront>(right, left);
}
bool CordRepBtree::IsFlat(absl::string_view* fragment) const {
if (height() == 0 && size() == 1) {
if (fragment) *fragment = Data(begin());
return true;
}
return false;
}
bool CordRepBtree::IsFlat(size_t offset, const size_t n,
absl::string_view* fragment) const {
assert(n <= this->length);
assert(offset <= this->length - n);
if (ABSL_PREDICT_FALSE(n == 0)) return false;
int height = this->height();
const CordRepBtree* node = this;
for (;;) {
const Position front = node->IndexOf(offset);
const CordRep* edge = node->Edge(front.index);
if (edge->length < front.n + n) return false;
if (--height < 0) {
if (fragment) *fragment = EdgeData(edge).substr(front.n, n);
return true;
}
offset = front.n;
node = node->Edge(front.index)->btree();
}
}
char CordRepBtree::GetCharacter(size_t offset) const {
assert(offset < length);
const CordRepBtree* node = this;
int height = node->height();
for (;;) {
Position front = node->IndexOf(offset);
if (--height < 0) return node->Data(front.index)[front.n];
offset = front.n;
node = node->Edge(front.index)->btree();
}
}
Span<char> CordRepBtree::GetAppendBufferSlow(size_t size) {
assert(height() >= 4);
assert(refcount.IsOne());
const int depth = height();
CordRepBtree* node = this;
CordRepBtree* stack[kMaxDepth];
for (int i = 0; i < depth; ++i) {
node = node->Edge(kBack)->btree();
if (!node->refcount.IsOne()) return {};
stack[i] = node;
}
CordRep* const edge = node->Edge(kBack);
if (!edge->refcount.IsOne() || edge->tag < FLAT) return {};
const size_t avail = edge->flat()->Capacity() - edge->length;
if (avail == 0) return {};
size_t delta = (std::min)(size, avail);
Span<char> span = {edge->flat()->Data() + edge->length, delta};
edge->length += delta;
this->length += delta;
for (int i = 0; i < depth; ++i) {
stack[i]->length += delta;
}
return span;
}
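// Builds a btree from an arbitrary rep by consuming its data edges in order.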
CordRepBtree* CordRepBtree::CreateSlow(CordRep* rep) {
if (rep->IsBtree()) return rep->btree();
CordRepBtree* node = nullptr;
auto consume = [&node](CordRep* r, size_t offset, size_t length) {
r = MakeSubstring(r, offset, length);
if (node == nullptr) {
node = New(r);
} else {
node = CordRepBtree::AddCordRep<kBack>(node, r);
}
};
Consume(rep, consume);
return node;
}
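// Slow path for Append: merges btrees directly, and otherwise consumes the
// data edges of `rep`, adding each one at the back.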
CordRepBtree* CordRepBtree::AppendSlow(CordRepBtree* tree, CordRep* rep) {
if (ABSL_PREDICT_TRUE(rep->IsBtree())) {
return MergeTrees(tree, rep->btree());
}
auto consume = [&tree](CordRep* r, size_t offset, size_t length) {
r = MakeSubstring(r, offset, length);
tree = CordRepBtree::AddCordRep<kBack>(tree, r);
};
Consume(rep, consume);
return tree;
}
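// Slow path for Prepend: mirrors AppendSlow, consuming the edges of `rep` in
// reverse order and adding each one at the front.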
CordRepBtree* CordRepBtree::PrependSlow(CordRepBtree* tree, CordRep* rep) {
if (ABSL_PREDICT_TRUE(rep->IsBtree())) {
return MergeTrees(rep->btree(), tree);
}
auto consume = [&tree](CordRep* r, size_t offset, size_t length) {
r = MakeSubstring(r, offset, length);
tree = CordRepBtree::AddCordRep<kFront>(tree, r);
};
ReverseConsume(rep, consume);
return tree;
}
CordRepBtree* CordRepBtree::Append(CordRepBtree* tree, absl::string_view data,
size_t extra) {
return CordRepBtree::AddData<kBack>(tree, data, extra);
}
CordRepBtree* CordRepBtree::Prepend(CordRepBtree* tree, absl::string_view data,
size_t extra) {
return CordRepBtree::AddData<kFront>(tree, data, extra);
}
template CordRepBtree* CordRepBtree::AddCordRep<kFront>(CordRepBtree* tree,
CordRep* rep);
template CordRepBtree* CordRepBtree::AddCordRep<kBack>(CordRepBtree* tree,
CordRep* rep);
template CordRepBtree* CordRepBtree::AddData<kFront>(CordRepBtree* tree,
absl::string_view data,
size_t extra);
template CordRepBtree* CordRepBtree::AddData<kBack>(CordRepBtree* tree,
absl::string_view data,
size_t extra);
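// Rebuilds `tree` into the accumulator nodes held in `stack`, visiting its
// leaf edges left to right, filling nodes to capacity and growing a new level
// whenever an edge pops out of a full node.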
void CordRepBtree::Rebuild(CordRepBtree** stack, CordRepBtree* tree,
bool consume) {
bool owned = consume && tree->refcount.IsOne();
if (tree->height() == 0) {
for (CordRep* edge : tree->Edges()) {
if (!owned) edge = CordRep::Ref(edge);
size_t height = 0;
size_t length = edge->length;
CordRepBtree* node = stack[0];
OpResult result = node->AddEdge<kBack>(true, edge, length);
while (result.action == CordRepBtree::kPopped) {
stack[height] = result.tree;
if (stack[++height] == nullptr) {
result.action = CordRepBtree::kSelf;
stack[height] = CordRepBtree::New(node, result.tree);
} else {
node = stack[height];
result = node->AddEdge<kBack>(true, result.tree, length);
}
}
while (stack[++height] != nullptr) {
stack[height]->length += length;
}
}
} else {
for (CordRep* rep : tree->Edges()) {
Rebuild(stack, rep->btree(), owned);
}
}
if (consume) {
if (owned) {
CordRepBtree::Delete(tree);
} else {
CordRepBtree::Unref(tree);
}
}
}
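// Returns a rebuilt, densely packed version of `tree`; consumes the input.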
CordRepBtree* CordRepBtree::Rebuild(CordRepBtree* tree) {
CordRepBtree* node = CordRepBtree::New();
CordRepBtree* stack[CordRepBtree::kMaxDepth + 1] = {node};
Rebuild(stack, tree, true);
for (CordRepBtree* parent : stack) {
if (parent == nullptr) return node;
node = parent;
}
assert(false);
return nullptr;
}
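// Attempts to remove the trailing flat of `tree` for reuse as an append
// buffer. This requires the rightmost path and the flat itself to be privately
// owned, and the flat to have at least `extra_capacity` bytes of spare room;
// on success the flat is detached and any nodes left with a single edge are
// collapsed.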
CordRepBtree::ExtractResult CordRepBtree::ExtractAppendBuffer(
CordRepBtree* tree, size_t extra_capacity) {
int depth = 0;
NodeStack stack;
ExtractResult result;
result.tree = tree;
result.extracted = nullptr;
while (tree->height() > 0) {
if (!tree->refcount.IsOne()) return result;
stack[depth++] = tree;
tree = tree->Edge(kBack)->btree();
}
if (!tree->refcount.IsOne()) return result;
CordRep* rep = tree->Edge(kBack);
if (!(rep->IsFlat() && rep->refcount.IsOne())) return result;
CordRepFlat* flat = rep->flat();
const size_t length = flat->length;
const size_t avail = flat->Capacity() - flat->length;
if (extra_capacity > avail) return result;
result.extracted = flat;
while (tree->size() == 1) {
CordRepBtree::Delete(tree);
if (--depth < 0) {
result.tree = nullptr;
return result;
}
rep = tree;
tree = stack[depth];
}
tree->set_end(tree->end() - 1);
tree->length -= length;
while (depth > 0) {
tree = stack[--depth];
tree->length -= length;
}
while (tree->size() == 1) {
int height = tree->height();
rep = tree->Edge(kBack);
Delete(tree);
if (height == 0) {
result.tree = rep;
return result;
}
tree = rep->btree();
}
result.tree = tree;
return result;
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/cord_rep_btree.h"
#include <cmath>
#include <deque>
#include <iostream>
#include <random>
#include <sstream>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/cleanup/cleanup.h"
#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_test_util.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
class CordRepBtreeTestPeer {
public:
static void SetEdge(CordRepBtree* node, size_t idx, CordRep* edge) {
node->edges_[idx] = edge;
}
static void AddEdge(CordRepBtree* node, CordRep* edge) {
node->edges_[node->fetch_add_end(1)] = edge;
}
};
namespace {
using ::absl::cordrep_testing::AutoUnref;
using ::absl::cordrep_testing::CordCollectRepsIf;
using ::absl::cordrep_testing::CordToString;
using ::absl::cordrep_testing::CordVisitReps;
using ::absl::cordrep_testing::CreateFlatsFromString;
using ::absl::cordrep_testing::CreateRandomString;
using ::absl::cordrep_testing::MakeExternal;
using ::absl::cordrep_testing::MakeFlat;
using ::absl::cordrep_testing::MakeSubstring;
using ::testing::_;
using ::testing::AllOf;
using ::testing::AnyOf;
using ::testing::Conditional;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Le;
using ::testing::Ne;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::TypedEq;
MATCHER_P(EqFlatHolding, data, "Equals flat holding data") {
if (arg->tag < FLAT) {
*result_listener << "Expected FLAT, got tag " << static_cast<int>(arg->tag);
return false;
}
std::string actual = CordToString(arg);
if (actual != data) {
*result_listener << "Expected flat holding \"" << data
<< "\", got flat holding \"" << actual << "\"";
return false;
}
return true;
}
MATCHER_P(IsNode, height, absl::StrCat("Is a valid node of height ", height)) {
if (arg == nullptr) {
*result_listener << "Expected NODE, got nullptr";
return false;
}
if (arg->tag != BTREE) {
*result_listener << "Expected NODE, got " << static_cast<int>(arg->tag);
return false;
}
if (!CordRepBtree::IsValid(arg->btree())) {
CordRepBtree::Dump(arg->btree(), "Expected valid NODE, got:", false,
*result_listener->stream());
return false;
}
if (arg->btree()->height() != height) {
*result_listener << "Expected NODE of height " << height << ", got "
<< arg->btree()->height();
return false;
}
return true;
}
MATCHER_P2(IsSubstring, start, length,
absl::StrCat("Is a substring(start = ", start, ", length = ", length,
")")) {
if (arg == nullptr) {
*result_listener << "Expected substring, got nullptr";
return false;
}
if (arg->tag != SUBSTRING) {
*result_listener << "Expected SUBSTRING, got "
<< static_cast<int>(arg->tag);
return false;
}
const CordRepSubstring* const substr = arg->substring();
if (substr->start != start || substr->length != length) {
*result_listener << "Expected substring(" << start << ", " << length
<< "), got substring(" << substr->start << ", "
<< substr->length << ")";
return false;
}
return true;
}
MATCHER_P2(EqExtractResult, tree, rep, "Equals ExtractResult") {
if (arg.tree != tree || arg.extracted != rep) {
*result_listener << "Expected {" << static_cast<const void*>(tree) << ", "
<< static_cast<const void*>(rep) << "}, got {" << arg.tree
<< ", " << arg.extracted << "}";
return false;
}
return true;
}
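// Test helper that hands out successive chunks of `data`: front to back when
// `forward` is true, back to front otherwise.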
class DataConsumer {
public:
DataConsumer(absl::string_view data, bool forward)
: data_(data), forward_(forward) {}
absl::string_view Next(size_t n) {
assert(n <= data_.size() - consumed_);
consumed_ += n;
return data_.substr(forward_ ? consumed_ - n : data_.size() - consumed_, n);
}
absl::string_view Consumed() const {
return forward_ ? data_.substr(0, consumed_)
: data_.substr(data_.size() - consumed_);
}
private:
absl::string_view data_;
size_t consumed_ = 0;
bool forward_;
};
CordRepBtree* BtreeAdd(CordRepBtree* node, bool append,
absl::string_view data) {
return append ? CordRepBtree::Append(node, data)
: CordRepBtree::Prepend(node, data);
}
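// Collects the data edges of `tree` in order by recursing into its leaves.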
void GetLeafEdges(const CordRepBtree* tree, std::vector<CordRep*>& edges) {
if (tree->height() == 0) {
for (CordRep* edge : tree->Edges()) {
edges.push_back(edge);
}
} else {
for (CordRep* edge : tree->Edges()) {
GetLeafEdges(edge->btree(), edges);
}
}
}
std::vector<CordRep*> GetLeafEdges(const CordRepBtree* tree) {
std::vector<CordRep*> edges;
GetLeafEdges(tree, edges);
return edges;
}
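// Creates a flat holding a unique 6 character hex string such as "0x0012".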
CordRepFlat* MakeHexFlat(size_t i) {
return MakeFlat(absl::StrCat("0x", absl::Hex(i, absl::kZeroPad4)));
}
CordRepBtree* MakeLeaf(size_t size = CordRepBtree::kMaxCapacity) {
assert(size <= CordRepBtree::kMaxCapacity);
CordRepBtree* leaf = CordRepBtree::Create(MakeHexFlat(0));
for (size_t i = 1; i < size; ++i) {
leaf = CordRepBtree::Append(leaf, MakeHexFlat(i));
}
return leaf;
}
CordRepBtree* MakeTree(size_t size, bool append = true) {
CordRepBtree* tree = CordRepBtree::Create(MakeHexFlat(0));
for (size_t i = 1; i < size; ++i) {
tree = append ? CordRepBtree::Append(tree, MakeHexFlat(i))
: CordRepBtree::Prepend(tree, MakeHexFlat(i));
}
return tree;
}
CordRepBtree* CreateTree(absl::Span<CordRep* const> reps) {
auto it = reps.begin();
CordRepBtree* tree = CordRepBtree::Create(*it);
while (++it != reps.end()) tree = CordRepBtree::Append(tree, *it);
return tree;
}
CordRepBtree* CreateTree(absl::string_view data, size_t chunk_size) {
return CreateTree(CreateFlatsFromString(data, chunk_size));
}
CordRepBtree* CreateTreeReverse(absl::string_view data, size_t chunk_size) {
std::vector<CordRep*> flats = CreateFlatsFromString(data, chunk_size);
auto rit = flats.rbegin();
CordRepBtree* tree = CordRepBtree::Create(*rit);
while (++rit != flats.rend()) tree = CordRepBtree::Prepend(tree, *rit);
return tree;
}
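// Fixture parameterized on 'shared': when true, tests hold an extra reference
// to the input tree(s) to exercise the copy-on-write paths.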
class CordRepBtreeTest : public testing::TestWithParam<bool> {
public:
bool shared() const { return GetParam(); }
static std::string ToString(testing::TestParamInfo<bool> param) {
return param.param ? "Shared" : "Private";
}
};
INSTANTIATE_TEST_SUITE_P(WithParam, CordRepBtreeTest, testing::Bool(),
CordRepBtreeTest::ToString);
class CordRepBtreeHeightTest : public testing::TestWithParam<int> {
public:
int height() const { return GetParam(); }
static std::string ToString(testing::TestParamInfo<int> param) {
return absl::StrCat(param.param);
}
};
INSTANTIATE_TEST_SUITE_P(WithHeights, CordRepBtreeHeightTest,
testing::Range(0, CordRepBtree::kMaxHeight),
CordRepBtreeHeightTest::ToString);
using TwoBools = testing::tuple<bool, bool>;
class CordRepBtreeDualTest : public testing::TestWithParam<TwoBools> {
public:
bool first_shared() const { return std::get<0>(GetParam()); }
bool second_shared() const { return std::get<1>(GetParam()); }
static std::string ToString(testing::TestParamInfo<TwoBools> param) {
if (std::get<0>(param.param)) {
return std::get<1>(param.param) ? "BothShared" : "FirstShared";
}
return std::get<1>(param.param) ? "SecondShared" : "Private";
}
};
INSTANTIATE_TEST_SUITE_P(WithParam, CordRepBtreeDualTest,
testing::Combine(testing::Bool(), testing::Bool()),
CordRepBtreeDualTest::ToString);
TEST(CordRepBtreeTest, SizeIsMultipleOf64) {
if (sizeof(size_t) == 8 && sizeof(void*) == 8) {
EXPECT_THAT(sizeof(CordRepBtree) % 64, Eq(0u))
<< "Should be multiple of 64";
}
}
TEST(CordRepBtreeTest, NewDestroyEmptyTree) {
auto* tree = CordRepBtree::New();
EXPECT_THAT(tree->size(), Eq(0u));
EXPECT_THAT(tree->height(), Eq(0));
EXPECT_THAT(tree->Edges(), ElementsAre());
CordRepBtree::Destroy(tree);
}
TEST(CordRepBtreeTest, NewDestroyEmptyTreeAtHeight) {
auto* tree = CordRepBtree::New(3);
EXPECT_THAT(tree->size(), Eq(0u));
EXPECT_THAT(tree->height(), Eq(3));
EXPECT_THAT(tree->Edges(), ElementsAre());
CordRepBtree::Destroy(tree);
}
TEST(CordRepBtreeTest, Btree) {
CordRep* rep = CordRepBtree::New();
EXPECT_THAT(rep->btree(), Eq(rep));
EXPECT_THAT(static_cast<const CordRep*>(rep)->btree(), Eq(rep));
CordRep::Unref(rep);
#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
rep = MakeFlat("Hello world");
EXPECT_DEATH(rep->btree(), ".*");
EXPECT_DEATH(static_cast<const CordRep*>(rep)->btree(), ".*");
CordRep::Unref(rep);
#endif
}
TEST(CordRepBtreeTest, EdgeData) {
CordRepFlat* flat = MakeFlat("Hello world");
CordRepExternal* external = MakeExternal("Hello external");
CordRep* substr1 = MakeSubstring(1, 6, CordRep::Ref(flat));
CordRep* substr2 = MakeSubstring(1, 6, CordRep::Ref(external));
CordRep* bad_substr = MakeSubstring(1, 2, CordRep::Ref(substr1));
EXPECT_TRUE(IsDataEdge(flat));
EXPECT_THAT(EdgeData(flat).data(), TypedEq<const void*>(flat->Data()));
EXPECT_THAT(EdgeData(flat), Eq("Hello world"));
EXPECT_TRUE(IsDataEdge(external));
EXPECT_THAT(EdgeData(external).data(), TypedEq<const void*>(external->base));
EXPECT_THAT(EdgeData(external), Eq("Hello external"));
EXPECT_TRUE(IsDataEdge(substr1));
EXPECT_THAT(EdgeData(substr1).data(), TypedEq<const void*>(flat->Data() + 1));
EXPECT_THAT(EdgeData(substr1), Eq("ello w"));
EXPECT_TRUE(IsDataEdge(substr2));
EXPECT_THAT(EdgeData(substr2).data(),
TypedEq<const void*>(external->base + 1));
EXPECT_THAT(EdgeData(substr2), Eq("ello e"));
EXPECT_FALSE(IsDataEdge(bad_substr));
#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
EXPECT_DEATH(EdgeData(bad_substr), ".*");
#endif
CordRep::Unref(bad_substr);
CordRep::Unref(substr2);
CordRep::Unref(substr1);
CordRep::Unref(external);
CordRep::Unref(flat);
}
TEST(CordRepBtreeTest, CreateUnrefLeaf) {
auto* flat = MakeFlat("a");
auto* leaf = CordRepBtree::Create(flat);
EXPECT_THAT(leaf->size(), Eq(1u));
EXPECT_THAT(leaf->height(), Eq(0));
EXPECT_THAT(leaf->Edges(), ElementsAre(flat));
CordRepBtree::Unref(leaf);
}
TEST(CordRepBtreeTest, NewUnrefNode) {
auto* leaf = CordRepBtree::Create(MakeFlat("a"));
CordRepBtree* tree = CordRepBtree::New(leaf);
EXPECT_THAT(tree->size(), Eq(1u));
EXPECT_THAT(tree->height(), Eq(1));
EXPECT_THAT(tree->Edges(), ElementsAre(leaf));
CordRepBtree::Unref(tree);
}
TEST_P(CordRepBtreeTest, AppendToLeafToCapacity) {
AutoUnref refs;
std::vector<CordRep*> flats;
flats.push_back(MakeHexFlat(0));
auto* leaf = CordRepBtree::Create(flats.back());
for (size_t i = 1; i < CordRepBtree::kMaxCapacity; ++i) {
refs.RefIf(shared(), leaf);
flats.push_back(MakeHexFlat(i));
auto* result = CordRepBtree::Append(leaf, flats.back());
EXPECT_THAT(result->height(), Eq(0));
EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
EXPECT_THAT(result->Edges(), ElementsAreArray(flats));
leaf = result;
}
CordRep::Unref(leaf);
}
TEST_P(CordRepBtreeTest, PrependToLeafToCapacity) {
AutoUnref refs;
std::deque<CordRep*> flats;
flats.push_front(MakeHexFlat(0));
auto* leaf = CordRepBtree::Create(flats.front());
for (size_t i = 1; i < CordRepBtree::kMaxCapacity; ++i) {
refs.RefIf(shared(), leaf);
flats.push_front(MakeHexFlat(i));
auto* result = CordRepBtree::Prepend(leaf, flats.front());
EXPECT_THAT(result->height(), Eq(0));
EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
EXPECT_THAT(result->Edges(), ElementsAreArray(flats));
leaf = result;
}
CordRep::Unref(leaf);
}
TEST_P(CordRepBtreeTest, AppendPrependToLeafToCapacity) {
AutoUnref refs;
std::deque<CordRep*> flats;
flats.push_front(MakeHexFlat(0));
auto* leaf = CordRepBtree::Create(flats.front());
for (size_t i = 1; i < CordRepBtree::kMaxCapacity; ++i) {
refs.RefIf(shared(), leaf);
CordRepBtree* result;
if (i % 2 != 0) {
flats.push_front(MakeHexFlat(i));
result = CordRepBtree::Prepend(leaf, flats.front());
} else {
flats.push_back(MakeHexFlat(i));
result = CordRepBtree::Append(leaf, flats.back());
}
EXPECT_THAT(result->height(), Eq(0));
EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
EXPECT_THAT(result->Edges(), ElementsAreArray(flats));
leaf = result;
}
CordRep::Unref(leaf);
}
TEST_P(CordRepBtreeTest, AppendToLeafBeyondCapacity) {
AutoUnref refs;
auto* leaf = MakeLeaf();
refs.RefIf(shared(), leaf);
CordRep* flat = MakeFlat("abc");
auto* result = CordRepBtree::Append(leaf, flat);
ASSERT_THAT(result, IsNode(1));
EXPECT_THAT(result, Ne(leaf));
absl::Span<CordRep* const> edges = result->Edges();
ASSERT_THAT(edges, ElementsAre(leaf, IsNode(0)));
EXPECT_THAT(edges[1]->btree()->Edges(), ElementsAre(flat));
CordRep::Unref(result);
}
TEST_P(CordRepBtreeTest, PrependToLeafBeyondCapacity) {
AutoUnref refs;
auto* leaf = MakeLeaf();
refs.RefIf(shared(), leaf);
CordRep* flat = MakeFlat("abc");
auto* result = CordRepBtree::Prepend(leaf, flat);
ASSERT_THAT(result, IsNode(1));
EXPECT_THAT(result, Ne(leaf));
absl::Span<CordRep* const> edges = result->Edges();
ASSERT_THAT(edges, ElementsAre(IsNode(0), leaf));
EXPECT_THAT(edges[0]->btree()->Edges(), ElementsAre(flat));
CordRep::Unref(result);
}
TEST_P(CordRepBtreeTest, AppendToTreeOneDeep) {
constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
AutoUnref refs;
std::vector<CordRep*> flats;
flats.push_back(MakeHexFlat(0));
CordRepBtree* tree = CordRepBtree::Create(flats.back());
for (size_t i = 1; i <= max_cap; ++i) {
flats.push_back(MakeHexFlat(i));
tree = CordRepBtree::Append(tree, flats.back());
}
ASSERT_THAT(tree, IsNode(1));
for (size_t i = max_cap + 1; i < max_cap * max_cap; ++i) {
refs.RefIf(shared(), tree);
refs.RefIf(i % 4 == 0, tree->Edges().back());
flats.push_back(MakeHexFlat(i));
CordRepBtree* result = CordRepBtree::Append(tree, flats.back());
ASSERT_THAT(result, IsNode(1));
ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
std::vector<CordRep*> edges = GetLeafEdges(result);
ASSERT_THAT(edges, ElementsAreArray(flats));
tree = result;
}
CordRep::Unref(tree);
}
TEST_P(CordRepBtreeTest, AppendToTreeTwoDeep) {
constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
AutoUnref refs;
std::vector<CordRep*> flats;
flats.push_back(MakeHexFlat(0));
CordRepBtree* tree = CordRepBtree::Create(flats.back());
for (size_t i = 1; i <= max_cap * max_cap; ++i) {
flats.push_back(MakeHexFlat(i));
tree = CordRepBtree::Append(tree, flats.back());
}
ASSERT_THAT(tree, IsNode(2));
for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) {
refs.RefIf(shared(), tree);
refs.RefIf(i % 16 == 0, tree->Edges().back());
refs.RefIf(i % 4 == 0, tree->Edges().back()->btree()->Edges().back());
flats.push_back(MakeHexFlat(i));
CordRepBtree* result = CordRepBtree::Append(tree, flats.back());
ASSERT_THAT(result, IsNode(2));
ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
std::vector<CordRep*> edges = GetLeafEdges(result);
ASSERT_THAT(edges, ElementsAreArray(flats));
tree = result;
}
CordRep::Unref(tree);
}
TEST_P(CordRepBtreeTest, PrependToTreeOneDeep) {
constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
AutoUnref refs;
std::deque<CordRep*> flats;
flats.push_back(MakeHexFlat(0));
CordRepBtree* tree = CordRepBtree::Create(flats.back());
for (size_t i = 1; i <= max_cap; ++i) {
flats.push_front(MakeHexFlat(i));
tree = CordRepBtree::Prepend(tree, flats.front());
}
ASSERT_THAT(tree, IsNode(1));
for (size_t i = max_cap + 1; i < max_cap * max_cap; ++i) {
refs.RefIf(shared(), tree);
refs.RefIf(i % 4 == 0, tree->Edges().back());
flats.push_front(MakeHexFlat(i));
CordRepBtree* result = CordRepBtree::Prepend(tree, flats.front());
ASSERT_THAT(result, IsNode(1));
ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
std::vector<CordRep*> edges = GetLeafEdges(result);
ASSERT_THAT(edges, ElementsAreArray(flats));
tree = result;
}
CordRep::Unref(tree);
}
TEST_P(CordRepBtreeTest, PrependToTreeTwoDeep) {
constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
AutoUnref refs;
std::deque<CordRep*> flats;
flats.push_back(MakeHexFlat(0));
CordRepBtree* tree = CordRepBtree::Create(flats.back());
for (size_t i = 1; i <= max_cap * max_cap; ++i) {
flats.push_front(MakeHexFlat(i));
tree = CordRepBtree::Prepend(tree, flats.front());
}
ASSERT_THAT(tree, IsNode(2));
for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap; ++i) {
refs.RefIf(shared(), tree);
refs.RefIf(i % 16 == 0, tree->Edges().back());
refs.RefIf(i % 4 == 0, tree->Edges().back()->btree()->Edges().back());
flats.push_front(MakeHexFlat(i));
CordRepBtree* result = CordRepBtree::Prepend(tree, flats.front());
ASSERT_THAT(result, IsNode(2));
ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
std::vector<CordRep*> edges = GetLeafEdges(result);
ASSERT_THAT(edges, ElementsAreArray(flats));
tree = result;
}
CordRep::Unref(tree);
}
TEST_P(CordRepBtreeDualTest, MergeLeafsNotExceedingCapacity) {
for (bool use_append : {false, true}) {
SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
AutoUnref refs;
std::vector<CordRep*> flats;
CordRepBtree* left = MakeLeaf(3);
GetLeafEdges(left, flats);
refs.RefIf(first_shared(), left);
CordRepBtree* right = MakeLeaf(2);
GetLeafEdges(right, flats);
refs.RefIf(second_shared(), right);
CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
: CordRepBtree::Prepend(right, left);
EXPECT_THAT(tree, IsNode(0));
EXPECT_THAT(tree->Edges(), ElementsAreArray(flats));
CordRepBtree::Unref(tree);
}
}
TEST_P(CordRepBtreeDualTest, MergeLeafsExceedingCapacity) {
for (bool use_append : {false, true}) {
SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
AutoUnref refs;
CordRepBtree* left = MakeLeaf(CordRepBtree::kMaxCapacity - 2);
refs.RefIf(first_shared(), left);
CordRepBtree* right = MakeLeaf(CordRepBtree::kMaxCapacity - 1);
refs.RefIf(second_shared(), right);
CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
: CordRepBtree::Prepend(right, left);
EXPECT_THAT(tree, IsNode(1));
EXPECT_THAT(tree->Edges(), ElementsAre(left, right));
CordRepBtree::Unref(tree);
}
}
TEST_P(CordRepBtreeDualTest, MergeEqualHeightTrees) {
for (bool use_append : {false, true}) {
SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
AutoUnref refs;
std::vector<CordRep*> flats;
CordRepBtree* left = MakeTree(CordRepBtree::kMaxCapacity * 3);
GetLeafEdges(left, flats);
refs.RefIf(first_shared(), left);
CordRepBtree* right = MakeTree(CordRepBtree::kMaxCapacity * 2);
GetLeafEdges(right, flats);
refs.RefIf(second_shared(), right);
CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
: CordRepBtree::Prepend(right, left);
EXPECT_THAT(tree, IsNode(1));
EXPECT_THAT(tree->Edges(), SizeIs(5u));
EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
CordRepBtree::Unref(tree);
}
}
TEST_P(CordRepBtreeDualTest, MergeLeafWithTreeNotExceedingLeafCapacity) {
for (bool use_append : {false, true}) {
SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
AutoUnref refs;
std::vector<CordRep*> flats;
CordRepBtree* left = MakeTree(CordRepBtree::kMaxCapacity * 2 + 2);
GetLeafEdges(left, flats);
refs.RefIf(first_shared(), left);
CordRepBtree* right = MakeTree(3);
GetLeafEdges(right, flats);
refs.RefIf(second_shared(), right);
CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
: CordRepBtree::Prepend(right, left);
EXPECT_THAT(tree, IsNode(1));
EXPECT_THAT(tree->Edges(), SizeIs(3u));
EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
CordRepBtree::Unref(tree);
}
}
TEST_P(CordRepBtreeDualTest, MergeLeafWithTreeExceedingLeafCapacity) {
for (bool use_append : {false, true}) {
SCOPED_TRACE(use_append ? "Using Append" : "Using Prepend");
AutoUnref refs;
std::vector<CordRep*> flats;
CordRepBtree* left = MakeTree(CordRepBtree::kMaxCapacity * 3 - 2);
GetLeafEdges(left, flats);
refs.RefIf(first_shared(), left);
CordRepBtree* right = MakeTree(3);
GetLeafEdges(right, flats);
refs.RefIf(second_shared(), right);
CordRepBtree* tree = use_append ? CordRepBtree::Append(left, right)
: CordRepBtree::Prepend(right, left);
EXPECT_THAT(tree, IsNode(1));
EXPECT_THAT(tree->Edges(), SizeIs(4u));
EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
CordRepBtree::Unref(tree);
}
}
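// Adds extra references to the front and back edges found `depth` levels
// below `tree`, forcing the merge logic on those paths to copy rather than
// mutate.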
void RefEdgesAt(size_t depth, AutoUnref& refs, CordRepBtree* tree) {
absl::Span<CordRep* const> edges = tree->Edges();
if (depth == 0) {
refs.Ref(edges.front());
refs.Ref(edges.back());
} else {
assert(tree->height() > 0);
RefEdgesAt(depth - 1, refs, edges.front()->btree());
RefEdgesAt(depth - 1, refs, edges.back()->btree());
}
}
TEST(CordRepBtreeTest, MergeFuzzTest) {
constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
std::minstd_rand rnd;
std::uniform_int_distribution<int> coin_flip(0, 1);
std::uniform_int_distribution<int> dice_throw(1, 6);
auto random_leaf_count = [&]() {
std::uniform_int_distribution<int> dist_height(0, 3);
std::uniform_int_distribution<int> dist_leaf(0, max_cap - 1);
const int height = dist_height(rnd);
return (height ? pow(max_cap, height) : 0) + dist_leaf(rnd);
};
for (int i = 0; i < 10000; ++i) {
AutoUnref refs;
std::vector<CordRep*> flats;
CordRepBtree* left = MakeTree(random_leaf_count(), coin_flip(rnd));
GetLeafEdges(left, flats);
if (dice_throw(rnd) == 1) {
std::uniform_int_distribution<size_t> dist(
0, static_cast<size_t>(left->height()));
RefEdgesAt(dist(rnd), refs, left);
}
CordRepBtree* right = MakeTree(random_leaf_count(), coin_flip(rnd));
GetLeafEdges(right, flats);
if (dice_throw(rnd) == 1) {
std::uniform_int_distribution<size_t> dist(
0, static_cast<size_t>(right->height()));
RefEdgesAt(dist(rnd), refs, right);
}
CordRepBtree* tree = CordRepBtree::Append(left, right);
EXPECT_THAT(GetLeafEdges(tree), ElementsAreArray(flats));
CordRepBtree::Unref(tree);
}
}
TEST_P(CordRepBtreeTest, RemoveSuffix) {
constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
for (size_t cap : {max_cap - 1, max_cap * 2, max_cap * max_cap * 2}) {
const std::string data = CreateRandomString(cap * 512);
{
AutoUnref refs;
CordRepBtree* node = refs.RefIf(shared(), CreateTree(data, 512));
EXPECT_THAT(CordRepBtree::RemoveSuffix(node, data.length()), Eq(nullptr));
node = refs.RefIf(shared(), CreateTree(data, 512));
EXPECT_THAT(CordRepBtree::RemoveSuffix(node, 0), Eq(node));
CordRep::Unref(node);
}
for (size_t n = 1; n < data.length(); ++n) {
AutoUnref refs;
auto flats = CreateFlatsFromString(data, 512);
CordRepBtree* node = refs.RefIf(shared(), CreateTree(flats));
CordRep* rep = refs.Add(CordRepBtree::RemoveSuffix(node, n));
EXPECT_THAT(CordToString(rep), Eq(data.substr(0, data.length() - n)));
auto is_flat = [](CordRep* rep) { return rep->tag >= FLAT; };
std::vector<CordRep*> edges = CordCollectRepsIf(is_flat, rep);
ASSERT_THAT(edges.size(), Le(flats.size()));
CordRep* last_edge = edges.back();
edges.pop_back();
const size_t last_length = rep->length - edges.size() * 512;
size_t index = 0;
for (CordRep* edge : edges) {
ASSERT_THAT(edge, Eq(flats[index++]));
ASSERT_THAT(edge->length, Eq(512u));
}
if (last_length >= 500) {
EXPECT_THAT(last_edge, Eq(flats[index++]));
if (shared()) {
EXPECT_THAT(last_edge->length, Eq(512u));
} else {
EXPECT_TRUE(last_edge->refcount.IsOne());
EXPECT_THAT(last_edge->length, Eq(last_length));
}
}
}
}
}
TEST(CordRepBtreeTest, SubTree) {
constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
const size_t n = max_cap * max_cap * 2;
const std::string data = CreateRandomString(n * 3);
std::vector<CordRep*> flats;
for (absl::string_view s = data; !s.empty(); s.remove_prefix(3)) {
flats.push_back(MakeFlat(s.substr(0, 3)));
}
CordRepBtree* node = CordRepBtree::Create(CordRep::Ref(flats[0]));
for (size_t i = 1; i < flats.size(); ++i) {
node = CordRepBtree::Append(node, CordRep::Ref(flats[i]));
}
for (size_t offset = 0; offset < data.length(); ++offset) {
for (size_t length = 1; length <= data.length() - offset; ++length) {
CordRep* rep = node->SubTree(offset, length);
EXPECT_THAT(CordToString(rep), Eq(data.substr(offset, length)));
CordRep::Unref(rep);
}
}
CordRepBtree::Unref(node);
for (CordRep* rep : flats) {
CordRep::Unref(rep);
}
}
TEST(CordRepBtreeTest, SubTreeOnExistingSubstring) {
AutoUnref refs;
std::string data = CreateRandomString(1000);
CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("abc"));
CordRep* flat = MakeFlat(data);
leaf = CordRepBtree::Append(leaf, flat);
CordRep* result = leaf->SubTree(0, 3 + 990);
ASSERT_THAT(result->tag, Eq(BTREE));
CordRep::Unref(leaf);
leaf = result->btree();
ASSERT_THAT(leaf->Edges(), ElementsAre(_, IsSubstring(0u, 990u)));
EXPECT_THAT(leaf->Edges()[1]->substring()->child, Eq(flat));
result = leaf->SubTree(3 + 5, 970);
ASSERT_THAT(result, IsSubstring(5u, 970u));
EXPECT_THAT(result->substring()->child, Eq(flat));
CordRep::Unref(result);
CordRep::Unref(leaf);
}
TEST_P(CordRepBtreeTest, AddDataToLeaf) {
const size_t n = CordRepBtree::kMaxCapacity;
const std::string data = CreateRandomString(n * 3);
for (bool append : {true, false}) {
AutoUnref refs;
DataConsumer consumer(data, append);
SCOPED_TRACE(append ? "Append" : "Prepend");
CordRepBtree* leaf = CordRepBtree::Create(MakeFlat(consumer.Next(3)));
for (size_t i = 1; i < n; ++i) {
refs.RefIf(shared(), leaf);
CordRepBtree* result = BtreeAdd(leaf, append, consumer.Next(3));
EXPECT_THAT(result, Conditional(shared(), Ne(leaf), Eq(leaf)));
EXPECT_THAT(CordToString(result), Eq(consumer.Consumed()));
leaf = result;
}
CordRep::Unref(leaf);
}
}
TEST_P(CordRepBtreeTest, AppendDataToTree) {
AutoUnref refs;
size_t n = CordRepBtree::kMaxCapacity + CordRepBtree::kMaxCapacity / 2;
std::string data = CreateRandomString(n * 3);
CordRepBtree* tree = refs.RefIf(shared(), CreateTree(data, 3));
CordRepBtree* leaf0 = tree->Edges()[0]->btree();
CordRepBtree* leaf1 = tree->Edges()[1]->btree();
CordRepBtree* result = CordRepBtree::Append(tree, "123456789");
EXPECT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
EXPECT_THAT(result->Edges(),
ElementsAre(leaf0, Conditional(shared(), Ne(leaf1), Eq(leaf1))));
EXPECT_THAT(CordToString(result), Eq(data + "123456789"));
CordRep::Unref(result);
}
TEST_P(CordRepBtreeTest, PrependDataToTree) {
AutoUnref refs;
size_t n = CordRepBtree::kMaxCapacity + CordRepBtree::kMaxCapacity / 2;
std::string data = CreateRandomString(n * 3);
CordRepBtree* tree = refs.RefIf(shared(), CreateTreeReverse(data, 3));
CordRepBtree* leaf0 = tree->Edges()[0]->btree();
CordRepBtree* leaf1 = tree->Edges()[1]->btree();
CordRepBtree* result = CordRepBtree::Prepend(tree, "123456789");
EXPECT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
EXPECT_THAT(result->Edges(),
ElementsAre(Conditional(shared(), Ne(leaf0), Eq(leaf0)), leaf1));
EXPECT_THAT(CordToString(result), Eq("123456789" + data));
CordRep::Unref(result);
}
TEST_P(CordRepBtreeTest, AddDataToTreeThreeLevelsDeep) {
constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
const size_t n = max_cap * max_cap * max_cap;
const std::string data = CreateRandomString(n * 3);
for (bool append : {true, false}) {
AutoUnref refs;
DataConsumer consumer(data, append);
SCOPED_TRACE(append ? "Append" : "Prepend");
CordRepBtree* tree = CordRepBtree::Create(MakeFlat(consumer.Next(3)));
for (size_t i = 1; i < max_cap; ++i) {
tree = BtreeAdd(tree, append, consumer.Next(3));
}
ASSERT_THAT(CordToString(tree), Eq(consumer.Consumed()));
refs.RefIf(shared(), tree);
CordRepBtree* result = BtreeAdd(tree, append, consumer.Next(3));
ASSERT_THAT(result, IsNode(1));
ASSERT_THAT(result, Ne(tree));
ASSERT_THAT(CordToString(result), Eq(consumer.Consumed()));
tree = result;
for (size_t i = max_cap + 1; i < max_cap * max_cap; ++i) {
refs.RefIf(shared(), tree);
result = BtreeAdd(tree, append, consumer.Next(3));
ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
ASSERT_THAT(CordToString(result), Eq(consumer.Consumed()));
tree = result;
}
refs.RefIf(shared(), tree);
result = BtreeAdd(tree, append, consumer.Next(3));
ASSERT_THAT(result, IsNode(2));
ASSERT_THAT(result, Ne(tree));
ASSERT_THAT(CordToString(result), Eq(consumer.Consumed()));
tree = result;
for (size_t i = max_cap * max_cap + 1; i < max_cap * max_cap * max_cap;
++i) {
refs.RefIf(shared(), tree);
result = BtreeAdd(tree, append, consumer.Next(3));
ASSERT_THAT(result, Conditional(shared(), Ne(tree), Eq(tree)));
ASSERT_THAT(CordToString(result), Eq(consumer.Consumed()));
tree = result;
}
CordRep::Unref(tree);
}
}
TEST_P(CordRepBtreeTest, AddLargeDataToLeaf) {
const size_t max_cap = CordRepBtree::kMaxCapacity;
const size_t n = max_cap * max_cap * max_cap * 3 + 2;
const std::string data = CreateRandomString(n * kMaxFlatLength);
for (bool append : {true, false}) {
AutoUnref refs;
SCOPED_TRACE(append ? "Append" : "Prepend");
CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("abc"));
refs.RefIf(shared(), leaf);
CordRepBtree* result = BtreeAdd(leaf, append, data);
EXPECT_THAT(CordToString(result), Eq(append ? "abc" + data : data + "abc"));
CordRep::Unref(result);
}
}
TEST_P(CordRepBtreeTest, CreateFromTreeReturnsTree) {
AutoUnref refs;
CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("Hello world"));
refs.RefIf(shared(), leaf);
CordRepBtree* result = CordRepBtree::Create(leaf);
EXPECT_THAT(result, Eq(leaf));
CordRep::Unref(result);
}
TEST(CordRepBtreeTest, GetCharacter) {
size_t n = CordRepBtree::kMaxCapacity * CordRepBtree::kMaxCapacity + 2;
std::string data = CreateRandomString(n * 3);
CordRepBtree* tree = CreateTree(data, 3);
tree = tree->Append(tree, MakeSubstring(4, 5, MakeFlat("abcdefghijklm")));
data += "efghi";
for (size_t i = 0; i < data.length(); ++i) {
ASSERT_THAT(tree->GetCharacter(i), Eq(data[i]));
}
CordRep::Unref(tree);
}
TEST_P(CordRepBtreeTest, IsFlatSingleFlat) {
CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("Hello world"));
absl::string_view fragment;
EXPECT_TRUE(leaf->IsFlat(nullptr));
EXPECT_TRUE(leaf->IsFlat(&fragment));
EXPECT_THAT(fragment, Eq("Hello world"));
fragment = "";
EXPECT_TRUE(leaf->IsFlat(0, 11, nullptr));
EXPECT_TRUE(leaf->IsFlat(0, 11, &fragment));
EXPECT_THAT(fragment, Eq("Hello world"));
EXPECT_TRUE(leaf->IsFlat(1, 4, &fragment));
EXPECT_THAT(fragment, Eq("ello"));
EXPECT_TRUE(leaf->IsFlat(6, 5, &fragment));
EXPECT_THAT(fragment, Eq("world"));
CordRep::Unref(leaf);
}
TEST(CordRepBtreeTest, IsFlatMultiFlat) {
size_t n = CordRepBtree::kMaxCapacity * CordRepBtree::kMaxCapacity + 2;
std::string data = CreateRandomString(n * 3);
CordRepBtree* tree = CreateTree(data, 3);
tree = tree->Append(tree, MakeSubstring(4, 3, MakeFlat("abcdefghijklm")));
tree = tree->Append(tree, MakeSubstring(8, 3, MakeFlat("abcdefghijklm")));
data += "efgijk";
EXPECT_FALSE(tree->IsFlat(nullptr));
absl::string_view fragment = "Can't touch this";
EXPECT_FALSE(tree->IsFlat(&fragment));
EXPECT_THAT(fragment, Eq("Can't touch this"));
for (size_t offset = 0; offset < data.size(); offset += 3) {
EXPECT_TRUE(tree->IsFlat(offset, 3, nullptr));
EXPECT_TRUE(tree->IsFlat(offset, 3, &fragment));
EXPECT_THAT(fragment, Eq(data.substr(offset, 3)));
fragment = "Can't touch this";
if (offset > 0) {
EXPECT_FALSE(tree->IsFlat(offset - 1, 4, nullptr));
EXPECT_FALSE(tree->IsFlat(offset - 1, 4, &fragment));
EXPECT_THAT(fragment, Eq("Can't touch this"));
}
if (offset < data.size() - 4) {
EXPECT_FALSE(tree->IsFlat(offset, 4, nullptr));
EXPECT_FALSE(tree->IsFlat(offset, 4, &fragment));
EXPECT_THAT(fragment, Eq("Can't touch this"));
}
}
CordRep::Unref(tree);
}
#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
TEST_P(CordRepBtreeHeightTest, GetAppendBufferNotPrivate) {
CordRepBtree* tree = CordRepBtree::Create(MakeExternal("Foo"));
CordRepBtree::Ref(tree);
EXPECT_DEATH(tree->GetAppendBuffer(1), ".*");
CordRepBtree::Unref(tree);
CordRepBtree::Unref(tree);
}
#endif
TEST_P(CordRepBtreeHeightTest, GetAppendBufferNotFlat) {
CordRepBtree* tree = CordRepBtree::Create(MakeExternal("Foo"));
for (int i = 1; i <= height(); ++i) {
tree = CordRepBtree::New(tree);
}
EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u));
CordRepBtree::Unref(tree);
}
TEST_P(CordRepBtreeHeightTest, GetAppendBufferFlatNotPrivate) {
CordRepFlat* flat = MakeFlat("abc");
CordRepBtree* tree = CordRepBtree::Create(CordRep::Ref(flat));
for (int i = 1; i <= height(); ++i) {
tree = CordRepBtree::New(tree);
}
EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u));
CordRepBtree::Unref(tree);
CordRep::Unref(flat);
}
TEST_P(CordRepBtreeHeightTest, GetAppendBufferTreeNotPrivate) {
if (height() == 0) return;
AutoUnref refs;
CordRepFlat* flat = MakeFlat("abc");
CordRepBtree* tree = CordRepBtree::Create(CordRep::Ref(flat));
for (int i = 1; i <= height(); ++i) {
if (i == (height() + 1) / 2) refs.Ref(tree);
tree = CordRepBtree::New(tree);
}
EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u));
CordRepBtree::Unref(tree);
CordRep::Unref(flat);
}
TEST_P(CordRepBtreeHeightTest, GetAppendBufferFlatNoCapacity) {
CordRepFlat* flat = MakeFlat("abc");
flat->length = flat->Capacity();
CordRepBtree* tree = CordRepBtree::Create(flat);
for (int i = 1; i <= height(); ++i) {
tree = CordRepBtree::New(tree);
}
EXPECT_THAT(tree->GetAppendBuffer(1), SizeIs(0u));
CordRepBtree::Unref(tree);
}
TEST_P(CordRepBtreeHeightTest, GetAppendBufferFlatWithCapacity) {
CordRepFlat* flat = MakeFlat("abc");
CordRepBtree* tree = CordRepBtree::Create(flat);
for (int i = 1; i <= height(); ++i) {
tree = CordRepBtree::New(tree);
}
absl::Span<char> span = tree->GetAppendBuffer(2);
EXPECT_THAT(span, SizeIs(2u));
EXPECT_THAT(span.data(), TypedEq<void*>(flat->Data() + 3));
EXPECT_THAT(tree->length, Eq(5u));
size_t avail = flat->Capacity() - 5;
span = tree->GetAppendBuffer(avail + 100);
EXPECT_THAT(span, SizeIs(avail));
EXPECT_THAT(span.data(), TypedEq<void*>(flat->Data() + 5));
EXPECT_THAT(tree->length, Eq(5 + avail));
CordRepBtree::Unref(tree);
}
TEST(CordRepBtreeTest, Dump) {
std::stringstream ss;
CordRepBtree::Dump(nullptr, ss);
CordRepBtree::Dump(nullptr, "Once upon a label", ss);
CordRepBtree::Dump(nullptr, "Once upon a label", false, ss);
CordRepBtree::Dump(nullptr, "Once upon a label", true, ss);
CordRepFlat* flat = MakeFlat("Hello world");
CordRepExternal* external = MakeExternal("Hello external");
CordRep* substr_flat = MakeSubstring(1, 6, CordRep::Ref(flat));
CordRep* substr_external = MakeSubstring(2, 7, CordRep::Ref(external));
CordRepBtree* tree = CordRepBtree::Create(flat);
tree = CordRepBtree::Append(tree, external);
tree = CordRepBtree::Append(tree, substr_flat);
tree = CordRepBtree::Append(tree, substr_external);
while (tree->height() == 0) {
tree = CordRepBtree::Append(tree, CordRep::Ref(flat));
tree = CordRepBtree::Append(tree, CordRep::Ref(external));
tree = CordRepBtree::Append(tree, CordRep::Ref(substr_flat));
tree = CordRepBtree::Append(tree, CordRep::Ref(substr_external));
}
for (int api = 0; api <= 3; ++api) {
absl::string_view api_scope;
std::stringstream ss;
switch (api) {
case 0:
api_scope = "Bare";
CordRepBtree::Dump(tree, ss);
break;
case 1:
api_scope = "Label only";
CordRepBtree::Dump(tree, "Once upon a label", ss);
break;
case 2:
api_scope = "Label no content";
CordRepBtree::Dump(tree, "Once upon a label", false, ss);
break;
default:
api_scope = "Label and content";
CordRepBtree::Dump(tree, "Once upon a label", true, ss);
break;
}
SCOPED_TRACE(api_scope);
std::string str = ss.str();
EXPECT_THAT(str, AllOf(HasSubstr("Node(1)"), HasSubstr("Leaf"),
HasSubstr("Private"), HasSubstr("Shared")));
EXPECT_THAT(str, AllOf(HasSubstr("len = 11"), HasSubstr("len = 14"),
HasSubstr("len = 6"), HasSubstr("len = 7"),
HasSubstr("start = 1"), HasSubstr("start = 2")));
EXPECT_THAT(
str, AllOf(HasSubstr(absl::StrCat("0x", absl::Hex(flat))),
HasSubstr(absl::StrCat("0x", absl::Hex(external))),
HasSubstr(absl::StrCat("0x", absl::Hex(substr_flat))),
HasSubstr(absl::StrCat("0x", absl::Hex(substr_external)))));
if (api != 0) {
EXPECT_THAT(str, HasSubstr("Once upon a label"));
}
if (api != 3) {
      EXPECT_THAT(str, Not(AnyOf(HasSubstr("data = \"Hello world\""),
                                 HasSubstr("data = \"Hello external\""),
                                 HasSubstr("data = \"ello w\""),
                                 HasSubstr("data = \"llo ext\""))));
    } else {
      EXPECT_THAT(str, AllOf(HasSubstr("data = \"Hello world\""),
                             HasSubstr("data = \"Hello external\""),
                             HasSubstr("data = \"ello w\""),
                             HasSubstr("data = \"llo ext\"")));
}
}
CordRep::Unref(tree);
}
TEST(CordRepBtreeTest, IsValid) {
EXPECT_FALSE(CordRepBtree::IsValid(nullptr));
CordRepBtree* empty = CordRepBtree::New(0);
EXPECT_TRUE(CordRepBtree::IsValid(empty));
CordRep::Unref(empty);
for (bool as_tree : {false, true}) {
CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("abc"));
CordRepBtree* tree = as_tree ? CordRepBtree::New(leaf) : nullptr;
CordRepBtree* check = as_tree ? tree : leaf;
ASSERT_TRUE(CordRepBtree::IsValid(check));
leaf->length--;
EXPECT_FALSE(CordRepBtree::IsValid(check));
leaf->length++;
ASSERT_TRUE(CordRepBtree::IsValid(check));
leaf->tag--;
EXPECT_FALSE(CordRepBtree::IsValid(check));
leaf->tag++;
ASSERT_TRUE(CordRepBtree::IsValid(check));
leaf->storage[0] = static_cast<uint8_t>(CordRepBtree::kMaxHeight + 1);
EXPECT_FALSE(CordRepBtree::IsValid(check));
leaf->storage[0] = 1;
EXPECT_FALSE(CordRepBtree::IsValid(check));
leaf->storage[0] = 0;
ASSERT_TRUE(CordRepBtree::IsValid(check));
const uint8_t begin = leaf->storage[1];
leaf->storage[1] = static_cast<uint8_t>(CordRepBtree::kMaxCapacity);
EXPECT_FALSE(CordRepBtree::IsValid(check));
leaf->storage[1] = 2;
EXPECT_FALSE(CordRepBtree::IsValid(check));
leaf->storage[1] = begin;
ASSERT_TRUE(CordRepBtree::IsValid(check));
const uint8_t end = leaf->storage[2];
leaf->storage[2] = static_cast<uint8_t>(CordRepBtree::kMaxCapacity + 1);
EXPECT_FALSE(CordRepBtree::IsValid(check));
leaf->storage[2] = end;
ASSERT_TRUE(CordRepBtree::IsValid(check));
CordRep* const edge = leaf->Edges()[0];
const uint8_t tag = edge->tag;
CordRepBtreeTestPeer::SetEdge(leaf, begin, nullptr);
EXPECT_FALSE(CordRepBtree::IsValid(check));
CordRepBtreeTestPeer::SetEdge(leaf, begin, edge);
edge->tag = BTREE;
EXPECT_FALSE(CordRepBtree::IsValid(check));
edge->tag = tag;
if (as_tree) {
ASSERT_TRUE(CordRepBtree::IsValid(check));
leaf->length--;
EXPECT_FALSE(CordRepBtree::IsValid(check));
leaf->length++;
ASSERT_TRUE(CordRepBtree::IsValid(check));
tree->storage[0] = static_cast<uint8_t>(2);
EXPECT_FALSE(CordRepBtree::IsValid(check));
tree->storage[0] = 1;
ASSERT_TRUE(CordRepBtree::IsValid(check));
CordRep* const edge = tree->Edges()[0];
const uint8_t tag = edge->tag;
edge->tag = FLAT;
EXPECT_FALSE(CordRepBtree::IsValid(check));
edge->tag = tag;
}
ASSERT_TRUE(CordRepBtree::IsValid(check));
CordRep::Unref(check);
}
}
TEST(CordRepBtreeTest, AssertValid) {
CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
const CordRepBtree* ctree = tree;
EXPECT_THAT(CordRepBtree::AssertValid(tree), Eq(tree));
EXPECT_THAT(CordRepBtree::AssertValid(ctree), Eq(ctree));
#if defined(GTEST_HAS_DEATH_TEST)
CordRepBtree* nulltree = nullptr;
const CordRepBtree* cnulltree = nullptr;
EXPECT_DEBUG_DEATH(
EXPECT_THAT(CordRepBtree::AssertValid(nulltree), Eq(nulltree)), ".*");
EXPECT_DEBUG_DEATH(
EXPECT_THAT(CordRepBtree::AssertValid(cnulltree), Eq(cnulltree)), ".*");
tree->length--;
EXPECT_DEBUG_DEATH(EXPECT_THAT(CordRepBtree::AssertValid(tree), Eq(tree)),
".*");
EXPECT_DEBUG_DEATH(EXPECT_THAT(CordRepBtree::AssertValid(ctree), Eq(ctree)),
".*");
tree->length++;
#endif
CordRep::Unref(tree);
}
TEST(CordRepBtreeTest, CheckAssertValidShallowVsDeep) {
const bool exhaustive_validation = IsCordBtreeExhaustiveValidationEnabled();
auto cleanup = absl::MakeCleanup([exhaustive_validation] {
SetCordBtreeExhaustiveValidation(exhaustive_validation);
});
CordRep* flat = MakeFlat("abc");
CordRepBtree* tree = CordRepBtree::Create(flat);
constexpr size_t max_cap = CordRepBtree::kMaxCapacity;
const size_t n = max_cap * max_cap * 2;
for (size_t i = 0; i < n; ++i) {
tree = CordRepBtree::Append(tree, MakeFlat("Hello world"));
}
flat->length = 100;
SetCordBtreeExhaustiveValidation(false);
EXPECT_FALSE(CordRepBtree::IsValid(tree));
EXPECT_TRUE(CordRepBtree::IsValid(tree, true));
EXPECT_FALSE(CordRepBtree::IsValid(tree, false));
CordRepBtree::AssertValid(tree);
CordRepBtree::AssertValid(tree, true);
#if defined(GTEST_HAS_DEATH_TEST)
EXPECT_DEBUG_DEATH(CordRepBtree::AssertValid(tree, false), ".*");
#endif
SetCordBtreeExhaustiveValidation(true);
EXPECT_FALSE(CordRepBtree::IsValid(tree));
EXPECT_FALSE(CordRepBtree::IsValid(tree, true));
EXPECT_FALSE(CordRepBtree::IsValid(tree, false));
#if defined(GTEST_HAS_DEATH_TEST)
EXPECT_DEBUG_DEATH(CordRepBtree::AssertValid(tree), ".*");
EXPECT_DEBUG_DEATH(CordRepBtree::AssertValid(tree, true), ".*");
#endif
flat->length = 3;
CordRep::Unref(tree);
}
TEST_P(CordRepBtreeTest, Rebuild) {
for (size_t size : {3u, 8u, 100u, 10000u, 1000000u}) {
SCOPED_TRACE(absl::StrCat("Rebuild @", size));
std::vector<CordRepFlat*> flats;
for (size_t i = 0; i < size; ++i) {
flats.push_back(CordRepFlat::New(2));
flats.back()->Data()[0] = 'x';
flats.back()->length = 1;
}
size_t split_count = 0;
size_t split_limit = 3;
auto it = flats.begin();
CordRepBtree* left = nullptr;
CordRepBtree* right = CordRepBtree::New(*it);
while (++it != flats.end()) {
if (++split_count >= split_limit) {
split_limit += split_limit / 16;
left = left ? CordRepBtree::Append(left, right) : right;
right = CordRepBtree::New(*it);
} else {
right = CordRepBtree::Append(right, *it);
}
}
left = left ? CordRepBtree::Append(left, right) : right;
AutoUnref ref;
left = ref.Add(CordRepBtree::Rebuild(ref.RefIf(shared(), left)));
ASSERT_TRUE(CordRepBtree::IsValid(left));
bool ok = true;
it = flats.begin();
CordVisitReps(left, [&](CordRep* edge) {
if (edge->tag < FLAT) return;
ok = ok && (it != flats.end() && *it++ == edge);
});
EXPECT_TRUE(ok && it == flats.end()) << "Rebuild edges mismatch";
}
}
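// Convenience wrapper for ExtractAppendBuffer with a default capacity of 1.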
CordRepBtree::ExtractResult ExtractLast(CordRepBtree* input, size_t cap = 1) {
return CordRepBtree::ExtractAppendBuffer(input, cap);
}
TEST(CordRepBtreeTest, ExtractAppendBufferLeafSingleFlat) {
CordRep* flat = MakeFlat("Abc");
CordRepBtree* leaf = CordRepBtree::Create(flat);
EXPECT_THAT(ExtractLast(leaf), EqExtractResult(nullptr, flat));
CordRep::Unref(flat);
}
TEST(CordRepBtreeTest, ExtractAppendBufferNodeSingleFlat) {
CordRep* flat = MakeFlat("Abc");
CordRepBtree* leaf = CordRepBtree::Create(flat);
CordRepBtree* node = CordRepBtree::New(leaf);
EXPECT_THAT(ExtractLast(node), EqExtractResult(nullptr, flat));
CordRep::Unref(flat);
}
TEST(CordRepBtreeTest, ExtractAppendBufferLeafTwoFlats) {
std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
CordRepBtree* leaf = CreateTree(flats);
EXPECT_THAT(ExtractLast(leaf), EqExtractResult(flats[0], flats[1]));
CordRep::Unref(flats[0]);
CordRep::Unref(flats[1]);
}
TEST(CordRepBtreeTest, ExtractAppendBufferNodeTwoFlats) {
std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
CordRepBtree* leaf = CreateTree(flats);
CordRepBtree* node = CordRepBtree::New(leaf);
EXPECT_THAT(ExtractLast(node), EqExtractResult(flats[0], flats[1]));
CordRep::Unref(flats[0]);
CordRep::Unref(flats[1]);
}
TEST(CordRepBtreeTest, ExtractAppendBufferNodeTwoFlatsInTwoLeafs) {
std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
CordRepBtree* leaf1 = CordRepBtree::Create(flats[0]);
CordRepBtree* leaf2 = CordRepBtree::Create(flats[1]);
CordRepBtree* node = CordRepBtree::New(leaf1, leaf2);
EXPECT_THAT(ExtractLast(node), EqExtractResult(flats[0], flats[1]));
CordRep::Unref(flats[0]);
CordRep::Unref(flats[1]);
}
TEST(CordRepBtreeTest, ExtractAppendBufferLeafThreeFlats) {
std::vector<CordRep*> flats = CreateFlatsFromString("abcdefghi", 3);
CordRepBtree* leaf = CreateTree(flats);
EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, flats[2]));
CordRep::Unref(flats[2]);
CordRep::Unref(leaf);
}
TEST(CordRepBtreeTest, ExtractAppendBufferNodeThreeFlatsRightNoFolding) {
CordRep* flat = MakeFlat("Abc");
std::vector<CordRep*> flats = CreateFlatsFromString("defghi", 3);
CordRepBtree* leaf1 = CordRepBtree::Create(flat);
CordRepBtree* leaf2 = CreateTree(flats);
CordRepBtree* node = CordRepBtree::New(leaf1, leaf2);
EXPECT_THAT(ExtractLast(node), EqExtractResult(node, flats[1]));
EXPECT_THAT(node->Edges(), ElementsAre(leaf1, leaf2));
EXPECT_THAT(leaf1->Edges(), ElementsAre(flat));
EXPECT_THAT(leaf2->Edges(), ElementsAre(flats[0]));
CordRep::Unref(node);
CordRep::Unref(flats[1]);
}
TEST(CordRepBtreeTest, ExtractAppendBufferNodeThreeFlatsRightLeafFolding) {
CordRep* flat = MakeFlat("Abc");
std::vector<CordRep*> flats = CreateFlatsFromString("defghi", 3);
CordRepBtree* leaf1 = CreateTree(flats);
CordRepBtree* leaf2 = CordRepBtree::Create(flat);
CordRepBtree* node = CordRepBtree::New(leaf1, leaf2);
EXPECT_THAT(ExtractLast(node), EqExtractResult(leaf1, flat));
EXPECT_THAT(leaf1->Edges(), ElementsAreArray(flats));
CordRep::Unref(leaf1);
CordRep::Unref(flat);
}
TEST(CordRepBtreeTest, ExtractAppendBufferNoCapacity) {
std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
CordRepBtree* leaf = CreateTree(flats);
size_t avail = flats[1]->flat()->Capacity() - flats[1]->length;
EXPECT_THAT(ExtractLast(leaf, avail + 1), EqExtractResult(leaf, nullptr));
EXPECT_THAT(ExtractLast(leaf, avail), EqExtractResult(flats[0], flats[1]));
CordRep::Unref(flats[0]);
CordRep::Unref(flats[1]);
}
TEST(CordRepBtreeTest, ExtractAppendBufferNotFlat) {
std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
auto substr = MakeSubstring(1, 2, flats[1]);
CordRepBtree* leaf = CreateTree({flats[0], substr});
EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr));
CordRep::Unref(leaf);
}
TEST(CordRepBtreeTest, ExtractAppendBufferShared) {
std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
CordRepBtree* leaf = CreateTree(flats);
CordRep::Ref(flats[1]);
EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr));
CordRep::Unref(flats[1]);
CordRep::Ref(leaf);
EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr));
CordRep::Unref(leaf);
CordRepBtree* node = CordRepBtree::New(leaf);
CordRep::Ref(node);
EXPECT_THAT(ExtractLast(node), EqExtractResult(node, nullptr));
CordRep::Unref(node);
CordRep::Unref(node);
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cord_rep_btree.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cord_rep_btree_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
db90c9df-2043-44fd-a8f2-b28813345dea | cpp | tensorflow/tensorflow | quantize_and_dequantize | tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.cc | tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.h"
#include <any>
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
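// Generates GLSL that emulates quantization: the value is clamped to
// [quant_min, quant_max], snapped to the nearest quantization step of size
// quant_scale, and mapped back to the original range.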
class QuantizeAndDequantize : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
std::string code = R"(
value_0 = clamp(value_0, vec4($quant_min$), vec4($quant_max$));
value_0 = (value_0 - vec4($quant_min$)) / vec4($quant_scale$);
value_0 = floor(value_0 + vec4(0.5));
value_0 = value_0 * vec4($quant_scale$) + vec4($quant_min$);
)";
const auto& attr =
std::any_cast<const QuantizeAndDequantizeAttributes&>(ctx.op_attr);
*generated_code = {
{{"quant_min", attr.min},
{"quant_max", attr.max},
{"quant_scale", attr.scale}},
{},
{},
uint3(),
uint3(),
code,
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewQuantizeAndDequantizeNodeShader() {
return std::make_unique<QuantizeAndDequantize>();
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/quantize_and_dequantize_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, QuantAndDequant_Dim2Bits8) {
auto status = QuantAndDequant_Dim2Bits8Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, QuantAndDequant_Dim3Bits8_NegativeRange) {
auto status = QuantAndDequant_Dim3Bits8_NegativeRangeTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, QuantAndDequant_Dim3Bits16) {
auto status = QuantAndDequant_Dim3Bits16Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, QuantAndDequant_Dim2Bits16_NegativeRange) {
auto status = QuantAndDequant_Dim2Bits16_NegativeRangeTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
678376f5-040b-442d-b9eb-167c86ddc768 | cpp | tensorflow/tensorflow | set_ops | tensorflow/core/ops/set_ops.cc | tensorflow/core/ops/set_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
REGISTER_OP("SetSize")
.Input("set_indices: int64")
.Input("set_values: T")
.Input("set_shape: int64")
.Attr("validate_indices: bool = true")
.Attr("T: {int8, int16, int32, int64, uint8, uint16, string}")
.Output("size: int32")
.SetShapeFn(shape_inference::UnknownShape);
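// The result of a set operation on two dense sets is a sparse tensor whose
// rank matches the common rank of the inputs; all but the last dimension of
// the two inputs must be compatible.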
REGISTER_OP("DenseToDenseSetOperation")
.Input("set1: T")
.Input("set2: T")
.Attr("set_operation: string")
.Attr("validate_indices: bool = true")
.Attr("T: {int8, int16, int32, int64, uint8, uint16, string}")
.Output("result_indices: int64")
.Output("result_values: T")
.Output("result_shape: int64")
.SetShapeFn([](InferenceContext* c) {
if (c->num_inputs() != 2) {
return errors::InvalidArgument("len(inputs) != 2.");
}
DimensionHandle output_rank;
ShapeHandle input0_shape = c->input(0);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input0_shape, 2, &input0_shape));
if (c->RankKnown(input0_shape)) {
const int32_t input0_rank = c->Rank(input0_shape);
ShapeHandle input1_shape = c->input(1);
TF_RETURN_IF_ERROR(
c->WithRank(input1_shape, input0_rank, &input1_shape));
if (c->RankKnown(input1_shape)) {
const int32_t rank = c->Rank(input1_shape);
ShapeHandle group0_shape;
TF_RETURN_IF_ERROR(
c->Subshape(input0_shape, 0, rank - 1, &group0_shape));
ShapeHandle group1_shape;
TF_RETURN_IF_ERROR(
c->Subshape(input1_shape, 0, rank - 1, &group1_shape));
ShapeHandle unused_shape;
TF_RETURN_IF_ERROR(
c->Merge(group0_shape, group1_shape, &unused_shape));
}
output_rank = c->MakeDim(input0_rank);
} else {
ShapeHandle input1_shape = c->input(1);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input1_shape, 2, &input1_shape));
if (c->RankKnown(input1_shape)) {
output_rank = c->MakeDim(c->Rank(input1_shape));
} else {
output_rank = c->UnknownDim();
}
}
c->set_output(0, c->Matrix(c->UnknownDim(), output_rank));
c->set_output(1, c->Vector(c->UnknownDim()));
c->set_output(2, c->Vector(output_rank));
return absl::OkStatus();
});
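// Dense first operand, sparse second: the output rank follows the dense input
// when its rank is known (and must match the rank recorded in the sparse
// shape input), otherwise the sparse shape's rank.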
REGISTER_OP("DenseToSparseSetOperation")
.Input("set1: T")
.Input("set2_indices: int64")
.Input("set2_values: T")
.Input("set2_shape: int64")
.Attr("set_operation: string")
.Attr("validate_indices: bool = true")
.Attr("T: {int8, int16, int32, int64, uint8, uint16, string}")
.Output("result_indices: int64")
.Output("result_values: T")
.Output("result_shape: int64")
.SetShapeFn([](InferenceContext* c) {
if (c->num_inputs() != 4) {
return errors::InvalidArgument("len(inputs) != 4.");
}
ShapeHandle input1_shape_shape = c->input(3);
TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor(
c, c->input(1), c->input(2), input1_shape_shape));
DimensionHandle input1_rank_dim = c->Dim(input1_shape_shape, 0);
DimensionHandle output_rank_dim;
ShapeHandle input0_shape = c->input(0);
TF_RETURN_IF_ERROR(c->WithRankAtLeast(input0_shape, 2, &input0_shape));
if (c->RankKnown(input0_shape)) {
const int32_t input0_rank = c->Rank(input0_shape);
TF_RETURN_IF_ERROR(
c->WithValue(input1_rank_dim, input0_rank, &input1_rank_dim));
output_rank_dim = c->MakeDim(input0_rank);
} else if (c->ValueKnown(input1_rank_dim)) {
output_rank_dim = input1_rank_dim;
} else {
output_rank_dim = c->UnknownDim();
}
c->set_output(0, c->Matrix(c->UnknownDim(), output_rank_dim));
c->set_output(1, c->Vector(c->UnknownDim()));
c->set_output(2, c->Vector(output_rank_dim));
return absl::OkStatus();
});
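// Both operands sparse: ranks must be at least 2 and must agree when both are
// known; the output rank follows whichever input rank is known.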
REGISTER_OP("SparseToSparseSetOperation")
.Input("set1_indices: int64")
.Input("set1_values: T")
.Input("set1_shape: int64")
.Input("set2_indices: int64")
.Input("set2_values: T")
.Input("set2_shape: int64")
.Attr("set_operation: string")
.Attr("validate_indices: bool = true")
.Attr("T: {int8, int16, int32, int64, uint8, uint16, string}")
.Output("result_indices: int64")
.Output("result_values: T")
.Output("result_shape: int64")
.SetShapeFn([](InferenceContext* c) {
if (c->num_inputs() != 6) {
return errors::InvalidArgument("len(inputs) != 6.");
}
ShapeHandle input0_shape_shape = c->input(2);
ShapeHandle input1_shape_shape = c->input(5);
TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor(
c, c->input(0), c->input(1), input0_shape_shape));
TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor(
c, c->input(3), c->input(4), input1_shape_shape));
DimensionHandle input0_rank_dim = c->Dim(input0_shape_shape, 0);
DimensionHandle input1_rank_dim = c->Dim(input1_shape_shape, 0);
DimensionHandle output_rank_dim;
if (c->ValueKnown(input0_rank_dim)) {
const int64_t input0_rank = c->Value(input0_rank_dim);
if (input0_rank < 2) {
return errors::InvalidArgument("Input 0, expected rank >= 2, got ",
input0_rank, ".");
}
TF_RETURN_IF_ERROR(
c->WithValue(input1_rank_dim, input0_rank, &input1_rank_dim));
output_rank_dim = input0_rank_dim;
} else if (c->ValueKnown(input1_rank_dim)) {
const int64_t input1_rank = c->Value(input1_rank_dim);
if (input1_rank < 2) {
return errors::InvalidArgument("Input 1, expected rank >= 2, got ",
input1_rank, ".");
}
output_rank_dim = input1_rank_dim;
} else {
output_rank_dim = c->UnknownDim();
}
c->set_output(0, c->Matrix(c->UnknownDim(), output_rank_dim));
c->set_output(1, c->Vector(c->UnknownDim()));
c->set_output(2, c->Vector(output_rank_dim));
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(SetOpsTest, DenseToDenseShape_InvalidNumberOfInputs) {
ShapeInferenceTestOp op("DenseToDenseSetOperation");
op.input_tensors.resize(3);
INFER_ERROR("Wrong number of inputs passed", op, "?;?;?");
}
TEST(SetOpsTest, DenseToDenseShape) {
ShapeInferenceTestOp op("DenseToDenseSetOperation");
INFER_OK(op, "?;?", "[?,?];[?];[?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[?];?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "?;[?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[2];?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "?;[2]");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[?,?];[?,?,?]");
INFER_ERROR("Shape must be rank 3 but is rank 2", op, "[?,?,?];[?,?]");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[2,1];[2,1,2]");
INFER_ERROR("Shape must be rank 3 but is rank 2", op, "[2,1,2];[2,1]");
INFER_OK(op, "[?,?];?", "[?,2];[?];[2]");
INFER_OK(op, "?;[?,?]", "[?,2];[?];[2]");
INFER_OK(op, "[?,?];[?,?]", "[?,2];[?];[2]");
INFER_OK(op, "[?,?,?,?];?", "[?,4];[?];[4]");
INFER_OK(op, "?;[?,?,?,?]", "[?,4];[?];[4]");
INFER_OK(op, "[?,?,?,?];[?,?,?,?]", "[?,4];[?];[4]");
INFER_OK(op, "[5,3,2,1];?", "[?,4];[?];[4]");
INFER_OK(op, "?;[5,3,2,1]", "[?,4];[?];[4]");
INFER_OK(op, "[5,3,2,1];[?,?,?,?]", "[?,4];[?];[4]");
INFER_OK(op, "[?,?,?,?];[5,3,2,1]", "[?,4];[?];[4]");
INFER_OK(op, "[5,3,2,1];[?,?,?,?]", "[?,4];[?];[4]");
INFER_ERROR("Dimension 0 in both shapes must be equal", op,
"[4,?,2,?];[3,1,?,5]");
INFER_ERROR("Dimension 2 in both shapes must be equal", op,
"[4,3,2,1];[4,3,3,1]");
INFER_OK(op, "[4,5,6,7];[?,?,?,?]", "[?,4];[?];[4]");
INFER_OK(op, "[4,5,6,7];[?,?,?,4]", "[?,4];[?];[4]");
INFER_OK(op, "[?,?,?,?];[4,5,6,7]", "[?,4];[?];[4]");
INFER_OK(op, "[4,?,2,?];[?,1,?,5]", "[?,4];[?];[4]");
INFER_OK(op, "[4,5,6,7];[4,?,6,?]", "[?,4];[?];[4]");
INFER_OK(op, "[4,5,6,7];[4,5,6,4]", "[?,4];[?];[4]");
}
TEST(SetOpsTest, DenseToSparseShape_InvalidNumberOfInputs) {
ShapeInferenceTestOp op("DenseToSparseSetOperation");
op.input_tensors.resize(5);
INFER_ERROR("Wrong number of inputs passed", op, "?;?;?;?;?");
}
TEST(SetOpsTest, DenseToSparseShape) {
ShapeInferenceTestOp op("DenseToSparseSetOperation");
INFER_OK(op, "?;?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "?;?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "?;[?,?];[?];[?]", "[?,?];[?];[?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[?];?;?;?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[?];[?,?];[?];[?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[?];[5,3];[5];[3]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[2];?;?;?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[2];[?,?];[?];[?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[2];[5,3];[5];[3]");
INFER_OK(op, "[?,?];?;?;?", "[?,2];[?];[2]");
INFER_OK(op, "[?,?];[?,?];[?];[?]", "[?,2];[?];[2]");
INFER_OK(op, "?;[?,2];[?];[2]", "[?,d3_0];[?];[d3_0]");
INFER_OK(op, "?;[5,2];[5];[2]", "[?,d3_0];[?];[d3_0]");
INFER_OK(op, "[?,?];[5,2];[5];[2]", "[?,2];[?];[2]");
INFER_OK(op, "[4,3];[5,2];[5];[2]", "[?,2];[?];[2]");
INFER_ERROR("elements in index (5) and values (6) do not match", op,
"?;[5,3];[6];[3]");
INFER_ERROR("rank (3) and shape rank (4) do not match", op,
"?;[5,3];[5];[4]");
}
TEST(SetOpsTest, SparseToSparseShape_InvalidNumberOfInputs) {
ShapeInferenceTestOp op("SparseToSparseSetOperation");
op.input_tensors.resize(7);
INFER_ERROR("Wrong number of inputs passed", op, "?;?;?;?;?;?;?");
}
TEST(SetOpsTest, SparseToSparseShape) {
ShapeInferenceTestOp op("SparseToSparseSetOperation");
INFER_OK(op, "?;?;?;?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "[?,?];[?];[?];[?,?];[?];[?]", "[?,?];[?];[?]");
INFER_OK(op, "?;?;?;[?,?];[?];[?]", "[?,?];[?];[?]");
INFER_OK(op, "[?,?];[?];[?];?;?;?", "[?,?];[?];[?]");
INFER_OK(op, "[?,2];[?];[2];?;?;?", "[?,d2_0];[?];[d2_0]");
INFER_OK(op, "?;?;?;[?,2];[?];[2]", "[?,d5_0];[?];[d5_0]");
INFER_OK(op, "[?,2];[?];[2];[?,?];[?];[?]", "[?,d2_0];[?];[d2_0]");
INFER_OK(op, "[?,?];[?];[?];[?,2];[?];[2]", "[?,d5_0];[?];[d5_0]");
INFER_OK(op, "[?,2];[?];[2];[?,2];[?];[2]", "[?,d2_0];[?];[d2_0]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/set_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/set_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
721cbde5-7ea7-4461-a9c7-5f9a290fb85e | cpp | google/cel-cpp | copy_on_write | internal/copy_on_write.h | internal/copy_on_write_test.cc | #ifndef THIRD_PARTY_CEL_CPP_INTERNAL_COPY_ON_WRITE_H_
#define THIRD_PARTY_CEL_CPP_INTERNAL_COPY_ON_WRITE_H_
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
namespace cel::internal {
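// Reference-counted copy-on-write wrapper around a value of type T. Copies
// share one heap-allocated representation; a mutable access clones the value
// only when the representation is shared with other instances.
//
// Example (sketch):
//   CopyOnWrite<std::string> a;      // refcount 1
//   CopyOnWrite<std::string> b = a;  // shared, refcount 2
//   b.mutable_get() += "x";          // b clones; a is unchanged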
template <typename T>
class ABSL_ATTRIBUTE_TRIVIAL_ABI CopyOnWrite final {
private:
struct Rep final {
Rep() = default;
template <typename... Args,
typename = std::enable_if_t<std::is_constructible_v<T, Args...>>>
    explicit Rep(Args&&... args) : value(std::forward<Args>(args)...) {}
Rep(const Rep&) = delete;
Rep(Rep&&) = delete;
Rep& operator=(const Rep&) = delete;
Rep& operator=(Rep&&) = delete;
std::atomic<int32_t> refs = 1;
T value;
void Ref() {
const auto count = refs.fetch_add(1, std::memory_order_relaxed);
ABSL_DCHECK_GT(count, 0);
}
void Unref() {
const auto count = refs.fetch_sub(1, std::memory_order_acq_rel);
ABSL_DCHECK_GT(count, 0);
if (count == 1) {
delete this;
}
}
bool Unique() const {
const auto count = refs.load(std::memory_order_acquire);
ABSL_DCHECK_GT(count, 0);
return count == 1;
}
};
public:
static_assert(std::is_copy_constructible_v<T>,
"T must be copy constructible");
static_assert(std::is_destructible_v<T>, "T must be destructible");
template <typename = std::enable_if_t<std::is_default_constructible_v<T>>>
CopyOnWrite() : rep_(new Rep()) {}
CopyOnWrite(const CopyOnWrite<T>& other) : rep_(other.rep_) { rep_->Ref(); }
CopyOnWrite(CopyOnWrite<T>&& other) noexcept : rep_(other.rep_) {
other.rep_ = nullptr;
}
~CopyOnWrite() {
if (rep_ != nullptr) {
rep_->Unref();
}
}
CopyOnWrite<T>& operator=(const CopyOnWrite<T>& other) {
ABSL_DCHECK_NE(this, std::addressof(other));
other.rep_->Ref();
rep_->Unref();
rep_ = other.rep_;
return *this;
}
CopyOnWrite<T>& operator=(CopyOnWrite<T>&& other) noexcept {
ABSL_DCHECK_NE(this, std::addressof(other));
rep_->Unref();
rep_ = other.rep_;
other.rep_ = nullptr;
return *this;
}
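  // Returns a mutable reference to the value, first cloning it if the
  // representation is shared with other instances (the copy-on-write step).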
T& mutable_get() ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_DCHECK(rep_ != nullptr) << "Object in moved-from state.";
if (ABSL_PREDICT_FALSE(!rep_->Unique())) {
auto* rep = new Rep(static_cast<const T&>(rep_->value));
rep_->Unref();
rep_ = rep;
}
return rep_->value;
}
const T& get() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
ABSL_DCHECK(rep_ != nullptr) << "Object in moved-from state.";
return rep_->value;
}
void swap(CopyOnWrite<T>& other) noexcept {
using std::swap;
swap(rep_, other.rep_);
}
private:
Rep* rep_;
};
template <typename T>
void swap(CopyOnWrite<T>& lhs, CopyOnWrite<T>& rhs) noexcept {
lhs.swap(rhs);
}
}
#endif | #include "internal/copy_on_write.h"
#include <cstdint>
#include "internal/testing.h"
namespace cel::internal {
namespace {
TEST(CopyOnWrite, Basic) {
CopyOnWrite<int32_t> original;
EXPECT_EQ(&original.mutable_get(), &original.get());
{
auto duplicate = original;
EXPECT_EQ(&duplicate.get(), &original.get());
EXPECT_NE(&duplicate.mutable_get(), &original.get());
}
EXPECT_EQ(&original.mutable_get(), &original.get());
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/copy_on_write.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/copy_on_write_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3d9b2f2f-d60b-4eec-b8fb-be7dca0e0400 | cpp | tensorflow/tensorflow | bad_indices_policy | tensorflow/core/util/bad_indices_policy.cc | tensorflow/core/util/bad_indices_policy_test.cc | #include "tensorflow/core/util/bad_indices_policy.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace tensorflow {
constexpr char kDefault[] = "DEFAULT";
constexpr char kErrorStr[] = "ERROR";
constexpr char kIgnoreStr[] = "IGNORE";
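// Maps the attribute value ("", "DEFAULT", "ERROR", or "IGNORE") to the
// corresponding BadIndicesPolicy; any other string is an InvalidArgument
// error.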
absl::StatusOr<BadIndicesPolicy> BadIndicesPolicyFromString(
absl::string_view str) {
if (str.empty()) return BadIndicesPolicy::kDefault;
if (str == kDefault) return BadIndicesPolicy::kDefault;
if (str == kErrorStr) return BadIndicesPolicy::kError;
if (str == kIgnoreStr) return BadIndicesPolicy::kIgnore;
return absl::InvalidArgumentError(
absl::StrCat("Unknown bad indices handling attribute: ", str));
}
} | #include "tensorflow/core/util/bad_indices_policy.h"
#include <gmock/gmock.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kDefault = "DEFAULT";
constexpr absl::string_view kErrorStr = "ERROR";
constexpr absl::string_view kIgnoreStr = "IGNORE";
class BadIndicesPolicyFromStringTest : public ::testing::Test {
protected:
void TestValidInput(absl::string_view input, BadIndicesPolicy expected) {
absl::StatusOr<BadIndicesPolicy> result = BadIndicesPolicyFromString(input);
ASSERT_TRUE(result.ok());
EXPECT_EQ(result.value(), expected);
}
};
TEST_F(BadIndicesPolicyFromStringTest, EmptyString) {
TestValidInput("", BadIndicesPolicy::kDefault);
}
TEST_F(BadIndicesPolicyFromStringTest, DefaultKeyword) {
TestValidInput(kDefault, BadIndicesPolicy::kDefault);
}
TEST_F(BadIndicesPolicyFromStringTest, ErrorKeyword) {
TestValidInput(kErrorStr, BadIndicesPolicy::kError);
}
TEST_F(BadIndicesPolicyFromStringTest, IgnoreKeyword) {
TestValidInput(kIgnoreStr, BadIndicesPolicy::kIgnore);
}
TEST_F(BadIndicesPolicyFromStringTest, InvalidInput) {
absl::StatusOr<BadIndicesPolicy> result =
BadIndicesPolicyFromString("unknown");
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("Unknown bad indices handling attribute"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/bad_indices_policy.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/bad_indices_policy_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9cd2f506-a605-4a5f-9004-2be24156c63f | cpp | tensorflow/tensorflow | immutable_constant_op | tensorflow/core/kernels/immutable_constant_op.cc | tensorflow/core/kernels/immutable_constant_op_test.cc | #include "tensorflow/core/kernels/immutable_constant_op.h"
#include <unordered_set>
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace {
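// Allocator that hands out the contents of a read-only memory-mapped file
// region instead of heap memory, so the tensor's backing store is the mapped
// file itself. Requests that do not fit the region's alignment or length
// fail, and the failure is surfaced through allocation_status().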
class MemmappedTensorAllocator : public Allocator {
public:
MemmappedTensorAllocator() {}
Status InitializeFromRegion(const string& name, Env* env) {
const auto status =
env->NewReadOnlyMemoryRegionFromFile(name, &memory_region_);
if (!status.ok()) {
return status;
}
return absl::OkStatus();
}
string Name() override { return "MemmappedTensorAllocator"; }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
if ((reinterpret_cast<intptr_t>(memory_region_->data())) % alignment != 0) {
allocation_status_ =
errors::Internal("Readonly memory region has wrong alignment");
return nullptr;
}
if (num_bytes > memory_region_->length()) {
allocation_status_ = errors::Internal(
"Readonly memory region has wrong length (", memory_region_->length(),
") when allocating ", num_bytes);
return nullptr;
}
return const_cast<void*>(memory_region_->data());
}
void DeallocateRaw(void* ptr) override {
if (ptr != memory_region_->data()) {
LOG(ERROR)
<< "Deallocating not allocated region for readonly memory region";
}
if (delete_on_deallocate_) {
delete this;
}
}
const Status& allocation_status() const { return allocation_status_; }
void set_delete_on_deallocate() { delete_on_deallocate_ = true; }
bool AllocatesOpaqueHandle() const override { return true; }
private:
std::unique_ptr<ReadOnlyMemoryRegion> memory_region_;
Status allocation_status_;
bool delete_on_deallocate_ = false;
MemmappedTensorAllocator(const MemmappedTensorAllocator&) = delete;
void operator=(const MemmappedTensorAllocator&) = delete;
};
}
ImmutableConstantOp::ImmutableConstantOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context,
context->GetAttr(kMemoryRegionNameAttr, ®ion_name_));
OP_REQUIRES_OK(context, context->GetAttr(kDTypeAttr, &dtype_));
OP_REQUIRES(context, dtype_ != DT_RESOURCE && dtype_ != DT_VARIANT,
errors::InvalidArgument(
"Resource and variant dtypes are invalid for this op."));
OP_REQUIRES_OK(context, context->GetAttr(kShapeAttr, &shape_));
}
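// Outputs a tensor backed directly by the memory-mapped region named by the
// attribute; the allocator deletes itself once the tensor releases it.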
void ImmutableConstantOp::Compute(OpKernelContext* ctx) {
std::unique_ptr<MemmappedTensorAllocator> allocator(
new MemmappedTensorAllocator());
OP_REQUIRES_OK(ctx,
allocator->InitializeFromRegion(region_name_, ctx->env()));
OP_REQUIRES(ctx, dtype_ != DT_STRING,
errors::Unimplemented("Sorry, DT_STRING is not currently "
"supported for ImmutableConstOp."));
ctx->set_output(0, Tensor(allocator.get(), dtype_, shape_));
OP_REQUIRES_OK(ctx, allocator->allocation_status());
allocator.release()->set_delete_on_deallocate();
}
ImmutableConstantOp::~ImmutableConstantOp() {}
constexpr char const* ImmutableConstantOp::kDTypeAttr;
constexpr char const* ImmutableConstantOp::kShapeAttr;
constexpr char const* ImmutableConstantOp::kMemoryRegionNameAttr;
REGISTER_KERNEL_BUILDER(Name("ImmutableConst").Device(DEVICE_CPU),
ImmutableConstantOp);
} | #include "tensorflow/core/kernels/immutable_constant_op.h"
#include <algorithm>
#include <tuple>
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/null_file_system.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace {
constexpr size_t kTestAlignment = 4096;
constexpr size_t kTestTensorSize = 4;
constexpr size_t kTestTensorSizeBytes = kTestTensorSize * sizeof(float);
class TestReadOnlyMemoryRegion : public ReadOnlyMemoryRegion {
public:
TestReadOnlyMemoryRegion() = delete;
explicit TestReadOnlyMemoryRegion(uint64 length)
: memptr_(cpu_allocator()->AllocateRaw(kTestAlignment, length)),
length_(length) {}
~TestReadOnlyMemoryRegion() override {
cpu_allocator()->DeallocateRaw(memptr_);
}
const void* data() override { return memptr_; }
float* GetWritableDataStart() { return reinterpret_cast<float*>(memptr_); }
uint64 length() override { return length_; }
protected:
void* memptr_;
uint64 length_;
};
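// Test filesystem whose memory regions are filled with a constant float
// chosen by the URI path: "/2" yields 2.0f, "/3" yields 3.0f, and anything
// else yields 0.0f.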
class TestFileSystem : public NullFileSystem {
public:
~TestFileSystem() override = default;
using NullFileSystem::NewReadOnlyMemoryRegionFromFile;
Status NewReadOnlyMemoryRegionFromFile(
const string& fname, TransactionToken* token,
std::unique_ptr<ReadOnlyMemoryRegion>* result) override {
float val = 0;
StringPiece scheme, host, path;
io::ParseURI(fname, &scheme, &host, &path);
if (path == "/2") {
val = 2.0f;
} else if (path == "/3") {
val = 3.0f;
} else {
val = 0.0f;
}
auto region = new TestReadOnlyMemoryRegion(kTestTensorSizeBytes);
std::fill_n(region->GetWritableDataStart(), kTestTensorSize, val);
result->reset(region);
return absl::OkStatus();
}
};
REGISTER_FILE_SYSTEM("test", TestFileSystem);
struct ImmutableConstantOpTest {};
TEST(ImmutableConstantOpTest, Simple) {
const TensorShape kTestTensorShape({4, 1});
const TensorShape kTestTensorShapeT({1, 4});
auto root = Scope::NewRootScope().ExitOnError();
auto node1 =
      ops::ImmutableConst(root, DT_FLOAT, kTestTensorShape, "test:///2");
auto node2 =
      ops::ImmutableConst(root, DT_FLOAT, kTestTensorShapeT, "test:///3");
auto result = ops::MatMul(root, node1, node2);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
SessionOptions session_options;
session_options.env = Env::Default();
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
std::unique_ptr<Session> session(NewSession(session_options));
ASSERT_TRUE(session != nullptr) << "Failed to create session";
TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {result.node()->name() + ":0"}, {}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs.front().flat<float>()(0), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(1), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(2), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(kTestTensorSize - 1), 2.0f * 3.0f);
}
TEST(ImmutableConstantOpTest, ExecutionError) {
const TensorShape kBadTensorShape({40, 100});
const TensorShape kTestTensorShapeT({1, 4});
auto root = Scope::DisabledShapeInferenceScope().ExitOnError();
auto node1 =
      ops::ImmutableConst(root, DT_FLOAT, kBadTensorShape, "test:///2");
auto node2 =
      ops::ImmutableConst(root, DT_FLOAT, kTestTensorShapeT, "test:///3");
auto result = ops::MatMul(root, node1, node2);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
SessionOptions session_options;
session_options.env = Env::Default();
std::unique_ptr<Session> session(NewSession(session_options));
ASSERT_TRUE(session != nullptr) << "Failed to create session";
TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
std::vector<Tensor> outputs;
EXPECT_EQ(
session->Run({}, {result.node()->name() + ":0"}, {}, &outputs).code(),
error::INTERNAL);
}
Status CreateTempFileFloat(Env* env, float value, uint64 size,
string* filename) {
const string dir = testing::TmpDir();
*filename = io::JoinPath(dir, strings::StrCat("file_", value));
std::unique_ptr<WritableFile> file;
TF_RETURN_IF_ERROR(env->NewWritableFile(*filename, &file));
for (uint64 i = 0; i < size; ++i) {
StringPiece sp(static_cast<char*>(static_cast<void*>(&value)),
sizeof(value));
TF_RETURN_IF_ERROR(file->Append(sp));
}
TF_RETURN_IF_ERROR(file->Close());
return absl::OkStatus();
}
TEST(ImmutableConstantOpTest, FromFile) {
const TensorShape kFileTensorShape({1000, 1});
Env* env = Env::Default();
auto root = Scope::NewRootScope().ExitOnError();
string two_file, three_file;
TF_ASSERT_OK(CreateTempFileFloat(env, 2.0f, 1000, &two_file));
TF_ASSERT_OK(CreateTempFileFloat(env, 3.0f, 1000, &three_file));
auto node1 = ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, two_file);
auto node2 =
ops::ImmutableConst(root, DT_FLOAT, kFileTensorShape, three_file);
auto result = ops::MatMul(root, node1, node2, ops::MatMul::TransposeB(true));
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_opt_level(OptimizerOptions::L0);
std::unique_ptr<Session> session(NewSession(session_options));
ASSERT_TRUE(session != nullptr) << "Failed to create session";
TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run({}, {result.node()->name() + ":0"}, {}, &outputs));
ASSERT_EQ(outputs.size(), 1);
EXPECT_EQ(outputs.front().flat<float>()(0), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(1), 2.0f * 3.0f);
EXPECT_EQ(outputs.front().flat<float>()(2), 2.0f * 3.0f);
}
Status CreateTempFileBadString(Env* env, char value, uint64 size,
const string suffix, string* filename) {
const string dir = testing::TmpDir();
*filename = io::JoinPath(dir, strings::StrCat("file_", suffix));
std::unique_ptr<WritableFile> file;
TF_RETURN_IF_ERROR(env->NewWritableFile(*filename, &file));
TF_RETURN_IF_ERROR(file->Append(std::string(size, value)));
TF_RETURN_IF_ERROR(file->Close());
return absl::OkStatus();
}
TEST(ImmutableConstantOpTest, FromFileStringUnimplmented) {
const TensorShape kFileTensorShape({1});
Env* env = Env::Default();
auto root = Scope::NewRootScope().ExitOnError();
string bad_file;
TF_ASSERT_OK(CreateTempFileBadString(env, '\xe2', 128, "bad_e2", &bad_file));
auto result =
ops::ImmutableConst(root, DT_STRING, kFileTensorShape, bad_file);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
SessionOptions session_options;
session_options.env = Env::Default();
std::unique_ptr<Session> session(NewSession(session_options));
ASSERT_TRUE(session != nullptr) << "Failed to create session";
TF_ASSERT_OK(session->Create(graph_def)) << "Can't create test graph";
std::vector<Tensor> outputs;
EXPECT_EQ(
session->Run({}, {result.node()->name() + ":0"}, {}, &outputs).code(),
error::UNIMPLEMENTED);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/immutable_constant_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/immutable_constant_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a7c4e7e1-4c9d-4e25-9452-ecf7b7547ffd | cpp | google/arolla | memory_allocation | arolla/memory/memory_allocation.h | arolla/memory/memory_allocation_test.cc | #ifndef AROLLA_UTIL_MEMORY_ALLOCATION_H_
#define AROLLA_UTIL_MEMORY_ALLOCATION_H_
#include <utility>
#include "absl/log/check.h"
#include "arolla/memory/frame.h"
#include "arolla/util/memory.h"
namespace arolla {
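// Owns a single aligned allocation that is laid out and initialized according
// to a FrameLayout. Move-only: destruction (and move-assignment over an
// existing allocation) runs the layout's destructors before freeing memory.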
class MemoryAllocation {
public:
MemoryAllocation() = default;
explicit MemoryAllocation(const FrameLayout* layout)
: layout_(layout),
alloc_(AlignedAlloc(layout->AllocAlignment(), layout->AllocSize())) {
layout_->InitializeAlignedAlloc(alloc_.get());
}
MemoryAllocation(const MemoryAllocation&) = delete;
MemoryAllocation& operator=(const MemoryAllocation&) = delete;
MemoryAllocation(MemoryAllocation&&) = default;
MemoryAllocation& operator=(MemoryAllocation&& other) {
if (alloc_ != nullptr) {
layout_->DestroyAlloc(alloc_.get());
}
layout_ = other.layout_;
alloc_ = std::move(other.alloc_);
return *this;
}
~MemoryAllocation() {
if (alloc_ != nullptr) {
layout_->DestroyAlloc(alloc_.get());
}
}
bool IsValid() const { return alloc_ != nullptr; }
FramePtr frame() {
DCHECK(IsValid());
return FramePtr(alloc_.get(), layout_);
}
ConstFramePtr frame() const {
DCHECK(IsValid());
return ConstFramePtr(alloc_.get(), layout_);
}
private:
const FrameLayout* layout_ = nullptr;
MallocPtr alloc_ = nullptr;
};
}
#endif | #include "arolla/memory/memory_allocation.h"
#include <memory>
#include <utility>
#include "gtest/gtest.h"
#include "arolla/memory/frame.h"
namespace arolla {
namespace {
struct DeleteCounter {
~DeleteCounter() { ++deletions; }
static int deletions;
};
int DeleteCounter::deletions = 0;
TEST(MemoryAllocationTest, TestEmptyValues) {
FrameLayout::Builder builder;
auto slot = builder.AddSlot<std::unique_ptr<DeleteCounter>>();
auto layout = std::move(builder).Build();
ASSERT_EQ(DeleteCounter::deletions, 0);
MemoryAllocation alloc(&layout);
EXPECT_TRUE(alloc.IsValid());
auto owned_ptr = std::make_unique<DeleteCounter>();
auto ptr = owned_ptr.get();
alloc.frame().Set(slot, std::move(owned_ptr));
EXPECT_EQ(alloc.frame().Get(slot).get(), ptr);
MemoryAllocation new_alloc(std::move(alloc));
EXPECT_TRUE(new_alloc.IsValid());
EXPECT_FALSE(alloc.IsValid());
EXPECT_EQ(new_alloc.frame().Get(slot).get(), ptr);
EXPECT_EQ(DeleteCounter::deletions, 0);
MemoryAllocation newer_alloc(&layout);
EXPECT_TRUE(newer_alloc.IsValid());
newer_alloc.frame().Set(slot, std::make_unique<DeleteCounter>());
newer_alloc = std::move(new_alloc);
EXPECT_TRUE(newer_alloc.IsValid());
EXPECT_FALSE(new_alloc.IsValid());
EXPECT_EQ(newer_alloc.frame().Get(slot).get(), ptr);
EXPECT_EQ(DeleteCounter::deletions, 1);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/memory_allocation.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/memory_allocation_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
7d716e0e-d273-409d-bedb-bdd18c59e153 | cpp | google/quiche | qpack_encoder | quiche/quic/core/qpack/qpack_encoder.cc | quiche/quic/core/qpack/qpack_encoder_test.cc | #include "quiche/quic/core/qpack/qpack_encoder.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_index_conversions.h"
#include "quiche/quic/core/qpack/qpack_instruction_encoder.h"
#include "quiche/quic/core/qpack/qpack_required_insert_count.h"
#include "quiche/quic/core/qpack/value_splitting_header_list.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
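// Entries below the draining index (the oldest kDrainingFraction of the
// dynamic table capacity) are considered too close to eviction to reference
// directly; they are duplicated instead.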
const float kDrainingFraction = 0.25;
}
QpackEncoder::QpackEncoder(
DecoderStreamErrorDelegate* decoder_stream_error_delegate,
HuffmanEncoding huffman_encoding, CookieCrumbling cookie_crumbling)
: huffman_encoding_(huffman_encoding),
cookie_crumbling_(cookie_crumbling),
decoder_stream_error_delegate_(decoder_stream_error_delegate),
decoder_stream_receiver_(this),
encoder_stream_sender_(huffman_encoding),
maximum_blocked_streams_(0),
header_list_count_(0) {
QUICHE_DCHECK(decoder_stream_error_delegate_);
}
QpackEncoder::~QpackEncoder() {}
QpackEncoder::Representation QpackEncoder::EncodeIndexedHeaderField(
bool is_static, uint64_t index,
QpackBlockingManager::IndexSet* referred_indices) {
if (!is_static) {
referred_indices->insert(index);
}
return Representation::IndexedHeaderField(is_static, index);
}
QpackEncoder::Representation
QpackEncoder::EncodeLiteralHeaderFieldWithNameReference(
bool is_static, uint64_t index, absl::string_view value,
QpackBlockingManager::IndexSet* referred_indices) {
if (!is_static) {
referred_indices->insert(index);
}
return Representation::LiteralHeaderFieldNameReference(is_static, index,
value);
}
QpackEncoder::Representation QpackEncoder::EncodeLiteralHeaderField(
absl::string_view name, absl::string_view value) {
return Representation::LiteralHeaderField(name, value);
}
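// First pass: choose a representation for each header field (indexed, name
// reference, or literal) against the static and dynamic tables, possibly
// inserting or duplicating dynamic table entries on the encoder stream, and
// record which dynamic table entries are referenced.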
QpackEncoder::Representations QpackEncoder::FirstPassEncode(
QuicStreamId stream_id, const quiche::HttpHeaderBlock& header_list,
QpackBlockingManager::IndexSet* referred_indices,
QuicByteCount* encoder_stream_sent_byte_count) {
const QuicByteCount initial_encoder_stream_buffered_byte_count =
encoder_stream_sender_.BufferedByteCount();
const bool can_write_to_encoder_stream = encoder_stream_sender_.CanWrite();
Representations representations;
representations.reserve(header_list.size());
const uint64_t known_received_count =
blocking_manager_.known_received_count();
uint64_t smallest_non_evictable_index = std::min(
blocking_manager_.smallest_blocking_index(), known_received_count);
const uint64_t draining_index =
header_table_.draining_index(kDrainingFraction);
const bool blocking_allowed = blocking_manager_.blocking_allowed_on_stream(
stream_id, maximum_blocked_streams_);
bool dynamic_table_insertion_blocked = false;
bool blocked_stream_limit_exhausted = false;
for (const auto& header :
ValueSplittingHeaderList(&header_list, cookie_crumbling_)) {
absl::string_view name = header.first;
absl::string_view value = header.second;
QpackEncoderHeaderTable::MatchResult match_result =
header_table_.FindHeaderField(name, value);
switch (match_result.match_type) {
case QpackEncoderHeaderTable::MatchType::kNameAndValue: {
if (match_result.is_static) {
representations.push_back(EncodeIndexedHeaderField(
match_result.is_static, match_result.index, referred_indices));
break;
}
if (match_result.index >= draining_index) {
if (!blocking_allowed && match_result.index >= known_received_count) {
blocked_stream_limit_exhausted = true;
} else {
representations.push_back(EncodeIndexedHeaderField(
match_result.is_static, match_result.index, referred_indices));
smallest_non_evictable_index =
std::min(smallest_non_evictable_index, match_result.index);
header_table_.set_dynamic_table_entry_referenced();
break;
}
} else {
if (!blocking_allowed) {
blocked_stream_limit_exhausted = true;
} else if (QpackEntry::Size(name, value) >
header_table_.MaxInsertSizeWithoutEvictingGivenEntry(
std::min(smallest_non_evictable_index,
match_result.index))) {
dynamic_table_insertion_blocked = true;
} else {
if (can_write_to_encoder_stream) {
encoder_stream_sender_.SendDuplicate(
QpackAbsoluteIndexToEncoderStreamRelativeIndex(
match_result.index,
header_table_.inserted_entry_count()));
uint64_t new_index = header_table_.InsertEntry(name, value);
representations.push_back(EncodeIndexedHeaderField(
match_result.is_static, new_index, referred_indices));
smallest_non_evictable_index =
std::min(smallest_non_evictable_index, match_result.index);
header_table_.set_dynamic_table_entry_referenced();
break;
}
}
}
QpackEncoderHeaderTable::MatchResult match_result_name_only =
header_table_.FindHeaderName(name);
if (match_result_name_only.match_type !=
QpackEncoderHeaderTable::MatchType::kName ||
(match_result_name_only.is_static == match_result.is_static &&
match_result_name_only.index == match_result.index)) {
representations.push_back(EncodeLiteralHeaderField(name, value));
break;
}
match_result = match_result_name_only;
ABSL_FALLTHROUGH_INTENDED;
}
case QpackEncoderHeaderTable::MatchType::kName: {
if (match_result.is_static) {
if (blocking_allowed &&
QpackEntry::Size(name, value) <=
header_table_.MaxInsertSizeWithoutEvictingGivenEntry(
smallest_non_evictable_index)) {
if (can_write_to_encoder_stream) {
encoder_stream_sender_.SendInsertWithNameReference(
match_result.is_static, match_result.index, value);
uint64_t new_index = header_table_.InsertEntry(name, value);
representations.push_back(EncodeIndexedHeaderField(
false, new_index, referred_indices));
smallest_non_evictable_index =
std::min<uint64_t>(smallest_non_evictable_index, new_index);
break;
}
}
representations.push_back(EncodeLiteralHeaderFieldWithNameReference(
match_result.is_static, match_result.index, value,
referred_indices));
break;
}
if (!blocking_allowed) {
blocked_stream_limit_exhausted = true;
} else if (QpackEntry::Size(name, value) >
header_table_.MaxInsertSizeWithoutEvictingGivenEntry(
std::min(smallest_non_evictable_index,
match_result.index))) {
dynamic_table_insertion_blocked = true;
} else {
if (can_write_to_encoder_stream) {
encoder_stream_sender_.SendInsertWithNameReference(
match_result.is_static,
QpackAbsoluteIndexToEncoderStreamRelativeIndex(
match_result.index, header_table_.inserted_entry_count()),
value);
uint64_t new_index = header_table_.InsertEntry(name, value);
representations.push_back(EncodeIndexedHeaderField(
match_result.is_static, new_index, referred_indices));
smallest_non_evictable_index =
std::min(smallest_non_evictable_index, match_result.index);
header_table_.set_dynamic_table_entry_referenced();
break;
}
}
if ((blocking_allowed || match_result.index < known_received_count) &&
match_result.index >= draining_index) {
representations.push_back(EncodeLiteralHeaderFieldWithNameReference(
match_result.is_static, match_result.index, value,
referred_indices));
smallest_non_evictable_index =
std::min(smallest_non_evictable_index, match_result.index);
header_table_.set_dynamic_table_entry_referenced();
break;
}
representations.push_back(EncodeLiteralHeaderField(name, value));
break;
}
case QpackEncoderHeaderTable::MatchType::kNoMatch: {
if (!blocking_allowed) {
blocked_stream_limit_exhausted = true;
} else if (QpackEntry::Size(name, value) >
header_table_.MaxInsertSizeWithoutEvictingGivenEntry(
smallest_non_evictable_index)) {
dynamic_table_insertion_blocked = true;
} else {
if (can_write_to_encoder_stream) {
encoder_stream_sender_.SendInsertWithoutNameReference(name, value);
uint64_t new_index = header_table_.InsertEntry(name, value);
representations.push_back(EncodeIndexedHeaderField(
false, new_index, referred_indices));
smallest_non_evictable_index =
std::min<uint64_t>(smallest_non_evictable_index, new_index);
break;
}
}
representations.push_back(EncodeLiteralHeaderField(name, value));
break;
}
}
}
const QuicByteCount encoder_stream_buffered_byte_count =
encoder_stream_sender_.BufferedByteCount();
QUICHE_DCHECK_GE(encoder_stream_buffered_byte_count,
initial_encoder_stream_buffered_byte_count);
if (encoder_stream_sent_byte_count) {
*encoder_stream_sent_byte_count =
encoder_stream_buffered_byte_count -
initial_encoder_stream_buffered_byte_count;
}
if (can_write_to_encoder_stream) {
encoder_stream_sender_.Flush();
} else {
QUICHE_DCHECK_EQ(encoder_stream_buffered_byte_count,
initial_encoder_stream_buffered_byte_count);
}
++header_list_count_;
if (dynamic_table_insertion_blocked) {
QUIC_HISTOGRAM_COUNTS(
"QuicSession.Qpack.HeaderListCountWhenInsertionBlocked",
header_list_count_, 1, 1000,
50,
"The ordinality of a header list within a connection during the "
"encoding of which at least one dynamic table insertion was "
"blocked.");
} else {
QUIC_HISTOGRAM_COUNTS(
"QuicSession.Qpack.HeaderListCountWhenInsertionNotBlocked",
header_list_count_, 1, 1000,
50,
"The ordinality of a header list within a connection during the "
"encoding of which no dynamic table insertion was blocked.");
}
if (blocked_stream_limit_exhausted) {
QUIC_HISTOGRAM_COUNTS(
"QuicSession.Qpack.HeaderListCountWhenBlockedStreamLimited",
header_list_count_, 1, 1000,
50,
"The ordinality of a header list within a connection during the "
"encoding of which unacknowledged dynamic table entries could not be "
"referenced due to the limit on the number of blocked streams.");
} else {
QUIC_HISTOGRAM_COUNTS(
"QuicSession.Qpack.HeaderListCountWhenNotBlockedStreamLimited",
header_list_count_, 1, 1000,
50,
"The ordinality of a header list within a connection during the "
"encoding of which the limit on the number of blocked streams did "
"not "
"prevent referencing unacknowledged dynamic table entries.");
}
return representations;
}
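// Second pass: prepend the encoded Required Insert Count prefix and serialize
// the representations, converting absolute dynamic table indices into
// request-stream-relative indices against the Base.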
std::string QpackEncoder::SecondPassEncode(
QpackEncoder::Representations representations,
uint64_t required_insert_count) const {
QpackInstructionEncoder instruction_encoder(huffman_encoding_);
std::string encoded_headers;
instruction_encoder.Encode(
Representation::Prefix(QpackEncodeRequiredInsertCount(
required_insert_count, header_table_.max_entries())),
&encoded_headers);
const uint64_t base = required_insert_count;
for (auto& representation : representations) {
if ((representation.instruction() == QpackIndexedHeaderFieldInstruction() ||
representation.instruction() ==
QpackLiteralHeaderFieldNameReferenceInstruction()) &&
!representation.s_bit()) {
representation.set_varint(QpackAbsoluteIndexToRequestStreamRelativeIndex(
representation.varint(), base));
}
instruction_encoder.Encode(representation, &encoded_headers);
}
return encoded_headers;
}
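// Public entry point: runs both encoding passes and registers the referenced
// dynamic table entries with the blocking manager so that later Header
// Acknowledgement or Stream Cancellation instructions can be matched up.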
std::string QpackEncoder::EncodeHeaderList(
QuicStreamId stream_id, const quiche::HttpHeaderBlock& header_list,
QuicByteCount* encoder_stream_sent_byte_count) {
QpackBlockingManager::IndexSet referred_indices;
Representations representations =
FirstPassEncode(stream_id, header_list, &referred_indices,
encoder_stream_sent_byte_count);
const uint64_t required_insert_count =
referred_indices.empty()
? 0
: QpackBlockingManager::RequiredInsertCount(referred_indices);
if (!referred_indices.empty()) {
blocking_manager_.OnHeaderBlockSent(stream_id, std::move(referred_indices));
}
return SecondPassEncode(std::move(representations), required_insert_count);
}
bool QpackEncoder::SetMaximumDynamicTableCapacity(
uint64_t maximum_dynamic_table_capacity) {
return header_table_.SetMaximumDynamicTableCapacity(
maximum_dynamic_table_capacity);
}
void QpackEncoder::SetDynamicTableCapacity(uint64_t dynamic_table_capacity) {
encoder_stream_sender_.SendSetDynamicTableCapacity(dynamic_table_capacity);
bool success = header_table_.SetDynamicTableCapacity(dynamic_table_capacity);
QUICHE_DCHECK(success);
}
bool QpackEncoder::SetMaximumBlockedStreams(uint64_t maximum_blocked_streams) {
if (maximum_blocked_streams < maximum_blocked_streams_) {
return false;
}
maximum_blocked_streams_ = maximum_blocked_streams;
return true;
}
void QpackEncoder::OnInsertCountIncrement(uint64_t increment) {
if (increment == 0) {
OnErrorDetected(QUIC_QPACK_DECODER_STREAM_INVALID_ZERO_INCREMENT,
"Invalid increment value 0.");
return;
}
if (!blocking_manager_.OnInsertCountIncrement(increment)) {
OnErrorDetected(QUIC_QPACK_DECODER_STREAM_INCREMENT_OVERFLOW,
"Insert Count Increment instruction causes overflow.");
}
if (blocking_manager_.known_received_count() >
header_table_.inserted_entry_count()) {
OnErrorDetected(QUIC_QPACK_DECODER_STREAM_IMPOSSIBLE_INSERT_COUNT,
absl::StrCat("Increment value ", increment,
" raises known received count to ",
blocking_manager_.known_received_count(),
" exceeding inserted entry count ",
header_table_.inserted_entry_count()));
}
}
void QpackEncoder::OnHeaderAcknowledgement(QuicStreamId stream_id) {
if (!blocking_manager_.OnHeaderAcknowledgement(stream_id)) {
OnErrorDetected(
QUIC_QPACK_DECODER_STREAM_INCORRECT_ACKNOWLEDGEMENT,
absl::StrCat("Header Acknowledgement received for stream ", stream_id,
" with no outstanding header blocks."));
}
}
void QpackEncoder::OnStreamCancellation(QuicStreamId stream_id) {
blocking_manager_.OnStreamCancellation(stream_id);
}
void QpackEncoder::OnErrorDetected(QuicErrorCode error_code,
absl::string_view error_message) {
decoder_stream_error_delegate_->OnDecoderStreamError(error_code,
error_message);
}
} | #include "quiche/quic/core/qpack/qpack_encoder.h"
#include <limits>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_instruction_encoder.h"
#include "quiche/quic/core/qpack/value_splitting_header_list.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/qpack/qpack_encoder_peer.h"
#include "quiche/quic/test_tools/qpack/qpack_test_utils.h"
using ::testing::_;
using ::testing::Eq;
using ::testing::Return;
using ::testing::StrictMock;
namespace quic {
namespace test {
namespace {
constexpr uint64_t kTooManyBytesBuffered = 1024 * 1024;
std::string PrintToString(const testing::TestParamInfo<HuffmanEncoding>& info) {
switch (info.param) {
case HuffmanEncoding::kEnabled:
return "HuffmanEnabled";
case HuffmanEncoding::kDisabled:
return "HuffmanDisabled";
}
QUICHE_NOTREACHED();
return "InvalidValue";
}
class MockDecoderStreamErrorDelegate
: public QpackEncoder::DecoderStreamErrorDelegate {
public:
~MockDecoderStreamErrorDelegate() override = default;
MOCK_METHOD(void, OnDecoderStreamError,
(QuicErrorCode error_code, absl::string_view error_message),
(override));
};
class QpackEncoderTest : public QuicTestWithParam<HuffmanEncoding> {
protected:
QpackEncoderTest()
: huffman_encoding_(GetParam()),
encoder_(&decoder_stream_error_delegate_, huffman_encoding_,
CookieCrumbling::kEnabled),
encoder_stream_sent_byte_count_(0) {
encoder_.set_qpack_stream_sender_delegate(&encoder_stream_sender_delegate_);
encoder_.SetMaximumBlockedStreams(1);
}
~QpackEncoderTest() override = default;
bool HuffmanEnabled() const {
return huffman_encoding_ == HuffmanEncoding::kEnabled;
}
std::string Encode(const quiche::HttpHeaderBlock& header_list) {
return encoder_.EncodeHeaderList( 1, header_list,
&encoder_stream_sent_byte_count_);
}
const HuffmanEncoding huffman_encoding_;
StrictMock<MockDecoderStreamErrorDelegate> decoder_stream_error_delegate_;
StrictMock<MockQpackStreamSenderDelegate> encoder_stream_sender_delegate_;
QpackEncoder encoder_;
QuicByteCount encoder_stream_sent_byte_count_;
};
INSTANTIATE_TEST_SUITE_P(HuffmanEncoding, QpackEncoderTest,
::testing::ValuesIn({HuffmanEncoding::kEnabled,
HuffmanEncoding::kDisabled}),
PrintToString);
TEST_P(QpackEncoderTest, Empty) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list;
std::string output = Encode(header_list);
std::string expected_output;
ASSERT_TRUE(absl::HexStringToBytes("0000", &expected_output));
EXPECT_EQ(expected_output, output);
}
TEST_P(QpackEncoderTest, EmptyName) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list;
header_list[""] = "foo";
std::string output = Encode(header_list);
std::string expected_output;
if (HuffmanEnabled()) {
ASSERT_TRUE(absl::HexStringToBytes("0000208294e7", &expected_output));
} else {
ASSERT_TRUE(absl::HexStringToBytes("00002003666f6f", &expected_output));
}
EXPECT_EQ(expected_output, output);
}
TEST_P(QpackEncoderTest, EmptyValue) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list;
header_list["foo"] = "";
std::string output = Encode(header_list);
std::string expected_output;
if (HuffmanEnabled()) {
ASSERT_TRUE(absl::HexStringToBytes("00002a94e700", &expected_output));
} else {
ASSERT_TRUE(absl::HexStringToBytes("000023666f6f00", &expected_output));
}
EXPECT_EQ(expected_output, output);
}
TEST_P(QpackEncoderTest, EmptyNameAndValue) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list;
header_list[""] = "";
std::string output = Encode(header_list);
std::string expected_output;
ASSERT_TRUE(absl::HexStringToBytes("00002000", &expected_output));
EXPECT_EQ(expected_output, output);
}
TEST_P(QpackEncoderTest, Simple) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list;
header_list["foo"] = "bar";
std::string output = Encode(header_list);
std::string expected_output;
if (HuffmanEnabled()) {
ASSERT_TRUE(absl::HexStringToBytes("00002a94e703626172", &expected_output));
} else {
ASSERT_TRUE(
absl::HexStringToBytes("000023666f6f03626172", &expected_output));
}
EXPECT_EQ(expected_output, output);
}
TEST_P(QpackEncoderTest, Multiple) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list;
header_list["foo"] = "bar";
header_list["ZZZZZZZ"] = std::string(127, 'Z');
std::string output = Encode(header_list);
std::string expected_output_hex;
if (HuffmanEnabled()) {
expected_output_hex =
"0000"
"2a94e703626172";
} else {
expected_output_hex =
"0000"
"23666f6f03626172";
}
expected_output_hex +=
"27005a5a5a5a5a5a5a"
"7f005a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a"
"5a5a5a5a5a5a5a5a5a";
std::string expected_output;
ASSERT_TRUE(absl::HexStringToBytes(expected_output_hex, &expected_output));
EXPECT_EQ(expected_output, output);
}
TEST_P(QpackEncoderTest, StaticTable) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
{
quiche::HttpHeaderBlock header_list;
header_list[":method"] = "GET";
header_list["accept-encoding"] = "gzip, deflate, br";
header_list["location"] = "";
std::string output = Encode(header_list);
std::string expected_output;
ASSERT_TRUE(absl::HexStringToBytes("0000d1dfcc", &expected_output));
EXPECT_EQ(expected_output, output);
}
{
quiche::HttpHeaderBlock header_list;
header_list[":method"] = "POST";
header_list["accept-encoding"] = "compress";
header_list["location"] = "foo";
std::string output = Encode(header_list);
std::string expected_output;
if (HuffmanEnabled()) {
ASSERT_TRUE(absl::HexStringToBytes("0000d45f108621e9aec2a11f5c8294e7",
&expected_output));
} else {
ASSERT_TRUE(absl::HexStringToBytes(
"0000d45f1008636f6d70726573735c03666f6f", &expected_output));
}
EXPECT_EQ(expected_output, output);
}
{
quiche::HttpHeaderBlock header_list;
header_list[":method"] = "TRACE";
header_list["accept-encoding"] = "";
std::string output = Encode(header_list);
std::string expected_output;
ASSERT_TRUE(
absl::HexStringToBytes("00005f000554524143455f1000", &expected_output));
EXPECT_EQ(expected_output, output);
}
}
TEST_P(QpackEncoderTest, DecoderStreamError) {
EXPECT_CALL(decoder_stream_error_delegate_,
OnDecoderStreamError(QUIC_QPACK_DECODER_STREAM_INTEGER_TOO_LARGE,
Eq("Encoded integer too large.")));
QpackEncoder encoder(&decoder_stream_error_delegate_, huffman_encoding_,
CookieCrumbling::kEnabled);
encoder.set_qpack_stream_sender_delegate(&encoder_stream_sender_delegate_);
std::string input;
ASSERT_TRUE(absl::HexStringToBytes("ffffffffffffffffffffff", &input));
encoder.decoder_stream_receiver()->Decode(input);
}
TEST_P(QpackEncoderTest, SplitAlongNullCharacter) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list;
header_list["foo"] = absl::string_view("bar\0bar\0baz", 11);
std::string output = Encode(header_list);
std::string expected_output;
if (HuffmanEnabled()) {
ASSERT_TRUE(
absl::HexStringToBytes("0000"
"2a94e703626172"
"2a94e703626172"
"2a94e70362617a",
&expected_output));
} else {
ASSERT_TRUE(
absl::HexStringToBytes("0000"
"23666f6f03626172"
"23666f6f03626172"
"23666f6f0362617a",
&expected_output));
}
EXPECT_EQ(expected_output, output);
}
TEST_P(QpackEncoderTest, ZeroInsertCountIncrement) {
EXPECT_CALL(
decoder_stream_error_delegate_,
OnDecoderStreamError(QUIC_QPACK_DECODER_STREAM_INVALID_ZERO_INCREMENT,
Eq("Invalid increment value 0.")));
encoder_.OnInsertCountIncrement(0);
}
TEST_P(QpackEncoderTest, TooLargeInsertCountIncrement) {
EXPECT_CALL(
decoder_stream_error_delegate_,
OnDecoderStreamError(QUIC_QPACK_DECODER_STREAM_IMPOSSIBLE_INSERT_COUNT,
Eq("Increment value 1 raises known received count "
"to 1 exceeding inserted entry count 0")));
encoder_.OnInsertCountIncrement(1);
}
TEST_P(QpackEncoderTest, InsertCountIncrementOverflow) {
QpackEncoderHeaderTable* header_table =
QpackEncoderPeer::header_table(&encoder_);
header_table->SetMaximumDynamicTableCapacity(4096);
header_table->SetDynamicTableCapacity(4096);
header_table->InsertEntry("foo", "bar");
encoder_.OnInsertCountIncrement(1);
EXPECT_CALL(decoder_stream_error_delegate_,
OnDecoderStreamError(
QUIC_QPACK_DECODER_STREAM_INCREMENT_OVERFLOW,
Eq("Insert Count Increment instruction causes overflow.")));
encoder_.OnInsertCountIncrement(std::numeric_limits<uint64_t>::max());
}
TEST_P(QpackEncoderTest, InvalidHeaderAcknowledgement) {
EXPECT_CALL(
decoder_stream_error_delegate_,
OnDecoderStreamError(QUIC_QPACK_DECODER_STREAM_INCORRECT_ACKNOWLEDGEMENT,
Eq("Header Acknowledgement received for stream 0 "
"with no outstanding header blocks.")));
encoder_.OnHeaderAcknowledgement( 0);
}
TEST_P(QpackEncoderTest, DynamicTable) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
encoder_.SetMaximumBlockedStreams(1);
encoder_.SetMaximumDynamicTableCapacity(4096);
encoder_.SetDynamicTableCapacity(4096);
quiche::HttpHeaderBlock header_list;
header_list["foo"] = "bar";
header_list.AppendValueOrAddHeader("foo",
"baz");
header_list["cookie"] = "baz";
  std::string set_dynamic_table_capacity;
  ASSERT_TRUE(absl::HexStringToBytes("3fe11f", &set_dynamic_table_capacity));
std::string insert_entries_hex;
if (HuffmanEnabled()) {
insert_entries_hex =
"62"
"94e7";
} else {
insert_entries_hex =
"43"
"666f6f";
}
insert_entries_hex +=
"03626172"
"80"
"0362617a"
"c5"
"0362617a";
std::string insert_entries;
ASSERT_TRUE(absl::HexStringToBytes(insert_entries_hex, &insert_entries));
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(Eq(
                  absl::StrCat(set_dynamic_table_capacity, insert_entries))));
std::string expected_output;
ASSERT_TRUE(absl::HexStringToBytes(
"0400"
"828180",
&expected_output));
EXPECT_EQ(expected_output, Encode(header_list));
EXPECT_EQ(insert_entries.size(), encoder_stream_sent_byte_count_);
}
TEST_P(QpackEncoderTest, SmallDynamicTable) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
encoder_.SetMaximumBlockedStreams(1);
encoder_.SetMaximumDynamicTableCapacity(QpackEntry::Size("foo", "bar"));
encoder_.SetDynamicTableCapacity(QpackEntry::Size("foo", "bar"));
quiche::HttpHeaderBlock header_list;
header_list["foo"] = "bar";
header_list.AppendValueOrAddHeader("foo",
"baz");
header_list["cookie"] = "baz";
header_list["bar"] = "baz";
  std::string set_dynamic_table_capacity;
  ASSERT_TRUE(absl::HexStringToBytes("3f07", &set_dynamic_table_capacity));
std::string insert_entry;
if (HuffmanEnabled()) {
ASSERT_TRUE(
absl::HexStringToBytes("62"
"94e7"
"03626172",
&insert_entry));
} else {
ASSERT_TRUE(
absl::HexStringToBytes("43"
"666f6f"
"03626172",
&insert_entry));
}
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(
                  Eq(absl::StrCat(set_dynamic_table_capacity, insert_entry))));
std::string expected_output;
ASSERT_TRUE(
absl::HexStringToBytes("0200"
"80"
"40"
"0362617a"
"55"
"0362617a"
"23626172"
"0362617a",
&expected_output));
EXPECT_EQ(expected_output, Encode(header_list));
EXPECT_EQ(insert_entry.size(), encoder_stream_sent_byte_count_);
}
TEST_P(QpackEncoderTest, BlockedStream) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
encoder_.SetMaximumBlockedStreams(1);
encoder_.SetMaximumDynamicTableCapacity(4096);
encoder_.SetDynamicTableCapacity(4096);
quiche::HttpHeaderBlock header_list1;
header_list1["foo"] = "bar";
  std::string set_dynamic_table_capacity;
  ASSERT_TRUE(absl::HexStringToBytes("3fe11f", &set_dynamic_table_capacity));
std::string insert_entry1;
if (HuffmanEnabled()) {
ASSERT_TRUE(
absl::HexStringToBytes("62"
"94e7"
"03626172",
&insert_entry1));
} else {
ASSERT_TRUE(
absl::HexStringToBytes("43"
"666f6f"
"03626172",
&insert_entry1));
}
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(Eq(
                  absl::StrCat(set_dynamic_table_capacity, insert_entry1))));
std::string expected_output;
ASSERT_TRUE(
absl::HexStringToBytes("0200"
"80",
&expected_output));
EXPECT_EQ(expected_output,
encoder_.EncodeHeaderList( 1, header_list1,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(insert_entry1.size(), encoder_stream_sent_byte_count_);
quiche::HttpHeaderBlock header_list2;
header_list2["foo"] = "bar";
header_list2.AppendValueOrAddHeader("foo",
"baz");
header_list2["cookie"] = "baz";
header_list2["bar"] = "baz";
std::string entries;
if (HuffmanEnabled()) {
ASSERT_TRUE(
absl::HexStringToBytes("0000"
"2a94e7"
"03626172"
"2a94e7"
"0362617a"
"55"
"0362617a"
"23626172"
"0362617a",
&entries));
} else {
ASSERT_TRUE(
absl::HexStringToBytes("0000"
"23666f6f"
"03626172"
"23666f6f"
"0362617a"
"55"
"0362617a"
"23626172"
"0362617a",
&entries));
}
EXPECT_EQ(entries,
encoder_.EncodeHeaderList( 2, header_list2,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(0u, encoder_stream_sent_byte_count_);
encoder_.OnInsertCountIncrement(1);
std::string insert_entries;
ASSERT_TRUE(absl::HexStringToBytes(
"80"
"0362617a"
"c5"
"0362617a"
"43"
"626172"
"0362617a",
&insert_entries));
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(Eq(insert_entries)));
ASSERT_TRUE(
absl::HexStringToBytes("0500"
"83828180",
&expected_output));
EXPECT_EQ(expected_output,
encoder_.EncodeHeaderList( 3, header_list2,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(insert_entries.size(), encoder_stream_sent_byte_count_);
std::string expected2;
if (HuffmanEnabled()) {
ASSERT_TRUE(
absl::HexStringToBytes("0200"
"80"
"2a94e7"
"0362617a"
"55"
"0362617a"
"23626172"
"0362617a",
&expected2));
} else {
ASSERT_TRUE(
absl::HexStringToBytes("0200"
"80"
"23666f6f"
"0362617a"
"55"
"0362617a"
"23626172"
"0362617a",
&expected2));
}
EXPECT_EQ(expected2,
encoder_.EncodeHeaderList( 4, header_list2,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(0u, encoder_stream_sent_byte_count_);
encoder_.OnInsertCountIncrement(2);
std::string expected3;
ASSERT_TRUE(
absl::HexStringToBytes("0400"
"828180"
"23626172"
"0362617a",
&expected3));
EXPECT_EQ(expected3,
encoder_.EncodeHeaderList( 5, header_list2,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(0u, encoder_stream_sent_byte_count_);
encoder_.OnHeaderAcknowledgement(3);
std::string expected4;
ASSERT_TRUE(
absl::HexStringToBytes("0500"
"83828180",
&expected4));
EXPECT_EQ(expected4,
encoder_.EncodeHeaderList( 6, header_list2,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(0u, encoder_stream_sent_byte_count_);
}
TEST_P(QpackEncoderTest, Draining) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list1;
header_list1["one"] = "foo";
header_list1["two"] = "foo";
header_list1["three"] = "foo";
header_list1["four"] = "foo";
header_list1["five"] = "foo";
header_list1["six"] = "foo";
header_list1["seven"] = "foo";
header_list1["eight"] = "foo";
header_list1["nine"] = "foo";
header_list1["ten"] = "foo";
uint64_t maximum_dynamic_table_capacity = 0;
for (const auto& header_field : header_list1) {
maximum_dynamic_table_capacity +=
QpackEntry::Size(header_field.first, header_field.second);
}
maximum_dynamic_table_capacity += QpackEntry::Size("one", "foo");
encoder_.SetMaximumDynamicTableCapacity(maximum_dynamic_table_capacity);
encoder_.SetDynamicTableCapacity(maximum_dynamic_table_capacity);
EXPECT_CALL(encoder_stream_sender_delegate_, WriteStreamData(_));
std::string expected_output;
ASSERT_TRUE(
absl::HexStringToBytes("0b00"
"89888786858483828180",
&expected_output));
EXPECT_EQ(expected_output, Encode(header_list1));
quiche::HttpHeaderBlock header_list2;
header_list2["one"] = "foo";
ASSERT_TRUE(absl::HexStringToBytes("09", &expected_output));
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(Eq(expected_output)));
ASSERT_TRUE(
absl::HexStringToBytes("0c00"
"80",
&expected_output));
EXPECT_EQ(expected_output, Encode(header_list2));
quiche::HttpHeaderBlock header_list3;
header_list3.AppendValueOrAddHeader("two", "foo");
header_list3.AppendValueOrAddHeader("two", "bar");
std::string entries =
"0000"
"2374776f";
if (HuffmanEnabled()) {
entries += "8294e7";
} else {
entries += "03666f6f";
}
entries +=
"2374776f"
"03626172";
ASSERT_TRUE(absl::HexStringToBytes(entries, &expected_output));
EXPECT_EQ(expected_output, Encode(header_list3));
}
TEST_P(QpackEncoderTest, DynamicTableCapacityLessThanMaximum) {
encoder_.SetMaximumDynamicTableCapacity(1024);
encoder_.SetDynamicTableCapacity(30);
QpackEncoderHeaderTable* header_table =
QpackEncoderPeer::header_table(&encoder_);
EXPECT_EQ(1024u, header_table->maximum_dynamic_table_capacity());
EXPECT_EQ(30u, header_table->dynamic_table_capacity());
}
TEST_P(QpackEncoderTest, EncoderStreamWritesDisallowedThenAllowed) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(kTooManyBytesBuffered));
encoder_.SetMaximumBlockedStreams(1);
encoder_.SetMaximumDynamicTableCapacity(4096);
encoder_.SetDynamicTableCapacity(4096);
quiche::HttpHeaderBlock header_list1;
header_list1["foo"] = "bar";
header_list1.AppendValueOrAddHeader("foo", "baz");
header_list1["cookie"] = "baz";
std::string entries;
if (HuffmanEnabled()) {
ASSERT_TRUE(
absl::HexStringToBytes("0000"
"2a94e7"
"03626172"
"2a94e7"
"0362617a"
"55"
"0362617a",
&entries));
} else {
ASSERT_TRUE(
absl::HexStringToBytes("0000"
"23666f6f"
"03626172"
"23666f6f"
"0362617a"
"55"
"0362617a",
&entries));
}
EXPECT_EQ(entries, Encode(header_list1));
EXPECT_EQ(0u, encoder_stream_sent_byte_count_);
::testing::Mock::VerifyAndClearExpectations(&encoder_stream_sender_delegate_);
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list2;
header_list2["foo"] = "bar";
header_list2.AppendValueOrAddHeader("foo",
"baz");
header_list2["cookie"] = "baz";
std::string set_dyanamic_table_capacity;
ASSERT_TRUE(absl::HexStringToBytes("3fe11f", &set_dyanamic_table_capacity));
std::string insert_entries_hex;
if (HuffmanEnabled()) {
insert_entries_hex =
"62"
"94e7";
} else {
insert_entries_hex =
"43"
"666f6f";
}
insert_entries_hex +=
"03626172"
"80"
"0362617a"
"c5"
"0362617a";
std::string insert_entries;
ASSERT_TRUE(absl::HexStringToBytes(insert_entries_hex, &insert_entries));
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(Eq(
absl::StrCat(set_dyanamic_table_capacity, insert_entries))));
std::string expected_output;
ASSERT_TRUE(absl::HexStringToBytes(
"0400"
"828180",
&expected_output));
EXPECT_EQ(expected_output, Encode(header_list2));
EXPECT_EQ(insert_entries.size(), encoder_stream_sent_byte_count_);
}
TEST_P(QpackEncoderTest, EncoderStreamWritesAllowedThenDisallowed) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
encoder_.SetMaximumBlockedStreams(1);
encoder_.SetMaximumDynamicTableCapacity(4096);
encoder_.SetDynamicTableCapacity(4096);
quiche::HttpHeaderBlock header_list1;
header_list1["foo"] = "bar";
header_list1.AppendValueOrAddHeader("foo",
"baz");
header_list1["cookie"] = "baz";
std::string set_dyanamic_table_capacity;
ASSERT_TRUE(absl::HexStringToBytes("3fe11f", &set_dyanamic_table_capacity));
std::string insert_entries_hex;
if (HuffmanEnabled()) {
insert_entries_hex =
"62"
"94e7";
} else {
insert_entries_hex =
"43"
"666f6f";
}
insert_entries_hex +=
"03626172"
"80"
"0362617a"
"c5"
"0362617a";
std::string insert_entries;
ASSERT_TRUE(absl::HexStringToBytes(insert_entries_hex, &insert_entries));
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(Eq(
absl::StrCat(set_dyanamic_table_capacity, insert_entries))));
std::string expected_output;
ASSERT_TRUE(absl::HexStringToBytes(
"0400"
"828180",
&expected_output));
EXPECT_EQ(expected_output, Encode(header_list1));
EXPECT_EQ(insert_entries.size(), encoder_stream_sent_byte_count_);
::testing::Mock::VerifyAndClearExpectations(&encoder_stream_sender_delegate_);
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(kTooManyBytesBuffered));
quiche::HttpHeaderBlock header_list2;
header_list2["foo"] = "bar";
header_list2["bar"] = "baz";
header_list2["cookie"] = "baz";
ASSERT_TRUE(
absl::HexStringToBytes("0400"
"82"
"23626172"
"0362617a"
"80",
&expected_output));
EXPECT_EQ(expected_output, Encode(header_list2));
EXPECT_EQ(0u, encoder_stream_sent_byte_count_);
}
TEST_P(QpackEncoderTest, UnackedEntryCannotBeEvicted) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
encoder_.SetMaximumBlockedStreams(2);
encoder_.SetMaximumDynamicTableCapacity(40);
encoder_.SetDynamicTableCapacity(40);
QpackEncoderHeaderTable* header_table =
QpackEncoderPeer::header_table(&encoder_);
EXPECT_EQ(0u, header_table->inserted_entry_count());
EXPECT_EQ(0u, header_table->dropped_entry_count());
quiche::HttpHeaderBlock header_list1;
header_list1["foo"] = "bar";
std::string set_dyanamic_table_capacity;
ASSERT_TRUE(absl::HexStringToBytes("3f09", &set_dyanamic_table_capacity));
std::string insert_entries1;
if (HuffmanEnabled()) {
ASSERT_TRUE(
absl::HexStringToBytes("62"
"94e7"
"03626172",
&insert_entries1));
} else {
ASSERT_TRUE(
absl::HexStringToBytes("43"
"666f6f"
"03626172",
&insert_entries1));
}
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(Eq(
absl::StrCat(set_dyanamic_table_capacity, insert_entries1))));
std::string expected_output;
ASSERT_TRUE(
absl::HexStringToBytes("0200"
"80",
&expected_output));
EXPECT_EQ(expected_output,
encoder_.EncodeHeaderList( 1, header_list1,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(1u, header_table->inserted_entry_count());
EXPECT_EQ(0u, header_table->dropped_entry_count());
encoder_.OnStreamCancellation( 1);
quiche::HttpHeaderBlock header_list2;
header_list2["bar"] = "baz";
ASSERT_TRUE(
absl::HexStringToBytes("0000"
"23626172"
"0362617a",
&expected_output));
EXPECT_EQ(expected_output,
encoder_.EncodeHeaderList( 2, header_list2,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(1u, header_table->inserted_entry_count());
EXPECT_EQ(0u, header_table->dropped_entry_count());
}
TEST_P(QpackEncoderTest, UseStaticTableNameOnlyMatch) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
encoder_.SetMaximumBlockedStreams(2);
encoder_.SetMaximumDynamicTableCapacity(4096);
encoder_.SetDynamicTableCapacity(4096);
quiche::HttpHeaderBlock header_list;
header_list[":method"] = "bar";
std::string set_dyanamic_table_capacity;
ASSERT_TRUE(absl::HexStringToBytes("3fe11f", &set_dyanamic_table_capacity));
std::string insert_entry1;
ASSERT_TRUE(
absl::HexStringToBytes("cf"
"03626172",
&insert_entry1));
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(Eq(
absl::StrCat(set_dyanamic_table_capacity, insert_entry1))));
std::string expected_output;
ASSERT_TRUE(
absl::HexStringToBytes("0200"
"80",
&expected_output));
EXPECT_EQ(expected_output,
encoder_.EncodeHeaderList( 1, header_list,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(insert_entry1.size(), encoder_stream_sent_byte_count_);
EXPECT_EQ(expected_output,
encoder_.EncodeHeaderList( 2, header_list,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(0u, encoder_stream_sent_byte_count_);
ASSERT_TRUE(
absl::HexStringToBytes("0000"
"5f00"
"03626172",
&expected_output));
EXPECT_EQ(expected_output,
encoder_.EncodeHeaderList( 3, header_list,
&encoder_stream_sent_byte_count_));
}
TEST_P(QpackEncoderTest, UseDynamicTableNameOnlyMatch) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list1;
header_list1["one"] = "foo";
header_list1["two"] = "foo";
header_list1["three"] = "foo";
header_list1["four"] = "foo";
header_list1["five"] = "foo";
header_list1["six"] = "foo";
header_list1["seven"] = "foo";
header_list1["eight"] = "foo";
header_list1["nine"] = "foo";
header_list1["ten"] = "foo";
uint64_t maximum_dynamic_table_capacity = 0;
for (const auto& header_field : header_list1) {
maximum_dynamic_table_capacity +=
QpackEntry::Size(header_field.first, header_field.second);
}
maximum_dynamic_table_capacity += QpackEntry::Size("one", "bar");
encoder_.SetMaximumDynamicTableCapacity(maximum_dynamic_table_capacity);
encoder_.SetDynamicTableCapacity(maximum_dynamic_table_capacity);
EXPECT_CALL(encoder_stream_sender_delegate_, WriteStreamData(_));
std::string expected_output;
ASSERT_TRUE(
absl::HexStringToBytes("0b00"
"89888786858483828180",
&expected_output));
EXPECT_EQ(expected_output, Encode(header_list1));
quiche::HttpHeaderBlock header_list2;
header_list2["one"] = "bar";
ASSERT_TRUE(absl::HexStringToBytes(
"89"
"03626172",
&expected_output));
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(Eq(expected_output)));
ASSERT_TRUE(
absl::HexStringToBytes("0c00"
"80",
&expected_output));
EXPECT_EQ(expected_output, Encode(header_list2));
quiche::HttpHeaderBlock header_list3;
header_list3["one"] = "foo";
if (HuffmanEnabled()) {
ASSERT_TRUE(
absl::HexStringToBytes("0c00"
"40"
"8294e7",
&expected_output));
} else {
ASSERT_TRUE(
absl::HexStringToBytes("0c00"
"40"
"03666f6f",
&expected_output));
}
EXPECT_EQ(expected_output, Encode(header_list3));
}
TEST_P(QpackEncoderTest, CookieCrumblingEnabledNoDynamicTable) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list;
header_list["cookie"] = "foo; bar";
std::string expected_output;
if (HuffmanEnabled()) {
ASSERT_TRUE(
absl::HexStringToBytes("0000"
"55"
"8294e7"
"55"
"03626172",
&expected_output));
} else {
ASSERT_TRUE(
absl::HexStringToBytes("0000"
"55"
"03666f6f"
"55"
"03626172",
&expected_output));
}
EXPECT_EQ(expected_output, Encode(header_list));
EXPECT_EQ(0u, encoder_stream_sent_byte_count_);
}
TEST_P(QpackEncoderTest, CookieCrumblingEnabledDynamicTable) {
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
encoder_.SetMaximumBlockedStreams(1);
encoder_.SetMaximumDynamicTableCapacity(4096);
encoder_.SetDynamicTableCapacity(4096);
quiche::HttpHeaderBlock header_list;
header_list["cookie"] = "foo; bar";
std::string set_dyanamic_table_capacity;
ASSERT_TRUE(absl::HexStringToBytes("3fe11f", &set_dyanamic_table_capacity));
std::string insert_entries;
if (HuffmanEnabled()) {
ASSERT_TRUE(absl::HexStringToBytes(
"c5"
"8294e7"
"c5"
"03626172",
&insert_entries));
} else {
ASSERT_TRUE(absl::HexStringToBytes(
"c5"
"03666f6f"
"c5"
"03626172",
&insert_entries));
}
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(Eq(
absl::StrCat(set_dyanamic_table_capacity, insert_entries))));
std::string expected_output;
ASSERT_TRUE(
absl::HexStringToBytes("0300"
"81"
"80",
&expected_output));
EXPECT_EQ(expected_output, Encode(header_list));
EXPECT_EQ(insert_entries.size(), encoder_stream_sent_byte_count_);
}
TEST_P(QpackEncoderTest, CookieCrumblingDisabledNoDynamicTable) {
QpackEncoder encoder(&decoder_stream_error_delegate_, huffman_encoding_,
CookieCrumbling::kDisabled);
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
quiche::HttpHeaderBlock header_list;
header_list["cookie"] = "foo; bar";
std::string expected_output;
if (HuffmanEnabled()) {
ASSERT_TRUE(absl::HexStringToBytes(
"0000"
"55"
"8694e7fb5231d9",
&expected_output));
} else {
ASSERT_TRUE(absl::HexStringToBytes(
"0000"
"55"
"08666f6f3b20626172",
&expected_output));
}
EXPECT_EQ(expected_output,
encoder.EncodeHeaderList( 1, header_list,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(0u, encoder_stream_sent_byte_count_);
}
TEST_P(QpackEncoderTest, CookieCrumblingDisabledDynamicTable) {
QpackEncoder encoder(&decoder_stream_error_delegate_, huffman_encoding_,
CookieCrumbling::kDisabled);
encoder.SetMaximumBlockedStreams(1);
encoder.set_qpack_stream_sender_delegate(&encoder_stream_sender_delegate_);
EXPECT_CALL(encoder_stream_sender_delegate_, NumBytesBuffered())
.WillRepeatedly(Return(0));
encoder.SetMaximumBlockedStreams(1);
encoder.SetMaximumDynamicTableCapacity(4096);
encoder.SetDynamicTableCapacity(4096);
quiche::HttpHeaderBlock header_list;
header_list["cookie"] = "foo; bar";
std::string set_dyanamic_table_capacity;
ASSERT_TRUE(absl::HexStringToBytes("3fe11f", &set_dyanamic_table_capacity));
std::string insert_entries;
if (HuffmanEnabled()) {
ASSERT_TRUE(absl::HexStringToBytes(
"c5"
"8694e7fb5231d9",
&insert_entries));
} else {
ASSERT_TRUE(absl::HexStringToBytes(
"c5"
"08666f6f3b20626172",
&insert_entries));
}
EXPECT_CALL(encoder_stream_sender_delegate_,
WriteStreamData(Eq(
absl::StrCat(set_dyanamic_table_capacity, insert_entries))));
std::string expected_output;
ASSERT_TRUE(
absl::HexStringToBytes("0200"
"80",
&expected_output));
EXPECT_EQ(expected_output,
encoder.EncodeHeaderList( 1, header_list,
&encoder_stream_sent_byte_count_));
EXPECT_EQ(insert_entries.size(), encoder_stream_sent_byte_count_);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_encoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_encoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
6fe4dcda-4ff4-4489-95e2-6a94fa8eb7a6 | cpp | abseil/abseil-cpp | memory | absl/memory/memory.h | absl/memory/memory_test.cc | #ifndef ABSL_MEMORY_MEMORY_H_
#define ABSL_MEMORY_MEMORY_H_
#include <cstddef>
#include <limits>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
#include "absl/base/macros.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
template <typename T>
std::unique_ptr<T> WrapUnique(T* ptr) {
static_assert(!std::is_array<T>::value, "array types are unsupported");
static_assert(std::is_object<T>::value, "non-object types are unsupported");
return std::unique_ptr<T>(ptr);
}
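// Example usage (illustrative; `Foo` is a placeholder type):
//
//   std::unique_ptr<Foo> foo = absl::WrapUnique(new Foo(1, 2, 3));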
using std::make_unique;
template <typename T>
auto RawPtr(T&& ptr) -> decltype(std::addressof(*ptr)) {
return (ptr != nullptr) ? std::addressof(*ptr) : nullptr;
}
inline std::nullptr_t RawPtr(std::nullptr_t) { return nullptr; }
template <typename T, typename D>
std::shared_ptr<T> ShareUniquePtr(std::unique_ptr<T, D>&& ptr) {
return ptr ? std::shared_ptr<T>(std::move(ptr)) : std::shared_ptr<T>();
}
template <typename T>
std::weak_ptr<T> WeakenPtr(const std::shared_ptr<T>& ptr) {
return std::weak_ptr<T>(ptr);
}
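// Example usage of the pointer helpers above (illustrative):
//
//   std::unique_ptr<int> up = absl::make_unique<int>(7);
//   int* raw = absl::RawPtr(up);  // same as up.get()
//   std::shared_ptr<int> sp = absl::ShareUniquePtr(std::move(up));
//   std::weak_ptr<int> wp = absl::WeakenPtr(sp);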
using std::pointer_traits;
using std::allocator_traits;
namespace memory_internal {
template <template <typename> class Extract, typename Obj, typename Default,
typename>
struct ExtractOr {
using type = Default;
};
template <template <typename> class Extract, typename Obj, typename Default>
struct ExtractOr<Extract, Obj, Default, void_t<Extract<Obj>>> {
using type = Extract<Obj>;
};
template <template <typename> class Extract, typename Obj, typename Default>
using ExtractOrT = typename ExtractOr<Extract, Obj, Default, void>::type;
template <typename Alloc>
using GetIsNothrow = typename Alloc::is_nothrow;
}
template <typename Alloc>
struct allocator_is_nothrow
: memory_internal::ExtractOrT<memory_internal::GetIsNothrow, Alloc,
std::false_type> {};
#if defined(ABSL_ALLOCATOR_NOTHROW) && ABSL_ALLOCATOR_NOTHROW
template <typename T>
struct allocator_is_nothrow<std::allocator<T>> : std::true_type {};
struct default_allocator_is_nothrow : std::true_type {};
#else
struct default_allocator_is_nothrow : std::false_type {};
#endif
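// Example (illustrative): a custom allocator opts in by declaring
//
//   struct MyNoThrowAlloc {
//     using is_nothrow = std::true_type;
//     // ...usual allocator members...
//   };
//
// which makes absl::allocator_is_nothrow<MyNoThrowAlloc>::value true (see the
// CustomAllocator test in memory_test.cc below).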
namespace memory_internal {
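// ConstructRange and CopyRange provide a strong rollback guarantee: if
// constructing any element throws, every element constructed so far is
// destroyed before the exception is rethrown, leaving the range untouched.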
template <typename Allocator, typename Iterator, typename... Args>
void ConstructRange(Allocator& alloc, Iterator first, Iterator last,
const Args&... args) {
for (Iterator cur = first; cur != last; ++cur) {
ABSL_INTERNAL_TRY {
std::allocator_traits<Allocator>::construct(alloc, std::addressof(*cur),
args...);
}
ABSL_INTERNAL_CATCH_ANY {
while (cur != first) {
--cur;
std::allocator_traits<Allocator>::destroy(alloc, std::addressof(*cur));
}
ABSL_INTERNAL_RETHROW;
}
}
}
template <typename Allocator, typename Iterator, typename InputIterator>
void CopyRange(Allocator& alloc, Iterator destination, InputIterator first,
InputIterator last) {
for (Iterator cur = destination; first != last;
static_cast<void>(++cur), static_cast<void>(++first)) {
ABSL_INTERNAL_TRY {
std::allocator_traits<Allocator>::construct(alloc, std::addressof(*cur),
*first);
}
ABSL_INTERNAL_CATCH_ANY {
while (cur != destination) {
--cur;
std::allocator_traits<Allocator>::destroy(alloc, std::addressof(*cur));
}
ABSL_INTERNAL_RETHROW;
}
}
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/memory/memory.h"
#include <sys/types.h>
#include <cstddef>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace {
using ::testing::ElementsAre;
using ::testing::Return;
class DestructorVerifier {
public:
DestructorVerifier() { ++instance_count_; }
DestructorVerifier(const DestructorVerifier&) = delete;
DestructorVerifier& operator=(const DestructorVerifier&) = delete;
~DestructorVerifier() { --instance_count_; }
static int instance_count() { return instance_count_; }
private:
static int instance_count_;
};
int DestructorVerifier::instance_count_ = 0;
TEST(WrapUniqueTest, WrapUnique) {
{
auto dv = new DestructorVerifier;
EXPECT_EQ(1, DestructorVerifier::instance_count());
std::unique_ptr<DestructorVerifier> ptr = absl::WrapUnique(dv);
EXPECT_EQ(1, DestructorVerifier::instance_count());
}
EXPECT_EQ(0, DestructorVerifier::instance_count());
}
struct InitializationVerifier {
static constexpr int kDefaultScalar = 0x43;
static constexpr int kDefaultArray = 0x4B;
static void* operator new(size_t n) {
void* ret = ::operator new(n);
memset(ret, kDefaultScalar, n);
return ret;
}
static void* operator new[](size_t n) {
void* ret = ::operator new[](n);
memset(ret, kDefaultArray, n);
return ret;
}
int a;
int b;
};
struct ArrayWatch {
void* operator new[](size_t n) {
allocs().push_back(n);
return ::operator new[](n);
}
void operator delete[](void* p) { return ::operator delete[](p); }
static std::vector<size_t>& allocs() {
static auto& v = *new std::vector<size_t>;
return v;
}
};
TEST(RawPtrTest, RawPointer) {
int i = 5;
EXPECT_EQ(&i, absl::RawPtr(&i));
}
TEST(RawPtrTest, SmartPointer) {
int* o = new int(5);
std::unique_ptr<int> p(o);
EXPECT_EQ(o, absl::RawPtr(p));
}
class IntPointerNonConstDeref {
public:
explicit IntPointerNonConstDeref(int* p) : p_(p) {}
friend bool operator!=(const IntPointerNonConstDeref& a, std::nullptr_t) {
return a.p_ != nullptr;
}
int& operator*() { return *p_; }
private:
std::unique_ptr<int> p_;
};
TEST(RawPtrTest, SmartPointerNonConstDereference) {
int* o = new int(5);
IntPointerNonConstDeref p(o);
EXPECT_EQ(o, absl::RawPtr(p));
}
TEST(RawPtrTest, NullValuedRawPointer) {
int* p = nullptr;
EXPECT_EQ(nullptr, absl::RawPtr(p));
}
TEST(RawPtrTest, NullValuedSmartPointer) {
std::unique_ptr<int> p;
EXPECT_EQ(nullptr, absl::RawPtr(p));
}
TEST(RawPtrTest, Nullptr) {
auto p = absl::RawPtr(nullptr);
EXPECT_TRUE((std::is_same<std::nullptr_t, decltype(p)>::value));
EXPECT_EQ(nullptr, p);
}
TEST(RawPtrTest, Null) {
auto p = absl::RawPtr(nullptr);
EXPECT_TRUE((std::is_same<std::nullptr_t, decltype(p)>::value));
EXPECT_EQ(nullptr, p);
}
TEST(RawPtrTest, Zero) {
auto p = absl::RawPtr(nullptr);
EXPECT_TRUE((std::is_same<std::nullptr_t, decltype(p)>::value));
EXPECT_EQ(nullptr, p);
}
TEST(ShareUniquePtrTest, Share) {
auto up = absl::make_unique<int>();
int* rp = up.get();
auto sp = absl::ShareUniquePtr(std::move(up));
EXPECT_EQ(sp.get(), rp);
}
TEST(ShareUniquePtrTest, ShareNull) {
struct NeverDie {
using pointer = void*;
void operator()(pointer) {
ASSERT_TRUE(false) << "Deleter should not have been called.";
}
};
std::unique_ptr<void, NeverDie> up;
auto sp = absl::ShareUniquePtr(std::move(up));
}
TEST(WeakenPtrTest, Weak) {
auto sp = std::make_shared<int>();
auto wp = absl::WeakenPtr(sp);
EXPECT_EQ(sp.get(), wp.lock().get());
sp.reset();
EXPECT_TRUE(wp.expired());
}
TEST(AllocatorNoThrowTest, DefaultAllocator) {
#if defined(ABSL_ALLOCATOR_NOTHROW) && ABSL_ALLOCATOR_NOTHROW
EXPECT_TRUE(absl::default_allocator_is_nothrow::value);
#else
EXPECT_FALSE(absl::default_allocator_is_nothrow::value);
#endif
}
TEST(AllocatorNoThrowTest, StdAllocator) {
#if defined(ABSL_ALLOCATOR_NOTHROW) && ABSL_ALLOCATOR_NOTHROW
EXPECT_TRUE(absl::allocator_is_nothrow<std::allocator<int>>::value);
#else
EXPECT_FALSE(absl::allocator_is_nothrow<std::allocator<int>>::value);
#endif
}
TEST(AllocatorNoThrowTest, CustomAllocator) {
struct NoThrowAllocator {
using is_nothrow = std::true_type;
};
struct CanThrowAllocator {
using is_nothrow = std::false_type;
};
struct UnspecifiedAllocator {};
EXPECT_TRUE(absl::allocator_is_nothrow<NoThrowAllocator>::value);
EXPECT_FALSE(absl::allocator_is_nothrow<CanThrowAllocator>::value);
EXPECT_FALSE(absl::allocator_is_nothrow<UnspecifiedAllocator>::value);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/memory/memory.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/memory/memory_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
da101305-6d44-451a-9a8c-94414305ae64 | cpp | tensorflow/tensorflow | variadic_op_splitter | third_party/xla/xla/service/gpu/transforms/variadic_op_splitter.cc | third_party/xla/xla/service/gpu/transforms/variadic_op_splitter_test.cc | #include "xla/service/gpu/transforms/variadic_op_splitter.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
constexpr int32_t kMaxParameters = 128;
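// Concatenates with more than kMaxParameters operands are rewritten into a
// tree of concatenates that each take at most kMaxParameters operands; very
// wide variadic ops would otherwise exceed the number of parameters a single
// GPU kernel invocation can accept.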
absl::StatusOr<bool> SplitConcatenate(HloInstruction* concat,
HloComputation* comp) {
auto operands = concat->operands();
std::vector<HloInstruction*> operands_to_split(operands.begin(),
operands.end());
while (operands_to_split.size() > 1) {
std::vector<HloInstruction*> new_operands;
absl::Span<HloInstruction*> operands_span(operands_to_split);
for (int64_t offset = 0; offset < operands_to_split.size();
offset += kMaxParameters) {
if (offset > 0 && offset + kMaxParameters > operands_to_split.size()) {
new_operands.insert(new_operands.end(),
operands_to_split.begin() + offset,
operands_to_split.end());
} else {
Shape new_shape = concat->shape();
int64_t concat_dimension_size = 0;
for (int64_t i = 0;
i < kMaxParameters && offset + i < operands_to_split.size(); ++i) {
concat_dimension_size +=
operands_to_split[i + offset]->shape().dimensions(
concat->concatenate_dimension());
}
new_shape.set_dimensions(concat->concatenate_dimension(),
concat_dimension_size);
auto new_concat = comp->AddInstruction(concat->CloneWithNewOperands(
new_shape, operands_span.subspan(offset, kMaxParameters)));
new_operands.push_back(new_concat);
}
}
operands_to_split = new_operands;
}
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(concat, operands_to_split[0]));
return true;
}
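// Worked example (mirroring the unit tests): a concatenate with 256 operands
// becomes concat(concat(o0..o127), concat(o128..o255)), while one with 255
// operands becomes concat(concat(o0..o127), o128, ..., o254).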
std::vector<HloInstruction*> GetRelevantVariadicOps(HloComputation* comp) {
std::vector<HloInstruction*> ops;
for (HloInstruction* instr : comp->instructions()) {
if (instr->opcode() == HloOpcode::kConcatenate &&
instr->operand_count() > kMaxParameters) {
ops.push_back(instr);
}
}
return ops;
}
}
absl::StatusOr<bool> VariadicOpSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* op : GetRelevantVariadicOps(comp)) {
TF_ASSIGN_OR_RETURN(bool result, SplitConcatenate(op, comp));
changed |= result;
}
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/variadic_op_splitter.h"
#include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
using match::Concatenate;
class VariadicOpSplitterTest : public HloTestBase {};
TEST_F(VariadicOpSplitterTest, DontSplit) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[30,41] parameter(0)
p1 = f16[30,41] parameter(1)
ROOT result = f16[60, 41] concatenate(p0, p1), dimensions={0}
})")
.value();
EXPECT_FALSE(VariadicOpSplitter().Run(module.get()).value());
}
TEST_F(VariadicOpSplitterTest, SplitInto2) {
auto builder = HloComputation::Builder(TestName());
auto operand = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32_t>({42})));
std::vector<HloInstruction*> concat_operands(255, operand);
builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(S32, {255}), concat_operands, 0));
auto module = CreateNewVerifiedModule();
auto entry_computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(VariadicOpSplitter().Run(module.get()).value());
EXPECT_TRUE(Match(entry_computation->root_instruction(),
Concatenate().WithNumOperands(128).WithOperand(
0, Concatenate().WithNumOperands(128))));
}
TEST_F(VariadicOpSplitterTest, SplitInto3) {
auto builder = HloComputation::Builder(TestName());
auto operand = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32_t>({42})));
std::vector<HloInstruction*> concat_operands(256, operand);
builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(S32, {256}), concat_operands, 0));
auto module = CreateNewVerifiedModule();
auto entry_computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(VariadicOpSplitter().Run(module.get()).value());
EXPECT_TRUE(Match(entry_computation->root_instruction(),
Concatenate(Concatenate().WithNumOperands(128),
Concatenate().WithNumOperands(128))));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/variadic_op_splitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/variadic_op_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bdacba29-a14b-4563-ba5a-c2c9730812a0 | cpp | google/tensorstore | google_service_account_auth_provider | tensorstore/internal/oauth2/google_service_account_auth_provider.cc | tensorstore/internal/oauth2/google_service_account_auth_provider_test.cc | #include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/oauth2/bearer_token.h"
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include "tensorstore/internal/oauth2/refreshable_auth_provider.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_oauth2 {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
constexpr char kOAuthV4Url[] = "https://www.googleapis.com/oauth2/v4/token";
constexpr char kOAuthScope[] = "https://www.googleapis.com/auth/cloud-platform";
GoogleServiceAccountAuthProvider::GoogleServiceAccountAuthProvider(
const AccountCredentials& creds,
std::shared_ptr<internal_http::HttpTransport> transport,
std::function<absl::Time()> clock)
: RefreshableAuthProvider(std::move(clock)),
creds_(creds),
uri_(kOAuthV4Url),
scope_(kOAuthScope),
transport_(std::move(transport)) {}
Result<HttpResponse> GoogleServiceAccountAuthProvider::IssueRequest(
std::string_view method, std::string_view uri, absl::Cord payload) {
return transport_
->IssueRequest(
HttpRequestBuilder(method, std::string{uri})
.AddHeader("Content-Type: application/x-www-form-urlencoded")
.BuildRequest(),
internal_http::IssueRequestOptions(std::move(payload)))
.result();
}
Result<BearerTokenWithExpiration> GoogleServiceAccountAuthProvider::Refresh() {
const auto now = GetCurrentTime();
TENSORSTORE_ASSIGN_OR_RETURN(
auto body,
internal_oauth2::BuildSignedJWTRequest(
creds_.private_key,
internal_oauth2::BuildJWTHeader(creds_.private_key_id),
internal_oauth2::BuildJWTClaimBody(creds_.client_email, scope_, uri_,
now, 3600 )));
TENSORSTORE_ASSIGN_OR_RETURN(
auto response, IssueRequest("POST", uri_, absl::Cord(std::move(body))));
TENSORSTORE_RETURN_IF_ERROR(HttpResponseCodeToStatus(response));
TENSORSTORE_ASSIGN_OR_RETURN(auto result, internal_oauth2::ParseOAuthResponse(
response.payload.Flatten()));
return BearerTokenWithExpiration{std::move(result.access_token),
now + absl::Seconds(result.expires_in)};
}
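// The Refresh() flow above follows the standard service-account OAuth2
// exchange: build a JWT signed with the account's private key, POST it to the
// token endpoint as a urn:ietf:params:oauth:grant-type:jwt-bearer assertion,
// and parse the returned access token and expiry into a
// BearerTokenWithExpiration.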
}
} | #include "tensorstore/internal/oauth2/google_service_account_auth_provider.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "tensorstore/internal/oauth2/fake_private_key.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Result;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_oauth2::GetFakePrivateKey;
using ::tensorstore::internal_oauth2::GoogleServiceAccountAuthProvider;
using ::tensorstore::internal_oauth2::GoogleServiceAccountCredentials;
const char kServiceAccountInfo[] = R"({
"token_type" : "123",
"access_token": "abc",
"expires_in": 456
})";
const GoogleServiceAccountCredentials kCreds{
"a1a111aa1111a11a11a11aa111a111a1a1111111",
GetFakePrivateKey(),
"https:
"[email protected]",
};
constexpr char kBody[] =
"grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&"
"assertion="
"eyJhbGciOiJSUzI1NiIsImtpZCI6ImExYTExMWFhMTExMWExMWExMWExMWFhMTExYTExMWExYT"
"ExMTExMTEiLCJ0eXAiOiJKV1QifQ."
"eyJhdWQiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjQvdG9rZW4iLCJleH"
"AiOjE1NDc2Njk3MDMsImlhdCI6MTU0NzY2NjEwMywiaXNzIjoiZm9vLWVtYWlsQGZvby1wcm9q"
"ZWN0LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2NvcGUiOiJodHRwczovL3d3dy5nb29nbG"
"VhcGlzLmNvbS9hdXRoL2Nsb3VkLXBsYXRmb3JtIn0.gvM1sjnFXwQkBTTqobnTJqE8ZCrAR-"
"SEevEZB4Quqxd836v7iHjnWBiOkUCZl_o5wQouz5pFuhkQ1BlhhAZNih_Ko2yxBi0W_NuhI-"
"18We8gSMhi8pwfNu6WqNqXkHlQAJebhJQH23yP_A2dxU3Z50maUJaAl9G0e60CIynsaeW-"
"o7QneaPxPEWjOi--XMvkOu-z8eD0CXx1dUrlzINDxWzJFoXzCk2_NZ9-"
"UPzHWai68qKo2FjbtTT3fEPA-L1IN908OWhuN2UHdvPrg_"
"h13GO7kY3K7TsWotsgsLon2KxWYaDpasaY_ZqCIXCeS4jW89gVtsOB3E6B-xdR1Gq-9g";
class TestAuthProvider : public GoogleServiceAccountAuthProvider {
public:
TestAuthProvider(const GoogleServiceAccountCredentials& creds)
: GoogleServiceAccountAuthProvider(creds, nullptr,
[this] { return this->time; }),
time(absl::FromUnixSeconds(1547666103)),
idx(0) {}
virtual Result<HttpResponse> IssueRequest(std::string_view method,
std::string_view uri,
absl::Cord body) {
request.push_back(std::make_pair(std::string(uri), std::string(body)));
if (responses.count(idx) != 0) {
return responses[idx++];
}
return HttpResponse{};
}
absl::Time time;
int idx;
absl::flat_hash_map<int, HttpResponse> responses;
std::vector<std::pair<std::string, std::string>> request;
};
TEST(GoogleServiceAccountAuthProviderTest, InitialState) {
TestAuthProvider auth({"a", "b", "c", "d"});
EXPECT_FALSE(auth.IsValid());
EXPECT_TRUE(auth.IsExpired());
}
TEST(GoogleServiceAccountAuthProviderTest, BadKeys) {
TestAuthProvider auth({"a", "b", "c", "d"});
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
EXPECT_EQ(0, auth.request.size());
}
TEST(OAuth2AuthProviderTest, NoResponse) {
TestAuthProvider auth(kCreds);
auto result = auth.GetToken();
EXPECT_FALSE(result.ok()) << result.status();
ASSERT_EQ(1, auth.request.size());
EXPECT_EQ("https:
auth.request[0].first);
EXPECT_EQ(kBody, auth.request[0].second);
}
TEST(GoogleServiceAccountAuthProviderTest, Status200) {
TestAuthProvider auth(kCreds);
auth.responses = {
{0,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
{1,
{200,
absl::Cord(kServiceAccountInfo),
{}}},
};
{
auto result = auth.GetToken();
EXPECT_EQ(1, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(1, auth.request.size());
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
EXPECT_FALSE(auth.IsExpired());
EXPECT_TRUE(auth.IsValid());
auth.time += absl::Seconds(600);
{
auto result = auth.GetToken();
EXPECT_EQ(2, auth.idx);
EXPECT_TRUE(result.ok()) << result.status();
EXPECT_EQ(2, auth.request.size());
EXPECT_EQ(auth.time + absl::Seconds(456), result->expiration);
EXPECT_EQ("abc", result->token);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/google_service_account_auth_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/oauth2/google_service_account_auth_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
14b90607-2c26-427d-a00b-7fb9d69a4c4b | cpp | abseil/abseil-cpp | randen | absl/random/internal/randen.cc | absl/random/internal/randen_test.cc | #include "absl/random/internal/randen.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/random/internal/randen_detect.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
namespace {
struct RandenState {
const void* keys;
bool has_crypto;
};
RandenState GetRandenState() {
static const RandenState state = []() {
RandenState tmp;
#if ABSL_RANDOM_INTERNAL_AES_DISPATCH
if (HasRandenHwAesImplementation() && CPUSupportsRandenHwAes()) {
tmp.has_crypto = true;
tmp.keys = RandenHwAes::GetKeys();
} else {
tmp.has_crypto = false;
tmp.keys = RandenSlow::GetKeys();
}
#elif ABSL_HAVE_ACCELERATED_AES
tmp.has_crypto = true;
tmp.keys = RandenHwAes::GetKeys();
#else
tmp.has_crypto = false;
tmp.keys = RandenSlow::GetKeys();
#endif
return tmp;
}();
return state;
}
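// Because `state` is a function-local static initialized by an immediately
// invoked lambda, the AES implementation is selected exactly once per process;
// subsequent Randen constructions reuse the cached keys and crypto flag
// without re-running CPU feature detection.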
}
Randen::Randen() {
auto tmp = GetRandenState();
keys_ = tmp.keys;
#if ABSL_RANDOM_INTERNAL_AES_DISPATCH
has_crypto_ = tmp.has_crypto;
#endif
}
}
ABSL_NAMESPACE_END
} | #include "absl/random/internal/randen.h"
#include <cstring>
#include "gtest/gtest.h"
#include "absl/meta/type_traits.h"
namespace {
using absl::random_internal::Randen;
TEST(RandenTest, CopyAndMove) {
static_assert(std::is_copy_constructible<Randen>::value,
"Randen must be copy constructible");
static_assert(absl::is_copy_assignable<Randen>::value,
"Randen must be copy assignable");
static_assert(std::is_move_constructible<Randen>::value,
"Randen must be move constructible");
static_assert(absl::is_move_assignable<Randen>::value,
"Randen must be move assignable");
}
TEST(RandenTest, Default) {
constexpr uint8_t kGolden[] = {
0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d,
0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3,
0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f,
0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0,
0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff,
0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2,
0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5,
0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c,
0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56,
0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 0x27, 0xb3, 0xf4,
0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91,
0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5,
0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d,
0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58,
0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e,
0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49,
0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0,
0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6,
0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61,
0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35,
0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c,
0x82, 0xf0, 0x1e, 0x81,
};
alignas(16) uint8_t state[Randen::kStateBytes];
std::memset(state, 0, sizeof(state));
Randen r;
r.Generate(state);
EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state)));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/randen.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/randen_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
2137774c-d1b0-416b-93a9-b2e5d467fb5e | cpp | tensorflow/tensorflow | table | tensorflow/lite/kernels/table.cc | tensorflow/lite/kernels/table_test.cc | #include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/lut.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace table {
constexpr int kInputTensor = 0;
constexpr int kTable = 1;
constexpr int kOutputTensor = 0;
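// This custom op maps each quantized input element through a caller-provided
// lookup table: LUTSize<int8_t>() (256) entries for int8 inputs, and the
// larger LUTSize<int16_t>() table for int16 inputs, whose extra entries allow
// interpolation between table points.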
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* table;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kTable, &table));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context,
input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, table->type);
if (input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
TF_LITE_ENSURE_EQ(context, NumDimensions(table), 1);
if (input->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, NumElements(table), LUTSize<int8_t>());
} else {
TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16);
TF_LITE_ENSURE_EQ(context, NumElements(table), LUTSize<int16_t>());
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* table;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kTable, &table));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input->type) {
case kTfLiteInt8:
reference_integer_ops::LookupTable(
GetTensorData<int8_t>(input),
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
GetTensorData<int8_t>(table), GetTensorData<int8_t>(output));
return kTfLiteOk;
case kTfLiteInt16:
reference_integer_ops::LookupTable(
GetTensorData<int16_t>(input),
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
GetTensorData<int16_t>(table), GetTensorData<int16_t>(output));
return kTfLiteOk;
default:
TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Table");
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_TABLE() {
static TfLiteRegistration r = {nullptr, nullptr, table::Prepare, table::Eval};
return &r;
}
}
}
} | #include <cmath>
#include <limits>
#include <type_traits>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_TABLE();
namespace {
using ::testing::ElementsAreArray;
class TableOpModel : public SingleOpModel {
public:
TableOpModel(const TensorData& input, const TensorData& table,
const TensorData& output) {
input_ = AddInput(input);
table_ = AddInput(table);
output_ = AddOutput(output);
SetCustomOp("Table", {}, Register_TABLE);
BuildInterpreter({GetShape(input_), GetShape(table_)});
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
template <typename integer_dtype>
std::vector<float> GetDequantizedOutput() {
return Dequantize<integer_dtype>(ExtractVector<integer_dtype>(output_),
GetScale(output_), GetZeroPoint(output_));
}
int input() { return input_; }
int table() { return table_; }
int output() { return output_; }
protected:
int input_;
int table_;
int output_;
};
template <typename T>
inline float GetLUTTolerance(float input_min, float input_max, float output_min,
float output_max) {
static_assert(
std::is_same<T, int8_t>::value || std::is_same<T, int16_t>::value,
"T must be an int8_t or int16_t.");
const float range_sum = (input_max - input_min) + (output_max - output_min);
if (std::is_same<T, int8_t>::value) {
return range_sum / 256.0f;
} else {
return range_sum / 512.0f;
}
}
template <typename T>
void TableWithExpLUTTest() {
float input_min = -0.5f;
float input_max = 0.8f;
if (std::is_same<T, int16_t>::value) {
input_min = -0.8f;
input_max = 0.8f * std::numeric_limits<T>::max() /
static_cast<float>(std::numeric_limits<T>::max() + 1);
}
float output_min = 0.0f;
float output_max = 2.4f;
if (std::is_same<T, int16_t>::value) {
output_min = -2.4f;
output_max = 2.4f * std::numeric_limits<T>::max() /
static_cast<float>(std::numeric_limits<T>::max() + 1);
}
const float kQuantizedTolerance =
GetLUTTolerance<T>(input_min, input_max, output_min, output_max);
TableOpModel m({GetTensorType<T>(), {1, 2, 3, 1}, input_min, input_max},
{GetTensorType<T>(), {LUTSize<T>()}},
{GetTensorType<T>(), {}, output_min, output_max});
T table[LUTSize<T>()];
LUTPopulate<T>(
m.GetScale(m.input()), m.GetZeroPoint(m.input()), m.GetScale(m.output()),
m.GetZeroPoint(m.output()), [](float v) { return std::exp(v); }, table);
m.QuantizeAndPopulate<T>(m.input(), {-0.5f, -0.2f, 0.0f, 0.1f, 0.3f, 0.8f});
m.PopulateTensor<T>(m.table(), 0, table, table + LUTSize<T>());
m.Invoke();
EXPECT_THAT(m.GetDequantizedOutput<T>(),
ElementsAreArray(ArrayFloatNear(
{std::exp(-0.5f), std::exp(-0.2f), std::exp(0.0f),
std::exp(0.1f), std::exp(0.3f), std::exp(0.8f)},
kQuantizedTolerance)));
}
TEST(TableOpTest, Int8ExpLUT) { TableWithExpLUTTest<int8_t>(); }
TEST(TableOpTest, Int16ExpLUT) { TableWithExpLUTTest<int16_t>(); }
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/table.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/table_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5ae6834d-0580-44a1-8bd2-af4b865d5c58 | cpp | tensorflow/tensorflow | toco_port | tensorflow/lite/toco/toco_port.cc | tensorflow/lite/toco/toco_port_test.cc | #include "tensorflow/lite/toco/toco_port.h"
#include <cstring>
#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/toco/toco_types.h"
#if defined(__ANDROID__) && defined(__ARM_ARCH_7A__)
namespace std {
double round(double x) { return ::round(x); }
}
#endif
namespace toco {
namespace port {
void CopyToBuffer(const std::string& src, char* dest) {
memcpy(dest, src.data(), src.size());
}
#ifdef PLATFORM_GOOGLE
void CopyToBuffer(const absl::Cord& src, char* dest) { src.CopyToArray(dest); }
#endif
}
}
#if defined(PLATFORM_GOOGLE) && !defined(__APPLE__) && \
!defined(__ANDROID__) && !defined(_WIN32)
#include "base/init_google.h"
#include "file/base/file.h"
#include "file/base/filesystem.h"
#include "file/base/helpers.h"
#include "file/base/options.h"
#include "file/base/path.h"
namespace toco {
namespace port {
void InitGoogle(const char* usage, int* argc, char*** argv, bool remove_flags) {
::InitGoogle(usage, argc, argv, remove_flags);
}
void InitGoogleWasDoneElsewhere() {
}
void CheckInitGoogleIsDone(const char* message) {
::CheckInitGoogleIsDone(message);
}
namespace file {
tensorflow::Status ToStatus(const absl::Status& uts) {
if (!uts.ok()) {
return tensorflow::Status(absl::StatusCode(::util::RetrieveErrorCode(uts)),
uts.message());
}
return absl::OkStatus();
}
toco::port::file::Options ToOptions(const ::file::Options& options) {
CHECK_EQ(&options, &::file::Defaults());
return Options();
}
tensorflow::Status Writable(const std::string& filename) {
File* f = nullptr;
const auto status = ::file::Open(filename, "w", &f, ::file::Defaults());
if (f) {
QCHECK_OK(f->Close(::file::Defaults()));
}
return ToStatus(status);
}
tensorflow::Status Readable(const std::string& filename,
const file::Options& options) {
return ToStatus(::file::Readable(filename, ::file::Defaults()));
}
tensorflow::Status Exists(const std::string& filename,
const file::Options& options) {
auto status = ::file::Exists(filename, ::file::Defaults());
return ToStatus(status);
}
tensorflow::Status GetContents(const std::string& filename,
std::string* contents,
const file::Options& options) {
return ToStatus(::file::GetContents(filename, contents, ::file::Defaults()));
}
tensorflow::Status SetContents(const std::string& filename,
const std::string& contents,
const file::Options& options) {
return ToStatus(::file::SetContents(filename, contents, ::file::Defaults()));
}
std::string JoinPath(const std::string& a, const std::string& b) {
return ::file::JoinPath(a, b);
}
}
}
}
#else
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cstdio>
#if defined(_WIN32)
#include <io.h>
#else
#include <unistd.h>
#endif
#if defined(PLATFORM_GOOGLE)
#include "base/commandlineflags.h"
#endif
namespace toco {
namespace port {
#if defined(_WIN32)
#define close _close
#define open _open
#define read _read
constexpr int kFileCreateMode = _S_IREAD | _S_IWRITE;
constexpr int kFileReadFlags = _O_RDONLY | _O_BINARY;
constexpr int kFileWriteFlags = _O_WRONLY | _O_BINARY | _O_CREAT;
#else
constexpr int kFileCreateMode = 0664;
constexpr int kFileReadFlags = O_RDONLY;
constexpr int kFileWriteFlags = O_CREAT | O_WRONLY;
#endif
static bool port_initialized = false;
void InitGoogleWasDoneElsewhere() { port_initialized = true; }
void InitGoogle(const char* usage, int* argc, char*** argv, bool remove_flags) {
if (!port_initialized) {
#if defined(PLATFORM_GOOGLE)
ParseCommandLineFlags(argc, argv, remove_flags);
#endif
port_initialized = true;
}
}
void CheckInitGoogleIsDone(const char* message) {
CHECK(port_initialized) << message;
}
namespace file {
tensorflow::Status Writable(const string& filename) {
FILE* f = fopen(filename.c_str(), "w");
if (f) {
fclose(f);
return tensorflow::OkStatus();
}
return tensorflow::errors::NotFound("not writable");
}
tensorflow::Status Readable(const string& filename,
const file::Options& options) {
FILE* f = fopen(filename.c_str(), "r");
if (f) {
fclose(f);
return tensorflow::OkStatus();
}
return tensorflow::errors::NotFound("not readable");
}
tensorflow::Status Exists(const string& filename,
const file::Options& options) {
struct stat statbuf;
int ret = stat(filename.c_str(), &statbuf);
if (ret == -1) {
return tensorflow::errors::NotFound("file doesn't exist");
}
return tensorflow::OkStatus();
}
tensorflow::Status GetContents(const string& path, string* output,
const file::Options& options) {
output->clear();
int fd = open(path.c_str(), kFileReadFlags);
if (fd == -1) {
return tensorflow::errors::NotFound("can't open() for read");
}
const int kBufSize = 1 << 16;
char buffer[kBufSize];
while (true) {
int size = read(fd, buffer, kBufSize);
if (size == 0) {
close(fd);
return tensorflow::OkStatus();
} else if (size == -1) {
close(fd);
return tensorflow::errors::Internal("error during read()");
} else {
output->append(buffer, size);
}
}
CHECK(0);
return tensorflow::errors::Internal("internal error");
}
tensorflow::Status SetContents(const string& filename, const string& contents,
const file::Options& options) {
int fd = open(filename.c_str(), kFileWriteFlags, kFileCreateMode);
if (fd == -1) {
return tensorflow::errors::Internal("can't open() for write");
}
size_t i = 0;
while (i < contents.size()) {
size_t to_write = contents.size() - i;
ssize_t written = write(fd, &contents[i], to_write);
if (written == -1) {
close(fd);
return tensorflow::errors::Internal("write() error");
}
i += written;
}
close(fd);
return tensorflow::OkStatus();
}
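// JoinPath below normalizes to exactly one '/' between the two parts: a
// trailing '/' on `base` and a leading '/' on `filename` are both stripped,
// so e.g. JoinPath("part1/", "/part2") yields "part1/part2" (mirrored by
// TocoPortTest.JoinPath).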
string JoinPath(const string& base, const string& filename) {
if (base.empty()) return filename;
string base_fixed = base;
if (!base_fixed.empty() && base_fixed.back() == '/') base_fixed.pop_back();
string filename_fixed = filename;
if (!filename_fixed.empty() && filename_fixed.front() == '/')
filename_fixed.erase(0, 1);
return base_fixed + "/" + filename_fixed;
}
}
}
}
#endif | #include "tensorflow/lite/toco/toco_port.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/toco_types.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace toco {
namespace port {
namespace {
#ifdef PLATFORM_GOOGLE
#define TFLITE_PREFIX "third_party/tensorflow/lite/"
#else
#define TFLITE_PREFIX "tensorflow/lite/"
#endif
TEST(TocoPortTest, Exists) {
EXPECT_TRUE(
file::Exists(TFLITE_PREFIX "toco/toco_port_test.cc", file::Defaults())
.ok());
EXPECT_FALSE(
file::Exists("non-existent_file_asldjflasdjf", file::Defaults()).ok());
}
TEST(TocoPortTest, Readable) {
EXPECT_TRUE(
file::Readable(TFLITE_PREFIX "toco/toco_port_test.cc", file::Defaults())
.ok());
EXPECT_FALSE(
file::Readable("non-existent_file_asldjflasdjf", file::Defaults()).ok());
}
TEST(TocoPortTest, JoinPath) {
EXPECT_EQ("part1/part2", file::JoinPath("part1", "part2"));
EXPECT_EQ("part1/part2", file::JoinPath("part1/", "part2"));
EXPECT_EQ("part1/part2", file::JoinPath("part1", "/part2"));
EXPECT_EQ("part1/part2", file::JoinPath("part1/", "/part2"));
}
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_port.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/toco_port_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
af8c8af1-7b5b-4942-b777-1f91251c6e5a | cpp | tensorflow/tensorflow | ifrt_program_ops | tensorflow/core/tfrt/ops/ifrt_program_ops.cc | tensorflow/core/tfrt/kernels/ifrt_program_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
namespace tfrt_stub {
REGISTER_OP("IfrtCall")
.Input("args: Tin")
.Output("results: Tout")
.Attr("Tin: list(type) >= 0")
.Attr("Tout: list(type) >= 0")
.Attr("program_id: int")
.Attr("variable_arg_indices: list(int)")
.SetIsStateful()
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.Doc(R"(
Calls an IFRT program identified by the given program id.
This op looks up a `ServingExecutable` from `ServingExecutableRegistry` using
the program id, calls the executable with the op's inputs as arguments, and
returns its results as the op's outputs.
Note that this op is not part of a stable interface. Users must not use this op
in their SavedModel directly; instead, they should rely on IFRT Serving's
mechanism, which inserts this op automatically via graph rewrite.
program_id: int64 id that can be used to look up compiled programs from
  `ServingExecutableRegistry`.
variable_arg_indices: must be in sorted ascending order. The argument at
  position variable_arg_indices[k] in the TPU program is already loaded as an
  IFRT array, and the input `args[variable_arg_indices[k]]` is the key used to
  look up that loaded array.
)");
REGISTER_OP("IfrtLoadVariable")
.Input("variable: Tin")
.Output("array_key: Tout")
.Output("tensor: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.Attr("used_by_host: bool")
.SetIsStateful()
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.Doc(R"(
This op loads a restored variable tensor as a tensor future. It is a replacement
for `tf.ReadVariableOp`.
This op returns a scalar string tensor containing the restored variable name,
which is composed from the `container_name` and `shared_name` of a `var_handle`
and can be used as a key within the runtime, as well as a future for the tensor.
Note that this op is not part of a stable interface. Users must not use this op
in their SavedModel directly; instead, they should rely on IFRT Serving's
mechanism, which inserts this op automatically via graph rewrite.
variable: the variable handle of the variable tensor to be loaded.
array_key: the key to be used by the `IfrtCall` op to look up the loaded array.
tensor: the future of the loaded tensor. The future contains a valid tensor if
  `used_by_host` is true.
used_by_host: a boolean indicating whether the variable is used by the host op
  or exclusively by the TPU.
)");
}
} | #include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using tensorflow::ifrt_serving::ServingExecutableRegistry;
using tensorflow::ifrt_serving::test_utils::GetMlirModulePath;
using tensorflow::ifrt_serving::test_utils::IfrtServingExecutableTestHelper;
using tensorflow::test::AsTensor;
using tensorflow::test::TensorEq;
using ::testing::Return;
class IfrtCallOpTest : public OpsTestBase {
protected:
Status Init(int64_t program_id, int num_inputs, DataType input_type,
const std::vector<int>& variable_arg_indices,
const std::vector<DataType>& output_type_list) {
TF_CHECK_OK(NodeDefBuilder("op", "IfrtCall")
.Input(FakeInput(num_inputs, input_type))
.Attr("program_id", program_id)
.Attr("variable_arg_indices", variable_arg_indices)
.Attr("Tout", output_type_list)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(IfrtCallOpTest, Basic) {
int64_t program_id = 123;
TF_ASSERT_OK(Init(
program_id,
2,
DT_INT32,
{},
{DT_INT32}));
tsl::test_util::MockServingDeviceSelector selector;
IfrtServingExecutableTestHelper helper(&selector);
EXPECT_CALL(selector, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillOnce(Return(tsl::DeviceReservation(0, nullptr)));
auto executable =
helper.MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
TF_ASSERT_OK_AND_ASSIGN(
ServingExecutableRegistry::Handle handle,
ServingExecutableRegistry::Register(program_id, std::move(executable)));
auto handle_cleaner = gtl::MakeCleanup([&handle] { handle.Release(); });
AddInputFromArray<int32_t>(TensorShape({1, 3}), {1, 2, 3});
AddInputFromArray<int32_t>(TensorShape({3, 1}), {1, 2, 3});
for (int i = 0; i < helper.num_cores() + 1; ++i) {
TF_ASSERT_OK(RunOpKernel());
}
Tensor expected_out = AsTensor<int32_t>({14}, TensorShape({1, 1}));
EXPECT_THAT(*GetOutput(0), TensorEq(expected_out));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ops/ifrt_program_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/kernels/ifrt_program_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |