Dataset schema (column name: type, observed length range or number of distinct values):

- ID: string, length 36 (UUID)
- Language: string, 1 distinct value
- Repository Name: string, 13 distinct values
- File Name: string, length 2 to 44
- File Path in Repository: string, length 11 to 111
- File Path for Unit Test: string, length 16 to 116
- Code: string, length 0 to 278k
- Unit Test - (Ground Truth): string, length 127 to 663k
- Code Url: string, length 91 to 198
- Test Code Url: string, length 96 to 203
- Commit Hash: string, 13 distinct values
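The statistics above read like an auto-generated schema summary for a Hugging Face style dataset: eleven string columns per record, where the Code and Unit Test - (Ground Truth) columns each hold an entire source file. As a minimal sketch of how such records could be consumed (the dataset id "org/cpp-code-with-unit-tests" is a hypothetical placeholder, not given anywhere in this dump):

# Minimal sketch, assuming the records are published as a Hugging Face dataset.
# "org/cpp-code-with-unit-tests" is a hypothetical placeholder id; substitute
# the real dataset repository id.
from datasets import load_dataset

ds = load_dataset("org/cpp-code-with-unit-tests", split="train")
for row in ds:
    # Each row pairs a source file with its ground-truth unit test, plus
    # provenance (repository, commit hash, GitHub URLs).
    print(row["Repository Name"], row["File Path in Repository"])
    print(len(row["Code"]), "chars of code;",
          len(row["Unit Test - (Ground Truth)"]), "chars of tests")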
ID: 08c7ac11-785d-49c7-8f9f-282729b8967d
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: single_machine
File Path in Repository: tensorflow/core/grappler/clusters/single_machine.cc
File Path for Unit Test: tensorflow/core/grappler/clusters/single_machine_test.cc

Code:
#include "tensorflow/core/grappler/clusters/single_machine.h" #include <atomic> #include <memory> #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "tensorflow/cc/training/queue_runner.h" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/gpu/gpu_id.h" #include "tensorflow/core/common_runtime/gpu/gpu_id_manager.h" #include "tensorflow/core/grappler/clusters/utils.h" #include "tensorflow/core/grappler/utils.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/notification.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/public/session.h" namespace tensorflow { namespace grappler { static std::atomic<bool> already_provisioned(false); SingleMachine::SingleMachine(int timeout_s, int num_cpu_cores, int num_gpus) : Cluster(timeout_s), expected_init_time_s_(0), closing_(false) { VLOG(1) << "Number of CPU cores: " << num_cpu_cores << " Number of GPUs: " << num_gpus; thread_pool_ = std::make_unique<thread::ThreadPool>( Env::Default(), SanitizeThreadSuffix("single_machine"), 2); (*options_.config.mutable_device_count())["CPU"] = 1; if (num_gpus > 0) { (*options_.config.mutable_device_count())["GPU"] = num_gpus; } CHECK_GE(num_cpu_cores, 1); options_.config.set_intra_op_parallelism_threads(num_cpu_cores); options_.config.add_session_inter_op_thread_pool()->set_num_threads( num_cpu_cores); if (timeout_s > 0) { options_.config.set_operation_timeout_in_ms(timeout_s * 1000); } } SingleMachine::~SingleMachine() { CloseSession(false ).IgnoreError(); thread_pool_.reset(); } Status SingleMachine::Provision() { if (already_provisioned) { return absl::UnavailableError( "Can't provision more than one single cluster at a time"); } TF_RETURN_IF_ERROR(ResetSession()); std::vector<DeviceAttributes> devices; TF_RETURN_IF_ERROR(session_->ListDevices(&devices)); for (const auto& dev : devices) { DeviceProperties attr; if (dev.device_type() == "CPU") { attr = GetLocalCPUInfo(); } else if (dev.device_type() == "GPU") { DeviceNameUtils::ParsedName parsed; if (!DeviceNameUtils::ParseFullName(dev.name(), &parsed)) { return absl::InvalidArgumentError( absl::StrCat("Not able to parse GPU device name: ", dev.name())); } TfDeviceId tf_device_id(parsed.id); PlatformDeviceId platform_device_id; Status s = GpuIdManager::TfToPlatformDeviceId(tf_device_id, &platform_device_id); if (!s.ok()) { return absl::UnavailableError( absl::StrCat("Unknown TF GPU device with id ", tf_device_id.value(), ": ", s.message())); } attr = GetLocalGPUInfo(platform_device_id); } else if (dev.device_type().find("XLA") == string::npos) { attr.set_type(dev.device_type()); } attr.set_memory_size(dev.memory_limit()); devices_[dev.name()] = attr; } already_provisioned = true; if (cpu_allocator_stats_enabled_) { TF_RETURN_IF_ERROR(ClearAllocatorStats()); } return absl::OkStatus(); } Status SingleMachine::Initialize(const GrapplerItem& item) { mutex_lock l(this->last_graph_mu_); if (last_graph_ != &item.graph || last_graph_id_ != item.id) { init_ops_ = item.init_ops; expected_init_time_s_ = item.expected_init_time; last_graph_ = nullptr; queue_runner_defs_ = item.queue_runners; last_graph_id_ = item.id; } return absl::OkStatus(); } Status SingleMachine::Shutdown() { TF_RETURN_IF_ERROR(ShutdownSession()); mutex_lock l(this->last_graph_mu_); last_graph_ = 
nullptr; already_provisioned = false; return absl::OkStatus(); } Status SingleMachine::Run(const GraphDef& graph_def, const std::vector<std::pair<string, Tensor>>& feed, const std::vector<string>& fetch, RunMetadata* metadata) { mutex_lock l(this->last_graph_mu_); if (last_graph_ != &graph_def) { TF_RETURN_IF_ERROR(ResetSession()); TF_RETURN_IF_ERROR(session_->Create(graph_def)); if (!init_ops_.empty()) { init_metadata_ = RunMetadata(); int64_t timeout_s = timeout_s_ + expected_init_time_s_; TF_RETURN_IF_ERROR( RunWithTimeout({}, init_ops_, &init_metadata_, timeout_s)); for (auto node : *init_metadata_.mutable_cost_graph()->mutable_node()) { node.clear_compute_cost(); } init_metadata_.clear_step_stats(); } RunOptions queue_options = run_options_; if (queue_options.trace_level() >= RunOptions::HARDWARE_TRACE) { queue_options.set_trace_level(RunOptions::SOFTWARE_TRACE); } for (size_t i = 0; i < queue_runner_defs_.size(); ++i) { std::unique_ptr<QueueRunner> queue_runner; TF_RETURN_IF_ERROR(QueueRunner::New(queue_runner_defs_[i], coordinator_.get(), &queue_runner)); TF_RETURN_IF_ERROR(queue_runner->StartAndCollectCostGraph(session_.get(), queue_options)); TF_RETURN_IF_ERROR(coordinator_->RegisterRunner(std::move(queue_runner))); TF_RETURN_IF_ERROR(coordinator_->GetStatus()); } for (int i = 0; i < NumWarmupSteps(); ++i) { TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, nullptr)); } } if (metadata) { TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, metadata)); CostGraphDef queue_costs; TF_RETURN_IF_ERROR(coordinator_->ExportCostGraph(&queue_costs)); MergeCosts(metadata->mutable_cost_graph(), init_metadata_.cost_graph(), queue_costs); } else { TF_RETURN_IF_ERROR(RunWithTimeout(feed, fetch, nullptr)); } last_graph_ = &graph_def; return absl::OkStatus(); } Status SingleMachine::EnablePeakMemoryStats() { EnableCPUAllocatorStats(); cpu_allocator_stats_enabled_ = true; return absl::OkStatus(); } Status SingleMachine::GetPeakMemoryUsage( std::unordered_map<string, uint64>* device_peak_memory) const { if (!cpu_allocator_stats_enabled_) { return Status(absl::StatusCode::kInvalidArgument, "Tracking allocation for CPU is not enabled."); } const DeviceMgr* device_mgr; TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr)); std::vector<Device*> devices = device_mgr->ListDevices(); device_peak_memory->clear(); for (Device* device : devices) { auto* allocator = device->GetAllocator(AllocatorAttributes()); if (!allocator->TracksAllocationSizes()) { return Status(absl::StatusCode::kInvalidArgument, "Tracking allocation is not enabled."); } absl::optional<AllocatorStats> stats = allocator->GetStats(); (*device_peak_memory)[device->name()] = (stats ? 
stats->peak_bytes_in_use : 0); } return absl::OkStatus(); } Status SingleMachine::RunWithTimeout( const std::vector<std::pair<string, Tensor>>& feed, const std::vector<string>& fetch, RunMetadata* run_metadata) { return RunWithTimeout(feed, fetch, run_metadata, timeout_s_); } Status SingleMachine::RunWithTimeout( const std::vector<std::pair<string, Tensor>>& feed, const std::vector<string>& fetch, RunMetadata* run_metadata, int64_t timeout_s) { { mutex_lock l(close_mu_); CHECK(!closing_); } auto status = std::make_shared<Status>(); auto local_metadata = std::make_shared<RunMetadata>(); const bool executed_in_time = ExecuteWithTimeout( [this, status, local_metadata, feed, fetch]() { *status = session_->Run(run_options_, feed, {}, fetch, nullptr, local_metadata.get()); }, timeout_s * 1000, thread_pool_.get()); if (!executed_in_time) { return absl::DeadlineExceededError(absl::StrCat( "Failed to run the graph after ", timeout_s, " seconds, aborting")); } else if (run_metadata && status->ok()) { *run_metadata = *local_metadata; } return *status; } Status SingleMachine::CloseSession(bool use_timeout) { if (!session_ || !thread_pool_) { return absl::OkStatus(); } { mutex_lock l(close_mu_); if (!closing_) { closing_ = true; } } const bool executed_in_time = ExecuteWithTimeout( [&]() { if (this->coordinator_) { this->coordinator_->RequestStop().IgnoreError(); while (!this->coordinator_->AllRunnersStopped()) { Env::Default()->SleepForMicroseconds(1000000); } this->session_->Close().IgnoreError(); this->coordinator_.reset(); } else { this->session_->Close().IgnoreError(); } mutex_lock l2(close_mu_); closing_ = false; }, use_timeout ? timeout_s_ * 1000 : -1, thread_pool_.get()); if (!executed_in_time) { return absl::UnavailableError( absl::StrCat("Failed to close the previous session after ", timeout_s_, " seconds, aborting")); } return absl::OkStatus(); } Status SingleMachine::ShutdownSession() { TF_RETURN_IF_ERROR(CloseSession(true )); auto n = std::make_shared<Notification>(); Env::Default()->SchedClosure([this, n]() { thread_pool_.reset(); n->Notify(); }); int64_t timeout_us = 1000000ll * timeout_s_; const bool notified = WaitForNotificationWithTimeout(n.get(), timeout_us); if (!notified) { return absl::UnavailableError(absl::StrCat( "The session is still running graphs after ", timeout_s_, " seconds")); } return absl::OkStatus(); } Status SingleMachine::ResetSession() { if (session_) { LOG(INFO) << "Cleaning up previous session"; TF_RETURN_IF_ERROR(ShutdownSession()); session_.reset(); } LOG(INFO) << "Starting new session"; thread_pool_ = std::make_unique<thread::ThreadPool>( Env::Default(), SanitizeThreadSuffix("single_machine"), 2); session_.reset(NewSession(options_)); if (!session_) { return absl::UnknownError("Failed to create session"); } coordinator_ = std::make_unique<Coordinator>(); device_set_ = std::make_unique<DeviceSet>(); const DeviceMgr* device_mgr; TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr)); for (auto d : device_mgr->ListDevices()) { device_set_->AddDevice(d); } return absl::OkStatus(); } void SingleMachine::MergeCosts(CostGraphDef* graph_costs, const CostGraphDef& init_costs, const CostGraphDef& queue_costs) { graph_costs->mutable_node()->Reserve(graph_costs->node_size() + init_costs.node_size() + queue_costs.node_size()); std::unordered_set<string> nodes_seen; int queue_costs_id_offset = graph_costs->node_size(); for (const auto& node : graph_costs->node()) { nodes_seen.insert(node.name()); if (node.id() >= queue_costs_id_offset) { queue_costs_id_offset = 
node.id() + 1; } } int init_costs_id_offset = queue_costs_id_offset + queue_costs.node_size(); for (const auto& node : queue_costs.node()) { if (nodes_seen.find(node.name()) != nodes_seen.end()) { continue; } auto* new_node = graph_costs->add_node(); new_node->MergeFrom(node); new_node->set_id(node.id() + queue_costs_id_offset); if (new_node->id() >= init_costs_id_offset) { init_costs_id_offset = new_node->id() + 1; } for (auto& input_info : *new_node->mutable_input_info()) { input_info.set_preceding_node(input_info.preceding_node() + queue_costs_id_offset); } for (auto& control_input : *new_node->mutable_control_input()) { control_input += queue_costs_id_offset; } } for (const auto& node : init_costs.node()) { if (nodes_seen.find(node.name()) != nodes_seen.end()) { continue; } auto* new_node = graph_costs->add_node(); new_node->MergeFrom(node); new_node->set_id(node.id() + init_costs_id_offset); for (auto& input_info : *new_node->mutable_input_info()) { input_info.set_preceding_node(input_info.preceding_node() + init_costs_id_offset); } for (auto& control_input : *new_node->mutable_control_input()) { control_input += init_costs_id_offset; } } } Status SingleMachine::ClearAllocatorStats() const { if (!cpu_allocator_stats_enabled_) { return Status(absl::StatusCode::kInvalidArgument, "Tracking allocation for CPU is not enabled."); } const DeviceMgr* device_mgr; TF_RETURN_IF_ERROR(session_->LocalDeviceManager(&device_mgr)); std::vector<Device*> devices = device_mgr->ListDevices(); for (Device* device : devices) { auto* allocator = device->GetAllocator(AllocatorAttributes()); if (!allocator->TracksAllocationSizes()) { return Status(absl::StatusCode::kInvalidArgument, "Tracking allocation is not enabled."); } if (!allocator->ClearStats()) { return Status( absl::StatusCode::kInvalidArgument, absl::StrCat("Clearing allocation stats is not supported for ", device->name())); } } return absl::OkStatus(); } } }
#include "tensorflow/core/grappler/clusters/single_machine.h" #include <memory> #include "tensorflow/cc/framework/scope.h" #include "tensorflow/cc/ops/resource_variable_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/framework/cost_graph.pb.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h" #include "tensorflow/core/grappler/utils.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/queue_runner.pb.h" namespace tensorflow { namespace grappler { namespace { class SingleMachineTest : public ::testing::Test { public: void SetUp() override { #if TENSORFLOW_USE_ROCM int timeout_s = 10; #else int timeout_s = 5; #endif #ifdef THREAD_SANITIZER timeout_s *= 5; #endif cluster_ = std::make_unique<SingleMachine>(timeout_s, 3 , 0 ); TF_CHECK_OK(cluster_->EnablePeakMemoryStats()); TF_CHECK_OK(cluster_->Provision()); } void TearDown() override { if (cluster_) { TF_CHECK_OK(cluster_->Shutdown()); } cluster_.reset(); } protected: std::unique_ptr<SingleMachine> cluster_; }; TEST_F(SingleMachineTest, ClusterType) { CHECK_EQ("single_machine", cluster_->type()); } TEST_F(SingleMachineTest, CostModel) { TrivialTestGraphInputYielder fake_input(4, 1, 10, false, cluster_->GetDeviceNames()); GrapplerItem item; CHECK(fake_input.NextItem(&item)); TF_CHECK_OK(cluster_->Initialize(item)); RunMetadata metadata; const int64_t start_micros = Env::Default()->NowMicros(); TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata)); const int64_t run_duration_micros = Env::Default()->NowMicros() - start_micros; EXPECT_LE(4, metadata.cost_graph().node_size()); for (const auto& node : metadata.cost_graph().node()) { if (node.name()[0] == '_' || node.name().find("/_") != string::npos) { continue; } #ifndef INTEL_MKL EXPECT_EQ(1, node.output_info_size()); #endif EXPECT_LE(8, node.output_info(0).size()); const TensorShapeProto& shape = node.output_info(0).shape(); EXPECT_EQ(2, shape.dim_size()); EXPECT_EQ(10, shape.dim(0).size()); EXPECT_EQ(1, shape.dim(1).size()); EXPECT_LE(0, node.compute_cost()); EXPECT_GE(run_duration_micros, node.compute_cost()); } } TEST_F(SingleMachineTest, Queue) { TrivialTestGraphInputYielder fake_input(4, 1, 10, true, cluster_->GetDeviceNames()); GrapplerItem item; CHECK(fake_input.NextItem(&item)); TF_CHECK_OK(cluster_->Initialize(item)); RunMetadata metadata; TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata)); } TEST_F(SingleMachineTest, MultipleItems) { TrivialTestGraphInputYielder fake_input(4, 1, 10, false, cluster_->GetDeviceNames()); for (int i = 0; i < 3; ++i) { GrapplerItem item; CHECK(fake_input.NextItem(&item)); TF_CHECK_OK(cluster_->Initialize(item)); RunMetadata metadata1; TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata1)); RunMetadata metadata2; TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata2)); EXPECT_LE(6, metadata1.cost_graph().node_size()); for (const auto& node : metadata1.cost_graph().node()) { if (node.name()[0] == '_' || node.name().find("/_") != string::npos || node.name() == "queue") { continue; } #ifndef INTEL_MKL EXPECT_EQ(1, node.output_info_size()); #endif const TensorShapeProto& shape = node.output_info(0).shape(); EXPECT_EQ(2, shape.dim_size()); 
EXPECT_EQ(10, shape.dim(0).size()); EXPECT_EQ(1, shape.dim(1).size()); } for (int i = 0; i < metadata1.cost_graph().node_size(); ++i) { metadata1.mutable_cost_graph()->mutable_node(i)->set_compute_cost(0); metadata1.clear_step_stats(); } for (int i = 0; i < metadata2.cost_graph().node_size(); ++i) { metadata2.mutable_cost_graph()->mutable_node(i)->set_compute_cost(0); metadata2.clear_step_stats(); } string s1; ::tensorflow::protobuf::TextFormat::PrintToString(metadata1, &s1); string s2; ::tensorflow::protobuf::TextFormat::PrintToString(metadata2, &s2); EXPECT_EQ(s1, s2); } } TEST_F(SingleMachineTest, GraphOptimizations) { tensorflow::Scope root = tensorflow::Scope::NewRootScope(); auto zero = ops::Const(root.WithOpName("zero"), 0.0f, {2, 3}); auto one = ops::Const(root.WithOpName("one"), 1.0f, {2, 3}); auto add = ops::Add(root.WithOpName("add"), zero, one); auto square = ops::Square(root.WithOpName("square"), add); auto new_shape = ops::Const(root.WithOpName("new_shape"), {3, -1}, {2}); auto reshaped = ops::Reshape(root.WithOpName("reshaped"), square, new_shape); auto final_shape = ops::Shape(root.WithOpName("final_shape"), reshaped); auto expected_shape = ops::Const(root.WithOpName("expected_shape"), {3, 2}, {2}); auto valid = ops::Equal(root.WithOpName("valid"), final_shape, expected_shape); auto all_dims = ops::Const(root.WithOpName("all_dims"), {0}, {1}); auto all_valid = ops::All(root.WithOpName("all_valid"), valid, all_dims); auto assert_valid = ops::Assert(root.WithOpName("assert_valid"), all_valid, {final_shape.output}); GrapplerItem item; TF_CHECK_OK(root.ToGraphDef(&item.graph)); item.fetch.push_back("assert_valid"); for (auto& node : *item.graph.mutable_node()) { node.set_device("/cpu:0"); } TF_CHECK_OK(cluster_->Shutdown()); cluster_->DisableOptimizer(true); TF_CHECK_OK(cluster_->Provision()); RunMetadata metadata; TF_CHECK_OK(cluster_->Initialize(item)); TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata)); std::set<string> cost_nodes; for (const auto& node : metadata.cost_graph().node()) { #ifdef INTEL_MKL if (node.name()[0] == '_' || node.name().find("/_") != string::npos) { continue; } cost_nodes.insert(node.name()); #else if (node.name()[0] != '_') { cost_nodes.insert(node.name()); } #endif } const std::set<string> expected_cost_nodes = { "zero", "one", "add", "square", "new_shape", "reshaped", "final_shape", "expected_shape", "valid", "all_dims", "all_valid", "assert_valid"}; EXPECT_EQ(expected_cost_nodes, cost_nodes); } TEST_F(SingleMachineTest, TimeOuts) { tensorflow::Scope root = tensorflow::Scope::NewRootScope(); auto q = ops::FIFOQueue(root.WithOpName("queue"), {DataType::DT_INT32}); auto dequeue = ops::QueueDequeue(root.WithOpName("dequeue"), q, {DataType::DT_INT32}); GrapplerItem item; TF_CHECK_OK(root.ToGraphDef(&item.graph)); item.fetch.push_back("dequeue"); TF_CHECK_OK(cluster_->Initialize(item)); RunMetadata metadata; Status s1 = cluster_->Run(item.graph, item.feed, item.fetch, &metadata); EXPECT_TRUE(errors::IsDeadlineExceeded(s1)); Status s2 = cluster_->Run(item.graph, item.feed, item.fetch, &metadata); EXPECT_TRUE(errors::IsDeadlineExceeded(s2)); } static void RunInfiniteTFLoop() { GrapplerItem item; NodeDef* shp = item.graph.add_node(); shp->set_name("shape"); shp->set_op("Const"); (*shp->mutable_attr())["dtype"].set_type(DT_INT32); Tensor shp_tensor(DT_INT32, TensorShape({1})); shp_tensor.flat<int32>()(0) = 1; shp_tensor.AsProtoTensorContent( (*shp->mutable_attr())["value"].mutable_tensor()); NodeDef* r = item.graph.add_node(); 
r->set_name("random"); r->set_op("RandomUniform"); (*r->mutable_attr())["dtype"].set_type(DT_FLOAT); (*r->mutable_attr())["T"].set_type(DT_INT32); *r->add_input() = "shape"; NodeDef* e = item.graph.add_node(); e->set_name("while/Enter"); e->set_op("Enter"); (*e->mutable_attr())["T"].set_type(DT_FLOAT); (*e->mutable_attr())["frame_name"].set_s("while/while/"); *e->add_input() = "random"; NodeDef* m = item.graph.add_node(); m->set_name("while/Merge"); m->set_op("Merge"); (*m->mutable_attr())["T"].set_type(DT_FLOAT); (*m->mutable_attr())["N"].set_i(2); *m->add_input() = "while/Enter"; *m->add_input() = "while/NextIteration"; NodeDef* t = item.graph.add_node(); t->set_name("always_true"); t->set_op("Const"); (*t->mutable_attr())["dtype"].set_type(DT_BOOL); *t->add_input() = "^while/Merge"; Tensor true_tensor(DT_BOOL, TensorShape()); true_tensor.flat<bool>()(0) = true; true_tensor.AsProtoTensorContent( (*t->mutable_attr())["value"].mutable_tensor()); NodeDef* c = item.graph.add_node(); c->set_name("while/LoopCond"); c->set_op("LoopCond"); *c->add_input() = "always_true"; NodeDef* s = item.graph.add_node(); s->set_name("while/Switch"); (*s->mutable_attr())["T"].set_type(DT_FLOAT); s->set_op("Switch"); *s->add_input() = "while/Merge"; *s->add_input() = "while/LoopCond"; NodeDef* i = item.graph.add_node(); i->set_name("while/Identity"); i->set_op("Identity"); (*i->mutable_attr())["T"].set_type(DT_FLOAT); *i->add_input() = "while/Switch:1"; NodeDef* n = item.graph.add_node(); n->set_name("while/NextIteration"); n->set_op("NextIteration"); (*n->mutable_attr())["T"].set_type(DT_FLOAT); *n->add_input() = "while/Identity"; NodeDef* x = item.graph.add_node(); x->set_name("while/Exit"); x->set_op("Exit"); (*x->mutable_attr())["T"].set_type(DT_FLOAT); *x->add_input() = "while/Switch"; item.fetch.push_back("while/Exit"); SingleMachine cluster(5, 3, 0); TF_CHECK_OK(cluster.Provision()); TF_CHECK_OK(cluster.Initialize(item)); Status s1 = cluster.Run(item.graph, item.feed, item.fetch, nullptr); if (!errors::IsDeadlineExceeded(s1)) { LOG(ERROR) << "Expected 'deadline exceeded' error, got " << s1; _exit(1); } Status s2 = cluster.Shutdown(); if (!errors::IsUnavailable(s2)) { LOG(ERROR) << "Expected 'unavailable' error, got " << s2; _exit(2); } _exit(0); } TEST_F(SingleMachineTest, InfiniteLoops) { #if !(TENSORFLOW_USE_ROCM) TF_CHECK_OK(cluster_->Shutdown()); EXPECT_EXIT(RunInfiniteTFLoop(), ::testing::ExitedWithCode(0), ".*"); #endif } TEST_F(SingleMachineTest, InitializationMemory) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); int batch_size = 10; Output x = ops::RandomNormal(s.WithOpName("x"), {batch_size, 1}, DataType::DT_FLOAT); Output v = ops::Variable(s.WithOpName("v"), TensorShape({batch_size, 1}), DataType::DT_FLOAT); Output init = ops::Assign(s.WithOpName("init"), v, x); GrapplerItem item; TF_CHECK_OK(s.ToGraphDef(&item.graph)); item.init_ops.push_back(init.name()); item.fetch.push_back(v.name()); TF_CHECK_OK(cluster_->Initialize(item)); RunMetadata metadata; TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata)); bool found = false; for (const auto& node : metadata.cost_graph().node()) { found |= (node.name() == NodeName(init.name())); } EXPECT_TRUE(found); } namespace { template <class T> inline void SetNodeAttr(const string& key, const T& value, NodeDef* node) { AttrValue attr_value; SetAttrValue(value, &attr_value); auto* attr_map = node->mutable_attr(); (*attr_map)[key] = attr_value; } template <> inline void SetNodeAttr(const string& key, const Tensor& tensor, 
NodeDef* node) { TensorProto tensor_proto; tensor.AsProtoTensorContent(&tensor_proto); SetNodeAttr(key, tensor_proto, node); } } TEST_F(SingleMachineTest, PersistentMemory) { GrapplerItem item; const DataType key_dtype = DT_INT64; const DataType data_dtype = DT_INT64; NodeDef* hashtable_node = item.graph.add_node(); hashtable_node->set_op("HashTable"); hashtable_node->set_name("hash_table"); SetNodeAttr("key_dtype", key_dtype, hashtable_node); SetNodeAttr("value_dtype", data_dtype, hashtable_node); NodeDef* keys_node = item.graph.add_node(); keys_node->set_op("Const"); keys_node->set_name("table_keys"); SetNodeAttr("dtype", key_dtype, keys_node); Tensor keys(key_dtype, TensorShape{2}); keys.vec<int64_t>()(0) = 123; keys.vec<int64_t>()(1) = 321; SetNodeAttr("value", keys, keys_node); NodeDef* values_node = item.graph.add_node(); values_node->set_op("Const"); values_node->set_name("table_values"); SetNodeAttr("dtype", data_dtype, values_node); Tensor values(data_dtype, TensorShape{2}); values.vec<int64_t>()(0) = 789; values.vec<int64_t>()(1) = 987; SetNodeAttr("value", values, values_node); NodeDef* init_table_node = item.graph.add_node(); init_table_node->set_op("InitializeTable"); init_table_node->set_name("initialize_table"); SetNodeAttr("Tkey", key_dtype, init_table_node); SetNodeAttr("Tval", data_dtype, init_table_node); *init_table_node->add_input() = "hash_table"; *init_table_node->add_input() = "table_keys"; *init_table_node->add_input() = "table_values"; item.init_ops.push_back(init_table_node->name()); NodeDef* query_node = item.graph.add_node(); query_node->set_op("Const"); query_node->set_name("query"); SetNodeAttr("dtype", key_dtype, query_node); Tensor query(key_dtype, TensorShape({})); query.flat<int64_t>()(0) = 0; SetNodeAttr("value", query, query_node); NodeDef* default_value_node = item.graph.add_node(); default_value_node->set_op("Const"); default_value_node->set_name("default_table_value"); SetNodeAttr("dtype", data_dtype, default_value_node); Tensor dflt(data_dtype, TensorShape({})); dflt.flat<int64_t>()(0) = 456; SetNodeAttr("value", dflt, default_value_node); NodeDef* lookup_node = item.graph.add_node(); lookup_node->set_op("LookupTableFind"); lookup_node->set_name("table_lookup"); SetNodeAttr("Tin", key_dtype, lookup_node); SetNodeAttr("Tout", data_dtype, lookup_node); *lookup_node->add_input() = "hash_table"; *lookup_node->add_input() = "query"; *lookup_node->add_input() = "default_table_value"; item.fetch.push_back(lookup_node->name()); TF_CHECK_OK(cluster_->Initialize(item)); RunMetadata metadata; TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata)); bool found_table_init = false; bool found_hashtable = false; for (const auto& node : metadata.cost_graph().node()) { if (node.name() == "hash_table") { found_hashtable = true; EXPECT_EQ(0, node.persistent_memory_size()); } else if (node.name() == "initialize_table") { found_table_init = true; EXPECT_LE(4 * sizeof(int64_t), node.persistent_memory_size()); } } EXPECT_TRUE(found_table_init); EXPECT_TRUE(found_hashtable); } GrapplerItem CreateGrapplerItemWithResourceMemory() { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output a = ops::Variable(s.WithOpName("a"), TensorShape({128, 256}), DataType::DT_FLOAT); Output a_init = ops::RandomNormal(s.WithOpName("a/init"), {128, 256}, DataType::DT_FLOAT); Output a_init_assign = ops::Assign(s.WithOpName("a/init/assign"), a, a_init); Output b = ops::VarHandleOp(s.WithOpName("b"), DataType::DT_FLOAT, {256, 512}); Output b_read = 
ops::ReadVariableOp(s.WithOpName("b/read"), b, DataType::DT_FLOAT); Output b_init = ops::RandomNormal(s.WithOpName("b/init"), {256, 512}, DataType::DT_FLOAT); auto b_init_assign = ops::AssignVariableOp(s.WithOpName("b/init/assign"), b, b_init); ops::FIFOQueue queue(s.WithOpName("queue"), {DataType::DT_STRING}); Output some_string = ops::Const(s.WithOpName("some_string"), string("nothing")); ops::QueueEnqueue enqueue(s.WithOpName("enqueue"), queue, {some_string}); ops::QueueDequeue dequeue(s.WithOpName("dequeue"), queue, {DataType::DT_STRING}); ops::IdentityReader reader(s.WithOpName("identity_reader")); ops::ReaderRead read(s.WithOpName("read_from_queue"), reader, queue); Output var_mul = ops::MatMul(s.WithOpName("var_matmul"), a, b_read); GrapplerItem item; TF_CHECK_OK(s.ToGraphDef(&item.graph)); QueueRunnerDef queue_runner; queue_runner.set_queue_name("queue"); *queue_runner.add_enqueue_op_name() = "enqueue"; item.queue_runners.push_back(queue_runner); item.init_ops.push_back("a/init/assign"); item.init_ops.push_back("b/init/assign"); item.fetch.push_back("var_matmul"); item.fetch.push_back("dequeue"); return item; } #if defined(PLATFORM_GOOGLE) TEST_F(SingleMachineTest, ReleaseMemoryAfterDestruction) { GrapplerItem item = CreateGrapplerItemWithResourceMemory(); TF_CHECK_OK(cluster_->Initialize(item)); std::unordered_map<string, uint64> device_peak_memory_before; TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory_before)); EXPECT_EQ(device_peak_memory_before.size(), 1); EXPECT_LT(device_peak_memory_before.begin()->second, 400); RunMetadata metadata; TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata)); std::unordered_map<string, uint64> device_peak_memory; TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory)); EXPECT_EQ(device_peak_memory.size(), 1); EXPECT_GT(device_peak_memory.begin()->second, 0); TF_CHECK_OK(cluster_->Shutdown()); TF_CHECK_OK(cluster_->Provision()); std::unordered_map<string, uint64> device_peak_memory_after; TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory_after)); TF_CHECK_OK(cluster_->Shutdown()); EXPECT_EQ(device_peak_memory_before.size(), 1); EXPECT_EQ(device_peak_memory_after.size(), 1); EXPECT_LT(device_peak_memory_before.begin()->second, 400); EXPECT_LT(device_peak_memory_after.begin()->second, 400); } TEST_F(SingleMachineTest, PeakMemory) { GrapplerItem item = CreateGrapplerItemWithResourceMemory(); TF_CHECK_OK(cluster_->Initialize(item)); RunMetadata metadata; TF_CHECK_OK(cluster_->Run(item.graph, item.feed, item.fetch, &metadata)); std::unordered_map<string, uint64> device_peak_memory; TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory)); ASSERT_NE( device_peak_memory.find("/job:localhost/replica:0/task:0/device:CPU:0"), device_peak_memory.end()); uint64 cpu_memory = device_peak_memory["/job:localhost/replica:0/task:0/device:CPU:0"]; EXPECT_GT(cpu_memory, 0); TF_CHECK_OK(cluster_->Shutdown()); TF_CHECK_OK(cluster_->Provision()); device_peak_memory.clear(); TF_CHECK_OK(cluster_->GetPeakMemoryUsage(&device_peak_memory)); TF_CHECK_OK(cluster_->Shutdown()); ASSERT_NE( device_peak_memory.find("/job:localhost/replica:0/task:0/device:CPU:0"), device_peak_memory.end()); cpu_memory = device_peak_memory["/job:localhost/replica:0/task:0/device:CPU:0"]; EXPECT_LT(cpu_memory, 200); } TEST_F(SingleMachineTest, PeakMemoryStatsNotEnabled) { GrapplerItem item = CreateGrapplerItemWithResourceMemory(); TF_CHECK_OK(cluster_->Shutdown()); cluster_.reset(); SingleMachine cluster(60 , 3 , 0 ); 
TF_CHECK_OK(cluster.Provision()); TF_CHECK_OK(cluster.Initialize(item)); std::unordered_map<string, uint64> device_peak_memory; Status s = cluster.GetPeakMemoryUsage(&device_peak_memory); TF_CHECK_OK(cluster.Shutdown()); ASSERT_FALSE(s.ok()); EXPECT_TRUE(errors::IsInvalidArgument(s)); } #endif } } }
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/clusters/single_machine.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/clusters/single_machine_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
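Note that Code Url and Test Code Url are fully determined by the other provenance fields: each is the GitHub blob URL for the commit-pinned file path. A small sketch of that relationship (the helper name build_blob_url is my own, for illustration):

# Sketch: a record's GitHub URLs can be rebuilt from its provenance columns.
def build_blob_url(repository_name: str, commit_hash: str, file_path: str) -> str:
    return f"https://github.com/{repository_name}/blob/{commit_hash}/{file_path}"

# Matches the Code Url of the record above:
assert build_blob_url(
    "tensorflow/tensorflow",
    "4a29233a7b7c1a3a4294e4ccdd1772f9083944ea",
    "tensorflow/core/grappler/clusters/single_machine.cc",
) == ("https://github.com/tensorflow/tensorflow/blob/"
      "4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/"
      "tensorflow/core/grappler/clusters/single_machine.cc")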
ID: e8b78d62-c279-4dac-875d-cb2ea61040db
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: inject_io_prefetch
File Path in Repository: tensorflow/core/grappler/optimizers/data/inject_io_prefetch.cc
File Path for Unit Test: tensorflow/core/grappler/optimizers/data/inject_io_prefetch_test.cc

Code:
#include "tensorflow/core/grappler/optimizers/data/inject_io_prefetch.h" #include <array> #include <cstdint> #include <string> #include <utility> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/model.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/grappler/clusters/cluster.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/grappler/mutable_graph_view.h" #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h" #include "tensorflow/core/grappler/optimizers/data/graph_utils.h" #include "tensorflow/core/protobuf/rewriter_config.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace grappler { namespace { constexpr char kAutotune[] = "autotune"; constexpr char kFunctionAttrKey[] = "f"; constexpr char kParallelInterleave[] = "ParallelInterleaveDataset"; constexpr char kParallelMap[] = "ParallelMapDataset"; constexpr char kPrefetch[] = "PrefetchDataset"; constexpr std::array<const char*, 5> kAsync = { "MapAndBatchDataset", "ParallelBatchDataset", "ParallelInterleaveDataset", "ParallelMapDataset", "PrefetchDataset"}; constexpr std::array<const char*, 6> kIo = { "ArrayRecordDataset", "FixedLengthRecordDataset", "RecordIODataset", "SSTableDataset", "TextLineDataset", "TFRecordDataset"}; bool IsAsync(const NodeDef* node) { if (!node) { return false; } return absl::c_any_of(kAsync, [&](const char* dataset) { return data::MatchesAnyVersion(dataset, node->op()); }); } bool IsIo(const NodeDef* node) { if (!node) { return false; } return absl::c_any_of(kIo, [&](const char* dataset) { return data::MatchesAnyVersion(dataset, node->op()); }); } bool IsIo(const FunctionDef& function) { for (const auto& node : function.node_def()) { if (IsIo(&node)) { return true; } } return false; } bool IsIoFunction(const std::string& function_name, const MutableGraphView& graph) { for (const auto& function : graph.graph()->library().function()) { if (function.signature().name() == function_name) { return IsIo(function); } } return false; } bool HasIoFunction(const NodeDef* node, const MutableGraphView& graph) { if (auto it = node->attr().find(kFunctionAttrKey); it != node->attr().end()) { return IsIoFunction(it->second.func().name(), graph); } return false; } bool IsParallelInterleaveWithIo(const NodeDef* node, const MutableGraphView& graph) { if (!node || !data::MatchesAnyVersion(kParallelInterleave, node->op())) { return false; } return HasIoFunction(node, graph); } bool IsParallelMap(const NodeDef* node) { if (!node) { return false; } return data::MatchesAnyVersion(kParallelMap, node->op()); } bool IsPrefetch(const NodeDef* node) { if (!node) { return false; } return node->op() == kPrefetch; } struct Edge { NodeDef* input; NodeDef* output; template <typename H> friend H AbslHashValue(H h, const Edge& e) { return H::combine(std::move(h), e.input, e.output); } friend bool operator==(const Edge& lhs, const Edge& rhs) { return lhs.input == rhs.input && lhs.output == rhs.output; } }; absl::StatusOr<bool> InjectPrefetch(const Edge& edge, MutableGraphView& graph) { NodeDef prefetch; graph_utils::SetUniqueGraphNodeName( 
absl::StrCat("inject/io_prefetch", edge.input->name()), graph.graph(), &prefetch); prefetch.set_op(kPrefetch); *prefetch.mutable_input()->Add() = edge.input->name(); NodeDef* autotune_value = graph_utils::AddScalarConstNode(data::model::kAutotune, &graph); *prefetch.mutable_input()->Add() = autotune_value->name(); if (!graph_utils::CopyShapesAndTypesAttrs(*edge.input, &prefetch)) { return false; } TF_RETURN_IF_ERROR(graph_utils::SetMetadataName(prefetch.name(), &prefetch)); NodeDef* added_prefetch = graph.AddNode(std::move(prefetch)); TF_RETURN_IF_ERROR( graph.UpdateFanouts(edge.input->name(), added_prefetch->name())); return true; } void GetPrefetchInjectionEdges( const MutableGraphView& graph, NodeDef* node, NodeDef* output, NodeDef* output_output, NodeDef* last_async, NodeDef* last_async_output, NodeDef* last_last_async, absl::flat_hash_set<Edge>& prefetch_injection_edges) { if (!node) { return; } if (IsAsync(output)) { last_last_async = last_async; last_async_output = output_output; last_async = output; } if (IsIo(node)) { if (IsParallelMap(last_async) && !IsPrefetch(last_last_async)) { prefetch_injection_edges.insert({last_async, last_async_output}); } return; } if (IsParallelInterleaveWithIo(node, graph)) { if (!IsPrefetch(last_async)) { prefetch_injection_edges.insert({node, output}); } return; } for (int64_t i = 0; i < node->input_size(); ++i) { NodeDef* input = graph_utils::GetInputNode(*node, graph, i); GetPrefetchInjectionEdges(graph, input, node, output, last_async, last_async_output, last_last_async, prefetch_injection_edges); } } absl::StatusOr<absl::flat_hash_set<Edge>> GetPrefetchInjectionEdges( const GrapplerItem& item, const MutableGraphView& graph) { if (graph_utils::IsItemDerivedFromFunctionDef(item, graph)) { return absl::flat_hash_set<Edge>(); } if (item.fetch.size() != 1) { return absl::InvalidArgumentError( absl::StrCat("Expected only one fetch node but there were ", item.fetch.size(), ": ", absl::StrJoin(item.fetch, ", "))); } NodeDef* sink_node = graph.GetNode(item.fetch.at(0)); NodeDef* last_node = graph_utils::GetInputNode(*sink_node, graph); absl::flat_hash_set<Edge> prefetch_injection_edges; GetPrefetchInjectionEdges( graph, last_node, sink_node, nullptr, nullptr, nullptr, nullptr, prefetch_injection_edges); return prefetch_injection_edges; } } absl::Status InjectIoPrefetchEligible::OptimizeAndCollectStats( Cluster* cluster, const GrapplerItem& item, GraphDef* output, OptimizationStats* stats) { *output = item.graph; if (!autotune_) { return absl::OkStatus(); } MutableGraphView graph(output); TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Edge> prefetch_injection_edges, GetPrefetchInjectionEdges(item, graph)); stats->num_changes += prefetch_injection_edges.size(); return absl::OkStatus(); } absl::Status InjectIoPrefetch::OptimizeAndCollectStats( Cluster* cluster, const GrapplerItem& item, GraphDef* output, OptimizationStats* stats) { *output = item.graph; if (!autotune_) { return absl::OkStatus(); } MutableGraphView graph(output); TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Edge> prefetch_injection_edges, GetPrefetchInjectionEdges(item, graph)); for (const auto& edge : prefetch_injection_edges) { TF_ASSIGN_OR_RETURN(bool success, InjectPrefetch(edge, graph)); stats->num_changes += success; } return absl::OkStatus(); } absl::Status InjectIoPrefetch::Init( const tensorflow::RewriterConfig_CustomGraphOptimizer* config) { if (!config) { return absl::OkStatus(); } const std::string& autotune = config->parameter_map().at(kAutotune).s(); if (autotune == "true") { autotune_ = 
true; } else if (autotune == "false") { autotune_ = false; } else { return absl::InvalidArgumentError(absl::StrCat( "Received an invalid value for parameter ", kAutotune, ": ", autotune)); } return absl::OkStatus(); } REGISTER_GRAPH_OPTIMIZER_AS(InjectIoPrefetch, "inject_io_prefetch"); } }
#include "tensorflow/core/grappler/optimizers/data/inject_io_prefetch.h" #include <string> #include <gtest/gtest.h> #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h" #include "tensorflow/core/grappler/optimizers/data/graph_utils.h" namespace tensorflow { namespace grappler { namespace { using test::function::GDef; using test::function::NDef; FunctionDef InterleaveIoFunction(const std::string& name) { return FunctionDefHelper::Create( name, {"args_0: int64"}, {"identity: variant"}, {}, { {{"key_prefix"}, "Const", {}, {{"dtype", DT_STRING}}}, {{"start_key"}, "Const", {}, {{"dtype", DT_STRING}}}, {{"stop_key"}, "Const", {}, {{"dtype", DT_STRING}}}, {{"SSTableDataset"}, "SSTableDataset", {"args_0", "key_prefix:output:0", "start_key:output:0", "stop_key:output:0"}, {}}, }, {}); } GraphDef EligibleInterleaveCase() { return GDef( {NDef("files_string_1", "Const", {}, {{"value", "file1file2"}, {"dtype", DT_STRING}}), NDef("files_tensor_1", "TensorSliceDataset", {"files_1_string"}, {{"is_files", true}}), NDef("cycle_length_1", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("block_length_1", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("num_parallel_calls_1", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), graph_tests_utils::MakeParallelInterleaveV4Node( "interleave_1", "files_tensor_1", "cycle_length_1", "block_length_1", "num_parallel_calls_1", "io_1", "default"), NDef("files_string_2", "Const", {}, {{"value", "file1file2"}, {"dtype", DT_STRING}}), NDef("files_tensor_2", "TensorSliceDataset", {"files_2_string"}, {{"is_files", true}}), NDef("cycle_length_2", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("block_length_2", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), NDef("num_parallel_calls_2", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), graph_tests_utils::MakeParallelInterleaveV4Node( "interleave_2", "files_tensor_2", "cycle_length_2", "block_length_2", "num_parallel_calls_2", "io_2", "default"), NDef("zip", "ZipDataset", {"interleave_1", "interleave_2"}, {}), NDef("Sink", "Identity", {"zip"}, {})}, {InterleaveIoFunction("io_1"), InterleaveIoFunction("io_2")}); } GraphDef EligibleMapCase() { return GDef( {NDef("files_1", "Const", {}, {{"value", "file1file2"}, {"dtype", DT_STRING}}), NDef("key_prefix_1", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}), NDef("start_key_1", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}), NDef("stop_key_1", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}), NDef("io_1", "SSTableDataset", {"files_1", "key_prefix_1", "start_key_1", "stop_key_1"}, {}), NDef("num_parallel_calls_1", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), graph_tests_utils::MakeParallelMapV2Node( "map_1", "io_1", "num_parallel_calls_1", "noop_1", "default", false), NDef("files_2", "Const", {}, {{"value", "file1file2"}, {"dtype", DT_STRING}}), NDef("key_prefix_2", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}), NDef("start_key_2", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}), NDef("stop_key_2", "Const", {}, {{"value", 1}, {"dtype", DT_STRING}}), NDef("io_2", "SSTableDataset", {"files_2", "key_prefix_2", "start_key_2", 
"stop_key_2"}, {}), NDef("num_parallel_calls_2", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}), graph_tests_utils::MakeParallelMapV2Node( "map_2", "io_2", "num_parallel_calls_2", "noop_2", "default", false), NDef("zip", "ZipDataset", {"map_1", "map_2"}, {}), NDef("Sink", "Identity", {"zip"}, {})}, {}); } TEST(InjectIoPrefetchEligible, EligibleInterleaveCaseHasNoInjection) { GrapplerItem item; item.graph = EligibleInterleaveCase(); item.fetch.push_back("Sink"); InjectIoPrefetchEligible optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); NodeDef zip_node = output.node(graph_utils::FindGraphNodeWithName("zip", output)); for (const auto& input_node_name : zip_node.input()) { NodeDef input_node = output.node( graph_utils::FindGraphNodeWithName(input_node_name, output)); EXPECT_NE(input_node.op(), "PrefetchDataset"); } EXPECT_EQ(item.graph.DebugString(), output.DebugString()); } TEST(InjectIoPrefetchEligible, EligibleMapCaseHasNoInjection) { GrapplerItem item; item.graph = EligibleMapCase(); item.fetch.push_back("Sink"); InjectIoPrefetchEligible optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); NodeDef zip_node = output.node(graph_utils::FindGraphNodeWithName("zip", output)); for (const auto& input_node_name : zip_node.input()) { NodeDef input_node = output.node( graph_utils::FindGraphNodeWithName(input_node_name, output)); EXPECT_NE(input_node.op(), "PrefetchDataset"); } EXPECT_EQ(item.graph.DebugString(), output.DebugString()); } TEST(InjectIoPrefetch, InterleaveCaseHasInjection) { GrapplerItem item; item.graph = EligibleInterleaveCase(); item.fetch.push_back("Sink"); InjectIoPrefetch optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); NodeDef zip_node = output.node(graph_utils::FindGraphNodeWithName("zip", output)); for (const auto& input_node_name : zip_node.input()) { NodeDef input_node = output.node( graph_utils::FindGraphNodeWithName(input_node_name, output)); EXPECT_EQ(input_node.op(), "PrefetchDataset"); } } TEST(InjectIoPrefetch, MapCaseHasInjection) { GrapplerItem item; item.graph = EligibleMapCase(); item.fetch.push_back("Sink"); InjectIoPrefetch optimizer; GraphDef output; TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output)); NodeDef zip_node = output.node(graph_utils::FindGraphNodeWithName("zip", output)); for (const auto& input_node_name : zip_node.input()) { NodeDef input_node = output.node( graph_utils::FindGraphNodeWithName(input_node_name, output)); EXPECT_EQ(input_node.op(), "PrefetchDataset"); } } } } }
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/inject_io_prefetch.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/inject_io_prefetch_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ID: fc3560e5-2b8f-44c2-a833-d7c51b00604e
Language: cpp
Repository Name: google/cel-cpp
File Name: ternary_step
File Path in Repository: eval/eval/ternary_step.cc
File Path for Unit Test: eval/eval/ternary_step_test.cc

Code:
#include "eval/eval/ternary_step.h" #include <cstddef> #include <cstdint> #include <memory> #include <utility> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "base/builtins.h" #include "common/casting.h" #include "common/value.h" #include "eval/eval/attribute_trail.h" #include "eval/eval/direct_expression_step.h" #include "eval/eval/evaluator_core.h" #include "eval/eval/expression_step_base.h" #include "eval/internal/errors.h" #include "internal/status_macros.h" namespace google::api::expr::runtime { namespace { using ::cel::BoolValue; using ::cel::Cast; using ::cel::ErrorValue; using ::cel::InstanceOf; using ::cel::UnknownValue; using ::cel::builtin::kTernary; using ::cel::runtime_internal::CreateNoMatchingOverloadError; inline constexpr size_t kTernaryStepCondition = 0; inline constexpr size_t kTernaryStepTrue = 1; inline constexpr size_t kTernaryStepFalse = 2; class ExhaustiveDirectTernaryStep : public DirectExpressionStep { public: ExhaustiveDirectTernaryStep(std::unique_ptr<DirectExpressionStep> condition, std::unique_ptr<DirectExpressionStep> left, std::unique_ptr<DirectExpressionStep> right, int64_t expr_id) : DirectExpressionStep(expr_id), condition_(std::move(condition)), left_(std::move(left)), right_(std::move(right)) {} absl::Status Evaluate(ExecutionFrameBase& frame, cel::Value& result, AttributeTrail& attribute) const override { cel::Value condition; cel::Value lhs; cel::Value rhs; AttributeTrail condition_attr; AttributeTrail lhs_attr; AttributeTrail rhs_attr; CEL_RETURN_IF_ERROR(condition_->Evaluate(frame, condition, condition_attr)); CEL_RETURN_IF_ERROR(left_->Evaluate(frame, lhs, lhs_attr)); CEL_RETURN_IF_ERROR(right_->Evaluate(frame, rhs, rhs_attr)); if (InstanceOf<ErrorValue>(condition) || InstanceOf<UnknownValue>(condition)) { result = std::move(condition); attribute = std::move(condition_attr); return absl::OkStatus(); } if (!InstanceOf<BoolValue>(condition)) { result = frame.value_manager().CreateErrorValue( CreateNoMatchingOverloadError(kTernary)); return absl::OkStatus(); } if (Cast<BoolValue>(condition).NativeValue()) { result = std::move(lhs); attribute = std::move(lhs_attr); } else { result = std::move(rhs); attribute = std::move(rhs_attr); } return absl::OkStatus(); } private: std::unique_ptr<DirectExpressionStep> condition_; std::unique_ptr<DirectExpressionStep> left_; std::unique_ptr<DirectExpressionStep> right_; }; class ShortcircuitingDirectTernaryStep : public DirectExpressionStep { public: ShortcircuitingDirectTernaryStep( std::unique_ptr<DirectExpressionStep> condition, std::unique_ptr<DirectExpressionStep> left, std::unique_ptr<DirectExpressionStep> right, int64_t expr_id) : DirectExpressionStep(expr_id), condition_(std::move(condition)), left_(std::move(left)), right_(std::move(right)) {} absl::Status Evaluate(ExecutionFrameBase& frame, cel::Value& result, AttributeTrail& attribute) const override { cel::Value condition; AttributeTrail condition_attr; CEL_RETURN_IF_ERROR(condition_->Evaluate(frame, condition, condition_attr)); if (InstanceOf<ErrorValue>(condition) || InstanceOf<UnknownValue>(condition)) { result = std::move(condition); attribute = std::move(condition_attr); return absl::OkStatus(); } if (!InstanceOf<BoolValue>(condition)) { result = frame.value_manager().CreateErrorValue( CreateNoMatchingOverloadError(kTernary)); return absl::OkStatus(); } if (Cast<BoolValue>(condition).NativeValue()) { return left_->Evaluate(frame, result, attribute); } return right_->Evaluate(frame, result, attribute); } private: 
std::unique_ptr<DirectExpressionStep> condition_; std::unique_ptr<DirectExpressionStep> left_; std::unique_ptr<DirectExpressionStep> right_; }; class TernaryStep : public ExpressionStepBase { public: explicit TernaryStep(int64_t expr_id) : ExpressionStepBase(expr_id) {} absl::Status Evaluate(ExecutionFrame* frame) const override; }; absl::Status TernaryStep::Evaluate(ExecutionFrame* frame) const { if (!frame->value_stack().HasEnough(3)) { return absl::Status(absl::StatusCode::kInternal, "Value stack underflow"); } auto args = frame->value_stack().GetSpan(3); const auto& condition = args[kTernaryStepCondition]; if (frame->enable_unknowns()) { if (condition->Is<cel::UnknownValue>()) { frame->value_stack().Pop(2); return absl::OkStatus(); } } if (condition->Is<cel::ErrorValue>()) { frame->value_stack().Pop(2); return absl::OkStatus(); } cel::Value result; if (!condition->Is<cel::BoolValue>()) { result = frame->value_factory().CreateErrorValue( CreateNoMatchingOverloadError(kTernary)); } else if (condition.GetBool().NativeValue()) { result = args[kTernaryStepTrue]; } else { result = args[kTernaryStepFalse]; } frame->value_stack().PopAndPush(args.size(), std::move(result)); return absl::OkStatus(); } } std::unique_ptr<DirectExpressionStep> CreateDirectTernaryStep( std::unique_ptr<DirectExpressionStep> condition, std::unique_ptr<DirectExpressionStep> left, std::unique_ptr<DirectExpressionStep> right, int64_t expr_id, bool shortcircuiting) { if (shortcircuiting) { return std::make_unique<ShortcircuitingDirectTernaryStep>( std::move(condition), std::move(left), std::move(right), expr_id); } return std::make_unique<ExhaustiveDirectTernaryStep>( std::move(condition), std::move(left), std::move(right), expr_id); } absl::StatusOr<std::unique_ptr<ExpressionStep>> CreateTernaryStep( int64_t expr_id) { return std::make_unique<TernaryStep>(expr_id); } }
#include "eval/eval/ternary_step.h" #include <memory> #include <string> #include <utility> #include <vector> #include "absl/base/nullability.h" #include "absl/status/status.h" #include "base/ast_internal/expr.h" #include "base/attribute.h" #include "base/attribute_set.h" #include "base/type_provider.h" #include "common/casting.h" #include "common/value.h" #include "common/value_manager.h" #include "eval/eval/attribute_trail.h" #include "eval/eval/cel_expression_flat_impl.h" #include "eval/eval/const_value_step.h" #include "eval/eval/direct_expression_step.h" #include "eval/eval/evaluator_core.h" #include "eval/eval/ident_step.h" #include "eval/public/activation.h" #include "eval/public/cel_value.h" #include "eval/public/unknown_attribute_set.h" #include "eval/public/unknown_set.h" #include "extensions/protobuf/memory_manager.h" #include "internal/status_macros.h" #include "internal/testing.h" #include "runtime/activation.h" #include "runtime/managed_value_factory.h" #include "runtime/runtime_options.h" #include "google/protobuf/arena.h" namespace google::api::expr::runtime { namespace { using ::absl_testing::StatusIs; using ::cel::BoolValue; using ::cel::Cast; using ::cel::ErrorValue; using ::cel::InstanceOf; using ::cel::IntValue; using ::cel::RuntimeOptions; using ::cel::TypeProvider; using ::cel::UnknownValue; using ::cel::ValueManager; using ::cel::ast_internal::Expr; using ::cel::extensions::ProtoMemoryManagerRef; using ::google::protobuf::Arena; using ::testing::ElementsAre; using ::testing::Eq; using ::testing::HasSubstr; using ::testing::Truly; class LogicStepTest : public testing::TestWithParam<bool> { public: absl::Status EvaluateLogic(CelValue arg0, CelValue arg1, CelValue arg2, CelValue* result, bool enable_unknown) { Expr expr0; expr0.set_id(1); auto& ident_expr0 = expr0.mutable_ident_expr(); ident_expr0.set_name("name0"); Expr expr1; expr1.set_id(2); auto& ident_expr1 = expr1.mutable_ident_expr(); ident_expr1.set_name("name1"); Expr expr2; expr2.set_id(3); auto& ident_expr2 = expr2.mutable_ident_expr(); ident_expr2.set_name("name2"); ExecutionPath path; CEL_ASSIGN_OR_RETURN(auto step, CreateIdentStep(ident_expr0, expr0.id())); path.push_back(std::move(step)); CEL_ASSIGN_OR_RETURN(step, CreateIdentStep(ident_expr1, expr1.id())); path.push_back(std::move(step)); CEL_ASSIGN_OR_RETURN(step, CreateIdentStep(ident_expr2, expr2.id())); path.push_back(std::move(step)); CEL_ASSIGN_OR_RETURN(step, CreateTernaryStep(4)); path.push_back(std::move(step)); cel::RuntimeOptions options; if (enable_unknown) { options.unknown_processing = cel::UnknownProcessingOptions::kAttributeOnly; } CelExpressionFlatImpl impl( FlatExpression(std::move(path), 0, TypeProvider::Builtin(), options)); Activation activation; std::string value("test"); activation.InsertValue("name0", arg0); activation.InsertValue("name1", arg1); activation.InsertValue("name2", arg2); auto status0 = impl.Evaluate(activation, &arena_); if (!status0.ok()) return status0.status(); *result = status0.value(); return absl::OkStatus(); } private: Arena arena_; }; TEST_P(LogicStepTest, TestBoolCond) { CelValue result; absl::Status status = EvaluateLogic(CelValue::CreateBool(true), CelValue::CreateBool(true), CelValue::CreateBool(false), &result, GetParam()); ASSERT_OK(status); ASSERT_TRUE(result.IsBool()); ASSERT_TRUE(result.BoolOrDie()); status = EvaluateLogic(CelValue::CreateBool(false), CelValue::CreateBool(true), CelValue::CreateBool(false), &result, GetParam()); ASSERT_OK(status); ASSERT_TRUE(result.IsBool()); 
ASSERT_FALSE(result.BoolOrDie()); } TEST_P(LogicStepTest, TestErrorHandling) { CelValue result; CelError error = absl::CancelledError(); CelValue error_value = CelValue::CreateError(&error); ASSERT_OK(EvaluateLogic(error_value, CelValue::CreateBool(true), CelValue::CreateBool(false), &result, GetParam())); ASSERT_TRUE(result.IsError()); ASSERT_OK(EvaluateLogic(CelValue::CreateBool(true), error_value, CelValue::CreateBool(false), &result, GetParam())); ASSERT_TRUE(result.IsError()); ASSERT_OK(EvaluateLogic(CelValue::CreateBool(false), error_value, CelValue::CreateBool(false), &result, GetParam())); ASSERT_TRUE(result.IsBool()); ASSERT_FALSE(result.BoolOrDie()); } TEST_F(LogicStepTest, TestUnknownHandling) { CelValue result; UnknownSet unknown_set; CelError cel_error = absl::CancelledError(); CelValue unknown_value = CelValue::CreateUnknownSet(&unknown_set); CelValue error_value = CelValue::CreateError(&cel_error); ASSERT_OK(EvaluateLogic(unknown_value, CelValue::CreateBool(true), CelValue::CreateBool(false), &result, true)); ASSERT_TRUE(result.IsUnknownSet()); ASSERT_OK(EvaluateLogic(CelValue::CreateBool(true), unknown_value, CelValue::CreateBool(false), &result, true)); ASSERT_TRUE(result.IsUnknownSet()); ASSERT_OK(EvaluateLogic(CelValue::CreateBool(false), unknown_value, CelValue::CreateBool(false), &result, true)); ASSERT_TRUE(result.IsBool()); ASSERT_FALSE(result.BoolOrDie()); ASSERT_OK(EvaluateLogic(error_value, unknown_value, CelValue::CreateBool(false), &result, true)); ASSERT_TRUE(result.IsError()); ASSERT_OK(EvaluateLogic(unknown_value, error_value, CelValue::CreateBool(false), &result, true)); ASSERT_TRUE(result.IsUnknownSet()); Expr expr0; auto& ident_expr0 = expr0.mutable_ident_expr(); ident_expr0.set_name("name0"); Expr expr1; auto& ident_expr1 = expr1.mutable_ident_expr(); ident_expr1.set_name("name1"); CelAttribute attr0(expr0.ident_expr().name(), {}), attr1(expr1.ident_expr().name(), {}); UnknownAttributeSet unknown_attr_set0({attr0}); UnknownAttributeSet unknown_attr_set1({attr1}); UnknownSet unknown_set0(unknown_attr_set0); UnknownSet unknown_set1(unknown_attr_set1); EXPECT_THAT(unknown_attr_set0.size(), Eq(1)); EXPECT_THAT(unknown_attr_set1.size(), Eq(1)); ASSERT_OK(EvaluateLogic(CelValue::CreateUnknownSet(&unknown_set0), CelValue::CreateUnknownSet(&unknown_set1), CelValue::CreateBool(false), &result, true)); ASSERT_TRUE(result.IsUnknownSet()); const auto& attrs = result.UnknownSetOrDie()->unknown_attributes(); ASSERT_THAT(attrs, testing::SizeIs(1)); EXPECT_THAT(attrs.begin()->variable_name(), Eq("name0")); } INSTANTIATE_TEST_SUITE_P(LogicStepTest, LogicStepTest, testing::Bool()); class TernaryStepDirectTest : public testing::TestWithParam<bool> { public: TernaryStepDirectTest() : value_factory_(TypeProvider::Builtin(), ProtoMemoryManagerRef(&arena_)) {} bool Shortcircuiting() { return GetParam(); } ValueManager& value_manager() { return value_factory_.get(); } protected: Arena arena_; cel::ManagedValueFactory value_factory_; }; TEST_P(TernaryStepDirectTest, ReturnLhs) { cel::Activation activation; RuntimeOptions opts; ExecutionFrameBase frame(activation, opts, value_manager()); std::unique_ptr<DirectExpressionStep> step = CreateDirectTernaryStep( CreateConstValueDirectStep(BoolValue(true), -1), CreateConstValueDirectStep(IntValue(1), -1), CreateConstValueDirectStep(IntValue(2), -1), -1, Shortcircuiting()); cel::Value result; AttributeTrail attr_unused; ASSERT_OK(step->Evaluate(frame, result, attr_unused)); ASSERT_TRUE(InstanceOf<IntValue>(result)); 
EXPECT_EQ(Cast<IntValue>(result).NativeValue(), 1); } TEST_P(TernaryStepDirectTest, ReturnRhs) { cel::Activation activation; RuntimeOptions opts; ExecutionFrameBase frame(activation, opts, value_manager()); std::unique_ptr<DirectExpressionStep> step = CreateDirectTernaryStep( CreateConstValueDirectStep(BoolValue(false), -1), CreateConstValueDirectStep(IntValue(1), -1), CreateConstValueDirectStep(IntValue(2), -1), -1, Shortcircuiting()); cel::Value result; AttributeTrail attr_unused; ASSERT_OK(step->Evaluate(frame, result, attr_unused)); ASSERT_TRUE(InstanceOf<IntValue>(result)); EXPECT_EQ(Cast<IntValue>(result).NativeValue(), 2); } TEST_P(TernaryStepDirectTest, ForwardError) { cel::Activation activation; RuntimeOptions opts; ExecutionFrameBase frame(activation, opts, value_manager()); cel::Value error_value = value_manager().CreateErrorValue(absl::InternalError("test error")); std::unique_ptr<DirectExpressionStep> step = CreateDirectTernaryStep( CreateConstValueDirectStep(error_value, -1), CreateConstValueDirectStep(IntValue(1), -1), CreateConstValueDirectStep(IntValue(2), -1), -1, Shortcircuiting()); cel::Value result; AttributeTrail attr_unused; ASSERT_OK(step->Evaluate(frame, result, attr_unused)); ASSERT_TRUE(InstanceOf<ErrorValue>(result)); EXPECT_THAT(Cast<ErrorValue>(result).NativeValue(), StatusIs(absl::StatusCode::kInternal, "test error")); } TEST_P(TernaryStepDirectTest, ForwardUnknown) { cel::Activation activation; RuntimeOptions opts; opts.unknown_processing = cel::UnknownProcessingOptions::kAttributeOnly; ExecutionFrameBase frame(activation, opts, value_manager()); std::vector<cel::Attribute> attrs{{cel::Attribute("var")}}; cel::UnknownValue unknown_value = value_manager().CreateUnknownValue(cel::AttributeSet(attrs)); std::unique_ptr<DirectExpressionStep> step = CreateDirectTernaryStep( CreateConstValueDirectStep(unknown_value, -1), CreateConstValueDirectStep(IntValue(2), -1), CreateConstValueDirectStep(IntValue(3), -1), -1, Shortcircuiting()); cel::Value result; AttributeTrail attr_unused; ASSERT_OK(step->Evaluate(frame, result, attr_unused)); ASSERT_TRUE(InstanceOf<UnknownValue>(result)); EXPECT_THAT(Cast<UnknownValue>(result).NativeValue().unknown_attributes(), ElementsAre(Truly([](const cel::Attribute& attr) { return attr.variable_name() == "var"; }))); } TEST_P(TernaryStepDirectTest, UnexpectedConditionKind) { cel::Activation activation; RuntimeOptions opts; ExecutionFrameBase frame(activation, opts, value_manager()); std::unique_ptr<DirectExpressionStep> step = CreateDirectTernaryStep( CreateConstValueDirectStep(IntValue(-1), -1), CreateConstValueDirectStep(IntValue(1), -1), CreateConstValueDirectStep(IntValue(2), -1), -1, Shortcircuiting()); cel::Value result; AttributeTrail attr_unused; ASSERT_OK(step->Evaluate(frame, result, attr_unused)); ASSERT_TRUE(InstanceOf<ErrorValue>(result)); EXPECT_THAT(Cast<ErrorValue>(result).NativeValue(), StatusIs(absl::StatusCode::kUnknown, HasSubstr("No matching overloads found"))); } TEST_P(TernaryStepDirectTest, Shortcircuiting) { class RecordCallStep : public DirectExpressionStep { public: explicit RecordCallStep(bool& was_called) : DirectExpressionStep(-1), was_called_(&was_called) {} absl::Status Evaluate(ExecutionFrameBase& frame, cel::Value& result, AttributeTrail& trail) const override { *was_called_ = true; result = IntValue(1); return absl::OkStatus(); } private: absl::Nonnull<bool*> was_called_; }; bool lhs_was_called = false; bool rhs_was_called = false; cel::Activation activation; RuntimeOptions opts; ExecutionFrameBase
frame(activation, opts, value_manager()); std::unique_ptr<DirectExpressionStep> step = CreateDirectTernaryStep( CreateConstValueDirectStep(BoolValue(false), -1), std::make_unique<RecordCallStep>(lhs_was_called), std::make_unique<RecordCallStep>(rhs_was_called), -1, Shortcircuiting()); cel::Value result; AttributeTrail attr_unused; ASSERT_OK(step->Evaluate(frame, result, attr_unused)); ASSERT_TRUE(InstanceOf<IntValue>(result)); EXPECT_THAT(Cast<IntValue>(result).NativeValue(), Eq(1)); bool expect_eager_eval = !Shortcircuiting(); EXPECT_EQ(lhs_was_called, expect_eager_eval); EXPECT_TRUE(rhs_was_called); } INSTANTIATE_TEST_SUITE_P(TernaryStepDirectTest, TernaryStepDirectTest, testing::Bool()); } }
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/ternary_step.cc
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/ternary_step_test.cc
4552db5798fb0853b131b783d8875794334fae7f
a6b50088-4668-4bea-9a3a-97e531d48900
cpp
tensorflow/tensorflow
data_service_client
tensorflow/core/data/service/client/data_service_client.cc
tensorflow/core/data/service/client/data_service_client_test.cc
#include "tensorflow/core/data/service/client/data_service_client.h" #include <algorithm> #include <functional> #include <limits> #include <memory> #include <optional> #include <random> #include <string> #include <string_view> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/log/log.h" #include "absl/strings/ascii.h" #include "absl/strings/substitute.h" #include "absl/time/time.h" #include "tensorflow/core/data/service/client/common.h" #include "tensorflow/core/data/service/client/validate_utils.h" #include "tensorflow/core/data/service/common.h" #include "tensorflow/core/data/service/common.pb.h" #include "tensorflow/core/data/service/dispatcher_client.h" #include "tensorflow/core/data/service/grpc_util.h" #include "tensorflow/core/data/service/worker_client.h" #include "tensorflow/core/data/service/worker_impl.h" #include "tensorflow/core/data/utils.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/metrics.h" #include "tensorflow/core/framework/model.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/thread_annotations.h" #include "tensorflow/core/profiler/lib/traceme.h" #include "tensorflow/core/profiler/lib/traceme_encode.h" #include "tsl/platform/retrying_utils.h" #include "tsl/protobuf/error_codes.pb.h" namespace tensorflow { namespace data { namespace { bool IsColocatedTask(const TaskInfo& task) { return absl::c_any_of(task.worker_tags(), [](std::string_view worker_tag) { return absl::AsciiStrToUpper(worker_tag) == kColocatedWorkerTag; }); } absl::StatusOr<DataTransferServerInfo> GetTransferServer( const std::string& protocol, const TaskInfo& task_info) { for (const auto& transfer_server : task_info.transfer_servers()) { if (transfer_server.protocol() == protocol) { return transfer_server; } } return errors::NotFound("protocol ", protocol, " is not available for worker ", task_info.worker_address()); } } DataServiceClient::DataServiceClient(const DataServiceParams& params) : params_(params), max_outstanding_requests_(params.max_outstanding_requests) {} DataServiceClient::~DataServiceClient() { VLOG(2) << "Destroying data service client for iteration id " << iteration_client_id_; task_thread_manager_.reset(); if (initialized_) { Status s = dispatcher_->ReleaseIterationClient(iteration_client_id_); if (!s.ok()) { LOG(WARNING) << "Failed to release iteration client id: " << s; } } for (auto& worker_thread : worker_threads_) { worker_thread.reset(); } DeleteLocalWorkerTasks(); VLOG(2) << "Destroyed data service dataset iterator for iteration id " << iteration_client_id_; } Status DataServiceClient::Initialize( const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info, Allocator* allocator) { accelerator_device_info_ = accelerator_device_info; allocator_ = allocator; TF_RETURN_IF_ERROR(ValidateDataServiceParams(params_)); VLOG(3) << "Connecting to " << params_.address << " in tf.data service client."; dispatcher_ = std::make_unique<DataServiceDispatcherClient>(params_.address, params_.protocol); int64_t deadline_micros = kint64max; std::optional<std::string> job_name; if (!params_.job_name.empty()) { job_name = params_.job_name; } 
TF_RETURN_IF_ERROR(grpc_util::Retry( [&]() { return dispatcher_->GetOrCreateJob( params_.dataset_id, params_.processing_mode, job_name, params_.num_consumers, params_.cross_trainer_cache_options.has_value(), params_.target_workers, job_id_); }, strings::StrCat("get or create job with dispatcher at ", params_.address), deadline_micros)); TF_RETURN_IF_ERROR(grpc_util::Retry( [&]() { return dispatcher_->GetOrCreateIteration(job_id_, params_.repetition, iteration_client_id_); }, strings::StrCat("get or create iteration with dispatcher at ", params_.address), deadline_micros)); initialized_ = true; return absl::OkStatus(); } absl::StatusOr<GetNextResult> DataServiceClient::GetNext( DataServiceContextFactory context_factory) TF_LOCKS_EXCLUDED(mu_) { VLOG(3) << "Getting the next element from tf.data service client."; mutex_lock l(mu_); if (ctx_ == nullptr) { ctx_ = context_factory(); } EnsureThreadsStarted(); std::shared_ptr<Result> result; do { while (!ResultReady() && !Finished() && !cancelled_ && status_.ok()) { VLOG(3) << "Blocking in GetNext: " << DebugString(); get_next_cv_.wait(l); } if (cancelled_) { VLOG(3) << "Returning from GetNext due to cancellation"; return errors::Cancelled("Data service iterator was cancelled"); } if (!status_.ok()) { VLOG(3) << "Returning from GetNext with error " << status_; return status_; } if (results_.empty()) { VLOG(3) << "Returning from GetNext with end_of_sequence"; return GetNextResult::EndOfSequence(); } if (!ResultReady()) { VLOG(3) << "Returning from GetNext with internal error"; return errors::Internal("Expected a result to be ready, but none were."); } result = PopNextResult(); worker_thread_cv_.notify_one(); if (result->skip) { VLOG(3) << "Skipping result from task " << result->task_id; } } while (result->skip); GetNextResult next; next.end_of_sequence = result->end_of_sequence; if (next.end_of_sequence) { VLOG(1) << "Returning end_of_sequence"; return next; } VLOG(1) << "Returning the next element from data service dataset's " << "Iterator: task " << result->task_id << ", element " << result->element_index; if (IsCoordinatedRead()) { VLOG(1) << "Consumer " << *params_.consumer_index << ": Result " << get_next_index_++; } next.tensors.swap(result->element); return next; } void DataServiceClient::Cancel() TF_LOCKS_EXCLUDED(mu_) { mutex_lock l(mu_); for (const auto& task : tasks_) { task->worker->TryCancel(); } cancelled_ = true; worker_thread_cv_.notify_all(); manager_thread_cv_.notify_all(); get_next_cv_.notify_all(); } TraceMeMetadata DataServiceClient::GetTraceMeMetadata() const { TraceMeMetadata result; int64_t num_tasks = -1; int64_t autotuned_max_outstanding_requests = model::kAutotune; if (mu_.try_lock()) { num_tasks = tasks_.size() - finished_tasks_; autotuned_max_outstanding_requests = max_outstanding_requests_; mu_.unlock(); } result.push_back(std::make_pair( "num_tasks", num_tasks == -1 ? 
kTraceInfoUnavailable : strings::Printf("%lld", static_cast<long long>(num_tasks)))); result.push_back(std::make_pair("job_name", params_.job_name)); result.push_back(std::make_pair( "max_outstanding_requests", strings::Printf( "%lld", static_cast<long long>(params_.max_outstanding_requests)))); if (params_.max_outstanding_requests == model::kAutotune) { result.push_back(std::make_pair( "autotuned_max_outstanding_requests", strings::Printf("%lld", static_cast<long long>( autotuned_max_outstanding_requests)))); } return result; } void DataServiceClient::EnsureThreadsStarted() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (!task_thread_manager_ && !cancelled_) { task_thread_manager_ = ctx_->StartThread("task-thread-manager", [this]() { TaskThreadManager(); }); } } bool DataServiceClient::Finished() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { return num_running_worker_threads_ == 0 && !ShouldWaitForNext(); } bool DataServiceClient::ShouldWaitForNext() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (should_finish_iteration_) { return !iteration_finished_; } return tasks_.empty() || finished_tasks_ < tasks_.size(); } void DataServiceClient::DeleteLocalWorkerTasks() TF_LOCKS_EXCLUDED(mu_) { std::vector<std::shared_ptr<Task>> tasks; { mutex_lock l(mu_); tasks = tasks_; } for (const std::shared_ptr<Task>& task : tasks) { std::shared_ptr<DataServiceWorkerImpl> worker = LocalWorkers::Get(task->info.worker_address()); if (worker && ShouldDeleteLocalTask(task->info)) { worker->DeleteLocalTask(task->info); } } } bool DataServiceClient::ShouldDeleteLocalTask(const TaskInfo& task) const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (IsCoordinatedRead()) { return false; } if (params_.target_workers == TARGET_WORKERS_LOCAL) { return true; } return params_.target_workers == TARGET_WORKERS_AUTO && IsColocatedTask(task); } void DataServiceClient::TaskThreadManager() TF_LOCKS_EXCLUDED(mu_) { auto cleanup = gtl::MakeCleanup([] { VLOG(1) << "Task thread manager exiting"; }); VLOG(1) << "Starting task thread manager"; uint64 next_check = Env::Default()->NowMicros(); while (true) { { mutex_lock l(mu_); while (!cancelled_ && Env::Default()->NowMicros() < next_check) { int64_t remaining_time = next_check - Env::Default()->NowMicros(); VLOG(4) << "Task thread manager waiting for " << remaining_time << "us"; manager_thread_cv_.wait_for(l, std::chrono::microseconds(remaining_time)); } if (cancelled_) { VLOG(3) << "Task thread manager finished"; return; } } Heartbeat(); UpdateBufferSize(); UpdateWorkerThreads(); next_check = Env::Default()->NowMicros() + absl::ToInt64Microseconds(params_.task_refresh_interval); } } void DataServiceClient::TryBlockRound(int64_t round) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (round_robin_round_limit_.has_value() && round_robin_round_limit_.value() == round) { return; } if (current_round_ >= round) { VLOG(1) << "Rejecting request to block round " << round << ", because processing has already begun for round " << current_round_; return; } VLOG(1) << "Accepting request to block round " << round; round_robin_round_limit_ = round; } void DataServiceClient::UpdateIterationFinished(bool iteration_finished) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (!iteration_finished) { return; } iteration_finished_ = true; get_next_cv_.notify_all(); worker_thread_cv_.notify_all(); } absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>> DataServiceClient::CreateWorkerClient(const std::string& protocol, const TaskInfo& task_info) { TF_ASSIGN_OR_RETURN(DataTransferServerInfo transfer_server, GetTransferServer(protocol, task_info)); 
return CreateDataServiceWorkerClient(params_.protocol, transfer_server, accelerator_device_info_, allocator_); } absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>> DataServiceClient::CreateGrpcWorkerClient(const TaskInfo& task_info) { return CreateWorkerClient(kGrpcTransferProtocol, task_info); } absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>> DataServiceClient::CreateAlternativeWorkerClientWithGrpcFallback( const DataTransferServerInfo& transfer_server, const TaskInfo& task_info) { absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>> worker = CreateDataServiceWorkerClient(params_.protocol, transfer_server, accelerator_device_info_, allocator_); if (worker.ok()) { LOG(INFO) << "Successfully started client for data transfer protocol '" << transfer_server.protocol() << "' for worker '" << task_info.worker_address() << "'."; return worker; } LOG(INFO) << "Failed to start client for data transfer protocol '" << transfer_server.protocol() << "' for worker '" << task_info.worker_address() << "'; falling back to grpc. " << "Original error: " << worker.status(); metrics::RecordTFDataServiceDataTransferProtocolFallback( transfer_server.protocol(), static_cast<error::Code>(worker.status().raw_code()), std::string(worker.status().message())); return CreateGrpcWorkerClient(task_info); } absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>> DataServiceClient::CreateWorkerClient(const TaskInfo& task_info) { if (params_.data_transfer_protocol == kLocalTransferProtocol || ForceLocalProtocol(task_info.worker_address())) { DataTransferServerInfo info; info.set_protocol(kLocalTransferProtocol); info.set_address(task_info.worker_address()); return CreateDataServiceWorkerClient(params_.protocol, info, accelerator_device_info_, allocator_); } if (!params_.data_transfer_protocol.empty()) { TF_ASSIGN_OR_RETURN( DataTransferServerInfo transfer_server, GetTransferServer(params_.data_transfer_protocol, task_info)); return CreateAlternativeWorkerClientWithGrpcFallback(transfer_server, task_info); } if (std::string default_protocol = DefaultDataTransferProtocol(); default_protocol != kGrpcTransferProtocol) { absl::StatusOr<DataTransferServerInfo> transfer_server = GetTransferServer(default_protocol, task_info); if (transfer_server.ok()) { return CreateAlternativeWorkerClientWithGrpcFallback(*transfer_server, task_info); } VLOG(1) << "Failed to find transfer server for default data transfer " "protocol '" << default_protocol << "' for worker '" << task_info.worker_address() << "'; falling back to grpc. Original error: " << transfer_server.status(); metrics::RecordTFDataServiceDataTransferProtocolFallback( default_protocol, error::Code::NOT_FOUND, "Failed to find transfer server for default protocol"); } return CreateGrpcWorkerClient(task_info); } Status DataServiceClient::AddTask(const TaskInfo& task_info) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { TF_ASSIGN_OR_RETURN(std::unique_ptr<DataServiceWorkerClient> worker, CreateWorkerClient(task_info)); metrics::RecordTFDataServiceDataTransferProtocolUsed( worker->GetDataTransferProtocol(), !params_.data_transfer_protocol.empty()); tasks_.push_back(std::make_shared<Task>(task_info, std::move(worker))); worker_thread_cv_.notify_one(); if (IsCoordinatedRead()) { VLOG(1) << "Consumer " << params_.consumer_index.value() << " adding task " << task_info.task_id() << " to read from worker " << task_info.worker_address() << ". 
Task starting round: " << task_info.starting_round(); DCHECK_LE(current_round_, task_info.starting_round()); if (current_round_ == task_info.starting_round()) { DCHECK_EQ(next_task_index_, 0); } } if (!IsCoordinatedRead()) { std::mt19937 rng; std::shuffle(tasks_.begin(), tasks_.end(), rng); } return absl::OkStatus(); } void DataServiceClient::Heartbeat() TF_LOCKS_EXCLUDED(mu_) { ClientHeartbeatRequest req; req.set_iteration_client_id(iteration_client_id_); if (IsCoordinatedRead()) { mutex_lock l(mu_); req.set_current_round(current_round_); if (round_robin_round_limit_.has_value()) { req.set_blocked_round(round_robin_round_limit_.value()); } } { mutex_lock l(mu_); double target_processing_time_nsec = ctx_->GetTargetProcessingTimeNsec(); req.set_target_processing_time_nsec(target_processing_time_nsec); } ClientHeartbeatResponse resp; Status s = dispatcher_->ClientHeartbeat(req, resp); if (!s.ok()) { if (IsPreemptedError(s)) { LOG(WARNING) << "Failed to heartbeat to dispatcher from iteration client id " << iteration_client_id_ << ". Dispatcher address: " << params_.address << ". Error: " << s; return; } mutex_lock l(mu_); status_ = s; get_next_cv_.notify_all(); } mutex_lock l(mu_); UpdateIterationFinished(resp.iteration_finished()); if (resp.optional_block_round_case() == ClientHeartbeatResponse::kBlockRound) { TryBlockRound(resp.block_round()); } else { round_robin_round_limit_ = std::nullopt; worker_thread_cv_.notify_all(); } UpdateTasks(resp); RecordTFMetrics(resp); } void DataServiceClient::UpdateTasks(const ClientHeartbeatResponse& resp) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { absl::flat_hash_map<int64_t, TaskInfo> task_id_to_task; for (auto& task : resp.task_info()) { task_id_to_task[task.task_id()] = task; } if (iteration_finished_) { return; } int index = 0; while (index < tasks_.size()) { std::shared_ptr<Task> task = tasks_[index]; if (task_id_to_task.contains(task->info.task_id())) { task_id_to_task.erase(task->info.task_id()); ++index; } else { if (task->end_of_sequence) { finished_tasks_--; } tasks_.erase(tasks_.begin() + index); if (index < next_task_index_) { next_task_index_--; } if (!tasks_.empty() && next_task_index_ >= tasks_.size()) { AdvanceTaskIndex(); } } } for (auto& task : resp.task_info()) { auto it = task_id_to_task.find(task.task_id()); if (it == task_id_to_task.end()) { continue; } if (!ShouldReadFromTask(task)) { VLOG(3) << "Skipping untargeted worker task " << task.task_id(); should_finish_iteration_ = false; continue; } Status s = AddTask(it->second); if (!s.ok()) { status_ = s; get_next_cv_.notify_all(); break; } } } bool DataServiceClient::ShouldReadFromTask(const TaskInfo& task) const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (IsCoordinatedRead()) { return true; } const bool is_local_task = (LocalWorkers::Get(task.worker_address()) != nullptr); if (params_.target_workers == TARGET_WORKERS_LOCAL && !is_local_task) { return false; } const bool is_cross_tf_host_read = !is_local_task && IsColocatedTask(task); if (params_.target_workers == TARGET_WORKERS_AUTO && is_cross_tf_host_read) { return false; } return true; } void DataServiceClient::RecordTFMetrics(const ClientHeartbeatResponse& resp) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { for (const auto& task : resp.task_info()) { if (worker_uids_.contains(task.worker_uid())) { continue; } metrics::RecordTFDataServiceClientIterators( task.worker_uid(), resp.deployment_mode(), params_.processing_mode, IsCoordinatedRead()); worker_uids_.insert(task.worker_uid()); } } void DataServiceClient::UpdateBufferSize() TF_LOCKS_EXCLUDED(mu_) { 
if (params_.max_outstanding_requests == model::kAutotune) { mutex_lock l(mu_); int64_t max_outstanding_requests = ctx_->UpdateMaxOutstandingRequests( max_outstanding_requests_, tasks_.size()); if (max_outstanding_requests > max_outstanding_requests_) { worker_thread_cv_.notify_all(); } VLOG(3) << "Updated `max_outstanding_requests` from " << max_outstanding_requests_ << " to " << max_outstanding_requests << " with " << tasks_.size() << " tasks."; max_outstanding_requests_ = max_outstanding_requests; } } void DataServiceClient::UpdateWorkerThreads() TF_LOCKS_EXCLUDED(mu_) { mutex_lock l(mu_); const int64_t max_num_threads = std::min<int64_t>(tasks_.size(), max_outstanding_requests_); while (num_running_worker_threads_ < max_num_threads && !cancelled_ && status_.ok()) { num_running_worker_threads_++; auto done = [this]() { mutex_lock l(mu_); num_running_worker_threads_--; get_next_cv_.notify_all(); }; worker_threads_.push_back(ctx_->StartThread( "tf-data-service-task_thread", [this, done = std::move(done)]() { RunWorkerThread(std::move(done)); })); } } void DataServiceClient::RunWorkerThread(std::function<void()> done) TF_LOCKS_EXCLUDED(mu_) { auto cleanup = gtl::MakeCleanup([done = std::move(done)]() { done(); VLOG(1) << "Worker thread exiting"; }); VLOG(1) << "Starting worker thread"; std::shared_ptr<Task> task_to_process; int64_t num_consecutive_skipped = 0; constexpr int64_t MAX_ROUND_FALLBACK_TO_BLOCKING = 5; bool allow_skip = true; while (true) { std::shared_ptr<Result> result; { mutex_lock l(mu_); if (task_to_process) { task_to_process->in_use = false; --outstanding_requests_; task_to_process = nullptr; worker_thread_cv_.notify_one(); } while (true) { if (cancelled_ || !ShouldWaitForNext()) { return; } task_to_process = GetTaskToProcess(); if (task_to_process) { VLOG(3) << "Selected a task to process: " << task_to_process->info.ShortDebugString(); break; } worker_thread_cv_.wait(l); } DCHECK(task_to_process != nullptr); task_to_process->in_use = true; ++outstanding_requests_; if (IsCoordinatedRead()) { results_.push(std::make_shared<Result>()); ctx_->RecordBufferEnqueue(results_.back()->element); result = results_.back(); } else { result = std::make_shared<Result>(); } VLOG(3) << "Processing task " << task_to_process->info.task_id(); } int64_t deadline_micros = kint64max; Status s = GetElementTraced(task_to_process.get(), deadline_micros, !IsCoordinatedRead(), allow_skip, result); if (!s.ok()) { mutex_lock l(mu_); VLOG(1) << "Failed to get element from worker " << task_to_process->info.worker_address() << ": " << s; task_to_process->in_use = false; --outstanding_requests_; status_ = errors::CreateWithUpdatedMessage( s, absl::StrCat("Failed to get element from worker ", task_to_process->info.worker_address(), ": ", s.message())); get_next_cv_.notify_all(); return; } if (!IsCoordinatedRead()) { if (mutex_lock l(mu_); result->skip) { num_consecutive_skipped++; if (num_consecutive_skipped >= MAX_ROUND_FALLBACK_TO_BLOCKING * tasks_.size()) { allow_skip = false; VLOG(1) << "`allow_skip` is turned off. 
Switching to blocking " "get element calls to the workers."; } } else { num_consecutive_skipped = 0; allow_skip = true; } } } } bool DataServiceClient::ShouldProcessTask() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (IsCoordinatedRead()) { return results_.size() < max_outstanding_requests_; } return results_.size() + outstanding_requests_ < max_outstanding_requests_; } std::shared_ptr<DataServiceClient::Task> DataServiceClient::GetTaskToProcess() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (!ShouldProcessTask()) { return nullptr; } for (int i = 0; i < tasks_.size(); ++i) { std::shared_ptr<Task>& task = tasks_[next_task_index_]; if (IsCoordinatedRead() && (task->in_use || current_round_ >= round_robin_round_limit_.value_or( std::numeric_limits<int64_t>::max()))) { VLOG(4) << "No round robin task found. in_use: " << task->in_use << ". current_round: " << current_round_ << ". round_robin_round_limit: " << round_robin_round_limit_.value_or(-1); return nullptr; } if (current_round_ < task->info.starting_round() || task->in_use || task->end_of_sequence || task->removed) { VLOG(3) << "Skipping task " << next_task_index_ << ". starting round: " << task->info.starting_round() << ". current round: " << current_round_ << ". task->in_use: " << task->in_use << ". end_of_sequence: " << task->end_of_sequence << ". task->removed: " << task->removed; AdvanceTaskIndex(); continue; } task->round = current_round_; AdvanceTaskIndex(); return task; } return nullptr; } void DataServiceClient::AdvanceTaskIndex() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { next_task_index_++; if (next_task_index_ >= tasks_.size()) { current_round_++; next_task_index_ = 0; } } Status DataServiceClient::TryGetElement(const Task& task, bool allow_skip, GetElementResult& result) { GetElementRequest req; req.set_task_id(task.info.task_id()); req.set_skipped_previous_round(task.skipped_previous_round); if (IsCoordinatedRead()) { req.set_consumer_index(params_.consumer_index.value()); req.set_round_index(task.round); req.set_allow_skip(true); } else { req.set_allow_skip(allow_skip); } if (params_.cross_trainer_cache_options) { req.set_trainer_id(params_.cross_trainer_cache_options->trainer_id()); } return task.worker->GetElement(req, result); } void DataServiceClient::ProcessGetElementResponse( bool enqueue_result, GetElementResult& get_element_result, std::shared_ptr<Result> result, Task& task) TF_LOCKS_EXCLUDED(mu_) { mutex_lock l(mu_); result->ready = true; result->end_of_sequence = get_element_result.end_of_sequence; result->skip = get_element_result.skip; if (!get_element_result.end_of_sequence && !get_element_result.skip) { task.skipped_previous_round = false; result->element = std::move(get_element_result.components); result->element_index = get_element_result.element_index; result->task_id = task.info.task_id(); } else if (get_element_result.skip) { task.skipped_previous_round = true; } else { task.end_of_sequence = true; finished_tasks_++; } if (enqueue_result && !result->end_of_sequence && !result->skip) { ctx_->RecordBufferEnqueue(result->element); results_.push(std::move(result)); } get_next_cv_.notify_all(); } Status DataServiceClient::GetElementTraced(Task* task, int64_t deadline_micros, bool enqueue_result, bool allow_skip, std::shared_ptr<Result> result) TF_LOCKS_EXCLUDED(mu_) { VLOG(3) << "Getting an element for task id " << task->info.task_id(); tsl::profiler::TraceMe activity("GetDataServiceElement", tsl::profiler::TraceMeLevel::kInfo); activity.AppendMetadata([&]() { return tsl::profiler::TraceMeEncode( {{"address", 
task->info.worker_address()}}); }); if (IsCoordinatedRead()) { VLOG(3) << "Requesting element from consumer index " << params_.consumer_index.value() << ", round " << task->round; activity.AppendMetadata([&]() { return tsl::profiler::TraceMeEncode( {{"consumer_index", params_.consumer_index.value()}, {"round_index", task->round}}); }); } Status s = GetElement(task, deadline_micros, enqueue_result, allow_skip, result); mutex_lock l(mu_); VLOG(3) << "Got an element for task id " << task->info.task_id(); return s; } Status DataServiceClient::MaybeRemoveTask(Task& task, int64_t deadline_micros, Result& result) TF_LOCKS_EXCLUDED(mu_) { bool removed; VLOG(1) << "Requesting task removal for worker " << task.info.worker_address() << " in round " << task.round; TF_RETURN_IF_ERROR(grpc_util::Retry( [&] { return dispatcher_->MaybeRemoveTask(task.info.task_id(), params_.consumer_index.value(), task.round, removed); }, [&] { mutex_lock l(mu_); return !cancelled_; }, "request task removal ", deadline_micros)); if (removed) { mutex_lock l(mu_); task.removed = true; result.ready = true; result.skip = true; get_next_cv_.notify_all(); return absl::OkStatus(); } VLOG(1) << "Failed to remove task for worker " << task.info.worker_address(); return absl::OkStatus(); } Status DataServiceClient::GetElement(Task* task, int64_t deadline_micros, bool enqueue_result, bool allow_skip, std::shared_ptr<Result> result) TF_LOCKS_EXCLUDED(mu_) { GetElementResult get_element_result; while (true) { Status s = TryGetElement(*task, allow_skip, get_element_result); if (s.ok()) { task->num_retries = 0; break; } if (!IsPreemptedError(s)) { if (task->worker->GetDataTransferProtocol() == kGrpcTransferProtocol || task->worker->GetDataTransferProtocol() == kLocalTransferProtocol) { return s; } LOG(ERROR) << "Failed to use alternative data transfer protocol '" << task->worker->GetDataTransferProtocol() << "' for worker '" << task->info.worker_address() << "'; falling back to grpc. Original error: " << s; metrics::RecordTFDataServiceDataTransferProtocolError( task->worker->GetDataTransferProtocol(), static_cast<error::Code>(s.raw_code()), std::string(s.message())); mutex_lock l(mu_); TF_ASSIGN_OR_RETURN(std::unique_ptr<DataServiceWorkerClient> worker, CreateGrpcWorkerClient(task->info)); task->worker = std::move(worker); continue; } { mutex_lock l(mu_); if (cancelled_) { return errors::Cancelled("DataServiceDataset iterator cancelled"); } } int64_t now_micros = Env::Default()->NowMicros(); if (now_micros > deadline_micros) { return s; } if (IsCoordinatedRead() && task->num_retries > 0) { TF_RETURN_IF_ERROR(MaybeRemoveTask(*task, deadline_micros, *result)); mutex_lock l(mu_); if (result->skip) { return absl::OkStatus(); } } int64_t backoff_until = std::min( deadline_micros, now_micros + absl::ToInt64Microseconds( tsl::ComputeRetryBackoff(task->num_retries++))); VLOG(1) << "Failed to get an element from worker " << task->info.worker_address() << ": " << s << ". 
Will retry in " << (backoff_until - now_micros) << " microseconds"; Env::Default()->SleepForMicroseconds(backoff_until - now_micros); if (!IsCoordinatedRead()) { mutex_lock l(mu_); result->ready = true; result->skip = true; return absl::OkStatus(); } } ProcessGetElementResponse(enqueue_result, get_element_result, result, *task); return absl::OkStatus(); } bool DataServiceClient::ResultReady() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { return !results_.empty() && results_.front()->ready; } std::shared_ptr<DataServiceClient::Result> DataServiceClient::PopNextResult() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { std::shared_ptr<Result> result = results_.front(); results_.pop(); ctx_->RecordBufferDequeue(result->element); return result; } bool DataServiceClient::IsCoordinatedRead() const { return params_.num_consumers.has_value(); } std::string DataServiceClient::DebugString() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { return absl::Substitute( "results_ { size: $0 front.ready: $1 } iteration_finished_: $2 " "tasks { size: $3 } finished_tasks_: $4 " "num_running_worker_threads_: $5", results_.size(), !results_.empty() && results_.front()->ready, iteration_finished_, tasks_.size(), finished_tasks_, num_running_worker_threads_); } } }
#include "tensorflow/core/data/service/client/data_service_client.h" #include <functional> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/time/time.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/data/service/client/common.h" #include "tensorflow/core/data/service/common.h" #include "tensorflow/core/data/service/test_cluster.h" #include "tensorflow/core/data/service/test_util.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/config.pb.h" #include "tensorflow/core/protobuf/data_service.pb.h" #include "tensorflow/core/protobuf/error_codes.pb.h" namespace tensorflow { namespace data { namespace { using ::tensorflow::data::testing::RangeDataset; using ::tensorflow::testing::IsOkAndHolds; using ::tensorflow::testing::StatusIs; using ::testing::_; using ::testing::AtLeast; using ::testing::ElementsAreArray; using ::testing::HasSubstr; using ::testing::UnorderedElementsAreArray; DataServiceParams GetDataServiceParams( const std::string& dataset_id, const std::string& data_service_address, const ProcessingModeDef::ShardingPolicy sharding_policy) { DataServiceParams params; params.dataset_id = dataset_id; params.processing_mode.set_sharding_policy(sharding_policy); params.address = data_service_address; params.protocol = "grpc"; params.data_transfer_protocol = "grpc"; params.job_name = "test_job"; params.repetition = 0; params.max_outstanding_requests = 100; params.task_refresh_interval = absl::Milliseconds(100); return params; } std::vector<int64_t> Range(const int64_t range) { std::vector<int64_t> result; for (int64_t i = 0; i < range; ++i) { result.push_back(i); } return result; } class TestDataServiceContext : public DataServiceContext { public: TestDataServiceContext() = default; ~TestDataServiceContext() override = default; std::unique_ptr<Thread> StartThread(const string& name, std::function<void()> fn) override { return absl::WrapUnique( Env::Default()->StartThread({}, name, std::move(fn))); } MOCK_METHOD(void, RecordBufferEnqueue, (const std::vector<Tensor>& element), (override)); MOCK_METHOD(void, RecordBufferDequeue, (const std::vector<Tensor>& element), (override)); double GetTargetProcessingTimeNsec() const override { return 1.0e6; } int64_t UpdateMaxOutstandingRequests(int64_t max_outstanding_requests, int64_t new_size) override { return new_size; } }; std::unique_ptr<TestDataServiceContext> GetTestDataServiceContext() { return std::make_unique<TestDataServiceContext>(); } template <class T> StatusOr<std::vector<T>> GetResults(DataServiceClient& client) { std::vector<T> results; while (true) { TF_ASSIGN_OR_RETURN(GetNextResult next, client.GetNext(GetTestDataServiceContext)); if (next.end_of_sequence) { return results; } results.push_back(next.tensors[0].unaligned_flat<T>().data()[0]); } return results; } template <class T> StatusOr<T> GetNext(DataServiceClient& client) { TF_ASSIGN_OR_RETURN(GetNextResult next, client.GetNext(GetTestDataServiceContext)); if (next.end_of_sequence) { return errors::OutOfRange( "The tf.data service has reached the end of sequence"); } return next.tensors[0].unaligned_flat<T>().data()[0]; } TEST(DataServiceClientTest, NoSharding) { TestCluster test_cluster(1); TF_ASSERT_OK(test_cluster.Initialize()); DatasetClient<int64_t> test_dataset(test_cluster); 
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id, test_dataset.RegisterDataset(RangeDataset(10))); DataServiceParams params = GetDataServiceParams( dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::OFF); DataServiceClient client(params); TF_ASSERT_OK(client.Initialize(nullptr, nullptr)); EXPECT_THAT(GetResults<int64_t>(client), IsOkAndHolds(ElementsAreArray(Range(10)))); client.Cancel(); } TEST(DataServiceClientTest, DynamicSharding) { TestCluster test_cluster(3); TF_ASSERT_OK(test_cluster.Initialize()); DatasetClient<int64_t> test_dataset(test_cluster); TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id, test_dataset.RegisterDataset(RangeDataset(10))); DataServiceParams params = GetDataServiceParams( dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::DYNAMIC); DataServiceClient client(params); TF_ASSERT_OK(client.Initialize(nullptr, nullptr)); EXPECT_THAT(GetResults<int64_t>(client), IsOkAndHolds(UnorderedElementsAreArray(Range(10)))); client.Cancel(); } TEST(DataServiceClientTest, StaticSharding) { TestCluster test_cluster(3); TF_ASSERT_OK(test_cluster.Initialize()); DatasetClient<int64_t> dataset_client(test_cluster); TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id, dataset_client.RegisterDataset(RangeDataset(10))); DataServiceParams params = GetDataServiceParams(dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::FILE_OR_DATA); DataServiceClient client(params); TF_ASSERT_OK(client.Initialize(nullptr, nullptr)); EXPECT_THAT(GetResults<int64_t>(client), IsOkAndHolds(UnorderedElementsAreArray(Range(10)))); client.Cancel(); } TEST(DataServiceClientTest, RecordBufferEvents) { TestCluster test_cluster(1); TF_ASSERT_OK(test_cluster.Initialize()); DatasetClient<int64_t> test_dataset(test_cluster); TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id, test_dataset.RegisterDataset(RangeDataset(10))); DataServiceParams params = GetDataServiceParams( dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::OFF); DataServiceClient client(params); TF_ASSERT_OK(client.Initialize(nullptr, nullptr)); auto mock_context = std::make_unique<TestDataServiceContext>(); TestDataServiceContext* ctx = mock_context.get(); EXPECT_CALL(*ctx, RecordBufferEnqueue(_)).Times(AtLeast(1)); EXPECT_CALL(*ctx, RecordBufferDequeue(_)).Times(AtLeast(1)); TF_ASSERT_OK_AND_ASSIGN(GetNextResult next, client.GetNext([&mock_context]() { return std::move(mock_context); })); client.Cancel(); } TEST(DataServiceClientTest, Cancel) { TestCluster test_cluster(1); TF_ASSERT_OK(test_cluster.Initialize()); DatasetClient<int64_t> dataset_client(test_cluster); TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id, dataset_client.RegisterDataset(RangeDataset(10))); DataServiceParams params = GetDataServiceParams( dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::OFF); DataServiceClient client(params); TF_ASSERT_OK(client.Initialize(nullptr, nullptr)); client.Cancel(); EXPECT_THAT(client.GetNext(GetTestDataServiceContext), StatusIs(error::CANCELLED)); } TEST(DataServiceClientTest, ValidationError) { DataServiceParams params = GetDataServiceParams( "dataset_id", "tf_data_service_address", ProcessingModeDef::OFF); params.target_workers = TARGET_WORKERS_LOCAL; DataServiceClient client(params); EXPECT_THAT( client.Initialize(nullptr, nullptr), StatusIs( error::INVALID_ARGUMENT, HasSubstr( "Local reads require local tf.data workers, but no local worker " "is found."))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/client/data_service_client.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/client/data_service_client_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
79cb2597-85ae-444b-a838-21b1f68986d9
cpp
google/arolla
span_input_loader
arolla/io/span_input_loader.h
arolla/io/span_input_loader_test.cc
#ifndef AROLLA_IO_SPAN_INPUT_LOADER_H_ #define AROLLA_IO_SPAN_INPUT_LOADER_H_ #include <cstddef> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/types/span.h" #include "arolla/io/input_loader.h" #include "arolla/memory/frame.h" #include "arolla/memory/optional_value.h" #include "arolla/memory/raw_buffer_factory.h" #include "arolla/qtype/base_types.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/util/status_macros_backport.h" namespace arolla { namespace span_input_loader_impl { template <typename T> struct InputTraits { using ArollaType = T; }; template <typename T> struct InputTraits<std::optional<T>> { using ArollaType = OptionalValue<T>; }; template <typename T> std::vector<std::pair<std::string, QTypePtr>> MakeQTypesList( std::vector<std::string> names_in_order) { std::vector<std::pair<std::string, QTypePtr>> qtypes; qtypes.reserve(names_in_order.size()); for (const auto& name : names_in_order) { qtypes.emplace_back(name, GetQType<T>()); } return qtypes; } } template <typename T> class SpanInputLoader : public StaticInputLoader<absl::Span<const T>> { using Input = absl::Span<const T>; using ArollaT = typename span_input_loader_impl::InputTraits<T>::ArollaType; public: static InputLoaderPtr<absl::Span<const T>> Create( std::vector<std::string> output_names_in_order) { return InputLoaderPtr<absl::Span<const T>>(static_cast<InputLoader<Input>*>( new SpanInputLoader<T>(std::move(output_names_in_order)))); } private: explicit SpanInputLoader(std::vector<std::string> output_names_in_order) : StaticInputLoader<absl::Span<const T>>( span_input_loader_impl::MakeQTypesList<ArollaT>( output_names_in_order)) {} absl::StatusOr<BoundInputLoader<Input>> BindImpl( const absl::flat_hash_map<std::string, TypedSlot>& output_slots) const override { std::vector<size_t> element_ids; std::vector<FrameLayout::Slot<ArollaT>> slots; for (size_t i = 0; i != this->types_in_order().size(); ++i) { if (auto it = output_slots.find(this->types_in_order()[i].first); it != output_slots.end()) { ASSIGN_OR_RETURN(auto slot, it->second.template ToSlot<ArollaT>()); element_ids.push_back(i); slots.push_back(slot); } } return BoundInputLoader<Input>( [slots = std::move(slots), element_ids = std::move(element_ids), expected_input_size = this->types_in_order().size()]( const Input& input, FramePtr frame, RawBufferFactory*) -> absl::Status { if (input.size() != expected_input_size) { return absl::InvalidArgumentError( absl::StrFormat("unexpected input count: expected %d, got %d", expected_input_size, input.size())); } for (size_t i = 0; i < slots.size(); ++i) { size_t id = element_ids[i]; DCHECK_LT(id, input.size()); frame.Set(slots[i], ArollaT(input[id])); } return absl::OkStatus(); }); } }; } #endif
#include "arolla/io/span_input_loader.h" #include <optional> #include <utility> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status.h" #include "absl/status/status_matchers.h" #include "absl/types/span.h" #include "arolla/io/input_loader.h" #include "arolla/io/testing/matchers.h" #include "arolla/memory/frame.h" #include "arolla/memory/memory_allocation.h" #include "arolla/memory/optional_value.h" #include "arolla/qtype/optional_qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/typed_slot.h" namespace arolla { namespace { using ::absl_testing::IsOk; using ::absl_testing::StatusIs; using ::arolla::testing::InputLoaderSupports; using ::testing::Eq; TEST(SpanInputLoaderTest, Scalars) { std::unique_ptr<InputLoader<absl::Span<const float>>> input_loader = SpanInputLoader<float>::Create({"a", "b"}); EXPECT_THAT(input_loader, InputLoaderSupports({{"a", GetQType<float>()}, {"b", GetQType<float>()}})); FrameLayout::Builder layout_builder; auto a_slot = layout_builder.AddSlot<float>(); auto b_slot = layout_builder.AddSlot<float>(); ASSERT_OK_AND_ASSIGN( BoundInputLoader<absl::Span<const float>> bound_input_loader, input_loader->Bind({ {"a", TypedSlot::FromSlot(a_slot)}, {"b", TypedSlot::FromSlot(b_slot)}, })); FrameLayout memory_layout = std::move(layout_builder).Build(); MemoryAllocation alloc(&memory_layout); ASSERT_THAT(bound_input_loader({5, 7}, alloc.frame()), IsOk()); EXPECT_THAT(alloc.frame().Get(a_slot), Eq(5)); EXPECT_THAT(alloc.frame().Get(b_slot), Eq(7)); EXPECT_THAT(bound_input_loader({5, 7, 9}, alloc.frame()), StatusIs(absl::StatusCode::kInvalidArgument, "unexpected input count: expected 2, got 3")); ASSERT_OK_AND_ASSIGN( BoundInputLoader<absl::Span<const float>> bound_b_input_loader, input_loader->Bind({ {"b", TypedSlot::FromSlot(b_slot)}, })); ASSERT_THAT(bound_b_input_loader({2, 57}, alloc.frame()), IsOk()); EXPECT_THAT(alloc.frame().Get(a_slot), Eq(5)); EXPECT_THAT(alloc.frame().Get(b_slot), Eq(57)); } TEST(SpanInputLoaderTest, Optionals) { std::unique_ptr<InputLoader<absl::Span<const std::optional<float>>>> input_loader = SpanInputLoader<std::optional<float>>::Create({"a", "b"}); EXPECT_THAT(input_loader, InputLoaderSupports({{"a", GetOptionalQType<float>()}, {"b", GetOptionalQType<float>()}})); FrameLayout::Builder layout_builder; auto a_slot = layout_builder.AddSlot<OptionalValue<float>>(); auto b_slot = layout_builder.AddSlot<OptionalValue<float>>(); ASSERT_OK_AND_ASSIGN(BoundInputLoader<absl::Span<const std::optional<float>>> bound_input_loader, input_loader->Bind({ {"a", TypedSlot::FromSlot(a_slot)}, {"b", TypedSlot::FromSlot(b_slot)}, })); FrameLayout memory_layout = std::move(layout_builder).Build(); MemoryAllocation alloc(&memory_layout); ASSERT_THAT(bound_input_loader({5, std::nullopt}, alloc.frame()), IsOk()); EXPECT_THAT(alloc.frame().Get(a_slot), Eq(5.f)); EXPECT_THAT(alloc.frame().Get(b_slot), Eq(std::nullopt)); } } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/span_input_loader.h
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/span_input_loader_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
a9df55e8-0ddf-4590-8744-6b2d90500b08
cpp
tensorflow/tensorflow
gpu_backend_lib
third_party/xla/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
third_party/xla/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib_test.cc
#include "xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h" #include <algorithm> #include <cstdint> #include <cstdlib> #include <fstream> #include <functional> #include <ios> #include <memory> #include <mutex> #include <optional> #include <string> #include <string_view> #include <system_error> #include <utility> #include <variant> #include <vector> #include "absl/base/call_once.h" #include "absl/base/const_init.h" #include "absl/base/thread_annotations.h" #include "absl/container/flat_hash_map.h" #include "absl/memory/memory.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "llvm/ADT/Any.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSet.h" #include "llvm/Analysis/CGSCCPassManager.h" #include "llvm/Analysis/LazyCallGraph.h" #include "llvm/Analysis/LoopAnalysisManager.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Bitcode/BitcodeReader.h" #include "llvm/Bitcode/BitcodeWriter.h" #include "llvm/CodeGen/CommandFlags.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" #include "llvm/IR/Verifier.h" #include "llvm/InitializePasses.h" #include "llvm/Linker/Linker.h" #include "llvm/MC/TargetRegistry.h" #include "llvm/PassRegistry.h" #include "llvm/Passes/OptimizationLevel.h" #include "llvm/Passes/PassBuilder.h" #include "llvm/Passes/StandardInstrumentations.h" #include "llvm/Support/CodeGen.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Program.h" #include "llvm/Support/TargetSelect.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Transforms/IPO/AlwaysInliner.h" #include "llvm/Transforms/IPO/Internalize.h" #include "llvm/Transforms/Scalar.h" #include "xla/service/gpu/llvm_gpu_backend/utils.h" #include "xla/service/gpu/metrics.h" #include "xla/service/llvm_ir/llvm_command_line_options.h" #include "xla/service/llvm_ir/llvm_type_conversion_util.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/semantic_version.h" #include "xla/tsl/util/env_var.h" #include "xla/util.h" #include "xla/xla.pb.h" #include "tsl/platform/cuda_root_path.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/path.h" #include "tsl/platform/random.h" #include "tsl/platform/rocm_rocdl_path.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" #include "tsl/profiler/lib/scoped_annotation.h" #include "tsl/profiler/lib/traceme.h" #if !defined(PLATFORM_GOOGLE) && TENSORFLOW_USE_ROCM #include "rocm/rocm_config.h" #endif #if GOOGLE_CUDA #include "third_party/gpus/cuda/include/cuda.h" #include "xla/stream_executor/cuda/cuda_asm_compiler.h" #endif #if TENSORFLOW_USE_SYCL #include "LLVMSPIRVLib.h" #include "LLVMSPIRVOpts.h" #endif namespace xla { namespace gpu { namespace { static llvm::codegen::RegisterCodeGenFlags CGF; const int kAMDGPUInlineThreshold = 0x100000; const int kDefaultInlineThreshold = 1100; std::string MakeNameForTempProduct(absl::string_view input_filename, absl::string_view extension) { return ReplaceFilenameExtension(tsl::io::Basename(input_filename), extension); } void 
InitializePasses(llvm::PassRegistry* pass_registry) { llvm::initializeCore(*pass_registry); llvm::initializeCodeGen(*pass_registry); llvm::initializeScalarOpts(*pass_registry); llvm::initializeVectorization(*pass_registry); llvm::initializeIPO(*pass_registry); llvm::initializeAnalysis(*pass_registry); llvm::initializeTransformUtils(*pass_registry); llvm::initializeInstCombine(*pass_registry); llvm::initializeTarget(*pass_registry); llvm::initializeCodeGenPrepareLegacyPassPass(*pass_registry); } std::unique_ptr<llvm::TargetMachine> GetTargetMachine( llvm::Triple triple, absl::string_view cpu_name, const DebugOptions& debug_options, absl::string_view feature_str) { std::string error; const llvm::Target* target = llvm::TargetRegistry::lookupTarget("", triple, error); if (target == nullptr) { LOG(FATAL) << "Unable to find Target for triple '" << triple.str() << "'" << " -- " << error; return nullptr; } llvm::TargetOptions target_options = llvm::codegen::InitTargetOptionsFromCodeGenFlags(llvm::Triple()); target_options.MCOptions.AsmVerbose = false; llvm::CodeGenOptLevel codegen_opt_level; switch (debug_options.xla_backend_optimization_level()) { case 1: codegen_opt_level = llvm::CodeGenOptLevel::Less; break; case 2: codegen_opt_level = llvm::CodeGenOptLevel::Default; break; case 3: codegen_opt_level = llvm::CodeGenOptLevel::Aggressive; break; default: codegen_opt_level = llvm::CodeGenOptLevel::None; } return absl::WrapUnique(target->createTargetMachine( triple.str(), llvm_ir::AsStringRef(cpu_name), llvm_ir::AsStringRef(feature_str), target_options, llvm::codegen::getExplicitRelocModel(), llvm::codegen::getExplicitCodeModel(), codegen_opt_level)); } std::string EmitModuleToPTX(llvm::Module* module, llvm::TargetMachine* target_machine) { tsl::profiler::ScopedAnnotation annotation([&] { return absl::StrFormat("XlaEmitGpuAsm:#module=%s#", module->getName().str()); }); std::string ptx; llvm::raw_string_ostream stream(ptx); llvm::buffer_ostream pstream(stream); llvm::legacy::PassManager pm; pm.add(new llvm::TargetLibraryInfoWrapperPass( llvm::Triple(module->getTargetTriple()))); target_machine->addPassesToEmitFile(pm, pstream, nullptr, llvm::CodeGenFileType::AssemblyFile); pm.run(*module); return ptx; } void FeedLLVMWithFlags(const std::vector<std::string>& cl_opts) { std::vector<const char*> fake_argv = {""}; for (const std::string& cl_opt : cl_opts) { fake_argv.push_back(cl_opt.c_str()); } llvm::cl::ParseCommandLineOptions(fake_argv.size(), fake_argv.data()); } bool CouldNeedDeviceBitcode(const llvm::Module& module) { for (const llvm::Function& function : module.functions()) { if (!function.isIntrinsic() && function.isDeclaration() && (function.getName().starts_with("__nv_") || function.getName().starts_with("__ocml_") || function.getName().starts_with("__ockl_"))) { return true; } } return false; } absl::Status LinkWithBitcodeVector( llvm::Module* module, const std::vector<std::string>& bitcode_path_vector) { llvm::Linker linker(*module); for (auto& bitcode_path : bitcode_path_vector) { if (!tsl::Env::Default()->FileExists(bitcode_path).ok()) { LOG(ERROR) << "bitcode module is required by this HLO module but was " "not found at " << bitcode_path; return xla::Internal("bitcode module not found at %s", bitcode_path); } std::unique_ptr<llvm::Module> bitcode_module = LoadIRModule(bitcode_path, &module->getContext()); bitcode_module->setDataLayout(module->getDataLayout()); if (linker.linkInModule( std::move(bitcode_module), llvm::Linker::Flags::LinkOnlyNeeded, [](llvm::Module& M, const 
llvm::StringSet<>& GVS) { internalizeModule(M, [&GVS](const llvm::GlobalValue& GV) { return !GV.hasName() || (GVS.count(GV.getName()) == 0); }); })) { return xla::Internal("Error linking bitcode module from %s", bitcode_path); } } return absl::OkStatus(); } absl::Status NVPTXTargetModuleLinker(llvm::Module* module, se::GpuComputeCapability gpu_version, const DebugOptions& debug_options, const std::string& device_bitcode_path) { TF_RETURN_IF_ERROR( nvptx::LinkLibdeviceIfNecessary(module, device_bitcode_path)); module->addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz", debug_options.xla_gpu_ftz()); if (debug_options.xla_gpu_ftz()) { for (llvm::Function& fn : *module) { fn.addFnAttr("denormal-fp-math-f32", "preserve-sign"); } } return absl::OkStatus(); } std::unique_ptr<llvm::TargetMachine> NVPTXGetTargetMachine( llvm::Triple target_triple, se::CudaComputeCapability compute_capability, const DebugOptions& debug_options) { #ifdef GOOGLE_CUDA absl::StatusOr<stream_executor::SemanticVersion> runtime_cuda_version = stream_executor::GetAsmCompilerVersion( debug_options.xla_gpu_cuda_data_dir()); constexpr stream_executor::SemanticVersion kCompileTimeCudaVersion{ CUDA_VERSION / 1000, (CUDA_VERSION / 10) % 100, CUDA_VERSION % 10}; auto highest_supported_cuda_version = [&] { if (runtime_cuda_version.ok()) { return std::min(runtime_cuda_version.value(), kCompileTimeCudaVersion); } return kCompileTimeCudaVersion; }(); auto ptx_version = nvptx::DetermineHighestSupportedPtxVersionFromCudaVersion( highest_supported_cuda_version); int highest_supported_ptx_version = ptx_version.major() * 10 + ptx_version.minor(); VLOG(1) << "Targeting PTX version: " << highest_supported_ptx_version; std::string feature_str = absl::StrFormat("+ptx%d", highest_supported_ptx_version); #else std::string feature_str; #endif return GetTargetMachine(target_triple, nvptx::GetSmName(compute_capability), debug_options, feature_str); } using TargetModuleLinker = std::function<absl::Status(llvm::Module*, se::GpuComputeCapability, const DebugOptions&, const std::string&)>; void DumpModule(const std::string output_filename, const llvm::Module* module) { std::error_code ec; auto out = std::make_unique<llvm::raw_fd_ostream>( llvm::StringRef(output_filename), ec, llvm::sys::fs::OF_None); if (ec) { LOG(FATAL) << "Unable to open " << output_filename << " to dump LLVM IR: " << ec.message(); return; } module->print(*out, nullptr); out->close(); } const llvm::Module* GetModule(llvm::Any IR) { if (const auto** M = llvm::any_cast<const llvm::Module*>(&IR)) return *M; if (const auto** F = llvm::any_cast<const llvm::Function*>(&IR)) { return (*F)->getParent(); } if (const auto** C = llvm::any_cast<const llvm::LazyCallGraph::SCC*>(&IR)) { return (*C)->begin()->getFunction().getParent(); } if (const auto** L = llvm::any_cast<const llvm::Loop*>(&IR)) { const llvm::Function* F = (*L)->getHeader()->getParent(); return F->getParent(); } return nullptr; } auto DumpCallbackForModule(std::string module_identifier, std::string outputs_dir) { int i = 0; return [=](llvm::StringRef pass, llvm::Any ir) mutable { const llvm::Module* module = GetModule(ir); if (!module) { return; } const std::string basename = ReplaceFilenameExtension( absl::string_view(tsl::io::Basename(module_identifier)), absl::StrFormat("pass-%02d.before.%s.ll", i++, absl::string_view(pass.str()))); DumpModule(tsl::io::JoinPath(outputs_dir, basename), module); }; } absl::Status LinkAndOptimizeModule( llvm::Module* module, se::GpuComputeCapability gpu_version, const DebugOptions& 
debug_options, const std::string& device_bitcode_path, TargetModuleLinker module_linker, llvm::Triple default_target_triple, llvm::TargetMachine* target_machine, int inline_threshold) { tsl::profiler::ScopedAnnotation annotation([&] { return absl::StrFormat("XlaOptimizeLlvmIr:#module=%s#", module->getName().str()); }); TF_RETURN_IF_ERROR( module_linker(module, gpu_version, debug_options, device_bitcode_path)); llvm::LoopAnalysisManager lam; llvm::FunctionAnalysisManager fam; llvm::CGSCCAnalysisManager cgam; llvm::ModuleAnalysisManager mam; if (target_machine) { fam.registerPass([&] { return target_machine->getTargetIRAnalysis(); }); } llvm::PipelineTuningOptions pto; pto.SLPVectorization = true; pto.InlinerThreshold = inline_threshold; llvm::PassInstrumentationCallbacks pic; llvm::StandardInstrumentations si(module->getContext(), false); si.registerCallbacks(pic, &mam); llvm::PassBuilder pb(target_machine, pto, std::nullopt, &pic); pb.registerModuleAnalyses(mam); pb.registerCGSCCAnalyses(cgam); pb.registerFunctionAnalyses(fam); pb.registerLoopAnalyses(lam); pb.crossRegisterProxies(lam, fam, cgam, mam); if (debug_options.xla_gpu_dump_llvmir()) { std::string outputs_dir; if (!tsl::io::GetTestUndeclaredOutputsDir(&outputs_dir)) { outputs_dir = debug_options.xla_dump_to(); } if (!outputs_dir.empty()) { pic.registerBeforeNonSkippedPassCallback( DumpCallbackForModule(module->getModuleIdentifier(), outputs_dir)); } else { LOG(ERROR) << "--xla_gpu_dump_llvmir is set, but neither the environment " << "variable TEST_UNDECLARED_OUTPUTS_DIR nor the flag " << "--xla_dump_to is set, so the llvm dumps are disabled."; } } llvm::OptimizationLevel ol; switch (debug_options.xla_backend_optimization_level()) { case 0: ol = llvm::OptimizationLevel::O0; break; case 1: ol = llvm::OptimizationLevel::O1; break; case 2: ol = llvm::OptimizationLevel::O2; break; case 3: ol = llvm::OptimizationLevel::O3; break; } llvm::ModulePassManager mpm; mpm.addPass(llvm::VerifierPass()); if (ol == llvm::OptimizationLevel::O0) { mpm.addPass(pb.buildO0DefaultPipeline(ol)); } else { mpm.addPass(pb.buildPerModuleDefaultPipeline(ol)); } mpm.addPass(llvm::VerifierPass()); mpm.run(*module, mam); return absl::OkStatus(); } void NVPTXBackendInit(const DebugOptions& debug_options) { FeedLLVMWithFlags({"-bonus-inst-threshold=2"}); FeedLLVMWithFlags({"-nvptx-prec-divf32=1"}); FeedLLVMWithFlags({ "-slp-vectorize-hor=false", "-slp-max-reg-size=32", }); llvm_ir::InitializeLLVMCommandLineOptions( debug_options.xla_backend_extra_options()); LLVMInitializeNVPTXTarget(); LLVMInitializeNVPTXTargetInfo(); LLVMInitializeNVPTXTargetMC(); LLVMInitializeNVPTXAsmPrinter(); llvm::PassRegistry* registry = llvm::PassRegistry::getPassRegistry(); InitializePasses(registry); } } namespace nvptx { std::string GetSmName(se::CudaComputeCapability compute_capability) { int compute_capability_version = compute_capability.major * 10 + compute_capability.minor; int sm_version = 30; int supported_versions[] = {90, 89, 87, 86, 80, 75, 72, 70, 62, 61, 60, 53, 52, 50, 37, 35, 32, 30}; for (int v : supported_versions) { if (v <= compute_capability_version) { sm_version = v; break; } } if (sm_version != compute_capability_version && compute_capability_version < supported_versions[0]) { LOG(WARNING) << "Unknown compute capability " << compute_capability.ToString() << ". Defaulting to telling LLVM that we're compiling for sm_" << sm_version; } std::string_view extension = (compute_capability.major == 9 && sm_version == 90) ? 
"a" : ""; return absl::StrCat("sm_", sm_version, extension); } std::string CantFindCudaMessage(absl::string_view msg, absl::string_view xla_gpu_cuda_data_dir) { return absl::StrCat( msg, "\nSearched for CUDA in the following directories:\n ", absl::StrJoin(tsl::CandidateCudaRoots(std::string{xla_gpu_cuda_data_dir}), "\n "), "\nYou can choose the search directory by setting xla_gpu_cuda_data_dir " "in HloModule's DebugOptions. For most apps, setting the environment " "variable XLA_FLAGS=--xla_gpu_cuda_data_dir=/path/to/cuda will work."); } static std::string GetLibdeviceDir(absl::string_view xla_gpu_cuda_data_dir) { for (const std::string& cuda_root : tsl::CandidateCudaRoots(std::string{xla_gpu_cuda_data_dir})) { std::string libdevice_dir = tsl::io::JoinPath(cuda_root, "nvvm", "libdevice"); VLOG(2) << "Looking for libdevice at " << libdevice_dir; if (tsl::Env::Default()->IsDirectory(libdevice_dir).ok()) { VLOG(2) << "Found libdevice dir " << libdevice_dir; return libdevice_dir; } } LOG(WARNING) << CantFindCudaMessage( "Can't find libdevice directory ${CUDA_DIR}/nvvm/libdevice. This may " "result in compilation or runtime failures, if the program we try to run " "uses routines from libdevice.", xla_gpu_cuda_data_dir); return "."; } std::string LibDevicePath(absl::string_view xla_gpu_cuda_data_dir) { static absl::Mutex libdevice_cache_mu(absl::kConstInit); static auto& libdevice_dir_path_cache ABSL_GUARDED_BY(libdevice_cache_mu) = *new absl::flat_hash_map<std::string, std::string>(); std::string libdevice_dir_path = [&] { absl::MutexLock l(&libdevice_cache_mu); auto it = libdevice_dir_path_cache.find(xla_gpu_cuda_data_dir); if (it != libdevice_dir_path_cache.end()) { return it->second; } auto [it2, inserted] = libdevice_dir_path_cache.emplace( xla_gpu_cuda_data_dir, GetLibdeviceDir(xla_gpu_cuda_data_dir)); return it2->second; }(); return tsl::io::JoinPath(libdevice_dir_path, "libdevice.10.bc"); } absl::Status LinkLibdeviceIfNecessary(llvm::Module* module, const std::string& libdevice_path) { if (!CouldNeedDeviceBitcode(*module)) { return absl::OkStatus(); } if (!tsl::Env::Default()->FileExists(libdevice_path).ok()) { LOG(WARNING) << "libdevice is required by this HLO module but was not found at " << libdevice_path; return xla::Internal("libdevice not found at %s", libdevice_path); } VLOG(1) << "Linking with libdevice from: " << libdevice_path; return LinkWithBitcodeVector(module, {libdevice_path}); } absl::StatusOr<std::string> CompileToPtx( llvm::Module* module, se::GpuComputeCapability gpu_version, const DebugOptions& debug_options, std::function<void(llvm::TargetMachine*)> configure_target) { static absl::once_flag backend_init_flag; absl::call_once(backend_init_flag, NVPTXBackendInit, debug_options); std::string ptx; std::unique_ptr<llvm::TargetMachine> target_machine; { tsl::profiler::TraceMe activity( [&] { return absl::StrCat("Compiling IR:", module->getName().str()); }, tsl::profiler::TraceMeLevel::kInfo); XLA_SCOPED_LOGGING_TIMER("Compile module " + module->getName().str()); if (module->empty() && module->global_empty()) { VLOG(2) << "Module '" << module->getName().str() << "' is empty. 
Skipping compilation."; return std::string(); } auto compute_capability = std::get_if<se::CudaComputeCapability>(&gpu_version); if (!compute_capability) { return xla::Internal("Incompatible compute capability was specified."); } llvm::Triple default_target_triple("nvptx64-unknown-unknown"); std::unique_ptr<llvm::TargetMachine> target_machine = NVPTXGetTargetMachine( default_target_triple, *compute_capability, debug_options); if (configure_target) { configure_target(target_machine.get()); } uint64_t start_usecs = tsl::Env::Default()->NowMicros(); TF_RETURN_IF_ERROR(LinkAndOptimizeModule( module, gpu_version, debug_options, LibDevicePath(debug_options.xla_gpu_cuda_data_dir()), NVPTXTargetModuleLinker, default_target_triple, target_machine.get(), kDefaultInlineThreshold)); uint64_t end_usecs = tsl::Env::Default()->NowMicros(); RecordLlvmPassesDuration(end_usecs - start_usecs); start_usecs = tsl::Env::Default()->NowMicros(); ptx = EmitModuleToPTX(module, target_machine.get()); end_usecs = tsl::Env::Default()->NowMicros(); RecordLlvmToPtxDuration(end_usecs - start_usecs); } return ptx; } namespace { constexpr stream_executor::SemanticVersion kFallbackPtxVersion{6, 5, 0}; constexpr stream_executor::SemanticVersion kMaxPtxVersion{8, 5, 0}; } stream_executor::SemanticVersion DetermineHighestSupportedPtxVersionFromCudaVersion( stream_executor::SemanticVersion cuda_version) { if (cuda_version < stream_executor::SemanticVersion{11, 0, 0}) { return kFallbackPtxVersion; } if (cuda_version < stream_executor::SemanticVersion{12, 6, 0}) { return {cuda_version.major() - 4, cuda_version.minor(), 0}; } return kMaxPtxVersion; } } namespace { std::vector<std::string> GetROCDLPaths(std::string gcn_arch_name, const std::string& rocdl_dir_path) { static std::vector<std::string>* rocdl_filenames = new std::vector<std::string>( {"opencl.bc", "ocml.bc", "ockl.bc", "oclc_finite_only_off.bc", "oclc_daz_opt_off.bc", "oclc_correctly_rounded_sqrt_on.bc", "oclc_unsafe_math_off.bc", "oclc_wavefrontsize64_on.bc", "oclc_abi_version_500.bc"}); std::vector<std::string> result; result.reserve(rocdl_filenames->size() + 1); for (auto& filename : *rocdl_filenames) { result.push_back(tsl::io::JoinPath(rocdl_dir_path, filename)); } std::vector<std::string> tokens = absl::StrSplit(gcn_arch_name, ':'); std::string amdgpu_version = gcn_arch_name; if (!tokens.empty() && tokens[0].size() >= 3) { amdgpu_version = tokens[0].substr(3); } result.push_back(tsl::io::JoinPath( rocdl_dir_path, absl::StrCat("oclc_isa_version_", amdgpu_version, ".bc"))); return result; } struct HsacoCacheEntry { uint64_t hash; std::string ir; std::string gfx; std::vector<uint8_t> hsaco; }; struct HsacoCache { protected: std::vector<HsacoCacheEntry> cache; std::mutex m_mutex; int request_count = 0; int hit_count = 0; public: static bool Find(const std::string& ir, uint64_t& hash, const std::string& gfx, std::vector<uint8_t>& hsaco); static void Add(const std::string& ir, uint64_t hash, const std::string& gfx, const std::vector<uint8_t>& hsaco); }; static HsacoCache g_hsacoCache; bool HsacoCache::Find(const std::string& ir, uint64_t& hash, const std::string& gfx, std::vector<uint8_t>& hsaco) { std::lock_guard<std::mutex> lg(g_hsacoCache.m_mutex); hash = std::hash<std::string>{}(ir); bool hit = false; for (auto& x : g_hsacoCache.cache) { if (x.hash != hash) continue; if (x.gfx != gfx) continue; if (x.ir != ir) continue; hsaco = x.hsaco; hit = true; break; } g_hsacoCache.request_count++; if (hit) g_hsacoCache.hit_count++; if (!(g_hsacoCache.request_count % 50)) 
VLOG(1) << "HSACO cache: " << g_hsacoCache.request_count << " requests, " << g_hsacoCache.hit_count << " hits"; return hit; } void HsacoCache::Add(const std::string& ir, uint64_t hash, const std::string& gfx, const std::vector<uint8_t>& hsaco) { std::lock_guard<std::mutex> lg(g_hsacoCache.m_mutex); g_hsacoCache.cache.resize(g_hsacoCache.cache.size() + 1); g_hsacoCache.cache.back().ir = ir; g_hsacoCache.cache.back().hash = hash; g_hsacoCache.cache.back().gfx = gfx; g_hsacoCache.cache.back().hsaco = hsaco; } absl::StatusOr<std::vector<uint8_t>> EmitModuleToHsaco( llvm::Module* module, llvm::TargetMachine* target_machine) { auto* env = tsl::Env::Default(); std::vector<std::string> tempdir_vector; env->GetLocalTempDirectories(&tempdir_vector); if (tempdir_vector.empty()) { return xla::Internal( "Unable to locate a temporary directory for compile-time artifacts."); } std::string tempdir_name = tempdir_vector.front(); VLOG(1) << "Compile-time artifacts located at: " << tempdir_name; bool keep_tempfiles = false; TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_ROCM_KEEP_XLA_TEMPFILES", false, &keep_tempfiles)); std::string random_number = std::to_string(tsl::random::New64()); std::string ir_filename = absl::StrCat(module->getModuleIdentifier(), random_number + ".ll"); std::string ir_path = tsl::io::JoinPath(tempdir_name, ir_filename); std::string ir_opt_filename = absl::StrCat(module->getModuleIdentifier(), random_number + "_opt.ll"); std::string ir_opt_path = tsl::io::JoinPath(tempdir_name, ir_opt_filename); std::string isabin_filename = absl::StrCat(module->getModuleIdentifier(), random_number + ".o"); std::string isabin_path = tsl::io::JoinPath(tempdir_name, isabin_filename); std::string hsaco_filename = absl::StrCat(module->getModuleIdentifier(), random_number + ".hsaco"); std::string hsaco_path = tsl::io::JoinPath(tempdir_name, hsaco_filename); std::error_code ec; std::unique_ptr<llvm::raw_fd_ostream> ir_fs( new llvm::raw_fd_ostream(ir_path, ec, llvm::sys::fs::OF_None)); module->print(*ir_fs, nullptr); ir_fs->flush(); llvm::legacy::PassManager pm; pm.add(new llvm::TargetLibraryInfoWrapperPass( llvm::Triple(module->getTargetTriple()))); llvm::SmallVector<char, 0> stream; llvm::raw_svector_ostream pstream(stream); std::unique_ptr<llvm::raw_fd_ostream> isabin_fs( new llvm::raw_fd_ostream(isabin_path, ec, llvm::sys::fs::OF_Text)); module->setDataLayout(target_machine->createDataLayout()); target_machine->addPassesToEmitFile(pm, *isabin_fs, nullptr, llvm::CodeGenFileType::ObjectFile); pm.run(*module); isabin_fs->flush(); if (keep_tempfiles) { std::unique_ptr<llvm::raw_fd_ostream> ir_fs( new llvm::raw_fd_ostream(ir_opt_path, ec, llvm::sys::fs::OF_None)); module->print(*ir_fs, nullptr); ir_fs->flush(); } std::string lld_path; if (std::getenv("LLVM_PATH")) { lld_path = tsl::io::JoinPath(std::getenv("LLVM_PATH"), "bin"); } else { lld_path = tsl::io::JoinPath(tsl::RocmRoot(), "llvm/bin"); } auto lld_program = llvm::sys::findProgramByName("ld.lld", {lld_path}); if (!lld_program) { return xla::Internal("unable to find ld.lld in PATH: %s", lld_program.getError().message()); } std::vector<llvm::StringRef> lld_args{ llvm_ir::AsStringRef("ld.lld"), llvm_ir::AsStringRef("-flavor"), llvm_ir::AsStringRef("gnu"), llvm_ir::AsStringRef("-shared"), llvm_ir::AsStringRef(isabin_path), llvm_ir::AsStringRef("-o"), llvm_ir::AsStringRef(hsaco_path), }; std::string error_message; int lld_result = llvm::sys::ExecuteAndWait(*lld_program, llvm_ir::AsArrayRef(lld_args), std::nullopt, {}, 0, 0, &error_message); if (lld_result) { 
return xla::Internal("ld.lld execute fail: %s, error code %d", error_message, lld_result); } std::ifstream hsaco_file(hsaco_path, std::ios::binary | std::ios::ate); std::ifstream::pos_type hsaco_file_size = hsaco_file.tellg(); std::vector<uint8_t> hsaco(hsaco_file_size); hsaco_file.seekg(0, std::ios::beg); hsaco_file.read(reinterpret_cast<char*>(hsaco.data()), hsaco_file_size); hsaco_file.close(); if (!keep_tempfiles) { remove(ir_path.c_str()); remove(isabin_path.c_str()); remove(hsaco_path.c_str()); } return hsaco; } absl::Status LinkROCDLIfNecessary(llvm::Module* module, std::string gcn_arch_name, const std::string& rocdl_dir_path) { if (!CouldNeedDeviceBitcode(*module)) { return absl::OkStatus(); } return LinkWithBitcodeVector(module, GetROCDLPaths(gcn_arch_name, rocdl_dir_path)); } absl::Status AMDGPUTargetModuleLinker( llvm::Module* module, se::GpuComputeCapability gpu_version, const DebugOptions& debug_options, const std::string& device_bitcode_dir_path) { auto compute_capability = std::get_if<se::RocmComputeCapability>(&gpu_version); if (!compute_capability) { return xla::Internal("Incompatible compute capability was specified."); } std::string gcn_arch_name = compute_capability->gcn_arch_name(); TF_RETURN_IF_ERROR( LinkROCDLIfNecessary(module, gcn_arch_name, device_bitcode_dir_path)); if (debug_options.xla_gpu_ftz()) { for (llvm::Function& fn : *module) { fn.addFnAttr("denormal-fp-math-f32", "preserve-sign"); } } return absl::OkStatus(); } std::string MapGCNArchNameTokenToFeatureStr(const std::string& token, const std::string& gfx) { if (token == "sramecc+") { return "+sramecc"; } else if (token == "sramecc-") { if (gfx == "gfx90a" || gfx == "gfx940" || gfx == "gfx941" || gfx == "gfx942") return ""; return "-sramecc"; } else if (token == "xnack+") { return "+xnack"; } else if (token == "xnack-") { return "-xnack"; } return ""; } std::pair<std::string, std::string> GetFeatureStrFromGCNArchName( const std::string& gcn_arch_name) { std::string feature_str; std::string gfx = gcn_arch_name; std::vector<std::string> tokens = absl::StrSplit(gcn_arch_name, ':'); std::vector<std::string> mapped_tokens; if (!tokens.empty()) gfx = tokens[0]; for (auto it = tokens.begin(); it != tokens.end(); it++) { if (it != tokens.begin()) { std::string token(*it); std::string mapped_token = MapGCNArchNameTokenToFeatureStr(token, gfx); mapped_tokens.push_back(mapped_token); } } feature_str = absl::StrJoin(mapped_tokens, ","); return std::make_pair(gfx, feature_str); } std::unique_ptr<llvm::TargetMachine> AMDGPUGetTargetMachine( llvm::Triple target_triple, se::GpuComputeCapability gpu_version, const DebugOptions& debug_options) { auto compute_capability = std::get_if<se::RocmComputeCapability>(&gpu_version); std::string gcn_arch_name = compute_capability->gcn_arch_name(); auto arch = GetFeatureStrFromGCNArchName(gcn_arch_name); return GetTargetMachine(std::move(target_triple), arch.first, debug_options, arch.second); } std::string GetROCDLDir(const DebugOptions& debug_options) { std::vector<std::string> potential_rocdl_dirs; const std::string& datadir = debug_options.xla_gpu_cuda_data_dir(); if (!datadir.empty()) { potential_rocdl_dirs.push_back(datadir); } potential_rocdl_dirs.push_back(tsl::RocdlRoot()); for (const std::string& potential_rocdl_dir : potential_rocdl_dirs) { if (tsl::Env::Default()->IsDirectory(potential_rocdl_dir).ok()) { VLOG(2) << "Found ROCm-Device-Libs dir " << potential_rocdl_dir; return potential_rocdl_dir; } VLOG(2) << "Unable to find potential ROCm-Device-Libs dir " << 
potential_rocdl_dir; } return "."; } void AMDGPUBackendInit(const DebugOptions& debug_options, std::string& rocdl_dir_path) { llvm_ir::InitializeLLVMCommandLineOptions( debug_options.xla_backend_extra_options()); #if TENSORFLOW_USE_ROCM LLVMInitializeAMDGPUTarget(); LLVMInitializeAMDGPUTargetInfo(); LLVMInitializeAMDGPUTargetMC(); LLVMInitializeAMDGPUAsmParser(); LLVMInitializeAMDGPUAsmPrinter(); #endif rocdl_dir_path = GetROCDLDir(debug_options); llvm::PassRegistry* registry = llvm::PassRegistry::getPassRegistry(); InitializePasses(registry); } } namespace amdgpu { std::string LibDevicePath(std::string gcn_arch_name, const std::string& rocdl_dir_path) { auto libdevice_dir_paths = GetROCDLPaths(gcn_arch_name, rocdl_dir_path); for (auto libdevice_dir_path : libdevice_dir_paths) { if (libdevice_dir_path.find("ocml.bc")) { return libdevice_dir_path; } } return ""; } absl::StatusOr<std::vector<uint8_t>> CompileToHsaco( llvm::Module* module, se::GpuComputeCapability gpu_version, const DebugOptions& debug_options, const std::string& module_config_cache_key) { static absl::once_flag backend_init_flag; static std::string rocdl_dir_path; absl::call_once(backend_init_flag, AMDGPUBackendInit, debug_options, rocdl_dir_path); std::vector<uint8_t> hsaco; std::unique_ptr<llvm::TargetMachine> target_machine; std::string str; llvm::raw_string_ostream stream(str); stream << *module; if (str.size() >= 13 && str.substr(0, 13) == "; ModuleID = ") { auto pos = str.find('\n'); if (pos != std::string::npos) str = str.substr(pos + 1); } if (str.size() >= 18 && str.substr(0, 18) == "source_filename = ") { auto pos = str.find('\n'); if (pos != std::string::npos) str = str.substr(pos + 1); } str += module_config_cache_key; { tsl::profiler::TraceMe activity( [&] { return absl::StrCat("Compiling IR", module->getName().str()); }, tsl::profiler::TraceMeLevel::kInfo); XLA_SCOPED_LOGGING_TIMER("Compile module " + module->getName().str()); auto compute_capability = std::get_if<se::RocmComputeCapability>(&gpu_version); if (!compute_capability) { return xla::Internal("Incompatible compute capability was specified."); } std::string gcn_arch_name = compute_capability->gcn_arch_name(); uint64_t hash; if (HsacoCache::Find(str, hash, gcn_arch_name, hsaco)) { VLOG(1) << "HSACO cache hit"; return hsaco; } VLOG(1) << "HSACO cache miss"; bool dump_lls = false; if (dump_lls) { static int hsaco_count = 0; std::string name = "/tmp/" + std::to_string(hsaco_count) + ".ll"; hsaco_count++; std::ofstream ofs(name); ofs << str; ofs.close(); } llvm::Triple default_target_triple("amdgcn--amdhsa-amdgiz"); std::unique_ptr<llvm::TargetMachine> target_machine = AMDGPUGetTargetMachine(default_target_triple, gpu_version, debug_options); TF_RETURN_IF_ERROR(LinkAndOptimizeModule( module, gpu_version, debug_options, rocdl_dir_path, AMDGPUTargetModuleLinker, default_target_triple, target_machine.get(), kAMDGPUInlineThreshold)); TF_ASSIGN_OR_RETURN(hsaco, EmitModuleToHsaco(module, target_machine.get())); HsacoCache::Add(str, hash, gcn_arch_name, hsaco); } return hsaco; } } namespace { std::unique_ptr<llvm::TargetMachine> SPIRGetTargetMachine( llvm::Triple target_triple, se::GpuComputeCapability gpu_version, const DebugOptions& debug_options) { return nullptr; } absl::Status SPIRTargetModuleLinker( llvm::Module* module, se::GpuComputeCapability gpu_version, const DebugOptions& debug_options, const std::string& device_bitcode_dir_path) { return absl::OkStatus(); } absl::StatusOr<std::string> EmitModuleToSpir( llvm::Module* module, se::GpuComputeCapability 
gpu_version, const DebugOptions& debug_options) { #if TENSORFLOW_USE_SYCL SPIRV::TranslatorOpts::ExtensionsStatusMap ExtensionsStatus; SPIRV::TranslatorOpts opts(SPIRV::VersionNumber::MaximumVersion, ExtensionsStatus); opts.enableAllExtensions(); std::ostringstream oss; std::string err; bool success = llvm::writeSpirv(module, opts, oss, err); if (!success) { return xla::Internal("Fails to convert LLVM as SPIR-V: %s", err); } return oss.str(); #else return absl::UnimplementedError("Not implemented for SYCL"); #endif } void SPIRBackendInit(const DebugOptions& debug_options) { FeedLLVMWithFlags({ "-slp-vectorize-hor=false", "-slp-min-reg-size=64", "-slp-max-reg-size=64", }); llvm_ir::InitializeLLVMCommandLineOptions( debug_options.xla_backend_extra_options()); llvm::PassRegistry* registry = llvm::PassRegistry::getPassRegistry(); InitializePasses(registry); } } namespace spir { absl::StatusOr<std::vector<uint8_t>> CompileToSpir( llvm::Module* module, se::GpuComputeCapability gpu_version, const DebugOptions& debug_options) { std::string libdevice_dir_path; static absl::once_flag backend_init_flag; absl::call_once(backend_init_flag, SPIRBackendInit, debug_options); std::string spir; { XLA_SCOPED_LOGGING_TIMER("Compile module " + module->getName().str()); if (module->empty() && module->global_empty()) { VLOG(2) << "Module '" << module->getName().str() << "' is empty. Skipping compilation."; return std::vector<uint8_t>(); } llvm::Triple default_target_triple("spir64-unknown-unknown"); std::unique_ptr<llvm::TargetMachine> target_machine = SPIRGetTargetMachine(default_target_triple, gpu_version, debug_options); TF_RETURN_IF_ERROR(LinkAndOptimizeModule( module, gpu_version, debug_options, libdevice_dir_path, SPIRTargetModuleLinker, default_target_triple, target_machine.get(), kDefaultInlineThreshold)); TF_ASSIGN_OR_RETURN(spir, EmitModuleToSpir(module, gpu_version, debug_options)); } return std::vector<uint8_t>(spir.begin(), spir.end()); } } } }
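A note on the ROCm arch-name handling above: GetFeatureStrFromGCNArchName turns a gcn_arch_name such as "gfx90a:sramecc+:xnack-" into a (target, feature-string) pair. The first ':'-separated token is the gfx target, and each later token maps to an LLVM feature string, with "-sramecc" deliberately dropped on the gfx90a/gfx940/gfx941/gfx942 family. Below is a minimal standalone sketch of that tokenization, with the absl::StrSplit/StrJoin calls hand-rolled so it compiles on its own; MapToken and SplitArchName are hypothetical names used only for illustration.

#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Mirrors MapGCNArchNameTokenToFeatureStr above, including the special
// case that drops "-sramecc" for the gfx90a/gfx94x family.
std::string MapToken(const std::string& token, const std::string& gfx) {
  if (token == "sramecc+") return "+sramecc";
  if (token == "sramecc-") {
    if (gfx == "gfx90a" || gfx == "gfx940" || gfx == "gfx941" ||
        gfx == "gfx942")
      return "";
    return "-sramecc";
  }
  if (token == "xnack+") return "+xnack";
  if (token == "xnack-") return "-xnack";
  return "";
}

// Hand-rolled equivalent of splitting on ':' and joining the mapped
// tokens with ',' (the source uses absl::StrSplit / absl::StrJoin).
std::pair<std::string, std::string> SplitArchName(const std::string& name) {
  std::vector<std::string> tokens;
  std::size_t start = 0;
  while (true) {
    std::size_t pos = name.find(':', start);
    tokens.push_back(name.substr(start, pos - start));
    if (pos == std::string::npos) break;
    start = pos + 1;
  }
  const std::string gfx = tokens[0];
  std::string features;
  for (std::size_t i = 1; i < tokens.size(); ++i) {
    if (i > 1) features += ",";
    features += MapToken(tokens[i], gfx);
  }
  return {gfx, features};
}

int main() {
  auto [gfx, feat] = SplitArchName("gfx90a:sramecc+:xnack-");
  assert(gfx == "gfx90a");
  assert(feat == "+sramecc,-xnack");
  return 0;
}

Running this, "gfx90a:sramecc+:xnack-" yields target "gfx90a" with features "+sramecc,-xnack", which is the pair AMDGPUGetTargetMachine hands to GetTargetMachine.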
#include "xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h" #include <utility> #include "absl/strings/str_cat.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/semantic_version.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { namespace se = ::stream_executor; TEST(UtilsTest, TestGetSmName) { se::CudaComputeCapability cc_hopper(9, 0); ASSERT_EQ(nvptx::GetSmName(cc_hopper), "sm_90a"); se::CudaComputeCapability cc_next(10, 0); ASSERT_EQ(nvptx::GetSmName(cc_next), "sm_90"); } using VersionPair = std::pair<se::SemanticVersion, se::SemanticVersion>; using PtxVersionFromCudaVersionTest = ::testing::TestWithParam<VersionPair>; TEST_P(PtxVersionFromCudaVersionTest, VerifyMapping) { EXPECT_EQ(nvptx::DetermineHighestSupportedPtxVersionFromCudaVersion( GetParam().first), GetParam().second); } INSTANTIATE_TEST_SUITE_P(VersionTest, PtxVersionFromCudaVersionTest, ::testing::ValuesIn<VersionPair>({ {{11, 0, 0}, {7, 0, 0}}, {{11, 1, 0}, {7, 1, 0}}, {{11, 2, 0}, {7, 2, 0}}, {{11, 3, 0}, {7, 3, 0}}, {{11, 4, 0}, {7, 4, 0}}, {{11, 5, 0}, {7, 5, 0}}, {{11, 6, 0}, {7, 6, 0}}, {{11, 7, 0}, {7, 7, 0}}, {{11, 8, 0}, {7, 8, 0}}, {{12, 0, 0}, {8, 0, 0}}, {{12, 1, 0}, {8, 1, 0}}, {{12, 2, 0}, {8, 2, 0}}, {{12, 3, 0}, {8, 3, 0}}, {{12, 4, 0}, {8, 4, 0}}, {{12, 5, 0}, {8, 5, 0}}, {{12, 6, 0}, {8, 5, 0}}, }), [](::testing::TestParamInfo<VersionPair> data) { se::SemanticVersion cuda_version = data.param.first; return absl::StrCat("cuda_", cuda_version.major(), "_", cuda_version.minor()); }); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
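The CUDA-to-PTX mapping exercised by the parameterized test above follows directly from DetermineHighestSupportedPtxVersionFromCudaVersion: toolkits older than 11.0 fall back to PTX 6.5, versions in [11.0, 12.6) map linearly with PTX major = CUDA major - 4, and anything newer clamps to PTX 8.5. Here is a minimal self-contained sketch of that three-branch rule; the Version struct is a hypothetical stand-in for stream_executor::SemanticVersion, which is the only assumption.

#include <cassert>
#include <tuple>

// Stand-in for stream_executor::SemanticVersion: only lexicographic
// (major, minor, patch) ordering matters for this mapping.
struct Version {
  int major;
  int minor;
  int patch;
  bool operator<(const Version& o) const {
    return std::tie(major, minor, patch) <
           std::tie(o.major, o.minor, o.patch);
  }
  bool operator==(const Version& o) const {
    return major == o.major && minor == o.minor && patch == o.patch;
  }
};

// Mirrors DetermineHighestSupportedPtxVersionFromCudaVersion above.
Version PtxVersionFor(Version cuda) {
  if (cuda < Version{11, 0, 0}) return {6, 5, 0};  // pre-11.0: fallback PTX 6.5
  if (cuda < Version{12, 6, 0})                    // 11.0 .. 12.5: linear region
    return {cuda.major - 4, cuda.minor, 0};
  return {8, 5, 0};                                // 12.6+: clamp to PTX 8.5
}

int main() {
  assert((PtxVersionFor({10, 2, 0}) == Version{6, 5, 0}));
  assert((PtxVersionFor({11, 4, 0}) == Version{7, 4, 0}));
  assert((PtxVersionFor({12, 6, 0}) == Version{8, 5, 0}));
  return 0;
}

The linear middle branch is why the test table walks 11.x to 7.x and 12.x to 8.x before flattening out at 8.5 from CUDA 12.6 onward.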
1036b1df-8b68-4267-bcd2-83a33e692cb7
cpp
google/cel-cpp
double_wrapper_type
common/types/double_wrapper_type.h
common/types/double_wrapper_type_test.cc
#ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_DOUBLE_WRAPPER_TYPE_H_ #define THIRD_PARTY_CEL_CPP_COMMON_TYPES_DOUBLE_WRAPPER_TYPE_H_ #include <ostream> #include <string> #include <utility> #include "absl/strings/string_view.h" #include "common/type_kind.h" namespace cel { class Type; class TypeParameters; class DoubleWrapperType final { public: static constexpr TypeKind kKind = TypeKind::kDoubleWrapper; static constexpr absl::string_view kName = "google.protobuf.DoubleValue"; DoubleWrapperType() = default; DoubleWrapperType(const DoubleWrapperType&) = default; DoubleWrapperType(DoubleWrapperType&&) = default; DoubleWrapperType& operator=(const DoubleWrapperType&) = default; DoubleWrapperType& operator=(DoubleWrapperType&&) = default; static TypeKind kind() { return kKind; } static absl::string_view name() { return kName; } static TypeParameters GetParameters(); static std::string DebugString() { return std::string(name()); } constexpr void swap(DoubleWrapperType&) noexcept {} }; inline constexpr void swap(DoubleWrapperType& lhs, DoubleWrapperType& rhs) noexcept { lhs.swap(rhs); } inline constexpr bool operator==(DoubleWrapperType, DoubleWrapperType) { return true; } inline constexpr bool operator!=(DoubleWrapperType lhs, DoubleWrapperType rhs) { return !operator==(lhs, rhs); } template <typename H> H AbslHashValue(H state, DoubleWrapperType) { return std::move(state); } inline std::ostream& operator<<(std::ostream& out, const DoubleWrapperType& type) { return out << type.DebugString(); } } #endif
#include <sstream> #include "absl/hash/hash.h" #include "common/type.h" #include "internal/testing.h" namespace cel { namespace { TEST(DoubleWrapperType, Kind) { EXPECT_EQ(DoubleWrapperType().kind(), DoubleWrapperType::kKind); EXPECT_EQ(Type(DoubleWrapperType()).kind(), DoubleWrapperType::kKind); } TEST(DoubleWrapperType, Name) { EXPECT_EQ(DoubleWrapperType().name(), DoubleWrapperType::kName); EXPECT_EQ(Type(DoubleWrapperType()).name(), DoubleWrapperType::kName); } TEST(DoubleWrapperType, DebugString) { { std::ostringstream out; out << DoubleWrapperType(); EXPECT_EQ(out.str(), DoubleWrapperType::kName); } { std::ostringstream out; out << Type(DoubleWrapperType()); EXPECT_EQ(out.str(), DoubleWrapperType::kName); } } TEST(DoubleWrapperType, Hash) { EXPECT_EQ(absl::HashOf(DoubleWrapperType()), absl::HashOf(DoubleWrapperType())); } TEST(DoubleWrapperType, Equal) { EXPECT_EQ(DoubleWrapperType(), DoubleWrapperType()); EXPECT_EQ(Type(DoubleWrapperType()), DoubleWrapperType()); EXPECT_EQ(DoubleWrapperType(), Type(DoubleWrapperType())); EXPECT_EQ(Type(DoubleWrapperType()), Type(DoubleWrapperType())); } } }
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/double_wrapper_type.h
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/double_wrapper_type_test.cc
4552db5798fb0853b131b783d8875794334fae7f
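DoubleWrapperType above is a stateless tag type: all instances compare equal, AbslHashValue folds in nothing, and swap is a constexpr no-op, which is exactly what the Equal and Hash tests check. A short usage sketch, assuming only that the header above is reachable as common/types/double_wrapper_type.h together with its own dependencies:

#include <iostream>

#include "common/types/double_wrapper_type.h"

int main() {
  cel::DoubleWrapperType a;
  cel::DoubleWrapperType b;
  std::cout << a << "\n";         // prints "google.protobuf.DoubleValue"
  std::cout << (a == b) << "\n";  // prints 1; equality is unconditionally true
  swap(a, b);                     // found by ADL; a constexpr no-op
  return 0;
}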
6c61abf8-3990-4d88-87c1-76687b47cd1a
cpp
tensorflow/tensorflow
gradient_checker
tensorflow/c/eager/gradient_checker.cc
tensorflow/c/eager/gradient_checker_test.cc
#include "tensorflow/c/eager/gradient_checker.h" #include <memory> #include "absl/types/span.h" #include "tensorflow/c/eager/abstract_tensor_handle.h" #include "tensorflow/c/experimental/ops/math_ops.h" #include "tensorflow/c/tf_tensor.h" namespace tensorflow { namespace gradients { using namespace std; void Range(vector<int32_t>* data, int32_t start, int32_t end, int32_t step = 1) { for (int32_t i = start; i < end; i += step) { (*data)[i] = i; } } void GetDims(const TF_Tensor* t, int64_t* out_dims) { int num_dims = TF_NumDims(t); for (int i = 0; i < num_dims; i++) { out_dims[i] = TF_Dim(t, i); } } Status RunAndMaybeSum(AbstractContext* ctx, Model forward, absl::Span<AbstractTensorHandle* const> inputs, absl::Span<AbstractTensorHandle*> outputs, bool use_function) { AbstractTensorHandle* model_outputs[1]; TF_RETURN_IF_ERROR( RunModel(forward, ctx, inputs, model_outputs, use_function)); AbstractTensorHandlePtr model_out(model_outputs[0]); TF_Tensor* model_out_tensor; TF_RETURN_IF_ERROR(GetValue(model_out.get(), &model_out_tensor)); int num_dims_out = TF_NumDims(model_out_tensor); TF_DeleteTensor(model_out_tensor); if (num_dims_out == 0) { outputs[0] = model_out.release(); return absl::OkStatus(); } AbstractTensorHandlePtr sum_dims; { vector<int32_t> vals(num_dims_out); int64_t vals_shape[] = {num_dims_out}; Range(&vals, 0, num_dims_out); AbstractTensorHandle* sum_dims_raw = nullptr; TF_RETURN_IF_ERROR(TestTensorHandleWithDims<int32_t, TF_INT32>( ctx, vals.data(), vals_shape, 1, &sum_dims_raw)); sum_dims.reset(sum_dims_raw); } TF_RETURN_IF_ERROR(ops::Sum(ctx, model_out.get(), sum_dims.get(), &outputs[0], false, "sum_output")); return absl::OkStatus(); } Status CalcNumericalGrad(AbstractContext* ctx, Model forward, absl::Span<AbstractTensorHandle* const> inputs, int input_index, bool use_function, AbstractTensorHandle** numerical_grad) { vector<AbstractTensorHandle*> theta_inputs(inputs.size()); for (int i{}; i < inputs.size(); ++i) { theta_inputs[i] = inputs[i]; } AbstractTensorHandle* theta = theta_inputs[input_index]; TF_Tensor* theta_tensor; TF_RETURN_IF_ERROR(GetValue(theta, &theta_tensor)); int num_elems = TF_TensorElementCount(theta_tensor); vector<float> theta_data(num_elems); memcpy(theta_data.data(), TF_TensorData(theta_tensor), TF_TensorByteSize(theta_tensor)); vector<float> dtheta_approx(num_elems); int num_dims = TF_NumDims(theta_tensor); vector<int64_t> theta_dims(num_dims); GetDims(theta_tensor, theta_dims.data()); vector<float> thetaPlus_data(num_elems); vector<float> thetaMinus_data(num_elems); AbstractTensorHandle* f_outputs[1]; for (int i = 0; i < num_elems; i++) { float epsilon = theta_data[i] == 0 ? 
1e-4 : std::abs(theta_data[i] * 1e-4); AbstractTensorHandlePtr two_eps; { AbstractTensorHandle* two_eps_raw = nullptr; TF_RETURN_IF_ERROR(TestScalarTensorHandle<float, TF_FLOAT>( ctx, 2 * epsilon, &two_eps_raw)); two_eps.reset(two_eps_raw); } memcpy(thetaPlus_data.data(), TF_TensorData(theta_tensor), TF_TensorByteSize(theta_tensor)); thetaPlus_data[i] += epsilon; AbstractTensorHandlePtr thetaPlus; { AbstractTensorHandle* thetaPlus_raw = nullptr; TF_RETURN_IF_ERROR(TestTensorHandleWithDims<float, TF_FLOAT>( ctx, thetaPlus_data.data(), theta_dims.data(), num_dims, &thetaPlus_raw)); thetaPlus.reset(thetaPlus_raw); } memcpy(&thetaMinus_data[0], TF_TensorData(theta_tensor), TF_TensorByteSize(theta_tensor)); thetaMinus_data[i] -= epsilon; AbstractTensorHandlePtr thetaMinus; { AbstractTensorHandle* thetaMinus_raw = nullptr; TF_RETURN_IF_ERROR(TestTensorHandleWithDims<float, TF_FLOAT>( ctx, thetaMinus_data.data(), theta_dims.data(), num_dims, &thetaMinus_raw)); thetaMinus.reset(thetaMinus_raw); } theta_inputs[input_index] = thetaPlus.get(); TF_RETURN_IF_ERROR( RunAndMaybeSum(ctx, forward, theta_inputs, f_outputs, use_function)); AbstractTensorHandlePtr fPlus(f_outputs[0]); theta_inputs[input_index] = thetaMinus.get(); TF_RETURN_IF_ERROR( RunAndMaybeSum(ctx, forward, theta_inputs, f_outputs, use_function)); AbstractTensorHandlePtr fMinus(f_outputs[0]); TF_RETURN_IF_ERROR( ops::Sub(ctx, fPlus.get(), fMinus.get(), f_outputs, "sub_top")); AbstractTensorHandlePtr fDiff(f_outputs[0]); TF_RETURN_IF_ERROR( ops::Div(ctx, fDiff.get(), two_eps.get(), f_outputs, "diff_quotient")); AbstractTensorHandlePtr diff_quotient(f_outputs[0]); TF_Tensor* grad_tensor; TF_RETURN_IF_ERROR(GetValue(diff_quotient.get(), &grad_tensor)); float grad_data[1]; memcpy(&grad_data[0], TF_TensorData(grad_tensor), TF_TensorByteSize(grad_tensor)); TF_DeleteTensor(grad_tensor); dtheta_approx[i] = grad_data[0]; } TF_RETURN_IF_ERROR(TestTensorHandleWithDims<float, TF_FLOAT>( ctx, dtheta_approx.data(), theta_dims.data(), num_dims, numerical_grad)); TF_DeleteTensor(theta_tensor); return absl::OkStatus(); } } }
#include "tensorflow/c/eager/gradient_checker.h" #include <memory> #include "absl/types/span.h" #include "tensorflow/c/eager/abstract_tensor_handle.h" #include "tensorflow/c/eager/c_api_unified_experimental.h" #include "tensorflow/c/eager/unified_api_testutil.h" #include "tensorflow/c/experimental/ops/math_ops.h" #include "tensorflow/c/tf_status_helper.h" #include "tensorflow/c/tf_tensor.h" #include "tensorflow/core/platform/tensor_float_32_utils.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace gradients { namespace internal { namespace { using tensorflow::TF_StatusPtr; void CompareNumericalAndManualGradients( Model model, AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs, int input_index, float* expected_grad, int num_grad, bool use_function, double abs_error = 1e-2) { Status s; AbstractTensorHandlePtr numerical_grad; { AbstractTensorHandle* numerical_grad_raw; s = CalcNumericalGrad(ctx, model, inputs, input_index, use_function, &numerical_grad_raw); ASSERT_EQ(errors::OK, s.code()) << s.message(); numerical_grad.reset(numerical_grad_raw); } TF_Tensor* numerical_tensor; s = GetValue(numerical_grad.get(), &numerical_tensor); ASSERT_EQ(errors::OK, s.code()) << s.message(); auto num_elem_numerical = TF_TensorElementCount(numerical_tensor); ASSERT_EQ(num_elem_numerical, num_grad); float* dnumerical = new float[num_elem_numerical]{0}; memcpy(&dnumerical[0], TF_TensorData(numerical_tensor), TF_TensorByteSize(numerical_tensor)); for (int j = 0; j < num_grad; j++) { ASSERT_NEAR(dnumerical[j], expected_grad[j], abs_error); } delete[] dnumerical; TF_DeleteTensor(numerical_tensor); } Status MatMulModel(AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs, absl::Span<AbstractTensorHandle*> outputs) { return ops::MatMul(ctx, inputs[0], inputs[1], &outputs[0], false, false, "MatMul"); } Status MulModel(AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs, absl::Span<AbstractTensorHandle*> outputs) { return ops::Mul(ctx, inputs[0], inputs[1], &outputs[0], "Mul"); } class GradientCheckerTest : public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> { protected: void SetUp() override { TF_StatusPtr status(TF_NewStatus()); TF_SetTracingImplementation(std::get<0>(GetParam()), status.get()); { Status s = StatusFromTF_Status(status.get()); CHECK_EQ(errors::OK, s.code()) << s.message(); } { AbstractContext* ctx_raw = nullptr; Status s = BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw); ASSERT_EQ(errors::OK, s.code()) << s.message(); ctx_.reset(ctx_raw); } enable_tensor_float_32_execution(false); } AbstractContextPtr ctx_; public: bool UseMlir() const { return strcmp(std::get<0>(GetParam()), "mlir") == 0; } bool UseFunction() const { return std::get<2>(GetParam()); } }; TEST_P(GradientCheckerTest, TestMatMul) { float A_vals[] = {1.0f, 2.0f, 3.0f, 4.0f}; int64_t A_dims[] = {2, 2}; AbstractTensorHandlePtr A; { AbstractTensorHandle* A_raw; Status s = TestTensorHandleWithDims<float, TF_FLOAT>(ctx_.get(), A_vals, A_dims, 2, &A_raw); ASSERT_EQ(errors::OK, s.code()) << s.message(); A.reset(A_raw); } float B_vals[] = {.5f, -1.0f, 1.0f, 1.0f}; int64_t B_dims[] = {2, 2}; AbstractTensorHandlePtr B; { AbstractTensorHandle* B_raw; Status s = TestTensorHandleWithDims<float, TF_FLOAT>(ctx_.get(), B_vals, B_dims, 2, &B_raw); ASSERT_EQ(errors::OK, s.code()) << s.message(); B.reset(B_raw); } float expected_dA[4] = {-.5f, 2.0f, -.5f, 2.0f}; ASSERT_NO_FATAL_FAILURE(CompareNumericalAndManualGradients( MatMulModel, 
ctx_.get(), {A.get(), B.get()}, 0, expected_dA, 4, UseFunction())); } TEST_P(GradientCheckerTest, TestMul) { AbstractTensorHandlePtr x; { AbstractTensorHandle* x_raw = nullptr; Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx_.get(), 2.0f, &x_raw); ASSERT_EQ(errors::OK, s.code()) << s.message(); x.reset(x_raw); } AbstractTensorHandlePtr y; { AbstractTensorHandle* y_raw = nullptr; Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx_.get(), 7.0f, &y_raw); ASSERT_EQ(errors::OK, s.code()) << s.message(); y.reset(y_raw); } float expected_dx[1] = {7.0f}; ASSERT_NO_FATAL_FAILURE(CompareNumericalAndManualGradients( MulModel, ctx_.get(), {x.get(), y.get()}, 0, expected_dx, 1, UseFunction())); } #ifdef PLATFORM_GOOGLE INSTANTIATE_TEST_SUITE_P( UnifiedCAPI, GradientCheckerTest, ::testing::Combine(::testing::Values("graphdef"), ::testing::Values(false), ::testing::Values(true, false))); #else INSTANTIATE_TEST_SUITE_P( UnifiedCAPI, GradientCheckerTest, ::testing::Combine(::testing::Values("graphdef"), ::testing::Values(false), ::testing::Values(true, false))); #endif } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/gradient_checker.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/gradient_checker_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
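The loop at the core of CalcNumericalGrad above is plain central differencing: each element theta[i] is perturbed by +/- epsilon (1e-4 absolute when theta[i] is zero, 1e-4 relative otherwise), the forward model runs twice, and the difference quotient (f(theta+eps) - f(theta-eps)) / (2*eps) becomes dtheta_approx[i]; RunAndMaybeSum first reduces non-scalar outputs with Sum so that f is effectively scalar. A minimal sketch of the same scheme on a plain std::function, checked against the TestMul expectation (x=2, y=7 gives dx=7):

#include <cassert>
#include <cmath>
#include <cstddef>
#include <functional>
#include <vector>

// Central-difference gradient of a scalar function, using the same
// epsilon rule as the source: absolute 1e-4 at zero, relative otherwise.
std::vector<float> NumericalGrad(
    const std::function<float(const std::vector<float>&)>& f,
    std::vector<float> theta) {
  std::vector<float> grad(theta.size());
  for (std::size_t i = 0; i < theta.size(); ++i) {
    float eps = theta[i] == 0 ? 1e-4f : std::abs(theta[i]) * 1e-4f;
    float saved = theta[i];
    theta[i] = saved + eps;
    float f_plus = f(theta);   // f(theta + eps * e_i)
    theta[i] = saved - eps;
    float f_minus = f(theta);  // f(theta - eps * e_i)
    theta[i] = saved;
    grad[i] = (f_plus - f_minus) / (2 * eps);
  }
  return grad;
}

int main() {
  // d/dx (x*y) = y and d/dy (x*y) = x, matching TestMul's expectation.
  auto f = [](const std::vector<float>& v) { return v[0] * v[1]; };
  std::vector<float> g = NumericalGrad(f, {2.0f, 7.0f});
  assert(std::abs(g[0] - 7.0f) < 1e-2f);
  assert(std::abs(g[1] - 2.0f) < 1e-2f);
  return 0;
}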
cfb250fc-4603-4bcf-a95d-0b15964ff56f
cpp
google/googletest
gmock
googlemock/src/gmock.cc
googlemock/test/gmock_test.cc
#include "gmock/gmock.h" #include <string> #include "gmock/internal/gmock-port.h" GMOCK_DEFINE_bool_(catch_leaked_mocks, true, "true if and only if Google Mock should report leaked " "mock objects as failures."); GMOCK_DEFINE_string_(verbose, testing::internal::kWarningVerbosity, "Controls how verbose Google Mock's output is." " Valid values:\n" " info - prints all messages.\n" " warning - prints warnings and errors.\n" " error - prints errors only."); GMOCK_DEFINE_int32_(default_mock_behavior, 1, "Controls the default behavior of mocks." " Valid values:\n" " 0 - by default, mocks act as NiceMocks.\n" " 1 - by default, mocks act as NaggyMocks.\n" " 2 - by default, mocks act as StrictMocks."); namespace testing { namespace internal { static const char* ParseGoogleMockFlagValue(const char* str, const char* flag_name, bool def_optional) { if (str == nullptr || flag_name == nullptr) return nullptr; const std::string flag_name_str = std::string("--gmock_") + flag_name; const size_t flag_name_len = flag_name_str.length(); if (strncmp(str, flag_name_str.c_str(), flag_name_len) != 0) return nullptr; const char* flag_end = str + flag_name_len; if (def_optional && (flag_end[0] == '\0')) { return flag_end; } if (flag_end[0] != '=') return nullptr; return flag_end + 1; } static bool ParseGoogleMockFlag(const char* str, const char* flag_name, bool* value) { const char* const value_str = ParseGoogleMockFlagValue(str, flag_name, true); if (value_str == nullptr) return false; *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F'); return true; } template <typename String> static bool ParseGoogleMockFlag(const char* str, const char* flag_name, String* value) { const char* const value_str = ParseGoogleMockFlagValue(str, flag_name, false); if (value_str == nullptr) return false; *value = value_str; return true; } static bool ParseGoogleMockFlag(const char* str, const char* flag_name, int32_t* value) { const char* const value_str = ParseGoogleMockFlagValue(str, flag_name, true); if (value_str == nullptr) return false; return ParseInt32(Message() << "The value of flag --" << flag_name, value_str, value); } template <typename CharType> void InitGoogleMockImpl(int* argc, CharType** argv) { InitGoogleTest(argc, argv); if (*argc <= 0) return; for (int i = 1; i != *argc; i++) { const std::string arg_string = StreamableToString(argv[i]); const char* const arg = arg_string.c_str(); bool found_gmock_flag = false; #define GMOCK_INTERNAL_PARSE_FLAG(flag_name) \ if (!found_gmock_flag) { \ auto value = GMOCK_FLAG_GET(flag_name); \ if (ParseGoogleMockFlag(arg, #flag_name, &value)) { \ GMOCK_FLAG_SET(flag_name, value); \ found_gmock_flag = true; \ } \ } GMOCK_INTERNAL_PARSE_FLAG(catch_leaked_mocks) GMOCK_INTERNAL_PARSE_FLAG(verbose) GMOCK_INTERNAL_PARSE_FLAG(default_mock_behavior) if (found_gmock_flag) { for (int j = i; j != *argc; j++) { argv[j] = argv[j + 1]; } (*argc)--; i--; } } } } GTEST_API_ void InitGoogleMock(int* argc, char** argv) { internal::InitGoogleMockImpl(argc, argv); } GTEST_API_ void InitGoogleMock(int* argc, wchar_t** argv) { internal::InitGoogleMockImpl(argc, argv); } GTEST_API_ void InitGoogleMock() { int argc = 1; const auto arg0 = "dummy"; char* argv0 = const_cast<char*>(arg0); char** argv = &argv0; internal::InitGoogleMockImpl(&argc, argv); } }
#include "gmock/gmock.h" #include <string> #include "gtest/gtest.h" #include "gtest/internal/custom/gtest.h" #if !defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_) using testing::InitGoogleMock; template <typename Char, int M, int N> void TestInitGoogleMock(const Char* (&argv)[M], const Char* (&new_argv)[N], const ::std::string& expected_gmock_verbose) { const ::std::string old_verbose = GMOCK_FLAG_GET(verbose); int argc = M - 1; InitGoogleMock(&argc, const_cast<Char**>(argv)); ASSERT_EQ(N - 1, argc) << "The new argv has wrong number of elements."; for (int i = 0; i < N; i++) { EXPECT_STREQ(new_argv[i], argv[i]); } EXPECT_EQ(expected_gmock_verbose, GMOCK_FLAG_GET(verbose)); GMOCK_FLAG_SET(verbose, old_verbose); } TEST(InitGoogleMockTest, ParsesInvalidCommandLine) { const char* argv[] = {nullptr}; const char* new_argv[] = {nullptr}; TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(InitGoogleMockTest, ParsesEmptyCommandLine) { const char* argv[] = {"foo.exe", nullptr}; const char* new_argv[] = {"foo.exe", nullptr}; TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(InitGoogleMockTest, ParsesSingleFlag) { const char* argv[] = {"foo.exe", "--gmock_verbose=info", nullptr}; const char* new_argv[] = {"foo.exe", nullptr}; TestInitGoogleMock(argv, new_argv, "info"); } TEST(InitGoogleMockTest, ParsesMultipleFlags) { int old_default_behavior = GMOCK_FLAG_GET(default_mock_behavior); const wchar_t* argv[] = {L"foo.exe", L"--gmock_verbose=info", L"--gmock_default_mock_behavior=2", nullptr}; const wchar_t* new_argv[] = {L"foo.exe", nullptr}; TestInitGoogleMock(argv, new_argv, "info"); EXPECT_EQ(2, GMOCK_FLAG_GET(default_mock_behavior)); EXPECT_NE(2, old_default_behavior); GMOCK_FLAG_SET(default_mock_behavior, old_default_behavior); } TEST(InitGoogleMockTest, ParsesUnrecognizedFlag) { const char* argv[] = {"foo.exe", "--non_gmock_flag=blah", nullptr}; const char* new_argv[] = {"foo.exe", "--non_gmock_flag=blah", nullptr}; TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(InitGoogleMockTest, ParsesGoogleMockFlagAndUnrecognizedFlag) { const char* argv[] = {"foo.exe", "--non_gmock_flag=blah", "--gmock_verbose=error", nullptr}; const char* new_argv[] = {"foo.exe", "--non_gmock_flag=blah", nullptr}; TestInitGoogleMock(argv, new_argv, "error"); } TEST(WideInitGoogleMockTest, ParsesInvalidCommandLine) { const wchar_t* argv[] = {nullptr}; const wchar_t* new_argv[] = {nullptr}; TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(WideInitGoogleMockTest, ParsesEmptyCommandLine) { const wchar_t* argv[] = {L"foo.exe", nullptr}; const wchar_t* new_argv[] = {L"foo.exe", nullptr}; TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(WideInitGoogleMockTest, ParsesSingleFlag) { const wchar_t* argv[] = {L"foo.exe", L"--gmock_verbose=info", nullptr}; const wchar_t* new_argv[] = {L"foo.exe", nullptr}; TestInitGoogleMock(argv, new_argv, "info"); } TEST(WideInitGoogleMockTest, ParsesMultipleFlags) { int old_default_behavior = GMOCK_FLAG_GET(default_mock_behavior); const wchar_t* argv[] = {L"foo.exe", L"--gmock_verbose=info", L"--gmock_default_mock_behavior=2", nullptr}; const wchar_t* new_argv[] = {L"foo.exe", nullptr}; TestInitGoogleMock(argv, new_argv, "info"); EXPECT_EQ(2, GMOCK_FLAG_GET(default_mock_behavior)); EXPECT_NE(2, old_default_behavior); GMOCK_FLAG_SET(default_mock_behavior, old_default_behavior); } TEST(WideInitGoogleMockTest, ParsesUnrecognizedFlag) { const wchar_t* argv[] = {L"foo.exe", L"--non_gmock_flag=blah", nullptr}; const 
wchar_t* new_argv[] = {L"foo.exe", L"--non_gmock_flag=blah", nullptr}; TestInitGoogleMock(argv, new_argv, GMOCK_FLAG_GET(verbose)); } TEST(WideInitGoogleMockTest, ParsesGoogleMockFlagAndUnrecognizedFlag) { const wchar_t* argv[] = {L"foo.exe", L"--non_gmock_flag=blah", L"--gmock_verbose=error", nullptr}; const wchar_t* new_argv[] = {L"foo.exe", L"--non_gmock_flag=blah", nullptr}; TestInitGoogleMock(argv, new_argv, "error"); } #endif TEST(FlagTest, IsAccessibleInCode) { bool dummy = GMOCK_FLAG_GET(catch_leaked_mocks) && GMOCK_FLAG_GET(verbose).empty(); (void)dummy; }
https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/src/gmock.cc
https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/test/gmock_test.cc
a1e255a582377e1006bb88a408ac3f933ba7c916
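ParseGoogleMockFlagValue above is a bare prefix match on "--gmock_<name>": a match with nothing after the prefix is accepted only when the default is optional (the bool-flag form), otherwise the value must follow an '='. A standalone sketch of that three-way behavior; FlagValue is a hypothetical rename used for illustration:

#include <cassert>
#include <cstring>
#include <string>

// Returns a pointer to the flag's value, "" for a bare bool flag,
// or nullptr when the argument does not match "--gmock_<flag_name>".
const char* FlagValue(const char* arg, const char* flag_name,
                      bool def_optional) {
  const std::string prefix = std::string("--gmock_") + flag_name;
  if (std::strncmp(arg, prefix.c_str(), prefix.size()) != 0) return nullptr;
  const char* rest = arg + prefix.size();
  if (def_optional && rest[0] == '\0') return rest;  // bare "--gmock_foo"
  if (rest[0] != '=') return nullptr;                // e.g. "--gmock_foobar=1"
  return rest + 1;
}

int main() {
  assert(std::string(FlagValue("--gmock_verbose=info", "verbose", false)) ==
         "info");
  assert(FlagValue("--gmock_verbose_extra=x", "verbose", false) == nullptr);
  assert(FlagValue("--other_flag=1", "verbose", false) == nullptr);
  return 0;
}

The second assertion shows why the '=' check matters: without it, --gmock_verbose_extra=x would be mistakenly consumed as the verbose flag.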
c98917b6-8532-4f36-95e6-51113e57d6e4
cpp
tensorflow/tensorflow
grappler
tensorflow/c/experimental/grappler/grappler.cc
tensorflow/c/experimental/grappler/grappler_test.cc
#include "tensorflow/c/experimental/grappler/grappler.h" #include <algorithm> #include <cstddef> #include <cstring> #include <string> #include <unordered_map> #include <unordered_set> #include <vector> #include "absl/status/status.h" #include "tensorflow/c/c_api_macros.h" #include "tensorflow/c/experimental/grappler/grappler_internal.h" #include "tensorflow/c/tf_buffer.h" #include "tensorflow/c/tf_buffer_internal.h" #include "tensorflow/c/tf_status.h" #include "tensorflow/c/tf_status_helper.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/grappler/costs/graph_properties.h" #include "tensorflow/core/grappler/costs/op_performance_data.pb.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/protobuf/rewriter_config.pb.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" namespace { #define VALIDATE_STRUCT_SIZE(STRUCT_NAME, STRUCT_OBJ, SIZE_VALUE_NAME) \ do { \ if (STRUCT_OBJ.struct_size == 0) { \ return absl::Status(absl::StatusCode::kFailedPrecondition, \ "struct_size field in " #STRUCT_NAME \ " must be set to " #SIZE_VALUE_NAME "."); \ } \ } while (0) #define VALIDATE_MEMBER(STRUCT_NAME, STRUCT_OBJ, NAME) \ do { \ if (STRUCT_OBJ.NAME == 0) { \ return absl::Status(absl::StatusCode::kFailedPrecondition, \ "'" #NAME "' field in " #STRUCT_NAME \ " must be set."); \ } \ } while (0) absl::Status ValidateTPOptimizerRegistrationParams( const TP_OptimizerRegistrationParams& params) { VALIDATE_STRUCT_SIZE(TP_OptimizerRegistrationParams, params, TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE); VALIDATE_MEMBER(TP_OptimizerRegistrationParams, params, device_type); return absl::OkStatus(); } absl::Status ValidateTPOptimizer(const TP_Optimizer& optimizer) { VALIDATE_STRUCT_SIZE(TP_Optimizer, optimizer, TP_OPTIMIZER_STRUCT_SIZE); VALIDATE_MEMBER(TP_Optimizer, optimizer, optimize_func); return absl::OkStatus(); } absl::Status ValidateTPOptimizerConfigs(const TP_OptimizerConfigs& configs) { VALIDATE_STRUCT_SIZE(TP_OptimizerConfigs, configs, TP_OPTIMIZER_CONFIGS_STRUCT_SIZE); return absl::OkStatus(); } #undef VALIDATE_MEMBER #undef VALIDATE_STRUCT_SIZE } namespace tensorflow { namespace grappler { Status CGraphOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item, GraphDef* optimized_graph_def) { OwnedTFStatus c_status(TF_NewStatus()); OwnedTFBuffer graph_buf(TF_NewBuffer()); OwnedTFBuffer optimized_graph_buf(TF_NewBuffer()); TF_RETURN_IF_ERROR(MessageToBuffer(item.graph, graph_buf.get())); optimizer_.optimize_func(c_optimizer_, graph_buf.get(), reinterpret_cast<const TF_GrapplerItem*>(&item), optimized_graph_buf.get(), c_status.get()); TF_RETURN_IF_ERROR(tsl::StatusFromTF_Status(c_status.get())); TF_RETURN_IF_ERROR( BufferToMessage(optimized_graph_buf.get(), optimized_graph_def)); return absl::OkStatus(); } #define CONFIG_TOGGLE(optimizer) \ if (tp_configs.optimizer == TF_TriState_Off) \ configs.toggle_config[#optimizer] = RewriterConfig::OFF; \ else \ configs.toggle_config[#optimizer] = RewriterConfig::ON; void CGraphOptimizerRegister( const PluginGraphOptimizerRegistry::Creator& creator, const TP_OptimizerConfigs tp_configs, const char* device_type) { ConfigList configs; if (tp_configs.disable_model_pruning == TF_TriState_On) configs.disable_model_pruning = true; else 
configs.disable_model_pruning = false; CONFIG_TOGGLE(implementation_selector); CONFIG_TOGGLE(function_optimization); CONFIG_TOGGLE(common_subgraph_elimination); CONFIG_TOGGLE(arithmetic_optimization); CONFIG_TOGGLE(debug_stripper); CONFIG_TOGGLE(constant_folding); CONFIG_TOGGLE(shape_optimization); CONFIG_TOGGLE(auto_mixed_precision); CONFIG_TOGGLE(auto_mixed_precision_onednn_bfloat16); CONFIG_TOGGLE(auto_mixed_precision_mkl); CONFIG_TOGGLE(pin_to_host_optimization); CONFIG_TOGGLE(layout_optimizer); CONFIG_TOGGLE(remapping); CONFIG_TOGGLE(loop_optimization); CONFIG_TOGGLE(dependency_optimization); CONFIG_TOGGLE(auto_parallel); CONFIG_TOGGLE(memory_optimization); CONFIG_TOGGLE(scoped_allocator_optimization); PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie( creator, device_type, configs); } #undef CONFIG_TOGGLE absl::Status InitGraphPlugin(void* dso_handle) { tsl::Env* env = tsl::Env::Default(); void* dso_symbol; TF_RETURN_IF_ERROR( env->GetSymbolFromLibrary(dso_handle, "TF_InitGraph", &dso_symbol)); auto init_fn = reinterpret_cast<TFInitGraphPluginFn>(dso_symbol); return InitGraphPlugin(init_fn); } absl::Status InitGraphPlugin(TFInitGraphPluginFn init_fn) { TP_OptimizerRegistrationParams params{ TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE}; TP_Optimizer optimizer{TP_OPTIMIZER_STRUCT_SIZE}; TP_OptimizerConfigs optimizer_configs{TP_OPTIMIZER_CONFIGS_STRUCT_SIZE}; params.major_version = GO_MAJOR; params.minor_version = GO_MINOR; params.patch_version = GO_PATCH; params.optimizer = &optimizer; params.optimizer_configs = &optimizer_configs; OwnedTFStatus c_status(TF_NewStatus()); init_fn(&params, c_status.get()); TF_RETURN_IF_ERROR(tsl::StatusFromTF_Status(c_status.get())); TF_RETURN_IF_ERROR(ValidateTPOptimizerRegistrationParams(params)); TF_RETURN_IF_ERROR(ValidateTPOptimizer(optimizer)); TF_RETURN_IF_ERROR(ValidateTPOptimizerConfigs(optimizer_configs)); CGraphOptimizerRegister( [=]() { return new CGraphOptimizer(optimizer, params.device_type); }, optimizer_configs, params.device_type); return absl::OkStatus(); } } } void TF_GetNodesToPreserveListSize(const TF_GrapplerItem* item, int* num_values, size_t* storage_size, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); const std::unordered_set<std::string>& nodes = reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item) ->NodesToPreserve(); *num_values = nodes.size(); *storage_size = 0; for (const std::string& str : nodes) { *storage_size += str.size(); } } void TF_GetNodesToPreserveList(const TF_GrapplerItem* item, char** values, size_t* lengths, int num_values, void* storage, size_t storage_size, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); const std::unordered_set<std::string>& nodes = reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item) ->NodesToPreserve(); char* p = static_cast<char*>(storage); int index = 0; for (const std::string& s : nodes) { if (index >= num_values) break; values[index] = p; lengths[index] = s.size(); if ((p + s.size()) > (static_cast<char*>(storage) + storage_size)) { tsl::Set_TF_Status_from_Status( status, absl::InvalidArgumentError( "Not enough storage to hold the requested list of nodes")); return; } memcpy(values[index], s.data(), s.size()); p += s.size(); index++; } } void TF_GetFetchNodesListSize(const TF_GrapplerItem* item, int* num_values, size_t* storage_size, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); const std::vector<std::string>& nodes = reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)->fetch; *num_values = nodes.size(); 
*storage_size = 0; for (const std::string& str : nodes) { *storage_size += str.size(); } } void TF_GetFetchNodesList(const TF_GrapplerItem* item, char** values, size_t* lengths, int num_values, void* storage, size_t storage_size, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); const std::vector<std::string>& nodes = reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item)->fetch; const int len = std::min(num_values, static_cast<int>(nodes.size())); char* p = static_cast<char*>(storage); for (int index = 0; index < len; ++index) { const std::string& s = nodes[index]; values[index] = p; lengths[index] = s.size(); if ((p + s.size()) > (static_cast<char*>(storage) + storage_size)) { tsl::Set_TF_Status_from_Status( status, absl::InvalidArgumentError( "Not enough storage to hold the requested list of nodes")); return; } memcpy(values[index], s.data(), s.size()); p += s.size(); } } TF_GraphProperties* TF_NewGraphProperties(const TF_GrapplerItem* item) { return reinterpret_cast<TF_GraphProperties*>( new tensorflow::grappler::GraphProperties( *reinterpret_cast<const tensorflow::grappler::GrapplerItem*>(item))); } void TF_DeleteGraphProperties(TF_GraphProperties* graph_properties) { if (graph_properties == nullptr) return; delete reinterpret_cast<tensorflow::grappler::GraphProperties*>( graph_properties); } void TF_InferStatically(TF_GraphProperties* graph_properties, TF_Bool assume_valid_feeds, TF_Bool aggressive_shape_inference, TF_Bool include_input_tensor_values, TF_Bool include_output_tensor_values, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); absl::Status s = reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties) ->InferStatically(assume_valid_feeds, aggressive_shape_inference, include_input_tensor_values, include_output_tensor_values); if (!s.ok()) { tsl::Set_TF_Status_from_Status(status, s); } } void TF_GetInputPropertiesListSize(TF_GraphProperties* graph_properties, const char* name, int* num_values, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); *num_values = reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties) ->GetInputProperties(name) .size(); } void TF_GetOutputPropertiesListSize(TF_GraphProperties* graph_properties, const char* name, int* num_values, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); *num_values = reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties) ->GetOutputProperties(name) .size(); } void TF_GetInputPropertiesList(TF_GraphProperties* graph_properties, const char* name, TF_Buffer** properties, int num_values, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); const std::vector<tensorflow::OpInfo::TensorProperties>& tensor_properties = reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties) ->GetInputProperties(name); const int len = std::min(num_values, static_cast<int>(tensor_properties.size())); for (int i = 0; i < len; ++i) { absl::Status s = tensorflow::MessageToBuffer(tensor_properties[i], properties[i]); if (!s.ok()) { tsl::Set_TF_Status_from_Status(status, s); return; } } } void TF_GetOutputPropertiesList(TF_GraphProperties* graph_properties, const char* name, TF_Buffer** properties, int num_values, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); const std::vector<tensorflow::OpInfo::TensorProperties>& tensor_properties = reinterpret_cast<tensorflow::grappler::GraphProperties*>(graph_properties) ->GetOutputProperties(name); const int len = std::min(num_values, static_cast<int>(tensor_properties.size())); for (int i = 0; i < len; ++i) 
{ absl::Status s = tensorflow::MessageToBuffer(tensor_properties[i], properties[i]); if (!s.ok()) { tsl::Set_TF_Status_from_Status(status, s); return; } } } TF_FunctionLibraryDefinition* TF_NewFunctionLibraryDefinition( const TF_Buffer* graph_buf, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); tensorflow::GraphDef graph_def; absl::Status s = tensorflow::BufferToMessage(graph_buf, &graph_def); if (!s.ok()) { tsl::Set_TF_Status_from_Status(status, s); return nullptr; } return reinterpret_cast<TF_FunctionLibraryDefinition*>( new tensorflow::FunctionLibraryDefinition( tensorflow::OpRegistry::Global(), graph_def.library())); } void TF_DeleteFunctionLibraryDefinition(TF_FunctionLibraryDefinition* fn_lib) { if (fn_lib == nullptr) return; delete reinterpret_cast<tensorflow::FunctionLibraryDefinition*>(fn_lib); } void TF_LookUpOpDef(TF_FunctionLibraryDefinition* fn_lib, const char* name, TF_Buffer* buf, TF_Status* status) { TF_SetStatus(status, TF_OK, ""); const tensorflow::OpDef* op_def_ptr = nullptr; absl::Status s = reinterpret_cast<tensorflow::FunctionLibraryDefinition*>(fn_lib) ->LookUpOpDef(name, &op_def_ptr); if (!s.ok()) { tsl::Set_TF_Status_from_Status(status, s); return; } s = tensorflow::MessageToBuffer(*op_def_ptr, buf); if (!s.ok()) { tsl::Set_TF_Status_from_Status(status, s); return; } }
#include "tensorflow/c/experimental/grappler/grappler.h" #include "absl/log/check.h" #include "tensorflow/c/experimental/grappler/grappler_internal.h" #include "tensorflow/c/tf_buffer.h" #include "tensorflow/c/tf_buffer_internal.h" #include "tensorflow/c/tf_status.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/grappler/clusters/single_machine.h" #include "tensorflow/core/grappler/costs/op_performance_data.pb.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h" #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/protobuf/rewriter_config.pb.h" #include "tsl/protobuf/error_codes.pb.h" namespace tensorflow { namespace grappler { namespace { void optimize_func(void* optimizer, const TF_Buffer* graph_buf, const TF_GrapplerItem* item, TF_Buffer* optimized_graph_buf, TF_Status* tf_status) {} void PopulateDefaultParam(TP_OptimizerRegistrationParams* params) { params->struct_size = TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE; params->optimizer_configs->struct_size = TP_OPTIMIZER_CONFIGS_STRUCT_SIZE; params->optimizer->struct_size = TP_OPTIMIZER_STRUCT_SIZE; params->optimizer->create_func = nullptr; params->optimizer->optimize_func = optimize_func; params->optimizer->destroy_func = nullptr; } TEST(Grappler, SuccessfulRegistration) { auto plugin_init = [](TP_OptimizerRegistrationParams* const params, TF_Status* const status) -> void { TF_SetStatus(status, TF_OK, ""); PopulateDefaultParam(params); params->device_type = "Success"; params->optimizer_configs->remapping = TF_TriState_Off; }; TF_ASSERT_OK(InitGraphPlugin(plugin_init)); ASSERT_EQ(PluginGraphOptimizerRegistry::CreateOptimizers( std::set<string>{"Success"}) .size(), 1); ConfigList config = PluginGraphOptimizerRegistry::GetPluginConfigs( true, std::set<string>{"Success"}); ASSERT_EQ(config.toggle_config["remapping"], RewriterConfig::OFF); } TEST(Grappler, MultiplePluginRegistration) { auto plugin_init_0 = [](TP_OptimizerRegistrationParams* const params, TF_Status* const status) -> void { TF_SetStatus(status, TF_OK, ""); PopulateDefaultParam(params); params->device_type = "Device0"; }; auto plugin_init_1 = [](TP_OptimizerRegistrationParams* const params, TF_Status* const status) -> void { TF_SetStatus(status, TF_OK, ""); PopulateDefaultParam(params); params->device_type = "Device1"; }; TF_ASSERT_OK(InitGraphPlugin(plugin_init_0)); TF_ASSERT_OK(InitGraphPlugin(plugin_init_1)); ASSERT_EQ(PluginGraphOptimizerRegistry::CreateOptimizers( std::set<string>{"Device0", "Device1"}) .size(), 2); } TEST(Grappler, DeviceTypeNotSet) { auto plugin_init = [](TP_OptimizerRegistrationParams* const params, TF_Status* const status) -> void { TF_SetStatus(status, TF_OK, ""); PopulateDefaultParam(params); params->device_type = nullptr; }; tensorflow::Status status = InitGraphPlugin(plugin_init); ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION); ASSERT_EQ( status.message(), "'device_type' field in TP_OptimizerRegistrationParams must be set."); } TEST(Grappler, OptimizeFuncNotSet) { auto plugin_init = 
[](TP_OptimizerRegistrationParams* const params, TF_Status* const status) -> void { TF_SetStatus(status, TF_OK, ""); PopulateDefaultParam(params); params->device_type = "FuncNotSet"; params->optimizer->optimize_func = nullptr; }; tensorflow::Status status = InitGraphPlugin(plugin_init); ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION); ASSERT_EQ(status.message(), "'optimize_func' field in TP_Optimizer must be set."); } TEST(TF_GrapplerItem, NodesToPreserve) { GrapplerItem item; item.fetch = std::vector<string>{"Conv", "BiasAdd"}; std::unordered_set<string> nodes_preserved = item.NodesToPreserve(); TF_GrapplerItem* c_item = reinterpret_cast<TF_GrapplerItem*>(&item); int list_total_size = 0; for (const string& s : nodes_preserved) { list_total_size += s.size(); } size_t storage_size = 0; int num_values = 0; TF_Status* status = TF_NewStatus(); TF_GetNodesToPreserveListSize(c_item, &num_values, &storage_size, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); EXPECT_EQ(nodes_preserved.size(), num_values); EXPECT_EQ(list_total_size, storage_size); std::unique_ptr<char*[]> values(new char*[nodes_preserved.size()]); std::unique_ptr<size_t[]> lens(new size_t[nodes_preserved.size()]); std::unique_ptr<char[]> storage(new char[storage_size]); TF_GetNodesToPreserveList(c_item, values.get(), lens.get(), nodes_preserved.size(), storage.get(), storage_size, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); for (size_t i = 0; i < nodes_preserved.size(); ++i) { EXPECT_EQ(nodes_preserved.find(string(static_cast<const char*>(values[i]), lens[i])) != nodes_preserved.end(), true); } TF_DeleteStatus(status); } TEST(TF_GrapplerItem, FetchNodes) { GrapplerItem item; item.fetch = std::vector<string>{"Conv", "BiasAdd"}; TF_GrapplerItem* c_item = reinterpret_cast<TF_GrapplerItem*>(&item); int list_total_size = 0; for (const string& s : item.fetch) { list_total_size += s.size(); } size_t storage_size = 0; int num_values = 0; TF_Status* status = TF_NewStatus(); TF_GetFetchNodesListSize(c_item, &num_values, &storage_size, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); EXPECT_EQ(item.fetch.size(), num_values); EXPECT_EQ(list_total_size, storage_size); std::unique_ptr<char*[]> values(new char*[item.fetch.size()]); std::unique_ptr<size_t[]> lens(new size_t[item.fetch.size()]); std::unique_ptr<char[]> storage(new char[storage_size]); TF_GetFetchNodesList(c_item, values.get(), lens.get(), item.fetch.size(), storage.get(), storage_size, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); for (size_t i = 0; i < item.fetch.size(); ++i) { EXPECT_EQ(item.fetch[i].size(), lens[i]) << i; EXPECT_EQ(item.fetch[i], string(static_cast<const char*>(values[i]), lens[i])) << i; } TF_DeleteStatus(status); } TEST(TF_GraphProperties, InputProperties) { std::unique_ptr<SingleMachine> cluster(new SingleMachine(5 * 60, 3, 0)); TF_ASSERT_OK(cluster->Provision()); TrivialTestGraphInputYielder fake_input(4, 1, 10, false, cluster->GetDeviceNames()); GrapplerItem item; CHECK(fake_input.NextItem(&item)); TF_Status* status = TF_NewStatus(); TF_GraphProperties* graph_properties = TF_NewGraphProperties(reinterpret_cast<TF_GrapplerItem*>(&item)); TF_InferStatically(graph_properties, true, false, false, false, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); for (const NodeDef& node : item.graph.node()) { if (node.op() == "AddN") { int num_values = 0; TF_GetInputPropertiesListSize(graph_properties, node.name().c_str(), &num_values, status); 
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); EXPECT_EQ(num_values, 1); std::vector<TF_Buffer*> in_props_buf(num_values, TF_NewBuffer()); TF_GetInputPropertiesList(graph_properties, node.name().c_str(), in_props_buf.data(), num_values, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); tensorflow::OpInfo::TensorProperties in_props; Status s = tensorflow::BufferToMessage(in_props_buf[0], &in_props); TF_ASSERT_OK(s); EXPECT_EQ(DT_FLOAT, in_props.dtype()); EXPECT_FALSE(in_props.shape().unknown_rank()); EXPECT_EQ(2, in_props.shape().dim_size()); EXPECT_EQ(10, in_props.shape().dim(0).size()); EXPECT_EQ(1, in_props.shape().dim(1).size()); for (int i = 0; i < in_props_buf.size(); i++) TF_DeleteBuffer(in_props_buf[i]); } } TF_DeleteGraphProperties(graph_properties); TF_DeleteStatus(status); TF_ASSERT_OK(cluster->Shutdown()); } TEST(TF_GraphProperties, OutputProperties) { std::unique_ptr<SingleMachine> cluster(new SingleMachine(5 * 60, 3, 0)); TF_ASSERT_OK(cluster->Provision()); TrivialTestGraphInputYielder fake_input(4, 1, 10, false, cluster->GetDeviceNames()); GrapplerItem item; CHECK(fake_input.NextItem(&item)); TF_Status* status = TF_NewStatus(); TF_GraphProperties* graph_properties = TF_NewGraphProperties(reinterpret_cast<TF_GrapplerItem*>(&item)); TF_InferStatically(graph_properties, true, false, false, false, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); for (const NodeDef& node : item.graph.node()) { if (node.op() == "AddN") { int num_values = 0; TF_GetOutputPropertiesListSize(graph_properties, node.name().c_str(), &num_values, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); EXPECT_EQ(num_values, 1); std::vector<TF_Buffer*> out_props_buf(num_values, TF_NewBuffer()); TF_GetOutputPropertiesList(graph_properties, node.name().c_str(), out_props_buf.data(), num_values, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); tensorflow::OpInfo::TensorProperties out_props; Status s = tensorflow::BufferToMessage(out_props_buf[0], &out_props); TF_ASSERT_OK(s); EXPECT_EQ(DT_FLOAT, out_props.dtype()); EXPECT_FALSE(out_props.shape().unknown_rank()); EXPECT_EQ(2, out_props.shape().dim_size()); EXPECT_EQ(10, out_props.shape().dim(0).size()); EXPECT_EQ(1, out_props.shape().dim(1).size()); for (int i = 0; i < out_props_buf.size(); i++) TF_DeleteBuffer(out_props_buf[i]); } } TF_DeleteStatus(status); TF_DeleteGraphProperties(graph_properties); TF_ASSERT_OK(cluster->Shutdown()); } TEST(TF_FunctionLibraryDefinition, LookUpOpDef) { TF_Buffer* g_buf = TF_NewBuffer(); TF_Buffer* op_buf = TF_NewBuffer(); TF_Status* status = TF_NewStatus(); GraphDef g_def; Status s = MessageToBuffer(g_def, g_buf); TF_ASSERT_OK(s); TF_FunctionLibraryDefinition* func = TF_NewFunctionLibraryDefinition(g_buf, status); TF_LookUpOpDef(func, "Add", op_buf, status); string actual_string(reinterpret_cast<const char*>(op_buf->data), op_buf->length); ASSERT_EQ(TF_OK, TF_GetCode(status)); const OpDef* expected_op_def; TF_ASSERT_OK(OpRegistry::Global()->LookUpOpDef("Add", &expected_op_def)); string expected_serialized; expected_op_def->SerializeToString(&expected_serialized); EXPECT_EQ(expected_serialized, actual_string); TF_DeleteBuffer(g_buf); TF_DeleteBuffer(op_buf); TF_DeleteStatus(status); TF_DeleteFunctionLibraryDefinition(func); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/grappler/grappler.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/grappler/grappler_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
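For the grappler C API above, a minimal standalone sketch of the lookup flow that the LookUpOpDef test exercises. It assumes the same headers as the test file, plus tensorflow/core/framework/graph.pb.h for GraphDef; error handling is reduced to early exits, and an empty GraphDef means lookups fall back to the global op registry (as TF_NewFunctionLibraryDefinition constructs the library over OpRegistry::Global()).

#include "tensorflow/c/experimental/grappler/grappler.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/core/framework/graph.pb.h"

bool LookUpAddOpDef() {
  TF_Status* status = TF_NewStatus();
  TF_Buffer* graph_buf = TF_NewBuffer();
  TF_Buffer* op_buf = TF_NewBuffer();
  bool ok = false;
  tensorflow::GraphDef graph_def;  // empty graph: "Add" resolves via the global registry
  if (tensorflow::MessageToBuffer(graph_def, graph_buf).ok()) {
    TF_FunctionLibraryDefinition* lib =
        TF_NewFunctionLibraryDefinition(graph_buf, status);
    if (TF_GetCode(status) == TF_OK) {
      // On success the serialized OpDef for "Add" lands in op_buf.
      TF_LookUpOpDef(lib, "Add", op_buf, status);
      ok = TF_GetCode(status) == TF_OK;
    }
    TF_DeleteFunctionLibraryDefinition(lib);
  }
  TF_DeleteBuffer(op_buf);
  TF_DeleteBuffer(graph_buf);
  TF_DeleteStatus(status);
  return ok;
}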
100048c8-17af-47ac-9db1-6fb5b3094ca7
cpp
google/cel-cpp
decl
common/decl.cc
common/decl_test.cc
#include "common/decl.h" #include <cstddef> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/log/absl_check.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "common/type.h" #include "common/type_kind.h" namespace cel { namespace common_internal { bool TypeIsAssignable(const Type& to, const Type& from) { if (to == from) { return true; } const auto to_kind = to.kind(); if (to_kind == TypeKind::kDyn) { return true; } switch (to_kind) { case TypeKind::kBoolWrapper: return TypeIsAssignable(NullType{}, from) || TypeIsAssignable(BoolType{}, from); case TypeKind::kIntWrapper: return TypeIsAssignable(NullType{}, from) || TypeIsAssignable(IntType{}, from); case TypeKind::kUintWrapper: return TypeIsAssignable(NullType{}, from) || TypeIsAssignable(UintType{}, from); case TypeKind::kDoubleWrapper: return TypeIsAssignable(NullType{}, from) || TypeIsAssignable(DoubleType{}, from); case TypeKind::kBytesWrapper: return TypeIsAssignable(NullType{}, from) || TypeIsAssignable(BytesType{}, from); case TypeKind::kStringWrapper: return TypeIsAssignable(NullType{}, from) || TypeIsAssignable(StringType{}, from); default: break; } const auto from_kind = from.kind(); if (to_kind != from_kind || to.name() != from.name()) { return false; } auto to_params = to.GetParameters(); auto from_params = from.GetParameters(); const auto params_size = to_params.size(); if (params_size != from_params.size()) { return false; } for (size_t i = 0; i < params_size; ++i) { if (!TypeIsAssignable(to_params[i], from_params[i])) { return false; } } return true; } } namespace { bool SignaturesOverlap(const OverloadDecl& lhs, const OverloadDecl& rhs) { if (lhs.member() != rhs.member()) { return false; } const auto& lhs_args = lhs.args(); const auto& rhs_args = rhs.args(); const auto args_size = lhs_args.size(); if (args_size != rhs_args.size()) { return false; } bool args_overlap = true; for (size_t i = 0; i < args_size; ++i) { args_overlap = args_overlap && (common_internal::TypeIsAssignable(lhs_args[i], rhs_args[i]) || common_internal::TypeIsAssignable(rhs_args[i], lhs_args[i])); } return args_overlap; } template <typename Overload> void AddOverloadInternal(std::vector<OverloadDecl>& insertion_order, OverloadDeclHashSet& overloads, Overload&& overload, absl::Status& status) { if (!status.ok()) { return; } if (auto it = overloads.find(overload.id()); it != overloads.end()) { status = absl::AlreadyExistsError( absl::StrCat("overload already exists: ", overload.id())); return; } for (const auto& existing : overloads) { if (SignaturesOverlap(overload, existing)) { status = absl::InvalidArgumentError( absl::StrCat("overload signature collision: ", existing.id(), " collides with ", overload.id())); return; } } const auto inserted = overloads.insert(std::forward<Overload>(overload)); ABSL_DCHECK(inserted.second); insertion_order.push_back(*inserted.first); } void CollectTypeParams(absl::flat_hash_set<std::string>& type_params, const Type& type) { const auto kind = type.kind(); switch (kind) { case TypeKind::kList: { const auto& list_type = type.GetList(); CollectTypeParams(type_params, list_type.element()); } break; case TypeKind::kMap: { const auto& map_type = type.GetMap(); CollectTypeParams(type_params, map_type.key()); CollectTypeParams(type_params, map_type.value()); } break; case TypeKind::kOpaque: { const auto& opaque_type = type.GetOpaque(); for (const auto& param : opaque_type.GetParameters()) { CollectTypeParams(type_params, param); } } break; 
case TypeKind::kFunction: { const auto& function_type = type.GetFunction(); CollectTypeParams(type_params, function_type.result()); for (const auto& arg : function_type.args()) { CollectTypeParams(type_params, arg); } } break; case TypeKind::kTypeParam: type_params.emplace(type.GetTypeParam().name()); break; default: break; } } } absl::flat_hash_set<std::string> OverloadDecl::GetTypeParams() const { absl::flat_hash_set<std::string> type_params; CollectTypeParams(type_params, result()); for (const auto& arg : args()) { CollectTypeParams(type_params, arg); } return type_params; } void FunctionDecl::AddOverloadImpl(const OverloadDecl& overload, absl::Status& status) { AddOverloadInternal(overloads_.insertion_order, overloads_.set, overload, status); } void FunctionDecl::AddOverloadImpl(OverloadDecl&& overload, absl::Status& status) { AddOverloadInternal(overloads_.insertion_order, overloads_.set, std::move(overload), status); } }
#include "common/decl.h" #include "absl/status/status.h" #include "common/constant.h" #include "common/type.h" #include "internal/testing.h" #include "google/protobuf/arena.h" namespace cel { namespace { using ::absl_testing::StatusIs; using ::testing::ElementsAre; using ::testing::IsEmpty; using ::testing::Property; using ::testing::UnorderedElementsAre; TEST(VariableDecl, Name) { VariableDecl variable_decl; EXPECT_THAT(variable_decl.name(), IsEmpty()); variable_decl.set_name("foo"); EXPECT_EQ(variable_decl.name(), "foo"); EXPECT_EQ(variable_decl.release_name(), "foo"); EXPECT_THAT(variable_decl.name(), IsEmpty()); } TEST(VariableDecl, Type) { VariableDecl variable_decl; EXPECT_EQ(variable_decl.type(), DynType{}); variable_decl.set_type(StringType{}); EXPECT_EQ(variable_decl.type(), StringType{}); } TEST(VariableDecl, Value) { VariableDecl variable_decl; EXPECT_FALSE(variable_decl.has_value()); EXPECT_EQ(variable_decl.value(), Constant{}); Constant value; value.set_bool_value(true); variable_decl.set_value(value); EXPECT_TRUE(variable_decl.has_value()); EXPECT_EQ(variable_decl.value(), value); EXPECT_EQ(variable_decl.release_value(), value); EXPECT_EQ(variable_decl.value(), Constant{}); } Constant MakeBoolConstant(bool value) { Constant constant; constant.set_bool_value(value); return constant; } TEST(VariableDecl, Equality) { VariableDecl variable_decl; EXPECT_EQ(variable_decl, VariableDecl{}); variable_decl.mutable_value().set_bool_value(true); EXPECT_NE(variable_decl, VariableDecl{}); EXPECT_EQ(MakeVariableDecl("foo", StringType{}), MakeVariableDecl("foo", StringType{})); EXPECT_EQ(MakeVariableDecl("foo", StringType{}), MakeVariableDecl("foo", StringType{})); EXPECT_EQ( MakeConstantVariableDecl("foo", StringType{}, MakeBoolConstant(true)), MakeConstantVariableDecl("foo", StringType{}, MakeBoolConstant(true))); EXPECT_EQ( MakeConstantVariableDecl("foo", StringType{}, MakeBoolConstant(true)), MakeConstantVariableDecl("foo", StringType{}, MakeBoolConstant(true))); } TEST(OverloadDecl, Id) { OverloadDecl overload_decl; EXPECT_THAT(overload_decl.id(), IsEmpty()); overload_decl.set_id("foo"); EXPECT_EQ(overload_decl.id(), "foo"); EXPECT_EQ(overload_decl.release_id(), "foo"); EXPECT_THAT(overload_decl.id(), IsEmpty()); } TEST(OverloadDecl, Result) { OverloadDecl overload_decl; EXPECT_EQ(overload_decl.result(), DynType{}); overload_decl.set_result(StringType{}); EXPECT_EQ(overload_decl.result(), StringType{}); } TEST(OverloadDecl, Args) { OverloadDecl overload_decl; EXPECT_THAT(overload_decl.args(), IsEmpty()); overload_decl.mutable_args().push_back(StringType{}); EXPECT_THAT(overload_decl.args(), ElementsAre(StringType{})); EXPECT_THAT(overload_decl.release_args(), ElementsAre(StringType{})); EXPECT_THAT(overload_decl.args(), IsEmpty()); } TEST(OverloadDecl, Member) { OverloadDecl overload_decl; EXPECT_FALSE(overload_decl.member()); overload_decl.set_member(true); EXPECT_TRUE(overload_decl.member()); } TEST(OverloadDecl, Equality) { OverloadDecl overload_decl; EXPECT_EQ(overload_decl, OverloadDecl{}); overload_decl.set_member(true); EXPECT_NE(overload_decl, OverloadDecl{}); } TEST(OverloadDecl, GetTypeParams) { google::protobuf::Arena arena; auto overload_decl = MakeOverloadDecl( "foo", ListType(&arena, TypeParamType("A")), MapType(&arena, TypeParamType("B"), TypeParamType("C")), OpaqueType(&arena, "bar", {FunctionType(&arena, TypeParamType("D"), {})})); EXPECT_THAT(overload_decl.GetTypeParams(), UnorderedElementsAre("A", "B", "C", "D")); } TEST(FunctionDecl, Name) { FunctionDecl 
function_decl; EXPECT_THAT(function_decl.name(), IsEmpty()); function_decl.set_name("foo"); EXPECT_EQ(function_decl.name(), "foo"); EXPECT_EQ(function_decl.release_name(), "foo"); EXPECT_THAT(function_decl.name(), IsEmpty()); } TEST(FunctionDecl, Overloads) { ASSERT_OK_AND_ASSIGN( auto function_decl, MakeFunctionDecl( "hello", MakeOverloadDecl("foo", StringType{}, StringType{}), MakeMemberOverloadDecl("bar", StringType{}, StringType{}), MakeOverloadDecl("baz", IntType{}, IntType{}))); EXPECT_THAT(function_decl.overloads(), ElementsAre(Property(&OverloadDecl::id, "foo"), Property(&OverloadDecl::id, "bar"), Property(&OverloadDecl::id, "baz"))); EXPECT_THAT(function_decl.AddOverload( MakeOverloadDecl("qux", DynType{}, StringType{})), StatusIs(absl::StatusCode::kInvalidArgument)); } using common_internal::TypeIsAssignable; TEST(TypeIsAssignable, BoolWrapper) { EXPECT_TRUE(TypeIsAssignable(BoolWrapperType{}, BoolWrapperType{})); EXPECT_TRUE(TypeIsAssignable(BoolWrapperType{}, NullType{})); EXPECT_TRUE(TypeIsAssignable(BoolWrapperType{}, BoolType{})); EXPECT_FALSE(TypeIsAssignable(BoolWrapperType{}, DurationType{})); } TEST(TypeIsAssignable, IntWrapper) { EXPECT_TRUE(TypeIsAssignable(IntWrapperType{}, IntWrapperType{})); EXPECT_TRUE(TypeIsAssignable(IntWrapperType{}, NullType{})); EXPECT_TRUE(TypeIsAssignable(IntWrapperType{}, IntType{})); EXPECT_FALSE(TypeIsAssignable(IntWrapperType{}, DurationType{})); } TEST(TypeIsAssignable, UintWrapper) { EXPECT_TRUE(TypeIsAssignable(UintWrapperType{}, UintWrapperType{})); EXPECT_TRUE(TypeIsAssignable(UintWrapperType{}, NullType{})); EXPECT_TRUE(TypeIsAssignable(UintWrapperType{}, UintType{})); EXPECT_FALSE(TypeIsAssignable(UintWrapperType{}, DurationType{})); } TEST(TypeIsAssignable, DoubleWrapper) { EXPECT_TRUE(TypeIsAssignable(DoubleWrapperType{}, DoubleWrapperType{})); EXPECT_TRUE(TypeIsAssignable(DoubleWrapperType{}, NullType{})); EXPECT_TRUE(TypeIsAssignable(DoubleWrapperType{}, DoubleType{})); EXPECT_FALSE(TypeIsAssignable(DoubleWrapperType{}, DurationType{})); } TEST(TypeIsAssignable, BytesWrapper) { EXPECT_TRUE(TypeIsAssignable(BytesWrapperType{}, BytesWrapperType{})); EXPECT_TRUE(TypeIsAssignable(BytesWrapperType{}, NullType{})); EXPECT_TRUE(TypeIsAssignable(BytesWrapperType{}, BytesType{})); EXPECT_FALSE(TypeIsAssignable(BytesWrapperType{}, DurationType{})); } TEST(TypeIsAssignable, StringWrapper) { EXPECT_TRUE(TypeIsAssignable(StringWrapperType{}, StringWrapperType{})); EXPECT_TRUE(TypeIsAssignable(StringWrapperType{}, NullType{})); EXPECT_TRUE(TypeIsAssignable(StringWrapperType{}, StringType{})); EXPECT_FALSE(TypeIsAssignable(StringWrapperType{}, DurationType{})); } TEST(TypeIsAssignable, Complex) { google::protobuf::Arena arena; EXPECT_TRUE(TypeIsAssignable(OptionalType(&arena, DynType{}), OptionalType(&arena, StringType{}))); EXPECT_FALSE(TypeIsAssignable(OptionalType(&arena, BoolType{}), OptionalType(&arena, StringType{}))); } } }
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/decl.cc
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/decl_test.cc
4552db5798fb0853b131b783d8875794334fae7f
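The wrapper-type cases in TypeIsAssignable above are easy to misread, so here is a minimal sketch of the rule, assuming the same cel headers the test file uses: a wrapper type admits the null type and its wrapped primitive (plus itself), and kDyn admits anything.

#include <cassert>
#include "common/decl.h"
#include "common/type.h"

int main() {
  using cel::common_internal::TypeIsAssignable;
  // Wrapper types accept null and the wrapped primitive.
  assert(TypeIsAssignable(cel::BoolWrapperType{}, cel::NullType{}));
  assert(TypeIsAssignable(cel::BoolWrapperType{}, cel::BoolType{}));
  // kDyn accepts any type.
  assert(TypeIsAssignable(cel::DynType{}, cel::StringType{}));
  // Unrelated kinds are rejected.
  assert(!TypeIsAssignable(cel::BoolWrapperType{}, cel::StringType{}));
  return 0;
}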
faed5816-4f2b-465c-8e7b-c4e2d764ef9d
cpp
google/arolla
extensions
arolla/expr/eval/extensions.cc
arolla/expr/eval/extensions_test.cc
#include "arolla/expr/eval/extensions.h" #include <optional> #include <utility> #include "absl/base/no_destructor.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/synchronization/mutex.h" #include "arolla/expr/eval/eval.h" #include "arolla/expr/eval/prepare_expression.h" #include "arolla/expr/expr_node.h" #include "arolla/util/status_macros_backport.h" namespace arolla::expr::eval_internal { CompilerExtensionRegistry& CompilerExtensionRegistry::GetInstance() { static absl::NoDestructor<CompilerExtensionRegistry> instance; return *instance; } CompilerExtensionSet CompilerExtensionRegistry::GetCompilerExtensionSet() const { absl::ReaderMutexLock lock(&mutex_); return CompilerExtensionSet{ .node_transformation_fn = [node_transformation_fns = node_transformation_fns_]( const DynamicEvaluationEngineOptions& options, ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> { for (const auto& fn : node_transformation_fns) { ASSIGN_OR_RETURN(auto new_node, fn(options, node)); if (new_node->fingerprint() != node->fingerprint()) { return new_node; } node = std::move(new_node); } return node; }, .compile_operator_fn = [compile_operator_fns = compile_operator_fns_]( const CompileOperatorFnArgs& args) -> std::optional<absl::Status> { for (const auto& fn : compile_operator_fns) { std::optional<absl::Status> result = fn(args); if (result.has_value()) { return result; } } return std::nullopt; }}; } void CompilerExtensionRegistry::RegisterNodeTransformationFn( NodeTransformationFn fn) { absl::MutexLock lock(&mutex_); node_transformation_fns_.push_back(fn); } void CompilerExtensionRegistry::RegisterCompileOperatorFn( CompileOperatorFn fn) { absl::MutexLock lock(&mutex_); compile_operator_fns_.push_back(fn); } }
#include "arolla/expr/eval/extensions.h" #include <memory> #include <optional> #include <utility> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/types/span.h" #include "arolla/expr/basic_expr_operator.h" #include "arolla/expr/eval/eval.h" #include "arolla/expr/eval/executable_builder.h" #include "arolla/expr/eval/prepare_expression.h" #include "arolla/expr/eval/test_utils.h" #include "arolla/expr/expr.h" #include "arolla/expr/expr_node.h" #include "arolla/expr/expr_operator.h" #include "arolla/expr/expr_operator_signature.h" #include "arolla/expr/testing/testing.h" #include "arolla/memory/frame.h" #include "arolla/qexpr/bound_operators.h" #include "arolla/qexpr/eval_context.h" #include "arolla/qexpr/evaluation_engine.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/typed_slot.h" #include "arolla/util/fast_dynamic_downcast_final.h" #include "arolla/util/fingerprint.h" #include "arolla/util/status_macros_backport.h" namespace arolla::expr::eval_internal { namespace { using ::arolla::testing::EqualsExpr; using ::testing::Eq; TEST(ExtensionsTest, RegisterNodeTransformationFn) { CompilerExtensionRegistry registry; NodeTransformationFn replace_add_with_sub = [](const DynamicEvaluationEngineOptions&, ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> { if (node->is_op() && node->op()->display_name() == "math.add") { return BindOp("math.subtract", node->node_deps(), {}); } return node; }; registry.RegisterNodeTransformationFn(replace_add_with_sub); NodeTransformationFn replace_sub_with_mul = [](const DynamicEvaluationEngineOptions&, ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> { if (node->is_op() && node->op()->display_name() == "math.subtract") { return BindOp("math.multiply", node->node_deps(), {}); } return node; }; registry.RegisterNodeTransformationFn(replace_sub_with_mul); ASSERT_OK_AND_ASSIGN(ExprNodePtr expr, CallOp("math.add", {Leaf("x"), Literal(57)})); CompilerExtensionSet extensions = registry.GetCompilerExtensionSet(); DynamicEvaluationEngineOptions options; ASSERT_OK_AND_ASSIGN(auto transformed_expr, extensions.node_transformation_fn(options, expr)); EXPECT_THAT(transformed_expr, EqualsExpr(CallOp("math.subtract", {Leaf("x"), Literal(57)}))); ASSERT_OK_AND_ASSIGN( auto transforemed_transformed_expr, extensions.node_transformation_fn(options, transformed_expr)); EXPECT_THAT(transforemed_transformed_expr, EqualsExpr(CallOp("math.multiply", {Leaf("x"), Literal(57)}))); } class TestOperator final : public UnnamedExprOperator, public BuiltinExprOperatorTag { public: TestOperator() : UnnamedExprOperator( ExprOperatorSignature::MakeArgsN(1), FingerprintHasher("arolla::expr::eval_internal::TestOperator") .Finish()) {} absl::StatusOr<QTypePtr> GetOutputQType( absl::Span<const QTypePtr> input_qtypes) const final { return GetQType<float>(); } }; class OtherOperator final : public UnnamedExprOperator, public BuiltinExprOperatorTag { public: OtherOperator() : UnnamedExprOperator( ExprOperatorSignature::MakeArgsN(1), FingerprintHasher("arolla::expr::eval_internal::OtherOperator") .Finish()) {} absl::StatusOr<QTypePtr> GetOutputQType( absl::Span<const QTypePtr> input_qtypes) const final { return GetQType<float>(); } }; TEST(ExtensionsTest, RegisterCompileOperatorFn) { CompilerExtensionRegistry registry; CompileOperatorFn dummy_compile_op = [](CompileOperatorFnArgs args) -> std::optional<absl::Status> { return std::nullopt; }; 
registry.RegisterCompileOperatorFn(dummy_compile_op); CompileOperatorFn compile_test_op = [](CompileOperatorFnArgs args) -> std::optional<absl::Status> { if (fast_dynamic_downcast_final<const TestOperator*>(args.op.get()) == nullptr) { return std::nullopt; } ASSIGN_OR_RETURN(auto output_slot, args.output_slot.ToSlot<float>()); args.executable_builder->AddEvalOp( MakeBoundOperator( [output_slot](EvaluationContext* ctx, FramePtr frame) { frame.Set(output_slot, 57); }), "eval test operator", "eval test operator"); return absl::OkStatus(); }; registry.RegisterCompileOperatorFn(compile_test_op); CompilerExtensionSet extensions = registry.GetCompilerExtensionSet(); FrameLayout::Builder layout_builder; ExecutableBuilder executable_builder(&layout_builder, true); auto out_slot = layout_builder.AddSlot<float>(); ExprOperatorPtr other_op = std::make_shared<OtherOperator>(); EXPECT_THAT(extensions.compile_operator_fn(CompileOperatorFnArgs{ .options = DynamicEvaluationEngineOptions{}, .op = other_op, .input_slots = {}, .output_slot = TypedSlot::FromSlot(out_slot), .executable_builder = &executable_builder}), Eq(std::nullopt)); ExprOperatorPtr test_op = std::make_shared<TestOperator>(); EXPECT_THAT(extensions.compile_operator_fn(CompileOperatorFnArgs{ .options = DynamicEvaluationEngineOptions{}, .op = test_op, .input_slots = {}, .output_slot = TypedSlot::FromSlot(out_slot), .executable_builder = &executable_builder}), Eq(absl::OkStatus())); std::unique_ptr<BoundExpr> bound_expr = std::move(executable_builder) .Build({}, TypedSlot::FromSlot(out_slot)); EXPECT_THAT(bound_expr, EvalOperationsAre("eval test operator")); } } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/extensions.cc
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/extensions_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
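The tests above exercise a local CompilerExtensionRegistry; a real extension would register against the process-wide instance. A minimal sketch under that assumption (the identity transformation and its name are hypothetical, for illustration only):

#include "absl/status/statusor.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/extensions.h"
#include "arolla/expr/expr_node.h"

namespace arolla::expr::eval_internal {

void RegisterIdentityTransformation() {
  CompilerExtensionRegistry::GetInstance().RegisterNodeTransformationFn(
      [](const DynamicEvaluationEngineOptions&, ExprNodePtr node)
          -> absl::StatusOr<ExprNodePtr> {
        // Identity transformation: returning a node with an unchanged
        // fingerprint lets the combined node_transformation_fn from
        // GetCompilerExtensionSet() continue down the registered list.
        return node;
      });
}

}  // namespace arolla::expr::eval_internal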
ecfc7b9e-9c2a-42a9-b642-6e26f02538cb
cpp
tensorflow/tensorflow
operand_upcaster
third_party/xla/xla/service/operand_upcaster.cc
third_party/xla/xla/service/operand_upcaster_test.cc
#include "xla/service/operand_upcaster.h" #include <optional> #include "absl/algorithm/container.h" #include "absl/status/statusor.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/shape_inference.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { absl::StatusOr<std::optional<Shape>> MaybeInferShape( const HloInstruction* instruction) { switch (instruction->opcode()) { case HloOpcode::kDot: return ShapeInference::InferDotOpShape( instruction->operand(0)->shape(), instruction->operand(1)->shape(), instruction->dot_dimension_numbers(), std::nullopt, Cast<HloDotInstruction>(instruction)->sparsity()); case HloOpcode::kConvolution: return ShapeInference::InferConvolveShape( instruction->operand(0)->shape(), instruction->operand(1)->shape(), instruction->feature_group_count(), instruction->batch_group_count(), instruction->window(), instruction->convolution_dimension_numbers(), std::nullopt); default: return std::optional<Shape>(std::nullopt); } } } bool OperandUpcaster::InstructionMatchesPattern(HloInstruction* instruction) { auto status_or_inferred_shape = MaybeInferShape(instruction); if (!status_or_inferred_shape.ok() || !status_or_inferred_shape->has_value()) { return false; } if (absl::c_count(instruction->precision_config().operand_precision(), PrecisionConfig::PACKED_NIBBLE) == 2) { return true; } PrimitiveType inferred_type = (*status_or_inferred_shape)->element_type(); if (instruction->shape().element_type() == inferred_type && instruction->operand(0)->shape().element_type() == inferred_type && instruction->operand(1)->shape().element_type() == inferred_type) { return false; } return ShapeUtil::ElementCanUpcast(**status_or_inferred_shape, instruction->shape()); } absl::StatusOr<HloInstruction*> OperandUpcaster::ExpandInstruction( HloInstruction* instruction) { const bool packed_nibble = absl::c_count(instruction->precision_config().operand_precision(), PrecisionConfig::PACKED_NIBBLE) == 2; auto type = instruction->shape().element_type(); if (packed_nibble) { HloInstruction *lhs_n0 = instruction->mutable_operand(0), *lhs_n1 = lhs_n0, *rhs_n0 = instruction->mutable_operand(1), *rhs_n1 = rhs_n0; TF_ASSIGN_OR_RETURN(lhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, lhs_n0, MakeScalarLike(lhs_n0, 4))); HloOpcode lhs_shift = ShapeUtil::ElementIsSigned(lhs_n0->shape()) ? HloOpcode::kShiftRightArithmetic : HloOpcode::kShiftRightLogical; TF_ASSIGN_OR_RETURN( lhs_n0, MakeBinaryHlo(lhs_shift, lhs_n0, MakeScalarLike(lhs_n0, 4))); lhs_n0 = MakeConvertToHlo(lhs_n0, type); TF_ASSIGN_OR_RETURN( lhs_n1, MakeBinaryHlo(lhs_shift, lhs_n1, MakeScalarLike(lhs_n1, 4))); lhs_n1 = MakeConvertToHlo(lhs_n1, type); TF_ASSIGN_OR_RETURN(rhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, rhs_n0, MakeScalarLike(rhs_n0, 4))); HloOpcode rhs_shift = ShapeUtil::ElementIsSigned(rhs_n0->shape()) ? 
HloOpcode::kShiftRightArithmetic : HloOpcode::kShiftRightLogical; TF_ASSIGN_OR_RETURN( rhs_n0, MakeBinaryHlo(rhs_shift, rhs_n0, MakeScalarLike(rhs_n0, 4))); rhs_n0 = MakeConvertToHlo(rhs_n0, type); TF_ASSIGN_OR_RETURN( rhs_n1, MakeBinaryHlo(rhs_shift, rhs_n1, MakeScalarLike(rhs_n1, 4))); rhs_n1 = MakeConvertToHlo(rhs_n1, type); HloInstruction* linear_n0 = instruction->parent()->AddInstruction(instruction->CloneWithNewOperands( instruction->shape(), {lhs_n0, rhs_n0})); linear_n0->mutable_precision_config()->mutable_operand_precision()->Set( 0, PrecisionConfig::DEFAULT); linear_n0->mutable_precision_config()->mutable_operand_precision()->Set( 1, PrecisionConfig::DEFAULT); HloInstruction* linear_n1 = instruction->parent()->AddInstruction(linear_n0->CloneWithNewOperands( instruction->shape(), {lhs_n1, rhs_n1})); return MakeBinaryHlo(HloOpcode::kAdd, linear_n0, linear_n1); } for (int i = 0; i < HloDotInstruction::kOperands; ++i) { auto* operand = instruction->mutable_operand(i); if (operand->shape().element_type() == type) { continue; } auto upcast_shape = operand->shape(); upcast_shape.set_element_type(type); auto* convert_inst = instruction->AddInstruction( HloInstruction::CreateConvert(upcast_shape, operand)); TF_RETURN_IF_ERROR( instruction->ReplaceOperandWithDifferentShape(i, convert_inst)); } return nullptr; } }
#include "xla/service/operand_upcaster.h" #include <memory> #include <tuple> #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/primitive_util.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace op = ::xla::testing::opcode_matchers; class OperandUpcasterTest : public HloTestBase, public ::testing::WithParamInterface< std::tuple<PrimitiveType, PrimitiveType, PrimitiveType>> {}; bool ShouldUpcast(PrimitiveType operand_type, PrimitiveType result_type) { return operand_type != result_type && primitive_util::HigherPrecisionType(operand_type, result_type) == result_type; } TEST_P(OperandUpcasterTest, ConvertInserted) { PrimitiveType lhs_type, rhs_type, result_type; std::tie(lhs_type, rhs_type, result_type) = GetParam(); absl::string_view module_tmpl = R"( HloModule module ENTRY main { p0 = $0[2,3]{1,0} parameter(0) p1 = $1[3,2]{1,0} parameter(1) ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; auto module_string = absl::Substitute( module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type), primitive_util::LowercasePrimitiveTypeName(rhs_type), primitive_util::LowercasePrimitiveTypeName(result_type)); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get())); EXPECT_EQ(upcasted, ShouldUpcast(lhs_type, result_type) || ShouldUpcast(rhs_type, result_type)); auto original_lhs = op::Parameter(0); auto original_rhs = op::Parameter(1); auto upcasted_lhs = ShouldUpcast(lhs_type, result_type) ? AllOf(op::Convert(original_lhs), op::Shape(absl::Substitute( "$0[2,3]{1,0}", primitive_util::LowercasePrimitiveTypeName(result_type)))) : original_lhs; auto upcasted_rhs = ShouldUpcast(rhs_type, result_type) ? 
AllOf(op::Convert(original_rhs), op::Shape(absl::Substitute( "$0[3,2]{1,0}", primitive_util::LowercasePrimitiveTypeName(result_type)))) : original_rhs; EXPECT_THAT( module->entry_computation()->root_instruction(), AllOf(op::Dot(upcasted_lhs, upcasted_rhs), op::Shape(absl::Substitute( "$0[2,2]{1,0}", primitive_util::LowercasePrimitiveTypeName(result_type))))); } INSTANTIATE_TEST_SUITE_P(S16U16, OperandUpcasterTest, ::testing::Values(std::make_tuple(S8, S8, S16), std::make_tuple(U8, U8, U16))); INSTANTIATE_TEST_SUITE_P(S32, OperandUpcasterTest, ::testing::Combine(::testing::Values(S8, U8, S16), ::testing::Values(S8, U8, S16), ::testing::Values(S32))); INSTANTIATE_TEST_SUITE_P(U32, OperandUpcasterTest, ::testing::Combine(::testing::Values(U8, U16), ::testing::Values(U8, U16), ::testing::Values(U32))); INSTANTIATE_TEST_SUITE_P(BF16, OperandUpcasterTest, ::testing::Combine(::testing::Values(BF16, S8, U8), ::testing::Values(BF16, S8, U8), ::testing::Values(BF16))); INSTANTIATE_TEST_SUITE_P(F32, OperandUpcasterTest, ::testing::Combine(::testing::Values(BF16, F16), ::testing::Values(BF16, F16), ::testing::Values(F32))); INSTANTIATE_TEST_SUITE_P(NoUpcast, OperandUpcasterTest, ::testing::Values(std::make_tuple(F32, F32, BF16), std::make_tuple(S32, S32, U32))); TEST_F(OperandUpcasterTest, SparseDot) { absl::string_view kHlo = R"( HloModule module ENTRY main { p0 = bf16[2,16]{1,0} parameter(0) p1 = bf16[32,2]{1,0} parameter(1) meta = u16[2,2]{1,0} parameter(2) ROOT dot = f32[2,2]{1,0} dot(p0, p1, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4 })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHlo)); TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get())); EXPECT_TRUE(upcasted); auto upcasted_lhs = AllOf(op::Convert(op::Parameter(0)), op::Shape("f32[2,16]{1,0}")); auto upcasted_rhs = AllOf(op::Convert(op::Parameter(1)), op::Shape("f32[32,2]{1,0}")); EXPECT_THAT(module->entry_computation()->root_instruction(), AllOf(::testing::MakeMatcher(new ::xla::testing::HloMatcher( HloOpcode::kDot, {upcasted_lhs, upcasted_rhs, op::Parameter(2)})), op::Shape("f32[2,2]{1,0}"))); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/operand_upcaster.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/operand_upcaster_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
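The PACKED_NIBBLE branch in ExpandInstruction above splits each operand byte into two 4-bit values using only shifts: shift-left-4 followed by an arithmetic shift-right-4 sign-extends the low nibble, while an arithmetic shift-right-4 alone recovers the high one. A scalar C++ analogue of that trick (plain integers rather than the HLO builder API; assumes two's-complement arithmetic shifts):

#include <cassert>
#include <cstdint>

int main() {
  int8_t packed = static_cast<int8_t>(0xA3);  // high nibble -6, low nibble 3
  // Shift left by 4 then arithmetic-shift right by 4: sign-extends the low
  // nibble (the unsigned cast avoids shifting a negative value).
  int8_t low = static_cast<int8_t>(static_cast<uint8_t>(packed) << 4) >> 4;
  // Arithmetic shift right by 4 keeps the sign: recovers the high nibble.
  int8_t high = packed >> 4;
  assert(low == 3);
  assert(high == -6);
  return 0;
}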
8da78b81-3cb6-408a-85a1-864de5903db3
cpp
google/arolla
expr_attributes
arolla/expr/expr_attributes.cc
arolla/expr/expr_attributes_test.cc
#include "arolla/expr/expr_attributes.h" #include <ostream> #include "arolla/util/fingerprint.h" namespace arolla::expr { std::ostream& operator<<(std::ostream& ostream, const ExprAttributes& attr) { if (attr.qvalue()) { ostream << "Attr(qvalue=" << attr.qvalue()->Repr() << ")"; } else if (attr.qtype()) { ostream << "Attr(qtype=" << attr.qtype()->name() << ")"; } else { ostream << "Attr{}"; } return ostream; } } namespace arolla { void FingerprintHasherTraits<expr::ExprAttributes>::operator()( FingerprintHasher* hasher, const expr::ExprAttributes& attr) const { hasher->Combine(attr.qtype()); hasher->Combine(attr.qvalue().has_value() ? attr.qvalue()->GetFingerprint() : Fingerprint{}); } }
#include "arolla/expr/expr_attributes.h" #include <cstdint> #include <optional> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/container/flat_hash_set.h" #include "absl/status/status_matchers.h" #include "arolla/qtype/base_types.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/typed_value.h" #include "arolla/util/fingerprint.h" namespace arolla::expr { namespace { using ::absl_testing::IsOkAndHolds; using ::testing::PrintToString; using Attr = ::arolla::expr::ExprAttributes; TEST(ExprAttributesTest, Default) { const Attr attr; EXPECT_EQ(attr.qtype(), nullptr); EXPECT_EQ(attr.qvalue(), std::nullopt); EXPECT_EQ(PrintToString(attr), "Attr{}"); } TEST(ExprAttributesTest, QTypeNullptr) { const Attr attr(nullptr); EXPECT_EQ(attr.qtype(), nullptr); EXPECT_EQ(attr.qvalue(), std::nullopt); EXPECT_EQ(PrintToString(attr), "Attr{}"); } TEST(ExprAttributesTest, QType) { const Attr attr(GetQTypeQType()); EXPECT_EQ(attr.qtype(), GetQTypeQType()); EXPECT_EQ(attr.qvalue(), std::nullopt); EXPECT_EQ(PrintToString(attr), "Attr(qtype=QTYPE)"); } TEST(ExprAttributesTest, QValue) { const Attr attr(TypedValue::FromValue(GetNothingQType())); EXPECT_EQ(attr.qtype(), GetQTypeQType()); EXPECT_THAT(attr.qvalue()->As<QTypePtr>(), IsOkAndHolds(GetNothingQType())); EXPECT_EQ(PrintToString(attr), "Attr(qvalue=NOTHING)"); } TEST(ExprAttributesTest, NoQTypeNoQValue) { const Attr attr(nullptr, std::nullopt); EXPECT_EQ(attr.qtype(), nullptr); EXPECT_EQ(attr.qvalue(), std::nullopt); EXPECT_EQ(PrintToString(attr), "Attr{}"); } TEST(ExprAttributesTest, QTypeNoQValue) { const Attr attr(GetQTypeQType(), std::nullopt); EXPECT_EQ(attr.qtype(), GetQTypeQType()); EXPECT_EQ(attr.qvalue(), std::nullopt); EXPECT_EQ(PrintToString(attr), "Attr(qtype=QTYPE)"); } TEST(ExprAttributesTest, QValueQValue) { std::optional<TypedValue> qvalue = TypedValue::FromValue(GetNothingQType()); const Attr attr(GetQTypeQType(), qvalue); EXPECT_EQ(attr.qtype(), GetQTypeQType()); EXPECT_THAT(attr.qvalue()->As<QTypePtr>(), IsOkAndHolds(GetNothingQType())); EXPECT_EQ(PrintToString(attr), "Attr(qvalue=NOTHING)"); } TEST(ExprAttributesTest, Fingerprints) { absl::flat_hash_set<Fingerprint> fingerprints; EXPECT_TRUE( fingerprints .insert(FingerprintHasher("").Combine(ExprAttributes()).Finish()) .second); EXPECT_FALSE( fingerprints .insert(FingerprintHasher("").Combine(ExprAttributes()).Finish()) .second); EXPECT_TRUE(fingerprints .insert(FingerprintHasher("") .Combine(ExprAttributes(GetQType<int64_t>())) .Finish()) .second); EXPECT_FALSE(fingerprints .insert(FingerprintHasher("") .Combine(ExprAttributes(GetQType<int64_t>())) .Finish()) .second); EXPECT_TRUE(fingerprints .insert(FingerprintHasher("") .Combine(ExprAttributes( TypedValue::FromValue<int64_t>(57))) .Finish()) .second); EXPECT_FALSE(fingerprints .insert(FingerprintHasher("") .Combine(ExprAttributes( TypedValue::FromValue<int64_t>(57))) .Finish()) .second); } TEST(ExprAttributesTest, IsIdenticalToEmpty) { const Attr attr1; const Attr attr2; EXPECT_TRUE(attr1.IsIdenticalTo(attr1)); EXPECT_TRUE(attr1.IsIdenticalTo(attr2)); EXPECT_TRUE(attr2.IsIdenticalTo(attr2)); } TEST(ExprAttributesTest, IsIdenticalToGeneral) { const Attr attr0; const Attr attr1(GetQTypeQType()); EXPECT_FALSE(attr0.IsIdenticalTo(attr1)); const Attr attr2(TypedValue::FromValue(GetNothingQType())); EXPECT_FALSE(attr0.IsIdenticalTo(attr2)); EXPECT_FALSE(attr1.IsIdenticalTo(attr2)); const Attr attr3(GetQTypeQType(), TypedValue::FromValue(GetNothingQType())); 
EXPECT_FALSE(attr0.IsIdenticalTo(attr3)); EXPECT_FALSE(attr1.IsIdenticalTo(attr3)); EXPECT_TRUE(attr2.IsIdenticalTo(attr3)); const Attr attr4(TypedValue::FromValue(GetQType<int64_t>())); EXPECT_FALSE(attr0.IsIdenticalTo(attr4)); EXPECT_FALSE(attr1.IsIdenticalTo(attr4)); EXPECT_FALSE(attr2.IsIdenticalTo(attr4)); EXPECT_FALSE(attr3.IsIdenticalTo(attr4)); } TEST(ExprAttributesTest, IsSubsetOfEmpty) { const Attr attr1; const Attr attr2; EXPECT_TRUE(attr1.IsSubsetOf(attr1)); EXPECT_TRUE(attr1.IsSubsetOf(attr2)); EXPECT_TRUE(attr2.IsSubsetOf(attr2)); } TEST(ExprAttributesTest, IsSubsetOf) { const Attr attr0; const Attr attr1(GetQTypeQType()); const Attr attr2(TypedValue::FromValue(GetNothingQType())); const Attr attr3(TypedValue::FromValue(GetQTypeQType())); EXPECT_TRUE(attr0.IsSubsetOf(attr0)); EXPECT_TRUE(attr0.IsSubsetOf(attr1)); EXPECT_TRUE(attr0.IsSubsetOf(attr2)); EXPECT_TRUE(attr0.IsSubsetOf(attr3)); EXPECT_FALSE(attr1.IsSubsetOf(attr0)); EXPECT_TRUE(attr1.IsSubsetOf(attr1)); EXPECT_TRUE(attr1.IsSubsetOf(attr2)); EXPECT_TRUE(attr1.IsSubsetOf(attr3)); EXPECT_FALSE(attr2.IsSubsetOf(attr0)); EXPECT_FALSE(attr2.IsSubsetOf(attr1)); EXPECT_TRUE(attr2.IsSubsetOf(attr2)); EXPECT_FALSE(attr2.IsSubsetOf(attr3)); EXPECT_FALSE(attr3.IsSubsetOf(attr0)); EXPECT_FALSE(attr3.IsSubsetOf(attr1)); EXPECT_FALSE(attr3.IsSubsetOf(attr2)); EXPECT_TRUE(attr3.IsSubsetOf(attr3)); } } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_attributes.cc
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_attributes_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
b02af755-636c-49b9-876f-0fd35babbbb7
cpp
tensorflow/tensorflow
tuple_simplifier
third_party/xla/xla/service/tuple_simplifier.cc
third_party/xla/xla/service/tuple_simplifier_test.cc
#include "xla/service/tuple_simplifier.h" #include <cstdint> #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/shape_util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { TupleSimplifier::TupleSimplifier(bool exclude_entry_computation) : exclude_entry_computation_(exclude_entry_computation) {} absl::StatusOr<bool> TupleSimplifier::RemoveWholeTuple(HloInstruction* tuple) { HloInstruction* top_tuple = nullptr; for (int64_t operand_number = 0; operand_number < tuple->operand_count(); ++operand_number) { HloInstruction* operand = tuple->mutable_operand(operand_number); if (operand->opcode() != HloOpcode::kGetTupleElement || operand->tuple_index() != operand_number) { return false; } if (top_tuple == nullptr) { top_tuple = operand->mutable_operand(0); if (!ShapeUtil::Compatible(top_tuple->shape(), tuple->shape())) { return false; } } else if (top_tuple != operand->operand(0)) { return false; } } if (top_tuple == nullptr) { return false; } TF_ASSIGN_OR_RETURN(bool changed, tuple->parent()->ReplaceInstruction( tuple, top_tuple, true)); return changed; } absl::StatusOr<bool> TupleSimplifier::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (auto* computation : module->computations(execution_threads)) { if (exclude_entry_computation_ && computation == module->entry_computation()) { continue; } for (auto* instruction : computation->MakeInstructionPostOrder()) { if (instruction->opcode() == HloOpcode::kTuple) { TF_ASSIGN_OR_RETURN(bool c, RemoveWholeTuple(instruction)); changed |= c; } else { auto [ancestor, index] = instruction->LatestNonGteAncestorAndIndex(); if (ancestor == instruction) { continue; } HloInstruction* replacement = ancestor; for (int i = 0; i < index.size(); ++i) { if (replacement->opcode() != HloOpcode::kTuple) { replacement = nullptr; break; } replacement = replacement->mutable_operand(index[i]); } if (replacement) { TF_ASSIGN_OR_RETURN(bool replaced, computation->ReplaceInstruction( instruction, replacement, true, true)); changed |= replaced; } } } } if (module->has_schedule()) { TF_RETURN_IF_ERROR(module->schedule().Update()); } return changed; } }
#include "xla/service/tuple_simplifier.h" #include <memory> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/statusor.h" namespace xla { namespace { namespace op = xla::testing::opcode_matchers; class TupleSimplifierTest : public HloTestBase { protected: void Run(HloModule* module, bool change_expected) { auto changed_status = RunHloPass(TupleSimplifier(), module); TF_ASSERT_OK(changed_status.status()); EXPECT_EQ(change_expected, changed_status.value()); } void Run(HloModule* module, bool change_expected, bool exclude_entry) { auto changed_status = RunHloPass(TupleSimplifier(exclude_entry), module); TF_ASSERT_OK(changed_status.status()); EXPECT_EQ(change_expected, changed_status.value()); } const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {}); const Shape tuple_shape_ = ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {})}); }; TEST_F(TupleSimplifierTest, TupleOfParameters) { constexpr absl::string_view kModuleStr = R"( HloModule TupleOfParameters, entry_computation_layout={(f32[], f32[], f32[])->(f32[], f32[], f32[])} ENTRY %TupleOfParameters (param0: f32[], param1: f32[], param2: f32[]) -> (f32[], f32[], f32[]) { %param0 = f32[] parameter(0) %param1 = f32[] parameter(1) %param2 = f32[] parameter(2) ROOT %tuple = (f32[], f32[], f32[]) tuple(f32[] %param0, f32[] %param1, f32[] %param2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); Run(module.get(), false); } TEST_F(TupleSimplifierTest, GteOfTupleOfParameter) { constexpr absl::string_view kModuleStr = R"( HloModule GteOfTupleOfParameter, entry_computation_layout={((f32[], f32[], f32[]))->f32[]} ENTRY %GteOfTupleOfParameter (param: (f32[], f32[], f32[])) -> f32[] { %param = (f32[], f32[], f32[]) parameter(0) ROOT %get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); Run(module.get(), false); } TEST_F(TupleSimplifierTest, GteOfTuple) { constexpr absl::string_view kModuleStr = R"( HloModule GteOfTuple, entry_computation_layout={(f32[], f32[], f32[])->f32[]} ENTRY %GteOfTuple (param0: f32[], param1: f32[], param2: f32[]) -> f32[] { %param0 = f32[] parameter(0) %param1 = f32[] parameter(1) %param2 = f32[] parameter(2) %tuple = (f32[], f32[], f32[]) tuple(f32[] %param0, f32[] %param1, f32[] %param2) ROOT %get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::GetTupleElement(op::Tuple())); Run(module.get(), true); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Parameter(1)); } TEST_F(TupleSimplifierTest, GteOfTupleChain) { constexpr absl::string_view kModuleStr = R"( HloModule GteOfTupleChain, entry_computation_layout={(f32[])->f32[]} ENTRY %GteOfTupleChain (param: f32[]) -> f32[] { %param = f32[] parameter(0) %tuple = (f32[], f32[], f32[]) tuple(f32[] %param, f32[] %param, f32[] %param) %get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple), index=1 %tuple.1 = (f32[], f32[], f32[]) 
tuple(f32[] %get-tuple-element, f32[] %get-tuple-element, f32[] %get-tuple-element) %get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.1), index=1 %tuple.2 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.1, f32[] %get-tuple-element.1, f32[] %get-tuple-element.1) %get-tuple-element.2 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.2), index=1 %tuple.3 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.2, f32[] %get-tuple-element.2, f32[] %get-tuple-element.2) %get-tuple-element.3 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.3), index=1 %tuple.4 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.3, f32[] %get-tuple-element.3, f32[] %get-tuple-element.3) %get-tuple-element.4 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.4), index=1 %tuple.5 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.4, f32[] %get-tuple-element.4, f32[] %get-tuple-element.4) %get-tuple-element.5 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.5), index=1 %tuple.6 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.5, f32[] %get-tuple-element.5, f32[] %get-tuple-element.5) %get-tuple-element.6 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.6), index=1 %tuple.7 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.6, f32[] %get-tuple-element.6, f32[] %get-tuple-element.6) %get-tuple-element.7 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.7), index=1 %tuple.8 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.7, f32[] %get-tuple-element.7, f32[] %get-tuple-element.7) %get-tuple-element.8 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.8), index=1 %tuple.9 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.8, f32[] %get-tuple-element.8, f32[] %get-tuple-element.8) %get-tuple-element.9 = f32[] get-tuple-element((f32[], f32[], f32[]) %tuple.9), index=1 ROOT %negate = f32[] negate(f32[] %get-tuple-element.9) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Negate(op::GetTupleElement(op::Tuple()))); Run(module.get(), true); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Negate(op::Parameter())); } TEST_F(TupleSimplifierTest, NestedGteOfTuples) { constexpr absl::string_view kModuleStr = R"( HloModule NestedGteOfTuples, entry_computation_layout={(f32[])->f32[]} ENTRY %NestedGteOfTuples (param: f32[]) -> f32[] { %param = f32[] parameter(0) %tuple = (f32[], f32[]) tuple(f32[] %param, f32[] %param) %tuple.1 = ((f32[], f32[]), (f32[], f32[])) tuple((f32[], f32[]) %tuple, (f32[], f32[]) %tuple) %tuple.2 = (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))) tuple( ((f32[], f32[]), (f32[], f32[])) %tuple.1, ((f32[], f32[]), (f32[], f32[])) %tuple.1 ) %tuple.3 = ((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))), (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))) tuple( (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))) %tuple.2, (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))) %tuple.2 ) %tuple.4 = (((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))), (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))), ((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))), (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))))) tuple( ((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))), (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], 
f32[])))) %tuple.3, ((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))), (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))) %tuple.3 ) %get-tuple-element = ((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))), (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))) get-tuple-element( (((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))), (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))), ((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))), (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))))) %tuple.4 ), index=0 %get-tuple-element.1 = (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))) get-tuple-element( ((((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))), (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[])))) %get-tuple-element ), index=0 %get-tuple-element.2 = ((f32[], f32[]), (f32[], f32[])) get-tuple-element( (((f32[], f32[]), (f32[], f32[])), ((f32[], f32[]), (f32[], f32[]))) %get-tuple-element.1 ), index=0 %get-tuple-element.3 = (f32[], f32[]) get-tuple-element( ((f32[], f32[]), (f32[], f32[])) %get-tuple-element.2 ), index=0 ROOT %get-tuple-element.4 = f32[] get-tuple-element((f32[], f32[]) %get-tuple-element.3), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::GetTupleElement()); Run(module.get(), true); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Parameter(0)); } TEST_F(TupleSimplifierTest, TupleOfGteInstructions) { constexpr absl::string_view kModuleStr = R"( HloModule TupleOfGteInstructions, entry_computation_layout={((f32[], f32[], f32[]))->(f32[], f32[], f32[])} ENTRY %TupleOfGteInstructions (param: (f32[], f32[], f32[])) -> (f32[], f32[], f32[]) { %param = (f32[], f32[], f32[]) parameter(0) %get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=0 %get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=1 %get-tuple-element.2 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=2 ROOT %tuple = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %get-tuple-element.1, f32[] %get-tuple-element.2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::GetTupleElement(), op::GetTupleElement(), op::GetTupleElement())); Run(module.get(), true); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Parameter(0)); } TEST_F(TupleSimplifierTest, TupleOfGteNotRemovedIfOrderIsNotPreserved) { constexpr absl::string_view kModuleStr = R"( HloModule TupleOfGteInstructions, entry_computation_layout={((f32[], f32[], f32[]))->(f32[], f32[], f32[])} ENTRY %TupleOfGteInstructions (param: (f32[], f32[], f32[])) -> (f32[], f32[], f32[]) { %param = (f32[], f32[], f32[]) parameter(0) %get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=0 %get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=1 %get-tuple-element.2 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=2 ROOT %tuple = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %get-tuple-element.2, f32[] %get-tuple-element.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); Run(module.get(), false); } TEST_F(TupleSimplifierTest, 
IncompatibleTuples) { constexpr absl::string_view kModuleStr = R"( HloModule IncompatibleTuples, entry_computation_layout={((f32[], f32[], f32[]))->(f32[], f32[])} ENTRY %IncompatibleTuples (param: (f32[], f32[], f32[])) -> (f32[], f32[]) { %param = (f32[], f32[], f32[]) parameter(0) %get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=0 %get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=1 ROOT %tuple = (f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %get-tuple-element.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); Run(module.get(), false); } TEST_F(TupleSimplifierTest, CanExcludeEntryComputation) { constexpr absl::string_view kModuleStr = R"( HloModule CanExcludeEntryComputation, entry_computation_layout={((f32[], f32[], f32[]))->(f32[], f32[])} %c1 (param: (f32[], f32[], f32[])) -> (f32[], f32[], f32[]) { %param = (f32[], f32[], f32[]) parameter(0) %get-tuple-element = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=0 %get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=1 %get-tuple-element.2 = f32[] get-tuple-element((f32[], f32[], f32[]) %param), index=2 ROOT %tuple = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %get-tuple-element.1, f32[] %get-tuple-element.2) } %c2 (param.1: (f32[], f32[], f32[])) -> (f32[], f32[], f32[]) { %param.1 = (f32[], f32[], f32[]) parameter(0) %get-tuple-element.3 = f32[] get-tuple-element((f32[], f32[], f32[]) %param.1), index=0 %get-tuple-element.4 = f32[] get-tuple-element((f32[], f32[], f32[]) %param.1), index=1 %get-tuple-element.5 = f32[] get-tuple-element((f32[], f32[], f32[]) %param.1), index=2 ROOT %tuple.1 = (f32[], f32[], f32[]) tuple(f32[] %get-tuple-element.3, f32[] %get-tuple-element.4, f32[] %get-tuple-element.5) } ENTRY %e (param.2: (f32[], f32[], f32[])) -> (f32[], f32[]) { %param.2 = (f32[], f32[], f32[]) parameter(0) %call = (f32[], f32[], f32[]) call((f32[], f32[], f32[]) %param.2), to_apply=%c1 %get-tuple-element.6 = f32[] get-tuple-element((f32[], f32[], f32[]) %call), index=0 %call.1 = (f32[], f32[], f32[]) call((f32[], f32[], f32[]) %param.2), to_apply=%c2 %get-tuple-element.7 = f32[] get-tuple-element((f32[], f32[], f32[]) %call.1), index=1 %tuple.2 = (f32[], f32[]) tuple(f32[] %get-tuple-element.6, f32[] %get-tuple-element.7) %get-tuple-element.8 = f32[] get-tuple-element((f32[], f32[]) %tuple.2), index=0 %get-tuple-element.9 = f32[] get-tuple-element((f32[], f32[]) %tuple.2), index=1 ROOT %tuple.3 = (f32[], f32[]) tuple(f32[] %get-tuple-element.8, f32[] %get-tuple-element.9) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); Run(module.get(), true, true); EXPECT_THAT(FindComputation(module.get(), "c1")->root_instruction(), op::Parameter(0)); EXPECT_THAT(FindComputation(module.get(), "c2")->root_instruction(), op::Parameter(0)); EXPECT_EQ(module->entry_computation()->instruction_count(), 9); } TEST_F(TupleSimplifierTest, ShardingInfoIsNotBeLost) { constexpr absl::string_view kModuleStr = R"( HloModule m ENTRY test { p0 = s32[10] parameter(0), sharding={devices=[2]0,1} t = (s32[10]) tuple(p0) ROOT %gte = s32[10] get-tuple-element(t), index=0, sharding={replicated} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); Run(module.get(), false); } TEST_F(TupleSimplifierTest, NestedTuple) { constexpr absl::string_view kModuleStr = R"( HloModule m ENTRY test { p0 = s32[10] parameter(0), 
sharding={devices=[2]0,1} p1 = s32[10] parameter(1), sharding={devices=[2]0,1} p2 = s32[10] parameter(2), sharding={devices=[2]0,1} p3 = s32[10] parameter(3), sharding={devices=[2]0,1} t = (s32[10], s32[10]) tuple(p0, p1), sharding={{devices=[2]0,1}, {devices=[2]0,1}} t2 = ((s32[10], s32[10]), s32[10]) tuple(t, p2), sharding={{devices=[2]0,1}, {devices=[2]0,1}, {devices=[2]0,1}} t3 = (((s32[10], s32[10]), s32[10]), s32[10]) tuple(t2, p3), sharding={{devices=[2]0,1}, {devices=[2]0,1}, {devices=[2]0,1}, {devices=[2]0,1}} gte0 = ((s32[10], s32[10]), s32[10]) get-tuple-element(t3), index=0, sharding={{replicated}, {replicated}, {replicated}} gte1 = (s32[10], s32[10]) get-tuple-element(gte0), index=0, sharding={{replicated}, {replicated}} gte2 = s32[10] get-tuple-element(gte1), index=1, sharding={devices=[2]0,1} gte3 = s32[10] get-tuple-element(gte1), index=0, sharding={replicated} ROOT to = (s32[10], s32[10]) tuple(gte2, gte3) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); Run(module.get(), true); auto* p1 = FindInstruction(module.get(), "p1"); auto* gte3 = FindInstruction(module.get(), "gte3"); EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0), p1); EXPECT_EQ(module->entry_computation()->root_instruction()->operand(1), gte3); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_simplifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_simplifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
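The tuple_simplifier tests above pin down two rewrite rules: a get-tuple-element of a tuple instruction forwards the matching operand, and a tuple rebuilt from in-order get-tuple-elements of a single source collapses back to that source (but not when the element order changes or the elements come from different tuples). Below is a minimal standalone sketch of those rules on a toy `Node` tree; it is a hypothetical illustration of the invariant the tests check, not XLA's `HloInstruction` API or the TupleSimplifier pass itself.

```cpp
// Toy sketch of the two rewrites exercised by the tests above.
#include <cassert>
#include <string>
#include <vector>

struct Node {
  std::string op;               // "tuple", "gte", or an opaque leaf kind
  std::vector<Node*> operands;  // non-owning, for brevity
  int index = -1;               // tuple index, valid when op == "gte"
};

// Rule 1: gte(tuple(a0, ..., an), i) -> ai.
// Rule 2: tuple(gte(t, 0), ..., gte(t, n-1)) -> t, but only when every
//         element comes from the same t and the indices are in order
//         (cf. TupleOfGteNotRemovedIfOrderIsNotPreserved above).
Node* Simplify(Node* n) {
  for (Node*& operand : n->operands) operand = Simplify(operand);
  if (n->op == "gte" && n->operands[0]->op == "tuple") {
    return n->operands[0]->operands[n->index];
  }
  if (n->op == "tuple" && !n->operands.empty()) {
    Node* source = nullptr;
    for (size_t i = 0; i < n->operands.size(); ++i) {
      Node* o = n->operands[i];
      if (o->op != "gte" || o->index != static_cast<int>(i)) return n;
      if (source == nullptr) source = o->operands[0];
      if (o->operands[0] != source) return n;  // cf. IncompatibleTuples
    }
    return source;
  }
  return n;
}

int main() {
  // Rule 1: picking element 1 out of a fresh tuple forwards b directly.
  Node a{"leaf"}, b{"leaf"};
  Node pair{"tuple", {&a, &b}};
  Node pick{"gte", {&pair}, 1};
  assert(Simplify(&pick) == &b);

  // Rule 2: a tuple rebuilt from in-order gtes of a tuple-shaped
  // parameter collapses back to the parameter itself.
  Node p{"param"};
  Node g0{"gte", {&p}, 0}, g1{"gte", {&p}, 1};
  Node rebuilt{"tuple", {&g0, &g1}};
  assert(Simplify(&rebuilt) == &p);

  // Swapped indices must be left alone, as the tests above require.
  Node swapped{"tuple", {&g1, &g0}};
  assert(Simplify(&swapped) == &swapped);
  return 0;
}
```

The per-element order and same-source checks in rule 2 are exactly what keep the `TupleOfGteNotRemovedIfOrderIsNotPreserved` and `IncompatibleTuples` cases from being rewritten.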
e0837984-608a-4366-a9d6-6e4cd4a9f979
cpp
abseil/abseil-cpp
exponential_distribution
absl/random/exponential_distribution.h
absl/random/exponential_distribution_test.cc
#ifndef ABSL_RANDOM_EXPONENTIAL_DISTRIBUTION_H_ #define ABSL_RANDOM_EXPONENTIAL_DISTRIBUTION_H_ #include <cassert> #include <cmath> #include <istream> #include <limits> #include <type_traits> #include "absl/meta/type_traits.h" #include "absl/random/internal/fast_uniform_bits.h" #include "absl/random/internal/generate_real.h" #include "absl/random/internal/iostream_state_saver.h" namespace absl { ABSL_NAMESPACE_BEGIN template <typename RealType = double> class exponential_distribution { public: using result_type = RealType; class param_type { public: using distribution_type = exponential_distribution; explicit param_type(result_type lambda = 1) : lambda_(lambda) { assert(lambda > 0); neg_inv_lambda_ = -result_type(1) / lambda_; } result_type lambda() const { return lambda_; } friend bool operator==(const param_type& a, const param_type& b) { return a.lambda_ == b.lambda_; } friend bool operator!=(const param_type& a, const param_type& b) { return !(a == b); } private: friend class exponential_distribution; result_type lambda_; result_type neg_inv_lambda_; static_assert( std::is_floating_point<RealType>::value, "Class-template absl::exponential_distribution<> must be parameterized " "using a floating-point type."); }; exponential_distribution() : exponential_distribution(1) {} explicit exponential_distribution(result_type lambda) : param_(lambda) {} explicit exponential_distribution(const param_type& p) : param_(p) {} void reset() {} template <typename URBG> result_type operator()(URBG& g) { return (*this)(g, param_); } template <typename URBG> result_type operator()(URBG& g, const param_type& p); param_type param() const { return param_; } void param(const param_type& p) { param_ = p; } result_type(min)() const { return 0; } result_type(max)() const { return std::numeric_limits<result_type>::infinity(); } result_type lambda() const { return param_.lambda(); } friend bool operator==(const exponential_distribution& a, const exponential_distribution& b) { return a.param_ == b.param_; } friend bool operator!=(const exponential_distribution& a, const exponential_distribution& b) { return a.param_ != b.param_; } private: param_type param_; random_internal::FastUniformBits<uint64_t> fast_u64_; }; template <typename RealType> template <typename URBG> typename exponential_distribution<RealType>::result_type exponential_distribution<RealType>::operator()( URBG& g, const param_type& p) { using random_internal::GenerateNegativeTag; using random_internal::GenerateRealFromBits; using real_type = absl::conditional_t<std::is_same<RealType, float>::value, float, double>; const result_type u = GenerateRealFromBits<real_type, GenerateNegativeTag, false>(fast_u64_(g)); return p.neg_inv_lambda_ * std::log1p(u); } template <typename CharT, typename Traits, typename RealType> std::basic_ostream<CharT, Traits>& operator<<( std::basic_ostream<CharT, Traits>& os, const exponential_distribution<RealType>& x) { auto saver = random_internal::make_ostream_state_saver(os); os.precision(random_internal::stream_precision_helper<RealType>::kPrecision); os << x.lambda(); return os; } template <typename CharT, typename Traits, typename RealType> std::basic_istream<CharT, Traits>& operator>>( std::basic_istream<CharT, Traits>& is, exponential_distribution<RealType>& x) { using result_type = typename exponential_distribution<RealType>::result_type; using param_type = typename exponential_distribution<RealType>::param_type; result_type lambda; auto saver = random_internal::make_istream_state_saver(is); lambda = 
random_internal::read_floating_point<result_type>(is); if (!is.fail()) { x.param(param_type(lambda)); } return is; } ABSL_NAMESPACE_END } #endif
#include "absl/random/exponential_distribution.h" #include <algorithm> #include <cfloat> #include <cmath> #include <cstddef> #include <cstdint> #include <iterator> #include <limits> #include <random> #include <sstream> #include <string> #include <type_traits> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/macros.h" #include "absl/log/log.h" #include "absl/numeric/internal/representation.h" #include "absl/random/internal/chi_square.h" #include "absl/random/internal/distribution_test_util.h" #include "absl/random/internal/pcg_engine.h" #include "absl/random/internal/sequence_urbg.h" #include "absl/random/random.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_replace.h" #include "absl/strings/strip.h" namespace { using absl::random_internal::kChiSquared; template <typename RealType> class ExponentialDistributionTypedTest : public ::testing::Test {}; using RealTypes = std::conditional<absl::numeric_internal::IsDoubleDouble(), ::testing::Types<float, double>, ::testing::Types<float, double, long double>>::type; TYPED_TEST_SUITE(ExponentialDistributionTypedTest, RealTypes); TYPED_TEST(ExponentialDistributionTypedTest, SerializeTest) { using param_type = typename absl::exponential_distribution<TypeParam>::param_type; const TypeParam kParams[] = { 1, std::nextafter(TypeParam(1), TypeParam(0)), std::nextafter(TypeParam(1), TypeParam(2)), TypeParam(1e-8), TypeParam(1e-4), TypeParam(1), TypeParam(2), TypeParam(1e4), TypeParam(1e8), TypeParam(1e20), TypeParam(2.5), std::numeric_limits<TypeParam>::max(), std::numeric_limits<TypeParam>::epsilon(), std::nextafter(std::numeric_limits<TypeParam>::min(), TypeParam(1)), std::numeric_limits<TypeParam>::min(), std::numeric_limits<TypeParam>::denorm_min(), std::numeric_limits<TypeParam>::min() / 2, std::nextafter(std::numeric_limits<TypeParam>::min(), TypeParam(0)), }; constexpr int kCount = 1000; absl::InsecureBitGen gen; for (const TypeParam lambda : kParams) { if (!std::isfinite(lambda)) continue; ABSL_ASSERT(lambda > 0); const param_type param(lambda); absl::exponential_distribution<TypeParam> before(lambda); EXPECT_EQ(before.lambda(), param.lambda()); { absl::exponential_distribution<TypeParam> via_param(param); EXPECT_EQ(via_param, before); EXPECT_EQ(via_param.param(), before.param()); } auto sample_min = before.max(); auto sample_max = before.min(); for (int i = 0; i < kCount; i++) { auto sample = before(gen); EXPECT_GE(sample, before.min()) << before; EXPECT_LE(sample, before.max()) << before; if (sample > sample_max) sample_max = sample; if (sample < sample_min) sample_min = sample; } if (!std::is_same<TypeParam, long double>::value) { LOG(INFO) << "Range {" << lambda << "}: " << sample_min << ", " << sample_max << ", lambda=" << lambda; } std::stringstream ss; ss << before; if (!std::isfinite(lambda)) { continue; } absl::exponential_distribution<TypeParam> after(34.56f); EXPECT_NE(before.lambda(), after.lambda()); EXPECT_NE(before.param(), after.param()); EXPECT_NE(before, after); ss >> after; EXPECT_EQ(before.lambda(), after.lambda()) << ss.str() << " " << (ss.good() ? "good " : "") << (ss.bad() ? "bad " : "") << (ss.eof() ? "eof " : "") << (ss.fail() ? 
"fail " : ""); } } class ExponentialModel { public: explicit ExponentialModel(double lambda) : lambda_(lambda), beta_(1.0 / lambda) {} double lambda() const { return lambda_; } double mean() const { return beta_; } double variance() const { return beta_ * beta_; } double stddev() const { return std::sqrt(variance()); } double skew() const { return 2; } double kurtosis() const { return 6.0; } double CDF(double x) { return 1.0 - std::exp(-lambda_ * x); } double InverseCDF(double p) { ABSL_ASSERT(p >= 0.0); ABSL_ASSERT(p < 1.0); return -beta_ * std::log(1.0 - p); } private: const double lambda_; const double beta_; }; struct Param { double lambda; double p_fail; int trials; }; class ExponentialDistributionTests : public testing::TestWithParam<Param>, public ExponentialModel { public: ExponentialDistributionTests() : ExponentialModel(GetParam().lambda) {} template <typename D> bool SingleZTest(const double p, const size_t samples); template <typename D> double SingleChiSquaredTest(); absl::random_internal::pcg64_2018_engine rng_{0x2B7E151628AED2A6}; }; template <typename D> bool ExponentialDistributionTests::SingleZTest(const double p, const size_t samples) { D dis(lambda()); std::vector<double> data; data.reserve(samples); for (size_t i = 0; i < samples; i++) { const double x = dis(rng_); data.push_back(x); } const auto m = absl::random_internal::ComputeDistributionMoments(data); const double max_err = absl::random_internal::MaxErrorTolerance(p); const double z = absl::random_internal::ZScore(mean(), m); const bool pass = absl::random_internal::Near("z", z, 0.0, max_err); if (!pass) { LOG(INFO) << "p=" << p << " max_err=" << max_err << "\n" " lambda=" << lambda() << "\n" " mean=" << m.mean << " vs. " << mean() << "\n" " stddev=" << std::sqrt(m.variance) << " vs. " << stddev() << "\n" " skewness=" << m.skewness << " vs. " << skew() << "\n" " kurtosis=" << m.kurtosis << " vs. " << kurtosis() << "\n" " z=" << z << " vs. 
0"; } return pass; } template <typename D> double ExponentialDistributionTests::SingleChiSquaredTest() { const size_t kSamples = 10000; const int kBuckets = 50; std::vector<double> cutoffs; const double kInc = 1.0 / static_cast<double>(kBuckets); for (double p = kInc; p < 1.0; p += kInc) { cutoffs.push_back(InverseCDF(p)); } if (cutoffs.back() != std::numeric_limits<double>::infinity()) { cutoffs.push_back(std::numeric_limits<double>::infinity()); } D dis(lambda()); std::vector<int32_t> counts(cutoffs.size(), 0); for (int j = 0; j < kSamples; j++) { const double x = dis(rng_); auto it = std::upper_bound(cutoffs.begin(), cutoffs.end(), x); counts[std::distance(cutoffs.begin(), it)]++; } const int dof = static_cast<int>(counts.size()) - 1; const double threshold = absl::random_internal::ChiSquareValue(dof, 0.98); const double expected = static_cast<double>(kSamples) / static_cast<double>(counts.size()); double chi_square = absl::random_internal::ChiSquareWithExpected( std::begin(counts), std::end(counts), expected); double p = absl::random_internal::ChiSquarePValue(chi_square, dof); if (chi_square > threshold) { for (size_t i = 0; i < cutoffs.size(); i++) { LOG(INFO) << i << " : (" << cutoffs[i] << ") = " << counts[i]; } LOG(INFO) << "lambda " << lambda() << "\n" " expected " << expected << "\n" << kChiSquared << " " << chi_square << " (" << p << ")\n" << kChiSquared << " @ 0.98 = " << threshold; } return p; } TEST_P(ExponentialDistributionTests, ZTest) { const size_t kSamples = 10000; const auto& param = GetParam(); const int expected_failures = std::max(1, static_cast<int>(std::ceil(param.trials * param.p_fail))); const double p = absl::random_internal::RequiredSuccessProbability( param.p_fail, param.trials); int failures = 0; for (int i = 0; i < param.trials; i++) { failures += SingleZTest<absl::exponential_distribution<double>>(p, kSamples) ? 
0 : 1; } EXPECT_LE(failures, expected_failures); } TEST_P(ExponentialDistributionTests, ChiSquaredTest) { const int kTrials = 20; int failures = 0; for (int i = 0; i < kTrials; i++) { double p_value = SingleChiSquaredTest<absl::exponential_distribution<double>>(); if (p_value < 0.005) { failures++; } } EXPECT_LE(failures, 4); } std::vector<Param> GenParams() { return { Param{1.0, 0.02, 100}, Param{2.5, 0.02, 100}, Param{10, 0.02, 100}, Param{1e4, 0.02, 100}, Param{1e9, 0.02, 100}, Param{0.1, 0.02, 100}, Param{1e-3, 0.02, 100}, Param{1e-5, 0.02, 100}, }; } std::string ParamName(const ::testing::TestParamInfo<Param>& info) { const auto& p = info.param; std::string name = absl::StrCat("lambda_", absl::SixDigits(p.lambda)); return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}}); } INSTANTIATE_TEST_SUITE_P(All, ExponentialDistributionTests, ::testing::ValuesIn(GenParams()), ParamName); TEST(ExponentialDistributionTest, StabilityTest) { absl::random_internal::sequence_urbg urbg( {0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull, 0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull, 0xECDD4775619F1510ull, 0x13CCA830EB61BD96ull, 0x0334FE1EAA0363CFull, 0xB5735C904C70A239ull, 0xD59E9E0BCBAADE14ull, 0xEECC86BC60622CA7ull}); std::vector<int> output(14); { absl::exponential_distribution<double> dist; std::generate(std::begin(output), std::end(output), [&] { return static_cast<int>(10000.0 * dist(urbg)); }); EXPECT_EQ(14, urbg.invocations()); EXPECT_THAT(output, testing::ElementsAre(0, 71913, 14375, 5039, 1835, 861, 25936, 804, 126, 12337, 17984, 27002, 0, 71913)); } urbg.reset(); { absl::exponential_distribution<float> dist; std::generate(std::begin(output), std::end(output), [&] { return static_cast<int>(10000.0f * dist(urbg)); }); EXPECT_EQ(14, urbg.invocations()); EXPECT_THAT(output, testing::ElementsAre(0, 71913, 14375, 5039, 1835, 861, 25936, 804, 126, 12337, 17984, 27002, 0, 71913)); } } TEST(ExponentialDistributionTest, AlgorithmBounds) { #if (defined(__i386__) || defined(_M_IX86)) && FLT_EVAL_METHOD != 0 GTEST_SKIP() << "Skipping the test because we detected x87 floating-point semantics"; #endif absl::exponential_distribution<double> dist; { absl::random_internal::sequence_urbg urbg({0x0000000000000001ull}); double a = dist(urbg); EXPECT_EQ(a, 5.42101086242752217004e-20); } { absl::random_internal::sequence_urbg urbg({0x7fffffffffffffefull}); double a = dist(urbg); EXPECT_EQ(a, 0.693147180559945175204); } { absl::random_internal::sequence_urbg urbg({0xFFFFFFFFFFFFFFeFull}); double a = dist(urbg); EXPECT_EQ(a, 36.7368005696771007251); } { absl::random_internal::sequence_urbg urbg({0xFFFFFFFFFFFFFFFFull}); double a = dist(urbg); EXPECT_EQ(a, 36.7368005696771007251); } } }
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/exponential_distribution.h
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/exponential_distribution_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
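For context on how the distribution above is typically consumed, here is a minimal usage sketch: draw samples with `absl::BitGen` and compare the sample mean against the analytic mean 1/lambda. Everything used here is public Abseil API shown in the files above or provided by `absl/random/random.h`.

```cpp
// Draw exponentially distributed values with rate lambda = 2 and check
// that the sample mean lands near the analytic mean 1/lambda = 0.5.
#include <cstdio>

#include "absl/random/exponential_distribution.h"
#include "absl/random/random.h"

int main() {
  absl::BitGen gen;  // any uniform random bit generator works
  const double lambda = 2.0;
  absl::exponential_distribution<double> dist(lambda);

  double sum = 0.0;
  constexpr int kSamples = 100000;
  for (int i = 0; i < kSamples; ++i) {
    sum += dist(gen);  // each draw lies in [0, +infinity)
  }
  std::printf("sample mean = %f (analytic mean = %f)\n",
              sum / kSamples, 1.0 / lambda);
  return 0;
}
```

Abseil also exposes the one-shot `absl::Exponential<double>(gen, lambda)` helper in `absl/random/distributions.h` for callers that don't need a reusable distribution object.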
4ad59b14-c70d-4f5b-b197-26ce368fa44d
cpp
tensorflow/tensorflow
simple_delete
tensorflow/core/graph/regularization/simple_delete.cc
tensorflow/core/graph/regularization/simple_delete_test.cc
#include "tensorflow/core/graph/regularization/simple_delete.h" #include <cstdint> #include <string> #include "absl/status/statusor.h" #include "absl/strings/strip.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/graph/regularization/util.h" #include "tensorflow/core/grappler/op_types.h" namespace tensorflow::graph_regularization { namespace { void RegularizeNodes(GraphDef* graph_def) { for (NodeDef& node : *graph_def->mutable_node()) { if (grappler::IsPartitionedCall(node) || grappler::IsStatefulPartitionedCall(node)) { std::string function_name = node.attr().find("f")->second.func().name(); absl::StatusOr<int64_t> uid = GetSuffixUID(function_name); if (uid.ok()) { node.mutable_attr()->find("f")->second.mutable_func()->set_name( std::string( absl::StripSuffix(function_name, std::to_string(*uid)))); } auto node_config_proto = node.mutable_attr()->find("config_proto"); if (node_config_proto != node.attr().end()) { node_config_proto->second.mutable_s()->erase(); } } if (grappler::IsConstant(node)) { if (node.attr().at("dtype").type() == DT_STRING) { node.mutable_attr()->find("value")->second.clear_value(); } } } } } void SimpleDelete(GraphDef& graph_def) { RegularizeNodes(&graph_def); graph_def.mutable_library()->Clear(); graph_def.mutable_versions()->Clear(); } }
#include "tensorflow/core/graph/regularization/simple_delete.h" #include <string> #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "tensorflow/core/graph/regularization/util.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/protobuf/meta_graph.pb.h" #include "tensorflow/core/protobuf/saved_model.pb.h" #include "tsl/platform/statusor.h" namespace tensorflow::graph_regularization { namespace { absl::StatusOr<SavedModel> ReadSavedModel(absl::string_view file_dir) { std::string file_path = io::JoinPath(file_dir, "saved_model.pb"); std::string serialized_saved_model; auto status = ReadFileToString(Env::Default(), file_path, &serialized_saved_model); if (!status.ok()) { return status; } SavedModel saved_model_pb; saved_model_pb.ParseFromString(serialized_saved_model); return saved_model_pb; } TEST(SimpleDeleteTest, TestSimpleDeleteModelSavedTwice) { const std::string export_dir = io::JoinPath(testing::TensorFlowSrcRoot(), "core/graph/regularization/testdata", "bert1"); TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb, ReadSavedModel(export_dir)); MetaGraphDef* metagraph = saved_model_pb.mutable_meta_graphs(0); GraphDef* graph_def = metagraph->mutable_graph_def(); SimpleDelete(*graph_def); uint64 hash1 = ComputeHash(*graph_def); const std::string export_dir2 = io::JoinPath(testing::TensorFlowSrcRoot(), "core/graph/regularization/testdata", "bert2"); TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb2, ReadSavedModel(export_dir2)); const MetaGraphDef& metagraph2 = saved_model_pb2.meta_graphs(0); GraphDef graph_def2 = metagraph2.graph_def(); SimpleDelete(graph_def2); uint64 hash2 = ComputeHash(graph_def2); EXPECT_EQ(hash1, hash2); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/regularization/simple_delete.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/regularization/simple_delete_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
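A sketch of the intended call pattern, mirroring the test above: regularize both `GraphDef`s in place with `SimpleDelete`, then compare fingerprints. `ComputeHash` is the helper from the companion `util.h` that the test itself uses; obtaining the `GraphDef`s (for example from a parsed `SavedModel`, as in the test) is left out here.

```cpp
// SimpleDelete strips the UID suffix from (Stateful)PartitionedCall
// function names, erases config_proto and string-constant payloads, and
// clears the function library and versions, so two saves of the same
// model hash identically afterwards.
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/regularization/simple_delete.h"
#include "tensorflow/core/graph/regularization/util.h"

namespace tf_reg = tensorflow::graph_regularization;

// Takes copies on purpose: SimpleDelete mutates its argument.
bool SameRegularizedGraph(tensorflow::GraphDef a, tensorflow::GraphDef b) {
  tf_reg::SimpleDelete(a);
  tf_reg::SimpleDelete(b);
  return tf_reg::ComputeHash(a) == tf_reg::ComputeHash(b);
}
```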
7a442277-60e5-4a8a-9580-69dd82e9d0f3
cpp
abseil/abseil-cpp
crc_cord_state
absl/crc/internal/crc_cord_state.cc
absl/crc/internal/crc_cord_state_test.cc
#include "absl/crc/internal/crc_cord_state.h" #include <cassert> #include "absl/base/config.h" #include "absl/base/no_destructor.h" #include "absl/numeric/bits.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace crc_internal { CrcCordState::RefcountedRep* CrcCordState::RefSharedEmptyRep() { static absl::NoDestructor<CrcCordState::RefcountedRep> empty; assert(empty->count.load(std::memory_order_relaxed) >= 1); assert(empty->rep.removed_prefix.length == 0); assert(empty->rep.prefix_crc.empty()); Ref(empty.get()); return empty.get(); } CrcCordState::CrcCordState() : refcounted_rep_(new RefcountedRep) {} CrcCordState::CrcCordState(const CrcCordState& other) : refcounted_rep_(other.refcounted_rep_) { Ref(refcounted_rep_); } CrcCordState::CrcCordState(CrcCordState&& other) : refcounted_rep_(other.refcounted_rep_) { other.refcounted_rep_ = RefSharedEmptyRep(); } CrcCordState& CrcCordState::operator=(const CrcCordState& other) { if (this != &other) { Unref(refcounted_rep_); refcounted_rep_ = other.refcounted_rep_; Ref(refcounted_rep_); } return *this; } CrcCordState& CrcCordState::operator=(CrcCordState&& other) { if (this != &other) { Unref(refcounted_rep_); refcounted_rep_ = other.refcounted_rep_; other.refcounted_rep_ = RefSharedEmptyRep(); } return *this; } CrcCordState::~CrcCordState() { Unref(refcounted_rep_); } crc32c_t CrcCordState::Checksum() const { if (rep().prefix_crc.empty()) { return absl::crc32c_t{0}; } if (IsNormalized()) { return rep().prefix_crc.back().crc; } return absl::RemoveCrc32cPrefix( rep().removed_prefix.crc, rep().prefix_crc.back().crc, rep().prefix_crc.back().length - rep().removed_prefix.length); } CrcCordState::PrefixCrc CrcCordState::NormalizedPrefixCrcAtNthChunk( size_t n) const { assert(n < NumChunks()); if (IsNormalized()) { return rep().prefix_crc[n]; } size_t length = rep().prefix_crc[n].length - rep().removed_prefix.length; return PrefixCrc(length, absl::RemoveCrc32cPrefix(rep().removed_prefix.crc, rep().prefix_crc[n].crc, length)); } void CrcCordState::Normalize() { if (IsNormalized() || rep().prefix_crc.empty()) { return; } Rep* r = mutable_rep(); for (auto& prefix_crc : r->prefix_crc) { size_t remaining = prefix_crc.length - r->removed_prefix.length; prefix_crc.crc = absl::RemoveCrc32cPrefix(r->removed_prefix.crc, prefix_crc.crc, remaining); prefix_crc.length = remaining; } r->removed_prefix = PrefixCrc(); } void CrcCordState::Poison() { Rep* rep = mutable_rep(); if (NumChunks() > 0) { for (auto& prefix_crc : rep->prefix_crc) { uint32_t crc = static_cast<uint32_t>(prefix_crc.crc); crc += 0x2e76e41b; crc = absl::rotr(crc, 17); prefix_crc.crc = crc32c_t{crc}; } } else { rep->prefix_crc.emplace_back(0, crc32c_t{1}); } } } ABSL_NAMESPACE_END }
#include "absl/crc/internal/crc_cord_state.h" #include <algorithm> #include <cstdint> #include <string> #include <utility> #include "gtest/gtest.h" #include "absl/crc/crc32c.h" namespace { TEST(CrcCordState, Default) { absl::crc_internal::CrcCordState state; EXPECT_TRUE(state.IsNormalized()); EXPECT_EQ(state.Checksum(), absl::crc32c_t{0}); state.Normalize(); EXPECT_EQ(state.Checksum(), absl::crc32c_t{0}); } TEST(CrcCordState, Normalize) { absl::crc_internal::CrcCordState state; auto* rep = state.mutable_rep(); rep->prefix_crc.push_back( absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000})); rep->prefix_crc.push_back( absl::crc_internal::CrcCordState::PrefixCrc(2000, absl::crc32c_t{2000})); rep->removed_prefix = absl::crc_internal::CrcCordState::PrefixCrc(500, absl::crc32c_t{500}); EXPECT_FALSE(state.IsNormalized()); absl::crc32c_t crc = state.Checksum(); state.Normalize(); EXPECT_TRUE(state.IsNormalized()); EXPECT_EQ(state.Checksum(), crc); EXPECT_EQ(rep->removed_prefix.length, 0); } TEST(CrcCordState, Copy) { absl::crc_internal::CrcCordState state; auto* rep = state.mutable_rep(); rep->prefix_crc.push_back( absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000})); absl::crc_internal::CrcCordState copy = state; EXPECT_EQ(state.Checksum(), absl::crc32c_t{1000}); EXPECT_EQ(copy.Checksum(), absl::crc32c_t{1000}); } TEST(CrcCordState, UnsharedSelfCopy) { absl::crc_internal::CrcCordState state; auto* rep = state.mutable_rep(); rep->prefix_crc.push_back( absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000})); const absl::crc_internal::CrcCordState& ref = state; state = ref; EXPECT_EQ(state.Checksum(), absl::crc32c_t{1000}); } TEST(CrcCordState, Move) { absl::crc_internal::CrcCordState state; auto* rep = state.mutable_rep(); rep->prefix_crc.push_back( absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000})); absl::crc_internal::CrcCordState moved = std::move(state); EXPECT_EQ(moved.Checksum(), absl::crc32c_t{1000}); } TEST(CrcCordState, UnsharedSelfMove) { absl::crc_internal::CrcCordState state; auto* rep = state.mutable_rep(); rep->prefix_crc.push_back( absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000})); absl::crc_internal::CrcCordState& ref = state; state = std::move(ref); EXPECT_EQ(state.Checksum(), absl::crc32c_t{1000}); } TEST(CrcCordState, PoisonDefault) { absl::crc_internal::CrcCordState state; state.Poison(); EXPECT_NE(state.Checksum(), absl::crc32c_t{0}); } TEST(CrcCordState, PoisonData) { absl::crc_internal::CrcCordState state; auto* rep = state.mutable_rep(); rep->prefix_crc.push_back( absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000})); rep->prefix_crc.push_back( absl::crc_internal::CrcCordState::PrefixCrc(2000, absl::crc32c_t{2000})); rep->removed_prefix = absl::crc_internal::CrcCordState::PrefixCrc(500, absl::crc32c_t{500}); absl::crc32c_t crc = state.Checksum(); state.Poison(); EXPECT_NE(state.Checksum(), crc); } }
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/crc/internal/crc_cord_state.cc
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/crc/internal/crc_cord_state_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
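A minimal usage sketch of the representation the tests above manipulate: `prefix_crc` holds cumulative CRCs at chunk boundaries, `removed_prefix` records bytes trimmed from the front of the cord, and `Normalize()` folds the removed prefix into each entry without changing `Checksum()`. Note that `crc_internal` is an implementation detail behind `absl::Cord`; it is poked at directly here, exactly as in the tests, purely for illustration.

```cpp
#include <cassert>

#include "absl/crc/crc32c.h"
#include "absl/crc/internal/crc_cord_state.h"

int main() {
  absl::crc_internal::CrcCordState state;
  auto* rep = state.mutable_rep();
  // Cumulative CRCs of the first 1000 and 2000 bytes (toy values).
  rep->prefix_crc.push_back(
      absl::crc_internal::CrcCordState::PrefixCrc(1000, absl::crc32c_t{1000}));
  rep->prefix_crc.push_back(
      absl::crc_internal::CrcCordState::PrefixCrc(2000, absl::crc32c_t{2000}));
  // The first 500 bytes were removed from the front of the cord.
  rep->removed_prefix =
      absl::crc_internal::CrcCordState::PrefixCrc(500, absl::crc32c_t{500});

  assert(!state.IsNormalized());
  const absl::crc32c_t before = state.Checksum();
  state.Normalize();  // rewrites each prefix CRC with the prefix removed
  assert(state.IsNormalized());
  assert(state.Checksum() == before);
  return 0;
}
```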
f8b4bd87-b3e8-495b-94cb-8b130bd59927
cpp
google/quiche
quic_stream_sequencer
quiche/quic/core/quic_stream_sequencer.cc
quiche/quic/core/quic_stream_sequencer_test.cc
#include "quiche/quic/core/quic_stream_sequencer.h" #include <algorithm> #include <cstddef> #include <limits> #include <string> #include <utility> #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "quiche/quic/core/quic_clock.h" #include "quiche/quic/core/quic_error_codes.h" #include "quiche/quic/core/quic_packets.h" #include "quiche/quic/core/quic_stream.h" #include "quiche/quic/core/quic_stream_sequencer_buffer.h" #include "quiche/quic/core/quic_types.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/platform/api/quic_bug_tracker.h" #include "quiche/quic/platform/api/quic_flag_utils.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/quic/platform/api/quic_logging.h" #include "quiche/quic/platform/api/quic_stack_trace.h" namespace quic { QuicStreamSequencer::QuicStreamSequencer(StreamInterface* quic_stream) : stream_(quic_stream), buffered_frames_(kStreamReceiveWindowLimit), highest_offset_(0), close_offset_(std::numeric_limits<QuicStreamOffset>::max()), reliable_offset_(0), blocked_(false), num_frames_received_(0), num_duplicate_frames_received_(0), ignore_read_data_(false), level_triggered_(false) {} QuicStreamSequencer::~QuicStreamSequencer() { if (stream_ == nullptr) { QUIC_BUG(quic_bug_10858_1) << "Double free'ing QuicStreamSequencer at " << this << ". " << QuicStackTrace(); } stream_ = nullptr; } void QuicStreamSequencer::OnStreamFrame(const QuicStreamFrame& frame) { QUICHE_DCHECK_LE(frame.offset + frame.data_length, close_offset_); ++num_frames_received_; const QuicStreamOffset byte_offset = frame.offset; const size_t data_len = frame.data_length; if (frame.fin && (!CloseStreamAtOffset(frame.offset + data_len) || data_len == 0)) { return; } if (stream_->version().HasIetfQuicFrames() && data_len == 0) { QUICHE_DCHECK(!frame.fin); return; } OnFrameData(byte_offset, data_len, frame.data_buffer); } void QuicStreamSequencer::OnCryptoFrame(const QuicCryptoFrame& frame) { ++num_frames_received_; if (frame.data_length == 0) { return; } OnFrameData(frame.offset, frame.data_length, frame.data_buffer); } void QuicStreamSequencer::OnReliableReset(QuicStreamOffset reliable_size) { reliable_offset_ = reliable_size; } void QuicStreamSequencer::OnFrameData(QuicStreamOffset byte_offset, size_t data_len, const char* data_buffer) { highest_offset_ = std::max(highest_offset_, byte_offset + data_len); const size_t previous_readable_bytes = buffered_frames_.ReadableBytes(); size_t bytes_written; std::string error_details; QuicErrorCode result = buffered_frames_.OnStreamData( byte_offset, absl::string_view(data_buffer, data_len), &bytes_written, &error_details); if (result != QUIC_NO_ERROR) { std::string details = absl::StrCat("Stream ", stream_->id(), ": ", QuicErrorCodeToString(result), ": ", error_details); QUIC_LOG_FIRST_N(WARNING, 50) << QuicErrorCodeToString(result); QUIC_LOG_FIRST_N(WARNING, 50) << details; stream_->OnUnrecoverableError(result, details); return; } if (bytes_written == 0) { ++num_duplicate_frames_received_; return; } if (blocked_) { return; } if (level_triggered_) { if (buffered_frames_.ReadableBytes() > previous_readable_bytes) { if (ignore_read_data_) { FlushBufferedFrames(); } else { stream_->OnDataAvailable(); } } return; } const bool stream_unblocked = previous_readable_bytes == 0 && buffered_frames_.ReadableBytes() > 0; if (stream_unblocked) { if (ignore_read_data_) { FlushBufferedFrames(); } else { stream_->OnDataAvailable(); } } } bool QuicStreamSequencer::CloseStreamAtOffset(QuicStreamOffset offset) { const 
QuicStreamOffset kMaxOffset = std::numeric_limits<QuicStreamOffset>::max(); if (close_offset_ != kMaxOffset && offset != close_offset_) { stream_->OnUnrecoverableError( QUIC_STREAM_SEQUENCER_INVALID_STATE, absl::StrCat( "Stream ", stream_->id(), " received new final offset: ", offset, ", which is different from close offset: ", close_offset_)); return false; } if (offset < highest_offset_) { stream_->OnUnrecoverableError( QUIC_STREAM_SEQUENCER_INVALID_STATE, absl::StrCat( "Stream ", stream_->id(), " received fin with offset: ", offset, ", which reduces current highest offset: ", highest_offset_)); return false; } if (offset < reliable_offset_) { stream_->OnUnrecoverableError( QUIC_STREAM_MULTIPLE_OFFSET, absl::StrCat( "Stream ", stream_->id(), " received fin with offset: ", offset, ", which reduces current reliable offset: ", reliable_offset_)); return false; } close_offset_ = offset; MaybeCloseStream(); return true; } void QuicStreamSequencer::MaybeCloseStream() { if (blocked_ || !IsClosed()) { return; } QUIC_DVLOG(1) << "Passing up termination, as we've processed " << buffered_frames_.BytesConsumed() << " of " << close_offset_ << " bytes."; if (ignore_read_data_) { stream_->OnFinRead(); } else { stream_->OnDataAvailable(); } buffered_frames_.Clear(); } int QuicStreamSequencer::GetReadableRegions(iovec* iov, size_t iov_len) const { QUICHE_DCHECK(!blocked_); return buffered_frames_.GetReadableRegions(iov, iov_len); } bool QuicStreamSequencer::GetReadableRegion(iovec* iov) const { QUICHE_DCHECK(!blocked_); return buffered_frames_.GetReadableRegion(iov); } bool QuicStreamSequencer::PeekRegion(QuicStreamOffset offset, iovec* iov) const { QUICHE_DCHECK(!blocked_); return buffered_frames_.PeekRegion(offset, iov); } void QuicStreamSequencer::Read(std::string* buffer) { QUICHE_DCHECK(!blocked_); buffer->resize(buffer->size() + ReadableBytes()); iovec iov; iov.iov_len = ReadableBytes(); iov.iov_base = &(*buffer)[buffer->size() - iov.iov_len]; Readv(&iov, 1); } size_t QuicStreamSequencer::Readv(const struct iovec* iov, size_t iov_len) { QUICHE_DCHECK(!blocked_); std::string error_details; size_t bytes_read; QuicErrorCode read_error = buffered_frames_.Readv(iov, iov_len, &bytes_read, &error_details); if (read_error != QUIC_NO_ERROR) { std::string details = absl::StrCat("Stream ", stream_->id(), ": ", error_details); stream_->OnUnrecoverableError(read_error, details); return bytes_read; } stream_->AddBytesConsumed(bytes_read); return bytes_read; } bool QuicStreamSequencer::HasBytesToRead() const { return buffered_frames_.HasBytesToRead(); } size_t QuicStreamSequencer::ReadableBytes() const { return buffered_frames_.ReadableBytes(); } bool QuicStreamSequencer::IsClosed() const { return buffered_frames_.BytesConsumed() >= close_offset_; } void QuicStreamSequencer::MarkConsumed(size_t num_bytes_consumed) { QUICHE_DCHECK(!blocked_); bool result = buffered_frames_.MarkConsumed(num_bytes_consumed); if (!result) { QUIC_BUG(quic_bug_10858_2) << "Invalid argument to MarkConsumed." << " expect to consume: " << num_bytes_consumed << ", but not enough bytes available. 
" << DebugString(); stream_->ResetWithError( QuicResetStreamError::FromInternal(QUIC_ERROR_PROCESSING_STREAM)); return; } stream_->AddBytesConsumed(num_bytes_consumed); } void QuicStreamSequencer::SetBlockedUntilFlush() { blocked_ = true; } void QuicStreamSequencer::SetUnblocked() { blocked_ = false; if (IsClosed() || HasBytesToRead()) { stream_->OnDataAvailable(); } } void QuicStreamSequencer::StopReading() { if (ignore_read_data_) { return; } ignore_read_data_ = true; FlushBufferedFrames(); } void QuicStreamSequencer::ReleaseBuffer() { buffered_frames_.ReleaseWholeBuffer(); } void QuicStreamSequencer::ReleaseBufferIfEmpty() { if (buffered_frames_.Empty()) { buffered_frames_.ReleaseWholeBuffer(); } } void QuicStreamSequencer::FlushBufferedFrames() { QUICHE_DCHECK(ignore_read_data_); size_t bytes_flushed = buffered_frames_.FlushBufferedFrames(); QUIC_DVLOG(1) << "Flushing buffered data at offset " << buffered_frames_.BytesConsumed() << " length " << bytes_flushed << " for stream " << stream_->id(); stream_->AddBytesConsumed(bytes_flushed); MaybeCloseStream(); } size_t QuicStreamSequencer::NumBytesBuffered() const { return buffered_frames_.BytesBuffered(); } QuicStreamOffset QuicStreamSequencer::NumBytesConsumed() const { return buffered_frames_.BytesConsumed(); } bool QuicStreamSequencer::IsAllDataAvailable() const { QUICHE_DCHECK_LE(NumBytesConsumed() + NumBytesBuffered(), close_offset_); return NumBytesConsumed() + NumBytesBuffered() >= close_offset_; } std::string QuicStreamSequencer::DebugString() const { return absl::StrCat( "QuicStreamSequencer: bytes buffered: ", NumBytesBuffered(), "\n bytes consumed: ", NumBytesConsumed(), "\n first missing byte: ", buffered_frames_.FirstMissingByte(), "\n next expected byte: ", buffered_frames_.NextExpectedByte(), "\n received frames: ", buffered_frames_.ReceivedFramesDebugString(), "\n has bytes to read: ", HasBytesToRead() ? "true" : "false", "\n frames received: ", num_frames_received(), "\n close offset bytes: ", close_offset_, "\n is closed: ", IsClosed() ? "true" : "false"); } }
#include "quiche/quic/core/quic_stream_sequencer.h" #include <algorithm> #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/base/macros.h" #include "absl/strings/string_view.h" #include "quiche/quic/core/quic_stream.h" #include "quiche/quic/core/quic_utils.h" #include "quiche/quic/platform/api/quic_expect_bug.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/quic/platform/api/quic_logging.h" #include "quiche/quic/platform/api/quic_test.h" #include "quiche/quic/test_tools/quic_stream_sequencer_peer.h" #include "quiche/quic/test_tools/quic_test_utils.h" using testing::_; using testing::AnyNumber; using testing::InSequence; namespace quic { namespace test { class MockStream : public QuicStreamSequencer::StreamInterface { public: MOCK_METHOD(void, OnFinRead, (), (override)); MOCK_METHOD(void, OnDataAvailable, (), (override)); MOCK_METHOD(void, OnUnrecoverableError, (QuicErrorCode error, const std::string& details), (override)); MOCK_METHOD(void, OnUnrecoverableError, (QuicErrorCode error, QuicIetfTransportErrorCodes ietf_error, const std::string& details), (override)); MOCK_METHOD(void, ResetWithError, (QuicResetStreamError error), (override)); MOCK_METHOD(void, AddBytesConsumed, (QuicByteCount bytes), (override)); QuicStreamId id() const override { return 1; } ParsedQuicVersion version() const override { return CurrentSupportedVersions()[0]; } }; namespace { static const char kPayload[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; class QuicStreamSequencerTest : public QuicTest { public: void ConsumeData(size_t num_bytes) { char buffer[1024]; ASSERT_GT(ABSL_ARRAYSIZE(buffer), num_bytes); struct iovec iov; iov.iov_base = buffer; iov.iov_len = num_bytes; ASSERT_EQ(num_bytes, sequencer_->Readv(&iov, 1)); } protected: QuicStreamSequencerTest() : stream_(), sequencer_(new QuicStreamSequencer(&stream_)) {} bool VerifyReadableRegion(const std::vector<std::string>& expected) { return VerifyReadableRegion(*sequencer_, expected); } bool VerifyReadableRegions(const std::vector<std::string>& expected) { return VerifyReadableRegions(*sequencer_, expected); } bool VerifyIovecs(iovec* iovecs, size_t num_iovecs, const std::vector<std::string>& expected) { return VerifyIovecs(*sequencer_, iovecs, num_iovecs, expected); } bool VerifyReadableRegion(const QuicStreamSequencer& sequencer, const std::vector<std::string>& expected) { iovec iovecs[1]; if (sequencer.GetReadableRegions(iovecs, 1)) { return (VerifyIovecs(sequencer, iovecs, 1, std::vector<std::string>{expected[0]})); } return false; } bool VerifyReadableRegions(const QuicStreamSequencer& sequencer, const std::vector<std::string>& expected) { iovec iovecs[5]; size_t num_iovecs = sequencer.GetReadableRegions(iovecs, ABSL_ARRAYSIZE(iovecs)); return VerifyReadableRegion(sequencer, expected) && VerifyIovecs(sequencer, iovecs, num_iovecs, expected); } bool VerifyIovecs(const QuicStreamSequencer& , iovec* iovecs, size_t num_iovecs, const std::vector<std::string>& expected) { int start_position = 0; for (size_t i = 0; i < num_iovecs; ++i) { if (!VerifyIovec(iovecs[i], expected[0].substr(start_position, iovecs[i].iov_len))) { return false; } start_position += iovecs[i].iov_len; } return true; } bool VerifyIovec(const iovec& iovec, absl::string_view expected) { if (iovec.iov_len != expected.length()) { QUIC_LOG(ERROR) << "Invalid length: " << iovec.iov_len << " vs " << expected.length(); return false; } if (memcmp(iovec.iov_base, expected.data(), expected.length()) != 0) { 
QUIC_LOG(ERROR) << "Invalid data: " << static_cast<char*>(iovec.iov_base) << " vs " << expected; return false; } return true; } void OnFinFrame(QuicStreamOffset byte_offset, const char* data) { QuicStreamFrame frame; frame.stream_id = 1; frame.offset = byte_offset; frame.data_buffer = data; frame.data_length = strlen(data); frame.fin = true; sequencer_->OnStreamFrame(frame); } void OnFrame(QuicStreamOffset byte_offset, const char* data) { QuicStreamFrame frame; frame.stream_id = 1; frame.offset = byte_offset; frame.data_buffer = data; frame.data_length = strlen(data); frame.fin = false; sequencer_->OnStreamFrame(frame); } size_t NumBufferedBytes() { return QuicStreamSequencerPeer::GetNumBufferedBytes(sequencer_.get()); } testing::StrictMock<MockStream> stream_; std::unique_ptr<QuicStreamSequencer> sequencer_; }; TEST_F(QuicStreamSequencerTest, RejectOldFrame) { EXPECT_CALL(stream_, AddBytesConsumed(3)); EXPECT_CALL(stream_, OnDataAvailable()).WillOnce(testing::Invoke([this]() { ConsumeData(3); })); OnFrame(0, "abc"); EXPECT_EQ(0u, NumBufferedBytes()); EXPECT_EQ(3u, sequencer_->NumBytesConsumed()); OnFrame(0, "def"); EXPECT_EQ(0u, NumBufferedBytes()); } TEST_F(QuicStreamSequencerTest, RejectBufferedFrame) { EXPECT_CALL(stream_, OnDataAvailable()); OnFrame(0, "abc"); EXPECT_EQ(3u, NumBufferedBytes()); EXPECT_EQ(0u, sequencer_->NumBytesConsumed()); OnFrame(0, "def"); EXPECT_EQ(3u, NumBufferedBytes()); } TEST_F(QuicStreamSequencerTest, FullFrameConsumed) { EXPECT_CALL(stream_, AddBytesConsumed(3)); EXPECT_CALL(stream_, OnDataAvailable()).WillOnce(testing::Invoke([this]() { ConsumeData(3); })); OnFrame(0, "abc"); EXPECT_EQ(0u, NumBufferedBytes()); EXPECT_EQ(3u, sequencer_->NumBytesConsumed()); } TEST_F(QuicStreamSequencerTest, BlockedThenFullFrameConsumed) { sequencer_->SetBlockedUntilFlush(); OnFrame(0, "abc"); EXPECT_EQ(3u, NumBufferedBytes()); EXPECT_EQ(0u, sequencer_->NumBytesConsumed()); EXPECT_CALL(stream_, AddBytesConsumed(3)); EXPECT_CALL(stream_, OnDataAvailable()).WillOnce(testing::Invoke([this]() { ConsumeData(3); })); sequencer_->SetUnblocked(); EXPECT_EQ(0u, NumBufferedBytes()); EXPECT_EQ(3u, sequencer_->NumBytesConsumed()); EXPECT_CALL(stream_, AddBytesConsumed(3)); EXPECT_CALL(stream_, OnDataAvailable()).WillOnce(testing::Invoke([this]() { ConsumeData(3); })); EXPECT_FALSE(sequencer_->IsClosed()); EXPECT_FALSE(sequencer_->IsAllDataAvailable()); OnFinFrame(3, "def"); EXPECT_TRUE(sequencer_->IsClosed()); EXPECT_TRUE(sequencer_->IsAllDataAvailable()); } TEST_F(QuicStreamSequencerTest, BlockedThenFullFrameAndFinConsumed) { sequencer_->SetBlockedUntilFlush(); OnFinFrame(0, "abc"); EXPECT_EQ(3u, NumBufferedBytes()); EXPECT_EQ(0u, sequencer_->NumBytesConsumed()); EXPECT_CALL(stream_, AddBytesConsumed(3)); EXPECT_CALL(stream_, OnDataAvailable()).WillOnce(testing::Invoke([this]() { ConsumeData(3); })); EXPECT_FALSE(sequencer_->IsClosed()); EXPECT_TRUE(sequencer_->IsAllDataAvailable()); sequencer_->SetUnblocked(); EXPECT_TRUE(sequencer_->IsClosed()); EXPECT_EQ(0u, NumBufferedBytes()); EXPECT_EQ(3u, sequencer_->NumBytesConsumed()); } TEST_F(QuicStreamSequencerTest, EmptyFrame) { if (!stream_.version().HasIetfQuicFrames()) { EXPECT_CALL(stream_, OnUnrecoverableError(QUIC_EMPTY_STREAM_FRAME_NO_FIN, _)); } OnFrame(0, ""); EXPECT_EQ(0u, NumBufferedBytes()); EXPECT_EQ(0u, sequencer_->NumBytesConsumed()); } TEST_F(QuicStreamSequencerTest, EmptyFinFrame) { EXPECT_CALL(stream_, OnDataAvailable()); OnFinFrame(0, ""); EXPECT_EQ(0u, NumBufferedBytes()); EXPECT_EQ(0u, sequencer_->NumBytesConsumed()); 
EXPECT_TRUE(sequencer_->IsAllDataAvailable()); } TEST_F(QuicStreamSequencerTest, PartialFrameConsumed) { EXPECT_CALL(stream_, AddBytesConsumed(2)); EXPECT_CALL(stream_, OnDataAvailable()).WillOnce(testing::Invoke([this]() { ConsumeData(2); })); OnFrame(0, "abc"); EXPECT_EQ(1u, NumBufferedBytes()); EXPECT_EQ(2u, sequencer_->NumBytesConsumed()); } TEST_F(QuicStreamSequencerTest, NextxFrameNotConsumed) { EXPECT_CALL(stream_, OnDataAvailable()); OnFrame(0, "abc"); EXPECT_EQ(3u, NumBufferedBytes()); EXPECT_EQ(0u, sequencer_->NumBytesConsumed()); } TEST_F(QuicStreamSequencerTest, FutureFrameNotProcessed) { OnFrame(3, "abc"); EXPECT_EQ(3u, NumBufferedBytes()); EXPECT_EQ(0u, sequencer_->NumBytesConsumed()); } TEST_F(QuicStreamSequencerTest, OutOfOrderFrameProcessed) { OnFrame(6, "ghi"); EXPECT_EQ(3u, NumBufferedBytes()); EXPECT_EQ(0u, sequencer_->NumBytesConsumed()); EXPECT_EQ(3u, sequencer_->NumBytesBuffered()); OnFrame(3, "def"); EXPECT_EQ(6u, NumBufferedBytes()); EXPECT_EQ(0u, sequencer_->NumBytesConsumed()); EXPECT_EQ(6u, sequencer_->NumBytesBuffered()); EXPECT_CALL(stream_, AddBytesConsumed(9)); EXPECT_CALL(stream_, OnDataAvailable()).WillOnce(testing::Invoke([this]() { ConsumeData(9); })); OnFrame(0, "abc"); EXPECT_EQ(9u, sequencer_->NumBytesConsumed()); EXPECT_EQ(0u, sequencer_->NumBytesBuffered()); EXPECT_EQ(0u, NumBufferedBytes()); } TEST_F(QuicStreamSequencerTest, BasicHalfCloseOrdered) { InSequence s; EXPECT_CALL(stream_, OnDataAvailable()).WillOnce(testing::Invoke([this]() { ConsumeData(3); })); EXPECT_CALL(stream_, AddBytesConsumed(3)); OnFinFrame(0, "abc"); EXPECT_EQ(3u, QuicStreamSequencerPeer::GetCloseOffset(sequencer_.get())); } TEST_F(QuicStreamSequencerTest, BasicHalfCloseUnorderedWithFlush) { OnFinFrame(6, ""); EXPECT_EQ(6u, QuicStreamSequencerPeer::GetCloseOffset(sequencer_.get())); OnFrame(3, "def"); EXPECT_CALL(stream_, AddBytesConsumed(6)); EXPECT_CALL(stream_, OnDataAvailable()).WillOnce(testing::Invoke([this]() { ConsumeData(6); })); EXPECT_FALSE(sequencer_->IsClosed()); OnFrame(0, "abc"); EXPECT_TRUE(sequencer_->IsClosed()); } TEST_F(QuicStreamSequencerTest, BasicHalfUnordered) { OnFinFrame(3, ""); EXPECT_EQ(3u, QuicStreamSequencerPeer::GetCloseOffset(sequencer_.get())); EXPECT_CALL(stream_, AddBytesConsumed(3)); EXPECT_CALL(stream_, OnDataAvailable()).WillOnce(testing::Invoke([this]() { ConsumeData(3); })); EXPECT_FALSE(sequencer_->IsClosed()); OnFrame(0, "abc"); EXPECT_TRUE(sequencer_->IsClosed()); } TEST_F(QuicStreamSequencerTest, TerminateWithReadv) { char buffer[3]; OnFinFrame(3, ""); EXPECT_EQ(3u, QuicStreamSequencerPeer::GetCloseOffset(sequencer_.get())); EXPECT_FALSE(sequencer_->IsClosed()); EXPECT_CALL(stream_, OnDataAvailable()); OnFrame(0, "abc"); EXPECT_CALL(stream_, AddBytesConsumed(3)); iovec iov = {&buffer[0], 3}; int bytes_read = sequencer_->Readv(&iov, 1); EXPECT_EQ(3, bytes_read); EXPECT_TRUE(sequencer_->IsClosed()); } TEST_F(QuicStreamSequencerTest, MultipleOffsets) { OnFinFrame(3, ""); EXPECT_EQ(3u, QuicStreamSequencerPeer::GetCloseOffset(sequencer_.get())); EXPECT_CALL(stream_, OnUnrecoverableError( QUIC_STREAM_SEQUENCER_INVALID_STATE, "Stream 1 received new final offset: 1, which is " "different from close offset: 3")); OnFinFrame(1, ""); } class QuicSequencerRandomTest : public QuicStreamSequencerTest { public: using Frame = std::pair<int, std::string>; using FrameList = std::vector<Frame>; void CreateFrames() { int payload_size = ABSL_ARRAYSIZE(kPayload) - 1; int remaining_payload = payload_size; while (remaining_payload != 0) { int size = 
std::min(OneToN(6), remaining_payload); int index = payload_size - remaining_payload; list_.push_back( std::make_pair(index, std::string(kPayload + index, size))); remaining_payload -= size; } } QuicSequencerRandomTest() { uint64_t seed = QuicRandom::GetInstance()->RandUint64(); QUIC_LOG(INFO) << "**** The current seed is " << seed << " ****"; random_.set_seed(seed); CreateFrames(); } int OneToN(int n) { return random_.RandUint64() % n + 1; } void ReadAvailableData() { char output[ABSL_ARRAYSIZE(kPayload) + 1]; iovec iov; iov.iov_base = output; iov.iov_len = ABSL_ARRAYSIZE(output); int bytes_read = sequencer_->Readv(&iov, 1); EXPECT_NE(0, bytes_read); output_.append(output, bytes_read); } std::string output_; std::string peeked_; SimpleRandom random_; FrameList list_; }; TEST_F(QuicSequencerRandomTest, RandomFramesNoDroppingNoBackup) { EXPECT_CALL(stream_, OnDataAvailable()) .Times(AnyNumber()) .WillRepeatedly( Invoke(this, &QuicSequencerRandomTest::ReadAvailableData)); QuicByteCount total_bytes_consumed = 0; EXPECT_CALL(stream_, AddBytesConsumed(_)) .Times(AnyNumber()) .WillRepeatedly( testing::Invoke([&total_bytes_consumed](QuicByteCount bytes) { total_bytes_consumed += bytes; })); while (!list_.empty()) { int index = OneToN(list_.size()) - 1; QUIC_LOG(ERROR) << "Sending index " << index << " " << list_[index].second; OnFrame(list_[index].first, list_[index].second.data()); list_.erase(list_.begin() + index); } ASSERT_EQ(ABSL_ARRAYSIZE(kPayload) - 1, output_.size()); EXPECT_EQ(kPayload, output_); EXPECT_EQ(ABSL_ARRAYSIZE(kPayload) - 1, total_bytes_consumed); } TEST_F(QuicSequencerRandomTest, RandomFramesNoDroppingBackup) { char buffer[10]; iovec iov[2]; iov[0].iov_base = &buffer[0]; iov[0].iov_len = 5; iov[1].iov_base = &buffer[5]; iov[1].iov_len = 5; EXPECT_CALL(stream_, OnDataAvailable()).Times(AnyNumber()); QuicByteCount total_bytes_consumed = 0; EXPECT_CALL(stream_, AddBytesConsumed(_)) .Times(AnyNumber()) .WillRepeatedly( testing::Invoke([&total_bytes_consumed](QuicByteCount bytes) { total_bytes_consumed += bytes; })); while (output_.size() != ABSL_ARRAYSIZE(kPayload) - 1) { if (!list_.empty() && OneToN(2) == 1) { int index = OneToN(list_.size()) - 1; OnFrame(list_[index].first, list_[index].second.data()); list_.erase(list_.begin() + index); } else { bool has_bytes = sequencer_->HasBytesToRead(); iovec peek_iov[20]; int iovs_peeked = sequencer_->GetReadableRegions(peek_iov, 20); if (has_bytes) { ASSERT_LT(0, iovs_peeked); ASSERT_TRUE(sequencer_->GetReadableRegion(peek_iov)); } else { ASSERT_EQ(0, iovs_peeked); ASSERT_FALSE(sequencer_->GetReadableRegion(peek_iov)); } int total_bytes_to_peek = ABSL_ARRAYSIZE(buffer); for (int i = 0; i < iovs_peeked; ++i) { int bytes_to_peek = std::min<int>(peek_iov[i].iov_len, total_bytes_to_peek); peeked_.append(static_cast<char*>(peek_iov[i].iov_base), bytes_to_peek); total_bytes_to_peek -= bytes_to_peek; if (total_bytes_to_peek == 0) { break; } } int bytes_read = sequencer_->Readv(iov, 2); output_.append(buffer, bytes_read); ASSERT_EQ(output_.size(), peeked_.size()); } } EXPECT_EQ(std::string(kPayload), output_); EXPECT_EQ(std::string(kPayload), peeked_); EXPECT_EQ(ABSL_ARRAYSIZE(kPayload) - 1, total_bytes_consumed); } TEST_F(QuicStreamSequencerTest, MarkConsumed) { InSequence s; EXPECT_CALL(stream_, OnDataAvailable()); OnFrame(0, "abc"); OnFrame(3, "def"); OnFrame(6, "ghi"); EXPECT_EQ(9u, sequencer_->NumBytesBuffered()); std::vector<std::string> expected = {"abcdefghi"}; ASSERT_TRUE(VerifyReadableRegions(expected)); EXPECT_CALL(stream_, 
AddBytesConsumed(1)); sequencer_->MarkConsumed(1); std::vector<std::string> expected2 = {"bcdefghi"}; ASSERT_TRUE(VerifyReadableRegions(expected2)); EXPECT_EQ(8u, sequencer_->NumBytesBuffered()); EXPECT_CALL(stream_, AddBytesConsumed(2)); sequencer_->MarkConsumed(2); std::vector<std::string> expected3 = {"defghi"}; ASSERT_TRUE(VerifyReadableRegions(expected3)); EXPECT_EQ(6u, sequencer_->NumBytesBuffered()); EXPECT_CALL(stream_, AddBytesConsumed(5)); sequencer_->MarkConsumed(5); std::vector<std::string> expected4{"i"}; ASSERT_TRUE(VerifyReadableRegions(expected4)); EXPECT_EQ(1u, sequencer_->NumBytesBuffered()); } TEST_F(QuicStreamSequencerTest, MarkConsumedError) { EXPECT_CALL(stream_, OnDataAvailable()); OnFrame(0, "abc"); OnFrame(9, "jklmnopqrstuvwxyz"); std::vector<std::string> expected{"abc"}; ASSERT_TRUE(VerifyReadableRegions(expected)); EXPECT_QUIC_BUG( { EXPECT_CALL(stream_, ResetWithError(QuicResetStreamError::FromInternal( QUIC_ERROR_PROCESSING_STREAM))); sequencer_->MarkConsumed(4); }, "Invalid argument to MarkConsumed." " expect to consume: 4, but not enough bytes available."); } TEST_F(QuicStreamSequencerTest, MarkConsumedWithMissingPacket) { InSequence s; EXPECT_CALL(stream_, OnDataAvailable()); OnFrame(0, "abc"); OnFrame(3, "def"); OnFrame(9, "jkl"); std::vector<std::string> expected = {"abcdef"}; ASSERT_TRUE(VerifyReadableRegions(expected)); EXPECT_CALL(stream_, AddBytesConsumed(6)); sequencer_->MarkConsumed(6); } TEST_F(QuicStreamSequencerTest, Move) { InSequence s; EXPECT_CALL(stream_, OnDataAvailable()); OnFrame(0, "abc"); OnFrame(3, "def"); OnFrame(6, "ghi"); EXPECT_EQ(9u, sequencer_->NumBytesBuffered()); std::vector<std::string> expected = {"abcdefghi"}; ASSERT_TRUE(VerifyReadableRegions(expected)); QuicStreamSequencer sequencer2(std::move(*sequencer_)); ASSERT_TRUE(VerifyReadableRegions(sequencer2, expected)); } TEST_F(QuicStreamSequencerTest, OverlappingFramesReceived) { QuicStreamId id = 1; QuicStreamFrame frame1(id, false, 1, absl::string_view("hello")); sequencer_->OnStreamFrame(frame1); QuicStreamFrame frame2(id, false, 2, absl::string_view("hello")); EXPECT_CALL(stream_, OnUnrecoverableError(QUIC_OVERLAPPING_STREAM_DATA, _)) .Times(0); sequencer_->OnStreamFrame(frame2); } TEST_F(QuicStreamSequencerTest, DataAvailableOnOverlappingFrames) { QuicStreamId id = 1; const std::string data(1000, '.'); QuicStreamFrame frame1(id, false, 0, data); EXPECT_CALL(stream_, OnDataAvailable()); sequencer_->OnStreamFrame(frame1); EXPECT_CALL(stream_, AddBytesConsumed(500)); QuicStreamSequencerTest::ConsumeData(500); EXPECT_EQ(500u, sequencer_->NumBytesConsumed()); EXPECT_EQ(500u, sequencer_->NumBytesBuffered()); QuicStreamFrame frame2(id, false, 500, data); EXPECT_CALL(stream_, OnDataAvailable()).Times(0); sequencer_->OnStreamFrame(frame2); EXPECT_CALL(stream_, AddBytesConsumed(1000)); QuicStreamSequencerTest::ConsumeData(1000); EXPECT_EQ(1500u, sequencer_->NumBytesConsumed()); EXPECT_EQ(0u, sequencer_->NumBytesBuffered()); QuicStreamFrame frame3(id, false, 1498, absl::string_view("hello")); EXPECT_CALL(stream_, OnDataAvailable()); sequencer_->OnStreamFrame(frame3); EXPECT_CALL(stream_, AddBytesConsumed(3)); QuicStreamSequencerTest::ConsumeData(3); EXPECT_EQ(1503u, sequencer_->NumBytesConsumed()); EXPECT_EQ(0u, sequencer_->NumBytesBuffered()); QuicStreamFrame frame4(id, false, 1000, absl::string_view("hello")); EXPECT_CALL(stream_, OnDataAvailable()).Times(0); sequencer_->OnStreamFrame(frame4); EXPECT_EQ(1503u, sequencer_->NumBytesConsumed()); EXPECT_EQ(0u, 
sequencer_->NumBytesBuffered()); } TEST_F(QuicStreamSequencerTest, OnDataAvailableWhenReadableBytesIncrease) { sequencer_->set_level_triggered(true); QuicStreamId id = 1; QuicStreamFrame frame1(id, false, 0, "hello"); EXPECT_CALL(stream_, OnDataAvailable()); sequencer_->OnStreamFrame(frame1); EXPECT_EQ(5u, sequencer_->NumBytesBuffered()); QuicStreamFrame frame2(id, false, 5, " world"); EXPECT_CALL(stream_, OnDataAvailable()); sequencer_->OnStreamFrame(frame2); EXPECT_EQ(11u, sequencer_->NumBytesBuffered()); QuicStreamFrame frame3(id, false, 5, "a"); EXPECT_CALL(stream_, OnDataAvailable()).Times(0); sequencer_->OnStreamFrame(frame3); EXPECT_EQ(11u, sequencer_->NumBytesBuffered()); } TEST_F(QuicStreamSequencerTest, ReadSingleFrame) { EXPECT_CALL(stream_, OnDataAvailable()); OnFrame(0u, "abc"); std::string actual; EXPECT_CALL(stream_, AddBytesConsumed(3)); sequencer_->Read(&actual); EXPECT_EQ("abc", actual); EXPECT_EQ(0u, sequencer_->NumBytesBuffered()); } TEST_F(QuicStreamSequencerTest, ReadMultipleFramesWithMissingFrame) { EXPECT_CALL(stream_, OnDataAvailable()); OnFrame(0u, "abc"); OnFrame(3u, "def"); OnFrame(6u, "ghi"); OnFrame(10u, "xyz"); std::string actual; EXPECT_CALL(stream_, AddBytesConsumed(9)); sequencer_->Read(&actual); EXPECT_EQ("abcdefghi", actual); EXPECT_EQ(3u, sequencer_->NumBytesBuffered()); } TEST_F(QuicStreamSequencerTest, ReadAndAppendToString) { EXPECT_CALL(stream_, OnDataAvailable()); OnFrame(0u, "def"); OnFrame(3u, "ghi"); std::string actual = "abc"; EXPECT_CALL(stream_, AddBytesConsumed(6)); sequencer_->Read(&actual); EXPECT_EQ("abcdefghi", actual); EXPECT_EQ(0u, sequencer_->NumBytesBuffered()); } TEST_F(QuicStreamSequencerTest, StopReading) { EXPECT_CALL(stream_, OnDataAvailable()).Times(0); EXPECT_CALL(stream_, OnFinRead()); EXPECT_CALL(stream_, AddBytesConsumed(0)); sequencer_->StopReading(); EXPECT_CALL(stream_, AddBytesConsumed(3)); OnFrame(0u, "abc"); EXPECT_CALL(stream_, AddBytesConsumed(3)); OnFrame(3u, "def"); EXPECT_CALL(stream_, AddBytesConsumed(3)); OnFinFrame(6u, "ghi"); } TEST_F(QuicStreamSequencerTest, StopReadingWithLevelTriggered) { EXPECT_CALL(stream_, AddBytesConsumed(0)); EXPECT_CALL(stream_, AddBytesConsumed(3)).Times(3); EXPECT_CALL(stream_, OnDataAvailable()).Times(0); EXPECT_CALL(stream_, OnFinRead()); sequencer_->set_level_triggered(true); sequencer_->StopReading(); OnFrame(0u, "abc"); OnFrame(3u, "def"); OnFinFrame(6u, "ghi"); } TEST_F(QuicStreamSequencerTest, CorruptFinFrames) { EXPECT_CALL(stream_, OnUnrecoverableError( QUIC_STREAM_SEQUENCER_INVALID_STATE, "Stream 1 received new final offset: 1, which is " "different from close offset: 2")); OnFinFrame(2u, ""); OnFinFrame(0u, "a"); EXPECT_FALSE(sequencer_->HasBytesToRead()); } TEST_F(QuicStreamSequencerTest, ReceiveFinLessThanHighestOffset) { EXPECT_CALL(stream_, OnDataAvailable()).Times(1); EXPECT_CALL(stream_, OnUnrecoverableError( QUIC_STREAM_SEQUENCER_INVALID_STATE, "Stream 1 received fin with offset: 0, which " "reduces current highest offset: 3")); OnFrame(0u, "abc"); OnFinFrame(0u, ""); } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_stream_sequencer.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_stream_sequencer_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
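The sequencer tests above all exercise one contract: frames may arrive out of order, only the contiguous run starting at the next expected offset becomes readable, and a gap-filling frame drains everything buffered behind it (see `OutOfOrderFrameProcessed`). Here is a toy sketch of that contract; `ToySequencer` is a hypothetical illustration, not QUICHE's ring-buffer-backed `QuicStreamSequencerBuffer`, and it omits the overlap, FIN, and error handling the real class performs.

```cpp
#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <utility>

class ToySequencer {
 public:
  void OnFrame(size_t offset, std::string data) {
    pending_[offset] = std::move(data);
    // Drain every buffered frame that now lines up with the read point.
    for (auto it = pending_.find(next_); it != pending_.end();
         it = pending_.find(next_)) {
      readable_ += it->second;
      next_ += it->second.size();
      pending_.erase(it);
    }
  }
  std::string Read() {  // consume all currently contiguous bytes
    std::string out;
    out.swap(readable_);
    return out;
  }
  size_t NumBytesBuffered() const {  // out-of-order bytes still waiting
    size_t n = 0;
    for (const auto& kv : pending_) n += kv.second.size();
    return n;
  }

 private:
  size_t next_ = 0;                        // next expected byte offset
  std::string readable_;                   // contiguous, not yet consumed
  std::map<size_t, std::string> pending_;  // frames ahead of next_
};

int main() {
  ToySequencer seq;
  seq.OnFrame(6, "ghi");  // future data: buffered, nothing readable
  seq.OnFrame(3, "def");
  assert(seq.Read().empty());
  assert(seq.NumBytesBuffered() == 6);
  seq.OnFrame(0, "abc");  // fills the gap, so all nine bytes drain
  assert(seq.Read() == "abcdefghi");
  assert(seq.NumBytesBuffered() == 0);
  return 0;
}
```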
801caa50-fb76-4a49-b918-3cf9f7903458
cpp
google/quiche
hpack_entry_collector
quiche/http2/test_tools/hpack_entry_collector.cc
quiche/http2/hpack/decoder/hpack_entry_collector_test.cc
#include "quiche/http2/test_tools/hpack_entry_collector.h" #include <ostream> #include <string> #include "absl/strings/str_cat.h" #include "quiche/http2/hpack/http2_hpack_constants.h" #include "quiche/http2/test_tools/hpack_string_collector.h" #include "quiche/http2/test_tools/verify_macros.h" #include "quiche/common/platform/api/quiche_logging.h" #include "quiche/common/platform/api/quiche_test.h" using ::testing::AssertionResult; namespace http2 { namespace test { namespace { const HpackEntryType kInvalidHeaderType = static_cast<HpackEntryType>(99); const size_t kInvalidIndex = 99999999; } HpackEntryCollector::HpackEntryCollector() { Clear(); } HpackEntryCollector::HpackEntryCollector(const HpackEntryCollector& other) = default; HpackEntryCollector::HpackEntryCollector(HpackEntryType type, size_t index_or_size) : header_type_(type), index_(index_or_size), started_(true), ended_(true) {} HpackEntryCollector::HpackEntryCollector(HpackEntryType type, size_t index, bool value_huffman, const std::string& value) : header_type_(type), index_(index), value_(value, value_huffman), started_(true), ended_(true) {} HpackEntryCollector::HpackEntryCollector(HpackEntryType type, bool name_huffman, const std::string& name, bool value_huffman, const std::string& value) : header_type_(type), index_(0), name_(name, name_huffman), value_(value, value_huffman), started_(true), ended_(true) {} HpackEntryCollector::~HpackEntryCollector() = default; void HpackEntryCollector::OnIndexedHeader(size_t index) { ASSERT_FALSE(started_); ASSERT_TRUE(IsClear()) << ToString(); Init(HpackEntryType::kIndexedHeader, index); ended_ = true; } void HpackEntryCollector::OnStartLiteralHeader(HpackEntryType header_type, size_t maybe_name_index) { ASSERT_FALSE(started_); ASSERT_TRUE(IsClear()) << ToString(); Init(header_type, maybe_name_index); } void HpackEntryCollector::OnNameStart(bool huffman_encoded, size_t len) { ASSERT_TRUE(started_); ASSERT_FALSE(ended_); ASSERT_FALSE(IsClear()); ASSERT_TRUE(LiteralNameExpected()) << ToString(); name_.OnStringStart(huffman_encoded, len); } void HpackEntryCollector::OnNameData(const char* data, size_t len) { ASSERT_TRUE(started_); ASSERT_FALSE(ended_); ASSERT_TRUE(LiteralNameExpected()) << ToString(); ASSERT_TRUE(name_.IsInProgress()); name_.OnStringData(data, len); } void HpackEntryCollector::OnNameEnd() { ASSERT_TRUE(started_); ASSERT_FALSE(ended_); ASSERT_TRUE(LiteralNameExpected()) << ToString(); ASSERT_TRUE(name_.IsInProgress()); name_.OnStringEnd(); } void HpackEntryCollector::OnValueStart(bool huffman_encoded, size_t len) { ASSERT_TRUE(started_); ASSERT_FALSE(ended_); if (LiteralNameExpected()) { ASSERT_TRUE(name_.HasEnded()); } ASSERT_TRUE(LiteralValueExpected()) << ToString(); ASSERT_TRUE(value_.IsClear()) << value_.ToString(); value_.OnStringStart(huffman_encoded, len); } void HpackEntryCollector::OnValueData(const char* data, size_t len) { ASSERT_TRUE(started_); ASSERT_FALSE(ended_); ASSERT_TRUE(LiteralValueExpected()) << ToString(); ASSERT_TRUE(value_.IsInProgress()); value_.OnStringData(data, len); } void HpackEntryCollector::OnValueEnd() { ASSERT_TRUE(started_); ASSERT_FALSE(ended_); ASSERT_TRUE(LiteralValueExpected()) << ToString(); ASSERT_TRUE(value_.IsInProgress()); value_.OnStringEnd(); ended_ = true; } void HpackEntryCollector::OnDynamicTableSizeUpdate(size_t size) { ASSERT_FALSE(started_); ASSERT_TRUE(IsClear()) << ToString(); Init(HpackEntryType::kDynamicTableSizeUpdate, size); ended_ = true; } void HpackEntryCollector::Clear() { header_type_ = kInvalidHeaderType; 
index_ = kInvalidIndex; name_.Clear(); value_.Clear(); started_ = ended_ = false; } bool HpackEntryCollector::IsClear() const { return header_type_ == kInvalidHeaderType && index_ == kInvalidIndex && name_.IsClear() && value_.IsClear() && !started_ && !ended_; } bool HpackEntryCollector::IsComplete() const { return started_ && ended_; } bool HpackEntryCollector::LiteralNameExpected() const { switch (header_type_) { case HpackEntryType::kIndexedLiteralHeader: case HpackEntryType::kUnindexedLiteralHeader: case HpackEntryType::kNeverIndexedLiteralHeader: return index_ == 0; default: return false; } } bool HpackEntryCollector::LiteralValueExpected() const { switch (header_type_) { case HpackEntryType::kIndexedLiteralHeader: case HpackEntryType::kUnindexedLiteralHeader: case HpackEntryType::kNeverIndexedLiteralHeader: return true; default: return false; } } AssertionResult HpackEntryCollector::ValidateIndexedHeader( size_t expected_index) const { HTTP2_VERIFY_TRUE(started_); HTTP2_VERIFY_TRUE(ended_); HTTP2_VERIFY_EQ(HpackEntryType::kIndexedHeader, header_type_); HTTP2_VERIFY_EQ(expected_index, index_); return ::testing::AssertionSuccess(); } AssertionResult HpackEntryCollector::ValidateLiteralValueHeader( HpackEntryType expected_type, size_t expected_index, bool expected_value_huffman, absl::string_view expected_value) const { HTTP2_VERIFY_TRUE(started_); HTTP2_VERIFY_TRUE(ended_); HTTP2_VERIFY_EQ(expected_type, header_type_); HTTP2_VERIFY_NE(0u, expected_index); HTTP2_VERIFY_EQ(expected_index, index_); HTTP2_VERIFY_TRUE(name_.IsClear()); HTTP2_VERIFY_SUCCESS( value_.Collected(expected_value, expected_value_huffman)); return ::testing::AssertionSuccess(); } AssertionResult HpackEntryCollector::ValidateLiteralNameValueHeader( HpackEntryType expected_type, bool expected_name_huffman, absl::string_view expected_name, bool expected_value_huffman, absl::string_view expected_value) const { HTTP2_VERIFY_TRUE(started_); HTTP2_VERIFY_TRUE(ended_); HTTP2_VERIFY_EQ(expected_type, header_type_); HTTP2_VERIFY_EQ(0u, index_); HTTP2_VERIFY_SUCCESS(name_.Collected(expected_name, expected_name_huffman)); HTTP2_VERIFY_SUCCESS( value_.Collected(expected_value, expected_value_huffman)); return ::testing::AssertionSuccess(); } AssertionResult HpackEntryCollector::ValidateDynamicTableSizeUpdate( size_t size) const { HTTP2_VERIFY_TRUE(started_); HTTP2_VERIFY_TRUE(ended_); HTTP2_VERIFY_EQ(HpackEntryType::kDynamicTableSizeUpdate, header_type_); HTTP2_VERIFY_EQ(index_, size); return ::testing::AssertionSuccess(); } void HpackEntryCollector::AppendToHpackBlockBuilder( HpackBlockBuilder* hbb) const { ASSERT_TRUE(started_ && ended_) << *this; switch (header_type_) { case HpackEntryType::kIndexedHeader: hbb->AppendIndexedHeader(index_); return; case HpackEntryType::kDynamicTableSizeUpdate: hbb->AppendDynamicTableSizeUpdate(index_); return; case HpackEntryType::kIndexedLiteralHeader: case HpackEntryType::kUnindexedLiteralHeader: case HpackEntryType::kNeverIndexedLiteralHeader: ASSERT_TRUE(value_.HasEnded()) << *this; if (index_ != 0) { QUICHE_CHECK(name_.IsClear()); hbb->AppendNameIndexAndLiteralValue(header_type_, index_, value_.huffman_encoded, value_.s); } else { QUICHE_CHECK(name_.HasEnded()) << *this; hbb->AppendLiteralNameAndValue(header_type_, name_.huffman_encoded, name_.s, value_.huffman_encoded, value_.s); } return; default: ADD_FAILURE() << *this; } } std::string HpackEntryCollector::ToString() const { std::string result("Type="); switch (header_type_) { case HpackEntryType::kIndexedHeader: result += 
"IndexedHeader"; break; case HpackEntryType::kDynamicTableSizeUpdate: result += "DynamicTableSizeUpdate"; break; case HpackEntryType::kIndexedLiteralHeader: result += "IndexedLiteralHeader"; break; case HpackEntryType::kUnindexedLiteralHeader: result += "UnindexedLiteralHeader"; break; case HpackEntryType::kNeverIndexedLiteralHeader: result += "NeverIndexedLiteralHeader"; break; default: if (header_type_ == kInvalidHeaderType) { result += "<unset>"; } else { absl::StrAppend(&result, header_type_); } } if (index_ != 0) { absl::StrAppend(&result, " Index=", index_); } if (!name_.IsClear()) { absl::StrAppend(&result, " Name", name_.ToString()); } if (!value_.IsClear()) { absl::StrAppend(&result, " Value", value_.ToString()); } if (!started_) { EXPECT_FALSE(ended_); absl::StrAppend(&result, " !started"); } else if (!ended_) { absl::StrAppend(&result, " !ended"); } else { absl::StrAppend(&result, " Complete"); } return result; } void HpackEntryCollector::Init(HpackEntryType type, size_t maybe_index) { ASSERT_TRUE(IsClear()) << ToString(); header_type_ = type; index_ = maybe_index; started_ = true; } bool operator==(const HpackEntryCollector& a, const HpackEntryCollector& b) { return a.name() == b.name() && a.value() == b.value() && a.index() == b.index() && a.header_type() == b.header_type() && a.started() == b.started() && a.ended() == b.ended(); } bool operator!=(const HpackEntryCollector& a, const HpackEntryCollector& b) { return !(a == b); } std::ostream& operator<<(std::ostream& out, const HpackEntryCollector& v) { return out << v.ToString(); } } }
#include "quiche/http2/test_tools/hpack_entry_collector.h" #include "quiche/common/platform/api/quiche_logging.h" #include "quiche/common/platform/api/quiche_test.h" using ::testing::HasSubstr; namespace http2 { namespace test { namespace { TEST(HpackEntryCollectorTest, Clear) { HpackEntryCollector collector; QUICHE_VLOG(1) << collector; EXPECT_THAT(collector.ToString(), HasSubstr("!started")); EXPECT_TRUE(collector.IsClear()); collector.set_header_type(HpackEntryType::kIndexedLiteralHeader); EXPECT_FALSE(collector.IsClear()); QUICHE_VLOG(1) << collector; collector.Clear(); EXPECT_TRUE(collector.IsClear()); collector.set_index(123); EXPECT_FALSE(collector.IsClear()); QUICHE_VLOG(1) << collector; collector.Clear(); EXPECT_TRUE(collector.IsClear()); collector.set_name(HpackStringCollector("name", true)); EXPECT_FALSE(collector.IsClear()); QUICHE_VLOG(1) << collector; collector.Clear(); EXPECT_TRUE(collector.IsClear()); collector.set_value(HpackStringCollector("value", false)); EXPECT_FALSE(collector.IsClear()); QUICHE_VLOG(1) << collector; } void IndexedHeaderErrorTest() { HpackEntryCollector collector; collector.OnIndexedHeader(1); collector.OnIndexedHeader(234); } TEST(HpackEntryCollectorTest, IndexedHeader) { HpackEntryCollector collector; collector.OnIndexedHeader(123); QUICHE_VLOG(1) << collector; EXPECT_FALSE(collector.IsClear()); EXPECT_TRUE(collector.IsComplete()); EXPECT_TRUE(collector.ValidateIndexedHeader(123)); EXPECT_THAT(collector.ToString(), HasSubstr("IndexedHeader")); EXPECT_THAT(collector.ToString(), HasSubstr("Complete")); EXPECT_FATAL_FAILURE(IndexedHeaderErrorTest(), "Value of: started_"); } void LiteralValueErrorTest() { HpackEntryCollector collector; collector.OnStartLiteralHeader(HpackEntryType::kIndexedLiteralHeader, 1); collector.OnNameStart(false, 10); } TEST(HpackEntryCollectorTest, LiteralValueHeader) { HpackEntryCollector collector; collector.OnStartLiteralHeader(HpackEntryType::kIndexedLiteralHeader, 4); QUICHE_VLOG(1) << collector; EXPECT_FALSE(collector.IsClear()); EXPECT_FALSE(collector.IsComplete()); EXPECT_THAT(collector.ToString(), HasSubstr("!ended")); collector.OnValueStart(true, 5); QUICHE_VLOG(1) << collector; collector.OnValueData("value", 5); collector.OnValueEnd(); QUICHE_VLOG(1) << collector; EXPECT_FALSE(collector.IsClear()); EXPECT_TRUE(collector.IsComplete()); EXPECT_TRUE(collector.ValidateLiteralValueHeader( HpackEntryType::kIndexedLiteralHeader, 4, true, "value")); EXPECT_THAT(collector.ToString(), HasSubstr("IndexedLiteralHeader")); EXPECT_THAT(collector.ToString(), HasSubstr("Complete")); EXPECT_FATAL_FAILURE(LiteralValueErrorTest(), "Value of: LiteralNameExpected"); } void LiteralNameValueHeaderErrorTest() { HpackEntryCollector collector; collector.OnStartLiteralHeader(HpackEntryType::kNeverIndexedLiteralHeader, 0); collector.OnValueStart(false, 10); } TEST(HpackEntryCollectorTest, LiteralNameValueHeader) { HpackEntryCollector collector; collector.OnStartLiteralHeader(HpackEntryType::kUnindexedLiteralHeader, 0); QUICHE_VLOG(1) << collector; EXPECT_FALSE(collector.IsClear()); EXPECT_FALSE(collector.IsComplete()); collector.OnNameStart(false, 4); collector.OnNameData("na", 2); QUICHE_VLOG(1) << collector; collector.OnNameData("me", 2); collector.OnNameEnd(); collector.OnValueStart(true, 5); QUICHE_VLOG(1) << collector; collector.OnValueData("Value", 5); collector.OnValueEnd(); QUICHE_VLOG(1) << collector; EXPECT_FALSE(collector.IsClear()); EXPECT_TRUE(collector.IsComplete()); EXPECT_TRUE(collector.ValidateLiteralNameValueHeader( 
HpackEntryType::kUnindexedLiteralHeader, false, "name", true, "Value")); EXPECT_FATAL_FAILURE(LiteralNameValueHeaderErrorTest(), "Value of: name_.HasEnded"); } void DynamicTableSizeUpdateErrorTest() { HpackEntryCollector collector; collector.OnDynamicTableSizeUpdate(123); EXPECT_FALSE(collector.IsClear()); EXPECT_TRUE(collector.IsComplete()); EXPECT_TRUE(collector.ValidateDynamicTableSizeUpdate(123)); collector.OnDynamicTableSizeUpdate(234); } TEST(HpackEntryCollectorTest, DynamicTableSizeUpdate) { HpackEntryCollector collector; collector.OnDynamicTableSizeUpdate(8192); QUICHE_VLOG(1) << collector; EXPECT_FALSE(collector.IsClear()); EXPECT_TRUE(collector.IsComplete()); EXPECT_TRUE(collector.ValidateDynamicTableSizeUpdate(8192)); EXPECT_EQ(collector, HpackEntryCollector(HpackEntryType::kDynamicTableSizeUpdate, 8192)); EXPECT_NE(collector, HpackEntryCollector(HpackEntryType::kIndexedHeader, 8192)); EXPECT_NE(collector, HpackEntryCollector(HpackEntryType::kDynamicTableSizeUpdate, 8191)); EXPECT_FATAL_FAILURE(DynamicTableSizeUpdateErrorTest(), "Value of: started_"); } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/test_tools/hpack_entry_collector.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_entry_collector_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
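The HpackEntryCollector above is built around a callback-driven state machine: the decoder emits OnStart/OnData/OnEnd events, and the collector both accumulates the payload and asserts the legal event ordering. Below is a minimal, library-free sketch of that idea; StringCollector and its method names are invented for illustration and are not part of the QUICHE API.

#include <cassert>
#include <cstddef>
#include <iostream>
#include <string>

class StringCollector {
 public:
  void OnStringStart(size_t expected_len) {
    assert(!started_ && !ended_);  // may only start once
    started_ = true;
    expected_len_ = expected_len;
  }
  void OnStringData(const char* data, size_t len) {
    assert(started_ && !ended_);  // data only between start and end
    buffer_.append(data, len);
  }
  void OnStringEnd() {
    assert(started_ && !ended_);
    assert(buffer_.size() == expected_len_);  // decoder delivered all bytes
    ended_ = true;
  }
  bool IsComplete() const { return started_ && ended_; }
  const std::string& value() const { return buffer_; }

 private:
  std::string buffer_;
  size_t expected_len_ = 0;
  bool started_ = false;
  bool ended_ = false;
};

int main() {
  StringCollector c;
  c.OnStringStart(5);
  c.OnStringData("va", 2);  // data may arrive in arbitrary chunks
  c.OnStringData("lue", 3);
  c.OnStringEnd();
  assert(c.IsComplete() && c.value() == "value");
  std::cout << "collected: " << c.value() << "\n";
}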
1764824f-014b-48c7-86d7-e5d951be561b
cpp
tensorflow/tensorflow
shard_restore_util
tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.cc
tensorflow/core/tfrt/mlrt/kernel/shard_restore_util_test.cc
#include "tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <queue> #include <utility> #include <vector> #include "absl/log/check.h" #include "absl/types/span.h" namespace tensorflow { namespace tf_mlrt { std::vector<std::vector<int>> ShardVariables( int num_shards, absl::Span<int64_t> variable_sizes) { DCHECK_GT(num_shards, 0); struct IndexSize { int index; int64_t size; }; std::vector<IndexSize> variable_index_sizes; variable_index_sizes.reserve(variable_sizes.size()); for (int i = 0; i < variable_sizes.size(); ++i) { variable_index_sizes.push_back({.index = i, .size = variable_sizes[i]}); } std::sort( variable_index_sizes.begin(), variable_index_sizes.end(), [&](const IndexSize& a, const IndexSize& b) { return a.size > b.size; }); struct RestoreVariableCluster { std::vector<int> indices; size_t total_size = 0; }; auto cmp = [](const RestoreVariableCluster& a, const RestoreVariableCluster& b) { return a.total_size > b.total_size; }; std::priority_queue<RestoreVariableCluster, std::vector<RestoreVariableCluster>, decltype(cmp)> min_heap(cmp); for (int i = 0; i < num_shards; ++i) { min_heap.push(RestoreVariableCluster()); } for (int i = 0; i < variable_index_sizes.size(); ++i) { RestoreVariableCluster min_cluster = min_heap.top(); min_heap.pop(); min_cluster.total_size += variable_index_sizes[i].size; min_cluster.indices.push_back(variable_index_sizes[i].index); min_heap.push(std::move(min_cluster)); } std::vector<std::vector<int>> shards; shards.reserve(min_heap.size()); while (!min_heap.empty()) { auto& min_cluster = min_heap.top(); if (min_cluster.total_size > 0) { shards.push_back(min_cluster.indices); } min_heap.pop(); } return shards; } } }
#include "tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.h" #include <cstdint> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/types/span.h" namespace tensorflow { namespace tf_mlrt { namespace { using ::testing::ElementsAre; using ::testing::UnorderedElementsAre; TEST(ShardRestoreUtilTest, Basic) { int num_shards = 2; std::vector<int64_t> shard_sizes = {8, 10, 3}; std::vector<std::vector<int>> shards = ShardVariables(num_shards, absl::MakeSpan(shard_sizes)); EXPECT_EQ(shards.size(), 2); EXPECT_THAT(shards[0], ElementsAre(1)); EXPECT_THAT(shards[1], ElementsAre(0, 2)); } TEST(ShardRestoreUtilTest, Imbalance) { int num_shards = 2; std::vector<int64_t> shard_sizes = {3, 3, 10, 3}; std::vector<std::vector<int>> shards = ShardVariables(num_shards, absl::MakeSpan(shard_sizes)); EXPECT_EQ(shards.size(), 2); EXPECT_THAT(shards[0], UnorderedElementsAre(0, 1, 3)); EXPECT_THAT(shards[1], ElementsAre(2)); } TEST(ShardRestoreUtilTest, SingleShard) { int num_shards = 1; std::vector<int64_t> shard_sizes = {10, 2}; std::vector<std::vector<int>> shards = ShardVariables(num_shards, absl::MakeSpan(shard_sizes)); EXPECT_EQ(shards.size(), 1); EXPECT_THAT(shards[0], ElementsAre(0, 1)); } TEST(ShardRestoreUtilTest, NumVariablesLessThanShard) { int num_shards = 2; std::vector<int64_t> shard_sizes = {1}; std::vector<std::vector<int>> shards = ShardVariables(num_shards, absl::MakeSpan(shard_sizes)); EXPECT_EQ(shards.size(), 1); EXPECT_THAT(shards[0], ElementsAre(0)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/shard_restore_util.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/shard_restore_util_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
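ShardVariables above implements a greedy longest-processing-time heuristic: sort variables by size descending, then repeatedly assign the next variable to the currently lightest shard, tracked with a min-heap keyed on total size. A self-contained sketch of that strategy, assuming nothing beyond the standard library (GreedyShard is an invented name, not the TFRT API):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <queue>
#include <utility>
#include <vector>

std::vector<std::vector<int>> GreedyShard(int num_shards,
                                          const std::vector<int64_t>& sizes) {
  // Pair each size with its original index, largest sizes first.
  std::vector<std::pair<int64_t, int>> order;
  for (int i = 0; i < static_cast<int>(sizes.size()); ++i)
    order.push_back({sizes[i], i});
  std::sort(order.begin(), order.end(),
            [](const auto& a, const auto& b) { return a.first > b.first; });

  struct Shard { std::vector<int> indices; int64_t total = 0; };
  auto cmp = [](const Shard& a, const Shard& b) { return a.total > b.total; };
  std::priority_queue<Shard, std::vector<Shard>, decltype(cmp)> heap(cmp);
  for (int i = 0; i < num_shards; ++i) heap.push(Shard{});

  for (const auto& [size, index] : order) {
    Shard lightest = heap.top();  // shard with the smallest total so far
    heap.pop();
    lightest.total += size;
    lightest.indices.push_back(index);
    heap.push(std::move(lightest));
  }

  std::vector<std::vector<int>> result;
  while (!heap.empty()) {
    if (!heap.top().indices.empty()) result.push_back(heap.top().indices);
    heap.pop();
  }
  return result;
}

int main() {
  // Mirrors the "Imbalance" test above: sizes {3, 3, 10, 3} over two shards
  // yield one shard holding the size-10 variable and one holding the rest.
  for (const auto& shard : GreedyShard(2, {3, 3, 10, 3})) {
    for (int i : shard) std::cout << i << ' ';
    std::cout << '\n';
  }
}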
45587220-e750-404f-9906-bd0f0490ad7b
cpp
google/cel-cpp
unknown_attribute_set
eval/public/unknown_attribute_set.h
eval/public/unknown_attribute_set_test.cc
#ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_UNKNOWN_ATTRIBUTE_SET_H_ #define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_UNKNOWN_ATTRIBUTE_SET_H_ #include "base/attribute_set.h" namespace google { namespace api { namespace expr { namespace runtime { using UnknownAttributeSet = ::cel::AttributeSet; } } } } #endif
#include "eval/public/unknown_attribute_set.h" #include <memory> #include <string> #include <vector> #include "eval/public/cel_attribute.h" #include "eval/public/cel_value.h" #include "internal/testing.h" namespace google { namespace api { namespace expr { namespace runtime { namespace { using ::testing::Eq; using google::api::expr::v1alpha1::Expr; TEST(UnknownAttributeSetTest, TestCreate) { const std::string kAttr1 = "a1"; const std::string kAttr2 = "a2"; const std::string kAttr3 = "a3"; std::shared_ptr<CelAttribute> cel_attr = std::make_shared<CelAttribute>( "root", std::vector<CelAttributeQualifier>( {CreateCelAttributeQualifier(CelValue::CreateString(&kAttr1)), CreateCelAttributeQualifier(CelValue::CreateInt64(1)), CreateCelAttributeQualifier(CelValue::CreateUint64(2)), CreateCelAttributeQualifier(CelValue::CreateBool(true))})); UnknownAttributeSet unknown_set({*cel_attr}); EXPECT_THAT(unknown_set.size(), Eq(1)); EXPECT_THAT(*(unknown_set.begin()), Eq(*cel_attr)); } TEST(UnknownAttributeSetTest, TestMergeSets) { const std::string kAttr1 = "a1"; const std::string kAttr2 = "a2"; const std::string kAttr3 = "a3"; CelAttribute cel_attr1( "root", std::vector<CelAttributeQualifier>( {CreateCelAttributeQualifier(CelValue::CreateString(&kAttr1)), CreateCelAttributeQualifier(CelValue::CreateInt64(1)), CreateCelAttributeQualifier(CelValue::CreateUint64(2)), CreateCelAttributeQualifier(CelValue::CreateBool(true))})); CelAttribute cel_attr1_copy( "root", std::vector<CelAttributeQualifier>( {CreateCelAttributeQualifier(CelValue::CreateString(&kAttr1)), CreateCelAttributeQualifier(CelValue::CreateInt64(1)), CreateCelAttributeQualifier(CelValue::CreateUint64(2)), CreateCelAttributeQualifier(CelValue::CreateBool(true))})); CelAttribute cel_attr2( "root", std::vector<CelAttributeQualifier>( {CreateCelAttributeQualifier(CelValue::CreateString(&kAttr1)), CreateCelAttributeQualifier(CelValue::CreateInt64(2)), CreateCelAttributeQualifier(CelValue::CreateUint64(2)), CreateCelAttributeQualifier(CelValue::CreateBool(true))})); CelAttribute cel_attr3( "root", std::vector<CelAttributeQualifier>( {CreateCelAttributeQualifier(CelValue::CreateString(&kAttr1)), CreateCelAttributeQualifier(CelValue::CreateInt64(2)), CreateCelAttributeQualifier(CelValue::CreateUint64(2)), CreateCelAttributeQualifier(CelValue::CreateBool(false))})); UnknownAttributeSet unknown_set1({cel_attr1, cel_attr2}); UnknownAttributeSet unknown_set2({cel_attr1_copy, cel_attr3}); UnknownAttributeSet unknown_set3 = UnknownAttributeSet::Merge(unknown_set1, unknown_set2); EXPECT_THAT(unknown_set3.size(), Eq(3)); std::vector<CelAttribute> attrs1; for (const auto& attr_ptr : unknown_set3) { attrs1.push_back(attr_ptr); } std::vector<CelAttribute> attrs2 = {cel_attr1, cel_attr2, cel_attr3}; EXPECT_THAT(attrs1, testing::UnorderedPointwise(Eq(), attrs2)); } } } } } }
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/unknown_attribute_set.h
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/unknown_attribute_set_test.cc
4552db5798fb0853b131b783d8875794334fae7f
9f9db1be-f58f-4382-bf7e-e57609423b88
cpp
tensorflow/tensorflow
custom_kernel_fusion_rewriter
third_party/xla/xla/service/gpu/transforms/custom_kernel_fusion_rewriter.cc
third_party/xla/xla/service/gpu/transforms/custom_kernel_fusion_rewriter_test.cc
#include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h" #include <cstdint> #include <optional> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla::gpu { CustomKernelFusionRewriter::CustomKernelFusionRewriter( const se::DeviceDescription* device, int kernel_index, const CustomKernelFusionPatternRegistry* patterns) : device_(device), kernel_index_(kernel_index), patterns_(patterns) {} static std::optional<absl::flat_hash_set<HloInstruction*>> GetPatternReplacements(const CustomKernelFusionPattern::Match& match) { absl::flat_hash_set<HloInstruction*> requires_replacement; absl::flat_hash_set<HloInstruction*> instructions_set( match.instructions().begin(), match.instructions().end()); for (HloInstruction* instr : match.instructions()) { for (HloInstruction* user : instr->users()) { if (instr == match.root() || instructions_set.contains(user)) continue; if (match.HasReplacement(instr)) { requires_replacement.insert(instr); continue; } VLOG(3) << "Custom kernel fusion intermediate result " << instr->name() << " has users outside of a matched pattern: " << user->name(); return std::nullopt; } } return requires_replacement; } static absl::InlinedVector<HloInstruction*, 4> GetPatternCaptures( const CustomKernelFusionPattern::Match& match) { absl::InlinedVector<HloInstruction*, 4> captures; absl::flat_hash_set<HloInstruction*> instructions_set( match.instructions().begin(), match.instructions().end()); for (HloInstruction* instr : match.instructions()) { for (HloInstruction* operand : instr->operands()) { if (!instructions_set.contains(operand) && absl::c_find(captures, operand) == captures.end()) { captures.emplace_back(operand); } } } return captures; } static absl::StatusOr<HloComputation*> CreateFusionBody( HloModule* module, const CustomKernelFusionPattern::Match& match, absl::Span<HloInstruction* const> captures) { HloComputation::Builder builder(match.config().name()); absl::flat_hash_map<const HloInstruction*, HloInstruction*> instr_mapping; auto mapped_operands = [&](HloInstruction* instr) { absl::InlinedVector<HloInstruction*, 4> operands; for (HloInstruction* operand : instr->operands()) { operands.push_back(instr_mapping.at(operand)); } return operands; }; for (const HloInstruction* capture : captures) { int64_t index = instr_mapping.size(); instr_mapping[capture] = builder.AddInstruction(HloInstruction::CreateParameter( index, capture->shape(), absl::StrCat("p", index))); } for (HloInstruction* instr : match.instructions()) { instr_mapping[instr] = builder.AddInstruction( instr->CloneWithNewOperands(instr->shape(), mapped_operands(instr))); } HloInstruction* root = builder.last_added_instruction(); if (match.workspace_size_bytes() > 0) { auto workspace_shape = ShapeUtil::MakeShape(PrimitiveType::U8, {match.workspace_size_bytes()}); HloInstruction* workspace = 
builder.AddInstruction(HloInstruction::CreateCustomCall( workspace_shape, {}, CustomKernelFusionPattern::kWorkspace, "", CustomCallApiVersion::API_VERSION_TYPED_FFI)); builder.AddInstruction(HloInstruction::CreateTuple({root, workspace})); } return module->AddComputationAndUnifyNamesAndIds(builder.Build(), false); } namespace { absl::StatusOr<HloInstruction*> CreateFusionInstruction( HloModule* module, const CustomKernelFusionPattern::Match& match, absl::Span<HloInstruction* const> captures, HloComputation* body, int kernel_index) { HloInstruction* root = match.root(); HloComputation* parent = root->parent(); HloInstruction* fusion = parent->AddInstruction(HloInstruction::CreateFusion( body->root_instruction()->shape(), HloInstruction::FusionKind::kCustom, captures, body)); module->SetAndUniquifyInstrName(fusion, match.config().name()); GpuBackendConfig gpu_config; FusionBackendConfig& backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind("__custom_fusion"); *backend_config.mutable_custom_fusion_config() = match.config(); backend_config.mutable_custom_fusion_config()->set_kernel_index(kernel_index); TF_RETURN_IF_ERROR(fusion->set_backend_config(std::move(gpu_config))); if (match.workspace_size_bytes() == 0) return fusion; return parent->AddInstruction( HloInstruction::CreateGetTupleElement(fusion, 0)); } } absl::StatusOr<bool> CustomKernelFusionRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { std::vector<CustomKernelFusionPattern::Match> matches; for (HloComputation* computation : module->computations()) { for (HloInstruction* instr : computation->instructions()) { auto matched = patterns_->Match(*device_, instr); matches.insert(matches.end(), matched.begin(), matched.end()); } } if (matches.empty()) return false; for (const CustomKernelFusionPattern::Match& match : matches) { VLOG(2) << "Matched custom kernel fusion " << match.config().name() << "; root instruction: " << match.instructions().back()->name(); auto replacements = GetPatternReplacements(match); if (!replacements.has_value()) continue; auto captures = GetPatternCaptures(match); TF_ASSIGN_OR_RETURN(HloComputation * fusion_body, CreateFusionBody(module, match, captures)); TF_ASSIGN_OR_RETURN(HloInstruction * fusion, CreateFusionInstruction(module, match, captures, fusion_body, kernel_index_)); VLOG(2) << "Added a fusion instruction: " << fusion->name() << " for custom kernel fusion " << match.config().name() << " (instruction count = " << match.instructions().size() << ")"; for (HloInstruction* instr : *replacements) { VLOG(2) << "Replace matched instruction: " << instr->name() << " with a pattern replacement"; TF_ASSIGN_OR_RETURN( HloInstruction * replacement, match.BuildReplacement(instr, Cast<HloFusionInstruction>(fusion))); TF_RETURN_IF_ERROR( instr->ReplaceAllUsesWith(replacement, match.config().name())); VLOG(2) << "Replaced instruction: " << instr->name() << " with: " << replacement->name(); } VLOG(2) << "Replace custom kernel fusion root instruction " << match.root()->name() << " with " << fusion->name(); HloComputation* parent = match.root()->parent(); TF_RETURN_IF_ERROR(parent->ReplaceInstruction(match.root(), fusion)); } return true; } }
#include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h" #include <cstdint> #include <optional> #include <utility> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/test.h" namespace xla::gpu { struct SimpleGemmPattern : public CustomKernelFusionPattern { explicit SimpleGemmPattern(int64_t workspace = 0) : workspace(workspace) {} std::optional<Match> TryMatch(const se::DeviceDescription& device, HloInstruction* instr) const override { if (auto* dot = DynCast<HloDotInstruction>(instr)) { CustomFusionConfig config; config.set_name("simple_gemm"); return Match{config, {instr}, workspace}; } return std::nullopt; } int64_t workspace; }; class CustomKernelFusionRewriterTest : public HloTestBase {}; TEST_F(CustomKernelFusionRewriterTest, SimpleGemm) { const char* hlo = R"( HloModule test ENTRY %main (p0: f16[15,19], p1: f16[19,17]) -> f16[15,17] { %p0 = f16[15,19]{1,0} parameter(0) %p1 = f16[19,17]{1,0} parameter(1) ROOT %r = f16[15,17]{1,0} dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; const char* expected = R"( ; CHECK: %simple_gemm {{.*}} { ; CHECK: [[P0:%[^ ]+]] = f16[15,19]{1,0} parameter(0) ; CHECK: [[P1:%[^ ]+]] = f16[19,17]{1,0} parameter(1) ; CHECK: ROOT [[DOT:%[^ ]+]] = f16[15,17]{1,0} dot([[P0]], [[P1]]), ; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0} ; CHECK: } ; CHECK: ENTRY %main {{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[15,17]{1,0} fusion ; CHECK: kind=kCustom, calls=%simple_gemm, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"simple_gemm","kernel_index":0} ; CHECK: } ; CHECK: } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<SimpleGemmPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); } TEST_F(CustomKernelFusionRewriterTest, SetsKernelIndex) { const char* hlo = R"( HloModule test ENTRY %main (p0: f16[15,19], p1: f16[19,17]) -> f16[15,17] { %p0 = f16[15,19]{1,0} parameter(0) %p1 = f16[19,17]{1,0} parameter(1) ROOT %r = f16[15,17]{1,0} dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<SimpleGemmPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 1, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), "CHECK: \"kernel_index\":1"); } TEST_F(CustomKernelFusionRewriterTest, SimpleGemmWithWorkspace) { const char* hlo = R"( HloModule test ENTRY %main (p0: f16[15,19], p1: f16[19,17]) -> f16[15,17] { %p0 = f16[15,19]{1,0} parameter(0) %p1 = f16[19,17]{1,0} parameter(1) ROOT %r = f16[15,17]{1,0} dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; const char* expected = R"( ; CHECK: %simple_gemm {{.*}} { ; CHECK: [[P0:%[^ ]+]] = f16[15,19]{1,0} parameter(0) ; CHECK: [[P1:%[^ ]+]] = f16[19,17]{1,0} parameter(1) ; CHECK: [[DOT:%[^ ]+]] = f16[15,17]{1,0} dot([[P0]], [[P1]]), ; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0} ; CHECK: [[WORKSPACE:%[^ ]+]] = u8[1024]{0} custom-call(), ; CHECK: custom_call_target="__custom_kernel_fusion$workspace" ; CHECK: ROOT 
[[TUPLE:%[^ ]+]] = (f16[15,17]{1,0}, u8[1024]{0}) ; CHECK: tuple([[DOT]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main {{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = (f16[15,17]{1,0}, u8[1024]{0}) fusion ; CHECK: kind=kCustom, calls=%simple_gemm, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"simple_gemm","kernel_index":0} ; CHECK: } ; CHECK: ROOT {{.*}} get-tuple-element([[FUSION]]), index=0 ; CHECK: } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<SimpleGemmPattern>(1024); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/custom_kernel_fusion_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/custom_kernel_fusion_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
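A key step in the rewriter above is GetPatternCaptures: operands of matched instructions that are not themselves in the match become the parameters of the new fusion computation, deduplicated in first-seen order. Here is an illustrative, library-free sketch of that capture-collection logic; the Node type and GetCaptures name are invented for the demo and are not XLA types.

#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

struct Node {
  std::string name;
  std::vector<Node*> operands;
};

std::vector<Node*> GetCaptures(const std::vector<Node*>& match) {
  std::unordered_set<Node*> in_match(match.begin(), match.end());
  std::unordered_set<Node*> seen;
  std::vector<Node*> captures;
  for (Node* node : match) {
    for (Node* operand : node->operands) {
      // Outside the matched set and not yet captured: it must be fed in
      // as an explicit parameter of the fusion body.
      if (!in_match.count(operand) && seen.insert(operand).second)
        captures.push_back(operand);
    }
  }
  return captures;
}

int main() {
  Node p0{"p0", {}}, p1{"p1", {}};
  Node mul{"mul", {&p0, &p1}};
  Node add{"add", {&mul, &p1}};  // p1 used twice, captured only once
  for (Node* n : GetCaptures({&mul, &add}))
    std::cout << n->name << '\n';  // prints p0 then p1
}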
c5092579-ad27-4d89-8e94-e52defb886c3
cpp
tensorflow/tensorflow
custom_device
tensorflow/core/common_runtime/eager/custom_device.cc
tensorflow/c/eager/custom_device_test.cc
#include "tensorflow/core/common_runtime/eager/custom_device.h" #include <utility> #include <vector> #include "tensorflow/core/common_runtime/eager/custom_device_op_handler.h" namespace tensorflow { Status CustomDeviceTensorHandle::Shape(PartialTensorShape* shape) const { int num_dims; TF_RETURN_IF_ERROR(NumDims(&num_dims)); std::vector<int64_t> dims(num_dims); for (int i = 0; i < num_dims; ++i) { TF_RETURN_IF_ERROR(Dim(i, &dims[i])); } return PartialTensorShape::MakePartialShape(dims.data(), num_dims, shape); } Status CustomDeviceTensorHandle::NumElements(int64_t* num_elements) const { *num_elements = 1; int num_dims; TF_RETURN_IF_ERROR(NumDims(&num_dims)); for (int i = 0; i < num_dims; ++i) { int64_t dim; TF_RETURN_IF_ERROR(Dim(i, &dim)); if (dim < 0) { return errors::InvalidArgument( absl::StrCat("Tried to compute the number of elements of a tensor " "representing varying shapes. ", DebugString())); } *num_elements *= dim; } return absl::OkStatus(); } const char* CustomDeviceTensorHandle::DeviceType(Status* status) const { const DeviceNameUtils::ParsedName* parsed = ParsedName(status); if (!status->ok()) { return ""; } return parsed->type.c_str(); } int CustomDeviceTensorHandle::DeviceId(Status* status) const { const DeviceNameUtils::ParsedName* parsed = ParsedName(status); if (!status->ok()) { return 0; } return parsed->id; } AbstractTensorInterface* CustomDeviceTensorHandle::Resolve(Status* status) { core::RefCountPtr<ImmediateExecutionTensorHandle> copied_off( context_->GetCustomDeviceOpHandler().CopyTensorHandleToDevice( context_, this, DeviceNameUtils::ParsedNameToString(context_->HostCPUParsedName()) .c_str(), status)); if (!status->ok()) { return nullptr; } return copied_off->Resolve(status); } const DeviceNameUtils::ParsedName* CustomDeviceTensorHandle::ParsedName( Status* status) const { if (!parsed_name_.has_value()) { DeviceNameUtils::ParsedName parsed_name; if (!DeviceNameUtils::ParseFullOrLocalName(device_->name(), &parsed_name)) { *status = errors::InvalidArgument( absl::StrCat("Invalid custom device name ", device_->name())); return nullptr; } parsed_name_.emplace(std::move(parsed_name)); } return &*parsed_name_; } }
#include <memory> #include "absl/strings/match.h" #include "tensorflow/c/c_api.h" #include "tensorflow/c/eager/c_api.h" #include "tensorflow/c/eager/c_api_experimental.h" #include "tensorflow/c/eager/c_api_test_util.h" #include "tensorflow/c/eager/custom_device_testutil.h" #include "tensorflow/c/tf_status.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/platform/test.h" TEST(CUSTOM_DEVICE, RegisterSimpleDevice) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); TFE_ContextOptions* opts = TFE_NewContextOptions(); TFE_Context* context = TFE_NewContext(opts, status.get()); TFE_DeleteContextOptions(opts); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); bool arrived = false; bool executed = false; const char* name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; RegisterLoggingDevice(context, name, true, &arrived, &executed, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_TensorHandle* hcpu = TestMatrixTensorHandle(context); ASSERT_FALSE(arrived); TFE_TensorHandle* hdevice = TFE_TensorHandleCopyToDevice(hcpu, context, name, status.get()); ASSERT_TRUE(arrived); ASSERT_FALSE(executed); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> matmul( MatMulOp(context, hcpu, hdevice), TFE_DeleteOp); TFE_OpSetDevice(matmul.get(), name, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_TensorHandle* retval; int num_retvals = 1; TFE_Execute(matmul.get(), &retval, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); ASSERT_TRUE(executed); TFE_DeleteTensorHandle(retval); TFE_DeleteTensorHandle(hcpu); TFE_DeleteTensorHandle(hdevice); TFE_DeleteContext(context); } TEST(CUSTOM_DEVICE, ResetOperation) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); TFE_ContextOptions* opts = TFE_NewContextOptions(); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts, status.get()), TFE_DeleteContext); TFE_DeleteContextOptions(opts); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); bool arrived = false; bool executed = false; const char* custom_device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; RegisterLoggingDevice(context.get(), custom_device_name, true, &arrived, &executed, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> reused_op( TFE_NewOp(context.get(), "Identity", status.get()), TFE_DeleteOp); TFE_OpReset(reused_op.get(), "Identity", custom_device_name, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); ASSERT_EQ(tensorflow::string(TFE_OpGetDevice(reused_op.get(), status.get())), tensorflow::string(custom_device_name)); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_OpReset(reused_op.get(), "Identity", "/job:localhost/replica:0/task:0/device:CPU:0", status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); ASSERT_EQ(tensorflow::string(TFE_OpGetDevice(reused_op.get(), status.get())), tensorflow::string("/job:localhost/replica:0/task:0/device:CPU:0")); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); } TEST(CUSTOM_DEVICE, MakeVariable) { 
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); bool arrived = false; bool executed = false; const char* name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; RegisterLoggingDevice(context.get(), name, true, &arrived, &executed, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op( TFE_NewOp(context.get(), "VarHandleOp", status.get()), TFE_DeleteOp); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT); TFE_OpSetAttrShape(op.get(), "shape", {}, 0, status.get()); TFE_OpSetAttrString(op.get(), "container", "", 0); TFE_OpSetAttrString(op.get(), "shared_name", "", 0); TFE_OpSetDevice(op.get(), name, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_TensorHandle* var_handle = nullptr; int num_retvals = 1; executed = false; TFE_Execute(op.get(), &var_handle, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); ASSERT_TRUE(executed); auto handle_cleaner = tensorflow::gtl::MakeCleanup( [var_handle]() { TFE_DeleteTensorHandle(var_handle); }); std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)> one( TestScalarTensorHandle(context.get(), 111.f), TFE_DeleteTensorHandle); op.reset(TFE_NewOp(context.get(), "AssignVariableOp", status.get())); TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT); TFE_OpAddInput(op.get(), var_handle, status.get()); TFE_OpAddInput(op.get(), one.get(), status.get()); TFE_OpSetDevice(op.get(), name, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); executed = false; num_retvals = 0; TFE_Execute(op.get(), nullptr, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); ASSERT_TRUE(executed); op.reset(TFE_NewOp(context.get(), "ReadVariableOp", status.get())); TFE_OpAddInput(op.get(), var_handle, status.get()); TFE_OpSetDevice(op.get(), name, status.get()); TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); executed = false; num_retvals = 1; TFE_TensorHandle* var_value = nullptr; TFE_Execute(op.get(), &var_value, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); ASSERT_TRUE(executed); auto value_cleaner = tensorflow::gtl::MakeCleanup( [var_value]() { TFE_DeleteTensorHandle(var_value); }); ASSERT_EQ(tensorflow::string(name), tensorflow::string( TFE_TensorHandleBackingDeviceName(var_value, status.get()))); TFE_TensorHandle* var_value_unpacked = UnpackTensorHandle(var_value, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); std::unique_ptr<TF_Tensor, decltype(&TF_DeleteTensor)> resolved_value( TFE_TensorHandleResolve(var_value_unpacked, status.get()), TF_DeleteTensor); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); ASSERT_EQ(111., *static_cast<float*>(TF_TensorData(resolved_value.get()))); op.reset(TFE_NewOp(context.get(), "DestroyResourceOp", 
status.get())); TFE_OpAddInput(op.get(), var_handle, status.get()); TFE_OpSetDevice(op.get(), name, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); num_retvals = 0; TFE_Execute(op.get(), nullptr, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); } TEST(CUSTOM_DEVICE, AccessVariableOnCustomDevice) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); bool arrived = false; bool executed = false; const char* name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; RegisterLoggingDevice(context.get(), name, false, &arrived, &executed, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op( TFE_NewOp(context.get(), "VarHandleOp", status.get()), TFE_DeleteOp); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT); TFE_OpSetAttrShape(op.get(), "shape", {}, 0, status.get()); TFE_OpSetAttrString(op.get(), "container", "", 0); TFE_OpSetAttrString(op.get(), "shared_name", "", 0); TFE_OpSetDevice(op.get(), name, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_TensorHandle* var_handle = nullptr; int num_retvals = 1; executed = false; TFE_Execute(op.get(), &var_handle, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); ASSERT_TRUE(executed); auto handle_cleaner = tensorflow::gtl::MakeCleanup( [var_handle]() { TFE_DeleteTensorHandle(var_handle); }); std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)> one( TestScalarTensorHandle(context.get(), 111.f), TFE_DeleteTensorHandle); op.reset(TFE_NewOp(context.get(), "AssignVariableOp", status.get())); TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT); TFE_OpAddInput(op.get(), var_handle, status.get()); TFE_OpAddInput(op.get(), one.get(), status.get()); TFE_OpSetDevice(op.get(), name, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); executed = false; num_retvals = 0; TFE_Execute(op.get(), nullptr, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); ASSERT_TRUE(executed); op.reset(TFE_NewOp(context.get(), "ReadVariableOp", status.get())); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_OpAddInput(op.get(), var_handle, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT); executed = false; num_retvals = 1; TFE_TensorHandle* var_value = nullptr; TFE_Execute(op.get(), &var_value, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); ASSERT_TRUE(executed); ASSERT_EQ( tensorflow::string(name), tensorflow::string(TFE_TensorHandleDeviceName(var_value, status.get()))); TFE_DeleteTensorHandle(var_value); op.reset(TFE_NewOp(context.get(), "DestroyResourceOp", status.get())); TFE_OpAddInput(op.get(), var_handle, status.get()); TFE_OpSetDevice(op.get(), name, status.get()); 
ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); num_retvals = 0; TFE_Execute(op.get(), nullptr, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); } TEST(CUSTOM_DEVICE, InputBasedPlacement) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); const char* custom0 = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; const char* custom1 = "/job:localhost/replica:0/task:0/device:CUSTOM:1"; bool arrived = false; bool executed = false; RegisterLoggingDevice(context.get(), custom0, false, &arrived, &executed, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); RegisterLoggingDevice(context.get(), custom1, true, &arrived, &executed, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)> hcpu( TestMatrixTensorHandle(context.get()), TFE_DeleteTensorHandle); ASSERT_FALSE(arrived); std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)> hcustom0( TFE_TensorHandleCopyToDevice(hcpu.get(), context.get(), custom0, status.get()), TFE_DeleteTensorHandle); ASSERT_TRUE(arrived); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); arrived = false; std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)> hcustom1( TFE_TensorHandleCopyToDevice(hcpu.get(), context.get(), custom1, status.get()), TFE_DeleteTensorHandle); ASSERT_TRUE(arrived); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> matmul( MatMulOp(context.get(), hcpu.get(), hcpu.get()), TFE_DeleteOp); TFE_TensorHandle* retval; int num_retvals = 1; TFE_Execute(matmul.get(), &retval, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_DeleteTensorHandle(retval); matmul.reset(MatMulOp(context.get(), hcustom0.get(), hcustom0.get())); num_retvals = 1; executed = false; TFE_Execute(matmul.get(), &retval, &num_retvals, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); ASSERT_TRUE(executed); TFE_DeleteTensorHandle(retval); matmul.reset(MatMulOp(context.get(), hcustom0.get(), hcustom1.get())); num_retvals = 1; TFE_Execute(matmul.get(), &retval, &num_retvals, status.get()); ASSERT_NE(TF_OK, TF_GetCode(status.get())); ASSERT_TRUE(absl::StrContains(TF_Message(status.get()), custom0)); ASSERT_TRUE(absl::StrContains(TF_Message(status.get()), custom1)); matmul.reset(MatMulOp(context.get(), hcustom0.get(), hcpu.get())); num_retvals = 1; executed = false; TFE_Execute(matmul.get(), &retval, &num_retvals, status.get()); EXPECT_TRUE(executed); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_DeleteTensorHandle(retval); matmul.reset(MatMulOp(context.get(), hcustom0.get(), hcpu.get())); TFE_OpSetDevice(matmul.get(), "/job:localhost/replica:0/task:0/device:CPU:0", status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); num_retvals = 1; executed = false; TFE_Execute(matmul.get(), &retval, 
&num_retvals, status.get()); EXPECT_FALSE(executed); ASSERT_FALSE(TF_GetCode(status.get()) == TF_OK); matmul.reset(MatMulOp(context.get(), hcustom1.get(), hcpu.get())); num_retvals = 1; executed = false; TFE_Execute(matmul.get(), &retval, &num_retvals, status.get()); EXPECT_FALSE(executed); ASSERT_FALSE(TF_GetCode(status.get()) == TF_OK); } TEST(CUSTOM_DEVICE, InvalidRegistrationError) { std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts( TFE_NewContextOptions(), TFE_DeleteContextOptions); std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context( TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); bool arrived = false; bool executed = false; RegisterLoggingDevice(context.get(), "/device:CUSTOM:0", true, &arrived, &executed, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_INVALID_ARGUMENT) << TF_Message(status.get()); const char* name = "/job:localhost/replica:0/task:0/device:CUSTOM:0"; RegisterLoggingDevice(context.get(), name, true, &arrived, &executed, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); RegisterLoggingDevice(context.get(), name, true, &arrived, &executed, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_ALREADY_EXISTS) << TF_Message(status.get()); RegisterLoggingDevice( context.get(), "/job:localhost/replica:0/task:0/device:CPU:0", true, &arrived, &executed, status.get()); ASSERT_TRUE(TF_GetCode(status.get()) == TF_ALREADY_EXISTS) << TF_Message(status.get()); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/custom_device.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/custom_device_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
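The tests above lean on one C++ idiom throughout: wrapping every C-API object in std::unique_ptr with a function-pointer deleter, so cleanup runs on every path, including early ASSERT failures. A minimal sketch of the pattern with a stand-in C API (FooStatus, FooNewStatus, and FooDeleteStatus are invented for illustration):

#include <iostream>
#include <memory>

struct FooStatus { int code = 0; };
FooStatus* FooNewStatus() { return new FooStatus; }
void FooDeleteStatus(FooStatus* s) { delete s; }

int main() {
  // decltype(&FooDeleteStatus) makes the deleter part of the pointer type,
  // mirroring std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> above.
  std::unique_ptr<FooStatus, decltype(&FooDeleteStatus)> status(
      FooNewStatus(), FooDeleteStatus);
  status->code = 0;
  std::cout << "status code: " << status->code << "\n";
  // FooDeleteStatus runs automatically when status goes out of scope.
}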
785e640c-83bd-4859-b375-a19a3da0f900
cpp
tensorflow/tensorflow
dataset_store
tensorflow/core/data/service/dataset_store.cc
tensorflow/core/data/service/dataset_store_test.cc
#include "tensorflow/core/data/service/dataset_store.h" #include <memory> #include <string> #include "absl/memory/memory.h" #include "tensorflow/core/data/service/common.pb.h" #include "tensorflow/core/data/service/utils.h" #include "tensorflow/core/lib/io/record_reader.h" #include "tensorflow/core/lib/io/record_writer.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/file_system.h" #include "tensorflow/core/platform/path.h" namespace tensorflow { namespace data { FileSystemDatasetStore::FileSystemDatasetStore(const std::string& datasets_dir) : datasets_dir_(datasets_dir) {} Status FileSystemDatasetStore::Put(const std::string& key, const DatasetDef& dataset) { std::string path_to_write = io::JoinPath(datasets_dir_, key); TF_RETURN_IF_ERROR(WriteDatasetDef(path_to_write, dataset)); return absl::OkStatus(); } Status FileSystemDatasetStore::Get( const std::string& key, std::shared_ptr<const DatasetDef>& dataset_def) { std::string path = io::JoinPath(datasets_dir_, key); TF_RETURN_IF_ERROR(Env::Default()->FileExists(path)); DatasetDef def; TF_RETURN_IF_ERROR(ReadDatasetDef(path, def)); dataset_def = std::make_shared<const DatasetDef>(def); return absl::OkStatus(); } Status MemoryDatasetStore::Put(const std::string& key, const DatasetDef& dataset) { auto& stored_dataset = datasets_[key]; stored_dataset = std::make_shared<const DatasetDef>(dataset); return absl::OkStatus(); } Status MemoryDatasetStore::Get(const std::string& key, std::shared_ptr<const DatasetDef>& dataset_def) { auto& stored_dataset = datasets_[key]; if (!stored_dataset) { return errors::NotFound("Dataset with key ", key, " not found"); } dataset_def = stored_dataset; return absl::OkStatus(); } } }
#include "tensorflow/core/data/service/dataset_store.h" #include <memory> #include <string> #include <vector> #include "absl/memory/memory.h" #include "tensorflow/core/data/service/common.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace data { namespace { const char kFileSystem[] = "file_system"; const char kMemory[] = "memory"; std::string NewDatasetsDir() { std::string dir = io::JoinPath(testing::TmpDir(), "datasets"); if (Env::Default()->FileExists(dir).ok()) { int64_t undeleted_files; int64_t undeleted_dirs; CHECK(Env::Default() ->DeleteRecursively(dir, &undeleted_files, &undeleted_dirs) .ok()); } CHECK(Env::Default()->RecursivelyCreateDir(dir).ok()); return dir; } std::unique_ptr<DatasetStore> MakeStore(const std::string& type) { if (type == kFileSystem) { return std::make_unique<FileSystemDatasetStore>(NewDatasetsDir()); } else if (type == kMemory) { return std::make_unique<MemoryDatasetStore>(); } else { CHECK(false) << "unexpected type: " << type; } } DatasetDef DatasetDefWithVersion(int32_t version) { DatasetDef def; def.mutable_graph()->set_version(version); return def; } } class DatasetStoreTest : public ::testing::Test, public ::testing::WithParamInterface<std::string> {}; TEST_P(DatasetStoreTest, StoreAndGet) { std::unique_ptr<DatasetStore> store = MakeStore(GetParam()); std::string key = "key"; DatasetDef dataset_def = DatasetDefWithVersion(1); TF_ASSERT_OK(store->Put(key, dataset_def)); std::shared_ptr<const DatasetDef> result; TF_ASSERT_OK(store->Get(key, result)); EXPECT_EQ(result->graph().version(), dataset_def.graph().version()); } TEST_P(DatasetStoreTest, StoreAndGetMultiple) { std::unique_ptr<DatasetStore> store = MakeStore(GetParam()); int64_t num_datasets = 10; std::vector<std::string> keys; for (int i = 0; i < num_datasets; ++i) { std::string key = absl::StrCat("key", i); DatasetDef dataset_def = DatasetDefWithVersion(i); TF_ASSERT_OK(store->Put(key, dataset_def)); keys.push_back(key); } for (int i = 0; i < num_datasets; ++i) { std::shared_ptr<const DatasetDef> result; TF_ASSERT_OK(store->Get(keys[i], result)); EXPECT_EQ(result->graph().version(), i); } } TEST_P(DatasetStoreTest, StoreAlreadyExists) { std::unique_ptr<DatasetStore> store = MakeStore(GetParam()); int32_t version = 1; DatasetDef dataset_def = DatasetDefWithVersion(version); std::string key = "key"; TF_ASSERT_OK(store->Put(key, dataset_def)); TF_EXPECT_OK(store->Put(key, dataset_def)); std::shared_ptr<const DatasetDef> result; TF_ASSERT_OK(store->Get(key, result)); EXPECT_EQ(result->graph().version(), version); } TEST_P(DatasetStoreTest, GetMissing) { std::unique_ptr<DatasetStore> store = MakeStore(GetParam()); std::shared_ptr<const DatasetDef> result; Status s = store->Get("missing", result); EXPECT_EQ(s.code(), error::NOT_FOUND); } INSTANTIATE_TEST_SUITE_P(DatasetStoreTests, DatasetStoreTest, ::testing::Values(kFileSystem, kMemory)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/dataset_store.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/dataset_store_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
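One subtlety in MemoryDatasetStore::Get above: it looks up with map operator[], which default-constructs an entry (a null shared_ptr) for a missing key before the NotFound check, whereas find() would leave the map untouched. Both report "not found"; only the first grows the map. A small standard-library sketch of the difference:

#include <iostream>
#include <map>
#include <memory>
#include <string>

int main() {
  std::map<std::string, std::shared_ptr<const std::string>> datasets;

  // operator[] inserts a null shared_ptr under "missing" as a side effect.
  auto& slot = datasets["missing"];
  std::cout << "found: " << (slot != nullptr) << ", size: " << datasets.size()
            << "\n";  // found: 0, size: 1

  // find() performs a pure lookup with no side effect on the map.
  bool found = datasets.find("absent") != datasets.end();
  std::cout << "found: " << found << ", size: " << datasets.size()
            << "\n";  // found: 0, size: 1
}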
b211b421-a59e-4aa5-9f5c-9e7ba5fcbefa
cpp
tensorflow/tensorflow
device_compilation_cache
tensorflow/compiler/jit/device_compilation_cache.h
tensorflow/compiler/jit/device_compilation_cache_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_DEVICE_COMPILATION_CACHE_H_ #define TENSORFLOW_COMPILER_JIT_DEVICE_COMPILATION_CACHE_H_ #include <cstdint> #include <memory> #include <optional> #include <string> #include <utility> #include <variant> #include "absl/strings/str_cat.h" #include "tensorflow/compiler/jit/device_compilation_cluster_signature.h" #include "tensorflow/compiler/jit/xla_compile_util.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" #include "tensorflow/core/platform/mutex.h" namespace tensorflow { namespace device_compilation_cache_internal { template <typename ExecutableType> int64_t ExecutableSize(const ExecutableType* executable) { return 0; } template <> inline int64_t ExecutableSize<xla::LocalExecutable>( const xla::LocalExecutable* executable) { if (executable != nullptr && executable->executable() != nullptr) { return executable->executable()->SizeOfGeneratedCodeInBytes(); } return 0; } template <> inline int64_t ExecutableSize<xla::PjRtLoadedExecutable>( const xla::PjRtLoadedExecutable* executable) { if (executable != nullptr) { return executable->SizeOfGeneratedCodeInBytes(); } return 0; } } template <typename ExecutableType> class DeviceCompilationCache { public: DeviceCompilationCache() = default; ~DeviceCompilationCache() = default; using Key = DeviceCompilationClusterSignature; struct Value { DeviceCompileState compile_state = DeviceCompileState::kUncompiled; Status compilation_status; int64_t request_count = 0; const XlaCompiler::CompilationResult* compilation_result = nullptr; ExecutableType* executable = nullptr; }; std::optional<Value> Lookup(const Key& key) const; Value LookupOrCreate(const Key& key); void Store(const Key& key, std::optional<DeviceCompileState> compile_state, std::optional<Status> compilation_status, std::optional<std::unique_ptr<XlaCompiler::CompilationResult>> compilation_result, std::optional<std::unique_ptr<ExecutableType>> executable); std::string DebugString() const; private: struct Entry { mutable mutex mu; DeviceCompileState compile_state TF_GUARDED_BY(mu) = DeviceCompileState::kUncompiled; int64_t request_count TF_GUARDED_BY(mu) = 0; Status compilation_status TF_GUARDED_BY(mu); std::unique_ptr<XlaCompiler::CompilationResult> compilation_result TF_GUARDED_BY(mu); std::unique_ptr<ExecutableType> executable TF_GUARDED_BY(mu); std::string DebugString() const { mutex_lock lock(mu); int64_t executable_size = device_compilation_cache_internal::ExecutableSize<ExecutableType>( executable.get()); int64_t hlo_module_size = 0; if (compilation_result != nullptr && compilation_result->computation != nullptr) { hlo_module_size = compilation_result->computation->proto().ByteSizeLong(); } return absl::StrCat( "{compile_state: ", compile_state, ", request_count: ", request_count, ", compilation_status: ", compilation_status.ToString(), ", compilation_result?: ", compilation_result != nullptr, ", hlo_module_size: ", hlo_module_size, " bytes", ", executable?: ", executable != nullptr, ", executable_size: ", executable_size, " bytes}"); } }; mutable mutex compile_cache_mu_; absl::flat_hash_map<Key, std::unique_ptr<Entry>, Key::Hash> cache_ TF_GUARDED_BY(compile_cache_mu_); DeviceCompilationCache(const DeviceCompilationCache&) = delete; void operator=(const DeviceCompilationCache&) = delete; }; template <typename ExecutableType> std::optional<typename DeviceCompilationCache<ExecutableType>::Value> DeviceCompilationCache<ExecutableType>::Lookup(const Key& key) const { Entry* entry; { 
mutex_lock lock(compile_cache_mu_); auto it = cache_.find(key); if (it == cache_.cend()) { return std::nullopt; } entry = it->second.get(); } mutex_lock lock(entry->mu); Value value = {entry->compile_state, entry->compilation_status, ++entry->request_count, entry->compilation_result.get(), entry->executable.get()}; return value; } template <typename ExecutableType> typename DeviceCompilationCache<ExecutableType>::Value DeviceCompilationCache<ExecutableType>::LookupOrCreate(const Key& key) { Entry* entry; { mutex_lock lock(compile_cache_mu_); auto it = cache_.emplace(key, std::make_unique<Entry>()).first; entry = it->second.get(); } mutex_lock lock(entry->mu); Value value = {entry->compile_state, entry->compilation_status, ++entry->request_count, entry->compilation_result.get(), entry->executable.get()}; return value; } template <typename ExecutableType> void DeviceCompilationCache<ExecutableType>::Store( const Key& key, std::optional<DeviceCompileState> compile_state, std::optional<Status> compilation_status, std::optional<std::unique_ptr<XlaCompiler::CompilationResult>> compilation_result, std::optional<std::unique_ptr<ExecutableType>> executable) { Entry* entry; { mutex_lock lock(compile_cache_mu_); auto it = cache_.emplace(key, std::make_unique<Entry>()).first; entry = it->second.get(); } { mutex_lock lock(entry->mu); if (compile_state.has_value()) { entry->compile_state = *compile_state; } if (compilation_status.has_value()) { entry->compilation_status = *compilation_status; } if (compilation_result.has_value()) { entry->compilation_result = std::move(*compilation_result); } if (executable.has_value()) { entry->executable = std::move(*executable); } } VLOG(4) << "Added/updated cache entry: key=" << key.HumanString() << ", entry=" << entry->DebugString(); } template <typename ExecutableType> std::string DeviceCompilationCache<ExecutableType>::DebugString() const { std::string s = "DeviceCompilationCache<ExecutableType> {\n"; { mutex_lock lock(compile_cache_mu_); for (const auto& [key, entry] : cache_) { absl::StrAppend(&s, key.HumanString(), " : ", entry->DebugString(), ",\n"); } } absl::StrAppend(&s, "}"); return s; } } #endif
#include "tensorflow/compiler/jit/device_compilation_cache.h" #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include <gtest/gtest.h> #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/test.h" #include "tsl/protobuf/error_codes.pb.h" namespace tensorflow { namespace { struct FakeExecutable { std::string data; explicit FakeExecutable(const std::string& s) : data(s) {} }; using Cache = DeviceCompilationCache<FakeExecutable>; using Signature = DeviceCompilationClusterSignature; absl::StatusOr<Signature> BuildSampleSignature(const std::string& fn_name) { NameAttrList fn; fn.set_name(fn_name); std::vector<XlaCompiler::Argument> args(1); args[0].kind = XlaCompiler::Argument::kConstant; args[0].type = DT_INT32; args[0].shape = TensorShape({4, 0}); args[0].constant_value = Tensor(DT_INT32, {4, 0}); return Signature::Build(fn, args); } TEST(DeviceCompilationCacheTest, LookupEntryDoesntExist) { auto cache = std::make_unique<Cache>(); TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo")); auto cache_value = cache->Lookup(key); EXPECT_FALSE(cache_value.has_value()); } TEST(DeviceCompilationCacheTest, LookupOrCreateEntryDoesntExist) { auto cache = std::make_unique<Cache>(); TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo")); Cache::Value cache_value = cache->LookupOrCreate(key); EXPECT_EQ(cache_value.compile_state, DeviceCompileState::kUncompiled); EXPECT_EQ(cache_value.request_count, 1); EXPECT_EQ(cache_value.compilation_result, nullptr); EXPECT_EQ(cache_value.executable, nullptr); } TEST(DeviceCompilationCacheTest, IncrementRequestCountOnLookup) { auto cache = std::make_unique<Cache>(); TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo")); Cache::Value cache_value = cache->LookupOrCreate(key); EXPECT_EQ(cache_value.request_count, 1); cache_value = cache->LookupOrCreate(key); EXPECT_EQ(cache_value.request_count, 2); cache_value = cache->LookupOrCreate(key); EXPECT_EQ(cache_value.request_count, 3); } TEST(DeviceCompilationCacheTest, RequestCountUnchangedOnStore) { auto cache = std::make_unique<Cache>(); TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo")); Cache::Value cache_value = cache->LookupOrCreate(key); EXPECT_EQ(cache_value.request_count, 1); cache_value = cache->LookupOrCreate(key); EXPECT_EQ(cache_value.request_count, 2); cache_value = cache->LookupOrCreate(key); EXPECT_EQ(cache_value.request_count, 3); auto compilation_result = std::make_unique<XlaCompiler::CompilationResult>(); cache->Store(key, DeviceCompileState::kCompiled, absl::OkStatus(), std::move(compilation_result), std::nullopt); cache_value = cache->LookupOrCreate(key); EXPECT_EQ(cache_value.request_count, 4); } TEST(DeviceCompilationCacheTest, StoreLookup) { auto cache = std::make_unique<Cache>(); TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo")); auto compilation_result = std::make_unique<XlaCompiler::CompilationResult>(); auto executable = std::make_unique<FakeExecutable>("foo_exe"); cache->Store(key, DeviceCompileState::kCompiled, absl::OkStatus(), std::move(compilation_result), std::move(executable)); auto cache_value = cache->Lookup(key); EXPECT_EQ(cache_value->compile_state, DeviceCompileState::kCompiled); EXPECT_EQ(cache_value->request_count, 1); EXPECT_TRUE(cache_value->compilation_status.ok()); EXPECT_TRUE(cache_value->compilation_result != nullptr); EXPECT_TRUE(cache_value->executable != nullptr); EXPECT_EQ(cache_value->executable->data, "foo_exe"); } TEST(DeviceCompilationCacheTest, 
StoreLookupOrCreate) { auto cache = std::make_unique<Cache>(); TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo")); auto compilation_result = std::make_unique<XlaCompiler::CompilationResult>(); auto executable = std::make_unique<FakeExecutable>("foo_exe"); cache->Store(key, DeviceCompileState::kCompiled, absl::OkStatus(), std::move(compilation_result), std::move(executable)); auto cache_value = cache->LookupOrCreate(key); EXPECT_EQ(cache_value.compile_state, DeviceCompileState::kCompiled); EXPECT_EQ(cache_value.request_count, 1); EXPECT_TRUE(cache_value.compilation_status.ok()); EXPECT_TRUE(cache_value.compilation_result != nullptr); EXPECT_TRUE(cache_value.executable != nullptr); EXPECT_EQ(cache_value.executable->data, "foo_exe"); } TEST(DeviceCompilationCacheTest, StoreOptionalArgs) { auto cache = std::make_unique<Cache>(); TF_ASSERT_OK_AND_ASSIGN(auto key, BuildSampleSignature("foo")); auto compilation_result = std::make_unique<XlaCompiler::CompilationResult>(); auto executable = std::make_unique<FakeExecutable>("foo_exe"); cache->Store(key, DeviceCompileState::kCompiled, std::nullopt, std::nullopt, std::nullopt); auto cache_value = cache->Lookup(key); EXPECT_EQ(cache_value->compile_state, DeviceCompileState::kCompiled); EXPECT_TRUE(cache_value->compilation_status.ok()); EXPECT_TRUE(cache_value->compilation_result == nullptr); EXPECT_TRUE(cache_value->executable == nullptr); cache->Store(key, std::nullopt, errors::InvalidArgument("Couldn't compile."), std::nullopt, std::nullopt); cache_value = cache->Lookup(key); EXPECT_EQ(cache_value->compile_state, DeviceCompileState::kCompiled); EXPECT_EQ(cache_value->compilation_status.code(), error::INVALID_ARGUMENT); EXPECT_TRUE(cache_value->compilation_result == nullptr); EXPECT_TRUE(cache_value->executable == nullptr); cache->Store(key, std::nullopt, std::nullopt, std::move(compilation_result), std::nullopt); cache_value = cache->Lookup(key); EXPECT_EQ(cache_value->compile_state, DeviceCompileState::kCompiled); EXPECT_EQ(cache_value->compilation_status.code(), error::INVALID_ARGUMENT); EXPECT_TRUE(cache_value->compilation_result != nullptr); EXPECT_TRUE(cache_value->executable == nullptr); cache->Store(key, std::nullopt, std::nullopt, std::nullopt, std::move(executable)); cache_value = cache->Lookup(key); EXPECT_EQ(cache_value->compile_state, DeviceCompileState::kCompiled); EXPECT_EQ(cache_value->compilation_status.code(), error::INVALID_ARGUMENT); EXPECT_TRUE(cache_value->compilation_result != nullptr); EXPECT_TRUE(cache_value->executable != nullptr); EXPECT_EQ(cache_value->executable->data, "foo_exe"); } TEST(DeviceCompilationCacheTest, StoreMultipleEntries) { auto cache = std::make_unique<Cache>(); TF_ASSERT_OK_AND_ASSIGN(auto key1, BuildSampleSignature("foo")); TF_ASSERT_OK_AND_ASSIGN(auto key2, BuildSampleSignature("bar")); auto compilation_result1 = std::make_unique<XlaCompiler::CompilationResult>(); auto compilation_result2 = std::make_unique<XlaCompiler::CompilationResult>(); auto executable1 = std::make_unique<FakeExecutable>("foo_exe"); auto executable2 = std::make_unique<FakeExecutable>("bar_exe"); cache->Store(key1, DeviceCompileState::kCompiled, errors::InvalidArgument("Invalid argument."), std::move(compilation_result1), std::move(executable1)); cache->Store(key2, DeviceCompileState::kCompiling, absl::OkStatus(), std::move(compilation_result2), std::move(executable2)); auto cache_value_1 = cache->Lookup(key1); auto cache_value_2 = cache->Lookup(key2); EXPECT_EQ(cache_value_1->compile_state, 
DeviceCompileState::kCompiled); EXPECT_EQ(cache_value_1->compilation_status.code(), error::INVALID_ARGUMENT); EXPECT_TRUE(cache_value_1->compilation_result != nullptr); EXPECT_TRUE(cache_value_1->executable != nullptr); EXPECT_EQ(cache_value_1->executable->data, "foo_exe"); EXPECT_EQ(cache_value_2->compile_state, DeviceCompileState::kCompiling); EXPECT_TRUE(cache_value_2->compilation_status.ok()); EXPECT_TRUE(cache_value_2->compilation_result != nullptr); EXPECT_TRUE(cache_value_2->executable != nullptr); EXPECT_EQ(cache_value_2->executable->data, "bar_exe"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_cache.h
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_cache_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
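A minimal usage sketch for the DeviceCompilationCache record above, modeled on its unit test. FakeExecutable and DemoStoreThenLookup are illustrative names, and the Key is assumed to come from DeviceCompilationClusterSignature::Build as in the test's BuildSampleSignature helper; this is a sketch, not part of the source row.

// Sketch: exercise Lookup / LookupOrCreate / Store as the test does.
#include <cassert>
#include <memory>
#include <string>
#include "absl/status/status.h"
#include "tensorflow/compiler/jit/device_compilation_cache.h"

struct FakeExecutable {  // stand-in for a real executable type
  std::string data;
  explicit FakeExecutable(const std::string& s) : data(s) {}
};
using Cache = tensorflow::DeviceCompilationCache<FakeExecutable>;

void DemoStoreThenLookup(const Cache::Key& key) {
  Cache cache;
  assert(!cache.Lookup(key).has_value());  // no entry yet: Lookup misses
  // LookupOrCreate inserts an uncompiled entry and bumps request_count to 1.
  Cache::Value v = cache.LookupOrCreate(key);
  // Store leaves any std::nullopt field unchanged; here every field is set.
  cache.Store(key, tensorflow::DeviceCompileState::kCompiled, absl::OkStatus(),
              std::make_unique<tensorflow::XlaCompiler::CompilationResult>(),
              std::make_unique<FakeExecutable>("exe"));
  v = *cache.Lookup(key);  // compile_state == kCompiled, request_count == 2
  (void)v;
}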
f924b834-016b-4653-93b1-644440671e27
cpp
abseil/abseil-cpp
bernoulli_distribution
absl/random/bernoulli_distribution.h
absl/random/bernoulli_distribution_test.cc
#ifndef ABSL_RANDOM_BERNOULLI_DISTRIBUTION_H_ #define ABSL_RANDOM_BERNOULLI_DISTRIBUTION_H_ #include <cstdint> #include <istream> #include <limits> #include "absl/base/optimization.h" #include "absl/random/internal/fast_uniform_bits.h" #include "absl/random/internal/iostream_state_saver.h" namespace absl { ABSL_NAMESPACE_BEGIN class bernoulli_distribution { public: using result_type = bool; class param_type { public: using distribution_type = bernoulli_distribution; explicit param_type(double p = 0.5) : prob_(p) { assert(p >= 0.0 && p <= 1.0); } double p() const { return prob_; } friend bool operator==(const param_type& p1, const param_type& p2) { return p1.p() == p2.p(); } friend bool operator!=(const param_type& p1, const param_type& p2) { return p1.p() != p2.p(); } private: double prob_; }; bernoulli_distribution() : bernoulli_distribution(0.5) {} explicit bernoulli_distribution(double p) : param_(p) {} explicit bernoulli_distribution(param_type p) : param_(p) {} void reset() {} template <typename URBG> bool operator()(URBG& g) { return Generate(param_.p(), g); } template <typename URBG> bool operator()(URBG& g, const param_type& param) { return Generate(param.p(), g); } param_type param() const { return param_; } void param(const param_type& param) { param_ = param; } double p() const { return param_.p(); } result_type(min)() const { return false; } result_type(max)() const { return true; } friend bool operator==(const bernoulli_distribution& d1, const bernoulli_distribution& d2) { return d1.param_ == d2.param_; } friend bool operator!=(const bernoulli_distribution& d1, const bernoulli_distribution& d2) { return d1.param_ != d2.param_; } private: static constexpr uint64_t kP32 = static_cast<uint64_t>(1) << 32; template <typename URBG> static bool Generate(double p, URBG& g); param_type param_; }; template <typename CharT, typename Traits> std::basic_ostream<CharT, Traits>& operator<<( std::basic_ostream<CharT, Traits>& os, const bernoulli_distribution& x) { auto saver = random_internal::make_ostream_state_saver(os); os.precision(random_internal::stream_precision_helper<double>::kPrecision); os << x.p(); return os; } template <typename CharT, typename Traits> std::basic_istream<CharT, Traits>& operator>>( std::basic_istream<CharT, Traits>& is, bernoulli_distribution& x) { auto saver = random_internal::make_istream_state_saver(is); auto p = random_internal::read_floating_point<double>(is); if (!is.fail()) { x.param(bernoulli_distribution::param_type(p)); } return is; } template <typename URBG> bool bernoulli_distribution::Generate(double p, URBG& g) { random_internal::FastUniformBits<uint32_t> fast_u32; while (true) { const uint64_t c = static_cast<uint64_t>(static_cast<int64_t>(p * kP32)); const uint32_t v = fast_u32(g); if (ABSL_PREDICT_TRUE(v != c)) return v < c; const double q = static_cast<double>(c) / kP32; const double left = p - q; const double here = left * kP32; if (here == 0) return false; p = here; } } ABSL_NAMESPACE_END } #endif
#include "absl/random/bernoulli_distribution.h" #include <cmath> #include <cstddef> #include <random> #include <sstream> #include <utility> #include "gtest/gtest.h" #include "absl/random/internal/pcg_engine.h" #include "absl/random/internal/sequence_urbg.h" #include "absl/random/random.h" namespace { class BernoulliTest : public testing::TestWithParam<std::pair<double, size_t>> { }; TEST_P(BernoulliTest, Serialize) { const double d = GetParam().first; absl::bernoulli_distribution before(d); { absl::bernoulli_distribution via_param{ absl::bernoulli_distribution::param_type(d)}; EXPECT_EQ(via_param, before); } std::stringstream ss; ss << before; absl::bernoulli_distribution after(0.6789); EXPECT_NE(before.p(), after.p()); EXPECT_NE(before.param(), after.param()); EXPECT_NE(before, after); ss >> after; EXPECT_EQ(before.p(), after.p()); EXPECT_EQ(before.param(), after.param()); EXPECT_EQ(before, after); } TEST_P(BernoulliTest, Accuracy) { const std::pair<double, size_t> para = GetParam(); size_t trials = para.second; double p = para.first; absl::random_internal::pcg64_2018_engine rng(0x2B7E151628AED2A6); size_t yes = 0; absl::bernoulli_distribution dist(p); for (size_t i = 0; i < trials; ++i) { if (dist(rng)) yes++; } const double stddev_p = std::sqrt((p * (1.0 - p)) / trials); const double expected = trials * p; const double stddev = trials * stddev_p; EXPECT_NEAR(yes, expected, 5 * stddev) << "@" << p << ", " << std::abs(static_cast<double>(yes) - expected) / stddev << " stddev"; } INSTANTIATE_TEST_SUITE_P( All, BernoulliTest, ::testing::Values( std::make_pair(0, 30000), std::make_pair(1e-3, 30000000), std::make_pair(0.1, 3000000), std::make_pair(0.5, 3000000), std::make_pair(0.9, 30000000), std::make_pair(0.999, 30000000), std::make_pair(1, 30000), std::make_pair(std::nextafter(1.0, 0.0), 1), std::make_pair(std::numeric_limits<double>::epsilon(), 1), std::make_pair(std::nextafter(std::numeric_limits<double>::min(), 1.0), 1), std::make_pair(std::numeric_limits<double>::min(), 1), std::make_pair( std::numeric_limits<double>::denorm_min(), 1), std::make_pair(std::numeric_limits<double>::min() / 2, 1), std::make_pair(std::nextafter(std::numeric_limits<double>::min(), 0.0), 1))); TEST(BernoulliTest, StabilityTest) { absl::random_internal::sequence_urbg urbg({ 0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull, 0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull, 0xECDD4775619F1510ull, 0x13CCA830EB61BD96ull, 0x0334FE1EAA0363CFull, 0xB5735C904C70A239ull, 0xD59E9E0BCBAADE14ull, 0xEECC86BC60622CA7ull, 0x4864f22c059bf29eull, 0x247856d8b862665cull, 0xe46e86e9a1337e10ull, 0xd8c8541f3519b133ull, 0xe75b5162c567b9e4ull, 0xf732e5ded7009c5bull, 0xb170b98353121eacull, 0x1ec2e8986d2362caull, 0x814c8e35fe9a961aull, 0x0c3cd59c9b638a02ull, 0xcb3bb6478a07715cull, 0x1224e62c978bbc7full, 0x671ef2cb04e81f6eull, 0x3c1cbd811eaf1808ull, 0x1bbc23cfa8fac721ull, 0xa4c2cda65e596a51ull, 0xb77216fad37adf91ull, 0x836d794457c08849ull, 0xe083df03475f49d7ull, 0xbc9feb512e6b0d6cull, 0xb12d74fdd718c8c5ull, 0x12ff09653bfbe4caull, 0x8dd03a105bc4ee7eull, 0x5738341045ba0d85ull, 0xe3fd722dc65ad09eull, 0x5a14fd21ea2a5705ull, 0x14e6ea4d6edb0c73ull, 0x275b0dc7e0a18acfull, 0x36cebe0d2653682eull, 0x0361e9b23861596bull, }); auto generate = [&urbg](absl::bernoulli_distribution& dist) { std::string output; output.reserve(36); urbg.reset(); for (int i = 0; i < 35; i++) { output.append(dist(urbg) ? 
"1" : "0"); } return output; }; const double kP = 0.0331289862362; { absl::bernoulli_distribution dist(kP); auto v = generate(dist); EXPECT_EQ(35, urbg.invocations()); EXPECT_EQ(v, "00000000000010000000000010000000000") << dist; } { absl::bernoulli_distribution dist(kP * 10.0); auto v = generate(dist); EXPECT_EQ(35, urbg.invocations()); EXPECT_EQ(v, "00000100010010010010000011000011010") << dist; } { absl::bernoulli_distribution dist(kP * 20.0); auto v = generate(dist); EXPECT_EQ(35, urbg.invocations()); EXPECT_EQ(v, "00011110010110110011011111110111011") << dist; } { absl::bernoulli_distribution dist(1.0 - kP); auto v = generate(dist); EXPECT_EQ(35, urbg.invocations()); EXPECT_EQ(v, "11111111111111111111011111111111111") << dist; } } TEST(BernoulliTest, StabilityTest2) { absl::random_internal::sequence_urbg urbg( {0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull, 0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull, 0xECDD4775619F1510ull, 0x13CCA830EB61BD96ull, 0x0334FE1EAA0363CFull, 0xB5735C904C70A239ull, 0xD59E9E0BCBAADE14ull, 0xEECC86BC60622CA7ull}); auto generate = [&urbg](absl::bernoulli_distribution& dist) { std::string output; output.reserve(13); urbg.reset(); for (int i = 0; i < 12; i++) { output.append(dist(urbg) ? "1" : "0"); } return output; }; constexpr double b0 = 1.0 / 13.0 / 0.2; constexpr double b1 = 2.0 / 13.0 / 0.2; constexpr double b3 = (5.0 / 13.0 / 0.2) - ((1 - b0) + (1 - b1) + (1 - b1)); { absl::bernoulli_distribution dist(b0); auto v = generate(dist); EXPECT_EQ(12, urbg.invocations()); EXPECT_EQ(v, "000011100101") << dist; } { absl::bernoulli_distribution dist(b1); auto v = generate(dist); EXPECT_EQ(12, urbg.invocations()); EXPECT_EQ(v, "001111101101") << dist; } { absl::bernoulli_distribution dist(b3); auto v = generate(dist); EXPECT_EQ(12, urbg.invocations()); EXPECT_EQ(v, "001111101111") << dist; } } }
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/bernoulli_distribution.h
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/bernoulli_distribution_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
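A usage sketch for the bernoulli_distribution record above: draw biased booleans from any uniform random bit generator. std::mt19937 is used only to keep the example self-contained; absl::BitGen works the same way. Note the fast path in Generate() costs a single 32-bit draw per call, falling through to the correction loop only when the draw equals the cutoff c = p * 2^32.

#include <iostream>
#include <random>
#include "absl/random/bernoulli_distribution.h"

int main() {
  std::mt19937 gen(42);
  absl::bernoulli_distribution coin(0.3);  // true with probability 0.3
  int heads = 0;
  for (int i = 0; i < 100000; ++i) heads += coin(gen) ? 1 : 0;
  // Expectation is 30000; deviations shrink like sqrt(n * p * (1 - p)).
  std::cout << heads << "\n";
  return 0;
}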
485d01de-bfb2-4fa4-8ce6-7a875fdfc513
cpp
abseil/abseil-cpp
low_level_alloc
absl/base/internal/low_level_alloc.cc
absl/base/internal/low_level_alloc_test.cc
#include "absl/base/internal/low_level_alloc.h" #include <type_traits> #include "absl/base/call_once.h" #include "absl/base/config.h" #include "absl/base/internal/direct_mmap.h" #include "absl/base/internal/scheduling_mode.h" #include "absl/base/macros.h" #include "absl/base/thread_annotations.h" #ifndef ABSL_LOW_LEVEL_ALLOC_MISSING #ifndef _WIN32 #include <pthread.h> #include <signal.h> #include <sys/mman.h> #include <unistd.h> #else #include <windows.h> #endif #ifdef __linux__ #include <sys/prctl.h> #endif #include <string.h> #include <algorithm> #include <atomic> #include <cerrno> #include <cstddef> #include <new> #include "absl/base/dynamic_annotations.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/spinlock.h" #if defined(MAP_ANON) && !defined(MAP_ANONYMOUS) #define MAP_ANONYMOUS MAP_ANON #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { static const int kMaxLevel = 30; namespace { struct AllocList { struct Header { uintptr_t size; uintptr_t magic; LowLevelAlloc::Arena *arena; void *dummy_for_alignment; } header; int levels; AllocList *next[kMaxLevel]; }; } static int IntLog2(size_t size, size_t base) { int result = 0; for (size_t i = size; i > base; i >>= 1) { result++; } return result; } static int Random(uint32_t *state) { uint32_t r = *state; int result = 1; while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) { result++; } *state = r; return result; } static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) { size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *); int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1); if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit); if (level > kMaxLevel - 1) level = kMaxLevel - 1; ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level"); return level; } static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e, AllocList **prev) { AllocList *p = head; for (int level = head->levels - 1; level >= 0; level--) { for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) { } prev[level] = p; } return (head->levels == 0) ? 
nullptr : prev[0]->next[0]; } static void LLA_SkiplistInsert(AllocList *head, AllocList *e, AllocList **prev) { LLA_SkiplistSearch(head, e, prev); for (; head->levels < e->levels; head->levels++) { prev[head->levels] = head; } for (int i = 0; i != e->levels; i++) { e->next[i] = prev[i]->next[i]; prev[i]->next[i] = e; } } static void LLA_SkiplistDelete(AllocList *head, AllocList *e, AllocList **prev) { AllocList *found = LLA_SkiplistSearch(head, e, prev); ABSL_RAW_CHECK(e == found, "element not in freelist"); for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) { prev[i]->next[i] = e->next[i]; } while (head->levels > 0 && head->next[head->levels - 1] == nullptr) { head->levels--; } } struct LowLevelAlloc::Arena { explicit Arena(uint32_t flags_value); base_internal::SpinLock mu; AllocList freelist ABSL_GUARDED_BY(mu); int32_t allocation_count ABSL_GUARDED_BY(mu); const uint32_t flags; const size_t pagesize; const size_t round_up; const size_t min_size; uint32_t random ABSL_GUARDED_BY(mu); }; namespace { alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof( LowLevelAlloc::Arena)]; alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof( LowLevelAlloc::Arena)]; #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING alignas( LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage [sizeof(LowLevelAlloc::Arena)]; #endif absl::once_flag create_globals_once; void CreateGlobalArenas() { new (&default_arena_storage) LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook); new (&unhooked_arena_storage) LowLevelAlloc::Arena(0); #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING new (&unhooked_async_sig_safe_arena_storage) LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe); #endif } LowLevelAlloc::Arena *UnhookedArena() { base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage); } #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() { base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); return reinterpret_cast<LowLevelAlloc::Arena *>( &unhooked_async_sig_safe_arena_storage); } #endif } LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() { base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage); } static const uintptr_t kMagicAllocated = 0x4c833e95U; static const uintptr_t kMagicUnallocated = ~kMagicAllocated; namespace { class ABSL_SCOPED_LOCKABLE ArenaLock { public: explicit ArenaLock(LowLevelAlloc::Arena *arena) ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu) : arena_(arena) { #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { sigset_t all; sigfillset(&all); mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0; } #endif arena_->mu.Lock(); } ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); } void Leave() ABSL_UNLOCK_FUNCTION() { arena_->mu.Unlock(); #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if (mask_valid_) { const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr); if (err != 0) { ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err); } } #endif left_ = true; } private: bool left_ = false; #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING bool mask_valid_ = false; sigset_t mask_; #endif LowLevelAlloc::Arena *arena_; ArenaLock(const ArenaLock &) = delete; ArenaLock 
&operator=(const ArenaLock &) = delete; }; } inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) { return magic ^ reinterpret_cast<uintptr_t>(ptr); } namespace { size_t GetPageSize() { #ifdef _WIN32 SYSTEM_INFO system_info; GetSystemInfo(&system_info); return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity); #elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__) return getpagesize(); #else return static_cast<size_t>(sysconf(_SC_PAGESIZE)); #endif } size_t RoundedUpBlockSize() { size_t round_up = 16; while (round_up < sizeof(AllocList::Header)) { round_up += round_up; } return round_up; } } LowLevelAlloc::Arena::Arena(uint32_t flags_value) : mu(base_internal::SCHEDULE_KERNEL_ONLY), allocation_count(0), flags(flags_value), pagesize(GetPageSize()), round_up(RoundedUpBlockSize()), min_size(2 * round_up), random(0) { freelist.header.size = 0; freelist.header.magic = Magic(kMagicUnallocated, &freelist.header); freelist.header.arena = this; freelist.levels = 0; memset(freelist.next, 0, sizeof(freelist.next)); } LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) { Arena *meta_data_arena = DefaultArena(); #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { meta_data_arena = UnhookedAsyncSigSafeArena(); } else #endif if ((flags & LowLevelAlloc::kCallMallocHook) == 0) { meta_data_arena = UnhookedArena(); } Arena *result = new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags); return result; } bool LowLevelAlloc::DeleteArena(Arena *arena) { ABSL_RAW_CHECK( arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(), "may not delete default arena"); ArenaLock section(arena); if (arena->allocation_count != 0) { section.Leave(); return false; } while (arena->freelist.next[0] != nullptr) { AllocList *region = arena->freelist.next[0]; size_t size = region->header.size; arena->freelist.next[0] = region->next[0]; ABSL_RAW_CHECK( region->header.magic == Magic(kMagicUnallocated, &region->header), "bad magic number in DeleteArena()"); ABSL_RAW_CHECK(region->header.arena == arena, "bad arena pointer in DeleteArena()"); ABSL_RAW_CHECK(size % arena->pagesize == 0, "empty arena has non-page-aligned block size"); ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0, "empty arena has non-page-aligned block"); int munmap_result; #ifdef _WIN32 munmap_result = VirtualFree(region, 0, MEM_RELEASE); ABSL_RAW_CHECK(munmap_result != 0, "LowLevelAlloc::DeleteArena: VitualFree failed"); #else #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) { munmap_result = munmap(region, size); } else { munmap_result = base_internal::DirectMunmap(region, size); } #else munmap_result = munmap(region, size); #endif if (munmap_result != 0) { ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d", errno); } #endif } section.Leave(); arena->~Arena(); Free(arena); return true; } static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) { uintptr_t sum = a + b; ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow"); return sum; } static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) { return CheckedAdd(addr, align - 1) & ~(align - 1); } static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) { ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()"); AllocList *next = prev->next[i]; if (next != nullptr) { ABSL_RAW_CHECK( next->header.magic == 
Magic(kMagicUnallocated, &next->header), "bad magic number in Next()"); ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()"); if (prev != &arena->freelist) { ABSL_RAW_CHECK(prev < next, "unordered freelist"); ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size < reinterpret_cast<char *>(next), "malformed freelist"); } } return next; } static void Coalesce(AllocList *a) { AllocList *n = a->next[0]; if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size == reinterpret_cast<char *>(n)) { LowLevelAlloc::Arena *arena = a->header.arena; a->header.size += n->header.size; n->header.magic = 0; n->header.arena = nullptr; AllocList *prev[kMaxLevel]; LLA_SkiplistDelete(&arena->freelist, n, prev); LLA_SkiplistDelete(&arena->freelist, a, prev); a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random); LLA_SkiplistInsert(&arena->freelist, a, prev); } } static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) { AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) - sizeof(f->header)); ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header), "bad magic number in AddToFreelist()"); ABSL_RAW_CHECK(f->header.arena == arena, "bad arena pointer in AddToFreelist()"); f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random); AllocList *prev[kMaxLevel]; LLA_SkiplistInsert(&arena->freelist, f, prev); f->header.magic = Magic(kMagicUnallocated, &f->header); Coalesce(f); Coalesce(prev[0]); } void LowLevelAlloc::Free(void *v) { if (v != nullptr) { AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) - sizeof(f->header)); LowLevelAlloc::Arena *arena = f->header.arena; ArenaLock section(arena); AddToFreelist(v, arena); ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free"); arena->allocation_count--; section.Leave(); } } static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) { void *result = nullptr; if (request != 0) { AllocList *s; ArenaLock section(arena); size_t req_rnd = RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up); for (;;) { int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1; if (i < arena->freelist.levels) { AllocList *before = &arena->freelist; while ((s = Next(i, before, arena)) != nullptr && s->header.size < req_rnd) { before = s; } if (s != nullptr) { break; } } arena->mu.Unlock(); size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16); void *new_pages; #ifdef _WIN32 new_pages = VirtualAlloc(nullptr, new_pages_size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed"); #else #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { new_pages = base_internal::DirectMmap(nullptr, new_pages_size, PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); } else { new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); } #else new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); #endif if (new_pages == MAP_FAILED) { ABSL_RAW_LOG(FATAL, "mmap error: %d", errno); } #ifdef __linux__ #if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME) prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_pages_size, "absl"); #endif #endif #endif arena->mu.Lock(); s = reinterpret_cast<AllocList *>(new_pages); s->header.size = new_pages_size; s->header.magic = Magic(kMagicAllocated, &s->header); 
s->header.arena = arena; AddToFreelist(&s->levels, arena); } AllocList *prev[kMaxLevel]; LLA_SkiplistDelete(&arena->freelist, s, prev); if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) { AllocList *n = reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s)); n->header.size = s->header.size - req_rnd; n->header.magic = Magic(kMagicAllocated, &n->header); n->header.arena = arena; s->header.size = req_rnd; AddToFreelist(&n->levels, arena); } s->header.magic = Magic(kMagicAllocated, &s->header); ABSL_RAW_CHECK(s->header.arena == arena, ""); arena->allocation_count++; section.Leave(); result = &s->levels; } ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request); return result; } void *LowLevelAlloc::Alloc(size_t request) { void *result = DoAllocWithArena(request, DefaultArena()); return result; } void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) { ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena"); void *result = DoAllocWithArena(request, arena); return result; } } ABSL_NAMESPACE_END } #endif
#include "absl/base/internal/low_level_alloc.h" #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <thread> #include <unordered_map> #include <utility> #ifdef __EMSCRIPTEN__ #include <emscripten.h> #endif #include "absl/container/node_hash_map.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { namespace { #define TEST_ASSERT(x) \ if (!(x)) { \ printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \ abort(); \ } struct BlockDesc { char *ptr; int len; int fill; }; static void CheckBlockDesc(const BlockDesc &d) { for (int i = 0; i != d.len; i++) { TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff)); } } static void RandomizeBlockDesc(BlockDesc *d) { d->fill = rand() & 0xff; for (int i = 0; i != d->len; i++) { d->ptr[i] = (d->fill + i) & 0xff; } } static bool using_low_level_alloc = false; static void Test(bool use_new_arena, bool call_malloc_hook, int n) { typedef absl::node_hash_map<int, BlockDesc> AllocMap; AllocMap allocated; AllocMap::iterator it; BlockDesc block_desc; int rnd; LowLevelAlloc::Arena *arena = nullptr; if (use_new_arena) { int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0; arena = LowLevelAlloc::NewArena(flags); } for (int i = 0; i != n; i++) { if (i != 0 && i % 10000 == 0) { printf("."); fflush(stdout); } switch (rand() & 1) { case 0: using_low_level_alloc = true; block_desc.len = rand() & 0x3fff; block_desc.ptr = reinterpret_cast<char *>( arena == nullptr ? LowLevelAlloc::Alloc(block_desc.len) : LowLevelAlloc::AllocWithArena(block_desc.len, arena)); using_low_level_alloc = false; RandomizeBlockDesc(&block_desc); rnd = rand(); it = allocated.find(rnd); if (it != allocated.end()) { CheckBlockDesc(it->second); using_low_level_alloc = true; LowLevelAlloc::Free(it->second.ptr); using_low_level_alloc = false; it->second = block_desc; } else { allocated[rnd] = block_desc; } break; case 1: it = allocated.begin(); if (it != allocated.end()) { CheckBlockDesc(it->second); using_low_level_alloc = true; LowLevelAlloc::Free(it->second.ptr); using_low_level_alloc = false; allocated.erase(it); } break; } } while ((it = allocated.begin()) != allocated.end()) { CheckBlockDesc(it->second); using_low_level_alloc = true; LowLevelAlloc::Free(it->second.ptr); using_low_level_alloc = false; allocated.erase(it); } if (use_new_arena) { TEST_ASSERT(LowLevelAlloc::DeleteArena(arena)); } } static struct BeforeMain { BeforeMain() { Test(false, false, 50000); Test(true, false, 50000); Test(true, true, 50000); } } before_main; } } ABSL_NAMESPACE_END } int main(int argc, char *argv[]) { printf("PASS\n"); #ifdef __EMSCRIPTEN__ MAIN_THREAD_EM_ASM({ if (ENVIRONMENT_IS_WEB) { if (typeof TEST_FINISH === 'function') { TEST_FINISH($0); } else { console.error('Attempted to exit with status ' + $0); console.error('But TEST_FINSIHED is not a function.'); } } }, 0); #endif return 0; }
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/low_level_alloc.cc
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/low_level_alloc_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
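An arena-lifecycle sketch for the LowLevelAlloc record above, following the API its test exercises (this is an internal header, compiled out when ABSL_LOW_LEVEL_ALLOC_MISSING is defined). Free() recovers the owning arena from the block header, so it takes no arena argument, and DeleteArena() returns false unless every block in the arena has been freed. Demo is an illustrative name.

#include "absl/base/internal/low_level_alloc.h"

void Demo() {
  using absl::base_internal::LowLevelAlloc;
  LowLevelAlloc::Arena* arena = LowLevelAlloc::NewArena(0);  // flags = 0
  void* p = LowLevelAlloc::AllocWithArena(128, arena);
  LowLevelAlloc::Free(p);  // the arena's allocation_count drops back to zero
  bool deleted = LowLevelAlloc::DeleteArena(arena);
  (void)deleted;  // true: the arena was empty, its pages were munmapped
}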
e7d29cc8-d376-428f-885c-1b55d95cf820
cpp
google/quiche
quic_compressed_certs_cache
quiche/quic/core/crypto/quic_compressed_certs_cache.cc
quiche/quic/core/crypto/quic_compressed_certs_cache_test.cc
#include "quiche/quic/core/crypto/quic_compressed_certs_cache.h" #include <memory> #include <string> #include <utility> namespace quic { namespace { inline void hash_combine(uint64_t* seed, const uint64_t& val) { (*seed) ^= val + 0x9e3779b9 + ((*seed) << 6) + ((*seed) >> 2); } } const size_t QuicCompressedCertsCache::kQuicCompressedCertsCacheSize = 225; QuicCompressedCertsCache::UncompressedCerts::UncompressedCerts() : chain(nullptr), client_cached_cert_hashes(nullptr) {} QuicCompressedCertsCache::UncompressedCerts::UncompressedCerts( const quiche::QuicheReferenceCountedPointer<ProofSource::Chain>& chain, const std::string* client_cached_cert_hashes) : chain(chain), client_cached_cert_hashes(client_cached_cert_hashes) {} QuicCompressedCertsCache::UncompressedCerts::~UncompressedCerts() {} QuicCompressedCertsCache::CachedCerts::CachedCerts() {} QuicCompressedCertsCache::CachedCerts::CachedCerts( const UncompressedCerts& uncompressed_certs, const std::string& compressed_cert) : chain_(uncompressed_certs.chain), client_cached_cert_hashes_(*uncompressed_certs.client_cached_cert_hashes), compressed_cert_(compressed_cert) {} QuicCompressedCertsCache::CachedCerts::CachedCerts(const CachedCerts& other) = default; QuicCompressedCertsCache::CachedCerts::~CachedCerts() {} bool QuicCompressedCertsCache::CachedCerts::MatchesUncompressedCerts( const UncompressedCerts& uncompressed_certs) const { return (client_cached_cert_hashes_ == *uncompressed_certs.client_cached_cert_hashes && chain_ == uncompressed_certs.chain); } const std::string* QuicCompressedCertsCache::CachedCerts::compressed_cert() const { return &compressed_cert_; } QuicCompressedCertsCache::QuicCompressedCertsCache(int64_t max_num_certs) : certs_cache_(max_num_certs) {} QuicCompressedCertsCache::~QuicCompressedCertsCache() { certs_cache_.Clear(); } const std::string* QuicCompressedCertsCache::GetCompressedCert( const quiche::QuicheReferenceCountedPointer<ProofSource::Chain>& chain, const std::string& client_cached_cert_hashes) { UncompressedCerts uncompressed_certs(chain, &client_cached_cert_hashes); uint64_t key = ComputeUncompressedCertsHash(uncompressed_certs); CachedCerts* cached_value = nullptr; auto iter = certs_cache_.Lookup(key); if (iter != certs_cache_.end()) { cached_value = iter->second.get(); } if (cached_value != nullptr && cached_value->MatchesUncompressedCerts(uncompressed_certs)) { return cached_value->compressed_cert(); } return nullptr; } void QuicCompressedCertsCache::Insert( const quiche::QuicheReferenceCountedPointer<ProofSource::Chain>& chain, const std::string& client_cached_cert_hashes, const std::string& compressed_cert) { UncompressedCerts uncompressed_certs(chain, &client_cached_cert_hashes); uint64_t key = ComputeUncompressedCertsHash(uncompressed_certs); std::unique_ptr<CachedCerts> cached_certs( new CachedCerts(uncompressed_certs, compressed_cert)); certs_cache_.Insert(key, std::move(cached_certs)); } size_t QuicCompressedCertsCache::MaxSize() { return certs_cache_.MaxSize(); } size_t QuicCompressedCertsCache::Size() { return certs_cache_.Size(); } uint64_t QuicCompressedCertsCache::ComputeUncompressedCertsHash( const UncompressedCerts& uncompressed_certs) { uint64_t hash = std::hash<std::string>()(*uncompressed_certs.client_cached_cert_hashes); hash_combine(&hash, reinterpret_cast<uint64_t>(uncompressed_certs.chain.get())); return hash; } }
#include "quiche/quic/core/crypto/quic_compressed_certs_cache.h" #include <string> #include <vector> #include "absl/strings/str_cat.h" #include "quiche/quic/core/crypto/cert_compressor.h" #include "quiche/quic/platform/api/quic_test.h" #include "quiche/quic/test_tools/crypto_test_utils.h" namespace quic { namespace test { namespace { class QuicCompressedCertsCacheTest : public QuicTest { public: QuicCompressedCertsCacheTest() : certs_cache_(QuicCompressedCertsCache::kQuicCompressedCertsCacheSize) {} protected: QuicCompressedCertsCache certs_cache_; }; TEST_F(QuicCompressedCertsCacheTest, CacheHit) { std::vector<std::string> certs = {"leaf cert", "intermediate cert", "root cert"}; quiche::QuicheReferenceCountedPointer<ProofSource::Chain> chain( new ProofSource::Chain(certs)); std::string cached_certs = "cached certs"; std::string compressed = "compressed cert"; certs_cache_.Insert(chain, cached_certs, compressed); const std::string* cached_value = certs_cache_.GetCompressedCert(chain, cached_certs); ASSERT_NE(nullptr, cached_value); EXPECT_EQ(*cached_value, compressed); } TEST_F(QuicCompressedCertsCacheTest, CacheMiss) { std::vector<std::string> certs = {"leaf cert", "intermediate cert", "root cert"}; quiche::QuicheReferenceCountedPointer<ProofSource::Chain> chain( new ProofSource::Chain(certs)); std::string cached_certs = "cached certs"; std::string compressed = "compressed cert"; certs_cache_.Insert(chain, cached_certs, compressed); EXPECT_EQ(nullptr, certs_cache_.GetCompressedCert(chain, "mismatched cached certs")); quiche::QuicheReferenceCountedPointer<ProofSource::Chain> chain2( new ProofSource::Chain(certs)); EXPECT_EQ(nullptr, certs_cache_.GetCompressedCert(chain2, cached_certs)); } TEST_F(QuicCompressedCertsCacheTest, CacheMissDueToEviction) { std::vector<std::string> certs = {"leaf cert", "intermediate cert", "root cert"}; quiche::QuicheReferenceCountedPointer<ProofSource::Chain> chain( new ProofSource::Chain(certs)); std::string cached_certs = "cached certs"; std::string compressed = "compressed cert"; certs_cache_.Insert(chain, cached_certs, compressed); for (unsigned int i = 0; i < QuicCompressedCertsCache::kQuicCompressedCertsCacheSize; i++) { EXPECT_EQ(certs_cache_.Size(), i + 1); certs_cache_.Insert(chain, absl::StrCat(i), absl::StrCat(i)); } EXPECT_EQ(certs_cache_.MaxSize(), certs_cache_.Size()); EXPECT_EQ(nullptr, certs_cache_.GetCompressedCert(chain, cached_certs)); } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/quic_compressed_certs_cache.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/quic_compressed_certs_cache_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
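A usage sketch mirroring the CacheHit test in the QuicCompressedCertsCache record above. Lookups key on the Chain pointer identity combined with the client_cached_cert_hashes string, so an identical but distinct Chain object misses, exactly as the CacheMiss test demonstrates. Demo is an illustrative name.

#include <string>
#include <vector>
#include "quiche/quic/core/crypto/quic_compressed_certs_cache.h"

void Demo() {
  quic::QuicCompressedCertsCache cache(
      quic::QuicCompressedCertsCache::kQuicCompressedCertsCacheSize);
  std::vector<std::string> certs = {"leaf cert", "intermediate cert",
                                    "root cert"};
  quiche::QuicheReferenceCountedPointer<quic::ProofSource::Chain> chain(
      new quic::ProofSource::Chain(certs));
  cache.Insert(chain, "cached certs", "compressed cert");
  const std::string* hit = cache.GetCompressedCert(chain, "cached certs");
  // hit != nullptr and *hit == "compressed cert". A different hashes string,
  // or a different Chain object with the same certs, returns nullptr.
  (void)hit;
}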
95ee4ca1-2dec-41c4-beaa-acb7667b5f7b
cpp
tensorflow/tensorflow
ctc_beam_search
tensorflow/lite/kernels/ctc/ctc_beam_search.h
tensorflow/core/util/ctc/ctc_beam_search_test.cc
#ifndef TENSORFLOW_LITE_KERNELS_CTC_CTC_BEAM_SEARCH_H_ #define TENSORFLOW_LITE_KERNELS_CTC_CTC_BEAM_SEARCH_H_ #include <algorithm> #include <cmath> #include <limits> #include <memory> #include <vector> #include "Eigen/Core" #include "tensorflow/lite/kernels/ctc/ctc_beam_entry.h" #include "tensorflow/lite/kernels/ctc/ctc_beam_scorer.h" #include "tensorflow/lite/kernels/ctc/ctc_decoder.h" #include "tensorflow/lite/kernels/ctc/ctc_loss_util.h" #include "tensorflow/lite/kernels/ctc/top_n.h" #include "tensorflow/lite/kernels/internal/compatibility.h" namespace tflite { namespace custom { namespace ctc { template <typename CTCBeamState = ctc_beam_search::EmptyBeamState, typename CTCBeamComparer = ctc_beam_search::BeamComparer<CTCBeamState>> class CTCBeamSearchDecoder : public CTCDecoder { typedef ctc_beam_search::BeamEntry<CTCBeamState> BeamEntry; typedef ctc_beam_search::BeamRoot<CTCBeamState> BeamRoot; typedef ctc_beam_search::BeamProbability BeamProbability; public: typedef BaseBeamScorer<CTCBeamState> DefaultBeamScorer; CTCBeamSearchDecoder(int num_classes, int beam_width, BaseBeamScorer<CTCBeamState>* scorer, int batch_size = 1, bool merge_repeated = false) : CTCDecoder(num_classes, batch_size, merge_repeated), beam_width_(beam_width), leaves_(beam_width), beam_scorer_(scorer) { Reset(); } ~CTCBeamSearchDecoder() override {} bool Decode(const CTCDecoder::SequenceLength& seq_len, const std::vector<CTCDecoder::Input>& input, std::vector<CTCDecoder::Output>* output, CTCDecoder::ScoreOutput* scores) override; template <typename Vector> void Step(const Vector& raw_input); template <typename Vector> float GetTopK(const int K, const Vector& input, std::vector<float>* top_k_logits, std::vector<int>* top_k_indices); BaseBeamScorer<CTCBeamState>* GetBeamScorer() const { return beam_scorer_; } void SetLabelSelectionParameters(int label_selection_size, float label_selection_margin) { label_selection_size_ = label_selection_size; label_selection_margin_ = label_selection_margin; } void Reset(); bool TopPaths(int n, std::vector<std::vector<int>>* paths, std::vector<float>* log_probs, bool merge_repeated) const; private: int beam_width_; int label_selection_size_ = 0; float label_selection_margin_ = -1; gtl::TopN<BeamEntry*, CTCBeamComparer> leaves_; std::unique_ptr<BeamRoot> beam_root_; BaseBeamScorer<CTCBeamState>* beam_scorer_; CTCBeamSearchDecoder(const CTCBeamSearchDecoder&) = delete; void operator=(const CTCBeamSearchDecoder&) = delete; }; template <typename CTCBeamState, typename CTCBeamComparer> bool CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::Decode( const CTCDecoder::SequenceLength& seq_len, const std::vector<CTCDecoder::Input>& input, std::vector<CTCDecoder::Output>* output, ScoreOutput* scores) { std::vector<std::vector<int>> beams; std::vector<float> beam_log_probabilities; int top_n = output->size(); if (std::any_of(output->begin(), output->end(), [this](const CTCDecoder::Output& output) -> bool { return output.size() < this->batch_size_; })) { return false; } if (scores->rows() < batch_size_ || scores->cols() < top_n) { return false; } for (int b = 0; b < batch_size_; ++b) { int seq_len_b = seq_len[b]; Reset(); for (int t = 0; t < seq_len_b; ++t) { Step(input[t].row(b)); } std::unique_ptr<std::vector<BeamEntry*>> branches(leaves_.Extract()); leaves_.Reset(); for (int i = 0; i < branches->size(); ++i) { BeamEntry* entry = (*branches)[i]; beam_scorer_->ExpandStateEnd(&entry->state); entry->newp.total += beam_scorer_->GetStateEndExpansionScore(entry->state); leaves_.push(entry); } 
bool status = TopPaths(top_n, &beams, &beam_log_probabilities, merge_repeated_); if (!status) { return status; } TFLITE_DCHECK_EQ(top_n, beam_log_probabilities.size()); TFLITE_DCHECK_EQ(beams.size(), beam_log_probabilities.size()); for (int i = 0; i < top_n; ++i) { (*output)[i][b].swap(beams[i]); (*scores)(b, i) = -beam_log_probabilities[i]; } } return true; } template <typename CTCBeamState, typename CTCBeamComparer> template <typename Vector> float CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::GetTopK( const int K, const Vector& input, std::vector<float>* top_k_logits, std::vector<int>* top_k_indices) { TFLITE_DCHECK_EQ(num_classes_, input.size()); top_k_logits->clear(); top_k_indices->clear(); top_k_logits->resize(K, -INFINITY); top_k_indices->resize(K, -1); for (int j = 0; j < num_classes_ - 1; ++j) { const float logit = input(j); if (logit > (*top_k_logits)[K - 1]) { int k = K - 1; while (k > 0 && logit > (*top_k_logits)[k - 1]) { (*top_k_logits)[k] = (*top_k_logits)[k - 1]; (*top_k_indices)[k] = (*top_k_indices)[k - 1]; k--; } (*top_k_logits)[k] = logit; (*top_k_indices)[k] = j; } } return std::max((*top_k_logits)[0], input(num_classes_ - 1)); } template <typename CTCBeamState, typename CTCBeamComparer> template <typename Vector> void CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::Step( const Vector& raw_input) { std::vector<float> top_k_logits; std::vector<int> top_k_indices; const bool top_k = (label_selection_size_ > 0 && label_selection_size_ < raw_input.size()); const int max_classes = top_k ? label_selection_size_ : (num_classes_ - 1); float max_coeff; if (top_k) { max_coeff = GetTopK(label_selection_size_, raw_input, &top_k_logits, &top_k_indices); } else { max_coeff = raw_input.maxCoeff(); } float logsumexp = 0.0; for (int j = 0; j < raw_input.size(); ++j) { logsumexp += Eigen::numext::exp(raw_input(j) - max_coeff); } logsumexp = Eigen::numext::log(logsumexp); float norm_offset = max_coeff + logsumexp; const float label_selection_input_min = (label_selection_margin_ >= 0) ? (max_coeff - label_selection_margin_) : -std::numeric_limits<float>::infinity(); TFLITE_DCHECK_EQ(num_classes_, raw_input.size()); std::unique_ptr<std::vector<BeamEntry*>> branches(leaves_.Extract()); leaves_.Reset(); for (BeamEntry* b : *branches) { b->oldp = b->newp; } for (BeamEntry* b : *branches) { if (b->parent != nullptr) { if (b->parent->Active()) { float previous = (b->label == b->parent->label) ? b->parent->oldp.blank : b->parent->oldp.total; b->newp.label = LogSumExp(b->newp.label, beam_scorer_->GetStateExpansionScore(b->state, previous)); } b->newp.label += raw_input(b->label) - norm_offset; } b->newp.blank = b->oldp.total + raw_input(blank_index_) - norm_offset; b->newp.total = LogSumExp(b->newp.blank, b->newp.label); leaves_.push(b); } for (BeamEntry* b : *branches) { auto is_candidate = [this](const BeamProbability& prob) { return (prob.total > kLogZero && (leaves_.size() < beam_width_ || prob.total > leaves_.peek_bottom()->newp.total)); }; if (!is_candidate(b->oldp)) { continue; } for (int ind = 0; ind < max_classes; ind++) { const int label = top_k ? top_k_indices[ind] : ind; const float logit = top_k ? top_k_logits[ind] : raw_input(ind); if (logit < label_selection_input_min) { continue; } BeamEntry& c = b->GetChild(label); if (!c.Active()) { c.newp.blank = kLogZero; beam_scorer_->ExpandState(b->state, b->label, &c.state, c.label); float previous = (c.label == b->label) ? 
b->oldp.blank : b->oldp.total; c.newp.label = logit - norm_offset + beam_scorer_->GetStateExpansionScore(c.state, previous); c.newp.total = c.newp.label; if (is_candidate(c.newp)) { if (leaves_.size() == beam_width_) { BeamEntry* bottom = leaves_.peek_bottom(); bottom->newp.Reset(); } leaves_.push(&c); } else { c.oldp.Reset(); c.newp.Reset(); } } } } } template <typename CTCBeamState, typename CTCBeamComparer> void CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::Reset() { leaves_.Reset(); beam_root_.reset(new BeamRoot(nullptr, -1)); beam_root_->RootEntry()->newp.total = 0.0; beam_root_->RootEntry()->newp.blank = 0.0; leaves_.push(beam_root_->RootEntry()); beam_scorer_->InitializeState(&beam_root_->RootEntry()->state); } template <typename CTCBeamState, typename CTCBeamComparer> bool CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::TopPaths( int n, std::vector<std::vector<int>>* paths, std::vector<float>* log_probs, bool merge_repeated) const { TFLITE_DCHECK(paths); TFLITE_DCHECK(log_probs); paths->clear(); log_probs->clear(); if (n > beam_width_) { return false; } if (n > leaves_.size()) { return false; } gtl::TopN<BeamEntry*, CTCBeamComparer> top_branches(n); for (auto it = leaves_.unsorted_begin(); it != leaves_.unsorted_end(); ++it) { top_branches.push(*it); } std::unique_ptr<std::vector<BeamEntry*>> branches(top_branches.Extract()); for (int i = 0; i < n; ++i) { BeamEntry* e((*branches)[i]); paths->push_back(e->LabelSeq(merge_repeated)); log_probs->push_back(e->newp.total); } return true; } } } } #endif
#include "tensorflow/core/util/ctc/ctc_beam_search.h" #include <cmath> #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/test.h" namespace { template <class T> using TestData = std::vector<std::vector<std::vector<T>>>; template <class T> struct HistoryBeamState { T score; std::vector<int> labels; }; template <class T, class BeamState> class DictionaryBeamScorer : public tensorflow::ctc::BaseBeamScorer<T, BeamState> { public: DictionaryBeamScorer() : tensorflow::ctc::BaseBeamScorer<T, BeamState>(), dictionary_({{3}, {3, 1}}) {} void InitializeState(BeamState* root) const override { root->score = 0; } void ExpandState(const BeamState& from_state, int from_label, BeamState* to_state, int to_label) const override { to_state->labels.push_back(to_label); SetStateScoreAccordingToDict(to_state); } void ExpandStateEnd(BeamState* state) const override { SetStateScoreAccordingToDict(state); } T GetStateExpansionScore(const BeamState& state, T previous_score) const override { return previous_score + state.score; } T GetStateEndExpansionScore(const BeamState& state) const override { return state.score; } const std::vector<std::vector<int>> dictionary_; private: void SetStateScoreAccordingToDict(BeamState* state) const; }; template <class T, class BeamState> void DictionaryBeamScorer<T, BeamState>::SetStateScoreAccordingToDict( BeamState* state) const { const std::vector<int>& candidate = state->labels; for (int w = 0; w < dictionary_.size(); ++w) { const std::vector<int>& word = dictionary_[w]; if (candidate.size() > word.size()) { continue; } if (std::equal(word.begin(), word.begin() + candidate.size(), candidate.begin())) { state->score = std::log(T(1.0)); return; } } state->score = std::log(T(0.01)); } template <class T> void ctc_beam_search_decoding_with_and_without_dictionary() { const int batch_size = 1; const int timesteps = 5; const int top_paths = 3; const int num_classes = 6; typename tensorflow::ctc::CTCBeamSearchDecoder<T>::DefaultBeamScorer default_scorer; tensorflow::ctc::CTCBeamSearchDecoder<T> decoder(num_classes, 10 * top_paths, &default_scorer); DictionaryBeamScorer<T, HistoryBeamState<T>> dictionary_scorer; tensorflow::ctc::CTCBeamSearchDecoder<T, HistoryBeamState<T>> dictionary_decoder(num_classes, top_paths, &dictionary_scorer); int sequence_lengths[batch_size] = {timesteps}; T input_data_mat[timesteps][batch_size][num_classes] = { {{0, 0.6, 0, 0.4, 0, 0}}, {{0, 0.5, 0, 0.5, 0, 0}}, {{0, 0.4, 0, 0.6, 0, 0}}, {{0, 0.4, 0, 0.6, 0, 0}}, {{0, 0.4, 0, 0.6, 0, 0}}}; for (int t = 0; t < timesteps; ++t) { for (int b = 0; b < batch_size; ++b) { for (int c = 0; c < num_classes; ++c) { input_data_mat[t][b][c] = std::log(input_data_mat[t][b][c]); } } } std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> expected_output = { {{1, 3}, {1, 3, 1}, {3, 1, 3}}, }; std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> expected_dict_output = { {{3}, {1, 3}, {3, 1}}, }; Eigen::Map<const Eigen::ArrayXi> seq_len(&sequence_lengths[0], batch_size); std::vector< Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>> inputs; inputs.reserve(timesteps); for (int t = 0; t < timesteps; ++t) { inputs.emplace_back(&input_data_mat[t][0][0], batch_size, num_classes); } std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> outputs( top_paths); for (typename tensorflow::ctc::CTCDecoder<T>::Output& output : outputs) { output.resize(batch_size); } T score[batch_size][top_paths] = {{0.0}}; Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> 
scores( &score[0][0], batch_size, top_paths); EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok()); for (int path = 0; path < top_paths; ++path) { EXPECT_EQ(outputs[path][0], expected_output[0][path]); } std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> dict_outputs( top_paths); for (typename tensorflow::ctc::CTCDecoder<T>::Output& output : dict_outputs) { output.resize(batch_size); } EXPECT_TRUE( dictionary_decoder.Decode(seq_len, inputs, &dict_outputs, &scores).ok()); for (int path = 0; path < top_paths; ++path) { EXPECT_EQ(dict_outputs[path][0], expected_dict_output[0][path]); } } template <class T> void ctc_beam_search_decoding_all_beam_elements_have_finite_scores() { const int batch_size = 1; const int timesteps = 1; const int top_paths = 3; const int num_classes = 6; typename tensorflow::ctc::CTCBeamSearchDecoder<T>::DefaultBeamScorer default_scorer; tensorflow::ctc::CTCBeamSearchDecoder<T> decoder(num_classes, top_paths, &default_scorer); int sequence_lengths[batch_size] = {timesteps}; T input_data_mat[timesteps][batch_size][num_classes] = { {{0.4, 0.3, 0, 0, 0, 0.5}}}; Eigen::Map<const Eigen::ArrayXi> seq_len(&sequence_lengths[0], batch_size); std::vector< Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>> inputs; inputs.reserve(timesteps); for (int t = 0; t < timesteps; ++t) { inputs.emplace_back(&input_data_mat[t][0][0], batch_size, num_classes); } std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> outputs( top_paths); for (typename tensorflow::ctc::CTCDecoder<T>::Output& output : outputs) { output.resize(batch_size); } T score[batch_size][top_paths] = {{0.0}}; Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> scores( &score[0][0], batch_size, top_paths); EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok()); for (int path = 0; path < top_paths; ++path) { LOG(INFO) << "path " << path; EXPECT_FALSE(std::isinf(score[0][path])); } } typedef int LabelState; template <class T> class RapidlyDroppingLabelScorer : public tensorflow::ctc::BaseBeamScorer<T, LabelState> { public: void InitializeState(LabelState* root) const override {} void ExpandState(const LabelState& from_state, int from_label, LabelState* to_state, int to_label) const override { *to_state = to_label; } void ExpandStateEnd(LabelState* state) const override {} T GetStateExpansionScore(const LabelState& state, T previous_score) const override { const T kRapidly = 100; return previous_score - kRapidly * state; } T GetStateEndExpansionScore(const LabelState& state) const override { return T(0); } }; template <class T> void ctc_beam_search_label_selection() { const int batch_size = 1; const int timesteps = 3; const int top_paths = 5; const int num_classes = 6; RapidlyDroppingLabelScorer<T> scorer; tensorflow::ctc::CTCBeamSearchDecoder<T, LabelState> decoder( num_classes, top_paths, &scorer); int sequence_lengths[batch_size] = {timesteps}; T input_data_mat[timesteps][batch_size][num_classes] = { {{-1e6, 1, 2, 3, 4, -1e6}}, {{1e6, 0, 0, 0, 0, -1e6}}, {{-1e6, 1.1, 2.2, 3.3, 4.4, -1e6}}, }; std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> expected_default_output = { {{1, 0, 1}, {1, 0, 2}, {2, 0, 1}, {1, 0, 3}, {2, 0, 2}}, }; std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> expected_output_size2 = { {{3, 0, 3}, {3, 0, 4}, {4, 0, 3}, {4, 0, 4}, {3}}, }; std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> expected_output_width2 = { {{2, 0, 3}, {2, 0, 4}, {3, 0, 3}, {3, 0, 4}, {4, 0, 3}}, }; Eigen::Map<const 
Eigen::ArrayXi> seq_len(&sequence_lengths[0], batch_size); std::vector< Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>> inputs; inputs.reserve(timesteps); for (int t = 0; t < timesteps; ++t) { inputs.emplace_back(&input_data_mat[t][0][0], batch_size, num_classes); } std::vector<typename tensorflow::ctc::CTCDecoder<T>::Output> outputs( top_paths); for (typename tensorflow::ctc::CTCDecoder<T>::Output& output : outputs) { output.resize(batch_size); } T score[batch_size][top_paths] = {{0.0}}; Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> scores( &score[0][0], batch_size, top_paths); EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok()); for (int path = 0; path < top_paths; ++path) { EXPECT_EQ(outputs[path][0], expected_default_output[0][path]); } decoder.SetLabelSelectionParameters(2, T(-1)); EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok()); for (int path = 0; path < top_paths; ++path) { EXPECT_EQ(outputs[path][0], expected_output_size2[0][path]); } decoder.SetLabelSelectionParameters(0, T(2.0)); EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok()); for (int path = 0; path < top_paths; ++path) { EXPECT_EQ(outputs[path][0], expected_output_width2[0][path]); } decoder.SetLabelSelectionParameters(2, T(2.0)); EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok()); for (int path = 0; path < top_paths; ++path) { EXPECT_EQ(outputs[path][0], expected_output_size2[0][path]); } decoder.SetLabelSelectionParameters(4, T(3.3001)); EXPECT_TRUE(decoder.Decode(seq_len, inputs, &outputs, &scores).ok()); for (int path = 0; path < top_paths; ++path) { EXPECT_EQ(outputs[path][0], expected_default_output[0][path]); } } TEST(CtcBeamSearch, FloatDecodingWithAndWithoutDictionary) { ctc_beam_search_decoding_with_and_without_dictionary<float>(); } TEST(CtcBeamSearch, DoubleDecodingWithAndWithoutDictionary) { ctc_beam_search_decoding_with_and_without_dictionary<double>(); } TEST(CtcBeamSearch, FloatAllBeamElementsHaveFiniteScores) { ctc_beam_search_decoding_all_beam_elements_have_finite_scores<float>(); } TEST(CtcBeamSearch, DoubleAllBeamElementsHaveFiniteScores) { ctc_beam_search_decoding_all_beam_elements_have_finite_scores<double>(); } TEST(CtcBeamSearch, FloatLabelSelection) { ctc_beam_search_label_selection<float>(); } TEST(CtcBeamSearch, DoubleLabelSelection) { ctc_beam_search_label_selection<double>(); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/ctc/ctc_beam_search.h
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/ctc/ctc_beam_search_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
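For experimenting with the decoder outside the test harness above, a minimal float driver can be assembled from the same calls; the header path and the tiny two-step, three-class logits below are illustrative assumptions rather than values taken from the test.

#include <vector>

#include "tensorflow/core/util/ctc/ctc_beam_search.h"

int main() {
  constexpr int batch_size = 1, timesteps = 2, num_classes = 3, top_paths = 1;
  tensorflow::ctc::CTCBeamSearchDecoder<float>::DefaultBeamScorer scorer;
  tensorflow::ctc::CTCBeamSearchDecoder<float> decoder(num_classes, top_paths,
                                                       &scorer);

  int sequence_lengths[batch_size] = {timesteps};
  // One score per class at each timestep; class num_classes - 1 is the blank.
  float logits[timesteps][batch_size][num_classes] = {{{1.0f, 0.0f, 0.0f}},
                                                      {{0.0f, 1.0f, 0.0f}}};
  Eigen::Map<const Eigen::ArrayXi> seq_len(&sequence_lengths[0], batch_size);
  std::vector<
      Eigen::Map<const Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic>>>
      inputs;
  for (int t = 0; t < timesteps; ++t) {
    inputs.emplace_back(&logits[t][0][0], batch_size, num_classes);
  }

  std::vector<tensorflow::ctc::CTCDecoder<float>::Output> outputs(top_paths);
  for (auto& output : outputs) output.resize(batch_size);
  float score[batch_size][top_paths] = {{0.0f}};
  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic>> scores(
      &score[0][0], batch_size, top_paths);
  return decoder.Decode(seq_len, inputs, &outputs, &scores).ok() ? 0 : 1;
}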
85d220d5-9ed3-477a-8730-5b2484f55ad5
cpp
tensorflow/tensorflow
logging
tensorflow/c/logging.cc
third_party/xla/third_party/tsl/tsl/platform/logging_test.cc
#include "tensorflow/c/logging.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/stringprintf.h" static ::tensorflow::string BuildMessage(const char* fmt, va_list args) { ::tensorflow::string message; ::tensorflow::strings::Appendv(&message, fmt, args); return message; } void TF_Log(TF_LogLevel level, const char* fmt, ...) { if (level < TF_INFO || level > TF_FATAL) return; va_list args; va_start(args, fmt); auto message = BuildMessage(fmt, args); va_end(args); switch (level) { case TF_INFO: LOG(INFO) << message; break; case TF_WARNING: LOG(WARNING) << message; break; case TF_ERROR: LOG(ERROR) << message; break; case TF_FATAL: LOG(FATAL) << message; break; } } void TF_VLog(int level, const char* fmt, ...) { va_list args; va_start(args, fmt); auto message = BuildMessage(fmt, args); va_end(args); VLOG(level) << message; } void TF_DVLog(int level, const char* fmt, ...) { va_list args; va_start(args, fmt); auto message = BuildMessage(fmt, args); va_end(args); DVLOG(level) << message; }
#include "tsl/platform/logging.h" #include <cerrno> #include <cstddef> #include <cstdio> #include <cstdlib> #include <memory> #include <sstream> #include <vector> #include "absl/base/log_severity.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "tsl/platform/path.h" #include "tsl/platform/stacktrace_handler.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" #ifdef PLATFORM_WINDOWS #define popen _popen #define pclose _pclose #endif static char* program_name; namespace tsl { namespace { using ::testing::HasSubstr; using ::testing::Not; TEST(Logging, Log) { LOG(INFO) << "Hello"; LOG(INFO) << "Another log message"; LOG(ERROR) << "Error message"; VLOG(1) << "A VLOG message"; VLOG(2) << "A higher VLOG message"; DVLOG(1) << "A DVLOG message"; DVLOG(2) << "A higher DVLOG message"; } TEST(Logging, CheckChecks) { CHECK(true); CHECK(7 > 5); string a("abc"); string b("xyz"); CHECK_EQ(a, a); CHECK_NE(a, b); CHECK_EQ(3, 3); CHECK_NE(4, 3); CHECK_GT(4, 3); CHECK_GE(3, 3); CHECK_LT(2, 3); CHECK_LE(2, 3); DCHECK(true); DCHECK(7 > 5); DCHECK_EQ(a, a); DCHECK_NE(a, b); DCHECK_EQ(3, 3); DCHECK_NE(4, 3); DCHECK_GT(4, 3); DCHECK_GE(3, 3); DCHECK_LT(2, 3); DCHECK_LE(2, 3); } TEST(LoggingDeathTest, FailedChecks) { string a("abc"); string b("xyz"); const char* p_const = "hello there"; const char* p_null_const = nullptr; char mybuf[10]; char* p_non_const = mybuf; char* p_null = nullptr; CHECK_NOTNULL(p_const); CHECK_NOTNULL(p_non_const); ASSERT_DEATH(CHECK(false), "false"); ASSERT_DEATH(CHECK(9 < 7), "9 < 7"); ASSERT_DEATH(CHECK_EQ(a, b), "a == b"); ASSERT_DEATH(CHECK_EQ(3, 4), "3 == 4"); ASSERT_DEATH(CHECK_NE(3, 3), "3 != 3"); ASSERT_DEATH(CHECK_GT(2, 3), "2 > 3"); ASSERT_DEATH(CHECK_GE(2, 3), "2 >= 3"); ASSERT_DEATH(CHECK_LT(3, 2), "3 < 2"); ASSERT_DEATH(CHECK_LE(3, 2), "3 <= 2"); ASSERT_DEATH(CHECK(false), "false"); ASSERT_DEATH(printf("%s", CHECK_NOTNULL(p_null)), "Must be non NULL"); ASSERT_DEATH(printf("%s", CHECK_NOTNULL(p_null_const)), "Must be non NULL"); #ifndef NDEBUG ASSERT_DEATH(DCHECK(9 < 7), "9 < 7"); ASSERT_DEATH(DCHECK(9 < 7), "9 < 7"); ASSERT_DEATH(DCHECK_EQ(a, b), "a == b"); ASSERT_DEATH(DCHECK_EQ(3, 4), "3 == 4"); ASSERT_DEATH(DCHECK_NE(3, 3), "3 != 3"); ASSERT_DEATH(DCHECK_GT(2, 3), "2 > 3"); ASSERT_DEATH(DCHECK_GE(2, 3), "2 >= 3"); ASSERT_DEATH(DCHECK_LT(3, 2), "3 < 2"); ASSERT_DEATH(DCHECK_LE(3, 2), "3 <= 2"); #endif } TEST(InternalLogString, Basic) { internal::LogString(__FILE__, __LINE__, absl::LogSeverity::kInfo, "Hello there"); } class TestSink : public TFLogSink { public: void Send(const TFLogEntry& entry) override { ss_ << entry.text_message() << std::endl; } std::string Get() const { return ss_.str(); } private: std::stringstream ss_; }; TEST(LogSinkTest, testLogSinks) { const int sinks_initial_size = TFGetLogSinks().size(); TestSink sink; TFAddLogSink(&sink); EXPECT_EQ(TFGetLogSinks().size(), sinks_initial_size + 1); LOG(INFO) << "Foo"; LOG(INFO) << "Bar"; EXPECT_EQ(sink.Get(), "Foo\nBar\n"); TFRemoveLogSink(&sink); EXPECT_EQ(TFGetLogSinks().size(), sinks_initial_size); } std::string ReadFromFilePointer(FILE* fp) { std::string result; while (!feof(fp)) { char buf[512]; size_t len = fread(buf, sizeof(buf[0]), 512, fp); result.append(buf, len); } return result; } absl::StatusOr<std::string> ReadFromFile(const std::string& filename) { std::shared_ptr<FILE> fp(fopen(filename.c_str(), "r"), fclose); if (fp == nullptr) { return absl::ErrnoToStatus(errno, 
absl::StrFormat("Cannot fopen '%s'", filename)); } return ReadFromFilePointer(fp.get()); } class SubcommandTest : public ::testing::Test { public: static constexpr absl::string_view kLogVLog = "log_and_vlog"; static bool IsSubcommand(absl::string_view subcommand) { return subcommand == kLogVLog; } static int Run(absl::string_view subcommand) { CHECK_EQ(subcommand, kLogVLog); LOG(INFO) << "LOG INFO"; LOG(WARNING) << "LOG WARNING"; LOG(ERROR) << "LOG ERROR"; LOG(INFO) << absl::StrFormat("VLOG_IS_ON(1)? %d", VLOG_IS_ON(1)); LOG(INFO) << absl::StrFormat("VLOG_IS_ON(2)? %d", VLOG_IS_ON(2)); LOG(INFO) << absl::StrFormat("VLOG_IS_ON(3)? %d", VLOG_IS_ON(3)); VLOG(1) << "VLevel 1"; VLOG(2) << "VLevel 2"; VLOG(3) << "VLevel 3"; return EXIT_SUCCESS; } protected: absl::StatusOr<std::string> CaptureOutput(const char* invocation) { std::shared_ptr<FILE> fp(popen(invocation, "r"), pclose); if (fp == nullptr) { return absl::ErrnoToStatus( errno, absl::StrFormat("Cannot popen '%s'", invocation)); } return ReadFromFilePointer(fp.get()); } }; TEST_F(SubcommandTest, LogDefaultTest) { std::string command = absl::StrFormat("%s %s", program_name, kLogVLog); #if defined(PLATFORM_GOOGLE) command += " --alsologtostderr"; #endif command += " 2>&1"; TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str())); EXPECT_THAT(out, HasSubstr("LOG INFO")); EXPECT_THAT(out, HasSubstr("LOG WARNING")); EXPECT_THAT(out, HasSubstr("LOG ERROR")); EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(1)? 0")); EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(2)? 0")); EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(3)? 0")); } TEST_F(SubcommandTest, MinLogLevelTest) { std::string command = absl::StrFormat("%s %s", program_name, kLogVLog); #if defined(PLATFORM_GOOGLE) command += " --minloglevel=1 --alsologtostderr"; #elif defined(PLATFORM_WINDOWS) command = absl::StrFormat("set TF_CPP_MIN_LOG_LEVEL=1 && %s", command); #else command = absl::StrFormat("TF_CPP_MIN_LOG_LEVEL=1 %s", command); #endif command += " 2>&1"; TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str())); EXPECT_THAT(out, Not(HasSubstr("LOG INFO"))); EXPECT_THAT(out, HasSubstr("LOG WARNING")); EXPECT_THAT(out, HasSubstr("LOG ERROR")); } TEST_F(SubcommandTest, VLogDefaultTest) { std::string command = absl::StrFormat("%s %s", program_name, kLogVLog); #if defined(PLATFORM_GOOGLE) command += " --alsologtostderr"; #endif command += " 2>&1"; TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str())); EXPECT_THAT(out, Not(HasSubstr("VLevel 1"))); EXPECT_THAT(out, Not(HasSubstr("VLevel 2"))); EXPECT_THAT(out, Not(HasSubstr("VLevel 3"))); } TEST_F(SubcommandTest, MaxVLogLevelTest) { std::string command = absl::StrFormat("%s %s", program_name, kLogVLog); #if defined(PLATFORM_GOOGLE) command += " --v=2 --alsologtostderr"; #elif defined(PLATFORM_WINDOWS) command = absl::StrFormat("set TF_CPP_MAX_VLOG_LEVEL=2 && %s", command); #else command = absl::StrFormat("TF_CPP_MAX_VLOG_LEVEL=2 %s", command); #endif command += " 2>&1"; TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str())); EXPECT_THAT(out, HasSubstr("VLevel 1")); EXPECT_THAT(out, HasSubstr("VLevel 2")); EXPECT_THAT(out, Not(HasSubstr("VLevel 3"))); EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(1)? 1")); EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(2)? 1")); EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(3)? 
0")); } TEST_F(SubcommandTest, VModuleTest) { std::string command = absl::StrFormat("%s %s", program_name, kLogVLog); #if defined(PLATFORM_GOOGLE) command += " --vmodule=logging_test=2,shoobadooba=3 --alsologtostderr"; #elif defined(PLATFORM_WINDOWS) command = absl::StrFormat( "set TF_CPP_VMODULE=logging_test=2,shoobadooba=3 && %s", command); #else command = absl::StrFormat("TF_CPP_VMODULE=logging_test=2,shoobadooba=3 %s", command); #endif command += " 2>&1"; TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str())); EXPECT_THAT(out, HasSubstr("VLevel 1")); EXPECT_THAT(out, HasSubstr("VLevel 2")); EXPECT_THAT(out, Not(HasSubstr("VLevel 3"))); EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(1)? 1")); EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(2)? 1")); EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(3)? 0")); } TEST_F(SubcommandTest, VLogFilenameTest) { #if defined(PLATFORM_GOOGLE) constexpr bool kVLogFilenameEnvVarIsSupported = false; #else constexpr bool kVLogFilenameEnvVarIsSupported = true; #endif if (!kVLogFilenameEnvVarIsSupported) { GTEST_SKIP() << "Not supported on this platform"; } std::string command = absl::StrFormat("%s %s", program_name, kLogVLog); std::string filename = io::GetTempFilename("logging_test"); #if defined(PLATFORM_WINDOWS) command = absl::StrFormat( "set TF_CPP_VLOG_FILENAME=%s && set TF_CPP_MAX_VLOG_LEVEL=1 && %s", filename, command); #else command = absl::StrFormat( "TF_CPP_VLOG_FILENAME=%s TF_CPP_MAX_VLOG_LEVEL=1 %s", filename, command); #endif command += " 2>&1"; TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str())); EXPECT_THAT(out, Not(HasSubstr("LOG INFO"))); EXPECT_THAT(out, Not(HasSubstr("LOG WARNING"))); EXPECT_THAT(out, Not(HasSubstr("LOG ERROR"))); EXPECT_THAT(out, Not(HasSubstr("VLOG_IS_ON(1)?"))); EXPECT_THAT(out, Not(HasSubstr("VLOG_IS_ON(2)?"))); EXPECT_THAT(out, Not(HasSubstr("VLOG_IS_ON(3)?"))); EXPECT_THAT(out, Not(HasSubstr("VLevel 1"))); EXPECT_THAT(out, Not(HasSubstr("VLevel 2"))); EXPECT_THAT(out, Not(HasSubstr("VLevel 3"))); TF_ASSERT_OK_AND_ASSIGN(std::string log_file, ReadFromFile(filename)); EXPECT_THAT(log_file, HasSubstr("LOG INFO")); EXPECT_THAT(log_file, HasSubstr("LOG WARNING")); EXPECT_THAT(log_file, HasSubstr("LOG ERROR")); EXPECT_THAT(log_file, HasSubstr("VLOG_IS_ON(1)")); EXPECT_THAT(log_file, HasSubstr("VLOG_IS_ON(2)")); EXPECT_THAT(log_file, HasSubstr("VLOG_IS_ON(3)")); EXPECT_THAT(log_file, HasSubstr("VLevel 1")); EXPECT_THAT(log_file, Not(HasSubstr("VLevel 2"))); EXPECT_THAT(log_file, Not(HasSubstr("VLevel 3"))); } } } GTEST_API_ int main(int argc, char** argv) { tsl::testing::InstallStacktraceHandler(); testing::InitGoogleTest(&argc, argv); program_name = argv[0]; if (argc >= 2 && tsl::SubcommandTest::IsSubcommand(argv[1])) { return tsl::SubcommandTest::Run(argv[1]); } return RUN_ALL_TESTS(); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/logging.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/logging_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
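Beyond the assertions above, the TFLogSink hook the test exercises is also the public way to reroute log output; a minimal sketch, assuming the tsl namespace and header already used by the test:

#include <iostream>

#include "tsl/platform/logging.h"

// Forwards each log line to stderr with a custom prefix, mirroring the
// TestSink pattern from the test above.
class PrefixSink : public tsl::TFLogSink {
 public:
  void Send(const tsl::TFLogEntry& entry) override {
    std::cerr << "[my-app] " << entry.text_message() << "\n";
  }
};

int main() {
  PrefixSink sink;
  tsl::TFAddLogSink(&sink);     // start receiving entries
  LOG(INFO) << "routed through the custom sink";
  tsl::TFRemoveLogSink(&sink);  // detach before the sink is destroyed
  return 0;
}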
cd4789df-22b1-45b7-8784-0f2d1d2727ba
cpp
tensorflow/tensorflow
assert_next_dataset_op
tensorflow/core/kernels/data/experimental/assert_next_dataset_op.cc
tensorflow/core/kernels/data/experimental/assert_next_dataset_op_test.cc
#include "tensorflow/core/kernels/data/experimental/assert_next_dataset_op.h" #include <map> #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/data/name_utils.h" #include "tensorflow/core/framework/partial_tensor_shape.h" #include "tensorflow/core/framework/tensor.h" namespace tensorflow { namespace data { namespace experimental { constexpr const char* const AssertNextDatasetOp::kInputDataset; constexpr const char* const AssertNextDatasetOp::kDatasetType; constexpr const char* const AssertNextDatasetOp::kTransformations; constexpr const char* const AssertNextDatasetOp::kOutputTypes; constexpr const char* const AssertNextDatasetOp::kOutputShapes; class AssertNextDatasetOp::Dataset : public DatasetBase { public: Dataset(OpKernelContext* ctx, const DatasetBase* input, const std::vector<tstring>& transformations, const DataTypeVector& output_types, const std::vector<PartialTensorShape>& output_shapes) : DatasetBase(DatasetContext(ctx)), input_(input), transformations_(transformations), output_types_(output_types), output_shapes_(output_shapes) { input_->Ref(); } ~Dataset() override { input_->Unref(); } std::unique_ptr<IteratorBase> MakeIteratorInternal( const string& prefix) const override { return std::make_unique<Iterator>(Iterator::Params{ this, name_utils::IteratorPrefix(kDatasetType, prefix)}); } const DataTypeVector& output_dtypes() const override { return output_types_; } const std::vector<PartialTensorShape>& output_shapes() const override { return output_shapes_; } string DebugString() const override { return name_utils::DatasetDebugString(kDatasetType); } int64_t CardinalityInternal(CardinalityOptions options) const override { return input_->Cardinality(options); } Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override { inputs->push_back(input_); return absl::OkStatus(); } Status CheckExternalState() const override { return input_->CheckExternalState(); } protected: Status AsGraphDefInternal(SerializationContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph_node = nullptr; TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node)); Node* transformations_node = nullptr; TF_RETURN_IF_ERROR(b->AddVector(transformations_, &transformations_node)); TF_RETURN_IF_ERROR( b->AddDataset(this, {input_graph_node, transformations_node}, output)); return absl::OkStatus(); } private: class Iterator : public DatasetIterator<Dataset> { public: explicit Iterator(const Params& params) : DatasetIterator<Dataset>(params) {} Status Initialize(IteratorContext* ctx) override { std::vector<string> tokens = absl::StrSplit(prefix(), ':', absl::SkipEmpty()); if (dataset()->transformations_.size() > tokens.size() - 2) { return errors::InvalidArgument( "Asserted next ", dataset()->transformations_.size(), " transformations but encountered only ", tokens.size() - 2, "."); } int n = tokens.size(); for (size_t i = 0; i < dataset()->transformations_.size(); ++i) { if (!MatchesAnyVersion(dataset()->transformations_[i], tokens[n - 2 - i])) { return errors::InvalidArgument("Asserted transformation matching ", dataset()->transformations_[i], " at offset ", i, " but encountered ", tokens[n - 2 - i], " transformation instead."); } } return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_); } Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) override { return input_impl_->GetNext(ctx, out_tensors, end_of_sequence); } protected: std::shared_ptr<model::Node> 
CreateNode( IteratorContext* ctx, model::Node::Args args) const override { return model::MakeKnownRatioNode(std::move(args), 1); } Status SaveInternal(SerializationContext* ctx, IteratorStateWriter* writer) override { TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_)); return absl::OkStatus(); } Status RestoreInternal(IteratorContext* ctx, IteratorStateReader* reader) override { TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_)); return absl::OkStatus(); } private: std::unique_ptr<IteratorBase> input_impl_; }; const DatasetBase* input_; const std::vector<tstring> transformations_; const DataTypeVector output_types_; const std::vector<PartialTensorShape> output_shapes_; }; AssertNextDatasetOp::AssertNextDatasetOp(OpKernelConstruction* ctx) : UnaryDatasetOpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_)); OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_)); } void AssertNextDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, DatasetBase** output) { std::vector<tstring> transformations; OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kTransformations, &transformations)); *output = new Dataset(ctx, input, transformations, output_types_, output_shapes_); } namespace { REGISTER_KERNEL_BUILDER(Name("AssertNextDataset").Device(DEVICE_CPU), AssertNextDatasetOp); REGISTER_KERNEL_BUILDER( Name("ExperimentalAssertNextDataset").Device(DEVICE_CPU), AssertNextDatasetOp); } } } }
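The offset arithmetic in Iterator::Initialize is easier to follow in isolation. This self-contained sketch re-implements the check with exact string comparison (the real code also accepts versioned op names via MatchesAnyVersion); the prefix value is hypothetical.

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
  // The last token is the AssertNext iterator itself, so asserted
  // transformation i is compared against token n - 2 - i.
  const std::string prefix = "Iterator::Range::Take::Assert";
  const std::vector<std::string> asserted = {"Take", "Range"};

  std::vector<std::string> tokens;
  std::stringstream ss(prefix);
  for (std::string tok; std::getline(ss, tok, ':');) {
    if (!tok.empty()) tokens.push_back(tok);  // SkipEmpty drops "::" gaps
  }

  const int n = static_cast<int>(tokens.size());
  if (asserted.size() > static_cast<size_t>(n) - 2) {
    std::cout << "asserted more transformations than encountered\n";
    return 1;
  }
  for (size_t i = 0; i < asserted.size(); ++i) {
    std::cout << "offset " << i << ": asserted " << asserted[i]
              << ", encountered " << tokens[n - 2 - i]
              << (asserted[i] == tokens[n - 2 - i] ? " (match)" : " (mismatch)")
              << "\n";
  }
  return 0;
}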
#include "tensorflow/core/kernels/data/experimental/assert_next_dataset_op.h" #include "tensorflow/core/data/dataset_test_base.h" #include "tensorflow/core/kernels/data/range_dataset_op.h" #include "tensorflow/core/kernels/data/take_dataset_op.h" namespace tensorflow { namespace data { namespace experimental { namespace { constexpr char kNodeName[] = "assert_next_dataset"; class AssertNextDatasetParams : public DatasetParams { public: template <typename T> AssertNextDatasetParams(T input_dataset_params, const std::vector<tstring>& transformations, DataTypeVector output_dtypes, std::vector<PartialTensorShape> output_shapes, string node_name) : DatasetParams(std::move(output_dtypes), std::move(output_shapes), std::move(node_name)), transformations_(transformations) { input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params)); iterator_prefix_ = name_utils::IteratorPrefix(input_dataset_params.dataset_type(), input_dataset_params.iterator_prefix()); } std::vector<Tensor> GetInputTensors() const override { int num_transformations = transformations_.size(); return {CreateTensor<tstring>(TensorShape({num_transformations}), transformations_)}; } Status GetInputNames(std::vector<string>* input_names) const override { input_names->reserve(input_dataset_params_.size() + 1); input_names->emplace_back(AssertNextDatasetOp::kInputDataset); input_names->emplace_back(AssertNextDatasetOp::kTransformations); return absl::OkStatus(); } Status GetAttributes(AttributeVector* attr_vector) const override { *attr_vector = {{AssertNextDatasetOp::kOutputShapes, output_shapes_}, {AssertNextDatasetOp::kOutputTypes, output_dtypes_}}; return absl::OkStatus(); } string dataset_type() const override { return AssertNextDatasetOp::kDatasetType; } private: std::vector<tstring> transformations_; }; class AssertNextDatasetOpTest : public DatasetOpsTestBase {}; AssertNextDatasetParams AssertNextDatasetParams1() { TakeDatasetParams take_dataset_params = TakeDatasetParams(RangeDatasetParams(0, 10, 1), 3, {DT_INT64}, {PartialTensorShape({})}, "take_dataset"); return AssertNextDatasetParams( std::move(take_dataset_params), {TakeDatasetOp::kDatasetType}, {DT_INT64}, {PartialTensorShape({})}, kNodeName); } AssertNextDatasetParams AssertNextDatasetParams2() { TakeDatasetParams take_dataset_params = TakeDatasetParams(RangeDatasetParams(0, 10, 1), 3, {DT_INT64}, {PartialTensorShape({})}, "take_dataset"); return AssertNextDatasetParams( std::move(take_dataset_params), {TakeDatasetOp::kDatasetType, RangeDatasetOp::kDatasetType}, {DT_INT64}, {PartialTensorShape({})}, kNodeName); } AssertNextDatasetParams InvalidAssertNextDatasetParams() { TakeDatasetParams take_dataset_params = TakeDatasetParams(RangeDatasetParams(0, 10, 1), 3, {DT_INT64}, {PartialTensorShape({})}, "take_dataset"); return AssertNextDatasetParams(std::move(take_dataset_params), {"Whoops"}, {DT_INT64}, {PartialTensorShape({})}, kNodeName); } AssertNextDatasetParams ShortAssertNextDatasetParams() { TakeDatasetParams take_dataset_params = TakeDatasetParams(RangeDatasetParams(0, 10, 1), 3, {DT_INT64}, {PartialTensorShape({})}, "take_dataset"); return AssertNextDatasetParams( std::move(take_dataset_params), {TakeDatasetOp::kDatasetType, RangeDatasetOp::kDatasetType, "Whoops"}, {DT_INT64}, {PartialTensorShape({})}, kNodeName); } std::vector<GetNextTestCase<AssertNextDatasetParams>> GetNextTestCases() { return {{AssertNextDatasetParams1(), CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}, {AssertNextDatasetParams2(), CreateTensors<int64_t>(TensorShape({}), 
{{0}, {1}, {2}})}}; } ITERATOR_GET_NEXT_TEST_P(AssertNextDatasetOpTest, AssertNextDatasetParams, GetNextTestCases()) TEST_F(AssertNextDatasetOpTest, DatasetNodeName) { auto dataset_params = AssertNextDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name())); } TEST_F(AssertNextDatasetOpTest, DatasetTypeString) { auto dataset_params = AssertNextDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetTypeString( name_utils::OpName(AssertNextDatasetOp::kDatasetType))); } TEST_F(AssertNextDatasetOpTest, DatasetOutputDtypes) { auto dataset_params = AssertNextDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64})); } TEST_F(AssertNextDatasetOpTest, DatasetOutputShapes) { auto dataset_params = AssertNextDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})})); } TEST_F(AssertNextDatasetOpTest, Cardinality) { auto dataset_params = AssertNextDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetCardinality(3)); } TEST_F(AssertNextDatasetOpTest, IteratorOutputDtypes) { auto dataset_params = AssertNextDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64})); } TEST_F(AssertNextDatasetOpTest, IteratorOutputShapes) { auto dataset_params = AssertNextDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})})); } TEST_F(AssertNextDatasetOpTest, IteratorPrefix) { auto dataset_params = AssertNextDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix( AssertNextDatasetOp::kDatasetType, dataset_params.iterator_prefix()))); } std::vector<IteratorSaveAndRestoreTestCase<AssertNextDatasetParams>> IteratorSaveAndRestoreTestCases() { return {{AssertNextDatasetParams1(), {0, 2, 5}, CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}, {AssertNextDatasetParams2(), {0, 2, 5}, CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}}; } ITERATOR_SAVE_AND_RESTORE_TEST_P(AssertNextDatasetOpTest, AssertNextDatasetParams, IteratorSaveAndRestoreTestCases()) TEST_F(AssertNextDatasetOpTest, InvalidArguments) { auto dataset_params = InvalidAssertNextDatasetParams(); EXPECT_EQ(Initialize(dataset_params).code(), absl::StatusCode::kInvalidArgument); } TEST_F(AssertNextDatasetOpTest, ShortAssertNext) { auto dataset_params = ShortAssertNextDatasetParams(); EXPECT_EQ(Initialize(dataset_params).code(), absl::StatusCode::kInvalidArgument); } } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/assert_next_dataset_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/assert_next_dataset_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b2f293ea-f020-4c91-af4f-d136b4414cb6
cpp
google/tensorstore
rank
tensorstore/rank.cc
tensorstore/rank_test.cc
#include "tensorstore/rank.h" #include "tensorstore/util/str_cat.h" namespace tensorstore { std::string StaticCastTraits<DimensionIndex>::Describe(DimensionIndex value) { if (value == dynamic_rank) return "dynamic rank"; return tensorstore::StrCat("rank of ", value); } absl::Status ValidateRank(DimensionIndex rank) { if (!IsValidRank(rank)) { return absl::InvalidArgumentError(tensorstore::StrCat( "Rank ", rank, " is outside valid range [0, ", kMaxRank, "]")); } return absl::OkStatus(); } }
#include "tensorstore/rank.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorstore/util/result.h" #include "tensorstore/util/status.h" #include "tensorstore/util/status_testutil.h" namespace { using ::tensorstore::DimensionIndex; using ::tensorstore::dynamic_rank; using ::tensorstore::InlineRankLimit; using ::tensorstore::MatchesStatus; using ::tensorstore::RankConstraint; using ::tensorstore::StaticRankCast; using ::tensorstore::unchecked; static_assert(RankConstraint::Implies(3, 3)); static_assert(RankConstraint::Implies(3, dynamic_rank)); static_assert(RankConstraint::Implies(dynamic_rank, dynamic_rank)); static_assert(!RankConstraint::Implies(3, 2)); static_assert(!RankConstraint::Implies(dynamic_rank, 3)); static_assert(RankConstraint::EqualOrUnspecified(3, 3)); static_assert(RankConstraint::EqualOrUnspecified(dynamic_rank, dynamic_rank)); static_assert(RankConstraint::EqualOrUnspecified(dynamic_rank, 3)); static_assert(RankConstraint::EqualOrUnspecified(3, dynamic_rank)); static_assert(!RankConstraint::EqualOrUnspecified(3, 2)); static_assert(RankConstraint::Add(2, 3) == 5); static_assert(RankConstraint::Add({2, 3, 4}) == 9); static_assert(RankConstraint::Add({2}) == 2); static_assert(RankConstraint::Add({}) == 0); static_assert(RankConstraint::Add(dynamic_rank, 3) == dynamic_rank); static_assert(RankConstraint::Add(3, dynamic_rank) == dynamic_rank); static_assert(RankConstraint::Add(dynamic_rank, dynamic_rank) == dynamic_rank); static_assert(RankConstraint::Subtract(5, 2) == 3); static_assert(RankConstraint::Subtract(dynamic_rank, 3) == dynamic_rank); static_assert(RankConstraint::Subtract(3, dynamic_rank) == dynamic_rank); static_assert(RankConstraint::Subtract(dynamic_rank, dynamic_rank) == dynamic_rank); static_assert(RankConstraint::And(dynamic_rank, 5) == 5); static_assert(RankConstraint::And(5, dynamic_rank) == 5); static_assert(RankConstraint::And(dynamic_rank, dynamic_rank) == dynamic_rank); static_assert(RankConstraint::And({5, 5, dynamic_rank}) == 5); static_assert(RankConstraint::And({3}) == 3); static_assert(RankConstraint::And({}) == dynamic_rank); static_assert(RankConstraint::LessOrUnspecified(1, 2) == true); static_assert(RankConstraint::LessOrUnspecified(1, 1) == false); static_assert(RankConstraint::LessOrUnspecified(dynamic_rank, 2) == true); static_assert(RankConstraint::LessOrUnspecified(1, dynamic_rank) == true); static_assert(RankConstraint::LessOrUnspecified(dynamic_rank, dynamic_rank) == true); static_assert(RankConstraint::LessEqualOrUnspecified(1, 2) == true); static_assert(RankConstraint::LessEqualOrUnspecified(1, 1) == true); static_assert(RankConstraint::LessEqualOrUnspecified(1, 0) == false); static_assert(RankConstraint::LessEqualOrUnspecified(dynamic_rank, 2) == true); static_assert(RankConstraint::LessEqualOrUnspecified(1, dynamic_rank) == true); static_assert(RankConstraint::LessEqualOrUnspecified(dynamic_rank, dynamic_rank) == true); static_assert(RankConstraint::GreaterOrUnspecified(2, 1) == true); static_assert(RankConstraint::GreaterOrUnspecified(1, 1) == false); static_assert(RankConstraint::GreaterOrUnspecified(dynamic_rank, 2) == true); static_assert(RankConstraint::GreaterOrUnspecified(1, dynamic_rank) == true); static_assert(RankConstraint::GreaterOrUnspecified(dynamic_rank, dynamic_rank) == true); static_assert(RankConstraint::GreaterEqualOrUnspecified(2, 1) == true); static_assert(RankConstraint::GreaterEqualOrUnspecified(1, 1) == true); static_assert(RankConstraint::GreaterEqualOrUnspecified(0, 1) == false); 
static_assert(RankConstraint::GreaterEqualOrUnspecified(dynamic_rank, 2) == true); static_assert(RankConstraint::GreaterEqualOrUnspecified(1, dynamic_rank) == true); static_assert(RankConstraint::GreaterEqualOrUnspecified(dynamic_rank, dynamic_rank) == true); TEST(RankCastTest, Basic) { auto x = StaticRankCast<3>(std::integral_constant<DimensionIndex, 3>()).value(); static_assert( std::is_same_v<decltype(x), std::integral_constant<DimensionIndex, 3>>); auto y = StaticRankCast<dynamic_rank>(x).value(); EXPECT_EQ(3, y); static_assert(std::is_same_v<decltype(y), DimensionIndex>); auto a = StaticRankCast<3>(DimensionIndex(3)).value(); auto b = StaticRankCast<dynamic_rank>(DimensionIndex(3)).value(); static_assert( std::is_same_v<decltype(a), std::integral_constant<DimensionIndex, 3>>); static_assert(std::is_same_v<decltype(b), DimensionIndex>); EXPECT_THAT((StaticRankCast<3>(DimensionIndex(2))), MatchesStatus(absl::StatusCode::kInvalidArgument, "Cannot cast rank of 2 to rank of 3")); EXPECT_THAT((StaticRankCast<3>(DimensionIndex(3))), ::testing::Optional(tensorstore::StaticRank<3>())); EXPECT_THAT((StaticRankCast<3>(DimensionIndex(dynamic_rank))), ::testing::Optional(tensorstore::StaticRank<3>())); } TEST(RankCastDeathTest, DynamicToStatic) { EXPECT_DEBUG_DEATH((StaticRankCast<3, unchecked>(DimensionIndex(1))), "StaticCast is not valid"); } static_assert(InlineRankLimit(dynamic_rank(0)) == 0); static_assert(InlineRankLimit(dynamic_rank(1)) == 1); static_assert(InlineRankLimit(dynamic_rank(2)) == 2); static_assert(RankConstraint::FromInlineRank(dynamic_rank(0)) == -1); static_assert(RankConstraint::FromInlineRank(dynamic_rank(1)) == -1); static_assert(RankConstraint::FromInlineRank(dynamic_rank(2)) == -1); static_assert(RankConstraint::FromInlineRank(0) == 0); static_assert(RankConstraint::FromInlineRank(1) == 1); static_assert(RankConstraint::FromInlineRank(2) == 2); }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/rank.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/rank_test.cc
4f887a6430414cd6088e1743555015b10f116d50
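The static_asserts and death test above cover the compile-time side; at runtime, StaticRankCast returns a Result that can be checked like this (a sketch, assuming only the headers the test already uses):

#include <iostream>

#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"

int main() {
  tensorstore::DimensionIndex runtime_rank = 2;
  auto cast = tensorstore::StaticRankCast<3>(runtime_rank);
  if (!cast.ok()) {
    // e.g. "Cannot cast rank of 2 to rank of 3", as matched in the test.
    std::cout << cast.status() << "\n";
  }
  return 0;
}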
66a0bc89-4701-4131-b068-d96673fd7351
cpp
google/tensorstore
zip_details
tensorstore/internal/compression/zip_details.cc
tensorstore/internal/compression/zip_details_test.cc
#include "tensorstore/internal/compression/zip_details.h" #include <stdint.h> #include <algorithm> #include <cassert> #include <ctime> #include <ios> #include <limits> #include <memory> #include <string_view> #include <utility> #include <variant> #include "absl/base/attributes.h" #include "absl/log/absl_log.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/match.h" #include "absl/strings/str_format.h" #include "absl/time/time.h" #include "riegeli/bytes/limiting_reader.h" #include "riegeli/bytes/prefix_limiting_reader.h" #include "riegeli/bytes/reader.h" #include "riegeli/bzip2/bzip2_reader.h" #include "riegeli/endian/endian_reading.h" #include "riegeli/xz/xz_reader.h" #include "riegeli/zlib/zlib_reader.h" #include "riegeli/zstd/zstd_reader.h" #include "tensorstore/internal/log/verbose_flag.h" #include "tensorstore/internal/riegeli/find.h" #include "tensorstore/util/result.h" #include "tensorstore/util/status.h" #include "tensorstore/util/str_cat.h" namespace tensorstore { namespace internal_zip { namespace { using ::riegeli::ReadLittleEndian16; using ::riegeli::ReadLittleEndian32; using ::riegeli::ReadLittleEndian64; using ::riegeli::ReadLittleEndianSigned64; ABSL_CONST_INIT internal_log::VerboseFlag zip_logging("zip_details"); const absl::Time kWindowsEpoch = ::absl::UnixEpoch() - ::absl::Seconds(11644473600); absl::Time MakeMSDOSTime(uint16_t date, uint16_t time) { struct tm dos_tm; dos_tm.tm_mday = (uint16_t)(date & 0x1f); dos_tm.tm_mon = (uint16_t)((date >> 5) & 0xf) - 1; dos_tm.tm_year = (uint16_t)(date >> 9) + 80; dos_tm.tm_hour = (uint16_t)(time >> 11); dos_tm.tm_min = (uint16_t)((time >> 5) & 0x1f); dos_tm.tm_sec = (uint16_t)(2 * (time & 0x1f)); dos_tm.tm_isdst = -1; return absl::FromTM(dos_tm, absl::UTCTimeZone()); } absl::Status ReadExtraField_Zip64_0001(riegeli::Reader &reader, uint16_t tag_size, ZipEntry &entry) { assert(tag_size >= 8); entry.is_zip64 = true; do { if (tag_size >= 8 && entry.uncompressed_size == std::numeric_limits<uint32_t>::max()) { if (!ReadLittleEndian64(reader, entry.uncompressed_size)) break; tag_size -= 8; } if (tag_size >= 8 && entry.compressed_size == std::numeric_limits<uint32_t>::max()) { if (!ReadLittleEndian64(reader, entry.compressed_size)) break; tag_size -= 8; } if (tag_size >= 8 && entry.local_header_offset == std::numeric_limits<uint32_t>::max()) { if (!ReadLittleEndian64(reader, entry.local_header_offset)) break; tag_size -= 8; } return absl::OkStatus(); } while (false); return absl::InvalidArgumentError("Failed to read ZIP64 extra field"); } absl::Status ReadExtraField_Unix_000D(riegeli::Reader &reader, uint16_t tag_size, ZipEntry &entry) { assert(tag_size >= 12); uint32_t ignored32; uint32_t mtime; uint32_t atime; if (!ReadLittleEndian32(reader, atime) || !ReadLittleEndian32(reader, mtime) || !ReadLittleEndian32(reader, ignored32) ) { return absl::InvalidArgumentError("Failed to read UNIX extra field"); } entry.atime = absl::FromUnixSeconds(atime); entry.mtime = absl::FromUnixSeconds(mtime); return absl::OkStatus(); } absl::Status ReadExtraField_NTFS_000A(riegeli::Reader &reader, uint16_t tag_size, ZipEntry &entry) { assert(tag_size >= 8); uint32_t ignored32; if (!ReadLittleEndian32(reader, ignored32)) { return absl::InvalidArgumentError("Failed to read NTFS extra field"); } tag_size -= 4; uint16_t ntfs_tag, ntfs_size; while (tag_size > 4) { if (!ReadLittleEndian16(reader, ntfs_tag) || !ReadLittleEndian16(reader, ntfs_size)) { break; } tag_size -= 4; tag_size -= ntfs_size; if (ntfs_tag == 0x0001 && ntfs_size 
== 24) { uint64_t mtime; uint64_t atime; uint64_t ctime; if (!ReadLittleEndian64(reader, mtime) || !ReadLittleEndian64(reader, atime) || !ReadLittleEndian64(reader, ctime)) { return absl::InvalidArgumentError("Failed to read NTFS extra field"); } entry.mtime = kWindowsEpoch + absl::Nanoseconds(mtime * 100); entry.atime = kWindowsEpoch + absl::Nanoseconds(atime * 100); } else { reader.Skip(ntfs_size); } } return absl::OkStatus(); } absl::Status ReadExtraField_Unix_5455(riegeli::Reader &reader, uint16_t tag_size, ZipEntry &entry) { assert(tag_size >= 1); uint8_t flags = 0; uint32_t tstamp = 0; do { if (!reader.ReadByte(flags)) break; --tag_size; if (flags & 0x01 && tag_size >= 4) { if (!ReadLittleEndian32(reader, tstamp)) break; tag_size -= 4; entry.mtime = absl::FromUnixSeconds(tstamp); } if (flags & 0x02 && tag_size >= 4) { if (!ReadLittleEndian32(reader, tstamp)) break; tag_size -= 4; entry.atime = absl::FromUnixSeconds(tstamp); } if (flags & 0x04 && tag_size >= 4) { if (!ReadLittleEndian32(reader, tstamp)) break; tag_size -= 4; } return absl::OkStatus(); } while (false); return absl::InvalidArgumentError( "Failed to read unix timestamp extra field"); } absl::Status ReadExtraField(riegeli::Reader &reader, ZipEntry &entry) { uint16_t tag, tag_size; absl::Status status; while (reader.ok()) { if (!ReadLittleEndian16(reader, tag) || !ReadLittleEndian16(reader, tag_size)) { return absl::OkStatus(); } ABSL_LOG_IF(INFO, zip_logging) << std::hex << "extra tag " << tag << " size " << tag_size; auto pos = reader.pos(); switch (tag) { case 0x0001: status.Update(ReadExtraField_Zip64_0001(reader, tag_size, entry)); break; case 0x000d: status.Update(ReadExtraField_Unix_000D(reader, tag_size, entry)); break; case 0x000a: status.Update(ReadExtraField_NTFS_000A(reader, tag_size, entry)); break; case 0x5455: status.Update(ReadExtraField_Unix_5455(reader, tag_size, entry)); break; case 0x7875: break; default: break; } assert(reader.pos() <= pos + tag_size); reader.Seek(pos + tag_size); } return status; } } absl::Status ReadEOCD64Locator(riegeli::Reader &reader, ZipEOCD64Locator &locator) { if (!reader.Pull(ZipEOCD64Locator::kRecordSize)) { return absl::InvalidArgumentError( "ZIP EOCD64 Locator Entry insufficient data available"); } uint32_t signature; ReadLittleEndian32(reader, signature); if (signature != 0x07064b50) { return absl::InvalidArgumentError(absl::StrFormat( "Failed to read ZIP64 End of Central Directory Locator signature %08x", signature)); } uint32_t ignored32; ReadLittleEndian32(reader, locator.disk_number_with_cd); ReadLittleEndianSigned64(reader, locator.cd_offset); ReadLittleEndian32(reader, ignored32); if (locator.cd_offset < 0) { ABSL_LOG_IF(INFO, zip_logging && !reader.ok()) << reader.status(); return absl::InvalidArgumentError( "Failed to read ZIP64 End of Central Directory Locator"); } return absl::OkStatus(); } absl::Status ReadEOCD64(riegeli::Reader &reader, ZipEOCD &eocd) { if (!reader.Pull(ZipEOCD::kEOCD64RecordSize)) { return absl::InvalidArgumentError( "ZIP EOCD Entry insufficient data available"); } auto eocd_pos = reader.pos(); uint32_t signature; ReadLittleEndian32(reader, signature); if (signature != 0x06064b50) { return absl::InvalidArgumentError( "Failed to read ZIP64 Central Directory Entry signature"); } uint64_t eocd_size; ReadLittleEndian64(reader, eocd_size); if (eocd_size < 44 || !reader.Pull(eocd_size)) { return absl::InvalidArgumentError( "Failed to read ZIP64 End of Central Directory"); } riegeli::LimitingReader oecd64_reader( &reader, 
riegeli::LimitingReaderBase::Options().set_exact_length(eocd_size)); uint16_t version_madeby; uint16_t version_needed_to_extract; uint32_t disk_number; uint32_t disk_number_with_cd; uint64_t total_num_entries; ReadLittleEndian16(oecd64_reader, version_madeby); ReadLittleEndian16(oecd64_reader, version_needed_to_extract); ReadLittleEndian32(oecd64_reader, disk_number); ReadLittleEndian32(oecd64_reader, disk_number_with_cd); ReadLittleEndian64(oecd64_reader, eocd.num_entries); ReadLittleEndian64(oecd64_reader, total_num_entries); ReadLittleEndianSigned64(oecd64_reader, eocd.cd_size); ReadLittleEndianSigned64(oecd64_reader, eocd.cd_offset); if (disk_number != disk_number_with_cd || eocd.num_entries != total_num_entries || eocd.num_entries == std::numeric_limits<uint16_t>::max() || eocd.cd_size == std::numeric_limits<uint16_t>::max() || eocd.cd_offset == std::numeric_limits<uint32_t>::max() || eocd.cd_size < 0 || eocd.cd_offset < 0) { return absl::InvalidArgumentError( "Failed to read ZIP64 End of Central Directory"); } oecd64_reader.Seek(eocd_size); eocd.record_offset = eocd_pos; return absl::OkStatus(); } absl::Status ReadEOCD(riegeli::Reader &reader, ZipEOCD &eocd) { if (!reader.Pull(ZipEOCD::kEOCDRecordSize)) { return absl::InvalidArgumentError( "ZIP EOCD Entry insufficient data available"); } auto eocd_pos = reader.pos(); uint32_t signature; ReadLittleEndian32(reader, signature); if (signature != 0x06054b50) { return absl::InvalidArgumentError( "Failed to read ZIP Central Directory Entry signature"); } uint16_t disk_number; uint16_t disk_number_with_cd; uint16_t num_entries; uint16_t total_num_entries; uint32_t cd_size; uint32_t cd_offset; uint16_t comment_length; ReadLittleEndian16(reader, disk_number); ReadLittleEndian16(reader, disk_number_with_cd); ReadLittleEndian16(reader, num_entries); ReadLittleEndian16(reader, total_num_entries); ReadLittleEndian32(reader, cd_size); ReadLittleEndian32(reader, cd_offset); ReadLittleEndian16(reader, comment_length); if (num_entries != total_num_entries) { ABSL_LOG(INFO) << "ZIP num_entries mismatch " << num_entries << " vs " << total_num_entries; return absl::InvalidArgumentError( "Failed to read ZIP End of Central Directory"); } if (disk_number != disk_number_with_cd) { ABSL_LOG(INFO) << "ZIP disk_number mismatch " << disk_number << " vs " << disk_number_with_cd; return absl::InvalidArgumentError( "Failed to read ZIP End of Central Directory"); } if (comment_length > 0 && !reader.Read(comment_length, eocd.comment)) { return absl::InvalidArgumentError( "Failed to read ZIP End of Central Directory"); } reader.VerifyEnd(); if (!reader.status().ok()) { return absl::InvalidArgumentError( "Failed to read ZIP End of Central Directory"); } eocd.record_offset = eocd_pos; eocd.num_entries = num_entries; eocd.cd_size = cd_size; eocd.cd_offset = cd_offset; if (total_num_entries == std::numeric_limits<uint16_t>::max() || cd_offset == std::numeric_limits<uint32_t>::max()) { eocd.cd_offset = std::numeric_limits<uint32_t>::max(); } return absl::OkStatus(); } std::variant<absl::Status, int64_t> TryReadFullEOCD(riegeli::Reader &reader, ZipEOCD &eocd, int64_t offset_adjustment) { if (!internal::FindLast( reader, std::string_view(reinterpret_cast<const char *>(kEOCDLiteral), sizeof(kEOCDLiteral)))) { return absl::InvalidArgumentError("Failed to find valid ZIP EOCD"); } int64_t eocd_start = reader.pos(); ZipEOCD last_eocd{}; TENSORSTORE_RETURN_IF_ERROR(ReadEOCD(reader, last_eocd)); if (last_eocd.cd_offset != std::numeric_limits<uint32_t>::max()) { eocd = last_eocd; 
reader.Seek(eocd_start + 4); return absl::OkStatus(); } if (eocd_start < ZipEOCD64Locator::kRecordSize) { return absl::InvalidArgumentError("Block does not contain EOCD64 Locator"); } if (!reader.Seek(eocd_start - ZipEOCD64Locator::kRecordSize)) { if (!reader.ok() && !reader.status().ok()) { return MaybeAnnotateStatus(reader.status(), "Failed to read EOCD64 Locator"); } return absl::InvalidArgumentError("Failed to read EOCD64 Locator"); } ZipEOCD64Locator locator; TENSORSTORE_RETURN_IF_ERROR(ReadEOCD64Locator(reader, locator)); if (offset_adjustment < 0) { return locator.cd_offset; } auto target_pos = locator.cd_offset - offset_adjustment; if (target_pos < 0) { assert(offset_adjustment > 0); return locator.cd_offset; } if (!reader.Seek(target_pos)) { if (!reader.ok() && !reader.status().ok()) { return MaybeAnnotateStatus(reader.status(), "Failed to read EOCD64"); } return absl::InvalidArgumentError("Failed to read EOCD64"); } TENSORSTORE_RETURN_IF_ERROR(ReadEOCD64(reader, last_eocd)); eocd = last_eocd; reader.Seek(eocd_start + 4); return absl::OkStatus(); } absl::Status ReadCentralDirectoryEntry(riegeli::Reader &reader, ZipEntry &entry) { if (!reader.Pull(ZipEntry::kCentralRecordSize)) { return absl::InvalidArgumentError( "ZIP Central Directory Entry insufficient data available"); } uint32_t signature; ReadLittleEndian32(reader, signature); if (signature != 0x02014b50) { return absl::InvalidArgumentError( "Failed to read ZIP Central Directory Entry signature"); } uint32_t uncompressed_size = 0; uint32_t compressed_size; uint32_t relative_header_offset = 0; uint16_t file_name_length = 0; uint16_t extra_field_length = 0; uint16_t file_comment_length = 0; uint16_t last_mod_time; uint16_t last_mod_date; uint16_t ignored16; uint16_t compression_method; ReadLittleEndian16(reader, entry.version_madeby); ReadLittleEndian16(reader, ignored16); ReadLittleEndian16(reader, entry.flags); ReadLittleEndian16(reader, compression_method); ReadLittleEndian16(reader, last_mod_time); ReadLittleEndian16(reader, last_mod_date); ReadLittleEndian32(reader, entry.crc); ReadLittleEndian32(reader, compressed_size); ReadLittleEndian32(reader, uncompressed_size); ReadLittleEndian16(reader, file_name_length); ReadLittleEndian16(reader, extra_field_length); ReadLittleEndian16(reader, file_comment_length); ReadLittleEndian16(reader, ignored16); ReadLittleEndian16(reader, entry.internal_fa); ReadLittleEndian32(reader, entry.external_fa); ReadLittleEndian32(reader, relative_header_offset); entry.compressed_size = compressed_size; entry.uncompressed_size = uncompressed_size; entry.local_header_offset = relative_header_offset; entry.mtime = MakeMSDOSTime(last_mod_date, last_mod_time); entry.compression_method = static_cast<ZipCompression>(compression_method); if (file_name_length > 0 && !reader.Read(file_name_length, entry.filename)) { return absl::InvalidArgumentError( "Failed to read ZIP Central Directory Entry (filename)"); } assert(entry.filename.size() == file_name_length); if (extra_field_length > 0) { assert(extra_field_length > 4); riegeli::LimitingReader extra_reader( &reader, riegeli::LimitingReaderBase::Options().set_exact_length( extra_field_length)); extra_reader.SetReadAllHint(true); if (auto status = ReadExtraField(extra_reader, entry); !status.ok()) { return status; } extra_reader.Seek(extra_field_length); } if (file_comment_length > 0 && !reader.Read(file_comment_length, entry.comment)) { return absl::InvalidArgumentError( "Failed to read ZIP Central Directory Entry (comment)"); } entry.end_of_header_offset 
= reader.pos(); entry.estimated_read_size = std::max(entry.compressed_size, entry.uncompressed_size) + file_name_length + extra_field_length + ZipEntry::kLocalRecordSize + (entry.flags & kHasDataDescriptor ? 12 : 0); return absl::OkStatus(); } absl::Status ReadLocalEntry(riegeli::Reader &reader, ZipEntry &entry) { if (!reader.Pull(ZipEntry::kLocalRecordSize)) { return absl::InvalidArgumentError( "ZIP Local Entry insufficient data available"); } uint32_t signature; ReadLittleEndian32(reader, signature); if (signature != 0x04034b50) { return absl::InvalidArgumentError( "Failed to read ZIP Local Entry signature"); } uint16_t ignored16; uint16_t compression_method; uint16_t last_mod_time; uint16_t last_mod_date; uint32_t uncompressed_size; uint32_t compressed_size; uint16_t file_name_length = 0; uint16_t extra_field_length = 0; ReadLittleEndian16(reader, ignored16); ReadLittleEndian16(reader, entry.flags); ReadLittleEndian16(reader, compression_method); ReadLittleEndian16(reader, last_mod_time); ReadLittleEndian16(reader, last_mod_date); ReadLittleEndian32(reader, entry.crc); ReadLittleEndian32(reader, compressed_size); ReadLittleEndian32(reader, uncompressed_size); ReadLittleEndian16(reader, file_name_length); ReadLittleEndian16(reader, extra_field_length); entry.version_madeby = 0; entry.internal_fa = 0; entry.external_fa = 0; entry.local_header_offset = 0; entry.estimated_read_size = 0; entry.compressed_size = compressed_size; entry.uncompressed_size = uncompressed_size; entry.mtime = MakeMSDOSTime(last_mod_date, last_mod_time); entry.compression_method = static_cast<ZipCompression>(compression_method); if (file_name_length > 0 && !reader.Read(file_name_length, entry.filename)) { return absl::InvalidArgumentError( "Failed to read ZIP Local Entry (filename)"); } assert(entry.filename.size() == file_name_length); entry.end_of_header_offset = reader.pos() + extra_field_length; if (extra_field_length > 0) { assert(extra_field_length > 4); riegeli::LimitingReader extra_reader( &reader, riegeli::LimitingReaderBase::Options().set_exact_length( extra_field_length)); extra_reader.SetReadAllHint(true); if (auto status = ReadExtraField(extra_reader, entry); !status.ok()) { return status; } extra_reader.Seek(extra_field_length); } return absl::OkStatus(); } absl::Status ValidateEntryIsSupported(const ZipEntry &entry) { if (entry.flags & 0x01 || entry.flags & (uint16_t{1} << 6) || entry.flags & (uint16_t{1} << 13) || entry.compression_method == ZipCompression::kAes) { return absl::InvalidArgumentError( tensorstore::StrCat("ZIP encryption is not supported")); } if (entry.compression_method != ZipCompression::kStore && entry.compression_method != ZipCompression::kDeflate && entry.compression_method != ZipCompression::kBzip2 && entry.compression_method != ZipCompression::kZStd && entry.compression_method != ZipCompression::kXZ) { return absl::InvalidArgumentError( tensorstore::StrCat("ZIP compression method ", entry.compression_method, " is not supported")); } if (absl::EndsWith(entry.filename, "/")) { return absl::InvalidArgumentError("ZIP directory entries cannot be read"); } return absl::OkStatus(); } tensorstore::Result<std::unique_ptr<riegeli::Reader>> GetRawReader( riegeli::Reader *reader, ZipEntry &entry) { assert(reader != nullptr); if (entry.flags & kHasDataDescriptor) { const auto start_pos = reader->pos(); if (!reader->Skip(entry.compressed_size)) { return reader->status(); } static constexpr size_t kZipDataDescriptorSize = 16; static constexpr size_t kZip64DataDescriptorSize = 24; if 
(!reader->Pull(entry.is_zip64 ? kZip64DataDescriptorSize : kZipDataDescriptorSize)) { return absl::DataLossError("Failed to read ZIP DataDescriptor"); } uint32_t signature, crc32; ReadLittleEndian32(*reader, signature); ReadLittleEndian32(*reader, crc32); if (signature != 0x08074b50) { return absl::DataLossError(absl::StrFormat( "Failed to read ZIP DataDescriptor signature %08x", signature)); } if (entry.crc == 0) entry.crc = crc32; if (entry.is_zip64) { uint64_t compressed_size, uncompressed_size; ReadLittleEndian64(*reader, compressed_size); ReadLittleEndian64(*reader, uncompressed_size); if (entry.compressed_size == 0) entry.compressed_size = compressed_size; if (entry.uncompressed_size == 0) entry.uncompressed_size = uncompressed_size; } else { uint32_t compressed_size, uncompressed_size; ReadLittleEndian32(*reader, compressed_size); ReadLittleEndian32(*reader, uncompressed_size); if (entry.compressed_size == 0) { entry.compressed_size = compressed_size; } if (entry.uncompressed_size == 0) { entry.uncompressed_size = uncompressed_size; } } if (!reader->Seek(start_pos)) { return reader->status(); } } using Reader = riegeli::LimitingReader<riegeli::Reader *>; return std::make_unique<Reader>( reader, riegeli::LimitingReaderBase::Options().set_exact_length( entry.compressed_size)); } tensorstore::Result<std::unique_ptr<riegeli::Reader>> GetReader( riegeli::Reader *reader, ZipEntry &entry) { TENSORSTORE_ASSIGN_OR_RETURN(std::unique_ptr<riegeli::Reader> base_reader, GetRawReader(reader, entry)); switch (entry.compression_method) { case ZipCompression::kStore: { using PLReader = riegeli::PrefixLimitingReader<std::unique_ptr<riegeli::Reader>>; return std::make_unique<PLReader>( std::move(base_reader), PLReader::Options().set_base_pos(reader->pos())); } case ZipCompression::kDeflate: { using DeflateReader = riegeli::ZlibReader<std::unique_ptr<riegeli::Reader>>; return std::make_unique<DeflateReader>( std::move(base_reader), DeflateReader::Options().set_header(DeflateReader::Header::kRaw)); } case ZipCompression::kBzip2: { using Bzip2Reader = riegeli::Bzip2Reader<std::unique_ptr<riegeli::Reader>>; return std::make_unique<Bzip2Reader>(std::move(base_reader)); } case ZipCompression::kZStd: { using ZStdReader = riegeli::ZstdReader<std::unique_ptr<riegeli::Reader>>; return std::make_unique<ZStdReader>(std::move(base_reader)); } case ZipCompression::kXZ: { using XzReader = riegeli::XzReader<std::unique_ptr<riegeli::Reader>>; return std::make_unique<XzReader>( std::move(base_reader), XzReader::Options() .set_container(XzReader::Container::kXz) .set_concatenate(true) ); } default: break; } return absl::InvalidArgumentError(tensorstore::StrCat( "Unsupported ZIP compression method ", entry.compression_method)); } } }
#include "tensorstore/internal/compression/zip_details.h" #include <stddef.h> #include <stdint.h> #include <string> #include <string_view> #include <utility> #include <variant> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/flags/flag.h" #include "absl/log/absl_check.h" #include "absl/status/status.h" #include "absl/strings/cord.h" #include "absl/time/time.h" #include "riegeli/bytes/cord_reader.h" #include "riegeli/bytes/fd_reader.h" #include "riegeli/bytes/read_all.h" #include "riegeli/bytes/reader.h" #include "riegeli/bytes/string_reader.h" #include "tensorstore/internal/riegeli/find.h" #include "tensorstore/util/status.h" #include "tensorstore/util/status_testutil.h" using ::tensorstore::internal::FindFirst; using ::tensorstore::internal::StartsWith; using ::tensorstore::internal_zip::kCentralHeaderLiteral; using ::tensorstore::internal_zip::kEOCDLiteral; using ::tensorstore::internal_zip::kLocalHeaderLiteral; using ::tensorstore::internal_zip::ReadCentralDirectoryEntry; using ::tensorstore::internal_zip::ReadEOCD; using ::tensorstore::internal_zip::ReadEOCD64Locator; using ::tensorstore::internal_zip::ReadLocalEntry; using ::tensorstore::internal_zip::TryReadFullEOCD; using ::tensorstore::internal_zip::ZipCompression; using ::tensorstore::internal_zip::ZipEntry; using ::tensorstore::internal_zip::ZipEOCD; using ::tensorstore::internal_zip::ZipEOCD64Locator; using ::tensorstore::internal_zip::kCentralHeaderLiteral; using ::tensorstore::internal_zip::kEOCD64Literal; using ::tensorstore::internal_zip::kEOCD64LocatorLiteral; using ::tensorstore::internal_zip::kEOCDLiteral; using ::tensorstore::internal_zip::kLocalHeaderLiteral; ABSL_FLAG(std::string, tensorstore_test_data, "", "Path to internal/compression/testdata/data.zip"); namespace { absl::Cord GetTestZipFileData() { ABSL_CHECK(!absl::GetFlag(FLAGS_tensorstore_test_data).empty()); absl::Cord filedata; TENSORSTORE_CHECK_OK(riegeli::ReadAll( riegeli::FdReader(absl::GetFlag(FLAGS_tensorstore_test_data)), filedata)); ABSL_CHECK_EQ(filedata.size(), 319482); return filedata; } static constexpr unsigned char kMinimalZip[] = { 0x50, 0x4b, 0x5, 0x6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; static constexpr unsigned char kZip64OneEmptyFile[] = { 0x50, 0x4b, 0x03, 0x04, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x72, 0x5b, 0x40, 0x07, 0xa1, 0xea, 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x14, 0x00, 0x2d, 0x01, 0x00, 0x10, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x0a, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x72, 0x5b, 0x40, 0x07, 0xa1, 0xea, 0xdd, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0x11, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x50, 0x4b, 0x06, 0x06, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x03, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x2f, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x00, 0x00, }; static constexpr unsigned char kZipTest2[] = { 
0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x55, 0x54, 0x09, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x04, 0x00, 0x64, 0x00, 0x14, 0x00, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x98, 0x2b, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x55, 0x54, 0x09, 0x00, 0x03, 0x09, 0x15, 0xe4, 0x41, 0x9a, 0x15, 0xe4, 0x41, 0x55, 0x78, 0x04, 0x00, 0xe8, 0x03, 0x64, 0x00, 0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x15, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x32, 0x55, 0x54, 0x09, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x04, 0x00, 0xe8, 0x03, 0x64, 0x00, 0x74, 0x65, 0x73, 0x74, 0x0a, 0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x04, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xb4, 0x81, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x73, 0x74, 0x55, 0x54, 0x05, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x98, 0x2b, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0xed, 0x41, 0x3c, 0x00, 0x00, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x55, 0x54, 0x05, 0x00, 0x03, 0x09, 0x15, 0xe4, 0x41, 0x55, 0x78, 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x17, 0x03, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x00, 0xd5, 0x7d, 0x46, 0x2f, 0xc6, 0x35, 0xb9, 0x3b, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xb4, 0x81, 0x77, 0x00, 0x00, 0x00, 0x74, 0x65, 0x73, 0x74, 0x64, 0x69, 0x72, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x32, 0x55, 0x54, 0x05, 0x00, 0x03, 0x41, 0x72, 0x81, 0x3f, 0x55, 0x78, 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0xca, 0x00, 0x00, 0x00, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00, }; template <size_t N> std::string_view StringViewOf(const unsigned char (&str)[N]) { return std::string_view(reinterpret_cast<const char*>(str), N); } TEST(ZipDetailsTest, DecodeEOCD) { riegeli::StringReader string_reader(StringViewOf(kMinimalZip)); EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCDLiteral))); ZipEOCD eocd; ASSERT_THAT(ReadEOCD(string_reader, eocd), ::tensorstore::IsOk()); EXPECT_EQ(eocd.num_entries, 0); EXPECT_EQ(eocd.cd_size, 0); EXPECT_EQ(eocd.cd_offset, 0); } TEST(ZipDetailsTest, ReadEOCDZip64) { riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile)); EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCDLiteral))); ZipEOCD eocd; ASSERT_THAT(ReadEOCD(string_reader, eocd), ::tensorstore::IsOk()); EXPECT_EQ(eocd.num_entries, 1); EXPECT_EQ(eocd.cd_size, 47); EXPECT_EQ(eocd.cd_offset, 53); } TEST(ZipDetailsTest, ReadEOCD6LocatorZip64) { riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile)); EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCD64LocatorLiteral))); ZipEOCD64Locator eocd64_locator; 
ASSERT_THAT(ReadEOCD64Locator(string_reader, eocd64_locator), ::tensorstore::IsOk()); EXPECT_EQ(eocd64_locator.disk_number_with_cd, 0); EXPECT_EQ(eocd64_locator.cd_offset, 100); } TEST(ZipDetailsTest, ReadEOCD64Zip64) { riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile)); EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCD64Literal))); EXPECT_EQ(100, string_reader.pos()); ZipEOCD eocd64; ASSERT_THAT(ReadEOCD64(string_reader, eocd64), ::tensorstore::IsOk()); EXPECT_EQ(eocd64.num_entries, 1); EXPECT_EQ(eocd64.cd_size, 47); EXPECT_EQ(eocd64.cd_offset, 53); } TEST(ZipDetailsTest, TryReadFullEOCDZip64) { riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile)); EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCD64Literal))); EXPECT_EQ(100, string_reader.pos()); ZipEOCD eocd64; ASSERT_THAT(TryReadFullEOCD(string_reader, eocd64, 0), ::testing::VariantWith<absl::Status>(::tensorstore::IsOk())); EXPECT_EQ(eocd64.num_entries, 1); EXPECT_EQ(eocd64.cd_size, 47); EXPECT_EQ(eocd64.cd_offset, 53); } TEST(ZipDetailsTest, ReadCentralHeaderZip64) { riegeli::StringReader string_reader(StringViewOf(kZip64OneEmptyFile)); EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kCentralHeaderLiteral))); EXPECT_EQ(53, string_reader.pos()); ZipEntry central_header; ASSERT_THAT(ReadCentralDirectoryEntry(string_reader, central_header), ::tensorstore::IsOk()); EXPECT_EQ(central_header.version_madeby, 798); EXPECT_EQ(central_header.flags, 0); EXPECT_EQ(central_header.compression_method, ZipCompression::kStore); EXPECT_EQ(central_header.crc, 3723141383); EXPECT_EQ(central_header.compressed_size, 2); EXPECT_EQ(central_header.uncompressed_size, 2); EXPECT_EQ(central_header.internal_fa, 1); EXPECT_EQ(central_header.external_fa, 293601280); EXPECT_EQ(central_header.local_header_offset, 0); EXPECT_EQ(central_header.filename, "-"); EXPECT_EQ(central_header.comment, ""); EXPECT_GT(central_header.mtime, absl::UnixEpoch()); } TEST(ZipDetailsTest, ReadLocalHeaderZip64) { riegeli::StringReader string_reader( reinterpret_cast<const char*>(kZip64OneEmptyFile), sizeof(kZip64OneEmptyFile)); EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kLocalHeaderLiteral))); ZipEntry local_header; ASSERT_THAT(ReadLocalEntry(string_reader, local_header), ::tensorstore::IsOk()); EXPECT_EQ(local_header.version_madeby, 0); EXPECT_EQ(local_header.flags, 0); EXPECT_EQ(local_header.compression_method, ZipCompression::kStore); EXPECT_EQ(local_header.crc, 3723141383); EXPECT_EQ(local_header.compressed_size, 2); EXPECT_EQ(local_header.uncompressed_size, 2); EXPECT_EQ(local_header.internal_fa, 0); EXPECT_EQ(local_header.external_fa, 0); EXPECT_EQ(local_header.local_header_offset, 0); EXPECT_EQ(local_header.filename, "-"); EXPECT_EQ(local_header.comment, ""); EXPECT_GT(local_header.mtime, absl::UnixEpoch()); } TEST(ZipDetailsTest, Decode) { riegeli::StringReader string_reader(reinterpret_cast<const char*>(kZipTest2), sizeof(kZipTest2)); EXPECT_TRUE(FindFirst(string_reader, StringViewOf(kEOCDLiteral))); ZipEOCD eocd; ASSERT_THAT(ReadEOCD(string_reader, eocd), ::tensorstore::IsOk()); EXPECT_EQ(eocd.num_entries, 3); EXPECT_EQ(eocd.cd_size, 202); EXPECT_EQ(eocd.cd_offset, 188); string_reader.Seek(eocd.cd_offset); std::vector<ZipEntry> central_headers; for (size_t i = 0; i < eocd.num_entries; ++i) { EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kCentralHeaderLiteral))) << i; ZipEntry header; ASSERT_THAT(ReadCentralDirectoryEntry(string_reader, header), ::tensorstore::IsOk()); central_headers.push_back(std::move(header)); } 
std::vector<ZipEntry> local_headers; for (const auto& header : central_headers) { ZipEntry local_header; string_reader.Seek(header.local_header_offset); EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral))); ASSERT_THAT(ReadLocalEntry(string_reader, local_header), ::tensorstore::IsOk()); local_headers.push_back(std::move(local_header)); absl::Cord data; string_reader.Read(local_headers.back().compressed_size, data); } ASSERT_THAT(local_headers.size(), 3); for (size_t i = 0; i < local_headers.size(); ++i) { EXPECT_EQ(local_headers[i].flags, central_headers[i].flags); EXPECT_EQ(local_headers[i].compression_method, central_headers[i].compression_method); EXPECT_EQ(local_headers[i].crc, central_headers[i].crc); EXPECT_EQ(local_headers[i].compressed_size, central_headers[i].compressed_size); EXPECT_EQ(local_headers[i].uncompressed_size, central_headers[i].uncompressed_size); EXPECT_EQ(local_headers[i].filename, central_headers[i].filename); } } struct ZipDirectory { ZipEOCD eocd; std::vector<ZipEntry> entries; }; absl::Status ReadDirectory(riegeli::Reader& reader, ZipDirectory& directory) { int64_t initial_pos = reader.pos(); auto response = tensorstore::internal_zip::TryReadFullEOCD(reader, directory.eocd, -1); if (std::holds_alternative<int64_t>(response)) { reader.Seek(initial_pos); response = tensorstore::internal_zip::TryReadFullEOCD(reader, directory.eocd, 0); } if (auto* status = std::get_if<absl::Status>(&response); status != nullptr && !status->ok()) { return std::move(*status); } if (std::holds_alternative<int64_t>(response)) { return absl::InternalError("ZIP incomplete"); } reader.Seek(directory.eocd.cd_offset); std::vector<ZipEntry> central_headers; for (size_t i = 0; i < directory.eocd.num_entries; ++i) { ZipEntry header{}; if (auto entry_status = ReadCentralDirectoryEntry(reader, header); !entry_status.ok()) { return entry_status; } directory.entries.push_back(std::move(header)); } return absl::OkStatus(); } TEST(ZipDetailsTest, ReadDirectory) { riegeli::StringReader string_reader(reinterpret_cast<const char*>(kZipTest2), sizeof(kZipTest2)); ZipDirectory dir; EXPECT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk()); std::vector<ZipEntry> local_headers; for (const auto& header : dir.entries) { ZipEntry local_header; string_reader.Seek(header.local_header_offset); EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral))); EXPECT_THAT(ReadLocalEntry(string_reader, local_header), ::tensorstore::IsOk()); local_headers.push_back(std::move(local_header)); } EXPECT_THAT(local_headers.size(), 3); for (size_t i = 0; i < local_headers.size(); ++i) { EXPECT_EQ(local_headers[i].flags, dir.entries[i].flags); EXPECT_EQ(local_headers[i].compression_method, dir.entries[i].compression_method); EXPECT_EQ(local_headers[i].crc, dir.entries[i].crc); EXPECT_EQ(local_headers[i].compressed_size, dir.entries[i].compressed_size); EXPECT_EQ(local_headers[i].uncompressed_size, dir.entries[i].uncompressed_size); EXPECT_EQ(local_headers[i].filename, dir.entries[i].filename); } TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader, GetReader(&string_reader, local_headers[0])); std::string data; EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk()); EXPECT_EQ(data, "test\n"); EXPECT_EQ(data.size(), local_headers[0].uncompressed_size); } TEST(ZipDetailsTest, Xz) { static constexpr unsigned char kXZ[] = { 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x00, 0x00, 0x5f, 0x00, 0x89, 0x8a, 0x36, 0x4f, 0x28, 0xe2, 0xde, 0xa0, 0x48, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 
0x0f, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0xfd, 0x37, 0x7a, 0x58, 0x5a, 0x00, 0x00, 0x00, 0xff, 0x12, 0xd9, 0x41, 0x02, 0x00, 0x21, 0x01, 0x00, 0x00, 0x00, 0x00, 0x37, 0x27, 0x97, 0xd6, 0xe0, 0x00, 0x3f, 0x00, 0x11, 0x5e, 0x00, 0x30, 0xec, 0xbd, 0xa0, 0xa3, 0x19, 0xd7, 0x9c, 0xf2, 0xec, 0x93, 0x6b, 0xfe, 0x81, 0xb3, 0x7a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x25, 0x40, 0x5c, 0x24, 0xa9, 0xbe, 0x06, 0x72, 0x9e, 0x7a, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0x5a, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, 0x14, 0x00, 0x00, 0x00, 0x5f, 0x00, 0x89, 0x8a, 0x36, 0x4f, 0x28, 0xe2, 0xde, 0xa0, 0x48, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x3d, 0x00, 0x00, 0x00, 0x75, 0x00, 0x00, 0x00, 0x00, 0x00, }; riegeli::StringReader string_reader(reinterpret_cast<const char*>(kXZ), sizeof(kXZ)); ZipDirectory dir; ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk()); EXPECT_THAT(dir.entries.size(), ::testing::Gt(0)); ZipEntry local_header; string_reader.Seek(dir.entries[0].local_header_offset); EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral))); ASSERT_THAT(ReadLocalEntry(string_reader, local_header), ::tensorstore::IsOk()); EXPECT_EQ(local_header.compression_method, ZipCompression::kXZ); TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader, GetReader(&string_reader, local_header)); std::string data; EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk()); EXPECT_EQ(data, "aaaaaaaaaaaaaa\r\nbbbbbbbbbbbbbb\r\naaaaaaaaaaaaaa\r\ncccccccccccc" "cc\r\n"); EXPECT_EQ(data.size(), local_header.uncompressed_size); } TEST(ZipDetailsTest, Zstd) { static constexpr unsigned char kZStd[] = { 0x50, 0x4b, 0x03, 0x04, 0x3f, 0x00, 0x00, 0x00, 0x5d, 0x00, 0xa2, 0x69, 0xf2, 0x50, 0x28, 0xe2, 0xde, 0xa0, 0x20, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x28, 0xb5, 0x2f, 0xfd, 0x20, 0x40, 0xbd, 0x00, 0x00, 0x68, 0x61, 0x61, 0x0d, 0x0a, 0x62, 0x0d, 0x0a, 0x61, 0x0d, 0x0a, 0x63, 0x0d, 0x0a, 0x04, 0x10, 0x00, 0xc7, 0x38, 0xc6, 0x31, 0x38, 0x2c, 0x50, 0x4b, 0x01, 0x02, 0x3f, 0x00, 0x3f, 0x00, 0x00, 0x00, 0x5d, 0x00, 0xa2, 0x69, 0xf2, 0x50, 0x28, 0xe2, 0xde, 0xa0, 0x20, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x3d, 0x00, 0x00, 0x00, 0x4d, 0x00, 0x00, 0x00, 0x00, 0x00, }; riegeli::StringReader string_reader(StringViewOf(kZStd)); ZipDirectory dir; ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk()); EXPECT_THAT(dir.entries.size(), ::testing::Gt(0)); ZipEntry local_header; string_reader.Seek(dir.entries[0].local_header_offset); EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral))); ASSERT_THAT(ReadLocalEntry(string_reader, local_header), ::tensorstore::IsOk()); EXPECT_EQ(local_header.compression_method, ZipCompression::kZStd); TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader, GetReader(&string_reader, local_header)); std::string data; 
EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk()); EXPECT_EQ(data, "aaaaaaaaaaaaaa\r\nbbbbbbbbbbbbbb\r\naaaaaaaaaaaaaa\r\ncccccccccccc" "cc\r\n"); EXPECT_EQ(data.size(), local_header.uncompressed_size); } TEST(ZipDetailsTest, Bzip2) { static constexpr unsigned char kBzip2[] = { 0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x54, 0x74, 0x45, 0x3c, 0x48, 0x40, 0x35, 0xb0, 0x2f, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x42, 0x5a, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26, 0x53, 0x59, 0x03, 0x64, 0xc8, 0x04, 0x00, 0x00, 0x07, 0x41, 0x00, 0x00, 0x10, 0x38, 0x00, 0x20, 0x00, 0x30, 0xcd, 0x34, 0x12, 0x6a, 0x7a, 0x95, 0x10, 0x26, 0x4e, 0xcd, 0x9f, 0x17, 0x72, 0x45, 0x38, 0x50, 0x90, 0x03, 0x64, 0xc8, 0x04, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x0a, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x54, 0x74, 0x45, 0x3c, 0x48, 0x40, 0x35, 0xb0, 0x2f, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x81, 0x00, 0x00, 0x00, 0x00, 0x61, 0x62, 0x61, 0x63, 0x2d, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x2e, 0x74, 0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x3d, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00, }; riegeli::StringReader string_reader(StringViewOf(kBzip2)); ZipDirectory dir; ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk()); EXPECT_THAT(dir.entries.size(), ::testing::Gt(0)); ZipEntry local_header; string_reader.Seek(dir.entries[0].local_header_offset); EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral))); ASSERT_THAT(ReadLocalEntry(string_reader, local_header), ::tensorstore::IsOk()); EXPECT_EQ(local_header.compression_method, ZipCompression::kBzip2); TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader, GetReader(&string_reader, local_header)); std::string data; EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk()); EXPECT_EQ(data, "aaaaaaaaaaaaaa\nbbbbbbbbbbbbbb\naaaaaaaaaaaaaa\ncccccccccccccc\n"); EXPECT_EQ(data.size(), local_header.uncompressed_size); } TEST(ZipDetailsTest, Deflate) { static constexpr unsigned char kDeflate[] = { 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x56, 0x5e, 0x9c, 0x40, 0xb0, 0x91, 0x01, 0x58, 0x12, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x66, 0x69, 0x72, 0x73, 0x74, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x4b, 0xcb, 0x2c, 0x2a, 0x2e, 0x29, 0x48, 0x2c, 0x2a, 0x29, 0x4e, 0x4d, 0xce, 0xcf, 0x4b, 0x01, 0xb1, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x1e, 0x03, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x56, 0x5e, 0x9c, 0x40, 0xb0, 0x91, 0x01, 0x58, 0x12, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xb4, 0x81, 0x00, 0x00, 0x00, 0x00, 0x66, 0x69, 0x72, 0x73, 0x74, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00, 0x00, 0x00, 0x3b, 0x00, 0x00, 0x00, 0x00, 0x00, }; riegeli::StringReader string_reader(StringViewOf(kDeflate)); ZipDirectory dir; ASSERT_THAT(ReadDirectory(string_reader, dir), ::tensorstore::IsOk()); EXPECT_THAT(dir.entries.size(), ::testing::Gt(0)); ZipEntry local_header; string_reader.Seek(dir.entries[0].local_header_offset); EXPECT_TRUE(StartsWith(string_reader, StringViewOf(kLocalHeaderLiteral))); ASSERT_THAT(ReadLocalEntry(string_reader, local_header), ::tensorstore::IsOk()); 
EXPECT_EQ(local_header.compression_method, ZipCompression::kDeflate); TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto reader, GetReader(&string_reader, local_header)); std::string data; EXPECT_THAT(riegeli::ReadAll(*reader, data), ::tensorstore::IsOk()); EXPECT_EQ(data, "firstpartsecondpart"); EXPECT_EQ(data.size(), local_header.uncompressed_size); } TEST(TestdataTest, HeaderPositions) { riegeli::CordReader reader(GetTestZipFileData()); EXPECT_TRUE(FindFirst(reader, StringViewOf(kLocalHeaderLiteral))); EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral))); EXPECT_THAT(reader.pos(), 0); reader.Skip(4); EXPECT_TRUE(FindFirst(reader, StringViewOf(kLocalHeaderLiteral))); EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral))); EXPECT_THAT(reader.pos(), 0x19FA6); reader.Skip(4); EXPECT_TRUE(FindFirst(reader, StringViewOf(kLocalHeaderLiteral))); EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral))); EXPECT_THAT(reader.pos(), 0x33F4D); reader.Seek(0); EXPECT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral))); EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral))); EXPECT_THAT(reader.pos(), 0x4DEF3); reader.Skip(4); EXPECT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral))); EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral))); EXPECT_THAT(reader.pos(), 0x4DF43); reader.Skip(4); EXPECT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral))); EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral))); EXPECT_THAT(reader.pos(), 0x4DF94); reader.Seek(0); EXPECT_TRUE(FindFirst(reader, StringViewOf(kEOCDLiteral))); EXPECT_TRUE(StartsWith(reader, StringViewOf(kEOCDLiteral))); EXPECT_THAT(reader.pos(), 0x4DFE4); } TEST(TestdataTest, LocalHeaderEntry) { riegeli::CordReader reader(GetTestZipFileData()); ZipEntry header; EXPECT_TRUE(StartsWith(reader, StringViewOf(kLocalHeaderLiteral))); EXPECT_THAT(reader.pos(), 0); ASSERT_THAT(ReadLocalEntry(reader, header), ::tensorstore::IsOk()); EXPECT_THAT(header.version_madeby, 0); EXPECT_THAT(header.flags, 0x2); EXPECT_THAT(header.compression_method, ZipCompression::kDeflate); EXPECT_THAT(header.crc, 0x94EE1E3E); EXPECT_THAT(header.compressed_size, 0x00019F62); EXPECT_THAT(header.uncompressed_size, 0x00019F6F); EXPECT_THAT(header.internal_fa, 0); EXPECT_THAT(header.external_fa, 0); EXPECT_THAT(header.local_header_offset, 0); EXPECT_THAT(header.end_of_header_offset, 68); EXPECT_THAT(header.filename, "data/a.png"); EXPECT_THAT(header.comment, ""); EXPECT_THAT(header.is_zip64, false); } TEST(TestdataTest, CentralHeaderEntry) { riegeli::CordReader reader(GetTestZipFileData()); reader.Seek(0x4DEF3); ASSERT_TRUE(FindFirst(reader, StringViewOf(kCentralHeaderLiteral))); EXPECT_TRUE(StartsWith(reader, StringViewOf(kCentralHeaderLiteral))); EXPECT_THAT(reader.pos(), 0x4DEF3); ZipEntry header{}; ASSERT_THAT(ReadCentralDirectoryEntry(reader, header), ::tensorstore::IsOk()); EXPECT_THAT(header.flags, 0x2); EXPECT_THAT(header.compression_method, ZipCompression::kDeflate); EXPECT_THAT(header.crc, 0x94EE1E3E); EXPECT_THAT(header.compressed_size, 0x00019F62); EXPECT_THAT(header.uncompressed_size, 0x00019F6F); EXPECT_THAT(header.local_header_offset, 0); EXPECT_THAT(header.end_of_header_offset, 24); EXPECT_THAT(header.filename, "data/a.png"); EXPECT_THAT(header.comment, ""); EXPECT_THAT(header.is_zip64, false); EXPECT_THAT(header.version_madeby, 0x031E); EXPECT_THAT(header.internal_fa, 0); EXPECT_THAT(header.external_fa, 0x81240001); EXPECT_THAT(header.local_header_offset, 0); 
EXPECT_THAT(header.estimated_read_size, 106415); } TEST(TestdataTest, EOCD) { riegeli::CordReader reader(GetTestZipFileData()); ASSERT_TRUE(FindFirst(reader, StringViewOf(kEOCDLiteral))); EXPECT_TRUE(StartsWith(reader, StringViewOf(kEOCDLiteral))); EXPECT_THAT(reader.pos(), 0x4DFE4); ::tensorstore::internal_zip::ZipEOCD eocd{}; ASSERT_THAT(ReadEOCD(reader, eocd), ::tensorstore::IsOk()); EXPECT_THAT(eocd.num_entries, 3); EXPECT_THAT(eocd.cd_size, 0x000000F1); EXPECT_THAT(eocd.cd_offset, 0x0004DEF3); EXPECT_THAT(eocd.comment, ""); } TEST(TestdataTest, FileData) { riegeli::CordReader reader(GetTestZipFileData()); ZipEntry header; ASSERT_THAT(ReadLocalEntry(reader, header), ::tensorstore::IsOk()); EXPECT_THAT(reader.pos(), 0x0044); TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto entry_reader, tensorstore::internal_zip::GetReader(&reader, header)); std::string data; EXPECT_THAT(riegeli::ReadAll(*entry_reader, data), ::tensorstore::IsOk()); EXPECT_EQ(data.size(), header.uncompressed_size); } }
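// --- Illustrative sketch; not part of the tensorstore sources above. All
// names here (MiniEocd, ReadLittleEndian, FindEocd) are invented for this
// sketch. The tests locate ZIP records by their 4-byte signatures; a minimal,
// standard-library-only end-of-central-directory (EOCD) scan over an
// in-memory buffer, assuming a well-formed non-ZIP64 archive, searches
// backwards for the PK\x05\x06 signature and decodes the little-endian
// fields at fixed offsets:
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string_view>

struct MiniEocd {
  uint16_t num_entries;  // total entries in the central directory
  uint32_t cd_size;      // size of the central directory in bytes
  uint32_t cd_offset;    // offset of the central directory from file start
};

inline uint32_t ReadLittleEndian(std::string_view p, size_t pos, int n) {
  uint32_t v = 0;
  for (int i = n - 1; i >= 0; --i) {
    v = (v << 8) | static_cast<unsigned char>(p[pos + i]);
  }
  return v;
}

// Scans backwards from the end, since a trailing archive comment may follow
// the EOCD record. Returns std::nullopt if no signature is found.
std::optional<MiniEocd> FindEocd(std::string_view zip) {
  if (zip.size() < 22) return std::nullopt;  // EOCD is at least 22 bytes.
  for (size_t pos = zip.size() - 22;; --pos) {
    if (zip.substr(pos, 4) == std::string_view("PK\x05\x06", 4)) {
      return MiniEocd{
          static_cast<uint16_t>(ReadLittleEndian(zip, pos + 10, 2)),
          ReadLittleEndian(zip, pos + 12, 4),
          ReadLittleEndian(zip, pos + 16, 4)};
    }
    if (pos == 0) break;
  }
  return std::nullopt;
}
// e.g. FindEocd(StringViewOf(kMinimalZip)) would yield {0, 0, 0}, consistent
// with the DecodeEOCD expectations above.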
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/zip_details.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/zip_details_test.cc
4f887a6430414cd6088e1743555015b10f116d50
98b2faaa-6d2b-4983-9116-c31a4abe2d4e
cpp
tensorflow/tensorflow
call_graph_util
tensorflow/compiler/mlir/tensorflow/utils/call_graph_util.cc
tensorflow/compiler/mlir/tensorflow/utils/call_graph_util_test.cc
#include <vector> #include "absl/strings/string_view.h" #include "llvm/ADT/StringRef.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/Support/LLVM.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h" namespace mlir { std::vector<llvm::StringRef> GetEntryFunctionAttributeNames() { return {"tf.entry_function", tf_saved_model::kTfSavedModelInitializerTypeAttr}; } bool IsEntryFunction(func::FuncOp func) { for (const auto &attr : GetEntryFunctionAttributeNames()) { if (func->hasAttr(attr)) { return true; } } return false; } llvm::SmallVector<func::FuncOp> GetEntryFunctions(ModuleOp module) { llvm::SmallVector<func::FuncOp> entry_funcs; module.walk([&](func::FuncOp func) { if (IsEntryFunction(func)) { entry_funcs.push_back(func); } }); return entry_funcs; } LogicalResult GetCallees(SymbolUserOpInterface op, SymbolTable &symtab, llvm::SmallVector<func::FuncOp> &callees) { for (auto attr : op->getAttrs()) { auto sym = mlir::dyn_cast<SymbolRefAttr>(attr.getValue()); if (!sym) continue; auto callee = symtab.lookup<func::FuncOp>(sym.getRootReference()); if (!callee) { return op->emitError() << "Cannot find function " << sym.getRootReference(); } callees.push_back(callee); } return success(); } bool HasSingleBlock(func::FuncOp func) { return func->getNumRegions() == 1 && func.getBody().hasOneBlock(); } }
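// --- Illustrative sketch; not part of the TensorFlow sources above. ---
// GetCallees() resolves every SymbolRefAttr on an op through an MLIR
// SymbolTable. The same reachability idea, reduced to a plain string-keyed
// call graph (ReachableFuncs is a hypothetical stand-in for the symbol
// machinery, not a TensorFlow API), is a breadth-first walk from the entry
// functions:
#include <deque>
#include <map>
#include <set>
#include <string>
#include <vector>

std::set<std::string> ReachableFuncs(
    const std::map<std::string, std::vector<std::string>>& callees,
    const std::vector<std::string>& entry_funcs) {
  std::set<std::string> seen(entry_funcs.begin(), entry_funcs.end());
  std::deque<std::string> worklist(entry_funcs.begin(), entry_funcs.end());
  while (!worklist.empty()) {
    const std::string func = worklist.front();
    worklist.pop_front();
    auto it = callees.find(func);
    if (it == callees.end()) continue;  // Leaf function: nothing to resolve.
    for (const std::string& callee : it->second) {
      if (seen.insert(callee).second) worklist.push_back(callee);
    }
  }
  return seen;
}
// e.g. ReachableFuncs({{"entry_func", {"while_cond_func", "while_body_func"}}},
// {"entry_func"}) returns all three names.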
#include "tensorflow/compiler/mlir/tensorflow/utils/call_graph_util.h" #include "llvm/Support/raw_ostream.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/DialectRegistry.h" #include "mlir/IR/Location.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/OwningOpRef.h" #include "mlir/Parser/Parser.h" #include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h" #include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { TEST(CallGraphUtilTest, GetEntryFunctionAttributeNames) { auto attr_names = mlir::GetEntryFunctionAttributeNames(); EXPECT_EQ(attr_names.size(), 2); EXPECT_EQ(attr_names[0], "tf.entry_function"); EXPECT_EQ(attr_names[1], mlir::tf_saved_model::kTfSavedModelInitializerTypeAttr); } TEST(CallGraphUtilTest, GetEntryFunctions) { const char *const code = R"mlir( func.func @entry_func_1(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} { %0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>) func.return %0 : tensor<i32> } func.func @entry_func_2(%arg0: tensor<i32>) -> tensor<i32> attributes {tf_saved_model.initializer_type = ""} { %0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>) func.return %0 : tensor<i32> } func.func @func(%arg0: tensor<i32>) -> tensor<i32> { func.return %arg0 : tensor<i32> } )mlir"; mlir::MLIRContext context; context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>(); mlir::OwningOpRef<mlir::ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(code, &context); ASSERT_TRUE(module); auto entry_funcs = GetEntryFunctions(*module); EXPECT_EQ(entry_funcs.size(), 2); EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func_1"); EXPECT_EQ(entry_funcs[1].getSymName(), "entry_func_2"); } TEST(CallGraphUtilTest, GetCallees) { const char *const code = R"mlir( func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf_saved_model.initializer_type = ""} { %0 = "tf.While"(%arg0) {cond = @while_cond_func, body = @while_body_func, is_stateless = true} : (tensor<i32>) -> (tensor<i32>) func.return %0 : tensor<i32> } func.func @while_cond_func(%arg0: tensor<i32>) -> tensor<i1> { %0 = "tf.Const"() {value = dense<0> : tensor<i1>} : () -> tensor<i1> func.return %0 : tensor<i1> } func.func @while_body_func(%arg0: tensor<i32>) -> (tensor<i32>) { %0 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32> func.return %0 : tensor<i32> } )mlir"; mlir::MLIRContext context; context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>(); mlir::OwningOpRef<mlir::ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(code, &context); ASSERT_TRUE(module); mlir::SymbolTable symtab(*module); llvm::SmallVector<mlir::func::FuncOp> callees; module->walk([&](mlir::SymbolUserOpInterface op) { auto result = GetCallees(op, symtab, callees).succeeded(); ASSERT_TRUE(result); EXPECT_EQ(callees.size(), 2); EXPECT_EQ(callees[0].getSymName(), "while_body_func"); EXPECT_EQ(callees[1].getSymName(), "while_cond_func"); }); } TEST(CallGraphUtilTest, GetFirstOpsOfType) { const char *const 
code = R"mlir( func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} { %0 = "tf.While"(%arg0) {cond = @while_cond_func, body = @while_body_func, is_stateless = true} : (tensor<i32>) -> (tensor<i32>) func.return %0 : tensor<i32> } func.func @while_cond_func(%arg0: tensor<i32>) -> tensor<i1> { %0 = "tf.Const"() {value = dense<0> : tensor<i1>} : () -> tensor<i1> func.return %0 : tensor<i1> } func.func @while_body_func(%arg0: tensor<i32>) -> (tensor<i32>) { %0 = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @outer_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>) func.return %0 : tensor<i32> } func.func @outer_stateful_pcall_func(%arg0: tensor<i32>) -> (tensor<i32>) { %0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @inner_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>) func.return %0 : tensor<i32> } func.func @inner_stateful_pcall_func(%arg0: tensor<i32>) -> tensor<i32> { %0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>) func.return %0 : tensor<i32> } func.func @func(%arg0: tensor<i32>) -> tensor<i32> { func.return %arg0 : tensor<i32> } )mlir"; auto has_compile_device_type = [](mlir::SymbolUserOpInterface op) { return op->hasAttr(tensorflow::kCompileDeviceTypeAttr); }; mlir::MLIRContext context; context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>(); mlir::OwningOpRef<mlir::ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(code, &context); ASSERT_TRUE(module); mlir::SymbolTable symtab(*module); llvm::SmallVector<mlir::func::FuncOp> entry_funcs = GetEntryFunctions(*module); EXPECT_EQ(entry_funcs.size(), 1); EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func"); llvm::SmallVector<mlir::SymbolUserOpInterface> outermost_pcall_ops; auto result = mlir::GetFirstOpsOfType<mlir::TF::StatefulPartitionedCallOp, mlir::TF::PartitionedCallOp>( entry_funcs[0], symtab, has_compile_device_type, outermost_pcall_ops) .succeeded(); ASSERT_TRUE(result); EXPECT_EQ(outermost_pcall_ops.size(), 1); auto func = llvm::dyn_cast<mlir::func::FuncOp>(outermost_pcall_ops[0]->getParentOp()); ASSERT_TRUE(func); EXPECT_EQ(func.getSymName(), "outer_stateful_pcall_func"); } TEST(CallGraphUtilTest, GetOpsOfTypeUntilMiss) { const char *const code = R"mlir( func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} { %0 = "tf.While"(%arg0) {cond = @while_cond_func, body = @while_body_func, is_stateless = true} : (tensor<i32>) -> (tensor<i32>) func.return %0 : tensor<i32> } func.func @while_cond_func(%arg0: tensor<i32>) -> tensor<i1> { %0 = "tf.Const"() {value = dense<0> : tensor<i1>} : () -> tensor<i1> func.return %0 : tensor<i1> } func.func @while_body_func(%arg0: tensor<i32>) -> (tensor<i32>) { %0 = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @outer_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>) func.return %0 : tensor<i32> } func.func @outer_stateful_pcall_func(%arg0: tensor<i32>) -> (tensor<i32>) { %0 = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @inner_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>) func.return %0 : tensor<i32> } 
func.func @inner_stateful_pcall_func(%arg0: tensor<i32>) -> tensor<i32> { %0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>) func.return %0 : tensor<i32> } func.func @func(%arg0: tensor<i32>) -> tensor<i32> { func.return %arg0 : tensor<i32> } )mlir"; auto has_no_compile_device_type = [](mlir::SymbolUserOpInterface op) { return !op->hasAttr(tensorflow::kCompileDeviceTypeAttr); }; mlir::MLIRContext context; context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>(); mlir::OwningOpRef<mlir::ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(code, &context); ASSERT_TRUE(module); mlir::SymbolTable symtab(*module); llvm::SmallVector<mlir::func::FuncOp> entry_funcs = GetEntryFunctions(*module); EXPECT_EQ(entry_funcs.size(), 1); EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func"); llvm::SmallVector<mlir::SymbolUserOpInterface> noinline_pcall_ops, outermost_pcall_ops; auto result = mlir::GetOpsOfTypeUntilMiss<mlir::TF::StatefulPartitionedCallOp, mlir::TF::PartitionedCallOp>( entry_funcs[0], symtab, has_no_compile_device_type, noinline_pcall_ops, outermost_pcall_ops) .succeeded(); ASSERT_TRUE(result); EXPECT_EQ(noinline_pcall_ops.size(), 2); auto func = llvm::dyn_cast<mlir::func::FuncOp>(noinline_pcall_ops[0]->getParentOp()); ASSERT_TRUE(func); EXPECT_EQ(func.getSymName(), "while_body_func"); func = llvm::dyn_cast<mlir::func::FuncOp>(noinline_pcall_ops[1]->getParentOp()); ASSERT_TRUE(func); EXPECT_EQ(func.getSymName(), "outer_stateful_pcall_func"); EXPECT_EQ(outermost_pcall_ops.size(), 1); func = llvm::dyn_cast<mlir::func::FuncOp>(outermost_pcall_ops[0]->getParentOp()); ASSERT_TRUE(func); EXPECT_EQ(func.getSymName(), "inner_stateful_pcall_func"); } TEST(CallGraphUtilTest, SingleBlockEntryFunction) { const char *const code = R"mlir( func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} { func.return %arg0 : tensor<i32> } )mlir"; mlir::MLIRContext context; context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>(); mlir::OwningOpRef<mlir::ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(code, &context); llvm::errs() << "module:\n"; ASSERT_TRUE(module); mlir::SymbolTable symtab(*module); llvm::SmallVector<mlir::func::FuncOp> entry_funcs = GetEntryFunctions(*module); EXPECT_EQ(entry_funcs.size(), 1); EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func"); EXPECT_TRUE(HasSingleBlock(entry_funcs[0])); } TEST(CallGraphUtilTest, MultipleBlocksEntryFunction) { const char *const code = R"mlir( func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} { cf.br ^bb1 ^bb1: func.return %arg0 : tensor<i32> } )mlir"; mlir::MLIRContext context; context.loadDialect<mlir::cf::ControlFlowDialect, mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>(); mlir::OwningOpRef<mlir::ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(code, &context); llvm::errs() << "module:\n"; ASSERT_TRUE(module); mlir::SymbolTable symtab(*module); llvm::SmallVector<mlir::func::FuncOp> entry_funcs = GetEntryFunctions(*module); EXPECT_EQ(entry_funcs.size(), 1); EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func"); EXPECT_FALSE(HasSingleBlock(entry_funcs[0])); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/call_graph_util.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/call_graph_util_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
26eccd9e-9633-4d68-9b75-cb9b02e992db
cpp
abseil/abseil-cpp
beta_distribution
absl/random/beta_distribution.h
absl/random/beta_distribution_test.cc
#ifndef ABSL_RANDOM_BETA_DISTRIBUTION_H_ #define ABSL_RANDOM_BETA_DISTRIBUTION_H_ #include <cassert> #include <cmath> #include <istream> #include <limits> #include <ostream> #include <type_traits> #include "absl/meta/type_traits.h" #include "absl/random/internal/fast_uniform_bits.h" #include "absl/random/internal/fastmath.h" #include "absl/random/internal/generate_real.h" #include "absl/random/internal/iostream_state_saver.h" namespace absl { ABSL_NAMESPACE_BEGIN template <typename RealType = double> class beta_distribution { public: using result_type = RealType; class param_type { public: using distribution_type = beta_distribution; explicit param_type(result_type alpha, result_type beta) : alpha_(alpha), beta_(beta) { assert(alpha >= 0); assert(beta >= 0); assert(alpha <= (std::numeric_limits<result_type>::max)()); assert(beta <= (std::numeric_limits<result_type>::max)()); if (alpha == 0 || beta == 0) { method_ = DEGENERATE_SMALL; x_ = (alpha >= beta) ? 1 : 0; return; } if (beta < alpha) { inverted_ = true; a_ = beta; b_ = alpha; } else { inverted_ = false; a_ = alpha; b_ = beta; } if (a_ <= 1 && b_ >= ThresholdForLargeA()) { method_ = DEGENERATE_SMALL; x_ = inverted_ ? result_type(1) : result_type(0); return; } if ((b_ < 1.0 && a_ + b_ <= 1.2) || a_ <= ThresholdForSmallA()) { method_ = JOEHNK; a_ = result_type(1) / alpha_; b_ = result_type(1) / beta_; if (std::isinf(a_) || std::isinf(b_)) { method_ = DEGENERATE_SMALL; x_ = inverted_ ? result_type(1) : result_type(0); } return; } if (a_ >= ThresholdForLargeA()) { method_ = DEGENERATE_LARGE; result_type r = a_ / b_; x_ = (inverted_ ? result_type(1) : r) / (1 + r); return; } x_ = a_ + b_; log_x_ = std::log(x_); if (a_ <= 1) { method_ = CHENG_BA; y_ = result_type(1) / a_; gamma_ = a_ + a_; return; } method_ = CHENG_BB; result_type r = (a_ - 1) / (b_ - 1); y_ = std::sqrt((1 + r) / (b_ * r * 2 - r + 1)); gamma_ = a_ + result_type(1) / y_; } result_type alpha() const { return alpha_; } result_type beta() const { return beta_; } friend bool operator==(const param_type& a, const param_type& b) { return a.alpha_ == b.alpha_ && a.beta_ == b.beta_; } friend bool operator!=(const param_type& a, const param_type& b) { return !(a == b); } private: friend class beta_distribution; #ifdef _MSC_VER #define ABSL_RANDOM_INTERNAL_LOG_EXP_CONSTEXPR #else #define ABSL_RANDOM_INTERNAL_LOG_EXP_CONSTEXPR constexpr #endif static ABSL_RANDOM_INTERNAL_LOG_EXP_CONSTEXPR result_type ThresholdForSmallA() { return result_type(1) / std::log((std::numeric_limits<result_type>::max)()); } static ABSL_RANDOM_INTERNAL_LOG_EXP_CONSTEXPR result_type ThresholdForLargeA() { return std::exp( std::log((std::numeric_limits<result_type>::max)()) - std::log(std::log((std::numeric_limits<result_type>::max)())) - ThresholdPadding()); } #undef ABSL_RANDOM_INTERNAL_LOG_EXP_CONSTEXPR static constexpr result_type ThresholdPadding() { return 0; } enum Method { JOEHNK, CHENG_BA, CHENG_BB, DEGENERATE_SMALL, DEGENERATE_LARGE, }; result_type alpha_; result_type beta_; result_type a_{}; result_type b_{}; result_type x_{}; result_type log_x_{}; result_type y_{}; result_type gamma_{}; Method method_{}; bool inverted_{}; static_assert(std::is_floating_point<RealType>::value, "Class-template absl::beta_distribution<> must be " "parameterized using a floating-point type."); }; beta_distribution() : beta_distribution(1) {} explicit beta_distribution(result_type alpha, result_type beta = 1) : param_(alpha, beta) {} explicit beta_distribution(const param_type& p) : param_(p) {} void reset() {} template 
<typename URBG> result_type operator()(URBG& g) { return (*this)(g, param_); } template <typename URBG> result_type operator()(URBG& g, const param_type& p); param_type param() const { return param_; } void param(const param_type& p) { param_ = p; } result_type(min)() const { return 0; } result_type(max)() const { return 1; } result_type alpha() const { return param_.alpha(); } result_type beta() const { return param_.beta(); } friend bool operator==(const beta_distribution& a, const beta_distribution& b) { return a.param_ == b.param_; } friend bool operator!=(const beta_distribution& a, const beta_distribution& b) { return a.param_ != b.param_; } private: template <typename URBG> result_type AlgorithmJoehnk(URBG& g, const param_type& p); template <typename URBG> result_type AlgorithmCheng(URBG& g, const param_type& p); template <typename URBG> result_type DegenerateCase(URBG& g, const param_type& p) { if (p.method_ == param_type::DEGENERATE_SMALL && p.alpha_ == p.beta_) { random_internal::FastUniformBits<uint8_t> fast_u8; return static_cast<result_type>((fast_u8(g) & 0x10) != 0); } return p.x_; } param_type param_; random_internal::FastUniformBits<uint64_t> fast_u64_; }; #if defined(__powerpc64__) || defined(__PPC64__) || defined(__powerpc__) || \ defined(__ppc__) || defined(__PPC__) template <> constexpr long double beta_distribution<long double>::param_type::ThresholdPadding() { return 10; } #endif template <typename RealType> template <typename URBG> typename beta_distribution<RealType>::result_type beta_distribution<RealType>::AlgorithmJoehnk( URBG& g, const param_type& p) { using random_internal::GeneratePositiveTag; using random_internal::GenerateRealFromBits; using real_type = absl::conditional_t<std::is_same<RealType, float>::value, float, double>; result_type u, v, x, y, z; for (;;) { u = GenerateRealFromBits<real_type, GeneratePositiveTag, false>( fast_u64_(g)); v = GenerateRealFromBits<real_type, GeneratePositiveTag, false>( fast_u64_(g)); if (!std::is_same<float, result_type>::value) { x = std::pow(u, p.a_); y = std::pow(v, p.b_); z = x + y; if (z > 1) { continue; } if (z > 0) { return x / z; } } x = std::log(u) * p.a_; y = std::log(v) * p.b_; if (!std::isfinite(x) || !std::isfinite(y)) { continue; } z = x > y ? 
(x + std::log(1 + std::exp(y - x))) : (y + std::log(1 + std::exp(x - y))); if (z > 0) { continue; } return std::exp(x - z); } } template <typename RealType> template <typename URBG> typename beta_distribution<RealType>::result_type beta_distribution<RealType>::AlgorithmCheng( URBG& g, const param_type& p) { using random_internal::GeneratePositiveTag; using random_internal::GenerateRealFromBits; using real_type = absl::conditional_t<std::is_same<RealType, float>::value, float, double>; static constexpr result_type kLogFour = result_type(1.3862943611198906188344642429163531361); static constexpr result_type kS = result_type(2.6094379124341003746007593332261876); const bool use_algorithm_ba = (p.method_ == param_type::CHENG_BA); result_type u1, u2, v, w, z, r, s, t, bw_inv, lhs; for (;;) { u1 = GenerateRealFromBits<real_type, GeneratePositiveTag, false>( fast_u64_(g)); u2 = GenerateRealFromBits<real_type, GeneratePositiveTag, false>( fast_u64_(g)); v = p.y_ * std::log(u1 / (1 - u1)); w = p.a_ * std::exp(v); bw_inv = result_type(1) / (p.b_ + w); r = p.gamma_ * v - kLogFour; s = p.a_ + r - w; z = u1 * u1 * u2; if (!use_algorithm_ba && s + kS >= 5 * z) { break; } t = std::log(z); if (!use_algorithm_ba && s >= t) { break; } lhs = p.x_ * (p.log_x_ + std::log(bw_inv)) + r; if (lhs >= t) { break; } } return p.inverted_ ? (1 - w * bw_inv) : w * bw_inv; } template <typename RealType> template <typename URBG> typename beta_distribution<RealType>::result_type beta_distribution<RealType>::operator()(URBG& g, const param_type& p) { switch (p.method_) { case param_type::JOEHNK: return AlgorithmJoehnk(g, p); case param_type::CHENG_BA: ABSL_FALLTHROUGH_INTENDED; case param_type::CHENG_BB: return AlgorithmCheng(g, p); default: return DegenerateCase(g, p); } } template <typename CharT, typename Traits, typename RealType> std::basic_ostream<CharT, Traits>& operator<<( std::basic_ostream<CharT, Traits>& os, const beta_distribution<RealType>& x) { auto saver = random_internal::make_ostream_state_saver(os); os.precision(random_internal::stream_precision_helper<RealType>::kPrecision); os << x.alpha() << os.fill() << x.beta(); return os; } template <typename CharT, typename Traits, typename RealType> std::basic_istream<CharT, Traits>& operator>>( std::basic_istream<CharT, Traits>& is, beta_distribution<RealType>& x) { using result_type = typename beta_distribution<RealType>::result_type; using param_type = typename beta_distribution<RealType>::param_type; result_type alpha, beta; auto saver = random_internal::make_istream_state_saver(is); alpha = random_internal::read_floating_point<result_type>(is); if (is.fail()) return is; beta = random_internal::read_floating_point<result_type>(is); if (!is.fail()) { x.param(param_type(alpha, beta)); } return is; } ABSL_NAMESPACE_END } #endif
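// --- Illustrative sketch; not part of the Abseil sources above; the name
// SampleBetaViaGamma is invented here. The header dispatches between
// Joehnk's algorithm (small parameters) and Cheng's BA/BB rejection
// algorithms (larger parameters). A standard-library cross-check uses the
// classic construction: if X ~ Gamma(alpha, 1) and Y ~ Gamma(beta, 1) are
// independent, then X / (X + Y) ~ Beta(alpha, beta).
#include <random>

template <typename URBG>
double SampleBetaViaGamma(URBG& gen, double alpha, double beta) {
  std::gamma_distribution<double> ga(alpha, 1.0);
  std::gamma_distribution<double> gb(beta, 1.0);
  const double x = ga(gen);
  const double y = gb(gen);
  // Note: for tiny alpha and beta both draws can underflow to 0, yielding
  // NaN here; that is exactly the degenerate regime that param_type above
  // special-cases as DEGENERATE_SMALL.
  return x / (x + y);
}
// Usage: std::mt19937 rng(42); double b = SampleBetaViaGamma(rng, 2.0, 3.0);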
#include "absl/random/beta_distribution.h" #include <algorithm> #include <cfloat> #include <cstddef> #include <cstdint> #include <iterator> #include <random> #include <sstream> #include <string> #include <type_traits> #include <unordered_map> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/log/log.h" #include "absl/numeric/internal/representation.h" #include "absl/random/internal/chi_square.h" #include "absl/random/internal/distribution_test_util.h" #include "absl/random/internal/pcg_engine.h" #include "absl/random/internal/sequence_urbg.h" #include "absl/random/random.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_replace.h" #include "absl/strings/strip.h" namespace { template <typename IntType> class BetaDistributionInterfaceTest : public ::testing::Test {}; constexpr bool ShouldExerciseLongDoubleTests() { #if defined(__i686__) && defined(__x86_64__) return !absl::numeric_internal::IsDoubleDouble(); #else return false; #endif } using RealTypes = std::conditional<ShouldExerciseLongDoubleTests(), ::testing::Types<float, double, long double>, ::testing::Types<float, double>>::type; TYPED_TEST_SUITE(BetaDistributionInterfaceTest, RealTypes); TYPED_TEST(BetaDistributionInterfaceTest, SerializeTest) { const TypeParam kSmallA = 1.0f / std::log((std::numeric_limits<TypeParam>::max)()); const TypeParam kLargeA = std::exp(std::log((std::numeric_limits<TypeParam>::max)()) - std::log(std::log((std::numeric_limits<TypeParam>::max)()))); using param_type = typename absl::beta_distribution<TypeParam>::param_type; constexpr int kCount = 1000; absl::InsecureBitGen gen; const TypeParam kValues[] = { TypeParam(1e-20), TypeParam(1e-12), TypeParam(1e-8), TypeParam(1e-4), TypeParam(1e-3), TypeParam(0.1), TypeParam(0.25), std::nextafter(TypeParam(0.5), TypeParam(0)), std::nextafter(TypeParam(0.5), TypeParam(1)), TypeParam(0.5), TypeParam(1.0), std::nextafter(TypeParam(1), TypeParam(0)), std::nextafter(TypeParam(1), TypeParam(2)), TypeParam(12.5), TypeParam(1e2), TypeParam(1e8), TypeParam(1e12), TypeParam(1e20), kSmallA, std::nextafter(kSmallA, TypeParam(0)), std::nextafter(kSmallA, TypeParam(1)), kLargeA, std::nextafter(kLargeA, TypeParam(0)), std::nextafter(kLargeA, std::numeric_limits<TypeParam>::max()), std::numeric_limits<TypeParam>::max(), std::numeric_limits<TypeParam>::epsilon(), std::nextafter(std::numeric_limits<TypeParam>::min(), TypeParam(1)), std::numeric_limits<TypeParam>::min(), std::numeric_limits<TypeParam>::denorm_min(), std::numeric_limits<TypeParam>::min() / 2, std::nextafter(std::numeric_limits<TypeParam>::min(), TypeParam(0)), }; for (TypeParam alpha : kValues) { for (TypeParam beta : kValues) { LOG(INFO) << absl::StreamFormat("Smoke test for Beta(%a, %a)", alpha, beta); param_type param(alpha, beta); absl::beta_distribution<TypeParam> before(alpha, beta); EXPECT_EQ(before.alpha(), param.alpha()); EXPECT_EQ(before.beta(), param.beta()); { absl::beta_distribution<TypeParam> via_param(param); EXPECT_EQ(via_param, before); EXPECT_EQ(via_param.param(), before.param()); } for (int i = 0; i < kCount; ++i) { auto sample = before(gen); EXPECT_TRUE(std::isfinite(sample)); EXPECT_GE(sample, before.min()); EXPECT_LE(sample, before.max()); } std::stringstream ss; ss << before; absl::beta_distribution<TypeParam> after(3.8f, 1.43f); EXPECT_NE(before.alpha(), after.alpha()); EXPECT_NE(before.beta(), after.beta()); EXPECT_NE(before.param(), after.param()); EXPECT_NE(before, after); ss >> after; 
EXPECT_EQ(before.alpha(), after.alpha()); EXPECT_EQ(before.beta(), after.beta()); EXPECT_EQ(before, after) << ss.str() << " " << (ss.good() ? "good " : "") << (ss.bad() ? "bad " : "") << (ss.eof() ? "eof " : "") << (ss.fail() ? "fail " : ""); } } } TYPED_TEST(BetaDistributionInterfaceTest, DegenerateCases) { absl::random_internal::pcg64_2018_engine rng(0x2B7E151628AED2A6); constexpr int kCount = 1000; const TypeParam kSmallValues[] = { std::numeric_limits<TypeParam>::min(), std::numeric_limits<TypeParam>::denorm_min(), std::nextafter(std::numeric_limits<TypeParam>::min(), TypeParam(0)), std::numeric_limits<TypeParam>::epsilon(), }; const TypeParam kLargeValues[] = { std::numeric_limits<TypeParam>::max() * static_cast<TypeParam>(0.9999), std::numeric_limits<TypeParam>::max() - 1, std::numeric_limits<TypeParam>::max(), }; { for (TypeParam alpha : kSmallValues) { for (TypeParam beta : kSmallValues) { int zeros = 0; int ones = 0; absl::beta_distribution<TypeParam> d(alpha, beta); for (int i = 0; i < kCount; ++i) { TypeParam x = d(rng); if (x == 0.0) { zeros++; } else if (x == 1.0) { ones++; } } EXPECT_EQ(ones + zeros, kCount); if (alpha == beta) { EXPECT_NE(ones, 0); EXPECT_NE(zeros, 0); } } } } { for (TypeParam alpha : kSmallValues) { for (TypeParam beta : kLargeValues) { absl::beta_distribution<TypeParam> d(alpha, beta); for (int i = 0; i < kCount; ++i) { EXPECT_EQ(d(rng), 0.0); } } } } { for (TypeParam alpha : kLargeValues) { for (TypeParam beta : kSmallValues) { absl::beta_distribution<TypeParam> d(alpha, beta); for (int i = 0; i < kCount; ++i) { EXPECT_EQ(d(rng), 1.0); } } } } { absl::beta_distribution<TypeParam> d(std::numeric_limits<TypeParam>::max(), std::numeric_limits<TypeParam>::max()); for (int i = 0; i < kCount; ++i) { EXPECT_EQ(d(rng), 0.5); } } { absl::beta_distribution<TypeParam> d( std::numeric_limits<TypeParam>::max(), std::numeric_limits<TypeParam>::max() * 0.9999); for (int i = 0; i < kCount; ++i) { TypeParam x = d(rng); EXPECT_NE(x, 0.5f); EXPECT_FLOAT_EQ(x, 0.500025f); } } } class BetaDistributionModel { public: explicit BetaDistributionModel(::testing::tuple<double, double> p) : alpha_(::testing::get<0>(p)), beta_(::testing::get<1>(p)) {} double Mean() const { return alpha_ / (alpha_ + beta_); } double Variance() const { return alpha_ * beta_ / (alpha_ + beta_ + 1) / (alpha_ + beta_) / (alpha_ + beta_); } double Kurtosis() const { return 3 + 6 * ((alpha_ - beta_) * (alpha_ - beta_) * (alpha_ + beta_ + 1) - alpha_ * beta_ * (2 + alpha_ + beta_)) / alpha_ / beta_ / (alpha_ + beta_ + 2) / (alpha_ + beta_ + 3); } protected: const double alpha_; const double beta_; }; class BetaDistributionTest : public ::testing::TestWithParam<::testing::tuple<double, double>>, public BetaDistributionModel { public: BetaDistributionTest() : BetaDistributionModel(GetParam()) {} protected: template <class D> bool SingleZTestOnMeanAndVariance(double p, size_t samples); template <class D> bool SingleChiSquaredTest(double p, size_t samples, size_t buckets); absl::InsecureBitGen rng_; }; template <class D> bool BetaDistributionTest::SingleZTestOnMeanAndVariance(double p, size_t samples) { D dis(alpha_, beta_); std::vector<double> data; data.reserve(samples); for (size_t i = 0; i < samples; i++) { const double variate = dis(rng_); EXPECT_FALSE(std::isnan(variate)); EXPECT_GE(variate, 0.0); EXPECT_LE(variate, 1.0); data.push_back(variate); } const auto m = absl::random_internal::ComputeDistributionMoments(data); const double mean_stddev = std::sqrt(Variance() / static_cast<double>(m.n)); const 
double variance_stddev = std::sqrt( (Kurtosis() - 1) * Variance() * Variance() / static_cast<double>(m.n)); const double z_variance = (m.variance - Variance()) / variance_stddev; const double max_err = absl::random_internal::MaxErrorTolerance(p); const double z_mean = absl::random_internal::ZScore(Mean(), m); const bool pass = absl::random_internal::Near("z", z_mean, 0.0, max_err) && absl::random_internal::Near("z_variance", z_variance, 0.0, max_err); if (!pass) { LOG(INFO) << "Beta(" << alpha_ << ", " << beta_ << "), mean: sample " << m.mean << ", expect " << Mean() << ", which is " << std::abs(m.mean - Mean()) / mean_stddev << " stddevs away, variance: sample " << m.variance << ", expect " << Variance() << ", which is " << std::abs(m.variance - Variance()) / variance_stddev << " stddevs away."; } return pass; } template <class D> bool BetaDistributionTest::SingleChiSquaredTest(double p, size_t samples, size_t buckets) { constexpr double kErr = 1e-7; std::vector<double> cutoffs, expected; const double bucket_width = 1.0 / static_cast<double>(buckets); int i = 1; int unmerged_buckets = 0; for (; i < buckets; ++i) { const double p = bucket_width * static_cast<double>(i); const double boundary = absl::random_internal::BetaIncompleteInv(alpha_, beta_, p); if ((cutoffs.empty() && boundary < kErr) || (!cutoffs.empty() && boundary <= cutoffs.back())) { unmerged_buckets++; continue; } if (boundary >= 1.0 - 1e-10) { break; } cutoffs.push_back(boundary); expected.push_back(static_cast<double>(1 + unmerged_buckets) * bucket_width * static_cast<double>(samples)); unmerged_buckets = 0; } cutoffs.push_back(std::numeric_limits<double>::infinity()); expected.push_back(static_cast<double>(buckets - i + 1) * bucket_width * static_cast<double>(samples)); EXPECT_GE(cutoffs.size(), 3) << alpha_ << ", " << beta_; D dis(alpha_, beta_); std::vector<int32_t> counts(cutoffs.size(), 0); for (int i = 0; i < samples; i++) { const double x = dis(rng_); auto it = std::upper_bound(cutoffs.begin(), cutoffs.end(), x); counts[std::distance(cutoffs.begin(), it)]++; } const int dof = cutoffs.size() - 1; const double chi_square = absl::random_internal::ChiSquare( counts.begin(), counts.end(), expected.begin(), expected.end()); const bool pass = (absl::random_internal::ChiSquarePValue(chi_square, dof) >= p); if (!pass) { for (size_t i = 0; i < cutoffs.size(); i++) { LOG(INFO) << "cutoff[" << i << "] = " << cutoffs[i] << ", actual count " << counts[i] << ", expected " << static_cast<int>(expected[i]); } LOG(INFO) << "Beta(" << alpha_ << ", " << beta_ << ") " << absl::random_internal::kChiSquared << " " << chi_square << ", p = " << absl::random_internal::ChiSquarePValue(chi_square, dof); } return pass; } TEST_P(BetaDistributionTest, TestSampleStatistics) { static constexpr int kRuns = 20; static constexpr double kPFail = 0.02; const double p = absl::random_internal::RequiredSuccessProbability(kPFail, kRuns); static constexpr int kSampleCount = 10000; static constexpr int kBucketCount = 100; int failed = 0; for (int i = 0; i < kRuns; ++i) { if (!SingleZTestOnMeanAndVariance<absl::beta_distribution<double>>( p, kSampleCount)) { failed++; } if (!SingleChiSquaredTest<absl::beta_distribution<double>>( 0.005, kSampleCount, kBucketCount)) { failed++; } } EXPECT_LE(failed, 5); } std::string ParamName( const ::testing::TestParamInfo<::testing::tuple<double, double>>& info) { std::string name = absl::StrCat("alpha_", ::testing::get<0>(info.param), "__beta_", ::testing::get<1>(info.param)); return absl::StrReplaceAll(name, {{"+", "_"}, 
{"-", "_"}, {".", "_"}}); } INSTANTIATE_TEST_SUITE_P( TestSampleStatisticsCombinations, BetaDistributionTest, ::testing::Combine(::testing::Values(0.1, 0.2, 0.9, 1.1, 2.5, 10.0, 123.4), ::testing::Values(0.1, 0.2, 0.9, 1.1, 2.5, 10.0, 123.4)), ParamName); INSTANTIATE_TEST_SUITE_P( TestSampleStatistics_SelectedPairs, BetaDistributionTest, ::testing::Values(std::make_pair(0.5, 1000), std::make_pair(1000, 0.5), std::make_pair(900, 1000), std::make_pair(10000, 20000), std::make_pair(4e5, 2e7), std::make_pair(1e7, 1e5)), ParamName); TEST(BetaDistributionTest, StabilityTest) { using testing::ElementsAre; absl::random_internal::sequence_urbg urbg({ 0xffff00000000e6c8ull, 0xffff0000000006c8ull, 0x800003766295CFA9ull, 0x11C819684E734A41ull, 0x832603766295CFA9ull, 0x7fbe76c8b4395800ull, 0xB3472DCA7B14A94Aull, 0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0x13CCA830EB61BD96ull, 0x0334FE1EAA0363CFull, 0x00035C904C70A239ull, 0x00009E0BCBAADE14ull, 0x0000000000622CA7ull, 0x4864f22c059bf29eull, 0x247856d8b862665cull, 0xe46e86e9a1337e10ull, 0xd8c8541f3519b133ull, 0xffe75b52c567b9e4ull, 0xfffff732e5709c5bull, 0xff1f7f0b983532acull, 0x1ec2e8986d2362caull, 0xC332DDEFBE6C5AA5ull, 0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull, 0xECDD4775619F1510ull, 0x814c8e35fe9a961aull, 0x0c3cd59c9b638a02ull, 0xcb3bb6478a07715cull, 0x1224e62c978bbc7full, 0x671ef2cb04e81f6eull, 0x3c1cbd811eaf1808ull, 0x1bbc23cfa8fac721ull, 0xa4c2cda65e596a51ull, 0xb77216fad37adf91ull, 0x836d794457c08849ull, 0xe083df03475f49d7ull, 0xbc9feb512e6b0d6cull, 0xb12d74fdd718c8c5ull, 0x12ff09653bfbe4caull, 0x8dd03a105bc4ee7eull, 0x5738341045ba0d85ull, 0xf3fd722dc65ad09eull, 0xfa14fd21ea2a5705ull, 0xffe6ea4d6edb0c73ull, 0xD07E9EFE2BF11FB4ull, 0x95DBDA4DAE909198ull, 0xEAAD8E716B93D5A0ull, 0xD08ED1D0AFC725E0ull, 0x8E3C5B2F8E7594B7ull, 0x8FF6E2FBF2122B64ull, 0x8888B812900DF01Cull, 0x4FAD5EA0688FC31Cull, 0xD1CFF191B3A8C1ADull, 0x2F2F2218BE0E1777ull, 0xEA752DFE8B021FA1ull, }); auto float_to_u64 = [](float d) { int exp = 0; auto f = std::frexp(d, &exp); return (static_cast<uint64_t>(1e5 * f) * 10000) + std::abs(exp); }; auto double_to_u64 = [](double d) { int exp = 0; auto f = std::frexp(d, &exp); return (static_cast<uint64_t>(1e10 * f) * 10000) + std::abs(exp); }; std::vector<uint64_t> output(20); { absl::beta_distribution<float> dist(0.1f, 0.2f); std::generate(std::begin(output), std::end(output), [&] { return float_to_u64(dist(urbg)); }); EXPECT_EQ(44, urbg.invocations()); EXPECT_THAT(output, testing::ElementsAre( 998340000, 619030004, 500000001, 999990000, 996280000, 500000001, 844740004, 847210001, 999970000, 872320000, 585480007, 933280000, 869080042, 647670031, 528240004, 969980004, 626050008, 915930002, 833440033, 878040015)); } urbg.reset(); { absl::beta_distribution<double> dist(0.1, 0.2); std::generate(std::begin(output), std::end(output), [&] { return double_to_u64(dist(urbg)); }); EXPECT_EQ(44, urbg.invocations()); EXPECT_THAT( output, testing::ElementsAre( 99834713000000, 61903356870004, 50000000000001, 99999721170000, 99628374770000, 99999999990000, 84474397860004, 84721276240001, 99997407490000, 87232528120000, 58548364780007, 93328932910000, 86908237770042, 64767917930031, 52824581970004, 96998544140004, 62605946270008, 91593604380002, 83345031740033, 87804397230015)); } urbg.reset(); { absl::beta_distribution<double> dist(0.9, 2.0); std::generate(std::begin(output), std::end(output), [&] { return double_to_u64(dist(urbg)); }); EXPECT_EQ(62, urbg.invocations()); EXPECT_THAT( output, testing::ElementsAre( 62069004780001, 
64433204450001, 53607416560000, 89644295430008, 61434586310019, 55172615890002, 62187161490000, 56433684810003, 80454622050005, 86418558710003, 92920514700001, 64645184680001, 58549183380000, 84881283650005, 71078728590002, 69949694970000, 73157461710001, 68592191300001, 70747623900000, 78584696930005)); } urbg.reset(); { absl::beta_distribution<double> dist(1.5, 2.5); std::generate(std::begin(output), std::end(output), [&] { return double_to_u64(dist(urbg)); }); EXPECT_EQ(54, urbg.invocations()); EXPECT_THAT( output, testing::ElementsAre( 75000029250001, 76751482860001, 53264575220000, 69193133650005, 78028324470013, 91573587560002, 59167523770000, 60658618560002, 80075870540000, 94141320460004, 63196592770003, 78883906300002, 96797992590001, 76907587800001, 56645167560000, 65408302280003, 53401156320001, 64731238570000, 83065573750001, 79788333820001)); } } TEST(BetaDistributionTest, AlgorithmBounds) { #if (defined(__i386__) || defined(_M_IX86)) && FLT_EVAL_METHOD != 0 GTEST_SKIP() << "Skipping the test because we detected x87 floating-point semantics"; #endif { absl::random_internal::sequence_urbg urbg( {0x7fbe76c8b4395800ull, 0x8000000000000000ull}); absl::beta_distribution<double> dist(1e-4, 1e-4); double a = dist(urbg); EXPECT_EQ(a, 2.0202860861567108529e-09); EXPECT_EQ(2, urbg.invocations()); } { absl::beta_distribution<float> dist(0.5, 0.5); absl::random_internal::sequence_urbg urbg( {0xffff00000006e6c8ull, 0xffff00000007c7c8ull, 0x800003766295CFA9ull, 0x11C819684E734A41ull}); { double y = absl::beta_distribution<double>(0.5, 0.5)(urbg); EXPECT_EQ(4, urbg.invocations()); EXPECT_EQ(y, 0.9810668952633862) << y; } urbg.reset(); { float x = absl::beta_distribution<float>(0.5, 0.5)(urbg); EXPECT_EQ(4, urbg.invocations()); EXPECT_NEAR(0.98106688261032104, x, 0.0000005) << x << "f"; } } } }
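// --- Illustrative sketch; not part of the Abseil sources above. ---
// StabilityTest pins down bit-exact outputs by replaying a fixed list of
// 64-bit values and counting invocations. A minimal deterministic generator
// satisfying the C++ UniformRandomBitGenerator requirements (ReplayUrbg is a
// simplified, hypothetical analogue of absl::random_internal::sequence_urbg)
// looks like this:
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>
#include <vector>

class ReplayUrbg {
 public:
  using result_type = uint64_t;
  // Assumes a non-empty value list.
  explicit ReplayUrbg(std::vector<uint64_t> values)
      : values_(std::move(values)) {}
  static constexpr result_type min() { return 0; }
  static constexpr result_type max() {
    return std::numeric_limits<result_type>::max();
  }
  // Replays the stored values in order, wrapping around, and counts calls so
  // a test can assert exactly how much entropy a distribution consumed.
  result_type operator()() { return values_[invocations_++ % values_.size()]; }
  size_t invocations() const { return invocations_; }
  void reset() { invocations_ = 0; }

 private:
  std::vector<uint64_t> values_;
  size_t invocations_ = 0;
};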
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/beta_distribution.h
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/beta_distribution_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
761b914e-0551-49f0-83ab-8e512d88d3fe
cpp
tensorflow/tensorflow
tfg_optimizer_hook
tensorflow/core/grappler/optimizers/tfg_optimizer_hook.cc
tensorflow/core/grappler/optimizers/tfg_optimizer_hook_test.cc
#include "tensorflow/core/grappler/optimizers/tfg_optimizer_hook.h" #include <string> #include <utility> #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "llvm/Support/ThreadPool.h" #include "llvm/Support/Threading.h" #include "llvm/Support/raw_ostream.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/Dialect.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Pass/PassManager.h" #include "mlir/Pass/PassRegistry.h" #include "mlir/Support/LogicalResult.h" #include "tensorflow/c/tf_status.h" #include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/graph_debug_info.pb.h" #include "tensorflow/core/framework/metrics.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/ir/dialect.h" #include "tensorflow/core/ir/importexport/graphdef_export.h" #include "tensorflow/core/ir/importexport/graphdef_import.h" #include "tensorflow/core/ir/tf_op_registry.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/util/dump_graph.h" using tensorflow::Status; using tensorflow::errors::InvalidArgument; namespace mlir { namespace tfg { class TFGGrapplerOptimizer::Impl { public: explicit Impl(TFGPassPipelineBuilder builder, unsigned num_tfg_threads) : ctx_(MLIRContext::Threading::DISABLED), mgr_(&ctx_) { DialectRegistry registry; registry.addExtension(+[](MLIRContext* ctx, TFGraphDialect* dialect) { dialect->addInterfaces<TensorFlowOpRegistryInterface>(); }); ctx_.appendDialectRegistry(registry); builder(mgr_); if (num_tfg_threads) { llvm::ThreadPoolStrategy strategy; strategy.ThreadsRequested = num_tfg_threads; threadpool_ = std::make_unique<llvm::DefaultThreadPool>(strategy); ctx_.setThreadPool(*threadpool_); } } LogicalResult RunPipeline(ModuleOp module) { return mgr_.run(module); } MLIRContext* GetContext() { return &ctx_; } std::string GetPipelineString() { std::string pipeline; llvm::raw_string_ostream os(pipeline); mgr_.printAsTextualPipeline(os); return os.str(); } private: std::unique_ptr<llvm::DefaultThreadPool> threadpool_; MLIRContext ctx_; PassManager mgr_; }; TFGGrapplerOptimizer::TFGGrapplerOptimizer(TFGPassPipelineBuilder builder, unsigned num_tfg_threads) : impl_(std::make_unique<Impl>(std::move(builder), num_tfg_threads)) {} TFGGrapplerOptimizer::~TFGGrapplerOptimizer() = default; std::string TFGGrapplerOptimizer::name() const { return absl::StrCat("tfg_optimizer{", impl_->GetPipelineString(), "}"); } Status TFGGrapplerOptimizer::Optimize( tensorflow::grappler::Cluster* cluster, const tensorflow::grappler::GrapplerItem& item, tensorflow::GraphDef* optimized_graph) { if (VLOG_IS_ON(4)) { tensorflow::DumpGraphDefToFile( absl::StrCat("tfg_before_graph_", item.id, "_", std::hash<std::string>()(name())), item.graph); } VLOG(5) << "TFG Before Graph: \n" << item.graph.DebugString(); tensorflow::GraphDebugInfo debug_info; tensorflow::metrics::ScopedCounter<2> metrics( tensorflow::metrics::GetGraphOptimizationCounter(), {"TfgOptimizer", "convert_graphdef_to_tfg"}); auto error_or_module = ImportGraphDef(impl_->GetContext(), debug_info, item.graph); if (!error_or_module.ok()) { auto status = error_or_module.status(); tensorflow::errors::AppendToMessage( &status, "when importing GraphDef to MLIR module in GrapplerHook"); LOG(ERROR) << name() << " failed: " << status.ToString(); return absl::AbortedError(status.message()); } metrics.ReportAndStop(); 
ModuleOp module = (*error_or_module).get(); if (failed(impl_->RunPipeline(module))) { return absl::InvalidArgumentError("MLIR Graph Optimizer failed: "); } tensorflow::GraphDef graphdef; metrics.Reset({"TfgOptimizer", "convert_tfg_to_graphdef"}); TF_RETURN_WITH_CONTEXT_IF_ERROR( ConvertToGraphDef(module, &graphdef), "when exporting MLIR module to GraphDef in GrapplerHook"); (void)graphdef.mutable_library(); metrics.ReportAndStop(); *optimized_graph = std::move(graphdef); if (VLOG_IS_ON(4)) { tensorflow::DumpGraphDefToFile( absl::StrCat("tfg_after_graph_", item.id, "_", std::hash<std::string>()(name())), *optimized_graph); } if (VLOG_IS_ON(5)) { VLOG(5) << "TFG After Graph: \n" << optimized_graph->DebugString() << "\nMLIR module: \n"; module.dump(); } return absl::OkStatus(); } } }
#include "tensorflow/core/grappler/optimizers/tfg_optimizer_hook.h" #include <utility> #include "mlir/Pass/Pass.h" #include "mlir/Pass/PassManager.h" #include "mlir/Pass/PassRegistry.h" #include "tensorflow/cc/framework/scope.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/grappler/optimizers/graph_optimizer.h" #include "tensorflow/core/grappler/optimizers/meta_optimizer.h" #include "tensorflow/core/ir/ops.h" #include "tensorflow/core/ir/tf_op_wrapper.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/test.h" namespace mlir { namespace tfg { namespace { class TestPass : public PassWrapper<TestPass, OperationPass<GraphOp>> { public: MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestPass); StringRef getArgument() const override { return "grappler-hook-test-pass"; } void runOnOperation() override { GraphOp graph = getOperation(); for (TFOp op : graph.getOps()) op.setName(op.name() + "_visited"); } }; class AlwaysFailPass : public PassWrapper<AlwaysFailPass, OperationPass<GraphOp>> { public: MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(AlwaysFailPass); StringRef getArgument() const override { return "grappler-hook-fail-pass"; } void runOnOperation() override { signalPassFailure(); } }; } } } namespace tensorflow { namespace grappler { namespace { TEST(TFGOptimizerTest, TestCustomPipeline) { Scope s = Scope::NewRootScope(); Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10}); Output b = ops::Const(s.WithOpName("b"), 1.0f, {10, 10}); GrapplerItem item; TF_CHECK_OK(s.ToGraphDef(&item.graph)); EXPECT_EQ("a", item.graph.node(0).name()); EXPECT_EQ("b", item.graph.node(1).name()); mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) { mgr.addNestedPass<mlir::tfg::GraphOp>( std::make_unique<mlir::tfg::TestPass>()); }); GraphDef output; const Status status = optimizer.Optimize(nullptr, item, &output); TF_ASSERT_OK(status); EXPECT_EQ("a_visited", output.node(0).name()); EXPECT_EQ("b_visited", output.node(1).name()); } TEST(TFGOptimizerTest, TestCustomPipelineName) { mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) { mgr.addNestedPass<mlir::tfg::GraphOp>( std::make_unique<mlir::tfg::TestPass>()); }); EXPECT_EQ(optimizer.name(), "tfg_optimizer{any(tfg.graph(grappler-hook-test-pass))}"); } TEST(TFGOptimizerTest, TestImportErrorReturnsAborted) { Scope s = Scope::NewRootScope(); Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10}); GrapplerItem item; TF_CHECK_OK(s.ToGraphDef(&item.graph)); AttrValue attr; attr.set_i(0); item.graph.mutable_node(0)->mutable_attr()->insert({"", std::move(attr)}); mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) {}); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); EXPECT_FALSE(status.ok()); EXPECT_TRUE(errors::IsAborted(status)); } TEST(TFGOptimizerTest, TestPassErrorIsFatal) { Scope s = Scope::NewRootScope(); GrapplerItem item; TF_CHECK_OK(s.ToGraphDef(&item.graph)); mlir::tfg::TFGGrapplerOptimizer optimizer([](mlir::PassManager &mgr) { mgr.addNestedPass<mlir::tfg::GraphOp>( std::make_unique<mlir::tfg::AlwaysFailPass>()); }); GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); EXPECT_FALSE(status.ok()); EXPECT_FALSE(errors::IsAborted(status)); 
EXPECT_TRUE(errors::IsInvalidArgument(status)); } TEST(TFGOptimizerTest, TestImportErrorMetaOptimizerIsNotFatal) { Scope s = Scope::NewRootScope(); Output a = ops::Const(s.WithOpName("a"), 0.0f, {10, 10}); GrapplerItem item; TF_CHECK_OK(s.ToGraphDef(&item.graph)); AttrValue attr; attr.set_i(0); item.graph.mutable_node(0)->mutable_attr()->insert({"", std::move(attr)}); std::vector<std::unique_ptr<GraphOptimizer>> optimizers; optimizers.push_back(std::make_unique<mlir::tfg::TFGGrapplerOptimizer>( [](mlir::PassManager &mgr) {})); GraphDef output; Status status = RunMetaOptimizer(std::move(item), {}, nullptr, nullptr, &output); TF_EXPECT_OK(status); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/tfg_optimizer_hook.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/tfg_optimizer_hook_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
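A minimal usage sketch for the TFGGrapplerOptimizer above, mirroring the constructor the file defines. It assumes the TensorFlow/MLIR build environment from the file's own includes, and `CreateMyGraphPass` is a hypothetical pass factory standing in for any pass nested on tfg.graph (the unit test's TestPass plays the same role):

#include <memory>

#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/core/grappler/optimizers/tfg_optimizer_hook.h"
#include "tensorflow/core/ir/ops.h"

// Hypothetical factory, assumed to be defined elsewhere.
std::unique_ptr<mlir::Pass> CreateMyGraphPass();

// A nonzero num_tfg_threads makes Impl install an llvm::DefaultThreadPool on
// the MLIRContext; zero keeps MLIR threading disabled.
mlir::tfg::TFGGrapplerOptimizer MakeOptimizer() {
  return mlir::tfg::TFGGrapplerOptimizer(
      [](mlir::PassManager& mgr) {
        mgr.addNestedPass<mlir::tfg::GraphOp>(CreateMyGraphPass());
      },
      /*num_tfg_threads=*/4);
}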
7a699b20-fc79-4536-a38d-cf545441b0c0
cpp
tensorflow/tensorflow
stateful_rng_spmd_partitioner
third_party/xla/xla/service/spmd/stateful_rng_spmd_partitioner.cc
third_party/xla/xla/service/spmd/stateful_rng_spmd_partitioner_test.cc
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h" #include <memory> #include <utility> #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_sharding.h" #include "xla/service/call_graph.h" #include "xla/service/spmd/spmd_partitioner.h" #include "xla/status_macros.h" #include "xla/xla_data.pb.h" namespace xla { namespace spmd { absl::Status StatefulRngSpmdPartitioningVisitor::HandleRngGetAndUpdateState( HloInstruction* hlo) { if (hlo->sharding().HasUniqueDevice()) { return HandleSingleDevice(hlo); } TF_RET_CHECK(hlo->sharding().IsReplicated()); auto clone = builder()->AddInstruction(hlo->CloneWithNewOperands(hlo->shape(), {})); clone->set_sharding(hlo->sharding()); SetPartitionedHlo( hlo, spmd::PartitionedHlo(clone, hlo->shape(), MakePartitioningState()) .Reshard(hlo->sharding())); return absl::OkStatus(); } std::unique_ptr<spmd::SpmdPartitioningVisitor> StatefulRngSpmdPartitioner::CreateVisitor( HloComputation* computation, int64_t num_partitions, int64_t num_replicas, const spmd::SPMDCollectiveOpsCreator& collective_ops_creator, int64_t* next_channel_id, spmd::SpmdLogger* logger, spmd::SpmdPartitionerOptions options, const CallGraph& call_graph) { return std::make_unique<StatefulRngSpmdPartitioningVisitor>( computation, num_partitions, num_replicas, collective_ops_creator, next_channel_id, logger, std::move(options), this, call_graph); } absl::Status StatefulRngSpmdPartitioner::PreprocessSharding( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { for (HloComputation* computation : module->computations(execution_threads)) { for (HloInstruction* hlo : computation->instructions()) { if (hlo->opcode() == HloOpcode::kRngGetAndUpdateState && !hlo->has_sharding()) { hlo->set_sharding(HloSharding::Replicate()); } } } return spmd::SpmdPartitioner::PreprocessSharding(module, execution_threads); } bool StatefulRngSpmdPartitioner::CanSideEffectingHaveReplicatedSharding( const HloInstruction* hlo) { if (hlo->opcode() == HloOpcode::kRngGetAndUpdateState) return true; return spmd::SpmdPartitioner::CanSideEffectingHaveReplicatedSharding(hlo); } absl::Status StatefulRngSpmdPartitioner::HandleRotateRightWhilePreprocessing( HloComputation* computation) { if (!computation->IsWhileBodyComputation()) { return absl::OkStatus(); } HloInstruction* while_loop = computation->WhileCallInstruction(); TF_RET_CHECK(while_loop); if (computation->parent() ->config() .debug_options() .xla_gpu_unsafe_pipelined_loop_annotator()) { xla::FrontendAttributes attributes; (*attributes.mutable_map())["is_pipelined_while_loop"] = "true"; while_loop->add_frontend_attributes(attributes); } return absl::OkStatus(); } } }
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/service/hlo_module_config.h" #include "xla/service/hlo_verifier.h" #include "xla/service/rng_expander.h" #include "xla/service/sharding_propagation.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" #include "xla/xla.pb.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace spmd { namespace { namespace op = xla::testing::opcode_matchers; int64_t CountInstructions(const HloComputation &computation, HloOpcode opcode) { int64_t count = 0; for (const auto &instruction : computation.instructions()) { if (instruction->opcode() == opcode) { count++; } } return count; } class StatefulRngSpmdPartitionerTest : public HloTestBase { public: absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation( absl::string_view hlo_module, int64_t num_partitions, DebugOptions debug_options, std::function<void(HloPassPipeline &pipeline)> add_passes = nullptr, bool skip_checking_windowed_einsum_users = false, bool disable_ag_rewrite_for_multiple_consumers = false) { HloModuleConfig config = GetModuleConfigForTest(1, num_partitions); config.set_use_spmd_partitioning(true); config.set_debug_options(debug_options); TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module, config)); HloPassPipeline pass("partitioning"); pass.AddPass<HloVerifier>(false, false); if (add_passes) { add_passes(pass); } pass.AddPass<ShardingPropagation>(true); pass.AddPass<StatefulRngSpmdPartitioner>( num_partitions, 1, debug_options.xla_gpu_threshold_for_windowed_einsum_mib(), debug_options.xla_gpu_multi_streamed_windowed_einsum(), skip_checking_windowed_einsum_users, disable_ag_rewrite_for_multiple_consumers); pass.AddPass<HloVerifier>(false, false); TF_RETURN_IF_ERROR(pass.Run(module.get()).status()); return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module)); } void VerifyNoAllReduce(HloModule *module) { for (HloComputation *computation : module->computations()) { for (HloInstruction *hlo : computation->instructions()) { EXPECT_NE(hlo->opcode(), HloOpcode::kAllReduce); } } } DebugOptions GetDefaultDebugOptions() { DebugOptions debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(1000000); debug_options.set_xla_gpu_multi_streamed_windowed_einsum(false); debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(false); return debug_options; } }; TEST_F(StatefulRngSpmdPartitionerTest, RngReplicatedConsumer) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { %p0 = f32[50,100] parameter(0), sharding={replicated} %mu = f32[] constant(0) %sigma = f32[] constant(1) %rng = f32[50,100] rng(f32[] %mu, f32[] %sigma), distribution=rng_uniform ROOT %add = f32[50,100] add(%rng, %p0), sharding={replicated} } )"; auto add_passes = [](HloPassPipeline &pipeline) { pipeline.AddPass<RngExpander>(); }; DebugOptions debug_options = GetDebugOptionsForTest(); TF_ASSERT_OK_AND_ASSIGN( auto module, PartitionComputation(hlo_string, 2, GetDefaultDebugOptions(), add_passes)); XLA_VLOG_LINES(1, module->ToString()); VerifyNoAllReduce(module.get()); } TEST_F(StatefulRngSpmdPartitionerTest, 
RngPartitionedConsumer) { absl::string_view hlo_string = R"( HloModule module ENTRY entry { %p0 = f32[50,100] parameter(0), sharding={replicated} %mu = f32[] constant(0) %sigma = f32[] constant(1) %rng = f32[50,100] rng(f32[] %mu, f32[] %sigma), distribution=rng_uniform ROOT %add = f32[50,100] add(%rng, %p0), sharding={devices=[2,1]0,1} } )"; auto add_passes = [](HloPassPipeline &pipeline) { pipeline.AddPass<RngExpander>(); }; TF_ASSERT_OK_AND_ASSIGN( auto module, PartitionComputation(hlo_string, 2, GetDefaultDebugOptions(), add_passes)); XLA_VLOG_LINES(1, module->ToString()); VerifyNoAllReduce(module.get()); } TEST_F(StatefulRngSpmdPartitionerTest, EinsumDisableRewriteForAgWithMultipleConsumers) { absl::string_view hlo_string = R"( HloModule test, entry_computation_layout={(bf16[2,2048,24576]{2,1,0}, bf16[24576,98304]{1,0}, bf16[24576,98304]{1,0})->bf16[2,2048,98304]{2,1,0}}, num_partitions=4 ENTRY main { Arg_0.1 = bf16[2,2048,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]} Arg_1.2 = bf16[24576,98304]{1,0} parameter(1), sharding={devices=[1,4]<=[4]} dot.5 = bf16[2,2048,98304]{2,1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}, sharding={devices=[1,1,4]<=[4]} Arg_2.3 = bf16[24576,98304]{1,0} parameter(2), sharding={devices=[1,4]<=[4]} dot.6 = bf16[2,2048,98304]{2,1,0} dot(Arg_0.1, Arg_2.3), lhs_contracting_dims={2}, rhs_contracting_dims={0}, sharding={devices=[1,1,4]<=[4]} ROOT add.8 = bf16[2,2048,98304]{2,1,0} add(dot.5, dot.6), sharding={devices=[1,1,4]<=[4]} } )"; DebugOptions debug_options = GetDefaultDebugOptions(); debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(0); debug_options.set_xla_gpu_multi_streamed_windowed_einsum(true); TF_ASSERT_OK_AND_ASSIGN( auto module, PartitionComputation(hlo_string, 4, debug_options, nullptr, true, true)); XLA_VLOG_LINES(1, module->ToString()); EXPECT_EQ(CountInstructions(*module->entry_computation(), HloOpcode::kWhile), 1); EXPECT_EQ(CountInstructions(*module->entry_computation(), HloOpcode::kDot), 1); EXPECT_EQ( CountInstructions(*module->entry_computation(), HloOpcode::kAllGather), 1); } TEST_F(StatefulRngSpmdPartitionerTest, VerifyThresholdSetCorrectly) { auto debug_options = HloTestBase::GetDebugOptionsForTest(); int64_t threshold = 400; debug_options.set_xla_gpu_threshold_for_windowed_einsum_mib(threshold); debug_options.set_xla_gpu_multi_streamed_windowed_einsum(true); StatefulRngSpmdPartitioner rng_spmd_partitioner( 2, 1, debug_options.xla_gpu_threshold_for_windowed_einsum_mib(), debug_options.xla_gpu_multi_streamed_windowed_einsum()); EXPECT_EQ(rng_spmd_partitioner.options().threshold_for_windowed_einsum_mib, threshold); EXPECT_EQ(rng_spmd_partitioner.options().unroll_windowed_einsum, true); } TEST_F(StatefulRngSpmdPartitionerTest, MergedSliceThenConcatRotateRightWhileOp) { absl::string_view hlo_string = R"( HloModule test %Body { %param = (f32[12], s32[]) parameter(0) %i = s32[] get-tuple-element(%param), index=1 %one = s32[] constant(1) %i_plus_one = s32[] add(s32[] %i, s32[] %one) %param0 = f32[12] get-tuple-element(%param), index=0, sharding={devices=[4]<=[4]} %slice0 = f32[2] slice(%param0), slice={[10:12]}, sharding={devices=[4]<=[4]} %slice1 = f32[10] slice(%param0), slice={[0:10]}, sharding={devices=[4]<=[4]} %concat = f32[12] concatenate(%slice0, %slice1), dimensions={0}, sharding={devices=[4]<=[4]} ROOT %tuple = (f32[12], s32[]) tuple(%concat, %i_plus_one) } %Cond { %param.1 = (f32[12], s32[]) parameter(0) %i.1 = s32[] get-tuple-element(%param.1), index=1 %trip_count = s32[] 
constant(11) ROOT %done = pred[] compare(%i.1, %trip_count), direction=LT } ENTRY %test { %i_start = f32[12] parameter(0) %p_start = s32[] constant(0) %initial_tuple = (f32[12], s32[]) tuple(%i_start, %p_start) ROOT %while = (f32[12], s32[]) while(%initial_tuple), condition=%Cond, body=%Body } )"; DebugOptions debug_options = GetDefaultDebugOptions(); debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(true); TF_ASSERT_OK_AND_ASSIGN( auto module, PartitionComputation(hlo_string, 4, debug_options)); const HloInstruction *whileOp = module->entry_computation()->GetInstructionWithName("while.1"); const HloInstruction *root = whileOp->while_body()->GetInstructionWithName("concatenate"); auto rotate = op::Concatenate(op::CollectivePermute(op::Slice()), op::Slice()); EXPECT_THAT(root, AllOf(rotate, op::Shape("f32[3]"))); EXPECT_TRUE( whileOp->frontend_attributes().map().contains("is_pipelined_while_loop")); debug_options.set_xla_gpu_unsafe_pipelined_loop_annotator(false); TF_ASSERT_OK_AND_ASSIGN( module, PartitionComputation(hlo_string, 4, debug_options)); whileOp = module->entry_computation()->GetInstructionWithName("while.1"); root = whileOp->while_body()->GetInstructionWithName("concatenate"); rotate = op::Concatenate(op::CollectivePermute(op::Slice()), op::Slice()); EXPECT_THAT(root, AllOf(rotate, op::Shape("f32[3]"))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/stateful_rng_spmd_partitioner.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/stateful_rng_spmd_partitioner_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
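A hedged sketch of running the partitioner outside the test fixture, mirroring the pipeline the test builds. It assumes an HloModule whose config already enables SPMD partitioning for two partitions; the trailing arguments reuse the four-argument constructor form exercised by VerifyThresholdSetCorrectly:

#include "absl/status/status.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/sharding_propagation.h"
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h"

absl::Status PartitionWithStatefulRng(xla::HloModule* module) {
  xla::HloPassPipeline pipeline("spmd-partitioning");
  // Propagate user shardings first; the partitioner then defaults any
  // unsharded rng-get-and-update-state ops to replicated (PreprocessSharding).
  pipeline.AddPass<xla::ShardingPropagation>(/*is_spmd=*/true);
  pipeline.AddPass<xla::spmd::StatefulRngSpmdPartitioner>(
      /*num_partitions=*/2, /*num_replicas=*/1,
      /*threshold_for_windowed_einsum_mib=*/100000,
      /*multi_streamed_windowed_einsum=*/false);
  return pipeline.Run(module).status();
}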
4decdc63-0b55-463f-bcc4-8d92940cc475
cpp
google/arolla
lift_accumulator_to_scalar_operator
arolla/qexpr/lift_accumulator_to_scalar_operator.h
arolla/qexpr/lift_accumulator_to_scalar_operator_test.cc
#ifndef AROLLA_QEXPR_LIFT_ACCUMULATOR_TO_SCALAR_OPERATOR_H_
#define AROLLA_QEXPR_LIFT_ACCUMULATOR_TO_SCALAR_OPERATOR_H_

#include <type_traits>

#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/aggregation_ops_interface.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/util/meta.h"

namespace arolla {

template <typename Accumulator, typename ParentTypes, typename ChildTypes>
class ScalarToScalarGroupLifter;

template <typename Accumulator, typename... ParentTs, typename... ChildTs>
class ScalarToScalarGroupLifter<Accumulator, meta::type_list<ParentTs...>,
                                meta::type_list<ChildTs...>> {
 public:
  template <typename... Ts>
  std::conditional_t<Accumulator::IsAggregator(),
                     typename Accumulator::result_type,
                     wrap_with_optional_t<typename Accumulator::result_type>>
  operator()(EvaluationContext* ctx, const ParentTs&... p_args,
             const wrap_with_optional_t<ChildTs>&... c_args,
             const ScalarToScalarEdge&, const Ts&... init_args) const {
    auto accumulator_or_status = CreateAccumulator<Accumulator>(init_args...);
    if (!accumulator_or_status.ok()) {
      ctx->set_status(std::move(accumulator_or_status).status());
      return typename Accumulator::result_type();
    }
    Accumulator& accumulator = *accumulator_or_status;
    accumulator.Reset(p_args...);
    bool child_args_present =
        (is_present_or_not_required<ChildTs>(c_args) && ... && true);
    if (child_args_present) {
      accumulator.Add(value<ChildTs>(c_args)...);
    }
    if constexpr (Accumulator::IsFull()) {
      accumulator.FinalizeFullGroup();
    }
    ctx->set_status(accumulator.GetStatus());
    if (Accumulator::IsAggregator() || child_args_present) {
      return typename Accumulator::result_type(accumulator.GetResult());
    } else {
      return {};
    }
  }

 private:
  template <typename T>
  bool is_present_or_not_required(const wrap_with_optional_t<T>& arg) const {
    if constexpr (is_optional_v<T>) {
      return true;
    } else {
      return arg.present;
    }
  }

  template <typename T>
  T value(const wrap_with_optional_t<T>& arg) const {
    if constexpr (is_optional_v<T>) {
      return arg;
    } else {
      return arg.value;
    }
  }
};

}  // namespace arolla

#endif  // AROLLA_QEXPR_LIFT_ACCUMULATOR_TO_SCALAR_OPERATOR_H_
#include <cstdint> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status_matchers.h" #include "arolla/memory/optional_value.h" #include "arolla/qexpr/operators.h" #include "arolla/qtype/array_like/array_like_qtype.h" #include "arolla/qtype/base_types.h" namespace arolla { namespace { using ::absl_testing::IsOkAndHolds; TEST(ScalarToScalarGroupLifterTest, AggSum) { EXPECT_THAT(InvokeOperator<OptionalValue<int>>( "test.agg_sum", OptionalValue<int>(5), ScalarToScalarEdge()), IsOkAndHolds(OptionalValue<int>(5))); } TEST(ScalarToScalarGroupLifterTest, Average) { EXPECT_THAT(InvokeOperator<float>("test.average", OptionalValue<float>(5.0f), ScalarToScalarEdge()), IsOkAndHolds(5.0f)); } TEST(ScalarToScalarGroupLifterTest, RankValues) { EXPECT_THAT( InvokeOperator<OptionalValue<int64_t>>( "test.rank_values", OptionalValue<int>(5), ScalarToScalarEdge()), IsOkAndHolds(OptionalValue<int64_t>(0))); } } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/lift_accumulator_to_scalar_operator.h
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/lift_accumulator_to_scalar_operator_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
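One behavior worth calling out in the lifter above: when the scalar child value is missing, Add() is skipped entirely, and an aggregator still returns whatever its post-Reset state produces. A hedged sketch in the test's style; it reuses the registered test.agg_sum operator and deliberately asserts nothing about the empty-group value, which depends on the accumulator's Reset semantics:

// Invoke the lifted aggregator with a missing child value; Add() is skipped,
// so the result is the accumulator's empty-group output.
auto result = InvokeOperator<OptionalValue<int>>(
    "test.agg_sum", OptionalValue<int>(), ScalarToScalarEdge());
// `result` is an absl::StatusOr<OptionalValue<int>>; its value reflects the
// accumulator state right after Reset().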
3867fb7c-4631-4b9d-84a9-d1c6629febeb
cpp
tensorflow/tensorflow
rewrite_utils
tensorflow/core/data/rewrite_utils.cc
tensorflow/core/data/rewrite_utils_test.cc
#include "tensorflow/core/data/rewrite_utils.h" #include "tensorflow/core/platform/refcount.h" #if !defined(IS_MOBILE_PLATFORM) #include <algorithm> #include <functional> #include <map> #include <memory> #include <string> #include <unordered_map> #include <utility> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "absl/strings/substitute.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/graph_runner.h" #include "tensorflow/core/common_runtime/process_function_library_runtime.h" #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/data/hash_utils.h" #include "tensorflow/core/data/serialization_utils.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/metrics.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_def_util.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/grappler/clusters/virtual_cluster.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/grappler/grappler_item_builder.h" #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h" #include "tensorflow/core/grappler/optimizers/data/function_utils.h" #include "tensorflow/core/grappler/optimizers/data/graph_utils.h" #include "tensorflow/core/grappler/optimizers/meta_optimizer.h" #include "tensorflow/core/lib/hash/hash.h" #include "tensorflow/core/lib/strings/proto_serialization.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/tstring.h" #include "tensorflow/core/protobuf/config.pb.h" #include "tensorflow/core/protobuf/device_properties.pb.h" #include "tensorflow/core/protobuf/meta_graph.pb.h" #include "tensorflow/core/protobuf/rewriter_config.pb.h" namespace tensorflow { namespace data { namespace { constexpr char kOptimizerName[] = "tf_data_meta_optimizer"; constexpr char kOptimizers[] = "optimizers"; constexpr char kOptimizerConfigs[] = "optimizer_configs"; void AddFakeSinks(FunctionDef* function_def) { int counter = 0; for (const auto& output : function_def->signature().output_arg()) { NodeDef* node = function_def->add_node_def(); tensorflow::grappler::function_utils::SetUniqueFunctionNodeName( strings::StrCat("FakeSink", counter++), function_def, node); node->set_op("Identity"); node->add_input(function_def->ret().at(output.name())); (*node->mutable_attr())["T"].set_type(output.type()); (*function_def->mutable_ret())[output.name()] = strings::StrCat(node->name(), ":output:0"); } } void RemoveFakeSinks(FunctionDef* function_def) { std::map<std::string, std::string> identity_map; for (const auto& node : function_def->node_def()) { if (node.op() == "Identity" && node.input_size() == 1) { identity_map[node.name()] = node.input(0); } } for (const auto& output_arg : function_def->signature().output_arg()) { const std::string& tensor = function_def->ret().at(output_arg.name()); const std::string& output_node = tensor.substr(0, tensor.find(':')); if (identity_map.find(output_node) != identity_map.end()) { 
(*function_def->mutable_ret())[output_arg.name()] = identity_map.at(output_node); } } } Status ApplyRewrites(OpKernelContext* ctx, const std::function<RewriterConfig(void)> config_factory, GraphDef* graph_def, string* dataset_node) { std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item = GetGrapplerItem(graph_def, dataset_node, true); std::unordered_map<std::string, tensorflow::DeviceProperties> device_map; tensorflow::grappler::VirtualCluster cluster(device_map); tensorflow::ConfigProto config; *config.mutable_graph_options()->mutable_rewrite_options() = config_factory(); TF_RETURN_IF_ERROR(tensorflow::grappler::RunMetaOptimizer( std::move(*grappler_item), config, ctx->device(), &cluster, graph_def)); for (auto& function_def : *graph_def->mutable_library()->mutable_function()) { RemoveFakeSinks(&function_def); } return absl::OkStatus(); } } RewriterConfig CreateRewriterConfig( const absl::flat_hash_set<tstring>& optimizations, const absl::flat_hash_set<tstring>& optimizations_configs) { RewriterConfig rewriter_config; rewriter_config.add_optimizers(kOptimizerName); rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE); rewriter_config.set_fail_on_optimizer_errors(true); auto custom_optimizer = rewriter_config.add_custom_optimizers(); custom_optimizer->set_name(kOptimizerName); auto* custom_optimizations_list = (*custom_optimizer->mutable_parameter_map())[kOptimizers].mutable_list(); const auto& registered_optimizers = grappler::CustomGraphOptimizerRegistry::GetRegisteredOptimizers(); for (const auto& optimization : optimizations) { if (std::find(registered_optimizers.begin(), registered_optimizers.end(), optimization) != registered_optimizers.end()) { custom_optimizations_list->add_s(optimization.data(), optimization.size()); } else { VLOG(1) << "Optimization " << optimization << " is not registered."; } } auto* config_list = (*custom_optimizer->mutable_parameter_map())[kOptimizerConfigs] .mutable_list(); for (const auto& config : optimizations_configs) { config_list->add_s(config.data(), config.size()); } return rewriter_config; } Status RewriteDataset(OpKernelContext* ctx, const DatasetBase* input, std::function<RewriterConfig(void)> config_factory, bool record_fingerprint, core::RefCountPtr<DatasetBase>* rewritten_input) { std::vector<std::pair<string, Tensor>> input_list; GraphDef graph_def; string output_node; TF_RETURN_IF_ERROR( AsGraphDefForRewrite(ctx, input, &input_list, &graph_def, &output_node)); VLOG(3) << "Before graph rewrites: " << graph_def.DebugString(); TF_RETURN_IF_ERROR( ApplyRewrites(ctx, config_factory, &graph_def, &output_node)); VLOG(3) << "After graph rewrites: " << graph_def.DebugString(); FunctionLibraryRuntime* flr = nullptr; std::unique_ptr<ProcessFunctionLibraryRuntime> pflr = nullptr; std::unique_ptr<FunctionLibraryDefinition> lib_def = nullptr; TF_RETURN_IF_ERROR( ctx->function_library()->Clone(&lib_def, &pflr, &flr, true)); TF_RETURN_IF_ERROR(AddToFunctionLibrary(lib_def.get(), graph_def.library())); Graph graph(OpRegistry::Global()); TF_RETURN_IF_ERROR(ImportGraphDef({}, graph_def, &graph, nullptr)); std::vector<Tensor> outputs; GraphRunner graph_runner(flr->device()); TF_RETURN_IF_ERROR( graph_runner.Run(&graph, flr, input_list, {output_node}, &outputs)); DatasetBase* rewritten_dataset; TF_RETURN_IF_ERROR( GetDatasetFromVariantTensor(outputs[0], &rewritten_dataset)); rewritten_dataset->Ref(); rewritten_input->reset(rewritten_dataset); if (record_fingerprint) { (*ctx->runner())([graph_def = std::move(graph_def), lib_def = 
lib_def.release(), input_list = std::move(input_list), output_node = std::move(output_node)]() { std::unique_ptr<FunctionLibraryDefinition> lib_def_owner(lib_def); const NodeDef* node_def = nullptr; for (const auto& node : graph_def.node()) { if (node.name() == output_node) { node_def = &node; break; } } if (node_def == nullptr) { VLOG(3) << "Failed to find node: " << output_node; return; } uint64 hash = 0; Status s = HashNode(graph_def, *node_def, *lib_def, &hash); if (!s.ok()) { VLOG(3) << "Failed to hash graph: " << s; return; } for (const auto& pair : input_list) { hash = Hash64CombineUnordered(hash, Hash64(pair.first)); uint64 tensor_hash = 0; Status s = HashTensor(pair.second, &tensor_hash); if (s.ok()) { hash = Hash64CombineUnordered(hash, tensor_hash); } else { VLOG(3) << "Failed to hash tensor: " << s; } } string graph_hash = strings::StrCat(strings::Hex(hash, strings::kZeroPad16)); metrics::RecordTFDataFingerprint(graph_hash); }); } return absl::OkStatus(); } std::unique_ptr<tensorflow::grappler::GrapplerItem> GetGrapplerItem( GraphDef* graph_def, std::string* dataset_node, bool add_fake_sinks, bool apply_optimizations) { NodeDef* node = graph_def->mutable_node()->Add(); tensorflow::grappler::graph_utils::SetUniqueGraphNodeName("Sink", graph_def, node); node->set_op("Identity"); node->add_input(*dataset_node); (*node->mutable_attr())["T"].set_type(DT_VARIANT); *dataset_node = node->name(); if (add_fake_sinks) { for (auto& function_def : *graph_def->mutable_library()->mutable_function()) { AddFakeSinks(&function_def); } } MetaGraphDef meta_graph_def; (*meta_graph_def.mutable_graph_def()) = *graph_def; CollectionDef collection_def; auto node_list = collection_def.mutable_node_list(); node_list->add_value(*dataset_node); (*meta_graph_def.mutable_collection_def())["train_op"] = collection_def; tensorflow::grappler::ItemConfig item_config; item_config.apply_optimizations = apply_optimizations; std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item = tensorflow::grappler::GrapplerItemFromMetaGraphDef( "graph", meta_graph_def, item_config); grappler_item->optimization_options().optimize_function_library = false; return grappler_item; } absl::flat_hash_set<tstring> SelectOptimizations( const absl::flat_hash_set<string>& experiments, const absl::flat_hash_set<tstring>& optimizations_enabled, const absl::flat_hash_set<tstring>& optimizations_disabled, const absl::flat_hash_set<tstring>& optimizations_default) { absl::flat_hash_set<tstring> optimizations; optimizations.insert(optimizations_enabled.begin(), optimizations_enabled.end()); for (const auto& optimization : optimizations_default) { if (!optimizations_disabled.contains(optimization)) { optimizations.insert(optimization); } } const auto& registered_optimizers = grappler::CustomGraphOptimizerRegistry::GetRegisteredOptimizers(); for (const auto& experiment : experiments) { if (std::find(registered_optimizers.begin(), registered_optimizers.end(), experiment) != registered_optimizers.end() && !optimizations_disabled.contains(experiment)) { optimizations.insert(experiment); } } return optimizations; } absl::StatusOr<std::string> GetDatasetNode(const GraphDef& graph_def) { for (const auto& node : graph_def.node()) { if (node.op() == kRetvalOp) { return node.input(0); } } return errors::NotFound( absl::Substitute("Dataset node for graph is not found:\n$0", graph_def.ShortDebugString())); } absl::StatusOr<NodeDef> GetDatasetNodeDef(const GraphDef& graph_def) { TF_ASSIGN_OR_RETURN(std::string dataset_node_name, 
GetDatasetNode(graph_def)); for (const auto& node : graph_def.node()) { if (node.name() == dataset_node_name) { return node; } } return errors::NotFound( absl::Substitute("Dataset node for graph is not found:\n$0", graph_def.ShortDebugString())); } } } #endif
#include "tensorflow/core/data/rewrite_utils.h" #include <memory> #include <string> #include <vector> #include "absl/strings/string_view.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/grappler/grappler_item.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace data { namespace { using ::tensorflow::test::AsScalar; using ::tensorflow::test::function::GDef; using ::tensorflow::test::function::NDef; using ::testing::ElementsAre; NodeDef GetMapNode(absl::string_view name, absl::string_view input_node_name, absl::string_view function_name) { return NDef( name, "MapDataset", {std::string(input_node_name)}, {{"f", FunctionDefHelper::FunctionRef(std::string(function_name))}, {"Targuments", {}}, {"output_shapes", absl::Span<const TensorShape>{TensorShape()}}, {"output_types", absl::Span<const DataType>{DT_INT64}}}); } FunctionDef XTimesX() { return FunctionDefHelper::Create( "XTimesX", {"x: int64"}, {"y: int64"}, {}, {{{"y"}, "Mul", {"x", "x"}, {{"T", DT_INT64}}}}, {{"y", "y:z:0"}}); } GraphDef GetRangeSquareDatasetDef(const int64_t range) { return GDef( {NDef("start", "Const", {}, {{"value", AsScalar<int64_t>(0)}, {"dtype", DT_INT64}}), NDef("stop", "Const", {}, {{"value", AsScalar<int64_t>(range)}, {"dtype", DT_INT64}}), NDef("step", "Const", {}, {{"value", AsScalar<int64_t>(1)}, {"dtype", DT_INT64}}), NDef("range", "RangeDataset", {"start", "stop", "step"}, {{"output_shapes", absl::Span<const TensorShape>{TensorShape()}}, {"output_types", absl::Span<const DataType>{DT_INT64}}}), GetMapNode("map", "range", "XTimesX"), NDef("dataset", "_Retval", {"map"}, {{"T", DT_VARIANT}, {"index", 0}})}, {XTimesX()}); } TEST(GraphUtilTest, GetFetchNode) { GraphDef graph = GetRangeSquareDatasetDef(10); TF_ASSERT_OK_AND_ASSIGN(std::string dataset_node, GetDatasetNode(graph)); std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item = GetGrapplerItem(&graph, &dataset_node, false); EXPECT_THAT(grappler_item->fetch, ElementsAre("Sink")); } TEST(GraphUtilTest, GetFetchNodeDef) { GraphDef graph = GetRangeSquareDatasetDef(10); TF_ASSERT_OK_AND_ASSIGN(NodeDef dataset_nodedef, GetDatasetNodeDef(graph)); std::string dataset_node = dataset_nodedef.name(); std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item = GetGrapplerItem(&graph, &dataset_node, false); EXPECT_THAT(grappler_item->fetch, ElementsAre("Sink")); } struct SelectOptimizationsTestCase { absl::flat_hash_set<string> experiments; absl::flat_hash_set<tstring> optimizations_enabled; absl::flat_hash_set<tstring> optimizations_disabled; absl::flat_hash_set<tstring> optimizations_default; std::vector<string> expected; }; class SelectOptimizationsTest : public ::testing::TestWithParam<SelectOptimizationsTestCase> {}; TEST_P(SelectOptimizationsTest, DatasetUtils) { const SelectOptimizationsTestCase test_case = GetParam(); auto optimizations = SelectOptimizations( test_case.experiments, test_case.optimizations_enabled, test_case.optimizations_disabled, test_case.optimizations_default); 
EXPECT_THAT(std::vector<string>(optimizations.begin(), optimizations.end()), ::testing::UnorderedElementsAreArray(test_case.expected)); } INSTANTIATE_TEST_SUITE_P( Test, SelectOptimizationsTest, ::testing::Values( SelectOptimizationsTestCase{ {}, {}, {}, {}, {}}, SelectOptimizationsTestCase{ {"map_and_batch_fusion"}, {"bar"}, {}, {"baz"}, {"map_and_batch_fusion", "bar", "baz"}}, SelectOptimizationsTestCase{ {"this_is_not_an_optimization"}, {"bar"}, {}, {"baz"}, {"bar", "baz"}}, SelectOptimizationsTestCase{{}, {"foo"}, {"baz"}, {"bar", "baz"}, {"foo", "bar"}}, SelectOptimizationsTestCase{ {"foo"}, {"bar"}, {"foo"}, {"baz"}, {"bar", "baz"}})); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/rewrite_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/rewrite_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
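A short sketch of CreateRewriterConfig above; "map_and_batch_fusion" is the optimization name the unit test also uses, though whether it is registered depends on which tf.data optimizers are linked into the binary:

#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"

tensorflow::RewriterConfig MakeTfDataRewriterConfig() {
  // Unregistered optimization names are skipped with a VLOG(1) message
  // rather than an error, so this is safe even in minimal builds.
  absl::flat_hash_set<tensorflow::tstring> optimizations = {
      "map_and_batch_fusion"};
  absl::flat_hash_set<tensorflow::tstring> configs;  // no per-optimizer flags
  return tensorflow::data::CreateRewriterConfig(optimizations, configs);
}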
b3ef8084-b9ff-4365-81fc-8e5630925d83
cpp
tensorflow/tensorflow
attrs_and_constraints
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.cc
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h" #include <cstdint> #include <optional> #include "absl/algorithm/container.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypeInterfaces.h" #include "mlir/IR/IRMapping.h" #include "mlir/IR/Operation.h" #include "mlir/IR/Types.h" #include "mlir/IR/Value.h" #include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" #include "stablehlo/dialect/StablehloOps.h" #include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/compiler/mlir/tensorflow/utils/xla_call_module_attrs.h" namespace mlir::quant { using ::mlir::stablehlo::DotGeneralOp; bool HasStaticShape(Value value) { auto shaped_type = mlir::dyn_cast<ShapedType>(value.getType()); if (!shaped_type) return false; return shaped_type.hasStaticShape(); } bool HasStaticShapeAtDims(Value value, const ArrayRef<int> dims) { auto shaped_type = mlir::dyn_cast<ShapedType>(value.getType()); if (!shaped_type || !shaped_type.hasRank()) return false; for (auto dim : dims) { if (shaped_type.isDynamicDim(dim)) return false; } return true; } Type CloneTypeWithNewElementType(Type old_type, Type element_type) { if (!mlir::isa<ShapedType>(old_type)) return {}; return mlir::cast<ShapedType>(old_type).clone(element_type); } SmallVector<Value> CloneOpWithReplacedOperands( OpBuilder& builder, Operation* op, const ArrayRef<Value> new_operands) { IRMapping mapping; for (const auto& arg : enumerate(new_operands)) { mapping.map(op->getOperand(arg.index()), arg.value()); } return builder.clone(*op, mapping)->getResults(); } FailureOr<int32_t> CastI64ToI32(const int64_t value) { if (!llvm::isInt<32>(value)) { DEBUG_WITH_TYPE( "mlir-quant-attrs-and-constraints", llvm::dbgs() << "Tried to cast " << value << "from int64 to int32, but lies out of range of int32.\n"); return failure(); } return static_cast<int32_t>(value); } FailureOr<SmallVector<int32_t>> CastI64ArrayToI32( const ArrayRef<int64_t> int64_array) { SmallVector<int32_t> int32_array{}; int32_array.reserve(int64_array.size()); for (const int64_t i64 : int64_array) { FailureOr<int32_t> cast_i32 = CastI64ToI32(i64); if (failed(cast_i32)) return failure(); int32_array.push_back(*cast_i32); } return int32_array; } StringRef GetEntryFunctionName(TF::XlaCallModuleOp op) { if (!op->hasAttrOfType<FlatSymbolRefAttr>( TF::kStablehloEntryFunctionAttrName)) { return StringRef(); } return op ->getAttrOfType<FlatSymbolRefAttr>(TF::kStablehloEntryFunctionAttrName) .getValue(); } bool IsHybridQuantizedOp(Operation* op) { if ((op->getNumOperands() != 2 && op->getNumOperands() != 3) || op->getResultTypes().size() != 1) { return false; } Type lhs_type = op->getOperand(0).getType(); Type rhs_type = op->getOperand(1).getType(); Type result_type = op->getResult(0).getType(); return !IsQuantizedTensorType(lhs_type) && IsQuantizedTensorType(rhs_type) && !IsQuantizedTensorType(result_type); } absl::StatusOr<bool> IsDotGeneralFullyConnected(DotGeneralOp dot_general_op) { if (dot_general_op == nullptr) return absl::InvalidArgumentError( "Given dot_general op cannot be null when checking " "`IsDotGeneralBatchMatmul`."); const ::mlir::stablehlo::DotDimensionNumbersAttr dot_dimension_numbers = dot_general_op.getDotDimensionNumbers(); const 
ArrayRef<int64_t> lhs_contracting_dims = dot_dimension_numbers.getLhsContractingDimensions(); const ArrayRef<int64_t> rhs_contracting_dims = dot_dimension_numbers.getRhsContractingDimensions(); const int64_t input_rank = mlir::dyn_cast<ShapedType>(dot_general_op.getOperand(0).getType()) .getRank(); const int64_t filter_rank = mlir::dyn_cast<ShapedType>(dot_general_op.getOperand(1).getType()) .getRank(); const bool has_proper_rank = (input_rank == 1 || input_rank == 2) && filter_rank == 2; const bool has_proper_contracting_dim = lhs_contracting_dims.size() == 1 && rhs_contracting_dims.size() == 1 && lhs_contracting_dims[0] == input_rank - 1; const bool is_not_batch_op = dot_dimension_numbers.getLhsBatchingDimensions().empty(); const bool has_proper_quantization_dimension = absl::c_find(rhs_contracting_dims, filter_rank) == rhs_contracting_dims.end(); return has_proper_rank && has_proper_contracting_dim && is_not_batch_op && has_proper_quantization_dimension; } std::optional<int64_t> GetDotGeneralQuantizationDim( DotGeneralOp dot_general_op) { if (dot_general_op == nullptr) return std::nullopt; const int64_t filter_rank = mlir::dyn_cast<ShapedType>(dot_general_op.getOperand(1).getType()) .getRank(); const bool is_per_axis_quantizable = IsDotGeneralFullyConnected(dot_general_op).value(); if (!is_per_axis_quantizable) return std::nullopt; return filter_rank - 1; } bool ContainsConvOrDot(StringRef str) { return str.contains("_conv") || str.contains("_dot_general"); } }
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h" #include <cstdint> #include <optional> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "llvm/Support/MathExtras.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/OwningOpRef.h" #include "mlir/IR/Value.h" #include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" #include "stablehlo/dialect/StablehloOps.h" #include "tensorflow/compiler/mlir/quantization/common/func.h" #include "tensorflow/compiler/mlir/quantization/common/test_base.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tsl/platform/status_matchers.h" namespace mlir::quant { namespace { using ::mlir::stablehlo::AddOp; using ::mlir::stablehlo::ConstantOp; using ::mlir::stablehlo::ConvolutionOp; using ::mlir::stablehlo::DotGeneralOp; using ::mlir::stablehlo::SubtractOp; using ::testing::ElementsAreArray; using ::testing::Eq; using ::testing::IsEmpty; using ::testing::IsNull; using ::testing::NotNull; using ::testing::Optional; using ::tsl::testing::StatusIs; using AttrsAndConstraintsTest = ::mlir::quant::QuantizationTestBase; constexpr absl::string_view kModuleStatic = R"mlir( module { func.func @main(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} { %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32> return %0 : tensor<1x3xf32> } } )mlir"; constexpr absl::string_view kModuleDynamic = R"mlir( module { func.func @main(%arg0: tensor<?x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<?x3xf32> attributes {_from_xla_call_module} { %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<?x1024xf32>, tensor<1024x3xf32>) -> tensor<?x3xf32> return %0 : tensor<?x3xf32> } } )mlir"; constexpr absl::string_view kModuleMultipleUses = R"mlir( module { func.func @main(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} { %cst = stablehlo.constant dense<1.0> : tensor<1x3xf32> %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32> %1 = stablehlo.subtract %cst, %0 : tensor<1x3xf32> %2 = stablehlo.add %0, %cst : tensor<1x3xf32> return %2 : tensor<1x3xf32> } } )mlir"; constexpr absl::string_view kModuleXlaCallModule = R"mlir( module { func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) { %0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32> %1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32> %2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_1, _original_entry_function = "composite_fn_1", _tfl_quant_trait = "fully_quantizable"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32> return %2 : tensor<?x2xf32> } func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} { return %arg0 : tensor<?x2xf32> } } )mlir"; constexpr absl::string_view kModuleDotWeightOnlyPtq = R"mlir( module { 
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) { %0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32> %1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32> %2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32> return %2 : tensor<?x2xf32> } func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} { %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32> return %0 : tensor<?x2xf32> } } )mlir"; constexpr absl::string_view kModuleXlaCallModuleNoEntryNoQuantTrait = R"mlir( module { func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) { %0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32> %1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32> %2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_original_entry_function = "composite_fn_1"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32> return %2 : tensor<?x2xf32> } func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} { return %arg0 : tensor<?x2xf32> } } )mlir"; constexpr absl::string_view kModulePartitionedCall = R"mlir( module { func.func @main(%arg0: tensor<2x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<2x2xf32>) { %cst = "tf.Const"() {device = "", value = dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>} : () -> tensor<2x2xf32> %0 = "tf.PartitionedCall"(%arg0, %cst) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_fn_1} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32> loc(callsite("test@main"("MatMul") at "QuantizationUnit(\12\06MatMul\1a\07main)")) return %0 : tensor<2x2xf32> } func.func private @composite_fn_1(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> tensor<2x2xf32> attributes {tf_quant.composite_function} { %0 = "tf.MatMul"(%arg0, %arg1) {attr_map = "0:transpose_a,1:transpose_b", device = "", transpose_a = false, transpose_b = false} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32> return %0 : tensor<2x2xf32> } } )mlir"; constexpr absl::string_view kModuleHybridQuantized = R"mlir( module { func.func @main(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3x!quant.uniform<i8:f32, 6.000000e-03:0>> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<1x3xf32>) { %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3x!quant.uniform<i8:f32, 6.000000e-03:0>>) -> tensor<1x3xf32> return %0 : tensor<1x3xf32> } } )mlir"; TEST_F(AttrsAndConstraintsTest, HasStaticShapeSucceedsWithStaticShapes) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic); ASSERT_TRUE(module_op); 
func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); Value dot_general_result = FindOperationOfType<DotGeneralOp>(main_fn)->getResult(0); EXPECT_TRUE(HasStaticShape(dot_general_result)); EXPECT_TRUE(HasStaticShapeAtDims(dot_general_result, {0})); EXPECT_TRUE(HasStaticShapeAtDims(dot_general_result, {1})); } TEST_F(AttrsAndConstraintsTest, HasStaticShapeFailsWithDynamicShapes) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleDynamic); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); Value dot_general_result = FindOperationOfType<DotGeneralOp>(main_fn)->getResult(0); EXPECT_FALSE(HasStaticShape(dot_general_result)); EXPECT_FALSE(HasStaticShapeAtDims(dot_general_result, {0})); EXPECT_TRUE(HasStaticShapeAtDims(dot_general_result, {1})); } TEST_F(AttrsAndConstraintsTest, HasRankOfReturnsTrueForMatchingRank) { constexpr absl::string_view kConstantOpWithRankFour = R"mlir(%0 = stablehlo.constant dense<0> : tensor<1x1x1x1xi8>)mlir"; OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kConstantOpWithRankFour); ASSERT_TRUE(module_op); ASSERT_FALSE(module_op->getBodyRegion().empty()); ASSERT_FALSE(module_op->getBodyRegion().front().empty()); auto constant_op = dyn_cast_or_null<mlir::stablehlo::ConstantOp>( module_op->getBodyRegion().front().front()); ASSERT_THAT(constant_op, NotNull()); EXPECT_TRUE(HasRankOf(constant_op, 4)); } TEST_F(AttrsAndConstraintsTest, HasRankOfReturnsFalseForNonMatchingRank) { constexpr absl::string_view kConstantOpWithRankFour = R"mlir(%0 = stablehlo.constant dense<0> : tensor<1x1x1x1xi8>)mlir"; OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kConstantOpWithRankFour); ASSERT_TRUE(module_op); ASSERT_FALSE(module_op->getBodyRegion().empty()); ASSERT_FALSE(module_op->getBodyRegion().front().empty()); auto constant_op = dyn_cast_or_null<mlir::stablehlo::ConstantOp>( module_op->getBodyRegion().front().front()); ASSERT_THAT(constant_op, NotNull()); EXPECT_FALSE(HasRankOf(constant_op, 3)); } TEST_F(AttrsAndConstraintsTest, HasRankOfReturnsTrueForMatchingRankWithUnknownDimensions) { constexpr absl::string_view kArgumentWithUnknownDims = R"mlir( func.func @unknown_dims_arg(%arg: tensor<?x?xi8>) -> tensor<?x?xi8> { return %arg : tensor<?x?xi8> } )mlir"; OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kArgumentWithUnknownDims); ASSERT_TRUE(module_op); auto func_op = module_op->lookupSymbol<func::FuncOp>("unknown_dims_arg"); ASSERT_THAT(func_op, NotNull()); ASSERT_THAT(func_op.getNumArguments(), Eq(1)); EXPECT_TRUE(HasRankOf(func_op.getArgument(0), 2)); } TEST_F(AttrsAndConstraintsTest, HasRankOfReturnsFalseForUnknownRank) { constexpr absl::string_view kArgumentWithUnknownRank = R"mlir( func.func @unknown_rank_arg(%arg: tensor<*xi8>) -> tensor<*xi8> { return %arg : tensor<*xi8> } )mlir"; OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kArgumentWithUnknownRank); ASSERT_TRUE(module_op); auto func_op = module_op->lookupSymbol<func::FuncOp>("unknown_rank_arg"); ASSERT_THAT(func_op, NotNull()); ASSERT_THAT(func_op.getNumArguments(), Eq(1)); EXPECT_FALSE(HasRankOf(func_op.getArgument(0), 1)); } TEST_F(AttrsAndConstraintsTest, TryCastSucceeds) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto dot_general_op = FindOperationOfType<DotGeneralOp>(main_fn); ASSERT_THAT(dot_general_op, NotNull()); EXPECT_TRUE(succeeded( 
TryCast<DotGeneralOp>(dot_general_op, "dot_general_op"))); } TEST_F(AttrsAndConstraintsTest, TryCastFailsOnWrongType) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto dot_general_op = FindOperationOfType<DotGeneralOp>(main_fn); ASSERT_THAT(dot_general_op, NotNull()); EXPECT_TRUE( failed(TryCast<AddOp>(dot_general_op, "dot_general_op"))); } TEST_F(AttrsAndConstraintsTest, TryCastFailsOnNullPtr) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto op_nullptr = FindOperationOfType<DotGeneralOp>(main_fn)->getNextNode()->getNextNode(); EXPECT_THAT(op_nullptr, IsNull()); EXPECT_TRUE(failed(TryCast<DotGeneralOp>(op_nullptr, "op_nullptr"))); EXPECT_TRUE(failed(TryCast<DotGeneralOp>(nullptr, "nullptr"))); } TEST_F(AttrsAndConstraintsTest, I64ValueInI32RangeAreCastedCorrectly) { EXPECT_TRUE(succeeded(CastI64ToI32(llvm::minIntN(32)))); EXPECT_TRUE(succeeded(CastI64ToI32(llvm::maxIntN(32)))); } TEST_F(AttrsAndConstraintsTest, CastingFailsForI64ValueOutOfI32Range) { EXPECT_TRUE(failed(CastI64ToI32(llvm::minIntN(32) - 10))); EXPECT_TRUE(failed(CastI64ToI32(llvm::maxIntN(32) + 10))); } TEST_F(AttrsAndConstraintsTest, I64ArrayInI32RangeAreCastedCorrectly) { const SmallVector<int64_t> array_i64 = {llvm::minIntN(32), -2, -1, 0, 1, 2, llvm::maxIntN(32)}; FailureOr<SmallVector<int32_t>> array_i32 = CastI64ArrayToI32(array_i64); EXPECT_TRUE(succeeded(array_i32)); EXPECT_THAT( *array_i32, ElementsAreArray({static_cast<int32_t>(llvm::minIntN(32)), -2, -1, 0, 1, 2, static_cast<int32_t>(llvm::maxIntN(32))})); } TEST_F(AttrsAndConstraintsTest, CastingFailsForI64ArrayUnderI32Range) { const int64_t under_min_i32 = -2147483658; ArrayRef<int64_t> array_i64{under_min_i32}; EXPECT_EQ(under_min_i32, llvm::minIntN(32) - 10); EXPECT_TRUE(failed(CastI64ArrayToI32(array_i64))); } TEST_F(AttrsAndConstraintsTest, CastingFailsForI64ArrayAboveI32Range) { const int64_t below_max_i32 = 2147483657; ArrayRef<int64_t> array_i64{below_max_i32}; EXPECT_EQ(below_max_i32, llvm::maxIntN(32) + 10); EXPECT_TRUE(failed(CastI64ArrayToI32(array_i64))); } TEST_F(AttrsAndConstraintsTest, FindUserOfDifferentTypes) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleMultipleUses); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto dot_general_op = FindOperationOfType<DotGeneralOp>(main_fn); ASSERT_THAT(dot_general_op, NotNull()); EXPECT_THAT(FindUserOfType<AddOp>(dot_general_op), NotNull()); EXPECT_THAT(FindUserOfType<SubtractOp>(dot_general_op), NotNull()); EXPECT_THAT(FindUserOfType<>(dot_general_op), NotNull()); EXPECT_THAT(FindUserOfType<ConvolutionOp>(dot_general_op), IsNull()); } TEST_F(AttrsAndConstraintsTest, FindOperandOfDifferentTypes) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleMultipleUses); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto subtract_op = FindOperationOfType<SubtractOp>(main_fn); ASSERT_THAT(subtract_op, NotNull()); EXPECT_THAT(FindOperandOfType<DotGeneralOp>(subtract_op), NotNull()); EXPECT_THAT(FindOperandOfType<ConstantOp>(subtract_op), NotNull()); EXPECT_THAT(FindOperandOfType<>(subtract_op), NotNull()); EXPECT_THAT(FindOperandOfType<AddOp>(subtract_op), IsNull()); } TEST_F(AttrsAndConstraintsTest, XlaCallModuleOpGetFuncAttr) 
{ OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn); ASSERT_THAT(xla_call_module_op, NotNull()); FlatSymbolRefAttr xla_call_op_attr = GetFuncAttr(xla_call_module_op); EXPECT_EQ(xla_call_op_attr.getValue(), "composite_fn_1"); } TEST_F(AttrsAndConstraintsTest, PartitionedCallGetFuncAttr) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModulePartitionedCall); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto partitioned_call_op = FindOperationOfType<TF::PartitionedCallOp>(main_fn); ASSERT_THAT(partitioned_call_op, NotNull()); FlatSymbolRefAttr partitioned_call_op_attr = GetFuncAttr(partitioned_call_op); EXPECT_EQ(partitioned_call_op_attr.getValue(), "composite_fn_1"); } TEST_F(AttrsAndConstraintsTest, GetEntryFunctionNameCorrectly) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn); ASSERT_THAT(xla_call_module_op, NotNull()); EXPECT_EQ(GetEntryFunctionName(xla_call_module_op), StringRef("composite_fn_1")); } TEST_F(AttrsAndConstraintsTest, GetEntryFunctionNameWhenNotSet) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModuleNoEntryNoQuantTrait); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn); ASSERT_THAT(xla_call_module_op, NotNull()); EXPECT_THAT(GetEntryFunctionName(xla_call_module_op), IsEmpty()); } TEST_F(AttrsAndConstraintsTest, HasQuantizableTraitTrue) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn); ASSERT_THAT(xla_call_module_op, NotNull()); EXPECT_TRUE(HasQuantizableTrait(xla_call_module_op)); } TEST_F(AttrsAndConstraintsTest, HasQuantizableTraitFalse) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModuleNoEntryNoQuantTrait); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn); ASSERT_THAT(xla_call_module_op, NotNull()); EXPECT_FALSE(HasQuantizableTrait(xla_call_module_op)); } TEST_F(AttrsAndConstraintsTest, IsHybridQuantizedOpTrue) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleHybridQuantized); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); Operation* dot_general = FindOperationOfType<DotGeneralOp>(main_fn); EXPECT_TRUE(IsHybridQuantizedOp(dot_general)); } TEST_F(AttrsAndConstraintsTest, IsHybridQuantizedOpFalse) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); Operation* call_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn); EXPECT_FALSE(IsHybridQuantizedOp(call_op)); } constexpr absl::string_view kModuleDotGeneralFullyConnected = R"mlir( module { func.func @main(%arg0: tensor<1x1024xf32>, %arg1: 
tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} { %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32> return %0 : tensor<1x3xf32> } } )mlir"; constexpr absl::string_view kModuleDotGeneralBatchMatmul = R"mlir( module { func.func @main(%arg0: tensor<2x2x2xf32>, %arg1: tensor<2x2x2xf32>) -> tensor<2x2x2xf32> attributes {_from_xla_call_module} { %0 = stablehlo.dot_general %arg0, %arg1, batching_dims = [0] x [0], contracting_dims = [2] x [1], precision = [DEFAULT, DEFAULT] : (tensor<2x2x2xf32>, tensor<2x2x2xf32>) -> tensor<2x2x2xf32> return %0 : tensor<2x2x2xf32> } } )mlir"; TEST_F(AttrsAndConstraintsTest, IsDotGeneralFullyConnectedReturnsError) { DotGeneralOp dot_general_op = nullptr; StatusIs(absl::StatusCode::kInvalidArgument, "Given dot_general op cannot be null when checking " "`IsDotGeneralBatchMatmul`"); } TEST_F(AttrsAndConstraintsTest, IsDotGeneralFullyConnectedReturnsTrue) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleDotGeneralFullyConnected); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin(); EXPECT_THAT(IsDotGeneralFullyConnected(dot_general_op), true); } TEST_F(AttrsAndConstraintsTest, IsDotGeneralFullyConnectedReturnsFalse) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleDotGeneralBatchMatmul); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin(); EXPECT_THAT(IsDotGeneralFullyConnected(dot_general_op), false); } TEST_F(AttrsAndConstraintsTest, DotGeneralFullyConnectedReturnsQuantDim) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleDotGeneralFullyConnected); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin(); EXPECT_THAT(GetDotGeneralQuantizationDim(dot_general_op), Optional(1)); } TEST_F(AttrsAndConstraintsTest, DotGeneralBatchMatmulReturnsNullQuantDim) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleDotGeneralBatchMatmul); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin(); EXPECT_THAT(GetDotGeneralQuantizationDim(dot_general_op), Eq(std::nullopt)); } TEST_F(AttrsAndConstraintsTest, ContainsConvOrDotTrue) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleDotWeightOnlyPtq); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin(); const StringRef function_name = GetEntryFunctionName(call_op); EXPECT_TRUE(ContainsConvOrDot(function_name)); } TEST_F(AttrsAndConstraintsTest, ContainsConvOrDotFalse) { OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModuleNoEntryNoQuantTrait); ASSERT_TRUE(module_op); func::FuncOp main_fn = FindMainFuncOp(*module_op); ASSERT_THAT(main_fn, NotNull()); auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin(); const StringRef function_name = GetEntryFunctionName(call_op); EXPECT_FALSE(ContainsConvOrDot(function_name)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
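The CastI64ToI32/CastI64ArrayToI32 tests in the attrs_and_constraints record above exercise range-checked narrowing from 64-bit to 32-bit integers. A minimal standalone sketch of that check, free of the MLIR/LLVM dependencies (CheckedI64ToI32 and CheckedI64ArrayToI32 are hypothetical names, and std::optional stands in for mlir::FailureOr):

#include <cstdint>
#include <limits>
#include <optional>
#include <vector>

// Succeeds only when the value fits in the int32_t range, i.e. within
// [llvm::minIntN(32), llvm::maxIntN(32)] in the record's terms.
std::optional<int32_t> CheckedI64ToI32(int64_t v) {
  if (v < std::numeric_limits<int32_t>::min() ||
      v > std::numeric_limits<int32_t>::max()) {
    return std::nullopt;  // out of range -> failure
  }
  return static_cast<int32_t>(v);
}

// Array version: fails as a whole if any element is out of range, matching
// the behavior the CastingFailsForI64Array* tests above rely on.
std::optional<std::vector<int32_t>> CheckedI64ArrayToI32(
    const std::vector<int64_t>& in) {
  std::vector<int32_t> out;
  out.reserve(in.size());
  for (int64_t v : in) {
    auto narrowed = CheckedI64ToI32(v);
    if (!narrowed) return std::nullopt;
    out.push_back(*narrowed);
  }
  return out;
}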
e281052a-c5ac-4c16-a801-e69a8838f43e
cpp
google/quiche
hpack_varint_decoder
quiche/http2/hpack/varint/hpack_varint_decoder.cc
quiche/http2/hpack/varint/hpack_varint_decoder_test.cc
#include "quiche/http2/hpack/varint/hpack_varint_decoder.h" #include <limits> #include <string> #include "absl/strings/str_cat.h" namespace http2 { DecodeStatus HpackVarintDecoder::Start(uint8_t prefix_value, uint8_t prefix_length, DecodeBuffer* db) { QUICHE_DCHECK_LE(3u, prefix_length); QUICHE_DCHECK_LE(prefix_length, 8u); const uint8_t prefix_mask = (1 << prefix_length) - 1; value_ = prefix_value & prefix_mask; if (value_ < prefix_mask) { MarkDone(); return DecodeStatus::kDecodeDone; } offset_ = 0; return Resume(db); } DecodeStatus HpackVarintDecoder::StartExtended(uint8_t prefix_length, DecodeBuffer* db) { QUICHE_DCHECK_LE(3u, prefix_length); QUICHE_DCHECK_LE(prefix_length, 8u); value_ = (1 << prefix_length) - 1; offset_ = 0; return Resume(db); } DecodeStatus HpackVarintDecoder::Resume(DecodeBuffer* db) { const uint8_t kMaxOffset = 63; CheckNotDone(); while (offset_ < kMaxOffset) { if (db->Empty()) { return DecodeStatus::kDecodeInProgress; } uint8_t byte = db->DecodeUInt8(); uint64_t summand = byte & 0x7f; QUICHE_DCHECK_LE(offset_, 56); QUICHE_DCHECK_LE(summand, std::numeric_limits<uint64_t>::max() >> offset_); summand <<= offset_; QUICHE_DCHECK_LE(value_, std::numeric_limits<uint64_t>::max() - summand); value_ += summand; if ((byte & 0x80) == 0) { MarkDone(); return DecodeStatus::kDecodeDone; } offset_ += 7; } if (db->Empty()) { return DecodeStatus::kDecodeInProgress; } QUICHE_DCHECK_EQ(kMaxOffset, offset_); uint8_t byte = db->DecodeUInt8(); if ((byte & 0x80) == 0) { uint64_t summand = byte & 0x7f; if (summand <= std::numeric_limits<uint64_t>::max() >> offset_) { summand <<= offset_; if (value_ <= std::numeric_limits<uint64_t>::max() - summand) { value_ += summand; MarkDone(); return DecodeStatus::kDecodeDone; } } } QUICHE_DLOG(WARNING) << "Variable length int encoding is too large or too long. " << DebugString(); MarkDone(); return DecodeStatus::kDecodeError; } uint64_t HpackVarintDecoder::value() const { CheckDone(); return value_; } void HpackVarintDecoder::set_value(uint64_t v) { MarkDone(); value_ = v; } std::string HpackVarintDecoder::DebugString() const { return absl::StrCat("HpackVarintDecoder(value=", value_, ", offset=", offset_, ")"); } DecodeStatus HpackVarintDecoder::StartForTest(uint8_t prefix_value, uint8_t prefix_length, DecodeBuffer* db) { return Start(prefix_value, prefix_length, db); } DecodeStatus HpackVarintDecoder::StartExtendedForTest(uint8_t prefix_length, DecodeBuffer* db) { return StartExtended(prefix_length, db); } DecodeStatus HpackVarintDecoder::ResumeForTest(DecodeBuffer* db) { return Resume(db); } }
#include "quiche/http2/hpack/varint/hpack_varint_decoder.h" #include <stddef.h> #include <cstdint> #include <string> #include <tuple> #include <utility> #include "absl/base/macros.h" #include "absl/strings/escaping.h" #include "absl/strings/string_view.h" #include "quiche/http2/test_tools/random_decoder_test_base.h" #include "quiche/http2/test_tools/verify_macros.h" #include "quiche/common/platform/api/quiche_logging.h" #include "quiche/common/platform/api/quiche_test.h" using ::testing::AssertionSuccess; namespace http2 { namespace test { namespace { class HpackVarintDecoderTest : public RandomDecoderTest, public ::testing::WithParamInterface<std::tuple<uint8_t, const char*>> { protected: HpackVarintDecoderTest() : high_bits_(std::get<0>(GetParam())), prefix_length_(0) { QUICHE_CHECK(absl::HexStringToBytes(std::get<1>(GetParam()), &suffix_)); } void DecodeExpectSuccess(absl::string_view data, uint32_t prefix_length, uint64_t expected_value) { Validator validator = [expected_value, this]( const DecodeBuffer& , DecodeStatus ) -> AssertionResult { HTTP2_VERIFY_EQ(expected_value, decoder_.value()) << "Value doesn't match expected: " << decoder_.value() << " != " << expected_value; return AssertionSuccess(); }; validator = ValidateDoneAndOffset( data.size(), std::move(validator)); EXPECT_TRUE(Decode(data, prefix_length, std::move(validator))); EXPECT_EQ(expected_value, decoder_.value()); } void DecodeExpectError(absl::string_view data, uint32_t prefix_length) { Validator validator = [](const DecodeBuffer& , DecodeStatus status) -> AssertionResult { HTTP2_VERIFY_EQ(DecodeStatus::kDecodeError, status); return AssertionSuccess(); }; EXPECT_TRUE(Decode(data, prefix_length, std::move(validator))); } private: AssertionResult Decode(absl::string_view data, uint32_t prefix_length, const Validator validator) { prefix_length_ = prefix_length; std::string data_copy(data); uint8_t high_bits_mask = 0b11111111 << prefix_length_; data_copy[0] |= (high_bits_mask & high_bits_); data_copy.append(suffix_); DecodeBuffer b(data_copy); bool return_non_zero_on_first = true; return DecodeAndValidateSeveralWays(&b, return_non_zero_on_first, validator); } DecodeStatus StartDecoding(DecodeBuffer* b) override { QUICHE_CHECK_LT(0u, b->Remaining()); uint8_t prefix = b->DecodeUInt8(); return decoder_.Start(prefix, prefix_length_, b); } DecodeStatus ResumeDecoding(DecodeBuffer* b) override { return decoder_.Resume(b); } const uint8_t high_bits_; std::string suffix_; HpackVarintDecoder decoder_; uint8_t prefix_length_; }; INSTANTIATE_TEST_SUITE_P( HpackVarintDecoderTest, HpackVarintDecoderTest, ::testing::Combine( ::testing::Values(0b00000000, 0b11111111, 0b10101010), ::testing::Values("", "00", "666f6f"))); struct { const char* data; uint32_t prefix_length; uint64_t expected_value; } kSuccessTestData[] = { {"00", 3, 0}, {"00", 4, 0}, {"00", 5, 0}, {"00", 6, 0}, {"00", 7, 0}, {"00", 8, 0}, {"06", 3, 6}, {"0d", 4, 13}, {"10", 5, 16}, {"29", 6, 41}, {"56", 7, 86}, {"bf", 8, 191}, {"0700", 3, 7}, {"0f00", 4, 15}, {"1f00", 5, 31}, {"3f00", 6, 63}, {"7f00", 7, 127}, {"ff00", 8, 255}, {"078000", 3, 7}, {"0f8000", 4, 15}, {"1f8000", 5, 31}, {"3f8000", 6, 63}, {"7f8000", 7, 127}, {"ff8000", 8, 255}, {"0760", 3, 103}, {"0f2a", 4, 57}, {"1f7f", 5, 158}, {"3f02", 6, 65}, {"7f49", 7, 200}, {"ff6f", 8, 366}, {"07e000", 3, 103}, {"0faa00", 4, 57}, {"1fff00", 5, 158}, {"3f8200", 6, 65}, {"7fc900", 7, 200}, {"ffef00", 8, 366}, {"07e08000", 3, 103}, {"0faa8000", 4, 57}, {"1fff8000", 5, 158}, {"3f828000", 6, 65}, {"7fc98000", 7, 200}, 
{"ffef8000", 8, 366}, {"07e0808080808080808000", 3, 103}, {"0faa808080808080808000", 4, 57}, {"1fff808080808080808000", 5, 158}, {"3f82808080808080808000", 6, 65}, {"7fc9808080808080808000", 7, 200}, {"ffef808080808080808000", 8, 366}, {"07b260", 3, 12345}, {"0f8a2a", 4, 5401}, {"1fa87f", 5, 16327}, {"3fd002", 6, 399}, {"7fff49", 7, 9598}, {"ffe32f", 8, 6370}, {"07b2e000", 3, 12345}, {"0f8aaa00", 4, 5401}, {"1fa8ff00", 5, 16327}, {"3fd08200", 6, 399}, {"7fffc900", 7, 9598}, {"ffe3af00", 8, 6370}, {"07b2e080808080808000", 3, 12345}, {"0f8aaa80808080808000", 4, 5401}, {"1fa8ff80808080808000", 5, 16327}, {"3fd08280808080808000", 6, 399}, {"7fffc980808080808000", 7, 9598}, {"ffe3af80808080808000", 8, 6370}, {"078ab260", 3, 1579281}, {"0fc18a2a", 4, 689488}, {"1fada87f", 5, 2085964}, {"3fa0d002", 6, 43103}, {"7ffeff49", 7, 1212541}, {"ff93de23", 8, 585746}, {"078ab2e000", 3, 1579281}, {"0fc18aaa00", 4, 689488}, {"1fada8ff00", 5, 2085964}, {"3fa0d08200", 6, 43103}, {"7ffeffc900", 7, 1212541}, {"ff93dea300", 8, 585746}, {"079f8ab260", 3, 202147110}, {"0fa2c18a2a", 4, 88252593}, {"1fd0ada87f", 5, 266999535}, {"3ff9a0d002", 6, 5509304}, {"7f9efeff49", 7, 155189149}, {"ffaa82f404", 8, 10289705}, {"079f8ab2e000", 3, 202147110}, {"0fa2c18aaa00", 4, 88252593}, {"1fd0ada8ff00", 5, 266999535}, {"3ff9a0d08200", 6, 5509304}, {"7f9efeffc900", 7, 155189149}, {"ffaa82f48400", 8, 10289705}, {"0783aa9f8ab260", 3, 3311978140938}, {"0ff0b0a2c18a2a", 4, 1445930244223}, {"1fda84d0ada87f", 5, 4374519874169}, {"3fb5fbf9a0d002", 6, 90263420404}, {"7fcff19efeff49", 7, 2542616951118}, {"ff9fa486bbc327", 8, 1358138807070}, {"07f19883aa9f8ab260", 3, 54263449861016696}, {"0f84fdf0b0a2c18a2a", 4, 23690121121119891}, {"1fa0dfda84d0ada87f", 5, 71672133617889215}, {"3f9ff0b5fbf9a0d002", 6, 1478875878881374}, {"7ffbc1cff19efeff49", 7, 41658236125045114}, {"ff91b6fb85af99c342", 8, 37450237664484368}, {"0794f1f19883aa9f8ab201", 3, 12832019021693745307u}, {"0fa08f84fdf0b0a2c18a01", 4, 9980690937382242223u}, {"1fbfdda0dfda84d0ada801", 5, 12131360551794650846u}, {"3f9dc79ff0b5fbf9a0d001", 6, 15006530362736632796u}, {"7f8790fbc1cff19efeff01", 7, 18445754019193211014u}, {"fffba8c5b8d3fe9f8c8401", 8, 9518498503615141242u}, {"07f8ffffffffffffffff01", 3, 18446744073709551615u}, {"0ff0ffffffffffffffff01", 4, 18446744073709551615u}, {"1fe0ffffffffffffffff01", 5, 18446744073709551615u}, {"3fc0ffffffffffffffff01", 6, 18446744073709551615u}, {"7f80ffffffffffffffff01", 7, 18446744073709551615u}, {"ff80feffffffffffffff01", 8, 18446744073709551615u}, {"0a", 5, 10}, {"1f9a0a", 5, 1337}, }; TEST_P(HpackVarintDecoderTest, Success) { for (size_t i = 0; i < ABSL_ARRAYSIZE(kSuccessTestData); ++i) { std::string data_bytes; ASSERT_TRUE(absl::HexStringToBytes(kSuccessTestData[i].data, &data_bytes)); DecodeExpectSuccess(data_bytes, kSuccessTestData[i].prefix_length, kSuccessTestData[i].expected_value); } } struct { const char* data; uint32_t prefix_length; } kErrorTestData[] = { {"0780808080808080808080", 3}, {"0f80808080808080808080", 4}, {"1f80808080808080808080", 5}, {"3f80808080808080808080", 6}, {"7f80808080808080808080", 7}, {"ff80808080808080808080", 8}, {"07ffffffffffffffffffff", 3}, {"0fffffffffffffffffffff", 4}, {"1fffffffffffffffffffff", 5}, {"3fffffffffffffffffffff", 6}, {"7fffffffffffffffffffff", 7}, {"ffffffffffffffffffffff", 8}, {"07f9ffffffffffffffff01", 3}, {"0ff1ffffffffffffffff01", 4}, {"1fe1ffffffffffffffff01", 5}, {"3fc1ffffffffffffffff01", 6}, {"7f81ffffffffffffffff01", 7}, {"ff81feffffffffffffff01", 8}, {"07f8ffffffffffffffff8100", 
3}, {"0ff0ffffffffffffffff8100", 4}, {"1fe0ffffffffffffffff8100", 5}, {"3fc0ffffffffffffffff8100", 6}, {"7f80ffffffffffffffff8100", 7}, {"ff80feffffffffffffff8100", 8}}; TEST_P(HpackVarintDecoderTest, Error) { for (size_t i = 0; i < ABSL_ARRAYSIZE(kErrorTestData); ++i) { std::string data_bytes; ASSERT_TRUE(absl::HexStringToBytes(kErrorTestData[i].data, &data_bytes)); DecodeExpectError(data_bytes, kErrorTestData[i].prefix_length); } } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/varint/hpack_varint_decoder.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/varint/hpack_varint_decoder_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
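The hpack_varint_decoder record above implements the HPACK prefix-integer encoding (RFC 7541, Section 5.1) as a resumable decoder. As a minimal sketch of the same arithmetic, here is a hypothetical one-shot decoder that assumes the whole encoding is already in hand and replaces the record's careful per-byte overflow checks with a simple shift cap; DecodeHpackInt and its signature are illustrative, not part of QUICHE.

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// One-shot HPACK prefix-integer decode. The low prefix_length bits of the
// first byte either hold the whole value, or are all ones and a 7-bit
// little-endian continuation sequence follows.
std::optional<uint64_t> DecodeHpackInt(uint8_t prefix_length,
                                       const std::vector<uint8_t>& bytes) {
  if (bytes.empty() || prefix_length < 1 || prefix_length > 8) {
    return std::nullopt;
  }
  const uint8_t prefix_mask = static_cast<uint8_t>((1 << prefix_length) - 1);
  uint64_t value = bytes[0] & prefix_mask;
  if (value < prefix_mask) return value;  // fits entirely in the prefix
  uint32_t shift = 0;
  for (size_t i = 1; i < bytes.size(); ++i) {
    if (shift > 63) return std::nullopt;  // too long; real code also
                                          // rejects per-byte overflow
    value += static_cast<uint64_t>(bytes[i] & 0x7f) << shift;
    if ((bytes[i] & 0x80) == 0) return value;  // no continuation bit: done
    shift += 7;
  }
  return std::nullopt;  // ran out of input mid-varint
}

// Example from the success table above: "1f9a0a" with a 5-bit prefix decodes
// as 31 + (0x1a << 0) + (0x0a << 7) = 1337, so
// DecodeHpackInt(5, {0x1f, 0x9a, 0x0a}) yields 1337.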
552ac37d-9222-4206-ba65-c3ddaa1f19e1
cpp
google/quiche
qpack_blocking_manager
quiche/quic/core/qpack/qpack_blocking_manager.cc
quiche/quic/core/qpack/qpack_blocking_manager_test.cc
#include "quiche/quic/core/qpack/qpack_blocking_manager.h" #include <limits> #include <utility> namespace quic { QpackBlockingManager::QpackBlockingManager() : known_received_count_(0) {} bool QpackBlockingManager::OnHeaderAcknowledgement(QuicStreamId stream_id) { auto it = header_blocks_.find(stream_id); if (it == header_blocks_.end()) { return false; } QUICHE_DCHECK(!it->second.empty()); const IndexSet& indices = it->second.front(); QUICHE_DCHECK(!indices.empty()); const uint64_t required_index_count = RequiredInsertCount(indices); if (known_received_count_ < required_index_count) { known_received_count_ = required_index_count; } DecreaseReferenceCounts(indices); it->second.pop_front(); if (it->second.empty()) { header_blocks_.erase(it); } return true; } void QpackBlockingManager::OnStreamCancellation(QuicStreamId stream_id) { auto it = header_blocks_.find(stream_id); if (it == header_blocks_.end()) { return; } for (const IndexSet& indices : it->second) { DecreaseReferenceCounts(indices); } header_blocks_.erase(it); } bool QpackBlockingManager::OnInsertCountIncrement(uint64_t increment) { if (increment > std::numeric_limits<uint64_t>::max() - known_received_count_) { return false; } known_received_count_ += increment; return true; } void QpackBlockingManager::OnHeaderBlockSent(QuicStreamId stream_id, IndexSet indices) { QUICHE_DCHECK(!indices.empty()); IncreaseReferenceCounts(indices); header_blocks_[stream_id].push_back(std::move(indices)); } bool QpackBlockingManager::blocking_allowed_on_stream( QuicStreamId stream_id, uint64_t maximum_blocked_streams) const { if (header_blocks_.size() + 1 <= maximum_blocked_streams) { return true; } if (maximum_blocked_streams == 0) { return false; } uint64_t blocked_stream_count = 0; for (const auto& header_blocks_for_stream : header_blocks_) { for (const IndexSet& indices : header_blocks_for_stream.second) { if (RequiredInsertCount(indices) > known_received_count_) { if (header_blocks_for_stream.first == stream_id) { return true; } ++blocked_stream_count; if (blocked_stream_count + 1 > maximum_blocked_streams) { return false; } break; } } } return true; } uint64_t QpackBlockingManager::smallest_blocking_index() const { return entry_reference_counts_.empty() ? std::numeric_limits<uint64_t>::max() : entry_reference_counts_.begin()->first; } uint64_t QpackBlockingManager::RequiredInsertCount(const IndexSet& indices) { return *indices.rbegin() + 1; } void QpackBlockingManager::IncreaseReferenceCounts(const IndexSet& indices) { for (const uint64_t index : indices) { auto it = entry_reference_counts_.lower_bound(index); if (it != entry_reference_counts_.end() && it->first == index) { ++it->second; } else { entry_reference_counts_.insert(it, {index, 1}); } } } void QpackBlockingManager::DecreaseReferenceCounts(const IndexSet& indices) { for (const uint64_t index : indices) { auto it = entry_reference_counts_.find(index); QUICHE_DCHECK(it != entry_reference_counts_.end()); QUICHE_DCHECK_NE(0u, it->second); if (it->second == 1) { entry_reference_counts_.erase(it); } else { --it->second; } } } }
#include "quiche/quic/core/qpack/qpack_blocking_manager.h" #include <limits> #include "quiche/quic/platform/api/quic_test.h" namespace quic { namespace test { class QpackBlockingManagerPeer { public: static bool stream_is_blocked(const QpackBlockingManager* manager, QuicStreamId stream_id) { for (const auto& header_blocks_for_stream : manager->header_blocks_) { if (header_blocks_for_stream.first != stream_id) { continue; } for (const auto& indices : header_blocks_for_stream.second) { if (QpackBlockingManager::RequiredInsertCount(indices) > manager->known_received_count_) { return true; } } } return false; } }; namespace { class QpackBlockingManagerTest : public QuicTest { protected: QpackBlockingManagerTest() = default; ~QpackBlockingManagerTest() override = default; bool stream_is_blocked(QuicStreamId stream_id) const { return QpackBlockingManagerPeer::stream_is_blocked(&manager_, stream_id); } QpackBlockingManager manager_; }; TEST_F(QpackBlockingManagerTest, Empty) { EXPECT_EQ(0u, manager_.known_received_count()); EXPECT_EQ(std::numeric_limits<uint64_t>::max(), manager_.smallest_blocking_index()); EXPECT_FALSE(manager_.OnHeaderAcknowledgement(0)); EXPECT_FALSE(manager_.OnHeaderAcknowledgement(1)); } TEST_F(QpackBlockingManagerTest, NotBlockedByInsertCountIncrement) { EXPECT_TRUE(manager_.OnInsertCountIncrement(2)); manager_.OnHeaderBlockSent(0, {1, 0}); EXPECT_FALSE(stream_is_blocked(0)); } TEST_F(QpackBlockingManagerTest, UnblockedByInsertCountIncrement) { manager_.OnHeaderBlockSent(0, {1, 0}); EXPECT_TRUE(stream_is_blocked(0)); EXPECT_TRUE(manager_.OnInsertCountIncrement(2)); EXPECT_FALSE(stream_is_blocked(0)); } TEST_F(QpackBlockingManagerTest, NotBlockedByHeaderAcknowledgement) { manager_.OnHeaderBlockSent(0, {2, 1, 1}); EXPECT_TRUE(stream_is_blocked(0)); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(0)); EXPECT_FALSE(stream_is_blocked(0)); manager_.OnHeaderBlockSent(1, {2, 2}); EXPECT_FALSE(stream_is_blocked(1)); } TEST_F(QpackBlockingManagerTest, UnblockedByHeaderAcknowledgement) { manager_.OnHeaderBlockSent(0, {2, 1, 1}); manager_.OnHeaderBlockSent(1, {2, 2}); EXPECT_TRUE(stream_is_blocked(0)); EXPECT_TRUE(stream_is_blocked(1)); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(0)); EXPECT_FALSE(stream_is_blocked(0)); EXPECT_FALSE(stream_is_blocked(1)); } TEST_F(QpackBlockingManagerTest, KnownReceivedCount) { EXPECT_EQ(0u, manager_.known_received_count()); manager_.OnHeaderBlockSent(0, {0}); EXPECT_EQ(0u, manager_.known_received_count()); manager_.OnHeaderBlockSent(1, {1}); EXPECT_EQ(0u, manager_.known_received_count()); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(0)); EXPECT_EQ(1u, manager_.known_received_count()); manager_.OnHeaderBlockSent(2, {5}); EXPECT_EQ(1u, manager_.known_received_count()); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(1)); EXPECT_EQ(2u, manager_.known_received_count()); EXPECT_TRUE(manager_.OnInsertCountIncrement(2)); EXPECT_EQ(4u, manager_.known_received_count()); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(2)); EXPECT_EQ(6u, manager_.known_received_count()); manager_.OnStreamCancellation(0); EXPECT_EQ(6u, manager_.known_received_count()); manager_.OnHeaderBlockSent(0, {3}); EXPECT_EQ(6u, manager_.known_received_count()); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(0)); EXPECT_EQ(6u, manager_.known_received_count()); manager_.OnHeaderBlockSent(1, {5}); EXPECT_EQ(6u, manager_.known_received_count()); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(1)); EXPECT_EQ(6u, manager_.known_received_count()); } TEST_F(QpackBlockingManagerTest, SmallestBlockingIndex) { 
EXPECT_EQ(std::numeric_limits<uint64_t>::max(), manager_.smallest_blocking_index()); manager_.OnHeaderBlockSent(0, {0}); EXPECT_EQ(0u, manager_.smallest_blocking_index()); manager_.OnHeaderBlockSent(1, {2}); EXPECT_EQ(0u, manager_.smallest_blocking_index()); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(0)); EXPECT_EQ(2u, manager_.smallest_blocking_index()); manager_.OnHeaderBlockSent(1, {1}); EXPECT_EQ(1u, manager_.smallest_blocking_index()); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(1)); EXPECT_EQ(1u, manager_.smallest_blocking_index()); EXPECT_TRUE(manager_.OnInsertCountIncrement(2)); EXPECT_EQ(1u, manager_.smallest_blocking_index()); manager_.OnStreamCancellation(1); EXPECT_EQ(std::numeric_limits<uint64_t>::max(), manager_.smallest_blocking_index()); } TEST_F(QpackBlockingManagerTest, HeaderAcknowledgementsOnSingleStream) { EXPECT_EQ(0u, manager_.known_received_count()); EXPECT_EQ(std::numeric_limits<uint64_t>::max(), manager_.smallest_blocking_index()); manager_.OnHeaderBlockSent(0, {2, 1, 1}); EXPECT_EQ(0u, manager_.known_received_count()); EXPECT_TRUE(stream_is_blocked(0)); EXPECT_EQ(1u, manager_.smallest_blocking_index()); manager_.OnHeaderBlockSent(0, {1, 0}); EXPECT_EQ(0u, manager_.known_received_count()); EXPECT_TRUE(stream_is_blocked(0)); EXPECT_EQ(0u, manager_.smallest_blocking_index()); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(0)); EXPECT_EQ(3u, manager_.known_received_count()); EXPECT_FALSE(stream_is_blocked(0)); EXPECT_EQ(0u, manager_.smallest_blocking_index()); manager_.OnHeaderBlockSent(0, {3}); EXPECT_EQ(3u, manager_.known_received_count()); EXPECT_TRUE(stream_is_blocked(0)); EXPECT_EQ(0u, manager_.smallest_blocking_index()); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(0)); EXPECT_EQ(3u, manager_.known_received_count()); EXPECT_TRUE(stream_is_blocked(0)); EXPECT_EQ(3u, manager_.smallest_blocking_index()); EXPECT_TRUE(manager_.OnHeaderAcknowledgement(0)); EXPECT_EQ(4u, manager_.known_received_count()); EXPECT_FALSE(stream_is_blocked(0)); EXPECT_EQ(std::numeric_limits<uint64_t>::max(), manager_.smallest_blocking_index()); EXPECT_FALSE(manager_.OnHeaderAcknowledgement(0)); } TEST_F(QpackBlockingManagerTest, CancelStream) { manager_.OnHeaderBlockSent(0, {3}); EXPECT_TRUE(stream_is_blocked(0)); EXPECT_EQ(3u, manager_.smallest_blocking_index()); manager_.OnHeaderBlockSent(0, {2}); EXPECT_TRUE(stream_is_blocked(0)); EXPECT_EQ(2u, manager_.smallest_blocking_index()); manager_.OnHeaderBlockSent(1, {4}); EXPECT_TRUE(stream_is_blocked(0)); EXPECT_TRUE(stream_is_blocked(1)); EXPECT_EQ(2u, manager_.smallest_blocking_index()); manager_.OnStreamCancellation(0); EXPECT_FALSE(stream_is_blocked(0)); EXPECT_TRUE(stream_is_blocked(1)); EXPECT_EQ(4u, manager_.smallest_blocking_index()); manager_.OnStreamCancellation(1); EXPECT_FALSE(stream_is_blocked(0)); EXPECT_FALSE(stream_is_blocked(1)); EXPECT_EQ(std::numeric_limits<uint64_t>::max(), manager_.smallest_blocking_index()); } TEST_F(QpackBlockingManagerTest, BlockingAllowedOnStream) { const QuicStreamId kStreamId1 = 1; const QuicStreamId kStreamId2 = 2; const QuicStreamId kStreamId3 = 3; EXPECT_FALSE(manager_.blocking_allowed_on_stream(kStreamId1, 0)); EXPECT_FALSE(manager_.blocking_allowed_on_stream(kStreamId2, 0)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId1, 1)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId2, 1)); manager_.OnHeaderBlockSent(kStreamId1, {0}); manager_.OnHeaderBlockSent(kStreamId1, {1}); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId1, 1)); 
EXPECT_FALSE(manager_.blocking_allowed_on_stream(kStreamId2, 1)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId1, 2)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId2, 2)); manager_.OnHeaderBlockSent(kStreamId2, {2}); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId1, 2)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId2, 2)); EXPECT_FALSE(manager_.blocking_allowed_on_stream(kStreamId3, 2)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId3, 3)); manager_.OnHeaderAcknowledgement(kStreamId1); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId1, 2)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId2, 2)); manager_.OnHeaderAcknowledgement(kStreamId1); EXPECT_FALSE(manager_.blocking_allowed_on_stream(kStreamId1, 1)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId2, 1)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId1, 2)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId2, 2)); manager_.OnHeaderAcknowledgement(kStreamId2); EXPECT_FALSE(manager_.blocking_allowed_on_stream(kStreamId1, 0)); EXPECT_FALSE(manager_.blocking_allowed_on_stream(kStreamId2, 0)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId1, 1)); EXPECT_TRUE(manager_.blocking_allowed_on_stream(kStreamId2, 1)); } TEST_F(QpackBlockingManagerTest, InsertCountIncrementOverflow) { EXPECT_TRUE(manager_.OnInsertCountIncrement(10)); EXPECT_EQ(10u, manager_.known_received_count()); EXPECT_FALSE(manager_.OnInsertCountIncrement( std::numeric_limits<uint64_t>::max() - 5)); } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_blocking_manager.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_blocking_manager_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
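In the qpack_blocking_manager record above, RequiredInsertCount is the largest referenced dynamic-table index plus one, and a stream is blocked while the decoder's Known Received Count has not reached that value. A toy sketch of those two rules (std::set stands in for the manager's IndexSet; the names are illustrative):

#include <cstdint>
#include <set>

// A header block referencing dynamic-table entries with zero-based absolute
// indices `indices` needs the insert count to reach max(indices) + 1 before
// it can be decoded. The manager DCHECKs non-empty; this sketch returns 0.
uint64_t RequiredInsertCountSketch(const std::set<uint64_t>& indices) {
  return indices.empty() ? 0 : *indices.rbegin() + 1;
}

// Blocked means some outstanding header block's required insert count still
// exceeds what the peer has acknowledged receiving.
bool IsBlocked(const std::set<uint64_t>& indices,
               uint64_t known_received_count) {
  return RequiredInsertCountSketch(indices) > known_received_count;
}

// e.g. a block referencing {2, 1} requires insert count 3, so the stream
// stays blocked until known_received_count >= 3 -- matching the
// UnblockedByInsertCountIncrement test above.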
ec664ca7-65e1-4a7d-acf8-ac959dafd8ea
cpp
tensorflow/tensorflow
sharding_util
tensorflow/compiler/tf2xla/sharding_util.cc
tensorflow/compiler/tf2xla/sharding_util_test.cc
#include "tensorflow/compiler/tf2xla/sharding_util.h" #include "absl/strings/match.h" #include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/device_name_utils.h" namespace tensorflow { namespace { const char kDeviceSuffixReplicatedCore[] = "REPLICATED_CORE"; const char kShardingAttribute[] = "_XlaSharding"; const char kShardingOpAttribute[] = "sharding"; } namespace { xla::OpMetadata CreateOpMetadata(const std::string& op_type, const std::string& op_name) { xla::OpMetadata metadata; metadata.set_op_type(op_type); metadata.set_op_name(op_name); return metadata; } void AssignOpMetadataToSharding(xla::OpSharding& sharding, const string& op_type, const string& op_name) { auto metadata = CreateOpMetadata(op_type, op_name); if (sharding.type() == xla::OpSharding::TUPLE) { for (auto& sharding_element : *sharding.mutable_tuple_shardings()) { *sharding_element.add_metadata() = metadata; } } else { *sharding.add_metadata() = metadata; } } Status CoreOutOfRangeError(int core, int num_cores_per_replica) { return errors::InvalidArgument( "Invalid replicated core id: ", core, "; num_cores_per_replica=", num_cores_per_replica); } } absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice( const string& device_name, int num_cores_per_replica, std::optional<xla::OpSharding> explicit_sharding, std::optional<xla::OpMetadata> metadata) { if (device_name.empty()) { return explicit_sharding; } DeviceNameUtils::ParsedName parsed_device; if (!DeviceNameUtils::ParseFullName(device_name, &parsed_device)) { return errors::InvalidArgument("Malformed assigned device '", device_name, "'"); } if (explicit_sharding.has_value()) { return explicit_sharding; } else if (!parsed_device.has_type || !parsed_device.has_id || !absl::StrContains(parsed_device.type, kDeviceSuffixReplicatedCore)) { return std::optional<xla::OpSharding>(); } else { const int core = parsed_device.id; if (core < 0 || core >= num_cores_per_replica) { return CoreOutOfRangeError(core, num_cores_per_replica); } auto sharding = xla::sharding_builder::AssignDevice(core); if (metadata.has_value()) { *sharding.add_metadata() = metadata.value(); } return std::optional<xla::OpSharding>(sharding); } } absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice( const NodeDef& node_def, int num_cores_per_replica, bool add_metadata) { const string& device_name = node_def.device(); TF_ASSIGN_OR_RETURN(std::optional<xla::OpSharding> sharding, GetShardingFromNodeDef(node_def, add_metadata)); return ParseShardingFromDevice( device_name, num_cores_per_replica, sharding, add_metadata ? std::optional<xla::OpMetadata>( CreateOpMetadata(node_def.op(), node_def.name())) : std::nullopt); } absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromDevice( const Node& node, int num_cores_per_replica, bool add_metadata) { string device_name = node.assigned_device_name(); if (device_name.empty()) { device_name = node.requested_device(); } TF_ASSIGN_OR_RETURN(std::optional<xla::OpSharding> sharding, GetShardingFromNodeDef(node.def(), add_metadata)); return ParseShardingFromDevice( device_name, num_cores_per_replica, sharding, add_metadata ? 
std::optional<xla::OpMetadata>( CreateOpMetadata(node.type_string(), node.name())) : std::nullopt); } absl::StatusOr<std::optional<xla::OpSharding>> ParseShardingFromEdgeSource( const Edge& edge, int num_cores_per_replica, bool add_metadata) { if (edge.src() == nullptr) { return tensorflow::errors::InvalidArgument( "Null src for ParseShardingFromEdgeSource edge=", edge.DebugString()); } TF_ASSIGN_OR_RETURN(std::optional<xla::OpSharding> sharding, ParseShardingFromDevice( *edge.src(), num_cores_per_replica, add_metadata)); if (sharding.has_value() && sharding.value().type() == xla::OpSharding::TUPLE) { if (edge.src_output() < 0 || edge.src_output() >= sharding.value().tuple_shardings_size()) { return tensorflow::errors::InvalidArgument( "Tuple index out of bound: edge=", edge.DebugString(), " sharding=", sharding->DebugString()); } std::optional<xla::OpSharding> subsharding = sharding.value().tuple_shardings(edge.src_output()); return subsharding; } return sharding; } void SetShardingDeviceAssignmentFromNode(const Node& src, Node* dst) { string device_name = src.assigned_device_name(); if (device_name.empty()) { device_name = src.requested_device(); } dst->set_assigned_device_name(device_name); if (const AttrValue* attr = src.attrs().Find(kShardingAttribute)) { dst->AddAttr(kShardingAttribute, *attr); } } namespace { absl::StatusOr<std::optional<xla::OpSharding>> GetShardingFromNodeDefInternal( const NodeDef& node_def, bool add_metadata, const char* attribute) { if (!HasNodeAttr(node_def, attribute)) { return std::optional<xla::OpSharding>(); } string value; xla::OpSharding sharding; TF_RETURN_IF_ERROR(GetNodeAttr(node_def, attribute, &value)); if (tensorflow::DecodeShardingAttribute(value, sharding).failed()) { return xla::InvalidArgument( "Experimental %s attribute was not a valid encoded xla::OpSharding " "proto.", attribute); } if (add_metadata) { AssignOpMetadataToSharding(sharding, node_def.op(), node_def.name()); } return std::optional<xla::OpSharding>(sharding); } } absl::StatusOr<std::optional<xla::OpSharding>> GetShardingFromNodeDef( const NodeDef& node_def, bool add_metadata) { if (node_def.op() == "XlaSharding") { TF_ASSIGN_OR_RETURN(auto sharding, GetShardingFromNodeDefInternal(node_def, add_metadata, kShardingOpAttribute)); if (sharding.has_value()) { return sharding; } } return GetShardingFromNodeDefInternal(node_def, add_metadata, kShardingAttribute); } }
#include "tensorflow/compiler/tf2xla/sharding_util.h" #include <functional> #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { TEST(CoreUtilTest, ParseShardingFromDevice) { Graph graph(OpRegistry::Global()); auto core_from_sharding = [](std::optional<xla::OpSharding> sharding) -> int64 { if (sharding.has_value() && sharding.value().type() == xla::OpSharding::MAXIMAL) { return sharding.value().tile_assignment_devices(0); } else { return -1; } }; auto parse_status = ParseShardingFromDevice("", 1); TF_EXPECT_OK(parse_status.status()); EXPECT_EQ(-1, core_from_sharding(parse_status.value())); parse_status = ParseShardingFromDevice("", 100); TF_EXPECT_OK(parse_status.status()); EXPECT_EQ(-1, core_from_sharding(parse_status.value())); parse_status = ParseShardingFromDevice("/device:A_REPLICATED_CORE:-1", 100); EXPECT_FALSE(parse_status.ok()); parse_status = ParseShardingFromDevice("/device:A_REPLICATED_CORE:55", 100); TF_EXPECT_OK(parse_status.status()); EXPECT_EQ(55, core_from_sharding(parse_status.value())); parse_status = ParseShardingFromDevice("/device:A_REPLICATED_CORE:100", 100); EXPECT_FALSE(parse_status.ok()); parse_status = ParseShardingFromDevice("/cpu:0", 100); TF_EXPECT_OK(parse_status.status()); EXPECT_EQ(-1, core_from_sharding(parse_status.value())); } class ShardingWithMetadataTest : public ::testing::TestWithParam<xla::OpSharding> {}; TEST_P(ShardingWithMetadataTest, GetShardingFromNode) { NodeDef node_def; { node_def.set_op("_Arg"); node_def.set_name("arg"); AttrValue xla_sharding; xla_sharding.set_s(""); AttrValue index; index.set_i(0); AttrValue type; type.set_type(DataType::DT_FLOAT); node_def.mutable_attr()->insert( {{"_XlaSharding", xla_sharding}, {"index", index}, {"T", type}}); } auto check_metadata = [](const xla::OpSharding& sharding) { ASSERT_EQ(sharding.metadata_size(), 1); const auto& metadata = sharding.metadata(0); EXPECT_EQ(metadata.op_type(), "_Arg"); EXPECT_EQ(metadata.op_name(), "arg"); }; auto test_sharding_metadata = [&check_metadata]( const std::function<absl::StatusOr<std::optional<xla::OpSharding>>()>& fn) { auto status_or_sharding = fn(); TF_ASSERT_OK(status_or_sharding.status()); ASSERT_TRUE(status_or_sharding.value().has_value()); auto& sharding = status_or_sharding.value(); ASSERT_TRUE(sharding.has_value()); if (sharding->type() == xla::OpSharding::TUPLE) { EXPECT_TRUE(sharding->metadata().empty()); for (const auto& sharding_element : sharding->tuple_shardings()) { check_metadata(sharding_element); } } else { check_metadata(sharding.value()); } }; { test_sharding_metadata([&node_def]() { return GetShardingFromNodeDef(node_def, true); }); } { test_sharding_metadata([&node_def]() { return ParseShardingFromDevice(node_def, 1, true); }); } { Graph graph(OpRegistry::Global()); Status status; Node* node = graph.AddNode(node_def, &status); TF_ASSERT_OK(status); test_sharding_metadata([node]() { return ParseShardingFromDevice(*node, 1, true); }); } } xla::OpSharding CreateTupleSharding() { xla::OpSharding sharding; sharding.set_type(xla::OpSharding::TUPLE); sharding.add_tuple_shardings()->set_type(xla::OpSharding::REPLICATED); sharding.add_tuple_shardings()->set_type(xla::OpSharding::REPLICATED); return sharding; } INSTANTIATE_TEST_SUITE_P(GetShardingFromNode, ShardingWithMetadataTest, ::testing::Values(xla::sharding_builder::Replicate(), CreateTupleSharding())); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/sharding_util.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/sharding_util_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
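The heart of ParseShardingFromDevice in the sharding_util record is extracting a core id from a device name whose type contains REPLICATED_CORE and range-checking it against num_cores_per_replica. A rough standalone sketch of that path, with DeviceNameUtils and Status replaced by string parsing and sentinel values (ParseReplicatedCore is a hypothetical helper, and it assumes the trailing field is numeric):

#include <optional>
#include <string>

// Returns nullopt when the device implies no sharding (e.g. "/cpu:0"),
// -1 for an out-of-range or malformed core id (the real code returns a
// Status), and the core id otherwise.
std::optional<int> ParseReplicatedCore(const std::string& device_name,
                                       int num_cores_per_replica) {
  if (device_name.find("REPLICATED_CORE") == std::string::npos) {
    return std::nullopt;  // not a replicated-core device
  }
  const size_t colon = device_name.rfind(':');
  if (colon == std::string::npos) return -1;
  const int core = std::stoi(device_name.substr(colon + 1));
  if (core < 0 || core >= num_cores_per_replica) return -1;  // out of range
  return core;
}

// ParseReplicatedCore("/device:A_REPLICATED_CORE:55", 100) yields 55, while
// core 100 with num_cores_per_replica == 100 is rejected, as in the
// ParseShardingFromDevice test above.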
c987cf9e-49f9-4548-8911-f4a481f0a4b8
cpp
tensorflow/tensorflow
buffer_allocations
third_party/xla/xla/service/gpu/buffer_allocations.cc
third_party/xla/xla/backends/cpu/runtime/buffer_allocations_test.cc
#include "xla/service/gpu/buffer_allocations.h" #include <cstdint> #include <set> #include "absl/status/status.h" #include "absl/types/span.h" #include "xla/service/buffer_assignment.h" #include "xla/stream_executor/device_memory.h" #include "tsl/platform/logging.h" namespace xla { namespace gpu { absl::Status BufferAllocations::TearDown( const std::set<se::DeviceMemoryBase>& live_addresses, absl::Span<const BufferAllocation> allocations) { absl::Status status; const int64_t num_buffers = allocations.size(); for (BufferAllocation::Index i = 0; i < num_buffers; ++i) { const BufferAllocation& allocation = allocations[i]; se::DeviceMemoryBase buffer_address = GetDeviceAddress(allocation.index()); if ((allocation.maybe_live_out() && !live_addresses.count(buffer_address)) || allocation.IsPreallocatedTempBuffer()) { auto dealloc_result = memory_allocator_->Deallocate(device_ordinal_, buffer_address); if (!dealloc_result.ok() && status.ok()) { status = dealloc_result; } } } return status; } se::DeviceMemoryBase BufferAllocations::GetDeviceAddress( BufferAllocation::Index buffer_index) const { CHECK_GE(buffer_index, 0); CHECK_LT(buffer_index, buffers_.size()); return buffers_[buffer_index]; } se::DeviceMemoryBase& BufferAllocations::GetMutableDeviceAddress( BufferAllocation::Index buffer_index) { CHECK_GE(buffer_index, 0); CHECK_LT(buffer_index, buffers_.size()); return buffers_[buffer_index]; } se::DeviceMemoryBase BufferAllocations::GetDeviceAddress( const BufferAllocation::Slice& buffer_slice) const { int64_t index = buffer_slice.index(); se::DeviceMemoryBase base = GetDeviceAddress(index); int64_t offset = buffer_slice.offset(); CHECK_LE(buffer_slice.offset(), base.size()) << "slice offset " << offset << " must be smaller than buffer #" << index << " size " << base.size(); int64_t extent = offset + buffer_slice.size(); CHECK_LE(extent, base.size()) << "slice extent " << extent << " must be smaller than buffer #" << index << " size " << base.size(); return base.GetByteSlice(buffer_slice.offset(), buffer_slice.size()); } } }
#include "xla/backends/cpu/runtime/buffer_allocations.h" #include <cstddef> #include <vector> #include "xla/service/buffer_assignment.h" #include "xla/service/maybe_owning_device_memory.h" #include "xla/stream_executor/device_memory.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla::cpu { namespace { TEST(BufferAllocationsTest, GetDeviceAddress) { std::vector<MaybeOwningDeviceMemory> buffers; std::vector<float> data = {1.0, 2.0, 3.0, 4.0}; size_t size_in_bytes = data.size() * sizeof(float); buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes)); BufferAllocations allocations(buffers); BufferAllocation alloc(0, size_in_bytes, 0); BufferAllocation::Slice slice(&alloc, 2 * sizeof(float), sizeof(float)); TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase alloc_mem, allocations.GetDeviceAddress(0)); EXPECT_EQ(alloc_mem.opaque(), &data[0]); TF_ASSERT_OK_AND_ASSIGN(se::DeviceMemoryBase slice_mem, allocations.GetDeviceAddress(slice)); EXPECT_EQ(slice_mem.opaque(), &data[2]); } TEST(BufferAllocationsTest, GetDeviceAddressUnchecked) { std::vector<MaybeOwningDeviceMemory> buffers; std::vector<float> data = {1.0, 2.0, 3.0, 4.0}; size_t size_in_bytes = data.size() * sizeof(float); buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes)); BufferAllocations allocations(buffers); BufferAllocation alloc(0, size_in_bytes, 0); BufferAllocation::Slice slice(&alloc, 2 * sizeof(float), sizeof(float)); se::DeviceMemoryBase alloc_mem = allocations.GetDeviceAddressUnchecked(0); EXPECT_EQ(alloc_mem.opaque(), &data[0]); se::DeviceMemoryBase slice_mem = allocations.GetDeviceAddressUnchecked(slice); EXPECT_EQ(slice_mem.opaque(), &data[2]); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/buffer_allocations.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/buffer_allocations_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
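GetDeviceAddress(slice) in the buffer_allocations record checks both the slice offset and the full extent offset + size against the base buffer before handing out a byte slice. The same bounds logic over plain memory, as a sketch (ByteSpan and GetByteSlice are illustrative, standing in for se::DeviceMemoryBase and its GetByteSlice):

#include <cstddef>
#include <optional>

struct ByteSpan {
  std::byte* data;
  size_t size;
};

// A slice of `size` bytes at `offset` is valid only if the whole extent
// stays within the base allocation.
std::optional<ByteSpan> GetByteSlice(ByteSpan base, size_t offset,
                                     size_t size) {
  if (offset > base.size) return std::nullopt;         // offset past the end
  if (size > base.size - offset) return std::nullopt;  // extent too large
  return ByteSpan{base.data + offset, size};
}

// The second test is written as size > base.size - offset rather than
// offset + size > base.size so the comparison itself cannot wrap around
// in unsigned arithmetic.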
63ccecee-2364-4308-a602-e2ce86fff0bc
cpp
tensorflow/tensorflow
image_metrics
tensorflow/lite/tools/evaluation/stages/utils/image_metrics.cc
tensorflow/lite/tools/evaluation/stages/utils/image_metrics_test.cc
#include "tensorflow/lite/tools/evaluation/stages/utils/image_metrics.h" #include <algorithm> #include <cmath> #include "absl/container/flat_hash_map.h" #include "tensorflow/core/platform/logging.h" namespace tflite { namespace evaluation { namespace image { float Box2D::Length(const Box2D::Interval& a) { return std::max(0.f, a.max - a.min); } float Box2D::Intersection(const Box2D::Interval& a, const Box2D::Interval& b) { return Length(Interval{std::max(a.min, b.min), std::min(a.max, b.max)}); } float Box2D::Area() const { return Length(x) * Length(y); } float Box2D::Intersection(const Box2D& other) const { return Intersection(x, other.x) * Intersection(y, other.y); } float Box2D::Union(const Box2D& other) const { return Area() + other.Area() - Intersection(other); } float Box2D::IoU(const Box2D& other) const { const float total = Union(other); if (total > 0) { return Intersection(other) / total; } else { return 0.0; } } float Box2D::Overlap(const Box2D& other) const { const float intersection = Intersection(other); return intersection > 0 ? intersection / Area() : 0.0; } float AveragePrecision::FromPRCurve(const std::vector<PR>& pr, std::vector<PR>* pr_out) { float p = 0; float sum = 0; int r_level = opts_.num_recall_points; for (int i = pr.size() - 1; i >= 0; --i) { const PR& item = pr[i]; if (i > 0) { if (item.r < pr[i - 1].r) { LOG(ERROR) << "recall points are not in order: " << pr[i - 1].r << ", " << item.r; return 0; } } while (item.r * opts_.num_recall_points < r_level) { const float recall = static_cast<float>(r_level) / opts_.num_recall_points; if (r_level < 0) { LOG(ERROR) << "Number of recall points should be > 0"; return 0; } sum += p; r_level -= 1; if (pr_out != nullptr) { pr_out->emplace_back(p, recall); } } p = std::max(p, item.p); } for (; r_level >= 0; --r_level) { const float recall = static_cast<float>(r_level) / opts_.num_recall_points; sum += p; if (pr_out != nullptr) { pr_out->emplace_back(p, recall); } } return sum / (1 + opts_.num_recall_points); } float AveragePrecision::FromBoxes(const std::vector<Detection>& groundtruth, const std::vector<Detection>& prediction, std::vector<PR>* pr_out) { absl::flat_hash_map<int64_t, std::list<Detection>> gt; int num_gt = 0; for (auto& box : groundtruth) { gt[box.imgid].push_back(box); if (!box.difficult && box.ignore == kDontIgnore) { ++num_gt; } } if (num_gt == 0) { return NAN; } std::vector<Detection> pd = prediction; std::sort(pd.begin(), pd.end(), [](const Detection& a, const Detection& b) { return a.score > b.score; }); std::vector<PR> pr; int correct = 0; int num_pd = 0; for (int i = 0; i < pd.size(); ++i) { const Detection& b = pd[i]; auto* g = &gt[b.imgid]; auto best = g->end(); float best_iou = -INFINITY; for (auto it = g->begin(); it != g->end(); ++it) { const auto iou = b.box.IoU(it->box); if (iou > best_iou) { best = it; best_iou = iou; } } if ((best != g->end()) && (best_iou >= opts_.iou_threshold)) { if (best->difficult) { continue; } switch (best->ignore) { case kDontIgnore: { ++correct; ++num_pd; g->erase(best); pr.push_back({static_cast<float>(correct) / num_pd, static_cast<float>(correct) / num_gt}); break; } case kIgnoreOneMatch: { g->erase(best); break; } case kIgnoreAllMatches: { break; } } } else { ++num_pd; pr.push_back({static_cast<float>(correct) / num_pd, static_cast<float>(correct) / num_gt}); } } return FromPRCurve(pr, pr_out); } } } }
#include "tensorflow/lite/tools/evaluation/stages/utils/image_metrics.h" #include <stdint.h> #include <algorithm> #include <cmath> #include <cstdlib> #include <gtest/gtest.h> namespace tflite { namespace evaluation { namespace image { float MaxP(float minr, const std::vector<PR>& prs) { float p = 0; for (auto& pr : prs) { if (pr.r >= minr) p = std::max(p, pr.p); } return p; } float ExpectedAP(const std::vector<PR>& prs) { float sum = 0; for (float r = 0; r <= 1.0; r += 0.01) { sum += MaxP(r, prs); } return sum / 101; } float GenerateRandomFraction() { return static_cast<float>(std::rand()) / RAND_MAX; } TEST(ImageMetricsTest, APBasic) { std::vector<PR> prs; prs = {{1., 1.}, {0.5, 1.0}, {1 / 3, 1.0}}; EXPECT_NEAR(ExpectedAP(prs), AveragePrecision().FromPRCurve(prs), 1e-6); prs = {{1.0, 0.01}}; EXPECT_NEAR(ExpectedAP(prs), AveragePrecision().FromPRCurve(prs), 1e-6); prs = {{1.0, 0.2}, {1.0, 0.4}, {0.67, 0.4}, {0.5, 0.4}, {0.4, 0.4}, {0.5, 0.6}, {0.57, 0.8}, {0.5, 0.8}, {0.44, 0.8}, {0.5, 1.0}}; EXPECT_NEAR(ExpectedAP(prs), AveragePrecision().FromPRCurve(prs), 1e-6); } TEST(ImageMetricsTest, APRandom) { std::vector<PR> prs; for (int i = 0; i < 5000; ++i) { float p = GenerateRandomFraction(); float r = GenerateRandomFraction(); prs.push_back({p, r}); } const float expected = ExpectedAP(prs); std::sort(std::begin(prs), std::end(prs), [](const PR& a, const PR& b) { return a.r < b.r; }); const float actual = AveragePrecision().FromPRCurve(prs); EXPECT_NEAR(expected, actual, 1e-5); } TEST(ImageMetricsTest, BBoxAPBasic) { std::vector<Detection> gt; gt.push_back(Detection({false, 100, 1, {{0, 1}, {0, 1}}})); gt.push_back(Detection({false, 200, 1, {{1, 2}, {1, 2}}})); std::vector<Detection> pd; pd.push_back(Detection({false, 100, 0.8, {{0.1, 1.1}, {0.1, 1.1}}})); pd.push_back(Detection({false, 200, 0.8, {{0.9, 1.9}, {0.9, 1.9}}})); EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6); AveragePrecision::Options opts; opts.iou_threshold = 0.85; EXPECT_NEAR(0.0, AveragePrecision(opts).FromBoxes(gt, pd), 1e-6); } TEST(ImageMetricsTest, Box2DOverlap) { Box2D a({{0, 1}, {0, 1}}); Box2D b({{0.5, 2.5}, {0.5, 2.5}}); EXPECT_NEAR(0.25, a.Overlap(b), 1e-6); EXPECT_NEAR(0.0625, b.Overlap(a), 1e-6); } TEST(ImageMetricsTest, BBoxAPwithIgnoredGroundTruth) { std::vector<Detection> gt; std::vector<Detection> pd; gt.push_back(Detection({false, 100, 1, {{1, 2}, {1, 2}}, kIgnoreOneMatch})); pd.push_back(Detection({false, 100, 0.8, {{0.1, 1.1}, {0.1, 1.1}}})); EXPECT_TRUE(std::isnan(AveragePrecision().FromBoxes(gt, pd))); gt.push_back({false, 100, 1, {{0, 1}, {0, 1}}}); EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6); pd.push_back({false, 100, 0.9, {{0.9, 1.9}, {0.9, 1.9}}}); EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6); pd.push_back({false, 100, 0.95, {{0.9, 1.9}, {0.9, 1.9}}}); EXPECT_NEAR(0.5, AveragePrecision().FromBoxes(gt, pd), 1e-6); gt[0].ignore = kIgnoreAllMatches; EXPECT_NEAR(1.0, AveragePrecision().FromBoxes(gt, pd), 1e-6); } TEST(ImageMetricsTest, BBoxAPRandom) { auto rand = [](int64_t id) { auto xmin = GenerateRandomFraction(); auto xmax = xmin + GenerateRandomFraction(); auto ymin = GenerateRandomFraction(); auto ymax = ymin + GenerateRandomFraction(); return Detection( {false, id, GenerateRandomFraction(), {{xmin, xmax}, {ymin, ymax}}}); }; std::vector<Detection> gt; for (int i = 0; i < 100; ++i) { gt.push_back(rand(i % 10)); } std::vector<Detection> pd = gt; for (int i = 0; i < 10000; ++i) { pd.push_back(rand(i % 10)); } std::vector<PR> pr; 
AveragePrecision().FromBoxes(gt, pd, &pr); EXPECT_EQ(101, pr.size()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/utils/image_metrics.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/utils/image_metrics_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
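The image_metrics test's ExpectedAP computes 101-point interpolated average precision by brute force: at each recall level take the best precision at or beyond it, then average over all levels. A self-contained version of that reference computation (InterpolatedAP is an illustrative name):

#include <algorithm>
#include <vector>

struct PRPoint {
  float p;  // precision
  float r;  // recall
};

// For each recall level r in {0.00, 0.01, ..., 1.00}, take the maximum
// precision among curve points whose recall is at least r, then average
// over the 101 levels.
float InterpolatedAP(const std::vector<PRPoint>& curve) {
  float sum = 0;
  for (int level = 0; level <= 100; ++level) {
    const float r = level / 100.0f;
    float best_p = 0;
    for (const PRPoint& point : curve) {
      if (point.r >= r) best_p = std::max(best_p, point.p);
    }
    sum += best_p;
  }
  return sum / 101.0f;
}

// The production FromPRCurve computes the same quantity in a single reverse
// pass over a recall-sorted curve instead of this O(levels * points) scan,
// which is why the APRandom test sorts by recall before comparing.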
8e5fb475-639c-46df-a5e4-ac0be1209b8a
cpp
tensorflow/tensorflow
sign_custom
tensorflow/lite/kernels/sign_custom.cc
tensorflow/lite/kernels/sign_custom_test.cc
#include <cmath> #include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/kernels/custom_ops_register.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace custom { namespace sign { TfLiteStatus PointwiseUnaryOpPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 1); const TfLiteTensor* input = tflite::GetInput(context, node, 0); TfLiteTensor* output = tflite::GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims); return context->ResizeTensor(context, output, output_shape); } template <typename Op, typename T> TfLiteStatus PointwiseUnaryOpDoEval( TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output) { const T* data = tflite::GetTensorData<T>(input); T* data_output = tflite::GetTensorData<T>(output); const int64_t num_elements = NumElements(input); for (int64_t i = 0; i < num_elements; ++i) { data_output[i] = Op::template Eval<T>(data[i]); } return TfLiteStatus::kTfLiteOk; } template <typename Op> TfLiteStatus PointwiseUnaryOpEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = tflite::GetInput(context, node, 0); TfLiteTensor* output = tflite::GetOutput(context, node, 0); switch (output->type) { case kTfLiteFloat32: TF_LITE_ENSURE_OK( context, (PointwiseUnaryOpDoEval<Op, float>(context, input, output))); break; case kTfLiteFloat64: TF_LITE_ENSURE_OK( context, (PointwiseUnaryOpDoEval<Op, double>(context, input, output))); break; default: { TF_LITE_KERNEL_LOG(context, "Unsupported datatype for sign output: %s", TfLiteTypeGetName(output->type)); return TfLiteStatus::kTfLiteError; } } return TfLiteStatus::kTfLiteOk; } struct Sign { template <typename T> static T Eval(T x) { if (x > 0) { return 1; } if (x < 0) { return -1; } return 0; } }; } TfLiteRegistration* Register_SIGN() { static TfLiteRegistration r = {nullptr, nullptr, sign::PointwiseUnaryOpPrepare, sign::PointwiseUnaryOpEval<sign::Sign>}; return &r; } } } }
#include <cmath> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/kernels/custom_ops_register.h" #include "tensorflow/lite/kernels/test_util.h" #include "tensorflow/lite/schema/schema_generated.h" #include "tensorflow/lite/testing/util.h" namespace tflite { namespace { template <typename T> tflite::TensorType GetTTEnum(); template <> tflite::TensorType GetTTEnum<float>() { return tflite::TensorType_FLOAT32; } template <> tflite::TensorType GetTTEnum<double>() { return tflite::TensorType_FLOAT64; } class SignModel : public tflite::SingleOpModel { public: SignModel(tflite::TensorData x, tflite::TensorData output) { x_ = AddInput(x); output_ = AddOutput(output); SetCustomOp("Sign", {}, ops::custom::Register_SIGN); BuildInterpreter({GetShape(x_)}); } int x_; int output_; template <typename T> std::vector<T> GetOutput(const std::vector<T>& x) { PopulateTensor<T>(x_, x); Invoke(); return ExtractVector<T>(output_); } }; template <typename Float> class SignCustomTest : public ::testing::Test { public: using FloatType = Float; }; using TestTypes = ::testing::Types<float, double>; TYPED_TEST_SUITE(SignCustomTest, TestTypes); TYPED_TEST(SignCustomTest, TestScalar) { using Float = typename TestFixture::FloatType; tflite::TensorData x = {GetTTEnum<Float>(), {}}; tflite::TensorData output = {GetTTEnum<Float>(), {}}; SignModel m(x, output); auto got = m.GetOutput<Float>({0.0}); ASSERT_EQ(got.size(), 1); EXPECT_FLOAT_EQ(got[0], 0.0); ASSERT_FLOAT_EQ(m.GetOutput<Float>({5.0})[0], 1.0); ASSERT_FLOAT_EQ(m.GetOutput<Float>({-3.0})[0], -1.0); } TYPED_TEST(SignCustomTest, TestBatch) { using Float = typename TestFixture::FloatType; tflite::TensorData x = {GetTTEnum<Float>(), {4, 2, 1}}; tflite::TensorData output = {GetTTEnum<Float>(), {4, 2, 1}}; SignModel m(x, output); std::vector<Float> x_data = {0.8, -0.7, 0.6, -0.5, 0.4, -0.3, 0.2, 0.0}; auto got = m.GetOutput<Float>(x_data); EXPECT_EQ(got, std::vector<Float>( {1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 0.0})); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/sign_custom.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/sign_custom_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
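The sign_custom kernel is a pointwise unary op: Prepare copies the input shape to the output, and Eval maps every element through the Sign functor. Stripped of the TfLite plumbing, the elementwise core looks like this (PointwiseSign is a hypothetical stand-in for the kernel's eval loop):

#include <cstddef>
#include <vector>

// Maps each element to -1, 0, or +1, mirroring sign::Sign::Eval above.
template <typename T>
T Sign(T x) {
  if (x > 0) return static_cast<T>(1);
  if (x < 0) return static_cast<T>(-1);
  return static_cast<T>(0);
}

// A pointwise unary op preserves the input's element count (and, in the
// kernel, its shape); only the values change.
template <typename T>
std::vector<T> PointwiseSign(const std::vector<T>& input) {
  std::vector<T> output(input.size());
  for (size_t i = 0; i < input.size(); ++i) {
    output[i] = Sign(input[i]);
  }
  return output;
}

// PointwiseSign<float>({0.8f, -0.7f, 0.0f}) == {1.0f, -1.0f, 0.0f},
// matching the TestBatch expectations above.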
458adafa-04cc-4dde-85aa-2a8742b9d8ab
cpp
tensorflow/tensorflow
cost_measurement_registry
tensorflow/core/common_runtime/cost_measurement_registry.cc
tensorflow/core/common_runtime/cost_measurement_registry_test.cc
#include "tensorflow/core/common_runtime/cost_measurement_registry.h" #include <string> #include <utility> #include "absl/container/flat_hash_map.h" #include "absl/strings/string_view.h" #include "tensorflow/core/common_runtime/cost_measurement.h" #include "tensorflow/core/platform/logging.h" namespace tensorflow { namespace { using RegistrationMap = absl::flat_hash_map<std::string, CostMeasurementRegistry::Creator>; RegistrationMap* GetRegistrationMap() { static RegistrationMap* registered_cost_measurements = new RegistrationMap; return registered_cost_measurements; } } std::unique_ptr<CostMeasurement> CostMeasurementRegistry::CreateByNameOrNull( const std::string& name, const CostMeasurement::Context& context) { const auto it = GetRegistrationMap()->find(name); if (it == GetRegistrationMap()->end()) { LOG_FIRST_N(ERROR, 1) << "Cost type " << name << " is unregistered."; return nullptr; } return it->second(context); } void CostMeasurementRegistry::RegisterCostMeasurement(absl::string_view name, Creator creator) { const auto it = GetRegistrationMap()->find(name); CHECK(it == GetRegistrationMap()->end()) << "CostMeasurement " << name << " is registered twice."; GetRegistrationMap()->emplace(name, std::move(creator)); } }
#include "tensorflow/core/common_runtime/cost_measurement_registry.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/time/time.h" #include "tensorflow/core/common_runtime/cost_measurement.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { constexpr char kTestCostName[] = "test"; class TestCostMeasurement : public CostMeasurement { public: using CostMeasurement::CostMeasurement; absl::Duration GetTotalCost() override { return absl::ZeroDuration(); } absl::string_view GetCostType() const override { return kTestCostName; } }; REGISTER_COST_MEASUREMENT(kTestCostName, TestCostMeasurement); TEST(CostMeasurementRegistryTest, Basic) { const CostMeasurement::Context context; std::unique_ptr<const CostMeasurement> test_cost_measurement = CostMeasurementRegistry::CreateByNameOrNull("unregistered", context); EXPECT_EQ(test_cost_measurement, nullptr); test_cost_measurement = CostMeasurementRegistry::CreateByNameOrNull(kTestCostName, context); EXPECT_NE(test_cost_measurement, nullptr); } TEST(CostMeasurementRegistryDeathTest, CrashWhenRegisterTwice) { const auto creator = [](const CostMeasurement::Context& context) { return std::make_unique<TestCostMeasurement>(context); }; EXPECT_DEATH( CostMeasurementRegistry::RegisterCostMeasurement(kTestCostName, creator), absl::StrCat("CostMeasurement ", kTestCostName, " is registered twice.")); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/cost_measurement_registry.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/cost_measurement_registry_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
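The cost_measurement_registry record follows a common C++ registration pattern: a function-local static map, deliberately heap-allocated and never destroyed so it is safe to use during static initialization and shutdown, keyed by name and holding creator callbacks. A generic sketch of that pattern with invented Widget names (the real code CHECK-fails on duplicate registration rather than returning a bool):

#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>

struct Widget {
  virtual ~Widget() = default;
};

using WidgetCreator = std::function<std::unique_ptr<Widget>()>;

// Constructed on first use and intentionally leaked, sidestepping static
// initialization/destruction order problems.
std::unordered_map<std::string, WidgetCreator>& WidgetRegistry() {
  static auto* registry = new std::unordered_map<std::string, WidgetCreator>;
  return *registry;
}

bool RegisterWidget(const std::string& name, WidgetCreator creator) {
  // Registering the same name twice is a programming error; this sketch
  // reports it via the return value instead of crashing.
  return WidgetRegistry().emplace(name, std::move(creator)).second;
}

std::unique_ptr<Widget> CreateWidgetOrNull(const std::string& name) {
  auto it = WidgetRegistry().find(name);
  return it == WidgetRegistry().end() ? nullptr : it->second();
}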
1636930c-ab5c-49b8-9201-f3ad61c62b54
cpp
tensorflow/tensorflow
ordered_set
third_party/xla/xla/service/graphcycles/ordered_set.h
third_party/xla/xla/service/graphcycles/ordered_set_test.cc
#ifndef XLA_SERVICE_GRAPHCYCLES_ORDERED_SET_H_ #define XLA_SERVICE_GRAPHCYCLES_ORDERED_SET_H_ #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/types/span.h" #include "tsl/platform/logging.h" namespace xla { template <typename T> class OrderedSet { public: bool Insert(T value) { bool new_insertion = value_to_index_.insert({value, value_sequence_.size()}).second; if (new_insertion) { value_sequence_.push_back(value); } return new_insertion; } void Erase(T value) { auto it = value_to_index_.find(value); DCHECK(it != value_to_index_.end()); value_to_index_[value_sequence_.back()] = it->second; std::swap(value_sequence_[it->second], value_sequence_.back()); value_sequence_.pop_back(); value_to_index_.erase(it); } void Reserve(size_t new_size) { value_to_index_.reserve(new_size); value_sequence_.reserve(new_size); } void Clear() { value_to_index_.clear(); value_sequence_.clear(); } bool Contains(T value) const { return value_to_index_.contains(value); } size_t Size() const { return value_sequence_.size(); } absl::Span<T const> GetSequence() const { return value_sequence_; } private: std::vector<T> value_sequence_; absl::flat_hash_map<T, int> value_to_index_; }; } #endif
#include "xla/service/graphcycles/ordered_set.h" #include "tsl/platform/test.h" namespace xla { namespace { TEST(OrderedSetTest, Insert) { OrderedSet<int> ordered_set; EXPECT_TRUE(ordered_set.Insert(90)); EXPECT_TRUE(ordered_set.Insert(100)); EXPECT_TRUE(ordered_set.Insert(80)); EXPECT_FALSE(ordered_set.Insert(100)); EXPECT_EQ(ordered_set.Size(), 3); EXPECT_TRUE(ordered_set.Contains(90)); EXPECT_TRUE(ordered_set.Contains(100)); EXPECT_TRUE(ordered_set.Contains(80)); EXPECT_FALSE(ordered_set.Contains(40)); std::array<int, 3> expected_sequence = {90, 100, 80}; EXPECT_EQ(ordered_set.GetSequence(), expected_sequence); } TEST(OrderedSetTest, Erase) { OrderedSet<int> ordered_set; EXPECT_TRUE(ordered_set.Insert(90)); EXPECT_TRUE(ordered_set.Insert(100)); EXPECT_TRUE(ordered_set.Insert(80)); ordered_set.Erase(100); EXPECT_EQ(ordered_set.Size(), 2); EXPECT_TRUE(ordered_set.Contains(90)); EXPECT_FALSE(ordered_set.Contains(100)); EXPECT_TRUE(ordered_set.Contains(80)); std::array<int, 2> expected_sequence_0 = {90, 80}; EXPECT_EQ(ordered_set.GetSequence(), expected_sequence_0); ordered_set.Erase(80); EXPECT_EQ(ordered_set.Size(), 1); EXPECT_TRUE(ordered_set.Contains(90)); EXPECT_FALSE(ordered_set.Contains(100)); EXPECT_FALSE(ordered_set.Contains(80)); std::array<int, 1> expected_sequence_1 = {90}; EXPECT_EQ(ordered_set.GetSequence(), expected_sequence_1); ordered_set.Erase(90); EXPECT_EQ(ordered_set.Size(), 0); EXPECT_FALSE(ordered_set.Contains(90)); EXPECT_FALSE(ordered_set.Contains(100)); EXPECT_FALSE(ordered_set.Contains(80)); std::array<int, 0> expected_sequence_2 = {}; EXPECT_EQ(ordered_set.GetSequence(), expected_sequence_2); } TEST(OrderedSetTest, Clear) { OrderedSet<int> ordered_set; EXPECT_TRUE(ordered_set.Insert(90)); EXPECT_TRUE(ordered_set.Insert(100)); EXPECT_TRUE(ordered_set.Insert(80)); ordered_set.Clear(); EXPECT_EQ(ordered_set.Size(), 0); EXPECT_FALSE(ordered_set.Contains(90)); EXPECT_FALSE(ordered_set.Contains(100)); EXPECT_FALSE(ordered_set.Contains(80)); std::array<int, 0> expected_sequence = {}; EXPECT_EQ(ordered_set.GetSequence(), expected_sequence); } TEST(OrderedSetTest, LargeInsertions) { const int kSize = 50 * 9000; OrderedSet<int> ordered_set; for (int i = 0; i < kSize; i++) { EXPECT_TRUE(ordered_set.Insert(i + 500)); } for (int i = 0; i < kSize; i++) { EXPECT_EQ(ordered_set.GetSequence()[i], i + 500); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/graphcycles/ordered_set.h
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/graphcycles/ordered_set_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
4249b828-0428-4326-a480-4dd2c2864c5d
cpp
google/quiche
cached_blind_sign_auth
quiche/blind_sign_auth/cached_blind_sign_auth.cc
quiche/blind_sign_auth/cached_blind_sign_auth_test.cc
#include "quiche/blind_sign_auth/cached_blind_sign_auth.h" #include <optional> #include <string> #include <utility> #include <vector> #include "absl/functional/bind_front.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/time/clock.h" #include "absl/time/time.h" #include "absl/types/span.h" #include "quiche/blind_sign_auth/blind_sign_auth_interface.h" #include "quiche/common/platform/api/quiche_logging.h" #include "quiche/common/platform/api/quiche_mutex.h" namespace quiche { constexpr absl::Duration kFreshnessConstant = absl::Minutes(5); void CachedBlindSignAuth::GetTokens(std::optional<std::string> oauth_token, int num_tokens, ProxyLayer proxy_layer, BlindSignAuthServiceType service_type, SignedTokenCallback callback) { if (num_tokens > max_tokens_per_request_) { std::move(callback)(absl::InvalidArgumentError( absl::StrFormat("Number of tokens requested exceeds maximum: %d", kBlindSignAuthRequestMaxTokens))); return; } if (num_tokens < 0) { std::move(callback)(absl::InvalidArgumentError(absl::StrFormat( "Negative number of tokens requested: %d", num_tokens))); return; } std::vector<BlindSignToken> output_tokens; { QuicheWriterMutexLock lock(&mutex_); RemoveExpiredTokens(); if (static_cast<size_t>(num_tokens) <= cached_tokens_.size()) { output_tokens = CreateOutputTokens(num_tokens); } } if (!output_tokens.empty() || num_tokens == 0) { std::move(callback)(absl::MakeSpan(output_tokens)); return; } SignedTokenCallback caching_callback = absl::bind_front(&CachedBlindSignAuth::HandleGetTokensResponse, this, std::move(callback), num_tokens); blind_sign_auth_->GetTokens(oauth_token, kBlindSignAuthRequestMaxTokens, proxy_layer, service_type, std::move(caching_callback)); } void CachedBlindSignAuth::HandleGetTokensResponse( SignedTokenCallback callback, int num_tokens, absl::StatusOr<absl::Span<BlindSignToken>> tokens) { if (!tokens.ok()) { QUICHE_LOG(WARNING) << "BlindSignAuth::GetTokens failed: " << tokens.status(); std::move(callback)(tokens); return; } if (tokens->size() < static_cast<size_t>(num_tokens) || tokens->size() > kBlindSignAuthRequestMaxTokens) { QUICHE_LOG(WARNING) << "Expected " << num_tokens << " tokens, got " << tokens->size(); } std::vector<BlindSignToken> output_tokens; size_t cache_size; { QuicheWriterMutexLock lock(&mutex_); for (const BlindSignToken& token : *tokens) { cached_tokens_.push_back(token); } RemoveExpiredTokens(); cache_size = cached_tokens_.size(); if (cache_size >= static_cast<size_t>(num_tokens)) { output_tokens = CreateOutputTokens(num_tokens); } } if (!output_tokens.empty()) { std::move(callback)(absl::MakeSpan(output_tokens)); return; } std::move(callback)(absl::ResourceExhaustedError(absl::StrFormat( "Requested %d tokens, cache only has %d after GetTokensRequest", num_tokens, cache_size))); } std::vector<BlindSignToken> CachedBlindSignAuth::CreateOutputTokens( int num_tokens) { std::vector<BlindSignToken> output_tokens; if (cached_tokens_.size() < static_cast<size_t>(num_tokens)) { QUICHE_LOG(FATAL) << "Check failed, not enough tokens in cache: " << cached_tokens_.size() << " < " << num_tokens; } for (int i = 0; i < num_tokens; i++) { output_tokens.push_back(std::move(cached_tokens_.front())); cached_tokens_.pop_front(); } return output_tokens; } void CachedBlindSignAuth::RemoveExpiredTokens() { size_t original_size = cached_tokens_.size(); absl::Time now_plus_five_mins = absl::Now() + kFreshnessConstant; for (size_t i = 0; i < original_size; i++) { BlindSignToken token = 
std::move(cached_tokens_.front()); cached_tokens_.pop_front(); if (token.expiration > now_plus_five_mins) { cached_tokens_.push_back(std::move(token)); } } } }
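For readers skimming the cache logic, this is a stand-alone sketch of the rotate-and-filter idiom RemoveExpiredTokens uses above: one pass over the deque, treating anything that expires within the five-minute freshness window as already spent. The Token struct here is a simplified stand-in, not the real BlindSignToken.

// Minimal sketch of the rotate-and-filter pass; types are illustrative.
#include <cstddef>
#include <deque>
#include <utility>

#include "absl/time/clock.h"
#include "absl/time/time.h"

struct Token {
  absl::Time expiration;
};

void RemoveExpired(std::deque<Token>& tokens, absl::Duration freshness) {
  const size_t original_size = tokens.size();
  const absl::Time cutoff = absl::Now() + freshness;  // e.g. now + 5 minutes
  for (size_t i = 0; i < original_size; ++i) {
    Token token = std::move(tokens.front());
    tokens.pop_front();
    // Keep only tokens still valid past the cutoff; the survivors keep
    // their relative order because each one is pushed back in turn.
    if (token.expiration > cutoff) {
      tokens.push_back(std::move(token));
    }
  }
}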
#include "quiche/blind_sign_auth/cached_blind_sign_auth.h" #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/time/clock.h" #include "absl/time/time.h" #include "absl/types/span.h" #include "quiche/blind_sign_auth/blind_sign_auth_interface.h" #include "quiche/blind_sign_auth/test_tools/mock_blind_sign_auth_interface.h" #include "quiche/common/platform/api/quiche_mutex.h" #include "quiche/common/platform/api/quiche_test.h" #include "quiche/common/test_tools/quiche_test_utils.h" namespace quiche { namespace test { namespace { using ::testing::_; using ::testing::InvokeArgument; using ::testing::Unused; class CachedBlindSignAuthTest : public QuicheTest { protected: void SetUp() override { cached_blind_sign_auth_ = std::make_unique<CachedBlindSignAuth>(&mock_blind_sign_auth_interface_); } void TearDown() override { fake_tokens_.clear(); cached_blind_sign_auth_.reset(); } public: std::vector<BlindSignToken> MakeFakeTokens(int num_tokens) { std::vector<BlindSignToken> fake_tokens; for (int i = 0; i < kBlindSignAuthRequestMaxTokens; i++) { fake_tokens.push_back(BlindSignToken{absl::StrCat("token:", i), absl::Now() + absl::Hours(1)}); } return fake_tokens; } std::vector<BlindSignToken> MakeExpiredTokens(int num_tokens) { std::vector<BlindSignToken> fake_tokens; for (int i = 0; i < kBlindSignAuthRequestMaxTokens; i++) { fake_tokens.push_back(BlindSignToken{absl::StrCat("token:", i), absl::Now() - absl::Hours(1)}); } return fake_tokens; } MockBlindSignAuthInterface mock_blind_sign_auth_interface_; std::unique_ptr<CachedBlindSignAuth> cached_blind_sign_auth_; std::optional<std::string> oauth_token_ = "oauth_token"; std::vector<BlindSignToken> fake_tokens_; }; TEST_F(CachedBlindSignAuthTest, TestGetTokensOneCallSuccessful) { EXPECT_CALL(mock_blind_sign_auth_interface_, GetTokens(oauth_token_, kBlindSignAuthRequestMaxTokens, _, _, _)) .Times(1) .WillOnce([this](Unused, int num_tokens, Unused, Unused, SignedTokenCallback callback) { fake_tokens_ = MakeFakeTokens(num_tokens); std::move(callback)(absl::MakeSpan(fake_tokens_)); }); int num_tokens = 5; QuicheNotification done; SignedTokenCallback callback = [num_tokens, &done](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { QUICHE_EXPECT_OK(tokens); EXPECT_EQ(num_tokens, tokens->size()); for (int i = 0; i < num_tokens; i++) { EXPECT_EQ(tokens->at(i).token, absl::StrCat("token:", i)); } done.Notify(); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(callback)); done.WaitForNotification(); } TEST_F(CachedBlindSignAuthTest, TestGetTokensMultipleRemoteCallsSuccessful) { EXPECT_CALL(mock_blind_sign_auth_interface_, GetTokens(oauth_token_, kBlindSignAuthRequestMaxTokens, _, _, _)) .Times(2) .WillRepeatedly([this](Unused, int num_tokens, Unused, Unused, SignedTokenCallback callback) { fake_tokens_ = MakeFakeTokens(num_tokens); std::move(callback)(absl::MakeSpan(fake_tokens_)); }); int num_tokens = kBlindSignAuthRequestMaxTokens - 1; QuicheNotification first; SignedTokenCallback first_callback = [num_tokens, &first](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { QUICHE_EXPECT_OK(tokens); EXPECT_EQ(num_tokens, tokens->size()); for (int i = 0; i < num_tokens; i++) { EXPECT_EQ(tokens->at(i).token, absl::StrCat("token:", i)); } first.Notify(); }; 
cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(first_callback)); first.WaitForNotification(); QuicheNotification second; SignedTokenCallback second_callback = [num_tokens, &second](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { QUICHE_EXPECT_OK(tokens); EXPECT_EQ(num_tokens, tokens->size()); EXPECT_EQ(tokens->at(0).token, absl::StrCat("token:", kBlindSignAuthRequestMaxTokens - 1)); for (int i = 1; i < num_tokens; i++) { EXPECT_EQ(tokens->at(i).token, absl::StrCat("token:", i - 1)); } second.Notify(); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(second_callback)); second.WaitForNotification(); } TEST_F(CachedBlindSignAuthTest, TestGetTokensSecondRequestFilledFromCache) { EXPECT_CALL(mock_blind_sign_auth_interface_, GetTokens(oauth_token_, kBlindSignAuthRequestMaxTokens, _, _, _)) .Times(1) .WillOnce([this](Unused, int num_tokens, Unused, Unused, SignedTokenCallback callback) { fake_tokens_ = MakeFakeTokens(num_tokens); std::move(callback)(absl::MakeSpan(fake_tokens_)); }); int num_tokens = kBlindSignAuthRequestMaxTokens / 2; QuicheNotification first; SignedTokenCallback first_callback = [num_tokens, &first](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { QUICHE_EXPECT_OK(tokens); EXPECT_EQ(num_tokens, tokens->size()); for (int i = 0; i < num_tokens; i++) { EXPECT_EQ(tokens->at(i).token, absl::StrCat("token:", i)); } first.Notify(); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(first_callback)); first.WaitForNotification(); QuicheNotification second; SignedTokenCallback second_callback = [num_tokens, &second](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { QUICHE_EXPECT_OK(tokens); EXPECT_EQ(num_tokens, tokens->size()); for (int i = 0; i < num_tokens; i++) { EXPECT_EQ(tokens->at(i).token, absl::StrCat("token:", i + num_tokens)); } second.Notify(); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(second_callback)); second.WaitForNotification(); } TEST_F(CachedBlindSignAuthTest, TestGetTokensThirdRequestRefillsCache) { EXPECT_CALL(mock_blind_sign_auth_interface_, GetTokens(oauth_token_, kBlindSignAuthRequestMaxTokens, _, _, _)) .Times(2) .WillRepeatedly([this](Unused, int num_tokens, Unused, Unused, SignedTokenCallback callback) { fake_tokens_ = MakeFakeTokens(num_tokens); std::move(callback)(absl::MakeSpan(fake_tokens_)); }); int num_tokens = kBlindSignAuthRequestMaxTokens / 2; QuicheNotification first; SignedTokenCallback first_callback = [num_tokens, &first](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { QUICHE_EXPECT_OK(tokens); EXPECT_EQ(num_tokens, tokens->size()); for (int i = 0; i < num_tokens; i++) { EXPECT_EQ(tokens->at(i).token, absl::StrCat("token:", i)); } first.Notify(); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(first_callback)); first.WaitForNotification(); QuicheNotification second; SignedTokenCallback second_callback = [num_tokens, &second](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { QUICHE_EXPECT_OK(tokens); EXPECT_EQ(num_tokens, tokens->size()); for (int i = 0; i < num_tokens; i++) { EXPECT_EQ(tokens->at(i).token, absl::StrCat("token:", i + num_tokens)); } second.Notify(); }; 
cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(second_callback)); second.WaitForNotification(); QuicheNotification third; int third_request_tokens = 10; SignedTokenCallback third_callback = [third_request_tokens, &third](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { QUICHE_EXPECT_OK(tokens); EXPECT_EQ(third_request_tokens, tokens->size()); for (int i = 0; i < third_request_tokens; i++) { EXPECT_EQ(tokens->at(i).token, absl::StrCat("token:", i)); } third.Notify(); }; cached_blind_sign_auth_->GetTokens( oauth_token_, third_request_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(third_callback)); third.WaitForNotification(); } TEST_F(CachedBlindSignAuthTest, TestGetTokensRequestTooLarge) { EXPECT_CALL(mock_blind_sign_auth_interface_, GetTokens(oauth_token_, kBlindSignAuthRequestMaxTokens, _, _, _)) .Times(0); int num_tokens = kBlindSignAuthRequestMaxTokens + 1; SignedTokenCallback callback = [](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { EXPECT_THAT(tokens.status().code(), absl::StatusCode::kInvalidArgument); EXPECT_THAT( tokens.status().message(), absl::StrFormat("Number of tokens requested exceeds maximum: %d", kBlindSignAuthRequestMaxTokens)); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(callback)); } TEST_F(CachedBlindSignAuthTest, TestGetTokensRequestNegative) { EXPECT_CALL(mock_blind_sign_auth_interface_, GetTokens(oauth_token_, kBlindSignAuthRequestMaxTokens, _, _, _)) .Times(0); int num_tokens = -1; SignedTokenCallback callback = [num_tokens](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { EXPECT_THAT(tokens.status().code(), absl::StatusCode::kInvalidArgument); EXPECT_THAT(tokens.status().message(), absl::StrFormat("Negative number of tokens requested: %d", num_tokens)); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(callback)); } TEST_F(CachedBlindSignAuthTest, TestHandleGetTokensResponseErrorHandling) { EXPECT_CALL(mock_blind_sign_auth_interface_, GetTokens(oauth_token_, kBlindSignAuthRequestMaxTokens, _, _, _)) .Times(2) .WillOnce([](Unused, int num_tokens, Unused, Unused, SignedTokenCallback callback) { std::move(callback)(absl::InternalError("AuthAndSign failed")); }) .WillOnce([this](Unused, int num_tokens, Unused, Unused, SignedTokenCallback callback) { fake_tokens_ = MakeFakeTokens(num_tokens); fake_tokens_.pop_back(); std::move(callback)(absl::MakeSpan(fake_tokens_)); }); int num_tokens = kBlindSignAuthRequestMaxTokens; QuicheNotification first; SignedTokenCallback first_callback = [&first](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { EXPECT_THAT(tokens.status().code(), absl::StatusCode::kInternal); EXPECT_THAT(tokens.status().message(), "AuthAndSign failed"); first.Notify(); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(first_callback)); first.WaitForNotification(); QuicheNotification second; SignedTokenCallback second_callback = [&second](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { EXPECT_THAT(tokens.status().code(), absl::StatusCode::kResourceExhausted); second.Notify(); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(second_callback)); 
second.WaitForNotification(); } TEST_F(CachedBlindSignAuthTest, TestGetTokensZeroTokensRequested) { EXPECT_CALL(mock_blind_sign_auth_interface_, GetTokens(oauth_token_, kBlindSignAuthRequestMaxTokens, _, _, _)) .Times(0); int num_tokens = 0; SignedTokenCallback callback = [](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { QUICHE_EXPECT_OK(tokens); EXPECT_EQ(tokens->size(), 0); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(callback)); } TEST_F(CachedBlindSignAuthTest, TestExpiredTokensArePruned) { EXPECT_CALL(mock_blind_sign_auth_interface_, GetTokens(oauth_token_, kBlindSignAuthRequestMaxTokens, _, _, _)) .Times(1) .WillOnce([this](Unused, int num_tokens, Unused, Unused, SignedTokenCallback callback) { fake_tokens_ = MakeExpiredTokens(num_tokens); std::move(callback)(absl::MakeSpan(fake_tokens_)); }); int num_tokens = kBlindSignAuthRequestMaxTokens; QuicheNotification first; SignedTokenCallback first_callback = [&first](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { EXPECT_THAT(tokens.status().code(), absl::StatusCode::kResourceExhausted); first.Notify(); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(first_callback)); first.WaitForNotification(); } TEST_F(CachedBlindSignAuthTest, TestClearCacheRemovesTokens) { EXPECT_CALL(mock_blind_sign_auth_interface_, GetTokens(oauth_token_, kBlindSignAuthRequestMaxTokens, _, _, _)) .Times(2) .WillRepeatedly([this](Unused, int num_tokens, Unused, Unused, SignedTokenCallback callback) { fake_tokens_ = MakeExpiredTokens(num_tokens); std::move(callback)(absl::MakeSpan(fake_tokens_)); }); int num_tokens = kBlindSignAuthRequestMaxTokens / 2; QuicheNotification first; SignedTokenCallback first_callback = [&first](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { EXPECT_THAT(tokens.status().code(), absl::StatusCode::kResourceExhausted); first.Notify(); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(first_callback)); first.WaitForNotification(); cached_blind_sign_auth_->ClearCache(); QuicheNotification second; SignedTokenCallback second_callback = [&second](absl::StatusOr<absl::Span<BlindSignToken>> tokens) { EXPECT_THAT(tokens.status().code(), absl::StatusCode::kResourceExhausted); second.Notify(); }; cached_blind_sign_auth_->GetTokens( oauth_token_, num_tokens, ProxyLayer::kProxyA, BlindSignAuthServiceType::kChromeIpBlinding, std::move(second_callback)); second.WaitForNotification(); } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/blind_sign_auth/cached_blind_sign_auth.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/blind_sign_auth/cached_blind_sign_auth_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
847a8838-d796-4ef7-b972-f55b4b1524ad
cpp
google/cel-cpp
parsed_json_value
common/values/parsed_json_value.cc
common/values/parsed_json_value_test.cc
#include "common/values/parsed_json_value.h" #include <string> #include <utility> #include "absl/base/attributes.h" #include "absl/functional/overload.h" #include "absl/status/status.h" #include "absl/strings/cord.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/variant.h" #include "common/allocator.h" #include "common/memory.h" #include "common/value.h" #include "internal/well_known_types.h" #include "google/protobuf/message.h" namespace cel::common_internal { namespace { using ::cel::well_known_types::AsVariant; using ::cel::well_known_types::GetValueReflectionOrDie; } Value ParsedJsonValue(Allocator<> allocator, Borrowed<const google::protobuf::Message> message) { const auto reflection = GetValueReflectionOrDie(message->GetDescriptor()); const auto kind_case = reflection.GetKindCase(*message); switch (kind_case) { case google::protobuf::Value::KIND_NOT_SET: ABSL_FALLTHROUGH_INTENDED; case google::protobuf::Value::kNullValue: return NullValue(); case google::protobuf::Value::kBoolValue: return BoolValue(reflection.GetBoolValue(*message)); case google::protobuf::Value::kNumberValue: return DoubleValue(reflection.GetNumberValue(*message)); case google::protobuf::Value::kStringValue: { std::string scratch; return absl::visit( absl::Overload( [&](absl::string_view string) -> StringValue { if (string.empty()) { return StringValue(); } if (string.data() == scratch.data() && string.size() == scratch.size()) { return StringValue(allocator, std::move(scratch)); } else { return StringValue(message, string); } }, [&](absl::Cord&& cord) -> StringValue { if (cord.empty()) { return StringValue(); } return StringValue(std::move(cord)); }), AsVariant(reflection.GetStringValue(*message, scratch))); } case google::protobuf::Value::kListValue: return ParsedJsonListValue(Owned<const google::protobuf::Message>( Owner(message), &reflection.GetListValue(*message))); case google::protobuf::Value::kStructValue: return ParsedJsonMapValue(Owned<const google::protobuf::Message>( Owner(message), &reflection.GetStructValue(*message))); default: return ErrorValue(absl::InvalidArgumentError( absl::StrCat("unexpected value kind case: ", kind_case))); } } }
#include "common/values/parsed_json_value.h" #include "google/protobuf/struct.pb.h" #include "absl/base/nullability.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "common/allocator.h" #include "common/memory.h" #include "common/type_reflector.h" #include "common/value.h" #include "common/value_manager.h" #include "common/value_testing.h" #include "internal/parse_text_proto.h" #include "internal/testing.h" #include "internal/testing_descriptor_pool.h" #include "internal/testing_message_factory.h" #include "proto/test/v1/proto3/test_all_types.pb.h" #include "google/protobuf/arena.h" #include "google/protobuf/descriptor.h" #include "google/protobuf/message.h" namespace cel::common_internal { namespace { using ::cel::internal::GetTestingDescriptorPool; using ::cel::internal::GetTestingMessageFactory; using ::cel::test::BoolValueIs; using ::cel::test::DoubleValueIs; using ::cel::test::IsNullValue; using ::cel::test::ListValueElements; using ::cel::test::ListValueIs; using ::cel::test::MapValueElements; using ::cel::test::MapValueIs; using ::cel::test::StringValueIs; using ::testing::ElementsAre; using ::testing::Pair; using ::testing::PrintToStringParamName; using ::testing::TestWithParam; using ::testing::UnorderedElementsAre; using TestAllTypesProto3 = ::google::api::expr::test::v1::proto3::TestAllTypes; class ParsedJsonValueTest : public TestWithParam<AllocatorKind> { public: void SetUp() override { switch (GetParam()) { case AllocatorKind::kArena: arena_.emplace(); value_manager_ = NewThreadCompatibleValueManager( MemoryManager::Pooling(arena()), NewThreadCompatibleTypeReflector(MemoryManager::Pooling(arena()))); break; case AllocatorKind::kNewDelete: value_manager_ = NewThreadCompatibleValueManager( MemoryManager::ReferenceCounting(), NewThreadCompatibleTypeReflector( MemoryManager::ReferenceCounting())); break; } } void TearDown() override { value_manager_.reset(); arena_.reset(); } Allocator<> allocator() { return arena_ ? 
ArenaAllocator(&*arena_) : NewDeleteAllocator(); } absl::Nullable<google::protobuf::Arena*> arena() { return allocator().arena(); } absl::Nonnull<const google::protobuf::DescriptorPool*> descriptor_pool() { return GetTestingDescriptorPool(); } absl::Nonnull<google::protobuf::MessageFactory*> message_factory() { return GetTestingMessageFactory(); } ValueManager& value_manager() { return **value_manager_; } template <typename T> auto GeneratedParseTextProto(absl::string_view text) { return ::cel::internal::GeneratedParseTextProto<T>( allocator(), text, descriptor_pool(), message_factory()); } template <typename T> auto DynamicParseTextProto(absl::string_view text) { return ::cel::internal::DynamicParseTextProto<T>( allocator(), text, descriptor_pool(), message_factory()); } private: absl::optional<google::protobuf::Arena> arena_; absl::optional<Shared<ValueManager>> value_manager_; }; TEST_P(ParsedJsonValueTest, Null_Dynamic) { EXPECT_THAT( ParsedJsonValue(arena(), DynamicParseTextProto<google::protobuf::Value>( R"pb(null_value: NULL_VALUE)pb")), IsNullValue()); EXPECT_THAT( ParsedJsonValue(arena(), DynamicParseTextProto<google::protobuf::Value>( R"pb(null_value: NULL_VALUE)pb")), IsNullValue()); } TEST_P(ParsedJsonValueTest, Bool_Dynamic) { EXPECT_THAT( ParsedJsonValue(arena(), DynamicParseTextProto<google::protobuf::Value>( R"pb(bool_value: true)pb")), BoolValueIs(true)); } TEST_P(ParsedJsonValueTest, Double_Dynamic) { EXPECT_THAT( ParsedJsonValue(arena(), DynamicParseTextProto<google::protobuf::Value>( R"pb(number_value: 1.0)pb")), DoubleValueIs(1.0)); } TEST_P(ParsedJsonValueTest, String_Dynamic) { EXPECT_THAT( ParsedJsonValue(arena(), DynamicParseTextProto<google::protobuf::Value>( R"pb(string_value: "foo")pb")), StringValueIs("foo")); } TEST_P(ParsedJsonValueTest, List_Dynamic) { EXPECT_THAT( ParsedJsonValue(arena(), DynamicParseTextProto<google::protobuf::Value>( R"pb(list_value: { values {} values { bool_value: true } })pb")), ListValueIs(ListValueElements( &value_manager(), ElementsAre(IsNullValue(), BoolValueIs(true))))); } TEST_P(ParsedJsonValueTest, Map_Dynamic) { EXPECT_THAT( ParsedJsonValue(arena(), DynamicParseTextProto<google::protobuf::Value>( R"pb(struct_value: { fields { key: "foo" value: {} } fields { key: "bar" value: { bool_value: true } } })pb")), MapValueIs(MapValueElements( &value_manager(), UnorderedElementsAre( Pair(StringValueIs("foo"), IsNullValue()), Pair(StringValueIs("bar"), BoolValueIs(true)))))); } INSTANTIATE_TEST_SUITE_P(ParsedJsonValueTest, ParsedJsonValueTest, ::testing::Values(AllocatorKind::kArena, AllocatorKind::kNewDelete), PrintToStringParamName()); } }
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_json_value.cc
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_json_value_test.cc
4552db5798fb0853b131b783d8875794334fae7f
6dd790b3-9fd6-42c8-9057-21d992daec43
cpp
tensorflow/tensorflow
derived_timeline
tensorflow/core/profiler/utils/derived_timeline.cc
tensorflow/core/profiler/utils/derived_timeline_test.cc
#include "tensorflow/core/profiler/utils/derived_timeline.h" #include <algorithm> #include <cstdint> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/tsl/profiler/convert/xla_op_utils.h" #include "xla/tsl/profiler/utils/group_events.h" #include "xla/tsl/profiler/utils/tf_op_utils.h" #include "xla/tsl/profiler/utils/tf_xplane_visitor.h" #include "xla/tsl/profiler/utils/timespan.h" #include "xla/tsl/profiler/utils/tpu_xplane_utils.h" #include "xla/tsl/profiler/utils/trace_utils.h" #include "xla/tsl/profiler/utils/xplane_schema.h" #include "xla/tsl/util/stats_calculator.h" #include "tensorflow/core/lib/gtl/map_util.h" #include "tensorflow/core/profiler/protobuf/xplane.pb.h" #include "tensorflow/core/profiler/utils/gpu_event_stats.h" #include "tensorflow/core/profiler/utils/hlo_module_map.h" #include "tensorflow/core/profiler/utils/hlo_proto_map.h" #include "tensorflow/core/profiler/utils/host_offload_utils.h" #include "tensorflow/core/profiler/utils/math_utils.h" #include "tensorflow/core/profiler/utils/trace_utils.h" #include "tensorflow/core/profiler/utils/xplane_builder.h" #include "tensorflow/core/profiler/utils/xplane_schema.h" #include "tensorflow/core/profiler/utils/xplane_utils.h" #include "tensorflow/core/profiler/utils/xplane_visitor.h" #include "tsl/profiler/protobuf/xplane.pb.h" namespace tensorflow { namespace profiler { namespace { using tsl::profiler::FindMutableTensorCorePlanes; inline std::string HloModuleEventName(const GpuEventStats& stats) { return stats.program_id ? tsl::profiler::HloModuleNameWithProgramId( stats.hlo_module_name, *stats.program_id) : std::string(stats.hlo_module_name); } inline std::string HloOpEventPrefix(const GpuEventStats& stats) { return stats.program_id ? 
absl::StrCat(*stats.program_id, "/") : absl::StrCat(stats.hlo_module_name, "/"); } std::vector<XEventMetadata*> GetOrCreateHloOpEventsMetadata( XPlaneBuilder& xplane, const GpuEventStats& stats, const Symbol symbol) { DCHECK(stats.IsXlaOp()); std::vector<XEventMetadata*> hlo_op_events_metadata; hlo_op_events_metadata.reserve(stats.hlo_op_names.size()); std::string hlo_op_event_prefix = HloOpEventPrefix(stats); for (absl::string_view hlo_op_name : stats.hlo_op_names) { XEventMetadata* hlo_op_event_metadata = xplane.GetOrCreateEventMetadata( absl::StrCat(hlo_op_event_prefix, hlo_op_name)); if (hlo_op_event_metadata->display_name().empty()) { hlo_op_event_metadata->set_display_name(std::string(hlo_op_name)); } hlo_op_events_metadata.push_back(hlo_op_event_metadata); if (!symbol.hlo_text.empty()) { XStatsBuilder<XEventMetadata> event_stats(hlo_op_event_metadata, &xplane); event_stats.SetOrAddStatValue(*xplane.GetOrCreateStatMetadata("hlo_text"), symbol.hlo_text); } } return hlo_op_events_metadata; } } void ProcessTfOpEvent(absl::string_view tf_op_full_name, tsl::profiler::Timespan event_span, std::optional<int64_t> group_id, XPlaneBuilder& plane_builder, DerivedXLineBuilder& tf_name_scope_line_builder, DerivedXLineBuilder& tf_op_line_builder) { tsl::profiler::TfOp tf_op = tsl::profiler::ParseTfOpFullname(tf_op_full_name); tsl::profiler::Category category = tf_op.category; if (category == tsl::profiler::Category::kTensorFlow || category == tsl::profiler::Category::kJax) { tf_name_scope_line_builder.ExpandOrAddEvents( plane_builder.GetOrCreateEventsMetadata( tsl::profiler::ParseTfNameScopes(tf_op)), event_span, group_id); } XEventMetadata* tf_op_event_metadata = plane_builder.GetOrCreateEventMetadata(tf_op_full_name); if (tf_op_event_metadata->display_name().empty()) { tf_op_event_metadata->set_display_name(tsl::profiler::TfOpEventName(tf_op)); } tf_op_line_builder.ExpandOrAddEvent(*tf_op_event_metadata, event_span, group_id); } DerivedXEventBuilder::DerivedXEventBuilder(XEventBuilder event, std::optional<int64_t> group_id) : event_(std::move(event)), group_id_(group_id) {} bool DerivedXEventBuilder::ShouldExpand(const XEventMetadata& event_metadata, std::optional<int64_t> group_id) const { return event_.MetadataId() == event_metadata.id() && group_id_ == group_id; } void DerivedXEventBuilder::Expand(tsl::profiler::Timespan event_span) { tsl::profiler::Timespan timespan = event_.GetTimespan(); DCHECK_LE(timespan.begin_ps(), event_span.begin_ps()); timespan.ExpandToInclude(event_span); event_.SetTimespan(timespan); } DerivedXLineBuilder::DerivedXLineBuilder( XPlaneBuilder* plane, int64_t line_id, absl::string_view name, int64_t timestamp_ns, std::vector<DerivedXLineBuilder*> dependent_lines) : group_id_stat_metadata_( plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kGroupId))), correlation_id_metadata_(plane->GetOrCreateStatMetadata( GetStatTypeStr(StatType::kCorrelationId))), cuda_graph_id_metadata_(plane->GetOrCreateStatMetadata( GetStatTypeStr(StatType::kCudaGraphId))), line_(plane->GetOrCreateLine(line_id)), dependent_lines_(std::move(dependent_lines)) { line_.SetName(name); line_.SetTimestampNs(timestamp_ns); } void DerivedXLineBuilder::ExpandOrAddEvent(const XEventMetadata& event_metadata, tsl::profiler::Timespan event_span, std::optional<int64_t> group_id) { ExpandOrAddLevelEvent(event_metadata, event_span, group_id, 0); } void DerivedXLineBuilder::ExpandOrAddEvents( const std::vector<XEventMetadata*>& events_metadata_per_level, tsl::profiler::Timespan event_span, 
std::optional<int64_t> group_id) { if (events_metadata_per_level.empty()) return; size_t current_nested_level = events_metadata_per_level.size(); for (size_t level = 0; level < current_nested_level; ++level) { ExpandOrAddLevelEvent(*events_metadata_per_level[level], event_span, group_id, level); } ResetLastEvents(current_nested_level); } void DerivedXLineBuilder::ExpandOrAddLevelEvent( const XEventMetadata& event_metadata, tsl::profiler::Timespan event_span, std::optional<int64_t> group_id, int level) { auto& last_event = last_event_by_level_[level]; if (last_event && last_event->ShouldExpand(event_metadata, group_id)) { last_event->Expand(event_span); } else { ResetLastEvents(level); XEventBuilder event = line_.AddEvent(event_metadata); event.SetTimespan(event_span); if (group_id.has_value()) { event.AddStatValue(*group_id_stat_metadata_, *group_id); } last_event.emplace(std::move(event), group_id); } } void DerivedXLineBuilder::AddStatToLevelEvent(int level, const XStatMetadata& metadata, int64_t value) { if (auto it = last_event_by_level_.find(level); it != last_event_by_level_.end() && it->second.has_value()) { it->second->SetOrAddStatValue(metadata, value); } } void DerivedXLineBuilder::AddStatToLevelEvent(int level, const XStatMetadata& metadata, uint64_t value) { if (auto it = last_event_by_level_.find(level); it != last_event_by_level_.end() && it->second.has_value()) { it->second->SetOrAddStatValue(metadata, value); } } void DerivedXLineBuilder::AdjustDurationForTraceViewer(int level) { if (level >= last_event_by_level_.size() || !last_event_by_level_[level]) return; int max_level = level; for (; max_level < last_event_by_level_.size(); ++max_level) { if (!last_event_by_level_[max_level].has_value()) { break; } } --max_level; if (max_level <= level) return; auto& event_on_top_stack = *last_event_by_level_[max_level]; tsl::profiler::Timespan timespan = event_on_top_stack.GetTimespan(); int64_t max_shrink_ns = timespan.duration_ps() / 1000 - 1; int64_t shrink_ns = 0; std::optional<tsl::profiler::Timespan> last_level_timespan; for (int i = level; i <= max_level; ++i) { auto& current_event = *last_event_by_level_[i]; if (shrink_ns < max_shrink_ns && last_level_timespan == current_event.GetTimespan()) { shrink_ns++; } last_level_timespan = current_event.GetTimespan(); if (shrink_ns) { current_event.SetTimespan(tsl::profiler::Timespan::FromEndPoints( last_level_timespan->begin_ps(), last_level_timespan->end_ps() - 1000 * shrink_ns)); } } } void DerivedXLineBuilder::ResetLastEvents(int level) { AdjustDurationForTraceViewer(level); for (int i = level, end = last_event_by_level_.size(); i < end; ++i) { last_event_by_level_[i].reset(); } if (level == 0) { for (DerivedXLineBuilder* line : dependent_lines_) { line->ResetLastEvents(0); } } } void DeriveStepEventsFromGroups( const tsl::profiler::GroupMetadataMap& group_metadata_map, XPlane* device_trace) { XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(device_trace); const XStatMetadata* group_id_stat_metadata = plane_visitor.GetStatMetadataByType(StatType::kGroupId); if (group_id_stat_metadata == nullptr) return; XPlaneBuilder plane_builder(device_trace); int64_t start_timestamp_ns = GetStartTimestampNs(*device_trace); DerivedXLineBuilder steps(&plane_builder, kThreadIdStepInfo, kStepLineName, start_timestamp_ns, {}); for (const XEventVisitor& event_visitor : GetSortedEvents<XEventVisitor>(plane_visitor)) { std::optional<XStatVisitor> group_id_stat = event_visitor.GetStat(StatType::kGroupId, *group_id_stat_metadata); if 
(group_id_stat.has_value()) { int64_t group_id = group_id_stat->IntValue(); steps.ExpandOrAddEvent( *plane_builder.GetOrCreateEventMetadata(absl::StrCat(group_id)), event_visitor.GetTimespan(), group_id); } } AddGroupMetadataToStepEvents(group_metadata_map, steps.Line()); } void DeriveEventsFromAnnotations(const SymbolResolver& symbol_resolver, XPlane* device_trace) { XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(device_trace); XPlaneBuilder plane_builder(device_trace); int64_t start_timestamp_ns = GetStartTimestampNs(*device_trace); DerivedXLineBuilder tf_ops(&plane_builder, kThreadIdTfOp, kTensorFlowOpLineName, start_timestamp_ns, {}); DerivedXLineBuilder tf_name_scope(&plane_builder, kThreadIdTfNameScope, kTensorFlowNameScopeLineName, start_timestamp_ns, {&tf_ops}); DerivedXLineBuilder hlo_ops(&plane_builder, kThreadIdHloOp, kXlaOpLineName, start_timestamp_ns, {}); DerivedXLineBuilder hlo_modules(&plane_builder, kThreadIdHloModule, kXlaModuleLineName, start_timestamp_ns, {&tf_name_scope, &hlo_ops}); DerivedXLineBuilder source(&plane_builder, kThreadIdSource, kSourceLineName, start_timestamp_ns, {}); for (const XEventVisitor& event : GetSortedEvents<XEventVisitor>(plane_visitor)) { GpuEventStats stats(&event); if (!stats.IsKernel() && !stats.IsCudaGraphExecution()) continue; tsl::profiler::Timespan event_span = event.GetTimespan(); if (!stats.hlo_module_name.empty()) { hlo_modules.ExpandOrAddEvent( *plane_builder.GetOrCreateEventMetadata(HloModuleEventName(stats)), event_span, stats.group_id); } if (stats.IsXlaOp()) { auto symbol = symbol_resolver(stats.program_id, stats.hlo_module_name, stats.hlo_op_names.back()); auto hlo_events_metadata = GetOrCreateHloOpEventsMetadata(plane_builder, stats, symbol); hlo_ops.ExpandOrAddEvents(hlo_events_metadata, event_span, stats.group_id); if (stats.cuda_graph_id_for_inner_node.has_value() && *stats.cuda_graph_id_for_inner_node != 0) { int level = static_cast<int>(hlo_events_metadata.size()) - 1; if (level >= 0) { hlo_ops.AddStatToLevelEvent(level, *hlo_ops.GetCudaGraphIdMetadata(), *stats.cuda_graph_id_for_inner_node); if (stats.correlation_id.has_value()) { hlo_ops.AddStatToLevelEvent(level, *hlo_ops.GetCorrelationIdMetadata(), *stats.correlation_id); } } } if (!symbol.tf_op_name.empty()) { ProcessTfOpEvent(symbol.tf_op_name, event_span, stats.group_id, plane_builder, tf_name_scope, tf_ops); } if (!symbol.source_info.empty()) { source.ExpandOrAddEvent( *plane_builder.GetOrCreateEventMetadata(symbol.source_info), event_span, stats.group_id); } } else if (stats.IsTfOp()) { ProcessTfOpEvent(stats.tf_op_fullname, event_span, stats.group_id, plane_builder, tf_name_scope, tf_ops); } } RemoveEmptyLines(device_trace); } void DeriveEventsFromHostTrace( const XPlane* host_trace, const tsl::profiler::GroupMetadataMap& group_metadata_map, std::vector<XPlane*> device_traces) { struct GroupLaunchInfo { tsl::profiler::Timespan timespan; tsl::Stat<uint64_t> stat; void AddEventTimespan(tsl::profiler::Timespan event_span) { if (stat.count() == 0) { timespan = event_span; } else { timespan.ExpandToInclude(event_span); } stat.UpdateStat(event_span.duration_ps()); } }; using DeviceLaunchInfo = absl::flat_hash_map<int64_t , GroupLaunchInfo>; const int num_devices = device_traces.size(); std::vector<DeviceLaunchInfo> per_device_launch_info(num_devices); XPlaneVisitor host_plane = tsl::profiler::CreateTfXPlaneVisitor(host_trace); host_plane.ForEachLine([&](const XLineVisitor& line) { if (IsDerivedThreadId(line.Id())) return; 
line.ForEachEvent([&](const XEventVisitor& event) { if (absl::StartsWith(event.Name(), "cu")) return; LaunchEventStats stats(&event); if (stats.group_id.has_value() && stats.IsLaunch() && 0 <= *stats.device_id && *stats.device_id < num_devices) { GroupLaunchInfo& group_launch_info = per_device_launch_info[*stats.device_id][*stats.group_id]; group_launch_info.AddEventTimespan(event.GetTimespan()); } }); }); int64_t host_plane_start = GetStartTimestampNs(*host_trace); for (int i = 0; i < num_devices; ++i) { if (per_device_launch_info[i].empty()) continue; int64_t device_plane_start = GetStartTimestampNs(*device_traces[i]); XPlaneBuilder device_plane(device_traces[i]); const XStatMetadata& group_id_stat_metadata = *device_plane.GetOrCreateStatMetadata( GetStatTypeStr(StatType::kGroupId)); const XStatMetadata& num_launches_stat_metadata = *device_plane.GetOrCreateStatMetadata("num_launches"); const XStatMetadata& max_launch_time_us_stat_metadata = *device_plane.GetOrCreateStatMetadata("max_launch_time_us"); const XStatMetadata& avg_launch_time_us_stat_metadata = *device_plane.GetOrCreateStatMetadata("avg_launch_time_us"); XLineBuilder launch_line = device_plane.GetOrCreateLine(kThreadIdKernelLaunch); launch_line.SetName(kKernelLaunchLineName); launch_line.SetTimestampNs(std::min(device_plane_start, host_plane_start)); for (const auto& kv : per_device_launch_info[i]) { int64_t group_id = kv.first; const GroupLaunchInfo& group_info = kv.second; if (const tsl::profiler::GroupMetadata* group_metadata = gtl::FindOrNull(group_metadata_map, group_id)) { XEventBuilder device_event = launch_line.AddEvent(*device_plane.GetOrCreateEventMetadata( absl::StrCat("Launch Stats for ", group_metadata->name))); device_event.SetTimespan(group_info.timespan); device_event.AddStatValue(group_id_stat_metadata, group_id); device_event.AddStatValue(num_launches_stat_metadata, group_info.stat.count()); device_event.AddStatValue( max_launch_time_us_stat_metadata, tsl::profiler::PicoToMicro(group_info.stat.max())); device_event.AddStatValue( avg_launch_time_us_stat_metadata, tsl::profiler::PicoToMicro(group_info.stat.avg())); } } } } void GenerateDerivedTimeLines( const tsl::profiler::GroupMetadataMap& group_metadata_map, XSpace* space) { HloModuleMap hlo_module_map; { HloProtoMap hlo_proto_map; hlo_proto_map.AddHloProtosFromXSpace(*space); for (const auto& [program_id, hlo_proto] : hlo_proto_map) { AddHloProto(hlo_module_map, program_id, *hlo_proto); } } auto symbol_resolver = [&](absl::optional<uint64_t> program_id, absl::string_view hlo_module, absl::string_view hlo_op) -> Symbol { Symbol output; const auto* hlo_instruction = GetHloInstruction(hlo_module_map, program_id, hlo_op); if (hlo_instruction != nullptr) { output.tf_op_name = hlo_instruction->op_full_name(); output.source_info = std::string(hlo_instruction->source_info()); } return output; }; std::vector<XPlane*> device_planes = FindMutablePlanesWithPrefix(space, kGpuPlanePrefix); for (XPlane* plane : device_planes) { DeriveStepEventsFromGroups(group_metadata_map, plane); DeriveEventsFromAnnotations(symbol_resolver, plane); } const XPlane* host_plane = FindPlaneWithName(*space, kHostThreadsPlaneName); if (host_plane) { DeriveEventsFromHostTrace(host_plane, group_metadata_map, device_planes); } for (XPlane* plane : FindMutableTensorCorePlanes(space)) { DeriveLinesFromStats(plane); SortXPlane(plane); } } void DeriveLinesFromStats(XPlane* device_trace) { XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(device_trace); XPlaneBuilder 
plane_builder(device_trace); int64_t start_timestamp_ns = GetStartTimestampNs(*device_trace); DerivedXLineBuilder tf_ops( &plane_builder, tensorflow::profiler::kThreadIdTfOp, tensorflow::profiler::kTensorFlowOpLineName, start_timestamp_ns, {}); DerivedXLineBuilder tf_name_scope( &plane_builder, tensorflow::profiler::kThreadIdTfNameScope, tensorflow::profiler::kTensorFlowNameScopeLineName, start_timestamp_ns, {&tf_ops}); DerivedXLineBuilder source( &plane_builder, tensorflow::profiler::kThreadIdSource, tensorflow::profiler::kSourceLineName, start_timestamp_ns, {}); HostOffloadEventProcessor host_offload_event_processor(&plane_builder, start_timestamp_ns); for (const XEventVisitor& event : GetSortedEvents<XEventVisitor>(plane_visitor, true)) { tsl::profiler::Timespan event_span = event.GetTimespan(); std::optional<absl::string_view> tf_op_name; std::optional<absl::string_view> source_info; std::optional<uint64_t> group_id; std::optional<uint64_t> is_async; auto for_each_stat = [&](const XStatVisitor& stat) { if (stat.Type() == StatType::kTfOp) { tf_op_name = stat.StrOrRefValue(); } else if (stat.Type() == StatType::kGroupId) { group_id = stat.IntOrUintValue(); } else if (stat.Type() == StatType::kSourceInfo) { source_info = stat.StrOrRefValue(); } else if (stat.Type() == StatType::kIsAsync) { is_async = stat.IntOrUintValue(); } }; event.Metadata().ForEachStat(for_each_stat); event.ForEachStat(for_each_stat); if (is_async && *is_async) continue; if (tf_op_name && !tf_op_name->empty()) { ProcessTfOpEvent(*tf_op_name, event_span, group_id, plane_builder, tf_name_scope, tf_ops); } if (source_info && !source_info->empty()) { source.ExpandOrAddEvent( *plane_builder.GetOrCreateEventMetadata(*source_info), event_span, group_id); } if (host_offload_event_processor.IsHostOffloadOpName(event)) { host_offload_event_processor.ProcessHostOffloadOpEvent(event, group_id); } } RemoveEmptyLines(device_trace); } void DeriveLinesForXlaCpuOps(XPlane* host_trace) { if (host_trace == nullptr || !absl::StartsWith(host_trace->name(), kHostThreadsPlaneName)) return; XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(host_trace); XPlane destination_plane; XPlaneBuilder plane_builder(&destination_plane); int64_t line_id = tsl::profiler::kThreadIdHostXlaRegionStart; visitor.ForEachLine([&](const XLineVisitor& line) { int64_t start_timestamp_ns = line.TimestampNs(); DerivedXLineBuilder tf_ops( &plane_builder, line_id++, absl::StrCat(line.Name(), "-", tensorflow::profiler::kTensorFlowOpLineName), start_timestamp_ns, {}); DerivedXLineBuilder tf_name_scope( &plane_builder, line_id++, absl::StrCat(line.Name(), "-", tensorflow::profiler::kTensorFlowNameScopeLineName), start_timestamp_ns, {&tf_ops}); DerivedXLineBuilder xla_cpu_ops( &plane_builder, line_id++, absl::StrCat(line.Name(), "-", tsl::profiler::kXlaModuleLineName), start_timestamp_ns, {}); line.ForEachEvent([&](const XEventVisitor& event) { std::optional<std::string> hlo_module_name; std::optional<std::string> framework_op_name; event.ForEachStat([&](const XStatVisitor& stat) { if (!stat.Type().has_value()) return; switch (stat.Type().value()) { case StatType::kHloModule: hlo_module_name = stat.StrOrRefValue(); break; case StatType::kTfOp: framework_op_name = stat.StrOrRefValue(); break; } }); if (hlo_module_name.has_value()) { xla_cpu_ops.ExpandOrAddEvent( *plane_builder.GetOrCreateEventMetadata(*hlo_module_name), event.GetTimespan(), std::nullopt); if (framework_op_name.has_value()) { ProcessTfOpEvent(*framework_op_name, event.GetTimespan(), std::nullopt, 
plane_builder, tf_name_scope, tf_ops); } } }); }); RemoveEmptyLines(&destination_plane); MergePlanes(destination_plane, host_trace); } } }
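One subtle piece of the file above is AdjustDurationForTraceViewer. Here is a stand-alone sketch of its shrink rule, omitting the max_shrink_ns cap that the real code derives from the top-of-stack duration: each level whose span is identical to the level above it accumulates one more nanosecond (1000 ps) of end-time shrink, so the trace viewer nests the events instead of drawing them as flat siblings.

// Illustrative replication of the shrink rule; Span is a simplified
// {begin_ps, end_ps} stand-in for tsl::profiler::Timespan.
#include <cstdint>
#include <iostream>
#include <optional>
#include <utility>
#include <vector>

using Span = std::pair<int64_t, int64_t>;  // {begin_ps, end_ps}

void ShrinkForViewer(std::vector<Span>& stack) {
  int64_t shrink_ns = 0;
  std::optional<Span> prev;
  for (Span& s : stack) {
    if (prev && *prev == s) ++shrink_ns;  // identical to the level above
    prev = s;                             // compare against pre-shrink spans
    s.second -= 1000 * shrink_ns;         // 1 ns per accumulated level
  }
}

int main() {
  // Name scopes b and c of "a/b/c/Add" both cover [0, 10000): c ends up at
  // 9000 ps, matching TfOpNameScopeShrinkTest in the unit test below.
  std::vector<Span> stack = {{0, 10000}, {0, 10000}};
  ShrinkForViewer(stack);
  std::cout << stack[1].second << "\n";  // prints 9000
  return 0;
}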
#include "tensorflow/core/profiler/utils/derived_timeline.h" #include <cstdint> #include <map> #include <optional> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/tsl/profiler/utils/group_events.h" #include "xla/tsl/profiler/utils/tf_xplane_visitor.h" #include "xla/tsl/profiler/utils/xplane_schema.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/profiler/protobuf/xplane.pb.h" #include "tensorflow/core/profiler/utils/trace_utils.h" #include "tensorflow/core/profiler/utils/xplane_builder.h" #include "tensorflow/core/profiler/utils/xplane_schema.h" #include "tensorflow/core/profiler/utils/xplane_test_utils.h" #include "tensorflow/core/profiler/utils/xplane_visitor.h" namespace tensorflow { namespace profiler { namespace { TEST(DerivedTimelineTest, EmptySpaceTest) { XSpace space; tsl::profiler::GroupMetadataMap group_metadata_map; GenerateDerivedTimeLines(group_metadata_map, &space); EXPECT_EQ(space.planes_size(), 0); } TEST(DerivedTimelineTest, HloModuleNameTest) { const absl::string_view kHloModuleName = "hlo_module"; const absl::string_view kKernelDetails = "kernel_details"; XSpace space; tsl::profiler::GroupMetadataMap group_metadata_map; XPlane* plane = GetOrCreateGpuXPlane(&space, 0); XPlaneBuilder plane_builder(plane); auto line_builder = plane_builder.GetOrCreateLine(0); CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100, {{StatType::kHloModule, kHloModuleName}, {StatType::kKernelDetails, kKernelDetails}}); CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300, {{StatType::kHloModule, kHloModuleName}, {StatType::kKernelDetails, kKernelDetails}}); GenerateDerivedTimeLines(group_metadata_map, &space); XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane); EXPECT_EQ(plane_visitor.NumLines(), 2); plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) { if (line_visitor.Id() == 0) return; EXPECT_EQ(line_visitor.Id(), kThreadIdHloModule); EXPECT_EQ(line_visitor.NumEvents(), 1); line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) { EXPECT_EQ(event_visitor.Name(), kHloModuleName); }); }); } TEST(DerivedTimelineTest, NoHloModuleNameTest) { const absl::string_view kKernelDetails = "kernel_details"; const uint64_t kCudaGraphExecId = 1; XSpace space; tsl::profiler::GroupMetadataMap group_metadata_map; XPlane& plane = *GetOrCreateGpuXPlane(&space, 0); XPlaneBuilder plane_builder(&plane); auto line_builder = plane_builder.GetOrCreateLine(0); CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100, {{StatType::kKernelDetails, kKernelDetails}}); CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300, {{StatType::kKernelDetails, kKernelDetails}}); CreateXEvent(&plane_builder, &line_builder, "op3", 500, 100, {{StatType::kCudaGraphExecId, kCudaGraphExecId}}); GenerateDerivedTimeLines(group_metadata_map, &space); XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&plane); EXPECT_EQ(plane_visitor.NumLines(), 1); plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) { if (line_visitor.Id() == 0) return; EXPECT_EQ(line_visitor.Id(), kThreadIdHloModule); EXPECT_EQ(line_visitor.NumEvents(), 0); }); } TEST(DerivedTimelineTest, TfOpLineTest) { const absl::string_view kTfOpName = "mul:Mul"; const absl::string_view kKernelDetails = "kernel_details"; const uint64_t kCudaGraphExecId = 1; XSpace space; tsl::profiler::GroupMetadataMap group_metadata_map; XPlane* plane = GetOrCreateGpuXPlane(&space, 0); XPlaneBuilder 
plane_builder(plane); auto line_builder = plane_builder.GetOrCreateLine(0); CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100, {{StatType::kTfOp, kTfOpName}, {StatType::kKernelDetails, kKernelDetails}}); CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300, {{StatType::kTfOp, kTfOpName}, {StatType::kKernelDetails, kKernelDetails}}); CreateXEvent(&plane_builder, &line_builder, "op3", 500, 100, {{StatType::kTfOp, kTfOpName}, {StatType::kCudaGraphExecId, kCudaGraphExecId}}); GenerateDerivedTimeLines(group_metadata_map, &space); XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane); EXPECT_EQ(plane_visitor.NumLines(), 2); plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) { if (line_visitor.Id() == 0) return; EXPECT_EQ(line_visitor.Id(), kThreadIdTfOp); EXPECT_EQ(line_visitor.NumEvents(), 1); line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) { EXPECT_EQ(event_visitor.Name(), kTfOpName); EXPECT_EQ(event_visitor.OffsetPs(), 0); EXPECT_EQ(event_visitor.DurationPs(), 600); }); }); } TEST(DerivedTimelineTest, DependencyTest) { constexpr int64_t kFirstGroupId = 0; constexpr int64_t kSecondGroupId = 1; const absl::string_view kTfOpName = "mul:Mul"; const absl::string_view kKernelDetails = "kernel_details"; XSpace space; tsl::profiler::GroupMetadataMap group_metadata_map( {{0, {"train 0"}}, {1, {"train 1"}}}); XPlane* plane = GetOrCreateGpuXPlane(&space, 0); XPlaneBuilder plane_builder(plane); auto line_builder = plane_builder.GetOrCreateLine(0); CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100, {{StatType::kGroupId, kFirstGroupId}, {StatType::kTfOp, kTfOpName}, {StatType::kKernelDetails, kKernelDetails}}); CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300, {{StatType::kGroupId, kSecondGroupId}, {StatType::kTfOp, kTfOpName}, {StatType::kKernelDetails, kKernelDetails}}); GenerateDerivedTimeLines(group_metadata_map, &space); XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane); EXPECT_EQ(plane_visitor.NumLines(), 3); plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) { if (line_visitor.Id() == 0) return; EXPECT_TRUE(line_visitor.Id() == kThreadIdStepInfo || line_visitor.Id() == kThreadIdTfOp); EXPECT_EQ(line_visitor.NumEvents(), 2); }); } TEST(DerivedTimelineTest, TfOpNameScopeTest) { const absl::string_view kTfOpName = "scope1/scope2/mul:Mul"; const absl::string_view kKernelDetails = "kernel_details"; XSpace space; tsl::profiler::GroupMetadataMap group_metadata_map; XPlane* plane = GetOrCreateGpuXPlane(&space, 0); XPlaneBuilder plane_builder(plane); auto line_builder = plane_builder.GetOrCreateLine(0); CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100, {{StatType::kTfOp, kTfOpName}, {StatType::kKernelDetails, kKernelDetails}}); CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300, {{StatType::kTfOp, kTfOpName}, {StatType::kKernelDetails, kKernelDetails}}); GenerateDerivedTimeLines(group_metadata_map, &space); XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane); EXPECT_EQ(plane_visitor.NumLines(), 3); plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) { int64_t line_id = line_visitor.Id(); if (line_id == 0) { return; } else if (line_id == kThreadIdTfNameScope) { EXPECT_EQ(line_visitor.NumEvents(), 2); line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) { EXPECT_EQ(event_visitor.OffsetPs(), 0); EXPECT_EQ(event_visitor.DurationPs(), 500); }); } else if (line_id == kThreadIdTfOp) { EXPECT_EQ(line_visitor.NumEvents(), 1); 
line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) { EXPECT_EQ(event_visitor.Name(), kTfOpName); EXPECT_EQ(event_visitor.OffsetPs(), 0); EXPECT_EQ(event_visitor.DurationPs(), 500); }); } }); } TEST(DerivedTimelineTest, TfOpNameScopeShrinkTest) { { XSpace space; tsl::profiler::GroupMetadataMap group_metadata_map; XPlane* plane = GetOrCreateGpuXPlane(&space, 0); XPlaneBuilder plane_builder(plane); auto line_builder = plane_builder.GetOrCreateLine(0); CreateXEvent(&plane_builder, &line_builder, "op1", 0, 10000, {{StatType::kTfOp, "a/b/c/Add:Add"}, {StatType::kKernelDetails, "blah"}}); CreateXEvent( &plane_builder, &line_builder, "op2", 20000, 30000, {{StatType::kTfOp, "a/d/Mul:Mul"}, {StatType::kKernelDetails, "blah"}}); GenerateDerivedTimeLines(group_metadata_map, &space); XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane); EXPECT_EQ(plane_visitor.NumLines(), 3); plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) { int64_t line_id = line_visitor.Id(); if (line_id == 0) { return; } else if (line_id == kThreadIdTfNameScope) { EXPECT_EQ(line_visitor.NumEvents(), 4); std::map<absl::string_view, uint64_t> durations; line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) { durations[event_visitor.Name()] = event_visitor.DurationPs(); }); EXPECT_EQ(durations["a"], 50000); EXPECT_EQ(durations["b"], 10000); EXPECT_EQ(durations["c"], 9000); EXPECT_EQ(durations["d"], 30000); } }); } { XSpace space; tsl::profiler::GroupMetadataMap group_metadata_map; XPlane* plane = GetOrCreateGpuXPlane(&space, 0); XPlaneBuilder plane_builder(plane); auto line_builder = plane_builder.GetOrCreateLine(0); CreateXEvent(&plane_builder, &line_builder, "op1", 0, 10000, {{StatType::kTfOp, "a/b/c/d/e/Add:Add"}, {StatType::kKernelDetails, "blah"}}); CreateXEvent(&plane_builder, &line_builder, "op2", 10000, 2000, {{StatType::kTfOp, "a/b/c/d/f/Sub:Sub"}, {StatType::kKernelDetails, "blah"}}); CreateXEvent( &plane_builder, &line_builder, "op3", 20000, 30000, {{StatType::kTfOp, "a/g/Mul:Mul"}, {StatType::kKernelDetails, "blah"}}); GenerateDerivedTimeLines(group_metadata_map, &space); XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(plane); EXPECT_EQ(plane_visitor.NumLines(), 3); plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) { int64_t line_id = line_visitor.Id(); if (line_id == 0) { return; } else if (line_id == kThreadIdTfNameScope) { EXPECT_EQ(line_visitor.NumEvents(), 7); std::map<absl::string_view, uint64_t> durations; line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) { durations[event_visitor.Name()] = event_visitor.DurationPs(); }); for (const auto& [name, duration] : durations) { LOG(ERROR) << name << ": " << duration; } EXPECT_EQ(durations["a"], 50000); EXPECT_EQ(durations["b"], 12000); EXPECT_EQ(durations["c"], 11000); EXPECT_EQ(durations["d"], 11000); EXPECT_EQ(durations["e"], 10000); EXPECT_EQ(durations["f"], 1000); EXPECT_EQ(durations["g"], 30000); } }); } } TEST(DerivedTimelineTest, XloOpHasCudaGraphStats) { constexpr absl::string_view kModuleName = "module"; constexpr absl::string_view kHloOpName = "op_level_2"; constexpr absl::string_view kKernelDetails = "kernel_details"; constexpr int64_t kGroupIdValue = 1; constexpr int64_t kCorrelationIdValue = 10000; const uint64_t kCudaGraphIdValue = 20; XSpace space; tsl::profiler::GroupMetadataMap group_metadata_map; XPlane& plane = *GetOrCreateGpuXPlane(&space, 0); XPlaneBuilder plane_builder(&plane); auto line_builder = plane_builder.GetOrCreateLine(0); 
CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100, {{StatType::kKernelDetails, kKernelDetails}, {StatType::kGroupId, kGroupIdValue}, {StatType::kHloModule, kModuleName}, {StatType::kHloOp, kHloOpName}, {StatType::kCorrelationId, kCorrelationIdValue}, {StatType::kCudaGraphId, kCudaGraphIdValue}}); CreateXEvent(&plane_builder, &line_builder, "op2", 200, 300, {{StatType::kKernelDetails, kKernelDetails}, {StatType::kGroupId, kGroupIdValue}, {StatType::kHloModule, kModuleName}, {StatType::kHloOp, kHloOpName}, {StatType::kCorrelationId, kCorrelationIdValue}, {StatType::kCudaGraphId, kCudaGraphIdValue}}); GenerateDerivedTimeLines(group_metadata_map, &space); size_t num_hlo_op_line = 0; size_t num_events = 0; std::optional<XStatVisitor> correlation_id; std::optional<XStatVisitor> cuda_graph_id; XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&plane); plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) { if (line_visitor.Id() == kThreadIdHloOp) { num_hlo_op_line++; if (num_hlo_op_line == 1) { num_events = line_visitor.NumEvents(); line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) { correlation_id = event_visitor.GetStat(StatType::kCorrelationId); cuda_graph_id = event_visitor.GetStat(StatType::kCudaGraphId); }); } } }); EXPECT_EQ(num_hlo_op_line, 1); EXPECT_EQ(num_events, 1); ASSERT_TRUE(correlation_id.has_value()); EXPECT_EQ(correlation_id->IntValue(), kCorrelationIdValue); ASSERT_TRUE(cuda_graph_id.has_value()); EXPECT_EQ(cuda_graph_id->UintValue(), kCudaGraphIdValue); } TEST(DerivedTimelineTest, DeriveLinesForXlaCpuOps) { XPlane xplane; XPlaneBuilder plane_builder(&xplane); plane_builder.SetName(tsl::profiler::kHostThreadsPlaneName); absl::string_view main_line_name = "main"; auto line_builder = plane_builder.GetOrCreateLine(0); line_builder.SetName(main_line_name); CreateXEvent(&plane_builder, &line_builder, "op1", 0, 100, {{StatType::kHloModule, "Module1"}}); CreateXEvent(&plane_builder, &line_builder, "op2", 200, 400, {{StatType::kHloModule, "Module2"}}); DeriveLinesForXlaCpuOps(&xplane); XPlaneVisitor plane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane); EXPECT_EQ(plane_visitor.NumLines(), 2); plane_visitor.ForEachLine([&](const XLineVisitor& line_visitor) { if (line_visitor.Name() == main_line_name) return; line_visitor.ForEachEvent([&](const XEventVisitor& event_visitor) { if (event_visitor.Name() == "Module1") { EXPECT_EQ(event_visitor.DurationPs(), 100); EXPECT_EQ(event_visitor.OffsetPs(), 0); } else if (event_visitor.Name() == "Module2") { EXPECT_EQ(event_visitor.DurationPs(), 400); EXPECT_EQ(event_visitor.OffsetPs(), 200); } else { FAIL() << "Found Event " << event_visitor.Name(); } }); }); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/derived_timeline.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/derived_timeline_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
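The CUDA-graph case above expects op1, op2, and op3 to collapse into a single 600 ps event on the derived TF-op line. Below is a minimal, self-contained sketch of that coalescing step; DerivedEvent and MergeAdjacent are hypothetical names for illustration, not the actual internals of derived_timeline.cc.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for an XEvent on a derived line: a named interval
// in picoseconds. Not the TensorFlow profiler API.
struct DerivedEvent {
  std::string name;
  uint64_t offset_ps;
  uint64_t duration_ps;
  uint64_t end_ps() const { return offset_ps + duration_ps; }
};

// Coalesce consecutive same-named events into one event spanning from the
// first offset to the last end, mirroring how op1/op2/op3 above become a
// single 600 ps event on the derived TF-op line.
std::vector<DerivedEvent> MergeAdjacent(const std::vector<DerivedEvent>& in) {
  std::vector<DerivedEvent> out;
  for (const DerivedEvent& e : in) {
    if (!out.empty() && out.back().name == e.name) {
      out.back().duration_ps = e.end_ps() - out.back().offset_ps;
    } else {
      out.push_back(e);
    }
  }
  return out;
}

int main() {
  std::vector<DerivedEvent> events = {
      {"mul:Mul", 0, 100}, {"mul:Mul", 200, 300}, {"mul:Mul", 500, 100}};
  for (const DerivedEvent& e : MergeAdjacent(events)) {
    std::cout << e.name << " offset=" << e.offset_ps
              << " duration=" << e.duration_ps << "\n";  // offset=0, duration=600
  }
}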
a906af01-7ab9-4be4-9a21-19751f5ce494
cpp
tensorflow/tensorflow
generic_layout_optimizer_transposer_factory
tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.cc
tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory_test.cc
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.h" #include "tensorflow/core/grappler/op_types.h" namespace tensorflow { namespace grappler { std::shared_ptr<Transposer> TransposerFactory::GetTransposer( const NodeDef& node) { if (IsDefaultLayoutSensitiveOp(node)) { return GetOrCreateIfNotFound<DefaultLayoutSensitiveOpTransposer>( "DefaultLayoutSensitiveOp"); } if (IsAvgPoolGrad(node)) { return GetOrCreateIfNotFound<AvgPoolGradTransposer>("AvgPoolGrad"); } if (IsBiasAddV2(node)) { return GetOrCreateIfNotFound<BiasAddTransposer>("BiasAdd"); } if (IsBiasAddGrad(node)) { return GetOrCreateIfNotFound<BiasAddGradTransposer>("BiasAddGrad"); } if (IsConv2DBackpropFilter(node) || IsDepthwiseConv2dNativeBackpropFilter(node)) { return GetOrCreateIfNotFound<Conv2DBackpropFilterTransposer>( "Conv2DBackpropFilter"); } if (IsConv2DBackpropInput(node) || IsDepthwiseConv2dNativeBackpropInput(node)) { return GetOrCreateIfNotFound<Conv2DBackpropInputTransposer>( "Conv2DBackpropInput"); } if (IsConv3D(node)) { return GetOrCreateIfNotFound<Conv3DTransposer>("Conv3D"); } if (IsConv3DBackpropInputV2(node)) { return GetOrCreateIfNotFound<Conv3DBackpropInputTransposer>( "Conv3DBackpropInput"); } if (IsConv3DBackpropFilterV2(node)) { return GetOrCreateIfNotFound<Conv3DBackpropFilterTransposer>( "Conv3DBackpropFilter"); } if (IsFusedBatchNormEx(node)) { return GetOrCreateIfNotFound<FusedBatchNormExTransposer>( "FusedBatchNormEx"); } if (IsFusedBatchNormGrad(node)) { return GetOrCreateIfNotFound<FusedBatchNormGradTransposer>( "FusedBatchNormGrad"); } if (IsMaxPoolV2(node)) { return GetOrCreateIfNotFound<MaxPoolV2Transposer>("MaxPoolV2"); } if (IsMaxPoolGrad(node) || IsMaxPoolGradGradV1(node)) { return GetOrCreateIfNotFound<MaxPoolGradTransposer>("MaxPoolGrad"); } if (IsMaxPoolGradV2(node) || IsMaxPoolGradGradV2(node)) { return GetOrCreateIfNotFound<MaxPoolGradV2Transposer>("MaxPoolGradV2"); } if (IsMaxPool3D(node)) { return GetOrCreateIfNotFound<MaxPool3DTransposer>("MaxPool3D"); } if (IsDefaultLayoutAgnosticOp(node)) { return GetOrCreateIfNotFound<DefaultLayoutAgnosticOpTransposer>( "DefaultLayoutAgnosticOp"); } if (IsAddN(node)) { return GetOrCreateIfNotFound<AddNTransposer>("AddN"); } if (IsBinaryOp(node)) { return GetOrCreateIfNotFound<BinaryOpTransposer>("BinaryOp"); } if (IsConcat(node)) { return GetOrCreateIfNotFound<ConcatOpTransposer>("Concat"); } if (IsFill(node)) { return GetOrCreateIfNotFound<FillOpTransposer>("Fill"); } if (IsIdentityN(node)) { return GetOrCreateIfNotFound<IdentityNTransposer>("IdentityN"); } if (IsMerge(node)) { return GetOrCreateIfNotFound<MergeTransposer>("Merge"); } if (IsMirrorPad(node) || IsMirrorPadGrad(node) || IsPad(node)) { return GetOrCreateIfNotFound<PadTransposer>("Pad"); } if (IsReduceOp(node)) { return GetOrCreateIfNotFound<ReduceTransposer>("ReduceOp"); } if (IsReverseV2(node)) { return GetOrCreateIfNotFound<ReverseV2Transposer>("ReverseV2"); } if (IsSelect(node)) { return GetOrCreateIfNotFound<SelectTransposer>("Select"); } if (IsShape(node)) { return GetOrCreateIfNotFound<ShapeTransposer>("Shape"); } if (IsShapeN(node)) { return GetOrCreateIfNotFound<ShapeNTransposer>("ShapeN"); } if (IsSlice(node)) { return GetOrCreateIfNotFound<SliceTransposer>("Slice"); } if (IsSplit(node)) { return GetOrCreateIfNotFound<SplitTransposer>("Split"); } if (IsSplitV(node)) { return GetOrCreateIfNotFound<SplitVTransposer>("SplitV"); } if (IsSqueeze(node)) { return GetOrCreateIfNotFound<SqueezeTransposer>("Squeeze"); } if (IsStridedSlice(node)) { 
return GetOrCreateIfNotFound<StridedSliceTransposer>("StridedSlice"); } if (IsSwitch(node)) { return GetOrCreateIfNotFound<SwitchTransposer>("Switch"); } if (IsTernaryOp(node)) { return GetOrCreateIfNotFound<TernaryOpTransposer>("TernaryOp"); } if (IsTile(node)) { return GetOrCreateIfNotFound<TileTransposer>("Tile"); } if (IsUnaryGrad(node)) { return GetOrCreateIfNotFound<UnaryGradTransposer>("UnaryGrad"); } return nullptr; } } }
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.h" #include <memory> #include "absl/container/flat_hash_set.h" #include "absl/types/span.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace grappler { namespace { void CheckSameTransposerForOps(absl::Span<const string> ops, TransposerFactory* factory, absl::flat_hash_set<Transposer*>* transposers) { absl::flat_hash_set<Transposer*> created_transposers; for (int i = 0; i < ops.size(); i++) { NodeDef node; node.set_op(ops[i]); std::shared_ptr<Transposer> transposer1 = factory->GetTransposer(node); ASSERT_NE(transposer1, nullptr); if (i == 0) { EXPECT_TRUE(transposers->insert(transposer1.get()).second); } else { EXPECT_FALSE(transposers->insert(transposer1.get()).second); } std::shared_ptr<Transposer> transposer2 = factory->GetTransposer(node); ASSERT_NE(transposer2, nullptr); EXPECT_EQ(transposer1.get(), transposer2.get()); created_transposers.insert(transposer1.get()); } if (!ops.empty()) { EXPECT_EQ(created_transposers.size(), 1); } } TEST(TransposerFactoryTest, SanityCheck) { TransposerFactory factory; absl::flat_hash_set<Transposer*> transposers; CheckSameTransposerForOps( {"Conv2D", "FusedBatchNorm", "DepthwiseConv2dNative"}, &factory, &transposers); CheckSameTransposerForOps({"AvgPoolGrad"}, &factory, &transposers); CheckSameTransposerForOps({"BiasAddGrad"}, &factory, &transposers); CheckSameTransposerForOps({"_FusedBatchNormEx"}, &factory, &transposers); CheckSameTransposerForOps({"FusedBatchNormGrad", "FusedBatchNormGradV2"}, &factory, &transposers); CheckSameTransposerForOps( {"Conv2DBackpropFilter", "DepthwiseConv2dNativeBackpropFilter"}, &factory, &transposers); CheckSameTransposerForOps( {"Conv2DBackpropInput", "DepthwiseConv2dNativeBackpropInput"}, &factory, &transposers); CheckSameTransposerForOps({"MaxPoolGrad", "MaxPoolGradGrad"}, &factory, &transposers); CheckSameTransposerForOps({"MaxPoolGradV2", "MaxPoolGradGradV2"}, &factory, &transposers); CheckSameTransposerForOps({"AddN"}, &factory, &transposers); CheckSameTransposerForOps({"IdentityN"}, &factory, &transposers); CheckSameTransposerForOps({"Merge", "RefMerge"}, &factory, &transposers); CheckSameTransposerForOps({"Select"}, &factory, &transposers); CheckSameTransposerForOps({"Switch", "RefSwitch"}, &factory, &transposers); CheckSameTransposerForOps({"Betainc"}, &factory, &transposers); CheckSameTransposerForOps({"TanhGrad"}, &factory, &transposers); CheckSameTransposerForOps({"Squeeze"}, &factory, &transposers); CheckSameTransposerForOps({"MaxPoolV2"}, &factory, &transposers); CheckSameTransposerForOps({"RealDiv", "Atan2", "Complex"}, &factory, &transposers); CheckSameTransposerForOps({"Concat", "ConcatV2"}, &factory, &transposers); CheckSameTransposerForOps({"Pad", "PadV2", "MirrorPad", "MirrorPadGrad"}, &factory, &transposers); CheckSameTransposerForOps({"ReverseV2"}, &factory, &transposers); CheckSameTransposerForOps({"Tile"}, &factory, &transposers); CheckSameTransposerForOps({"Shape"}, &factory, &transposers); CheckSameTransposerForOps({"ShapeN"}, &factory, &transposers); CheckSameTransposerForOps({"Fill"}, &factory, &transposers); CheckSameTransposerForOps({"Slice"}, &factory, &transposers); CheckSameTransposerForOps({"Split"}, &factory, &transposers); CheckSameTransposerForOps({"SplitV"}, &factory, &transposers); 
CheckSameTransposerForOps({"StridedSlice"}, &factory, &transposers); CheckSameTransposerForOps({"Sum", "Mean", "Prod", "Max", "Min", "All", "Any"}, &factory, &transposers); NodeDef node_unknown; node_unknown.set_op("UnknownOp"); std::shared_ptr<Transposer> transposer_unknown = factory.GetTransposer(node_unknown); EXPECT_TRUE(transposer_unknown == nullptr); } TEST(TransposerFactoryTest, ShouldUseAllOpTransposer) { TransposerFactory factory; std::vector<OpDef> op_defs; OpRegistry::Global()->GetRegisteredOps(&op_defs); NodeDef node; AttrValue value; value.set_type(DataType::DT_DOUBLE); node.mutable_attr()->insert({"T", value}); for (const OpDef& op_def : op_defs) { node.set_op(op_def.name()); std::shared_ptr<Transposer> transposer = factory.GetTransposer(node); if (transposer != nullptr) { EXPECT_TRUE(IsLayoutSensitiveOp(node) || IsLayoutAgnosticOp(node)) << "Transposer for op \"" << node.op() << "\" is created but not used. Add it to IsLayoutSensitiveOp or " "IsLayoutAgnosticOp."; } } } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
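The factory above hands out one shared Transposer instance per key, which is what the SanityCheck test verifies by comparing raw pointers across repeated lookups. A minimal sketch of that GetOrCreateIfNotFound-style memoization, assuming a hypothetical Transposer base class rather than the grappler one:

#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Minimal stand-in for the transposer cache: the first lookup for a key
// constructs the object, later lookups return the same shared instance.
struct Transposer {
  virtual ~Transposer() = default;
};
struct PadTransposer : Transposer {};

class Factory {
 public:
  template <typename T>
  std::shared_ptr<Transposer> GetOrCreateIfNotFound(const std::string& key) {
    // operator[] default-constructs an empty slot on first access.
    std::shared_ptr<Transposer>& slot = cache_[key];
    if (slot == nullptr) slot = std::make_shared<T>();
    return slot;
  }

 private:
  std::unordered_map<std::string, std::shared_ptr<Transposer>> cache_;
};

int main() {
  Factory factory;
  auto a = factory.GetOrCreateIfNotFound<PadTransposer>("Pad");
  auto b = factory.GetOrCreateIfNotFound<PadTransposer>("Pad");
  std::cout << std::boolalpha << (a.get() == b.get()) << "\n";  // true
}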
f09af30f-f8df-4c58-a306-5b6f6fb53caf
cpp
google/tensorstore
lock_collection
tensorstore/internal/lock_collection.cc
tensorstore/internal/lock_collection_test.cc
#include "tensorstore/internal/lock_collection.h" namespace tensorstore { namespace internal { bool LockCollection::MutexSharedLockFunction(void* mutex, bool lock) ABSL_NO_THREAD_SAFETY_ANALYSIS { auto& m = *static_cast<absl::Mutex*>(mutex); if (lock) { m.ReaderLock(); } else { m.ReaderUnlock(); } return true; } bool LockCollection::MutexExclusiveLockFunction(void* mutex, bool lock) ABSL_NO_THREAD_SAFETY_ANALYSIS { auto& m = *static_cast<absl::Mutex*>(mutex); if (lock) { m.WriterLock(); } else { m.WriterUnlock(); } return true; } bool LockCollection::try_lock() { if (locks_.size() > 1) { std::sort(locks_.begin(), locks_.end(), [](const Entry& a, const Entry& b) { return a.tagged_pointer < b.tagged_pointer; }); locks_.erase(std::unique(locks_.begin(), locks_.end(), [](const Entry& a, const Entry& b) { return a.data() == b.data(); }), locks_.end()); } size_t i = 0, size = locks_.size(); auto* locks = locks_.data(); for (; i < size; ++i) { auto& entry = locks[i]; if (!entry.lock_function(entry.data(), true)) { while (i > 0) { --i; auto& prev_entry = locks[i]; prev_entry.lock_function(prev_entry.data(), false); } return false; } } return true; } void LockCollection::unlock() { for (const auto& entry : locks_) { entry.lock_function(entry.data(), false); } } void LockCollection::clear() { locks_.clear(); } } }
#include "tensorstore/internal/lock_collection.h" #include <array> #include <cstddef> #include <mutex> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "absl/synchronization/mutex.h" #include "tensorstore/internal/testing/concurrent.h" namespace { using ::tensorstore::internal::LockCollection; using ::tensorstore::internal_testing::TestConcurrent; TEST(LockCollectionTest, Empty) { LockCollection c; { std::unique_lock<LockCollection> guard(c, std::try_to_lock); ASSERT_TRUE(guard); } } TEST(LockCollectionTest, SingleShared) { absl::Mutex m; LockCollection c; c.RegisterShared(m); { std::unique_lock<LockCollection> guard(c, std::try_to_lock); ASSERT_TRUE(guard); m.AssertReaderHeld(); } m.AssertNotHeld(); { std::unique_lock<LockCollection> guard(c, std::try_to_lock); ASSERT_TRUE(guard); m.AssertReaderHeld(); } m.AssertNotHeld(); } TEST(LockCollectionTest, SingleSharedDuplicate) { absl::Mutex m; LockCollection c; c.RegisterShared(m); c.RegisterShared(m); c.RegisterShared(m); { std::unique_lock<LockCollection> guard(c, std::try_to_lock); ASSERT_TRUE(guard); m.AssertReaderHeld(); } m.AssertNotHeld(); { std::unique_lock<LockCollection> guard(c, std::try_to_lock); ASSERT_TRUE(guard); m.AssertReaderHeld(); } m.AssertNotHeld(); } TEST(LockCollectionTest, SingleExclusive) { absl::Mutex m; LockCollection c; c.RegisterExclusive(m); { std::unique_lock<LockCollection> guard(c, std::try_to_lock); ASSERT_TRUE(guard); m.AssertHeld(); } m.AssertNotHeld(); { std::unique_lock<LockCollection> guard(c, std::try_to_lock); ASSERT_TRUE(guard); m.AssertHeld(); } m.AssertNotHeld(); } TEST(LockCollectionTest, SingleExclusiveDuplicate) { absl::Mutex m; LockCollection c; c.RegisterShared(m); c.RegisterExclusive(m); c.RegisterShared(m); { std::unique_lock<LockCollection> guard(c, std::try_to_lock); ASSERT_TRUE(guard); m.AssertHeld(); } m.AssertNotHeld(); } TEST(LockCollectionTest, Multiple) { absl::Mutex m[3]; LockCollection c; c.RegisterShared(m[0]); c.RegisterExclusive(m[0]); c.RegisterShared(m[1]); c.RegisterShared(m[0]); c.RegisterShared(m[2]); c.RegisterShared(m[1]); c.RegisterShared(m[1]); c.RegisterShared(m[2]); { std::unique_lock<LockCollection> guard(c, std::try_to_lock); ASSERT_TRUE(guard); m[0].AssertHeld(); m[1].AssertReaderHeld(); m[2].AssertReaderHeld(); } m[0].AssertNotHeld(); m[1].AssertNotHeld(); m[2].AssertNotHeld(); } #if !defined(_WIN32) TEST(LockCollectionTest, MultipleConcurrentExclusive) { constexpr static size_t kNumMutexes = 3; absl::Mutex m[kNumMutexes]; constexpr static size_t kNumCollections = 3; LockCollection c[kNumCollections]; std::array<int, kNumMutexes> mutex_indices; absl::c_iota(mutex_indices, 0); const auto RegisterFromPermutation = [&](LockCollection& lock_collection) { for (auto i : mutex_indices) lock_collection.RegisterExclusive(m[i]); }; RegisterFromPermutation(c[0]); absl::c_next_permutation(mutex_indices); RegisterFromPermutation(c[1]); while (absl::c_next_permutation(mutex_indices)) { c[2] = LockCollection(); RegisterFromPermutation(c[2]); TestConcurrent<kNumCollections>( 100, [] {}, [] {}, [&](size_t i) { std::unique_lock<LockCollection> guard(c[i], std::try_to_lock); ASSERT_TRUE(guard); }); } } TEST(LockCollectionTest, MultipleConcurrentExclusiveShared) { constexpr static size_t kNumMutexes = 3; absl::Mutex m[kNumMutexes]; constexpr static size_t kNumCollections = 3; constexpr static size_t kNumSharedCombinations = size_t(1) << kNumMutexes; LockCollection c[kNumCollections]; std::array<int, 
kNumMutexes> mutex_indices; absl::c_iota(mutex_indices, 0); const auto RegisterFromPermutation = [&](LockCollection& lock_collection, size_t shared_bit_vector) { for (auto i : mutex_indices) { if ((shared_bit_vector >> i) & 1) { lock_collection.RegisterShared(m[i]); } else { lock_collection.RegisterExclusive(m[i]); } } }; RegisterFromPermutation(c[0], 0); absl::c_next_permutation(mutex_indices); RegisterFromPermutation(c[1], ~size_t(0)); while (absl::c_next_permutation(mutex_indices)) { for (size_t shared_bit_vector = 0; shared_bit_vector < kNumSharedCombinations; ++shared_bit_vector) { c[2] = LockCollection(); RegisterFromPermutation(c[2], shared_bit_vector); TestConcurrent<kNumCollections>( 20, [] {}, [] {}, [&](size_t i) { std::unique_lock<LockCollection> guard(c[i], std::try_to_lock); EXPECT_TRUE(guard); }); } } } #endif struct LoggingLockable; using LockLog = std::vector<std::pair<LoggingLockable*, bool>>; struct LoggingLockable { LockLog& log; bool fail; }; TEST(LockCollectionTest, Fail) { LockLog log; LoggingLockable lockables[4] = { LoggingLockable{log, false}, LoggingLockable{log, false}, LoggingLockable{log, true}, LoggingLockable{log, true}, }; constexpr auto lock_function = [](void* data, bool lock) -> bool { auto* lockable = static_cast<LoggingLockable*>(data); lockable->log.emplace_back(lockable, lock); if (lock && lockable->fail) return false; return true; }; LockCollection c; for (auto& lockable : lockables) { c.Register(&lockable, lock_function, false); } std::unique_lock<LockCollection> guard(c, std::try_to_lock); EXPECT_FALSE(guard); EXPECT_THAT(log, ::testing::ElementsAre(::testing::Pair(&lockables[0], true), ::testing::Pair(&lockables[1], true), ::testing::Pair(&lockables[2], true), ::testing::Pair(&lockables[1], false), ::testing::Pair(&lockables[0], false))); } }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/lock_collection.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/lock_collection_test.cc
4f887a6430414cd6088e1743555015b10f116d50
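try_lock above avoids deadlock by sorting the registered entries by tagged pointer, dropping duplicates, acquiring in that order, and unlocking in reverse if any acquisition fails. A minimal sketch of the same idea over plain std::mutex; SimpleLockCollection is a hypothetical name, not the tensorstore API:

#include <algorithm>
#include <iostream>
#include <mutex>
#include <vector>

// Acquire a set of mutexes in a globally consistent address order so two
// collections over the same mutexes cannot deadlock, rolling back on any
// failed try_lock.
class SimpleLockCollection {
 public:
  void Register(std::mutex* m) { locks_.push_back(m); }

  bool try_lock() {
    // Sorting by address gives every collection the same acquisition
    // order; unique() drops duplicate registrations of the same mutex.
    std::sort(locks_.begin(), locks_.end());
    locks_.erase(std::unique(locks_.begin(), locks_.end()), locks_.end());
    for (size_t i = 0; i < locks_.size(); ++i) {
      if (!locks_[i]->try_lock()) {
        while (i > 0) locks_[--i]->unlock();  // release in reverse order
        return false;
      }
    }
    return true;
  }

  void unlock() {
    for (std::mutex* m : locks_) m->unlock();
  }

 private:
  std::vector<std::mutex*> locks_;
};

int main() {
  std::mutex a, b;
  SimpleLockCollection c;
  c.Register(&b);
  c.Register(&a);
  c.Register(&b);  // duplicate; deduplicated inside try_lock()
  if (c.try_lock()) {
    std::cout << "locked both mutexes\n";
    c.unlock();
  }
}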
0198949a-e919-4cf3-b597-d178b5b5c6e9
cpp
tensorflow/tensorflow
session
tensorflow/core/common_runtime/session.cc
tensorflow/core/common_runtime/session_test.cc
#include "tensorflow/core/public/session.h" #include <string> #include "tensorflow/core/common_runtime/session_factory.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/monitoring/gauge.h" #include "tensorflow/core/platform/logging.h" namespace tensorflow { namespace { auto* session_created = monitoring::Gauge<bool, 0>::New( "/tensorflow/core/session_created", "True if a session was created."); } void SetSessionCreatedMetric() { session_created->GetCell()->Set(true); } Session::Session() {} Session::~Session() {} Status Session::Run(const RunOptions& run_options, const std::vector<std::pair<string, Tensor> >& inputs, const std::vector<string>& output_tensor_names, const std::vector<string>& target_tensor_names, std::vector<Tensor>* outputs, RunMetadata* run_metadata) { return errors::Unimplemented( "Run with options is not supported for this session."); } Status Session::PRunSetup(const std::vector<string>& input_names, const std::vector<string>& output_names, const std::vector<string>& target_nodes, string* handle) { return errors::Unimplemented( "Partial run is not supported for this session."); } Status Session::PRun(const string& handle, const std::vector<std::pair<string, Tensor> >& inputs, const std::vector<string>& output_names, std::vector<Tensor>* outputs) { return errors::Unimplemented( "Partial run is not supported for this session."); } Session* NewSession(const SessionOptions& options) { SetSessionCreatedMetric(); Session* out_session; Status s = NewSession(options, &out_session); if (!s.ok()) { LOG(ERROR) << "Failed to create session: " << s; return nullptr; } return out_session; } Status NewSession(const SessionOptions& options, Session** out_session) { SessionFactory* factory; Status s = SessionFactory::GetFactory(options, &factory); if (!s.ok()) { *out_session = nullptr; LOG(ERROR) << "Failed to get session factory: " << s; return s; } SetSessionCreatedMetric(); s = factory->NewSession(options, out_session); if (!s.ok()) { *out_session = nullptr; LOG(ERROR) << "Failed to create session: " << s; } return s; } Status Reset(const SessionOptions& options, const std::vector<string>& containers) { SessionFactory* factory; TF_RETURN_IF_ERROR(SessionFactory::GetFactory(options, &factory)); return factory->Reset(options, containers); } }
#include "tensorflow/core/public/session.h" #include "tensorflow/core/common_runtime/session_factory.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { TEST(SessionTest, InvalidTargetReturnsNull) { SessionOptions options; options.target = "invalid target"; EXPECT_EQ(nullptr, tensorflow::NewSession(options)); Session* session; Status s = tensorflow::NewSession(options, &session); EXPECT_EQ(s.code(), error::NOT_FOUND); EXPECT_TRUE(absl::StrContains( s.message(), "No session factory registered for the given session options")); } class FakeSessionFactory : public SessionFactory { public: FakeSessionFactory() {} bool AcceptsOptions(const SessionOptions& options) override { return absl::StartsWith(options.target, "fake"); } Status NewSession(const SessionOptions& options, Session** out_session) override { *out_session = nullptr; return absl::OkStatus(); } }; class FakeSessionRegistrar { public: FakeSessionRegistrar() { SessionFactory::Register("FAKE_SESSION_1", new FakeSessionFactory()); SessionFactory::Register("FAKE_SESSION_2", new FakeSessionFactory()); } }; static FakeSessionRegistrar registrar; TEST(SessionTest, MultipleFactoriesForTarget) { SessionOptions options; options.target = "fakesession"; Session* session; Status s = tensorflow::NewSession(options, &session); EXPECT_EQ(s.code(), error::INTERNAL); EXPECT_TRUE(absl::StrContains(s.message(), "Multiple session factories")); EXPECT_TRUE(absl::StrContains(s.message(), "FAKE_SESSION_1")); EXPECT_TRUE(absl::StrContains(s.message(), "FAKE_SESSION_2")); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/session.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/session_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
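The tests above pin down the factory-dispatch rules: no matching factory yields NOT_FOUND, and two factories accepting the same target yield an INTERNAL error naming both. A minimal sketch of that dispatch logic, with hypothetical names (Factory, Registry, GetFactory) rather than the TensorFlow SessionFactory API:

#include <iostream>
#include <string>
#include <vector>

// A registered factory: a name plus a predicate over the session target.
struct Factory {
  std::string name;
  bool (*accepts)(const std::string& target);
};

std::vector<Factory>& Registry() {
  static std::vector<Factory> registry;
  return registry;
}

// Returns the unique factory accepting `target`, or nullptr with a message
// when zero or more than one factory matches.
const Factory* GetFactory(const std::string& target, std::string* error) {
  const Factory* match = nullptr;
  for (const Factory& f : Registry()) {
    if (!f.accepts(target)) continue;
    if (match != nullptr) {
      *error = "Multiple session factories: " + match->name + ", " + f.name;
      return nullptr;
    }
    match = &f;
  }
  if (match == nullptr) *error = "No session factory registered";
  return match;
}

int main() {
  Registry().push_back({"FAKE_SESSION_1",
                        [](const std::string& t) { return t == "fake"; }});
  Registry().push_back({"FAKE_SESSION_2",
                        [](const std::string& t) { return t == "fake"; }});
  std::string error;
  if (GetFactory("fake", &error) == nullptr) std::cout << error << "\n";
}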
e5b7ba67-593a-44d6-b196-01c568b6ef37
cpp
google/cel-cpp
macro_registry
parser/macro_registry.cc
parser/macro_registry_test.cc
#include "parser/macro_registry.h" #include <cstddef> #include <utility> #include "absl/status/status.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "absl/types/span.h" #include "parser/macro.h" namespace cel { absl::Status MacroRegistry::RegisterMacro(const Macro& macro) { if (!RegisterMacroImpl(macro)) { return absl::AlreadyExistsError( absl::StrCat("macro already exists: ", macro.key())); } return absl::OkStatus(); } absl::Status MacroRegistry::RegisterMacros(absl::Span<const Macro> macros) { for (size_t i = 0; i < macros.size(); ++i) { const auto& macro = macros[i]; if (!RegisterMacroImpl(macro)) { for (size_t j = 0; j < i; ++j) { macros_.erase(macros[j].key()); } return absl::AlreadyExistsError( absl::StrCat("macro already exists: ", macro.key())); } } return absl::OkStatus(); } absl::optional<Macro> MacroRegistry::FindMacro(absl::string_view name, size_t arg_count, bool receiver_style) const { if (name.empty() || absl::StrContains(name, ':')) { return absl::nullopt; } auto key = absl::StrCat(name, ":", arg_count, ":", receiver_style ? "true" : "false"); if (auto it = macros_.find(key); it != macros_.end()) { return it->second; } key = absl::StrCat(name, ":*:", receiver_style ? "true" : "false"); if (auto it = macros_.find(key); it != macros_.end()) { return it->second; } return absl::nullopt; } bool MacroRegistry::RegisterMacroImpl(const Macro& macro) { return macros_.insert(std::pair{macro.key(), macro}).second; } }
#include "parser/macro_registry.h" #include "absl/status/status.h" #include "absl/types/optional.h" #include "internal/testing.h" #include "parser/macro.h" namespace cel { namespace { using ::absl_testing::IsOk; using ::absl_testing::StatusIs; using ::testing::Eq; using ::testing::Ne; TEST(MacroRegistry, RegisterAndFind) { MacroRegistry macros; EXPECT_THAT(macros.RegisterMacro(HasMacro()), IsOk()); EXPECT_THAT(macros.FindMacro("has", 1, false), Ne(absl::nullopt)); } TEST(MacroRegistry, RegisterRollsback) { MacroRegistry macros; EXPECT_THAT(macros.RegisterMacros({HasMacro(), AllMacro(), AllMacro()}), StatusIs(absl::StatusCode::kAlreadyExists)); EXPECT_THAT(macros.FindMacro("has", 1, false), Eq(absl::nullopt)); } } }
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/parser/macro_registry.cc
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/parser/macro_registry_test.cc
4552db5798fb0853b131b783d8875794334fae7f
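RegisterMacros above is all-or-nothing: when a key collides, it erases everything it inserted from the same batch before returning AlreadyExists, which is exactly what the RegisterRollsback test observes. A minimal sketch of that rollback over a plain std::map; RegisterAll is a hypothetical name:

#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Insert a batch of key/value pairs; on the first duplicate key, erase
// the keys already inserted from this batch and report failure.
bool RegisterAll(std::map<std::string, int>& macros,
                 const std::vector<std::pair<std::string, int>>& batch) {
  for (size_t i = 0; i < batch.size(); ++i) {
    if (!macros.insert(batch[i]).second) {
      for (size_t j = 0; j < i; ++j) macros.erase(batch[j].first);
      return false;  // duplicate key: roll back this batch
    }
  }
  return true;
}

int main() {
  std::map<std::string, int> macros;
  // "all:2:true" appears twice, so nothing from the batch survives.
  bool ok = RegisterAll(macros, {{"has:1:false", 0},
                                 {"all:2:true", 1},
                                 {"all:2:true", 2}});
  std::cout << std::boolalpha << ok << " size=" << macros.size() << "\n";
  // prints: false size=0
}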
a6871c12-fea8-48ff-a88a-4a630c10dcb0
cpp
google/arolla
shape_qtype
arolla/qtype/shape_qtype.cc
arolla/qtype/shape_qtype_test.cc
#include "arolla/qtype/shape_qtype.h" #include <string> #include "absl/base/no_destructor.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "arolla/qtype/base_types.h" #include "arolla/qtype/optional_qtype.h" #include "arolla/qtype/qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/util/fingerprint.h" #include "arolla/util/meta.h" #include "arolla/util/repr.h" #include "arolla/util/unit.h" #include "arolla/util/status_macros_backport.h" namespace arolla { namespace { absl::Status EnsureIsBaseType(QTypePtr qtype) { return IsScalarQType(qtype) || IsOptionalQType(qtype) ? absl::OkStatus() : absl::InvalidArgumentError(absl::StrFormat( "Shape::WithValueQType supports only scalar and " "optional values, got %s", qtype->name())); } class ScalarShapeQType final : public ShapeQType { public: ScalarShapeQType() : ShapeQType(meta::type<ScalarShape>(), "SCALAR_SHAPE") {} absl::StatusOr<QTypePtr> WithValueQType(QTypePtr value_qtype) const final { RETURN_IF_ERROR(EnsureIsBaseType(value_qtype)); return value_qtype; } QTypePtr presence_qtype() const final { return GetQType<Unit>(); } }; class OptionalScalarShapeQType final : public ShapeQType { public: OptionalScalarShapeQType() : ShapeQType(meta::type<OptionalScalarShape>(), "OPTIONAL_SCALAR_SHAPE") { } absl::StatusOr<QTypePtr> WithValueQType(QTypePtr value_qtype) const final { RETURN_IF_ERROR(EnsureIsBaseType(value_qtype)); return ToOptionalQType(value_qtype); } QTypePtr presence_qtype() const final { return GetOptionalQType<Unit>(); } }; } QTypePtr QTypeTraits<ScalarShape>::type() { static const absl::NoDestructor<ScalarShapeQType> shape_qtype; return shape_qtype.get(); } QTypePtr QTypeTraits<OptionalScalarShape>::type() { static const absl::NoDestructor<OptionalScalarShapeQType> shape_qtype; return shape_qtype.get(); } ReprToken ReprTraits<ScalarShape>::operator()( const ScalarShape& ) const { return ReprToken{"scalar_shape"}; } void FingerprintHasherTraits<ScalarShape>::operator()( FingerprintHasher* hasher, const ScalarShape& ) const { hasher->Combine(absl::string_view("scalar_shape")); } ReprToken ReprTraits<OptionalScalarShape>::operator()( const OptionalScalarShape& ) const { return ReprToken{"optional_scalar_shape"}; } void FingerprintHasherTraits<OptionalScalarShape>::operator()( FingerprintHasher* hasher, const OptionalScalarShape& ) const { hasher->Combine(absl::string_view("optional_scalar_shape")); } }
#include "arolla/qtype/shape_qtype.h" #include <cstdint> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status_matchers.h" #include "arolla/qtype/base_types.h" #include "arolla/qtype/optional_qtype.h" #include "arolla/qtype/qtype_traits.h" #include "arolla/qtype/typed_value.h" #include "arolla/util/bytes.h" #include "arolla/util/repr.h" #include "arolla/util/testing/repr_token_eq.h" namespace arolla { namespace { using ::absl_testing::IsOkAndHolds; using ::arolla::testing::ReprTokenEq; using ::testing::Eq; using ::testing::NotNull; TEST(ShapeQType, ScalarShape) { auto scalar_shape = dynamic_cast<const ShapeQType*>(GetQType<ScalarShape>()); ASSERT_THAT(scalar_shape, NotNull()); EXPECT_THAT(scalar_shape->WithValueQType(GetQType<int64_t>()), IsOkAndHolds(Eq(GetQType<int64_t>()))); EXPECT_THAT(scalar_shape->WithValueQType(GetQType<Bytes>()), IsOkAndHolds(Eq(GetQType<Bytes>()))); EXPECT_THAT(scalar_shape->WithValueQType(GetOptionalQType<int64_t>()), IsOkAndHolds(Eq(GetOptionalQType<int64_t>()))); } TEST(ShapeQType, OptionalScalarShape) { auto optional_shape = dynamic_cast<const ShapeQType*>(GetQType<OptionalScalarShape>()); ASSERT_THAT(optional_shape, NotNull()); EXPECT_THAT(optional_shape->WithValueQType(GetQType<int64_t>()), IsOkAndHolds(Eq(GetOptionalQType<int64_t>()))); EXPECT_THAT(optional_shape->WithValueQType(GetQType<Bytes>()), IsOkAndHolds(Eq(GetOptionalQType<Bytes>()))); EXPECT_THAT(optional_shape->WithValueQType(GetOptionalQType<int64_t>()), IsOkAndHolds(Eq(GetOptionalQType<int64_t>()))); } TEST(ShapeQType, ScalarShapeRepr) { EXPECT_THAT(GenReprToken(ScalarShape{}), ReprTokenEq("scalar_shape")); } TEST(ShapeQType, OptionalScalarShapeRepr) { EXPECT_THAT(GenReprToken(OptionalScalarShape{}), ReprTokenEq("optional_scalar_shape")); } TEST(ShapeQType, TypedValueScalarShapeRepr) { EXPECT_THAT(TypedValue::FromValue(ScalarShape{}).GenReprToken(), ReprTokenEq("scalar_shape")); } TEST(ShapeQType, TypedValueOptionalScalarShapeRepr) { EXPECT_THAT(TypedValue::FromValue(OptionalScalarShape{}).GenReprToken(), ReprTokenEq("optional_scalar_shape")); } TEST(ShapeQType, ScalarShapeFingerprint) { EXPECT_THAT(TypedValue::FromValue(ScalarShape{}).GetFingerprint(), Eq(TypedValue::FromValue(ScalarShape{}).GetFingerprint())); } TEST(ShapeQType, OptionalScalarShapeFingerprint) { EXPECT_THAT( TypedValue::FromValue(OptionalScalarShape{}).GetFingerprint(), Eq(TypedValue::FromValue(OptionalScalarShape{}).GetFingerprint())); } TEST(ShapeQType, IsShapeQType) { EXPECT_TRUE(IsShapeQType(GetQType<OptionalScalarShape>())); EXPECT_FALSE(IsShapeQType(GetQType<int32_t>())); } } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/shape_qtype.cc
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/shape_qtype_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
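QTypeTraits<...>::type() above returns a per-type immortal singleton built with absl::NoDestructor on first use. A minimal sketch of the same function-local-static pattern; ShapeType and GetScalarShapeType are hypothetical stand-ins, not the arolla API:

#include <iostream>
#include <string>
#include <utility>

// Hypothetical stand-in for a QType object identified by pointer equality.
class ShapeType {
 public:
  explicit ShapeType(std::string name) : name_(std::move(name)) {}
  const std::string& name() const { return name_; }

 private:
  std::string name_;
};

const ShapeType* GetScalarShapeType() {
  // Constructed once on first call and intentionally never destroyed,
  // mirroring absl::NoDestructor in the real code: this sidesteps
  // static-destruction-order issues for globals that outlive main().
  static const ShapeType* type = new ShapeType("SCALAR_SHAPE");
  return type;
}

int main() {
  const ShapeType* a = GetScalarShapeType();
  const ShapeType* b = GetScalarShapeType();
  std::cout << a->name() << " same=" << std::boolalpha << (a == b) << "\n";
}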
1f63ef80-aa42-46d4-82fe-0e4fc4d801cd
cpp
tensorflow/tensorflow
crop_and_resize_op
tensorflow/core/kernels/image/crop_and_resize_op.cc
tensorflow/core/kernels/image/crop_and_resize_op_test.cc
#define EIGEN_USE_THREADS #include "tensorflow/core/kernels/image/crop_and_resize_op.h" #include <functional> #include <string> #include "unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_reference.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/determinism.h" #include "tensorflow/core/util/work_sharder.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h" #include "tensorflow/core/platform/stream_executor.h" #endif #if GOOGLE_CUDA #include "xla/stream_executor/gpu/scoped_activate_context.h" using stream_executor::gpu::ScopedActivateContext; #elif TENSORFLOW_USE_ROCM #include "tensorflow/core/platform/rocm.h" using stream_executor::gpu::ScopedActivateContext; #endif namespace tensorflow { namespace { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; using Callback = std::function<void()>; static inline Status ParseAndCheckBoxSizes(const Tensor& boxes, const Tensor& box_index, int* num_boxes) { if (boxes.NumElements() == 0 && box_index.NumElements() == 0) { *num_boxes = 0; return absl::OkStatus(); } if (boxes.dims() != 2) { return errors::InvalidArgument("boxes must be 2-D", boxes.shape().DebugString()); } *num_boxes = boxes.dim_size(0); if (boxes.dim_size(1) != 4) { return errors::InvalidArgument("boxes must have 4 columns"); } if (box_index.dims() != 1) { return errors::InvalidArgument("box_index must be 1-D", box_index.shape().DebugString()); } if (box_index.dim_size(0) != *num_boxes) { return errors::InvalidArgument("box_index has incompatible shape"); } return absl::OkStatus(); } template <typename Device> inline void RunIfBoxIndexIsValid( OpKernelContext* context, typename TTypes<int32, 1>::ConstTensor box_index, int batch_size, const Callback& compute, const Callback& done); template <> inline void RunIfBoxIndexIsValid<CPUDevice>( OpKernelContext* context, typename TTypes<int32, 1>::ConstTensor box_index, int batch_size, const Callback& compute, const Callback& done) { const int num_boxes = box_index.dimension(0); for (int b = 0; b < num_boxes; ++b) { OP_REQUIRES_ASYNC( context, FastBoundsCheck(box_index(b), batch_size), errors::OutOfRange("box_index has values outside [0, batch_size)"), done); } if (compute) { compute(); } if (done) { done(); } } } template <typename Device, typename T> class CropAndResizeOp : public AsyncOpKernel { public: explicit CropAndResizeOp(OpKernelConstruction* context) : AsyncOpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("method", &method_)); OP_REQUIRES(context, method_ == "bilinear" || method_ == "nearest", errors::InvalidArgument( "method must be 'bilinear' or 'nearest'", method_)); OP_REQUIRES_OK(context, context->GetAttr("extrapolation_value", &extrapolation_value_)); } void ComputeAsync(OpKernelContext* context, DoneCallback done) override { const Tensor& image = context->input(0); const Tensor& boxes = context->input(1); const Tensor& box_index = context->input(2); const Tensor& crop_size = context->input(3); OP_REQUIRES_ASYNC(context, image.dims() == 4, errors::InvalidArgument("input image must be 4-D", 
image.shape().DebugString()), done); const int batch_size = image.dim_size(0); const int image_height = image.dim_size(1); const int image_width = image.dim_size(2); const int depth = image.dim_size(3); OP_REQUIRES_ASYNC( context, image_height > 0 && image_width > 0, errors::InvalidArgument("image dimensions must be positive"), done); OP_REQUIRES_ASYNC( context, boxes.dims() == 2, absl::InvalidArgumentError(absl::StrCat("boxes must be 2-D, got: ", boxes.shape().DebugString())), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsVector(box_index.shape()), errors::InvalidArgument("box_indices must be rank 1 but is shape ", box_index.shape().DebugString()), done); int num_boxes = 0; OP_REQUIRES_OK_ASYNC( context, ParseAndCheckBoxSizes(boxes, box_index, &num_boxes), done); OP_REQUIRES_ASYNC(context, crop_size.dims() == 1, errors::InvalidArgument("crop_size must be 1-D", crop_size.shape().DebugString()), done); OP_REQUIRES_ASYNC( context, crop_size.dim_size(0) == 2, errors::InvalidArgument("crop_size must have two elements", crop_size.shape().DebugString()), done); auto crop_size_vec = crop_size.vec<int32>(); const int crop_height = internal::SubtleMustCopy(crop_size_vec(0)); const int crop_width = internal::SubtleMustCopy(crop_size_vec(1)); OP_REQUIRES_ASYNC( context, crop_height > 0 && crop_width > 0, errors::InvalidArgument("crop dimensions must be positive"), done); TensorShape shape; OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(num_boxes), done); OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(crop_height), done); OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(crop_width), done); OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(depth), done); Tensor* output = nullptr; OP_REQUIRES_OK_ASYNC(context, context->allocate_output(0, shape, &output), done); auto compute_callback = [this, context, output]() { const Tensor& image = context->input(0); const Tensor& boxes = context->input(1); const Tensor& box_index = context->input(2); const bool status = functor::CropAndResize<Device, T>()( context, image.tensor<T, 4>(), boxes.tensor<float, 2>(), box_index.tensor<int32, 1>(), method_, extrapolation_value_, output->tensor<float, 4>()); if (!status) { context->SetStatus( errors::Internal("Failed to launch CropAndResizeKernel.")); } }; RunIfBoxIndexIsValid<Device>(context, box_index.tensor<int32, 1>(), batch_size, std::move(compute_callback), std::move(done)); } private: float extrapolation_value_; string method_; }; namespace functor { template <typename T> struct CropAndResize<CPUDevice, T> { bool operator()(OpKernelContext* context, typename TTypes<T, 4>::ConstTensor image, typename TTypes<float, 2>::ConstTensor boxes, typename TTypes<int32, 1>::ConstTensor box_index, const string& method_name, float extrapolation_value, typename TTypes<float, 4>::Tensor crops) { const int batch_size = image.dimension(0); const int image_height = image.dimension(1); const int image_width = image.dimension(2); const int num_boxes = crops.dimension(0); const int crop_height = crops.dimension(1); const int crop_width = crops.dimension(2); const int depth = crops.dimension(3); const Eigen::Tensor<bool, 0, Eigen::RowMajor> only_finite_elements = boxes.isfinite().all(); if (!only_finite_elements()) { context->SetStatus(errors::InvalidArgument( "Boxes contains at least one element that is not finite")); return false; } auto CropAndResizePerBox = [&](int64_t start_box, int64_t limit_box) { for (int b = start_box; b < limit_box; ++b) { const float y1 = boxes(b, 0); const float x1 = boxes(b, 1); 
const float y2 = boxes(b, 2); const float x2 = boxes(b, 3); const int32_t b_in = box_index(b); if (!FastBoundsCheck(b_in, batch_size)) { continue; } const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; for (int y = 0; y < crop_height; ++y) { const float in_y = (crop_height > 1) ? y1 * (image_height - 1) + y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); if (in_y < 0 || in_y > image_height - 1) { for (int x = 0; x < crop_width; ++x) { for (int d = 0; d < depth; ++d) { crops(b, y, x, d) = extrapolation_value; } } continue; } if (method_name == "bilinear") { const int top_y_index = floorf(in_y); const int bottom_y_index = ceilf(in_y); const float y_lerp = in_y - top_y_index; for (int x = 0; x < crop_width; ++x) { const float in_x = (crop_width > 1) ? x1 * (image_width - 1) + x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (in_x < 0 || in_x > image_width - 1) { for (int d = 0; d < depth; ++d) { crops(b, y, x, d) = extrapolation_value; } continue; } const int left_x_index = floorf(in_x); const int right_x_index = ceilf(in_x); const float x_lerp = in_x - left_x_index; for (int d = 0; d < depth; ++d) { const float top_left(static_cast<float>( image(b_in, top_y_index, left_x_index, d))); const float top_right(static_cast<float>( image(b_in, top_y_index, right_x_index, d))); const float bottom_left(static_cast<float>( image(b_in, bottom_y_index, left_x_index, d))); const float bottom_right(static_cast<float>( image(b_in, bottom_y_index, right_x_index, d))); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; crops(b, y, x, d) = top + (bottom - top) * y_lerp; } } } else { for (int x = 0; x < crop_width; ++x) { const float in_x = (crop_width > 1) ? 
x1 * (image_width - 1) + x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (in_x < 0 || in_x > image_width - 1) { for (int d = 0; d < depth; ++d) { crops(b, y, x, d) = extrapolation_value; } continue; } const int closest_x_index = roundf(in_x); const int closest_y_index = roundf(in_y); for (int d = 0; d < depth; ++d) { crops(b, y, x, d) = static_cast<float>( image(b_in, closest_y_index, closest_x_index, d)); } } } } } }; double cost_per_pixel = depth * (Eigen::TensorOpCost::AddCost<float>() * 6 + Eigen::TensorOpCost::MulCost<float>() * 3 + Eigen::TensorOpCost::CastCost<T, float>() * 4) + (Eigen::TensorOpCost::AddCost<float>() * 2 + Eigen::TensorOpCost::AddCost<float>() * 3); if (method_name == "nearest") { cost_per_pixel = depth * Eigen::TensorOpCost::CastCost<T, float>() + Eigen::TensorOpCost::AddCost<float>() * 4 + Eigen::TensorOpCost::MulCost<float>() * 4; } const double cost_per_box = crop_height * crop_width * cost_per_pixel; const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, num_boxes, cost_per_box, CropAndResizePerBox); return true; } }; } template <typename Device, typename T> class CropAndResizeGradImageOp : public AsyncOpKernel { public: explicit CropAndResizeGradImageOp(OpKernelConstruction* context) : AsyncOpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("method", &method_)); OP_REQUIRES(context, method_ == "bilinear" || method_ == "nearest", errors::InvalidArgument( "method must be 'bilinear' or 'nearest'", method_)); } void ComputeAsync(OpKernelContext* context, DoneCallback done) override { const Tensor& grads = context->input(0); const Tensor& boxes = context->input(1); const Tensor& box_index = context->input(2); const Tensor& image_size = context->input(3); OP_REQUIRES_ASYNC(context, grads.dims() == 4, errors::InvalidArgument("grads image must be 4-D", grads.shape().DebugString()), done); const int crop_height = grads.dim_size(1); const int crop_width = grads.dim_size(2); OP_REQUIRES_ASYNC( context, crop_height > 0 && crop_width > 0, errors::InvalidArgument("grads dimensions must be positive"), done); int num_boxes = 0; OP_REQUIRES_OK_ASYNC( context, ParseAndCheckBoxSizes(boxes, box_index, &num_boxes), done); OP_REQUIRES_ASYNC( context, grads.dim_size(0) == num_boxes, errors::InvalidArgument("boxes and grads have incompatible shape"), done); OP_REQUIRES_ASYNC(context, image_size.dims() == 1, errors::InvalidArgument("image_size must be 1-D", image_size.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, image_size.dim_size(0) == 4, errors::InvalidArgument("image_size must have 4 elements", image_size.shape().DebugString()), done); auto image_size_vec = image_size.vec<int32>(); const int batch_size = internal::SubtleMustCopy(image_size_vec(0)); const int image_height = internal::SubtleMustCopy(image_size_vec(1)); const int image_width = internal::SubtleMustCopy(image_size_vec(2)); const int depth = internal::SubtleMustCopy(image_size_vec(3)); OP_REQUIRES_ASYNC( context, image_height > 0 && image_width > 0, errors::InvalidArgument("image dimensions must be positive"), done); OP_REQUIRES_ASYNC( context, grads.dim_size(3) == depth, errors::InvalidArgument("image_size and grads are incompatible"), done); if (std::is_same<Device, GPUDevice>::value) { OP_REQUIRES_ASYNC( context, !OpDeterminismRequired(), errors::Unimplemented( "Deterministic GPU implementation of CropAndResizeBackpropImage" " not available."), done); } TensorShape shape; 
OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(batch_size), done); OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(image_height), done); OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(image_width), done); OP_REQUIRES_OK_ASYNC(context, shape.AddDimWithStatus(depth), done); Tensor* output = nullptr; OP_REQUIRES_OK_ASYNC(context, context->allocate_output(0, shape, &output), done); auto compute_callback = [this, context, output]() { const Tensor& grads = context->input(0); const Tensor& boxes = context->input(1); const Tensor& box_index = context->input(2); const bool status = functor::CropAndResizeBackpropImage<Device, T>()( context, grads.tensor<float, 4>(), boxes.tensor<float, 2>(), box_index.tensor<int32, 1>(), output->tensor<T, 4>(), method_); if (!status) { context->SetStatus(errors::Internal( "Failed to launch CropAndResizeBackpropImage kernel.")); } }; RunIfBoxIndexIsValid<Device>(context, box_index.tensor<int32, 1>(), batch_size, std::move(compute_callback), std::move(done)); } private: string method_; }; namespace functor { template <typename T> struct CropAndResizeBackpropImage<CPUDevice, T> { bool operator()(const OpKernelContext* context, typename TTypes<float, 4>::ConstTensor grads, typename TTypes<float, 2>::ConstTensor boxes, typename TTypes<int32, 1>::ConstTensor box_index, typename TTypes<T, 4>::Tensor grads_image, const string& method_name) { const int batch_size = grads_image.dimension(0); const int image_height = grads_image.dimension(1); const int image_width = grads_image.dimension(2); const int num_boxes = grads.dimension(0); const int crop_height = grads.dimension(1); const int crop_width = grads.dimension(2); const int depth = grads.dimension(3); grads_image.setZero(); auto CropAndResizeBackImgPerBox = [&](int64_t start_box, int64_t limit_box) { for (int b = start_box; b < limit_box; ++b) { const float y1 = boxes(b, 0); const float x1 = boxes(b, 1); const float y2 = boxes(b, 2); const float x2 = boxes(b, 3); const int32_t b_in = box_index(b); if (!FastBoundsCheck(b_in, batch_size)) { continue; } const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; for (int y = 0; y < crop_height; ++y) { const float in_y = (crop_height > 1) ? y1 * (image_height - 1) + y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); if (in_y < 0 || in_y > image_height - 1) { continue; } const int top_y_index = floorf(in_y); const int bottom_y_index = ceilf(in_y); const float y_lerp = in_y - top_y_index; for (int x = 0; x < crop_width; ++x) { const float in_x = (crop_width > 1) ? 
x1 * (image_width - 1) + x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (in_x < 0 || in_x > image_width - 1) { continue; } if (method_name == "bilinear") { const int left_x_index = floorf(in_x); const int right_x_index = ceilf(in_x); const float x_lerp = in_x - left_x_index; for (int d = 0; d < depth; ++d) { const float dtop = (1 - y_lerp) * grads(b, y, x, d); grads_image(b_in, top_y_index, left_x_index, d) += static_cast<T>((1 - x_lerp) * dtop); grads_image(b_in, top_y_index, right_x_index, d) += static_cast<T>(x_lerp * dtop); const float dbottom = y_lerp * grads(b, y, x, d); grads_image(b_in, bottom_y_index, left_x_index, d) += static_cast<T>((1 - x_lerp) * dbottom); grads_image(b_in, bottom_y_index, right_x_index, d) += static_cast<T>(x_lerp * dbottom); } } else { for (int d = 0; d < depth; ++d) { int closest_x_index = roundf(in_x); int closest_y_index = roundf(in_y); grads_image(b_in, closest_y_index, closest_x_index, d) += static_cast<T>(grads(b, y, x, d)); } } } } } }; const double cost_per_pixel = (method_name == "bilinear" ? depth * (Eigen::TensorOpCost::AddCost<float>() * 7 + Eigen::TensorOpCost::MulCost<float>() * 6 + Eigen::TensorOpCost::CastCost<T, float>() * 4) + Eigen::TensorOpCost::AddCost<float>() * 4 : depth * (Eigen::TensorOpCost::AddCost<float>() + Eigen::TensorOpCost::CastCost<T, float>()) + Eigen::TensorOpCost::AddCost<float>() * 3); const double cost_per_box = crop_height * crop_width * cost_per_pixel; const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); int max_threads = OpDeterminismRequired() ? 1 : worker_threads.num_threads; Shard(max_threads, worker_threads.workers, num_boxes, cost_per_box, CropAndResizeBackImgPerBox); return true; } }; } template <typename Device, typename T> class CropAndResizeGradBoxesOp : public AsyncOpKernel { public: explicit CropAndResizeGradBoxesOp(OpKernelConstruction* context) : AsyncOpKernel(context) { string method; OP_REQUIRES_OK(context, context->GetAttr("method", &method)); OP_REQUIRES(context, method == "bilinear", errors::InvalidArgument("method must be 'bilinear'", method)); } void ComputeAsync(OpKernelContext* context, DoneCallback done) override { const Tensor& grads = context->input(0); const Tensor& boxes = context->input(2); const Tensor& box_index = context->input(3); const Tensor& image = context->input(1); OP_REQUIRES_ASYNC(context, grads.dims() == 4, errors::InvalidArgument("grads image must be 4-D", grads.shape().DebugString()), done); const int crop_height = grads.dim_size(1); const int crop_width = grads.dim_size(2); const int depth = grads.dim_size(3); OP_REQUIRES_ASYNC( context, crop_height > 0 && crop_width > 0, errors::InvalidArgument("grads dimensions must be positive"), done); OP_REQUIRES_ASYNC(context, image.dims() == 4, errors::InvalidArgument("input image must be 4-D", image.shape().DebugString()), done); const int batch_size = image.dim_size(0); const int image_height = image.dim_size(1); const int image_width = image.dim_size(2); OP_REQUIRES_ASYNC( context, image_height > 0 && image_width > 0, errors::InvalidArgument("image dimensions must be positive"), done); OP_REQUIRES_ASYNC(context, image.dim_size(3) == depth, errors::InvalidArgument("image, grads depth differ"), done); int num_boxes = 0; OP_REQUIRES_OK_ASYNC( context, ParseAndCheckBoxSizes(boxes, box_index, &num_boxes), done); OP_REQUIRES_ASYNC( context, grads.dim_size(0) == num_boxes, errors::InvalidArgument("boxes and grads have incompatible shape"), done); if (std::is_same<Device, 
GPUDevice>::value) { OP_REQUIRES_ASYNC( context, !OpDeterminismRequired(), errors::Unimplemented( "Deterministic GPU implementation of CropAndResizeBackpropBoxes" " not available."), done); } Tensor* output = nullptr; OP_REQUIRES_OK_ASYNC( context, context->allocate_output(0, TensorShape({num_boxes, 4}), &output), done); auto compute_callback = [context, output]() { const Tensor& grads = context->input(0); const Tensor& image = context->input(1); const Tensor& boxes = context->input(2); const Tensor& box_index = context->input(3); const bool status = functor::CropAndResizeBackpropBoxes<Device, T>()( context->eigen_device<Device>(), grads.tensor<float, 4>(), image.tensor<T, 4>(), boxes.tensor<float, 2>(), box_index.tensor<int32, 1>(), output->tensor<float, 2>()); if (!status) { context->SetStatus(errors::Internal( "Failed to launch CropAndResizeBackpropBoxes kernel.")); } }; RunIfBoxIndexIsValid<Device>(context, box_index.tensor<int32, 1>(), batch_size, std::move(compute_callback), std::move(done)); } }; namespace functor { template <typename T> struct CropAndResizeBackpropBoxes<CPUDevice, T> { bool operator()(const CPUDevice& d, typename TTypes<float, 4>::ConstTensor grads, typename TTypes<T, 4>::ConstTensor image, typename TTypes<float, 2>::ConstTensor boxes, typename TTypes<int32, 1>::ConstTensor box_index, typename TTypes<float, 2>::Tensor grads_boxes) { const int batch_size = image.dimension(0); const int image_height = image.dimension(1); const int image_width = image.dimension(2); const int num_boxes = grads.dimension(0); const int crop_height = grads.dimension(1); const int crop_width = grads.dimension(2); const int depth = grads.dimension(3); grads_boxes.setZero(); for (int b = 0; b < num_boxes; ++b) { const float y1 = boxes(b, 0); const float x1 = boxes(b, 1); const float y2 = boxes(b, 2); const float x2 = boxes(b, 3); const int32_t b_in = box_index(b); if (!FastBoundsCheck(b_in, batch_size)) { continue; } const float height_ratio = (crop_height > 1) ? static_cast<float>(image_height - 1) / (crop_height - 1) : 0; const float width_ratio = (crop_width > 1) ? static_cast<float>(image_width - 1) / (crop_width - 1) : 0; const float height_scale = (crop_height > 1) ? (y2 - y1) * height_ratio : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * width_ratio : 0; for (int y = 0; y < crop_height; ++y) { const float in_y = (crop_height > 1) ? y1 * (image_height - 1) + y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); if (in_y < 0 || in_y > image_height - 1) { continue; } const int top_y_index = floorf(in_y); const int bottom_y_index = ceilf(in_y); const float y_lerp = in_y - top_y_index; for (int x = 0; x < crop_width; ++x) { const float in_x = (crop_width > 1) ? 
x1 * (image_width - 1) + x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (in_x < 0 || in_x > image_width - 1) { continue; } const int left_x_index = floorf(in_x); const int right_x_index = ceilf(in_x); const float x_lerp = in_x - left_x_index; for (int d = 0; d < depth; ++d) { const float top_left( static_cast<float>(image(b_in, top_y_index, left_x_index, d))); const float top_right( static_cast<float>(image(b_in, top_y_index, right_x_index, d))); const float bottom_left(static_cast<float>( image(b_in, bottom_y_index, left_x_index, d))); const float bottom_right(static_cast<float>( image(b_in, bottom_y_index, right_x_index, d))); float image_grad_y = (1 - x_lerp) * (bottom_left - top_left) + x_lerp * (bottom_right - top_right); float image_grad_x = (1 - y_lerp) * (top_right - top_left) + y_lerp * (bottom_right - bottom_left); const float top_grad = grads(b, y, x, d); image_grad_y *= top_grad; image_grad_x *= top_grad; if (crop_height > 1) { grads_boxes(b, 0) += image_grad_y * (image_height - 1 - y * height_ratio); grads_boxes(b, 2) += image_grad_y * (y * height_ratio); } else { grads_boxes(b, 0) += image_grad_y * 0.5 * (image_height - 1); grads_boxes(b, 2) += image_grad_y * 0.5 * (image_height - 1); } if (crop_width > 1) { grads_boxes(b, 1) += image_grad_x * (image_width - 1 - x * width_ratio); grads_boxes(b, 3) += image_grad_x * (x * width_ratio); } else { grads_boxes(b, 1) += image_grad_x * 0.5 * (image_width - 1); grads_boxes(b, 3) += image_grad_x * 0.5 * (image_width - 1); } } } } } return true; } }; } #define REGISTER_KERNEL(T) \ REGISTER_KERNEL_BUILDER(Name("CropAndResize") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .HostMemory("crop_size"), \ CropAndResizeOp<CPUDevice, T>); \ \ REGISTER_KERNEL_BUILDER(Name("CropAndResizeGradBoxes") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T"), \ CropAndResizeGradBoxesOp<CPUDevice, T>); TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL); #undef REGISTER_KERNEL #define REGISTER_KERNEL(T) \ REGISTER_KERNEL_BUILDER(Name("CropAndResizeGradImage") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .HostMemory("image_size"), \ CropAndResizeGradImageOp<CPUDevice, T>); TF_CALL_half(REGISTER_KERNEL); TF_CALL_float(REGISTER_KERNEL); TF_CALL_double(REGISTER_KERNEL); #undef REGISTER_KERNEL #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM namespace functor { template <> void CheckValidBoxIndexHelper<GPUDevice>::operator()( const GPUDevice& d, typename TTypes<int32, 1>::ConstTensor box_index, int batch_size, typename TTypes<bool, 0>::Tensor isvalid); extern template struct CheckValidBoxIndexHelper<GPUDevice>; } namespace { template <> inline void RunIfBoxIndexIsValid<GPUDevice>( OpKernelContext* context, typename TTypes<int32, 1>::ConstTensor box_index, int batch_size, const Callback& compute, const Callback& done) { const int num_boxes = box_index.dimension(0); if (num_boxes == 0) { compute(); done(); return; } Tensor isvalid_dev_tensor; OP_REQUIRES_OK_ASYNC( context, context->allocate_temp(DataTypeToEnum<bool>::value, TensorShape({}), &isvalid_dev_tensor), done); typename TTypes<bool, 0>::Tensor isvalid_dev = isvalid_dev_tensor.tensor<bool, 0>(); functor::CheckValidBoxIndexHelper<GPUDevice>()( context->eigen_device<GPUDevice>(), box_index, batch_size, isvalid_dev); auto* stream = context->op_device_context()->stream(); OP_REQUIRES_ASYNC(context, stream, errors::Internal("No GPU stream available."), done); Tensor isvalid_host_tensor; AllocatorAttributes alloc_attr; alloc_attr.set_on_host(true); alloc_attr.set_gpu_compatible(true); OP_REQUIRES_OK_ASYNC( context, 
context->allocate_temp(DataTypeToEnum<bool>::value, TensorShape({}), &isvalid_host_tensor, alloc_attr), done); se::DeviceMemoryBase wrapped(isvalid_dev.data(), sizeof(bool)); const bool status = stream ->Memcpy(isvalid_host_tensor.scalar<bool>().data() , wrapped , sizeof(bool)) .ok(); OP_REQUIRES_ASYNC( context, status, errors::Internal("Failed to launch copy of isvalid from device to host."), done); TensorReference isvalid_dev_ref(isvalid_dev_tensor); auto wrapped_callback = [context, isvalid_host_tensor, isvalid_dev_ref, compute, done]() { { auto stream = context->op_device_context()->stream(); ScopedActivateContext scoped_activation{stream->parent()}; const bool isvalid = isvalid_host_tensor.scalar<bool>()(); isvalid_dev_ref.Unref(); OP_REQUIRES_ASYNC( context, isvalid, errors::OutOfRange("box_index has values outside [0, batch_size)"), done); compute(); } done(); }; context->device() ->tensorflow_accelerator_device_info() ->event_mgr->ThenExecute(stream, wrapped_callback); } } #define REGISTER_KERNEL(T) \ REGISTER_KERNEL_BUILDER(Name("CropAndResize") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T") \ .HostMemory("crop_size"), \ CropAndResizeOp<GPUDevice, T>); \ \ REGISTER_KERNEL_BUILDER(Name("CropAndResizeGradImage") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T") \ .HostMemory("image_size"), \ CropAndResizeGradImageOp<GPUDevice, T>); \ \ REGISTER_KERNEL_BUILDER(Name("CropAndResizeGradBoxes") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T"), \ CropAndResizeGradBoxesOp<GPUDevice, T>); TF_CALL_half(REGISTER_KERNEL); TF_CALL_float(REGISTER_KERNEL); TF_CALL_double(REGISTER_KERNEL); #undef REGISTER_KERNEL #endif }
#include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { class CropAndResizeOpTest : public OpsTestBase { protected: template <typename T> void MakeOp(float extrapolation_value, const string& method) { TF_EXPECT_OK(NodeDefBuilder("crop_and_resize_op", "CropAndResize") .Input(FakeInput(DataTypeToEnum<T>::value)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_INT32)) .Attr("extrapolation_value", extrapolation_value) .Attr("method", method) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); } }; #define REGISTER_TEST(T) \ TEST_F(CropAndResizeOpTest, TestCropAndResize##T) { \ MakeOp<T>(0, "bilinear"); \ AddInputFromArray<T>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); \ AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); \ AddInputFromArray<int32>(TensorShape({1}), {0}); \ AddInputFromArray<int32>(TensorShape({2}), {1, 1}); \ TF_ASSERT_OK(RunOpKernel()); \ \ Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1})); \ test::FillValues<float>(&expected, {2.5}); \ test::ExpectTensorEqual<float>(expected, *GetOutput(0)); \ } \ \ TEST_F(CropAndResizeOpTest, TestCropAndResize##T##nearest) { \ MakeOp<T>(0, "nearest"); \ AddInputFromArray<T>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); \ AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); \ AddInputFromArray<int32>(TensorShape({1}), {0}); \ AddInputFromArray<int32>(TensorShape({2}), {1, 1}); \ TF_ASSERT_OK(RunOpKernel()); \ \ Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1})); \ test::FillValues<float>(&expected, {4.0}); \ test::ExpectTensorEqual<float>(expected, *GetOutput(0)); \ } REGISTER_TEST(float) REGISTER_TEST(double) REGISTER_TEST(uint8) REGISTER_TEST(uint16) REGISTER_TEST(int8) REGISTER_TEST(int16) REGISTER_TEST(int32) REGISTER_TEST(int64_t) #undef REGISTER_TEST TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1Uint8) { MakeOp<uint8>(0, "bilinear"); AddInputFromArray<uint8>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<int32>(TensorShape({2}), {1, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1})); test::FillValues<float>(&expected, {2.5}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1Uint8NearestNeibor) { MakeOp<uint8>(0, "nearest"); AddInputFromArray<uint8>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<int32>(TensorShape({2}), {1, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1})); test::FillValues<float>(&expected, {4.0}); test::ExpectTensorEqual<float>(expected, 
*GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1Flipped) { MakeOp<float>(0, "bilinear"); AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<int32>(TensorShape({2}), {1, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1})); test::FillValues<float>(&expected, {2.5}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To1x1FlippedNearestNeighbor) { MakeOp<float>(0, "nearest"); AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<int32>(TensorShape({2}), {1, 1}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1})); test::FillValues<float>(&expected, {4.0}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3) { MakeOp<float>(0, "bilinear"); AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<int32>(TensorShape({2}), {3, 3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1})); test::FillValues<float>(&expected, {1, 1.5, 2, 2, 2.5, 3, 3, 3.5, 4}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3NearestNeighbor) { MakeOp<float>(0, "nearest"); AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<int32>(TensorShape({2}), {3, 3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1})); test::FillValues<float>(&expected, {1, 2, 2, 3, 4, 4, 3, 4, 4}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3Flipped) { MakeOp<float>(0, "bilinear"); AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<int32>(TensorShape({2}), {3, 3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1})); test::FillValues<float>(&expected, {4, 3.5, 3, 3, 2.5, 2, 2, 1.5, 1}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3FlippedNearestNeighbor) { MakeOp<float>(0, "nearest"); AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {1, 1, 0, 0}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<int32>(TensorShape({2}), {3, 3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1})); test::FillValues<float>(&expected, {4, 4, 3, 4, 4, 3, 2, 2, 1}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2) { MakeOp<float>(0, "bilinear"); AddInputFromArray<float>(TensorShape({1, 3, 3, 1}), {1, 2, 3, 4, 5, 6, 7, 8, 9}); AddInputFromArray<float>(TensorShape({2, 4}), {0, 0, 1, 1, 0, 0, 0.5, 0.5}); 
AddInputFromArray<int32>(TensorShape({2}), {0, 0}); AddInputFromArray<int32>(TensorShape({2}), {2, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1})); test::FillValues<float>(&expected, {1, 3, 7, 9, 1, 2, 4, 5}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2NearestNeighbor) { MakeOp<float>(0, "nearest"); AddInputFromArray<float>(TensorShape({1, 3, 3, 1}), {1, 2, 3, 4, 5, 6, 7, 8, 9}); AddInputFromArray<float>(TensorShape({2, 4}), {0, 0, 1, 1, 0, 0, 0.5, 0.5}); AddInputFromArray<int32>(TensorShape({2}), {0, 0}); AddInputFromArray<int32>(TensorShape({2}), {2, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1})); test::FillValues<float>(&expected, {1, 3, 7, 9, 1, 2, 4, 5}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2Flipped) { MakeOp<float>(0, "bilinear"); AddInputFromArray<float>(TensorShape({1, 3, 3, 1}), {1, 2, 3, 4, 5, 6, 7, 8, 9}); AddInputFromArray<float>(TensorShape({2, 4}), {1, 1, 0, 0, 0.5, 0.5, 0, 0}); AddInputFromArray<int32>(TensorShape({2}), {0, 0}); AddInputFromArray<int32>(TensorShape({2}), {2, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1})); test::FillValues<float>(&expected, {9, 7, 3, 1, 5, 4, 2, 1}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize3x3To2x2FlippedNearestNeighbor) { MakeOp<float>(0, "nearest"); AddInputFromArray<float>(TensorShape({1, 3, 3, 1}), {1, 2, 3, 4, 5, 6, 7, 8, 9}); AddInputFromArray<float>(TensorShape({2, 4}), {1, 1, 0, 0, 0.5, 0.5, 0, 0}); AddInputFromArray<int32>(TensorShape({2}), {0, 0}); AddInputFromArray<int32>(TensorShape({2}), {2, 2}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 2, 1})); test::FillValues<float>(&expected, {9, 7, 3, 1, 5, 4, 2, 1}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3Extrapolated) { const float v = -1; MakeOp<float>(v, "bilinear"); AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {-1, -1, 1, 1}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<int32>(TensorShape({2}), {3, 3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1})); test::FillValues<float>(&expected, {v, v, v, v, 1, 2, v, 3, 4}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestCropAndResize2x2To3x3NoCrop) { MakeOp<float>(0, "bilinear"); AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({0, 4}), {}); AddInputFromArray<int32>(TensorShape({0}), {}); AddInputFromArray<int32>(TensorShape({2}), {3, 3}); TF_ASSERT_OK(RunOpKernel()); Tensor expected(allocator(), DT_FLOAT, TensorShape({0, 3, 3, 1})); test::FillValues<float>(&expected, {}); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } TEST_F(CropAndResizeOpTest, TestInvalidInputShape) { MakeOp<float>(0, "bilinear"); AddInputFromArray<float>(TensorShape({2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); AddInputFromArray<int32>(TensorShape({1}), {0}); AddInputFromArray<int32>(TensorShape({2}), {4, 4}); Status s = RunOpKernel(); ASSERT_FALSE(s.ok()); EXPECT_TRUE(absl::StrContains(s.ToString(), "input image 
must be 4-D")) << s; } TEST_F(CropAndResizeOpTest, TestInvalidBoxIndexShape) { MakeOp<float>(0, "bilinear"); AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); AddInputFromArray<int32>(TensorShape({2}), {0, 0}); AddInputFromArray<int32>(TensorShape({2}), {4, 4}); Status s = RunOpKernel(); ASSERT_FALSE(s.ok()); EXPECT_TRUE( absl::StrContains(s.ToString(), "box_index has incompatible shape")) << s; } TEST_F(CropAndResizeOpTest, TestInvalidBoxIndex) { MakeOp<float>(0, "bilinear"); AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4}); AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1}); AddInputFromArray<int32>(TensorShape({1}), {1}); AddInputFromArray<int32>(TensorShape({2}), {3, 3}); Status s = RunOpKernel(); ASSERT_FALSE(s.ok()); EXPECT_TRUE(absl::StrContains(s.ToString(), "box_index has values outside [0, batch_size)")) << s; } TEST_F(CropAndResizeOpTest, TestWithSharding) { MakeOp<float>(0, "bilinear"); const int kLength = 999; const int kHalf = (kLength + 1) / 2; AddInput<float>(TensorShape({1, kLength, kLength, 1}), [=](int i) -> float { return i % kLength; }); AddInputFromArray<float>(TensorShape({2, 4}), {0, 0, 0.5, 0.5, 0.5, 0.5, 1, 1}); AddInputFromArray<int32>(TensorShape({2}), {0, 0}); AddInputFromArray<int32>(TensorShape({2}), {kHalf, kHalf}); TF_ASSERT_OK(RunOpKernel()); Tensor result1(allocator(), DT_FLOAT, TensorShape({1, kHalf, kHalf, 1})); test::FillFn<float>(&result1, [=](int i) -> float { return i % kHalf; }); Tensor result2(allocator(), DT_FLOAT, TensorShape({1, kHalf, kHalf, 1})); test::FillFn<float>(&result2, [=](int i) -> float { return i % kHalf + kHalf - 1; }); Tensor expected(allocator(), DT_FLOAT, TensorShape({2, kHalf, kHalf, 1})); TF_ASSERT_OK(tensor::Concat({result1, result2}, &expected)); test::ExpectTensorEqual<float>(expected, *GetOutput(0)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/crop_and_resize_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/crop_and_resize_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
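For reference, the 2x2-to-1x1 expectations in the test file above (2.5 for "bilinear", 4.0 for "nearest") follow directly from the kernel's single-pixel sampling rule visible in the source: when the crop has extent 1 along an axis, the sample coordinate is 0.5 * (x1 + x2) * (image_width - 1), i.e. the box center. The following standalone sketch is illustrative only and is not TensorFlow code (SampleBilinear is a made-up name); it reproduces that arithmetic for the 2x2 image {1, 2, 3, 4} and box (0, 0, 1, 1).

#include <cmath>
#include <cstdio>

// image is a 2x2 single-channel crop source, row-major.
float SampleBilinear(const float image[2][2], float in_y, float in_x) {
  const int top = static_cast<int>(std::floor(in_y));
  const int bottom = static_cast<int>(std::ceil(in_y));
  const int left = static_cast<int>(std::floor(in_x));
  const int right = static_cast<int>(std::ceil(in_x));
  const float y_lerp = in_y - top;
  const float x_lerp = in_x - left;
  // Interpolate along x on the top and bottom rows, then along y.
  const float top_val =
      image[top][left] + x_lerp * (image[top][right] - image[top][left]);
  const float bottom_val =
      image[bottom][left] + x_lerp * (image[bottom][right] - image[bottom][left]);
  return top_val + y_lerp * (bottom_val - top_val);
}

int main() {
  const float image[2][2] = {{1, 2}, {3, 4}};
  // Box (y1, x1, y2, x2) = (0, 0, 1, 1), crop size 1x1: sample the box center.
  const float in_y = 0.5f * (0.0f + 1.0f) * (2 - 1);  // = 0.5
  const float in_x = 0.5f * (0.0f + 1.0f) * (2 - 1);  // = 0.5
  std::printf("bilinear: %g\n", SampleBilinear(image, in_y, in_x));  // 2.5
  // Nearest-neighbor: roundf(0.5) rounds away from zero, picking image[1][1].
  std::printf("nearest:  %g\n",
              image[static_cast<int>(std::roundf(in_y))]
                   [static_cast<int>(std::roundf(in_x))]);  // 4
  return 0;
}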
0f10e362-7fd0-45fb-9e53-3b3d601049b6
cpp
google/tensorstore
array
tensorstore/internal/json/array.cc
tensorstore/internal/json_binding/array_test.cc
#include "tensorstore/internal/json/array.h" #include <stddef.h> #include <algorithm> #include <cassert> #include <utility> #include <vector> #include "absl/functional/function_ref.h" #include "absl/status/status.h" #include <nlohmann/json.hpp> #include "tensorstore/array.h" #include "tensorstore/contiguous_layout.h" #include "tensorstore/data_type.h" #include "tensorstore/data_type_conversion.h" #include "tensorstore/index.h" #include "tensorstore/internal/element_copy_function.h" #include "tensorstore/internal/elementwise_function.h" #include "tensorstore/rank.h" #include "tensorstore/strided_layout.h" #include "tensorstore/util/byte_strided_pointer.h" #include "tensorstore/util/iterate.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" #include "tensorstore/util/str_cat.h" namespace tensorstore { namespace internal_json { ::nlohmann::json JsonEncodeNestedArrayImpl( ArrayView<const void, dynamic_rank, offset_origin> array, absl::FunctionRef<::nlohmann::json(const void*)> encode_element) { if (array.rank() == 0) { assert(array.data()); return encode_element(array.data()); } using array_t = ::nlohmann::json::array_t; array_t* path[kMaxRank]; DimensionIndex level = 0; array_t j_root; j_root.reserve(array.shape()[0]); path[0] = &j_root; if (array.shape()[0] == 0) { return j_root; } ByteStridedPointer<const void> pointer = array.byte_strided_origin_pointer(); while (true) { array_t* j_parent = path[level]; if (level == array.rank() - 1) { assert(pointer.get()); j_parent->push_back(encode_element(pointer.get())); } else { const Index size = array.shape()[level + 1]; array_t next_array; next_array.reserve(size); j_parent->emplace_back(std::move(next_array)); j_parent = j_parent->back().get_ptr<array_t*>(); if (size != 0) { path[++level] = j_parent; continue; } } while (true) { array_t* j_array = path[level]; const Index i = j_array->size(); const Index size = array.shape()[level]; const Index byte_stride = array.byte_strides()[level]; pointer += byte_stride; if (i != size) break; pointer -= i * byte_stride; if (level-- == 0) { return j_root; } } } } Result<SharedArray<void>> JsonParseNestedArrayImpl( const ::nlohmann::json& j_root, DataType dtype, absl::FunctionRef<absl::Status(const ::nlohmann::json& v, void* out)> decode_element) { assert(dtype.valid()); using array_t = ::nlohmann::json::array_t; SharedArray<void> array; ByteStridedPointer<void> pointer; const Index byte_stride = dtype->size; Index shape_or_position[kMaxRank]; const array_t* path[kMaxRank]; DimensionIndex nesting_level = 0; const ::nlohmann::json* j = &j_root; const auto allocate_array = [&] { array = AllocateArray(tensorstore::span(&shape_or_position[0], nesting_level), c_order, default_init, dtype); pointer = array.byte_strided_origin_pointer(); std::fill_n(&shape_or_position[0], nesting_level, static_cast<Index>(0)); }; while (true) { const array_t* j_array = j->get_ptr<const ::nlohmann::json::array_t*>(); if (!j_array) { if (!array.data()) allocate_array(); if (nesting_level != array.rank()) { return absl::InvalidArgumentError(tensorstore::StrCat( "Expected rank-", array.rank(), " array, but found non-array element ", j->dump(), " at position ", span(&shape_or_position[0], nesting_level), ".")); } TENSORSTORE_RETURN_IF_ERROR( decode_element(*j, pointer.get()), MaybeAnnotateStatus( _, tensorstore::StrCat("Error parsing array element at position ", span(&shape_or_position[0], nesting_level)))); pointer += byte_stride; } else { if (nesting_level == kMaxRank) { 
return absl::InvalidArgumentError(tensorstore::StrCat( "Nesting level exceeds maximum rank of ", kMaxRank)); } path[nesting_level++] = j_array; const Index size = j_array->size(); if (!array.data()) { shape_or_position[nesting_level - 1] = size; if (size == 0) { allocate_array(); return array; } } else if (nesting_level > static_cast<size_t>(array.rank())) { return absl::InvalidArgumentError(tensorstore::StrCat( "Expected rank-", array.rank(), " array, but found array element ", j->dump(), " at position ", span(&shape_or_position[0], nesting_level - 1), ".")); } else if (array.shape()[nesting_level - 1] != size) { return absl::InvalidArgumentError(tensorstore::StrCat( "Expected array of shape ", array.shape(), ", but found array element ", j->dump(), " of length ", size, " at position ", span(&shape_or_position[0], nesting_level - 1), ".")); } j = &(*j_array)[0]; continue; } while (true) { if (nesting_level == 0) { return array; } const array_t* j_array = path[nesting_level - 1]; const Index size = j_array->size(); const Index i = ++shape_or_position[nesting_level - 1]; if (i != size) { j = &(*j_array)[i]; break; } shape_or_position[nesting_level - 1] = 0; --nesting_level; } } } Result<::nlohmann::json> JsonEncodeNestedArray(ArrayView<const void> array) { auto convert = internal::GetDataTypeConverter( array.dtype(), dtype_v<::tensorstore::dtypes::json_t>); if (!(convert.flags & DataTypeConversionFlags::kSupported)) { return absl::InvalidArgumentError(tensorstore::StrCat( "Conversion from ", array.dtype(), " to JSON is not implemented")); } bool error = false; absl::Status status; ::nlohmann::json j = JsonEncodeNestedArrayImpl( array, [&](const void* ptr) -> ::nlohmann::json { if ((convert.flags & DataTypeConversionFlags::kCanReinterpretCast) == DataTypeConversionFlags::kCanReinterpretCast) { return *reinterpret_cast<const ::tensorstore::dtypes::json_t*>(ptr); } ::nlohmann::json value; if ((*convert.closure .function)[internal::IterationBufferKind::kContiguous]( convert.closure.context, {1, 1}, internal::IterationBufferPointer(const_cast<void*>(ptr), Index(0), Index(0)), internal::IterationBufferPointer(&value, Index(0), Index(0)), &status) != 1) { error = true; return nullptr; } return value; }); if (error) return internal::GetElementCopyErrorStatus(std::move(status)); return j; } Result<SharedArray<void>> JsonParseNestedArray(const ::nlohmann::json& j, DataType dtype, DimensionIndex rank_constraint) { auto convert = internal::GetDataTypeConverter( dtype_v<::tensorstore::dtypes::json_t>, dtype); if (!(convert.flags & DataTypeConversionFlags::kSupported)) { return absl::InvalidArgumentError(tensorstore::StrCat( "Conversion from JSON to ", dtype, " is not implemented")); } TENSORSTORE_ASSIGN_OR_RETURN( auto array, JsonParseNestedArrayImpl( j, dtype, [&](const ::nlohmann::json& v, void* out) -> absl::Status { if ((convert.flags & DataTypeConversionFlags::kCanReinterpretCast) == DataTypeConversionFlags::kCanReinterpretCast) { *reinterpret_cast<::tensorstore::dtypes::json_t*>(out) = v; return absl::OkStatus(); } else { absl::Status status; if ((*convert.closure .function)[internal::IterationBufferKind::kContiguous]( convert.closure.context, {1, 1}, internal::IterationBufferPointer( const_cast<::nlohmann::json*>(&v), Index(0), Index(0)), internal::IterationBufferPointer(out, Index(0), Index(0)), &status) != 1) { return internal::GetElementCopyErrorStatus(std::move(status)); } return absl::OkStatus(); } })); if (rank_constraint != dynamic_rank && array.rank() != rank_constraint) { return 
absl::InvalidArgumentError(tensorstore::StrCat( "Array rank (", array.rank(), ") does not match expected rank (", rank_constraint, ")")); } return array; } } }
#include "tensorstore/internal/json_binding/array.h" #include <memory> #include <utility> #include <gtest/gtest.h> #include <nlohmann/json_fwd.hpp> #include "tensorstore/array.h" #include "tensorstore/data_type.h" #include "tensorstore/internal/json/json.h" #include "tensorstore/internal/json_binding/gtest.h" #include "tensorstore/internal/json_gtest.h" #include "tensorstore/json_serialization_options_base.h" using ::nlohmann::json; using ::tensorstore::dtype_v; namespace jb = tensorstore::internal_json_binding; namespace { TEST(JsonParseNestedArray, NestedArrayBinder) { tensorstore::TestJsonBinderRoundTrip<tensorstore::SharedArray<void>>( { {tensorstore::MakeArray<std::int64_t>({{1, 2, 3}, {4, 5, 6}}), ::nlohmann::json{{1, 2, 3}, {4, 5, 6}}}, }, jb::NestedVoidArray(tensorstore::dtype_v<std::int64_t>)); tensorstore::TestJsonBinderRoundTrip<tensorstore::SharedArray<std::int64_t>>( { {tensorstore::MakeArray<std::int64_t>({{1, 2, 3}, {4, 5, 6}}), ::nlohmann::json{{1, 2, 3}, {4, 5, 6}}}, }, jb::NestedArray()); } }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json/array.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/array_test.cc
4f887a6430414cd6088e1743555015b10f116d50
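A hedged usage sketch for the two public entry points defined above, JsonEncodeNestedArray and JsonParseNestedArray. It assumes a program linked against tensorstore and nlohmann/json, and mirrors the int64 round trip the NestedArrayBinder test performs through the binder layer; error handling via Result is as declared in the source above.

#include <cstdint>
#include <iostream>
#include <nlohmann/json.hpp>
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/json/array.h"

int main() {
  auto array = tensorstore::MakeArray<std::int64_t>({{1, 2, 3}, {4, 5, 6}});
  // Encode: a rank-2 array becomes the nested JSON array [[1,2,3],[4,5,6]].
  auto j = tensorstore::internal_json::JsonEncodeNestedArray(array);
  if (!j.ok()) { std::cerr << j.status() << "\n"; return 1; }
  std::cout << j->dump() << "\n";
  // Decode back to a SharedArray<void> with int64 dtype. Passing
  // rank_constraint=2 turns a rank mismatch into an InvalidArgumentError.
  auto decoded = tensorstore::internal_json::JsonParseNestedArray(
      *j, tensorstore::dtype_v<std::int64_t>, /*rank_constraint=*/2);
  if (!decoded.ok()) { std::cerr << decoded.status() << "\n"; return 1; }
  std::cout << "rank=" << decoded->rank() << "\n";  // rank=2
  return 0;
}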
0d6c250b-f682-4d42-9ca4-a86d11d951ce
cpp
google/quiche
quiche_circular_deque
quiche/common/quiche_circular_deque.h
quiche/common/quiche_circular_deque_test.cc
#ifndef QUICHE_COMMON_QUICHE_CIRCULAR_DEQUE_H_ #define QUICHE_COMMON_QUICHE_CIRCULAR_DEQUE_H_ #include <algorithm> #include <cstddef> #include <cstring> #include <iterator> #include <memory> #include <ostream> #include <type_traits> #include "quiche/common/platform/api/quiche_export.h" #include "quiche/common/platform/api/quiche_logging.h" namespace quiche { template <typename T, size_t MinCapacityIncrement = 3, typename Allocator = std::allocator<T>> class QUICHE_NO_EXPORT QuicheCircularDeque { using AllocatorTraits = std::allocator_traits<Allocator>; template <typename Pointee> class QUICHE_NO_EXPORT basic_iterator { using size_type = typename AllocatorTraits::size_type; public: using iterator_category = std::random_access_iterator_tag; using value_type = typename AllocatorTraits::value_type; using difference_type = typename AllocatorTraits::difference_type; using pointer = Pointee*; using reference = Pointee&; basic_iterator() = default; basic_iterator( const basic_iterator<value_type>& it) : deque_(it.deque_), index_(it.index_) {} basic_iterator& operator=(const basic_iterator<value_type>& it) { if (this != &it) { deque_ = it.deque_; index_ = it.index_; } return *this; } reference operator*() const { return *deque_->index_to_address(index_); } pointer operator->() const { return deque_->index_to_address(index_); } reference operator[](difference_type i) { return *(*this + i); } basic_iterator& operator++() { Increment(); return *this; } basic_iterator operator++(int) { basic_iterator result = *this; Increment(); return result; } basic_iterator operator--() { Decrement(); return *this; } basic_iterator operator--(int) { basic_iterator result = *this; Decrement(); return result; } friend basic_iterator operator+(const basic_iterator& it, difference_type delta) { basic_iterator result = it; result.IncrementBy(delta); return result; } basic_iterator& operator+=(difference_type delta) { IncrementBy(delta); return *this; } friend basic_iterator operator-(const basic_iterator& it, difference_type delta) { basic_iterator result = it; result.IncrementBy(-delta); return result; } basic_iterator& operator-=(difference_type delta) { IncrementBy(-delta); return *this; } friend difference_type operator-(const basic_iterator& lhs, const basic_iterator& rhs) { return lhs.ExternalPosition() - rhs.ExternalPosition(); } friend bool operator==(const basic_iterator& lhs, const basic_iterator& rhs) { return lhs.index_ == rhs.index_; } friend bool operator!=(const basic_iterator& lhs, const basic_iterator& rhs) { return !(lhs == rhs); } friend bool operator<(const basic_iterator& lhs, const basic_iterator& rhs) { return lhs.ExternalPosition() < rhs.ExternalPosition(); } friend bool operator<=(const basic_iterator& lhs, const basic_iterator& rhs) { return !(lhs > rhs); } friend bool operator>(const basic_iterator& lhs, const basic_iterator& rhs) { return lhs.ExternalPosition() > rhs.ExternalPosition(); } friend bool operator>=(const basic_iterator& lhs, const basic_iterator& rhs) { return !(lhs < rhs); } private: basic_iterator(const QuicheCircularDeque* deque, size_type index) : deque_(deque), index_(index) {} void Increment() { QUICHE_DCHECK_LE(ExternalPosition() + 1, deque_->size()); index_ = deque_->index_next(index_); } void Decrement() { QUICHE_DCHECK_GE(ExternalPosition(), 1u); index_ = deque_->index_prev(index_); } void IncrementBy(difference_type delta) { if (delta >= 0) { QUICHE_DCHECK_LE(static_cast<size_type>(ExternalPosition() + delta), deque_->size()); } else { 
QUICHE_DCHECK_GE(ExternalPosition(), static_cast<size_type>(-delta)); } index_ = deque_->index_increment_by(index_, delta); } size_type ExternalPosition() const { if (index_ >= deque_->begin_) { return index_ - deque_->begin_; } return index_ + deque_->data_capacity() - deque_->begin_; } friend class QuicheCircularDeque; const QuicheCircularDeque* deque_ = nullptr; size_type index_ = 0; }; public: using allocator_type = typename AllocatorTraits::allocator_type; using value_type = typename AllocatorTraits::value_type; using size_type = typename AllocatorTraits::size_type; using difference_type = typename AllocatorTraits::difference_type; using reference = value_type&; using const_reference = const value_type&; using pointer = typename AllocatorTraits::pointer; using const_pointer = typename AllocatorTraits::const_pointer; using iterator = basic_iterator<T>; using const_iterator = basic_iterator<const T>; using reverse_iterator = std::reverse_iterator<iterator>; using const_reverse_iterator = std::reverse_iterator<const_iterator>; QuicheCircularDeque() : QuicheCircularDeque(allocator_type()) {} explicit QuicheCircularDeque(const allocator_type& alloc) : allocator_and_data_(alloc) {} QuicheCircularDeque(size_type count, const T& value, const Allocator& alloc = allocator_type()) : allocator_and_data_(alloc) { resize(count, value); } explicit QuicheCircularDeque(size_type count, const Allocator& alloc = allocator_type()) : allocator_and_data_(alloc) { resize(count); } template < class InputIt, typename = std::enable_if_t<std::is_base_of< std::input_iterator_tag, typename std::iterator_traits<InputIt>::iterator_category>::value>> QuicheCircularDeque(InputIt first, InputIt last, const Allocator& alloc = allocator_type()) : allocator_and_data_(alloc) { AssignRange(first, last); } QuicheCircularDeque(const QuicheCircularDeque& other) : QuicheCircularDeque( other, AllocatorTraits::select_on_container_copy_construction( other.allocator_and_data_.allocator())) {} QuicheCircularDeque(const QuicheCircularDeque& other, const allocator_type& alloc) : allocator_and_data_(alloc) { assign(other.begin(), other.end()); } QuicheCircularDeque(QuicheCircularDeque&& other) : begin_(other.begin_), end_(other.end_), allocator_and_data_(std::move(other.allocator_and_data_)) { other.begin_ = other.end_ = 0; other.allocator_and_data_.data = nullptr; other.allocator_and_data_.data_capacity = 0; } QuicheCircularDeque(QuicheCircularDeque&& other, const allocator_type& alloc) : allocator_and_data_(alloc) { MoveRetainAllocator(std::move(other)); } QuicheCircularDeque(std::initializer_list<T> init, const allocator_type& alloc = allocator_type()) : QuicheCircularDeque(init.begin(), init.end(), alloc) {} QuicheCircularDeque& operator=(const QuicheCircularDeque& other) { if (this == &other) { return *this; } if (AllocatorTraits::propagate_on_container_copy_assignment::value && (allocator_and_data_.allocator() != other.allocator_and_data_.allocator())) { DestroyAndDeallocateAll(); begin_ = end_ = 0; allocator_and_data_ = AllocatorAndData(other.allocator_and_data_.allocator()); } assign(other.begin(), other.end()); return *this; } QuicheCircularDeque& operator=(QuicheCircularDeque&& other) { if (this == &other) { return *this; } if (AllocatorTraits::propagate_on_container_move_assignment::value) { this->~QuicheCircularDeque(); new (this) QuicheCircularDeque(std::move(other)); } else { MoveRetainAllocator(std::move(other)); } return *this; } ~QuicheCircularDeque() { DestroyAndDeallocateAll(); } void assign(size_type count, const 
T& value) { ClearRetainCapacity(); reserve(count); for (size_t i = 0; i < count; ++i) { emplace_back(value); } } template < class InputIt, typename = std::enable_if_t<std::is_base_of< std::input_iterator_tag, typename std::iterator_traits<InputIt>::iterator_category>::value>> void assign(InputIt first, InputIt last) { AssignRange(first, last); } void assign(std::initializer_list<T> ilist) { assign(ilist.begin(), ilist.end()); } reference at(size_type pos) { QUICHE_DCHECK(pos < size()) << "pos:" << pos << ", size():" << size(); size_type index = begin_ + pos; if (index < data_capacity()) { return *index_to_address(index); } return *index_to_address(index - data_capacity()); } const_reference at(size_type pos) const { return const_cast<QuicheCircularDeque*>(this)->at(pos); } reference operator[](size_type pos) { return at(pos); } const_reference operator[](size_type pos) const { return at(pos); } reference front() { QUICHE_DCHECK(!empty()); return *index_to_address(begin_); } const_reference front() const { return const_cast<QuicheCircularDeque*>(this)->front(); } reference back() { QUICHE_DCHECK(!empty()); return *(index_to_address(end_ == 0 ? data_capacity() - 1 : end_ - 1)); } const_reference back() const { return const_cast<QuicheCircularDeque*>(this)->back(); } iterator begin() { return iterator(this, begin_); } const_iterator begin() const { return const_iterator(this, begin_); } const_iterator cbegin() const { return const_iterator(this, begin_); } iterator end() { return iterator(this, end_); } const_iterator end() const { return const_iterator(this, end_); } const_iterator cend() const { return const_iterator(this, end_); } reverse_iterator rbegin() { return reverse_iterator(end()); } const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } const_reverse_iterator crbegin() const { return rbegin(); } reverse_iterator rend() { return reverse_iterator(begin()); } const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } const_reverse_iterator crend() const { return rend(); } size_type capacity() const { return data_capacity() == 0 ? 0 : data_capacity() - 1; } void reserve(size_type new_cap) { if (new_cap > capacity()) { Relocate(new_cap); } } void clear() { ClearRetainCapacity(); } bool empty() const { return begin_ == end_; } size_type size() const { if (begin_ <= end_) { return end_ - begin_; } return data_capacity() + end_ - begin_; } void resize(size_type count) { ResizeInternal(count); } void resize(size_type count, const value_type& value) { ResizeInternal(count, value); } void push_front(const T& value) { emplace_front(value); } void push_front(T&& value) { emplace_front(std::move(value)); } template <class... Args> reference emplace_front(Args&&... args) { MaybeExpandCapacity(1); begin_ = index_prev(begin_); new (index_to_address(begin_)) T(std::forward<Args>(args)...); return front(); } void push_back(const T& value) { emplace_back(value); } void push_back(T&& value) { emplace_back(std::move(value)); } template <class... Args> reference emplace_back(Args&&... 
args) { MaybeExpandCapacity(1); new (index_to_address(end_)) T(std::forward<Args>(args)...); end_ = index_next(end_); return back(); } void pop_front() { QUICHE_DCHECK(!empty()); DestroyByIndex(begin_); begin_ = index_next(begin_); MaybeShrinkCapacity(); } size_type pop_front_n(size_type count) { size_type num_elements_to_pop = std::min(count, size()); size_type new_begin = index_increment_by(begin_, num_elements_to_pop); DestroyRange(begin_, new_begin); begin_ = new_begin; MaybeShrinkCapacity(); return num_elements_to_pop; } void pop_back() { QUICHE_DCHECK(!empty()); end_ = index_prev(end_); DestroyByIndex(end_); MaybeShrinkCapacity(); } size_type pop_back_n(size_type count) { size_type num_elements_to_pop = std::min(count, size()); size_type new_end = index_increment_by(end_, -num_elements_to_pop); DestroyRange(new_end, end_); end_ = new_end; MaybeShrinkCapacity(); return num_elements_to_pop; } void swap(QuicheCircularDeque& other) { using std::swap; swap(begin_, other.begin_); swap(end_, other.end_); if (AllocatorTraits::propagate_on_container_swap::value) { swap(allocator_and_data_, other.allocator_and_data_); } else { QUICHE_DCHECK(get_allocator() == other.get_allocator()) << "Undefined swap behavior"; swap(allocator_and_data_.data, other.allocator_and_data_.data); swap(allocator_and_data_.data_capacity, other.allocator_and_data_.data_capacity); } } friend void swap(QuicheCircularDeque& lhs, QuicheCircularDeque& rhs) { lhs.swap(rhs); } allocator_type get_allocator() const { return allocator_and_data_.allocator(); } friend bool operator==(const QuicheCircularDeque& lhs, const QuicheCircularDeque& rhs) { return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); } friend bool operator!=(const QuicheCircularDeque& lhs, const QuicheCircularDeque& rhs) { return !(lhs == rhs); } friend QUICHE_NO_EXPORT std::ostream& operator<<( std::ostream& os, const QuicheCircularDeque& dq) { os << "{"; for (size_type pos = 0; pos != dq.size(); ++pos) { if (pos != 0) { os << ","; } os << " " << dq[pos]; } os << " }"; return os; } private: void MoveRetainAllocator(QuicheCircularDeque&& other) { if (get_allocator() == other.get_allocator()) { DestroyAndDeallocateAll(); begin_ = other.begin_; end_ = other.end_; allocator_and_data_.data = other.allocator_and_data_.data; allocator_and_data_.data_capacity = other.allocator_and_data_.data_capacity; other.begin_ = other.end_ = 0; other.allocator_and_data_.data = nullptr; other.allocator_and_data_.data_capacity = 0; } else { ClearRetainCapacity(); for (auto& elem : other) { push_back(std::move(elem)); } other.clear(); } } template < typename InputIt, typename = std::enable_if_t<std::is_base_of< std::input_iterator_tag, typename std::iterator_traits<InputIt>::iterator_category>::value>> void AssignRange(InputIt first, InputIt last) { ClearRetainCapacity(); if (std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits<InputIt>::iterator_category>::value) { reserve(std::distance(first, last)); } for (; first != last; ++first) { emplace_back(*first); } } void DestroyAndDeallocateAll() { DestroyRange(begin_, end_); if (data_capacity() > 0) { QUICHE_DCHECK_NE(nullptr, allocator_and_data_.data); AllocatorTraits::deallocate(allocator_and_data_.allocator(), allocator_and_data_.data, data_capacity()); } } void ClearRetainCapacity() { DestroyRange(begin_, end_); begin_ = end_ = 0; } void MaybeShrinkCapacity() { } void MaybeExpandCapacity(size_t num_additional_elements) { size_t new_size = size() + num_additional_elements; if (capacity() >= 
new_size) { return; } size_t min_additional_capacity = std::max(MinCapacityIncrement, capacity() / 4); size_t new_capacity = std::max(new_size, capacity() + min_additional_capacity); Relocate(new_capacity); } void Relocate(size_t new_capacity) { const size_t num_elements = size(); QUICHE_DCHECK_GT(new_capacity, num_elements) << "new_capacity:" << new_capacity << ", num_elements:" << num_elements; size_t new_data_capacity = new_capacity + 1; pointer new_data = AllocatorTraits::allocate( allocator_and_data_.allocator(), new_data_capacity); if (begin_ < end_) { RelocateUnwrappedRange(begin_, end_, new_data); } else if (begin_ > end_) { const size_t num_elements_before_wrap = data_capacity() - begin_; RelocateUnwrappedRange(begin_, data_capacity(), new_data); RelocateUnwrappedRange(0, end_, new_data + num_elements_before_wrap); } if (data_capacity()) { AllocatorTraits::deallocate(allocator_and_data_.allocator(), allocator_and_data_.data, data_capacity()); } allocator_and_data_.data = new_data; allocator_and_data_.data_capacity = new_data_capacity; begin_ = 0; end_ = num_elements; } template <typename T_ = T> typename std::enable_if<std::is_trivially_copyable<T_>::value, void>::type RelocateUnwrappedRange(size_type begin, size_type end, pointer dest) const { QUICHE_DCHECK_LE(begin, end) << "begin:" << begin << ", end:" << end; pointer src = index_to_address(begin); QUICHE_DCHECK_NE(src, nullptr); memcpy(dest, src, sizeof(T) * (end - begin)); DestroyRange(begin, end); } template <typename T_ = T> typename std::enable_if<!std::is_trivially_copyable<T_>::value && std::is_move_constructible<T_>::value, void>::type RelocateUnwrappedRange(size_type begin, size_type end, pointer dest) const { QUICHE_DCHECK_LE(begin, end) << "begin:" << begin << ", end:" << end; pointer src = index_to_address(begin); pointer src_end = index_to_address(end); while (src != src_end) { new (dest) T(std::move(*src)); DestroyByAddress(src); ++dest; ++src; } } template <typename T_ = T> typename std::enable_if<!std::is_trivially_copyable<T_>::value && !std::is_move_constructible<T_>::value, void>::type RelocateUnwrappedRange(size_type begin, size_type end, pointer dest) const { QUICHE_DCHECK_LE(begin, end) << "begin:" << begin << ", end:" << end; pointer src = index_to_address(begin); pointer src_end = index_to_address(end); while (src != src_end) { new (dest) T(*src); DestroyByAddress(src); ++dest; ++src; } } template <class... U> void ResizeInternal(size_type count, U&&... 
u) { if (count > size()) { MaybeExpandCapacity(count - size()); while (size() < count) { emplace_back(std::forward<U>(u)...); } } else { size_type new_end = (begin_ + count) % data_capacity(); DestroyRange(new_end, end_); end_ = new_end; MaybeShrinkCapacity(); } } void DestroyRange(size_type begin, size_type end) const { if (std::is_trivially_destructible<T>::value) { return; } if (end >= begin) { DestroyUnwrappedRange(begin, end); } else { DestroyUnwrappedRange(begin, data_capacity()); DestroyUnwrappedRange(0, end); } } void DestroyUnwrappedRange(size_type begin, size_type end) const { QUICHE_DCHECK_LE(begin, end) << "begin:" << begin << ", end:" << end; for (; begin != end; ++begin) { DestroyByIndex(begin); } } void DestroyByIndex(size_type index) const { DestroyByAddress(index_to_address(index)); } void DestroyByAddress(pointer address) const { if (std::is_trivially_destructible<T>::value) { return; } address->~T(); } size_type data_capacity() const { return allocator_and_data_.data_capacity; } pointer index_to_address(size_type index) const { return allocator_and_data_.data + index; } size_type index_prev(size_type index) const { return index == 0 ? data_capacity() - 1 : index - 1; } size_type index_next(size_type index) const { return index == data_capacity() - 1 ? 0 : index + 1; } size_type index_increment_by(size_type index, difference_type delta) const { if (delta == 0) { return index; } QUICHE_DCHECK_LT(static_cast<size_type>(std::abs(delta)), data_capacity()); return (index + data_capacity() + delta) % data_capacity(); } struct QUICHE_NO_EXPORT AllocatorAndData : private allocator_type { explicit AllocatorAndData(const allocator_type& alloc) : allocator_type(alloc) {} const allocator_type& allocator() const { return *this; } allocator_type& allocator() { return *this; } pointer data = nullptr; size_type data_capacity = 0; }; size_type begin_ = 0; size_type end_ = 0; AllocatorAndData allocator_and_data_; }; } #endif
#include "quiche/common/quiche_circular_deque.h" #include <cstddef> #include <cstdint> #include <list> #include <memory> #include <ostream> #include <type_traits> #include <utility> #include "quiche/common/platform/api/quiche_logging.h" #include "quiche/common/platform/api/quiche_test.h" using testing::ElementsAre; namespace quiche { namespace test { namespace { template <typename T, template <typename> class BaseAllocator = std::allocator> class CountingAllocator : public BaseAllocator<T> { using BaseType = BaseAllocator<T>; public: using propagate_on_container_copy_assignment = std::true_type; using propagate_on_container_move_assignment = std::true_type; using propagate_on_container_swap = std::true_type; T* allocate(std::size_t n) { ++shared_counts_->allocate_count; return BaseType::allocate(n); } void deallocate(T* ptr, std::size_t n) { ++shared_counts_->deallocate_count; return BaseType::deallocate(ptr, n); } size_t allocate_count() const { return shared_counts_->allocate_count; } size_t deallocate_count() const { return shared_counts_->deallocate_count; } friend bool operator==(const CountingAllocator& lhs, const CountingAllocator& rhs) { return lhs.shared_counts_ == rhs.shared_counts_; } friend bool operator!=(const CountingAllocator& lhs, const CountingAllocator& rhs) { return !(lhs == rhs); } private: struct Counts { size_t allocate_count = 0; size_t deallocate_count = 0; }; std::shared_ptr<Counts> shared_counts_ = std::make_shared<Counts>(); }; template <typename T, typename propagate_on_copy_assignment, typename propagate_on_move_assignment, typename propagate_on_swap, bool equality_result, template <typename> class BaseAllocator = std::allocator> struct ConfigurableAllocator : public BaseAllocator<T> { using propagate_on_container_copy_assignment = propagate_on_copy_assignment; using propagate_on_container_move_assignment = propagate_on_move_assignment; using propagate_on_container_swap = propagate_on_swap; friend bool operator==(const ConfigurableAllocator& , const ConfigurableAllocator& ) { return equality_result; } friend bool operator!=(const ConfigurableAllocator& lhs, const ConfigurableAllocator& rhs) { return !(lhs == rhs); } }; template <typename Deque> void ShiftRight(Deque* dq, bool emplace) { auto back = *(&dq->back()); dq->pop_back(); if (emplace) { dq->emplace_front(back); } else { dq->push_front(back); } } template <typename Deque> void ShiftLeft(Deque* dq, bool emplace) { auto front = *(&dq->front()); dq->pop_front(); if (emplace) { dq->emplace_back(front); } else { dq->push_back(front); } } class QuicheCircularDequeTest : public QuicheTest {}; TEST_F(QuicheCircularDequeTest, Empty) { QuicheCircularDeque<int> dq; EXPECT_TRUE(dq.empty()); EXPECT_EQ(0u, dq.size()); dq.clear(); dq.push_back(10); EXPECT_FALSE(dq.empty()); EXPECT_EQ(1u, dq.size()); EXPECT_EQ(10, dq.front()); EXPECT_EQ(10, dq.back()); dq.pop_front(); EXPECT_TRUE(dq.empty()); EXPECT_EQ(0u, dq.size()); EXPECT_QUICHE_DEBUG_DEATH(dq.front(), ""); EXPECT_QUICHE_DEBUG_DEATH(dq.back(), ""); EXPECT_QUICHE_DEBUG_DEATH(dq.at(0), ""); EXPECT_QUICHE_DEBUG_DEATH(dq[0], ""); } TEST_F(QuicheCircularDequeTest, Constructor) { QuicheCircularDeque<int> dq; EXPECT_TRUE(dq.empty()); std::allocator<int> alloc; QuicheCircularDeque<int> dq1(alloc); EXPECT_TRUE(dq1.empty()); QuicheCircularDeque<int> dq2(8, 100, alloc); EXPECT_THAT(dq2, ElementsAre(100, 100, 100, 100, 100, 100, 100, 100)); QuicheCircularDeque<int> dq3(5, alloc); EXPECT_THAT(dq3, ElementsAre(0, 0, 0, 0, 0)); QuicheCircularDeque<int> dq4_rand_iter(dq3.begin(), 
dq3.end(), alloc); EXPECT_THAT(dq4_rand_iter, ElementsAre(0, 0, 0, 0, 0)); EXPECT_EQ(dq4_rand_iter, dq3); std::list<int> dq4_src = {4, 4, 4, 4}; QuicheCircularDeque<int> dq4_bidi_iter(dq4_src.begin(), dq4_src.end()); EXPECT_THAT(dq4_bidi_iter, ElementsAre(4, 4, 4, 4)); QuicheCircularDeque<int> dq5(dq4_bidi_iter); EXPECT_THAT(dq5, ElementsAre(4, 4, 4, 4)); EXPECT_EQ(dq5, dq4_bidi_iter); QuicheCircularDeque<int> dq6(dq5, alloc); EXPECT_THAT(dq6, ElementsAre(4, 4, 4, 4)); EXPECT_EQ(dq6, dq5); QuicheCircularDeque<int> dq7(std::move(*&dq6)); EXPECT_THAT(dq7, ElementsAre(4, 4, 4, 4)); EXPECT_TRUE(dq6.empty()); QuicheCircularDeque<int> dq8_equal_allocator(std::move(*&dq7), alloc); EXPECT_THAT(dq8_equal_allocator, ElementsAre(4, 4, 4, 4)); EXPECT_TRUE(dq7.empty()); QuicheCircularDeque<int, 3, CountingAllocator<int>> dq8_temp = {5, 6, 7, 8, 9}; QuicheCircularDeque<int, 3, CountingAllocator<int>> dq8_unequal_allocator( std::move(*&dq8_temp), CountingAllocator<int>()); EXPECT_THAT(dq8_unequal_allocator, ElementsAre(5, 6, 7, 8, 9)); EXPECT_TRUE(dq8_temp.empty()); QuicheCircularDeque<int> dq9({3, 4, 5, 6, 7}, alloc); EXPECT_THAT(dq9, ElementsAre(3, 4, 5, 6, 7)); } TEST_F(QuicheCircularDequeTest, Assign) { QuicheCircularDeque<int, 3, CountingAllocator<int>> dq; dq.assign(7, 1); EXPECT_THAT(dq, ElementsAre(1, 1, 1, 1, 1, 1, 1)); EXPECT_EQ(1u, dq.get_allocator().allocate_count()); QuicheCircularDeque<int, 3, CountingAllocator<int>> dq2; dq2.assign(dq.begin(), dq.end()); EXPECT_THAT(dq2, ElementsAre(1, 1, 1, 1, 1, 1, 1)); EXPECT_EQ(1u, dq2.get_allocator().allocate_count()); EXPECT_TRUE(std::equal(dq.begin(), dq.end(), dq2.begin(), dq2.end())); dq2.assign({2, 2, 2, 2, 2, 2}); EXPECT_THAT(dq2, ElementsAre(2, 2, 2, 2, 2, 2)); std::list<int> dq3_src = {3, 3, 3, 3, 3}; QuicheCircularDeque<int, 3, CountingAllocator<int>> dq3; dq3.assign(dq3_src.begin(), dq3_src.end()); EXPECT_THAT(dq3, ElementsAre(3, 3, 3, 3, 3)); EXPECT_LT(1u, dq3.get_allocator().allocate_count()); dq3 = *&dq3; EXPECT_THAT(dq3, ElementsAre(3, 3, 3, 3, 3)); QuicheCircularDeque< int, 3, ConfigurableAllocator<int, std::true_type, std::true_type, std::true_type, false>> dq4, dq5; dq4.assign(dq3.begin(), dq3.end()); dq5 = dq4; EXPECT_THAT(dq5, ElementsAre(3, 3, 3, 3, 3)); QuicheCircularDeque< int, 3, ConfigurableAllocator<int, std::false_type, std::true_type, std::true_type, true>> dq6, dq7; dq6.assign(dq3.begin(), dq3.end()); dq7 = dq6; EXPECT_THAT(dq7, ElementsAre(3, 3, 3, 3, 3)); dq3 = std::move(*&dq3); EXPECT_THAT(dq3, ElementsAre(3, 3, 3, 3, 3)); ASSERT_TRUE(decltype(dq3.get_allocator()):: propagate_on_container_move_assignment::value); decltype(dq3) dq8; dq8 = std::move(*&dq3); EXPECT_THAT(dq8, ElementsAre(3, 3, 3, 3, 3)); EXPECT_TRUE(dq3.empty()); QuicheCircularDeque< int, 3, ConfigurableAllocator<int, std::true_type, std::false_type, std::true_type, true>> dq9, dq10; dq9.assign(dq8.begin(), dq8.end()); dq10.assign(dq2.begin(), dq2.end()); dq9 = std::move(*&dq10); EXPECT_THAT(dq9, ElementsAre(2, 2, 2, 2, 2, 2)); EXPECT_TRUE(dq10.empty()); QuicheCircularDeque< int, 3, ConfigurableAllocator<int, std::true_type, std::false_type, std::true_type, false>> dq11, dq12; dq11.assign(dq8.begin(), dq8.end()); dq12.assign(dq2.begin(), dq2.end()); dq11 = std::move(*&dq12); EXPECT_THAT(dq11, ElementsAre(2, 2, 2, 2, 2, 2)); EXPECT_TRUE(dq12.empty()); } TEST_F(QuicheCircularDequeTest, Access) { QuicheCircularDeque<int, 3, CountingAllocator<int>> dq; dq.push_back(10); EXPECT_EQ(dq.front(), 10); EXPECT_EQ(dq.back(), 10); EXPECT_EQ(dq.at(0), 10); 
EXPECT_EQ(dq[0], 10); dq.front() = 12; EXPECT_EQ(dq.front(), 12); EXPECT_EQ(dq.back(), 12); EXPECT_EQ(dq.at(0), 12); EXPECT_EQ(dq[0], 12); const auto& dqref = dq; EXPECT_EQ(dqref.front(), 12); EXPECT_EQ(dqref.back(), 12); EXPECT_EQ(dqref.at(0), 12); EXPECT_EQ(dqref[0], 12); dq.pop_front(); EXPECT_TRUE(dqref.empty()); dq.push_back(15); dq.push_front(5); dq.push_back(25); EXPECT_EQ(dq.size(), dq.capacity()); EXPECT_THAT(dq, ElementsAre(5, 15, 25)); EXPECT_LT(&dq.front(), &dq.back()); EXPECT_EQ(dq.front(), 5); EXPECT_EQ(dq.back(), 25); EXPECT_EQ(dq.at(0), 5); EXPECT_EQ(dq.at(1), 15); EXPECT_EQ(dq.at(2), 25); EXPECT_EQ(dq[0], 5); EXPECT_EQ(dq[1], 15); EXPECT_EQ(dq[2], 25); dq.pop_front(); dq.push_back(35); EXPECT_THAT(dq, ElementsAre(15, 25, 35)); EXPECT_LT(&dq.front(), &dq.back()); EXPECT_EQ(dq.front(), 15); EXPECT_EQ(dq.back(), 35); EXPECT_EQ(dq.at(0), 15); EXPECT_EQ(dq.at(1), 25); EXPECT_EQ(dq.at(2), 35); EXPECT_EQ(dq[0], 15); EXPECT_EQ(dq[1], 25); EXPECT_EQ(dq[2], 35); dq.pop_front(); dq.push_back(45); EXPECT_THAT(dq, ElementsAre(25, 35, 45)); EXPECT_GT(&dq.front(), &dq.back()); EXPECT_EQ(dq.front(), 25); EXPECT_EQ(dq.back(), 45); EXPECT_EQ(dq.at(0), 25); EXPECT_EQ(dq.at(1), 35); EXPECT_EQ(dq.at(2), 45); EXPECT_EQ(dq[0], 25); EXPECT_EQ(dq[1], 35); EXPECT_EQ(dq[2], 45); dq.pop_front(); dq.push_back(55); EXPECT_THAT(dq, ElementsAre(35, 45, 55)); EXPECT_GT(&dq.front(), &dq.back()); EXPECT_EQ(dq.front(), 35); EXPECT_EQ(dq.back(), 55); EXPECT_EQ(dq.at(0), 35); EXPECT_EQ(dq.at(1), 45); EXPECT_EQ(dq.at(2), 55); EXPECT_EQ(dq[0], 35); EXPECT_EQ(dq[1], 45); EXPECT_EQ(dq[2], 55); dq.pop_front(); dq.push_back(65); EXPECT_THAT(dq, ElementsAre(45, 55, 65)); EXPECT_LT(&dq.front(), &dq.back()); EXPECT_EQ(dq.front(), 45); EXPECT_EQ(dq.back(), 65); EXPECT_EQ(dq.at(0), 45); EXPECT_EQ(dq.at(1), 55); EXPECT_EQ(dq.at(2), 65); EXPECT_EQ(dq[0], 45); EXPECT_EQ(dq[1], 55); EXPECT_EQ(dq[2], 65); EXPECT_EQ(1u, dq.get_allocator().allocate_count()); } TEST_F(QuicheCircularDequeTest, Iterate) { QuicheCircularDeque<int> dq; EXPECT_EQ(dq.begin(), dq.end()); EXPECT_EQ(dq.cbegin(), dq.cend()); EXPECT_EQ(dq.rbegin(), dq.rend()); EXPECT_EQ(dq.crbegin(), dq.crend()); dq.emplace_back(2); QuicheCircularDeque<int>::const_iterator citer = dq.begin(); EXPECT_NE(citer, dq.end()); EXPECT_EQ(*citer, 2); ++citer; EXPECT_EQ(citer, dq.end()); EXPECT_EQ(*dq.begin(), 2); EXPECT_EQ(*dq.cbegin(), 2); EXPECT_EQ(*dq.rbegin(), 2); EXPECT_EQ(*dq.crbegin(), 2); dq.emplace_front(1); QuicheCircularDeque<int>::const_reverse_iterator criter = dq.rbegin(); EXPECT_NE(criter, dq.rend()); EXPECT_EQ(*criter, 2); ++criter; EXPECT_NE(criter, dq.rend()); EXPECT_EQ(*criter, 1); ++criter; EXPECT_EQ(criter, dq.rend()); EXPECT_EQ(*dq.begin(), 1); EXPECT_EQ(*dq.cbegin(), 1); EXPECT_EQ(*dq.rbegin(), 2); EXPECT_EQ(*dq.crbegin(), 2); dq.push_back(3); int expected_value = 1; for (QuicheCircularDeque<int>::iterator it = dq.begin(); it != dq.end(); ++it) { EXPECT_EQ(expected_value++, *it); } expected_value = 1; for (QuicheCircularDeque<int>::const_iterator it = dq.cbegin(); it != dq.cend(); ++it) { EXPECT_EQ(expected_value++, *it); } expected_value = 3; for (QuicheCircularDeque<int>::reverse_iterator it = dq.rbegin(); it != dq.rend(); ++it) { EXPECT_EQ(expected_value--, *it); } expected_value = 3; for (QuicheCircularDeque<int>::const_reverse_iterator it = dq.crbegin(); it != dq.crend(); ++it) { EXPECT_EQ(expected_value--, *it); } } TEST_F(QuicheCircularDequeTest, Iterator) { EXPECT_EQ(QuicheCircularDeque<int>::iterator(), QuicheCircularDeque<int>::iterator()); 
EXPECT_EQ(QuicheCircularDeque<int>::const_iterator(), QuicheCircularDeque<int>::const_iterator()); EXPECT_EQ(QuicheCircularDeque<int>::reverse_iterator(), QuicheCircularDeque<int>::reverse_iterator()); EXPECT_EQ(QuicheCircularDeque<int>::const_reverse_iterator(), QuicheCircularDeque<int>::const_reverse_iterator()); QuicheCircularDeque<QuicheCircularDeque<int>, 3> dqdq = { {1, 2}, {10, 20, 30}, {100, 200, 300, 400}}; decltype(dqdq)::iterator iter = dqdq.begin(); EXPECT_EQ(iter->size(), 2u); EXPECT_THAT(*iter, ElementsAre(1, 2)); decltype(dqdq)::const_iterator citer = dqdq.cbegin() + 1; EXPECT_NE(*iter, *citer); EXPECT_EQ(citer->size(), 3u); int x = 10; for (auto it = citer->begin(); it != citer->end(); ++it) { EXPECT_EQ(*it, x); x += 10; } EXPECT_LT(iter, citer); EXPECT_LE(iter, iter); EXPECT_GT(citer, iter); EXPECT_GE(citer, citer); iter += 2; EXPECT_NE(*iter, *citer); EXPECT_EQ(iter->size(), 4u); for (int i = 1; i <= 4; ++i) { EXPECT_EQ(iter->begin()[i - 1], i * 100); } EXPECT_LT(citer, iter); EXPECT_LE(iter, iter); EXPECT_GT(iter, citer); EXPECT_GE(citer, citer); iter -= 1; EXPECT_EQ(*iter, *citer); EXPECT_EQ(iter->size(), 3u); x = 10; for (auto it = iter->begin(); it != iter->end();) { EXPECT_EQ(*(it++), x); x += 10; } x = 30; for (auto it = iter->begin() + 2; it != iter->begin();) { EXPECT_EQ(*(it--), x); x -= 10; } } TEST_F(QuicheCircularDequeTest, Resize) { QuicheCircularDeque<int, 3, CountingAllocator<int>> dq; dq.resize(8); EXPECT_THAT(dq, ElementsAre(0, 0, 0, 0, 0, 0, 0, 0)); EXPECT_EQ(1u, dq.get_allocator().allocate_count()); dq.resize(10, 5); EXPECT_THAT(dq, ElementsAre(0, 0, 0, 0, 0, 0, 0, 0, 5, 5)); QuicheCircularDeque<int, 3, CountingAllocator<int>> dq2 = dq; for (size_t new_size = dq.size(); new_size != 0; --new_size) { dq.resize(new_size); EXPECT_TRUE( std::equal(dq.begin(), dq.end(), dq2.begin(), dq2.begin() + new_size)); } dq.resize(0); EXPECT_TRUE(dq.empty()); ASSERT_EQ(dq2.size(), dq2.capacity()); while (dq2.size() < dq2.capacity()) { dq2.push_back(5); } ASSERT_LT(&dq2.front(), &dq2.back()); dq2.pop_back(); dq2.push_front(-5); ASSERT_GT(&dq2.front(), &dq2.back()); EXPECT_EQ(-5, dq2.front()); EXPECT_EQ(5, dq2.back()); dq2.resize(dq2.size() + 1, 10); ASSERT_LT(&dq2.front(), &dq2.back()); EXPECT_EQ(-5, dq2.front()); EXPECT_EQ(10, dq2.back()); EXPECT_EQ(5, *(dq2.rbegin() + 1)); } namespace { class Foo { public: Foo() : Foo(0xF00) {} explicit Foo(int i) : i_(new int(i)) {} ~Foo() { if (i_ != nullptr) { delete i_; } } Foo(const Foo& other) : i_(new int(*other.i_)) {} Foo(Foo&& other) = delete; void Set(int i) { *i_ = i; } int i() const { return *i_; } friend bool operator==(const Foo& lhs, const Foo& rhs) { return lhs.i() == rhs.i(); } friend std::ostream& operator<<(std::ostream& os, const Foo& foo) { return os << "Foo(" << foo.i() << ")"; } private: int* i_ = nullptr; }; } TEST_F(QuicheCircularDequeTest, RelocateNonTriviallyCopyable) { { using MoveConstructible = std::unique_ptr<Foo>; ASSERT_FALSE(std::is_trivially_copyable<MoveConstructible>::value); ASSERT_TRUE(std::is_move_constructible<MoveConstructible>::value); QuicheCircularDeque<MoveConstructible, 3, CountingAllocator<MoveConstructible>> dq1; dq1.resize(3); EXPECT_EQ(dq1.size(), dq1.capacity()); EXPECT_EQ(1u, dq1.get_allocator().allocate_count()); dq1.emplace_back(new Foo(0xF1)); EXPECT_EQ(4u, dq1.size()); EXPECT_EQ(2u, dq1.get_allocator().allocate_count()); EXPECT_EQ(dq1[0], nullptr); EXPECT_EQ(dq1[1], nullptr); EXPECT_EQ(dq1[2], nullptr); EXPECT_EQ(dq1[3]->i(), 0xF1); } { using NonMoveConstructible = Foo; 
ASSERT_FALSE(std::is_trivially_copyable<NonMoveConstructible>::value); ASSERT_FALSE(std::is_move_constructible<NonMoveConstructible>::value); QuicheCircularDeque<NonMoveConstructible, 3, CountingAllocator<NonMoveConstructible>> dq2; dq2.resize(3); EXPECT_EQ(dq2.size(), dq2.capacity()); EXPECT_EQ(1u, dq2.get_allocator().allocate_count()); dq2.emplace_back(0xF1); EXPECT_EQ(4u, dq2.size()); EXPECT_EQ(2u, dq2.get_allocator().allocate_count()); EXPECT_EQ(dq2[0].i(), 0xF00); EXPECT_EQ(dq2[1].i(), 0xF00); EXPECT_EQ(dq2[2].i(), 0xF00); EXPECT_EQ(dq2[3].i(), 0xF1); } } TEST_F(QuicheCircularDequeTest, PushPop) { { QuicheCircularDeque<Foo, 4, CountingAllocator<Foo>> dq(4); for (size_t i = 0; i < dq.size(); ++i) { dq[i].Set(i + 1); } QUICHE_LOG(INFO) << "dq initialized to " << dq; EXPECT_THAT(dq, ElementsAre(Foo(1), Foo(2), Foo(3), Foo(4))); ShiftLeft(&dq, false); QUICHE_LOG(INFO) << "shift left once : " << dq; EXPECT_THAT(dq, ElementsAre(Foo(2), Foo(3), Foo(4), Foo(1))); ShiftLeft(&dq, true); QUICHE_LOG(INFO) << "shift left twice: " << dq; EXPECT_THAT(dq, ElementsAre(Foo(3), Foo(4), Foo(1), Foo(2))); ASSERT_GT(&dq.front(), &dq.back()); } { QuicheCircularDeque<Foo, 4, CountingAllocator<Foo>> dq1(4); for (size_t i = 0; i < dq1.size(); ++i) { dq1[i].Set(i + 1); } QUICHE_LOG(INFO) << "dq1 initialized to " << dq1; EXPECT_THAT(dq1, ElementsAre(Foo(1), Foo(2), Foo(3), Foo(4))); ShiftRight(&dq1, false); QUICHE_LOG(INFO) << "shift right once : " << dq1; EXPECT_THAT(dq1, ElementsAre(Foo(4), Foo(1), Foo(2), Foo(3))); ShiftRight(&dq1, true); QUICHE_LOG(INFO) << "shift right twice: " << dq1; EXPECT_THAT(dq1, ElementsAre(Foo(3), Foo(4), Foo(1), Foo(2))); ASSERT_GT(&dq1.front(), &dq1.back()); } { QuicheCircularDeque<Foo, 4, CountingAllocator<Foo>> dq2(5); for (size_t i = 0; i < dq2.size(); ++i) { dq2[i].Set(i + 1); } EXPECT_THAT(dq2, ElementsAre(Foo(1), Foo(2), Foo(3), Foo(4), Foo(5))); EXPECT_EQ(2u, dq2.pop_front_n(2)); EXPECT_THAT(dq2, ElementsAre(Foo(3), Foo(4), Foo(5))); EXPECT_EQ(3u, dq2.pop_front_n(100)); EXPECT_TRUE(dq2.empty()); } { QuicheCircularDeque<Foo, 4, CountingAllocator<Foo>> dq3(6); for (size_t i = 0; i < dq3.size(); ++i) { dq3[i].Set(i + 1); } EXPECT_THAT(dq3, ElementsAre(Foo(1), Foo(2), Foo(3), Foo(4), Foo(5), Foo(6))); ShiftRight(&dq3, true); ShiftRight(&dq3, true); ShiftRight(&dq3, true); EXPECT_THAT(dq3, ElementsAre(Foo(4), Foo(5), Foo(6), Foo(1), Foo(2), Foo(3))); EXPECT_EQ(2u, dq3.pop_back_n(2)); EXPECT_THAT(dq3, ElementsAre(Foo(4), Foo(5), Foo(6), Foo(1))); EXPECT_EQ(2u, dq3.pop_back_n(2)); EXPECT_THAT(dq3, ElementsAre(Foo(4), Foo(5))); } } TEST_F(QuicheCircularDequeTest, Allocation) { CountingAllocator<int> alloc; { QuicheCircularDeque<int, 3, CountingAllocator<int>> dq(alloc); EXPECT_EQ(alloc, dq.get_allocator()); EXPECT_EQ(0u, dq.size()); EXPECT_EQ(0u, dq.capacity()); EXPECT_EQ(0u, alloc.allocate_count()); EXPECT_EQ(0u, alloc.deallocate_count()); for (int i = 1; i <= 18; ++i) { SCOPED_TRACE(testing::Message() << "i=" << i << ", capacity_b4_push=" << dq.capacity()); dq.push_back(i); EXPECT_EQ(i, static_cast<int>(dq.size())); const size_t capacity = 3 + (i - 1) / 3 * 3; EXPECT_EQ(capacity, dq.capacity()); EXPECT_EQ(capacity / 3, alloc.allocate_count()); EXPECT_EQ(capacity / 3 - 1, alloc.deallocate_count()); } dq.push_back(19); EXPECT_EQ(22u, dq.capacity()); EXPECT_EQ(7u, alloc.allocate_count()); EXPECT_EQ(6u, alloc.deallocate_count()); } EXPECT_EQ(7u, alloc.deallocate_count()); } } } } namespace { template <typename T> using SwappableAllocator = quiche::test::ConfigurableAllocator< T, 
std::true_type, std::true_type, std::true_type, true>; template <typename T> using UnswappableEqualAllocator = quiche::test::ConfigurableAllocator< T, std::true_type, std::true_type, std::false_type, true>; template <typename T> using UnswappableUnequalAllocator = quiche::test::ConfigurableAllocator< T, std::true_type, std::true_type, std::false_type, false>; using quiche::test::QuicheCircularDequeTest; TEST_F(QuicheCircularDequeTest, Swap) { using std::swap; quiche::QuicheCircularDeque<int64_t, 3, SwappableAllocator<int64_t>> dq1, dq2; dq1.push_back(10); dq1.push_back(11); dq2.push_back(20); swap(dq1, dq2); EXPECT_THAT(dq1, ElementsAre(20)); EXPECT_THAT(dq2, ElementsAre(10, 11)); quiche::QuicheCircularDeque<char, 3, UnswappableEqualAllocator<char>> dq3, dq4; dq3 = {1, 2, 3, 4, 5}; dq4 = {6, 7, 8, 9, 0}; swap(dq3, dq4); EXPECT_THAT(dq3, ElementsAre(6, 7, 8, 9, 0)); EXPECT_THAT(dq4, ElementsAre(1, 2, 3, 4, 5)); quiche::QuicheCircularDeque<int, 3, UnswappableUnequalAllocator<int>> dq5, dq6; dq6.push_front(4); dq5.assign(dq6.begin(), dq6.end()); EXPECT_THAT(dq5, ElementsAre(4)); EXPECT_QUICHE_DEBUG_DEATH(swap(dq5, dq6), "Undefined swap behavior"); } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_circular_deque.h
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_circular_deque_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
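The circular-deque tests above lean heavily on iterator arithmetic over a buffer whose logical front can sit anywhere in physical storage. As a minimal illustration of the wrapping index math involved (a standalone sketch with illustrative names, not QUICHE's growable implementation):

#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative fixed-capacity ring buffer: physical slot = (head + logical) % cap.
// QuicheCircularDeque grows its storage; this sketch keeps capacity fixed to
// highlight only the wrapping arithmetic its iterators depend on.
template <typename T>
class RingDeque {
 public:
  explicit RingDeque(size_t cap) : buf_(cap) {}

  void push_back(const T& v) {
    assert(size_ < buf_.size());
    buf_[(head_ + size_) % buf_.size()] = v;
    ++size_;
  }
  void push_front(const T& v) {
    assert(size_ < buf_.size());
    head_ = (head_ + buf_.size() - 1) % buf_.size();  // step back with wrap
    buf_[head_] = v;
    ++size_;
  }
  T& operator[](size_t i) { return buf_[(head_ + i) % buf_.size()]; }
  size_t size() const { return size_; }

 private:
  std::vector<T> buf_;
  size_t head_ = 0;  // physical index of the logical front
  size_t size_ = 0;
};

int main() {
  RingDeque<int> dq(4);
  dq.push_back(2);
  dq.push_back(3);
  dq.push_front(1);  // wraps: the front now lives in the buffer's last slot
  assert(dq[0] == 1 && dq[1] == 2 && dq[2] == 3);
  return 0;
}

ShiftLeft/ShiftRight in the tests above produce exactly this front-behind-back layout, which is why they can assert on the relative addresses of front() and back().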
442c8e38-f2cc-4d7b-8e1c-cf95d427ea3c
cpp
tensorflow/tensorflow
cost_estimator
tensorflow/core/grappler/costs/cost_estimator.cc
tensorflow/core/grappler/costs/cost_estimator_test.cc
#include "tensorflow/core/grappler/costs/cost_estimator.h" namespace tensorflow { namespace grappler { Costs CombineCosts(const Costs& left, const Costs& right) { CHECK_NE(left.max_memory, kMemoryUnknown); CHECK_NE(left.max_per_op_buffers, kMemoryUnknown); CHECK_NE(left.max_per_op_streaming, kMemoryUnknown); Costs result = left; result.execution_time += right.execution_time; result.compute_time += right.compute_time; result.memory_time += right.memory_time; result.network_time += right.network_time; result.intermediate_memory_time += right.intermediate_memory_time; result.intermediate_memory_read_time += right.intermediate_memory_read_time; result.intermediate_memory_write_time += right.intermediate_memory_write_time; if (right.max_per_op_buffers != kMemoryUnknown) { result.max_per_op_buffers = std::max(left.max_per_op_buffers, right.max_per_op_buffers); } if (right.max_per_op_streaming != kMemoryUnknown) { result.max_per_op_streaming = std::max(left.max_per_op_streaming, right.max_per_op_streaming); } result.num_ops_total += right.num_ops_total; if (right.inaccurate) { result.inaccurate = true; } result.num_ops_with_unknown_shapes += right.num_ops_with_unknown_shapes; if (right.max_memory != kMemoryUnknown) { result.max_memory += right.max_memory; } return result; } Costs MultiplyCosts(const Costs& costs, int multiplier) { CHECK_GE(multiplier, 0); if (multiplier == 0) { return Costs::ZeroCosts(); } if (multiplier == 1) { return costs; } Costs result = costs; result.execution_time *= multiplier; result.compute_time *= multiplier; result.memory_time *= multiplier; result.network_time *= multiplier; result.intermediate_memory_time *= multiplier; result.intermediate_memory_read_time *= multiplier; result.intermediate_memory_write_time *= multiplier; return result; } } }
#include "tensorflow/core/grappler/costs/cost_estimator.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace grappler { namespace { TEST(CostEstimatorTest, CombineCosts) { Costs c = Costs::ZeroCosts(); c.execution_time = Costs::NanoSeconds(1); c.compute_time = Costs::NanoSeconds(2); c.memory_time = Costs::NanoSeconds(3); c.intermediate_memory_time = Costs::NanoSeconds(4); c.intermediate_memory_read_time = Costs::NanoSeconds(5); c.intermediate_memory_write_time = Costs::NanoSeconds(6); c.max_memory = 1; c.max_per_op_buffers = 2; c.max_per_op_streaming = 3; c.num_ops_total = 1; c.inaccurate = false; c.num_ops_with_unknown_shapes = 0; Costs sum = CombineCosts(c, c); EXPECT_EQ(sum.execution_time, Costs::NanoSeconds(2)); EXPECT_EQ(sum.compute_time, Costs::NanoSeconds(4)); EXPECT_EQ(sum.memory_time, Costs::NanoSeconds(6)); EXPECT_EQ(sum.intermediate_memory_time, Costs::NanoSeconds(8)); EXPECT_EQ(sum.intermediate_memory_read_time, Costs::NanoSeconds(10)); EXPECT_EQ(sum.intermediate_memory_write_time, Costs::NanoSeconds(12)); EXPECT_EQ(sum.max_memory, 2); EXPECT_EQ(sum.max_per_op_buffers, 2); EXPECT_EQ(sum.max_per_op_streaming, 3); EXPECT_EQ(sum.num_ops_total, 2); EXPECT_FALSE(sum.inaccurate); EXPECT_EQ(sum.num_ops_with_unknown_shapes, 0); } TEST(CostEstimatorTest, MultiplyCosts) { Costs c = Costs::ZeroCosts(); c.execution_time = Costs::NanoSeconds(1); c.compute_time = Costs::NanoSeconds(2); c.memory_time = Costs::NanoSeconds(3); c.intermediate_memory_time = Costs::NanoSeconds(4); c.intermediate_memory_read_time = Costs::NanoSeconds(5); c.intermediate_memory_write_time = Costs::NanoSeconds(6); c.max_memory = 1; c.max_per_op_buffers = 2; c.max_per_op_streaming = 3; c.num_ops_total = 1; c.inaccurate = false; c.num_ops_with_unknown_shapes = 0; Costs product = MultiplyCosts(c, 10); EXPECT_EQ(product.execution_time, Costs::NanoSeconds(10)); EXPECT_EQ(product.compute_time, Costs::NanoSeconds(20)); EXPECT_EQ(product.memory_time, Costs::NanoSeconds(30)); EXPECT_EQ(product.intermediate_memory_time, Costs::NanoSeconds(40)); EXPECT_EQ(product.intermediate_memory_read_time, Costs::NanoSeconds(50)); EXPECT_EQ(product.intermediate_memory_write_time, Costs::NanoSeconds(60)); EXPECT_EQ(product.max_memory, 1); EXPECT_EQ(product.max_per_op_buffers, 2); EXPECT_EQ(product.max_per_op_streaming, 3); EXPECT_EQ(product.num_ops_total, 1); EXPECT_FALSE(product.inaccurate); EXPECT_EQ(product.num_ops_with_unknown_shapes, 0); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/cost_estimator.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/cost_estimator_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
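CombineCosts above treats kMemoryUnknown as a sentinel: time fields always accumulate, while memory highs only participate in the max when the right-hand side is known. A simplified stand-in to make that visible (field names mirror the real struct, everything else is trimmed; this is not the grappler type):

#include <algorithm>
#include <cassert>
#include <cstdint>

// Simplified mirror of the kMemoryUnknown handling in CombineCosts.
constexpr int64_t kMemoryUnknown = -1;

struct MiniCosts {
  int64_t execution_time_ns = 0;
  int64_t max_per_op_buffers = kMemoryUnknown;
};

MiniCosts Combine(const MiniCosts& left, const MiniCosts& right) {
  MiniCosts result = left;
  result.execution_time_ns += right.execution_time_ns;  // times accumulate
  // Memory highs only enter the max when known; unknown on the right is ignored.
  if (right.max_per_op_buffers != kMemoryUnknown) {
    result.max_per_op_buffers =
        std::max(left.max_per_op_buffers, right.max_per_op_buffers);
  }
  return result;
}

int main() {
  MiniCosts a{10, 32};
  MiniCosts b{5, kMemoryUnknown};
  MiniCosts c = Combine(a, b);
  assert(c.execution_time_ns == 15);
  assert(c.max_per_op_buffers == 32);  // unknown right side left the max alone
  return 0;
}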
c677a6b6-3049-486c-ad27-da23b7b21c14
cpp
tensorflow/tensorflow
floor_div
tensorflow/lite/kernels/floor_div.cc
tensorflow/lite/kernels/floor_div_test.cc
#include <math.h> #include <stddef.h> #include <stdint.h> #include <functional> #include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/kernels/internal/reference/binary_function.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace floor_div { namespace { constexpr int kInputTensor1 = 0; constexpr int kInputTensor2 = 1; constexpr int kOutputTensor = 0; struct OpData { bool requires_broadcast; }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* data = new OpData; data->requires_broadcast = false; return data; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); const TfLiteType type = input1->type; switch (type) { case kTfLiteFloat32: case kTfLiteInt32: case kTfLiteInt16: case kTfLiteInt8: break; default: TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_div.", TfLiteTypeGetName(type)); return kTfLiteError; } output->type = type; data->requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (data->requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); } template <typename T> TfLiteStatus EvalImpl(TfLiteContext* context, bool requires_broadcast, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output) { const T* denominator_data = GetTensorData<T>(input2); for (int i = 0; i < NumElements(input2); ++i) { if (std::equal_to<T>()(denominator_data[i], 0)) { TF_LITE_KERNEL_LOG(context, "Division by 0"); return kTfLiteError; } } if (requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<T, T, T>( GetTensorShape(input1), GetTensorData<T>(input1), GetTensorShape(input2), denominator_data, GetTensorShape(output), GetTensorData<T>(output), reference_ops::FloorDiv<T>); } else { reference_ops::BinaryFunction<T, T, T>( GetTensorShape(input1), GetTensorData<T>(input1), GetTensorShape(input2), GetTensorData<T>(input2), GetTensorShape(output), GetTensorData<T>(output), reference_ops::FloorDiv<T>); } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); switch (input1->type) { case kTfLiteInt8: { return 
EvalImpl<int8_t>(context, data->requires_broadcast, input1, input2, output); } case kTfLiteInt16: { return EvalImpl<int16_t>(context, data->requires_broadcast, input1, input2, output); } case kTfLiteInt32: { return EvalImpl<int32_t>(context, data->requires_broadcast, input1, input2, output); } case kTfLiteFloat32: { return EvalImpl<float>(context, data->requires_broadcast, input1, input2, output); } default: { TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by floor_div.", TfLiteTypeGetName(input1->type)); return kTfLiteError; } } } } } TfLiteRegistration* Register_FLOOR_DIV() { static TfLiteRegistration r = {floor_div::Init, floor_div::Free, floor_div::Prepare, floor_div::Eval}; return &r; } } } }
#include <stdint.h> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/kernels/test_util.h" #include "tensorflow/lite/schema/schema_generated.h" namespace tflite { namespace { using ::testing::ElementsAre; template <typename T> class FloorDivModel : public SingleOpModel { public: FloorDivModel(const TensorData& input1, const TensorData& input2, const TensorData& output) { input1_ = AddInput(input1); input2_ = AddInput(input2); output_ = AddOutput(output); SetBuiltinOp(BuiltinOperator_FLOOR_DIV, BuiltinOptions_FloorDivOptions, CreateFloorDivOptions(builder_).Union()); BuildInterpreter({GetShape(input1_), GetShape(input2_)}); } int input1() { return input1_; } int input2() { return input2_; } std::vector<T> GetOutput() { return ExtractVector<T>(output_); } std::vector<int> GetOutputShape() { return GetTensorShape(output_); } private: int input1_; int input2_; int output_; }; TEST(FloorDivModel, Simple) { FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}}); model.PopulateTensor<int32_t>(model.input1(), {10, 9, 11, 3}); model.PopulateTensor<int32_t>(model.input2(), {2, 2, 3, 4}); ASSERT_EQ(model.Invoke(), kTfLiteOk); EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1)); EXPECT_THAT(model.GetOutput(), ElementsAre(5, 4, 3, 0)); } TEST(FloorDivModel, NegativeValue) { FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}}); model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7}); model.PopulateTensor<int32_t>(model.input2(), {2, 2, -3, -4}); ASSERT_EQ(model.Invoke(), kTfLiteOk); EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1)); EXPECT_THAT(model.GetOutput(), ElementsAre(5, -5, 3, -2)); } TEST(FloorDivModel, BroadcastFloorDiv) { FloorDivModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {1}}, {TensorType_INT32, {}}); model.PopulateTensor<int32_t>(model.input1(), {10, -9, -11, 7}); model.PopulateTensor<int32_t>(model.input2(), {-3}); ASSERT_EQ(model.Invoke(), kTfLiteOk); EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1)); EXPECT_THAT(model.GetOutput(), ElementsAre(-4, 3, 3, -3)); } TEST(FloorDivModel, SimpleFloat) { FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {}}); model.PopulateTensor<float>(model.input1(), {10.05, 9.09, 11.9, 3.01}); model.PopulateTensor<float>(model.input2(), {2.05, 2.03, 3.03, 4.03}); ASSERT_EQ(model.Invoke(), kTfLiteOk); EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1)); EXPECT_THAT(model.GetOutput(), ElementsAre(4.0, 4.0, 3.0, 0.0)); } TEST(FloorDivModel, NegativeValueFloat) { FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {}}); model.PopulateTensor<float>(model.input1(), {10.03, -9.9, -11.0, 7.0}); model.PopulateTensor<float>(model.input2(), {2.0, 2.3, -3.0, -4.1}); ASSERT_EQ(model.Invoke(), kTfLiteOk); EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1)); EXPECT_THAT(model.GetOutput(), ElementsAre(5.0, -5.0, 3.0, -2.0)); } TEST(FloorDivModel, BroadcastFloorDivFloat) { FloorDivModel<float> model({TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {}}); model.PopulateTensor<float>(model.input1(), {10.03, -9.9, -11.0, 7.0}); model.PopulateTensor<float>(model.input2(), {-3.3}); ASSERT_EQ(model.Invoke(), kTfLiteOk); 
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1)); EXPECT_THAT(model.GetOutput(), ElementsAre(-4.0, 2.0, 3.0, -3.0)); } TEST(FloorDivModel, SimpleInt16) { FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}}, {TensorType_INT16, {1, 2, 2, 1}}, {TensorType_INT16, {}}); model.PopulateTensor<int16_t>(model.input1(), {10, 9, 11, 3}); model.PopulateTensor<int16_t>(model.input2(), {2, 2, 3, 4}); ASSERT_EQ(model.Invoke(), kTfLiteOk); EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1)); EXPECT_THAT(model.GetOutput(), ElementsAre(5, 4, 3, 0)); } TEST(FloorDivModel, NegativeValueInt16) { FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}}, {TensorType_INT16, {1, 2, 2, 1}}, {TensorType_INT16, {}}); model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7}); model.PopulateTensor<int16_t>(model.input2(), {2, 2, -3, -4}); ASSERT_EQ(model.Invoke(), kTfLiteOk); EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1)); EXPECT_THAT(model.GetOutput(), ElementsAre(5, -5, 3, -2)); } TEST(FloorDivModel, BroadcastFloorDivInt16) { FloorDivModel<int16_t> model({TensorType_INT16, {1, 2, 2, 1}}, {TensorType_INT16, {1}}, {TensorType_INT16, {}}); model.PopulateTensor<int16_t>(model.input1(), {10, -9, -11, 7}); model.PopulateTensor<int16_t>(model.input2(), {-3}); ASSERT_EQ(model.Invoke(), kTfLiteOk); EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1)); EXPECT_THAT(model.GetOutput(), ElementsAre(-4, 3, 3, -3)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/floor_div.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/floor_div_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
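The FLOOR_DIV expectations above (for example -9 / 2 == -5) differ from C++'s built-in integer division, which truncates toward zero (-9 / 2 == -4). The kernel rounds toward negative infinity. A standalone sketch of floored division that reproduces the test values, not the TFLite reference_ops::FloorDiv code itself:

#include <cassert>
#include <cmath>

// Floored integer division: round the quotient toward negative infinity,
// unlike C++'s operator/, which truncates toward zero.
int FloorDivInt(int num, int den) {
  int q = num / den;  // truncated quotient
  int r = num % den;  // remainder carries num's sign in C++
  // Adjust when there is a remainder and the operands disagree in sign.
  if (r != 0 && ((r < 0) != (den < 0))) --q;
  return q;
}

int main() {
  assert(FloorDivInt(10, 2) == 5);    // exact: no adjustment needed
  assert(FloorDivInt(-9, 2) == -5);   // truncation gives -4; floor is -5
  assert(FloorDivInt(-11, -3) == 3);  // same signs: floor equals truncation
  assert(FloorDivInt(7, -4) == -2);   // matches the NegativeValue test above
  assert(static_cast<int>(std::floor(-9.0 / 2.0)) == -5);  // float path agrees
  return 0;
}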
1ef25b3e-ad9d-4e72-9756-a70137f03a02
cpp
tensorflow/tensorflow
autotuner_util
third_party/xla/xla/service/gpu/autotuning/autotuner_util.cc
third_party/xla/xla/service/gpu/autotuning/autotuner_util_test.cc
#include "xla/service/gpu/autotuning/autotuner_util.h" #include <algorithm> #include <array> #include <cmath> #include <cstdint> #include <limits> #include <optional> #include <string> #include <utility> #include <variant> #include "absl/base/const_init.h" #include "absl/base/thread_annotations.h" #include "absl/container/flat_hash_map.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "absl/time/clock.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/SHA256.h" #include "xla/autotune_results.pb.h" #include "xla/autotuning.pb.h" #include "xla/hlo/ir/hlo_clone_context.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/dump.h" #include "xla/service/gpu/gpu_asm_opts_util.h" #include "xla/service/gpu/stream_executor_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/gpu/redzone_allocator.h" #include "xla/stream_executor/stream.h" #include "xla/util.h" #include "xla/xla.pb.h" #include "tsl/platform/base64.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/path.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { constexpr int kVersion = 3; } using AutotuneCacheMap = absl::flat_hash_map<AutotuneCacheKey, AutotuneResult>; static absl::Mutex autotune_cache_mu(absl::kConstInit); static auto& autotune_cache ABSL_GUARDED_BY(autotune_cache_mu) = *new AutotuneCacheMap(); static AutotunerUtil::CacheStats autotune_cache_stats ABSL_GUARDED_BY(autotune_cache_mu); absl::StatusOr<std::string> GetBase64EncodedSha256Hash(absl::string_view s) { llvm::SHA256 sha256; sha256.update(llvm::StringRef(s)); std::array<uint8_t, 32> hash = sha256.final(); absl::string_view hash_view(reinterpret_cast<const char*>(hash.data()), hash.size()); std::string base64_encoded_hash; TF_RETURN_IF_ERROR(tsl::Base64Encode(hash_view, &base64_encoded_hash)); return base64_encoded_hash; } namespace { absl::StatusOr<std::string> GetCacheFilePath(absl::string_view cache_dir, absl::string_view key_hash) { if (cache_dir.empty()) { return absl::InvalidArgumentError("autotune_cache_dir should not be empty"); } return tsl::io::JoinPath(cache_dir, absl::StrCat(key_hash, ".textproto")); } struct ResultAndInserted { AutotuneResult result; bool inserted; }; ResultAndInserted AddResultToInMemoryCache(const AutotuneCacheKey& key, AutotuneResult result) ABSL_LOCKS_EXCLUDED(autotune_cache_mu) { absl::MutexLock lock(&autotune_cache_mu); auto [it, inserted] = autotune_cache.emplace(key, std::move(result)); return {it->second, inserted}; } absl::Status AddResultToFileBasedCacheIfEnabled( const AutotuneCacheKey& key, AutotuneResult result, std::string_view cache_dir, DebugOptions::AutotuneCacheMode autotune_cache_mode) ABSL_LOCKS_EXCLUDED(autotune_cache_mu) { if (cache_dir.empty() || autotune_cache_mode == DebugOptions::AUTOTUNE_CACHE_MODE_READ) { return absl::OkStatus(); } tsl::Env* default_env = tsl::Env::Default(); TF_RETURN_IF_ERROR(CreateDirIfNeeded(std::string(cache_dir), default_env)); TF_ASSIGN_OR_RETURN(std::string key_hash, 
GetBase64EncodedSha256Hash(key.ToString())); TF_ASSIGN_OR_RETURN(const std::string file_path, GetCacheFilePath(cache_dir, key_hash)); VLOG(1) << "Writing autotune result to file: " << file_path; std::string result_str; if (!tsl::protobuf::TextFormat::PrintToString(result, &result_str)) { return absl::InternalError("Failed to serialize autotune result."); } std::string tmp_dir = tsl::io::JoinPath(cache_dir, "tmp"); TF_RETURN_IF_ERROR(CreateDirIfNeeded(tmp_dir, default_env)); int64_t time_stamp = absl::GetCurrentTimeNanos(); std::string temp_file_path = tsl::io::JoinPath( tmp_dir, absl::StrCat("tmp_per_fusion_cache_", key_hash, "_", std::to_string(time_stamp), ".textproto")); TF_RETURN_IF_ERROR( tsl::WriteStringToFile(default_env, temp_file_path, result_str)); return default_env->RenameFile(temp_file_path, file_path); } absl::StatusOr<ResultAndInserted> AddResultToCaches( const AutotuneCacheKey& key, AutotuneResult result, std::string_view cache_dir, DebugOptions::AutotuneCacheMode autotune_cache_mode) ABSL_LOCKS_EXCLUDED(autotune_cache_mu) { ResultAndInserted result_and_inserted = AddResultToInMemoryCache(key, result); if (result_and_inserted.inserted) { TF_RETURN_IF_ERROR(AddResultToFileBasedCacheIfEnabled( key, result_and_inserted.result, cache_dir, autotune_cache_mode)); } return result_and_inserted; } std::optional<AutotuneResult> TryToFindInInMemoryCache( const AutotuneCacheKey& key) ABSL_LOCKS_EXCLUDED(autotune_cache_mu) { absl::MutexLock lock(&autotune_cache_mu); auto it = autotune_cache.find(key); if (it == autotune_cache.end()) { return std::nullopt; } return it->second; } absl::StatusOr<std::optional<AutotuneResult>> TryToFindInFileBasedCacheIfEnabled(const AutotuneCacheKey& key, absl::string_view cache_dir) ABSL_LOCKS_EXCLUDED(autotune_cache_mu) { if (cache_dir.empty()) { return std::nullopt; } TF_ASSIGN_OR_RETURN(std::string key_hash, GetBase64EncodedSha256Hash(key.ToString())); TF_ASSIGN_OR_RETURN(const std::string file_path, GetCacheFilePath(cache_dir, key_hash)); if (!tsl::Env::Default()->FileExists(file_path).ok()) { VLOG(1) << "Autotune result file not found: " << file_path; return std::nullopt; } VLOG(1) << "Autotune result file found: " << file_path; std::string autotune_result_str; TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), file_path, &autotune_result_str)); AutotuneResult result; if (!tsl::protobuf::TextFormat::ParseFromString(autotune_result_str, &result)) { return absl::InvalidArgumentError("Failed to parse autotune result."); } return result; } void SortAutotuneResults(AutotuneResults* results) { std::sort(results->mutable_results()->pointer_begin(), results->mutable_results()->pointer_end(), [](const auto* a, const auto* b) { return std::make_pair(absl::string_view(a->device()), absl::string_view(a->hlo())) < std::make_pair(absl::string_view(b->device()), absl::string_view(b->hlo())); }); } } absl::StatusOr<std::string> AutotuneResultsToString( const AutotuneResults& results, bool as_textproto) { if (as_textproto) { std::string textproto; if (tsl::protobuf::TextFormat::PrintToString(results, &textproto)) { return textproto; } else { return Internal("Failed to serialize autotune results."); } } return results.SerializeAsString(); } namespace { void SerializeAutotuneEntry(AutotuneResults* results, const AutotuneCacheKey& k, const AutotuneResult* res) { auto& entry = *results->add_results(); entry.set_device(std::string(k.GetModelStr())); entry.set_hlo(std::string(k.GetHlo())); *entry.mutable_result() = *res; } } absl::Status 
AutotunerUtil::SerializeAutotuneResults( AutotuneResults* results) { absl::MutexLock lock(&autotune_cache_mu); for (const auto& [k, result] : autotune_cache) { SerializeAutotuneEntry(results, k, &result); } results->set_version(kVersion); SortAutotuneResults(results); return absl::OkStatus(); } absl::Status AutotunerUtil::LoadAutotuneResults( const AutotuneResults& results) { absl::MutexLock lock(&autotune_cache_mu); for (const AutotuneResults::Entry& result : results.results()) { if (auto [it, inserted] = autotune_cache.emplace( AutotuneCacheKey(result.device(), result.hlo()), result.result()); !inserted) { return absl::InternalError(absl::StrCat( "Duplicate autotuning result for ", it->first.ToString())); } } return absl::OkStatus(); } void AutotunerUtil::ClearAutotuneResults() { absl::MutexLock lock(&autotune_cache_mu); autotune_cache.clear(); } bool AutotunerUtil::ResultCacheIsEmpty() { absl::MutexLock lock(&autotune_cache_mu); return autotune_cache.empty(); } absl::StatusOr<se::DeviceMemoryBase> AutotunerUtil::CreateBuffer( se::RedzoneAllocator& allocator, const Shape& shape, const AutotuneConfig& config, int64_t& rng_state) { TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase buffer, allocator.AllocateBytes(ShapeUtil::ByteSizeOf(shape))); if (config.should_init_buffers()) { InitializeBuffer(allocator.stream(), shape.element_type(), &rng_state, buffer); } return buffer; } namespace { std::string ToCanonicalString(const HloInstruction* instr) { auto options = HloPrintOptions::Canonical(); if (instr->opcode() != HloOpcode::kFusion) { options.set_print_backend_config(true); options.set_sort_backend_config(true); return instr->ToString(options); } options.set_print_subcomputation_mode( HloPrintOptions::PrintSubcomputationMode::kOff); options.set_print_infeed_outfeed_config(false); options.set_print_only_essential_constants(true); options.set_print_operand_shape(true); options.set_print_ids(false); options.set_canonicalize_computations(true); return instr->called_computations()[0]->ToString(options); } } AutotuneCacheKey::AutotuneCacheKey(absl::string_view model_str, const HloInstruction& instr) : AutotuneCacheKey(model_str, ToCanonicalString(&instr)) {} std::string AutotuneCacheKey::DeviceDescriptionToCacheKey( const se::DeviceDescription& device_description) { std::string compute_capability; if (auto* ccc = std::get_if<se::CudaComputeCapability>( &device_description.gpu_compute_capability())) { compute_capability = absl::StrCat("CUDA: ", ccc->major, ".", ccc->minor); } else { auto* rcc = std::get_if<se::RocmComputeCapability>( &device_description.gpu_compute_capability()); CHECK(rcc != nullptr) << "Unknown compute capability type"; compute_capability = absl::StrCat("ROCM: ", rcc->gfx_version()); } double memory_bandwidth = device_description.memory_bandwidth() / 1e9; memory_bandwidth = std::round(memory_bandwidth); constexpr double kBytesPerMegabyte = 1 << 20; double l2_cache_size = device_description.l2_cache_size() / kBytesPerMegabyte; return absl::StrCat(compute_capability, ", Cores: ", device_description.core_count(), ", GPU clock: ", device_description.clock_rate_ghz(), " GHz, Memory bandwidth: ", memory_bandwidth, " GB/s, L2 cache: ", l2_cache_size, " MB"); } namespace { enum class CacheType { kNone, kInMemory, kOnDisk }; absl::StatusOr<std::pair<CacheType, std::optional<AutotuneResult>>> TryFindInAllCacheTypes(const AutotuneCacheKey& key, absl::string_view cache_dir) ABSL_LOCKS_EXCLUDED(autotune_cache_mu) { std::optional<AutotuneResult> opt_result = TryToFindInInMemoryCache(key); if 
(opt_result.has_value()) { return std::make_pair(CacheType::kInMemory, opt_result); } TF_ASSIGN_OR_RETURN(opt_result, TryToFindInFileBasedCacheIfEnabled(key, cache_dir)); if (opt_result.has_value()) { AddResultToInMemoryCache(key, opt_result.value()); return std::make_pair(CacheType::kOnDisk, opt_result); } return std::make_pair(CacheType::kNone, std::nullopt); } absl::StatusOr<std::optional<AutotuneResult>> TryFindInCache( const AutotuneCacheKey& key, absl::string_view cache_dir) ABSL_LOCKS_EXCLUDED(autotune_cache_mu) { TF_ASSIGN_OR_RETURN(auto cached, TryFindInAllCacheTypes(key, cache_dir)); if (VLOG_IS_ON(1)) { std::string logged_key = (VLOG_IS_ON(2)) ? absl::StrCat(": key = ", key.ToString()) : ""; switch (cached.first) { case CacheType::kNone: LOG(INFO) << "Autotune cache miss" << logged_key; break; case CacheType::kInMemory: LOG(INFO) << "In-memory autotune cache hit" << logged_key; break; case CacheType::kOnDisk: LOG(INFO) << "File-based autotune cache hit" << logged_key; break; } } { auto cache_hit = cached.second.has_value(); absl::MutexLock lock(&autotune_cache_mu); autotune_cache_stats.cache_hits += cache_hit ? 1 : 0; autotune_cache_stats.cache_misses += cache_hit ? 0 : 1; } return std::move(cached.second); } } AutotuneCacheKey AutotunerUtil::GetKey( const HloInstruction* instr, const AutotuneConfig& config) { return AutotuneCacheKey(config.GetModelStr(), *instr); } absl::StatusOr<bool> AutotunerUtil::IsInCache( const AutotuneCacheKey& key, const AutotuneConfig& config) { TF_ASSIGN_OR_RETURN(std::optional<AutotuneResult> opt_res, TryFindInCache(key, config.autotune_cache_dir())); return opt_res.has_value(); } absl::StatusOr<bool> AutotunerUtil::AddResult( const AutotuneCacheKey& key, AutotuneResult result, const AutotuneConfig& config) { TF_ASSIGN_OR_RETURN( ResultAndInserted result_and_inserted, AddResultToCaches(key, std::move(result), config.autotune_cache_dir(), config.autotune_cache_mode())); return result_and_inserted.inserted; } absl::StatusOr<AutotuneResult> AutotunerUtil::Autotune( const HloInstruction* instr, const AutotuneConfig& config, const AutotuneNoCacheFn& autotune_fn) { const AutotuneCacheKey key = GetKey(instr, config); TF_ASSIGN_OR_RETURN(std::optional<AutotuneResult> opt_res, TryFindInCache(key, config.autotune_cache_dir())); if (opt_res.has_value()) { return opt_res.value(); } if (config.should_require_complete_aot_autotune_results()) { return NotFound( "Complete XLA AOT autotuning results are required, but no AOT result " "was found for key: %s", key.ToString()); } TF_ASSIGN_OR_RETURN(AutotuneResult autotune_result, autotune_fn()); TF_ASSIGN_OR_RETURN(ResultAndInserted result_and_inserted, AddResultToCaches(key, std::move(autotune_result), config.autotune_cache_dir(), config.autotune_cache_mode())); return result_and_inserted.result; } namespace { bool IsTextProtoPath(absl::string_view file_path) { return absl::EndsWith(file_path, ".txt") || absl::EndsWith(file_path, ".textproto") || absl::EndsWith(file_path, ".prototxt") || absl::EndsWith(file_path, ".pbtxt"); } } absl::Status AutotunerUtil::LoadAutotuneResults( absl::string_view data, bool as_textproto) { AutotuneResults results; bool parse_success = as_textproto ? 
tsl::protobuf::TextFormat::ParseFromString( std::string(data), &results) : results.ParseFromString(std::string(data)); if (!parse_success) { return absl::InvalidArgumentError( "Failed to parse autotune results string."); } if (results.version() != kVersion) { return absl::InvalidArgumentError(absl::StrFormat( "Version mismatch in autotune results. Expected %d but was %d", kVersion, results.version())); } TF_RETURN_IF_ERROR(LoadAutotuneResults(results)); return absl::OkStatus(); } absl::StatusOr<std::string> AutotunerUtil::SerializeAutotuneResults( bool as_textproto) { AutotuneResults results; TF_RETURN_IF_ERROR(SerializeAutotuneResults(&results)); return AutotuneResultsToString(results, as_textproto); } absl::Status AutotunerUtil::SerializeAutotuneResultsToFile( const AutotuneResults& results, absl::string_view file_path) { TF_RET_CHECK(!file_path.empty()); TF_RET_CHECK(results.version() > 0) << "Did you call SerializeAutotuneResults to get this AutotuneResults?"; std::string resolved_path; if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) { return FailedPrecondition("File path can not be resolved: %s", file_path); } TF_ASSIGN_OR_RETURN( std::string autotune_results_str, AutotuneResultsToString(results, IsTextProtoPath(resolved_path))); TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(), resolved_path, autotune_results_str)); LOG(INFO) << "Autotune results serialized to file: " << resolved_path; return absl::OkStatus(); } absl::Status AutotunerUtil::SerializeAutotuneResultsToFile( absl::string_view file_path) { AutotuneResults results; TF_RETURN_IF_ERROR(SerializeAutotuneResults(&results)); return SerializeAutotuneResultsToFile(results, file_path); } absl::Status AutotunerUtil::LoadAutotuneResultsFromFile( absl::string_view file_path) { TF_RET_CHECK(!file_path.empty()); std::string resolved_path; if (!tsl::io::ResolveTestPrefixes(file_path, resolved_path)) { return FailedPrecondition("File path can not be resolved: %s", file_path); } if (!tsl::Env::Default()->FileExists(resolved_path).ok()) { return FailedPrecondition("Autotune results file does not exist: %s", resolved_path); } std::string autotune_results_str; TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), resolved_path, &autotune_results_str)); TF_RETURN_IF_ERROR(LoadAutotuneResults(autotune_results_str, IsTextProtoPath(resolved_path))); LOG(INFO) << "Autotune results loaded from file: " << resolved_path; return absl::OkStatus(); } absl::StatusOr<se::RedzoneAllocator> AutotunerUtil::CreateRedzoneAllocator(const AutotuneConfig& config, const DebugOptions& opts) { TF_ASSIGN_OR_RETURN(se::Stream * stream, config.GetStream()); return se::RedzoneAllocator( stream, config.GetAllocator(), PtxOptsFromDebugOptions(opts), std::numeric_limits<int64_t>::max(), config.should_check_correctness() ? opts.xla_gpu_redzone_padding_bytes() : 0); } AutotunerUtil::CacheStats AutotunerUtil::GetCacheStats() { absl::MutexLock lock(&autotune_cache_mu); return autotune_cache_stats; } void AutotunerUtil::ClearCacheStats() { absl::MutexLock lock(&autotune_cache_mu); autotune_cache_stats = CacheStats(); } } }
#include "xla/service/gpu/autotuning/autotuner_util.h" #include <memory> #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/autotune_results.pb.h" #include "xla/autotuning.pb.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/dump.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/device_description.pb.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/xla.pb.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/path.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/status.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { using ::testing::ElementsAre; using ::testing::HasSubstr; using ::testing::IsEmpty; using ::testing::Not; using ::testing::TempDir; using ::testing::UnorderedElementsAre; using ::tsl::testing::StatusIs; class AutotunerUtilTest : public HloTestBase { protected: static constexpr absl::string_view kHloText = R"( HloModule t ENTRY e { p0 = f16[1,16,17,3] parameter(0) p1 = s8[16,17,3] parameter(1) cp1 = f16[16,17,3] convert(p1) ROOT _ = f16[1,16,16] dot(p0, cp1), lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2} })"; static constexpr absl::string_view kResultText = R"( version: 3 results { device: "CUDA: 8.0, Cores: 108, GPU clock: 1.41 GHz, Memory bandwidth: 1555 GB/s, L2 cache: 40 MB" hlo: "{\n tmp_0 = f16[1,16,17,3]{3,2,1,0} parameter(0)\n tmp_1 = f16[16,51]{1,0} bitcast(f16[1,16,17,3]{3,2,1,0} tmp_0)\n tmp_2 = s8[16,17,3]{2,1,0} parameter(1)\n tmp_3 = s8[51,16]{0,1} bitcast(s8[16,17,3]{2,1,0} tmp_2)\n tmp_4 = f16[51,16]{0,1} convert(s8[51,16]{0,1} tmp_3)\n tmp_5 = f16[16,16]{1,0} dot(f16[16,51]{1,0} tmp_1, f16[51,16]{0,1} tmp_4), lhs_contracting_dims={1}, rhs_contracting_dims={0}\n ROOT tmp_6 = f16[1,16,16]{2,1,0} bitcast(f16[16,16]{1,0} tmp_5)\n}" result { run_time { nanos: 31744 } triton { block_m: 32 block_n: 32 block_k: 32 split_k: 1 num_stages: 1 num_warps: 4 num_ctas: 1 } } })"; void SetUp() override { AutotunerUtil::ClearAutotuneResults(); AutotunerUtil::ClearCacheStats(); } std::string GetUniqueTempFilePath(absl::string_view suffix) { std::string filename = TempDir(); CHECK(tsl::Env::Default()->CreateUniqueFileName(&filename, std::string(suffix))); return filename; } std::string ExpectToReadNonEmptyFile(absl::string_view file_path) { std::string str; tsl::Env* env = tsl::Env::Default(); TF_EXPECT_OK(tsl::ReadFileToString(env, std::string(file_path), &str)); EXPECT_THAT(str, Not(IsEmpty())); return str; } static stream_executor::StreamExecutor* NewStreamExecutor() { stream_executor::Platform* platform = stream_executor::PlatformManager::PlatformWithName("Host").value(); return platform->ExecutorForDevice(0).value(); } absl::Status PopulateResultCache() { EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty()); TF_RETURN_IF_ERROR(AutotunerUtil::LoadAutotuneResults(kResultText, true)); EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty()); 
return absl::OkStatus(); } }; TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_TextProto1) { TF_EXPECT_OK(PopulateResultCache()); std::string kFilePath = GetUniqueTempFilePath(".txt"); TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath)); std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath); AutotuneResults results; EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str, &results)); EXPECT_GT(results.results_size(), 0); } TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_TextProto2) { TF_EXPECT_OK(PopulateResultCache()); std::string kFilePath = GetUniqueTempFilePath(".textproto"); TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath)); std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath); AutotuneResults results; EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(autotune_results_str, &results)); } TEST_F(AutotunerUtilTest, SerializeAutotuneResultsToFile_Protobuf) { TF_EXPECT_OK(PopulateResultCache()); std::string kFilePath = GetUniqueTempFilePath(".pb"); TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath)); std::string autotune_results_str = ExpectToReadNonEmptyFile(kFilePath); AutotuneResults results; EXPECT_TRUE(results.ParseFromString(autotune_results_str)); } TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_TextProto1) { TF_EXPECT_OK(PopulateResultCache()); std::string kFilePath = GetUniqueTempFilePath(".txt"); TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath)); AutotunerUtil::ClearAutotuneResults(); EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty()); TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath)); EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty()); } TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_TextProto2) { TF_EXPECT_OK(PopulateResultCache()); std::string kFilePath = GetUniqueTempFilePath(".textproto"); TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath)); AutotunerUtil::ClearAutotuneResults(); EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty()); TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath)); EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty()); } TEST_F(AutotunerUtilTest, LoadAutotuneResultsFromFile_Protobuf) { TF_EXPECT_OK(PopulateResultCache()); std::string kFilePath = GetUniqueTempFilePath(".pb"); TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath)); AutotunerUtil::ClearAutotuneResults(); EXPECT_TRUE(AutotunerUtil::ResultCacheIsEmpty()); TF_EXPECT_OK(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath)); EXPECT_FALSE(AutotunerUtil::ResultCacheIsEmpty()); } TEST_F(AutotunerUtilTest, ResultConflictsAreDetected) { TF_EXPECT_OK(PopulateResultCache()); std::string kFilePath = GetUniqueTempFilePath(".pb"); TF_EXPECT_OK(AutotunerUtil::SerializeAutotuneResultsToFile(kFilePath)); EXPECT_THAT(AutotunerUtil::LoadAutotuneResultsFromFile(kFilePath), StatusIs(absl::StatusCode::kInternal, HasSubstr("Duplicate autotuning result"))); } TEST_F(AutotunerUtilTest, FailIfRequireCompleteAotAutotuning) { std::string kFilePath = GetUniqueTempFilePath(".txt"); auto hlo_module = GetOptimizedModule(kHloText); TF_EXPECT_OK(hlo_module.status()); std::vector<HloComputation*> computations = (*hlo_module) ->MakeNonfusionComputations(absl::flat_hash_set<absl::string_view>()); EXPECT_THAT(computations, Not(IsEmpty())); const HloInstruction* instruction = *computations[0]->instructions().begin(); stream_executor::StreamExecutor* executor = NewStreamExecutor(); auto options = 
DebugOptions(); options.set_xla_gpu_require_complete_aot_autotune_results(true); AutotuneConfig config(DeviceConfig{executor}, options); EXPECT_THAT( AutotunerUtil::Autotune(instruction, config, [&] { return AutotuneResult(); }), StatusIs( absl::StatusCode::kNotFound, HasSubstr("Complete XLA AOT autotuning results are required, but " "no AOT result was found for key: <key model"))); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 0); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 1); } TEST_F(AutotunerUtilTest, OkIfJitAutotuningDisabledButAlreadyLoadedAOT) { auto hlo_module = GetOptimizedModule(kHloText); std::vector<HloComputation*> computations = (*hlo_module) ->MakeNonfusionComputations(absl::flat_hash_set<absl::string_view>()); EXPECT_THAT(computations, Not(IsEmpty())); const HloInstruction* instruction = *computations[0]->instructions().begin(); stream_executor::StreamExecutor* executor = NewStreamExecutor(); { AutotuneConfig config(DeviceConfig{executor}, DebugOptions()); TF_EXPECT_OK(AutotunerUtil::Autotune(instruction, config, [&] { return AutotuneResult(); }).status()); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 0); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 1); } auto options = DebugOptions(); options.set_xla_gpu_require_complete_aot_autotune_results(true); AutotuneConfig config(DeviceConfig{executor}, options); TF_EXPECT_OK(AutotunerUtil::Autotune(instruction, config, [&] { return AutotuneResult(); }).status()); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 1); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 1); } class FileBasedCacheTest : public AutotunerUtilTest { public: static std::string ToString(const AutotuneResult& message) { std::string textproto; CHECK(tsl::protobuf::TextFormat::PrintToString(message, &textproto)); return textproto; } static std::vector<std::string> GetFilesInDir( const absl::string_view cache_dir) { std::vector<std::string> files_in_cache; if (!tsl::Env::Default() ->GetChildren(std::string(cache_dir), &files_in_cache) .ok()) { files_in_cache.clear(); } return files_in_cache; } static std::string Read(const absl::string_view filepath) { std::string file_content; TF_CHECK_OK(tsl::ReadFileToString(tsl::Env::Default(), std::string(filepath), &file_content)); return file_content; } void Write(const absl::string_view filepath, const absl::string_view content) { TF_CHECK_OK(CreateDirIfNeeded(cache_dir_, tsl::Env::Default())); TF_CHECK_OK(tsl::WriteStringToFile(tsl::Env::Default(), std::string(filepath), content)); } stream_executor::StreamExecutor* executor_ = NewStreamExecutor(); std::unique_ptr<HloModule> module_ = ParseAndReturnVerifiedModule(kHloText).value(); const HloInstruction* dot_ = hlo_query::GetFirstInstructionWithOpcode( *module_->entry_computation(), HloOpcode::kDot); std::string cache_dir_ = [] { tsl::Env* default_env = tsl::Env::Default(); std::string cache_dir; CHECK(default_env->LocalTempFilename(&cache_dir)); return cache_dir; }(); DebugOptions::AutotuneCacheMode GetCacheMode() const { return cache_mode_; } void SetCacheMode(DebugOptions::AutotuneCacheMode cache_mode) { cache_mode_ = cache_mode; } AutotuneConfig GetConfig() const { DebugOptions options; options.set_xla_gpu_per_fusion_autotune_cache_dir(cache_dir_); options.set_xla_gpu_experimental_autotune_cache_mode(GetCacheMode()); return AutotuneConfig(DeviceConfig{executor_}, options); } AutotuneCacheKey GetCacheKey() const { return AutotunerUtil::GetKey(dot_, GetConfig()); } std::string GetCacheFilename() const { 
absl::StatusOr<std::string> key_hash = GetBase64EncodedSha256Hash(GetCacheKey().ToString()); CHECK_OK(key_hash.status()); return absl::StrCat(key_hash.value(), ".textproto"); } std::string GetCacheFilePath() const { return tsl::io::JoinPath(cache_dir_, GetCacheFilename()); } const AutotuneResult result1_ = [] { AutotuneResult result; result.set_scratch_bytes(1); return result; }(); const AutotuneResult result2_ = [] { AutotuneResult result; result.set_scratch_bytes(2); return result; }(); private: DebugOptions::AutotuneCacheMode cache_mode_ = DebugOptions::AUTOTUNE_CACHE_MODE_UPDATE; }; TEST_F(FileBasedCacheTest, AutotuneCreatesTmpAndWritesResultToTheCacheDir) { TF_ASSERT_OK_AND_ASSIGN( AutotuneResult result, AutotunerUtil::Autotune(dot_, GetConfig(), [&] { return result1_; })); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 0); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 1); EXPECT_EQ(ToString(result), ToString(result1_)); ASSERT_THAT(GetFilesInDir(cache_dir_), UnorderedElementsAre(GetCacheFilename(), "tmp")); EXPECT_EQ(Read(GetCacheFilePath()), ToString(result1_)); } TEST_F(FileBasedCacheTest, AutotuneReadsResultFromTheCacheDir) { Write(GetCacheFilePath(), ToString(result1_)); bool cache_hit = true; TF_ASSERT_OK_AND_ASSIGN(AutotuneResult result, AutotunerUtil::Autotune(dot_, GetConfig(), [&] { cache_hit = false; return result2_; })); EXPECT_TRUE(cache_hit); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 1); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 0); EXPECT_EQ(ToString(result), ToString(result1_)); } TEST_F(FileBasedCacheTest, RepeatedAutotuneCallsDontReadOrWriteTheCacheFileAgain) { auto check_autotune_cache_hit = [](const HloInstruction* instr, const AutotuneConfig& config, const AutotuneResult& expected_result) { bool cache_hit = true; TF_ASSERT_OK_AND_ASSIGN(AutotuneResult result, AutotunerUtil::Autotune(instr, config, [&] { cache_hit = false; AutotuneResult new_result; new_result.set_scratch_bytes(2); return new_result; })); EXPECT_TRUE(cache_hit); EXPECT_EQ(ToString(result), ToString(expected_result)); }; const std::string cache_file_path = GetCacheFilePath(); const AutotuneConfig config = GetConfig(); Write(cache_file_path, ToString(result1_)); check_autotune_cache_hit(dot_, config, result1_); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 1); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 0); constexpr absl::string_view kPlaceholderContent = "placeholder content"; Write(cache_file_path, kPlaceholderContent); check_autotune_cache_hit(dot_, config, result1_); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 2); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 0); EXPECT_EQ(Read(cache_file_path), kPlaceholderContent); } TEST_F(FileBasedCacheTest, IsInCacheReturnsTrueIfTheResultIsInTheFileBasedCache) { Write(GetCacheFilePath(), ToString(result1_)); TF_ASSERT_OK_AND_ASSIGN(bool is_in_cache, AutotunerUtil::IsInCache(GetCacheKey(), GetConfig())); EXPECT_TRUE(is_in_cache); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 1); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 0); } TEST_F(FileBasedCacheTest, IsInCacheReturnsFalseIfTheResultIsNotInEitherCache) { TF_ASSERT_OK_AND_ASSIGN(bool is_in_cache, AutotunerUtil::IsInCache(GetCacheKey(), GetConfig())); EXPECT_FALSE(is_in_cache); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_hits, 0); EXPECT_EQ(AutotunerUtil::GetCacheStats().cache_misses, 1); } TEST_F(FileBasedCacheTest, AddResultAddsTheResultToTheFileBasedCache) { TF_ASSERT_OK_AND_ASSIGN( bool added, 
AutotunerUtil::AddResult(GetCacheKey(), result1_, GetConfig())); EXPECT_TRUE(added); ASSERT_THAT(GetFilesInDir(cache_dir_), UnorderedElementsAre(GetCacheFilename(), "tmp")); EXPECT_EQ(Read(GetCacheFilePath()), ToString(result1_)); } TEST_F(FileBasedCacheTest, RepeatedAddResultDoesNotWriteTheFileAgain) { const std::string cache_file_path = GetCacheFilePath(); const AutotuneCacheKey cache_key = GetCacheKey(); const AutotuneConfig config = GetConfig(); { TF_ASSERT_OK_AND_ASSIGN( bool added, AutotunerUtil::AddResult(cache_key, result1_, config)); EXPECT_TRUE(added); } ASSERT_THAT(GetFilesInDir(cache_dir_), UnorderedElementsAre(GetCacheFilename(), "tmp")); EXPECT_EQ(Read(cache_file_path), ToString(result1_)); constexpr absl::string_view kPlaceholderContent = "placeholder content"; Write(cache_file_path, kPlaceholderContent); { TF_ASSERT_OK_AND_ASSIGN( bool added, AutotunerUtil::AddResult(cache_key, result1_, config)); EXPECT_FALSE(added); } EXPECT_EQ(Read(cache_file_path), kPlaceholderContent); } TEST(AutotuneCacheKeyTest, DeviceDescriptionToCacheKey) { auto device_description = [](absl::string_view spec_file_name) -> se::DeviceDescription { se::GpuTargetConfigProto proto; std::string spec_string; CHECK_OK(tsl::ReadFileToString( tsl::Env::Default(), tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "hlo_opt", "gpu_specs", spec_file_name), &spec_string)); EXPECT_TRUE( tsl::protobuf::TextFormat::ParseFromString(spec_string, &proto)); return se::DeviceDescription(proto.gpu_device_info()); }; EXPECT_EQ(AutotuneCacheKey::DeviceDescriptionToCacheKey( device_description("a100_sxm_40.txtpb")), "CUDA: 8.0, Cores: 108, GPU clock: 1.41 GHz, Memory bandwidth: " "1555 GB/s, L2 cache: 40 MB"); EXPECT_EQ(AutotuneCacheKey::DeviceDescriptionToCacheKey( device_description("a100_sxm_80.txtpb")), "CUDA: 8.0, Cores: 108, GPU clock: 1.41 GHz, Memory bandwidth: " "2039 GB/s, L2 cache: 40 MB"); EXPECT_EQ(AutotuneCacheKey::DeviceDescriptionToCacheKey( device_description("mi200.txtpb")), "ROCM: gfx90a, Cores: 110, GPU clock: 1.7 GHz, Memory bandwidth: " "1638 GB/s, L2 cache: 8 MB"); } TEST_F(FileBasedCacheTest, AddResultDoesNotWriteTheFileInReadMode) { SetCacheMode(DebugOptions::AUTOTUNE_CACHE_MODE_READ); TF_ASSERT_OK_AND_ASSIGN( bool added, AutotunerUtil::AddResult(GetCacheKey(), result1_, GetConfig())); EXPECT_TRUE(added); EXPECT_EQ(GetFilesInDir(cache_dir_).size(), 0); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/autotuner_util.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/autotuner_util_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
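TryFindInAllCacheTypes above implements a two-tier lookup: consult the in-memory map first, fall back to the per-fusion file cache, and backfill the in-memory map on a disk hit so the file is not read again (the behavior RepeatedAutotuneCallsDontReadOrWriteTheCacheFileAgain verifies). A minimal sketch of that pattern, with hypothetical names and an ordinary map standing in for the SHA-256-named .textproto files:

#include <cassert>
#include <optional>
#include <string>
#include <unordered_map>

// Two-tier lookup sketch: memory first, "disk" second, backfill on a disk hit.
// DiskCache here is just another map standing in for the cache directory;
// all names are illustrative, not the XLA API.
struct TwoTierCache {
  std::unordered_map<std::string, std::string> memory;
  std::unordered_map<std::string, std::string> disk;

  std::optional<std::string> Find(const std::string& key) {
    if (auto it = memory.find(key); it != memory.end()) {
      return it->second;                // in-memory hit
    }
    if (auto it = disk.find(key); it != disk.end()) {
      memory.emplace(key, it->second);  // backfill so the next hit is cheap
      return it->second;
    }
    return std::nullopt;                // miss in both tiers
  }
};

int main() {
  TwoTierCache cache;
  cache.disk["dot_fusion_key"] = "block_m:32 block_n:32";
  assert(cache.memory.count("dot_fusion_key") == 0);
  auto r = cache.Find("dot_fusion_key");  // disk hit, promoted to memory
  assert(r.has_value() && cache.memory.count("dot_fusion_key") == 1);
  assert(!cache.Find("unknown_key").has_value());
  return 0;
}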
b385adf9-728d-43f0-a409-fe53a52414c6
cpp
tensorflow/tensorflow
hlo_input_output_alias_config
third_party/xla/xla/hlo/ir/hlo_input_output_alias_config.cc
third_party/xla/xla/service/hlo_input_output_alias_config_test.cc
#include "xla/hlo/ir/hlo_input_output_alias_config.h" #include <cstdint> #include <optional> #include <ostream> #include <string> #include <utility> #include <vector> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/layout_util.h" #include "xla/service/hlo.pb.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" namespace xla { bool HloInputOutputAliasConfig::OutputHasAlias( const ShapeIndex& output_index) const { return alias_.element(output_index).has_value(); } absl::Status HloInputOutputAliasConfig::SetUpAlias( const ShapeIndex& output_index, int64_t param_number, const ShapeIndex& param_index, HloInputOutputAliasConfig::AliasKind must_alias) { TF_RET_CHECK(ShapeUtil::IndexIsValid(alias_.shape(), output_index)) << "Trying to set up alias at " << output_index.ToString() << " which is an invalid index for shape " << ShapeUtil::HumanString(alias_.shape()); TF_RET_CHECK(param_number >= 0) << param_number; TF_RET_CHECK(!alias_.element(output_index)) << absl::StrFormat( "Trying to set up output alias for param %lld at %s but failed: output " "index %s is already aliased with param %lld at %s", param_number, param_index.ToString(), output_index.ToString(), alias_.element(output_index)->parameter_number, alias_.element(output_index)->parameter_index.ToString()); (*alias_.mutable_element(output_index)) = Alias(param_number, param_index, must_alias); VLOG(4) << "Set up alias between output index " << output_index.ToString() << " and parameter " << param_number << " at index " << param_index.ToString(); return absl::OkStatus(); } HloInputOutputAliasProto HloInputOutputAliasConfig::ToProto() const { HloInputOutputAliasProto result; alias_.ForEachElement( [&](const ShapeIndex& index, const std::optional<Alias>& data) { if (data) { HloInputOutputAliasProto::AliasEntryProto entry; for (int64_t i : index) { entry.add_output_shape_index(i); } entry.set_parameter_number(data->parameter_number); for (int64_t i : data->parameter_index) { entry.add_parameter_shape_index(i); } if (data->must_alias()) { entry.set_kind(Kind::MUST_ALIAS); } else { entry.set_kind(Kind::MAY_ALIAS); } result.add_entries()->Swap(&entry); } }); return result; } absl::StatusOr<HloInputOutputAliasConfig> HloInputOutputAliasConfig::CreateFromProto( Shape output_shape, const HloInputOutputAliasProto& proto) { HloInputOutputAliasConfig result(std::move(output_shape)); for (const HloInputOutputAliasProto::AliasEntryProto& entry : proto.entries()) { ShapeIndex output_index(entry.output_shape_index().begin(), entry.output_shape_index().end()); int64_t param_number = entry.parameter_number(); ShapeIndex param_index(entry.parameter_shape_index().begin(), entry.parameter_shape_index().end()); AliasKind kind = entry.kind() == Kind::MAY_ALIAS ? 
kMayAlias : kMustAlias; TF_RETURN_IF_ERROR( result.SetUpAlias(output_index, param_number, param_index, kind)); } return result; } const Shape& HloInputOutputAliasConfig::shape() const { return alias_.shape(); } std::string HloInputOutputAliasConfig::ToString() const { std::vector<std::string> pieces; pieces.push_back("HloInputOutputAliasConfig"); pieces.push_back( absl::StrFormat(" Output shape: %s", alias_.shape().ToString())); ForEachAlias([&](const ShapeIndex& output_index, const Alias& alias) { pieces.push_back(absl::StrFormat( " OutputIndex %s is %saliased with parameter %lld at %s:", output_index.ToString(), alias.kind == kMustAlias ? "must-" : "may-", alias.parameter_number, alias.parameter_index.ToString())); }); return absl::StrJoin(pieces, "\n"); } std::string HloInputOutputAliasConfig::ToShortString() const { std::vector<std::string> pieces; for (const auto& p : alias_) { const ShapeIndex& index = p.first; if (std::optional<Alias> alias = p.second) { pieces.push_back( absl::StrFormat("%s: %s", index.ToString(), alias->ToString())); } } return absl::StrJoin(pieces, ", "); } bool HloInputOutputAliasConfig::ParameterMustAlias( int64_t param_number, const ShapeIndex& param_index) const { bool result = false; alias_.ForEachElement( [&](const xla::ShapeIndex&, std::optional<Alias> alias) { if (alias && alias->parameter_number == param_number && alias->parameter_index == param_index && alias->must_alias()) { result = true; } }); return result; } std::optional<ShapeIndex> HloInputOutputAliasConfig::GetAliasedOutput( int64_t param_number, const ShapeIndex& param_index) const { for (auto it = alias_.rbegin(); it != alias_.rend(); ++it) { if (it->second.has_value() && it->second->parameter_number == param_number && it->second->parameter_index == param_index) { return it->first; } } return std::nullopt; } std::optional<HloInputOutputAliasConfig::Alias> HloInputOutputAliasConfig::GetAliasedParameter( const ShapeIndex& output_index) const { CHECK(ShapeUtil::IndexIsValid(alias_.shape(), output_index)) << ToString() << " " << alias_.shape().ToString() << " " << output_index; return alias_.element(output_index); } void HloInputOutputAliasConfig::ForEachAlias(AliasFn fn) const { alias_.ForEachElement( [&](const ShapeIndex& output_index, std::optional<Alias> aliased) { if (aliased) { fn(output_index, *aliased); } }); } absl::Status HloInputOutputAliasConfig::ForEachAliasWithStatus( AliasFnWithStatus fn) const { return alias_.ForEachElementWithStatus( [&](const ShapeIndex& output_index, std::optional<Alias> aliased) { if (aliased) { TF_RETURN_IF_ERROR(fn(output_index, *aliased)); } return absl::OkStatus(); }); } absl::Status HloInputOutputAliasConfig::Verify( const HloModule& module, absl::FunctionRef<int64_t(const Shape&)> size_func) const { std::vector<ShapeTree<bool>> param_has_seen; const HloComputation* entry = module.entry_computation(); for (int64_t i = 0; i < entry->num_parameters(); ++i) { HloInstruction* param = entry->parameter_instruction(i); param_has_seen.emplace_back(param->shape()); } return ForEachAliasWithStatus([&](const ShapeIndex& output_index, const Alias& alias) -> absl::Status { TF_RET_CHECK(0 <= alias.parameter_number); TF_RET_CHECK(entry->num_parameters() > alias.parameter_number); const Shape& param_shape = module.entry_computation_layout().parameter_shape( alias.parameter_number); const Shape& output_shape = module.entry_computation_layout().result_shape(); TF_RET_CHECK(ShapeUtil::IndexIsValid(param_shape, alias.parameter_index)); 
TF_RET_CHECK(ShapeUtil::IndexIsValid(output_shape, output_index)); const Shape& param_subshape = ShapeUtil::GetSubshape(param_shape, alias.parameter_index); const Shape& output_subshape = ShapeUtil::GetSubshape(output_shape, output_index); TF_RET_CHECK(LayoutUtil::IsDenseArray(param_subshape)); TF_RET_CHECK(LayoutUtil::IsDenseArray(output_subshape)); if (size_func(param_subshape) != size_func(output_subshape)) { return Internal( "Expected aliased input %lld at index %s and output at index %s to " "have the same size. Input sub-shape is %s with size %lld, output " "sub-shape is %s with size %lld", alias.parameter_number, alias.parameter_index.ToString(), output_index.ToString(), ShapeUtil::HumanStringWithLayout(param_subshape), size_func(param_subshape), ShapeUtil::HumanStringWithLayout(output_subshape), size_func(output_subshape)); } TF_RET_CHECK(param_has_seen[alias.parameter_number].element( alias.parameter_index) == false); *(param_has_seen[alias.parameter_number].mutable_element( alias.parameter_index)) = true; return absl::OkStatus(); }); } std::ostream& operator<<(std::ostream& out, const HloInputOutputAliasConfig& config) { out << config.ToString(); return out; } absl::Status HloBufferDonorConfig::AddBufferDonor( int64_t param_number, const ShapeIndex& param_index) { TF_RET_CHECK(param_number >= 0) << param_number; VLOG(4) << "Register the parameter " << param_number << " at index " << param_index.ToString() << " as a buffer donor."; buffer_donor_.emplace(BufferDonor(param_number, param_index)); return absl::OkStatus(); } absl::Status HloBufferDonorConfig::RemoveBufferDonor( int64_t param_number, const ShapeIndex& param_index) { TF_RET_CHECK(param_number >= 0) << param_number; buffer_donor_.erase(BufferDonor(param_number, param_index)); return absl::OkStatus(); } HloBufferDonorProto HloBufferDonorConfig::ToProto() const { HloBufferDonorProto result; for (const auto& donor : buffer_donor_) { HloBufferDonorProto::BufferDonorEntryProto entry; entry.set_parameter_number(donor.param_number); for (int64_t i : donor.param_index) { entry.add_parameter_shape_index(i); } result.add_entries()->Swap(&entry); } return result; } absl::StatusOr<HloBufferDonorConfig> HloBufferDonorConfig::CreateFromProto( const HloBufferDonorProto& proto) { HloBufferDonorConfig result; for (const HloBufferDonorProto::BufferDonorEntryProto& entry : proto.entries()) { int64_t param_number = entry.parameter_number(); ShapeIndex param_index(entry.parameter_shape_index().begin(), entry.parameter_shape_index().end()); TF_RETURN_IF_ERROR(result.AddBufferDonor(param_number, param_index)); } return result; } std::string HloBufferDonorConfig::ToString() const { std::vector<std::string> pieces; pieces.push_back("HloBufferDonorConfig"); for (const auto& donor : buffer_donor_) { pieces.push_back(absl::StrFormat( " Parameter %lld at %s is registered as a buffer donor.", donor.param_number, donor.param_index.ToString())); } return absl::StrJoin(pieces, "\n"); } std::string HloBufferDonorConfig::ToShortString() const { std::vector<std::string> pieces; pieces.reserve(buffer_donor_.size()); for (const auto& donor : buffer_donor_) { pieces.push_back(absl::StrFormat("(%lld, %s)", donor.param_number, donor.param_index.ToString())); } return absl::StrJoin(pieces, ", "); } bool HloBufferDonorConfig::ParameterIsBufferDonor( int64_t param_number, const ShapeIndex& param_index) const { auto it = buffer_donor_.find(BufferDonor(param_number, param_index)); return it != buffer_donor_.end(); } absl::Status HloBufferDonorConfig::Verify(const 
HloModule& module) const { const HloComputation* entry = module.entry_computation(); const auto& alias_config = module.input_output_alias_config(); for (const auto& donor : buffer_donor_) { TF_RET_CHECK(donor.param_number >= 0); TF_RET_CHECK(donor.param_number < entry->num_parameters()); const Shape& param_shape = module.entry_computation_layout().parameter_shape(donor.param_number); TF_RET_CHECK(ShapeUtil::IndexIsValid(param_shape, donor.param_index)); const Shape& param_subshape = ShapeUtil::GetSubshape(param_shape, donor.param_index); TF_RET_CHECK(LayoutUtil::IsDenseArray(param_subshape)); if (alias_config.ParameterHasAlias(donor.param_number, donor.param_index)) { return Internal( "Input %lld at index %s is registered as a buffer donor. However, it " "is also in the input output alias config.", donor.param_number, donor.param_index.ToString()); } } return absl::OkStatus(); } std::ostream& operator<<(std::ostream& out, const HloBufferDonorConfig& config) { out << config.ToString(); return out; } }
#include "xla/hlo/ir/hlo_input_output_alias_config.h" #include <memory> #include <string> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_dce.h" #include "xla/service/hlo_memory_scheduler.h" #include "xla/service/hlo_ordering.h" #include "xla/shape_util.h" #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/types.h" #include "tsl/platform/statusor.h" namespace xla { namespace { class HloInputOutputAliasConfigTest : public HloTestBase { protected: void expect_aliased(const ShapeIndex& output_index, int64_t param_number, const ShapeIndex& param_index, const HloInputOutputAliasConfig& config) { std::optional<ShapeIndex> aliased_output = config.GetAliasedOutput(param_number, param_index); EXPECT_TRUE(aliased_output); EXPECT_EQ(aliased_output.value(), output_index); std::optional<HloInputOutputAliasConfig::Alias> aliased_param = config.GetAliasedParameter(output_index); EXPECT_TRUE(aliased_param); EXPECT_EQ(aliased_param->parameter_number, param_number); EXPECT_EQ(aliased_param->parameter_index, param_index); } void expect_not_aliased(const ShapeIndex& output_index, int64_t param_number, const ShapeIndex& param_index, const HloInputOutputAliasConfig& config) { std::optional<ShapeIndex> aliased_output = config.GetAliasedOutput(param_number, param_index); EXPECT_FALSE(aliased_output && aliased_output == output_index); std::optional<HloInputOutputAliasConfig::Alias> aliased_param = config.GetAliasedParameter(output_index); EXPECT_FALSE(aliased_param && aliased_param->parameter_number == param_number && aliased_param->parameter_index == param_index); } }; TEST_F(HloInputOutputAliasConfigTest, SimpleAliasing) { const std::string module_str = R"( HloModule TEST ENTRY main { a = f32[] parameter(0) b = f32[] parameter(1) ROOT root = (f32[], f32[]) tuple(%a, %b) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); HloInputOutputAliasConfig config( module->entry_computation()->root_instruction()->shape()); TF_ASSERT_OK(config.SetUpAlias( {0}, 1, {})); expect_aliased({0}, 1, {}, config); expect_not_aliased({1}, 1, {}, config); expect_not_aliased({0}, 0, {}, config); } TEST_F(HloInputOutputAliasConfigTest, SimpleAliasingWithTupleInput) { const std::string module_str = R"( HloModule TEST ENTRY main { param = (f32[], f32[]) parameter(0) gte1 = f32[] get-tuple-element(%param), index=0 gte2 = f32[] get-tuple-element(%param), index=1 ROOT root = (f32[], f32[]) tuple(%gte1, %gte2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); HloInputOutputAliasConfig config( module->entry_computation()->root_instruction()->shape()); TF_ASSERT_OK(config.SetUpAlias( {0}, 0, {0})); TF_ASSERT_OK(config.SetUpAlias( {1}, 0, {1})); expect_aliased({0}, 0, {0}, config); expect_aliased({1}, 0, {1}, config); expect_not_aliased({1}, 1, {}, config); expect_not_aliased({0}, 0, {}, config); } TEST_F(HloInputOutputAliasConfigTest, InputDoNotAliasTwice) { const std::string module_str = R"( HloModule TEST ENTRY main { a = f32[] parameter(0) b = f32[] parameter(1) ROOT root = (f32[], f32[]) tuple(%a, %b) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); HloInputOutputAliasConfig config( module->entry_computation()->root_instruction()->shape()); 
TF_ASSERT_OK(config.SetUpAlias( {0}, 0, {})); TF_ASSERT_OK(config.SetUpAlias( {1}, 0, {})); ASSERT_IS_NOT_OK(config.Verify(*module, [](const Shape& shape) { return ShapeUtil::ByteSizeOf(shape); })); } TEST_F(HloInputOutputAliasConfigTest, SizesMustMatch) { const std::string module_str = R"( HloModule TEST ENTRY main { a = f32[] parameter(0) b = f32[4096] parameter(1) ROOT root = (f32[], f32[4096]) tuple(%a, %b) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); HloInputOutputAliasConfig config( module->entry_computation()->root_instruction()->shape()); TF_ASSERT_OK(config.SetUpAlias( {1}, 0, {})); ASSERT_IS_NOT_OK(config.Verify(*module, [](const Shape& shape) { return ShapeUtil::ByteSizeOf(shape); })); } TEST_F(HloInputOutputAliasConfigTest, OutputDoNotAliasTwice) { const std::string module_str = R"( HloModule TEST ENTRY main { a = f32[] parameter(0) b = f32[] parameter(1) ROOT root = (f32[], f32[]) tuple(%a, %b) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); HloInputOutputAliasConfig config( module->entry_computation()->root_instruction()->shape()); TF_ASSERT_OK(config.SetUpAlias( {0}, 0, {})); ASSERT_IS_NOT_OK(config.SetUpAlias( {0}, 1, {})); } class HloBufferDonorConfigTest : public HloTestBase {}; TEST_F(HloBufferDonorConfigTest, SimpleBufferDonor) { const std::string module_str = R"( HloModule TEST ENTRY main { a = f32[] parameter(0) b = f32[] parameter(1) ROOT root = (f32[], f32[]) tuple(%a, %b) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); HloBufferDonorConfig config; TF_ASSERT_OK(config.AddBufferDonor(0, {})); EXPECT_TRUE(config.ParameterIsBufferDonor(0, {})); EXPECT_FALSE(config.ParameterIsBufferDonor(1, {})); TF_ASSERT_OK(config.AddBufferDonor(1, {})); EXPECT_TRUE(config.ParameterIsBufferDonor(0, {})); EXPECT_TRUE(config.ParameterIsBufferDonor(1, {})); TF_ASSERT_OK(config.RemoveBufferDonor(0, {})); EXPECT_FALSE(config.ParameterIsBufferDonor(0, {})); EXPECT_TRUE(config.ParameterIsBufferDonor(1, {})); TF_ASSERT_OK(config.Verify(*module)); TF_ASSERT_OK(config.AddBufferDonor(2, {})); ASSERT_IS_NOT_OK(config.Verify(*module)); } TEST_F(HloBufferDonorConfigTest, SimpleBufferDonorWithTupleInput) { const std::string module_str = R"( HloModule TEST ENTRY main { param = (f32[], f32[]) parameter(0) gte1 = f32[] get-tuple-element(%param), index=0 gte2 = f32[] get-tuple-element(%param), index=1 ROOT root = (f32[], f32[]) tuple(%gte1, %gte2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); HloBufferDonorConfig config; TF_ASSERT_OK(config.AddBufferDonor(0, {0})); EXPECT_TRUE(config.ParameterIsBufferDonor(0, {0})); EXPECT_FALSE(config.ParameterIsBufferDonor(0, {1})); EXPECT_FALSE(config.ParameterIsBufferDonor(0, {})); EXPECT_FALSE(config.ParameterIsBufferDonor(1, {})); TF_ASSERT_OK(config.AddBufferDonor(0, {1})); EXPECT_TRUE(config.ParameterIsBufferDonor(0, {0})); EXPECT_TRUE(config.ParameterIsBufferDonor(0, {1})); EXPECT_FALSE(config.ParameterIsBufferDonor(0, {})); EXPECT_FALSE(config.ParameterIsBufferDonor(1, {})); TF_ASSERT_OK(config.Verify(*module)); TF_ASSERT_OK(config.AddBufferDonor(0, {2})); ASSERT_IS_NOT_OK(config.Verify(*module)); } TEST_F(HloBufferDonorConfigTest, BufferDonorInputOutputAliasOverlap) { const std::string module_str = R"( HloModule TEST ENTRY main { param = (f32[], f32[]) parameter(0) gte1 = f32[] get-tuple-element(%param), index=0 gte2 = 
f32[] get-tuple-element(%param), index=1 ROOT root = (f32[], f32[]) tuple(%gte1, %gte2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_str)); HloBufferDonorConfig config; TF_ASSERT_OK(config.AddBufferDonor(0, {0})); TF_ASSERT_OK(config.Verify(*module)); TF_ASSERT_OK(module->input_output_alias_config().SetUpAlias({0}, 0, {0})); ASSERT_IS_NOT_OK(config.Verify(*module)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_input_output_alias_config.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_input_output_alias_config_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
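A minimal usage sketch for the HloInputOutputAliasConfig API in the row above. The tuple-shaped root and the alias values mirror the SimpleAliasing test; the standalone function and its name are hypothetical, and the construction here skips the HloModule entirely since the config only needs the root shape.
// Sketch: alias parameter 1 with output tuple index {0}, then query the
// config from both directions. Assumes the XLA headers from the row above.
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/shape_util.h"

absl::Status DemoAliasConfig() {  // hypothetical driver
  // Root shape (f32[], f32[]) as in the SimpleAliasing test.
  xla::Shape root_shape = xla::ShapeUtil::MakeTupleShape(
      {xla::ShapeUtil::MakeShape(xla::F32, {}),
       xla::ShapeUtil::MakeShape(xla::F32, {})});
  xla::HloInputOutputAliasConfig config(root_shape);
  // Output index {0} may alias parameter 1; kMayAlias is the default kind.
  TF_RETURN_IF_ERROR(config.SetUpAlias(/*output_index=*/{0},
                                       /*param_number=*/1,
                                       /*param_index=*/{}));
  // Lookups work in both directions.
  CHECK(config.GetAliasedOutput(1, {}).has_value());
  CHECK(config.GetAliasedParameter({0}).has_value());
  // A may-alias entry does not satisfy the must-alias predicate.
  CHECK(!config.ParameterMustAlias(1, {}));
  return absl::OkStatus();
}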
94074df8-7681-4959-a162-1aadb620489e
cpp
google/arolla
dense_array
arolla/dense_array/dense_array.cc
arolla/qexpr/operators/dense_array/dense_array_test.cc
#include "arolla/dense_array/dense_array.h" #include "arolla/util/fingerprint.h" namespace arolla { void FingerprintHasherTraits<DenseArrayShape>::operator()( FingerprintHasher* hasher, const DenseArrayShape& value) const { hasher->Combine(value.size); } }
#include "arolla/dense_array/dense_array.h" #include <optional> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status.h" #include "absl/status/status_matchers.h" #include "arolla/dense_array/edge.h" #include "arolla/dense_array/qtype/types.h" #include "arolla/memory/frame.h" #include "arolla/memory/optional_value.h" #include "arolla/memory/raw_buffer_factory.h" #include "arolla/qexpr/eval_context.h" #include "arolla/qexpr/lifting.h" #include "arolla/qexpr/operators/dense_array/group_lifter.h" #include "arolla/qexpr/operators/dense_array/lifter.h" #include "arolla/qexpr/operators/testing/accumulators.h" #include "arolla/util/meta.h" #include "arolla/util/text.h" namespace arolla::testing { using ::absl_testing::StatusIs; using ::testing::ElementsAre; using ::testing::HasSubstr; struct TemplatedAddFn { template <typename T> T operator()(T a, T b) const { return a + b; } }; struct TemplatedAddOneFn { template <typename T> T operator()(T a) const { return a + 1; } }; TEST(Lifter, SimpleCase) { DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3}); DenseArray<int> arr2 = CreateDenseArray<int>({3, 6, {}, 2}); FrameLayout frame_layout; RootEvaluationContext root_ctx(&frame_layout, GetHeapBufferFactory()); EvaluationContext ctx(root_ctx); auto op = DenseArrayLifter<TemplatedAddFn, meta::type_list<int, int>>(); ASSERT_OK_AND_ASSIGN(DenseArray<int> res, op(&ctx, arr1, arr2)); EXPECT_THAT(res, ElementsAre(4, std::nullopt, std::nullopt, 5)); } TEST(Lifter, SizeMismatch) { DenseArray<int> arr1 = CreateDenseArray<int>({1, {}, 2, 3}); DenseArray<int> arr2 = CreateDenseArray<int>({3, 6, {}}); FrameLayout frame_layout; RootEvaluationContext root_ctx(&frame_layout, GetHeapBufferFactory()); EvaluationContext ctx(root_ctx); auto op = DenseArrayLifter<TemplatedAddFn, meta::type_list<int, int>>(); EXPECT_THAT(op(&ctx, arr1, arr2), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("argument sizes mismatch: (4, 3)"))); } TEST(Lifter, UnaryOperation) { DenseArray<int> arr = CreateDenseArray<int>({1, {}, 2, 3}); FrameLayout frame_layout; RootEvaluationContext root_ctx(&frame_layout, GetHeapBufferFactory()); EvaluationContext ctx(root_ctx); auto op = DenseArrayLifter<TemplatedAddOneFn, meta::type_list<int>>(); ASSERT_OK_AND_ASSIGN(DenseArray<int> res, op(&ctx, arr)); EXPECT_THAT(res, ElementsAre(2, std::nullopt, 3, 4)); } TEST(Lifter, NonLiftableArg) { DenseArray<int> arr = CreateDenseArray<int>({1, {}, 2, 3}); FrameLayout frame_layout; RootEvaluationContext root_ctx(&frame_layout, GetHeapBufferFactory()); EvaluationContext ctx(root_ctx); auto op = DenseArrayLifter<TemplatedAddFn, meta::type_list<DoNotLiftTag<int>, int>>(); ASSERT_OK_AND_ASSIGN(DenseArray<int> res, op(&ctx, 5, arr)); EXPECT_THAT(res, ElementsAre(6, std::nullopt, 7, 8)); } struct MyInt { int value; friend int operator+(int x, MyInt y) { return y.value + x; } }; template <typename... Ts> struct TemplatedVariadicAddFn { int operator()(Ts... vs) const { return (0 + ... 
+ vs); } }; TEST(Lifter, NonLiftableArgs) { DenseArray<int> arr = CreateDenseArray<int>({1, {}, 2, 3}); FrameLayout frame_layout; RootEvaluationContext root_ctx(&frame_layout, GetHeapBufferFactory()); EvaluationContext ctx(root_ctx); { auto op = DenseArrayLifter< TemplatedVariadicAddFn<MyInt, MyInt, int>, meta::type_list<DoNotLiftTag<MyInt>, DoNotLiftTag<MyInt>, int>>(); ASSERT_OK_AND_ASSIGN(DenseArray<int> res, op(&ctx, MyInt{3}, MyInt{5}, arr)); EXPECT_THAT(res, ElementsAre(9, std::nullopt, 10, 11)); } { auto op = DenseArrayLifter< TemplatedVariadicAddFn<MyInt, int, MyInt>, meta::type_list<DoNotLiftTag<MyInt>, int, DoNotLiftTag<MyInt>>>(); ASSERT_OK_AND_ASSIGN(DenseArray<int> res, op(&ctx, MyInt{3}, arr, MyInt{5})); EXPECT_THAT(res, ElementsAre(9, std::nullopt, 10, 11)); } { auto op = DenseArrayLifter< TemplatedVariadicAddFn<int, MyInt, MyInt>, meta::type_list<int, DoNotLiftTag<MyInt>, DoNotLiftTag<MyInt>>>(); ASSERT_OK_AND_ASSIGN(DenseArray<int> res, op(&ctx, arr, MyInt{3}, MyInt{5})); EXPECT_THAT(res, ElementsAre(9, std::nullopt, 10, 11)); } { auto op = DenseArrayLifter<TemplatedVariadicAddFn<int, MyInt, int>, meta::type_list<int, DoNotLiftTag<MyInt>, int>>(); ASSERT_OK_AND_ASSIGN(DenseArray<int> res, op(&ctx, arr, MyInt{3}, arr)); EXPECT_THAT(res, ElementsAre(5, std::nullopt, 7, 9)); } { auto op = DenseArrayLifter< TemplatedVariadicAddFn<MyInt, int, MyInt, int>, meta::type_list<DoNotLiftTag<MyInt>, int, DoNotLiftTag<MyInt>, int>>(); ASSERT_OK_AND_ASSIGN(DenseArray<int> res, op(&ctx, MyInt{5}, arr, MyInt{3}, arr)); EXPECT_THAT(res, ElementsAre(10, std::nullopt, 12, 14)); } { auto op = DenseArrayLifter< TemplatedVariadicAddFn<int, MyInt, int, MyInt>, meta::type_list<int, DoNotLiftTag<MyInt>, int, DoNotLiftTag<MyInt>>>(); ASSERT_OK_AND_ASSIGN(DenseArray<int> res, op(&ctx, arr, MyInt{3}, arr, MyInt{5})); EXPECT_THAT(res, ElementsAre(10, std::nullopt, 12, 14)); } { auto op = DenseArrayLifter< TemplatedVariadicAddFn<int, MyInt, int, MyInt, MyInt>, meta::type_list<int, DoNotLiftTag<MyInt>, int, DoNotLiftTag<MyInt>, DoNotLiftTag<MyInt>>>(); ASSERT_OK_AND_ASSIGN(DenseArray<int> res, op(&ctx, arr, MyInt{3}, arr, MyInt{5}, MyInt{4})); EXPECT_THAT(res, ElementsAre(14, std::nullopt, 16, 18)); } } TEST(GroupLifter, AggTextAccumulator) { auto values = CreateDenseArray<Text>( {Text("w1"), std::nullopt, Text("w3"), Text("w4"), Text("w5")}); auto comments = CreateDenseArray<Text>({std::nullopt, Text("it is word #2"), std::nullopt, Text("it is word #4"), std::nullopt}); FrameLayout frame_layout; RootEvaluationContext root_ctx(&frame_layout, GetHeapBufferFactory()); EvaluationContext ctx(root_ctx); auto op = DenseArrayGroupLifter<AggTextAccumulator, meta::type_list<OptionalValue<Text>>, meta::type_list<Text, OptionalValue<Text>>>(); ASSERT_OK_AND_ASSIGN(Text res, op(&ctx, Text("prefix:"), values, comments, DenseArrayGroupScalarEdge(values.size()))); EXPECT_EQ(res.view(), "prefix:w1\nw3\nw4 (it is word #4)\nw5\n"); } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/dense_array/dense_array.cc
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/dense_array/dense_array_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
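A short sketch of the pointwise lifting pattern exercised by Lifter.SimpleCase in the test above: a scalar functor is lifted over DenseArrays and missing values propagate automatically. The AddFn struct and function name are local illustrations; everything else is the API used verbatim in the tests.
// Sketch: lift a templated scalar add over two DenseArray<int>.
#include "absl/status/statusor.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operators/dense_array/lifter.h"
#include "arolla/util/meta.h"

namespace arolla {
struct AddFn {  // hypothetical functor, same shape as TemplatedAddFn above
  template <typename T>
  T operator()(T a, T b) const { return a + b; }
};

absl::StatusOr<DenseArray<int>> AddArrays() {
  DenseArray<int> a = CreateDenseArray<int>({1, {}, 2, 3});
  DenseArray<int> b = CreateDenseArray<int>({3, 6, {}, 2});
  FrameLayout layout;
  RootEvaluationContext root_ctx(&layout, GetHeapBufferFactory());
  EvaluationContext ctx(root_ctx);
  auto op = DenseArrayLifter<AddFn, meta::type_list<int, int>>();
  // Missing inputs yield missing outputs: {4, missing, missing, 5}.
  return op(&ctx, a, b);
}
}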
ca4f7ed1-b2c4-4390-98ec-f748a7ec5ee4
cpp
google/tensorstore
staleness_bound
tensorstore/internal/json_binding/staleness_bound.cc
tensorstore/internal/json_binding/staleness_bound_test.cc
#include "tensorstore/internal/json_binding/staleness_bound.h" #include "absl/status/status.h" #include "absl/time/time.h" #include <nlohmann/json.hpp> #include "tensorstore/internal/json/value_as.h" #include "tensorstore/internal/json_binding/bindable.h" #include "tensorstore/internal/json_binding/json_binding.h" namespace tensorstore { namespace internal { TENSORSTORE_DEFINE_JSON_BINDER( StalenessBoundJsonBinder, [](auto is_loading, const auto& options, auto* obj, ::nlohmann::json* j) -> absl::Status { if constexpr (is_loading) { if (const auto* b = j->get_ptr<const bool*>()) { *obj = *b ? absl::InfiniteFuture() : absl::InfinitePast(); } else if (j->is_number()) { const double t = static_cast<double>(*j); *obj = absl::UnixEpoch() + absl::Seconds(t); } else if (*j == "open") { obj->time = absl::InfiniteFuture(); obj->bounded_by_open_time = true; } else { return internal_json::ExpectedError(*j, "boolean, number, or \"open\""); } } else { if (obj->bounded_by_open_time) { *j = "open"; } else { const absl::Time& t = obj->time; if (t == absl::InfiniteFuture()) { *j = true; } else if (t == absl::InfinitePast()) { *j = false; } else { *j = absl::ToDoubleSeconds(t - absl::UnixEpoch()); } } } return absl::OkStatus(); }) } }
#include "tensorstore/internal/json_binding/staleness_bound.h" #include <memory> #include <type_traits> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/time/time.h" #include <nlohmann/json.hpp> #include "tensorstore/internal/json_binding/bindable.h" #include "tensorstore/internal/json_binding/gtest.h" #include "tensorstore/internal/json_gtest.h" #include "tensorstore/json_serialization_options_base.h" #include "tensorstore/staleness_bound.h" using ::tensorstore::MatchesJson; using ::tensorstore::StalenessBound; using ::testing::Optional; namespace { TEST(StalenessBoundJsonBinderTest, RoundTrip) { tensorstore::TestJsonBinderToJson<StalenessBound>({ {StalenessBound{absl::InfinitePast()}, Optional(MatchesJson(false))}, {StalenessBound{absl::InfiniteFuture()}, Optional(MatchesJson(true))}, {StalenessBound::BoundedByOpen(), Optional(MatchesJson("open"))}, {StalenessBound{absl::UnixEpoch()}, Optional(MatchesJson(0))}, {StalenessBound{absl::UnixEpoch() + absl::Seconds(1)}, Optional(MatchesJson(1))}, }); } TEST(StalenessBoundJsonBinderTest, FromJson) { tensorstore::TestJsonBinderFromJson<StalenessBound>({ {false, ::testing::Optional(::testing::AllOf( ::testing::Field(&StalenessBound::time, absl::InfinitePast()), ::testing::Field(&StalenessBound::bounded_by_open_time, false)))}, {true, ::testing::Optional(::testing::AllOf( ::testing::Field(&StalenessBound::time, absl::InfiniteFuture()), ::testing::Field(&StalenessBound::bounded_by_open_time, false)))}, {"open", ::testing::Optional(::testing::Field( &StalenessBound::bounded_by_open_time, true))}, {0, ::testing::Optional(::testing::AllOf( ::testing::Field(&StalenessBound::time, absl::UnixEpoch()), ::testing::Field(&StalenessBound::bounded_by_open_time, false)))}, {1, ::testing::Optional(::testing::AllOf( ::testing::Field(&StalenessBound::time, absl::UnixEpoch() + absl::Seconds(1)), ::testing::Field(&StalenessBound::bounded_by_open_time, false)))}, {1u, ::testing::Optional(::testing::AllOf( ::testing::Field(&StalenessBound::time, absl::UnixEpoch() + absl::Seconds(1)), ::testing::Field(&StalenessBound::bounded_by_open_time, false)))}, {1.5, ::testing::Optional(::testing::AllOf( ::testing::Field(&StalenessBound::time, absl::UnixEpoch() + absl::Milliseconds(1500)), ::testing::Field(&StalenessBound::bounded_by_open_time, false)))}, }); } }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/staleness_bound.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/staleness_bound_test.cc
4f887a6430414cd6088e1743555015b10f116d50
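A hedged sketch of how the StalenessBound binder above might be driven outside the test harness. The FromJson/ToJson helper spellings and the internal_json_binding namespace alias are assumptions about tensorstore's binder plumbing, not confirmed by this row; the semantics ("open" maps to an infinite-future bound with bounded_by_open_time set) come directly from the binder source above.
// Sketch: round-trip "open" through StalenessBoundJsonBinder.
// Helper names below (jb::FromJson, jb::ToJson) are assumed, not verified.
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/staleness_bound.h"
#include "tensorstore/staleness_bound.h"

namespace jb = tensorstore::internal_json_binding;  // assumed alias

void DemoStalenessBound() {  // hypothetical driver
  // Loading: "open" -> time = InfiniteFuture, bounded_by_open_time = true.
  auto bound = jb::FromJson<tensorstore::StalenessBound>(
      ::nlohmann::json("open"),
      tensorstore::internal::StalenessBoundJsonBinder);
  // Saving: a bounded-by-open-time value serializes back to "open".
  auto j = jb::ToJson(*bound, tensorstore::internal::StalenessBoundJsonBinder);
}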
ccf41526-44d8-4845-b376-164154cfd8fb
cpp
tensorflow/tensorflow
fuse_mul_to_conv
tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.cc
tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv_test.cc
#include "tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.h" #include <any> #include <memory> #include <string> #include <variant> #include <vector> #include "absl/types/any.h" #include "absl/types/variant.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { class MergeConvolutionWithMul : public SequenceTransformation { public: int ExpectedSequenceLength() const final { return 2; } TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence, GraphFloat32* graph) final { auto& conv_node = *sequence[0]; if (graph->FindInputs(conv_node.id).size() != 1) { return {TransformStatus::DECLINED, "This fusion is only applicable to ops with one runtime input."}; } auto& mul_node = *sequence[1]; if (mul_node.operation.type != ToString(OperationType::MUL) || !mul_node.operation.attributes.has_value()) { return {TransformStatus::SKIPPED, ""}; } ElementwiseAttributes mul_attr = std::any_cast<ElementwiseAttributes>(mul_node.operation.attributes); if (!std::holds_alternative<Tensor<Linear, DataType::FLOAT32>>( mul_attr.param) && !std::holds_alternative<float>(mul_attr.param)) { return { TransformStatus::DECLINED, "This fuse applicable only for broadcast or scalar multiplication."}; } if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) { Convolution2DAttributes* conv_attr = std::any_cast<Convolution2DAttributes>( &conv_node.operation.attributes); FuseConvolution2DWithMultiply(mul_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_TRANSPOSED)) { ConvolutionTransposedAttributes* conv_attr = std::any_cast<ConvolutionTransposedAttributes>( &conv_node.operation.attributes); FuseConvolutionTransposedWithMultiply(mul_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::DEPTHWISE_CONVOLUTION)) { DepthwiseConvolution2DAttributes* conv_attr = std::any_cast<DepthwiseConvolution2DAttributes>( &conv_node.operation.attributes); FuseDepthwiseConvolution2DWithMultiply(mul_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::FULLY_CONNECTED)) { FullyConnectedAttributes* conv_attr = std::any_cast<FullyConnectedAttributes>( &conv_node.operation.attributes); FuseFullyConnectedWithMultiply(mul_attr, conv_attr); } else { return {TransformStatus::SKIPPED, ""}; } absl::Status status = RemoveFollowingNode(graph, &mul_node, &conv_node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove mul node after convolution: " + std::string(status.message())}; } return {TransformStatus::APPLIED, ""}; } }; class MergeMulWithConvolution : public SequenceTransformation { public: int ExpectedSequenceLength() const final { return 2; } TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence, GraphFloat32* graph) final { auto& conv_node = *sequence[1]; if (graph->FindInputs(conv_node.id).size() != 1) { return {TransformStatus::DECLINED, "This fusion is only applicable to ops with one runtime input."}; } auto& mul_node = *sequence[0]; if (mul_node.operation.type != ToString(OperationType::MUL) || !mul_node.operation.attributes.has_value()) { return 
{TransformStatus::SKIPPED, ""}; } ElementwiseAttributes mul_attr = std::any_cast<ElementwiseAttributes>(mul_node.operation.attributes); if (!std::holds_alternative<Tensor<Linear, DataType::FLOAT32>>( mul_attr.param) && !std::holds_alternative<float>(mul_attr.param)) { return { TransformStatus::DECLINED, "This fuse applicable only for broadcast or scalar multiplication."}; } if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) { Convolution2DAttributes* conv_attr = std::any_cast<Convolution2DAttributes>( &conv_node.operation.attributes); FuseMultiplyWithConvolution2D(mul_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_TRANSPOSED)) { ConvolutionTransposedAttributes* conv_attr = std::any_cast<ConvolutionTransposedAttributes>( &conv_node.operation.attributes); FuseMultiplyWithConvolutionTransposed(mul_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::DEPTHWISE_CONVOLUTION)) { DepthwiseConvolution2DAttributes* conv_attr = std::any_cast<DepthwiseConvolution2DAttributes>( &conv_node.operation.attributes); FuseMultiplyWithDepthwiseConvolution2D(mul_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::FULLY_CONNECTED)) { FullyConnectedAttributes* conv_attr = std::any_cast<FullyConnectedAttributes>( &conv_node.operation.attributes); FuseMultiplyWithFullyConnected(mul_attr, conv_attr); } else { return {TransformStatus::SKIPPED, ""}; } absl::Status status = RemovePrecedingNode(graph, &mul_node, &conv_node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove mul node after convolution: " + std::string(status.message())}; } return {TransformStatus::APPLIED, ""}; } }; } std::unique_ptr<SequenceTransformation> NewMergeConvolutionWithMul() { return std::make_unique<MergeConvolutionWithMul>(); } std::unique_ptr<SequenceTransformation> NewMergeMulWithConvolution() { return std::make_unique<MergeMulWithConvolution>(); } void FuseConvolution2DWithMultiply(const ElementwiseAttributes& mul_attr, Convolution2DAttributes* attr) { auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = std::get_if<float>(&mul_attr.param); for (int d = 0; d < attr->weights.shape.o; ++d) { const float multiplier = mul ? mul->data[d] : *mul_scalar; for (int s = 0; s < attr->weights.shape.i; ++s) { for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } } if (!attr->bias.data.empty()) { attr->bias.data[d] *= multiplier; } } } void FuseDepthwiseConvolution2DWithMultiply( const ElementwiseAttributes& mul_attr, DepthwiseConvolution2DAttributes* attr) { auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = std::get_if<float>(&mul_attr.param); for (int g = 0; g < attr->weights.shape.o; ++g) { for (int s = 0; s < attr->weights.shape.i; ++s) { const int d = s * attr->weights.shape.o + g; const float multiplier = mul ? 
mul->data[d] : *mul_scalar; for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{g, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } if (!attr->bias.data.empty()) { attr->bias.data[d] *= multiplier; } } } } void FuseConvolutionTransposedWithMultiply( const ElementwiseAttributes& mul_attr, ConvolutionTransposedAttributes* attr) { auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = std::get_if<float>(&mul_attr.param); for (int d = 0; d < attr->weights.shape.o; ++d) { const float multiplier = mul ? mul->data[d] : *mul_scalar; for (int s = 0; s < attr->weights.shape.i; ++s) { for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } } if (!attr->bias.data.empty()) { attr->bias.data[d] *= multiplier; } } } void FuseFullyConnectedWithMultiply(const ElementwiseAttributes& mul_attr, FullyConnectedAttributes* attr) { auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = std::get_if<float>(&mul_attr.param); for (int d = 0; d < attr->weights.shape.o; ++d) { const float multiplier = mul ? mul->data[d] : *mul_scalar; for (int s = 0; s < attr->weights.shape.i; ++s) { const int index = attr->weights.shape.LinearIndex({{d, 0, 0, s}}); attr->weights.data[index] *= multiplier; } if (!attr->bias.data.empty()) { attr->bias.data[d] *= multiplier; } } } void FuseMultiplyWithConvolution2D(const ElementwiseAttributes& mul_attr, Convolution2DAttributes* attr) { auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = std::get_if<float>(&mul_attr.param); for (int s = 0; s < attr->weights.shape.i; ++s) { const float multiplier = mul ? mul->data[s] : *mul_scalar; for (int d = 0; d < attr->weights.shape.o; ++d) { for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } } } } void FuseMultiplyWithDepthwiseConvolution2D( const ElementwiseAttributes& mul_attr, DepthwiseConvolution2DAttributes* attr) { auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = std::get_if<float>(&mul_attr.param); for (int s = 0; s < attr->weights.shape.i; ++s) { const float multiplier = mul ? mul->data[s] : *mul_scalar; for (int g = 0; g < attr->weights.shape.o; ++g) { for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{g, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } } } } void FuseMultiplyWithConvolutionTransposed( const ElementwiseAttributes& mul_attr, ConvolutionTransposedAttributes* attr) { auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = std::get_if<float>(&mul_attr.param); for (int s = 0; s < attr->weights.shape.i; ++s) { const float multiplier = mul ? 
mul->data[s] : *mul_scalar; for (int d = 0; d < attr->weights.shape.o; ++d) { for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } } } } void FuseMultiplyWithFullyConnected(const ElementwiseAttributes& mul_attr, FullyConnectedAttributes* attr) { auto mul = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = std::get_if<float>(&mul_attr.param); for (int s = 0; s < attr->weights.shape.i; ++s) { const float multiplier = mul ? mul->data[s] : *mul_scalar; for (int d = 0; d < attr->weights.shape.o; ++d) { const int index = attr->weights.shape.LinearIndex({{d, 0, 0, s}}); attr->weights.data[index] *= multiplier; } } } } }
#include "tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.h" #include <any> #include <memory> #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" using ::testing::FloatNear; using ::testing::Pointwise; namespace tflite { namespace gpu { namespace { TEST(MergeConvolutionWithMulTest, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); input->tensor.shape = BHWC(1, 4, 4, 8); Convolution2DAttributes conv_attr; conv_attr.padding.prepended = HW(0, 0); conv_attr.padding.appended = HW(0, 0); conv_attr.strides = HW(1, 1); conv_attr.dilations = HW(1, 1); conv_attr.weights.shape = OHWI(16, 3, 2, 8); conv_attr.weights.data.resize(conv_attr.weights.shape.DimensionsProduct()); conv_attr.bias.shape = Linear(16); conv_attr.bias.data.resize(16); Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(16); mul_tensor.data.resize(16); ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; auto conv_node = graph.NewNode(); conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D); conv_node->operation.attributes = conv_attr; auto mul_node = graph.NewNode(); mul_node->operation.type = ToString(OperationType::MUL); mul_node->operation.attributes = mul_attr; ASSERT_TRUE(graph.AddConsumer(conv_node->id, input->id).ok()); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, mul_node, &output).ok()); output->tensor.shape = BHWC(1, 4, 4, 16); Value* link1 = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, conv_node, mul_node, &link1).ok()); link1->tensor.shape = BHWC(1, 4, 4, 16); ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); auto transformation = NewMergeConvolutionWithMul(); ModelTransformer transformer(&graph); transformer.Apply("merge_convolution_with_mul", transformation.get()); EXPECT_EQ(1, graph.nodes().size()); EXPECT_EQ(2, graph.values().size()); EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D), graph.nodes()[0]->operation.type); } TEST(MergeMulWithConvolutionTest, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); input->tensor.shape = BHWC(1, 4, 4, 8); Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(8); mul_tensor.data.resize(8); ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; Convolution2DAttributes conv_attr; conv_attr.padding.prepended = HW(0, 0); conv_attr.padding.appended = HW(0, 0); conv_attr.strides = HW(1, 1); conv_attr.dilations = HW(1, 1); conv_attr.weights.shape = OHWI(16, 3, 2, 8); conv_attr.weights.data.resize(conv_attr.weights.shape.DimensionsProduct()); conv_attr.bias.shape = Linear(16); conv_attr.bias.data.resize(16); auto conv_node = graph.NewNode(); conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D); conv_node->operation.attributes = conv_attr; auto mul_node = graph.NewNode(); mul_node->operation.type = ToString(OperationType::MUL); mul_node->operation.attributes = mul_attr; ASSERT_TRUE(graph.AddConsumer(mul_node->id, input->id).ok()); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, conv_node, &output).ok()); output->tensor.shape = BHWC(1, 4, 4, 16); Value* link1 = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, 
mul_node, conv_node, &link1).ok()); link1->tensor.shape = BHWC(1, 4, 4, 16); ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); auto transformation = NewMergeMulWithConvolution(); ModelTransformer transformer(&graph); transformer.Apply("merge_mul_with_convolution", transformation.get()); EXPECT_EQ(1, graph.nodes().size()); EXPECT_EQ(2, graph.values().size()); EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D), graph.nodes()[0]->operation.type); } TEST(FuseMulAfterConvolution2DTest, Smoke) { Convolution2DAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseConvolution2DWithMultiply(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.1f, 0.15f, 0.2f, 1.0f, 1.2f, 1.4f, 1.6f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {0.75f, 5.0f})); } TEST(FuseMulAfterDepthwiseConvolution2DTest, Smoke) { DepthwiseConvolution2DAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(4); attr.bias.data = {1.5f, 2.5f, 1.0f, 2.0f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(4); mul_tensor.data = {0.5f, 2.0f, 4.0f, 0.25f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseDepthwiseConvolution2DWithMultiply(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.8f, 0.15f, 1.6f, 1.0f, 0.15f, 1.4f, 0.2f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {0.75f, 5.0f, 4.0f, 0.5f})); } TEST(FuseMulAfterConvolutionTransposedTest, Smoke) { ConvolutionTransposedAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseConvolutionTransposedWithMultiply(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.1f, 0.15f, 0.2f, 1.0f, 1.2f, 1.4f, 1.6f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {0.75f, 5.0f})); } TEST(FuseMulAfterFullyConnectedTest, Smoke) { FullyConnectedAttributes attr; attr.weights.shape = OHWI(2, 1, 1, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseFullyConnectedWithMultiply(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.1f, 0.6f, 0.8f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {0.75f, 5.0f})); } TEST(FuseMulBeforeConvolution2DTest, Smoke) { Convolution2DAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; 
FuseMultiplyWithConvolution2D(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.4f, 0.15f, 0.8f, 0.25f, 1.2f, 0.35f, 1.6f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.5f, 2.5f})); } TEST(FuseMulBeforeDepthwiseConvolution2DTest, Smoke) { DepthwiseConvolution2DAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(4); attr.bias.data = {1.5f, 2.5f, 1.0f, 2.0f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(4); mul_tensor.data = {0.5f, 2.0f, 4.0f, 0.25f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseMultiplyWithDepthwiseConvolution2D(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.4f, 0.15f, 0.8f, 0.25f, 1.2f, 0.35f, 1.6f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.5f, 2.5f, 1.0f, 2.0f})); } TEST(FuseMulBeforeConvolutionTransposedTest, Smoke) { ConvolutionTransposedAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseMultiplyWithConvolutionTransposed(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.4f, 0.15f, 0.8f, 0.25f, 1.2f, 0.35f, 1.6f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.5f, 2.5f})); } TEST(FuseMulBeforeFullyConnectedTest, Smoke) { FullyConnectedAttributes attr; attr.weights.shape = OHWI(2, 1, 1, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseMultiplyWithFullyConnected(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.4f, 0.15f, 0.8f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.5f, 2.5f})); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
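A compact sketch of the weight-folding arithmetic implemented by the row above, using the exact values of FuseMulAfterConvolution2DTest: a per-output-channel multiplier is folded into the convolution weights and bias, so the standalone MUL node can be removed. Only the wrapper function name is invented.
// Sketch: fold a per-channel multiply into Convolution2D attributes.
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.h"

namespace tflite::gpu {
void DemoFuse() {  // hypothetical driver
  Convolution2DAttributes attr;
  attr.weights.shape = OHWI(2, 1, 2, 2);
  attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
  attr.bias.shape = Linear(2);
  attr.bias.data = {1.5f, 2.5f};

  Tensor<Linear, DataType::FLOAT32> mul_tensor;
  mul_tensor.shape = Linear(2);
  mul_tensor.data = {0.5f, 2.0f};
  ElementwiseAttributes mul_attr;
  mul_attr.param = mul_tensor;

  // Every weight feeding output channel d is scaled by mul[d]; the bias for
  // channel d is scaled the same way.
  FuseConvolution2DWithMultiply(mul_attr, &attr);
  // attr.weights.data == {0.05, 0.1, 0.15, 0.2, 1.0, 1.2, 1.4, 1.6}
  // attr.bias.data    == {0.75, 5.0}
}
}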
7101ad49-0209-4740-b2c9-7c1c1361a321
cpp
google/arolla
dense_group_ops
arolla/dense_array/ops/dense_group_ops.h
arolla/dense_array/ops/dense_group_ops_test.cc
#ifndef AROLLA_DENSE_ARRAY_OPS_DENSE_GROUP_OPS_H_ #define AROLLA_DENSE_ARRAY_OPS_DENSE_GROUP_OPS_H_ #include <cstddef> #include <cstdint> #include <type_traits> #include <utility> #include <vector> #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "arolla/dense_array/dense_array.h" #include "arolla/dense_array/edge.h" #include "arolla/dense_array/ops/util.h" #include "arolla/memory/optional_value.h" #include "arolla/memory/raw_buffer_factory.h" #include "arolla/util/meta.h" #include "arolla/util/view_types.h" #include "arolla/util/status_macros_backport.h" namespace arolla { namespace dense_ops_internal { template <class Accumulator, class ParentTypes, class ChildTypes, bool ForwardId = false> class DenseGroupOpsImpl; template <class Accumulator, class... ParentTs, class... ChildTs, bool ForwardId> class DenseGroupOpsImpl<Accumulator, meta::type_list<ParentTs...>, meta::type_list<ChildTs...>, ForwardId> { using ParentUtil = DenseOpsUtil<meta::type_list<ParentTs...>>; using ChildUtil = DenseOpsUtil<meta::type_list<ChildTs...>>; using ResT = strip_optional_t<typename Accumulator::result_type>; static constexpr bool kIsAggregator = Accumulator::IsAggregator(); static constexpr bool kIsPartial = Accumulator::IsPartial(); static constexpr bool kIsFull = Accumulator::IsFull(); public: explicit DenseGroupOpsImpl(RawBufferFactory* buffer_factory, Accumulator empty_accumulator = Accumulator()) : buffer_factory_(buffer_factory), empty_accumulator_(std::move(empty_accumulator)) {} absl::StatusOr<DenseArray<ResT>> Apply( const DenseArrayEdge& edge, const AsDenseArray<ParentTs>&... p_args, const AsDenseArray<ChildTs>&... c_args) const { if (((p_args.size() != edge.parent_size()) || ... || false)) { return SizeMismatchError({edge.parent_size(), p_args.size()...}); } if (((c_args.size() != edge.child_size()) || ... || false)) { return SizeMismatchError({edge.child_size(), c_args.size()...}); } switch (edge.edge_type()) { case DenseArrayEdge::SPLIT_POINTS: { const auto& split_points = edge.edge_values(); return ApplyWithSplitPoints(edge.parent_size(), edge.child_size(), split_points, p_args..., c_args...); } case DenseArrayEdge::MAPPING: { const auto& mapping = edge.edge_values(); return ApplyWithMapping(edge.parent_size(), edge.child_size(), mapping, p_args..., c_args...); } default: return absl::InvalidArgumentError("unsupported edge type"); } } absl::StatusOr<std::conditional_t< kIsAggregator, typename Accumulator::result_type, DenseArray<ResT>>> Apply(const DenseArrayGroupScalarEdge& edge, view_type_t<ParentTs>... p_args, const AsDenseArray<ChildTs>&... c_args) const { if (((c_args.size() != edge.child_size()) || ... || false)) { return SizeMismatchError({edge.child_size(), c_args.size()...}); } Accumulator accumulator = empty_accumulator_; accumulator.Reset(p_args...); if constexpr (kIsAggregator) { auto fn = [&](int64_t child_id, bool child_row_valid, view_type_t<ChildTs>... args) { if (child_row_valid) { Add(accumulator, child_id, args...); } }; ChildUtil::Iterate(fn, 0, edge.child_size(), c_args...); auto res = accumulator.GetResult(); RETURN_IF_ERROR(accumulator.GetStatus()); return typename Accumulator::result_type(std::move(res)); } else { DenseArrayBuilder<ResT> builder(edge.child_size(), buffer_factory_); std::vector<int64_t> processed_rows; auto fn = [&](int64_t child_id, bool child_row_valid, view_type_t<ChildTs>... 
args) { if (child_row_valid) { Add(accumulator, child_id, args...); if constexpr (kIsPartial) { builder.Set(child_id, accumulator.GetResult()); } else if constexpr (kIsFull) { processed_rows.push_back(child_id); } } }; ChildUtil::Iterate(fn, 0, edge.child_size(), c_args...); if constexpr (kIsFull) { accumulator.FinalizeFullGroup(); for (int64_t row_id : processed_rows) { builder.Set(row_id, accumulator.GetResult()); } } RETURN_IF_ERROR(accumulator.GetStatus()); return std::move(builder).Build(); } } private: absl::StatusOr<DenseArray<ResT>> ApplyWithMapping( int64_t parent_row_count, int64_t child_row_count, const DenseArray<int64_t>& mapping, const AsDenseArray<ParentTs>&... p_values, const AsDenseArray<ChildTs>&... c_values) const { DCHECK_EQ(child_row_count, mapping.size()); using MappingAndChildUtil = DenseOpsUtil<meta::type_list<int64_t, ChildTs...>>; std::vector<Accumulator> accumulators(parent_row_count, empty_accumulator_); std::vector<bool> valid_groups(parent_row_count, false); { auto fn = [&](int64_t group, bool valid, view_type_t<ParentTs>... args) { if (valid) accumulators[group].Reset(args...); valid_groups[group] = valid; }; ParentUtil::IterateFromZero(fn, parent_row_count, p_values...); } std::vector<bool> processed_child_rows; if constexpr (kIsFull) { processed_child_rows.resize(child_row_count, false); } const int64_t result_row_count = kIsAggregator ? parent_row_count : child_row_count; DenseArrayBuilder<ResT> builder(result_row_count, buffer_factory_); auto process_child_row_fn = [&](int64_t child_id, bool valid, int64_t parent_id, view_type_t<ChildTs>... args) { if (!valid || !valid_groups[parent_id]) return; auto& accumulator = accumulators[parent_id]; Add(accumulator, child_id, args...); if constexpr (kIsFull) { processed_child_rows[child_id] = true; } if constexpr (kIsPartial) { builder.Set(child_id, accumulator.GetResult()); } }; MappingAndChildUtil::Iterate(process_child_row_fn, 0, child_row_count, mapping, c_values...); if constexpr (kIsFull) { int64_t parent_id = 0; for (bool valid : valid_groups) { if (valid) accumulators[parent_id].FinalizeFullGroup(); parent_id++; } for (int64_t child_id = 0; child_id < processed_child_rows.size(); ++child_id) { if (processed_child_rows[child_id]) { int64_t parent_id = mapping.values[child_id]; DCHECK(valid_groups[parent_id]) << "Child rows from invalid groups shouldn't be processed"; builder.Set(child_id, accumulators[parent_id].GetResult()); } } } for (int64_t parent_id = 0; parent_id < parent_row_count; ++parent_id) { if (valid_groups[parent_id]) { if constexpr (kIsAggregator) { builder.Set(parent_id, accumulators[parent_id].GetResult()); } RETURN_IF_ERROR(accumulators[parent_id].GetStatus()); } } return std::move(builder).Build(); } template <size_t... GIs> absl::StatusOr<DenseArray<ResT>> ApplyWithSplitPoints( int64_t parent_row_count, int64_t child_row_count, const DenseArray<int64_t>& splits, const AsDenseArray<ParentTs>&... p_values, const AsDenseArray<ChildTs>&... c_values) const { if (splits.size() != parent_row_count + 1) { return absl::InvalidArgumentError( "splits row count is not compatible with parent row count"); } const int64_t result_row_count = kIsAggregator ? parent_row_count : child_row_count; DenseArrayBuilder<ResT> builder(result_row_count, buffer_factory_); std::vector<int64_t> processed_rows; Accumulator accumulator = empty_accumulator_; ParentUtil::IterateFromZero( [&](int64_t parent_id, bool parent_valid, view_type_t<ParentTs>... 
args) { if (parent_valid) { accumulator.Reset(args...); ProcessSingleGroupWithSplitPoints(parent_id, splits, c_values..., processed_rows, accumulator, builder); } }, parent_row_count, p_values...); RETURN_IF_ERROR(accumulator.GetStatus()); return std::move(builder).Build(); } void ProcessSingleGroupWithSplitPoints( int64_t parent_id, const DenseArray<int64_t>& splits, const AsDenseArray<ChildTs>&... c_values, std::vector<int64_t>& processed_rows, Accumulator& accumulator, DenseArrayBuilder<ResT>& builder) const { DCHECK(splits.present(parent_id)); DCHECK(splits.present(parent_id + 1)); int64_t child_from = splits.values[parent_id]; int64_t child_to = splits.values[parent_id + 1]; auto fn = [&](int64_t child_id, bool child_row_valid, view_type_t<ChildTs>... args) { if (child_row_valid) { Add(accumulator, child_id, args...); if constexpr (kIsPartial) { builder.Set(child_id, accumulator.GetResult()); } else if constexpr (kIsFull) { processed_rows.push_back(child_id); } } }; ChildUtil::Iterate(fn, child_from, child_to, c_values...); if constexpr (kIsAggregator) { builder.Set(parent_id, accumulator.GetResult()); } else if constexpr (kIsFull) { accumulator.FinalizeFullGroup(); for (int64_t row_id : processed_rows) { builder.Set(row_id, accumulator.GetResult()); } processed_rows.clear(); } } void Add(Accumulator& accumulator, int64_t child_id, view_type_t<ChildTs>... args) const { if constexpr (ForwardId) { accumulator.Add(child_id, args...); } else { (void)child_id; accumulator.Add(args...); } } RawBufferFactory* buffer_factory_; const Accumulator empty_accumulator_; }; } template <class Accumulator> using DenseGroupOps = dense_ops_internal::DenseGroupOpsImpl<Accumulator, typename Accumulator::parent_types, typename Accumulator::child_types>; template <class Accumulator> using DenseGroupOpsWithId = dense_ops_internal::DenseGroupOpsImpl< Accumulator, typename Accumulator::parent_types, meta::tail_t<typename Accumulator::child_types>, true>; } #endif
#include "arolla/dense_array/ops/dense_group_ops.h" #include <cstdint> #include <optional> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/status/status.h" #include "absl/status/status_matchers.h" #include "absl/strings/string_view.h" #include "arolla/dense_array/dense_array.h" #include "arolla/dense_array/edge.h" #include "arolla/dense_array/testing/util.h" #include "arolla/memory/raw_buffer_factory.h" #include "arolla/qexpr/operators/testing/accumulators.h" #include "arolla/util/text.h" namespace arolla { namespace { using ::absl_testing::IsOkAndHolds; using ::absl_testing::StatusIs; using ::arolla::testing::CreateDenseArrayFromIdValues; using ::testing::ElementsAre; using ::testing::HasSubstr; using ::testing::Test; TEST(DenseGroupOps, FullArrayGroupSum) { auto values = CreateDenseArray<float>({5.0f, 8.0f, 3.0f, 6.0f}); auto detail_to_group = CreateDenseArray<int64_t>({1, 1, 2, 3}); auto splits = CreateDenseArray<int64_t>({0, 0, 2, 3, 4}); DenseGroupOps<testing::AggSumAccumulator<float>> agg(GetHeapBufferFactory()); ASSERT_OK_AND_ASSIGN( DenseArrayEdge edge1, DenseArrayEdge::FromMapping(detail_to_group, 4)); EXPECT_THAT(*agg.Apply(edge1, values), ElementsAre(std::nullopt, 13.0f, 3.0f, 6.0f)); ASSERT_OK_AND_ASSIGN(DenseArrayEdge edge2, DenseArrayEdge::FromSplitPoints(splits)); EXPECT_THAT(*agg.Apply(edge2, values), ElementsAre(std::nullopt, 13.0f, 3.0f, 6.0f)); } TEST(DenseGroupOps, ForwardId) { auto splits = CreateDenseArray<int64_t>({0, 0, 2, 3, 4}); ASSERT_OK_AND_ASSIGN(DenseArrayEdge edge, DenseArrayEdge::FromSplitPoints(splits)); std::vector<int64_t> ids; DenseGroupOpsWithId<testing::CollectIdsAccumulator> op( GetHeapBufferFactory(), testing::CollectIdsAccumulator(&ids)); EXPECT_OK(op.Apply(edge).status()); EXPECT_THAT(ids, ElementsAre(0, 1, 2, 3)); } TEST(DenseGroupOps, FullArrayAverageWithErrorStatus) { auto values = CreateDenseArray<float>({5.0f, 8.0f, 3.0f, 6.0f}); auto detail_to_group = CreateDenseArray<int64_t>({1, 1, 2, 3}); auto splits = CreateDenseArray<int64_t>({0, 0, 2, 3, 4}); DenseGroupOps<testing::AverageAccumulator> agg(GetHeapBufferFactory()); ASSERT_OK_AND_ASSIGN( DenseArrayEdge edge1, DenseArrayEdge::FromMapping(detail_to_group, 4)); EXPECT_THAT( agg.Apply(edge1, values), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("empty group"))); ASSERT_OK_AND_ASSIGN(DenseArrayEdge edge2, DenseArrayEdge::FromSplitPoints(splits)); EXPECT_THAT( agg.Apply(edge2, values), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("empty group"))); } TEST(DenseGroupOps, AverageToScalar) { DenseGroupOps<testing::AverageAccumulator> agg(GetHeapBufferFactory()); EXPECT_THAT(agg.Apply(DenseArrayGroupScalarEdge(3), CreateDenseArray<float>({1.0f, 3.0f, 8.0f})), IsOkAndHolds(4.0f)); EXPECT_THAT( agg.Apply(DenseArrayGroupScalarEdge(0), DenseArray<float>()), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("empty group"))); } TEST(DenseGroupOps, AggregationToScalar) { auto values = CreateDenseArray<float>({5.0f, 8.0f, 3.0f, 6.0f}); DenseGroupOps<testing::AggSumAccumulator<float>> agg(GetHeapBufferFactory()); DenseArrayGroupScalarEdge edge(values.size()); EXPECT_EQ(*agg.Apply(edge, values), 22.0f); } TEST(DenseGroupOps, RankValues) { auto values = CreateDenseArray<float>({3.0f, 5.0f, 2.0f, 1.0f, 3.1f, 7.0f}); auto detail_to_group = CreateDenseArray<int64_t>({0, 0, 0, 0, 1, 1}); auto splits = CreateDenseArray<int64_t>({0, 4, 6}); DenseGroupOps<testing::RankValuesAccumulator<float>> agg( GetHeapBufferFactory()); ASSERT_OK_AND_ASSIGN( DenseArrayEdge 
edge1, DenseArrayEdge::FromMapping(detail_to_group, 2)); EXPECT_THAT(*agg.Apply(edge1, values), ElementsAre(1, 0, 2, 3, 1, 0)); ASSERT_OK_AND_ASSIGN(DenseArrayEdge edge2, DenseArrayEdge::FromSplitPoints(splits)); EXPECT_THAT(*agg.Apply(edge2, values), ElementsAre(1, 0, 2, 3, 1, 0)); EXPECT_THAT(*agg.Apply(DenseArrayGroupScalarEdge(values.size()), values), ElementsAre(5, 1, 4, 0, 2, 3)); } TEST(DenseGroupOps, PartialSparseMapping) { auto a = CreateDenseArray<float>({2.0f, 1.0f, 1.0f}); auto b = CreateDenseArray<float>({2.0f, 2.0f, 1.0f}); auto c = CreateDenseArray<float>({0.0f, -1.0f, -1.0f}); auto x = CreateDenseArrayFromIdValues<float>( 100, {{5, 1.0}, {10, 1.0}, {15, 1.0}, {20, 1.0}, {25, 1.0}, {30, 1.0}}); auto y = CreateDenseArrayFromIdValues<float>( 100, {{5, 1.0}, {10, 2.0}, {15, 3.0}, {20, 1.0}, {25, 3.0}, {30, 2.0}}); auto z = CreateDenseArrayFromIdValues<float>( 100, {{5, 1.0}, {10, 2.0}, {15, 1.0}, {20, 2.0}, {25, 1.0}, {30, 2.0}}); auto detail_to_group = CreateDenseArrayFromIdValues<int64_t>( 100, {{0, 0}, {5, 1}, {10, 0}, {15, 1}, {20, 0}, {25, 1}}); DenseGroupOps<testing::WeightedSumAccumulator> agg(GetHeapBufferFactory()); auto expected = CreateDenseArrayFromIdValues<float>( 100, {{5, 2.f}, {10, 6.f}, {15, 6.f}, {20, 4.f}, {25, 6.f}}); ASSERT_OK_AND_ASSIGN(DenseArrayEdge edge, DenseArrayEdge::FromMapping(detail_to_group, 3)); ASSERT_OK_AND_ASSIGN(auto res, agg.Apply(edge, a, b, c, x, y, z)); EXPECT_EQ(res.size(), expected.size()); for (int i = 0; i < res.size(); ++i) { EXPECT_EQ(expected[i], res[i]); } } TEST(DenseGroupOps, PartialDenseMapping) { auto a = CreateDenseArray<float>({2.0f, 1.0f, 1.0f}); auto b = CreateDenseArray<float>({2.0f, 2.0f, std::nullopt}); auto c = CreateDenseArray<float>({0.0f, -1.0f, -1.0f}); auto x = CreateDenseArray<float>({1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}); auto y = CreateDenseArray<float>({1.0f, 2.0f, 3.0f, 1.0f, 3.0f, 2.0f}); auto z = CreateDenseArray<float>({1.f, 2.f, 1.f, std::nullopt, 1.f, 2.f}); auto splits = CreateDenseArray<int64_t>({0, 2, 5, 6}); auto detail_to_group = CreateDenseArray<int64_t>({0, 0, 1, 1, 1, 2}); DenseGroupOps<testing::WeightedSumAccumulator> agg(GetHeapBufferFactory()); ASSERT_OK_AND_ASSIGN(DenseArrayEdge edge1, DenseArrayEdge::FromSplitPoints(splits)); EXPECT_THAT(*agg.Apply(edge1, a, b, c, x, y, z), ElementsAre(4.f, 6.f, 6.f, std::nullopt, 6.f, std::nullopt)); ASSERT_OK_AND_ASSIGN( DenseArrayEdge edge2, DenseArrayEdge::FromMapping(detail_to_group, 3)); EXPECT_THAT(*agg.Apply(edge2, a, b, c, x, y, z), ElementsAre(4.f, 6.f, 6.f, std::nullopt, 6.f, std::nullopt)); } TEST(DenseGroupOps, PartialGroupScalarEdge) { auto x = CreateDenseArray<float>({1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}); auto y = CreateDenseArray<float>({1.0f, 2.0f, 3.0f, 1.0f, 3.0f, 2.0f}); auto z = CreateDenseArray<float>({1.f, 2.f, 1.f, std::nullopt, 1.f, 2.f}); DenseGroupOps<testing::WeightedSumAccumulator> agg(GetHeapBufferFactory()); EXPECT_THAT( *agg.Apply(DenseArrayGroupScalarEdge(6), 2.0f, 2.0f, 0.0f, x, y, z), ElementsAre(4.f, 6.f, 8.f, std::nullopt, 8.f, 6.f)); } TEST(DenseGroupOps, OptionalText) { auto detail_to_group = CreateDenseArray<int64_t>({1, 1, 2, 3, 3}); auto splits = CreateDenseArray<int64_t>({0, 0, 2, 3, 5}); auto prefixes = CreateDenseArray<Text>( {Text("empty"), Text("some:\n"), Text("prefix:\n"), std::nullopt}); auto values = CreateDenseArray<Text>( {Text("w1"), std::nullopt, Text("w3"), Text("w4"), Text("w5")}); auto comments = CreateDenseArray<Text>({std::nullopt, Text("it is word #2"), std::nullopt, Text("it is word #4"), 
std::nullopt}); DenseGroupOps<testing::AggTextAccumulator> agg(GetHeapBufferFactory()); ASSERT_OK_AND_ASSIGN( DenseArrayEdge edge1, DenseArrayEdge::FromMapping(detail_to_group, 4)); ASSERT_OK_AND_ASSIGN(DenseArray<Text> res1, agg.Apply(edge1, prefixes, values, comments)); ASSERT_OK_AND_ASSIGN(DenseArrayEdge edge2, DenseArrayEdge::FromSplitPoints(splits)); ASSERT_OK_AND_ASSIGN(DenseArray<Text> res2, agg.Apply(edge2, prefixes, values, comments)); using V = absl::string_view; EXPECT_THAT(res1, ElementsAre(V("empty"), V("some:\nw1\n"), V("prefix:\nw3\n"), V("w4 (it is word #4)\nw5\n"))); EXPECT_EQ(res1.size(), res2.size()); for (int64_t i = 0; i < res1.size(); ++i) { EXPECT_EQ(res1[i], res2[i]); } } } }
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/dense_array/ops/dense_group_ops.h
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/dense_array/ops/dense_group_ops_test.cc
1ca990dbeca224035efdabffecc7f3738df6b52c
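The dense_group_ops tests above exercise the fact that a split-points edge and an explicit detail-to-group mapping describe the same grouping: the PartialDenseMapping case expects identical results from FromSplitPoints({0, 2, 5, 6}) and FromMapping({0, 0, 1, 1, 1, 2}, 3). Below is a minimal standalone C++ sketch of that equivalence plus a per-group aggregation; it does not use the arolla APIs, and the identifiers (SplitPointsToMapping, GroupSum) are illustrative only.

#include <cstdint>
#include <iostream>
#include <vector>

// Expands split points {0, 2, 5, 6} into per-row group ids {0, 0, 1, 1, 1, 2}:
// rows in [splits[g], splits[g+1]) belong to group g.
std::vector<int64_t> SplitPointsToMapping(const std::vector<int64_t>& splits) {
  std::vector<int64_t> mapping;
  for (size_t g = 0; g + 1 < splits.size(); ++g) {
    for (int64_t row = splits[g]; row < splits[g + 1]; ++row) {
      mapping.push_back(static_cast<int64_t>(g));
    }
  }
  return mapping;
}

// Aggregates values per group, the way a simple sum accumulator would.
std::vector<float> GroupSum(const std::vector<float>& values,
                            const std::vector<int64_t>& mapping,
                            int64_t num_groups) {
  std::vector<float> sums(num_groups, 0.0f);
  for (size_t i = 0; i < values.size(); ++i) sums[mapping[i]] += values[i];
  return sums;
}

int main() {
  std::vector<int64_t> splits = {0, 2, 5, 6};
  std::vector<int64_t> mapping = SplitPointsToMapping(splits);  // {0,0,1,1,1,2}
  std::vector<float> values = {1, 2, 3, 4, 5, 6};
  for (float s : GroupSum(values, mapping, 3)) std::cout << s << " ";
}

Compiled and run, this prints "3 12 6": the per-group sums are the same whichever of the two edge representations is used as the starting point.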
1a4c8fab-2f1c-4fda-8ce2-fa1ddfe8ac15
cpp
tensorflow/tensorflow
tf_saved_model
tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc
tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model_test.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h" #include <algorithm> #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Twine.h" #include "llvm/Support/Casting.h" #include "llvm/Support/raw_ostream.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/OpImplementation.h" #include "mlir/IR/PatternMatch.h" #include "mlir/IR/SymbolTable.h" #include "mlir/IR/TypeUtilities.h" #include "mlir/IR/Visitors.h" #include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h" namespace mlir { namespace tf_saved_model { static bool IsStrArrayAttr(Attribute attr) { auto array = mlir::dyn_cast<ArrayAttr>(attr); if (!array) return false; return llvm::all_of( array, [](Attribute attr) { return mlir::isa<StringAttr>(attr); }); } LogicalResult VerifyTensorTypesCompatible(Type t1, Type t2) { if (!mlir::isa<TensorType>(t1) || !mlir::isa<TensorType>(t2)) { return failure(); } return verifyCompatibleShape(mlir::cast<TensorType>(t1), mlir::cast<TensorType>(t2)); } LogicalResult GlobalTensorOp::verify() { GlobalTensorOp global_tensor = *this; if (global_tensor.getValue()) { if (failed(VerifyTensorTypesCompatible( global_tensor.getType(), global_tensor.getValue()->getType()))) { return global_tensor.emitError() << "'type' and 'value' attributes should " "have compatible tensor types"; } } if (!global_tensor.getIsMutable()) { if (!mlir::cast<TensorType>(global_tensor.getType()).hasStaticShape()) { return global_tensor.emitError() << "'type' attribute for immutable 'tf_saved_model.global_tensor' " "should have a static shape"; } } return success(); } LogicalResult SessionInitializerOp::verify() { SessionInitializerOp session_initializer = *this; mlir::SymbolTable symbol_table( session_initializer->getParentOfType<ModuleOp>()); for (auto sym_ref : session_initializer.getInitializers()) { auto init_func_op = symbol_table.lookup<mlir::func::FuncOp>( mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue()); if (!init_func_op) return session_initializer.emitOpError() << "the initializer function does not exist"; if (!init_func_op.getFunctionType().getResults().empty()) return session_initializer.emitOpError() << "the initializer function should have no output"; auto exported_names = GetExportedNames(init_func_op); if (exported_names.empty()) return session_initializer.emitOpError() << "the initializer function should be exported"; if (exported_names.size() != 1) return session_initializer.emitOpError() << "the initializer function should have only one exported names"; } return success(); } } } #define GET_OP_CLASSES #include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc.inc" namespace mlir { namespace tf_saved_model { TensorFlowSavedModelDialect::TensorFlowSavedModelDialect(MLIRContext *context) : Dialect("tf_saved_model", context, TypeID::get<TensorFlowSavedModelDialect>()) { context->loadDialect<TF::TensorFlowDialect>(); addOperations< #define GET_OP_LIST #include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc.inc" >(); } static LogicalResult VerifyIndexPath(Operation *op, NamedAttribute named_attr) { auto attr = mlir::dyn_cast<ArrayAttr>(named_attr.getValue()); if (!attr) { return op->emitError() << "'" << kTfSavedModelIndexPathAttr 
<< "' attribute should be an ArrayAttr"; } for (auto element : attr) { if (mlir::isa<StringAttr>(element)) { continue; } if (auto integer = mlir::dyn_cast<IntegerAttr>(element)) { if (integer.getValue().getBitWidth() == 64) { continue; } } return op->emitError() << "'" << kTfSavedModelIndexPathAttr << "' elements should be strings or 64-bit integers"; } return mlir::success(); } Type GetBoundInputArgTypeFor(mlir::Operation *op) { if (auto global_tensor = llvm::dyn_cast<GlobalTensorOp>(op)) { auto type = mlir::cast<TensorType>(global_tensor.getType()); return RankedTensorType::get( {}, TF::ResourceType::get({type}, type.getContext())); } if (auto asset = llvm::dyn_cast<AssetOp>(op)) { return RankedTensorType::get({}, TF::StringType::get(asset.getContext())); } op->emitError() << "unknown symbol operation"; return {}; } static LogicalResult VerifyBoundInputArgType(Operation *op_for_diagnostics, Type arg_type, mlir::Operation *symbol_op) { auto expected_type = GetBoundInputArgTypeFor(symbol_op); if (!expected_type) return failure(); if (arg_type != expected_type) { return op_for_diagnostics->emitError() << "bound input with type " << arg_type << " expected to have type " << expected_type; } return success(); } LogicalResult TensorFlowSavedModelDialect::verifyRegionArgAttribute( Operation *op, unsigned region_index, unsigned arg_index, NamedAttribute named_attr) { if (named_attr.getName() == "tf_saved_model.bound_input") { if (!mlir::isa<FlatSymbolRefAttr>(named_attr.getValue())) { return op->emitError() << "'tf_saved_model.bound_input' attribute should " "be a FlatSymbolRefAttr"; } auto symbol_name = mlir::cast<FlatSymbolRefAttr>(named_attr.getValue()).getValue(); auto module = op->getParentOfType<ModuleOp>(); mlir::Operation *symbol_op = module.lookupSymbol(symbol_name); if (!symbol_op) { return op->emitError() << "'tf_saved_model.bound_input' attribute must " "reference a valid symbol, got invalid symbol '" << symbol_name << "'"; } auto arg_type = cast<func::FuncOp>(op).getArgument(arg_index).getType(); return VerifyBoundInputArgType(op, arg_type, symbol_op); } if (named_attr.getName() == kTfSavedModelIndexPathAttr) { return VerifyIndexPath(op, named_attr); } return op->emitError() << "unknown tf_saved_model dialect arg attribute '" << named_attr.getName().getValue() << "'"; } LogicalResult TensorFlowSavedModelDialect::verifyRegionResultAttribute( Operation *op, unsigned region_index, unsigned result_index, NamedAttribute named_attr) { if (named_attr.getName() == kTfSavedModelIndexPathAttr) { return VerifyIndexPath(op, named_attr); } return op->emitError() << "unknown tf_saved_model dialect result attribute '" << named_attr.getName().getValue() << "'"; } LogicalResult VerifySessionInitOp(SessionInitializerOp session_init_op, SymbolTable &symbol_table) { llvm::SmallDenseSet<StringAttr> init_types{}; for (auto init_sym : session_init_op.getInitializers().getAsValueRange<FlatSymbolRefAttr>()) { auto init_func = symbol_table.lookup<func::FuncOp>(init_sym); if (!init_func) continue; auto init_type = init_func->getAttrOfType<StringAttr>(kTfSavedModelInitializerTypeAttr); if (!init_type) continue; if (init_types.contains(init_type)) { return init_func->emitError() << "Attribute tf_saved_model.initializer_type should not have " "duplicate values. 
Found duplicate: " << init_type; } init_types.insert(init_type); } return success(); } static bool HasAnyTfSavedModelArgAttr(func::FuncOp func) { for (int i = 0, e = func.getNumArguments(); i < e; i++) { if (func.getArgAttr(i, kTfSavedModelIndexPathAttr) || func.getArgAttr(i, "tf_saved_model.bound_input")) { return true; } } for (int i = 0, e = func.getNumResults(); i < e; i++) { if (func.getResultAttr(i, kTfSavedModelIndexPathAttr) || func.getResultAttr(i, "tf_saved_model.bound_input")) { return true; } } return false; } static LogicalResult VerifySavedModelModule( ModuleOp module, TensorFlowSavedModelDialect *dialect) { auto exported_names_ident = StringAttr::get(dialect->getContext(), kTfSavedModelExportedNamesAttr); DenseMap<StringRef, Operation *> exported_name_to_op; for (auto &op : module) { auto attr = op.getAttr(exported_names_ident); if (!attr) continue; if (failed(dialect->verifyOperationAttribute( &op, {exported_names_ident, attr}))) { return failure(); } for (auto str : mlir::cast<ArrayAttr>(attr)) { auto exported_name = mlir::cast<StringAttr>(str).getValue(); auto p = exported_name_to_op.insert({exported_name, &op}); if (!p.second) { return op.emitError() .append("duplicate exported name '", exported_name, "'") .attachNote(p.first->getSecond()->getLoc()) .append("previously seen here"); } } } for (auto func : module.getOps<func::FuncOp>()) { const bool is_exported = IsExported(func); if (is_exported && !func.isPublic()) { return func.emitError() << "exported function @" << func.getName() << " should be public"; } if (!is_exported && func.isPublic()) { return func.emitError() << "non-exported function @" << func.getName() << " should be private"; } if (!is_exported && HasAnyTfSavedModelArgAttr(func)) { return func.emitError() << "can only apply 'tf_saved_model' argument " "attributes to exported functions"; } } SymbolTable symbol_table(module); auto session_initializers = module.getOps<SessionInitializerOp>(); if (!session_initializers.empty()) { if (!llvm::hasSingleElement(session_initializers)) { return (*++session_initializers.begin()).emitError() << "there must be no more than one session_initializer op"; } if (failed( VerifySessionInitOp(*session_initializers.begin(), symbol_table))) { return failure(); } } auto is_init = [&session_initializers](mlir::func::FuncOp func) { if (session_initializers.empty()) return false; auto init_syms = (*session_initializers.begin()).getInitializers(); return std::any_of( init_syms.begin(), init_syms.end(), [&](Attribute sym_ref) { return mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue() == func.getName(); }); }; auto symbol_uses = SymbolTable::getSymbolUses(&module.getBodyRegion()); if (!symbol_uses.has_value()) { return module.emitError() << "modules with 'tf_saved_model.semantics' must " "have analyzable symbol uses"; } for (auto symbol_use : *symbol_uses) { auto func = symbol_table.lookupNearestSymbolFrom<func::FuncOp>( symbol_use.getUser(), symbol_use.getSymbolRef()); if (func && IsExported(func)) { if (is_init(func) && llvm::isa<SessionInitializerOp>(symbol_use.getUser())) { if (!func->getAttr(kTfSavedModelInitializerTypeAttr)) { LOG(WARNING) << "func op in session_initializer op's initializers attribute " << "should have tf_saved_model.initializer_type attribute."; } continue; } return symbol_use.getUser() ->emitError("exported function cannot be internally referenced") .attachNote(func.getLoc()) .append("references this exported function"); } } return success(); } LogicalResult VerifyExportedFunc(func::FuncOp func) { bool 
reached_bound_inputs = false; auto module = func->getParentOfType<ModuleOp>(); for (int i = 0, e = func.getNumArguments(); i < e; i++) { if (func.getArgAttr(i, "tf_saved_model.bound_input")) { reached_bound_inputs = true; continue; } if (func.getArgAttr(i, kTfSavedModelIndexPathAttr)) { if (reached_bound_inputs) { return func.emitError() << "all '" << kTfSavedModelIndexPathAttr << "' arg attributes should precede all " "'tf_saved_model.bound_input' arg attributes"; } continue; } if (func.getArgAttr(i, "tf.resource_name")) { if (module->getAttr("tf_saved_model.under_construction")) continue; return func.emitError() << "'tf.resource_name' attribute is not allowed " "unless it is being under construction"; } return func.emitError() << "all arguments should have '" << kTfSavedModelIndexPathAttr << "', 'tf_saved_model.bound_input' or 'tf.resource_name' " "attributes"; } llvm::SmallDenseSet<StringRef, 8> unique_bound_inputs; for (int i = 0, e = func.getNumArguments(); i < e; i++) { if (auto attr = func.getArgAttrOfType<FlatSymbolRefAttr>( i, "tf_saved_model.bound_input")) { if (!unique_bound_inputs.insert(attr.getValue()).second) { if (module->getAttr("tf_saved_model.under_construction")) continue; return func.emitError() << "duplicate 'tf_saved_model.bound_input' binding"; } } } for (int i = 0, e = func.getNumResults(); i < e; i++) { if (!func.getResultAttr(i, kTfSavedModelIndexPathAttr)) { return func.emitError() << "all results should have '" << kTfSavedModelIndexPathAttr << "' attributes"; } } return success(); } bool IsValidInitializerType(StringRef initializer_type) { return initializer_type == kTfSavedModelInitializerRestoreType || initializer_type == kTfSavedModelInitializerInitType; } LogicalResult VerifyInitializerTypeAttr(Operation *op, const NamedAttribute named_attr) { if (!isa<func::FuncOp>(op)) { return op->emitError() << "Attribute tf_saved_model.initializer_type " << "should be on a func::FuncOp."; } auto initializer_type_attr_value = mlir::dyn_cast_or_null<StringAttr>(named_attr.getValue()); if (!initializer_type_attr_value) { return op->emitError() << "Attribute tf_saved_model.initializer_type " << "should be a StringAttr."; } if (!IsValidInitializerType(initializer_type_attr_value)) { return op->emitError() << "tf_saved_model.initializer_type should be one " "of 'restore_op' or 'init_op'. 
Got: " << initializer_type_attr_value.str(); } return success(); } LogicalResult TensorFlowSavedModelDialect::verifyOperationAttribute( Operation *op, NamedAttribute named_attr) { if (named_attr.getName() == kTfSavedModelExportedNamesAttr) { if (!isa<func::FuncOp, GlobalTensorOp>(op)) { return op->emitError() << "'" << kTfSavedModelExportedNamesAttr << "' must be on a 'func' or 'tf_saved_model.global_tensor' op"; } if (!IsStrArrayAttr(named_attr.getValue())) { return op->emitError() << "'" << kTfSavedModelExportedNamesAttr << "' must be an array of strings"; } if (!op->getParentOp()->getAttr("tf_saved_model.semantics")) { return op->emitError() << "'" << kTfSavedModelExportedNamesAttr << "' must be on an op whose immediate parent has " "attribute 'tf_saved_model.semantics'"; } if (auto func = dyn_cast<func::FuncOp>(op)) { if (failed(VerifyExportedFunc(func))) { return failure(); } } return success(); } if (named_attr.getName() == "tf_saved_model.semantics") { auto module = dyn_cast<ModuleOp>(op); if (!module) { return op->emitError() << "'tf_saved_model.semantics' must " "be on a module op"; } return VerifySavedModelModule(module, this); } if (named_attr.getName() == "tf_saved_model.under_construction") { return success(); } if (named_attr.getName() == kTfSavedModelInitializerTypeAttr) { return VerifyInitializerTypeAttr(op, named_attr); } return op->emitError() << "unknown tf_saved_model dialect attribute '" << named_attr.getName().getValue() << "'"; } SmallVector<StringRef, 2> GetExportedNames(Operation *op) { SmallVector<StringRef, 2> ret; auto exported_names = op->getAttrOfType<ArrayAttr>(kTfSavedModelExportedNamesAttr); if (exported_names) { for (auto name : exported_names) { ret.push_back(mlir::cast<StringAttr>(name).getValue()); } } return ret; } bool IsExported(Operation *op) { auto exported_names = op->getAttrOfType<ArrayAttr>(kTfSavedModelExportedNamesAttr); return exported_names && !exported_names.empty(); } bool HasTfSavedModelSemantics(ModuleOp module) { return module->getAttr("tf_saved_model.semantics") != nullptr; } Operation *LookupBoundInput(func::FuncOp func, int arg_index, const SymbolTable &symbol_table) { auto attr = func.getArgAttrOfType<FlatSymbolRefAttr>( arg_index, "tf_saved_model.bound_input"); if (!attr) return nullptr; return symbol_table.lookup(attr.getValue()); } SessionInitializerOp GetSessionInitializerOp(mlir::ModuleOp op) { auto initializers = op.getOps<SessionInitializerOp>(); if (initializers.empty()) return {}; return *initializers.begin(); } class OptimizeSessionInitializerPattern : public OpRewritePattern<SessionInitializerOp> { public: using OpRewritePattern::OpRewritePattern; LogicalResult matchAndRewrite(SessionInitializerOp op, PatternRewriter &rewriter) const override { SymbolTable symbol_table(op->getParentOfType<ModuleOp>()); SmallVector<func::FuncOp, 2> to_remove; SmallVector<mlir::Attribute, 2> to_keep; for (auto sym_ref : op.getInitializers()) { auto init_func_op = symbol_table.lookup<mlir::func::FuncOp>( mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue()); auto &operations = init_func_op.front().getOperations(); if ((operations.size() == 1 && operations.front().hasTrait<OpTrait::IsTerminator>()) || (operations.size() == 2 && dyn_cast<mlir::TF::NoOp>(operations.front()) && operations.back().hasTrait<OpTrait::IsTerminator>())) { to_remove.push_back(init_func_op); } else { to_keep.push_back(sym_ref); } } for (auto func_op : to_remove) rewriter.eraseOp(func_op); if (to_keep.empty()) rewriter.eraseOp(op); else op->setAttr("initializers", 
rewriter.getArrayAttr(to_keep)); return success(); } }; void SessionInitializerOp::getCanonicalizationPatterns( RewritePatternSet &results, MLIRContext *context) { results.add<OptimizeSessionInitializerPattern>(context); } SmallVector<StringRef, 2> GetSessionInitializerExportedName(ModuleOp op) { auto session_initializer_op = GetSessionInitializerOp(op); if (!session_initializer_op) return {}; SymbolTable symbol_table(op); SmallVector<StringRef, 2> results; for (auto sym_ref : session_initializer_op.getInitializers()) { auto init_func_op = symbol_table.lookup<mlir::func::FuncOp>( mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue()); auto exported_names = GetExportedNames(init_func_op); assert(exported_names.size() == 1); results.push_back(exported_names[0]); } return results; } SmallVector<func::FuncOp, 2> GetInitializerFunctions(ModuleOp module_op) { SessionInitializerOp session_initializer_op = GetSessionInitializerOp(module_op); if (!session_initializer_op) return {}; SymbolTable symbol_table(module_op); SmallVector<func::FuncOp, 2> init_func_ops; for (auto init_func_sym : session_initializer_op.getInitializers() .getAsValueRange<FlatSymbolRefAttr>()) { auto init_func_op = symbol_table.lookup<func::FuncOp>(init_func_sym); init_func_ops.push_back(init_func_op); } return init_func_ops; } func::FuncOp GetInitializerFunction(ModuleOp module_op, const StringRef initializer_type) { SmallVector<func::FuncOp, 2> init_func_ops = GetInitializerFunctions(module_op); auto init_func_itr = absl::c_find_if( init_func_ops, [initializer_type](const func::FuncOp init_func_op) { const auto init_type_attr = init_func_op->getAttrOfType<StringAttr>( kTfSavedModelInitializerTypeAttr); return init_type_attr && init_type_attr == initializer_type; }); return init_func_itr == init_func_ops.end() ? nullptr : *init_func_itr; } bool IsRestoreGraph(ModuleOp module) { return module .walk([](mlir::Operation *op) { if (llvm::isa<mlir::TF::RestoreV2Op>(op)) { return mlir::WalkResult::interrupt(); } return mlir::WalkResult::advance(); }) .wasInterrupted(); } } }
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Block.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Parser/Parser.h" #include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" #include "tensorflow/core/platform/test.h" namespace mlir { namespace tf_saved_model { namespace { using ::testing::Eq; using ::testing::IsEmpty; using ::testing::IsNull; using ::testing::NotNull; using ::testing::SizeIs; class TfSavedModelTest : public ::testing::Test { protected: TfSavedModelTest() : ctx_() { ctx_.loadDialect<TensorFlowSavedModelDialect, func::FuncDialect>(); } MLIRContext ctx_; }; ModuleOp ParseModuleOp(const StringRef module_op_str, Block& block, MLIRContext& ctx) { const LogicalResult parse_result = parseSourceString(module_op_str, &block, ParserConfig(&ctx)); EXPECT_TRUE(succeeded(parse_result)); return cast<ModuleOp>(block.front()); } TEST_F(TfSavedModelTest, GetInitializerFunctionReturnsNullWhenNoSessionInitializerOp) { constexpr StringRef kModuleOpStr = R"mlir(module attributes {tf_saved_model.semantics} {})mlir"; Block block; ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_); func::FuncOp init_func_op = GetInitializerFunction( module_op, kTfSavedModelInitializerInitType); EXPECT_THAT(init_func_op, IsNull()); } TEST_F(TfSavedModelTest, GetInitializerFunctionReturnsNullWhenInitializersEmpty) { constexpr StringRef kModuleOpStr = R"mlir( module attributes {tf_saved_model.semantics} { "tf_saved_model.session_initializer"() {initializers = []} : () -> () } )mlir"; Block block; ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_); func::FuncOp init_func_op = GetInitializerFunction( module_op, kTfSavedModelInitializerInitType); EXPECT_THAT(init_func_op, IsNull()); } TEST_F(TfSavedModelTest, GetInitializerFunctionReturnsFuncOpMatchingInitializerType) { constexpr StringRef kModuleOpStr = R"mlir( module attributes {tf_saved_model.semantics} { "tf_saved_model.session_initializer"() {initializers = [@init_func]} : () -> () func.func @init_func() attributes {tf_saved_model.exported_names = ["init_func"], tf_saved_model.initializer_type = "init_op"} { func.return } } )mlir"; Block block; ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_); func::FuncOp init_func_op = GetInitializerFunction( module_op, kTfSavedModelInitializerInitType); EXPECT_THAT(init_func_op, NotNull()); EXPECT_THAT(init_func_op.getSymName(), "init_func"); EXPECT_THAT( init_func_op->getAttrOfType<StringAttr>(kTfSavedModelInitializerTypeAttr), kTfSavedModelInitializerInitType); } TEST_F(TfSavedModelTest, GetInitializerFunctionNoMatchingInitializerType) { constexpr StringRef kModuleOpStr = R"mlir( module attributes {tf_saved_model.semantics} { "tf_saved_model.session_initializer"() {initializers = [@init_func]} : () -> () func.func @init_func() attributes {tf_saved_model.exported_names = ["init_func"], tf_saved_model.initializer_type = "restore_op"} { func.return } } )mlir"; Block block; ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_); func::FuncOp init_func_op = GetInitializerFunction( module_op, kTfSavedModelInitializerInitType); EXPECT_THAT(init_func_op, IsNull()); } TEST_F(TfSavedModelTest, GetInitializerFunctionsEmptyWhenNoInitFunctions) { constexpr StringRef kModuleOpStr = R"mlir( module attributes {tf_saved_model.semantics} { "tf_saved_model.session_initializer"() {initializers = []} : () -> () } )mlir"; Block block; ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_); 
SmallVector<func::FuncOp, 2> init_func_ops = GetInitializerFunctions(module_op); EXPECT_THAT(init_func_ops, IsEmpty()); } TEST_F(TfSavedModelTest, GetInitializerFunctionsEmptyWhenNoSessionInitializerOp) { constexpr StringRef kModuleOpStr = R"mlir(module attributes {tf_saved_model.semantics} {})mlir"; Block block; ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_); SmallVector<func::FuncOp, 2> init_func_ops = GetInitializerFunctions(module_op); EXPECT_THAT(init_func_ops, IsEmpty()); } TEST_F(TfSavedModelTest, GetInitializerFunctionsReturnsMultipleFuncOps) { constexpr StringRef kModuleOpStr = R"mlir( module attributes {tf_saved_model.semantics} { "tf_saved_model.session_initializer"() {initializers = [@init_func1, @init_func2]} : () -> () func.func @init_func1() attributes {tf_saved_model.exported_names = ["init_func1"], tf_saved_model.initializer_type = "init_op"} { func.return } func.func @init_func2() attributes {tf_saved_model.exported_names = ["init_func2"], tf_saved_model.initializer_type = "restore_op"} { func.return } } )mlir"; Block block; ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_); SmallVector<func::FuncOp, 2> init_func_ops = GetInitializerFunctions(module_op); EXPECT_THAT(init_func_ops, SizeIs(2)); EXPECT_THAT(init_func_ops[0].getSymName(), Eq("init_func1")); EXPECT_THAT(init_func_ops[1].getSymName(), Eq("init_func2")); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
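Among the invariants VerifySavedModelModule enforces above is that no two ops in a module claim the same exported name; the check reduces to one map insertion per name, with the diagnostic pointing back at the previous owner. A standalone sketch of that core check, with the MLIR operations replaced by plain structs — Func, VerifyUniqueExportedNames, and the field names are illustrative, not the dialect's API:

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// A function symbol plus its tf_saved_model.exported_names list.
struct Func {
  std::string symbol;
  std::vector<std::string> exported_names;
};

// Fails on the first exported name claimed by two functions, reporting both
// the offender and the previous owner, analogous to the dialect verifier.
bool VerifyUniqueExportedNames(const std::vector<Func>& funcs) {
  std::unordered_map<std::string, const Func*> seen;
  for (const Func& f : funcs) {
    for (const std::string& name : f.exported_names) {
      auto [it, inserted] = seen.insert({name, &f});
      if (!inserted) {
        std::cerr << "duplicate exported name '" << name << "' on @" << f.symbol
                  << "; previously seen on @" << it->second->symbol << "\n";
        return false;
      }
    }
  }
  return true;
}

int main() {
  std::vector<Func> funcs = {{"f", {"serve"}}, {"g", {"serve"}}};
  return VerifyUniqueExportedNames(funcs) ? 0 : 1;  // exits 1: "serve" clashes
}

The single insert-and-test per name gives the verifier linear cost in the number of exported names, which is why the real implementation keeps a DenseMap rather than rescanning the module per name.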
330e0a6b-f0a5-44be-8255-1dab71a09c94
cpp
tensorflow/tensorflow
events_writer
tensorflow/core/util/events_writer.cc
tensorflow/core/util/events_writer_test.cc
#include "tensorflow/core/util/events_writer.h" #include <stddef.h> #include <memory> #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/host_info.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/event.pb.h" namespace tensorflow { EventsWriter::EventsWriter(const string& file_prefix) : env_(Env::Default()), file_prefix_(file_prefix), num_outstanding_events_(0) {} EventsWriter::~EventsWriter() { Close().IgnoreError(); } Status EventsWriter::Init() { return InitWithSuffix(""); } Status EventsWriter::InitWithSuffix(const string& suffix) { file_suffix_ = suffix; return InitIfNeeded(); } Status EventsWriter::InitIfNeeded() { if (recordio_writer_ != nullptr) { CHECK(!filename_.empty()); if (!FileStillExists().ok()) { if (num_outstanding_events_ > 0) { LOG(WARNING) << "Re-initialization, attempting to open a new file, " << num_outstanding_events_ << " events will be lost."; } } else { return absl::OkStatus(); } } int64_t time_in_seconds = env_->NowMicros() / 1000000; filename_ = strings::Printf("%s.out.tfevents.%010lld.%s%s", file_prefix_.c_str(), static_cast<long long>(time_in_seconds), port::Hostname().c_str(), file_suffix_.c_str()); recordio_writer_.reset(); TF_RETURN_WITH_CONTEXT_IF_ERROR( env_->NewWritableFile(filename_, &recordio_file_), "Creating writable file ", filename_); recordio_writer_ = std::make_unique<io::RecordWriter>(recordio_file_.get()); if (recordio_writer_ == nullptr) { return errors::Unknown("Could not create record writer"); } num_outstanding_events_ = 0; VLOG(1) << "Successfully opened events file: " << filename_; { Event event; event.set_wall_time(time_in_seconds); event.set_file_version(strings::StrCat(kVersionPrefix, kCurrentVersion)); SourceMetadata* source_metadata = event.mutable_source_metadata(); source_metadata->set_writer(kWriterSourceMetadata); WriteEvent(event); TF_RETURN_WITH_CONTEXT_IF_ERROR(Flush(), "Flushing first event."); } return absl::OkStatus(); } string EventsWriter::FileName() { if (filename_.empty()) { InitIfNeeded().IgnoreError(); } return filename_; } void EventsWriter::WriteSerializedEvent(StringPiece event_str) { if (recordio_writer_ == nullptr) { if (!InitIfNeeded().ok()) { LOG(ERROR) << "Write failed because file could not be opened."; return; } } num_outstanding_events_++; recordio_writer_->WriteRecord(event_str).IgnoreError(); } void EventsWriter::WriteEvent(const Event& event) { string record; event.AppendToString(&record); WriteSerializedEvent(record); } Status EventsWriter::Flush() { if (num_outstanding_events_ == 0) return absl::OkStatus(); CHECK(recordio_file_ != nullptr) << "Unexpected NULL file"; TF_RETURN_WITH_CONTEXT_IF_ERROR(recordio_writer_->Flush(), "Failed to flush ", num_outstanding_events_, " events to ", filename_); TF_RETURN_WITH_CONTEXT_IF_ERROR(recordio_file_->Sync(), "Failed to sync ", num_outstanding_events_, " events to ", filename_); VLOG(1) << "Wrote " << num_outstanding_events_ << " events to disk."; num_outstanding_events_ = 0; return absl::OkStatus(); } Status EventsWriter::Close() { Status status = Flush(); if (recordio_file_ != nullptr) { Status close_status = recordio_file_->Close(); if (!close_status.ok()) { status = close_status; } recordio_writer_.reset(nullptr); 
recordio_file_.reset(nullptr); } num_outstanding_events_ = 0; return status; } Status EventsWriter::FileStillExists() { if (env_->FileExists(filename_).ok()) { return absl::OkStatus(); } return errors::Unknown("The events file ", filename_, " has disappeared."); } }
#include "tensorflow/core/util/events_writer.h" #include <math.h> #include <memory> #include "tensorflow/core/framework/summary.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/io/record_reader.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/util/event.pb.h" namespace tensorflow { namespace { Env* env() { return Env::Default(); } void WriteSimpleValue(EventsWriter* writer, double wall_time, int64_t step, const string& tag, float simple_value) { Event event; event.set_wall_time(wall_time); event.set_step(step); Summary::Value* summ_val = event.mutable_summary()->add_value(); summ_val->set_tag(tag); summ_val->set_simple_value(simple_value); writer->WriteEvent(event); } void WriteFile(EventsWriter* writer) { WriteSimpleValue(writer, 1234, 34, "foo", 3.14159); WriteSimpleValue(writer, 2345, 35, "bar", -42); } static bool ReadEventProto(io::RecordReader* reader, uint64* offset, Event* proto) { tstring record; Status s = reader->ReadRecord(offset, &record); if (!s.ok()) { return false; } return ParseProtoUnlimited(proto, record); } void VerifyFile(const string& filename) { TF_CHECK_OK(env()->FileExists(filename)); std::unique_ptr<RandomAccessFile> event_file; TF_CHECK_OK(env()->NewRandomAccessFile(filename, &event_file)); io::RecordReader* reader = new io::RecordReader(event_file.get()); uint64 offset = 0; Event actual; CHECK(ReadEventProto(reader, &offset, &actual)); VLOG(1) << actual.ShortDebugString(); double current_time = env()->NowMicros() / 1000000.0; EXPECT_LT(fabs(actual.wall_time() - current_time), 5); EXPECT_EQ(actual.file_version(), strings::StrCat(EventsWriter::kVersionPrefix, EventsWriter::kCurrentVersion)); EXPECT_EQ(actual.source_metadata().writer(), EventsWriter::kWriterSourceMetadata); Event expected; CHECK(ReadEventProto(reader, &offset, &actual)); VLOG(1) << actual.ShortDebugString(); ASSERT_TRUE(protobuf::TextFormat::ParseFromString( "wall_time: 1234 step: 34 " "summary { value { tag: 'foo' simple_value: 3.14159 } }", &expected)); CHECK(ReadEventProto(reader, &offset, &actual)); VLOG(1) << actual.ShortDebugString(); ASSERT_TRUE(protobuf::TextFormat::ParseFromString( "wall_time: 2345 step: 35 " "summary { value { tag: 'bar' simple_value: -42 } }", &expected)); TF_CHECK_OK(env()->DeleteFile(filename)); delete reader; } string GetDirName(const string& suffix) { return io::JoinPath(testing::TmpDir(), suffix); } TEST(EventWriter, WriteFlush) { string file_prefix = GetDirName("/writeflush_test"); EventsWriter writer(file_prefix); WriteFile(&writer); TF_EXPECT_OK(writer.Flush()); string filename = writer.FileName(); VerifyFile(filename); } TEST(EventWriter, WriteClose) { string file_prefix = GetDirName("/writeclose_test"); EventsWriter writer(file_prefix); WriteFile(&writer); TF_EXPECT_OK(writer.Close()); string filename = writer.FileName(); VerifyFile(filename); } TEST(EventWriter, WriteDelete) { string file_prefix = GetDirName("/writedelete_test"); EventsWriter* writer = new EventsWriter(file_prefix); WriteFile(writer); string filename = writer->FileName(); delete writer; VerifyFile(filename); } TEST(EventWriter, FailFlush) { string file_prefix = GetDirName("/failflush_test"); EventsWriter 
writer(file_prefix); string filename = writer.FileName(); WriteFile(&writer); TF_EXPECT_OK(env()->FileExists(filename)); TF_ASSERT_OK(env()->DeleteFile(filename)); EXPECT_TRUE(writer.Flush().ok()); } TEST(EventWriter, FailClose) { string file_prefix = GetDirName("/failclose_test"); EventsWriter writer(file_prefix); string filename = writer.FileName(); WriteFile(&writer); TF_EXPECT_OK(env()->FileExists(filename)); TF_ASSERT_OK(env()->DeleteFile(filename)); EXPECT_TRUE(writer.Close().ok()); } TEST(EventWriter, InitWriteClose) { string file_prefix = GetDirName("/initwriteclose_test"); EventsWriter writer(file_prefix); TF_EXPECT_OK(writer.Init()); string filename0 = writer.FileName(); TF_EXPECT_OK(env()->FileExists(filename0)); WriteFile(&writer); TF_EXPECT_OK(writer.Close()); string filename1 = writer.FileName(); EXPECT_EQ(filename0, filename1); VerifyFile(filename1); } TEST(EventWriter, NameWriteClose) { string file_prefix = GetDirName("/namewriteclose_test"); EventsWriter writer(file_prefix); string filename = writer.FileName(); TF_EXPECT_OK(env()->FileExists(filename)); WriteFile(&writer); TF_EXPECT_OK(writer.Close()); VerifyFile(filename); } TEST(EventWriter, NameClose) { string file_prefix = GetDirName("/nameclose_test"); EventsWriter writer(file_prefix); string filename = writer.FileName(); TF_EXPECT_OK(writer.Close()); TF_EXPECT_OK(env()->FileExists(filename)); TF_ASSERT_OK(env()->DeleteFile(filename)); } TEST(EventWriter, FileDeletionBeforeWriting) { string file_prefix = GetDirName("/fdbw_test"); EventsWriter writer(file_prefix); string filename0 = writer.FileName(); TF_EXPECT_OK(env()->FileExists(filename0)); env()->SleepForMicroseconds( 2000000); TF_ASSERT_OK(env()->DeleteFile(filename0)); TF_EXPECT_OK(writer.Init()); WriteFile(&writer); TF_EXPECT_OK(writer.Flush()); string filename1 = writer.FileName(); EXPECT_NE(filename0, filename1); VerifyFile(filename1); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/events_writer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/events_writer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
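InitIfNeeded above composes the events file name with strings::Printf("%s.out.tfevents.%010lld.%s%s", ...), i.e. the user prefix, a zero-padded timestamp in seconds, the host name, and an optional suffix. A self-contained sketch of the same naming scheme using only the standard library; EventsFileName is an illustrative helper, and the hostname argument stands in for port::Hostname():

#include <cstdio>
#include <ctime>
#include <string>

// Mirrors the pattern "<prefix>.out.tfevents.<10-digit seconds>.<host><suffix>".
std::string EventsFileName(const std::string& prefix, long long seconds,
                           const std::string& host, const std::string& suffix) {
  char buf[512];
  std::snprintf(buf, sizeof(buf), "%s.out.tfevents.%010lld.%s%s",
                prefix.c_str(), seconds, host.c_str(), suffix.c_str());
  return buf;
}

int main() {
  long long now = static_cast<long long>(std::time(nullptr));
  // Prints e.g. "/tmp/run.out.tfevents.1700000000.localhost.v2".
  std::printf("%s\n",
              EventsFileName("/tmp/run", now, "localhost", ".v2").c_str());
}

The zero-padded seconds field keeps lexicographic and chronological order aligned, which is what lets readers such as TensorBoard sort event files by name; it is also why FileDeletionBeforeWriting in the test above sleeps two seconds before re-initializing, so the second file gets a distinct name.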
7ebe33fa-b0b5-44fd-81fd-f58aa8e47d6e
cpp
tensorflow/tensorflow
reconcile_fns
tensorflow/lite/core/async/interop/reconcile_fns.cc
tensorflow/lite/core/async/interop/reconcile_fns_test.cc
#include "tensorflow/lite/core/async/interop/reconcile_fns.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <iterator> #include <set> #include "tensorflow/lite/core/async/interop/attribute_map_internal.h" #include "tensorflow/lite/core/async/interop/c/types.h" namespace tflite { namespace interop { namespace { template <typename T> T gcd(T x, T y) { while (y) { auto m = x % y; x = y; y = m; } return x; } template <typename T> T lcm(T x, T y) { return x / gcd(x, y) * y; } void ReconcileAlignment(size_t l, size_t r, AttributeMap::ContainerT* merged) { merged->insert_or_assign(static_cast<size_t>(kTfLiteBufferAttrKeyAlignment), lcm(l, r)); } void ReconcilePadding(size_t l, size_t r, AttributeMap::ContainerT* merged) { merged->insert_or_assign(static_cast<size_t>(kTfLiteBufferAttrKeyPadding), lcm(l, r)); } bool CheckMultiples(size_t l, size_t r) { return l % r == 0; } void ReconcileSize(size_t l, size_t r, AttributeMap::ContainerT* merged) { merged->insert_or_assign(static_cast<size_t>(kTfLiteBufferAttrKeySize), std::max(l, r)); } bool CheckSize(size_t l, size_t r) { return l >= r; } } bool ReconcileGeneralAttributeKeys(TfLiteAttrMapType type, const AttributeMap::ContainerT* lhs, const AttributeMap::ContainerT* rhs, AttributeMap::ContainerT* merged, AttributeMap::ContainerT* conflict) { if (lhs == nullptr || rhs == nullptr || merged == nullptr) return false; bool ret = true; std::set<uint32_t> keys; std::transform(lhs->begin(), lhs->end(), std::inserter(keys, keys.end()), [](auto pair) { return pair.first; }); std::transform(rhs->begin(), rhs->end(), std::inserter(keys, keys.end()), [](auto pair) { return pair.first; }); for (auto k : keys) { const auto l = lhs->find(k); const auto r = rhs->find(k); if (l == lhs->end() || l->second.GetPtr() == nullptr) { merged->insert_or_assign(k, r->second); continue; } if (r == rhs->end() || r->second.GetPtr() == nullptr) { merged->insert_or_assign(k, l->second); continue; } if (type == kTfLiteAttrMapTypeBuffer) { switch (static_cast<TfLiteBufferAttrKey>(k)) { case kTfLiteBufferAttrKeySize: ReconcileSize(*l->second.Get<size_t>(), *r->second.Get<size_t>(), merged); break; case kTfLiteBufferAttrKeyAlignment: ReconcileAlignment(*l->second.Get<size_t>(), *r->second.Get<size_t>(), merged); break; case kTfLiteBufferAttrKeyPadding: ReconcilePadding(*l->second.Get<size_t>(), *r->second.Get<size_t>(), merged); break; default: if (l->second == r->second) { merged->insert_or_assign(k, l->second); } else { ret = false; if (conflict) conflict->insert_or_assign(k, r->second); } } } else { if (l->second == r->second) { merged->insert_or_assign(k, l->second); } else { ret = false; if (conflict) conflict->insert_or_assign(k, r->second); } } } return ret; } bool CheckGeneralAttributeKeysCoverage(TfLiteAttrMapType type, const AttributeMap::ContainerT* lhs, const AttributeMap::ContainerT* rhs, AttributeMap::ContainerT* conflict) { if (lhs == nullptr || rhs == nullptr) return false; bool ret = true; std::set<uint32_t> keys; std::transform(lhs->begin(), lhs->end(), std::inserter(keys, keys.end()), [](auto pair) { return pair.first; }); std::transform(rhs->begin(), rhs->end(), std::inserter(keys, keys.end()), [](auto pair) { return pair.first; }); for (auto k : keys) { bool has_conflict = false; const auto l = lhs->find(k); const auto r = rhs->find(k); if (r == rhs->end() || r->second.GetPtr() == nullptr) { continue; } else if (l == lhs->end() || l->second.GetPtr() == nullptr) { has_conflict = true; } else { if (type == kTfLiteAttrMapTypeBuffer) { switch 
(static_cast<TfLiteBufferAttrKey>(k)) { case kTfLiteBufferAttrKeySize: has_conflict |= !CheckSize(*l->second.Get<size_t>(), *r->second.Get<size_t>()); break; case kTfLiteBufferAttrKeyAlignment: has_conflict |= !CheckMultiples(*l->second.Get<size_t>(), *r->second.Get<size_t>()); break; case kTfLiteBufferAttrKeyPadding: has_conflict |= !CheckSize(*l->second.Get<size_t>(), *r->second.Get<size_t>()); break; default: if (l->second != r->second) { has_conflict = true; } } } else { if (l->second != r->second) { has_conflict = true; } } } if (has_conflict) { if (conflict != nullptr) conflict->insert_or_assign(k, r->second); ret = false; } } return ret; } } }
#include "tensorflow/lite/core/async/interop/reconcile_fns.h" #include <cstddef> #include <cstdint> #include <cstring> #include <string> #include <tuple> #include <gtest/gtest.h> #include "tensorflow/lite/core/async/interop/attribute_map_internal.h" #include "tensorflow/lite/core/async/interop/c/types.h" namespace tflite::interop { namespace { using ContainerT = AttributeMap::ContainerT; template <typename ValT, typename KeyT> void SetAttr(ContainerT* c, KeyT k, ValT v) { c->insert_or_assign(static_cast<uint32_t>(k), v); } template <typename ValT, typename KeyT> ValT GetAttr(const ContainerT& c, KeyT k) { return *(c.at(static_cast<uint32_t>(k)).Get<ValT>()); } TEST(ReconcileTest, NullCheck) { ContainerT m1, m2; EXPECT_FALSE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &m1, &m2, nullptr, nullptr)); EXPECT_FALSE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, nullptr, &m1, &m2, nullptr)); EXPECT_FALSE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &m1, nullptr, &m2, nullptr)); EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, nullptr, &m1, &m2)); EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &m1, nullptr, &m2)); } TEST(ReconcileTest, MissingAttributeTest) { { ContainerT lhs, rhs, merged; SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, size_t(4)); EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs, &rhs, &merged, nullptr)); EXPECT_EQ(4, GetAttr<size_t>(merged, kTfLiteBufferAttrKeyAlignment)); } { ContainerT lhs, rhs, merged; SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, size_t(4)); EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs, &rhs, &merged, nullptr)); EXPECT_EQ(4, GetAttr<size_t>(merged, kTfLiteBufferAttrKeyAlignment)); } { ContainerT lhs, rhs, merged; const char value[] = "string"; SetAttr(&rhs, kTfLiteSynchronizationAttrKeyObjectTypeName, value); EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeSync, &lhs, &rhs, &merged, nullptr)); EXPECT_EQ(value, GetAttr<const char*>( merged, kTfLiteSynchronizationAttrKeyObjectTypeName)); } } TEST(CheckCoverageTest, MissingAttributeTest) { { ContainerT lhs, rhs; SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, size_t(4)); EXPECT_TRUE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &lhs, &rhs, nullptr)); } { ContainerT lhs, rhs, merged; SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, size_t(4)); EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs, &rhs, &merged, nullptr)); EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &lhs, &rhs, nullptr)); } } class ReconcileAlignmentTest : public testing::TestWithParam<std::tuple<size_t, size_t, size_t>> {}; TEST_P(ReconcileAlignmentTest, Test) { ContainerT lhs, rhs, merged; SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, std::get<0>(GetParam())); SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, std::get<1>(GetParam())); EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs, &rhs, &merged, nullptr)); EXPECT_EQ(std::get<2>(GetParam()), GetAttr<size_t>(merged, kTfLiteBufferAttrKeyAlignment)); } INSTANTIATE_TEST_SUITE_P(ReconcileAlignmentTest, ReconcileAlignmentTest, testing::Values(std::make_tuple(4, 4, 4), std::make_tuple(1, 4, 4), std::make_tuple(8, 4, 8), std::make_tuple(8, 3, 24))); class CheckAlignmentTest : public testing::TestWithParam<std::tuple<size_t, size_t, bool>> {}; TEST_P(CheckAlignmentTest, Test) { ContainerT lhs, rhs, conflict; SetAttr(&lhs, kTfLiteBufferAttrKeyAlignment, std::get<0>(GetParam())); 
SetAttr(&rhs, kTfLiteBufferAttrKeyAlignment, std::get<1>(GetParam())); EXPECT_EQ(std::get<2>(GetParam()), CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &lhs, &rhs, &conflict)); EXPECT_EQ( !std::get<2>(GetParam()), conflict.count(static_cast<uint32_t>(kTfLiteBufferAttrKeyAlignment))); } INSTANTIATE_TEST_SUITE_P(CheckAlignmentTest, CheckAlignmentTest, testing::Values(std::make_tuple(4, 4, true), std::make_tuple(4, 1, true), std::make_tuple(1, 4, false))); class ReconcilePaddingTest : public testing::TestWithParam<std::tuple<size_t, size_t, size_t>> {}; TEST_P(ReconcilePaddingTest, Test) { ContainerT lhs, rhs, merged; SetAttr(&lhs, kTfLiteBufferAttrKeyPadding, std::get<0>(GetParam())); SetAttr(&rhs, kTfLiteBufferAttrKeyPadding, std::get<1>(GetParam())); EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs, &rhs, &merged, nullptr)); EXPECT_EQ(std::get<2>(GetParam()), GetAttr<size_t>(merged, kTfLiteBufferAttrKeyPadding)); } INSTANTIATE_TEST_SUITE_P(ReconcilePaddingTest, ReconcilePaddingTest, testing::Values(std::make_tuple(4, 4, 4), std::make_tuple(1, 4, 4), std::make_tuple(8, 4, 8), std::make_tuple(8, 3, 24))); class CheckPaddingTest : public testing::TestWithParam<std::tuple<size_t, size_t, bool>> {}; TEST_P(CheckPaddingTest, Test) { ContainerT lhs, rhs, conflict; SetAttr(&lhs, kTfLiteBufferAttrKeyPadding, std::get<0>(GetParam())); SetAttr(&rhs, kTfLiteBufferAttrKeyPadding, std::get<1>(GetParam())); EXPECT_EQ(std::get<2>(GetParam()), CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &lhs, &rhs, &conflict)); EXPECT_EQ(!std::get<2>(GetParam()), conflict.count(static_cast<uint32_t>(kTfLiteBufferAttrKeyPadding))); } INSTANTIATE_TEST_SUITE_P(CheckPaddingTest, CheckPaddingTest, testing::Values(std::make_tuple(4, 4, true), std::make_tuple(4, 1, true), std::make_tuple(1, 4, false))); class ReconcileSizeTest : public testing::TestWithParam<std::tuple<size_t, size_t, size_t>> {}; TEST_P(ReconcileSizeTest, Test) { ContainerT lhs, rhs, merged; SetAttr(&lhs, kTfLiteBufferAttrKeySize, std::get<0>(GetParam())); SetAttr(&rhs, kTfLiteBufferAttrKeySize, std::get<1>(GetParam())); EXPECT_TRUE(ReconcileGeneralAttributeKeys(kTfLiteAttrMapTypeBuffer, &lhs, &rhs, &merged, nullptr)); EXPECT_EQ(std::get<2>(GetParam()), GetAttr<size_t>(merged, kTfLiteBufferAttrKeySize)); } INSTANTIATE_TEST_SUITE_P(ReconcileSizeTest, ReconcileSizeTest, testing::Values(std::make_tuple(4, 4, 4), std::make_tuple(1, 4, 4), std::make_tuple(8, 4, 8), std::make_tuple(8, 3, 8))); class CheckSizeTest : public testing::TestWithParam<std::tuple<size_t, size_t, bool>> {}; TEST_P(CheckSizeTest, Test) { ContainerT lhs, rhs, conflict; SetAttr(&lhs, kTfLiteBufferAttrKeySize, std::get<0>(GetParam())); SetAttr(&rhs, kTfLiteBufferAttrKeySize, std::get<1>(GetParam())); EXPECT_EQ(std::get<2>(GetParam()), CheckGeneralAttributeKeysCoverage(kTfLiteAttrMapTypeBuffer, &lhs, &rhs, &conflict)); EXPECT_EQ(!std::get<2>(GetParam()), conflict.count(static_cast<uint32_t>(kTfLiteBufferAttrKeySize))); } INSTANTIATE_TEST_SUITE_P(CheckSizeTest, CheckSizeTest, testing::Values(std::make_tuple(4, 4, true), std::make_tuple(4, 1, true), std::make_tuple(1, 4, false))); class ReconcileNameTest : public testing::TestWithParam<std::tuple<TfLiteAttrMapType, uint32_t>> {}; TEST_P(ReconcileNameTest, Test) { constexpr char name_string1[] = "string1"; std::string name_string1_1 = "string1"; constexpr char name_string2[] = "string2"; { ContainerT lhs, rhs, merged; SetAttr(&lhs, std::get<1>(GetParam()), name_string1); SetAttr(&rhs, 
std::get<1>(GetParam()), name_string1_1.c_str()); EXPECT_TRUE(ReconcileGeneralAttributeKeys(std::get<0>(GetParam()), &lhs, &rhs, &merged, nullptr)); EXPECT_EQ(0, strcmp(GetAttr<const char*>(merged, std::get<1>(GetParam())), name_string1)); } { ContainerT lhs, rhs, merged, conflict; SetAttr(&lhs, std::get<1>(GetParam()), name_string1); SetAttr(&rhs, std::get<1>(GetParam()), name_string2); EXPECT_FALSE(ReconcileGeneralAttributeKeys(std::get<0>(GetParam()), &lhs, &rhs, &merged, &conflict)); EXPECT_TRUE(conflict.count(std::get<1>(GetParam()))); } } INSTANTIATE_TEST_SUITE_P( ReconcileNameTest, ReconcileNameTest, testing::Values( std::make_tuple( kTfLiteAttrMapTypeBuffer, static_cast<uint32_t>(kTfLiteBufferAttrKeyResourceTypeName)), std::make_tuple(kTfLiteAttrMapTypeSync, static_cast<uint32_t>( kTfLiteSynchronizationAttrKeyObjectTypeName)))); class CheckNameTest : public testing::TestWithParam<std::tuple<TfLiteAttrMapType, uint32_t>> {}; TEST_P(CheckNameTest, Test) { constexpr char name_string1[] = "string1"; std::string name_string1_1 = "string1"; constexpr char name_string2[] = "string2"; { ContainerT lhs, rhs; SetAttr(&lhs, std::get<1>(GetParam()), name_string1); SetAttr(&rhs, std::get<1>(GetParam()), name_string1_1.c_str()); EXPECT_TRUE(CheckGeneralAttributeKeysCoverage(std::get<0>(GetParam()), &lhs, &rhs, nullptr)); } { ContainerT lhs, rhs, conflict; SetAttr(&lhs, std::get<1>(GetParam()), name_string1); SetAttr(&rhs, std::get<1>(GetParam()), name_string2); EXPECT_FALSE(CheckGeneralAttributeKeysCoverage(std::get<0>(GetParam()), &lhs, &rhs, &conflict)); EXPECT_TRUE(conflict.count(std::get<1>(GetParam()))); } } INSTANTIATE_TEST_SUITE_P( CheckNameTest, CheckNameTest, testing::Values( std::make_tuple( kTfLiteAttrMapTypeBuffer, static_cast<uint32_t>(kTfLiteBufferAttrKeyResourceTypeName)), std::make_tuple(kTfLiteAttrMapTypeSync, static_cast<uint32_t>( kTfLiteSynchronizationAttrKeyObjectTypeName)))); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/reconcile_fns.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/reconcile_fns_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
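The parameterized tests above pin down the reconciliation lattice: alignment and padding merge to the least common multiple, size merges to the maximum, and the coverage check is divisibility for alignment versus >= for size and padding. A standalone restatement of those rules in plain C++ (no TFLite types; the asserts mirror tuples from the test suites, e.g. (8, 3, 24) for alignment):

#include <algorithm>
#include <cassert>
#include <cstddef>

size_t Gcd(size_t x, size_t y) {
  while (y) { size_t m = x % y; x = y; y = m; }
  return x;
}
size_t Lcm(size_t x, size_t y) { return x / Gcd(x, y) * y; }

// Merging two buffer requirements: the result must satisfy both sides.
size_t ReconcileAlignment(size_t l, size_t r) { return Lcm(l, r); }
size_t ReconcileSize(size_t l, size_t r) { return std::max(l, r); }

// Coverage: does what one side guarantees (l) satisfy the other side (r)?
bool CoversAlignment(size_t l, size_t r) { return l % r == 0; }
bool CoversSize(size_t l, size_t r) { return l >= r; }

int main() {
  assert(ReconcileAlignment(8, 3) == 24);            // ReconcileAlignmentTest
  assert(ReconcileSize(8, 3) == 8);                  // ReconcileSizeTest
  assert(CoversAlignment(4, 1) && !CoversAlignment(1, 4));  // CheckAlignmentTest
  assert(CoversSize(4, 1) && !CoversSize(1, 4));     // CheckSizeTest
  return 0;
}

Lcm and max are the natural join operations here because a buffer aligned to lcm(l, r) bytes is aligned to both l and r, and a buffer of max(l, r) bytes holds either side's payload; any value the merge produces therefore remains acceptable to both parties.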
8e41b562-c8e5-4977-bc45-be1e3cf242f7
cpp
tensorflow/tensorflow
text_line_dataset_op
tensorflow/core/kernels/data/text_line_dataset_op.cc
tensorflow/core/kernels/data/text_line_dataset_op_test.cc
#include "tensorflow/core/kernels/data/text_line_dataset_op.h" #include "tensorflow/core/data/name_utils.h" #include "tensorflow/core/data/utils.h" #include "tensorflow/core/framework/metrics.h" #include "tensorflow/core/framework/partial_tensor_shape.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/lib/io/buffered_inputstream.h" #include "tensorflow/core/lib/io/inputbuffer.h" #include "tensorflow/core/lib/io/random_inputstream.h" #include "tensorflow/core/lib/io/zlib_compression_options.h" #include "tensorflow/core/lib/io/zlib_inputstream.h" namespace tensorflow { namespace data { constexpr const char* const TextLineDatasetOp::kDatasetType; constexpr const char* const TextLineDatasetOp::kFileNames; constexpr const char* const TextLineDatasetOp::kCompressionType; constexpr const char* const TextLineDatasetOp::kBufferSize; constexpr char kZLIB[] = "ZLIB"; constexpr char kGZIP[] = "GZIP"; constexpr char kCurrentFileIndex[] = "current_file_index"; constexpr char kCurrentPos[] = "current_pos"; class TextLineDatasetOp::Dataset : public DatasetBase { public: Dataset(OpKernelContext* ctx, std::vector<string> filenames, const string& compression_type, const io::ZlibCompressionOptions& options) : DatasetBase(DatasetContext(ctx)), filenames_(std::move(filenames)), compression_type_(compression_type), use_compression_(!compression_type.empty()), options_(options) {} std::unique_ptr<IteratorBase> MakeIteratorInternal( const string& prefix) const override { return std::make_unique<Iterator>(Iterator::Params{ this, name_utils::IteratorPrefix(TextLineDatasetOp::kDatasetType, prefix)}); } const DataTypeVector& output_dtypes() const override { static DataTypeVector* dtypes = new DataTypeVector({DT_STRING}); return *dtypes; } const std::vector<PartialTensorShape>& output_shapes() const override { static std::vector<PartialTensorShape>* shapes = new std::vector<PartialTensorShape>({{}}); return *shapes; } string DebugString() const override { return name_utils::DatasetDebugString(kDatasetType); } Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override { return absl::OkStatus(); } Status CheckExternalState() const override { return absl::OkStatus(); } protected: Status AsGraphDefInternal(SerializationContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* filenames = nullptr; Node* compression_type = nullptr; Node* buffer_size = nullptr; TF_RETURN_IF_ERROR(b->AddVector(filenames_, &filenames)); TF_RETURN_IF_ERROR(b->AddScalar(compression_type_, &compression_type)); TF_RETURN_IF_ERROR(b->AddScalar(options_.input_buffer_size, &buffer_size)); TF_RETURN_IF_ERROR(b->AddDataset( this, {filenames, compression_type, buffer_size}, output)); return absl::OkStatus(); } private: class Iterator : public DatasetIterator<Dataset> { public: explicit Iterator(const Params& params) : DatasetIterator<Dataset>(params) {} bool SymbolicCheckpointCompatible() const override { return true; } Status GetNextInternal(IteratorContext* ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) override { mutex_lock l(mu_); do { if (buffered_input_stream_) { Tensor line_contents(tstring{}); tstring& line_contents_str = line_contents.scalar<tstring>()(); Status s = buffered_input_stream_->ReadLine(&line_contents_str); if (s.ok()) { static monitoring::CounterCell* bytes_counter = metrics::GetTFDataBytesReadCounter( name_utils::OpName(TextLineDatasetOp::kDatasetType)); bytes_counter->IncrementBy(line_contents_str.size()); 
out_tensors->push_back(std::move(line_contents)); *end_of_sequence = false; return absl::OkStatus(); } else if (!errors::IsOutOfRange(s)) { return s; } ResetStreamsLocked(); ++current_file_index_; } if (current_file_index_ == dataset()->filenames_.size()) { *end_of_sequence = true; return absl::OkStatus(); } TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env())); } while (true); } protected: std::shared_ptr<model::Node> CreateNode( IteratorContext* ctx, model::Node::Args args) const override { return model::MakeSourceNode(std::move(args)); } Status SaveInternal(SerializationContext* ctx, IteratorStateWriter* writer) override { mutex_lock l(mu_); TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentFileIndex, current_file_index_)); if (buffered_input_stream_) { TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurrentPos, buffered_input_stream_->Tell())); } return absl::OkStatus(); } Status RestoreInternal(IteratorContext* ctx, IteratorStateReader* reader) override { mutex_lock l(mu_); ResetStreamsLocked(); int64_t current_file_index; TF_RETURN_IF_ERROR( reader->ReadScalar(prefix(), kCurrentFileIndex, &current_file_index)); current_file_index_ = size_t(current_file_index); if (reader->Contains(prefix(), kCurrentPos)) { int64_t current_pos; TF_RETURN_IF_ERROR( reader->ReadScalar(prefix(), kCurrentPos, &current_pos)); TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env())); TF_RETURN_IF_ERROR(buffered_input_stream_->Seek(current_pos)); } return absl::OkStatus(); } private: Status SetupStreamsLocked(Env* env) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (current_file_index_ >= dataset()->filenames_.size()) { return errors::InvalidArgument( "current_file_index_:", current_file_index_, " >= filenames_.size():", dataset()->filenames_.size()); } TF_RETURN_IF_ERROR(env->NewRandomAccessFile( TranslateFileName(dataset()->filenames_[current_file_index_]), &file_)); input_stream_ = std::make_unique<io::RandomAccessInputStream>(file_.get(), false); if (dataset()->use_compression_) { zlib_input_stream_ = std::make_unique<io::ZlibInputStream>( input_stream_.get(), dataset()->options_.input_buffer_size, dataset()->options_.input_buffer_size, dataset()->options_); buffered_input_stream_ = std::make_unique<io::BufferedInputStream>( zlib_input_stream_.get(), dataset()->options_.input_buffer_size, false); } else { buffered_input_stream_ = std::make_unique<io::BufferedInputStream>( input_stream_.get(), dataset()->options_.input_buffer_size, false); } return absl::OkStatus(); } void ResetStreamsLocked() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) { input_stream_.reset(); zlib_input_stream_.reset(); buffered_input_stream_.reset(); file_.reset(); } mutex mu_; std::unique_ptr<io::RandomAccessInputStream> input_stream_ TF_GUARDED_BY(mu_); std::unique_ptr<io::ZlibInputStream> zlib_input_stream_ TF_GUARDED_BY(mu_); std::unique_ptr<io::BufferedInputStream> buffered_input_stream_ TF_GUARDED_BY(mu_); size_t current_file_index_ TF_GUARDED_BY(mu_) = 0; std::unique_ptr<RandomAccessFile> file_ TF_GUARDED_BY(mu_); }; const std::vector<string> filenames_; const tstring compression_type_; const bool use_compression_; const io::ZlibCompressionOptions options_; }; TextLineDatasetOp::TextLineDatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {} void TextLineDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) { const Tensor* filenames_tensor; OP_REQUIRES_OK(ctx, ctx->input(kFileNames, &filenames_tensor)); OP_REQUIRES( ctx, filenames_tensor->dims() <= 1, errors::InvalidArgument("`filenames` must be a scalar or a vector.")); tstring 
compression_type; OP_REQUIRES_OK(ctx, ParseScalarArgument<tstring>(ctx, kCompressionType, &compression_type)); int64_t buffer_size = -1; OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size)); OP_REQUIRES( ctx, buffer_size >= 0, errors::InvalidArgument("`buffer_size` must be >= 0 (0 == default)")); io::ZlibCompressionOptions zlib_compression_options = io::ZlibCompressionOptions::DEFAULT(); if (compression_type == kZLIB) { zlib_compression_options = io::ZlibCompressionOptions::DEFAULT(); } else if (compression_type == kGZIP) { zlib_compression_options = io::ZlibCompressionOptions::GZIP(); } else { OP_REQUIRES(ctx, compression_type.empty(), errors::InvalidArgument("Unsupported compression_type.")); } if (buffer_size != 0) { zlib_compression_options.input_buffer_size = buffer_size; } std::vector<string> filenames; filenames.reserve(filenames_tensor->NumElements()); for (int i = 0; i < filenames_tensor->NumElements(); ++i) { filenames.push_back(filenames_tensor->flat<tstring>()(i)); metrics::RecordTFDataFilename(kDatasetType, filenames[i]); } LogFilenames(filenames); *output = new Dataset(ctx, std::move(filenames), compression_type, zlib_compression_options); } namespace { REGISTER_KERNEL_BUILDER(Name("TextLineDataset").Device(DEVICE_CPU), TextLineDatasetOp); } } }
#include "tensorflow/core/kernels/data/text_line_dataset_op.h" #include "tensorflow/core/data/dataset_test_base.h" namespace tensorflow { namespace data { namespace { constexpr char kNodeName[] = "text_line_dataset"; tstring LocalTempFilename() { std::string path; CHECK(Env::Default()->LocalTempFilename(&path)); return tstring(path); } class TextLineDatasetParams : public DatasetParams { public: TextLineDatasetParams(std::vector<tstring> filenames, CompressionType compression_type, int64_t buffer_size, string node_name) : DatasetParams({DT_STRING}, {PartialTensorShape({})}, std::move(node_name)), filenames_(std::move(filenames)), compression_type_(compression_type), buffer_size_(buffer_size) {} std::vector<Tensor> GetInputTensors() const override { int num_files = filenames_.size(); return { CreateTensor<tstring>(TensorShape({num_files}), filenames_), CreateTensor<tstring>(TensorShape({}), {ToString(compression_type_)}), CreateTensor<int64_t>(TensorShape({}), {buffer_size_})}; } Status GetInputNames(std::vector<string>* input_names) const override { input_names->clear(); *input_names = { TextLineDatasetOp::kFileNames, TextLineDatasetOp::kCompressionType, TextLineDatasetOp::kBufferSize, }; return absl::OkStatus(); } Status GetAttributes(AttributeVector* attr_vector) const override { attr_vector->clear(); attr_vector->emplace_back("metadata", ""); return absl::OkStatus(); } string dataset_type() const override { return TextLineDatasetOp::kDatasetType; } private: std::vector<tstring> filenames_; CompressionType compression_type_; int64_t buffer_size_; }; class TextLineDatasetOpTest : public DatasetOpsTestBase {}; Status CreateTestFiles(const std::vector<tstring>& filenames, const std::vector<tstring>& contents, CompressionType compression_type) { if (filenames.size() != contents.size()) { return tensorflow::errors::InvalidArgument( "The number of files does not match with the contents"); } CompressionParams params; params.output_buffer_size = 10; params.compression_type = compression_type; for (int i = 0; i < filenames.size(); ++i) { TF_RETURN_IF_ERROR( WriteDataToFile(filenames[i], contents[i].data(), params)); } return absl::OkStatus(); } TextLineDatasetParams TextLineDatasetParams1() { std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()}; std::vector<tstring> contents = { absl::StrCat("hello world\n", "11223334455\n"), absl::StrCat("abcd, EFgH\n", " \n", "$%^&*()\n")}; CompressionType compression_type = CompressionType::ZLIB; if (!CreateTestFiles(filenames, contents, compression_type).ok()) { LOG(WARNING) << "Failed to create the test files: " << absl::StrJoin(filenames, ", "); } return TextLineDatasetParams(filenames, compression_type, 10, kNodeName); } TextLineDatasetParams TextLineDatasetParams2() { std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()}; std::vector<tstring> contents = { absl::StrCat("hello world\n", "11223334455\n"), absl::StrCat("abcd, EFgH\n", " \n", "$%^&*()\n")}; CompressionType compression_type = CompressionType::GZIP; if (!CreateTestFiles(filenames, contents, compression_type).ok()) { LOG(WARNING) << "Failed to create the test files: " << absl::StrJoin(filenames, ", "); } return TextLineDatasetParams(filenames, compression_type, 10, kNodeName); } TextLineDatasetParams TextLineDatasetParams3() { std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()}; std::vector<tstring> contents = { absl::StrCat("hello world\n", "11223334455\n"), absl::StrCat("abcd, EFgH\n", " \n", "$%^&*()\n")}; CompressionType 
compression_type = CompressionType::UNCOMPRESSED; if (!CreateTestFiles(filenames, contents, compression_type).ok()) { LOG(WARNING) << "Failed to create the test files: " << absl::StrJoin(filenames, ", "); } return TextLineDatasetParams(filenames, compression_type, 10, kNodeName); } std::vector<GetNextTestCase<TextLineDatasetParams>> GetNextTestCases() { return {{TextLineDatasetParams1(), CreateTensors<tstring>(TensorShape({}), {{"hello world"}, {"11223334455"}, {"abcd, EFgH"}, {" "}, {"$%^&*()"}})}, {TextLineDatasetParams2(), CreateTensors<tstring>(TensorShape({}), {{"hello world"}, {"11223334455"}, {"abcd, EFgH"}, {" "}, {"$%^&*()"}})}, {TextLineDatasetParams3(), CreateTensors<tstring>(TensorShape({}), {{"hello world"}, {"11223334455"}, {"abcd, EFgH"}, {" "}, {"$%^&*()"}})}}; } ITERATOR_GET_NEXT_TEST_P(TextLineDatasetOpTest, TextLineDatasetParams, GetNextTestCases()) TEST_F(TextLineDatasetOpTest, DatasetNodeName) { auto dataset_params = TextLineDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name())); } TEST_F(TextLineDatasetOpTest, DatasetTypeString) { auto dataset_params = TextLineDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetTypeString( name_utils::OpName(TextLineDatasetOp::kDatasetType))); } TEST_F(TextLineDatasetOpTest, DatasetOutputDtypes) { auto dataset_params = TextLineDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_STRING})); } TEST_F(TextLineDatasetOpTest, DatasetOutputShapes) { auto dataset_params = TextLineDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})})); } TEST_F(TextLineDatasetOpTest, Cardinality) { auto dataset_params = TextLineDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality)); } TEST_F(TextLineDatasetOpTest, IteratorOutputDtypes) { auto dataset_params = TextLineDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_STRING})); } TEST_F(TextLineDatasetOpTest, IteratorOutputShapes) { auto dataset_params = TextLineDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})})); } TEST_F(TextLineDatasetOpTest, IteratorPrefix) { auto dataset_params = TextLineDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix( TextLineDatasetOp::kDatasetType, dataset_params.iterator_prefix()))); } std::vector<IteratorSaveAndRestoreTestCase<TextLineDatasetParams>> IteratorSaveAndRestoreTestCases() { return {{TextLineDatasetParams1(), {0, 2, 6}, CreateTensors<tstring>(TensorShape({}), {{"hello world"}, {"11223334455"}, {"abcd, EFgH"}, {" "}, {"$%^&*()"}})}, {TextLineDatasetParams2(), {0, 2, 6}, CreateTensors<tstring>(TensorShape({}), {{"hello world"}, {"11223334455"}, {"abcd, EFgH"}, {" "}, {"$%^&*()"}})}, {TextLineDatasetParams3(), {0, 2, 6}, CreateTensors<tstring>(TensorShape({}), {{"hello world"}, {"11223334455"}, {"abcd, EFgH"}, {" "}, {"$%^&*()"}})}}; } ITERATOR_SAVE_AND_RESTORE_TEST_P(TextLineDatasetOpTest, TextLineDatasetParams, IteratorSaveAndRestoreTestCases()) } } }
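A hypothetical fourth fixture, shown only to illustrate the pattern shared by the three fixtures above (one temp file per filename, best-effort file creation with a warning on failure, buffer size 10); it is not part of the test file and the literal contents are arbitrary.

TextLineDatasetParams TextLineDatasetParams4() {
  std::vector<tstring> filenames = {LocalTempFilename()};
  std::vector<tstring> contents = {absl::StrCat("single line\n")};
  CompressionType compression_type = CompressionType::UNCOMPRESSED;
  if (!CreateTestFiles(filenames, contents, compression_type).ok()) {
    LOG(WARNING) << "Failed to create the test files: "
                 << absl::StrJoin(filenames, ", ");
  }
  return TextLineDatasetParams(filenames, compression_type,
                               /*buffer_size=*/10, kNodeName);
}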
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/text_line_dataset_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/text_line_dataset_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7bcdd45e-32f6-4fdb-87c7-38510f150311
cpp
google/tensorstore
transaction
tensorstore/kvstore/transaction.cc
tensorstore/kvstore/transaction_test.cc
#include "tensorstore/kvstore/transaction.h" #include <stddef.h> #include <stdint.h> #include <cassert> #include <iterator> #include <memory> #include <new> #include <optional> #include <string> #include <string_view> #include <utility> #include "absl/base/optimization.h" #include "absl/container/btree_map.h" #include "absl/functional/function_ref.h" #include "absl/status/status.h" #include "absl/synchronization/mutex.h" #include "absl/time/time.h" #include "absl/types/compare.h" #include "tensorstore/internal/compare.h" #include "tensorstore/internal/intrusive_ptr.h" #include "tensorstore/internal/metrics/counter.h" #include "tensorstore/internal/metrics/metadata.h" #include "tensorstore/internal/source_location.h" #include "tensorstore/kvstore/byte_range.h" #include "tensorstore/kvstore/driver.h" #include "tensorstore/kvstore/generation.h" #include "tensorstore/kvstore/key_range.h" #include "tensorstore/kvstore/operations.h" #include "tensorstore/kvstore/read_modify_write.h" #include "tensorstore/transaction.h" #include "tensorstore/util/execution/execution.h" #include "tensorstore/util/execution/future_sender.h" #include "tensorstore/util/future.h" #include "tensorstore/util/result.h" #include "tensorstore/util/status.h" #include "tensorstore/util/str_cat.h" namespace tensorstore { namespace internal_kvstore { namespace { auto& kvstore_transaction_retries = internal_metrics::Counter<int64_t>::New( "/tensorstore/kvstore/transaction_retries", internal_metrics::MetricMetadata("Count of kvstore transaction retries")); template <typename Controller> void ReportWritebackError(Controller controller, std::string_view action, const absl::Status& error, SourceLocation loc = SourceLocation::current()) { controller.Error(kvstore::Driver::AnnotateErrorWithKeyDescription( controller.DescribeKey(controller.GetKey()), action, error, loc)); } template <typename Controller> void PerformWriteback(Driver* driver, Controller controller, ReadResult read_result) { if (!StorageGeneration::IsDirty(read_result.stamp.generation)) { if (!StorageGeneration::IsConditional(read_result.stamp.generation) || read_result.stamp.time > controller.GetTransactionNode() .transaction() ->commit_start_time()) { controller.Success(std::move(read_result.stamp)); return; } ReadOptions read_options; auto if_not_equal = StorageGeneration::Clean(std::move(read_result.stamp.generation)); read_options.generation_conditions.if_not_equal = if_not_equal; read_options.byte_range = OptionalByteRangeRequest{0, 0}; auto future = driver->Read(controller.GetKey(), std::move(read_options)); future.Force(); std::move(future).ExecuteWhenReady( [controller, if_not_equal = std::move(if_not_equal)]( ReadyFuture<ReadResult> future) mutable { auto& r = future.result(); if (!r.ok()) { ReportWritebackError(controller, "reading", r.status()); } else if (r->aborted() || r->stamp.generation == if_not_equal) { controller.Success(std::move(r->stamp)); } else { controller.Retry(r->stamp.time); } }); return; } WriteOptions write_options; assert(!read_result.aborted()); write_options.generation_conditions.if_equal = StorageGeneration::Clean(std::move(read_result.stamp.generation)); auto future = driver->Write(controller.GetKey(), std::move(read_result).optional_value(), std::move(write_options)); future.Force(); std::move(future).ExecuteWhenReady( [controller](ReadyFuture<TimestampedStorageGeneration> future) mutable { auto& r = future.result(); if (!r.ok()) { ReportWritebackError(controller, "writing", r.status()); } else if 
(StorageGeneration::IsUnknown(r->generation)) { controller.Retry(r->time); } else { controller.Success(std::move(*r)); } }); } void StartWriteback(ReadModifyWriteEntry& entry, absl::Time staleness_bound = absl::InfinitePast()); void DeletedEntryDone(DeleteRangeEntry& dr_entry, bool error, size_t count = 1); void EntryDone(SinglePhaseMutation& single_phase_mutation, bool error, size_t count = 1); [[maybe_unused]] void CheckInvariants(ReadModifyWriteEntry* entry) { do { assert(!(entry->flags_ & ReadModifyWriteEntry::kDeleted)); if (entry->prev_) { assert(entry->prev_->single_phase_mutation().phase_number_ <= entry->single_phase_mutation().phase_number_); assert(entry->prev_->key_ == entry->key_); } entry = entry->prev_; } while (entry); } [[maybe_unused]] void CheckInvariants(MultiPhaseMutation& multi_phase, bool commit_started) { absl::btree_map<size_t, size_t> phase_entry_count; for (auto* single_phase_mutation = &multi_phase.phases_;;) { if (single_phase_mutation != &multi_phase.phases_) { assert(single_phase_mutation->phase_number_ > single_phase_mutation->prev_->phase_number_); } for (MutationEntry * tree_entry = single_phase_mutation->entries_.begin().to_pointer(), *tree_next; tree_entry; tree_entry = tree_next) { ++phase_entry_count[tree_entry->single_phase_mutation().phase_number_]; if (commit_started) { assert(&tree_entry->single_phase_mutation() == single_phase_mutation); } else { assert(&tree_entry->single_phase_mutation() == single_phase_mutation || single_phase_mutation == multi_phase.phases_.prev_); } tree_next = MutationEntryTree::Traverse(*tree_entry, MutationEntryTree::kRight); if (tree_next) { assert(tree_next->key_ > tree_entry->key_); if (tree_entry->entry_type() != kReadModifyWrite) { [[maybe_unused]] auto* dr_entry = static_cast<DeleteRangeEntry*>(tree_entry); assert(KeyRange::CompareExclusiveMaxAndKey(dr_entry->exclusive_max_, tree_next->key_) <= 0); } } if (tree_entry->entry_type() == kReadModifyWrite) { auto* rmw_entry = static_cast<ReadModifyWriteEntry*>(tree_entry); CheckInvariants(rmw_entry); } else { auto* dr_entry = static_cast<DeleteRangeEntry*>(tree_entry); if (dr_entry->entry_type() == kDeleteRangePlaceholder) { --phase_entry_count[tree_entry->single_phase_mutation() .phase_number_]; assert(dr_entry->superseded_.empty()); } assert(KeyRange::CompareKeyAndExclusiveMax( dr_entry->key_, dr_entry->exclusive_max_) < 0); for (ReadModifyWriteEntryTree::iterator entry = dr_entry->superseded_.begin(), next; entry != dr_entry->superseded_.end(); entry = next) { next = std::next(entry); if (next) { assert(next->key_ > entry->key_); } assert(entry->entry_type() == kReadModifyWrite); assert(&entry->single_phase_mutation() == &dr_entry->single_phase_mutation()); assert(entry->key_ >= dr_entry->key_); assert(KeyRange::CompareKeyAndExclusiveMax( entry->key_, dr_entry->exclusive_max_) < 0); assert(entry->flags_ & ReadModifyWriteEntry::kDeleted); if (entry->prev_) { CheckInvariants(entry->prev_); } } } } single_phase_mutation = single_phase_mutation->next_; if (single_phase_mutation == &multi_phase.phases_) break; } for (auto* single_phase_mutation = &multi_phase.phases_; single_phase_mutation->next_ != &multi_phase.phases_; single_phase_mutation = single_phase_mutation->next_) { if (single_phase_mutation->phase_number_ < multi_phase.GetTransactionNode().phase()) { assert(single_phase_mutation->entries_.empty()); } } } #ifdef TENSORSTORE_INTERNAL_KVSTORETORE_TRANSACTION_DEBUG inline void DebugCheckInvariants(MultiPhaseMutation& multi_phase, bool commit_started) { 
CheckInvariants(multi_phase, commit_started); } class DebugCheckInvariantsInDestructor { public: explicit DebugCheckInvariantsInDestructor(MultiPhaseMutation& multi_phase, bool commit_started) : multi_phase_(multi_phase), commit_started_(commit_started) {} ~DebugCheckInvariantsInDestructor() { CheckInvariants(multi_phase_, commit_started_); } private: MultiPhaseMutation& multi_phase_; bool commit_started_; }; #else inline void DebugCheckInvariants(MultiPhaseMutation& multi_phase, bool commit_started) {} class DebugCheckInvariantsInDestructor { public: explicit DebugCheckInvariantsInDestructor(MultiPhaseMutation& multi_phase, bool commit_started) {} }; #endif void DestroyReadModifyWriteSequence(ReadModifyWriteEntry* entry) { if (auto* next_rmw = entry->next_read_modify_write()) { next_rmw->prev_ = nullptr; } auto& multi_phase = entry->multi_phase(); while (true) { auto* prev = entry->prev_; multi_phase.FreeReadModifyWriteEntry(entry); if (!prev) break; entry = prev; } } auto CompareToEntry(MutationEntry& e) { return [&e](MutationEntry& other) { return internal::CompareResultAsWeakOrdering(e.key_.compare(other.key_)); }; } void InsertIntoPriorPhase(MutationEntry* entry) { if (entry->entry_type() == kDeleteRangePlaceholder) { delete static_cast<DeleteRangeEntry*>(entry); return; } entry->single_phase_mutation().entries_.FindOrInsert( CompareToEntry(*entry), [entry] { return entry; }); } DeleteRangeEntry* MakeDeleteRangeEntry( MutationEntryType entry_type, SinglePhaseMutation& assigned_single_phase_mutation, KeyRange&& range) { auto* entry = new DeleteRangeEntry; entry->key_ = std::move(range.inclusive_min); entry->exclusive_max_ = std::move(range.exclusive_max); entry->single_phase_mutation_ = {&assigned_single_phase_mutation, static_cast<uintptr_t>(entry_type)}; return entry; } DeleteRangeEntry* InsertDeleteRangeEntry( MutationEntryType entry_type, SinglePhaseMutation& insert_single_phase_mutation, SinglePhaseMutation& assigned_single_phase_mutation, KeyRange&& range, MutationEntryTree::InsertPosition position) { assert(entry_type == kDeleteRange || entry_type == kDeleteRangePlaceholder); auto* entry = MakeDeleteRangeEntry(entry_type, assigned_single_phase_mutation, std::move(range)); insert_single_phase_mutation.entries_.Insert(position, *entry); return entry; } ReadModifyWriteEntry* MakeReadModifyWriteEntry( SinglePhaseMutation& assigned_single_phase_mutation, std::string&& key) { auto* entry = assigned_single_phase_mutation.multi_phase_ ->AllocateReadModifyWriteEntry(); entry->key_ = std::move(key); entry->single_phase_mutation_ = {&assigned_single_phase_mutation, 0}; return entry; } SinglePhaseMutation& GetCurrentSinglePhaseMutation( MultiPhaseMutation& multi_phase) { size_t phase = multi_phase.GetTransactionNode().transaction()->phase(); SinglePhaseMutation* single_phase_mutation; if (multi_phase.phases_.phase_number_ == internal::TransactionState::kInvalidPhase) { single_phase_mutation = &multi_phase.phases_; single_phase_mutation->phase_number_ = phase; } else { single_phase_mutation = multi_phase.phases_.prev_; assert(single_phase_mutation->phase_number_ <= phase); if (single_phase_mutation->phase_number_ != phase) { auto* new_single_phase_mutation = new SinglePhaseMutation; std::swap(new_single_phase_mutation->entries_, single_phase_mutation->entries_); new_single_phase_mutation->next_ = &multi_phase.phases_; new_single_phase_mutation->prev_ = single_phase_mutation; new_single_phase_mutation->phase_number_ = phase; new_single_phase_mutation->prev_->next_ = new_single_phase_mutation; 
new_single_phase_mutation->next_->prev_ = new_single_phase_mutation; new_single_phase_mutation->multi_phase_ = &multi_phase; single_phase_mutation = new_single_phase_mutation; } } return *single_phase_mutation; } struct Controller { ReadModifyWriteEntry* entry_; internal::TransactionState::Node& GetTransactionNode() { return entry_->multi_phase().GetTransactionNode(); } std::string DescribeKey(std::string_view key) { return entry_->multi_phase().DescribeKey(key); } const Key& GetKey() { return entry_->key_; } void Success(TimestampedStorageGeneration new_stamp) { if (auto* dr_entry = static_cast<DeleteRangeEntry*>(entry_->next_)) { DeletedEntryDone(*dr_entry, false); return; } WritebackSuccess(*entry_, std::move(new_stamp)); EntryDone(entry_->single_phase_mutation(), false); } void Error(absl::Status error) { auto* dr_entry = static_cast<DeleteRangeEntry*>(entry_->next_); auto& single_phase_mutation = entry_->single_phase_mutation(); entry_->multi_phase().RecordEntryWritebackError(*entry_, std::move(error)); if (dr_entry) { DeletedEntryDone(*dr_entry, true); } else { EntryDone(single_phase_mutation, true); } } void Retry(absl::Time time) { kvstore_transaction_retries.Increment(); StartWriteback(*entry_, time); } }; void ReceiveWritebackCommon(ReadModifyWriteEntry& entry, ReadResult& read_result) { TENSORSTORE_KVSTORE_DEBUG_LOG( entry, "ReceiveWritebackCommon: state=", read_result.state, ", stamp=", read_result.stamp); auto flags = (entry.flags_ & ~(ReadModifyWriteEntry::kTransitivelyUnconditional | ReadModifyWriteEntry::kDirty | ReadModifyWriteEntry::kTransitivelyDirty)) | ReadModifyWriteEntry::kWritebackProvided; if (!StorageGeneration::IsConditional(read_result.stamp.generation)) { flags |= ReadModifyWriteEntry::kTransitivelyUnconditional; } if (read_result.stamp.generation.ClearNewlyDirty()) { flags |= ReadModifyWriteEntry::kDirty; } if (read_result.state != ReadResult::kUnspecified) { flags |= ReadModifyWriteEntry::kTransitivelyDirty; } entry.flags_ = flags; } void StartWriteback(ReadModifyWriteEntry& entry, absl::Time staleness_bound) { TENSORSTORE_KVSTORE_DEBUG_LOG( entry, "StartWriteback: staleness_bound=", staleness_bound); for (auto* e = &entry;;) { e->flags_ &= ~(ReadModifyWriteEntry::kWritebackProvided | ReadModifyWriteEntry::kTransitivelyDirty); e = e->prev_; if (!e) break; } ReadModifyWriteSource::WritebackOptions writeback_options; writeback_options.staleness_bound = staleness_bound; writeback_options.writeback_mode = (entry.flags_ & ReadModifyWriteEntry::kDeleted) ? 
ReadModifyWriteSource::kValidateOnly : ReadModifyWriteSource::kNormalWriteback; if (!entry.prev_ && !(entry.flags_ & ReadModifyWriteEntry::kDeleted)) { struct WritebackReceiverImpl { ReadModifyWriteEntry* entry_; void set_error(absl::Status error) { ReportWritebackError(Controller{entry_}, "writing", error); } void set_cancel() { ABSL_UNREACHABLE(); } void set_value(ReadResult read_result) { ReceiveWritebackCommon(*entry_, read_result); entry_->multi_phase().Writeback(*entry_, *entry_, std::move(read_result)); } }; entry.source_->KvsWriteback(std::move(writeback_options), WritebackReceiverImpl{&entry}); return; } struct SequenceWritebackReceiverImpl { struct State { ReadModifyWriteEntry* entry; absl::Time staleness_bound; ReadResult read_result; ReadModifyWriteEntry* source_entry = nullptr; ReadModifyWriteEntry* GetLastReadModifyWriteEntry() { auto* e = entry; while (auto* next = e->next_read_modify_write()) e = next; return e; } }; std::unique_ptr<State> state_; void set_error(absl::Status error) { ReportWritebackError(Controller{state_->GetLastReadModifyWriteEntry()}, "writing", error); } void set_cancel() { ABSL_UNREACHABLE(); } void set_value(ReadResult read_result) { auto& entry = *state_->entry; ReceiveWritebackCommon(entry, read_result); if (!state_->entry->next_ && !(state_->entry->flags_ & ReadModifyWriteEntry::kDeleted)) { state_->read_result = std::move(read_result); state_->source_entry = &entry; } else { assert(!StorageGeneration::IsConditional( state_->read_result.stamp.generation)); if (state_->read_result.state == ReadResult::kUnspecified) { TENSORSTORE_KVSTORE_DEBUG_LOG( entry, "Replacing: existing_result state=", state_->read_result.state, ", stamp=", state_->read_result.stamp, ", new_result state=", read_result.state, ", stamp=", read_result.stamp); state_->read_result = std::move(read_result); state_->source_entry = &entry; } else { state_->read_result.stamp.time = read_result.stamp.time; TENSORSTORE_KVSTORE_DEBUG_LOG(entry, "Conditioning: existing_stamp=", state_->read_result.stamp.generation, ", new_stamp=", read_result.stamp); state_->read_result.stamp.generation = StorageGeneration::Condition( state_->read_result.stamp.generation, std::move(read_result.stamp.generation)); } } if (entry.flags_ & ReadModifyWriteEntry::kTransitivelyUnconditional) { const bool unmodified = state_->read_result.state == ReadResult::kUnspecified; auto GetPrevSupersededEntryToWriteback = [&](ReadModifyWriteEntry* entry) -> ReadModifyWriteEntry* { while (true) { entry = entry->prev_; if (!entry) return nullptr; if (unmodified) { if (!(entry->flags_ & ReadModifyWriteEntry::kWritebackProvided) || (entry->flags_ & ReadModifyWriteEntry::kTransitivelyDirty)) { return entry; } } else { if (!(entry->flags_ & (ReadModifyWriteEntry::kWritebackProvided | ReadModifyWriteEntry::kTransitivelyUnconditional))) { return entry; } } } }; if (auto* prev = GetPrevSupersededEntryToWriteback(&entry)) { state_->entry = prev; TENSORSTORE_KVSTORE_DEBUG_LOG(*prev, "Continuing writeback validate only"); ReadModifyWriteSource::WritebackOptions writeback_options; writeback_options.staleness_bound = state_->staleness_bound; writeback_options.writeback_mode = unmodified ? 
ReadModifyWriteSource::kNormalWriteback : ReadModifyWriteSource::kValidateOnly; prev->source_->KvsWriteback(std::move(writeback_options), std::move(*this)); return; } } auto* last_entry = state_->GetLastReadModifyWriteEntry(); if (last_entry->next_) { state_->read_result.state = ReadResult::kUnspecified; } TENSORSTORE_KVSTORE_DEBUG_LOG(*last_entry, "No remaining skipped entries, forwarding " "to MultiPhaseMutation::Writeback: ", state_->read_result.stamp); last_entry->multi_phase().Writeback( *last_entry, state_->source_entry ? *state_->source_entry : *last_entry, std::move(state_->read_result)); } }; auto state = std::unique_ptr<SequenceWritebackReceiverImpl::State>( new SequenceWritebackReceiverImpl::State{&entry, staleness_bound}); if (entry.flags_ & ReadModifyWriteEntry::kDeleted) { state->read_result.state = ReadResult::kMissing; } entry.source_->KvsWriteback(std::move(writeback_options), SequenceWritebackReceiverImpl{std::move(state)}); } void HandleDeleteRangeDone(DeleteRangeEntry& dr_entry) { const bool error = dr_entry.remaining_entries_.HasError(); if (error) { WritebackError(dr_entry); } else { WritebackSuccess(dr_entry); } EntryDone(dr_entry.single_phase_mutation(), error); } void DeletedEntryDone(DeleteRangeEntry& dr_entry, bool error, size_t count) { if (error) dr_entry.remaining_entries_.SetError(); if (!dr_entry.remaining_entries_.DecrementCount(count)) return; if (dr_entry.remaining_entries_.HasError()) { HandleDeleteRangeDone(dr_entry); return; } dr_entry.multi_phase().WritebackDelete(dr_entry); } std::string DescribeEntry(MutationEntry& entry) { return tensorstore::StrCat( entry.entry_type() == kReadModifyWrite ? "read/write " : "delete ", entry.multi_phase().DescribeKey(entry.key_)); } void EntryDone(SinglePhaseMutation& single_phase_mutation, bool error, size_t count) { auto& multi_phase = *single_phase_mutation.multi_phase_; if (error) single_phase_mutation.remaining_entries_.SetError(); if (!single_phase_mutation.remaining_entries_.DecrementCount(count)) { return; } multi_phase.AllEntriesDone(single_phase_mutation); } } void ReadModifyWriteEntry::KvsRead( ReadModifyWriteTarget::TransactionalReadOptions options, ReadModifyWriteTarget::ReadReceiver receiver) { struct ReadReceiverImpl { ReadModifyWriteEntry* entry_; ReadModifyWriteTarget::ReadReceiver receiver_; void set_cancel() { execution::set_cancel(receiver_); } void set_value(ReadResult read_result) { { assert(!StorageGeneration::IsUnknown(read_result.stamp.generation)); absl::MutexLock lock(&entry_->mutex()); ReceiveWritebackCommon(*entry_->prev_, read_result); entry_->flags_ |= (entry_->prev_->flags_ & ReadModifyWriteEntry::kTransitivelyUnconditional); } execution::set_value(receiver_, std::move(read_result)); } void set_error(absl::Status error) { execution::set_error(receiver_, std::move(error)); } }; if (flags_ & ReadModifyWriteEntry::kPrevDeleted) { execution::set_value( receiver, ReadResult::Missing( {StorageGeneration::Dirty(StorageGeneration::Unknown()), absl::InfiniteFuture()})); } else if (prev_) { TENSORSTORE_KVSTORE_DEBUG_LOG(*prev_, "Requesting writeback for read"); ReadModifyWriteSource::WritebackOptions writeback_options; writeback_options.generation_conditions.if_not_equal = std::move(options.generation_conditions.if_not_equal); writeback_options.staleness_bound = options.staleness_bound; writeback_options.writeback_mode = ReadModifyWriteSource::kSpecifyUnchangedWriteback; this->prev_->source_->KvsWriteback( std::move(writeback_options), ReadReceiverImpl{this, std::move(receiver)}); } else { 
multi_phase().Read(*this, std::move(options), std::move(receiver)); } } bool ReadModifyWriteEntry::KvsReadsCommitted() { return prev_ == nullptr && !(flags_ & ReadModifyWriteEntry::kPrevDeleted) && multi_phase().MultiPhaseReadsCommitted(); } void DestroyPhaseEntries(SinglePhaseMutation& single_phase_mutation) { auto& multi_phase = *single_phase_mutation.multi_phase_; for (MutationEntryTree::iterator tree_entry = single_phase_mutation.entries_.begin(), tree_next; tree_entry != single_phase_mutation.entries_.end(); tree_entry = tree_next) { tree_next = std::next(tree_entry); single_phase_mutation.entries_.Remove(*tree_entry); if (tree_entry->entry_type() == kReadModifyWrite) { DestroyReadModifyWriteSequence( static_cast<ReadModifyWriteEntry*>(&*tree_entry)); } else { auto& dr_entry = static_cast<DeleteRangeEntry&>(*tree_entry); for (ReadModifyWriteEntryTree::iterator entry = dr_entry.superseded_.begin(), next; entry != dr_entry.superseded_.end(); entry = next) { next = std::next(entry); dr_entry.superseded_.Remove(*entry); DestroyReadModifyWriteSequence(entry.to_pointer()); } delete &dr_entry; } } if (&single_phase_mutation != &multi_phase.phases_) { single_phase_mutation.prev_->next_ = single_phase_mutation.next_; single_phase_mutation.next_->prev_ = single_phase_mutation.prev_; delete &single_phase_mutation; } } namespace { void InvalidateReadStateGoingBackward(ReadModifyWriteEntry* entry) { do { entry->source_->KvsInvalidateReadState(); entry = entry->prev_; } while (entry); } } void InvalidateReadState(SinglePhaseMutation& single_phase_mutation) { for (auto& entry : single_phase_mutation.entries_) { if (entry.entry_type() == kReadModifyWrite) { InvalidateReadStateGoingBackward( static_cast<ReadModifyWriteEntry*>(&entry)); } else { for (auto& deleted_entry : static_cast<DeleteRangeEntry&>(entry).superseded_) { InvalidateReadStateGoingBackward(&deleted_entry); } } } } void WritebackSuccess(ReadModifyWriteEntry& entry, TimestampedStorageGeneration new_stamp) { assert(!entry.next_read_modify_write()); for (ReadModifyWriteEntry* e = &entry;;) { e->source_->KvsWritebackSuccess(new_stamp); bool dirty = static_cast<bool>(e->flags_ & ReadModifyWriteEntry::kDirty); e = e->prev_; if (!e) break; if (dirty || !(e->flags_ & ReadModifyWriteEntry::kWritebackProvided)) { new_stamp.generation = StorageGeneration::Unknown(); new_stamp.time = absl::InfiniteFuture(); } } } void WritebackError(ReadModifyWriteEntry& entry) { assert(!entry.next_read_modify_write()); if (entry.flags_ & ReadModifyWriteEntry::kError) return; entry.flags_ |= ReadModifyWriteEntry::kError; for (ReadModifyWriteEntry* e = &entry;;) { e->source_->KvsWritebackError(); e = e->prev_; if (!e) break; } } void WritebackError(DeleteRangeEntry& entry) { for (auto& e : entry.superseded_) { WritebackError(e); } } void WritebackSuccess(DeleteRangeEntry& entry) { for (auto& e : entry.superseded_) { WritebackSuccess(e, TimestampedStorageGeneration{StorageGeneration::Unknown(), absl::InfiniteFuture()}); } } void WritebackError(MutationEntry& entry) { if (entry.entry_type() == kReadModifyWrite) { WritebackError(static_cast<ReadModifyWriteEntry&>(entry)); } else { WritebackError(static_cast<DeleteRangeEntry&>(entry)); } } void WritebackError(SinglePhaseMutation& single_phase_mutation) { for (auto& entry : single_phase_mutation.entries_) { WritebackError(entry); } } MultiPhaseMutation::MultiPhaseMutation() { phases_.next_ = phases_.prev_ = &phases_; phases_.phase_number_ = internal::TransactionState::kInvalidPhase; phases_.multi_phase_ = this; } 
SinglePhaseMutation& MultiPhaseMutation::GetCommittingPhase() { auto* single_phase_mutation = &phases_; auto initial_phase_number = single_phase_mutation->phase_number_; if (initial_phase_number != this->GetTransactionNode().phase() && initial_phase_number != internal::TransactionState::kInvalidPhase) { single_phase_mutation = single_phase_mutation->next_; assert(single_phase_mutation->phase_number_ == this->GetTransactionNode().phase()); } return *single_phase_mutation; } void MultiPhaseMutation::AllEntriesDone( SinglePhaseMutation& single_phase_mutation) { size_t next_phase = 0; if (single_phase_mutation.next_ != &this->phases_) { next_phase = single_phase_mutation.next_->phase_number_; } DestroyPhaseEntries(single_phase_mutation); this->PhaseCommitDone(next_phase); } namespace { void InvalidateReadStateGoingForward(ReadModifyWriteEntry* entry) { auto& single_phase_mutation = entry->single_phase_mutation(); do { entry->source_->KvsInvalidateReadState(); entry->flags_ &= ~ReadModifyWriteEntry::kTransitivelyUnconditional; entry = entry->next_read_modify_write(); } while (entry && (&entry->single_phase_mutation() == &single_phase_mutation)); } void WritebackPhase( SinglePhaseMutation& single_phase_mutation, absl::Time staleness_bound, absl::FunctionRef<bool(ReadModifyWriteEntry& entry)> predicate) { assert(single_phase_mutation.remaining_entries_.IsDone()); size_t entry_count = 0; for (auto& entry : single_phase_mutation.entries_) { if (entry.entry_type() == kReadModifyWrite) { auto& rmw_entry = static_cast<ReadModifyWriteEntry&>(entry); if (auto* next = static_cast<ReadModifyWriteEntry*>(rmw_entry.next_)) { assert(next->entry_type() == kReadModifyWrite); assert(&next->single_phase_mutation() != &single_phase_mutation); next->prev_ = nullptr; InvalidateReadStateGoingForward(next); rmw_entry.next_ = nullptr; } if (predicate(rmw_entry)) { ++entry_count; StartWriteback(rmw_entry, staleness_bound); } } else { auto& dr_entry = static_cast<DeleteRangeEntry&>(entry); assert(dr_entry.remaining_entries_.IsDone()); ++entry_count; size_t deleted_entry_count = 0; for (auto& deleted_entry : dr_entry.superseded_) { auto& rmw_entry = static_cast<ReadModifyWriteEntry&>(deleted_entry); rmw_entry.next_ = &dr_entry; if (predicate(rmw_entry)) { ++deleted_entry_count; StartWriteback(static_cast<ReadModifyWriteEntry&>(deleted_entry), staleness_bound); } } DeletedEntryDone(dr_entry, false, -deleted_entry_count); } } EntryDone(single_phase_mutation, false, -entry_count); } } void MultiPhaseMutation::CommitNextPhase() { size_t cur_phase_number = GetTransactionNode().phase(); DebugCheckInvariants(*this, false); { DebugCheckInvariantsInDestructor debug_check(*this, true); if (cur_phase_number == 0) { if (phases_.next_ != &phases_) { auto* last_phase = phases_.prev_; for (MutationEntryTree::iterator entry = last_phase->entries_.begin(), next; entry != last_phase->entries_.end(); entry = next) { next = std::next(entry); if (&entry->single_phase_mutation() != last_phase) { last_phase->entries_.Remove(*entry); InsertIntoPriorPhase(entry.to_pointer()); } } } if (cur_phase_number != phases_.phase_number_) { this->PhaseCommitDone(phases_.phase_number_); return; } } } auto& single_phase_mutation = GetCommittingPhase(); WritebackPhase(single_phase_mutation, absl::InfinitePast(), [](ReadModifyWriteEntry& entry) { return true; }); } void MultiPhaseMutation::AbortRemainingPhases() { for (auto* single_phase_mutation = &phases_;;) { auto* next = single_phase_mutation->next_; DestroyPhaseEntries(*single_phase_mutation); if (next == 
&phases_) break; single_phase_mutation = next; } } MultiPhaseMutation::ReadModifyWriteStatus MultiPhaseMutation::ReadModifyWrite( size_t& phase, Key key, ReadModifyWriteSource& source) { DebugCheckInvariantsInDestructor debug_check(*this, false); #ifndef NDEBUG mutex().AssertHeld(); #endif auto& single_phase_mutation = GetCurrentSinglePhaseMutation(*this); phase = single_phase_mutation.phase_number_; auto* entry = MakeReadModifyWriteEntry(single_phase_mutation, std::move(key)); entry->source_ = &source; entry->source_->KvsSetTarget(*entry); auto find_result = single_phase_mutation.entries_.Find( [key = std::string_view(entry->key_)](MutationEntry& existing_entry) { auto c = key.compare(existing_entry.key_); if (c <= 0) return internal::CompareResultAsWeakOrdering(c); if (existing_entry.entry_type() == kReadModifyWrite) { return absl::weak_ordering::greater; } return KeyRange::CompareKeyAndExclusiveMax( key, static_cast<DeleteRangeEntry&>(existing_entry) .exclusive_max_) < 0 ? absl::weak_ordering::equivalent : absl::weak_ordering::greater; }); if (!find_result.found) { const bool was_empty = single_phase_mutation.entries_.empty(); single_phase_mutation.entries_.Insert(find_result.insert_position(), *entry); return was_empty ? ReadModifyWriteStatus::kAddedFirst : ReadModifyWriteStatus::kAddedSubsequent; } single_phase_mutation.entries_.Replace(*find_result.node, *entry); if (find_result.node->entry_type() == kReadModifyWrite) { auto* existing_entry = static_cast<ReadModifyWriteEntry*>(find_result.node); assert(existing_entry->key_ == entry->key_); if (&existing_entry->single_phase_mutation() != &single_phase_mutation) { InsertIntoPriorPhase(existing_entry); } existing_entry->source_->KvsRevoke(); assert(existing_entry->next_ == nullptr); entry->prev_ = existing_entry; existing_entry->next_ = entry; return ReadModifyWriteStatus::kExisting; } auto* existing_entry = static_cast<DeleteRangeEntry*>(find_result.node); assert(existing_entry->key_ <= entry->key_); assert(KeyRange::CompareKeyAndExclusiveMax( entry->key_, existing_entry->exclusive_max_) < 0); entry->flags_ |= (ReadModifyWriteEntry::kPrevDeleted | ReadModifyWriteEntry::kTransitivelyUnconditional); if (&existing_entry->single_phase_mutation() != &single_phase_mutation) { if (existing_entry->key_ != entry->key_) { InsertDeleteRangeEntry(kDeleteRangePlaceholder, single_phase_mutation, existing_entry->single_phase_mutation(), KeyRange{existing_entry->key_, entry->key_}, {entry, MutationEntryTree::kLeft}); } if (auto successor = KeyRange::Successor(entry->key_); successor != existing_entry->exclusive_max_) { InsertDeleteRangeEntry( kDeleteRangePlaceholder, single_phase_mutation, existing_entry->single_phase_mutation(), KeyRange{std::move(successor), existing_entry->exclusive_max_}, {entry, MutationEntryTree::kRight}); } InsertIntoPriorPhase(existing_entry); return ReadModifyWriteStatus::kExisting; } auto split_result = existing_entry->superseded_.FindSplit( [key = std::string_view(entry->key_)](MutationEntry& e) { return internal::CompareResultAsWeakOrdering(key.compare(e.key_)); }); if (split_result.center) { split_result.center->flags_ &= ~ReadModifyWriteEntry::kDeleted; entry->prev_ = split_result.center; split_result.center->next_ = entry; } if (existing_entry->key_ != entry->key_) { auto* dr_entry = InsertDeleteRangeEntry( kDeleteRange, single_phase_mutation, existing_entry->single_phase_mutation(), KeyRange{std::move(existing_entry->key_), entry->key_}, {entry, MutationEntryTree::kLeft}); dr_entry->superseded_ = 
std::move(split_result.trees[0]); } else { assert(split_result.trees[0].empty()); } existing_entry->key_ = KeyRange::Successor(entry->key_); if (existing_entry->key_ != existing_entry->exclusive_max_) { single_phase_mutation.entries_.Insert({entry, MutationEntryTree::kRight}, *existing_entry); existing_entry->superseded_ = std::move(split_result.trees[1]); } else { assert(split_result.trees[1].empty()); delete existing_entry; } return ReadModifyWriteStatus::kExisting; } void MultiPhaseMutation::DeleteRange(KeyRange range) { #ifndef NDEBUG mutex().AssertHeld(); #endif if (range.empty()) return; DebugCheckInvariantsInDestructor debug_check(*this, false); auto& single_phase_mutation = GetCurrentSinglePhaseMutation(*this); auto find_result = single_phase_mutation.entries_.FindBound<MutationEntryTree::kLeft>( [&](MutationEntry& existing_entry) { if (existing_entry.entry_type() == kReadModifyWrite) { return existing_entry.key_ < range.inclusive_min; } else { return KeyRange::CompareExclusiveMaxAndKey( static_cast<DeleteRangeEntry&>(existing_entry) .exclusive_max_, range.inclusive_min) <= 0; } }); DeleteRangeEntry* new_entry = nullptr; ReadModifyWriteEntryTree superseded; DeleteRangeEntry insert_placeholder; single_phase_mutation.entries_.Insert(find_result.insert_position(), insert_placeholder); for (MutationEntry *existing_entry = find_result.found_node(), *next; existing_entry; existing_entry = next) { if (KeyRange::CompareKeyAndExclusiveMax(existing_entry->key_, range.exclusive_max) >= 0) { break; } next = MutationEntryTree::Traverse(*existing_entry, MutationEntryTree::kRight); single_phase_mutation.entries_.Remove(*existing_entry); if (existing_entry->entry_type() == kReadModifyWrite) { auto* existing_rmw_entry = static_cast<ReadModifyWriteEntry*>(existing_entry); existing_rmw_entry->source_->KvsRevoke(); if (&existing_rmw_entry->single_phase_mutation() != &single_phase_mutation) { InsertIntoPriorPhase(existing_entry); } else { existing_rmw_entry->flags_ |= ReadModifyWriteEntry::kDeleted; [[maybe_unused]] bool inserted = superseded .FindOrInsert(CompareToEntry(*existing_rmw_entry), [=] { return existing_rmw_entry; }) .second; assert(inserted); } } else { auto* existing_dr_entry = static_cast<DeleteRangeEntry*>(existing_entry); if (&existing_dr_entry->single_phase_mutation() != &single_phase_mutation) { if (KeyRange::CompareExclusiveMax( range.exclusive_max, existing_dr_entry->exclusive_max_) < 0) { InsertDeleteRangeEntry( kDeleteRangePlaceholder, single_phase_mutation, existing_dr_entry->single_phase_mutation(), KeyRange{range.exclusive_max, existing_dr_entry->exclusive_max_}, {&insert_placeholder, MutationEntryTree::kRight}); } if (existing_dr_entry->key_ < range.inclusive_min) { InsertDeleteRangeEntry( kDeleteRangePlaceholder, single_phase_mutation, existing_dr_entry->single_phase_mutation(), KeyRange{existing_dr_entry->key_, range.inclusive_min}, {&insert_placeholder, MutationEntryTree::kLeft}); } InsertIntoPriorPhase(existing_dr_entry); } else { superseded = ReadModifyWriteEntryTree::Join( superseded, existing_dr_entry->superseded_); if (!new_entry) { new_entry = existing_dr_entry; } else { new_entry->exclusive_max_ = std::move(existing_dr_entry->exclusive_max_); delete existing_dr_entry; } } } } if (new_entry) { if (range.inclusive_min < new_entry->key_) { new_entry->key_ = std::move(range.inclusive_min); } if (KeyRange::CompareExclusiveMax(range.exclusive_max, new_entry->exclusive_max_) > 0) { new_entry->exclusive_max_ = std::move(range.exclusive_max); } } else { new_entry = 
MakeDeleteRangeEntry(kDeleteRange, single_phase_mutation, std::move(range)); } new_entry->superseded_ = std::move(superseded); single_phase_mutation.entries_.Replace(insert_placeholder, *new_entry); } std::string MultiPhaseMutation::DescribeFirstEntry() { assert(!phases_.prev_->entries_.empty()); return DescribeEntry(*phases_.prev_->entries_.begin()); } ReadModifyWriteEntry* MultiPhaseMutation::AllocateReadModifyWriteEntry() { return new ReadModifyWriteEntry; } void MultiPhaseMutation::FreeReadModifyWriteEntry(ReadModifyWriteEntry* entry) { delete entry; } void ReadDirectly(Driver* driver, ReadModifyWriteEntry& entry, ReadModifyWriteTarget::TransactionalReadOptions&& options, ReadModifyWriteTarget::ReadReceiver&& receiver) { ReadOptions kvstore_options; kvstore_options.staleness_bound = options.staleness_bound; kvstore_options.generation_conditions.if_not_equal = std::move(options.generation_conditions.if_not_equal); kvstore_options.batch = std::move(options.batch); execution::submit(driver->Read(entry.key_, std::move(kvstore_options)), std::move(receiver)); } void WritebackDirectly(Driver* driver, ReadModifyWriteEntry& entry, ReadResult&& read_result) { assert(read_result.stamp.time != absl::InfinitePast()); PerformWriteback(driver, Controller{&entry}, std::move(read_result)); } void WritebackDirectly(Driver* driver, DeleteRangeEntry& entry) { auto future = driver->DeleteRange(KeyRange{entry.key_, entry.exclusive_max_}); future.Force(); std::move(future).ExecuteWhenReady([&entry](ReadyFuture<const void> future) { auto& r = future.result(); if (!r.ok()) { entry.multi_phase().GetTransactionNode().SetError(r.status()); entry.remaining_entries_.SetError(); } HandleDeleteRangeDone(entry); }); } void MultiPhaseMutation::RecordEntryWritebackError(ReadModifyWriteEntry& entry, absl::Status error) { this->GetTransactionNode().SetError(std::move(error)); WritebackError(entry); } void AtomicMultiPhaseMutationBase::RetryAtomicWriteback( absl::Time staleness_bound) { auto& single_phase_mutation = GetCommittingPhase(); WritebackPhase( single_phase_mutation, staleness_bound, [&](ReadModifyWriteEntry& entry) { return static_cast<ReadModifyWriteEntryWithStamp&>(entry).IsOutOfDate( staleness_bound); }); } ReadModifyWriteEntry* AtomicMultiPhaseMutation::AllocateReadModifyWriteEntry() { return new BufferedReadModifyWriteEntry; } void AtomicMultiPhaseMutation::FreeReadModifyWriteEntry( ReadModifyWriteEntry* entry) { delete static_cast<BufferedReadModifyWriteEntry*>(entry); } void AtomicMultiPhaseMutationBase::AtomicWritebackReady( ReadModifyWriteEntry& entry) { if (auto* dr_entry = static_cast<DeleteRangeEntry*>(entry.next_)) { DeletedEntryDone(*dr_entry, false); } else { EntryDone(entry.single_phase_mutation(), false); } } void AtomicMultiPhaseMutation::Writeback(ReadModifyWriteEntry& entry, ReadModifyWriteEntry& source_entry, ReadResult&& read_result) { assert(read_result.stamp.time != absl::InfinitePast()); auto& buffered = static_cast<BufferedReadModifyWriteEntry&>(entry); buffered.stamp() = std::move(read_result.stamp); buffered.value_state_ = read_result.state; buffered.value_ = std::move(read_result.value); AtomicWritebackReady(entry); } void AtomicMultiPhaseMutationBase::WritebackDelete(DeleteRangeEntry& entry) { EntryDone(entry.single_phase_mutation(), false); } void AtomicMultiPhaseMutationBase::AtomicCommitWritebackSuccess() { for (auto& entry : GetCommittingPhase().entries_) { if (entry.entry_type() == kReadModifyWrite) { auto& rmw_entry = static_cast<ReadModifyWriteEntryWithStamp&>(entry); 
internal_kvstore::WritebackSuccess(rmw_entry, std::move(rmw_entry.stamp_)); } else { auto& dr_entry = static_cast<DeleteRangeEntry&>(entry); internal_kvstore::WritebackSuccess(dr_entry); } } } void AtomicMultiPhaseMutationBase::RevokeAllEntries() { assert(phases_.next_ == &phases_); for (auto& entry : phases_.entries_) { if (entry.entry_type() != kReadModifyWrite) continue; auto& rmw_entry = static_cast<ReadModifyWriteEntry&>(entry); rmw_entry.source_->KvsRevoke(); } } namespace { absl::Status GetNonAtomicReadModifyWriteError( NonAtomicTransactionNode& node, MultiPhaseMutation::ReadModifyWriteStatus modify_status) { if (!node.transaction()->atomic()) { return absl::OkStatus(); } using ReadModifyWriteStatus = MultiPhaseMutation::ReadModifyWriteStatus; if (modify_status == ReadModifyWriteStatus::kAddedFirst) { return node.MarkAsTerminal(); } if (modify_status == ReadModifyWriteStatus::kAddedSubsequent) { absl::MutexLock lock(&node.mutex_); auto& single_phase_mutation = *node.phases_.prev_; MutationEntry* e0 = single_phase_mutation.entries_.begin().to_pointer(); assert(e0); MutationEntry* e1 = MutationEntryTree::Traverse(*e0, MutationEntryTree::kRight); assert(e1); auto error = internal::TransactionState::Node::GetAtomicError( DescribeEntry(*e0), DescribeEntry(*e1)); node.transaction()->RequestAbort(error); return error; } return absl::OkStatus(); } class ReadViaExistingTransactionNode : public internal::TransactionState::Node, public ReadModifyWriteSource { public: ReadViaExistingTransactionNode() : internal::TransactionState::Node(nullptr) {} void PrepareForCommit() override { intrusive_ptr_increment(this); this->PrepareDone(); this->ReadyForCommit(); } void Commit() override { intrusive_ptr_decrement(this); } void Abort() override { AbortDone(); } void KvsSetTarget(ReadModifyWriteTarget& target) override { target_ = &target; } void KvsInvalidateReadState() override {} void KvsWriteback( ReadModifyWriteSource::WritebackOptions options, ReadModifyWriteSource::WritebackReceiver receiver) override { ReadModifyWriteTarget::TransactionalReadOptions read_options = options; if (options.writeback_mode != ReadModifyWriteSource::kSpecifyUnchangedWriteback) { TimestampedStorageGeneration expected_stamp; { absl::MutexLock lock(&mutex_); expected_stamp = expected_stamp_; } if (StorageGeneration::IsUnknown(expected_stamp.generation)) { execution::set_value( receiver, ReadResult::Unspecified( TimestampedStorageGeneration::Unconditional())); return; } if (StorageGeneration::IsClean(expected_stamp.generation) && expected_stamp.time >= read_options.staleness_bound) { execution::set_value( receiver, ReadResult::Unspecified(std::move(expected_stamp))); return; } } struct ReadReceiverImpl { ReadViaExistingTransactionNode& node_; ReadModifyWriteSource::WritebackReceiver receiver_; void set_value(ReadResult read_result) { bool mismatch; { absl::MutexLock lock(&node_.mutex_); mismatch = !StorageGeneration::EqualOrUnspecified( read_result.stamp.generation, node_.expected_stamp_.generation); } if (mismatch) { execution::set_error(receiver_, absl::AbortedError("Generation mismatch")); return; } execution::set_value(receiver_, std::move(read_result)); } void set_cancel() { execution::set_cancel(receiver_); } void set_error(absl::Status error) { execution::set_error(receiver_, std::move(error)); } }; target_->KvsRead(std::move(read_options), ReadReceiverImpl{*this, std::move(receiver)}); } void KvsWritebackError() override { this->CommitDone(); } void KvsRevoke() override {} void 
KvsWritebackSuccess(TimestampedStorageGeneration new_stamp) override { this->CommitDone(); } absl::Mutex mutex_; TimestampedStorageGeneration expected_stamp_; ReadModifyWriteTarget* target_; }; } Future<ReadResult> ReadViaExistingTransaction( Driver* driver, internal::OpenTransactionPtr& transaction, size_t& phase, Key key, kvstore::TransactionalReadOptions options) { auto [promise, future] = PromiseFuturePair<ReadResult>::Make(); using Node = ReadViaExistingTransactionNode; internal::WeakTransactionNodePtr<Node> node; node.reset(new Node); TENSORSTORE_RETURN_IF_ERROR( driver->ReadModifyWrite(transaction, phase, std::move(key), *node)); node->SetTransaction(*transaction); node->SetPhase(phase); TENSORSTORE_RETURN_IF_ERROR(node->Register()); struct InitialReadReceiverImpl { internal::OpenTransactionNodePtr<Node> node_; Promise<ReadResult> promise_; void set_value(ReadResult read_result) { if (node_->transaction()->mode() & repeatable_read) { absl::MutexLock lock(&node_->mutex_); node_->expected_stamp_ = read_result.stamp; } promise_.SetResult(std::move(read_result)); } void set_cancel() {} void set_error(absl::Status error) { promise_.SetResult(std::move(error)); } }; node->target_->KvsRead(std::move(options), InitialReadReceiverImpl{ internal::OpenTransactionNodePtr<Node>(node.get()), std::move(promise)}); return std::move(future); } namespace { class WriteViaExistingTransactionNode : public internal::TransactionState::Node, public ReadModifyWriteSource { public: WriteViaExistingTransactionNode() : internal::TransactionState::Node(nullptr) {} void PrepareForCommit() override { intrusive_ptr_increment(this); this->PrepareDone(); this->ReadyForCommit(); } void Commit() override { intrusive_ptr_decrement(this); } void Abort() override { AbortDone(); } void KvsSetTarget(ReadModifyWriteTarget& target) override { target_ = &target; } void KvsInvalidateReadState() override {} void KvsWriteback( ReadModifyWriteSource::WritebackOptions options, ReadModifyWriteSource::WritebackReceiver receiver) override { if (!StorageGeneration::IsConditional(read_result_.stamp.generation)) { execution::set_value(receiver, read_result_); return; } ReadModifyWriteTarget::TransactionalReadOptions read_options; read_options.generation_conditions.if_not_equal = StorageGeneration::Clean(read_result_.stamp.generation); read_options.staleness_bound = options.staleness_bound; struct ReadReceiverImpl { WriteViaExistingTransactionNode& source_; ReadModifyWriteSource::WritebackReceiver receiver_; void set_value(ReadResult read_result) { auto& existing_generation = source_.read_result_.stamp.generation; auto clean_generation = StorageGeneration::Clean(existing_generation); if (read_result.stamp.generation == clean_generation || (source_.if_equal_no_value_ && read_result.state == ReadResult::kMissing)) { source_.read_result_.stamp = std::move(read_result.stamp); source_.read_result_.stamp.generation.MarkDirty(); } else { assert( !StorageGeneration::IsNewlyDirty(read_result.stamp.generation)); source_.read_result_ = std::move(read_result); source_.if_equal_no_value_ = false; } execution::set_value(receiver_, source_.read_result_); } void set_cancel() { execution::set_cancel(receiver_); } void set_error(absl::Status error) { execution::set_error(receiver_, std::move(error)); } }; target_->KvsRead(std::move(read_options), ReadReceiverImpl{*this, std::move(receiver)}); } void KvsWritebackError() override { this->CommitDone(); } void KvsRevoke() override {} void KvsWritebackSuccess(TimestampedStorageGeneration new_stamp) override { 
if (!StorageGeneration::IsNewlyDirty(read_result_.stamp.generation)) { new_stamp = TimestampedStorageGeneration{}; } else if (new_stamp.time == absl::InfiniteFuture()) { new_stamp.generation = StorageGeneration::Invalid(); } promise_.SetResult(std::move(new_stamp)); this->CommitDone(); } Promise<TimestampedStorageGeneration> promise_; ReadResult read_result_; bool if_equal_no_value_; ReadModifyWriteTarget* target_; }; } Future<TimestampedStorageGeneration> WriteViaExistingTransaction( Driver* driver, internal::OpenTransactionPtr& transaction, size_t& phase, Key key, std::optional<Value> value, WriteOptions options) { TimestampedStorageGeneration stamp; if (StorageGeneration::IsUnknown(options.generation_conditions.if_equal)) { stamp.time = absl::InfiniteFuture(); } else { assert(StorageGeneration::IsClean(options.generation_conditions.if_equal)); stamp.time = absl::Time(); } bool if_equal_no_value = StorageGeneration::IsNoValue(options.generation_conditions.if_equal); stamp.generation = std::move(options.generation_conditions.if_equal); stamp.generation.MarkDirty(); auto [promise, future] = PromiseFuturePair<TimestampedStorageGeneration>::Make(); using Node = WriteViaExistingTransactionNode; internal::WeakTransactionNodePtr<Node> node; node.reset(new Node); node->promise_ = promise; node->read_result_ = value ? ReadResult::Value(*std::move(value), std::move(stamp)) : ReadResult::Missing(std::move(stamp)); node->if_equal_no_value_ = if_equal_no_value; TENSORSTORE_RETURN_IF_ERROR( driver->ReadModifyWrite(transaction, phase, std::move(key), *node)); node->SetTransaction(*transaction); node->SetPhase(phase); TENSORSTORE_RETURN_IF_ERROR(node->Register()); LinkError(std::move(promise), transaction->future()); return std::move(future); } Future<TimestampedStorageGeneration> WriteViaTransaction( Driver* driver, Key key, std::optional<Value> value, WriteOptions options) { internal::OpenTransactionPtr transaction; size_t phase; return WriteViaExistingTransaction(driver, transaction, phase, std::move(key), std::move(value), std::move(options)); } } namespace kvstore { absl::Status Driver::ReadModifyWrite(internal::OpenTransactionPtr& transaction, size_t& phase, Key key, ReadModifyWriteSource& source) { TENSORSTORE_ASSIGN_OR_RETURN( auto node, internal_kvstore::GetTransactionNode< internal_kvstore::NonAtomicTransactionNode>(this, transaction)); internal_kvstore::MultiPhaseMutation::ReadModifyWriteStatus rmw_status; { absl::MutexLock lock(&node->mutex_); rmw_status = node->ReadModifyWrite(phase, std::move(key), source); } return internal_kvstore::GetNonAtomicReadModifyWriteError(*node, rmw_status); } absl::Status Driver::TransactionalDeleteRange( const internal::OpenTransactionPtr& transaction, KeyRange range) { if (range.empty()) return absl::OkStatus(); if (transaction && transaction->atomic()) { auto error = absl::InvalidArgumentError( tensorstore::StrCat("Cannot delete range starting at ", this->DescribeKey(range.inclusive_min), " as single atomic transaction")); transaction->RequestAbort(error); return error; } return internal_kvstore::AddDeleteRange< internal_kvstore::NonAtomicTransactionNode>(this, transaction, std::move(range)); } } }
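A sketch of the call path the code above implements, via the public kvstore API. The "memory://" spec (and the memory driver being linked in) is an assumption for illustration; the KvStore(driver, path, transaction) binding mirrors the construction used in the test file below.

#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"

// Stages a write via WriteViaExistingTransaction, then commits; the write
// only reaches the underlying driver during commit (StartWriteback above).
absl::Status TransactionalWriteExample() {
  tensorstore::Transaction txn(tensorstore::isolated);
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto base, tensorstore::kvstore::Open("memory://").result());
  // Bind the open store to the transaction, as the tests below do with a
  // mock driver.
  tensorstore::kvstore::KvStore store(base.driver, base.path, txn);
  TENSORSTORE_RETURN_IF_ERROR(
      tensorstore::kvstore::Write(store, "a", absl::Cord("value")).result());
  return txn.CommitAsync().result().status();
}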
#include "tensorstore/transaction.h" #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/cord.h" #include "absl/time/clock.h" #include "tensorstore/internal/intrusive_ptr.h" #include "tensorstore/kvstore/byte_range.h" #include "tensorstore/kvstore/generation.h" #include "tensorstore/kvstore/kvstore.h" #include "tensorstore/kvstore/mock_kvstore.h" #include "tensorstore/kvstore/operations.h" #include "tensorstore/kvstore/read_result.h" #include "tensorstore/kvstore/test_matchers.h" #include "tensorstore/kvstore/test_util.h" #include "tensorstore/util/status_testutil.h" namespace { namespace kvstore = tensorstore::kvstore; using ::tensorstore::MatchesStatus; using ::tensorstore::OptionalByteRangeRequest; using ::tensorstore::StorageGeneration; using ::tensorstore::TimestampedStorageGeneration; using ::tensorstore::Transaction; using ::tensorstore::internal::MatchesKvsReadResult; using ::tensorstore::internal::MockKeyValueStore; using ::tensorstore::kvstore::KvStore; using ::tensorstore::kvstore::ReadResult; TEST(KvStoreTest, WriteThenRead) { auto mock_driver = MockKeyValueStore::Make(); Transaction txn(tensorstore::isolated); KvStore store(mock_driver, "", txn); TENSORSTORE_ASSERT_OK(kvstore::Write(store, "a", absl::Cord("value"))); EXPECT_THAT(kvstore::Read(store, "a").result(), ::testing::Optional(MatchesKvsReadResult(absl::Cord("value")))); auto future = txn.CommitAsync(); { auto req = mock_driver->write_requests.pop(); EXPECT_THAT(req.key, "a"); EXPECT_THAT(req.value, ::testing::Optional(absl::Cord("value"))); EXPECT_THAT(req.options.generation_conditions.if_equal, StorageGeneration::Unknown()); req.promise.SetResult(TimestampedStorageGeneration( StorageGeneration::FromString("abc"), absl::Now())); } TENSORSTORE_ASSERT_OK(future); } TEST(KvStoreTest, ReadWithoutRepeatableReadIsolation) { auto mock_driver = MockKeyValueStore::Make(); Transaction txn(tensorstore::isolated); KvStore store(mock_driver, "", txn); { auto read_future = kvstore::Read(store, "a"); { auto req = mock_driver->read_requests.pop(); EXPECT_THAT(req.key, "a"); req.promise.SetResult(ReadResult::Value( absl::Cord("value"), TimestampedStorageGeneration(StorageGeneration::FromString("abc"), absl::Now()))); } EXPECT_THAT(read_future.result(), ::testing::Optional(MatchesKvsReadResult(absl::Cord("value")))); } TENSORSTORE_ASSERT_OK(txn.CommitAsync().result()); } TEST(KvStoreTest, ReadWithRepeatableReadIsolation) { auto mock_driver = MockKeyValueStore::Make(); Transaction txn(tensorstore::isolated | tensorstore::repeatable_read); KvStore store(mock_driver, "", txn); { auto read_future = kvstore::Read(store, "a"); { auto req = mock_driver->read_requests.pop(); EXPECT_THAT(req.key, "a"); req.promise.SetResult(ReadResult::Value( absl::Cord("value"), TimestampedStorageGeneration(StorageGeneration::FromString("abc"), absl::Now()))); } EXPECT_THAT(read_future.result(), ::testing::Optional(MatchesKvsReadResult(absl::Cord("value")))); } auto future = txn.CommitAsync(); { auto req = mock_driver->read_requests.pop(); EXPECT_THAT(req.key, "a"); EXPECT_THAT(req.options.byte_range, OptionalByteRangeRequest(0, 0)); EXPECT_THAT(req.options.generation_conditions.if_not_equal, StorageGeneration::FromString("abc")); req.promise.SetResult(ReadResult::Unspecified(TimestampedStorageGeneration( StorageGeneration::FromString("abc"), absl::Now()))); } TENSORSTORE_ASSERT_OK(future); } TEST(KvStoreTest, ReadInvalidOptionIfEqual) { auto mock_driver = MockKeyValueStore::Make(); 
Transaction txn(tensorstore::isolated); KvStore store(mock_driver, "", txn); kvstore::ReadOptions options; options.generation_conditions.if_equal = StorageGeneration::FromString("abc"); EXPECT_THAT(kvstore::Read(store, "a", std::move(options)).result(), MatchesStatus(absl::StatusCode::kUnimplemented)); } TEST(KvStoreTest, ReadInvalidOptionByteRange) { auto mock_driver = MockKeyValueStore::Make(); Transaction txn(tensorstore::isolated); KvStore store(mock_driver, "", txn); kvstore::ReadOptions options; options.byte_range = OptionalByteRangeRequest{5, 10}; EXPECT_THAT(kvstore::Read(store, "a", std::move(options)).result(), MatchesStatus(absl::StatusCode::kUnimplemented)); } TEST(KvStoreTest, ReadMismatch) { auto mock_driver = MockKeyValueStore::Make(); Transaction txn(tensorstore::isolated | tensorstore::repeatable_read); KvStore store(mock_driver, "", txn); { auto read_future = kvstore::Read(store, "a"); { auto req = mock_driver->read_requests.pop(); EXPECT_THAT(req.key, "a"); req.promise.SetResult(ReadResult::Value( absl::Cord("value"), TimestampedStorageGeneration(StorageGeneration::FromString("abc"), absl::Now()))); } EXPECT_THAT(read_future.result(), ::testing::Optional(MatchesKvsReadResult(absl::Cord("value")))); } auto future = txn.CommitAsync(); { auto req = mock_driver->read_requests.pop(); EXPECT_THAT(req.key, "a"); EXPECT_THAT(req.options.byte_range, OptionalByteRangeRequest(0, 0)); EXPECT_THAT(req.options.generation_conditions.if_not_equal, StorageGeneration::FromString("abc")); req.promise.SetResult(ReadResult::Missing(TimestampedStorageGeneration( StorageGeneration::FromString("def"), absl::Now()))); } { auto req = mock_driver->read_requests.pop(); EXPECT_THAT(req.key, "a"); req.promise.SetResult(ReadResult::Missing(TimestampedStorageGeneration( StorageGeneration::FromString("def"), absl::Now()))); } EXPECT_THAT(future.result(), MatchesStatus(absl::StatusCode::kAborted, "Error writing \"a\": Generation mismatch")); } TEST(KvStoreTest, ListInvalid) { auto mock_driver = MockKeyValueStore::Make(); Transaction txn(tensorstore::isolated); KvStore store(mock_driver, "", txn); EXPECT_THAT(kvstore::ListFuture(store).result(), MatchesStatus(absl::StatusCode::kUnimplemented)); } }
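A hypothetical additional test in the same style, exercising the atomic-transaction rejection path of Driver::TransactionalDeleteRange shown above. tensorstore::atomic_isolated and KeyRange::Prefix are from the public API (KeyRange would need its header included); the assertion checks only the status code rather than the exact error text.

TEST(KvStoreTest, DeleteRangeAtomicInvalid) {
  auto mock_driver = MockKeyValueStore::Make();
  Transaction txn(tensorstore::atomic_isolated);
  KvStore store(mock_driver, "", txn);
  // TransactionalDeleteRange requests abort and returns InvalidArgument
  // when the transaction is atomic.
  EXPECT_THAT(
      kvstore::DeleteRange(store, tensorstore::KeyRange::Prefix("a/"))
          .result(),
      MatchesStatus(absl::StatusCode::kInvalidArgument));
}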
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/transaction.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/transaction_test.cc
4f887a6430414cd6088e1743555015b10f116d50
19a6f11d-9f4f-4aef-8c9a-58497e83fc3c
cpp
tensorflow/tensorflow
constant_value
third_party/xla/xla/service/constant_value.cc
third_party/xla/xla/service/constant_value_test.cc
#include "xla/service/constant_value.h" #include <string> namespace xla { absl::StatusOr<ConstantValue> ConstantValue::FromLiteral( const Literal& literal) { CHECK_EQ(literal.shape().dimensions_size(), 0) << "Expected scalar literal"; return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<ConstantValue>>( [&](auto primitive_type_constant) -> absl::StatusOr<ConstantValue> { if constexpr (primitive_util::IsIntegralType(primitive_type_constant)) { return ConstantValue( static_cast<uint64_t>( literal.GetFirstElement< primitive_util::NativeTypeOf<primitive_type_constant>>()), primitive_util::BitWidth(primitive_type_constant), primitive_util::IsSignedIntegralType(primitive_type_constant)); } return InvalidArgument("Unsupported type"); }, literal.shape().element_type()); } ConstantValue ConstantValue::div(const ConstantValue& other) const { if (!is_signed_) { return ConstantValue(value_ / other.value_, bitwidth_, is_signed_); } return ConstantValue( absl::bit_cast<uint64_t>(absl::bit_cast<int64_t>(value_) / absl::bit_cast<int64_t>(other.value_)), bitwidth_, is_signed_); } ConstantValue ConstantValue::mod(const ConstantValue& other) const { if (!is_signed_) { return ConstantValue(value_ % other.value_, bitwidth_, is_signed_); } return ConstantValue( absl::bit_cast<uint64_t>(absl::bit_cast<int64_t>(value_) % absl::bit_cast<int64_t>(other.value_)), bitwidth_, is_signed_); } ConstantValue ConstantValue::mul(const ConstantValue& other) const { if (!is_signed_) { return ConstantValue(value_ * other.value_, bitwidth_, is_signed_); } return ConstantValue( absl::bit_cast<uint64_t>(absl::bit_cast<int64_t>(value_) * absl::bit_cast<int64_t>(other.value_)), bitwidth_, is_signed_); } bool ConstantValue::lt(const ConstantValue& other) const { if (!is_signed_) { return value_ < other.value_; } return absl::bit_cast<int64_t>(value_) < absl::bit_cast<int64_t>(other.value_); } bool ConstantValue::gt(const ConstantValue& other) const { if (!is_signed_) { return value_ > other.value_; } return absl::bit_cast<int64_t>(value_) > absl::bit_cast<int64_t>(other.value_); } std::string ConstantValue::ToString() const { return is_signed_ ? absl::StrCat(GetSignedValue()) : absl::StrCat(GetUnsignedValue()); } }
#include "xla/service/constant_value.h" #include <gtest/gtest.h> #include "xla/literal_util.h" namespace xla { namespace { class ConstantValueTest : public ::testing::Test {}; TEST_F(ConstantValueTest, ZeroTest32) { ConstantValue zero = ConstantValue::GetZero(32, false); EXPECT_EQ(zero.GetSignedValue(), 0); EXPECT_EQ(zero.GetUnsignedValue(), 0); EXPECT_EQ(zero.GetBitwidth(), 32); EXPECT_FALSE(zero.IsSigned()); ConstantValue zero_s = ConstantValue::GetZero(32, true); EXPECT_EQ(zero_s.GetSignedValue(), 0); EXPECT_EQ(zero_s.GetUnsignedValue(), 0); EXPECT_EQ(zero_s.GetBitwidth(), 32); EXPECT_TRUE(zero_s.IsSigned()); } TEST_F(ConstantValueTest, OneTest32) { ConstantValue one = ConstantValue::GetOne(32, false); EXPECT_EQ(one.GetSignedValue(), 1); EXPECT_EQ(one.GetUnsignedValue(), 1); EXPECT_EQ(one.GetBitwidth(), 32); EXPECT_FALSE(one.IsSigned()); ConstantValue one_s = ConstantValue::GetOne(32, true); EXPECT_EQ(one_s.GetSignedValue(), 1); EXPECT_EQ(one_s.GetUnsignedValue(), 1); EXPECT_EQ(one_s.GetBitwidth(), 32); EXPECT_TRUE(one_s.IsSigned()); } TEST_F(ConstantValueTest, Signed23) { ConstantValue signed_number = ConstantValue::GetSigned(4194303, 23); EXPECT_EQ(signed_number.GetSignedValue(), 4194303); EXPECT_EQ(signed_number.GetBitwidth(), 23); EXPECT_TRUE(signed_number.IsSigned()); ConstantValue signed_number_of = ConstantValue::GetSigned(4194304, 23); EXPECT_EQ(signed_number_of.GetSignedValue(), -4194304); EXPECT_EQ(signed_number_of.GetBitwidth(), 23); EXPECT_TRUE(signed_number_of.IsSigned()); } TEST_F(ConstantValueTest, Unsigned23) { ConstantValue unsigned_number = ConstantValue::GetUnsigned(8388607, 23); EXPECT_EQ(unsigned_number.GetUnsignedValue(), 8388607); EXPECT_EQ(unsigned_number.GetBitwidth(), 23); EXPECT_FALSE(unsigned_number.IsSigned()); ConstantValue unsigned_number_of = ConstantValue::GetUnsigned(8388608, 23); EXPECT_EQ(unsigned_number_of.GetUnsignedValue(), 0); EXPECT_EQ(unsigned_number_of.GetBitwidth(), 23); EXPECT_FALSE(unsigned_number_of.IsSigned()); } TEST_F(ConstantValueTest, FromLiteral) { auto cv_8 = ConstantValue::FromLiteral( LiteralUtil::CreateR0(static_cast<int8_t>(-32))); EXPECT_TRUE(cv_8.ok()); EXPECT_TRUE(cv_8->IsSigned()); EXPECT_EQ(cv_8->GetBitwidth(), 8); EXPECT_EQ(cv_8->GetSignedValue(), -32); auto cv_u8 = ConstantValue::FromLiteral( LiteralUtil::CreateR0(static_cast<int8_t>(32))); EXPECT_TRUE(cv_u8.ok()); EXPECT_TRUE(cv_u8->IsSigned()); EXPECT_EQ(cv_u8->GetBitwidth(), 8); EXPECT_EQ(cv_u8->GetUnsignedValue(), 32); auto cv_16 = ConstantValue::FromLiteral( LiteralUtil::CreateR0(static_cast<int16_t>(32000))); EXPECT_TRUE(cv_16.ok()); EXPECT_TRUE(cv_16->IsSigned()); EXPECT_EQ(cv_16->GetBitwidth(), 16); EXPECT_EQ(cv_16->GetSignedValue(), 32000); auto cv_u16 = ConstantValue::FromLiteral( LiteralUtil::CreateR0(static_cast<uint16_t>(33000))); EXPECT_TRUE(cv_u16.ok()); EXPECT_FALSE(cv_u16->IsSigned()); EXPECT_EQ(cv_u16->GetBitwidth(), 16); EXPECT_EQ(cv_u16->GetUnsignedValue(), 33000); auto cv_32 = ConstantValue::FromLiteral( LiteralUtil::CreateR0(static_cast<int32_t>(-2000000000))); EXPECT_TRUE(cv_32.ok()); EXPECT_TRUE(cv_32->IsSigned()); EXPECT_EQ(cv_32->GetBitwidth(), 32); EXPECT_EQ(cv_32->GetSignedValue(), -2000000000); auto cv_u32 = ConstantValue::FromLiteral( LiteralUtil::CreateR0(static_cast<uint32_t>(3000000000))); EXPECT_TRUE(cv_u32.ok()); EXPECT_FALSE(cv_u32->IsSigned()); EXPECT_EQ(cv_u32->GetBitwidth(), 32); EXPECT_EQ(cv_u32->GetUnsignedValue(), 3000000000); auto cv_64 = ConstantValue::FromLiteral( LiteralUtil::CreateR0(static_cast<int64_t>(3000000000))); 
EXPECT_TRUE(cv_64.ok()); EXPECT_TRUE(cv_64->IsSigned()); EXPECT_EQ(cv_64->GetBitwidth(), 64); EXPECT_EQ(cv_64->GetSignedValue(), 3000000000); auto cv_u64 = ConstantValue::FromLiteral( LiteralUtil::CreateR0(static_cast<uint64_t>(6000000000))); EXPECT_TRUE(cv_u64.ok()); EXPECT_FALSE(cv_u64->IsSigned()); EXPECT_EQ(cv_u64->GetBitwidth(), 64); EXPECT_EQ(cv_u64->GetUnsignedValue(), 6000000000); } TEST_F(ConstantValueTest, Add) { ConstantValue lhs = ConstantValue::GetUnsigned(8388607, 23); ConstantValue rhs = ConstantValue::GetUnsigned(1, 23); ConstantValue result = lhs.add(rhs); EXPECT_EQ(result.GetUnsignedValue(), 0); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_FALSE(result.IsSigned()); lhs = ConstantValue::GetUnsigned(8388600, 23); rhs = ConstantValue::GetUnsigned(7, 23); result = lhs.add(rhs); EXPECT_EQ(result.GetUnsignedValue(), 8388607); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_FALSE(result.IsSigned()); lhs = ConstantValue::GetSigned(-10, 23); rhs = ConstantValue::GetSigned(4, 23); result = lhs.add(rhs); EXPECT_EQ(result.GetSignedValue(), -6); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_TRUE(result.IsSigned()); lhs = ConstantValue::GetSigned(-4194304, 23); rhs = ConstantValue::GetSigned(-1, 23); result = lhs.add(rhs); EXPECT_EQ(result.GetSignedValue(), 4194303); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_TRUE(result.IsSigned()); } TEST_F(ConstantValueTest, Sub) { ConstantValue lhs = ConstantValue::GetUnsigned(8388607, 23); ConstantValue rhs = ConstantValue::GetUnsigned(1, 23); ConstantValue result = lhs.sub(rhs); EXPECT_EQ(result.GetUnsignedValue(), 8388606); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_FALSE(result.IsSigned()); lhs = ConstantValue::GetUnsigned(6, 23); rhs = ConstantValue::GetUnsigned(7, 23); result = lhs.sub(rhs); EXPECT_EQ(result.GetUnsignedValue(), 8388607); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_FALSE(result.IsSigned()); lhs = ConstantValue::GetSigned(-10, 23); rhs = ConstantValue::GetSigned(4, 23); result = lhs.sub(rhs); EXPECT_EQ(result.GetSignedValue(), -14); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_TRUE(result.IsSigned()); lhs = ConstantValue::GetSigned(-4194304, 23); rhs = ConstantValue::GetSigned(1, 23); result = lhs.sub(rhs); EXPECT_EQ(result.GetSignedValue(), 4194303); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_TRUE(result.IsSigned()); } TEST_F(ConstantValueTest, Div) { ConstantValue lhs = ConstantValue::GetUnsigned(94, 23); ConstantValue rhs = ConstantValue::GetUnsigned(47, 23); ConstantValue result = lhs.div(rhs); EXPECT_EQ(result.GetUnsignedValue(), 2); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_FALSE(result.IsSigned()); lhs = ConstantValue::GetUnsigned(6, 23); rhs = ConstantValue::GetUnsigned(7, 23); result = lhs.div(rhs); EXPECT_EQ(result.GetUnsignedValue(), 0); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_FALSE(result.IsSigned()); lhs = ConstantValue::GetSigned(-10, 23); rhs = ConstantValue::GetSigned(4, 23); result = lhs.div(rhs); EXPECT_EQ(result.GetSignedValue(), -2); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_TRUE(result.IsSigned()); lhs = ConstantValue::GetSigned(-4194304, 23); rhs = ConstantValue::GetSigned(2, 23); result = lhs.div(rhs); EXPECT_EQ(result.GetSignedValue(), -2097152); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_TRUE(result.IsSigned()); } TEST_F(ConstantValueTest, Mod) { ConstantValue lhs = ConstantValue::GetUnsigned(94, 23); ConstantValue rhs = ConstantValue::GetUnsigned(47, 23); ConstantValue result = lhs.mod(rhs); EXPECT_EQ(result.GetUnsignedValue(), 0); EXPECT_EQ(result.GetBitwidth(), 23); 
EXPECT_FALSE(result.IsSigned()); lhs = ConstantValue::GetUnsigned(6, 23); rhs = ConstantValue::GetUnsigned(7, 23); result = lhs.mod(rhs); EXPECT_EQ(result.GetUnsignedValue(), 6); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_FALSE(result.IsSigned()); lhs = ConstantValue::GetSigned(-10, 23); rhs = ConstantValue::GetSigned(3, 23); result = lhs.mod(rhs); EXPECT_EQ(result.GetSignedValue(), -1); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_TRUE(result.IsSigned()); lhs = ConstantValue::GetSigned(-4194304, 23); rhs = ConstantValue::GetSigned(1, 23); result = lhs.mod(rhs); EXPECT_EQ(result.GetSignedValue(), 0); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_TRUE(result.IsSigned()); } TEST_F(ConstantValueTest, Mul) { ConstantValue lhs = ConstantValue::GetUnsigned(94, 23); ConstantValue rhs = ConstantValue::GetUnsigned(47, 23); ConstantValue result = lhs.mul(rhs); EXPECT_EQ(result.GetUnsignedValue(), 4418); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_FALSE(result.IsSigned()); lhs = ConstantValue::GetUnsigned(8388607, 23); rhs = ConstantValue::GetUnsigned(2, 23); result = lhs.mul(rhs); EXPECT_EQ(result.GetUnsignedValue(), 8388606); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_FALSE(result.IsSigned()); lhs = ConstantValue::GetSigned(-10, 23); rhs = ConstantValue::GetSigned(3, 23); result = lhs.mul(rhs); EXPECT_EQ(result.GetSignedValue(), -30); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_TRUE(result.IsSigned()); lhs = ConstantValue::GetSigned(-4194304, 23); rhs = ConstantValue::GetSigned(2, 23); result = lhs.mul(rhs); EXPECT_EQ(result.GetSignedValue(), 0); EXPECT_EQ(result.GetBitwidth(), 23); EXPECT_TRUE(result.IsSigned()); } TEST_F(ConstantValueTest, LtGtEq) { ConstantValue lhs = ConstantValue::GetUnsigned(94, 23); ConstantValue rhs = ConstantValue::GetUnsigned(47, 23); EXPECT_FALSE(lhs.lt(rhs)); EXPECT_TRUE(lhs.gt(rhs)); lhs = ConstantValue::GetUnsigned(8388607, 23); rhs = ConstantValue::GetUnsigned(2, 23); EXPECT_FALSE(lhs.lt(rhs)); EXPECT_TRUE(lhs.gt(rhs)); lhs = ConstantValue::GetSigned(-10, 23); rhs = ConstantValue::GetSigned(3, 23); EXPECT_TRUE(lhs.lt(rhs)); EXPECT_FALSE(lhs.gt(rhs)); lhs = ConstantValue::GetSigned(-4194304, 23); rhs = ConstantValue::GetSigned(2, 23); EXPECT_TRUE(lhs.lt(rhs)); EXPECT_FALSE(lhs.gt(rhs)); lhs = ConstantValue::GetUnsigned(43, 23); rhs = ConstantValue::GetUnsigned(43, 23); EXPECT_TRUE(lhs.eq(rhs)); EXPECT_TRUE(rhs.eq(lhs)); lhs = ConstantValue::GetSigned(-10, 23); rhs = ConstantValue::GetSigned(-10, 23); EXPECT_TRUE(lhs.eq(rhs)); EXPECT_TRUE(rhs.eq(lhs)); lhs = ConstantValue::GetUnsigned(4194304, 23); rhs = ConstantValue::GetUnsigned(2, 23); EXPECT_FALSE(lhs.eq(rhs)); EXPECT_FALSE(rhs.eq(lhs)); lhs = ConstantValue::GetSigned(-4194304, 23); rhs = ConstantValue::GetSigned(2, 23); EXPECT_FALSE(lhs.eq(rhs)); EXPECT_FALSE(rhs.eq(lhs)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/constant_value.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/constant_value_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a182fb05-afd0-428d-a2cb-989b5f45682f
cpp
google/tensorstore
generation
tensorstore/kvstore/generation.cc
tensorstore/kvstore/generation_test.cc
#include "tensorstore/kvstore/generation.h" #include <stddef.h> #include <stdint.h> #include <cstring> #include <ostream> #include <string_view> #include <utility> #include "absl/time/time.h" #include "tensorstore/serialization/absl_time.h" #include "tensorstore/serialization/serialization.h" #include "tensorstore/util/quote_string.h" namespace tensorstore { namespace { std::string_view CanonicalGeneration(std::string_view generation) { size_t new_size = generation.size(); while (new_size && generation[new_size - 1] == 0) { --new_size; } return generation.substr(0, new_size); } } std::ostream& operator<<(std::ostream& os, const StorageGeneration& g) { return os << QuoteString(g.value); } std::ostream& operator<<(std::ostream& os, const TimestampedStorageGeneration& x) { return os << "{generation=" << x.generation << ", time=" << x.time << "}"; } bool StorageGeneration::Equivalent(std::string_view a, std::string_view b) { return CanonicalGeneration(a) == CanonicalGeneration(b); } StorageGeneration StorageGeneration::Clean(StorageGeneration generation) { size_t new_size = generation.value.size(); while (new_size) { if (generation.value[new_size - 1] & kBaseGeneration) { generation.value[new_size - 1] &= ~(kDirty | kNewlyDirty); break; } --new_size; } generation.value.resize(new_size); return generation; } void StorageGeneration::MarkDirty() { if (value.empty()) { value = (kDirty | kNewlyDirty); } else { value.back() |= (kDirty | kNewlyDirty); } } StorageGeneration StorageGeneration::Dirty(StorageGeneration generation) { if (generation.value.empty()) { return StorageGeneration{std::string(1, kDirty)}; } generation.value.back() |= kDirty; return generation; } StorageGeneration StorageGeneration::FromUint64(uint64_t n) { StorageGeneration generation; generation.value.resize(9); std::memcpy(generation.value.data(), &n, 8); generation.value[8] = kBaseGeneration; return generation; } StorageGeneration StorageGeneration::FromString(std::string_view s) { StorageGeneration generation; generation.value.reserve(s.size() + 1); generation.value += s; generation.value += kBaseGeneration; return generation; } StorageGeneration StorageGeneration::Condition( const StorageGeneration& generation, StorageGeneration condition) { if (IsDirty(generation)) { return Dirty(Clean(std::move(condition))); } return Clean(std::move(condition)); } bool StorageGeneration::IsDirty(const StorageGeneration& generation) { auto canonical = CanonicalGeneration(generation.value); return !canonical.empty() && (canonical.back() & kDirty); } bool StorageGeneration::IsInnerLayerDirty(const StorageGeneration& generation) { return !generation.value.empty() && (generation.value.back() & kDirty); } StorageGeneration StorageGeneration::AddLayer(StorageGeneration generation) { generation.value.resize(generation.value.size() + 1); return generation; } bool StorageGeneration::IsConditional(const StorageGeneration& generation) { size_t new_size = generation.value.size(); while (new_size && !(generation.value[new_size - 1] & kBaseGeneration)) { --new_size; } return (new_size != 0); } bool StorageGeneration::IsConditionalOn(const StorageGeneration& generation, const StorageGeneration& condition) { size_t size = generation.value.size(); return size != 0 && condition.value.size() == size && std::memcmp(generation.value.data(), condition.value.data(), size - 1) == 0 && (generation.value[size] | kDirty | kNewlyDirty) == (condition.value[size] | kDirty | kNewlyDirty); } std::string_view StorageGeneration::DecodeString( const StorageGeneration& 
generation) { std::string_view s = generation.value; if (s.empty()) return {}; while (true) { bool start_of_tags = static_cast<bool>(s.back() & kBaseGeneration); s.remove_suffix(1); if (start_of_tags || s.empty()) break; } return s; } } TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION( tensorstore::StorageGeneration, tensorstore::serialization::ApplyMembersSerializer< tensorstore::StorageGeneration>()) TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION( tensorstore::TimestampedStorageGeneration, tensorstore::serialization::ApplyMembersSerializer< tensorstore::TimestampedStorageGeneration>())
#include "tensorstore/kvstore/generation.h" #include <gtest/gtest.h> #include "tensorstore/serialization/serialization.h" #include "tensorstore/serialization/test_util.h" namespace { using ::tensorstore::StorageGeneration; using ::tensorstore::TimestampedStorageGeneration; using ::tensorstore::serialization::TestSerializationRoundTrip; TEST(StorageGenerationTest, Basic) { EXPECT_TRUE(StorageGeneration::IsUnknown(StorageGeneration::Unknown())); EXPECT_FALSE(StorageGeneration::IsUnknown(StorageGeneration::NoValue())); EXPECT_FALSE(StorageGeneration::IsNoValue(StorageGeneration::Unknown())); EXPECT_TRUE(StorageGeneration::IsNoValue(StorageGeneration::NoValue())); EXPECT_EQ(StorageGeneration{std::string{StorageGeneration::kDirty}}, StorageGeneration::Dirty(StorageGeneration::Unknown())); StorageGeneration gen{ std::string{1, 2, 3, 4, 5, StorageGeneration::kBaseGeneration}}; StorageGeneration local_gen{std::string{ 1, 2, 3, 4, 5, StorageGeneration::kBaseGeneration | StorageGeneration::kDirty}}; EXPECT_FALSE(StorageGeneration::IsUnknown(gen)); EXPECT_FALSE(StorageGeneration::IsUnknown(local_gen)); EXPECT_TRUE(StorageGeneration::IsClean(gen)); EXPECT_FALSE(StorageGeneration::IsClean(local_gen)); EXPECT_FALSE(StorageGeneration::IsDirty(gen)); EXPECT_TRUE(StorageGeneration::IsDirty(local_gen)); EXPECT_EQ(local_gen, StorageGeneration::Dirty(gen)); EXPECT_EQ(gen, StorageGeneration::Clean(local_gen)); EXPECT_TRUE(StorageGeneration::IsClean(StorageGeneration::NoValue())); EXPECT_FALSE(StorageGeneration::IsClean(StorageGeneration::Unknown())); EXPECT_EQ(StorageGeneration::NoValue(), StorageGeneration::Clean(StorageGeneration::NoValue())); } TEST(StorageGenerationTest, Uint64) { auto g = StorageGeneration::FromUint64(12345); EXPECT_TRUE(StorageGeneration::IsUint64(g)); EXPECT_EQ(12345, StorageGeneration::ToUint64(g)); EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::Unknown())); EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::NoValue())); EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::Invalid())); } TEST(StorageGenerationSerializationTest, Basic) { TestSerializationRoundTrip(StorageGeneration::Unknown()); TestSerializationRoundTrip(StorageGeneration::FromUint64(12345)); } TEST(TimestampedStorageGenerationSerializationTest, Basic) { TestSerializationRoundTrip(TimestampedStorageGeneration( StorageGeneration::FromUint64(12345), absl::InfinitePast())); TestSerializationRoundTrip(TimestampedStorageGeneration( StorageGeneration::FromUint64(12345), absl::InfiniteFuture())); } TEST(StorageGenerationTest, IsCleanValidValue) { EXPECT_FALSE( StorageGeneration::IsCleanValidValue(StorageGeneration::Unknown())); EXPECT_FALSE( StorageGeneration::IsCleanValidValue(StorageGeneration::NoValue())); EXPECT_FALSE( StorageGeneration::IsCleanValidValue(StorageGeneration::Invalid())); EXPECT_TRUE(StorageGeneration::IsCleanValidValue( StorageGeneration::FromString("abc"))); EXPECT_TRUE( StorageGeneration::IsCleanValidValue(StorageGeneration::FromUint64(42))); } TEST(StorageGenerationTest, DecodeString) { EXPECT_EQ("abc", StorageGeneration::DecodeString( StorageGeneration::FromString("abc"))); } }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/generation.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/generation_test.cc
4f887a6430414cd6088e1743555015b10f116d50
708fd6f0-6f63-4b65-9f78-85af47149678
cpp
tensorflow/tensorflow
buffered_file
third_party/xla/xla/tsl/lib/io/buffered_file.h
third_party/xla/xla/tsl/lib/io/buffered_file_test.cc
#ifndef XLA_TSL_LIB_IO_BUFFERED_FILE_H_ #define XLA_TSL_LIB_IO_BUFFERED_FILE_H_ #include <algorithm> #include <memory> #include <string> #include <utility> #include "xla/tsl/lib/hash/crc32c.h" #include "tsl/platform/cord.h" #include "tsl/platform/file_system.h" #include "tsl/platform/status.h" namespace tsl { class BufferedWritableFile : public WritableFile { public: explicit BufferedWritableFile(std::unique_ptr<WritableFile> file, int64_t buffer_size = kDefaultBufferSize) : file_(std::move(file)) { buffer_.resize(buffer_size); } ~BufferedWritableFile() override { Close().IgnoreError(); } absl::Status Append(absl::string_view str_data) override { int64_t bytes_left = str_data.size(); const char* data = str_data.data(); while (bytes_left > 0) { int64_t append_bytes = std::min( static_cast<int64_t>(buffer_.size() - buffer_pos_), bytes_left); std::copy_n(data, append_bytes, buffer_.begin() + buffer_pos_); crc32_ = crc32c::Extend(crc32_, &buffer_[buffer_pos_], append_bytes); buffer_pos_ += append_bytes; if (buffer_pos_ == buffer_.size()) { TF_RETURN_IF_ERROR(file_->Append(buffer_)); buffer_pos_ = 0; } data = data + append_bytes; bytes_left -= append_bytes; } return absl::OkStatus(); } absl::Status Append(const absl::Cord& data) override { for (absl::string_view fragment : data.Chunks()) { TF_RETURN_IF_ERROR(Append(fragment)); } return absl::OkStatus(); } absl::Status Close() override { TF_RETURN_IF_ERROR(Flush()); return file_->Close(); } absl::Status Flush() override { if (buffer_pos_ > 0) { TF_RETURN_IF_ERROR( file_->Append(absl::string_view(&buffer_[0], buffer_pos_))); buffer_pos_ = 0; } return file_->Flush(); } absl::Status Tell(int64_t* position) override { int64_t bytes_written; absl::Status status = file_->Tell(&bytes_written); if (status.ok()) { *position = bytes_written + buffer_pos_; return absl::OkStatus(); } else { return status; } } absl::Status Sync() override { return file_->Sync(); } uint32_t crc32() const { return crc32_; } void reset_crc32() { crc32_ = 0; } private: static constexpr int64_t kDefaultBufferSize = 1048576; std::string buffer_; int64_t buffer_pos_ = 0; std::unique_ptr<WritableFile> file_; uint32_t crc32_ = 0; BufferedWritableFile(const BufferedWritableFile&) = delete; void operator=(const BufferedWritableFile&) = delete; }; } #endif
#include "xla/tsl/lib/io/buffered_file.h" #include <memory> #include <utility> #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/env.h" #include "tsl/platform/test.h" #include "tsl/platform/test_benchmark.h" namespace tsl { namespace io { namespace { TEST(BufferedInputStream, Tell) { Env* env = Env::Default(); string fname; ASSERT_TRUE(env->LocalTempFilename(&fname)); std::unique_ptr<WritableFile> write_file; TF_ASSERT_OK(env->NewWritableFile(fname, &write_file)); BufferedWritableFile file(std::move(write_file), 8); int64_t position; TF_ASSERT_OK(file.Append("foo")); TF_ASSERT_OK(file.Tell(&position)); EXPECT_EQ(position, 3); TF_ASSERT_OK(file.Append("bar")); TF_ASSERT_OK(file.Tell(&position)); EXPECT_EQ(position, 6); TF_ASSERT_OK(file.Append("baz")); TF_ASSERT_OK(file.Tell(&position)); EXPECT_EQ(position, 9); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/buffered_file.h
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/buffered_file_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6e35bbc7-1b0a-42d5-8c5e-f0868b68394d
cpp
google/cel-cpp
ast
extensions/protobuf/internal/ast.cc
extensions/protobuf/internal/ast_test.cc
#include "extensions/protobuf/internal/ast.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <stack> #include <vector> #include "google/api/expr/v1alpha1/syntax.pb.h" #include "google/protobuf/struct.pb.h" #include "absl/base/attributes.h" #include "absl/base/nullability.h" #include "absl/functional/overload.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/types/variant.h" #include "common/ast.h" #include "common/constant.h" #include "extensions/protobuf/internal/constant.h" #include "internal/status_macros.h" namespace cel::extensions::protobuf_internal { namespace { using ExprProto = google::api::expr::v1alpha1::Expr; using ConstantProto = google::api::expr::v1alpha1::Constant; using StructExprProto = google::api::expr::v1alpha1::Expr::CreateStruct; class ExprToProtoState final { private: struct Frame final { absl::Nonnull<const Expr*> expr; absl::Nonnull<google::api::expr::v1alpha1::Expr*> proto; }; public: absl::Status ExprToProto(const Expr& expr, absl::Nonnull<google::api::expr::v1alpha1::Expr*> proto) { Push(expr, proto); Frame frame; while (Pop(frame)) { CEL_RETURN_IF_ERROR(ExprToProtoImpl(*frame.expr, frame.proto)); } return absl::OkStatus(); } private: absl::Status ExprToProtoImpl(const Expr& expr, absl::Nonnull<google::api::expr::v1alpha1::Expr*> proto) { return absl::visit( absl::Overload( [&expr, proto](const UnspecifiedExpr&) -> absl::Status { proto->Clear(); proto->set_id(expr.id()); return absl::OkStatus(); }, [this, &expr, proto](const Constant& const_expr) -> absl::Status { return ConstExprToProto(expr, const_expr, proto); }, [this, &expr, proto](const IdentExpr& ident_expr) -> absl::Status { return IdentExprToProto(expr, ident_expr, proto); }, [this, &expr, proto](const SelectExpr& select_expr) -> absl::Status { return SelectExprToProto(expr, select_expr, proto); }, [this, &expr, proto](const CallExpr& call_expr) -> absl::Status { return CallExprToProto(expr, call_expr, proto); }, [this, &expr, proto](const ListExpr& list_expr) -> absl::Status { return ListExprToProto(expr, list_expr, proto); }, [this, &expr, proto](const StructExpr& struct_expr) -> absl::Status { return StructExprToProto(expr, struct_expr, proto); }, [this, &expr, proto](const MapExpr& map_expr) -> absl::Status { return MapExprToProto(expr, map_expr, proto); }, [this, &expr, proto]( const ComprehensionExpr& comprehension_expr) -> absl::Status { return ComprehensionExprToProto(expr, comprehension_expr, proto); }), expr.kind()); } absl::Status ConstExprToProto(const Expr& expr, const Constant& const_expr, absl::Nonnull<ExprProto*> proto) { proto->Clear(); proto->set_id(expr.id()); return ConstantToProto(const_expr, proto->mutable_const_expr()); } absl::Status IdentExprToProto(const Expr& expr, const IdentExpr& ident_expr, absl::Nonnull<ExprProto*> proto) { proto->Clear(); auto* ident_proto = proto->mutable_ident_expr(); proto->set_id(expr.id()); ident_proto->set_name(ident_expr.name()); return absl::OkStatus(); } absl::Status SelectExprToProto(const Expr& expr, const SelectExpr& select_expr, absl::Nonnull<ExprProto*> proto) { proto->Clear(); auto* select_proto = proto->mutable_select_expr(); proto->set_id(expr.id()); if (select_expr.has_operand()) { Push(select_expr.operand(), select_proto->mutable_operand()); } select_proto->set_field(select_expr.field()); select_proto->set_test_only(select_expr.test_only()); return absl::OkStatus(); } absl::Status CallExprToProto(const Expr& expr, const CallExpr& call_expr, absl::Nonnull<ExprProto*> proto) { 
proto->Clear(); auto* call_proto = proto->mutable_call_expr(); proto->set_id(expr.id()); if (call_expr.has_target()) { Push(call_expr.target(), call_proto->mutable_target()); } call_proto->set_function(call_expr.function()); if (!call_expr.args().empty()) { call_proto->mutable_args()->Reserve( static_cast<int>(call_expr.args().size())); for (const auto& argument : call_expr.args()) { Push(argument, call_proto->add_args()); } } return absl::OkStatus(); } absl::Status ListExprToProto(const Expr& expr, const ListExpr& list_expr, absl::Nonnull<ExprProto*> proto) { proto->Clear(); auto* list_proto = proto->mutable_list_expr(); proto->set_id(expr.id()); if (!list_expr.elements().empty()) { list_proto->mutable_elements()->Reserve( static_cast<int>(list_expr.elements().size())); for (size_t i = 0; i < list_expr.elements().size(); ++i) { const auto& element_expr = list_expr.elements()[i]; auto* element_proto = list_proto->add_elements(); if (element_expr.has_expr()) { Push(element_expr.expr(), element_proto); } if (element_expr.optional()) { list_proto->add_optional_indices(static_cast<int32_t>(i)); } } } return absl::OkStatus(); } absl::Status StructExprToProto(const Expr& expr, const StructExpr& struct_expr, absl::Nonnull<ExprProto*> proto) { proto->Clear(); auto* struct_proto = proto->mutable_struct_expr(); proto->set_id(expr.id()); struct_proto->set_message_name(struct_expr.name()); if (!struct_expr.fields().empty()) { struct_proto->mutable_entries()->Reserve( static_cast<int>(struct_expr.fields().size())); for (const auto& field_expr : struct_expr.fields()) { auto* field_proto = struct_proto->add_entries(); field_proto->set_id(field_expr.id()); field_proto->set_field_key(field_expr.name()); if (field_expr.has_value()) { Push(field_expr.value(), field_proto->mutable_value()); } if (field_expr.optional()) { field_proto->set_optional_entry(true); } } } return absl::OkStatus(); } absl::Status MapExprToProto(const Expr& expr, const MapExpr& map_expr, absl::Nonnull<ExprProto*> proto) { proto->Clear(); auto* map_proto = proto->mutable_struct_expr(); proto->set_id(expr.id()); if (!map_expr.entries().empty()) { map_proto->mutable_entries()->Reserve( static_cast<int>(map_expr.entries().size())); for (const auto& entry_expr : map_expr.entries()) { auto* entry_proto = map_proto->add_entries(); entry_proto->set_id(entry_expr.id()); if (entry_expr.has_key()) { Push(entry_expr.key(), entry_proto->mutable_map_key()); } if (entry_expr.has_value()) { Push(entry_expr.value(), entry_proto->mutable_value()); } if (entry_expr.optional()) { entry_proto->set_optional_entry(true); } } } return absl::OkStatus(); } absl::Status ComprehensionExprToProto( const Expr& expr, const ComprehensionExpr& comprehension_expr, absl::Nonnull<ExprProto*> proto) { proto->Clear(); auto* comprehension_proto = proto->mutable_comprehension_expr(); proto->set_id(expr.id()); comprehension_proto->set_iter_var(comprehension_expr.iter_var()); if (comprehension_expr.has_iter_range()) { Push(comprehension_expr.iter_range(), comprehension_proto->mutable_iter_range()); } comprehension_proto->set_accu_var(comprehension_expr.accu_var()); if (comprehension_expr.has_accu_init()) { Push(comprehension_expr.accu_init(), comprehension_proto->mutable_accu_init()); } if (comprehension_expr.has_loop_condition()) { Push(comprehension_expr.loop_condition(), comprehension_proto->mutable_loop_condition()); } if (comprehension_expr.has_loop_step()) { Push(comprehension_expr.loop_step(), comprehension_proto->mutable_loop_step()); } if 
(comprehension_expr.has_result()) { Push(comprehension_expr.result(), comprehension_proto->mutable_result()); } return absl::OkStatus(); } void Push(const Expr& expr, absl::Nonnull<ExprProto*> proto) { frames_.push(Frame{&expr, proto}); } bool Pop(Frame& frame) { if (frames_.empty()) { return false; } frame = frames_.top(); frames_.pop(); return true; } std::stack<Frame, std::vector<Frame>> frames_; }; class ExprFromProtoState final { private: struct Frame final { absl::Nonnull<const ExprProto*> proto; absl::Nonnull<Expr*> expr; }; public: absl::Status ExprFromProto(const ExprProto& proto, Expr& expr) { Push(proto, expr); Frame frame; while (Pop(frame)) { CEL_RETURN_IF_ERROR(ExprFromProtoImpl(*frame.proto, *frame.expr)); } return absl::OkStatus(); } private: absl::Status ExprFromProtoImpl(const ExprProto& proto, Expr& expr) { switch (proto.expr_kind_case()) { case ExprProto::EXPR_KIND_NOT_SET: expr.Clear(); expr.set_id(proto.id()); return absl::OkStatus(); case ExprProto::kConstExpr: return ConstExprFromProto(proto, proto.const_expr(), expr); case ExprProto::kIdentExpr: return IdentExprFromProto(proto, proto.ident_expr(), expr); case ExprProto::kSelectExpr: return SelectExprFromProto(proto, proto.select_expr(), expr); case ExprProto::kCallExpr: return CallExprFromProto(proto, proto.call_expr(), expr); case ExprProto::kListExpr: return ListExprFromProto(proto, proto.list_expr(), expr); case ExprProto::kStructExpr: if (proto.struct_expr().message_name().empty()) { return MapExprFromProto(proto, proto.struct_expr(), expr); } return StructExprFromProto(proto, proto.struct_expr(), expr); case ExprProto::kComprehensionExpr: return ComprehensionExprFromProto(proto, proto.comprehension_expr(), expr); default: return absl::InvalidArgumentError( absl::StrCat("unexpected ExprKindCase: ", static_cast<int>(proto.expr_kind_case()))); } } absl::Status ConstExprFromProto(const ExprProto& proto, const ConstantProto& const_proto, Expr& expr) { expr.Clear(); expr.set_id(proto.id()); return ConstantFromProto(const_proto, expr.mutable_const_expr()); } absl::Status IdentExprFromProto(const ExprProto& proto, const ExprProto::Ident& ident_proto, Expr& expr) { expr.Clear(); expr.set_id(proto.id()); auto& ident_expr = expr.mutable_ident_expr(); ident_expr.set_name(ident_proto.name()); return absl::OkStatus(); } absl::Status SelectExprFromProto(const ExprProto& proto, const ExprProto::Select& select_proto, Expr& expr) { expr.Clear(); expr.set_id(proto.id()); auto& select_expr = expr.mutable_select_expr(); if (select_proto.has_operand()) { Push(select_proto.operand(), select_expr.mutable_operand()); } select_expr.set_field(select_proto.field()); select_expr.set_test_only(select_proto.test_only()); return absl::OkStatus(); } absl::Status CallExprFromProto(const ExprProto& proto, const ExprProto::Call& call_proto, Expr& expr) { expr.Clear(); expr.set_id(proto.id()); auto& call_expr = expr.mutable_call_expr(); call_expr.set_function(call_proto.function()); if (call_proto.has_target()) { Push(call_proto.target(), call_expr.mutable_target()); } call_expr.mutable_args().reserve( static_cast<size_t>(call_proto.args().size())); for (const auto& argument_proto : call_proto.args()) { Push(argument_proto, call_expr.add_args()); } return absl::OkStatus(); } absl::Status ListExprFromProto(const ExprProto& proto, const ExprProto::CreateList& list_proto, Expr& expr) { expr.Clear(); expr.set_id(proto.id()); auto& list_expr = expr.mutable_list_expr(); list_expr.mutable_elements().reserve( 
static_cast<size_t>(list_proto.elements().size())); for (int i = 0; i < list_proto.elements().size(); ++i) { const auto& element_proto = list_proto.elements()[i]; auto& element_expr = list_expr.add_elements(); Push(element_proto, element_expr.mutable_expr()); const auto& optional_indicies_proto = list_proto.optional_indices(); element_expr.set_optional(std::find(optional_indicies_proto.begin(), optional_indicies_proto.end(), i) != optional_indicies_proto.end()); } return absl::OkStatus(); } absl::Status StructExprFromProto(const ExprProto& proto, const StructExprProto& struct_proto, Expr& expr) { expr.Clear(); expr.set_id(proto.id()); auto& struct_expr = expr.mutable_struct_expr(); struct_expr.set_name(struct_proto.message_name()); struct_expr.mutable_fields().reserve( static_cast<size_t>(struct_proto.entries().size())); for (const auto& field_proto : struct_proto.entries()) { switch (field_proto.key_kind_case()) { case StructExprProto::Entry::KEY_KIND_NOT_SET: ABSL_FALLTHROUGH_INTENDED; case StructExprProto::Entry::kFieldKey: break; case StructExprProto::Entry::kMapKey: return absl::InvalidArgumentError("encountered map entry in struct"); default: return absl::InvalidArgumentError(absl::StrCat( "unexpected struct field kind: ", field_proto.key_kind_case())); } auto& field_expr = struct_expr.add_fields(); field_expr.set_id(field_proto.id()); field_expr.set_name(field_proto.field_key()); if (field_proto.has_value()) { Push(field_proto.value(), field_expr.mutable_value()); } field_expr.set_optional(field_proto.optional_entry()); } return absl::OkStatus(); } absl::Status MapExprFromProto(const ExprProto& proto, const ExprProto::CreateStruct& map_proto, Expr& expr) { expr.Clear(); expr.set_id(proto.id()); auto& map_expr = expr.mutable_map_expr(); map_expr.mutable_entries().reserve( static_cast<size_t>(map_proto.entries().size())); for (const auto& entry_proto : map_proto.entries()) { switch (entry_proto.key_kind_case()) { case StructExprProto::Entry::KEY_KIND_NOT_SET: ABSL_FALLTHROUGH_INTENDED; case StructExprProto::Entry::kMapKey: break; case StructExprProto::Entry::kFieldKey: return absl::InvalidArgumentError("encountered struct field in map"); default: return absl::InvalidArgumentError(absl::StrCat( "unexpected map entry kind: ", entry_proto.key_kind_case())); } auto& entry_expr = map_expr.add_entries(); entry_expr.set_id(entry_proto.id()); if (entry_proto.has_map_key()) { Push(entry_proto.map_key(), entry_expr.mutable_key()); } if (entry_proto.has_value()) { Push(entry_proto.value(), entry_expr.mutable_value()); } entry_expr.set_optional(entry_proto.optional_entry()); } return absl::OkStatus(); } absl::Status ComprehensionExprFromProto( const ExprProto& proto, const ExprProto::Comprehension& comprehension_proto, Expr& expr) { expr.Clear(); expr.set_id(proto.id()); auto& comprehension_expr = expr.mutable_comprehension_expr(); comprehension_expr.set_iter_var(comprehension_proto.iter_var()); comprehension_expr.set_accu_var(comprehension_proto.accu_var()); if (comprehension_proto.has_iter_range()) { Push(comprehension_proto.iter_range(), comprehension_expr.mutable_iter_range()); } if (comprehension_proto.has_accu_init()) { Push(comprehension_proto.accu_init(), comprehension_expr.mutable_accu_init()); } if (comprehension_proto.has_loop_condition()) { Push(comprehension_proto.loop_condition(), comprehension_expr.mutable_loop_condition()); } if (comprehension_proto.has_loop_step()) { Push(comprehension_proto.loop_step(), comprehension_expr.mutable_loop_step()); } if 
(comprehension_proto.has_result()) { Push(comprehension_proto.result(), comprehension_expr.mutable_result()); } return absl::OkStatus(); } void Push(const ExprProto& proto, Expr& expr) { frames_.push(Frame{&proto, &expr}); } bool Pop(Frame& frame) { if (frames_.empty()) { return false; } frame = frames_.top(); frames_.pop(); return true; } std::stack<Frame, std::vector<Frame>> frames_; }; } absl::Status ExprToProto(const Expr& expr, absl::Nonnull<google::api::expr::v1alpha1::Expr*> proto) { ExprToProtoState state; return state.ExprToProto(expr, proto); } absl::Status ExprFromProto(const google::api::expr::v1alpha1::Expr& proto, Expr& expr) { ExprFromProtoState state; return state.ExprFromProto(proto, expr); } }
#include "extensions/protobuf/internal/ast.h" #include <string> #include "google/api/expr/v1alpha1/syntax.pb.h" #include "absl/status/status.h" #include "common/ast.h" #include "internal/proto_matchers.h" #include "internal/testing.h" #include "google/protobuf/text_format.h" namespace cel::extensions::protobuf_internal { namespace { using ::absl_testing::IsOk; using ::absl_testing::StatusIs; using ::cel::internal::test::EqualsProto; using ExprProto = google::api::expr::v1alpha1::Expr; struct ExprRoundtripTestCase { std::string input; }; using ExprRoundTripTest = ::testing::TestWithParam<ExprRoundtripTestCase>; TEST_P(ExprRoundTripTest, RoundTrip) { const auto& test_case = GetParam(); ExprProto original_proto; ASSERT_TRUE( google::protobuf::TextFormat::ParseFromString(test_case.input, &original_proto)); Expr expr; ASSERT_THAT(ExprFromProto(original_proto, expr), IsOk()); ExprProto proto; ASSERT_THAT(ExprToProto(expr, &proto), IsOk()); EXPECT_THAT(proto, EqualsProto(original_proto)); } INSTANTIATE_TEST_SUITE_P( ExprRoundTripTest, ExprRoundTripTest, ::testing::ValuesIn<ExprRoundtripTestCase>({ {R"pb( )pb"}, {R"pb( id: 1 )pb"}, {R"pb( id: 1 const_expr {} )pb"}, {R"pb( id: 1 const_expr { null_value: NULL_VALUE } )pb"}, {R"pb( id: 1 const_expr { bool_value: true } )pb"}, {R"pb( id: 1 const_expr { int64_value: 1 } )pb"}, {R"pb( id: 1 const_expr { uint64_value: 1 } )pb"}, {R"pb( id: 1 const_expr { double_value: 1 } )pb"}, {R"pb( id: 1 const_expr { string_value: "foo" } )pb"}, {R"pb( id: 1 const_expr { bytes_value: "foo" } )pb"}, {R"pb( id: 1 const_expr { duration_value { seconds: 1 nanos: 1 } } )pb"}, {R"pb( id: 1 const_expr { timestamp_value { seconds: 1 nanos: 1 } } )pb"}, {R"pb( id: 1 ident_expr { name: "foo" } )pb"}, {R"pb( id: 1 select_expr { operand { id: 2 ident_expr { name: "bar" } } field: "foo" test_only: true } )pb"}, {R"pb( id: 1 call_expr { target { id: 2 ident_expr { name: "bar" } } function: "foo" args { id: 3 ident_expr { name: "baz" } } } )pb"}, {R"pb( id: 1 list_expr { elements { id: 2 ident_expr { name: "bar" } } elements { id: 3 ident_expr { name: "baz" } } optional_indices: 0 } )pb"}, {R"pb( id: 1 struct_expr { message_name: "google.type.Expr" entries { id: 2 field_key: "description" value { id: 3 const_expr { string_value: "foo" } } optional_entry: true } entries { id: 4 field_key: "expr" value { id: 5 const_expr { string_value: "bar" } } } } )pb"}, {R"pb( id: 1 struct_expr { entries { id: 2 map_key { id: 3 const_expr { string_value: "description" } } value { id: 4 const_expr { string_value: "foo" } } optional_entry: true } entries { id: 5 map_key { id: 6 const_expr { string_value: "expr" } } value { id: 7 const_expr { string_value: "foo" } } optional_entry: true } } )pb"}, {R"pb( id: 1 comprehension_expr { iter_var: "foo" iter_range { id: 2 list_expr {} } accu_var: "bar" accu_init { id: 3 list_expr {} } loop_condition { id: 4 const_expr { bool_value: true } } loop_step { id: 4 ident_expr { name: "bar" } } result { id: 5 ident_expr { name: "foo" } } } )pb"}, })); TEST(ExprFromProto, StructFieldInMap) { ExprProto original_proto; ASSERT_TRUE( google::protobuf::TextFormat::ParseFromString(R"pb( id: 1 struct_expr: { entries: { id: 2 field_key: "foo" value: { id: 3 ident_expr: { name: "bar" } } } } )pb", &original_proto)); Expr expr; ASSERT_THAT(ExprFromProto(original_proto, expr), StatusIs(absl::StatusCode::kInvalidArgument)); } TEST(ExprFromProto, MapEntryInStruct) { ExprProto original_proto; ASSERT_TRUE( google::protobuf::TextFormat::ParseFromString(R"pb( id: 1 struct_expr: { 
message_name: "some.Message" entries: { id: 2 map_key: { id: 3 ident_expr: { name: "foo" } } value: { id: 4 ident_expr: { name: "bar" } } } } )pb", &original_proto)); Expr expr; ASSERT_THAT(ExprFromProto(original_proto, expr), StatusIs(absl::StatusCode::kInvalidArgument)); } } }
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/internal/ast.cc
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/internal/ast_test.cc
4552db5798fb0853b131b783d8875794334fae7f
9ed64317-64de-4f9b-a9f5-58e30cb2b26b
cpp
tensorflow/tensorflow
serialization
tensorflow/lite/delegates/gpu/gl/serialization.cc
tensorflow/lite/delegates/gpu/gl/serialization_test.cc
#include "tensorflow/lite/delegates/gpu/gl/serialization.h" #include <string> #include <utility> #include <variant> #include "absl/types/variant.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/gl/variable.h" namespace tflite { namespace gpu { namespace gl { using flatbuffers::Offset; using flatbuffers::Vector; namespace { struct ParameterValueGetter { Offset<void> operator()(int32_t value) { auto offset = builder->CreateVector(std::vector<int32_t>{value}); data::DataInt32Builder data(*builder); data.add_data(offset); return data.Finish().Union(); } Offset<void> operator()(const int2& value) { auto offset = builder->CreateVector(std::vector<int32_t>{value.x, value.y}); data::DataInt32Builder data(*builder); data.add_data(offset); return data.Finish().Union(); } Offset<void> operator()(const int4& value) { auto offset = builder->CreateVector( std::vector<int32_t>{value.x, value.y, value.z, value.w}); data::DataInt32Builder data(*builder); data.add_data(offset); return data.Finish().Union(); } Offset<void> operator()(const std::vector<int2>& value) { std::vector<int32_t> d(value.size() * 2); for (size_t i = 0; i < value.size(); ++i) { d[i * 2] = value[i].x; d[i * 2 + 1] = value[i].y; } auto offset = builder->CreateVector(d); data::DataInt32Builder data(*builder); data.add_data(offset); return data.Finish().Union(); } Offset<void> operator()(uint32_t value) { auto offset = builder->CreateVector(std::vector<uint32_t>{value}); data::DataUint32Builder data(*builder); data.add_data(offset); return data.Finish().Union(); } Offset<void> operator()(const uint4& value) { auto offset = builder->CreateVector( std::vector<uint32_t>{value.x, value.y, value.z, value.w}); data::DataUint32Builder data(*builder); data.add_data(offset); return data.Finish().Union(); } Offset<void> operator()(float value) { auto offset = builder->CreateVector(std::vector<float>{value}); data::DataFloatBuilder data(*builder); data.add_data(offset); return data.Finish().Union(); } Offset<void> operator()(const float2& value) { auto offset = builder->CreateVector(std::vector<float>{value.x, value.y}); data::DataFloatBuilder data(*builder); data.add_data(offset); return data.Finish().Union(); } Offset<void> operator()(const float4& value) { auto offset = builder->CreateVector( std::vector<float>{value.x, value.y, value.z, value.w}); data::DataFloatBuilder data(*builder); data.add_data(offset); return data.Finish().Union(); } Offset<void> operator()(const std::vector<float4>& value) { std::vector<float> d(value.size() * 4); for (size_t i = 0; i < value.size(); ++i) { d[i * 4] = value[i].x; d[i * 4 + 1] = value[i].y; d[i * 4 + 2] = value[i].z; d[i * 4 + 3] = value[i].w; } auto offset = builder->CreateVector(d); data::DataFloatBuilder data(*builder); data.add_data(offset); return data.Finish().Union(); } ::flatbuffers::FlatBufferBuilder* builder; }; struct DataVariantTypeGetter { data::DataVariant operator()(int32_t) const { return data::DataVariant::DataInt32; } data::DataVariant operator()(const int2&) const { return data::DataVariant::DataInt32; } data::DataVariant operator()(const int4&) const { return data::DataVariant::DataInt32; } data::DataVariant operator()(const std::vector<int2>&) const { return data::DataVariant::DataInt32; } data::DataVariant operator()(uint32_t) const { return data::DataVariant::DataUint32; } data::DataVariant operator()(const uint4&) 
const { return data::DataVariant::DataUint32; } data::DataVariant operator()(float) const { return data::DataVariant::DataFloat; } data::DataVariant operator()(const float2&) const { return data::DataVariant::DataFloat; } data::DataVariant operator()(const float4&) const { return data::DataVariant::DataFloat; } data::DataVariant operator()(const std::vector<float4>&) const { return data::DataVariant::DataFloat; } }; struct ParameterTypeGetter { data::ParameterType operator()(int32_t) const { return data::ParameterType::INT32; } data::ParameterType operator()(const int2&) const { return data::ParameterType::INT32; } data::ParameterType operator()(const int4&) const { return data::ParameterType::INT32; } data::ParameterType operator()(const std::vector<int2>&) const { return data::ParameterType::INT32_2; } data::ParameterType operator()(uint32_t) const { return data::ParameterType::UINT32; } data::ParameterType operator()(const uint4&) const { return data::ParameterType::UINT32; } data::ParameterType operator()(float) const { return data::ParameterType::FLOAT32; } data::ParameterType operator()(const float2&) const { return data::ParameterType::FLOAT32; } data::ParameterType operator()(const float4&) const { return data::ParameterType::FLOAT32; } data::ParameterType operator()(const std::vector<float4>&) const { return data::ParameterType::FLOAT32; } }; data::DataType ToFB(DataType type) { switch (type) { case DataType::INT16: return data::DataType::INT16; case DataType::INT32: return data::DataType::INT32; case DataType::FLOAT16: return data::DataType::FLOAT16; case DataType::FLOAT32: return data::DataType::FLOAT32; default: return data::DataType::UNKNOWN; } } data::ObjectType ToFB(ObjectType type) { switch (type) { case ObjectType::TEXTURE: return data::ObjectType::TEXTURE; case ObjectType::BUFFER: return data::ObjectType::BUFFER; default: return data::ObjectType::UNKNOWN; } } struct ObjectSizeGetter { Offset<void> operator()(const uint3& shape) { data::Uint3Builder shape_builder(*builder); shape_builder.add_x(shape.x); shape_builder.add_y(shape.y); shape_builder.add_z(shape.z); return shape_builder.Finish().Union(); } Offset<void> operator()(const uint2& shape) { data::Uint2Builder shape_builder(*builder); shape_builder.add_x(shape.x); shape_builder.add_y(shape.y); return shape_builder.Finish().Union(); } Offset<void> operator()(uint32_t shape) { data::Uint1Builder shape_builder(*builder); shape_builder.add_x(shape); return shape_builder.Finish().Union(); } ::flatbuffers::FlatBufferBuilder* builder; }; struct ObjectSizeTypeGetter { data::ObjectSize operator()(const uint3&) const { return data::ObjectSize::Uint3; } data::ObjectSize operator()(const uint2&) const { return data::ObjectSize::Uint2; } data::ObjectSize operator()(const uint32_t) const { return data::ObjectSize::Uint1; } }; struct ObjectGetter { Offset<void> operator()(const ObjectData& data) { auto fb_data = builder->CreateVector(data); data::ObjectDataBuilder data_builder(*builder); data_builder.add_data(fb_data); return data_builder.Finish().Union(); } Offset<void> operator()(ObjectRef ref) { data::ObjectRefBuilder ref_builder(*builder); ref_builder.add_global_id(ref); return ref_builder.Finish().Union(); } ::flatbuffers::FlatBufferBuilder* builder; }; struct ObjectTypeGetter { data::ObjectVariant operator()(const ObjectData&) const { return data::ObjectVariant::ObjectData; } data::ObjectVariant operator()(const ObjectRef&) const { return data::ObjectVariant::ObjectRef; } }; data::AccessType ToFB(AccessType type) { switch 
(type) { case AccessType::READ: return data::AccessType::READ; case AccessType::WRITE: return data::AccessType::WRITE; case AccessType::READ_WRITE: return data::AccessType::READ_WRITE; } } Offset<data::Uint3> Encode(const uint3& v, ::flatbuffers::FlatBufferBuilder* builder) { data::Uint3Builder uint3_builder(*builder); uint3_builder.add_x(v.x); uint3_builder.add_y(v.y); uint3_builder.add_z(v.z); return uint3_builder.Finish(); } Offset<data::Parameters> Encode(const CompiledModelOptions& options, ::flatbuffers::FlatBufferBuilder* builder) { data::ParametersBuilder params_builder(*builder); params_builder.add_dynamic_batch(options.dynamic_batch); return params_builder.Finish(); } } void SerializedCompiledModelBuilder::AddShader(const std::string& shader_src) { shaders_.push_back(builder_.CreateString(shader_src)); } void SerializedCompiledModelBuilder::AddProgram( const std::vector<Variable>& parameters, const std::vector<Object>& objects, const uint3& workgroup_size, const uint3& num_workgroups, size_t shader_index) { Offset<data::Uint3> fb_workgroups = Encode(num_workgroups, &builder_); Offset<data::Uint3> fb_workgroup_size = Encode(workgroup_size, &builder_); Offset<Vector<Offset<data::UniformParameter>>> fb_params; { std::vector<Offset<data::UniformParameter>> offsets; for (const Variable& param : parameters) { auto name = builder_.CreateString(param.name); auto data = std::visit(ParameterValueGetter{&builder_}, param.value); data::UniformParameterBuilder builder(builder_); builder.add_name(name); builder.add_data_type(std::visit(DataVariantTypeGetter{}, param.value)); builder.add_data(data); builder.add_type(std::visit(ParameterTypeGetter{}, param.value)); offsets.push_back(builder.Finish()); } fb_params = builder_.CreateVector(offsets); } Offset<Vector<Offset<data::Object>>> fb_objects; { std::vector<Offset<data::Object>> offsets; for (const Object& object : objects) { auto object_variant = std::visit(ObjectGetter{&builder_}, object.object); auto size = std::visit(ObjectSizeGetter{&builder_}, object.size); data::ObjectBuilder builder(builder_); builder.add_access(ToFB(object.access)); builder.add_binding(object.binding); builder.add_type(ToFB(object.object_type)); builder.add_data_type(ToFB(object.data_type)); builder.add_size_type(std::visit(ObjectSizeTypeGetter{}, object.size)); builder.add_size(size); builder.add_object_type(std::visit(ObjectTypeGetter{}, object.object)); builder.add_object(object_variant); offsets.push_back(builder.Finish()); } fb_objects = builder_.CreateVector(offsets); } data::ProgramBuilder program_builder(builder_); program_builder.add_number_workgroups(fb_workgroups); program_builder.add_workgroup_size(fb_workgroup_size); program_builder.add_parameters(fb_params); program_builder.add_objects(fb_objects); program_builder.add_shader_index(shader_index); programs_.push_back(program_builder.Finish()); } absl::Span<const uint8_t> SerializedCompiledModelBuilder::Finalize( const CompiledModelOptions& options) { auto shaders = builder_.CreateVector(shaders_); auto programs = builder_.CreateVector(programs_); auto parameters = Encode(options, &builder_); data::CompiledModelBuilder model_builder(builder_); model_builder.add_shaders(shaders); model_builder.add_programs(programs); model_builder.add_parameters(parameters); data::FinishCompiledModelBuffer(builder_, model_builder.Finish()); return absl::MakeConstSpan(builder_.GetBufferPointer(), builder_.GetSize()); } namespace { absl::Status ParseParameter(const data::UniformParameter& fb_parameter, Variable* parameter) { 
parameter->name = fb_parameter.name()->str(); switch (fb_parameter.type()) { case data::ParameterType::INT32: { auto* ptr = fb_parameter.data_as_DataInt32(); if (ptr == nullptr) { return absl::InvalidArgumentError("Unexpected data type '" + parameter->name + "'"); } switch (ptr->data()->size()) { case 1: parameter->value = (*ptr->data())[0]; break; case 2: parameter->value = int2((*ptr->data())[0], (*ptr->data())[1]); break; case 4: parameter->value = int4((*ptr->data())[0], (*ptr->data())[1], (*ptr->data())[2], (*ptr->data())[3]); break; default: return absl::InvalidArgumentError("Unexpected size for parameter '" + parameter->name + "'"); } break; } case data::ParameterType::UINT32: { auto* ptr = fb_parameter.data_as_DataUint32(); if (ptr == nullptr) { return absl::InvalidArgumentError("Unexpected data type '" + parameter->name + "'"); } switch (ptr->data()->size()) { case 1: parameter->value = (*ptr->data())[0]; break; case 4: parameter->value = uint4((*ptr->data())[0], (*ptr->data())[1], (*ptr->data())[2], (*ptr->data())[3]); break; default: return absl::InvalidArgumentError("Unexpected size for parameter '" + parameter->name + "'"); } break; } case data::ParameterType::FLOAT32: { auto* ptr = fb_parameter.data_as_DataFloat(); if (ptr == nullptr) { return absl::InvalidArgumentError("Unexpected data type '" + parameter->name + "'"); } switch (ptr->data()->size()) { case 1: parameter->value = (*ptr->data())[0]; break; case 2: parameter->value = float2((*ptr->data())[0], (*ptr->data())[1]); break; case 4: parameter->value = float4((*ptr->data())[0], (*ptr->data())[1], (*ptr->data())[2], (*ptr->data())[3]); break; default: return absl::InvalidArgumentError("Unexpected size for parameter '" + parameter->name + "'"); } break; } case data::ParameterType::INT32_2: { auto* ptr = fb_parameter.data_as_DataInt32(); if (ptr == nullptr) { return absl::InvalidArgumentError("Unexpected data type '" + parameter->name + "'"); } if (ptr->data()->size() % 2 != 0) { return absl::InvalidArgumentError("Unexpected size for parameter '" + parameter->name + "'"); } std::vector<int2> values(ptr->data()->size() / 2); for (int i = 0; i < values.size(); ++i) { values[i] = int2((*ptr->data())[i * 2], (*ptr->data())[i * 2 + 1]); } parameter->value = values; break; } } return absl::OkStatus(); } DataType ToEnum(data::DataType type) { switch (type) { case data::DataType::INT16: return DataType::INT16; case data::DataType::INT32: return DataType::INT32; case data::DataType::FLOAT16: return DataType::FLOAT16; case data::DataType::FLOAT32: return DataType::FLOAT32; default: return DataType::UNKNOWN; } } ObjectType ToEnum(data::ObjectType type) { switch (type) { case data::ObjectType::TEXTURE: return ObjectType::TEXTURE; case data::ObjectType::BUFFER: return ObjectType::BUFFER; default: return ObjectType::UNKNOWN; } } AccessType ToEnum(data::AccessType type) { switch (type) { case data::AccessType::READ: return AccessType::READ; case data::AccessType::WRITE: return AccessType::WRITE; case data::AccessType::READ_WRITE: return AccessType::READ_WRITE; } } absl::Status ParseObject(const data::Object& fb_object, Object* object) { object->access = ToEnum(fb_object.access()); object->binding = fb_object.binding(); object->object_type = ToEnum(fb_object.type()); object->data_type = ToEnum(fb_object.data_type()); switch (fb_object.size_type()) { case data::ObjectSize::Uint3: { auto* size = fb_object.size_as_Uint3(); object->size = uint3(size->x(), size->y(), size->z()); break; } case data::ObjectSize::Uint2: { auto* size = 
fb_object.size_as_Uint2(); object->size = uint2(size->x(), size->y()); break; } case data::ObjectSize::Uint1: { auto* size = fb_object.size_as_Uint1(); object->size = size->x(); break; } case data::ObjectSize::NONE: return absl::InvalidArgumentError("Texture size is not set"); } switch (fb_object.object_type()) { case data::ObjectVariant::ObjectData: { auto* fb_data = fb_object.object_as_ObjectData(); object->object = std::vector<uint8_t>( fb_data->data()->data(), fb_data->data()->data() + fb_data->data()->size()); break; } case data::ObjectVariant::ObjectRef: { auto* fb_ref = fb_object.object_as_ObjectRef(); object->object = fb_ref->global_id(); break; } case data::ObjectVariant::NONE: { return absl::InvalidArgumentError("Object is not set"); } } return absl::OkStatus(); } CompiledModelOptions ParseParameters(const data::Parameters& fb_parameters) { CompiledModelOptions options; options.dynamic_batch = fb_parameters.dynamic_batch(); return options; } } absl::Status DeserializeCompiledModel(absl::Span<const uint8_t> serialized, DeserializationHandler* handler) { flatbuffers::Verifier verifier(serialized.data(), serialized.size()); if (!data::VerifyCompiledModelBuffer(verifier)) { return absl::InvalidArgumentError("Serialized model is corrupted."); } auto model = data::GetCompiledModel(serialized.data()); for (auto shader : *model->shaders()) { RETURN_IF_ERROR( handler->OnShader(absl::MakeSpan(shader->c_str(), shader->size()))); } std::vector<Variable> parameters; std::vector<Object> objects; for (auto program : *model->programs()) { parameters.clear(); objects.clear(); for (auto fb_parameter : *program->parameters()) { Variable parameter; RETURN_IF_ERROR(ParseParameter(*fb_parameter, &parameter)); parameters.push_back(std::move(parameter)); } for (auto fb_object : *program->objects()) { Object object; RETURN_IF_ERROR(ParseObject(*fb_object, &object)); objects.push_back(std::move(object)); } uint3 workgroup_size(program->workgroup_size()->x(), program->workgroup_size()->y(), program->workgroup_size()->z()); uint3 num_workgroups(program->number_workgroups()->x(), program->number_workgroups()->y(), program->number_workgroups()->z()); RETURN_IF_ERROR(handler->OnProgram(parameters, objects, workgroup_size, num_workgroups, program->shader_index())); } handler->OnOptions(ParseParameters(*model->parameters())); return absl::OkStatus(); } } } }
#include "tensorflow/lite/delegates/gpu/gl/serialization.h" #include <stddef.h> #include <sys/types.h> #include <cstdint> #include <string> #include <variant> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/types/span.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/gl/object.h" #include "tensorflow/lite/delegates/gpu/gl/variable.h" namespace tflite { namespace gpu { namespace gl { namespace { struct ProgramDesc { std::vector<Variable> parameters; std::vector<Object> objects; uint3 workgroup_size; uint3 num_workgroups; size_t shader_index; }; struct Handler : public DeserializationHandler { absl::Status OnShader(absl::Span<const char> shader_src) final { shaders.push_back(std::string(shader_src.data(), shader_src.size())); return absl::OkStatus(); } absl::Status OnProgram(const std::vector<Variable>& parameters, const std::vector<Object>& objects, const uint3& workgroup_size, const uint3& num_workgroups, size_t shader_index) final { programs.push_back( {parameters, objects, workgroup_size, num_workgroups, shader_index}); return absl::OkStatus(); } void OnOptions(const CompiledModelOptions& o) final { options = o; } std::vector<std::string> shaders; std::vector<ProgramDesc> programs; CompiledModelOptions options; }; struct ParameterComparator { bool operator()(int32_t value) const { return value == std::get<int32_t>(a.value); } bool operator()(const int2& value) const { auto v = std::get<int2>(a.value); return value.x == v.x && value.y == v.y; } bool operator()(const int4& value) const { auto v = std::get<int4>(a.value); return value.x == v.x && value.y == v.y && value.z == v.z && value.w == v.w; } bool operator()(const std::vector<int2>& value) const { auto v = std::get<std::vector<int2>>(a.value); if (v.size() != value.size()) { return false; } for (int i = 0; i < v.size(); ++i) { if (v[i].x != value[i].x || v[i].y != value[i].y) { return false; } } return true; } bool operator()(uint32_t value) const { return value == std::get<uint32_t>(a.value); } bool operator()(const uint4& value) const { auto v = std::get<uint4>(a.value); return value.x == v.x && value.y == v.y && value.z == v.z && value.w == v.w; } bool operator()(float value) const { return value == std::get<float>(a.value); } bool operator()(float2 value) const { auto v = std::get<float2>(a.value); return value.x == v.x && value.y == v.y; } bool operator()(const float4& value) const { auto v = std::get<float4>(a.value); return value.x == v.x && value.y == v.y && value.z == v.z && value.w == v.w; } bool operator()(const std::vector<float4>& value) const { auto v = std::get<std::vector<float4>>(a.value); if (v.size() != value.size()) { return false; } for (int i = 0; i < v.size(); ++i) { if (v[i].x != value[i].x || v[i].y != value[i].y) { return false; } } return true; } Variable a; }; bool Eq(const Variable& a, const Variable& b) { return a.name == b.name && std::visit(ParameterComparator{a}, b.value); } struct ObjectComparator { bool operator()(const ObjectData& data) const { return std::get<ObjectData>(a.object) == data; } bool operator()(const ObjectRef& ref) const { return std::get<ObjectRef>(a.object) == ref; } Object a; }; bool Eq(const Object& a, const Object& b) { return a.access == b.access && a.binding == b.binding && std::visit(ObjectComparator{a}, b.object); } TEST(Smoke, Read) { std::string shader1 = "A"; std::string shader2 
= "B"; SerializedCompiledModelBuilder builder; builder.AddShader(shader1); builder.AddShader(shader2); std::vector<Variable> parameters; parameters.push_back({"1", int32_t(1)}); parameters.push_back({"2", int2(1, 2)}); parameters.push_back({"3", int4(1, 2, 3, 4)}); parameters.push_back({"4", uint32_t(10)}); parameters.push_back({"5", uint4(10, 20, 30, 40)}); parameters.push_back({"6", -2.0f}); parameters.push_back({"7", float2(1, -1)}); parameters.push_back({"8", float4(1, -1, 2, -2)}); parameters.push_back( {"9", std::vector<int2>{int2(1, 2), int2(3, 4), int2(5, 6)}}); std::vector<Object> objects; objects.push_back(MakeReadonlyBuffer(std::vector<float>{1, 2, 3, 4})); objects.push_back(Object{AccessType::WRITE, DataType::FLOAT32, ObjectType::TEXTURE, 5, uint3(1, 2, 3), 100u}); objects.push_back(Object{AccessType::READ_WRITE, DataType::INT8, ObjectType::BUFFER, 6, uint2(2, 1), std::vector<uint8_t>{7, 9}}); uint3 num_workgroups(10, 20, 30); uint3 workgroup_size(1, 2, 3); builder.AddProgram(parameters, objects, workgroup_size, num_workgroups, 1); Handler handler; CompiledModelOptions options; options.dynamic_batch = true; ASSERT_TRUE( DeserializeCompiledModel(builder.Finalize(options), &handler).ok()); EXPECT_EQ(num_workgroups.data_, handler.programs[0].num_workgroups.data_); EXPECT_EQ(workgroup_size.data_, handler.programs[0].workgroup_size.data_); EXPECT_THAT(handler.shaders, ::testing::ElementsAre(shader1, shader2)); EXPECT_EQ(handler.programs[0].parameters.size(), parameters.size()); for (int i = 0; i < parameters.size(); ++i) { EXPECT_TRUE(Eq(parameters[i], handler.programs[0].parameters[i])) << i; } EXPECT_EQ(handler.programs[0].objects.size(), objects.size()); for (int i = 0; i < objects.size(); ++i) { EXPECT_TRUE(Eq(objects[i], handler.programs[0].objects[i])) << i; } EXPECT_TRUE(handler.options.dynamic_batch); } } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/serialization.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/serialization_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
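For orientation, a minimal round-trip sketch of the serialization API in the record above, built only from names visible in that code and test (SerializedCompiledModelBuilder, DeserializeCompiledModel, DeserializationHandler, MakeReadonlyBuffer); PrintingHandler and RoundTrip are illustrative names, not part of the library:
#include <cstdio>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
#include "tensorflow/lite/delegates/gpu/gl/serialization.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
// Illustrative handler: reports what the deserializer replays.
struct PrintingHandler : public DeserializationHandler {
  absl::Status OnShader(absl::Span<const char> src) final {
    std::printf("shader: %zu bytes\n", src.size());
    return absl::OkStatus();
  }
  absl::Status OnProgram(const std::vector<Variable>& parameters,
                         const std::vector<Object>& objects,
                         const uint3& workgroup_size,
                         const uint3& num_workgroups,
                         size_t shader_index) final {
    std::printf("program: shader #%zu, %zu params, %zu objects\n",
                shader_index, parameters.size(), objects.size());
    return absl::OkStatus();
  }
  void OnOptions(const CompiledModelOptions& options) final {
    std::printf("dynamic_batch: %s\n", options.dynamic_batch ? "yes" : "no");
  }
};
absl::Status RoundTrip() {
  SerializedCompiledModelBuilder builder;
  builder.AddShader("void main() {}");  // becomes shader index 0
  std::vector<Variable> parameters = {{"tile", int2(16, 16)}};
  std::vector<Object> objects = {
      MakeReadonlyBuffer(std::vector<float>{1, 2, 3, 4})};
  builder.AddProgram(parameters, objects, /*workgroup_size=*/uint3(8, 8, 1),
                     /*num_workgroups=*/uint3(4, 4, 1), /*shader_index=*/0);
  CompiledModelOptions options;
  options.dynamic_batch = false;
  PrintingHandler handler;
  // Finalize() yields the flatbuffer bytes; replay them through the handler.
  return DeserializeCompiledModel(builder.Finalize(options), &handler);
}
}  // namespace gl
}  // namespace gpu
}  // namespace tflite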
67a22cb6-2ba5-40ca-af8d-032bd07077a4
cpp
tensorflow/tensorflow
hardware_type_utils
tensorflow/core/profiler/utils/hardware_type_utils.cc
tensorflow/core/profiler/utils/hardware_type_utils_test.cc
#include "tensorflow/core/profiler/utils/hardware_type_utils.h" #include <algorithm> #include "absl/container/btree_map.h" #include "absl/strings/match.h" #include "xla/tsl/profiler/utils/math_utils.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/profiler/protobuf/hardware_types.pb.h" #include "tensorflow/core/profiler/utils/xplane_schema.h" namespace tensorflow { namespace profiler { namespace { const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_9_0 = { .cuda_core = { .fp64_tflops = 128, .fp32_tflops = 256, .bf16_tflops = 512, .fp16_tflops = 512, .int8_tops = 1024, }, .tensor_core = { .fp64_tflops = 256, .fp32_tflops = 2048, .bf16_tflops = 4096, .fp16_tflops = 4096, .fp8_tflops = 8192, .int8_tops = 8192, }, .has_tensor_core_sparsity_support = true, }; const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_8_9 = { .cuda_core = { .fp64_tflops = 128, .fp32_tflops = 256, .bf16_tflops = 256, .fp16_tflops = 256, .int8_tops = 512, }, .tensor_core = { .fp32_tflops = 512, .bf16_tflops = 1024, .fp16_tflops = 1024, .fp8_tflops = 2048, .int8_tops = 2048, .int4_tops = 4096, }, .has_tensor_core_sparsity_support = true, }; const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_8_6 = { .cuda_core = { .fp64_tflops = 128, .fp32_tflops = 256, .bf16_tflops = 256, .fp16_tflops = 256, .int8_tops = 512, }, .tensor_core = { .fp32_tflops = 256, .bf16_tflops = 512, .fp16_tflops = 1024, .int8_tops = 2048, .int4_tops = 4096, }, .has_tensor_core_sparsity_support = true, }; const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_8_0 = { .cuda_core = { .fp64_tflops = 64, .fp32_tflops = 128, .bf16_tflops = 256, .fp16_tflops = 512, .int8_tops = 512, }, .tensor_core = { .fp64_tflops = 128, .fp32_tflops = 1024, .bf16_tflops = 2048, .fp16_tflops = 2048, .int8_tops = 4096, }, .has_tensor_core_sparsity_support = true, }; const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_7_5 = { .cuda_core = { .fp64_tflops = 64, .fp32_tflops = 128, .fp16_tflops = 256, .int8_tops = 512, }, .tensor_core = { .fp16_tflops = 1024, .int8_tops = 2048, .int4_tops = 4096, }, .has_tensor_core_sparsity_support = false, }; const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_7_0 = { .cuda_core = { .fp64_tflops = 64, .fp32_tflops = 128, .bf16_tflops = 0.0, .fp16_tflops = 256, .int8_tops = 512, }, .tensor_core = { .fp16_tflops = 1024, }, .has_tensor_core_sparsity_support = false, }; const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_6_1 = { .cuda_core = { .fp64_tflops = 8, .fp32_tflops = 256, .fp16_tflops = 4, .int8_tops = 1024, }, .tensor_core = {}, .has_tensor_core_sparsity_support = false, }; const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_6_0 = { .cuda_core = { .fp64_tflops = 64, .fp32_tflops = 128, .fp16_tflops = 256, .int8_tops = 512, }, .tensor_core = {}, .has_tensor_core_sparsity_support = false, }; const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_5_0 = { .cuda_core = { .fp64_tflops = 4, .fp32_tflops = 256, }, .tensor_core = {}, .has_tensor_core_sparsity_support = false, }; const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_3_0 = { .cuda_core = { .fp64_tflops = 128, .fp32_tflops = 384, }, .tensor_core = {}, .has_tensor_core_sparsity_support = false, }; const GpuFlopCapabilities kComputeCap_PerSM_PerCycle_2_0 = { .cuda_core = { .fp64_tflops = 8, .fp32_tflops = 64, }, .tensor_core = {}, .has_tensor_core_sparsity_support = false, }; GpuFlopCapabilities GetNvidiaFlopCapsPerSMPerCycle(int major_comp_cap, int minor_comp_cap) { static const auto& kPerSMFlopCapsTable = *new 
absl::btree_map<int, GpuFlopCapabilities const*>{ {9000, &kComputeCap_PerSM_PerCycle_9_0}, {8090, &kComputeCap_PerSM_PerCycle_8_9}, {8060, &kComputeCap_PerSM_PerCycle_8_6}, {8000, &kComputeCap_PerSM_PerCycle_8_0}, {7050, &kComputeCap_PerSM_PerCycle_7_5}, {7000, &kComputeCap_PerSM_PerCycle_7_0}, {6010, &kComputeCap_PerSM_PerCycle_6_1}, {6000, &kComputeCap_PerSM_PerCycle_6_0}, {5000, &kComputeCap_PerSM_PerCycle_5_0}, {3000, &kComputeCap_PerSM_PerCycle_3_0}, {2000, &kComputeCap_PerSM_PerCycle_2_0}, }; const int normalized_compute_cap = major_comp_cap * 1000 + minor_comp_cap * 10; GpuFlopCapabilities flops_cap{}; auto it = kPerSMFlopCapsTable.lower_bound(normalized_compute_cap); if (it == kPerSMFlopCapsTable.end()) { LOG(WARNING) << "GPU compute capability " << major_comp_cap << "." << minor_comp_cap << " is too old to support."; } else { flops_cap = *it->second; if (it->first != normalized_compute_cap) { LOG(WARNING) << "GPU compute capability " << major_comp_cap << "." << minor_comp_cap << " is not found. Use the highest compute cap known " << (it->first / 1000) << "." << ((it->first % 1000) / 10) << " instead."; } } return flops_cap; } GpuFlopCapabilities GetGpuFlopCapabilitiesPerSM( const DeviceCapabilities& device_cap) { GpuFlopCapabilities flops_cap{}; if (device_cap.device_vendor() == kDeviceVendorNvidia) { flops_cap = GetNvidiaFlopCapsPerSMPerCycle(device_cap.compute_capability().major(), device_cap.compute_capability().minor()); } else { LOG(WARNING) << "Unsupported device vendor " << device_cap.device_vendor(); } flops_cap.ScaleWith(device_cap.clock_rate_in_ghz()); return flops_cap; } } double GetFlopMaxThroughputPerSM(const DeviceCapabilities& device_cap) { GpuFlopCapabilities sm_flops = GetGpuFlopCapabilitiesPerSM(device_cap); double result = std::max( {sm_flops.cuda_core.fp32_tflops, sm_flops.cuda_core.fp16_tflops, sm_flops.tensor_core.fp32_tflops, sm_flops.tensor_core.fp16_tflops}); VLOG(3) << "GetFlopMaxThroughputPerSM get result: " << result << " GFLOPs"; return result; } double GetSharedMemoryBandwidthPerSM(const DeviceCapabilities& device_cap) { double transaction_bytes_per_cycle = device_cap.compute_capability().major() <= 2 ?
(32 * 4 / 2) : (32 * 8); double GiBPS = transaction_bytes_per_cycle * device_cap.clock_rate_in_ghz(); return tsl::profiler::GigaToUni(GiBPS); } absl::string_view GpuModelName(const DeviceCapabilities& device_cap) { if (device_cap.device_vendor() == kDeviceVendorNvidia) { switch (device_cap.compute_capability().major()) { case 2: return "Nvidia GPU (Fermi)"; case 3: return "Nvidia GPU (Kepler)"; case 5: return "Nvidia GPU (Maxwell)"; case 6: return "Nvidia GPU (Pascal)"; case 7: if (device_cap.compute_capability().minor() < 5) { return "Nvidia GPU (Volta)"; } else { return "Nvidia GPU (Turing)"; } case 8: if (device_cap.compute_capability().minor() < 9) { return "Nvidia GPU (Ampere)"; } else { return "Nvidia GPU (Ada Lovelace)"; } case 9: return "Nvidia GPU (Hopper)"; case 10: return "Nvidia GPU (Blackwell)"; default: return "Nvidia GPU"; } } else if (device_cap.device_vendor() == kDeviceVendorAMD) { switch (device_cap.compute_capability().major()) { case 9: return "AMD GPU - gfx-9XX series"; case 10: return "AMD GPU - gfx-10XX series"; case 11: return "AMD GPU - gfx-11XX series"; default: return "AMD GPU"; } } else { LOG(ERROR) << "Unknown device vendor " << device_cap.device_vendor(); return ""; } } HardwareType ParseHardwareType(absl::string_view device_type) { if (absl::StrContains(device_type, "GPU")) return HardwareType::GPU; if (device_type == "CPU") return HardwareType::CPU_ONLY; if (absl::StrContains(device_type, "TPU")) return HardwareType::TPU; return HardwareType::UNKNOWN_HARDWARE; } bool HasDevice(HardwareType x) { return x > tensorflow::profiler::CPU_ONLY; } } }
#include "tensorflow/core/profiler/utils/hardware_type_utils.h" #include "xla/tsl/profiler/utils/math_utils.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace profiler { namespace { TEST(HardwareTypeUtilsTest, H100PeakComputTFlops) { DeviceCapabilities device_cap; device_cap.set_clock_rate_in_ghz(1.620); device_cap.set_num_cores(114); device_cap.set_memory_size_in_bytes( tsl::profiler::GibiToGiga(tsl::profiler::GigaToUni(80))); device_cap.set_memory_bandwidth(tsl::profiler::GigaToUni(2.04 * 1024)); device_cap.set_device_vendor("Nvidia"); device_cap.mutable_compute_capability()->set_major(9); device_cap.mutable_compute_capability()->set_minor(0); double peak_tflops = GetFlopMaxThroughputPerSM(device_cap) * device_cap.num_cores() / 1000.0; EXPECT_NEAR(peak_tflops, 756, 1.0); } TEST(HardwareTypeUtilsTest, A100PeakComputTFlops) { DeviceCapabilities device_cap; device_cap.set_clock_rate_in_ghz(1.410); device_cap.set_num_cores(108); device_cap.set_memory_size_in_bytes( tsl::profiler::GibiToGiga(tsl::profiler::GigaToUni(80))); device_cap.set_memory_bandwidth(tsl::profiler::GigaToUni(2.04 * 1024)); device_cap.set_device_vendor("Nvidia"); device_cap.mutable_compute_capability()->set_major(8); device_cap.mutable_compute_capability()->set_minor(0); double peak_tflops = GetFlopMaxThroughputPerSM(device_cap) * device_cap.num_cores() / 1000.0; EXPECT_NEAR(peak_tflops, 312, 1.0); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/hardware_type_utils.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/hardware_type_utils_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
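The two EXPECT_NEAR values in the test above follow directly from the per-SM tables in the code: for compute capability 9.0 the tensor-core FP16 entry is 4096 ops per SM per cycle, for 8.0 it is 2048, and GetFlopMaxThroughputPerSM scales those by the clock (via ScaleWith) before the test multiplies by SM count and divides by 1000. A standalone recomputation of that arithmetic (illustrative, not library code):
#include <cstdio>
// Recomputes the peak-TFLOPS expectations from the tables above.
// ops/SM/cycle values come from kComputeCap_PerSM_PerCycle_{9_0,8_0}.
int main() {
  const double h100 = 4096 * 1.620 * 114 / 1000.0;  // tensor-core FP16, CC 9.0
  const double a100 = 2048 * 1.410 * 108 / 1000.0;  // tensor-core FP16, CC 8.0
  std::printf("H100: %.1f TFLOPS (test expects ~756)\n", h100);  // 756.4
  std::printf("A100: %.1f TFLOPS (test expects ~312)\n", a100);  // 311.9
}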
37f2e820-6f6b-4f58-9937-76f4d6f94147
cpp
tensorflow/tensorflow
padded_batch_dataset_op
tensorflow/core/kernels/data/padded_batch_dataset_op.cc
tensorflow/core/kernels/data/padded_batch_dataset_op_test.cc
#include "tensorflow/core/kernels/data/padded_batch_dataset_op.h" #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/data/name_utils.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/partial_tensor_shape.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/platform/blocking_counter.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/stringprintf.h" #include "tensorflow/core/util/batch_util.h" namespace tensorflow { namespace data { constexpr const char* const PaddedBatchDatasetOp::kDatasetType; constexpr const char* const PaddedBatchDatasetOp::kInputDataset; constexpr const char* const PaddedBatchDatasetOp::kBatchSize; constexpr const char* const PaddedBatchDatasetOp::kPaddedShapes; constexpr const char* const PaddedBatchDatasetOp::kPaddingValues; constexpr const char* const PaddedBatchDatasetOp::kDropRemainder; constexpr const char* const PaddedBatchDatasetOp::kParallelCopy; constexpr const char* const PaddedBatchDatasetOp::kToutputTypes; constexpr const char* const PaddedBatchDatasetOp::kOutputShapes; constexpr const char* const PaddedBatchDatasetOp::kNumPaddedShapes; constexpr char kExhausted[] = "exhausted"; class PaddedBatchDatasetOp::Dataset : public DatasetBase { public: Dataset(OpKernelContext* ctx, int64_t batch_size, bool drop_remainder, bool parallel_copy, std::vector<PartialTensorShape> padded_shapes, std::vector<Tensor> padding_values, const DatasetBase* input, int op_version) : DatasetBase(DatasetContext(ctx)), batch_size_(batch_size), drop_remainder_(drop_remainder), parallel_copy_(parallel_copy), padded_shapes_(std::move(padded_shapes)), padding_values_(std::move(padding_values)), input_(input), op_version_(op_version), traceme_metadata_( {{"batch_size", strings::Printf("%lld", static_cast<long long>(batch_size))}, {"drop_remainder", drop_remainder ? "true" : "false"}, {"parallel_copy", parallel_copy ? 
"true" : "false"}}) { input_->Ref(); const auto& input_shapes = input_->output_shapes(); output_shapes_.reserve(input_shapes.size()); for (size_t i = 0; i < input_shapes.size(); ++i) { if (drop_remainder_ || input_->Cardinality() == kInfiniteCardinality) { output_shapes_.push_back( PartialTensorShape({batch_size_}).Concatenate(padded_shapes_[i])); } else { output_shapes_.push_back( PartialTensorShape({-1}).Concatenate(padded_shapes_[i])); } } } ~Dataset() override { input_->Unref(); } std::unique_ptr<IteratorBase> MakeIteratorInternal( const string& prefix) const override { name_utils::IteratorPrefixParams params; params.op_version = op_version_; return std::make_unique<Iterator>(Iterator::Params{ this, name_utils::IteratorPrefix(kDatasetType, prefix, params)}); } const DataTypeVector& output_dtypes() const override { return input_->output_dtypes(); } const std::vector<PartialTensorShape>& output_shapes() const override { return output_shapes_; } string DebugString() const override { name_utils::DatasetDebugStringParams params; params.op_version = op_version_; params.set_args(batch_size_); return name_utils::DatasetDebugString(kDatasetType, params); } int64_t CardinalityInternal(CardinalityOptions options) const override { int64_t n = input_->Cardinality(options); if (n == kInfiniteCardinality || n == kUnknownCardinality) { return n; } return n / batch_size_ + (n % batch_size_ == 0 || drop_remainder_ ? 0 : 1); } Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override { inputs->push_back(input_); return absl::OkStatus(); } Status CheckExternalState() const override { return input_->CheckExternalState(); } protected: Status AsGraphDefInternal(SerializationContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph_node = nullptr; TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node)); Node* batch_size = nullptr; TF_RETURN_IF_ERROR(b->AddScalar(batch_size_, &batch_size)); std::vector<Node*> padded_shapes; padded_shapes.reserve(padded_shapes_.size()); for (int i = 0; i < padded_shapes_.size(); i++) { Node* node; Tensor t(DT_INT64, TensorShape({padded_shapes_[i].dims()})); for (int j = 0; j < padded_shapes_[i].dims(); j++) { t.vec<int64_t>()(j) = padded_shapes_[i].dim_size(j); } TF_RETURN_IF_ERROR(b->AddTensor(t, &node)); padded_shapes.emplace_back(node); } std::vector<Node*> padding_values; padding_values.reserve(padding_values_.size()); for (const Tensor& t : padding_values_) { Node* node; TF_RETURN_IF_ERROR(b->AddTensor(t, &node)); padding_values.emplace_back(node); } Node* drop_remainder = nullptr; TF_RETURN_IF_ERROR(b->AddScalar(drop_remainder_, &drop_remainder)); AttrValue parallel_copy; b->BuildAttrValue(parallel_copy_, &parallel_copy); AttrValue output_types; b->BuildAttrValue(output_dtypes(), &output_types); AttrValue N; b->BuildAttrValue<int64_t>(padded_shapes_.size(), &N); TF_RETURN_IF_ERROR(b->AddDataset( this, {{0, input_graph_node}, {1, batch_size}, {4, drop_remainder}}, {{2, padded_shapes}, {3, padding_values}}, {{kParallelCopy, parallel_copy}, {kToutputTypes, output_types}, {kNumPaddedShapes, N}}, output)); return absl::OkStatus(); } private: class Iterator : public DatasetIterator<Dataset> { public: explicit Iterator(const Params& params) : DatasetIterator<Dataset>(params) {} bool SymbolicCheckpointCompatible() const override { return true; } Status Initialize(IteratorContext* ctx) override { return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_); } Status GetNextInternal(IteratorContext* 
ctx, std::vector<Tensor>* out_tensors, bool* end_of_sequence) override { std::vector<std::vector<Tensor>> batch_elements; { mutex_lock l(mu_); if (!input_impl_) { *end_of_sequence = true; return absl::OkStatus(); } else { *end_of_sequence = false; batch_elements.reserve(dataset()->batch_size_); for (int i = 0; i < dataset()->batch_size_ && !*end_of_sequence; ++i) { std::vector<Tensor> batch_element_tuple; TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &batch_element_tuple, end_of_sequence)); if (!*end_of_sequence) { batch_elements.push_back(std::move(batch_element_tuple)); } } if (*end_of_sequence) { input_impl_.reset(); } } } if (batch_elements.empty()) { DCHECK(*end_of_sequence); return absl::OkStatus(); } if (dataset()->drop_remainder_ && batch_elements.size() < dataset()->batch_size_) { *end_of_sequence = true; return absl::OkStatus(); } TF_RETURN_IF_ERROR(CopyBatch(ctx, batch_elements, out_tensors)); *end_of_sequence = false; return absl::OkStatus(); } protected: std::shared_ptr<model::Node> CreateNode( IteratorContext* ctx, model::Node::Args args) const override { return model::MakeKnownRatioNode(std::move(args), dataset()->batch_size_); } Status SaveInternal(SerializationContext* ctx, IteratorStateWriter* writer) override { mutex_lock l(mu_); TF_RETURN_IF_ERROR(writer->WriteScalar( prefix(), kExhausted, static_cast<int64_t>(!input_impl_))); if (input_impl_) { TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_)); } return absl::OkStatus(); } Status RestoreInternal(IteratorContext* ctx, IteratorStateReader* reader) override { mutex_lock l(mu_); int64_t input_exhausted; TF_RETURN_IF_ERROR( reader->ReadScalar(prefix(), kExhausted, &input_exhausted)); if (static_cast<bool>(input_exhausted)) { input_impl_.reset(); } else { TF_RETURN_IF_ERROR( dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_)); TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_)); } return absl::OkStatus(); } TraceMeMetadata GetTraceMeMetadata() const override { return dataset()->traceme_metadata_; } private: Status CopyBatch(IteratorContext* ctx, const std::vector<std::vector<Tensor>>& batch_elements, std::vector<Tensor>* out_tensors) { const size_t num_tuple_components = batch_elements[0].size(); const int64_t num_batch_elements = batch_elements.size(); for (size_t component_index = 0; component_index < num_tuple_components; ++component_index) { TensorShape batch_component_shape({num_batch_elements}); const PartialTensorShape& padded_shape = dataset()->padded_shapes_[component_index]; for (int dim = 0; dim < padded_shape.dims(); ++dim) { if (padded_shape.dim_size(dim) == -1) { TF_RETURN_IF_ERROR(batch_component_shape.AddDimWithStatus(0)); } else { TF_RETURN_IF_ERROR(batch_component_shape.AddDimWithStatus( padded_shape.dim_size(dim))); } } for (int64_t i = 0; i < num_batch_elements; ++i) { const TensorShape& element_shape = batch_elements[i][component_index].shape(); if (element_shape.dims() != padded_shape.dims()) { return errors::InvalidArgument( "All elements in a batch must have the same rank as the " "padded shape for component", component_index, ": expected rank ", padded_shape.dims(), " but got element with rank ", element_shape.dims()); } for (int dim = 0; dim < padded_shape.dims(); ++dim) { if (padded_shape.dim_size(dim) == -1) { if (batch_elements[i][component_index].shape().dim_size(dim) > batch_component_shape.dim_size(dim + 1)) { batch_component_shape.set_dim( dim + 1, batch_elements[i][component_index].shape().dim_size(dim)); } } else { if 
(batch_elements[i][component_index].shape().dim_size(dim) > batch_component_shape.dim_size(dim + 1)) { return errors::DataLoss( "Attempted to pad to a smaller size than the input " "element."); } } } } out_tensors->emplace_back(ctx->allocator({}), output_dtypes()[component_index], batch_component_shape); Tensor& batch_component = out_tensors->back(); TF_RETURN_IF_ERROR(batch_util::SetElementZero( &batch_component, dataset()->padding_values_[component_index])); TensorShape component_shape({}); for (int i = 1; i < batch_component_shape.dims(); ++i) { TF_RETURN_IF_ERROR(component_shape.AddDimWithStatus( batch_component_shape.dim_size(i))); } auto copy_element_fn = [component_index, &batch_elements, &batch_component, &component_shape](int index) { if (batch_elements[index][component_index].shape() == component_shape) { TF_RETURN_IF_ERROR(batch_util::CopyElementToSlice( batch_elements[index][component_index], &batch_component, index)); } else { TF_RETURN_IF_ERROR(batch_util::CopyElementToLargerSlice( batch_elements[index][component_index], &batch_component, index)); } return absl::OkStatus(); }; if (dataset()->parallel_copy_ && (batch_component.AllocatedBytes() / num_batch_elements) >= (1 << 15)) { BlockingCounter counter(num_batch_elements); Status status; mutex status_mu; const auto num_threads = ctx->runner_threadpool_size(); const auto slice_size = num_batch_elements / num_threads; int64_t offset = 0; for (size_t i = 0; i < num_threads; ++i) { int64_t length = slice_size; if (i < num_batch_elements % num_threads) ++length; (*ctx->runner())([offset, length, &status, &status_mu, &counter, &copy_element_fn]() { for (size_t j = offset; j < offset + length; ++j) { { Status s = copy_element_fn(j); mutex_lock l(status_mu); status.Update(s); } counter.DecrementCount(); } }); offset += length; } counter.Wait(); TF_RETURN_IF_ERROR(status); } else { for (size_t i = 0; i < num_batch_elements; ++i) { TF_RETURN_IF_ERROR(copy_element_fn(i)); } } } return absl::OkStatus(); } mutex mu_; std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_); }; const int64_t batch_size_; const bool drop_remainder_; const bool parallel_copy_; const std::vector<PartialTensorShape> padded_shapes_; const std::vector<Tensor> padding_values_; const DatasetBase* const input_; const int op_version_; std::vector<PartialTensorShape> output_shapes_; const TraceMeMetadata traceme_metadata_; }; PaddedBatchDatasetOp::PaddedBatchDatasetOp(OpKernelConstruction* ctx) : UnaryDatasetOpKernel(ctx), op_version_(ctx->def().op() == "PaddedBatchDataset" ? 
1 : 2) { if (ctx->HasAttr(kParallelCopy)) { OP_REQUIRES_OK(ctx, ctx->GetAttr(kParallelCopy, &parallel_copy_)); } } void PaddedBatchDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input, DatasetBase** output) { int64_t batch_size; OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kBatchSize, &batch_size)); OP_REQUIRES(ctx, batch_size > 0, errors::InvalidArgument("Batch size must be greater than zero.")); bool drop_remainder = false; if (op_version_ > 1) { OP_REQUIRES_OK( ctx, ParseScalarArgument<bool>(ctx, kDropRemainder, &drop_remainder)); } OpInputList padded_shape_tensors; OP_REQUIRES_OK(ctx, ctx->input_list(kPaddedShapes, &padded_shape_tensors)); std::vector<PartialTensorShape> padded_shapes; padded_shapes.reserve(padded_shape_tensors.size()); OP_REQUIRES(ctx, padded_shape_tensors.size() == input->output_shapes().size(), errors::InvalidArgument("Number of padded shapes (", padded_shape_tensors.size(), ") must match the number of components " "in the input dataset's elements (", input->output_shapes().size(), ")")); for (const Tensor& padded_shape_t : padded_shape_tensors) { OP_REQUIRES(ctx, TensorShapeUtils::IsVector(padded_shape_t.shape()), errors::InvalidArgument("All padded shapes must be vectors")); PartialTensorShape padded_shape; OP_REQUIRES_OK(ctx, PartialTensorShape::MakePartialShape( padded_shape_t.vec<int64_t>().data(), padded_shape_t.NumElements(), &padded_shape)); padded_shapes.push_back(std::move(padded_shape)); } OpInputList padding_values_list; OP_REQUIRES_OK(ctx, ctx->input_list(kPaddingValues, &padding_values_list)); std::vector<Tensor> padding_values; OP_REQUIRES(ctx, padding_values_list.size() == input->output_shapes().size(), errors::InvalidArgument( "Number of padding values (", padding_values_list.size(), ") must match the number of components in the input " "dataset's elements (", input->output_shapes().size(), ")")); for (int i = 0; i < padding_values_list.size(); ++i) { const Tensor& padding_value_t = padding_values_list[i]; OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(padding_value_t.shape()), errors::InvalidArgument("All padding values must be scalars")); OP_REQUIRES(ctx, padding_value_t.dtype() == input->output_dtypes()[i], errors::InvalidArgument( "Mismatched type between padding value ", i, " and input dataset's component ", i, ": ", DataTypeString(padding_value_t.dtype()), " vs. ", DataTypeString(input->output_dtypes()[i]))); padding_values.push_back(tensor::DeepCopy(padding_value_t)); } *output = new Dataset(ctx, batch_size, drop_remainder, parallel_copy_, std::move(padded_shapes), std::move(padding_values), input, op_version_); } namespace { REGISTER_KERNEL_BUILDER(Name("PaddedBatchDataset").Device(DEVICE_CPU), PaddedBatchDatasetOp); REGISTER_KERNEL_BUILDER(Name("PaddedBatchDatasetV2").Device(DEVICE_CPU), PaddedBatchDatasetOp); } } }
#include "tensorflow/core/kernels/data/padded_batch_dataset_op.h" #include "tensorflow/core/data/dataset_test_base.h" namespace tensorflow { namespace data { namespace { constexpr char kNodeName[] = "padded_batch_dataset"; constexpr int kOpVersion = 2; class PaddedBatchDatasetOpTest : public DatasetOpsTestBase {}; class PaddedBatchDatasetParams : public DatasetParams { public: template <typename T> PaddedBatchDatasetParams(T input_dataset_params, int64_t batch_size, std::vector<Tensor> padded_shapes, std::vector<Tensor> padded_values, bool drop_remainder, bool parallel_copy, DataTypeVector output_dtypes, std::vector<PartialTensorShape> output_shapes, int num_padded_shapes, string node_name) : DatasetParams(std::move(output_dtypes), std::move(output_shapes), std::move(node_name)), batch_size_(batch_size), padded_shapes_(std::move(padded_shapes)), padded_values_(std::move(padded_values)), drop_remainder_(drop_remainder), parallel_copy_(parallel_copy), num_padded_shapes_(num_padded_shapes) { input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params)); op_version_ = kOpVersion; iterator_prefix_ = name_utils::IteratorPrefix(input_dataset_params.dataset_type(), input_dataset_params.iterator_prefix()); } std::vector<Tensor> GetInputTensors() const override { std::vector<Tensor> input_tensors; input_tensors.emplace_back( CreateTensor<int64_t>(TensorShape({}), {batch_size_})); for (auto& padded_shape : padded_shapes_) { input_tensors.emplace_back(padded_shape); } for (auto& padded_value : padded_values_) { input_tensors.emplace_back(padded_value); } input_tensors.emplace_back( CreateTensor<bool>(TensorShape({}), {drop_remainder_})); return input_tensors; } Status GetInputNames(std::vector<string>* input_names) const override { *input_names = {PaddedBatchDatasetOp::kInputDataset, PaddedBatchDatasetOp::kBatchSize}; for (int i = 0; i < num_padded_shapes_; ++i) { input_names->emplace_back( strings::StrCat(PaddedBatchDatasetOp::kPaddedShapes, "_", i)); } for (int j = 0; j < padded_values_.size(); ++j) { input_names->emplace_back( strings::StrCat(PaddedBatchDatasetOp::kPaddingValues, "_", j)); } input_names->push_back(PaddedBatchDatasetOp::kDropRemainder); return absl::OkStatus(); } Status GetAttributes(AttributeVector* attr_vector) const override { *attr_vector = {{"parallel_copy", parallel_copy_}, {"Toutput_types", output_dtypes_}, {"output_shapes", output_shapes_}, {"N", num_padded_shapes_}, {"metadata", ""}}; return absl::OkStatus(); } string dataset_type() const override { return PaddedBatchDatasetOp::kDatasetType; } private: int64_t batch_size_; std::vector<Tensor> padded_shapes_; std::vector<Tensor> padded_values_; bool drop_remainder_; bool parallel_copy_; int num_padded_shapes_; }; PaddedBatchDatasetParams PaddedBatchDatasetParams1() { auto tensor_slice_dataset_params = TensorSliceDatasetParams( {CreateTensor<int64_t>( TensorShape{7, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})}, "tensor_slice"); return PaddedBatchDatasetParams( tensor_slice_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{1}, {3})}, {CreateTensor<int64_t>(TensorShape{}, {1})}, true, true, {DT_INT64}, {PartialTensorShape({2, 3})}, 1, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParams2() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}), "tensor_slice_1"); auto 
concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({-1})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{1}, {3})}, {CreateTensor<int64_t>(TensorShape{}, {1})}, true, true, {DT_INT64}, {PartialTensorShape({2, 3})}, 1, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParams3() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}), "tensor_slice_1"); auto concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({-1})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{1}, {3})}, {CreateTensor<int64_t>(TensorShape{}, {1})}, false, true, {DT_INT64}, {PartialTensorShape({2, 3})}, 1, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParams4() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 1}, {{6, 7, 8}}), "tensor_slice_1"); auto concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({-1})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{1}, {3})}, {CreateTensor<int64_t>(TensorShape{}, {1})}, false, true, {DT_INT64}, {PartialTensorShape({-1, 3})}, 1, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParams5() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}), "tensor_slice_1"); auto concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({-1})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{1}, {-1})}, {CreateTensor<int64_t>(TensorShape{}, {1})}, false, false, {DT_INT64}, {PartialTensorShape({-1, -1})}, 1, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParams6() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{4, 1}, {{6, 7, 8, 9}}), "tensor_slice_1"); auto concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({-1})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{1}, {-1})}, {CreateTensor<int64_t>(TensorShape{}, {1})}, false, true, {DT_INT64}, {PartialTensorShape({-1, -1})}, 1, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParams7() 
{ return PaddedBatchDatasetParams( RangeDatasetParams(0, 0, 1), 2, {CreateTensor<int64_t>(TensorShape{1}, {-1})}, {CreateTensor<int64_t>(TensorShape{}, {1})}, false, true, {DT_INT64}, {PartialTensorShape({-1, -1})}, 1, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParamsWithShortPaddingShape() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{6, 7, 8, 9, 10, 11}}), "tensor_slice_1"); auto concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({2})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{1}, {1})}, {CreateTensor<int64_t>(TensorShape{}, {1})}, false, true, {DT_INT64}, {PartialTensorShape({-1, -1})}, 1, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParamsWithInvalidPaddingShape() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{6, 7, 8, 9, 10, 11}}), "tensor_slice_1"); auto concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({2})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{2}, {1, 2})}, {CreateTensor<int64_t>(TensorShape{}, {1})}, false, true, {DT_INT64}, {PartialTensorShape({-1, -1})}, 1, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParamsWithInvalidBatchSize() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{6, 7, 8, 9, 10, 11}}), "tensor_slice_1"); auto concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({2})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, -1, {CreateTensor<int64_t>(TensorShape{1}, {3})}, {CreateTensor<int64_t>(TensorShape{}, {1})}, false, true, {DT_INT64}, {PartialTensorShape({-1, -1})}, 1, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParamsWithInvalidPaddingShapesSize() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{6, 7, 8, 9, 10, 11}}), "tensor_slice_1"); auto concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({2})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{1}, {3}), CreateTensor<int64_t>(TensorShape{1}, {3})}, {CreateTensor<int64_t>(TensorShape{}, {1})}, false, true, {DT_INT64}, {PartialTensorShape({-1, -1})}, 2, kNodeName); } PaddedBatchDatasetParams 
PaddedBatchDatasetParamsWithInvalidPaddingValuesSize() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{6, 7, 8, 9, 10, 11}}), "tensor_slice_1"); auto concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({2})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{1}, {3})}, {CreateTensor<int64_t>(TensorShape{}, {1}), CreateTensor<int64_t>(TensorShape{}, {1})}, false, true, {DT_INT64}, {PartialTensorShape({-1, -1})}, 2, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParamsWithInvalidPaddingValuesDType() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{6, 7, 8, 9, 10, 11}}), "tensor_slice_1"); auto concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({2})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{1}, {3})}, {CreateTensor<tstring>(TensorShape{}, {"a"})}, false, true, {DT_INT64}, {PartialTensorShape({-1, -1})}, 1, kNodeName); } PaddedBatchDatasetParams PaddedBatchDatasetParamsWithInvalidPaddingValuesShape() { auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{0, 1, 2, 3, 4, 5}}), "tensor_slice_0"); auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams( CreateTensors<int64_t>(TensorShape{3, 2}, {{6, 7, 8, 9, 10, 11}}), "tensor_slice_1"); auto concatenate_dataset_params = ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0), std::move(tensor_slice_dataset_params_1), {DT_INT64}, {PartialTensorShape({2})}, "concatenate"); return PaddedBatchDatasetParams( concatenate_dataset_params, 2, {CreateTensor<int64_t>(TensorShape{1}, {3})}, {CreateTensor<int64_t>(TensorShape{1}, {1})}, false, true, {DT_INT64}, {PartialTensorShape({-1, -1})}, 1, kNodeName); } std::vector<GetNextTestCase<PaddedBatchDatasetParams>> GetNextTestCases() { return {{PaddedBatchDatasetParams1(), CreateTensors<int64_t>( TensorShape{2, 3}, {{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 7, 1}, {8, 9, 1, 10, 11, 1}})}, {PaddedBatchDatasetParams2(), CreateTensors<int64_t>( TensorShape{2, 3}, {{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})}, {PaddedBatchDatasetParams3(), {CreateTensor<int64_t>(TensorShape{2, 3}, {0, 1, 1, 2, 3, 1}), CreateTensor<int64_t>(TensorShape{2, 3}, {4, 5, 1, 6, 1, 1}), CreateTensor<int64_t>(TensorShape{2, 3}, {7, 1, 1, 8, 1, 1}), CreateTensor<int64_t>(TensorShape{1, 3}, {9, 1, 1})}}, {PaddedBatchDatasetParams4(), CreateTensors<int64_t>( TensorShape{2, 3}, {{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})}, {PaddedBatchDatasetParams5(), {CreateTensor<int64_t>(TensorShape{2, 2}, {0, 1, 2, 3}), CreateTensor<int64_t>(TensorShape{2, 2}, {4, 5, 6, 1}), CreateTensor<int64_t>(TensorShape{2, 1}, {7, 8}), CreateTensor<int64_t>(TensorShape{1, 1}, {9})}}, {PaddedBatchDatasetParams6(), {CreateTensor<int64_t>(TensorShape{2, 2}, {0, 1, 2, 3}), 
CreateTensor<int64_t>(TensorShape{2, 2}, {4, 5, 6, 1}), CreateTensor<int64_t>(TensorShape{2, 1}, {7, 8}), CreateTensor<int64_t>(TensorShape{1, 1}, {9})}}, {PaddedBatchDatasetParams7(), {}}}; } ITERATOR_GET_NEXT_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams, GetNextTestCases()) TEST_F(PaddedBatchDatasetOpTest, DatasetNodeName) { auto dataset_params = PaddedBatchDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name())); } TEST_F(PaddedBatchDatasetOpTest, DatasetTypeString) { auto dataset_params = PaddedBatchDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); name_utils::OpNameParams params; params.op_version = dataset_params.op_version(); TF_ASSERT_OK(CheckDatasetTypeString( name_utils::OpName(PaddedBatchDatasetOp::kDatasetType, params))); } std::vector<DatasetOutputDtypesTestCase<PaddedBatchDatasetParams>> DatasetOutputDtypesTestCases() { return {{PaddedBatchDatasetParams1(), {DT_INT64}}, {PaddedBatchDatasetParams2(), {DT_INT64}}, {PaddedBatchDatasetParams3(), {DT_INT64}}, {PaddedBatchDatasetParams4(), {DT_INT64}}, {PaddedBatchDatasetParams5(), {DT_INT64}}, {PaddedBatchDatasetParams6(), {DT_INT64}}, {PaddedBatchDatasetParams7(), {DT_INT64}}}; } DATASET_OUTPUT_DTYPES_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams, DatasetOutputDtypesTestCases()) std::vector<DatasetOutputShapesTestCase<PaddedBatchDatasetParams>> DatasetOutputShapesTestCases() { return {{PaddedBatchDatasetParams1(), {PartialTensorShape({2, 3})}}, {PaddedBatchDatasetParams2(), {PartialTensorShape({2, 3})}}, {PaddedBatchDatasetParams3(), {PartialTensorShape({-1, 3})}}, {PaddedBatchDatasetParams4(), {PartialTensorShape({-1, 3})}}, {PaddedBatchDatasetParams5(), {PartialTensorShape({-1, -1})}}, {PaddedBatchDatasetParams6(), {PartialTensorShape({-1, -1})}}, {PaddedBatchDatasetParams7(), {PartialTensorShape({-1, -1})}}}; } DATASET_OUTPUT_SHAPES_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams, DatasetOutputShapesTestCases()) std::vector<CardinalityTestCase<PaddedBatchDatasetParams>> CardinalityTestCases() { return {{PaddedBatchDatasetParams1(), 3}, {PaddedBatchDatasetParams2(), 3}, {PaddedBatchDatasetParams3(), 4}, {PaddedBatchDatasetParams4(), 3}, {PaddedBatchDatasetParams5(), 4}, {PaddedBatchDatasetParams6(), 4}, {PaddedBatchDatasetParams7(), 0}}; } DATASET_CARDINALITY_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams, CardinalityTestCases()) std::vector<IteratorOutputDtypesTestCase<PaddedBatchDatasetParams>> IteratorOutputDtypesTestCases() { return {{PaddedBatchDatasetParams1(), {DT_INT64}}, {PaddedBatchDatasetParams2(), {DT_INT64}}, {PaddedBatchDatasetParams3(), {DT_INT64}}, {PaddedBatchDatasetParams4(), {DT_INT64}}, {PaddedBatchDatasetParams5(), {DT_INT64}}, {PaddedBatchDatasetParams6(), {DT_INT64}}, {PaddedBatchDatasetParams7(), {DT_INT64}}}; } ITERATOR_OUTPUT_DTYPES_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams, IteratorOutputDtypesTestCases()) std::vector<IteratorOutputShapesTestCase<PaddedBatchDatasetParams>> IteratorOutputShapesTestCases() { return {{PaddedBatchDatasetParams1(), {PartialTensorShape({2, 3})}}, {PaddedBatchDatasetParams2(), {PartialTensorShape({2, 3})}}, {PaddedBatchDatasetParams3(), {PartialTensorShape({-1, 3})}}, {PaddedBatchDatasetParams4(), {PartialTensorShape({-1, 3})}}, {PaddedBatchDatasetParams5(), {PartialTensorShape({-1, -1})}}, {PaddedBatchDatasetParams6(), {PartialTensorShape({-1, -1})}}, {PaddedBatchDatasetParams7(), {PartialTensorShape({-1, -1})}}}; } 
ITERATOR_OUTPUT_SHAPES_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams, IteratorOutputShapesTestCases()) TEST_F(PaddedBatchDatasetOpTest, IteratorPrefix) { auto dataset_params = PaddedBatchDatasetParams1(); TF_ASSERT_OK(Initialize(dataset_params)); name_utils::IteratorPrefixParams params; params.op_version = dataset_params.op_version(); TF_ASSERT_OK(CheckIteratorPrefix( name_utils::IteratorPrefix(PaddedBatchDatasetOp::kDatasetType, dataset_params.iterator_prefix(), params))); } std::vector<IteratorSaveAndRestoreTestCase<PaddedBatchDatasetParams>> IteratorSaveAndRestoreTestCases() { return {{PaddedBatchDatasetParams1(), {0, 2, 5}, CreateTensors<int64_t>( TensorShape{2, 3}, {{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 7, 1}, {8, 9, 1, 10, 11, 1}})}, {PaddedBatchDatasetParams2(), {0, 2, 5}, CreateTensors<int64_t>( TensorShape{2, 3}, {{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})}, {PaddedBatchDatasetParams3(), {0, 2, 5}, {CreateTensor<int64_t>(TensorShape{2, 3}, {0, 1, 1, 2, 3, 1}), CreateTensor<int64_t>(TensorShape{2, 3}, {4, 5, 1, 6, 1, 1}), CreateTensor<int64_t>(TensorShape{2, 3}, {7, 1, 1, 8, 1, 1}), CreateTensor<int64_t>(TensorShape{1, 3}, {9, 1, 1})}}, {PaddedBatchDatasetParams4(), {0, 2, 5}, CreateTensors<int64_t>( TensorShape{2, 3}, {{0, 1, 1, 2, 3, 1}, {4, 5, 1, 6, 1, 1}, {7, 1, 1, 8, 1, 1}})}, {PaddedBatchDatasetParams5(), {0, 2, 5}, {CreateTensor<int64_t>(TensorShape{2, 2}, {0, 1, 2, 3}), CreateTensor<int64_t>(TensorShape{2, 2}, {4, 5, 6, 1}), CreateTensor<int64_t>(TensorShape{2, 1}, {7, 8}), CreateTensor<int64_t>(TensorShape{1, 1}, {9})}}, {PaddedBatchDatasetParams6(), {0, 2, 5}, {CreateTensor<int64_t>(TensorShape{2, 2}, {0, 1, 2, 3}), CreateTensor<int64_t>(TensorShape{2, 2}, {4, 5, 6, 1}), CreateTensor<int64_t>(TensorShape{2, 1}, {7, 8}), CreateTensor<int64_t>(TensorShape{1, 1}, {9})}}, {PaddedBatchDatasetParams7(), {0, 2, 5}, {}}}; } ITERATOR_SAVE_AND_RESTORE_TEST_P(PaddedBatchDatasetOpTest, PaddedBatchDatasetParams, IteratorSaveAndRestoreTestCases()) TEST_F(PaddedBatchDatasetOpTest, ShortPadding) { auto dataset_params = PaddedBatchDatasetParamsWithShortPaddingShape(); TF_ASSERT_OK(Initialize(dataset_params)); bool end_of_sequence = false; std::vector<Tensor> out_tensors; EXPECT_EQ( iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence) .code(), tensorflow::error::DATA_LOSS); } TEST_F(PaddedBatchDatasetOpTest, InvalidPaddedShapes) { auto dataset_params = PaddedBatchDatasetParamsWithInvalidPaddingShape(); TF_ASSERT_OK(Initialize(dataset_params)); bool end_of_sequence = false; std::vector<Tensor> out_tensors; EXPECT_EQ( iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence) .code(), absl::StatusCode::kInvalidArgument); } class ParameterizedInvalidArgumentTest : public PaddedBatchDatasetOpTest, public ::testing::WithParamInterface<PaddedBatchDatasetParams> {}; TEST_P(ParameterizedInvalidArgumentTest, InvalidPredicateFunc) { auto dataset_params = GetParam(); EXPECT_EQ(Initialize(dataset_params).code(), absl::StatusCode::kInvalidArgument); } INSTANTIATE_TEST_SUITE_P( PaddedBatchDatasetOpTest, ParameterizedInvalidArgumentTest, ::testing::ValuesIn( {PaddedBatchDatasetParamsWithInvalidBatchSize(), PaddedBatchDatasetParamsWithInvalidPaddingShapesSize(), PaddedBatchDatasetParamsWithInvalidPaddingValuesSize(), PaddedBatchDatasetParamsWithInvalidPaddingValuesDType(), PaddedBatchDatasetParamsWithInvalidPaddingValuesShape()})); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/padded_batch_dataset_op.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/padded_batch_dataset_op_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
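One subtlety in CopyBatch above is how each batch component's output shape is chosen: a -1 in the padded shape grows to the largest extent seen across the batch, while a fixed extent smaller than some element raises the DataLoss error ("Attempted to pad to a smaller size than the input element"), which the ShortPadding test exercises. A distilled, self-contained sketch of just that per-dimension rule (PaddedExtent is a hypothetical helper, not part of the kernel):
#include <cstdint>
#include <cstdio>
#include <vector>
// Returns the padded extent for one dimension, or -1 to signal the
// DataLoss condition from the kernel.
int64_t PaddedExtent(int64_t padded_dim,
                     const std::vector<int64_t>& element_dims) {
  int64_t result = (padded_dim == -1) ? 0 : padded_dim;
  for (int64_t d : element_dims) {
    if (padded_dim == -1) {
      if (d > result) result = d;  // unknown dim: grow to max element extent
    } else if (d > padded_dim) {
      return -1;  // an element is larger than the fixed padded size
    }
  }
  return result;
}
int main() {
  std::printf("%lld\n", (long long)PaddedExtent(-1, {2, 1, 3}));  // 3: grows
  std::printf("%lld\n", (long long)PaddedExtent(3, {2, 1, 3}));   // 3: fits
  std::printf("%lld\n", (long long)PaddedExtent(2, {2, 1, 3}));   // -1: DataLoss
}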
dd9bc663-fd0f-418e-9cc8-144ac62321b0
cpp
google/tensorstore
index_vector_or_scalar
tensorstore/index_space/index_vector_or_scalar.cc
tensorstore/index_space/index_vector_or_scalar_test.cc
#include "tensorstore/index_space/index_vector_or_scalar.h" #include <system_error> #include "absl/status/status.h" #include "tensorstore/util/status.h" #include "tensorstore/util/str_cat.h" namespace tensorstore { namespace internal_index_space { absl::Status CheckIndexVectorSize(IndexVectorOrScalarView indices, DimensionIndex size) { if (indices.pointer && indices.size_or_scalar != size) return absl::InvalidArgumentError(tensorstore::StrCat( "Number of dimensions (", size, ") does not match number of indices (", indices.size_or_scalar, ")")); return absl::OkStatus(); } } }
#include "tensorstore/index_space/index_vector_or_scalar.h" #include <cstdint> #include <system_error> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorstore/index.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" #include "tensorstore/util/status_testutil.h" namespace { using ::tensorstore::dynamic_extent; using ::tensorstore::Index; using ::tensorstore::IsIndexVectorOrScalar; using ::tensorstore::MatchesStatus; using ::tensorstore::span; using ::tensorstore::internal_index_space::CheckIndexVectorSize; using ::tensorstore::internal_index_space::IndexVectorOrScalarView; static_assert(IsIndexVectorOrScalar<Index>::value == true); static_assert(IsIndexVectorOrScalar<std::int32_t>::value == true); static_assert(IsIndexVectorOrScalar<float>::value == false); static_assert( std::is_same_v< typename IsIndexVectorOrScalar<std::int32_t>::normalized_type, Index>); static_assert(IsIndexVectorOrScalar<std::int32_t>::extent == dynamic_extent); static_assert(IsIndexVectorOrScalar<std::vector<std::int32_t>>::value == false); static_assert(IsIndexVectorOrScalar<const std::vector<Index>>::value == true); static_assert(std::is_same_v<typename IsIndexVectorOrScalar< const std::vector<Index>>::normalized_type, span<const Index>>); static_assert(IsIndexVectorOrScalar<const std::vector<Index>>::extent == dynamic_extent); static_assert(IsIndexVectorOrScalar<span<const Index>>::value == true); static_assert( std::is_same_v<typename IsIndexVectorOrScalar<span<Index>>::normalized_type, span<const Index>>); static_assert(IsIndexVectorOrScalar<span<const Index>>::extent == dynamic_extent); static_assert(IsIndexVectorOrScalar<span<const Index, 5>>::value == true); static_assert(std::is_same_v< typename IsIndexVectorOrScalar<span<Index, 5>>::normalized_type, span<const Index, 5>>); static_assert(IsIndexVectorOrScalar<span<Index, 5>>::extent == 5); TEST(IndexVectorOrScalarTest, Scalar) { IndexVectorOrScalarView v(5); EXPECT_EQ(5, v.size_or_scalar); EXPECT_EQ(nullptr, v.pointer); EXPECT_EQ(5, v[0]); EXPECT_EQ(5, v[1]); EXPECT_TRUE(CheckIndexVectorSize(v, 3).ok()); } TEST(IndexVectorOrScalarTest, Vector) { const Index arr[] = {1, 2, 3}; IndexVectorOrScalarView v{span(arr)}; EXPECT_EQ(3, v.size_or_scalar); EXPECT_EQ(&arr[0], v.pointer); EXPECT_EQ(1, v[0]); EXPECT_EQ(2, v[1]); EXPECT_EQ(3, v[2]); EXPECT_TRUE(CheckIndexVectorSize(v, 3).ok()); EXPECT_THAT(CheckIndexVectorSize(v, 5), tensorstore::MatchesStatus(absl::StatusCode::kInvalidArgument)); } }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_vector_or_scalar.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_vector_or_scalar_test.cc
4f887a6430414cd6088e1743555015b10f116d50
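Illustrative sketch for the record above (not part of the source file or its test): a minimal standalone program exercising CheckIndexVectorSize on both flavors of IndexVectorOrScalarView. It assumes only the headers and names that already appear in the record.

#include <iostream>

#include "tensorstore/index.h"
#include "tensorstore/index_space/index_vector_or_scalar.h"
#include "tensorstore/util/span.h"

using ::tensorstore::Index;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::CheckIndexVectorSize;
using ::tensorstore::internal_index_space::IndexVectorOrScalarView;

int main() {
  // A scalar view has pointer == nullptr, so it broadcasts to any rank and
  // the size check always succeeds.
  IndexVectorOrScalarView scalar(7);
  std::cout << CheckIndexVectorSize(scalar, 4).ok() << "\n";  // 1

  // A vector view must match the dimension count exactly.
  const Index indices[] = {1, 2, 3};
  IndexVectorOrScalarView vector{span(indices)};
  std::cout << CheckIndexVectorSize(vector, 3).ok() << "\n";  // 1
  std::cout << CheckIndexVectorSize(vector, 4).ok() << "\n";  // 0 (InvalidArgument)
}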
06bf8626-83aa-466b-9eec-d04667fc399d
cpp
google/tensorstore
kvs_backed_cache
tensorstore/internal/cache/kvs_backed_cache.cc
tensorstore/internal/cache/kvs_backed_cache_test.cc
#include "tensorstore/internal/cache/kvs_backed_cache.h" #include <cstdint> #include <string> #include "tensorstore/internal/metrics/counter.h" #include "tensorstore/internal/metrics/metadata.h" namespace tensorstore { namespace internal { namespace { auto& kvs_cache_read = internal_metrics::Counter<int64_t, std::string>::New( "/tensorstore/cache/kvs_cache_read", "category", internal_metrics::MetricMetadata( "Count of kvs_backed_cache reads by category. A large number of " "'unchanged' reads indicates that the dataset is relatively " "quiescent.")); } void KvsBackedCache_IncrementReadUnchangedMetric() { static auto& cell = kvs_cache_read.GetCell("unchanged"); cell.Increment(); } void KvsBackedCache_IncrementReadChangedMetric() { static auto& cell = kvs_cache_read.GetCell("changed"); cell.Increment(); } void KvsBackedCache_IncrementReadErrorMetric() { static auto& cell = kvs_cache_read.GetCell("error"); cell.Increment(); } } }
#include <stddef.h> #include <functional> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/cord.h" #include "absl/time/clock.h" #include "absl/time/time.h" #include "tensorstore/internal/cache/cache.h" #include "tensorstore/internal/cache/kvs_backed_cache_testutil.h" #include "tensorstore/internal/global_initializer.h" #include "tensorstore/kvstore/generation.h" #include "tensorstore/kvstore/key_range.h" #include "tensorstore/kvstore/memory/memory_key_value_store.h" #include "tensorstore/kvstore/mock_kvstore.h" #include "tensorstore/kvstore/test_matchers.h" #include "tensorstore/kvstore/test_util.h" #include "tensorstore/transaction.h" #include "tensorstore/util/status_testutil.h" namespace { namespace kvstore = tensorstore::kvstore; using ::tensorstore::KeyRange; using ::tensorstore::MatchesStatus; using ::tensorstore::StorageGeneration; using ::tensorstore::TimestampedStorageGeneration; using ::tensorstore::Transaction; using ::tensorstore::internal::CachePool; using ::tensorstore::internal::KvsBackedTestCache; using ::tensorstore::internal::MatchesKvsReadResult; using ::tensorstore::internal::MockKeyValueStore; using ::tensorstore::internal::OpenTransactionPtr; TENSORSTORE_GLOBAL_INITIALIZER { using ::tensorstore::internal::KvsBackedCacheBasicTransactionalTestOptions; using ::tensorstore::internal::RegisterKvsBackedCacheBasicTransactionalTest; { KvsBackedCacheBasicTransactionalTestOptions options; options.test_name = "MemoryNonAtomic"; options.get_store = [] { return tensorstore::GetMemoryKeyValueStore(false); }; options.multi_key_atomic_supported = false; RegisterKvsBackedCacheBasicTransactionalTest(options); } { KvsBackedCacheBasicTransactionalTestOptions options; options.test_name = "MemoryAtomic"; options.get_store = [] { return tensorstore::GetMemoryKeyValueStore(true); }; RegisterKvsBackedCacheBasicTransactionalTest(options); } } class MockStoreTest : public ::testing::Test { protected: CachePool::StrongPtr pool = CachePool::Make(CachePool::Limits{}); MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make(); kvstore::DriverPtr memory_store = tensorstore::GetMemoryKeyValueStore(); tensorstore::internal::CachePtr<KvsBackedTestCache> GetCache( std::string cache_identifier = {}, kvstore::DriverPtr kvstore_driver = {}) { if (!kvstore_driver) kvstore_driver = mock_store; return tensorstore::internal::GetCache<KvsBackedTestCache>( pool.get(), cache_identifier, [&] { return std::make_unique<KvsBackedTestCache>(kvstore_driver); }); } tensorstore::internal::CachePtr<KvsBackedTestCache> cache = GetCache(); }; TEST_F(MockStoreTest, ReadSuccess) { auto entry = GetCacheEntry(cache, "a"); auto read_time = absl::Now(); auto read_future = entry->Read({read_time}); auto read_req = mock_store->read_requests.pop(); EXPECT_EQ("a", read_req.key); EXPECT_EQ(StorageGeneration::Unknown(), read_req.options.generation_conditions.if_equal); EXPECT_EQ(StorageGeneration::Unknown(), read_req.options.generation_conditions.if_not_equal); EXPECT_EQ(tensorstore::OptionalByteRangeRequest{}, read_req.options.byte_range); EXPECT_EQ(read_time, read_req.options.staleness_bound); read_req(memory_store); } TEST_F(MockStoreTest, ReadError) { auto entry = GetCacheEntry(cache, "a"); auto read_future = entry->Read({absl::Now()}); auto read_req = mock_store->read_requests.pop(); read_req.promise.SetResult(absl::FailedPreconditionError("read error")); 
EXPECT_THAT(read_future.result(), MatchesStatus(absl::StatusCode::kFailedPrecondition, "Error reading \"a\": read error")); } TEST_F(MockStoreTest, WriteError) { auto entry = GetCacheEntry(cache, "a"); auto transaction = Transaction(tensorstore::atomic_isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc")); } transaction.CommitAsync().IgnoreFuture(); auto write_req = mock_store->write_requests.pop(); write_req.promise.SetResult(absl::FailedPreconditionError("write error")); EXPECT_THAT(transaction.future().result(), MatchesStatus(absl::StatusCode::kFailedPrecondition, "Error writing \"a\": write error")); } TEST_F(MockStoreTest, ReadErrorDuringWriteback) { auto entry = GetCacheEntry(cache, "a"); auto transaction = Transaction(tensorstore::atomic_isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "abc")); } transaction.CommitAsync().IgnoreFuture(); auto read_req = mock_store->read_requests.pop(); read_req.promise.SetResult(absl::FailedPreconditionError("read error")); EXPECT_THAT(transaction.future().result(), MatchesStatus(absl::StatusCode::kFailedPrecondition, "Error reading \"a\": read error")); } TEST_F(MockStoreTest, ReadErrorDueToValidateDuringWriteback) { auto entry = GetCacheEntry(cache, "a"); auto transaction = Transaction(tensorstore::atomic_isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(entry->Validate( open_transaction, [](absl::Cord data) { return absl::OkStatus(); })); auto read_future = entry->ReadValue(open_transaction); mock_store->read_requests.pop()(memory_store); EXPECT_THAT(read_future.result(), ::testing::Optional(absl::Cord())); } transaction.CommitAsync().IgnoreFuture(); auto read_req = mock_store->read_requests.pop(); read_req.promise.SetResult(absl::FailedPreconditionError("read error")); EXPECT_THAT(transaction.future().result(), MatchesStatus(absl::StatusCode::kFailedPrecondition, "Error reading \"a\": read error")); } TEST_F(MockStoreTest, WriteDuringRead) { auto entry = GetCacheEntry(cache, "a"); auto read_future = entry->Read({absl::InfinitePast()}); auto transaction = Transaction(tensorstore::atomic_isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc")); } transaction.CommitAsync().IgnoreFuture(); auto read_future2 = entry->Read({absl::InfinitePast()}); { auto read_req = mock_store->read_requests.pop(); read_req(memory_store); TENSORSTORE_ASSERT_OK(read_future); TENSORSTORE_ASSERT_OK(read_future2); } { auto write_req = mock_store->write_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("a", write_req.key); EXPECT_EQ(StorageGeneration::Unknown(), write_req.options.generation_conditions.if_equal); EXPECT_EQ("abc", write_req.value); write_req(memory_store); TENSORSTORE_ASSERT_OK(transaction.future()); } } TEST_F(MockStoreTest, MultiPhaseSeparateKeys) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, 
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); open_transaction->Barrier(); TENSORSTORE_ASSERT_OK(GetCacheEntry(GetCache("x"), "a") ->Modify(open_transaction, false, "abc")); open_transaction->Barrier(); TENSORSTORE_ASSERT_OK(GetCacheEntry(GetCache("x"), "b") ->Modify(open_transaction, false, "de")); TENSORSTORE_ASSERT_OK(GetCacheEntry(GetCache("y"), "b") ->Modify(open_transaction, false, "f")); } transaction.CommitAsync().IgnoreFuture(); { auto read_req = mock_store->read_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("a", read_req.key); EXPECT_EQ(StorageGeneration::Unknown(), read_req.options.generation_conditions.if_not_equal); read_req(memory_store); } { auto write_req = mock_store->write_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("a", write_req.key); EXPECT_EQ(StorageGeneration::NoValue(), write_req.options.generation_conditions.if_equal); EXPECT_EQ("abc", write_req.value); write_req(memory_store); } EXPECT_THAT(memory_store->Read("a").result(), MatchesKvsReadResult(absl::Cord("abc"))); { auto read_req = mock_store->read_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("b", read_req.key); EXPECT_EQ(StorageGeneration::Unknown(), read_req.options.generation_conditions.if_not_equal); read_req(memory_store); } { auto write_req = mock_store->write_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("b", write_req.key); EXPECT_EQ(StorageGeneration::NoValue(), write_req.options.generation_conditions.if_equal); EXPECT_EQ("def", write_req.value); write_req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); EXPECT_THAT(memory_store->Read("b").result(), MatchesKvsReadResult(absl::Cord("def"))); } TEST_F(MockStoreTest, MultiPhaseSameKey) { auto entry = GetCacheEntry(cache, "a"); auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "abc")); open_transaction->Barrier(); TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "def")); } transaction.CommitAsync().IgnoreFuture(); { auto read_req = mock_store->read_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("a", read_req.key); EXPECT_EQ(StorageGeneration::Unknown(), read_req.options.generation_conditions.if_not_equal); read_req(memory_store); } { auto write_req = mock_store->write_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("a", write_req.key); EXPECT_EQ(StorageGeneration::NoValue(), write_req.options.generation_conditions.if_equal); EXPECT_EQ("abc", write_req.value); write_req(memory_store); } TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto read_result, memory_store->Read("a").result()); EXPECT_THAT(read_result, MatchesKvsReadResult(absl::Cord("abc"))); TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto write_stamp, memory_store->Write("a", absl::Cord("xyz")).result()); { auto write_req = mock_store->write_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("a", write_req.key); 
EXPECT_EQ(read_result.stamp.generation, write_req.options.generation_conditions.if_equal); EXPECT_EQ("abcdef", write_req.value); write_req(memory_store); } { auto read_req = mock_store->read_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("a", read_req.key); EXPECT_EQ(read_result.stamp.generation, read_req.options.generation_conditions.if_not_equal); read_req(memory_store); } { auto write_req = mock_store->write_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("a", write_req.key); EXPECT_EQ(write_stamp.generation, write_req.options.generation_conditions.if_equal); EXPECT_EQ("xyzdef", write_req.value); write_req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); EXPECT_THAT(memory_store->Read("a").result(), MatchesKvsReadResult(absl::Cord("xyzdef"))); } TEST_F(MockStoreTest, MultiPhaseSameKeyAbort) { auto entry = GetCacheEntry(cache, "a"); auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "abc")); open_transaction->Barrier(); TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, false, "def")); } transaction.Abort(); } TEST_F(MockStoreTest, DeleteRangeSingle) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "c"})); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("a", "c"), req.range); req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); } TEST_F(MockStoreTest, DeleteRangeError) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "c"})); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("a", "c"), req.range); req.promise.SetResult(absl::FailedPreconditionError("delete range error")); } ASSERT_TRUE(transaction.future().ready()); EXPECT_THAT(transaction.future().result(), MatchesStatus(absl::StatusCode::kFailedPrecondition, "delete range error")); } TEST_F(MockStoreTest, DeleteRangeAtomicError) { auto transaction = Transaction(tensorstore::atomic_isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); EXPECT_THAT(mock_store->TransactionalDeleteRange(open_transaction, KeyRange{"a", "c"}), MatchesStatus(absl::StatusCode::kInvalidArgument, "Cannot delete range starting at \"a\" as single " "atomic transaction")); } } TEST_F(MockStoreTest, DeleteRangeMultipleDisjoint) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); 
TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "c"})); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"d", "f"})); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("a", "c"), req.range); req(memory_store); } ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("d", "f"), req.range); req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); } TEST_F(MockStoreTest, DeleteRangeMultipleOverlapping) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "c"})); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"b", "f"})); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("a", "f"), req.range); req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); } TEST_F(MockStoreTest, DeleteRangeBeforeWrite) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "c"})); TENSORSTORE_ASSERT_OK( GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc")); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("a", "b"), req.range); req(memory_store); } ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange(KeyRange::Successor("b"), "c"), req.range); req(memory_store); } ASSERT_FALSE(transaction.future().ready()); { auto write_req = mock_store->write_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("b", write_req.key); EXPECT_EQ(StorageGeneration::Unknown(), write_req.options.generation_conditions.if_equal); EXPECT_THAT(write_req.value, ::testing::Optional(std::string("abc"))); write_req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); } TEST_F(MockStoreTest, DeleteRangeBeforeWriteJustBeforeExclusiveMax) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", KeyRange::Successor("b")})); TENSORSTORE_ASSERT_OK( GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc")); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("a", "b"), req.range); req(memory_store); } ASSERT_FALSE(transaction.future().ready()); { auto write_req = mock_store->write_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); 
EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_EQ("b", write_req.key); EXPECT_EQ(StorageGeneration::Unknown(), write_req.options.generation_conditions.if_equal); EXPECT_EQ("abc", write_req.value); write_req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); } TEST_F(MockStoreTest, DeleteRangeAfterWrite) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK( GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc")); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "c"})); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("a", "c"), req.range); req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); } TEST_F(MockStoreTest, DeleteRangeAfterValidateError) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK( GetCacheEntry(cache, "b") ->Validate(open_transaction, [](absl::Cord value) { return absl::FailedPreconditionError("validate error"); })); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "c"})); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); mock_store->read_requests.pop()(memory_store); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_TRUE(mock_store->delete_range_requests.empty()); ASSERT_TRUE(transaction.future().ready()); EXPECT_THAT(transaction.future().result(), MatchesStatus(absl::StatusCode::kFailedPrecondition, "Error writing \"b\": validate error")); } TEST_F(MockStoreTest, DeleteRangeAfterValidateAndModify) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK( GetCacheEntry(cache, "b") ->Validate(open_transaction, [](const absl::Cord& input) { return absl::OkStatus(); })); TENSORSTORE_ASSERT_OK( GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc")); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "c"})); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto read_req = mock_store->read_requests.pop(); EXPECT_TRUE(mock_store->read_requests.empty()); EXPECT_TRUE(mock_store->write_requests.empty()); EXPECT_TRUE(mock_store->delete_range_requests.empty()); EXPECT_EQ("b", read_req.key); EXPECT_EQ(StorageGeneration::Unknown(), read_req.options.generation_conditions.if_not_equal); read_req(memory_store); } { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("a", "c"), req.range); req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); } TEST_F(MockStoreTest, MultiPhaseValidateError) { auto transaction = Transaction(tensorstore::isolated); auto entry = GetCacheEntry(cache, "a"); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); 
TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc")); open_transaction->Barrier(); auto validator = [](absl::Cord value) { if (value != "abc") { return absl::AbortedError("validation"); } return absl::OkStatus(); }; TENSORSTORE_ASSERT_OK(entry->Validate(open_transaction, validator)); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto write_req = mock_store->write_requests.pop(); EXPECT_EQ("a", write_req.key); EXPECT_EQ(StorageGeneration::Unknown(), write_req.options.generation_conditions.if_equal); write_req(memory_store); } TENSORSTORE_ASSERT_OK(memory_store->Write("a", absl::Cord("def"))); ASSERT_FALSE(transaction.future().ready()); { auto read_req = mock_store->read_requests.pop(); EXPECT_EQ("a", read_req.key); EXPECT_EQ(tensorstore::OptionalByteRangeRequest(0, 0), read_req.options.byte_range); read_req(memory_store); } { auto read_req = mock_store->read_requests.pop(); EXPECT_EQ("a", read_req.key); read_req(memory_store); } ASSERT_TRUE(transaction.future().ready()); EXPECT_THAT(transaction.future().result(), MatchesStatus(absl::StatusCode::kAborted)); } TEST_F(MockStoreTest, MultiPhaseValidateErrorAfterReadValue) { auto transaction = Transaction(tensorstore::isolated); auto entry = GetCacheEntry(cache, "a"); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "abc")); open_transaction->Barrier(); auto validator = [](absl::Cord value) { if (value != "abc") { return absl::AbortedError("validation: " + std::string(value)); } return absl::OkStatus(); }; TENSORSTORE_ASSERT_OK(entry->Validate(open_transaction, validator)); TENSORSTORE_ASSERT_OK(entry->Modify(open_transaction, true, "xyz")); TENSORSTORE_ASSERT_OK(entry->Validate( open_transaction, [](absl::Cord value) { return absl::OkStatus(); })); EXPECT_THAT(entry->ReadValue(open_transaction).result(), ::testing::Optional(absl::Cord("xyz"))); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto write_req = mock_store->write_requests.pop(); EXPECT_EQ("a", write_req.key); EXPECT_EQ(StorageGeneration::Unknown(), write_req.options.generation_conditions.if_equal); write_req(memory_store); } TENSORSTORE_ASSERT_OK(memory_store->Write("a", absl::Cord("def"))); ASSERT_FALSE(transaction.future().ready()); { auto write_req = mock_store->write_requests.pop(); EXPECT_EQ("a", write_req.key); write_req(memory_store); } { auto read_req = mock_store->read_requests.pop(); EXPECT_EQ("a", read_req.key); read_req(memory_store); } ASSERT_TRUE(transaction.future().ready()); EXPECT_THAT(transaction.future().result(), MatchesStatus(absl::StatusCode::kAborted)); } TEST_F(MockStoreTest, UnboundedDeleteRangeAfterWrite) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK( GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc")); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", ""})); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("a", ""), req.range); req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); } TEST_F(MockStoreTest, 
DeleteRangeThenWriteThenDeleteRange) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "c"})); TENSORSTORE_ASSERT_OK( GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc")); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "d"})); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("a", "d"), req.range); req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); } TEST_F(MockStoreTest, MultiPhaseDeleteRangeOverlapEnd) { const std::vector<std::vector<KeyRange>> test_cases = { { KeyRange{"a", "c"}, KeyRange{"a", "c"}, }, { KeyRange{"a", "c"}, KeyRange{"a", "d"}, }, { KeyRange{"b", "c"}, KeyRange{"a", "c"}, }, { KeyRange{"b", "c"}, KeyRange{"a", "d"}, }, { KeyRange{"a", "d"}, KeyRange{"b", "c"}, }, }; for (const auto& test_case : test_cases) { SCOPED_TRACE("test_case=" + ::testing::PrintToString(test_case)); auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); for (const auto& range : test_case) { TENSORSTORE_ASSERT_OK( mock_store->TransactionalDeleteRange(open_transaction, range)); open_transaction->Barrier(); } } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); for (const auto& range : test_case) { auto req = mock_store->delete_range_requests.pop(); EXPECT_TRUE(mock_store->delete_range_requests.empty()); EXPECT_EQ(range, req.range); req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); } } TEST_F(MockStoreTest, MultiPhaseDeleteRangeAndWrite) { auto transaction = Transaction(tensorstore::isolated); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "c"})); open_transaction->Barrier(); TENSORSTORE_ASSERT_OK( GetCacheEntry(cache, "b")->Modify(open_transaction, false, "abc")); TENSORSTORE_ASSERT_OK(mock_store->TransactionalDeleteRange( open_transaction, KeyRange{"a", "d"})); } transaction.CommitAsync().IgnoreFuture(); ASSERT_FALSE(transaction.future().ready()); { auto req = mock_store->delete_range_requests.pop(); EXPECT_TRUE(mock_store->delete_range_requests.empty()); EXPECT_EQ(KeyRange("a", "c"), req.range); req(memory_store); } { auto req = mock_store->delete_range_requests.pop(); EXPECT_EQ(KeyRange("a", "d"), req.range); req(memory_store); } ASSERT_TRUE(transaction.future().ready()); TENSORSTORE_EXPECT_OK(transaction.future()); } TEST_F(MockStoreTest, MultipleKeyValueStoreAtomicError) { auto transaction = Transaction(tensorstore::atomic_isolated); auto mock_store2 = MockKeyValueStore::Make(); { TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto open_transaction, tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction)); TENSORSTORE_ASSERT_OK( GetCacheEntry(cache, "x")->Modify(open_transaction, false, "abc")); EXPECT_THAT(GetCacheEntry(GetCache("", mock_store2), "y") ->Modify(open_transaction, false, "abc"), MatchesStatus(absl::StatusCode::kInvalidArgument, "Cannot 
read/write \"x\" and read/write \"y\" as " "single atomic transaction")); } } class InitializationRaceTestingKvstore : public MockKeyValueStore { public: std::function<void()> on_read_modify_write; absl::Status ReadModifyWrite( tensorstore::internal::OpenTransactionPtr& transaction, size_t& phase, Key key, ReadModifyWriteSource& source) override { if (on_read_modify_write) on_read_modify_write(); return MockKeyValueStore::ReadModifyWrite(transaction, phase, std::move(key), source); } }; }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/kvs_backed_cache.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/kvs_backed_cache_test.cc
4f887a6430414cd6088e1743555015b10f116d50
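Illustrative sketch only: the .cc in this record defines just the three metric-increment helpers; the call sites live in kvs_backed_cache.h, which is not part of the record. The sketch below shows how a KvsBackedCache-style driver could map conditional-read outcomes onto those counters — ReadOutcome and ClassifyRead are hypothetical names invented here, not tensorstore API.

#include "tensorstore/internal/cache/kvs_backed_cache.h"

namespace {

enum class ReadOutcome { kUnchanged, kChanged, kError };  // hypothetical

// Hypothetical helper: one counter cell per outcome, matching the
// "unchanged"/"changed"/"error" categories of /tensorstore/cache/kvs_cache_read.
void ClassifyRead(ReadOutcome outcome) {
  using namespace tensorstore::internal;
  switch (outcome) {
    case ReadOutcome::kUnchanged:
      // Generation matched if_not_equal, so no value was re-transferred; a
      // high rate here indicates a quiescent dataset (per the metric's docs).
      KvsBackedCache_IncrementReadUnchangedMetric();
      break;
    case ReadOutcome::kChanged:
      KvsBackedCache_IncrementReadChangedMetric();
      break;
    case ReadOutcome::kError:
      KvsBackedCache_IncrementReadErrorMetric();
      break;
  }
}

}  // namespace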
95e82690-60b8-4cb9-881d-1c29fd32eca1
cpp
tensorflow/tensorflow
resource_operation_table
tensorflow/compiler/tf2xla/resource_operation_table.cc
tensorflow/compiler/tf2xla/resource_operation_table_test.cc
#include "tensorflow/compiler/tf2xla/resource_operation_table.h" #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" namespace tensorflow { absl::string_view XlaResourceOpInfo::XlaResourceOpKindToString( XlaResourceOpKind op_kind) { switch (op_kind) { case XlaResourceOpKind::kRead: return "Read"; case XlaResourceOpKind::kWrite: return "Write"; case XlaResourceOpKind::kReadWrite: return "Modify"; } } static absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>* CreateResourceOpInfoMap() { auto* result = new absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>; auto add = [&](absl::string_view op, XlaResourceOpKind op_kind, XlaResourceKind resource_kind) { auto insert_result = result->insert({op, XlaResourceOpInfo(op_kind, resource_kind)}); CHECK(insert_result.second); }; auto kRead = XlaResourceOpKind::kRead; auto kWrite = XlaResourceOpKind::kWrite; auto kReadWrite = XlaResourceOpKind::kReadWrite; auto kVariable = XlaResourceKind::kVariable; auto kStack = XlaResourceKind::kStack; auto kTensorArray = XlaResourceKind::kTensorArray; add("AssignAddVariableOp" , kReadWrite, kVariable); add("AssignSubVariableOp" , kReadWrite, kVariable); add("AssignVariableOp" , kWrite, kVariable); add("AssignVariableXlaConcatND" , kWrite, kVariable); add("CollectiveReduceV2" , kRead, kVariable); add("ReadVariableOp" , kRead, kVariable); add("ReadVariableXlaSplitND" , kRead, kVariable); add("ResourceApplyAdaMax" , kReadWrite, kVariable); add("ResourceApplyAdadelta" , kReadWrite, kVariable); add("ResourceApplyAdagrad" , kReadWrite, kVariable); add("ResourceApplyAdagradV2" , kReadWrite, kVariable), add("ResourceApplyAdagradDA" , kReadWrite, kVariable); add("ResourceApplyAdam" , kReadWrite, kVariable); add("ResourceApplyAddSign" , kReadWrite, kVariable); add("ResourceApplyCenteredRMSProp" , kReadWrite, kVariable); add("ResourceApplyFtrl" , kReadWrite, kVariable); add("ResourceApplyFtrlV2" , kReadWrite, kVariable); add("ResourceApplyGradientDescent" , kReadWrite, kVariable); add("ResourceApplyMomentum" , kReadWrite, kVariable); add("ResourceApplyKerasMomentum" , kReadWrite, kVariable); add("ResourceApplyPowerSign" , kReadWrite, kVariable); add("ResourceApplyProximalAdagrad" , kReadWrite, kVariable); add("ResourceApplyProximalGradientDescent" , kReadWrite, kVariable); add("ResourceApplyRMSProp" , kReadWrite, kVariable); add("ResourceGather" , kRead, kVariable); add("ResourceScatterAdd" , kReadWrite, kVariable); add("ResourceScatterDiv" , kReadWrite, kVariable); add("ResourceScatterMax" , kReadWrite, kVariable); add("ResourceScatterMin" , kReadWrite, kVariable); add("ResourceScatterMul" , kReadWrite, kVariable); add("ResourceScatterNdAdd" , kReadWrite, kVariable); add("ResourceScatterNdSub" , kReadWrite, kVariable); add("ResourceScatterNdUpdate" , kReadWrite, kVariable); add("ResourceScatterSub" , kReadWrite, kVariable); add("ResourceScatterUpdate" , kReadWrite, kVariable); add("ResourceStridedSliceAssign" , kReadWrite, kVariable); add("RngReadAndSkip" , kReadWrite, kVariable); add("RngSkip" , kReadWrite, kVariable); add("StatefulStandardNormalV2" , kReadWrite, kVariable); add("StatefulTruncatedNormal" , kReadWrite, kVariable); add("StatefulUniform" , kReadWrite, kVariable); add("StatefulUniformFullInt" , kReadWrite, kVariable); add("StatefulUniformInt" , kReadWrite, kVariable); add("VarIsInitializedOp" , kRead, kVariable); add("VariableShape" , kRead, kVariable); add("StackV2" , kWrite, kStack); add("StackCloseV2" , kRead, kStack); add("StackPopV2" , kReadWrite, kStack); 
add("StackPushV2" , kReadWrite, kStack); add("TensorArrayV3" , kWrite, kTensorArray); add("TensorArrayConcatV3" , kRead, kTensorArray); add("TensorArrayGatherV3" , kRead, kTensorArray); add("TensorArrayScatterV3" , kWrite, kTensorArray); add("TensorArrayGradV3" , kRead, kTensorArray); add("TensorArrayCloseV3" , kRead, kTensorArray); add("TensorArrayReadV3" , kRead, kTensorArray); add("TensorArraySizeV3" , kRead, kTensorArray); add("TensorArraySplitV3" , kWrite, kTensorArray); add("TensorArrayWriteV3" , kWrite, kTensorArray); return result; } static const absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>& GetStaticResourceOpInfoMap() { static absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>* op_info_map = CreateResourceOpInfoMap(); return *op_info_map; } const XlaResourceOpInfo* GetResourceOpInfoForOp(absl::string_view op) { const absl::flat_hash_map<absl::string_view, XlaResourceOpInfo>& op_infos = GetStaticResourceOpInfoMap(); auto it = op_infos.find(op); return it == op_infos.end() ? nullptr : &it->second; } namespace resource_op_table_internal { std::vector<absl::string_view> GetKnownResourceOps() { std::vector<absl::string_view> result; for (const auto& p : GetStaticResourceOpInfoMap()) { result.push_back(p.first); } absl::c_sort(result); return result; } } }
#include "tensorflow/compiler/tf2xla/resource_operation_table.h" #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/strings/str_join.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { bool IsResourceArgDef(const OpDef::ArgDef& arg_def) { return arg_def.type() == DT_RESOURCE; } bool HasResourceInputOrOutput(const OpDef& op_def) { return absl::c_any_of(op_def.input_arg(), IsResourceArgDef) || absl::c_any_of(op_def.output_arg(), IsResourceArgDef); } TEST(ResourceOperationTableTest, HaveAllResourceOps) { absl::flat_hash_map<string, bool> known_resource_ops; for (absl::string_view known_resource_op : resource_op_table_internal::GetKnownResourceOps()) { ASSERT_TRUE( known_resource_ops.insert({string(known_resource_op), false}).second); } std::vector<string> xla_op_names = XlaOpRegistry::GetAllRegisteredOps(); for (const string& xla_op_name : xla_op_names) { const OpDef* op_def; TF_ASSERT_OK(OpRegistry::Global()->LookUpOpDef(xla_op_name, &op_def)); if (HasResourceInputOrOutput(*op_def)) { EXPECT_EQ(known_resource_ops.count(xla_op_name), 1) << "Unknown resource op " << xla_op_name; known_resource_ops[xla_op_name] = true; } } std::vector<string> unnecessary_resource_ops; for (const auto& pair : known_resource_ops) { if (!pair.second) { unnecessary_resource_ops.push_back(pair.first); } } EXPECT_TRUE(unnecessary_resource_ops.empty()) << "Stale resource ops:\n" << absl::StrJoin(unnecessary_resource_ops, "\n"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/resource_operation_table.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/resource_operation_table_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
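Illustrative sketch for the record above (not part of the source): a small program querying the resource-op table through the two entry points the record defines. Everything used here — GetResourceOpInfoForOp returning nullptr for unknown ops, and resource_op_table_internal::GetKnownResourceOps returning a sorted vector — is grounded in the .cc shown above.

#include <iostream>

#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/resource_operation_table.h"

int main() {
  // A registered resource op yields a non-null info pointer; an op without
  // resource inputs/outputs yields nullptr rather than an error.
  std::cout << (tensorflow::GetResourceOpInfoForOp("AssignVariableOp") != nullptr)
            << "\n";  // 1
  std::cout << (tensorflow::GetResourceOpInfoForOp("MatMul") == nullptr)
            << "\n";  // 1

  // The internal helper enumerates every table entry in sorted order; the
  // test in this record uses it to cross-check the XLA op registry.
  for (absl::string_view op :
       tensorflow::resource_op_table_internal::GetKnownResourceOps()) {
    std::cout << op << "\n";
  }
}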
7f390e78-3920-4249-bb9f-33db271e81a9
cpp
tensorflow/tensorflow
coding
third_party/xla/third_party/tsl/tsl/platform/coding.cc
tensorflow/core/lib/core/coding_test.cc
#include "tsl/platform/coding.h" #include "tsl/platform/byte_order.h" #include "tsl/platform/stringpiece.h" #include "tsl/platform/tstring.h" #include "tsl/platform/types.h" namespace tsl { namespace core { void EncodeFixed16(char* buf, uint16 value) { if (port::kLittleEndian) { memcpy(buf, &value, sizeof(value)); } else { buf[0] = value & 0xff; buf[1] = (value >> 8) & 0xff; } } void EncodeFixed32(char* buf, uint32 value) { if (port::kLittleEndian) { memcpy(buf, &value, sizeof(value)); } else { buf[0] = value & 0xff; buf[1] = (value >> 8) & 0xff; buf[2] = (value >> 16) & 0xff; buf[3] = (value >> 24) & 0xff; } } void EncodeFixed64(char* buf, uint64 value) { if (port::kLittleEndian) { memcpy(buf, &value, sizeof(value)); } else { buf[0] = value & 0xff; buf[1] = (value >> 8) & 0xff; buf[2] = (value >> 16) & 0xff; buf[3] = (value >> 24) & 0xff; buf[4] = (value >> 32) & 0xff; buf[5] = (value >> 40) & 0xff; buf[6] = (value >> 48) & 0xff; buf[7] = (value >> 56) & 0xff; } } void PutFixed16(string* dst, uint16 value) { char buf[sizeof(value)]; EncodeFixed16(buf, value); dst->append(buf, sizeof(buf)); } void PutFixed32(string* dst, uint32 value) { char buf[sizeof(value)]; EncodeFixed32(buf, value); dst->append(buf, sizeof(buf)); } void PutFixed64(string* dst, uint64 value) { char buf[sizeof(value)]; EncodeFixed64(buf, value); dst->append(buf, sizeof(buf)); } char* EncodeVarint32(char* dst, uint32 v) { unsigned char* ptr = reinterpret_cast<unsigned char*>(dst); static const int B = 128; if (v < (1 << 7)) { *(ptr++) = v; } else if (v < (1 << 14)) { *(ptr++) = v | B; *(ptr++) = v >> 7; } else if (v < (1 << 21)) { *(ptr++) = v | B; *(ptr++) = (v >> 7) | B; *(ptr++) = v >> 14; } else if (v < (1 << 28)) { *(ptr++) = v | B; *(ptr++) = (v >> 7) | B; *(ptr++) = (v >> 14) | B; *(ptr++) = v >> 21; } else { *(ptr++) = v | B; *(ptr++) = (v >> 7) | B; *(ptr++) = (v >> 14) | B; *(ptr++) = (v >> 21) | B; *(ptr++) = v >> 28; } return reinterpret_cast<char*>(ptr); } void PutVarint32(string* dst, uint32 v) { char buf[5]; char* ptr = EncodeVarint32(buf, v); dst->append(buf, ptr - buf); } void PutVarint32(tstring* dst, uint32 v) { char buf[5]; char* ptr = EncodeVarint32(buf, v); dst->append(buf, ptr - buf); } char* EncodeVarint64(char* dst, uint64 v) { static const int B = 128; unsigned char* ptr = reinterpret_cast<unsigned char*>(dst); while (v >= B) { *(ptr++) = (v & (B - 1)) | B; v >>= 7; } *(ptr++) = static_cast<unsigned char>(v); return reinterpret_cast<char*>(ptr); } void PutVarint64(string* dst, uint64 v) { char buf[10]; char* ptr = EncodeVarint64(buf, v); dst->append(buf, ptr - buf); } void PutVarint64(tstring* dst, uint64 v) { char buf[10]; char* ptr = EncodeVarint64(buf, v); dst->append(buf, ptr - buf); } int VarintLength(uint64_t v) { int len = 1; while (v >= 128) { v >>= 7; len++; } return len; } const char* GetVarint32Ptr(const char* p, const char* limit, uint32* value) { if (p < limit) { uint32 result = *(reinterpret_cast<const unsigned char*>(p)); if ((result & 128) == 0) { *value = result; return p + 1; } } return GetVarint32PtrFallback(p, limit, value); } const char* GetVarint32PtrFallback(const char* p, const char* limit, uint32* value) { uint32 result = 0; for (uint32 shift = 0; shift <= 28 && p < limit; shift += 7) { uint32 byte = *(reinterpret_cast<const unsigned char*>(p)); p++; if (byte & 128) { result |= ((byte & 127) << shift); } else { result |= (byte << shift); *value = result; return reinterpret_cast<const char*>(p); } } return nullptr; } bool GetVarint32(absl::string_view* input, uint32* 
value) { const char* p = input->data(); const char* limit = p + input->size(); const char* q = GetVarint32Ptr(p, limit, value); if (q == nullptr) { return false; } else { *input = absl::string_view(q, limit - q); return true; } } const char* GetVarint64Ptr(const char* p, const char* limit, uint64* value) { uint64 result = 0; for (uint32 shift = 0; shift <= 63 && p < limit; shift += 7) { uint64 byte = *(reinterpret_cast<const unsigned char*>(p)); p++; if (byte & 128) { result |= ((byte & 127) << shift); } else { result |= (byte << shift); *value = result; return reinterpret_cast<const char*>(p); } } return nullptr; } bool GetVarint64(absl::string_view* input, uint64* value) { const char* p = input->data(); const char* limit = p + input->size(); const char* q = GetVarint64Ptr(p, limit, value); if (q == nullptr) { return false; } else { *input = absl::string_view(q, limit - q); return true; } } } }
#include "tensorflow/core/lib/core/coding.h" #include <vector> #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace core { TEST(Coding, Fixed16) { static const uint16 N = 50000; string s; for (uint16 v = 0; v < N; v++) { char buf[sizeof(uint16)]; EncodeFixed16(buf, v); s.append(buf, sizeof(buf)); } const char* p = s.data(); for (uint16 v = 0; v < N; v++) { uint16 actual = DecodeFixed16(p); ASSERT_EQ(v, actual); p += sizeof(uint16); } } TEST(Coding, Fixed32) { static const uint32 N = 100000; string s; for (uint32 v = 0; v < N; v++) { char buf[sizeof(uint32)]; EncodeFixed32(buf, v); s.append(buf, sizeof(buf)); } const char* p = s.data(); for (uint32 v = 0; v < N; v++) { uint32 actual = DecodeFixed32(p); ASSERT_EQ(v, actual); p += sizeof(uint32); } } TEST(Coding, Fixed64) { string s; for (int power = 0; power <= 63; power++) { uint64 v = static_cast<uint64>(1) << power; char buf[sizeof(uint64)]; EncodeFixed64(buf, v - 1); s.append(buf, sizeof(buf)); EncodeFixed64(buf, v + 0); s.append(buf, sizeof(buf)); EncodeFixed64(buf, v + 1); s.append(buf, sizeof(buf)); } const char* p = s.data(); for (int power = 0; power <= 63; power++) { uint64 v = static_cast<uint64>(1) << power; uint64 actual; actual = DecodeFixed64(p); ASSERT_EQ(v - 1, actual); p += sizeof(uint64); actual = DecodeFixed64(p); ASSERT_EQ(v + 0, actual); p += sizeof(uint64); actual = DecodeFixed64(p); ASSERT_EQ(v + 1, actual); p += sizeof(uint64); } } TEST(Coding, EncodingOutput) { char dst[8]; EncodeFixed16(dst, 0x0201); ASSERT_EQ(0x01, static_cast<int>(dst[0])); ASSERT_EQ(0x02, static_cast<int>(dst[1])); EncodeFixed32(dst, 0x04030201); ASSERT_EQ(0x01, static_cast<int>(dst[0])); ASSERT_EQ(0x02, static_cast<int>(dst[1])); ASSERT_EQ(0x03, static_cast<int>(dst[2])); ASSERT_EQ(0x04, static_cast<int>(dst[3])); EncodeFixed64(dst, 0x0807060504030201ull); ASSERT_EQ(0x01, static_cast<int>(dst[0])); ASSERT_EQ(0x02, static_cast<int>(dst[1])); ASSERT_EQ(0x03, static_cast<int>(dst[2])); ASSERT_EQ(0x04, static_cast<int>(dst[3])); ASSERT_EQ(0x05, static_cast<int>(dst[4])); ASSERT_EQ(0x06, static_cast<int>(dst[5])); ASSERT_EQ(0x07, static_cast<int>(dst[6])); ASSERT_EQ(0x08, static_cast<int>(dst[7])); } TEST(Coding, Varint32) { string s; for (uint32 i = 0; i < (32 * 32); i++) { uint32 v = (i / 32) << (i % 32); PutVarint32(&s, v); } const char* p = s.data(); const char* limit = p + s.size(); for (uint32 i = 0; i < (32 * 32); i++) { uint32 expected = (i / 32) << (i % 32); uint32 actual; p = GetVarint32Ptr(p, limit, &actual); ASSERT_TRUE(p != nullptr); ASSERT_EQ(expected, actual); } ASSERT_EQ(p, s.data() + s.size()); } TEST(Coding, Varint64) { std::vector<uint64> values; values.push_back(0); values.push_back(100); values.push_back(~static_cast<uint64>(0)); values.push_back(~static_cast<uint64>(0) - 1); for (uint32 k = 0; k < 64; k++) { const uint64 power = 1ull << k; values.push_back(power); values.push_back(power - 1); values.push_back(power + 1); } string s; for (size_t i = 0; i < values.size(); i++) { PutVarint64(&s, values[i]); } const char* p = s.data(); const char* limit = p + s.size(); for (size_t i = 0; i < values.size(); i++) { ASSERT_TRUE(p < limit); uint64 actual; p = GetVarint64Ptr(p, limit, &actual); ASSERT_TRUE(p != nullptr); ASSERT_EQ(values[i], actual); } ASSERT_EQ(p, limit); } TEST(Coding, Varint32Overflow) { uint32 result; string input("\x81\x82\x83\x84\x85\x11"); ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result) == nullptr); } TEST(Coding, Varint32Truncation) { uint32 large_value = (1u 
<< 31) + 100; string s; PutVarint32(&s, large_value); uint32 result; for (size_t len = 0; len < s.size() - 1; len++) { ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr); } ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr); ASSERT_EQ(large_value, result); } TEST(Coding, Varint64Overflow) { uint64 result; string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11"); ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result) == nullptr); } TEST(Coding, Varint64Truncation) { uint64 large_value = (1ull << 63) + 100ull; string s; PutVarint64(&s, large_value); uint64 result; for (size_t len = 0; len < s.size() - 1; len++) { ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr); } ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr); ASSERT_EQ(large_value, result); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/coding.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/coding_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
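Worked example for the varint routines in the record above (illustrative, not part of the source): encoding 300 takes two bytes because 300 >= 2^7. The low 7 bits (0b0101100) are emitted first with the continuation bit set, giving 0xAC; the remaining bits (0b10) follow as 0x02. Only functions shown in the record are used.

#include <cstdint>
#include <cstdio>
#include <string>

#include "tsl/platform/coding.h"

int main() {
  std::string s;
  tsl::core::PutVarint32(&s, 300);  // appends 0xAC 0x02
  for (unsigned char c : s) std::printf("%02X ", c);       // prints: AC 02
  std::printf("\n%d\n", tsl::core::VarintLength(300));     // prints: 2

  // Round-trip through the decoder.
  uint32_t v = 0;
  tsl::core::GetVarint32Ptr(s.data(), s.data() + s.size(), &v);
  std::printf("%u\n", v);  // prints: 300
}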
b88ae0a9-9163-417c-9fd6-5923a08dc599
cpp
google/tsl
denormal
tsl/platform/denormal.cc
tsl/platform/denormal_test.cc
#include "tsl/platform/denormal.h" #include <cstdint> #include "tsl/platform/cpu_info.h" #include "tsl/platform/platform.h" #if !defined(__SSE3__) && !defined(__clang__) && \ (defined(__GNUC__) && (__GNUC__ < 4) || \ ((__GNUC__ == 4) && (__GNUC_MINOR__ < 9))) #define GCC_WITHOUT_INTRINSICS #endif #if defined(PLATFORM_IS_X86) && !defined(IS_MOBILE_PLATFORM) && \ !defined(GCC_WITHOUT_INTRINSICS) #define X86_DENORM_USE_INTRINSICS #endif #ifdef X86_DENORM_USE_INTRINSICS #include <pmmintrin.h> #endif #if defined(PLATFORM_IS_ARM) && defined(__ARM_FP) && (__ARM_FP > 0) #define ARM_DENORM_AVAILABLE #define ARM_FPCR_FZ (1 << 24) #endif namespace tsl { namespace port { bool DenormalState::operator==(const DenormalState& other) const { return flush_to_zero() == other.flush_to_zero() && denormals_are_zero() == other.denormals_are_zero(); } bool DenormalState::operator!=(const DenormalState& other) const { return !(this->operator==(other)); } #ifdef ARM_DENORM_AVAILABLE static inline void ArmSetFloatingPointControlRegister(uint32_t fpcr) { #ifdef PLATFORM_IS_ARM64 __asm__ __volatile__("msr fpcr, %[fpcr]" : : [fpcr] "r"(static_cast<uint64_t>(fpcr))); #else __asm__ __volatile__("vmsr fpscr, %[fpcr]" : : [fpcr] "r"(fpcr)); #endif } static inline uint32_t ArmGetFloatingPointControlRegister() { uint32_t fpcr; #ifdef PLATFORM_IS_ARM64 uint64_t fpcr64; __asm__ __volatile__("mrs %[fpcr], fpcr" : [fpcr] "=r"(fpcr64)); fpcr = static_cast<uint32_t>(fpcr64); #else __asm__ __volatile__("vmrs %[fpcr], fpscr" : [fpcr] "=r"(fpcr)); #endif return fpcr; } #endif bool SetDenormalState(const DenormalState& state) { #ifdef X86_DENORM_USE_INTRINSICS if (TestCPUFeature(SSE3)) { _MM_SET_FLUSH_ZERO_MODE(state.flush_to_zero() ? _MM_FLUSH_ZERO_ON : _MM_FLUSH_ZERO_OFF); _MM_SET_DENORMALS_ZERO_MODE(state.denormals_are_zero() ? _MM_DENORMALS_ZERO_ON : _MM_DENORMALS_ZERO_OFF); return true; } #endif #ifdef ARM_DENORM_AVAILABLE if (state.flush_to_zero() == state.denormals_are_zero()) { uint32_t fpcr = ArmGetFloatingPointControlRegister(); if (state.flush_to_zero()) { fpcr |= ARM_FPCR_FZ; } else { fpcr &= ~ARM_FPCR_FZ; } ArmSetFloatingPointControlRegister(fpcr); return true; } #endif return false; } DenormalState GetDenormalState() { #ifdef X86_DENORM_USE_INTRINSICS if (TestCPUFeature(SSE3)) { bool flush_zero_mode = _MM_GET_FLUSH_ZERO_MODE() == _MM_FLUSH_ZERO_ON; bool denormals_zero_mode = _MM_GET_DENORMALS_ZERO_MODE() == _MM_DENORMALS_ZERO_ON; return DenormalState(flush_zero_mode, denormals_zero_mode); } #endif #ifdef ARM_DENORM_AVAILABLE uint32_t fpcr = ArmGetFloatingPointControlRegister(); if ((fpcr & ARM_FPCR_FZ) != 0) { return DenormalState(true, true); } #endif return DenormalState(false, false); } ScopedRestoreFlushDenormalState::ScopedRestoreFlushDenormalState() : denormal_state_(GetDenormalState()) {} ScopedRestoreFlushDenormalState::~ScopedRestoreFlushDenormalState() { SetDenormalState(denormal_state_); } ScopedFlushDenormal::ScopedFlushDenormal() { SetDenormalState( DenormalState(true, true)); } ScopedDontFlushDenormal::ScopedDontFlushDenormal() { SetDenormalState( DenormalState(false, false)); } } }
#include "tsl/platform/denormal.h" #include <cstring> #include <limits> #include "tsl/platform/test.h" namespace tsl { namespace port { TEST(DenormalStateTest, ConstructorAndAccessorsWork) { const bool flush_to_zero[] = {true, true, false, false}; const bool denormals_are_zero[] = {true, false, true, false}; for (int i = 0; i < 4; ++i) { const DenormalState state = DenormalState(flush_to_zero[i], denormals_are_zero[i]); EXPECT_EQ(state.flush_to_zero(), flush_to_zero[i]); EXPECT_EQ(state.denormals_are_zero(), denormals_are_zero[i]); } } uint32_t bits(float x) { uint32_t out; memcpy(&out, &x, sizeof(float)); return out; } void CheckDenormalHandling(const DenormalState& state) { volatile float denormal_output = std::numeric_limits<float>::min(); denormal_output *= 0.25f; if (state.flush_to_zero()) { EXPECT_EQ(bits(denormal_output), 0x0); } else { EXPECT_NE(bits(denormal_output), 0x0); } volatile float normal_output = std::numeric_limits<float>::denorm_min(); normal_output *= std::numeric_limits<float>::max(); if (state.denormals_are_zero()) { EXPECT_EQ(bits(normal_output), 0x0); } else { EXPECT_NE(bits(normal_output), 0x0); } } TEST(DenormalTest, GetAndSetStateWorkWithCorrectFlushing) { const DenormalState states[] = { DenormalState(true, true), DenormalState(true, false), DenormalState(false, true), DenormalState(false, false)}; for (const DenormalState& state : states) { if (SetDenormalState(state)) { EXPECT_EQ(GetDenormalState(), state); CheckDenormalHandling(state); } } } TEST(ScopedRestoreFlushDenormalStateTest, RestoresState) { const DenormalState flush_denormals(true, true); const DenormalState dont_flush_denormals(false, false); const bool can_set_denormal_state = SetDenormalState(flush_denormals) && SetDenormalState(dont_flush_denormals); if (can_set_denormal_state) { SetDenormalState(flush_denormals); { ScopedRestoreFlushDenormalState restore_state; SetDenormalState(dont_flush_denormals); EXPECT_EQ(GetDenormalState(), dont_flush_denormals); } EXPECT_EQ(GetDenormalState(), flush_denormals); SetDenormalState(dont_flush_denormals); { ScopedRestoreFlushDenormalState restore_state; SetDenormalState(flush_denormals); EXPECT_EQ(GetDenormalState(), flush_denormals); } EXPECT_EQ(GetDenormalState(), dont_flush_denormals); } } TEST(ScopedFlushDenormalTest, SetsFlushingAndRestoresState) { const DenormalState flush_denormals(true, true); const DenormalState dont_flush_denormals(false, false); const bool can_set_denormal_state = SetDenormalState(flush_denormals) && SetDenormalState(dont_flush_denormals); if (can_set_denormal_state) { SetDenormalState(dont_flush_denormals); { ScopedFlushDenormal scoped_flush_denormal; EXPECT_EQ(GetDenormalState(), flush_denormals); } EXPECT_EQ(GetDenormalState(), dont_flush_denormals); } } TEST(ScopedDontFlushDenormalTest, SetsNoFlushingAndRestoresState) { const DenormalState flush_denormals(true, true); const DenormalState dont_flush_denormals(false, false); const bool can_set_denormal_state = SetDenormalState(flush_denormals) && SetDenormalState(dont_flush_denormals); if (can_set_denormal_state) { SetDenormalState(flush_denormals); { ScopedDontFlushDenormal scoped_dont_flush_denormal; EXPECT_EQ(GetDenormalState(), dont_flush_denormals); } EXPECT_EQ(GetDenormalState(), flush_denormals); } } } }
https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/denormal.cc
https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/denormal_test.cc
6d708fdcdd4f40537b7fa273371215a6fa3d4423
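Illustrative sketch for the record above (not part of the source): using ScopedFlushDenormal to flush-to-zero within a scope, mirroring the record's test logic. Whether flushing actually engages depends on the CPU — SetDenormalState can fail on platforms without FTZ/DAZ support — and the restore-on-exit behavior is taken from the SetsFlushingAndRestoresState test.

#include <iostream>
#include <limits>

#include "tsl/platform/denormal.h"

int main() {
  // volatile prevents the compiler from constant-folding the multiply,
  // matching the technique used in the record's test.
  volatile float tiny = std::numeric_limits<float>::min();
  {
    tsl::port::ScopedFlushDenormal flush;   // FTZ+DAZ on (if supported)
    volatile float r = tiny * 0.25f;        // denormal result flushed to 0
    std::cout << "inside scope: " << r << "\n";
  }
  volatile float r2 = tiny * 0.25f;         // prior state restored on exit
  std::cout << "after scope:  " << r2 << "\n";
}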
890fb4c1-2de7-4aa0-846d-d9612a5dfe98
cpp
tensorflow/tensorflow
gpu_serving_device_selector
tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.cc
tensorflow/core/common_runtime/gpu/gpu_serving_device_selector_test.cc
#include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h" #include <algorithm> #include <cstdint> #include <memory> #include <utility> #include "absl/base/attributes.h" #include "absl/container/fixed_array.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "absl/time/clock.h" #include "xla/tsl/framework/serving_device_selector.h" #include "tensorflow/core/common_runtime/gpu/gpu_scheduling_metrics_storage.h" namespace tensorflow { namespace gpu { constexpr int64_t kDefaultEstimateNs = 1; ABSL_CONST_INIT int64_t (*NowNs)() = +[]() -> int64_t { return absl::GetCurrentTimeNanos(); }; using DeviceStates = GpuServingDeviceSelector::DeviceStates; GpuServingDeviceSelector::GpuServingDeviceSelector( const int num_devices, std::unique_ptr<ServingDeviceSelector::Policy> device_selector_policy) : device_states_(num_devices), device_selector_policy_(std::move(device_selector_policy)), req_id_counter_(0) {} tsl::DeviceReservation GpuServingDeviceSelector::ReserveDevice( absl::string_view program_fingerprint) { absl::MutexLock lock(&mu_); DeviceStates device_states; device_states.states = absl::Span<const DeviceState>(device_states_); auto [it, emplaced] = execution_info_.try_emplace(program_fingerprint, ExecutionInfo()); const int device_index = device_selector_policy_->SelectDevice(program_fingerprint, device_states); ServingDeviceSelector::EnqueueHelper( device_states_.at(device_index), device_index, it->second, program_fingerprint, 0, req_id_counter_++, 1, 0, NowNs()); return tsl::DeviceReservation(device_index, this); } void GpuServingDeviceSelector::FreeDeviceReservation( const tsl::DeviceReservation& reservation) { Completed(reservation.device_index(), false); } void GpuServingDeviceSelector::Enqueue(int32_t index_on_host, absl::string_view fingerprint) { if (fingerprint.empty()) { LOG(ERROR) << "Empty fingerprint."; return; } absl::MutexLock lock(&mu_); auto [it, emplaced] = execution_info_.try_emplace(fingerprint, ExecutionInfo()); DeviceState& device_state = device_states_.at(index_on_host); ServingDeviceSelector::EnqueueHelper(device_state, index_on_host, it->second, fingerprint, 0, -1, 1, 0, NowNs()); int64_t total_estimated_time_ns = TotalEstimatedTimeTillIdleNs(); GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Set( total_estimated_time_ns); } void GpuServingDeviceSelector::Completed(int32_t index_on_host, bool had_error) { absl::MutexLock lock(&mu_); DeviceState& device_state = device_states_.at(index_on_host); ServingDeviceSelector::CompletedHelper(device_state, index_on_host, 0, min_exec_time_, had_error, NowNs()); int64_t total_estimated_time_ns = TotalEstimatedTimeTillIdleNs(); GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Set( total_estimated_time_ns); } int64_t GpuServingDeviceSelector::TotalEstimatedTimeTillIdleNs() { int64_t total_gpu_load_ns = 0; for (const auto& device_state : device_states_) { total_gpu_load_ns += ServingDeviceSelector::EstimateTimeTillIdleNs( device_state, 0, min_exec_time_.value_or(kDefaultEstimateNs), NowNs()); } return total_gpu_load_ns; } void GpuServingDeviceSelector::OverwriteNowNsFunctionForTest( int64_t (*now_ns)()) { NowNs = now_ns; } } }
#include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h" #include <cstdint> #include <memory> #include <string> #include <utility> #include <gtest/gtest.h> #include "absl/time/clock.h" #include "xla/tsl/framework/serving_device_selector.h" #include "xla/tsl/framework/serving_device_selector_policies.h" #include "tensorflow/core/common_runtime/gpu/gpu_scheduling_metrics_storage.h" namespace tensorflow { namespace gpu { class ServingDeviceSelectorTestHelper { public: ServingDeviceSelectorTestHelper() { GpuServingDeviceSelector::OverwriteNowNsFunctionForTest(NowNs); now_ns_ = 0; } ~ServingDeviceSelectorTestHelper() { GpuServingDeviceSelector::OverwriteNowNsFunctionForTest( absl::GetCurrentTimeNanos); } static void ElapseNs(int64_t ns) { now_ns_ += ns; } static int64_t NowNs() { return now_ns_; } private: static int64_t now_ns_; }; int64_t ServingDeviceSelectorTestHelper::now_ns_ = 0; namespace { TEST(GpuServingDeviceSelector, Basic) { GpuServingDeviceSelector selector(2, std::make_unique<tsl::RoundRobinPolicy>()); const std::string program_fingerprint = "TensorFlow"; tsl::DeviceReservation reservation = selector.ReserveDevice(program_fingerprint); EXPECT_EQ(reservation.device_index(), 0); reservation = selector.ReserveDevice(program_fingerprint); EXPECT_EQ(reservation.device_index(), 1); reservation = selector.ReserveDevice(program_fingerprint); EXPECT_EQ(reservation.device_index(), 0); } TEST(GpuServingDeviceSelector, DefaultPolicyOnlyEnqueueCall) { ServingDeviceSelectorTestHelper helper; auto policy = std::make_unique<tsl::RoundRobinPolicy>(); auto serving_device_selector = std::make_unique<tensorflow::gpu::GpuServingDeviceSelector>( 4, std::move(policy)); serving_device_selector->Enqueue(3, "16ms"); serving_device_selector->Enqueue(2, "8ms"); serving_device_selector->Enqueue(1, "4ms"); serving_device_selector->Enqueue(0, "2ms"); serving_device_selector->Enqueue(3, "16ms"); serving_device_selector->Enqueue(2, "8ms"); serving_device_selector->Enqueue(1, "4ms"); serving_device_selector->Enqueue(0, "2ms"); helper.ElapseNs(2e6); serving_device_selector->Completed(0, false); helper.ElapseNs(2e6); serving_device_selector->Completed(0, false); serving_device_selector->Completed(1, false); helper.ElapseNs(4e6); serving_device_selector->Completed(1, false); serving_device_selector->Completed(2, false); helper.ElapseNs(8e6); serving_device_selector->Completed(2, false); serving_device_selector->Completed(3, false); helper.ElapseNs(16e6); serving_device_selector->Completed(3, false); serving_device_selector->Enqueue(3, "16ms"); EXPECT_EQ( GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(), 16e6); serving_device_selector->Enqueue(2, "8ms"); EXPECT_EQ( GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(), 24e6); serving_device_selector->Enqueue(1, "4ms"); EXPECT_EQ( GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(), 28e6); serving_device_selector->Enqueue(0, "2ms"); EXPECT_EQ( GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(), 30e6); helper.ElapseNs(2e6); serving_device_selector->Completed(0, false); EXPECT_EQ( GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(), 22e6); helper.ElapseNs(2e6); serving_device_selector->Completed(1, false); EXPECT_EQ( GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(), 16e6); helper.ElapseNs(4e6); serving_device_selector->Completed(2, false); EXPECT_EQ( GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(), 8e6); 
helper.ElapseNs(8e6); serving_device_selector->Completed(3, false); EXPECT_EQ( GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(), 0e6); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/gpu/gpu_serving_device_selector_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
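A minimal usage sketch distilled from the record above, showing how a round-robin policy cycles ReserveDevice across devices; the function name and fingerprint string are illustrative, and only APIs visible in the source and test above are used.

#include <memory>
#include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include "xla/tsl/framework/serving_device_selector_policies.h"

// Sketch: two devices under tsl::RoundRobinPolicy; successive reservations
// alternate device indices 0, 1, 0, ... as exercised in the Basic test above.
void ExampleRoundRobinReservation() {
  tensorflow::gpu::GpuServingDeviceSelector selector(
      /*num_devices=*/2, std::make_unique<tsl::RoundRobinPolicy>());
  tsl::DeviceReservation r0 = selector.ReserveDevice("prog");  // device 0
  tsl::DeviceReservation r1 = selector.ReserveDevice("prog");  // device 1
  // Each reservation is handed back to the selector via its
  // FreeDeviceReservation override (which calls Completed) when released.
}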
0c94eaf6-93e0-4025-945c-871916febf5b
cpp
google/tensorstore
serialization
tensorstore/serialization/serialization.cc
tensorstore/serialization/serialization_test.cc
#include "tensorstore/serialization/serialization.h" #include <cstring> #include <string_view> #include "absl/status/status.h" #include "tensorstore/util/span.h" #include "tensorstore/util/str_cat.h" namespace tensorstore { namespace serialization { namespace internal_serialization { void FailNonNull(DecodeSource& source) { source.Fail(serialization::DecodeError("Expected non-null value")); } void FailEof(DecodeSource& source) { source.Fail(serialization::DecodeError("Unexpected end of input")); } } void EncodeSink::Fail(absl::Status status) { assert(!status.ok()); writer().Fail(std::move(status)); } void DecodeSource::Fail(absl::Status status) { assert(!status.ok()); reader().Fail(std::move(status)); } absl::Status DecodeError() { return absl::DataLossError("Failed to decode value"); } absl::Status DecodeError(std::string_view message) { return absl::DataLossError(tensorstore::StrCat("Error decoding: ", message)); } namespace internal_serialization { absl::Status NonSerializableError() { return absl::InvalidArgumentError("Serialization not supported"); } } } }
#include "tensorstore/serialization/serialization.h" #include <cstdint> #include <map> #include <set> #include <string> #include <tuple> #include <variant> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorstore/serialization/std_map.h" #include "tensorstore/serialization/std_optional.h" #include "tensorstore/serialization/std_set.h" #include "tensorstore/serialization/std_tuple.h" #include "tensorstore/serialization/std_variant.h" #include "tensorstore/serialization/std_vector.h" #include "tensorstore/serialization/test_util.h" #include "tensorstore/util/result.h" #include "tensorstore/util/status_testutil.h" namespace { using ::tensorstore::serialization::IsNonSerializableLike; using ::tensorstore::serialization::NonSerializable; using ::tensorstore::serialization::SerializationRoundTrip; using ::tensorstore::serialization::TestSerializationRoundTrip; TEST(SerializationTest, Bool) { TestSerializationRoundTrip(true); TestSerializationRoundTrip(false); } TEST(SerializationTest, Float) { TestSerializationRoundTrip(3.14f); TestSerializationRoundTrip(0.0f); } TEST(SerializationTest, String) { TestSerializationRoundTrip(std::string("abcdefg")); TestSerializationRoundTrip(std::string("")); } TEST(CordTest, SerializationRoundTrip) { TestSerializationRoundTrip(absl::Cord("")); TestSerializationRoundTrip(absl::Cord("abc")); } TEST(SerializationTest, Int32) { TestSerializationRoundTrip(static_cast<int32_t>(0)); TestSerializationRoundTrip(static_cast<int32_t>(3)); TestSerializationRoundTrip(static_cast<int32_t>(2147483647)); TestSerializationRoundTrip(static_cast<int32_t>(-2147483648)); } TEST(SerializationTest, VectorInt) { TestSerializationRoundTrip(std::vector<int>{}); TestSerializationRoundTrip(std::vector<int>{1, 2, 3}); } TEST(SerializationTest, VectorString) { TestSerializationRoundTrip(std::vector<std::string>{}); TestSerializationRoundTrip(std::vector<std::string>{"a", "b", "def"}); } TEST(SerializationTest, VectorVectorString) { TestSerializationRoundTrip( std::vector<std::vector<std::string>>{{"a", "b", "def"}, {"e", "f"}}); } TEST(SerializationTest, Map) { TestSerializationRoundTrip(std::map<int, std::string>{{1, "a"}, {2, "b"}}); } TEST(SerializationTest, Set) { TestSerializationRoundTrip(std::set<int>{1, 2, 3}); } TEST(SerializationTest, Tuple) { TestSerializationRoundTrip( std::tuple(std::string("abc"), 3, std::string("def"))); } TEST(SerializationTest, UniquePtrNull) { std::unique_ptr<int> ptr; TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr)); EXPECT_FALSE(ptr2); } TEST(SerializationTest, UniquePtrNonNull) { auto ptr = std::make_unique<int>(5); TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr)); EXPECT_THAT(ptr2, ::testing::Pointee(5)); } TEST(SerializationTest, SharedPtrNull) { std::shared_ptr<int> ptr; TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr)); EXPECT_FALSE(ptr2); } TEST(SerializationTest, SharedPtrNonNull) { auto ptr = std::make_shared<int>(5); TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr)); EXPECT_THAT(ptr2, ::testing::Pointee(5)); } TEST(SerializationTest, SharedPtrDuplicate) { auto ptr = std::make_shared<int>(5); TENSORSTORE_ASSERT_OK_AND_ASSIGN( auto tuple2, SerializationRoundTrip(std::make_tuple(ptr, ptr))); EXPECT_THAT(std::get<0>(tuple2), ::testing::Pointee(5)); EXPECT_EQ(std::get<0>(tuple2), std::get<1>(tuple2)); } struct Foo { std::string a; std::string b; constexpr static auto ApplyMembers = [](auto& x, auto f) { return f(x.a, x.b); }; 
bool operator==(const Foo& other) const { return a == other.a && b == other.b; } }; TEST(SerializationTest, ApplyMembers) { TestSerializationRoundTrip(Foo{"xyz", "abcd"}); TestSerializationRoundTrip(Foo{"", "abcd"}); } TEST(SerializationTest, Optional) { TestSerializationRoundTrip(std::optional<int>()); TestSerializationRoundTrip(std::optional<int>(42)); } TEST(SerializationTest, Variant) { TestSerializationRoundTrip(std::variant<int, std::string>(42)); TestSerializationRoundTrip(std::variant<int, std::string>("abc")); TestSerializationRoundTrip(std::variant<int, int>(std::in_place_index<1>, 1)); TestSerializationRoundTrip(std::variant<int, int>(std::in_place_index<0>, 0)); } static_assert(!IsNonSerializableLike<Foo>); static_assert(!IsNonSerializableLike<std::pair<Foo, Foo>>); static_assert(IsNonSerializableLike<NonSerializable<Foo>>); static_assert(IsNonSerializableLike<std::pair<Foo, NonSerializable<Foo>>>); }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/serialization.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/serialization_test.cc
4f887a6430414cd6088e1743555015b10f116d50
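A minimal sketch of the ApplyMembers round-trip pattern exercised by the tests above; the Point struct and test name are hypothetical stand-ins for the test's Foo.

#include <string>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"

// Hypothetical aggregate made serializable by exposing its members via
// ApplyMembers, exactly as the Foo struct in the test above does.
struct Point {
  int x;
  std::string label;
  constexpr static auto ApplyMembers = [](auto& p, auto f) {
    return f(p.x, p.label);
  };
  bool operator==(const Point& other) const {
    return x == other.x && label == other.label;
  }
};

TEST(PointTest, RoundTrip) {
  // Encodes the value, decodes it back, and asserts equality.
  tensorstore::serialization::TestSerializationRoundTrip(Point{3, "origin"});
}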
823b8659-b857-41eb-84fe-0909c8614260
cpp
tensorflow/tensorflow
tensor_slice_reader
tensorflow/core/util/tensor_slice_reader.cc
tensorflow/core/util/tensor_slice_reader_test.cc
#include "tensorflow/core/util/tensor_slice_reader.h" #include <climits> #include <memory> #include <utility> #include <vector> #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/framework/versions.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/io/iterator.h" #include "tensorflow/core/lib/io/table.h" #include "tensorflow/core/lib/io/table_options.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/saved_tensor_slice_util.h" #include "tensorflow/core/util/tensor_slice_util.h" namespace tensorflow { namespace checkpoint { TensorSliceReader::Table::~Table() = default; namespace { class TensorSliceReaderTable : public TensorSliceReader::Table { public: explicit TensorSliceReaderTable(RandomAccessFile* f, table::Table* t) : file_(f), table_(t) {} ~TensorSliceReaderTable() override { delete table_; delete file_; } bool Get(const string& key, string* value) override { std::unique_ptr<table::Iterator> iter(table_->NewIterator()); iter->Seek(key); if (iter->Valid() && iter->key() == key) { StringPiece v = iter->value(); value->assign(v.data(), v.size()); return true; } else { return false; } } private: RandomAccessFile* file_; table::Table* table_; }; } Status OpenTableTensorSliceReader(const string& fname, TensorSliceReader::Table** result) { *result = nullptr; Env* env = Env::Default(); std::unique_ptr<RandomAccessFile> f; Status s = env->NewRandomAccessFile(fname, &f); if (s.ok()) { uint64 file_size; s = env->GetFileSize(fname, &file_size); if (s.ok()) { table::Options options; table::Table* table; s = table::Table::Open(options, f.get(), file_size, &table); if (s.ok()) { *result = new TensorSliceReaderTable(f.release(), table); return absl::OkStatus(); } else { s = errors::CreateWithUpdatedMessage( s, strings::StrCat(s.message(), ": perhaps your file is in a different " "file format and you need to use a " "different restore operator?")); } } } LOG(WARNING) << "Could not open " << fname << ": " << s; return s; } TensorSliceReader::TensorSliceReader(const string& filepattern) : TensorSliceReader(filepattern, OpenTableTensorSliceReader, kLoadAllShards) {} TensorSliceReader::TensorSliceReader(const string& filepattern, OpenTableFunction open_function) : TensorSliceReader(filepattern, std::move(open_function), kLoadAllShards) { } TensorSliceReader::TensorSliceReader(const string& filepattern, OpenTableFunction open_function, int preferred_shard) : filepattern_(filepattern), open_function_(std::move(open_function)) { VLOG(1) << "TensorSliceReader for " << filepattern; Status s = Env::Default()->GetMatchingPaths(filepattern, &fnames_); if (!s.ok()) { status_ = errors::InvalidArgument( "Unsuccessful TensorSliceReader constructor: " "Failed to get matching files on ", filepattern, ": ", s.ToString()); return; } if (fnames_.empty()) { status_ = errors::NotFound( "Unsuccessful TensorSliceReader constructor: " "Failed to find any matching files for ", filepattern); return; } sss_.resize(fnames_.size()); for (size_t shard = 0; shard < fnames_.size(); ++shard) { fname_to_index_.insert(std::make_pair(fnames_[shard], shard)); } if (preferred_shard == kLoadAllShards || fnames_.size() == 1 || static_cast<size_t>(preferred_shard) >= fnames_.size()) { LoadAllShards(); } else { VLOG(1) << "Loading shard " << preferred_shard << " for " << filepattern_; 
LoadShard(preferred_shard); } } void TensorSliceReader::LoadShard(int shard) const { CHECK_LT(shard, sss_.size()); if (sss_[shard] || !status_.ok()) { return; } string value; SavedTensorSlices sts; const string fname = fnames_[shard]; VLOG(1) << "Reading meta data from file " << fname << "..."; Table* table; Status s = open_function_(fname, &table); if (!s.ok()) { status_ = errors::DataLoss("Unable to open table file ", fname, ": ", s.ToString()); return; } sss_[shard].reset(table); if (!(table->Get(kSavedTensorSlicesKey, &value) && ParseProtoUnlimited(&sts, value))) { status_ = errors::Internal( "Failed to find the saved tensor slices at the beginning of the " "checkpoint file: ", fname); return; } status_ = CheckVersions(sts.meta().versions(), TF_CHECKPOINT_VERSION, TF_CHECKPOINT_VERSION_MIN_PRODUCER, "Checkpoint", "checkpoint"); if (!status_.ok()) return; for (const SavedSliceMeta& ssm : sts.meta().tensor()) { TensorShape ssm_shape; status_ = TensorShape::BuildTensorShapeBase(ssm.shape(), &ssm_shape); if (!status_.ok()) return; for (const TensorSliceProto& tsp : ssm.slice()) { TensorSlice ss_slice; status_ = TensorSlice::BuildTensorSlice(tsp, &ss_slice); if (!status_.ok()) return; status_ = RegisterTensorSlice(ssm.name(), ssm_shape, ssm.type(), fname, ss_slice, &tensors_); if (!status_.ok()) return; } } } void TensorSliceReader::LoadAllShards() const { VLOG(1) << "Loading all shards for " << filepattern_; for (size_t i = 0; i < fnames_.size() && status_.ok(); ++i) { LoadShard(i); } all_shards_loaded_ = true; } const TensorSliceSet* TensorSliceReader::FindTensorSlice( const string& name, const TensorSlice& slice, std::vector<std::pair<TensorSlice, string>>* details) const { const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name); if (tss && !tss->QueryMeta(slice, details)) { return nullptr; } return tss; } TensorSliceReader::~TensorSliceReader() { for (auto& temp : tensors_) { delete temp.second; } tensors_.clear(); } bool TensorSliceReader::HasTensor(const string& name, TensorShape* shape, DataType* type) const { mutex_lock l(mu_); const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name); if (!tss && !all_shards_loaded_) { VLOG(1) << "Did not find tensor in preferred shard, loading all shards: " << name; LoadAllShards(); tss = gtl::FindPtrOrNull(tensors_, name); } if (tss) { if (shape) { *shape = tss->shape(); } if (type) { *type = tss->type(); } return true; } else { return false; } } Status TensorSliceReader::GetTensor( const string& name, std::unique_ptr<tensorflow::Tensor>* out_tensor) const { DataType type; TensorShape shape; TensorSlice slice; { mutex_lock l(mu_); const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name); if (tss == nullptr) { return errors::NotFound(name, " not found in checkpoint file"); } if (tss->Slices().size() > 1) { return errors::Unimplemented("Sliced checkpoints are not supported"); } type = tss->type(); shape = tss->shape(); slice = tss->Slices().begin()->second.slice; } std::unique_ptr<tensorflow::Tensor> t(new tensorflow::Tensor); Status s = tensorflow::Tensor::BuildTensor(type, shape, t.get()); if (!s.ok()) return s; for (const auto d : shape.dim_sizes()) { if (d == LLONG_MAX) { return errors::InvalidArgument("Unable to read dimensions of size ", LLONG_MAX, ". 
Got shape: ", shape.DebugString()); } } bool success = false; #define READER_COPY(dt) \ case dt: \ success = CopySliceData(name, slice, \ t->flat<EnumToDataType<dt>::Type>().data()); \ break; switch (type) { READER_COPY(DT_FLOAT); READER_COPY(DT_DOUBLE); READER_COPY(DT_INT32); READER_COPY(DT_UINT8); READER_COPY(DT_INT16); READER_COPY(DT_INT8); READER_COPY(DT_INT64); READER_COPY(DT_STRING); READER_COPY(DT_BOOL); default: return errors::Unimplemented("Data type not supported"); } #undef READER_COPY if (!success) { return errors::NotFound(name, " not found in checkpoint file"); } std::swap(*out_tensor, t); return absl::OkStatus(); } TensorSliceReader::VarToShapeMap TensorSliceReader::GetVariableToShapeMap() const { VarToShapeMap name_to_shape; if (status().ok()) { for (auto& e : Tensors()) { name_to_shape[e.first] = e.second->shape(); } } return name_to_shape; } TensorSliceReader::VarToDataTypeMap TensorSliceReader::GetVariableToDataTypeMap() const { VarToDataTypeMap name_to_dtype; if (status().ok()) { for (auto& e : Tensors()) { name_to_dtype[e.first] = e.second->type(); } } return name_to_dtype; } const string TensorSliceReader::DebugString() const { string shape_str; if (status().ok()) { for (const auto& e : Tensors()) { strings::StrAppend(&shape_str, e.first, " (", DataType_Name(e.second->type()), ") ", e.second->shape().DebugString()); const int num_slices = e.second->Slices().size(); if (num_slices > 1) { strings::StrAppend(&shape_str, ", ", num_slices, " slices"); } strings::StrAppend(&shape_str, "\n"); } } return shape_str; } } }
#include "tensorflow/core/util/tensor_slice_reader.h" #include <functional> #include <memory> #include <string> #include <utility> #include <vector> #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/io/iterator.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/io/table.h" #include "tensorflow/core/lib/io/table_builder.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/saved_tensor_slice.pb.h" #include "tensorflow/core/util/saved_tensor_slice_util.h" #include "tensorflow/core/util/tensor_slice_reader_cache.h" #include "tensorflow/core/util/tensor_slice_writer.h" namespace tensorflow { namespace checkpoint { namespace { void SimpleFloatHelper( const TensorSliceWriter::CreateBuilderFunction& create_function, TensorSliceReader::OpenTableFunction open_function) { const string fname_base = io::JoinPath(testing::TmpDir(), "float_checkpoint"); TensorShape shape({4, 5}); { const string fname = strings::StrCat(fname_base, "_0"); TensorSliceWriter writer(fname, create_function); const float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); TF_CHECK_OK(writer.Finish()); } { const string fname = strings::StrCat(fname_base, "_1"); TensorSliceWriter writer(fname, create_function); { const float data[] = {10, 11, 12, 15, 16, 17}; TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } { const float data[] = {18, 19}; TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } TF_CHECK_OK(writer.Finish()); } const string filepattern = strings::StrCat(fname_base, "_*"); TensorSliceReader reader(filepattern, std::move(open_function)); TF_EXPECT_OK(reader.status()); EXPECT_EQ(2, reader.num_files()); { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("test", &shape, &type)); EXPECT_EQ("[4,5]", shape.DebugString()); EXPECT_EQ(DT_FLOAT, type); EXPECT_FALSE(reader.HasTensor("don't exist", nullptr, nullptr)); } { TensorSlice s = TensorSlice::ParseOrDie("0,2:-"); float expected[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; float results[10]; EXPECT_TRUE(reader.CopySliceData("test", s, results)); for (int i = 0; i < 10; ++i) { EXPECT_EQ(expected[i], results[i]); } } { TensorSlice s = TensorSlice::ParseOrDie("1,1:-"); float expected[] = {5, 6, 7, 8, 9}; float results[5]; EXPECT_TRUE(reader.CopySliceData("test", s, results)); for (int i = 0; i < 5; ++i) { EXPECT_EQ(expected[i], results[i]); } } { TensorSlice s = TensorSlice::ParseOrDie("1,2:2,3"); float results[6]; EXPECT_FALSE(reader.CopySliceData("test", s, results)); } } TEST(TensorSliceReaderTest, SimpleFloat) { SimpleFloatHelper(CreateTableTensorSliceBuilder, OpenTableTensorSliceReader); } template <typename T, typename U> void SimpleIntXHelper( const TensorSliceWriter::CreateBuilderFunction& create_function, TensorSliceReader::OpenTableFunction open_function, 
const string& checkpoint_file) { const string fname_base = io::JoinPath(testing::TmpDir(), checkpoint_file); TensorShape shape({4, 5}); { const string fname = strings::StrCat(fname_base, "_0"); TensorSliceWriter writer(fname, create_function); const T data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); TF_CHECK_OK(writer.Finish()); } { const string fname = strings::StrCat(fname_base, "_1"); TensorSliceWriter writer(fname, create_function); { const T data[] = {10, 11, 12, 15, 16, 17}; TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } { const T data[] = {18, 19}; TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } TF_CHECK_OK(writer.Finish()); } const string filepattern = strings::StrCat(fname_base, "_*"); TensorSliceReader reader(filepattern, std::move(open_function)); TF_EXPECT_OK(reader.status()); EXPECT_EQ(2, reader.num_files()); { TensorShape shape; DataType type; EXPECT_TRUE(reader.HasTensor("test", &shape, &type)); EXPECT_EQ("[4,5]", shape.DebugString()); EXPECT_EQ(DataTypeToEnum<T>::v(), type); EXPECT_FALSE(reader.HasTensor("don't exist", nullptr, nullptr)); } { TensorSlice s = TensorSlice::ParseOrDie("0,2:-"); T expected[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; U results[10]; EXPECT_TRUE(reader.CopySliceData("test", s, results)); for (int i = 0; i < 10; ++i) { EXPECT_EQ(expected[i], results[i]); } } { TensorSlice s = TensorSlice::ParseOrDie("1,1:-"); T expected[] = {5, 6, 7, 8, 9}; U results[5]; EXPECT_TRUE(reader.CopySliceData("test", s, results)); for (int i = 0; i < 5; ++i) { EXPECT_EQ(expected[i], results[i]); } } { TensorSlice s = TensorSlice::ParseOrDie("1,2:2,3"); U results[6]; EXPECT_FALSE(reader.CopySliceData("test", s, results)); } } #define TEST_SIMPLE_INT(TYPE, SAVED_TYPE) \ TEST(TensorSliceReaderTest, Simple##TYPE) { \ SimpleIntXHelper<TYPE, SAVED_TYPE>(CreateTableTensorSliceBuilder, \ OpenTableTensorSliceReader, \ #TYPE "_checkpoint"); \ } TEST_SIMPLE_INT(int32, int32) TEST_SIMPLE_INT(int64_t, int64_t) TEST_SIMPLE_INT(int16, int32) TEST_SIMPLE_INT(int8, int32) TEST_SIMPLE_INT(uint8, int32) void MutateSavedTensorSlices( const std::string& fname, const std::function<std::string(SavedTensorSlices)>& mutator) { table::Options options; options.compression = table::kNoCompression; std::vector<std::pair<std::string, std::string>> entries; { std::unique_ptr<RandomAccessFile> file; TF_CHECK_OK(Env::Default()->NewRandomAccessFile(fname, &file)); uint64 file_size; TF_CHECK_OK(Env::Default()->GetFileSize(fname, &file_size)); table::Table* t; TF_CHECK_OK(table::Table::Open(options, file.get(), file_size, &t)); std::unique_ptr<table::Table> table(t); std::unique_ptr<table::Iterator> it(table->NewIterator()); for (it->Seek(""); it->Valid(); it->Next()) { entries.emplace_back(it->key(), it->value()); } TF_CHECK_OK(it->status()); } { std::unique_ptr<WritableFile> file; TF_CHECK_OK(Env::Default()->NewWritableFile(fname, &file)); table::TableBuilder builder(options, file.get()); for (const auto& entry : entries) { SavedTensorSlices sts; CHECK(sts.ParseFromString(entry.second)); builder.Add(entry.first, mutator(std::move(sts))); } TF_CHECK_OK(builder.Finish()); TF_CHECK_OK(file->Close()); } } TEST(TensorSliceReaderTest, MissingTensorType) { const string fname = io::JoinPath(testing::TmpDir(), "invalid_checkpoint"); TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); 
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TensorShape shape({4, 5}); TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); TF_CHECK_OK(writer.Finish()); MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { if (sts.has_meta()) { for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { tensor.clear_type(); } } return sts.SerializeAsString(); }); TensorSliceReader reader(fname, OpenTableTensorSliceReader); TF_CHECK_OK(reader.status()); EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr)); std::unique_ptr<Tensor> tensor; EXPECT_FALSE(reader.GetTensor("test", &tensor).ok()); } TEST(TensorSliceReaderTest, UnsupportedTensorType) { const string fname = io::JoinPath(testing::TmpDir(), "int32_ref_checkpoint"); TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TensorShape shape({4, 5}); TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); TF_CHECK_OK(writer.Finish()); MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { if (sts.has_meta()) { for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { tensor.set_type(DT_INT32_REF); } } return sts.SerializeAsString(); }); TensorSliceReader reader(fname, OpenTableTensorSliceReader); TF_CHECK_OK(reader.status()); EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr)); std::unique_ptr<Tensor> tensor; EXPECT_FALSE(reader.GetTensor("test", &tensor).ok()); } TEST(TensorSliceReaderTest, NegativeTensorShapeDimension) { const string fname = io::JoinPath(testing::TmpDir(), "negative_dim_checkpoint"); TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TF_CHECK_OK(writer.Add("test", TensorShape({4, 5}), TensorSlice::ParseOrDie("0,2:-"), data)); TF_CHECK_OK(writer.Finish()); MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { if (sts.has_meta()) { for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { for (auto& dim : *tensor.mutable_shape()->mutable_dim()) { dim.set_size(-dim.size()); } } } return sts.SerializeAsString(); }); TensorSliceReader reader(fname, OpenTableTensorSliceReader); EXPECT_FALSE(reader.status().ok()); } TEST(TensorSliceReaderTest, InvalidTensorSlice) { const string fname = io::JoinPath(testing::TmpDir(), "invalid_slice_checkpoint"); TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TF_CHECK_OK(writer.Add("test", TensorShape({4, 5}), TensorSlice::ParseOrDie("0,2:-"), data)); TF_CHECK_OK(writer.Finish()); MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) { if (sts.has_meta()) { for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) { tensor.mutable_slice(0)->mutable_extent(0)->set_length(-10); } } return sts.SerializeAsString(); }); TensorSliceReader reader(fname, OpenTableTensorSliceReader); EXPECT_FALSE(reader.status().ok()); } TEST(TensorSliceReaderTest, MissingTensorData) { const string fname = io::JoinPath(testing::TmpDir(), "missing_data_checkpoint"); TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder); const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TF_ASSERT_OK(writer.Add("test", TensorShape({4, 5}), TensorSlice::ParseOrDie("0,2:-"), data)); TF_ASSERT_OK(writer.Finish()); MutateSavedTensorSlices(fname, [&](SavedTensorSlices sts) { if (sts.has_data()) { Fill(data, 4, sts.mutable_data()->mutable_data()); } return sts.SerializeAsString(); }); TensorSliceReader 
reader(fname, OpenTableTensorSliceReader); TF_ASSERT_OK(reader.status()); EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr)); std::unique_ptr<Tensor> tensor; EXPECT_FALSE(reader.GetTensor("test", &tensor).ok()); } void CachedTensorSliceReaderTesterHelper( const TensorSliceWriter::CreateBuilderFunction& create_function, const TensorSliceReader::OpenTableFunction& open_function) { const string fname_base = io::JoinPath(testing::TmpDir(), "float_checkpoint"); TensorShape shape({4, 5}); { const string fname = strings::StrCat(fname_base, "_0"); TensorSliceWriter writer(fname, create_function); const float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; TensorSlice slice = TensorSlice::ParseOrDie("0,2:-"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); TF_CHECK_OK(writer.Finish()); } { const string fname = strings::StrCat(fname_base, "_1"); TensorSliceWriter writer(fname, create_function); { const float data[] = {10, 11, 12, 15, 16, 17}; TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } { const float data[] = {18, 19}; TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2"); TF_CHECK_OK(writer.Add("test", shape, slice, data)); } TF_CHECK_OK(writer.Finish()); } TensorSliceReaderCache cache; const string filepattern = strings::StrCat(fname_base, "_*"); const TensorSliceReader* reader = cache.GetReader( filepattern, open_function, TensorSliceReader::kLoadAllShards); EXPECT_TRUE(reader != nullptr); EXPECT_EQ(2, reader->num_files()); { TensorShape shape; DataType type; EXPECT_TRUE(reader->HasTensor("test", &shape, &type)); EXPECT_EQ("[4,5]", shape.DebugString()); EXPECT_EQ(DT_FLOAT, type); EXPECT_FALSE(reader->HasTensor("don't exist", nullptr, nullptr)); } const TensorSliceReader* reader2 = cache.GetReader( filepattern, open_function, TensorSliceReader::kLoadAllShards); EXPECT_EQ(reader, reader2); reader = cache.GetReader("file_does_not_exist", open_function, TensorSliceReader::kLoadAllShards); EXPECT_TRUE(reader == nullptr); } TEST(CachedTensorSliceReaderTest, SimpleFloat) { CachedTensorSliceReaderTesterHelper(CreateTableTensorSliceBuilder, OpenTableTensorSliceReader); } static void VersionTest(const VersionDef& versions, const string& error) { const string path = io::JoinPath(testing::TmpDir(), "checkpoint"); { SavedTensorSlices sts; *sts.mutable_meta()->mutable_versions() = versions; string contents; EXPECT_TRUE(sts.SerializeToString(&contents)); TensorSliceWriter::Builder* builder; TF_ASSERT_OK(CreateTableTensorSliceBuilder(path, &builder)); builder->Add(kSavedTensorSlicesKey, contents); int64_t file_size; TF_EXPECT_OK(builder->Finish(&file_size)); delete builder; } TensorSliceReader reader(path, OpenTableTensorSliceReader); EXPECT_TRUE(reader.status().code() == error::INVALID_ARGUMENT && absl::StartsWith(reader.status().message(), error)) << "Expected error starting with '" << errors::InvalidArgument(error) << "', got '" << reader.status() << "'"; } TEST(CheckpointVersionTest, MinConsumer) { VersionDef versions; versions.set_producer(TF_CHECKPOINT_VERSION + 1); versions.set_min_consumer(TF_CHECKPOINT_VERSION + 1); VersionTest( versions, strings::StrCat("Checkpoint min consumer version ", TF_CHECKPOINT_VERSION + 1, " above current version ", TF_CHECKPOINT_VERSION, " for TensorFlow")); } TEST(CheckpointVersionTest, MinProducer) { VersionDef versions; versions.set_producer(TF_CHECKPOINT_VERSION_MIN_PRODUCER - 1); VersionTest(versions, strings::StrCat("Checkpoint producer version ", TF_CHECKPOINT_VERSION_MIN_PRODUCER - 1, " below min 
producer ", TF_CHECKPOINT_VERSION_MIN_PRODUCER, " supported by TensorFlow")); } TEST(CheckpointVersionTest, BadConsumer) { VersionDef versions; versions.set_producer(TF_CHECKPOINT_VERSION + 1); versions.add_bad_consumers(TF_CHECKPOINT_VERSION); VersionTest( versions, strings::StrCat( "Checkpoint disallows consumer version ", TF_CHECKPOINT_VERSION, ". Please upgrade TensorFlow: this version is likely buggy.")); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_slice_reader.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/tensor_slice_reader_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
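A minimal write-then-read sketch assembled from the writer/reader APIs shown in this record; the file path, tensor name, and function name are illustrative.

#include <string>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_slice.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/tensor_slice_reader.h"
#include "tensorflow/core/util/tensor_slice_writer.h"

// Writes rows [0, 2) of a 4x5 float tensor as one slice, then reads it back.
void ExampleWriteThenRead() {
  using tensorflow::TensorShape;
  using tensorflow::TensorSlice;
  namespace cp = tensorflow::checkpoint;
  const std::string fname = "/tmp/float_checkpoint_0";  // illustrative path
  const TensorShape shape({4, 5});
  {
    cp::TensorSliceWriter writer(fname, cp::CreateTableTensorSliceBuilder);
    const float data[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
    // "0,2:-" selects rows starting at 0 with length 2, and all columns.
    TF_CHECK_OK(
        writer.Add("test", shape, TensorSlice::ParseOrDie("0,2:-"), data));
    TF_CHECK_OK(writer.Finish());
  }
  cp::TensorSliceReader reader(fname, cp::OpenTableTensorSliceReader);
  TF_CHECK_OK(reader.status());
  float results[10];
  // Returns false if the requested slice is not fully covered by the file.
  reader.CopySliceData("test", TensorSlice::ParseOrDie("0,2:-"), results);
}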
04744e90-f803-47c0-9f89-9e06a981cff5
cpp
google/tensorstore
dim_expression
python/tensorstore/dim_expression.cc
tensorstore/index_space/dim_expression_test.cc
#include <pybind11/pybind11.h> #include <pybind11/stl.h> #include "python/tensorstore/dim_expression.h" #include <memory> #include <string> #include <string_view> #include <utility> #include <variant> #include <vector> #include "absl/base/optimization.h" #include "absl/container/inlined_vector.h" #include "absl/status/status.h" #include "absl/strings/escaping.h" #include "python/tensorstore/index.h" #include "python/tensorstore/numpy_indexing_spec.h" #include "python/tensorstore/sequence_parameter.h" #include "python/tensorstore/serialization.h" #include "python/tensorstore/subscript_method.h" #include "python/tensorstore/tensorstore_module_components.h" #include "tensorstore/index.h" #include "tensorstore/index_space/dim_expression.h" #include "tensorstore/index_space/dimension_identifier.h" #include "tensorstore/index_space/dimension_index_buffer.h" #include "tensorstore/index_space/index_transform.h" #include "tensorstore/index_space/internal/numpy_indexing_spec.h" #include "tensorstore/internal/global_initializer.h" #include "tensorstore/internal/intrusive_ptr.h" #include "tensorstore/serialization/std_optional.h" #include "tensorstore/serialization/std_variant.h" #include "tensorstore/serialization/std_vector.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status.h" #include "tensorstore/util/str_cat.h" namespace tensorstore { namespace internal_python { namespace py = ::pybind11; void AppendDimensionSelectionRepr(std::string* out, span<const DynamicDimSpec> dims) { if (dims.empty()) { tensorstore::StrAppend(out, "()"); } for (size_t i = 0; i < dims.size(); ++i) { const auto& d = dims[i]; if (auto* index = std::get_if<DimensionIndex>(&d)) { tensorstore::StrAppend(out, (i == 0 ? "" : ","), *index); } else if (auto* label = std::get_if<std::string>(&d)) { tensorstore::StrAppend(out, (i == 0 ? "" : ","), "'", absl::CHexEscape(*label), "'"); } else { const auto& slice = std::get<DimRangeSpec>(d); tensorstore::StrAppend(out, (i == 0 ? "" : ","), slice); } } } std::string PythonDimExpressionChainTail::repr() const { std::string out = "d["; AppendDimensionSelectionRepr(&out, dims); tensorstore::StrAppend(&out, "]"); return out; } std::string PythonTranslateOp::repr() const { constexpr auto op_suffix = [](TranslateOpKind kind) { switch (kind) { case TranslateOpKind::kTranslateTo: return "to"; case TranslateOpKind::kTranslateBy: return "by"; case TranslateOpKind::kTranslateBackwardBy: return "backward_by"; } ABSL_UNREACHABLE(); }; return tensorstore::StrCat( ".translate_", op_suffix(translate_kind), "[", IndexVectorRepr(indices, true, true), "]"); } Result<IndexTransform<>> PythonTranslateOp::Apply(IndexTransform<> transform, DimensionIndexBuffer* buffer, bool domain_only) const { return internal_index_space::ApplyTranslate( std::move(transform), buffer, indices, translate_kind, domain_only); } std::string PythonStrideOp::repr() const { return tensorstore::StrCat( ".stride[", IndexVectorRepr(strides, true, true), "]"); } Result<IndexTransform<>> PythonStrideOp::Apply(IndexTransform<> transform, DimensionIndexBuffer* buffer, bool domain_only) const { return internal_index_space::ApplyStrideOp(std::move(transform), buffer, strides, domain_only); } std::string PythonLabelOp::repr() const { std::string r = ".label["; for (size_t i = 0; i < labels.size(); ++i) { tensorstore::StrAppend(&r, i == 0 ? 
"" : ",", "'", absl::CHexEscape(labels[i]), "'"); } tensorstore::StrAppend(&r, "]"); return r; } Result<IndexTransform<>> PythonLabelOp::Apply(IndexTransform<> transform, DimensionIndexBuffer* buffer, bool domain_only) const { return internal_index_space::ApplyLabel(std::move(transform), buffer, span(labels), domain_only); } std::string PythonDiagonalOp::repr() const { return ".diagonal"; } Result<IndexTransform<>> PythonDiagonalOp::Apply(IndexTransform<> transform, DimensionIndexBuffer* buffer, bool domain_only) const { return internal_index_space::ApplyDiagonal(std::move(transform), buffer, domain_only); } std::string PythonTransposeOp::repr() const { std::string out = ".transpose["; AppendDimensionSelectionRepr(&out, target_dim_specs); tensorstore::StrAppend(&out, "]"); return out; } Result<IndexTransform<>> PythonTransposeOp::Apply(IndexTransform<> transform, DimensionIndexBuffer* buffer, bool domain_only) const { return internal_index_space::ApplyTransposeToDynamic( std::move(transform), buffer, target_dim_specs, domain_only); } std::string PythonChangeImplicitStateOp::repr() const { std::string out = ".mark_bounds_implicit["; constexpr auto format_bound = [](std::optional<bool> value) -> std::string_view { if (!value.has_value()) return ""; return *value ? "True" : "False"; }; if (lower_implicit == upper_implicit && lower_implicit) { tensorstore::StrAppend(&out, format_bound(lower_implicit)); } else { tensorstore::StrAppend(&out, format_bound(lower_implicit), ":", format_bound(upper_implicit)); } out += ']'; return out; } Result<IndexTransform<>> PythonChangeImplicitStateOp::Apply( IndexTransform<> transform, DimensionIndexBuffer* buffer, bool domain_only) const { const auto do_apply = [&](bool implicit) { if (lower_implicit == implicit || upper_implicit == implicit) { TENSORSTORE_ASSIGN_OR_RETURN( transform, internal_index_space::ApplyChangeImplicitState( std::move(transform), buffer, implicit, lower_implicit == implicit, upper_implicit == implicit, domain_only)); } return absl::OkStatus(); }; TENSORSTORE_RETURN_IF_ERROR(do_apply(false)); TENSORSTORE_RETURN_IF_ERROR(do_apply(true)); return transform; } std::string PythonIndexOp::repr() const { return tensorstore::StrCat(GetIndexingModePrefix(spec.mode), "[", IndexingSpecRepr(spec), "]"); } Result<IndexTransform<>> PythonIndexOp::Apply(IndexTransform<> transform, DimensionIndexBuffer* buffer, bool domain_only) const { TENSORSTORE_ASSIGN_OR_RETURN( auto new_transform, ToIndexTransform(spec, transform.domain(), buffer)); return internal_index_space::ComposeTransforms( std::move(transform), std::move(new_transform), domain_only); } Result<IndexTransform<>> PythonIndexOp::ApplyInitial( span<const DynamicDimSpec> dim_selection, IndexTransform<> transform, DimensionIndexBuffer* buffer, bool domain_only) const { TENSORSTORE_ASSIGN_OR_RETURN( auto new_transform, ToIndexTransform(spec, transform.domain(), dim_selection, buffer)); return internal_index_space::ComposeTransforms( transform, std::move(new_transform), domain_only); } namespace { auto GetOps(const PythonDimExpressionChain& ops) { absl::InlinedVector<const PythonDimExpressionChain*, 4> op_vec; for (auto* op = &ops; op; op = op->parent.get()) { op_vec.push_back(op); } return op_vec; } } std::string PythonDimExpression::repr() const { std::string out; auto op_vec = GetOps(*ops); for (size_t i = op_vec.size(); i--;) { out += op_vec[i]->repr(); } return out; } Result<IndexTransform<>> PythonDimExpressionChainTail::Apply( IndexTransform<> transform, DimensionIndexBuffer* dimensions, bool 
domain_only) const { TENSORSTORE_RETURN_IF_ERROR(internal_index_space::GetDimensions( transform.input_labels(), dims, dimensions)); return transform; } Result<IndexTransform<>> PythonDimExpression::Apply( IndexTransform<> transform, DimensionIndexBuffer* dimensions, bool domain_only) const { auto op_vec = GetOps(*ops); if (op_vec.size() < 2) { return absl::InvalidArgumentError( "Must specify at least one operation in dimension expression"); } auto op_it = op_vec.rbegin(); if (auto first_op_it = std::next(op_it); (*first_op_it)->kind() == DimExpressionOpKind::kIndex) { op_it = std::next(first_op_it); auto* dim_selection_op = static_cast<const PythonDimExpressionChainTail*>(op_vec.back()); TENSORSTORE_ASSIGN_OR_RETURN( transform, (static_cast<const PythonDimExpressionChainOp<PythonIndexOp>*>( *first_op_it)) ->op.ApplyInitial(dim_selection_op->dims, std::move(transform), dimensions, domain_only)); } for (; op_it != op_vec.rend(); ++op_it) { TENSORSTORE_ASSIGN_OR_RETURN( transform, (*op_it)->Apply(std::move(transform), dimensions, domain_only)); } return transform; } [[nodiscard]] bool PythonDimExpression::Encode( serialization::EncodeSink& sink) const { for (auto* op = ops.get(); op; op = op->parent.get()) { if (!serialization::Encode(sink, op->kind()) || !op->Encode(sink)) { return false; } } return true; } namespace { template <typename... T> PythonDimExpressionChain::Ptr MakeChainOp(DimExpressionOpKind kind) { PythonDimExpressionChain::Ptr ptr; [[maybe_unused]] const bool matched = ((kind == T::kind ? ((ptr = internal::MakeIntrusivePtr< PythonDimExpressionChainOp<T>>()), true) : false) || ...); return ptr; } } [[nodiscard]] bool PythonDimExpression::Decode( serialization::DecodeSource& source) { PythonDimExpressionChain::Ptr* next_op = &ops; while (true) { DimExpressionOpKind kind; if (!serialization::Decode(source, kind)) return false; if (kind == DimExpressionOpKind::kDimSelection) { *next_op = internal::MakeIntrusivePtr<PythonDimExpressionChainTail>(); } else { *next_op = MakeChainOp<PythonTranslateOp, PythonStrideOp, PythonLabelOp, PythonDiagonalOp, PythonTransposeOp, PythonChangeImplicitStateOp, PythonIndexOp>(kind); if (!*next_op) { source.Fail(absl::DataLossError("Invalid DimExpression op")); return false; } } if (!const_cast<PythonDimExpressionChain&>(**next_op).Decode(source)) { return false; } if (kind == DimExpressionOpKind::kDimSelection) break; next_op = &const_cast<PythonDimExpressionChain&>(**next_op).parent; } return true; } bool PythonDimExpressionChainTail::Equal( const PythonDimExpressionChain& other) const { return dims == static_cast<const PythonDimExpressionChainTail&>(other).dims; } bool operator==(const PythonDimExpression& a, const PythonDimExpression& b) { const PythonDimExpressionChain* a_op = a.ops.get(); const PythonDimExpressionChain* b_op = b.ops.get(); while (true) { if (!a_op && !b_op) return true; if (!a_op || !b_op) return false; if (a_op->kind() != b_op->kind()) return false; if (!a_op->Equal(*b_op)) return false; a_op = a_op->parent.get(); b_op = b_op->parent.get(); } } namespace { using ClsDimExpression = py::class_<PythonDimExpression, std::shared_ptr<PythonDimExpression>>; using ClsDimensionSelection = py::class_<DimensionSelection, PythonDimExpression, std::shared_ptr<DimensionSelection>>; ClsDimExpression MakeDimExpressionClass(py::module m) { return ClsDimExpression(m, "DimExpression", R"( Specifies an advanced indexing operation. 
:ref:`Dimension expressions<python-dim-expressions>` permit indexing using :ref:`dimension labels<dimension-labels>`, and also support additional operations that cannot be performed with plain :ref:`python-numpy-style-indexing`. Group: Indexing Operations ========== )"); } void DefineDimExpressionAttributes(ClsDimExpression& cls) { using Self = PythonDimExpression; DefineNumpyIndexingMethods( &cls, { { R"( Applies a :ref:`NumPy-style indexing operation<python-dim-expression-numpy-indexing>` with default index array semantics. When using NumPy-style indexing with a dimension expression, all selected dimensions must be consumed by a term of the indexing spec; there is no implicit addition of an `Ellipsis` term to consume any remaining dimensions. Returns: Dimension expression with the indexing operation added. Group: Operations Examples ======== :ref:`Integer indexing<python-indexing-integer>` ------------------------------------------------ >>> transform = ts.IndexTransform(input_labels=['x', 'y', 'z']) >>> transform[ts.d['x'][5]] Rank 2 -> 3 index space transform: Input domain: 0: (-inf*, +inf*) "y" 1: (-inf*, +inf*) "z" Output index maps: out[0] = 5 out[1] = 0 + 1 * in[0] out[2] = 0 + 1 * in[1] >>> transform[ts.d['x', 'z'][5, 6]] Rank 1 -> 3 index space transform: Input domain: 0: (-inf*, +inf*) "y" Output index maps: out[0] = 5 out[1] = 0 + 1 * in[0] out[2] = 6 A single scalar index term applies to all selected dimensions: >>> transform[ts.d['x', 'y'][5]] Rank 1 -> 3 index space transform: Input domain: 0: (-inf*, +inf*) "z" Output index maps: out[0] = 5 out[1] = 5 out[2] = 0 + 1 * in[0] .. seealso:: :ref:`python-indexing-integer` :ref:`Interval indexing<python-indexing-interval>` -------------------------------------------------- >>> transform = ts.IndexTransform(input_labels=['x', 'y', 'z']) >>> transform[ts.d['x'][5:10]] Rank 3 -> 3 index space transform: Input domain: 0: [5, 10) "x" 1: (-inf*, +inf*) "y" 2: (-inf*, +inf*) "z" Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> transform[ts.d['x', 'z'][5:10, 20:30]] Rank 3 -> 3 index space transform: Input domain: 0: [5, 10) "x" 1: (-inf*, +inf*) "y" 2: [20, 30) "z" Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] As an extension, TensorStore allows the ``start``, ``stop``, and ``step`` :py:obj:`python:slice` terms to be vectors rather than scalars: >>> transform[ts.d['x', 'z'][[5, 20]:[10, 30]]] Rank 3 -> 3 index space transform: Input domain: 0: [5, 10) "x" 1: (-inf*, +inf*) "y" 2: [20, 30) "z" Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> transform[ts.d['x', 'z'][[5, 20]:30]] Rank 3 -> 3 index space transform: Input domain: 0: [5, 30) "x" 1: (-inf*, +inf*) "y" 2: [20, 30) "z" Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] As with integer indexing, a single scalar slice applies to all selected dimensions: >>> transform[ts.d['x', 'z'][5:30]] Rank 3 -> 3 index space transform: Input domain: 0: [5, 30) "x" 1: (-inf*, +inf*) "y" 2: [5, 30) "z" Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] .. 
seealso:: :ref:`python-indexing-interval` :ref:`Adding singleton dimensions<python-indexing-newaxis>` ----------------------------------------------------------- Specifying a value of :py:obj:`.newaxis` (equal to `None`) adds a new inert/singleton dimension with :ref:`implicit bounds<implicit-bounds>` :math:`[0, 1)`: >>> transform = ts.IndexTransform(input_labels=['x', 'y']) >>> transform[ts.d[1][ts.newaxis]] Rank 3 -> 2 index space transform: Input domain: 0: (-inf*, +inf*) "x" 1: [0*, 1*) 2: (-inf*, +inf*) "y" Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[2] >>> transform[ts.d[0, -1][ts.newaxis, ts.newaxis]] Rank 4 -> 2 index space transform: Input domain: 0: [0*, 1*) 1: (-inf*, +inf*) "x" 2: (-inf*, +inf*) "y" 3: [0*, 1*) Output index maps: out[0] = 0 + 1 * in[1] out[1] = 0 + 1 * in[2] As with integer indexing, if only a single :python:`ts.newaxis` term is specified, it applies to all selected dimensions: >>> transform[ts.d[0, -1][ts.newaxis]] Rank 4 -> 2 index space transform: Input domain: 0: [0*, 1*) 1: (-inf*, +inf*) "x" 2: (-inf*, +inf*) "y" 3: [0*, 1*) Output index maps: out[0] = 0 + 1 * in[1] out[1] = 0 + 1 * in[2] :py:obj:`.newaxis` terms are only permitted in the first operation of a dimension expression, since in subsequent operations all dimensions of the dimension selection necessarily refer to existing dimensions: .. admonition:: Error :class: failure >>> transform[ts.d[0, 1].translate_by[5][ts.newaxis]] Traceback (most recent call last): ... IndexError: tensorstore.newaxis (`None`) not valid in chained indexing operations It is also an error to use :py:obj:`.newaxis` with dimensions specified by label: .. admonition:: Error :class: failure >>> transform[ts.d['x'][ts.newaxis]] Traceback (most recent call last): ... IndexError: New dimensions cannot be specified by label... .. seealso:: :ref:`python-indexing-newaxis` :ref:`Ellipsis<python-indexing-ellipsis>` ----------------------------------------- Specifying the special `Ellipsis` value (:python:`...`) is equivalent to specifying as many full slices :python:`:` as needed to consume the remaining selected dimensions not consumed by other indexing terms: >>> transform = ts.IndexTransform(input_rank=4) >>> transform[ts.d[:][1, ..., 5].translate_by[3]] Rank 2 -> 4 index space transform: Input domain: 0: (-inf*, +inf*) 1: (-inf*, +inf*) Output index maps: out[0] = 1 out[1] = -3 + 1 * in[0] out[2] = -3 + 1 * in[1] out[3] = 5 An indexing spec consisting solely of an `Ellipsis` term has no effect: >>> transform[ts.d[:][...]] Rank 4 -> 4 index space transform: Input domain: 0: (-inf*, +inf*) 1: (-inf*, +inf*) 2: (-inf*, +inf*) 3: (-inf*, +inf*) Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] out[3] = 0 + 1 * in[3] .. 
seealso:: :ref:`python-indexing-ellipsis` :ref:`Integer array indexing<python-indexing-integer-array>` ------------------------------------------------------------ Specifying an `~numpy.typing.ArrayLike` *index array* of integer values selects the coordinates given by the elements of the array of the selected dimension: >>> x = ts.array([[1, 2, 3], [4, 5, 6]], dtype=ts.int32) >>> x = x[ts.d[:].label['x', 'y']] >>> x[ts.d['y'][[1, 1, 0]]] TensorStore({ 'array': [[2, 2, 1], [5, 5, 4]], 'context': {'data_copy_concurrency': {}}, 'driver': 'array', 'dtype': 'int32', 'transform': { 'input_exclusive_max': [2, 3], 'input_inclusive_min': [0, 0], 'input_labels': ['x', ''], }, }) As in the example above, if only a single index array term is specified, the dimensions of the index array are added to the result domain in place of the selected dimension, consistent with :ref:`direct NumPy-style indexing<python-indexing-integer-array>` in the default index array mode. However, when using NumPy-style indexing with a dimension expression, if more than one index array term is specified, the broadcast dimensions of the index arrays are always added to the beginning of the result domain, i.e. exactly the behavior of :py:obj:`DimExpression.vindex`. Unlike with direct NumPy-style indexing (not with a dimension expression), the behavior does not depend on whether the index array terms apply to consecutive dimensions, since consecutive dimensions are not well-defined for dimension expressions: >>> x = ts.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=ts.int32) >>> x = x[ts.d[:].label['x', 'y', 'z']] >>> x[ts.d['z', 'y'][[1, 0], [1, 1]]] TensorStore({ 'array': [[4, 3], [8, 7]], 'context': {'data_copy_concurrency': {}}, 'driver': 'array', 'dtype': 'int32', 'transform': { 'input_exclusive_max': [2, 2], 'input_inclusive_min': [0, 0], 'input_labels': ['x', ''], }, }) .. seealso:: :ref:`python-indexing-integer-array` :ref:`Boolean array indexing<python-indexing-boolean-array>` ------------------------------------------------------------ Specifying an `~numpy.typing.ArrayLike` of `bool` values is equivalent to specifying a sequence of integer index arrays containing the coordinates of `True` values (in C order), e.g. as obtained from `numpy.nonzero`: Specifying a 1-d `bool` array is equivalent to a single index array of the non-zero coordinates: >>> x = ts.array([[1, 2, 3], [4, 5, 6]], dtype=ts.int32) >>> x = x[ts.d[:].label['x', 'y']] >>> x[ts.d['y'][[False, True, True]]] TensorStore({ 'array': [[2, 3], [5, 6]], 'context': {'data_copy_concurrency': {}}, 'driver': 'array', 'dtype': 'int32', 'transform': { 'input_exclusive_max': [2, 2], 'input_inclusive_min': [0, 0], 'input_labels': ['x', ''], }, }) Equivalently, using an index array: >>> x[ts.d['y'][[1, 2]]] TensorStore({ 'array': [[2, 3], [5, 6]], 'context': {'data_copy_concurrency': {}}, 'driver': 'array', 'dtype': 'int32', 'transform': { 'input_exclusive_max': [2, 2], 'input_inclusive_min': [0, 0], 'input_labels': ['x', ''], }, }) More generally, specifying an ``n``-dimensional `bool` array is equivalent to specifying ``n`` 1-dimensional index arrays, where the ``i``\ th index array specifies the ``i``\ th coordinate of the `True` values: >>> x = ts.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], ... 
dtype=ts.int32) >>> x = x[ts.d[:].label['x', 'y', 'z']] >>> x[ts.d['x', 'z'][[[True, False, False], [True, True, False]]]] TensorStore({ 'array': [[1, 4], [7, 10], [8, 11]], 'context': {'data_copy_concurrency': {}}, 'driver': 'array', 'dtype': 'int32', 'transform': { 'input_exclusive_max': [3, 2], 'input_inclusive_min': [0, 0], 'input_labels': ['', 'y'], }, }) Equivalently, using an index array: >>> x[ts.d['x', 'z'][[0, 1, 1], [0, 0, 1]]] TensorStore({ 'array': [[1, 4], [7, 10], [8, 11]], 'context': {'data_copy_concurrency': {}}, 'driver': 'array', 'dtype': 'int32', 'transform': { 'input_exclusive_max': [3, 2], 'input_inclusive_min': [0, 0], 'input_labels': ['', 'y'], }, }) Note that as with integer array indexing, when using NumPy-styling indexing with a dimension expression, if boolean arrays are applied to more than one selected dimension, the added dimension corresponding to the `True` values is always added to the beginning of the result domain, i.e. exactly the behavior of :py:obj:`DimExpression.vindex`. .. seealso:: :ref:`python-indexing-boolean-array` )"}, {R"( Applies a :ref:`NumPy-style indexing operation<python-dim-expression-numpy-indexing>` with :ref:`outer indexing semantics<python-oindex-indexing>`. This is similar to :py:obj:`DimExpression.__getitem__`, but differs in that any integer or boolean array indexing terms are applied orthogonally: Examples: >>> transform = ts.IndexTransform(input_labels=['x', 'y', 'z']) >>> transform[ts.d['x', 'z'].oindex[[1, 2, 3], [4, 5, 6]]] Rank 3 -> 3 index space transform: Input domain: 0: [0, 3) 1: (-inf*, +inf*) "y" 2: [0, 3) Output index maps: out[0] = 0 + 1 * bounded((-inf, +inf), array(in)), where array = {{{1}}, {{2}}, {{3}}} out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * bounded((-inf, +inf), array(in)), where array = {{{4, 5, 6}}} Returns: Dimension expression with the indexing operation added. See also: - :ref:`python-oindex-indexing` Group: Operations )"}, {R"( Applies a :ref:`NumPy-style indexing operation<python-dim-expression-numpy-indexing>` with :ref:`vectorized indexing semantics<python-vindex-indexing>`. This is similar to :py:obj:`DimExpression.__getitem__`, but differs in that if :python:`indices` specifies any array indexing terms, the broadcasted array dimensions are unconditionally added as the first dimensions of the result domain: Examples: >>> transform = ts.IndexTransform(input_labels=['x', 'y', 'z']) >>> transform[ts.d['y', 'z'].vindex[[1, 2, 3], [4, 5, 6]]] Rank 2 -> 3 index space transform: Input domain: 0: [0, 3) 1: (-inf*, +inf*) "x" Output index maps: out[0] = 0 + 1 * in[1] out[1] = 0 + 1 * bounded((-inf, +inf), array(in)), where array = {{1}, {2}, {3}} out[2] = 0 + 1 * bounded((-inf, +inf), array(in)), where array = {{4}, {5}, {6}} Returns: Dimension expression with the indexing operation added. See also: - :ref:`python-vindex-indexing` Group: Operations )"}, }, [](const Self& self, NumpyIndexingSpecPlaceholder spec) { return self.Extend(PythonIndexOp{ spec.Parse(self.ops->kind() == DimExpressionOpKind::kDimSelection ? NumpyIndexingSpec::Usage::kDimSelectionInitial : NumpyIndexingSpec::Usage::kDimSelectionChained)}); }); constexpr auto apply_op = [](const Self& self, auto&& op) { return self.Extend(std::forward<decltype(op)>(op)); }; DefineTranslateToOp<Self>(cls, apply_op, R"( Translates the domains of the selected input dimensions to the specified origins without affecting the output range. Examples: >>> transform = ts.IndexTransform(input_shape=[4, 5, 6], ... 
input_labels=['x', 'y', 'z']) >>> transform[ts.d['x', 'y'].translate_to[10, 20]] Rank 3 -> 3 index space transform: Input domain: 0: [10, 14) "x" 1: [20, 25) "y" 2: [0, 6) "z" Output index maps: out[0] = -10 + 1 * in[0] out[1] = -20 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> transform[ts.d['x', 'y'].translate_to[10, None]] Rank 3 -> 3 index space transform: Input domain: 0: [10, 14) "x" 1: [0, 5) "y" 2: [0, 6) "z" Output index maps: out[0] = -10 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> transform[ts.d['x', 'y'].translate_to[10]] Rank 3 -> 3 index space transform: Input domain: 0: [10, 14) "x" 1: [10, 15) "y" 2: [0, 6) "z" Output index maps: out[0] = -10 + 1 * in[0] out[1] = -10 + 1 * in[1] out[2] = 0 + 1 * in[2] The new dimension selection is the same as the prior dimension selection. Args: origins: The new origins for each of the selected dimensions. May also be a scalar, e.g. :python:`5`, in which case the same origin is used for all selected dimensions. If :python:`None` is specified for a given dimension, the origin of that dimension remains unchanged. Returns: Dimension expression with the translation operation added. Raises: IndexError: If the number of origins does not match the number of selected dimensions. IndexError: If any of the selected dimensions has a lower bound of :python:`-inf`. Group: Operations )"); DefineTranslateByOp<Self>(cls, apply_op, R"( Translates (shifts) the domains of the selected input dimensions by the specified offsets, without affecting the output range. Examples: >>> transform = ts.IndexTransform(input_inclusive_min=[2, 3, 4], ... input_shape=[4, 5, 6], ... input_labels=['x', 'y', 'z']) >>> transform[ts.d['x', 'y'].translate_by[10, 20]] Rank 3 -> 3 index space transform: Input domain: 0: [12, 16) "x" 1: [23, 28) "y" 2: [4, 10) "z" Output index maps: out[0] = -10 + 1 * in[0] out[1] = -20 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> transform[ts.d['x', 'y'].translate_by[10, None]] Rank 3 -> 3 index space transform: Input domain: 0: [12, 16) "x" 1: [3, 8) "y" 2: [4, 10) "z" Output index maps: out[0] = -10 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> transform[ts.d['x', 'y'].translate_by[10]] Rank 3 -> 3 index space transform: Input domain: 0: [12, 16) "x" 1: [13, 18) "y" 2: [4, 10) "z" Output index maps: out[0] = -10 + 1 * in[0] out[1] = -10 + 1 * in[1] out[2] = 0 + 1 * in[2] The new dimension selection is the same as the prior dimension selection. Args: offsets: The offsets for each of the selected dimensions. May also be a scalar, e.g. :python:`5`, in which case the same offset is used for all selected dimensions. Specifying :python:`None` for a given dimension (equivalent to specifying an offset of :python:`0`) leaves the origin of that dimension unchanged. Returns: Dimension expression with the translation operation added. Raises: IndexError: If the number of offsets does not match the number of selected dimensions. Group: Operations )"); DefineTranslateBackwardByOp<Self>(cls, apply_op, R"( Translates (shifts) the domains of the selected input dimensions backward by the specified offsets, without affecting the output range. Examples: >>> transform = ts.IndexTransform(input_inclusive_min=[2, 3, 4], ... input_shape=[4, 5, 6], ... 
input_labels=['x', 'y', 'z']) >>> transform[ts.d['x', 'y'].translate_backward_by[10, 20]] Rank 3 -> 3 index space transform: Input domain: 0: [-8, -4) "x" 1: [-17, -12) "y" 2: [4, 10) "z" Output index maps: out[0] = 10 + 1 * in[0] out[1] = 20 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> transform[ts.d['x', 'y'].translate_backward_by[10, None]] Rank 3 -> 3 index space transform: Input domain: 0: [-8, -4) "x" 1: [3, 8) "y" 2: [4, 10) "z" Output index maps: out[0] = 10 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> transform[ts.d['x', 'y'].translate_backward_by[10]] Rank 3 -> 3 index space transform: Input domain: 0: [-8, -4) "x" 1: [-7, -2) "y" 2: [4, 10) "z" Output index maps: out[0] = 10 + 1 * in[0] out[1] = 10 + 1 * in[1] out[2] = 0 + 1 * in[2] The new dimension selection is the same as the prior dimension selection. Args: offsets: The offsets for each of the selected dimensions. May also be a scalar, e.g. :python:`5`, in which case the same offset is used for all selected dimensions. Specifying :python:`None` for a given dimension (equivalent to specifying an offset of :python:`0`) leaves the origin of that dimension unchanged. Returns: Dimension expression with the translation operation added. Raises: IndexError: If the number of offsets does not match the number of selected dimensions. Group: Operations )"); DefineSubscriptMethod<Self, struct StrideTag>(&cls, "stride", "_Stride") .def( "__getitem__", +[](const Self& self, OptionallyImplicitIndexVectorOrScalarContainer strides) { return self.Extend( PythonStrideOp{ToIndexVectorOrScalarContainer(strides)}); }, R"( Strides the domains of the selected input dimensions by the specified amounts. For each selected dimension ``i``, the new domain is the set of indices ``x`` such that :python:`x * strides[i]` is contained in the original domain. Examples: >>> transform = ts.IndexTransform(input_inclusive_min=[0, 2, 1], ... input_inclusive_max=[6, 5, 8], ... input_labels=["x", "y", "z"]) >>> transform[ts.d["x", "z"].stride[-2, 3]] Rank 3 -> 3 index space transform: Input domain: 0: [-3, 1) "x" 1: [2, 6) "y" 2: [1, 3) "z" Output index maps: out[0] = 0 + -2 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 3 * in[2] >>> transform[ts.d["x", "z"].stride[3]] Rank 3 -> 3 index space transform: Input domain: 0: [0, 3) "x" 1: [2, 6) "y" 2: [1, 3) "z" Output index maps: out[0] = 0 + 3 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 3 * in[2] Note: :python:`expr.stride[strides]` is similar to the :ref:`NumPy-style slicing<python-indexing-interval>` operation :python:`expr[::strides]` except that the striding is always done with respect to an origin of 0, irrespective of the existing dimension lower bounds. The new dimension selection is the same as the prior dimension selection. Args: strides: Strides for each selected dimension. May also be a scalar, e.g. :python:`2`, in which case the same stride value is used for all selected dimensions. Specifying :python:`None` for a given dimension (equivalent to specifying a stride of :python:`1`) leaves that dimension unchanged. Specifying a stride of :python:`0` is not valid. Returns: Dimension expression with the striding operation added. Raises: IndexError: If the number of strides does not match the number of selected dimensions. 
Group: Operations )", py::arg("strides")); DefineSubscriptMethod<Self, struct TransposeTag>(&cls, "transpose", "_Transpose") .def( "__getitem__", +[](const Self& self, DimensionSelectionLike dim_specs) { return self.Extend(PythonTransposeOp{dim_specs.value.dims()}); }, R"( Transposes the selected dimensions to the specified target indices. A dimension range may be specified to reverse the order of all dimensions: >>> transform = ts.IndexTransform(input_shape=[2, 3, 4], ... input_labels=["x", "y", "z"]) >>> transform[ts.d[:].transpose[::-1]] Rank 3 -> 3 index space transform: Input domain: 0: [0, 4) "z" 1: [0, 3) "y" 2: [0, 2) "x" Output index maps: out[0] = 0 + 1 * in[2] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[0] Dimensions not in the selection retain their relative order and fill in the dimension indices not in :python:`target`: >>> transform = ts.IndexTransform(input_shape=[2, 3, 4], ... input_labels=["x", "y", "z"]) >>> transform[ts.d['x', 'z'].transpose[0, 1]] Rank 3 -> 3 index space transform: Input domain: 0: [0, 2) "x" 1: [0, 4) "z" 2: [0, 3) "y" Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[2] out[2] = 0 + 1 * in[1] A single non-negative :python:`target` index may be specified to reorder all of the selected dimensions to start at the specified index: >>> transform = ts.IndexTransform(input_shape=[2, 3, 4, 5], ... input_labels=["a", "b", "c", "d"]) >>> transform[ts.d['a', 'd'].transpose[1]] Rank 4 -> 4 index space transform: Input domain: 0: [0, 3) "b" 1: [0, 2) "a" 2: [0, 5) "d" 3: [0, 4) "c" Output index maps: out[0] = 0 + 1 * in[1] out[1] = 0 + 1 * in[0] out[2] = 0 + 1 * in[3] out[3] = 0 + 1 * in[2] A single negative :python:`target` index may be specified to order all of the selected dimensions to end at the specified index from end: >>> transform = ts.IndexTransform(input_shape=[2, 3, 4, 5], ... input_labels=["a", "b", "c", "d"]) >>> transform[ts.d['a', 'd'].transpose[-1]] Rank 4 -> 4 index space transform: Input domain: 0: [0, 3) "b" 1: [0, 4) "c" 2: [0, 2) "a" 3: [0, 5) "d" Output index maps: out[0] = 0 + 1 * in[2] out[1] = 0 + 1 * in[0] out[2] = 0 + 1 * in[1] out[3] = 0 + 1 * in[3] Args: target: Target dimension indices for the selected dimensions. All dimensions must be specified by index. Labels are not permitted. If the dimension selection has :python:`k > 1` dimensions, a single non-negative index :python:`i` is equivalent to :python:`i:i+k`; a single negative index :python:`-i` is equivalent to :python:`-i-k:-i`. Returns: Dimension expression with the transpose operation added. Group: Operations )", py::arg("target")); DefineLabelOp<Self>(cls, apply_op, R"( Sets (or changes) the :ref:`labels<dimension-labels>` of the selected dimensions. 
Examples: >>> ts.IndexTransform(3)[ts.d[0].label['x']] Rank 3 -> 3 index space transform: Input domain: 0: (-inf*, +inf*) "x" 1: (-inf*, +inf*) 2: (-inf*, +inf*) Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> ts.IndexTransform(3)[ts.d[0, 2].label['x', 'z']] Rank 3 -> 3 index space transform: Input domain: 0: (-inf*, +inf*) "x" 1: (-inf*, +inf*) 2: (-inf*, +inf*) "z" Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> ts.IndexTransform(3)[ts.d[:].label['x', 'y', 'z']] Rank 3 -> 3 index space transform: Input domain: 0: (-inf*, +inf*) "x" 1: (-inf*, +inf*) "y" 2: (-inf*, +inf*) "z" Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> ts.IndexTransform(3)[ts.d[0, 1].label['x', 'y'].translate_by[2]] Rank 3 -> 3 index space transform: Input domain: 0: (-inf*, +inf*) "x" 1: (-inf*, +inf*) "y" 2: (-inf*, +inf*) Output index maps: out[0] = -2 + 1 * in[0] out[1] = -2 + 1 * in[1] out[2] = 0 + 1 * in[2] The new dimension selection is the same as the prior dimension selection. Args: labels: Dimension labels for each selected dimension. Returns: Dimension expression with the label operation added. Raises: IndexError: If the number of labels does not match the number of selected dimensions, or if the resultant domain would have duplicate labels. Group: Operations )"); cls.def_property_readonly( "diagonal", [](const Self& self) { return self.Extend(PythonDiagonalOp{}); }, R"( Extracts the diagonal of the selected dimensions. The selection dimensions are removed from the resultant index space, and a new dimension corresponding to the diagonal is added as the first dimension, with an input domain equal to the intersection of the input domains of the selection dimensions. The new dimension selection is equal to :python:`ts.d[0]`, corresponding to the newly added diagonal dimension. The lower and upper bounds of the new diagonal dimension are :ref:`implicit<implicit-bounds>` if, and only if, the lower or upper bounds, respectively, of every selected dimension are implicit. Examples: >>> transform = ts.IndexTransform(input_shape=[2, 3], ... input_labels=["x", "y"]) >>> transform[ts.d['x', 'y'].diagonal] Rank 1 -> 2 index space transform: Input domain: 0: [0, 2) Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[0] >>> transform = ts.IndexTransform(3) >>> transform[ts.d[0, 2].diagonal] Rank 2 -> 3 index space transform: Input domain: 0: (-inf*, +inf*) 1: (-inf*, +inf*) Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[0] Note: If zero dimensions are selected, :py:obj:`.diagonal` simply results in a new singleton dimension as the first dimension, equivalent to :python:`[ts.newaxis]`: >>> transform = ts.IndexTransform(1) >>> transform[ts.d[()].diagonal] Rank 2 -> 1 index space transform: Input domain: 0: (-inf*, +inf*) 1: (-inf*, +inf*) Output index maps: out[0] = 0 + 1 * in[1] If only one dimension is selected, :py:obj:`.diagonal` is equivalent to :python:`.label[''].transpose[0]`: >>> transform = ts.IndexTransform(input_labels=['x', 'y']) >>> transform[ts.d[1].diagonal] Rank 2 -> 2 index space transform: Input domain: 0: (-inf*, +inf*) 1: (-inf*, +inf*) "x" Output index maps: out[0] = 0 + 1 * in[1] out[1] = 0 + 1 * in[0] Group: Operations )"); DefineMarkBoundsImplicitOp<Self>(cls, apply_op, R"( Marks the lower/upper bounds of the selected dimensions as :ref:`implicit/explicit<implicit-bounds>`. 
For a `TensorStore`, implicit bounds indicate resizeable dimensions. Marking a bound as explicit fixes it to its current value such that it won't be adjusted by subsequent `TensorStore.resolve` calls if the stored bounds change. Because implicit bounds do not constrain subsequent indexing/slicing operations, a bound may be marked implicit in order to expand the domain. .. warning:: Be careful when marking bounds as implicit, since this may bypass intended constraints on the domain. Examples: >>> s = await ts.open({ ... 'driver': 'zarr', ... 'kvstore': 'memory://' ... }, ... shape=[100, 200], ... dtype=ts.uint32, ... create=True) >>> s.domain { [0, 100*), [0, 200*) } >>> await s.resize(exclusive_max=[200, 300]) >>> (await s.resolve()).domain { [0, 200*), [0, 300*) } >>> (await s[ts.d[0].mark_bounds_implicit[False]].resolve()).domain { [0, 100), [0, 300*) } >>> s_subregion = s[20:30, 40:50] >>> s_subregion.domain { [20, 30), [40, 50) } >>> (await ... s_subregion[ts.d[0].mark_bounds_implicit[:True]].resolve()).domain { [20, 200*), [40, 50) } >>> t = ts.IndexTransform(input_rank=3) >>> t = t[ts.d[0, 2].mark_bounds_implicit[False]] >>> t Rank 3 -> 3 index space transform: Input domain: 0: (-inf, +inf) 1: (-inf*, +inf*) 2: (-inf, +inf) Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> t = t[ts.d[0, 1].mark_bounds_implicit[:True]] >>> t Rank 3 -> 3 index space transform: Input domain: 0: (-inf, +inf*) 1: (-inf*, +inf*) 2: (-inf, +inf) Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] >>> t = t[ts.d[1, 2].mark_bounds_implicit[True:False]] >>> t Rank 3 -> 3 index space transform: Input domain: 0: (-inf, +inf*) 1: (-inf*, +inf) 2: (-inf*, +inf) Output index maps: out[0] = 0 + 1 * in[0] out[1] = 0 + 1 * in[1] out[2] = 0 + 1 * in[2] The new dimension selection is the same as the prior dimension selection. Args: implicit: Indicates the new implicit value for the lower and upper bounds. Must be one of: - `None` to indicate no change; - `True` to change both lower and upper bounds to implicit; - `False` to change both lower and upper bounds to explicit; - a `slice`, where :python:`start` and :python:`stop` specify the new implicit value for the lower and upper bounds, respectively, and each must be one of `None`, `True`, or `False`. Returns: Dimension expression with bounds marked as implicit/explicit. Raises: IndexError: If the resultant domain would have an input dimension referenced by an index array marked as implicit. Group: Operations )"); cls.def("__repr__", &PythonDimExpression::repr); cls.def("__eq__", [](const Self& a, const Self& b) { return a == b; }); cls.attr("__iter__") = py::none(); EnablePicklingFromSerialization(cls); } ClsDimensionSelection MakeDimensionSelectionClass(py::module m) { return ClsDimensionSelection(m, "d", R"( Specifies a dimension selection, for starting a :ref:`dimension expression<python-dim-expressions>`. A dimension selection specifies a sequence of dimensions, either by index or :ref:`label<dimension-labels>`. :ref:`python-dim-selections` may be used as part of a :ref:`dimension expression<python-dim-expression-construction>` to specify the dimensions to which an indexing operation applies. 
Group: Indexing Constructors ============ Operations ========== )"); } void DefineDimensionSelectionAttributes(ClsDimensionSelection& cls) { cls.def_static( "__class_getitem__", [](DimensionSelectionLike selection) { return selection.value; }, R"( Constructs from a sequence of dimension indices, ranges, and/or labels. Examples: >>> ts.d[0, 1, 2] d[0,1,2] >>> ts.d[0:1, 2, "x"] d[0:1,2,'x'] >>> ts.d[[0, 1], [2]] d[0,1,2] >>> ts.d[[0, 1], ts.d[2, 3]] d[0,1,2,3] )", py::arg("selection")); cls.def( "__eq__", [](const DimensionSelection& a, const DimensionSelection& b) { return a.dims() == b.dims(); }, py::arg("other")); EnablePicklingFromSerialization(cls); } void RegisterDimExpressionBindings(pybind11::module m, Executor defer) { defer([cls = MakeDimExpressionClass(m)]() mutable { DefineDimExpressionAttributes(cls); }); defer([cls = MakeDimensionSelectionClass(m)]() mutable { DefineDimensionSelectionAttributes(cls); }); m.attr("newaxis") = py::none(); } TENSORSTORE_GLOBAL_INITIALIZER { RegisterPythonComponent(RegisterDimExpressionBindings, -850); } } bool CastToDimensionSelection(py::handle src, std::vector<DynamicDimSpec>& out) { if (PyUnicode_Check(src.ptr())) { out.emplace_back(py::cast<std::string>(src)); } else if (PyIndex_Check(src.ptr())) { out.emplace_back(DimensionIndex(py::cast<PythonDimensionIndex>(src))); } else if (PySlice_Check(src.ptr())) { out.emplace_back(py::cast<DimRangeSpec>(src)); } else if (py::isinstance<DimensionSelection>(src)) { auto existing = py::cast<DimensionSelection>(src); out.insert(out.end(), existing.dims().begin(), existing.dims().end()); } else { py::object seq = py::reinterpret_steal<py::object>(PySequence_Fast(src.ptr(), "")); if (!seq) { PyErr_Clear(); return false; } std::vector<py::object> seq_objs; Py_ssize_t seq_size = PySequence_Fast_GET_SIZE(seq.ptr()); seq_objs.reserve(seq_size); PyObject** elems = PySequence_Fast_ITEMS(seq.ptr()); for (Py_ssize_t i = 0; i < seq_size; ++i) { seq_objs.push_back(py::reinterpret_borrow<py::object>(elems[i])); } for (const auto& obj : seq_objs) { if (!CastToDimensionSelection(obj, out)) return false; } } return true; } PythonDimExpressionChain::~PythonDimExpressionChain() = default; bool PythonDimExpressionChainTail::Encode( serialization::EncodeSink& sink) const { return serialization::Encode(sink, dims); } bool PythonDimExpressionChainTail::Decode(serialization::DecodeSource& source) { return serialization::Decode(source, dims); } } namespace serialization { [[nodiscard]] bool Serializer<internal_python::PythonDimExpression>::Encode( EncodeSink& sink, const internal_python::PythonDimExpression& value) { return value.Encode(sink); } [[nodiscard]] bool Serializer<internal_python::PythonDimExpression>::Decode( DecodeSource& source, internal_python::PythonDimExpression& value) { return value.Decode(source); } [[nodiscard]] bool Serializer<internal_python::DimensionSelection>::Encode( EncodeSink& sink, const internal_python::DimensionSelection& value) { return serialization::Encode(sink, value.dims()); } [[nodiscard]] bool Serializer<internal_python::DimensionSelection>::Decode( DecodeSource& source, internal_python::DimensionSelection& value) { auto ops = internal::MakeIntrusivePtr< internal_python::PythonDimExpressionChainTail>(); if (!serialization::Decode(source, ops->dims)) return false; value.ops = std::move(ops); return true; } } } namespace pybind11 { namespace detail { bool type_caster<tensorstore::internal_python::DimensionSelectionLike>::load( handle src, bool convert) { if 
(pybind11::isinstance<tensorstore::internal_python::DimensionSelection>( src)) { value.value = pybind11::cast<tensorstore::internal_python::DimensionSelection>(src); return true; } if (!convert) return false; auto ops = tensorstore::internal::MakeIntrusivePtr< tensorstore::internal_python::PythonDimExpressionChainTail>(); auto& dims = ops->dims; if (tensorstore::internal_python::CastToDimensionSelection(src, dims)) { value.value.ops = std::move(ops); return true; } return false; } handle type_caster<tensorstore::internal_python::DimensionSelectionLike>::cast( tensorstore::internal_python::DimensionSelectionLike value, return_value_policy policy, handle parent) { return pybind11::cast(std::move(value.value)); } bool type_caster<tensorstore::DimRangeSpec>::load(handle src, bool convert) { if (!PySlice_Check(src.ptr())) return false; Py_ssize_t start, stop, step; if (PySlice_Unpack(src.ptr(), &start, &stop, &step) != 0) { return false; } auto* slice_obj = reinterpret_cast<PySliceObject*>(src.ptr()); if (slice_obj->start != Py_None) value.inclusive_start = start; if (slice_obj->stop != Py_None) value.exclusive_stop = stop; value.step = step; return true; } handle type_caster<tensorstore::DimRangeSpec>::cast( const tensorstore::DimRangeSpec& x, return_value_policy , handle ) { handle h(PySlice_New(pybind11::cast(x.inclusive_start).ptr(), pybind11::cast(x.exclusive_stop).ptr(), x.step == 1 ? nullptr : pybind11::cast(x.step).ptr())); if (!h.ptr()) throw error_already_set(); return h; } } }
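These Python bindings delegate to tensorstore's C++ DimExpression API, which the unit test below exercises directly. As a minimal sketch of that C++ side (a hypothetical ExampleDimExpression helper, using only calls that also appear in the test: MakeArray, Dims, TranslateBy, Label, Materialize), a translate-and-label expression might be applied like this:

#include "tensorstore/array.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/transformed_array.h"

// Sketch only: shift the origin of dimension 0 by 10, label it, and
// materialize the transformed view. Error handling is elided.
void ExampleDimExpression() {
  auto array = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
  auto view = array | tensorstore::Dims(0).TranslateBy(10).Label("rows") |
              tensorstore::Materialize();
  // `view` is a tensorstore::Result; callers should check view.ok().
}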
#include "tensorstore/index_space/dim_expression.h" #include <string_view> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorstore/array.h" #include "tensorstore/box.h" #include "tensorstore/index.h" #include "tensorstore/index_space/index_transform.h" #include "tensorstore/index_space/transformed_array.h" #include "tensorstore/rank.h" #include "tensorstore/util/result.h" #include "tensorstore/util/span.h" #include "tensorstore/util/status_testutil.h" namespace { using ::tensorstore::AllDims; using ::tensorstore::BoxView; using ::tensorstore::DimRange; using ::tensorstore::Dims; using ::tensorstore::Index; using ::tensorstore::MakeArray; using ::tensorstore::MakeOffsetArrayView; using ::tensorstore::Materialize; static const Index default_origin[3] = {0, 0, 0}; auto TestArray(tensorstore::span<const Index, 3> origin = default_origin) { static const int test_array[4][4][8] = { { {111, 112, 113, 114, 115, 116, 117, 118}, {121, 122, 123, 124, 125, 126, 127, 128}, {131, 132, 133, 134, 135, 136, 137, 138}, {141, 142, 143, 144, 145, 146, 147, 148}, }, { {211, 212, 213, 214, 215, 216, 217, 218}, {221, 222, 223, 224, 225, 226, 227, 228}, {231, 232, 233, 234, 235, 236, 237, 238}, {241, 242, 243, 244, 245, 246, 247, 248}, }, { {311, 312, 313, 314, 315, 316, 317, 318}, {321, 322, 323, 324, 325, 326, 327, 328}, {331, 332, 333, 334, 335, 336, 337, 338}, {341, 342, 343, 344, 345, 346, 347, 348}, }, { {411, 412, 413, 414, 415, 416, 417, 418}, {421, 422, 423, 424, 425, 426, 427, 428}, {431, 432, 433, 434, 435, 436, 437, 438}, {441, 442, 443, 444, 445, 446, 447, 448}, }}; return MakeOffsetArrayView(origin, test_array); } TEST(DimExpressionTest, TranslateBy) { auto view = TestArray() | Dims(0, 2).TranslateBy({10, 20}) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(344, ((*view)({12, 3, 23}))); } TEST(DimExpressionTest, TranslateBySingle) { auto view = TestArray() | Dims(0, 2).TranslateBy(10); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, TranslateTo) { const Index origin[3] = {1, 2, 3}; auto view = TestArray(origin) | Dims(0, 2).TranslateTo({10, 20}) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(344 - 123, ((*view)({11, 3, 20}))); } TEST(DimExpressionTest, TranslateToSingle) { auto view = TestArray() | AllDims().TranslateTo(0); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, IndexSlice) { auto view = TestArray() | Dims(0, 2).IndexSlice({2, 4}) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(345, ((*view)({3}))); } TEST(DimExpressionTest, IndexSliceSingle) { auto view = TestArray() | Dims(0, 2).IndexSlice(1); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, BoxSlice) { auto view = TestArray() | Dims(0, 2).BoxSlice(BoxView({1, 4}, {3, 4})) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(245, ((*view)({1, 3, 4}))); } TEST(DimExpressionTest, TranslateBoxSlice) { const Index origin[3] = {0, 2, 0}; auto view = TestArray(origin) | Dims(0, 2).TranslateBoxSlice(BoxView({1, 4}, {3, 4})) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(245 - 20, ((*view)({0, 3, 0}))); } TEST(DimExpressionTest, ClosedInterval) { auto view = TestArray() | Dims(0, 2).ClosedInterval({1, 6}, {3, 0}, {1, -2}) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(347, ((*view)({2, 3, -3}))); } TEST(DimExpressionTest, ClosedInterval1) { auto view = TestArray() | Dims(0, 2).ClosedInterval(1, 1); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, HalfOpenInterval) { auto view = TestArray() | Dims(0, 2).HalfOpenInterval({1, 6}, {3, 0}, {1, -2}) | Materialize(); 
TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(347, ((*view)({2, 3, -3}))); } TEST(DimExpressionTest, HalfOpenInterval1) { auto view = TestArray() | Dims(0, 2).HalfOpenInterval(1, 2); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, SizedInterval) { auto view = TestArray() | Dims(0, 2).SizedInterval({1, 6}, {3, 2}, {1, -2}) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(347, ((*view)({2, 3, -3}))); } TEST(DimExpressionTest, SizedInterval1) { auto view = TestArray() | Dims(0, 2).SizedInterval(1, 2); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, TranslateClosedInterval) { auto view = TestArray() | Dims(0, 2).TranslateClosedInterval({0, 1}, {1, 1}); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, TranslateClosedInterval1) { auto view = TestArray() | Dims(0, 2).TranslateClosedInterval(1, 1); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, TranslateHalfOpenInterval) { auto view = TestArray() | Dims(0, 2).TranslateHalfOpenInterval({0, 1}, {1, 1}); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, TranslateHalfOpenInterval1) { auto view = TestArray() | Dims(0, 2).TranslateHalfOpenInterval(1, 2); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, TranslateSizedInterval) { auto view = TestArray() | Dims(0, 2).TranslateSizedInterval({0, 1}, {1, 1}); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, TranslateSizedInterval1) { auto view = TestArray() | Dims(0, 2).TranslateSizedInterval(1, 2); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, IndexArraySlice) { auto view = TestArray() | Dims(0, 2).IndexArraySlice( MakeArray<Index>({{1, 2, 3}, {3, 2, 1}}), MakeArray<Index>({{7, 6, 5}, {1, 2, 4}})) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(248, ((*view)({0, 0, 3}))); } TEST(DimExpressionTest, IndexVectorArraySlice) { auto view = TestArray() | Dims(0, 2).IndexVectorArraySlice( MakeArray<Index>( {{{1, 7}, {2, 6}, {3, 5}}, {{3, 1}, {2, 2}, {1, 4}}}), -1) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(248, ((*view)({0, 0, 3}))); } TEST(DimExpressionTest, OuterIndexArraySlice) { auto view = TestArray() | Dims(2, 0).OuterIndexArraySlice( MakeArray<Index>({{4, 5}, {6, 7}}), MakeArray<Index>({3, 2})) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(438, ((*view)({0, 2, 1, 1}))); } TEST(DimExpressionTest, Label) { auto view = TestArray() | Dims(0, 2).Label({"a", "b"}); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, LabelB) { auto view = TestArray() | Dims(0, 2).Label("a", "b"); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, MoveTo) { auto view = TestArray() | Dims(2, 0).MoveTo(1) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(345, ((*view)({3, 4, 2}))); } TEST(DimExpressionTest, MoveToFront) { auto view = TestArray() | Dims(0, 2).MoveToFront(); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, MoveToBack) { auto view = TestArray() | Dims(0, 2).MoveToBack(); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, Diagonal) { auto view = TestArray() | Dims(0, 2).Diagonal() | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(343, ((*view)({2, 3}))); } TEST(DimExpressionTest, AddNew) { auto view = TestArray() | Dims(0, -1).AddNew() | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(333, ((*view)({0, 2, 2, 2, 0}))); } TEST(DimExpressionTest, Transpose) { auto view = TestArray() | Dims(2, 0, 1).Transpose() | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(234, ((*view)({3, 1, 2}))); } TEST(DimExpressionTest, TransposeB) { auto view = TestArray() | Dims(2, 0).Transpose({1, 2}) | Materialize(); 
TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(345, ((*view)({3, 4, 2}))); } TEST(DimExpressionTest, MarkBoundsExplicit) { auto view = TestArray() | Dims(2, 0).MarkBoundsExplicit(); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, UnsafeMarkBoundsImplicit) { auto view = TestArray() | Dims(2, 0).UnsafeMarkBoundsImplicit(); TENSORSTORE_EXPECT_OK(view); } TEST(DimExpressionTest, Stride) { auto view = TestArray() | Dims(0, 2).Stride({-2, 3}) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(344, ((*view)({-1, 3, 1}))); } TEST(DimExpressionTest, AllDims) { auto view = TestArray() | AllDims().IndexSlice(1) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(222, ((*view)())); } TEST(DimExpressionTest, DimRange) { auto view = TestArray() | tensorstore::DimRange(1).IndexSlice(1) | Materialize(); TENSORSTORE_EXPECT_OK(view); EXPECT_EQ(322, ((*view)(2))); } }
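To make the first expectation in this test concrete: TranslateBy shifts domain origins without moving data, so the element test_array[2][3][3] == 344 stays reachable at the translated coordinates. A worked restatement (sketch, reusing the test's own helpers):

// Dims(0, 2).TranslateBy({10, 20}) shifts the origins of dimensions 0 and 2
// by 10 and 20, so test_array[2][3][3] == 344 is addressed at
// {2 + 10, 3, 3 + 20} == {12, 3, 23}, matching the EXPECT_EQ above.
auto view = TestArray() | Dims(0, 2).TranslateBy({10, 20}) | Materialize();
EXPECT_EQ(344, (*view)({12, 3, 23}));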
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/python/tensorstore/dim_expression.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dim_expression_test.cc
4f887a6430414cd6088e1743555015b10f116d50
a3f54d00-50ef-4c35-82d3-1ecbc16f183e
cpp
google/tensorstore
environment_credential_provider
tensorstore/kvstore/s3/credentials/environment_credential_provider.cc
tensorstore/kvstore/s3/credentials/environment_credential_provider_test.cc
#include "tensorstore/kvstore/s3/credentials/environment_credential_provider.h" #include "absl/log/absl_log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/time/time.h" #include "tensorstore/internal/env.h" #include "tensorstore/kvstore/s3/credentials/aws_credentials.h" #include "tensorstore/util/result.h" using ::tensorstore::internal::GetEnv; namespace tensorstore { namespace internal_kvstore_s3 { namespace { static constexpr char kEnvAwsAccessKeyId[] = "AWS_ACCESS_KEY_ID"; static constexpr char kEnvAwsSecretAccessKey[] = "AWS_SECRET_ACCESS_KEY"; static constexpr char kEnvAwsSessionToken[] = "AWS_SESSION_TOKEN"; } Result<AwsCredentials> EnvironmentCredentialProvider::GetCredentials() { auto access_key = GetEnv(kEnvAwsAccessKeyId); if (!access_key) { return absl::NotFoundError(absl::StrCat(kEnvAwsAccessKeyId, " not set")); } auto secret_key = GetEnv(kEnvAwsSecretAccessKey); if (!secret_key) { return absl::NotFoundError( absl::StrCat(kEnvAwsSecretAccessKey, " not set")); } ABSL_LOG_FIRST_N(INFO, 1) << "Using Environment Variable " << kEnvAwsAccessKeyId; auto credentials = AwsCredentials{*access_key, *secret_key}; if (auto session_token = GetEnv(kEnvAwsSessionToken); session_token) { credentials.session_token = *session_token; } credentials.expires_at = absl::InfiniteFuture(); return credentials; } } }
#include "tensorstore/kvstore/s3/credentials/environment_credential_provider.h" #include <gtest/gtest.h> #include "tensorstore/internal/env.h" #include "tensorstore/util/status_testutil.h" namespace { using ::tensorstore::internal::SetEnv; using ::tensorstore::internal::UnsetEnv; using ::tensorstore::internal_kvstore_s3::EnvironmentCredentialProvider; class EnvironmentCredentialProviderTest : public ::testing::Test { protected: void SetUp() override { for (const char* var : {"AWS_SHARED_CREDENTIALS_FILE", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "AWS_PROFILE"}) { UnsetEnv(var); } } }; #ifndef _WIN32 TEST_F(EnvironmentCredentialProviderTest, ProviderNoCredentials) { auto provider = EnvironmentCredentialProvider(); ASSERT_FALSE(provider.GetCredentials().ok()); SetEnv("AWS_ACCESS_KEY_ID", "foo"); SetEnv("AWS_SECRET_ACCESS_KEY", ""); TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials()); ASSERT_EQ(credentials.access_key, "foo"); ASSERT_TRUE(credentials.secret_key.empty()); ASSERT_TRUE(credentials.session_token.empty()); } #endif TEST_F(EnvironmentCredentialProviderTest, ProviderAwsCredentialsFromEnv) { SetEnv("AWS_ACCESS_KEY_ID", "foo"); SetEnv("AWS_SECRET_ACCESS_KEY", "bar"); SetEnv("AWS_SESSION_TOKEN", "qux"); auto provider = EnvironmentCredentialProvider(); TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials()); ASSERT_EQ(credentials.access_key, "foo"); ASSERT_EQ(credentials.secret_key, "bar"); ASSERT_EQ(credentials.session_token, "qux"); } }
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/environment_credential_provider.cc
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/environment_credential_provider_test.cc
4f887a6430414cd6088e1743555015b10f116d50
b0dfb30d-f1d3-464d-99a2-1da73dd33128
cpp
tensorflow/tensorflow
gpu_spmd_pipeline
third_party/xla/xla/service/gpu/gpu_spmd_pipeline.cc
third_party/xla/xla/service/gpu/gpu_spmd_pipeline_test.cc
#include "xla/service/gpu/gpu_spmd_pipeline.h" #include <cstdint> #include <optional> #include "absl/functional/function_ref.h" #include "absl/log/check.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/hlo/pass/hlo_pass_fix.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/hlo/transforms/hlo_constant_splitter.h" #include "xla/service/algebraic_simplifier.h" #include "xla/service/conditional_simplifier.h" #include "xla/service/gather_expander.h" #include "xla/service/gpu/transforms/algebraic_simplifier.h" #include "xla/service/hlo_constant_folding.h" #include "xla/service/hlo_dce.h" #include "xla/service/hlo_module_config.h" #include "xla/service/reshape_mover.h" #include "xla/service/scatter_expander.h" #include "xla/service/sharding_propagation.h" #include "xla/service/sort_simplifier.h" #include "xla/service/spmd/collective_permute_motion.h" #include "xla/service/spmd/shardy/shardy_xla_pass.h" #include "xla/service/spmd/stateful_rng_spmd_partitioner.h" #include "xla/service/tuple_simplifier.h" #include "xla/service/while_loop_constant_sinking.h" #include "xla/service/while_loop_simplifier.h" #include "xla/stream_executor/device_description.h" namespace xla { namespace gpu { void AddSPMDPasses( const HloModule* hlo_module, const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts, const se::GpuComputeCapability& compute_capability, HloPassPipeline& spmd_pipeline, std::optional<const absl::FunctionRef<void(HloPassPipeline&)>> auto_sharding_func) { const int64_t num_partitions = hlo_module->config().num_partitions(); CHECK_GE(num_partitions, 1); HloPassPipeline& spmd_simplify = spmd_pipeline.AddPass<HloPassFix<HloPassPipeline>>("spmd-simplify"); spmd_simplify.AddPass<GpuAlgebraicSimplifier>(layout_insensitive_algsimp_opts, compute_capability); spmd_simplify.AddPass<SortSimplifier>(); spmd_simplify.AddPass<TupleSimplifier>(); spmd_simplify.AddPass<ScatterExpander>( ScatterExpander::kEliminateSimpleScatters); spmd_simplify.AddPass<GatherExpander>( GatherExpander::kEliminateSimpleGathers); spmd_simplify.AddPass<WhileLoopConstantSinking>(); spmd_simplify.AddPass<WhileLoopSimplifier>(); ReshapeMoverOptions reshape_mover_options; reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true; spmd_simplify.AddPass<ReshapeMover>(reshape_mover_options); spmd_simplify.AddPass<HloPassFix<GpuAlgebraicSimplifier>>( layout_insensitive_algsimp_opts, compute_capability); spmd_simplify.AddPass<HloConstantFolding>(); spmd_simplify.AddPass<ConditionalSimplifier>(); const HloModuleConfig& config = hlo_module->config(); if (config.use_shardy_partitioner()) { spmd_pipeline.AddPass<sdy::ShardyXLA>(); } else { spmd_pipeline.AddPass<HloConstantSplitter>(); spmd_simplify.AddPass<HloDCE>(); if (auto_sharding_func.has_value()) { (*auto_sharding_func)(spmd_pipeline); } spmd_pipeline.AddPass<ShardingPropagation>( true, false, config.allow_spmd_sharding_propagation_to_output()); } spmd_pipeline.AddPass<spmd::StatefulRngSpmdPartitioner>( num_partitions, hlo_module->config().replica_count(), hlo_module->config() .debug_options() .xla_gpu_threshold_for_windowed_einsum_mib(), hlo_module->config() .debug_options() .xla_gpu_multi_streamed_windowed_einsum(), true, true); spmd_pipeline.AddPass<CollectivePermuteMotion>(); } } }
#include "xla/service/gpu/gpu_spmd_pipeline.h" #include <cstdint> #include <memory> #include <optional> #include <string> #include <gtest/gtest.h> #include "absl/log/check.h" #include "absl/log/log.h" #include "xla/client/executable_build_options.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/service/algebraic_simplifier.h" #include "xla/service/hlo_module_config.h" #include "xla/service/hlo_parser.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { class GpuSpmdPartitioningTest : public HloTestBase, public ::testing::WithParamInterface<bool> { public: absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation( const char* hlo_module, int64_t num_devices) { HloModuleConfig config = GetModuleConfigForTest( 1, num_devices); config.set_num_partitions(num_devices); config.set_use_shardy_partitioner(UseShardy()); TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module, config)); HloPassPipeline spmd_pipeline("spmd-partitioner"); se::CudaComputeCapability ampere(8, 0); AlgebraicSimplifierOptions alg_simplifier_options; AddSPMDPasses(module.get(), alg_simplifier_options, ampere, spmd_pipeline, std::nullopt); TF_RETURN_IF_ERROR(spmd_pipeline.Run(module.get()).status()); XLA_VLOG_LINES(10, module->ToString()); return module; } protected: bool UseShardy() const { return GetParam(); } DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest(); return debug_options; } }; TEST_P(GpuSpmdPartitioningTest, DotWithEntryComputationLayout) { const char* const kHloModule = R"( HloModule module, entry_computation_layout={(f32[8,16]{0,1}, f32[16,24]{1,0}) ->f32[8,24]{1,0}} ENTRY main { %p0 = f32[8,16] parameter(0), sharding={devices=[1,8]<=[8]} %p1 = f32[16,24] parameter(1), sharding={devices=[8,1]<=[8]} ROOT %dot = f32[8,24] dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, PartitionComputation(kHloModule, 8)); EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(0), ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 2}, {0, 1})); EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(1), ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {1, 0})); EXPECT_EQ(module->config().entry_computation_layout().result_shape(), ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 24}, {1, 0})); } std::string TestParamToString( const ::testing::TestParamInfo<bool>& param_info) { return param_info.param ? "Shardy" : "GSPMD"; } INSTANTIATE_TEST_SUITE_P(All, GpuSpmdPartitioningTest, ::testing::Values(true, false), TestParamToString); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_spmd_pipeline.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_spmd_pipeline_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
35359af9-1cd7-46ca-b0f6-e0043cbb8f1d
cpp
abseil/abseil-cpp
numbers
absl/strings/numbers.cc
absl/strings/numbers_test.cc
#include "absl/strings/numbers.h" #include <algorithm> #include <cassert> #include <cfloat> #include <cmath> #include <cstdint> #include <cstdio> #include <cstdlib> #include <cstring> #include <iterator> #include <limits> #include <system_error> #include <utility> #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/endian.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/nullability.h" #include "absl/base/optimization.h" #include "absl/numeric/bits.h" #include "absl/numeric/int128.h" #include "absl/strings/ascii.h" #include "absl/strings/charconv.h" #include "absl/strings/match.h" #include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN bool SimpleAtof(absl::string_view str, absl::Nonnull<float*> out) { *out = 0.0; str = StripAsciiWhitespace(str); if (!str.empty() && str[0] == '+') { str.remove_prefix(1); if (!str.empty() && str[0] == '-') { return false; } } auto result = absl::from_chars(str.data(), str.data() + str.size(), *out); if (result.ec == std::errc::invalid_argument) { return false; } if (result.ptr != str.data() + str.size()) { return false; } if (result.ec == std::errc::result_out_of_range) { if (*out > 1.0) { *out = std::numeric_limits<float>::infinity(); } else if (*out < -1.0) { *out = -std::numeric_limits<float>::infinity(); } } return true; } bool SimpleAtod(absl::string_view str, absl::Nonnull<double*> out) { *out = 0.0; str = StripAsciiWhitespace(str); if (!str.empty() && str[0] == '+') { str.remove_prefix(1); if (!str.empty() && str[0] == '-') { return false; } } auto result = absl::from_chars(str.data(), str.data() + str.size(), *out); if (result.ec == std::errc::invalid_argument) { return false; } if (result.ptr != str.data() + str.size()) { return false; } if (result.ec == std::errc::result_out_of_range) { if (*out > 1.0) { *out = std::numeric_limits<double>::infinity(); } else if (*out < -1.0) { *out = -std::numeric_limits<double>::infinity(); } } return true; } bool SimpleAtob(absl::string_view str, absl::Nonnull<bool*> out) { ABSL_RAW_CHECK(out != nullptr, "Output pointer must not be nullptr."); if (EqualsIgnoreCase(str, "true") || EqualsIgnoreCase(str, "t") || EqualsIgnoreCase(str, "yes") || EqualsIgnoreCase(str, "y") || EqualsIgnoreCase(str, "1")) { *out = true; return true; } if (EqualsIgnoreCase(str, "false") || EqualsIgnoreCase(str, "f") || EqualsIgnoreCase(str, "no") || EqualsIgnoreCase(str, "n") || EqualsIgnoreCase(str, "0")) { *out = false; return true; } return false; } namespace { constexpr uint32_t kTwoZeroBytes = 0x0101 * '0'; constexpr uint64_t kFourZeroBytes = 0x01010101 * '0'; constexpr uint64_t kEightZeroBytes = 0x0101010101010101ull * '0'; constexpr uint64_t kDivisionBy10Mul = 103u; constexpr uint64_t kDivisionBy10Div = 1 << 10; constexpr uint64_t kDivisionBy100Mul = 10486u; constexpr uint64_t kDivisionBy100Div = 1 << 20; inline char* EncodeHundred(uint32_t n, absl::Nonnull<char*> out_str) { int num_digits = static_cast<int>(n - 10) >> 8; uint32_t div10 = (n * kDivisionBy10Mul) / kDivisionBy10Div; uint32_t mod10 = n - 10u * div10; uint32_t base = kTwoZeroBytes + div10 + (mod10 << 8); base >>= num_digits & 8; little_endian::Store16(out_str, static_cast<uint16_t>(base)); return out_str + 2 + num_digits; } inline char* EncodeTenThousand(uint32_t n, absl::Nonnull<char*> out_str) { uint32_t div100 = (n * kDivisionBy100Mul) / kDivisionBy100Div; uint32_t mod100 = n - 100ull * div100; uint32_t hundreds = (mod100 << 16) + div100; uint32_t tens = (hundreds * kDivisionBy10Mul) / 
kDivisionBy10Div; tens &= (0xFull << 16) | 0xFull; tens += (hundreds - 10ull * tens) << 8; ABSL_ASSUME(tens != 0); uint32_t zeroes = static_cast<uint32_t>(absl::countr_zero(tens)) & (0 - 8u); tens += kFourZeroBytes; tens >>= zeroes; little_endian::Store32(out_str, tens); return out_str + sizeof(tens) - zeroes / 8; } inline uint64_t PrepareEightDigits(uint32_t i) { ABSL_ASSUME(i < 10000'0000); uint32_t hi = i / 10000; uint32_t lo = i % 10000; uint64_t merged = hi | (uint64_t{lo} << 32); uint64_t div100 = ((merged * kDivisionBy100Mul) / kDivisionBy100Div) & ((0x7Full << 32) | 0x7Full); uint64_t mod100 = merged - 100ull * div100; uint64_t hundreds = (mod100 << 16) + div100; uint64_t tens = (hundreds * kDivisionBy10Mul) / kDivisionBy10Div; tens &= (0xFull << 48) | (0xFull << 32) | (0xFull << 16) | 0xFull; tens += (hundreds - 10ull * tens) << 8; return tens; } inline ABSL_ATTRIBUTE_ALWAYS_INLINE absl::Nonnull<char*> EncodeFullU32( uint32_t n, absl::Nonnull<char*> out_str) { if (n < 10) { *out_str = static_cast<char>('0' + n); return out_str + 1; } if (n < 100'000'000) { uint64_t bottom = PrepareEightDigits(n); ABSL_ASSUME(bottom != 0); uint32_t zeroes = static_cast<uint32_t>(absl::countr_zero(bottom)) & (0 - 8u); little_endian::Store64(out_str, (bottom + kEightZeroBytes) >> zeroes); return out_str + sizeof(bottom) - zeroes / 8; } uint32_t div08 = n / 100'000'000; uint32_t mod08 = n % 100'000'000; uint64_t bottom = PrepareEightDigits(mod08) + kEightZeroBytes; out_str = EncodeHundred(div08, out_str); little_endian::Store64(out_str, bottom); return out_str + sizeof(bottom); } inline ABSL_ATTRIBUTE_ALWAYS_INLINE char* EncodeFullU64(uint64_t i, char* buffer) { if (i <= std::numeric_limits<uint32_t>::max()) { return EncodeFullU32(static_cast<uint32_t>(i), buffer); } uint32_t mod08; if (i < 1'0000'0000'0000'0000ull) { uint32_t div08 = static_cast<uint32_t>(i / 100'000'000ull); mod08 = static_cast<uint32_t>(i % 100'000'000ull); buffer = EncodeFullU32(div08, buffer); } else { uint64_t div08 = i / 100'000'000ull; mod08 = static_cast<uint32_t>(i % 100'000'000ull); uint32_t div016 = static_cast<uint32_t>(div08 / 100'000'000ull); uint32_t div08mod08 = static_cast<uint32_t>(div08 % 100'000'000ull); uint64_t mid_result = PrepareEightDigits(div08mod08) + kEightZeroBytes; buffer = EncodeTenThousand(div016, buffer); little_endian::Store64(buffer, mid_result); buffer += sizeof(mid_result); } uint64_t mod_result = PrepareEightDigits(mod08) + kEightZeroBytes; little_endian::Store64(buffer, mod_result); return buffer + sizeof(mod_result); } } void numbers_internal::PutTwoDigits(uint32_t i, absl::Nonnull<char*> buf) { assert(i < 100); uint32_t base = kTwoZeroBytes; uint32_t div10 = (i * kDivisionBy10Mul) / kDivisionBy10Div; uint32_t mod10 = i - 10u * div10; base += div10 + (mod10 << 8); little_endian::Store16(buf, static_cast<uint16_t>(base)); } absl::Nonnull<char*> numbers_internal::FastIntToBuffer( uint32_t n, absl::Nonnull<char*> out_str) { out_str = EncodeFullU32(n, out_str); *out_str = '\0'; return out_str; } absl::Nonnull<char*> numbers_internal::FastIntToBuffer( int32_t i, absl::Nonnull<char*> buffer) { uint32_t u = static_cast<uint32_t>(i); if (i < 0) { *buffer++ = '-'; u = 0 - u; } buffer = EncodeFullU32(u, buffer); *buffer = '\0'; return buffer; } absl::Nonnull<char*> numbers_internal::FastIntToBuffer( uint64_t i, absl::Nonnull<char*> buffer) { buffer = EncodeFullU64(i, buffer); *buffer = '\0'; return buffer; } absl::Nonnull<char*> numbers_internal::FastIntToBuffer( int64_t i, absl::Nonnull<char*> buffer) { 
uint64_t u = static_cast<uint64_t>(i); if (i < 0) { *buffer++ = '-'; u = 0 - u; } buffer = EncodeFullU64(u, buffer); *buffer = '\0'; return buffer; } static std::pair<uint64_t, uint64_t> Mul32(std::pair<uint64_t, uint64_t> num, uint32_t mul) { uint64_t bits0_31 = num.second & 0xFFFFFFFF; uint64_t bits32_63 = num.second >> 32; uint64_t bits64_95 = num.first & 0xFFFFFFFF; uint64_t bits96_127 = num.first >> 32; bits0_31 *= mul; bits32_63 *= mul; bits64_95 *= mul; bits96_127 *= mul; uint64_t bits0_63 = bits0_31 + (bits32_63 << 32); uint64_t bits64_127 = bits64_95 + (bits96_127 << 32) + (bits32_63 >> 32) + (bits0_63 < bits0_31); uint64_t bits128_up = (bits96_127 >> 32) + (bits64_127 < bits64_95); if (bits128_up == 0) return {bits64_127, bits0_63}; auto shift = static_cast<unsigned>(bit_width(bits128_up)); uint64_t lo = (bits0_63 >> shift) + (bits64_127 << (64 - shift)); uint64_t hi = (bits64_127 >> shift) + (bits128_up << (64 - shift)); return {hi, lo}; } static std::pair<uint64_t, uint64_t> PowFive(uint64_t num, int expfive) { std::pair<uint64_t, uint64_t> result = {num, 0}; while (expfive >= 13) { result = Mul32(result, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5); expfive -= 13; } constexpr uint32_t powers_of_five[13] = { 1, 5, 5 * 5, 5 * 5 * 5, 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5}; result = Mul32(result, powers_of_five[expfive & 15]); int shift = countl_zero(result.first); if (shift != 0) { result.first = (result.first << shift) + (result.second >> (64 - shift)); result.second = (result.second << shift); } return result; } struct ExpDigits { int32_t exponent; char digits[6]; }; static ExpDigits SplitToSix(const double value) { ExpDigits exp_dig; int exp = 5; double d = value; if (d >= 999999.5) { if (d >= 1e+261) exp += 256, d *= 1e-256; if (d >= 1e+133) exp += 128, d *= 1e-128; if (d >= 1e+69) exp += 64, d *= 1e-64; if (d >= 1e+37) exp += 32, d *= 1e-32; if (d >= 1e+21) exp += 16, d *= 1e-16; if (d >= 1e+13) exp += 8, d *= 1e-8; if (d >= 1e+9) exp += 4, d *= 1e-4; if (d >= 1e+7) exp += 2, d *= 1e-2; if (d >= 1e+6) exp += 1, d *= 1e-1; } else { if (d < 1e-250) exp -= 256, d *= 1e256; if (d < 1e-122) exp -= 128, d *= 1e128; if (d < 1e-58) exp -= 64, d *= 1e64; if (d < 1e-26) exp -= 32, d *= 1e32; if (d < 1e-10) exp -= 16, d *= 1e16; if (d < 1e-2) exp -= 8, d *= 1e8; if (d < 1e+2) exp -= 4, d *= 1e4; if (d < 1e+4) exp -= 2, d *= 1e2; if (d < 1e+5) exp -= 1, d *= 1e1; } uint64_t d64k = d * 65536; uint32_t dddddd; if ((d64k % 65536) == 32767 || (d64k % 65536) == 32768) { dddddd = static_cast<uint32_t>(d64k / 65536); int exp2; double m = std::frexp(value, &exp2); uint64_t mantissa = m * (32768.0 * 65536.0 * 65536.0 * 65536.0); mantissa <<= 1; exp2 -= 64; std::pair<uint64_t, uint64_t> edge, val; if (exp >= 6) { edge = PowFive(2 * dddddd + 1, exp - 5); val.first = mantissa; val.second = 0; } else { edge = PowFive(2 * dddddd + 1, 0); val = PowFive(mantissa, 5 - exp); } if (val > edge) { dddddd++; } else if (val == edge) { dddddd += (dddddd & 1); } } else { dddddd = static_cast<uint32_t>((d64k + 32768) / 65536); } if (dddddd == 1000000) { dddddd = 100000; exp += 1; } exp_dig.exponent = exp; uint32_t two_digits = dddddd / 10000; dddddd -= two_digits * 10000; numbers_internal::PutTwoDigits(two_digits, &exp_dig.digits[0]); two_digits = dddddd / 100; dddddd -= 
two_digits * 100; numbers_internal::PutTwoDigits(two_digits, &exp_dig.digits[2]); numbers_internal::PutTwoDigits(dddddd, &exp_dig.digits[4]); return exp_dig; } size_t numbers_internal::SixDigitsToBuffer(double d, absl::Nonnull<char*> const buffer) { static_assert(std::numeric_limits<float>::is_iec559, "IEEE-754/IEC-559 support only"); char* out = buffer; if (std::isnan(d)) { strcpy(out, "nan"); return 3; } if (d == 0) { if (std::signbit(d)) *out++ = '-'; *out++ = '0'; *out = 0; return static_cast<size_t>(out - buffer); } if (d < 0) { *out++ = '-'; d = -d; } if (d > std::numeric_limits<double>::max()) { strcpy(out, "inf"); return static_cast<size_t>(out + 3 - buffer); } auto exp_dig = SplitToSix(d); int exp = exp_dig.exponent; const char* digits = exp_dig.digits; out[0] = '0'; out[1] = '.'; switch (exp) { case 5: memcpy(out, &digits[0], 6), out += 6; *out = 0; return static_cast<size_t>(out - buffer); case 4: memcpy(out, &digits[0], 5), out += 5; if (digits[5] != '0') { *out++ = '.'; *out++ = digits[5]; } *out = 0; return static_cast<size_t>(out - buffer); case 3: memcpy(out, &digits[0], 4), out += 4; if ((digits[5] | digits[4]) != '0') { *out++ = '.'; *out++ = digits[4]; if (digits[5] != '0') *out++ = digits[5]; } *out = 0; return static_cast<size_t>(out - buffer); case 2: memcpy(out, &digits[0], 3), out += 3; *out++ = '.'; memcpy(out, &digits[3], 3); out += 3; while (out[-1] == '0') --out; if (out[-1] == '.') --out; *out = 0; return static_cast<size_t>(out - buffer); case 1: memcpy(out, &digits[0], 2), out += 2; *out++ = '.'; memcpy(out, &digits[2], 4); out += 4; while (out[-1] == '0') --out; if (out[-1] == '.') --out; *out = 0; return static_cast<size_t>(out - buffer); case 0: memcpy(out, &digits[0], 1), out += 1; *out++ = '.'; memcpy(out, &digits[1], 5); out += 5; while (out[-1] == '0') --out; if (out[-1] == '.') --out; *out = 0; return static_cast<size_t>(out - buffer); case -4: out[2] = '0'; ++out; ABSL_FALLTHROUGH_INTENDED; case -3: out[2] = '0'; ++out; ABSL_FALLTHROUGH_INTENDED; case -2: out[2] = '0'; ++out; ABSL_FALLTHROUGH_INTENDED; case -1: out += 2; memcpy(out, &digits[0], 6); out += 6; while (out[-1] == '0') --out; *out = 0; return static_cast<size_t>(out - buffer); } assert(exp < -4 || exp >= 6); out[0] = digits[0]; assert(out[1] == '.'); out += 2; memcpy(out, &digits[1], 5), out += 5; while (out[-1] == '0') --out; if (out[-1] == '.') --out; *out++ = 'e'; if (exp > 0) { *out++ = '+'; } else { *out++ = '-'; exp = -exp; } if (exp > 99) { int dig1 = exp / 100; exp -= dig1 * 100; *out++ = '0' + static_cast<char>(dig1); } PutTwoDigits(static_cast<uint32_t>(exp), out); out += 2; *out = 0; return static_cast<size_t>(out - buffer); } namespace { static const int8_t kAsciiToInt[256] = { 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 36, 36, 36, 36, 36, 36, 36, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 36, 36, 36, 36, 36, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 
36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36}; inline bool safe_parse_sign_and_base( absl::Nonnull<absl::string_view*> text , absl::Nonnull<int*> base_ptr , absl::Nonnull<bool*> negative_ptr ) { if (text->data() == nullptr) { return false; } const char* start = text->data(); const char* end = start + text->size(); int base = *base_ptr; while (start < end && absl::ascii_isspace(static_cast<unsigned char>(start[0]))) { ++start; } while (start < end && absl::ascii_isspace(static_cast<unsigned char>(end[-1]))) { --end; } if (start >= end) { return false; } *negative_ptr = (start[0] == '-'); if (*negative_ptr || start[0] == '+') { ++start; if (start >= end) { return false; } } if (base == 0) { if (end - start >= 2 && start[0] == '0' && (start[1] == 'x' || start[1] == 'X')) { base = 16; start += 2; if (start >= end) { return false; } } else if (end - start >= 1 && start[0] == '0') { base = 8; start += 1; } else { base = 10; } } else if (base == 16) { if (end - start >= 2 && start[0] == '0' && (start[1] == 'x' || start[1] == 'X')) { start += 2; if (start >= end) { return false; } } } else if (base >= 2 && base <= 36) { } else { return false; } *text = absl::string_view(start, static_cast<size_t>(end - start)); *base_ptr = base; return true; } template <typename IntType> struct LookupTables { ABSL_CONST_INIT static const IntType kVmaxOverBase[]; ABSL_CONST_INIT static const IntType kVminOverBase[]; }; #define X_OVER_BASE_INITIALIZER(X) \ { \ 0, 0, X / 2, X / 3, X / 4, X / 5, X / 6, X / 7, X / 8, X / 9, X / 10, \ X / 11, X / 12, X / 13, X / 14, X / 15, X / 16, X / 17, X / 18, \ X / 19, X / 20, X / 21, X / 22, X / 23, X / 24, X / 25, X / 26, \ X / 27, X / 28, X / 29, X / 30, X / 31, X / 32, X / 33, X / 34, \ X / 35, X / 36, \ } template <> ABSL_CONST_INIT const uint128 LookupTables<uint128>::kVmaxOverBase[] = { 0, 0, MakeUint128(9223372036854775807u, 18446744073709551615u), MakeUint128(6148914691236517205u, 6148914691236517205u), MakeUint128(4611686018427387903u, 18446744073709551615u), MakeUint128(3689348814741910323u, 3689348814741910323u), MakeUint128(3074457345618258602u, 12297829382473034410u), MakeUint128(2635249153387078802u, 5270498306774157604u), MakeUint128(2305843009213693951u, 18446744073709551615u), MakeUint128(2049638230412172401u, 14347467612885206812u), MakeUint128(1844674407370955161u, 11068046444225730969u), MakeUint128(1676976733973595601u, 8384883669867978007u), MakeUint128(1537228672809129301u, 6148914691236517205u), MakeUint128(1418980313362273201u, 4256940940086819603u), MakeUint128(1317624576693539401u, 2635249153387078802u), MakeUint128(1229782938247303441u, 1229782938247303441u), MakeUint128(1152921504606846975u, 18446744073709551615u), MakeUint128(1085102592571150095u, 1085102592571150095u), MakeUint128(1024819115206086200u, 16397105843297379214u), MakeUint128(970881267037344821u, 16504981539634861972u), MakeUint128(922337203685477580u, 14757395258967641292u), MakeUint128(878416384462359600u, 14054662151397753612u), MakeUint128(838488366986797800u, 13415813871788764811u), MakeUint128(802032351030850070u, 4812194106185100421u), MakeUint128(768614336404564650u, 12297829382473034410u), MakeUint128(737869762948382064u, 11805916207174113034u), MakeUint128(709490156681136600u, 11351842506898185609u), MakeUint128(683212743470724133u, 17080318586768103348u), MakeUint128(658812288346769700u, 10540996613548315209u), 
MakeUint128(636094623231363848u, 15266270957552732371u), MakeUint128(614891469123651720u, 9838263505978427528u), MakeUint128(595056260442243600u, 9520900167075897608u), MakeUint128(576460752303423487u, 18446744073709551615u), MakeUint128(558992244657865200u, 8943875914525843207u), MakeUint128(542551296285575047u, 9765923333140350855u), MakeUint128(527049830677415760u, 8432797290838652167u), MakeUint128(512409557603043100u, 8198552921648689607u), }; template <> ABSL_CONST_INIT const int128 LookupTables<int128>::kVmaxOverBase[] = { 0, 0, MakeInt128(4611686018427387903, 18446744073709551615u), MakeInt128(3074457345618258602, 12297829382473034410u), MakeInt128(2305843009213693951, 18446744073709551615u), MakeInt128(1844674407370955161, 11068046444225730969u), MakeInt128(1537228672809129301, 6148914691236517205u), MakeInt128(1317624576693539401, 2635249153387078802u), MakeInt128(1152921504606846975, 18446744073709551615u), MakeInt128(1024819115206086200, 16397105843297379214u), MakeInt128(922337203685477580, 14757395258967641292u), MakeInt128(838488366986797800, 13415813871788764811u), MakeInt128(768614336404564650, 12297829382473034410u), MakeInt128(709490156681136600, 11351842506898185609u), MakeInt128(658812288346769700, 10540996613548315209u), MakeInt128(614891469123651720, 9838263505978427528u), MakeInt128(576460752303423487, 18446744073709551615u), MakeInt128(542551296285575047, 9765923333140350855u), MakeInt128(512409557603043100, 8198552921648689607u), MakeInt128(485440633518672410, 17475862806672206794u), MakeInt128(461168601842738790, 7378697629483820646u), MakeInt128(439208192231179800, 7027331075698876806u), MakeInt128(419244183493398900, 6707906935894382405u), MakeInt128(401016175515425035, 2406097053092550210u), MakeInt128(384307168202282325, 6148914691236517205u), MakeInt128(368934881474191032, 5902958103587056517u), MakeInt128(354745078340568300, 5675921253449092804u), MakeInt128(341606371735362066, 17763531330238827482u), MakeInt128(329406144173384850, 5270498306774157604u), MakeInt128(318047311615681924, 7633135478776366185u), MakeInt128(307445734561825860, 4919131752989213764u), MakeInt128(297528130221121800, 4760450083537948804u), MakeInt128(288230376151711743, 18446744073709551615u), MakeInt128(279496122328932600, 4471937957262921603u), MakeInt128(271275648142787523, 14106333703424951235u), MakeInt128(263524915338707880, 4216398645419326083u), MakeInt128(256204778801521550, 4099276460824344803u), }; template <> ABSL_CONST_INIT const int128 LookupTables<int128>::kVminOverBase[] = { 0, 0, MakeInt128(-4611686018427387904, 0u), MakeInt128(-3074457345618258603, 6148914691236517206u), MakeInt128(-2305843009213693952, 0u), MakeInt128(-1844674407370955162, 7378697629483820647u), MakeInt128(-1537228672809129302, 12297829382473034411u), MakeInt128(-1317624576693539402, 15811494920322472814u), MakeInt128(-1152921504606846976, 0u), MakeInt128(-1024819115206086201, 2049638230412172402u), MakeInt128(-922337203685477581, 3689348814741910324u), MakeInt128(-838488366986797801, 5030930201920786805u), MakeInt128(-768614336404564651, 6148914691236517206u), MakeInt128(-709490156681136601, 7094901566811366007u), MakeInt128(-658812288346769701, 7905747460161236407u), MakeInt128(-614891469123651721, 8608480567731124088u), MakeInt128(-576460752303423488, 0u), MakeInt128(-542551296285575048, 8680820740569200761u), MakeInt128(-512409557603043101, 10248191152060862009u), MakeInt128(-485440633518672411, 970881267037344822u), MakeInt128(-461168601842738791, 11068046444225730970u), 
MakeInt128(-439208192231179801, 11419412998010674810u), MakeInt128(-419244183493398901, 11738837137815169211u), MakeInt128(-401016175515425036, 16040647020617001406u), MakeInt128(-384307168202282326, 12297829382473034411u), MakeInt128(-368934881474191033, 12543785970122495099u), MakeInt128(-354745078340568301, 12770822820260458812u), MakeInt128(-341606371735362067, 683212743470724134u), MakeInt128(-329406144173384851, 13176245766935394012u), MakeInt128(-318047311615681925, 10813608594933185431u), MakeInt128(-307445734561825861, 13527612320720337852u), MakeInt128(-297528130221121801, 13686293990171602812u), MakeInt128(-288230376151711744, 0u), MakeInt128(-279496122328932601, 13974806116446630013u), MakeInt128(-271275648142787524, 4340410370284600381u), MakeInt128(-263524915338707881, 14230345428290225533u), MakeInt128(-256204778801521551, 14347467612885206813u), }; template <typename IntType> ABSL_CONST_INIT const IntType LookupTables<IntType>::kVmaxOverBase[] = X_OVER_BASE_INITIALIZER(std::numeric_limits<IntType>::max()); template <typename IntType> ABSL_CONST_INIT const IntType LookupTables<IntType>::kVminOverBase[] = X_OVER_BASE_INITIALIZER(std::numeric_limits<IntType>::min()); #undef X_OVER_BASE_INITIALIZER template <typename IntType> inline bool safe_parse_positive_int(absl::string_view text, int base, absl::Nonnull<IntType*> value_p) { IntType value = 0; const IntType vmax = std::numeric_limits<IntType>::max(); assert(vmax > 0); assert(base >= 0); const IntType base_inttype = static_cast<IntType>(base); assert(vmax >= base_inttype); const IntType vmax_over_base = LookupTables<IntType>::kVmaxOverBase[base]; assert(base < 2 || std::numeric_limits<IntType>::max() / base_inttype == vmax_over_base); const char* start = text.data(); const char* end = start + text.size(); for (; start < end; ++start) { unsigned char c = static_cast<unsigned char>(start[0]); IntType digit = static_cast<IntType>(kAsciiToInt[c]); if (digit >= base_inttype) { *value_p = value; return false; } if (value > vmax_over_base) { *value_p = vmax; return false; } value *= base_inttype; if (value > vmax - digit) { *value_p = vmax; return false; } value += digit; } *value_p = value; return true; } template <typename IntType> inline bool safe_parse_negative_int(absl::string_view text, int base, absl::Nonnull<IntType*> value_p) { IntType value = 0; const IntType vmin = std::numeric_limits<IntType>::min(); assert(vmin < 0); assert(vmin <= 0 - base); IntType vmin_over_base = LookupTables<IntType>::kVminOverBase[base]; assert(base < 2 || std::numeric_limits<IntType>::min() / base == vmin_over_base); if (vmin % base > 0) { vmin_over_base += 1; } const char* start = text.data(); const char* end = start + text.size(); for (; start < end; ++start) { unsigned char c = static_cast<unsigned char>(start[0]); int digit = kAsciiToInt[c]; if (digit >= base) { *value_p = value; return false; } if (value < vmin_over_base) { *value_p = vmin; return false; } value *= base; if (value < vmin + digit) { *value_p = vmin; return false; } value -= digit; } *value_p = value; return true; } template <typename IntType> inline bool safe_int_internal(absl::string_view text, absl::Nonnull<IntType*> value_p, int base) { *value_p = 0; bool negative; if (!safe_parse_sign_and_base(&text, &base, &negative)) { return false; } if (!negative) { return safe_parse_positive_int(text, base, value_p); } else { return safe_parse_negative_int(text, base, value_p); } } template <typename IntType> inline bool safe_uint_internal(absl::string_view text, 
absl::Nonnull<IntType*> value_p, int base) { *value_p = 0; bool negative; if (!safe_parse_sign_and_base(&text, &base, &negative) || negative) { return false; } return safe_parse_positive_int(text, base, value_p); } } namespace numbers_internal { ABSL_CONST_INIT ABSL_DLL const char kHexChar[] = "0123456789abcdef"; ABSL_CONST_INIT ABSL_DLL const char kHexTable[513] = "000102030405060708090a0b0c0d0e0f" "101112131415161718191a1b1c1d1e1f" "202122232425262728292a2b2c2d2e2f" "303132333435363738393a3b3c3d3e3f" "404142434445464748494a4b4c4d4e4f" "505152535455565758595a5b5c5d5e5f" "606162636465666768696a6b6c6d6e6f" "707172737475767778797a7b7c7d7e7f" "808182838485868788898a8b8c8d8e8f" "909192939495969798999a9b9c9d9e9f" "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf" "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf" "e0e1e2e3e4e5e6e7e8e9eaebecedeeef" "f0f1f2f3f4f5f6f7f8f9fafbfcfdfeff"; bool safe_strto32_base(absl::string_view text, absl::Nonnull<int32_t*> value, int base) { return safe_int_internal<int32_t>(text, value, base); } bool safe_strto64_base(absl::string_view text, absl::Nonnull<int64_t*> value, int base) { return safe_int_internal<int64_t>(text, value, base); } bool safe_strto128_base(absl::string_view text, absl::Nonnull<int128*> value, int base) { return safe_int_internal<absl::int128>(text, value, base); } bool safe_strtou32_base(absl::string_view text, absl::Nonnull<uint32_t*> value, int base) { return safe_uint_internal<uint32_t>(text, value, base); } bool safe_strtou64_base(absl::string_view text, absl::Nonnull<uint64_t*> value, int base) { return safe_uint_internal<uint64_t>(text, value, base); } bool safe_strtou128_base(absl::string_view text, absl::Nonnull<uint128*> value, int base) { return safe_uint_internal<absl::uint128>(text, value, base); } } ABSL_NAMESPACE_END }
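A minimal usage sketch of the public entry points that the internal helpers above back (absl::SimpleAtoi, absl::SimpleHexAtoi, absl::SimpleAtod from absl/strings/numbers.h); the build/link setup is assumed and not part of the record:

#include <cstdint>
#include <iostream>

#include "absl/strings/numbers.h"

int main() {
  int32_t i = 0;
  // safe_parse_sign_and_base() above strips leading/trailing ASCII
  // whitespace, so padded input parses; out-of-range input is rejected.
  std::cout << absl::SimpleAtoi(" 42 ", &i) << " " << i << "\n";  // 1 42
  std::cout << absl::SimpleAtoi("2147483648", &i) << "\n";        // 0 (overflows int32_t)

  uint32_t u = 0;
  // SimpleHexAtoi drives the base-16 parser, so a "0x"/"0X" prefix is accepted.
  std::cout << absl::SimpleHexAtoi("0x1234", &u) << " " << u << "\n";  // 1 4660

  double d = 0;
  std::cout << absl::SimpleAtod("-.707", &d) << " " << d << "\n";  // 1 -0.707
  return 0;
}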
#include "absl/strings/numbers.h" #include <sys/types.h> #include <cfenv> #include <cfloat> #include <cinttypes> #include <climits> #include <cmath> #include <cstddef> #include <cstdint> #include <cstdio> #include <cstdlib> #include <cstring> #include <ios> #include <limits> #include <numeric> #include <random> #include <set> #include <string> #include <vector> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/log/log.h" #include "absl/numeric/int128.h" #include "absl/random/distributions.h" #include "absl/random/random.h" #include "absl/strings/internal/numbers_test_common.h" #include "absl/strings/internal/ostringstream.h" #include "absl/strings/internal/pow10_helper.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" namespace { using absl::SimpleAtoi; using absl::SimpleHexAtoi; using absl::numbers_internal::kSixDigitsToBufferSize; using absl::numbers_internal::safe_strto32_base; using absl::numbers_internal::safe_strto64_base; using absl::numbers_internal::safe_strtou32_base; using absl::numbers_internal::safe_strtou64_base; using absl::numbers_internal::SixDigitsToBuffer; using absl::strings_internal::Itoa; using absl::strings_internal::strtouint32_test_cases; using absl::strings_internal::strtouint64_test_cases; using testing::Eq; using testing::MatchesRegex; using testing::Pointee; const int kFloatNumCases = 5000000; std::string PerfectDtoa(double d) { if (d == 0) return "0"; if (d < 0) return "-" + PerfectDtoa(-d); int64_t mantissa, exp = 0; while (d >= 1ULL << 63) ++exp, d *= 0.5; while ((mantissa = d) != d) --exp, d *= 2.0; constexpr int maxlen = 1100; char buf[maxlen + 5]; for (int64_t num = mantissa, pos = maxlen; --pos >= 0;) { buf[pos] = '0' + (num % 10); num /= 10; } char* begin = &buf[0]; char* end = buf + maxlen; for (int i = 0; i != exp; i += (exp > 0) ? 1 : -1) { int carry = 0; for (char* p = end; --p != begin;) { int dig = *p - '0'; dig = dig * (exp > 0 ? 
2 : 5) + carry; carry = dig / 10; dig %= 10; *p = '0' + dig; } } if (exp < 0) { memmove(end + 1 + exp, end + exp, 1 - exp); end[exp] = '.'; ++end; } while (*begin == '0' && begin[1] != '.') ++begin; return {begin, end}; } TEST(ToString, PerfectDtoa) { EXPECT_THAT(PerfectDtoa(1), Eq("1")); EXPECT_THAT(PerfectDtoa(0.1), Eq("0.1000000000000000055511151231257827021181583404541015625")); EXPECT_THAT(PerfectDtoa(1e24), Eq("999999999999999983222784")); EXPECT_THAT(PerfectDtoa(5e-324), MatchesRegex("0.0000.*625")); for (int i = 0; i < 100; ++i) { for (double multiplier : {1e-300, 1e-200, 1e-100, 0.1, 1.0, 10.0, 1e100, 1e300}) { double d = multiplier * i; std::string s = PerfectDtoa(d); EXPECT_DOUBLE_EQ(d, strtod(s.c_str(), nullptr)); } } } template <typename integer> struct MyInteger { integer i; explicit constexpr MyInteger(integer i) : i(i) {} constexpr operator integer() const { return i; } constexpr MyInteger operator+(MyInteger other) const { return i + other.i; } constexpr MyInteger operator-(MyInteger other) const { return i - other.i; } constexpr MyInteger operator*(MyInteger other) const { return i * other.i; } constexpr MyInteger operator/(MyInteger other) const { return i / other.i; } constexpr bool operator<(MyInteger other) const { return i < other.i; } constexpr bool operator<=(MyInteger other) const { return i <= other.i; } constexpr bool operator==(MyInteger other) const { return i == other.i; } constexpr bool operator>=(MyInteger other) const { return i >= other.i; } constexpr bool operator>(MyInteger other) const { return i > other.i; } constexpr bool operator!=(MyInteger other) const { return i != other.i; } integer as_integer() const { return i; } }; typedef MyInteger<int64_t> MyInt64; typedef MyInteger<uint64_t> MyUInt64; void CheckInt32(int32_t x) { char buffer[absl::numbers_internal::kFastToBufferSize]; char* actual = absl::numbers_internal::FastIntToBuffer(x, buffer); std::string expected = std::to_string(x); EXPECT_EQ(expected, std::string(buffer, actual)) << " Input " << x; char* generic_actual = absl::numbers_internal::FastIntToBuffer(x, buffer); EXPECT_EQ(expected, std::string(buffer, generic_actual)) << " Input " << x; } void CheckInt64(int64_t x) { char buffer[absl::numbers_internal::kFastToBufferSize + 3]; buffer[0] = '*'; buffer[23] = '*'; buffer[24] = '*'; char* actual = absl::numbers_internal::FastIntToBuffer(x, &buffer[1]); std::string expected = std::to_string(x); EXPECT_EQ(expected, std::string(&buffer[1], actual)) << " Input " << x; EXPECT_EQ(buffer[0], '*'); EXPECT_EQ(buffer[23], '*'); EXPECT_EQ(buffer[24], '*'); char* my_actual = absl::numbers_internal::FastIntToBuffer(MyInt64(x), &buffer[1]); EXPECT_EQ(expected, std::string(&buffer[1], my_actual)) << " Input " << x; } void CheckUInt32(uint32_t x) { char buffer[absl::numbers_internal::kFastToBufferSize]; char* actual = absl::numbers_internal::FastIntToBuffer(x, buffer); std::string expected = std::to_string(x); EXPECT_EQ(expected, std::string(buffer, actual)) << " Input " << x; char* generic_actual = absl::numbers_internal::FastIntToBuffer(x, buffer); EXPECT_EQ(expected, std::string(buffer, generic_actual)) << " Input " << x; } void CheckUInt64(uint64_t x) { char buffer[absl::numbers_internal::kFastToBufferSize + 1]; char* actual = absl::numbers_internal::FastIntToBuffer(x, &buffer[1]); std::string expected = std::to_string(x); EXPECT_EQ(expected, std::string(&buffer[1], actual)) << " Input " << x; char* generic_actual = absl::numbers_internal::FastIntToBuffer(x, &buffer[1]); EXPECT_EQ(expected, 
std::string(&buffer[1], generic_actual)) << " Input " << x; char* my_actual = absl::numbers_internal::FastIntToBuffer(MyUInt64(x), &buffer[1]); EXPECT_EQ(expected, std::string(&buffer[1], my_actual)) << " Input " << x; } void CheckHex64(uint64_t v) { char expected[16 + 1]; std::string actual = absl::StrCat(absl::Hex(v, absl::kZeroPad16)); snprintf(expected, sizeof(expected), "%016" PRIx64, static_cast<uint64_t>(v)); EXPECT_EQ(expected, actual) << " Input " << v; actual = absl::StrCat(absl::Hex(v, absl::kSpacePad16)); snprintf(expected, sizeof(expected), "%16" PRIx64, static_cast<uint64_t>(v)); EXPECT_EQ(expected, actual) << " Input " << v; } TEST(Numbers, TestFastPrints) { for (int i = -100; i <= 100; i++) { CheckInt32(i); CheckInt64(i); } for (int i = 0; i <= 100; i++) { CheckUInt32(i); CheckUInt64(i); } CheckInt32(INT_MIN); CheckInt32(INT_MAX); CheckInt64(LONG_MIN); CheckInt64(uint64_t{1000000000}); CheckInt64(uint64_t{9999999999}); CheckInt64(uint64_t{100000000000000}); CheckInt64(uint64_t{999999999999999}); CheckInt64(uint64_t{1000000000000000000}); CheckInt64(uint64_t{1199999999999999999}); CheckInt64(int64_t{-700000000000000000}); CheckInt64(LONG_MAX); CheckUInt32(std::numeric_limits<uint32_t>::max()); CheckUInt64(uint64_t{1000000000}); CheckUInt64(uint64_t{9999999999}); CheckUInt64(uint64_t{100000000000000}); CheckUInt64(uint64_t{999999999999999}); CheckUInt64(uint64_t{1000000000000000000}); CheckUInt64(uint64_t{1199999999999999999}); CheckUInt64(std::numeric_limits<uint64_t>::max()); for (int i = 0; i < 10000; i++) { CheckHex64(i); } CheckHex64(uint64_t{0x123456789abcdef0}); } template <typename int_type, typename in_val_type> void VerifySimpleAtoiGood(in_val_type in_value, int_type exp_value) { std::string s; absl::strings_internal::OStringStream(&s) << in_value; int_type x = static_cast<int_type>(~exp_value); EXPECT_TRUE(SimpleAtoi(s, &x)) << "in_value=" << in_value << " s=" << s << " x=" << x; EXPECT_EQ(exp_value, x); x = static_cast<int_type>(~exp_value); EXPECT_TRUE(SimpleAtoi(s.c_str(), &x)); EXPECT_EQ(exp_value, x); } template <typename int_type, typename in_val_type> void VerifySimpleAtoiBad(in_val_type in_value) { std::string s; absl::strings_internal::OStringStream(&s) << in_value; int_type x; EXPECT_FALSE(SimpleAtoi(s, &x)); EXPECT_FALSE(SimpleAtoi(s.c_str(), &x)); } TEST(NumbersTest, Atoi) { VerifySimpleAtoiGood<int32_t>(0, 0); VerifySimpleAtoiGood<int32_t>(42, 42); VerifySimpleAtoiGood<int32_t>(-42, -42); VerifySimpleAtoiGood<int32_t>(std::numeric_limits<int32_t>::min(), std::numeric_limits<int32_t>::min()); VerifySimpleAtoiGood<int32_t>(std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::max()); VerifySimpleAtoiGood<uint32_t>(0, 0); VerifySimpleAtoiGood<uint32_t>(42, 42); VerifySimpleAtoiBad<uint32_t>(-42); VerifySimpleAtoiBad<uint32_t>(std::numeric_limits<int32_t>::min()); VerifySimpleAtoiGood<uint32_t>(std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::max()); VerifySimpleAtoiGood<uint32_t>(std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint32_t>::max()); VerifySimpleAtoiBad<uint32_t>(std::numeric_limits<int64_t>::min()); VerifySimpleAtoiBad<uint32_t>(std::numeric_limits<int64_t>::max()); VerifySimpleAtoiBad<uint32_t>(std::numeric_limits<uint64_t>::max()); VerifySimpleAtoiGood<int64_t>(0, 0); VerifySimpleAtoiGood<int64_t>(42, 42); VerifySimpleAtoiGood<int64_t>(-42, -42); VerifySimpleAtoiGood<int64_t>(std::numeric_limits<int32_t>::min(), std::numeric_limits<int32_t>::min()); 
VerifySimpleAtoiGood<int64_t>(std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::max()); VerifySimpleAtoiGood<int64_t>(std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint32_t>::max()); VerifySimpleAtoiGood<int64_t>(std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::min()); VerifySimpleAtoiGood<int64_t>(std::numeric_limits<int64_t>::max(), std::numeric_limits<int64_t>::max()); VerifySimpleAtoiBad<int64_t>(std::numeric_limits<uint64_t>::max()); VerifySimpleAtoiGood<uint64_t>(0, 0); VerifySimpleAtoiGood<uint64_t>(42, 42); VerifySimpleAtoiBad<uint64_t>(-42); VerifySimpleAtoiBad<uint64_t>(std::numeric_limits<int32_t>::min()); VerifySimpleAtoiGood<uint64_t>(std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::max()); VerifySimpleAtoiGood<uint64_t>(std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint32_t>::max()); VerifySimpleAtoiBad<uint64_t>(std::numeric_limits<int64_t>::min()); VerifySimpleAtoiGood<uint64_t>(std::numeric_limits<int64_t>::max(), std::numeric_limits<int64_t>::max()); VerifySimpleAtoiGood<uint64_t>(std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max()); VerifySimpleAtoiGood<absl::uint128>(0, 0); VerifySimpleAtoiGood<absl::uint128>(42, 42); VerifySimpleAtoiBad<absl::uint128>(-42); VerifySimpleAtoiBad<absl::uint128>(std::numeric_limits<int32_t>::min()); VerifySimpleAtoiGood<absl::uint128>(std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::max()); VerifySimpleAtoiGood<absl::uint128>(std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint32_t>::max()); VerifySimpleAtoiBad<absl::uint128>(std::numeric_limits<int64_t>::min()); VerifySimpleAtoiGood<absl::uint128>(std::numeric_limits<int64_t>::max(), std::numeric_limits<int64_t>::max()); VerifySimpleAtoiGood<absl::uint128>(std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max()); VerifySimpleAtoiGood<absl::uint128>( std::numeric_limits<absl::uint128>::max(), std::numeric_limits<absl::uint128>::max()); VerifySimpleAtoiGood<absl::int128>(0, 0); VerifySimpleAtoiGood<absl::int128>(42, 42); VerifySimpleAtoiGood<absl::int128>(-42, -42); VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int32_t>::min(), std::numeric_limits<int32_t>::min()); VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::max()); VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint32_t>::max()); VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::min()); VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<int64_t>::max(), std::numeric_limits<int64_t>::max()); VerifySimpleAtoiGood<absl::int128>(std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max()); VerifySimpleAtoiGood<absl::int128>( std::numeric_limits<absl::int128>::min(), std::numeric_limits<absl::int128>::min()); VerifySimpleAtoiGood<absl::int128>( std::numeric_limits<absl::int128>::max(), std::numeric_limits<absl::int128>::max()); VerifySimpleAtoiBad<absl::int128>(std::numeric_limits<absl::uint128>::max()); VerifySimpleAtoiGood<int>(-42, -42); VerifySimpleAtoiGood<int32_t>(-42, -42); VerifySimpleAtoiGood<uint32_t>(42, 42); VerifySimpleAtoiGood<unsigned int>(42, 42); VerifySimpleAtoiGood<int64_t>(-42, -42); VerifySimpleAtoiGood<long>(-42, -42); VerifySimpleAtoiGood<uint64_t>(42, 42); VerifySimpleAtoiGood<size_t>(42, 42); VerifySimpleAtoiGood<std::string::size_type>(42, 42); } TEST(NumbersTest, Atod) { #if 
!defined(DBL_TRUE_MIN) static constexpr double DBL_TRUE_MIN = 4.940656458412465441765687928682213723650598026143247644255856825e-324; #endif #if !defined(FLT_TRUE_MIN) static constexpr float FLT_TRUE_MIN = 1.401298464324817070923729583289916131280261941876515771757068284e-45f; #endif double d; float f; EXPECT_TRUE(absl::SimpleAtod("NaN", &d)); EXPECT_TRUE(std::isnan(d)); EXPECT_TRUE(absl::SimpleAtod("nAN", &d)); EXPECT_TRUE(std::isnan(d)); EXPECT_TRUE(absl::SimpleAtod("-nan", &d)); EXPECT_TRUE(std::isnan(d)); EXPECT_TRUE(absl::SimpleAtod("inf", &d)); EXPECT_TRUE(std::isinf(d) && (d > 0)); EXPECT_TRUE(absl::SimpleAtod("+Infinity", &d)); EXPECT_TRUE(std::isinf(d) && (d > 0)); EXPECT_TRUE(absl::SimpleAtod("-INF", &d)); EXPECT_TRUE(std::isinf(d) && (d < 0)); EXPECT_TRUE(absl::SimpleAtod("1.7976931348623157e+308", &d)); EXPECT_EQ(d, 1.7976931348623157e+308); EXPECT_TRUE(absl::SimpleAtod("5e308", &d)); EXPECT_TRUE(std::isinf(d) && (d > 0)); EXPECT_TRUE(absl::SimpleAtof("3.4028234663852886e+38", &f)); EXPECT_EQ(f, 3.4028234663852886e+38f); EXPECT_TRUE(absl::SimpleAtof("7e38", &f)); EXPECT_TRUE(std::isinf(f) && (f > 0)); EXPECT_TRUE(absl::SimpleAtod("1e308", &d)); EXPECT_EQ(d, 1e308); EXPECT_FALSE(std::isinf(d)); EXPECT_TRUE(absl::SimpleAtod("1e309", &d)); EXPECT_TRUE(std::isinf(d)); EXPECT_TRUE(absl::SimpleAtof("1e38", &f)); EXPECT_EQ(f, 1e38f); EXPECT_FALSE(std::isinf(f)); EXPECT_TRUE(absl::SimpleAtof("1e39", &f)); EXPECT_TRUE(std::isinf(f)); EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e307", &d)); EXPECT_EQ(d, 9.999999999999999999e307); EXPECT_FALSE(std::isinf(d)); EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e308", &d)); EXPECT_TRUE(std::isinf(d)); EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e37", &f)); EXPECT_EQ(f, 9.999999999999999999e37f); EXPECT_FALSE(std::isinf(f)); EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e38", &f)); EXPECT_TRUE(std::isinf(f)); EXPECT_TRUE(absl::SimpleAtod("2.2250738585072014e-308", &d)); EXPECT_EQ(d, 2.2250738585072014e-308); EXPECT_TRUE(absl::SimpleAtod("4.9406564584124654e-324", &d)); EXPECT_EQ(d, 4.9406564584124654e-324); EXPECT_TRUE(absl::SimpleAtod("4.9406564584124654e-325", &d)); EXPECT_EQ(d, 0); EXPECT_TRUE(absl::SimpleAtof("1.1754943508222875e-38", &f)); EXPECT_EQ(f, 1.1754943508222875e-38f); EXPECT_TRUE(absl::SimpleAtof("1.4012984643248171e-45", &f)); EXPECT_EQ(f, 1.4012984643248171e-45f); EXPECT_TRUE(absl::SimpleAtof("1.4012984643248171e-46", &f)); EXPECT_EQ(f, 0); EXPECT_TRUE(absl::SimpleAtod("1e-307", &d)); EXPECT_EQ(d, 1e-307); EXPECT_GE(d, DBL_MIN); EXPECT_LT(d, DBL_MIN * 10); EXPECT_TRUE(absl::SimpleAtod("1e-323", &d)); EXPECT_EQ(d, 1e-323); EXPECT_GE(d, DBL_TRUE_MIN); EXPECT_LT(d, DBL_TRUE_MIN * 10); EXPECT_TRUE(absl::SimpleAtod("1e-324", &d)); EXPECT_EQ(d, 0); EXPECT_TRUE(absl::SimpleAtof("1e-37", &f)); EXPECT_EQ(f, 1e-37f); EXPECT_GE(f, FLT_MIN); EXPECT_LT(f, FLT_MIN * 10); EXPECT_TRUE(absl::SimpleAtof("1e-45", &f)); EXPECT_EQ(f, 1e-45f); EXPECT_GE(f, FLT_TRUE_MIN); EXPECT_LT(f, FLT_TRUE_MIN * 10); EXPECT_TRUE(absl::SimpleAtof("1e-46", &f)); EXPECT_EQ(f, 0); EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-308", &d)); EXPECT_EQ(d, 9.999999999999999999e-308); EXPECT_GE(d, DBL_MIN); EXPECT_LT(d, DBL_MIN * 10); EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-324", &d)); EXPECT_EQ(d, 9.999999999999999999e-324); EXPECT_GE(d, DBL_TRUE_MIN); EXPECT_LT(d, DBL_TRUE_MIN * 10); EXPECT_TRUE(absl::SimpleAtod("9.999999999999999999e-325", &d)); EXPECT_EQ(d, 0); EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-38", &f)); 
EXPECT_EQ(f, 9.999999999999999999e-38f); EXPECT_GE(f, FLT_MIN); EXPECT_LT(f, FLT_MIN * 10); EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-46", &f)); EXPECT_EQ(f, 9.999999999999999999e-46f); EXPECT_GE(f, FLT_TRUE_MIN); EXPECT_LT(f, FLT_TRUE_MIN * 10); EXPECT_TRUE(absl::SimpleAtof("9.999999999999999999e-47", &f)); EXPECT_EQ(f, 0); EXPECT_TRUE(absl::SimpleAtod(" \t\r\n 2.718", &d)); EXPECT_EQ(d, 2.718); EXPECT_TRUE(absl::SimpleAtod(" 3.141 ", &d)); EXPECT_EQ(d, 3.141); EXPECT_FALSE(absl::SimpleAtod("n 0", &d)); EXPECT_FALSE(absl::SimpleAtod("0n ", &d)); EXPECT_TRUE(absl::SimpleAtod("000123", &d)); EXPECT_EQ(d, 123); EXPECT_TRUE(absl::SimpleAtod("000.456", &d)); EXPECT_EQ(d, 0.456); EXPECT_TRUE(absl::SimpleAtod(".5", &d)); EXPECT_EQ(d, 0.5); EXPECT_TRUE(absl::SimpleAtod("-.707", &d)); EXPECT_EQ(d, -0.707); EXPECT_TRUE(absl::SimpleAtod("+6.0221408e+23", &d)); EXPECT_EQ(d, 6.0221408e+23); EXPECT_FALSE(absl::SimpleAtod("123_456", &d)); EXPECT_TRUE(absl::SimpleAtod("8.9", &d)); EXPECT_FALSE(absl::SimpleAtod("8,9", &d)); EXPECT_TRUE(absl::SimpleAtod("4503599627370497.5", &d)); EXPECT_EQ(d, 4503599627370497.5); EXPECT_TRUE(absl::SimpleAtod("1e+23", &d)); EXPECT_EQ(d, 1e+23); EXPECT_TRUE(absl::SimpleAtod("9223372036854775807", &d)); EXPECT_EQ(d, 9223372036854775807); EXPECT_TRUE(absl::SimpleAtof("0.0625", &f)); EXPECT_EQ(f, 0.0625f); EXPECT_TRUE(absl::SimpleAtof("20040229.0", &f)); EXPECT_EQ(f, 20040229.0f); EXPECT_TRUE(absl::SimpleAtof("2147483647.0", &f)); EXPECT_EQ(f, 2147483647.0f); EXPECT_TRUE(absl::SimpleAtod("122.416294033786585", &d)); EXPECT_EQ(d, 122.416294033786585); EXPECT_TRUE(absl::SimpleAtof("122.416294033786585", &f)); EXPECT_EQ(f, 122.416294033786585f); } TEST(NumbersTest, Prefixes) { double d; EXPECT_FALSE(absl::SimpleAtod("++1", &d)); EXPECT_FALSE(absl::SimpleAtod("+-1", &d)); EXPECT_FALSE(absl::SimpleAtod("-+1", &d)); EXPECT_FALSE(absl::SimpleAtod("--1", &d)); EXPECT_TRUE(absl::SimpleAtod("-1", &d)); EXPECT_EQ(d, -1.); EXPECT_TRUE(absl::SimpleAtod("+1", &d)); EXPECT_EQ(d, +1.); float f; EXPECT_FALSE(absl::SimpleAtof("++1", &f)); EXPECT_FALSE(absl::SimpleAtof("+-1", &f)); EXPECT_FALSE(absl::SimpleAtof("-+1", &f)); EXPECT_FALSE(absl::SimpleAtof("--1", &f)); EXPECT_TRUE(absl::SimpleAtof("-1", &f)); EXPECT_EQ(f, -1.f); EXPECT_TRUE(absl::SimpleAtof("+1", &f)); EXPECT_EQ(f, +1.f); } TEST(NumbersTest, Atoenum) { enum E01 { E01_zero = 0, E01_one = 1, }; VerifySimpleAtoiGood<E01>(E01_zero, E01_zero); VerifySimpleAtoiGood<E01>(E01_one, E01_one); enum E_101 { E_101_minusone = -1, E_101_zero = 0, E_101_one = 1, }; VerifySimpleAtoiGood<E_101>(E_101_minusone, E_101_minusone); VerifySimpleAtoiGood<E_101>(E_101_zero, E_101_zero); VerifySimpleAtoiGood<E_101>(E_101_one, E_101_one); enum E_bigint { E_bigint_zero = 0, E_bigint_one = 1, E_bigint_max31 = static_cast<int32_t>(0x7FFFFFFF), }; VerifySimpleAtoiGood<E_bigint>(E_bigint_zero, E_bigint_zero); VerifySimpleAtoiGood<E_bigint>(E_bigint_one, E_bigint_one); VerifySimpleAtoiGood<E_bigint>(E_bigint_max31, E_bigint_max31); enum E_fullint { E_fullint_zero = 0, E_fullint_one = 1, E_fullint_max31 = static_cast<int32_t>(0x7FFFFFFF), E_fullint_min32 = INT32_MIN, }; VerifySimpleAtoiGood<E_fullint>(E_fullint_zero, E_fullint_zero); VerifySimpleAtoiGood<E_fullint>(E_fullint_one, E_fullint_one); VerifySimpleAtoiGood<E_fullint>(E_fullint_max31, E_fullint_max31); VerifySimpleAtoiGood<E_fullint>(E_fullint_min32, E_fullint_min32); enum E_biguint { E_biguint_zero = 0, E_biguint_one = 1, E_biguint_max31 = static_cast<uint32_t>(0x7FFFFFFF), E_biguint_max32 = 
static_cast<uint32_t>(0xFFFFFFFF), }; VerifySimpleAtoiGood<E_biguint>(E_biguint_zero, E_biguint_zero); VerifySimpleAtoiGood<E_biguint>(E_biguint_one, E_biguint_one); VerifySimpleAtoiGood<E_biguint>(E_biguint_max31, E_biguint_max31); VerifySimpleAtoiGood<E_biguint>(E_biguint_max32, E_biguint_max32); } template <typename int_type, typename in_val_type> void VerifySimpleHexAtoiGood(in_val_type in_value, int_type exp_value) { std::string s; absl::strings_internal::OStringStream strm(&s); if (in_value >= 0) { strm << std::hex << in_value; } else { strm << "-" << std::hex << -absl::uint128(in_value); } int_type x = static_cast<int_type>(~exp_value); EXPECT_TRUE(SimpleHexAtoi(s, &x)) << "in_value=" << std::hex << in_value << " s=" << s << " x=" << x; EXPECT_EQ(exp_value, x); x = static_cast<int_type>(~exp_value); EXPECT_TRUE(SimpleHexAtoi( s.c_str(), &x)); EXPECT_EQ(exp_value, x); } template <typename int_type, typename in_val_type> void VerifySimpleHexAtoiBad(in_val_type in_value) { std::string s; absl::strings_internal::OStringStream strm(&s); if (in_value >= 0) { strm << std::hex << in_value; } else { strm << "-" << std::hex << -absl::uint128(in_value); } int_type x; EXPECT_FALSE(SimpleHexAtoi(s, &x)); EXPECT_FALSE(SimpleHexAtoi( s.c_str(), &x)); } TEST(NumbersTest, HexAtoi) { VerifySimpleHexAtoiGood<int32_t>(0, 0); VerifySimpleHexAtoiGood<int32_t>(0x42, 0x42); VerifySimpleHexAtoiGood<int32_t>(-0x42, -0x42); VerifySimpleHexAtoiGood<int32_t>(std::numeric_limits<int32_t>::min(), std::numeric_limits<int32_t>::min()); VerifySimpleHexAtoiGood<int32_t>(std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::max()); VerifySimpleHexAtoiGood<uint32_t>(0, 0); VerifySimpleHexAtoiGood<uint32_t>(0x42, 0x42); VerifySimpleHexAtoiBad<uint32_t>(-0x42); VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<int32_t>::min()); VerifySimpleHexAtoiGood<uint32_t>(std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::max()); VerifySimpleHexAtoiGood<uint32_t>(std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint32_t>::max()); VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<int64_t>::min()); VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<int64_t>::max()); VerifySimpleHexAtoiBad<uint32_t>(std::numeric_limits<uint64_t>::max()); VerifySimpleHexAtoiGood<int64_t>(0, 0); VerifySimpleHexAtoiGood<int64_t>(0x42, 0x42); VerifySimpleHexAtoiGood<int64_t>(-0x42, -0x42); VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int32_t>::min(), std::numeric_limits<int32_t>::min()); VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::max()); VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint32_t>::max()); VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::min()); VerifySimpleHexAtoiGood<int64_t>(std::numeric_limits<int64_t>::max(), std::numeric_limits<int64_t>::max()); VerifySimpleHexAtoiBad<int64_t>(std::numeric_limits<uint64_t>::max()); VerifySimpleHexAtoiGood<uint64_t>(0, 0); VerifySimpleHexAtoiGood<uint64_t>(0x42, 0x42); VerifySimpleHexAtoiBad<uint64_t>(-0x42); VerifySimpleHexAtoiBad<uint64_t>(std::numeric_limits<int32_t>::min()); VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::max()); VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint32_t>::max()); VerifySimpleHexAtoiBad<uint64_t>(std::numeric_limits<int64_t>::min()); 
VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<int64_t>::max(), std::numeric_limits<int64_t>::max()); VerifySimpleHexAtoiGood<uint64_t>(std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max()); VerifySimpleHexAtoiGood<absl::uint128>(0, 0); VerifySimpleHexAtoiGood<absl::uint128>(0x42, 0x42); VerifySimpleHexAtoiBad<absl::uint128>(-0x42); VerifySimpleHexAtoiBad<absl::uint128>(std::numeric_limits<int32_t>::min()); VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<int32_t>::max(), std::numeric_limits<int32_t>::max()); VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<uint32_t>::max(), std::numeric_limits<uint32_t>::max()); VerifySimpleHexAtoiBad<absl::uint128>(std::numeric_limits<int64_t>::min()); VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<int64_t>::max(), std::numeric_limits<int64_t>::max()); VerifySimpleHexAtoiGood<absl::uint128>(std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max()); VerifySimpleHexAtoiGood<absl::uint128>( std::numeric_limits<absl::uint128>::max(), std::numeric_limits<absl::uint128>::max()); VerifySimpleHexAtoiGood<int>(-0x42, -0x42); VerifySimpleHexAtoiGood<int32_t>(-0x42, -0x42); VerifySimpleHexAtoiGood<uint32_t>(0x42, 0x42); VerifySimpleHexAtoiGood<unsigned int>(0x42, 0x42); VerifySimpleHexAtoiGood<int64_t>(-0x42, -0x42); VerifySimpleHexAtoiGood<long>(-0x42, -0x42); VerifySimpleHexAtoiGood<uint64_t>(0x42, 0x42); VerifySimpleHexAtoiGood<size_t>(0x42, 0x42); VerifySimpleHexAtoiGood<std::string::size_type>(0x42, 0x42); int32_t value; EXPECT_TRUE(safe_strto32_base("0x34234324", &value, 16)); EXPECT_EQ(0x34234324, value); EXPECT_TRUE(safe_strto32_base("0X34234324", &value, 16)); EXPECT_EQ(0x34234324, value); EXPECT_TRUE(safe_strto32_base(" \t\n 34234324", &value, 16)); EXPECT_EQ(0x34234324, value); EXPECT_TRUE(safe_strto32_base("34234324 \t\n ", &value, 16)); EXPECT_EQ(0x34234324, value); } TEST(stringtest, safe_strto32_base) { int32_t value; EXPECT_TRUE(safe_strto32_base("0x34234324", &value, 16)); EXPECT_EQ(0x34234324, value); EXPECT_TRUE(safe_strto32_base("0X34234324", &value, 16)); EXPECT_EQ(0x34234324, value); EXPECT_TRUE(safe_strto32_base("34234324", &value, 16)); EXPECT_EQ(0x34234324, value); EXPECT_TRUE(safe_strto32_base("0", &value, 16)); EXPECT_EQ(0, value); EXPECT_TRUE(safe_strto32_base(" \t\n -0x34234324", &value, 16)); EXPECT_EQ(-0x34234324, value); EXPECT_TRUE(safe_strto32_base(" \t\n -34234324", &value, 16)); EXPECT_EQ(-0x34234324, value); EXPECT_TRUE(safe_strto32_base("7654321", &value, 8)); EXPECT_EQ(07654321, value); EXPECT_TRUE(safe_strto32_base("-01234", &value, 8)); EXPECT_EQ(-01234, value); EXPECT_FALSE(safe_strto32_base("1834", &value, 8)); EXPECT_TRUE(safe_strto32_base("0", &value, 0)); EXPECT_EQ(0, value); EXPECT_TRUE(safe_strto32_base("077", &value, 0)); EXPECT_EQ(077, value); EXPECT_FALSE(safe_strto32_base("088", &value, 0)); EXPECT_FALSE(safe_strto32_base("0xG", &value, 0)); EXPECT_TRUE(safe_strto32_base("34234324", &value, 10)); EXPECT_EQ(34234324, value); EXPECT_TRUE(safe_strto32_base("0", &value, 10)); EXPECT_EQ(0, value); EXPECT_TRUE(safe_strto32_base(" \t\n -34234324", &value, 10)); EXPECT_EQ(-34234324, value); EXPECT_TRUE(safe_strto32_base("34234324 \n\t ", &value, 10)); EXPECT_EQ(34234324, value); EXPECT_FALSE(safe_strto32_base("", &value, 10)); EXPECT_FALSE(safe_strto32_base(" ", &value, 10)); EXPECT_FALSE(safe_strto32_base("abc", &value, 10)); EXPECT_FALSE(safe_strto32_base("34234324a", &value, 10)); EXPECT_FALSE(safe_strto32_base("34234.3", &value, 10)); 
EXPECT_FALSE(safe_strto32_base("2147483648", &value, 10)); EXPECT_FALSE(safe_strto32_base("-2147483649", &value, 10)); EXPECT_TRUE(safe_strto32_base(std::string("0x1234"), &value, 16)); EXPECT_EQ(0x1234, value); EXPECT_TRUE(safe_strto32_base("1234", &value, 10)); EXPECT_EQ(1234, value); } TEST(stringtest, safe_strto32_range) { int32_t value; EXPECT_FALSE(safe_strto32_base("2147483648", &value, 10)); EXPECT_EQ(std::numeric_limits<int32_t>::max(), value); EXPECT_TRUE(safe_strto32_base("-2147483648", &value, 10)); EXPECT_EQ(std::numeric_limits<int32_t>::min(), value); EXPECT_FALSE(safe_strto32_base("-2147483649", &value, 10)); EXPECT_EQ(std::numeric_limits<int32_t>::min(), value); } TEST(stringtest, safe_strto64_range) { int64_t value; EXPECT_FALSE(safe_strto64_base("9223372036854775808", &value, 10)); EXPECT_EQ(std::numeric_limits<int64_t>::max(), value); EXPECT_TRUE(safe_strto64_base("-9223372036854775808", &value, 10)); EXPECT_EQ(std::numeric_limits<int64_t>::min(), value); EXPECT_FALSE(safe_strto64_base("-9223372036854775809", &value, 10)); EXPECT_EQ(std::numeric_limits<int64_t>::min(), value); } TEST(stringtest, safe_strto32_leading_substring) { int32_t value; EXPECT_FALSE(safe_strto32_base("04069@@@", &value, 10)); EXPECT_EQ(4069, value); EXPECT_FALSE(safe_strto32_base("04069@@@", &value, 8)); EXPECT_EQ(0406, value); EXPECT_FALSE(safe_strto32_base("04069balloons", &value, 10)); EXPECT_EQ(4069, value); EXPECT_FALSE(safe_strto32_base("04069balloons", &value, 16)); EXPECT_EQ(0x4069ba, value); EXPECT_FALSE(safe_strto32_base("@@@", &value, 10)); EXPECT_EQ(0, value); } TEST(stringtest, safe_strto64_leading_substring) { int64_t value; EXPECT_FALSE(safe_strto64_base("04069@@@", &value, 10)); EXPECT_EQ(4069, value); EXPECT_FALSE(safe_strto64_base("04069@@@", &value, 8)); EXPECT_EQ(0406, value); EXPECT_FALSE(safe_strto64_base("04069balloons", &value, 10)); EXPECT_EQ(4069, value); EXPECT_FALSE(safe_strto64_base("04069balloons", &value, 16)); EXPECT_EQ(0x4069ba, value); EXPECT_FALSE(safe_strto64_base("@@@", &value, 10)); EXPECT_EQ(0, value); } TEST(stringtest, safe_strto64_base) { int64_t value; EXPECT_TRUE(safe_strto64_base("0x3423432448783446", &value, 16)); EXPECT_EQ(int64_t{0x3423432448783446}, value); EXPECT_TRUE(safe_strto64_base("3423432448783446", &value, 16)); EXPECT_EQ(int64_t{0x3423432448783446}, value); EXPECT_TRUE(safe_strto64_base("0", &value, 16)); EXPECT_EQ(0, value); EXPECT_TRUE(safe_strto64_base(" \t\n -0x3423432448783446", &value, 16)); EXPECT_EQ(int64_t{-0x3423432448783446}, value); EXPECT_TRUE(safe_strto64_base(" \t\n -3423432448783446", &value, 16)); EXPECT_EQ(int64_t{-0x3423432448783446}, value); EXPECT_TRUE(safe_strto64_base("123456701234567012", &value, 8)); EXPECT_EQ(int64_t{0123456701234567012}, value); EXPECT_TRUE(safe_strto64_base("-017777777777777", &value, 8)); EXPECT_EQ(int64_t{-017777777777777}, value); EXPECT_FALSE(safe_strto64_base("19777777777777", &value, 8)); EXPECT_TRUE(safe_strto64_base("0", &value, 0)); EXPECT_EQ(0, value); EXPECT_TRUE(safe_strto64_base("077", &value, 0)); EXPECT_EQ(077, value); EXPECT_FALSE(safe_strto64_base("088", &value, 0)); EXPECT_FALSE(safe_strto64_base("0xG", &value, 0)); EXPECT_TRUE(safe_strto64_base("34234324487834466", &value, 10)); EXPECT_EQ(int64_t{34234324487834466}, value); EXPECT_TRUE(safe_strto64_base("0", &value, 10)); EXPECT_EQ(0, value); EXPECT_TRUE(safe_strto64_base(" \t\n -34234324487834466", &value, 10)); EXPECT_EQ(int64_t{-34234324487834466}, value); EXPECT_TRUE(safe_strto64_base("34234324487834466 \n\t ", &value, 
10)); EXPECT_EQ(int64_t{34234324487834466}, value); EXPECT_FALSE(safe_strto64_base("", &value, 10)); EXPECT_FALSE(safe_strto64_base(" ", &value, 10)); EXPECT_FALSE(safe_strto64_base("abc", &value, 10)); EXPECT_FALSE(safe_strto64_base("34234324487834466a", &value, 10)); EXPECT_FALSE(safe_strto64_base("34234487834466.3", &value, 10)); EXPECT_FALSE(safe_strto64_base("9223372036854775808", &value, 10)); EXPECT_FALSE(safe_strto64_base("-9223372036854775809", &value, 10)); EXPECT_TRUE(safe_strto64_base(std::string("0x1234"), &value, 16)); EXPECT_EQ(0x1234, value); EXPECT_TRUE(safe_strto64_base("1234", &value, 10)); EXPECT_EQ(1234, value); } const size_t kNumRandomTests = 10000; template <typename IntType> void test_random_integer_parse_base(bool (*parse_func)(absl::string_view, IntType* value, int base)) { using RandomEngine = std::minstd_rand0; std::random_device rd; RandomEngine rng(rd()); std::uniform_int_distribution<IntType> random_int( std::numeric_limits<IntType>::min()); std::uniform_int_distribution<int> random_base(2, 35); for (size_t i = 0; i < kNumRandomTests; i++) { IntType value = random_int(rng); int base = random_base(rng); std::string str_value; EXPECT_TRUE(Itoa<IntType>(value, base, &str_value)); IntType parsed_value; EXPECT_TRUE(parse_func(str_value, &parsed_value, base)); EXPECT_EQ(parsed_value, value); EXPECT_FALSE( parse_func(absl::StrCat(std::numeric_limits<IntType>::max(), value), &parsed_value, base)); if (std::numeric_limits<IntType>::min() < 0) { EXPECT_FALSE( parse_func(absl::StrCat(std::numeric_limits<IntType>::min(), value), &parsed_value, base)); } else { EXPECT_FALSE(parse_func(absl::StrCat("-", value), &parsed_value, base)); } } } TEST(stringtest, safe_strto32_random) { test_random_integer_parse_base<int32_t>(&safe_strto32_base); } TEST(stringtest, safe_strto64_random) { test_random_integer_parse_base<int64_t>(&safe_strto64_base); } TEST(stringtest, safe_strtou32_random) { test_random_integer_parse_base<uint32_t>(&safe_strtou32_base); } TEST(stringtest, safe_strtou64_random) { test_random_integer_parse_base<uint64_t>(&safe_strtou64_base); } TEST(stringtest, safe_strtou128_random) { using RandomEngine = std::minstd_rand0; using IntType = absl::uint128; constexpr auto parse_func = &absl::numbers_internal::safe_strtou128_base; std::random_device rd; RandomEngine rng(rd()); std::uniform_int_distribution<uint64_t> random_uint64( std::numeric_limits<uint64_t>::min()); std::uniform_int_distribution<int> random_base(2, 35); for (size_t i = 0; i < kNumRandomTests; i++) { IntType value = random_uint64(rng); value = (value << 64) + random_uint64(rng); int base = random_base(rng); std::string str_value; EXPECT_TRUE(Itoa<IntType>(value, base, &str_value)); IntType parsed_value; EXPECT_TRUE(parse_func(str_value, &parsed_value, base)); EXPECT_EQ(parsed_value, value); std::string s; absl::strings_internal::OStringStream(&s) << std::numeric_limits<IntType>::max() << value; EXPECT_FALSE(parse_func(s, &parsed_value, base)); s.clear(); absl::strings_internal::OStringStream(&s) << "-" << value; EXPECT_FALSE(parse_func(s, &parsed_value, base)); } } TEST(stringtest, safe_strto128_random) { using RandomEngine = std::minstd_rand0; using IntType = absl::int128; constexpr auto parse_func = &absl::numbers_internal::safe_strto128_base; std::random_device rd; RandomEngine rng(rd()); std::uniform_int_distribution<int64_t> random_int64( std::numeric_limits<int64_t>::min()); std::uniform_int_distribution<uint64_t> random_uint64( std::numeric_limits<uint64_t>::min()); 
std::uniform_int_distribution<int> random_base(2, 35); for (size_t i = 0; i < kNumRandomTests; ++i) { int64_t high = random_int64(rng); uint64_t low = random_uint64(rng); IntType value = absl::MakeInt128(high, low); int base = random_base(rng); std::string str_value; EXPECT_TRUE(Itoa<IntType>(value, base, &str_value)); IntType parsed_value; EXPECT_TRUE(parse_func(str_value, &parsed_value, base)); EXPECT_EQ(parsed_value, value); std::string s; absl::strings_internal::OStringStream(&s) << std::numeric_limits<IntType>::max() << value; EXPECT_FALSE(parse_func(s, &parsed_value, base)); s.clear(); absl::strings_internal::OStringStream(&s) << std::numeric_limits<IntType>::min() << value; EXPECT_FALSE(parse_func(s, &parsed_value, base)); } } TEST(stringtest, safe_strtou32_base) { for (int i = 0; strtouint32_test_cases()[i].str != nullptr; ++i) { const auto& e = strtouint32_test_cases()[i]; uint32_t value; EXPECT_EQ(e.expect_ok, safe_strtou32_base(e.str, &value, e.base)) << "str=\"" << e.str << "\" base=" << e.base; if (e.expect_ok) { EXPECT_EQ(e.expected, value) << "i=" << i << " str=\"" << e.str << "\" base=" << e.base; } } } TEST(stringtest, safe_strtou32_base_length_delimited) { for (int i = 0; strtouint32_test_cases()[i].str != nullptr; ++i) { const auto& e = strtouint32_test_cases()[i]; std::string tmp(e.str); tmp.append("12"); uint32_t value; EXPECT_EQ(e.expect_ok, safe_strtou32_base(absl::string_view(tmp.data(), strlen(e.str)), &value, e.base)) << "str=\"" << e.str << "\" base=" << e.base; if (e.expect_ok) { EXPECT_EQ(e.expected, value) << "i=" << i << " str=" << e.str << " base=" << e.base; } } } TEST(stringtest, safe_strtou64_base) { for (int i = 0; strtouint64_test_cases()[i].str != nullptr; ++i) { const auto& e = strtouint64_test_cases()[i]; uint64_t value; EXPECT_EQ(e.expect_ok, safe_strtou64_base(e.str, &value, e.base)) << "str=\"" << e.str << "\" base=" << e.base; if (e.expect_ok) { EXPECT_EQ(e.expected, value) << "str=" << e.str << " base=" << e.base; } } } TEST(stringtest, safe_strtou64_base_length_delimited) { for (int i = 0; strtouint64_test_cases()[i].str != nullptr; ++i) { const auto& e = strtouint64_test_cases()[i]; std::string tmp(e.str); tmp.append("12"); uint64_t value; EXPECT_EQ(e.expect_ok, safe_strtou64_base(absl::string_view(tmp.data(), strlen(e.str)), &value, e.base)) << "str=\"" << e.str << "\" base=" << e.base; if (e.expect_ok) { EXPECT_EQ(e.expected, value) << "str=\"" << e.str << "\" base=" << e.base; } } } #if defined(__GLIBC__) || defined(__BIONIC__) #define ABSL_HAVE_FEENABLEEXCEPT 1 #define ABSL_HAVE_FEDISABLEEXCEPT 1 #endif class SimpleDtoaTest : public testing::Test { protected: void SetUp() override { feholdexcept(&fp_env_); #ifdef ABSL_HAVE_FEENABLEEXCEPT feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW); #endif } void TearDown() override { #ifdef ABSL_HAVE_FEDISABLEEXCEPT fedisableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW); #endif fesetenv(&fp_env_); } std::string ToNineDigits(double value) { char buffer[16]; snprintf(buffer, sizeof(buffer), "%.9g", value); return buffer; } fenv_t fp_env_; }; template <typename R> void ExhaustiveFloat(uint32_t cases, R&& runnable) { runnable(0.0f); runnable(-0.0f); if (cases >= 2e9) { for (float f = 0; f < std::numeric_limits<float>::max(); ) { f = nextafterf(f, std::numeric_limits<float>::max()); runnable(-f); runnable(f); } return; } std::set<float> floats = {3.4028234e38f}; for (float f : {1.0, 3.14159265, 2.718281828, 1 / 2.718281828}) { for (float testf = f; testf != 0; testf *= 0.1f) floats.insert(testf); 
for (float testf = f; testf != 0; testf *= 0.5f) floats.insert(testf); for (float testf = f; testf < 3e38f / 2; testf *= 2.0f) floats.insert(testf); for (float testf = f; testf < 3e38f / 10; testf *= 10) floats.insert(testf); } float last = *floats.begin(); runnable(last); runnable(-last); int iters_per_float = cases / floats.size(); if (iters_per_float == 0) iters_per_float = 1; for (float f : floats) { if (f == last) continue; float testf = std::nextafter(last, std::numeric_limits<float>::max()); runnable(testf); runnable(-testf); last = testf; if (f == last) continue; double step = (double{f} - last) / iters_per_float; for (double d = last + step; d < f; d += step) { testf = d; if (testf != last) { runnable(testf); runnable(-testf); last = testf; } } testf = std::nextafter(f, 0.0f); if (testf > last) { runnable(testf); runnable(-testf); last = testf; } if (f != last) { runnable(f); runnable(-f); last = f; } } } TEST_F(SimpleDtoaTest, ExhaustiveDoubleToSixDigits) { uint64_t test_count = 0; std::vector<double> mismatches; auto checker = [&](double d) { if (d != d) return; ++test_count; char sixdigitsbuf[kSixDigitsToBufferSize] = {0}; SixDigitsToBuffer(d, sixdigitsbuf); char snprintfbuf[kSixDigitsToBufferSize] = {0}; snprintf(snprintfbuf, kSixDigitsToBufferSize, "%g", d); if (strcmp(sixdigitsbuf, snprintfbuf) != 0) { mismatches.push_back(d); if (mismatches.size() < 10) { LOG(ERROR) << "Six-digit failure with double. d=" << d << " sixdigits=" << sixdigitsbuf << " printf(%g)=" << snprintfbuf; } } }; checker(5e-324); checker(1e-308); checker(1.0); checker(1.000005); checker(1.7976931348623157e308); checker(0.00390625); #ifndef _MSC_VER checker(0.001953125); #endif checker(0.005859375); checker(1.089095e-15); checker(3.274195e-55); checker(6.534355e-146); checker(2.920845e+234); if (mismatches.empty()) { test_count = 0; ExhaustiveFloat(kFloatNumCases, checker); test_count = 0; std::vector<int> digit_testcases{ 100000, 100001, 100002, 100005, 100010, 100020, 100050, 100100, 195312, 195313, 200000, 500000, 800000, 585937, 585938, 900000, 990000, 999000, 999900, 999990, 999996, 999997, 999998, 999999}; if (kFloatNumCases >= 1e9) { constexpr int min_mantissa = 100000, max_mantissa = 999999; digit_testcases.resize(max_mantissa - min_mantissa + 1); std::iota(digit_testcases.begin(), digit_testcases.end(), min_mantissa); } for (int exponent = -324; exponent <= 308; ++exponent) { double powten = absl::strings_internal::Pow10(exponent); if (powten == 0) powten = 5e-324; if (kFloatNumCases >= 1e9) { char buf[kSixDigitsToBufferSize]; LOG(INFO) << "Exp " << exponent << " powten=" << powten << "(" << powten << ") (" << absl::string_view(buf, SixDigitsToBuffer(powten, buf)) << ")"; } for (int digits : digit_testcases) { if (exponent == 308 && digits >= 179769) break; double digiform = (digits + 0.5) * 0.00001; double testval = digiform * powten; double pretestval = nextafter(testval, 0); double posttestval = nextafter(testval, 1.7976931348623157e308); checker(testval); checker(pretestval); checker(posttestval); } } } else { EXPECT_EQ(mismatches.size(), 0); for (size_t i = 0; i < mismatches.size(); ++i) { if (i > 100) i = mismatches.size() - 1; double d = mismatches[i]; char sixdigitsbuf[kSixDigitsToBufferSize] = {0}; SixDigitsToBuffer(d, sixdigitsbuf); char snprintfbuf[kSixDigitsToBufferSize] = {0}; snprintf(snprintfbuf, kSixDigitsToBufferSize, "%g", d); double before = nextafter(d, 0.0); double after = nextafter(d, 1.7976931348623157e308); char b1[32], b2[kSixDigitsToBufferSize]; LOG(ERROR) << "Mismatch #" 
<< i << " d=" << d << " (" << ToNineDigits(d) << ") sixdigits='" << sixdigitsbuf << "' snprintf='" << snprintfbuf << "' Before.=" << PerfectDtoa(before) << " " << (SixDigitsToBuffer(before, b2), b2) << " vs snprintf=" << (snprintf(b1, sizeof(b1), "%g", before), b1) << " Perfect=" << PerfectDtoa(d) << " " << (SixDigitsToBuffer(d, b2), b2) << " vs snprintf=" << (snprintf(b1, sizeof(b1), "%g", d), b1) << " After.=." << PerfectDtoa(after) << " " << (SixDigitsToBuffer(after, b2), b2) << " vs snprintf=" << (snprintf(b1, sizeof(b1), "%g", after), b1); } } } TEST(StrToInt32, Partial) { struct Int32TestLine { std::string input; bool status; int32_t value; }; const int32_t int32_min = std::numeric_limits<int32_t>::min(); const int32_t int32_max = std::numeric_limits<int32_t>::max(); Int32TestLine int32_test_line[] = { {"", false, 0}, {" ", false, 0}, {"-", false, 0}, {"123@@@", false, 123}, {absl::StrCat(int32_min, int32_max), false, int32_min}, {absl::StrCat(int32_max, int32_max), false, int32_max}, }; for (const Int32TestLine& test_line : int32_test_line) { int32_t value = -2; bool status = safe_strto32_base(test_line.input, &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; value = -2; status = safe_strto32_base(test_line.input, &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; value = -2; status = safe_strto32_base(absl::string_view(test_line.input), &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; } } TEST(StrToUint32, Partial) { struct Uint32TestLine { std::string input; bool status; uint32_t value; }; const uint32_t uint32_max = std::numeric_limits<uint32_t>::max(); Uint32TestLine uint32_test_line[] = { {"", false, 0}, {" ", false, 0}, {"-", false, 0}, {"123@@@", false, 123}, {absl::StrCat(uint32_max, uint32_max), false, uint32_max}, }; for (const Uint32TestLine& test_line : uint32_test_line) { uint32_t value = 2; bool status = safe_strtou32_base(test_line.input, &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; value = 2; status = safe_strtou32_base(test_line.input, &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; value = 2; status = safe_strtou32_base(absl::string_view(test_line.input), &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; } } TEST(StrToInt64, Partial) { struct Int64TestLine { std::string input; bool status; int64_t value; }; const int64_t int64_min = std::numeric_limits<int64_t>::min(); const int64_t int64_max = std::numeric_limits<int64_t>::max(); Int64TestLine int64_test_line[] = { {"", false, 0}, {" ", false, 0}, {"-", false, 0}, {"123@@@", false, 123}, {absl::StrCat(int64_min, int64_max), false, int64_min}, {absl::StrCat(int64_max, int64_max), false, int64_max}, }; for (const Int64TestLine& test_line : int64_test_line) { int64_t value = -2; bool status = safe_strto64_base(test_line.input, &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; value = -2; status = safe_strto64_base(test_line.input, &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; value = -2; status = 
safe_strto64_base(absl::string_view(test_line.input), &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; } } TEST(StrToUint64, Partial) { struct Uint64TestLine { std::string input; bool status; uint64_t value; }; const uint64_t uint64_max = std::numeric_limits<uint64_t>::max(); Uint64TestLine uint64_test_line[] = { {"", false, 0}, {" ", false, 0}, {"-", false, 0}, {"123@@@", false, 123}, {absl::StrCat(uint64_max, uint64_max), false, uint64_max}, }; for (const Uint64TestLine& test_line : uint64_test_line) { uint64_t value = 2; bool status = safe_strtou64_base(test_line.input, &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; value = 2; status = safe_strtou64_base(test_line.input, &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; value = 2; status = safe_strtou64_base(absl::string_view(test_line.input), &value, 10); EXPECT_EQ(test_line.status, status) << test_line.input; EXPECT_EQ(test_line.value, value) << test_line.input; } } TEST(StrToInt32Base, PrefixOnly) { struct Int32TestLine { std::string input; bool status; int32_t value; }; Int32TestLine int32_test_line[] = { { "", false, 0 }, { "-", false, 0 }, { "-0", true, 0 }, { "0", true, 0 }, { "0x", false, 0 }, { "-0x", false, 0 }, }; const int base_array[] = { 0, 2, 8, 10, 16 }; for (const Int32TestLine& line : int32_test_line) { for (const int base : base_array) { int32_t value = 2; bool status = safe_strto32_base(line.input.c_str(), &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; value = 2; status = safe_strto32_base(line.input, &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; value = 2; status = safe_strto32_base(absl::string_view(line.input), &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; } } } TEST(StrToUint32Base, PrefixOnly) { struct Uint32TestLine { std::string input; bool status; uint32_t value; }; Uint32TestLine uint32_test_line[] = { { "", false, 0 }, { "0", true, 0 }, { "0x", false, 0 }, }; const int base_array[] = { 0, 2, 8, 10, 16 }; for (const Uint32TestLine& line : uint32_test_line) { for (const int base : base_array) { uint32_t value = 2; bool status = safe_strtou32_base(line.input.c_str(), &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; value = 2; status = safe_strtou32_base(line.input, &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; value = 2; status = safe_strtou32_base(absl::string_view(line.input), &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; } } } TEST(StrToInt64Base, PrefixOnly) { struct Int64TestLine { std::string input; bool status; int64_t value; }; Int64TestLine int64_test_line[] = { { "", false, 0 }, { "-", false, 0 }, { "-0", true, 0 }, { "0", true, 0 }, { "0x", false, 0 }, { "-0x", false, 0 }, }; const int base_array[] = { 0, 2, 8, 10, 16 }; for (const Int64TestLine& line : int64_test_line) { for (const int base : base_array) { int64_t value = 2; bool status = 
safe_strto64_base(line.input.c_str(), &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; value = 2; status = safe_strto64_base(line.input, &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; value = 2; status = safe_strto64_base(absl::string_view(line.input), &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; } } } TEST(StrToUint64Base, PrefixOnly) { struct Uint64TestLine { std::string input; bool status; uint64_t value; }; Uint64TestLine uint64_test_line[] = { { "", false, 0 }, { "0", true, 0 }, { "0x", false, 0 }, }; const int base_array[] = { 0, 2, 8, 10, 16 }; for (const Uint64TestLine& line : uint64_test_line) { for (const int base : base_array) { uint64_t value = 2; bool status = safe_strtou64_base(line.input.c_str(), &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; value = 2; status = safe_strtou64_base(line.input, &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; value = 2; status = safe_strtou64_base(absl::string_view(line.input), &value, base); EXPECT_EQ(line.status, status) << line.input << " " << base; EXPECT_EQ(line.value, value) << line.input << " " << base; } } } void TestFastHexToBufferZeroPad16(uint64_t v) { char buf[16]; auto digits = absl::numbers_internal::FastHexToBufferZeroPad16(v, buf); absl::string_view res(buf, 16); char buf2[17]; snprintf(buf2, sizeof(buf2), "%016" PRIx64, v); EXPECT_EQ(res, buf2) << v; size_t expected_digits = snprintf(buf2, sizeof(buf2), "%" PRIx64, v); EXPECT_EQ(digits, expected_digits) << v; } TEST(FastHexToBufferZeroPad16, Smoke) { TestFastHexToBufferZeroPad16(std::numeric_limits<uint64_t>::min()); TestFastHexToBufferZeroPad16(std::numeric_limits<uint64_t>::max()); TestFastHexToBufferZeroPad16(std::numeric_limits<int64_t>::min()); TestFastHexToBufferZeroPad16(std::numeric_limits<int64_t>::max()); absl::BitGen rng; for (int i = 0; i < 100000; ++i) { TestFastHexToBufferZeroPad16( absl::LogUniform(rng, std::numeric_limits<uint64_t>::min(), std::numeric_limits<uint64_t>::max())); } } template <typename Int> void ExpectWritesNull() { { char buf[absl::numbers_internal::kFastToBufferSize]; Int x = std::numeric_limits<Int>::min(); EXPECT_THAT(absl::numbers_internal::FastIntToBuffer(x, buf), Pointee('\0')); } { char buf[absl::numbers_internal::kFastToBufferSize]; Int x = std::numeric_limits<Int>::max(); EXPECT_THAT(absl::numbers_internal::FastIntToBuffer(x, buf), Pointee('\0')); } } TEST(FastIntToBuffer, WritesNull) { ExpectWritesNull<int32_t>(); ExpectWritesNull<uint32_t>(); ExpectWritesNull<int64_t>(); ExpectWritesNull<uint64_t>(); } }
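A short sketch of the buffer-based formatters the tests above exercise; they live in the numbers_internal namespace and are normally reached through absl::StrCat, so treat direct calls as illustrative rather than recommended API use:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>

#include "absl/strings/numbers.h"

int main() {
  // SixDigitsToBuffer() writes the shortest 6-significant-digit form and
  // returns the length written (matching snprintf "%g", per the tests).
  char six[absl::numbers_internal::kSixDigitsToBufferSize];
  size_t n = absl::numbers_internal::SixDigitsToBuffer(0.000123456789, six);
  std::cout << std::string(six, n) << "\n";  // 0.000123457

  // FastIntToBuffer() returns a pointer one past the last digit; the byte
  // at that position is the terminating NUL (what WritesNull checks).
  char buf[absl::numbers_internal::kFastToBufferSize];
  char* end = absl::numbers_internal::FastIntToBuffer(int64_t{-9876543210}, buf);
  std::cout << std::string(buf, end) << "\n";  // -9876543210
  return 0;
}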
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/numbers.cc
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/numbers_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
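Aside (not part of the dataset record above): the safe_strto*_base functions exercised by these tests are Abseil-internal; callers normally reach them through the public absl::SimpleAtoi / absl::SimpleHexAtoi wrappers in absl/strings/numbers.h. A minimal sketch of that public surface, assuming only the header shown; the expected values in the comments mirror the whole-string-must-parse contract the Partial tests demonstrate.

#include <cstdint>
#include <iostream>
#include "absl/strings/numbers.h"

int main() {
  int64_t v = 0;
  bool ok = absl::SimpleAtoi("123", &v);      // true; v == 123
  bool bad = absl::SimpleAtoi("123@@@", &v);  // false; trailing junk rejected
  uint64_t h = 0;
  bool hex_ok = absl::SimpleHexAtoi("0x1A", &h);  // true; h == 26, "0x" prefix allowed
  std::cout << ok << ' ' << v << ' ' << bad << ' ' << hex_ok << ' ' << h << '\n';
  return 0;
}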
389afdcb-70bf-4bef-a959-45ba901616f2
cpp
abseil/abseil-cpp
blocking_counter
absl/synchronization/blocking_counter.cc
absl/synchronization/blocking_counter_test.cc
#include "absl/synchronization/blocking_counter.h" #include <atomic> #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/tracing.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace { bool IsDone(void *arg) { return *reinterpret_cast<bool *>(arg); } } BlockingCounter::BlockingCounter(int initial_count) : count_(initial_count), num_waiting_(0), done_{initial_count == 0 ? true : false} { ABSL_RAW_CHECK(initial_count >= 0, "BlockingCounter initial_count negative"); } bool BlockingCounter::DecrementCount() { int count = count_.fetch_sub(1, std::memory_order_acq_rel) - 1; ABSL_RAW_CHECK(count >= 0, "BlockingCounter::DecrementCount() called too many times"); if (count == 0) { base_internal::TraceSignal(this, TraceObjectKind()); MutexLock l(&lock_); done_ = true; return true; } return false; } void BlockingCounter::Wait() { base_internal::TraceWait(this, TraceObjectKind()); { MutexLock l(&this->lock_); ABSL_RAW_CHECK(num_waiting_ == 0, "multiple threads called Wait()"); num_waiting_++; this->lock_.Await(Condition(IsDone, &this->done_)); } base_internal::TraceContinue(this, TraceObjectKind()); } ABSL_NAMESPACE_END }
#include "absl/synchronization/blocking_counter.h" #include <thread> #include <tuple> #include <vector> #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/tracing.h" #include "absl/time/clock.h" #include "absl/time/time.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace { void PauseAndDecreaseCounter(BlockingCounter* counter, int* done) { absl::SleepFor(absl::Seconds(1)); *done = 1; counter->DecrementCount(); } TEST(BlockingCounterTest, BasicFunctionality) { const int num_workers = 10; BlockingCounter counter(num_workers); std::vector<std::thread> workers; std::vector<int> done(num_workers, 0); workers.reserve(num_workers); for (int k = 0; k < num_workers; k++) { workers.emplace_back( [&counter, &done, k] { PauseAndDecreaseCounter(&counter, &done[k]); }); } counter.Wait(); for (int k = 0; k < num_workers; k++) { EXPECT_EQ(1, done[k]); } for (std::thread& w : workers) { w.join(); } } TEST(BlockingCounterTest, WaitZeroInitialCount) { BlockingCounter counter(0); counter.Wait(); } #if GTEST_HAS_DEATH_TEST TEST(BlockingCounterTest, WaitNegativeInitialCount) { EXPECT_DEATH(BlockingCounter counter(-1), "BlockingCounter initial_count negative"); } #endif } #if ABSL_HAVE_ATTRIBUTE_WEAK namespace base_internal { namespace { using TraceRecord = std::tuple<const void*, ObjectKind>; thread_local TraceRecord tls_signal; thread_local TraceRecord tls_wait; thread_local TraceRecord tls_continue; } extern "C" { void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceWait)(const void* object, ObjectKind kind) { tls_wait = {object, kind}; } void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceContinue)(const void* object, ObjectKind kind) { tls_continue = {object, kind}; } void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceSignal)(const void* object, ObjectKind kind) { tls_signal = {object, kind}; } } TEST(BlockingCounterTest, TracesSignal) { BlockingCounter counter(2); tls_signal = {}; counter.DecrementCount(); EXPECT_EQ(tls_signal, TraceRecord(nullptr, ObjectKind::kUnknown)); tls_signal = {}; counter.DecrementCount(); EXPECT_EQ(tls_signal, TraceRecord(&counter, ObjectKind::kBlockingCounter)); } TEST(BlockingCounterTest, TracesWaitContinue) { BlockingCounter counter(1); counter.DecrementCount(); tls_wait = {}; tls_continue = {}; counter.Wait(); EXPECT_EQ(tls_wait, TraceRecord(&counter, ObjectKind::kBlockingCounter)); EXPECT_EQ(tls_continue, TraceRecord(&counter, ObjectKind::kBlockingCounter)); } } #endif ABSL_NAMESPACE_END }
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/blocking_counter.cc
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/blocking_counter_test.cc
03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
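Aside (not part of the dataset record above): the BasicFunctionality test in this record is the canonical fan-out/fan-in use of BlockingCounter. A minimal sketch, assuming only the public absl API; each worker must call DecrementCount() exactly once, and only a single thread may call Wait().

#include <thread>
#include <vector>
#include "absl/synchronization/blocking_counter.h"

int main() {
  constexpr int kNumWorkers = 4;
  absl::BlockingCounter counter(kNumWorkers);
  std::vector<std::thread> workers;
  workers.reserve(kNumWorkers);
  for (int i = 0; i < kNumWorkers; ++i) {
    workers.emplace_back([&counter] {
      // ... per-worker initialization would run here ...
      counter.DecrementCount();  // signal completion exactly once
    });
  }
  counter.Wait();  // blocks until the count reaches zero (single waiter only)
  for (std::thread& t : workers) t.join();
  return 0;
}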
efb8c578-1307-4e81-8fd7-3cc4c3c6661e
cpp
google/quiche
nghttp2_util
quiche/http2/adapter/nghttp2_util.cc
quiche/http2/adapter/nghttp2_util_test.cc
#include "quiche/http2/adapter/nghttp2_util.h" #include <cstdint> #include <cstring> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "quiche/http2/adapter/http2_protocol.h" #include "quiche/common/platform/api/quiche_logging.h" #include "quiche/common/quiche_endian.h" namespace http2 { namespace adapter { namespace { using InvalidFrameError = Http2VisitorInterface::InvalidFrameError; void DeleteCallbacks(nghttp2_session_callbacks* callbacks) { if (callbacks) { nghttp2_session_callbacks_del(callbacks); } } void DeleteSession(nghttp2_session* session) { if (session) { nghttp2_session_del(session); } } } nghttp2_session_callbacks_unique_ptr MakeCallbacksPtr( nghttp2_session_callbacks* callbacks) { return nghttp2_session_callbacks_unique_ptr(callbacks, &DeleteCallbacks); } nghttp2_session_unique_ptr MakeSessionPtr(nghttp2_session* session) { return nghttp2_session_unique_ptr(session, &DeleteSession); } uint8_t* ToUint8Ptr(char* str) { return reinterpret_cast<uint8_t*>(str); } uint8_t* ToUint8Ptr(const char* str) { return const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(str)); } absl::string_view ToStringView(nghttp2_rcbuf* rc_buffer) { nghttp2_vec buffer = nghttp2_rcbuf_get_buf(rc_buffer); return absl::string_view(reinterpret_cast<const char*>(buffer.base), buffer.len); } absl::string_view ToStringView(uint8_t* pointer, size_t length) { return absl::string_view(reinterpret_cast<const char*>(pointer), length); } absl::string_view ToStringView(const uint8_t* pointer, size_t length) { return absl::string_view(reinterpret_cast<const char*>(pointer), length); } std::vector<nghttp2_nv> GetNghttp2Nvs(absl::Span<const Header> headers) { const int num_headers = headers.size(); std::vector<nghttp2_nv> nghttp2_nvs; nghttp2_nvs.reserve(num_headers); for (int i = 0; i < num_headers; ++i) { nghttp2_nv header; uint8_t flags = NGHTTP2_NV_FLAG_NONE; const auto [name, no_copy_name] = GetStringView(headers[i].first); header.name = ToUint8Ptr(name.data()); header.namelen = name.size(); if (no_copy_name) { flags |= NGHTTP2_NV_FLAG_NO_COPY_NAME; } const auto [value, no_copy_value] = GetStringView(headers[i].second); header.value = ToUint8Ptr(value.data()); header.valuelen = value.size(); if (no_copy_value) { flags |= NGHTTP2_NV_FLAG_NO_COPY_VALUE; } header.flags = flags; nghttp2_nvs.push_back(std::move(header)); } return nghttp2_nvs; } std::vector<nghttp2_nv> GetResponseNghttp2Nvs( const quiche::HttpHeaderBlock& headers, absl::string_view response_code) { const int num_headers = headers.size(); std::vector<nghttp2_nv> nghttp2_nvs; nghttp2_nvs.reserve(num_headers + 1); nghttp2_nv status; status.name = ToUint8Ptr(kHttp2StatusPseudoHeader.data()); status.namelen = kHttp2StatusPseudoHeader.size(); status.value = ToUint8Ptr(response_code.data()); status.valuelen = response_code.size(); status.flags = NGHTTP2_FLAG_NONE; nghttp2_nvs.push_back(std::move(status)); for (const auto& header_pair : headers) { nghttp2_nv header; header.name = ToUint8Ptr(header_pair.first.data()); header.namelen = header_pair.first.size(); header.value = ToUint8Ptr(header_pair.second.data()); header.valuelen = header_pair.second.size(); header.flags = NGHTTP2_FLAG_NONE; nghttp2_nvs.push_back(std::move(header)); } return nghttp2_nvs; } Http2ErrorCode ToHttp2ErrorCode(uint32_t wire_error_code) { if (wire_error_code > static_cast<int>(Http2ErrorCode::MAX_ERROR_CODE)) { return Http2ErrorCode::INTERNAL_ERROR; } 
return static_cast<Http2ErrorCode>(wire_error_code); } int ToNgHttp2ErrorCode(InvalidFrameError error) { switch (error) { case InvalidFrameError::kProtocol: return NGHTTP2_ERR_PROTO; case InvalidFrameError::kRefusedStream: return NGHTTP2_ERR_REFUSED_STREAM; case InvalidFrameError::kHttpHeader: return NGHTTP2_ERR_HTTP_HEADER; case InvalidFrameError::kHttpMessaging: return NGHTTP2_ERR_HTTP_MESSAGING; case InvalidFrameError::kFlowControl: return NGHTTP2_ERR_FLOW_CONTROL; case InvalidFrameError::kStreamClosed: return NGHTTP2_ERR_STREAM_CLOSED; } return NGHTTP2_ERR_PROTO; } InvalidFrameError ToInvalidFrameError(int error) { switch (error) { case NGHTTP2_ERR_PROTO: return InvalidFrameError::kProtocol; case NGHTTP2_ERR_REFUSED_STREAM: return InvalidFrameError::kRefusedStream; case NGHTTP2_ERR_HTTP_HEADER: return InvalidFrameError::kHttpHeader; case NGHTTP2_ERR_HTTP_MESSAGING: return InvalidFrameError::kHttpMessaging; case NGHTTP2_ERR_FLOW_CONTROL: return InvalidFrameError::kFlowControl; case NGHTTP2_ERR_STREAM_CLOSED: return InvalidFrameError::kStreamClosed; } return InvalidFrameError::kProtocol; } class Nghttp2DataFrameSource : public DataFrameSource { public: Nghttp2DataFrameSource(nghttp2_data_provider provider, nghttp2_send_data_callback send_data, void* user_data) : provider_(std::move(provider)), send_data_(std::move(send_data)), user_data_(user_data) {} std::pair<int64_t, bool> SelectPayloadLength(size_t max_length) override { const int32_t stream_id = 0; uint32_t data_flags = 0; int64_t result = provider_.read_callback( nullptr , stream_id, nullptr , max_length, &data_flags, &provider_.source, nullptr ); if (result == NGHTTP2_ERR_DEFERRED) { return {kBlocked, false}; } else if (result < 0) { return {kError, false}; } else if ((data_flags & NGHTTP2_DATA_FLAG_NO_COPY) == 0) { QUICHE_LOG(ERROR) << "Source did not use the zero-copy API!"; return {kError, false}; } else { const bool eof = data_flags & NGHTTP2_DATA_FLAG_EOF; if (eof && (data_flags & NGHTTP2_DATA_FLAG_NO_END_STREAM) == 0) { send_fin_ = true; } return {result, eof}; } } bool Send(absl::string_view frame_header, size_t payload_length) override { nghttp2_frame frame; frame.hd.type = 0; frame.hd.length = payload_length; frame.hd.flags = 0; frame.hd.stream_id = 0; frame.data.padlen = 0; const int result = send_data_( nullptr , &frame, ToUint8Ptr(frame_header.data()), payload_length, &provider_.source, user_data_); QUICHE_LOG_IF(ERROR, result < 0 && result != NGHTTP2_ERR_WOULDBLOCK) << "Unexpected error code from send: " << result; return result == 0; } bool send_fin() const override { return send_fin_; } private: nghttp2_data_provider provider_; nghttp2_send_data_callback send_data_; void* user_data_; bool send_fin_ = false; }; std::unique_ptr<DataFrameSource> MakeZeroCopyDataFrameSource( nghttp2_data_provider provider, void* user_data, nghttp2_send_data_callback send_data) { return std::make_unique<Nghttp2DataFrameSource>( std::move(provider), std::move(send_data), user_data); } absl::string_view ErrorString(uint32_t error_code) { return Http2ErrorCodeToString(static_cast<Http2ErrorCode>(error_code)); } size_t PaddingLength(uint8_t flags, size_t padlen) { return (flags & PADDED_FLAG ? 
1 : 0) + padlen; } struct NvFormatter { void operator()(std::string* out, const nghttp2_nv& nv) { absl::StrAppend(out, ToStringView(nv.name, nv.namelen), ": ", ToStringView(nv.value, nv.valuelen)); } }; std::string NvsAsString(nghttp2_nv* nva, size_t nvlen) { return absl::StrJoin(absl::MakeConstSpan(nva, nvlen), ", ", NvFormatter()); } #define HTTP2_FRAME_SEND_LOG QUICHE_VLOG(1) void LogBeforeSend(const nghttp2_frame& frame) { switch (static_cast<FrameType>(frame.hd.type)) { case FrameType::DATA: HTTP2_FRAME_SEND_LOG << "Sending DATA on stream " << frame.hd.stream_id << " with length " << frame.hd.length - PaddingLength(frame.hd.flags, frame.data.padlen) << " and padding " << PaddingLength(frame.hd.flags, frame.data.padlen); break; case FrameType::HEADERS: HTTP2_FRAME_SEND_LOG << "Sending HEADERS on stream " << frame.hd.stream_id << " with headers [" << NvsAsString(frame.headers.nva, frame.headers.nvlen) << "]"; break; case FrameType::PRIORITY: HTTP2_FRAME_SEND_LOG << "Sending PRIORITY"; break; case FrameType::RST_STREAM: HTTP2_FRAME_SEND_LOG << "Sending RST_STREAM on stream " << frame.hd.stream_id << " with error code " << ErrorString(frame.rst_stream.error_code); break; case FrameType::SETTINGS: HTTP2_FRAME_SEND_LOG << "Sending SETTINGS with " << frame.settings.niv << " entries, is_ack: " << (frame.hd.flags & ACK_FLAG); break; case FrameType::PUSH_PROMISE: HTTP2_FRAME_SEND_LOG << "Sending PUSH_PROMISE"; break; case FrameType::PING: { Http2PingId ping_id; std::memcpy(&ping_id, frame.ping.opaque_data, sizeof(Http2PingId)); HTTP2_FRAME_SEND_LOG << "Sending PING with unique_id " << quiche::QuicheEndian::NetToHost64(ping_id) << ", is_ack: " << (frame.hd.flags & ACK_FLAG); break; } case FrameType::GOAWAY: HTTP2_FRAME_SEND_LOG << "Sending GOAWAY with last_stream: " << frame.goaway.last_stream_id << " and error " << ErrorString(frame.goaway.error_code); break; case FrameType::WINDOW_UPDATE: HTTP2_FRAME_SEND_LOG << "Sending WINDOW_UPDATE on stream " << frame.hd.stream_id << " with update delta " << frame.window_update.window_size_increment; break; case FrameType::CONTINUATION: HTTP2_FRAME_SEND_LOG << "Sending CONTINUATION, which is unexpected"; break; } } #undef HTTP2_FRAME_SEND_LOG } }
#include "quiche/http2/adapter/nghttp2_util.h" #include <memory> #include <string> #include "quiche/http2/adapter/nghttp2_test_utils.h" #include "quiche/http2/adapter/test_utils.h" #include "quiche/common/platform/api/quiche_test.h" namespace http2 { namespace adapter { namespace test { namespace { int FakeSendCallback(nghttp2_session*, nghttp2_frame* , const uint8_t* framehd, size_t length, nghttp2_data_source* source, void* user_data) { auto* dest = static_cast<std::string*>(user_data); absl::StrAppend(dest, ToStringView(framehd, 9)); auto* test_source = static_cast<TestDataSource*>(source->ptr); absl::string_view payload = test_source->ReadNext(length); absl::StrAppend(dest, payload); return 0; } TEST(MakeZeroCopyDataFrameSource, EmptyPayload) { std::string result; const absl::string_view kEmptyBody = ""; TestDataSource body1{kEmptyBody}; nghttp2_data_provider provider = body1.MakeDataProvider(); std::unique_ptr<DataFrameSource> frame_source = MakeZeroCopyDataFrameSource(provider, &result, FakeSendCallback); auto [length, eof] = frame_source->SelectPayloadLength(100); EXPECT_EQ(length, 0); EXPECT_TRUE(eof); frame_source->Send("ninebytes", 0); EXPECT_EQ(result, "ninebytes"); } TEST(MakeZeroCopyDataFrameSource, ShortPayload) { std::string result; const absl::string_view kShortBody = "<html><head><title>Example Page!</title></head>" "<body><div><span><table><tr><th><blink>Wow!!" "</blink></th></tr></table></span></div></body>" "</html>"; TestDataSource body1{kShortBody}; nghttp2_data_provider provider = body1.MakeDataProvider(); std::unique_ptr<DataFrameSource> frame_source = MakeZeroCopyDataFrameSource(provider, &result, FakeSendCallback); auto [length, eof] = frame_source->SelectPayloadLength(200); EXPECT_EQ(length, kShortBody.size()); EXPECT_TRUE(eof); frame_source->Send("ninebytes", length); EXPECT_EQ(result, absl::StrCat("ninebytes", kShortBody)); } TEST(MakeZeroCopyDataFrameSource, MultiFramePayload) { std::string result; const absl::string_view kShortBody = "<html><head><title>Example Page!</title></head>" "<body><div><span><table><tr><th><blink>Wow!!" "</blink></th></tr></table></span></div></body>" "</html>"; TestDataSource body1{kShortBody}; nghttp2_data_provider provider = body1.MakeDataProvider(); std::unique_ptr<DataFrameSource> frame_source = MakeZeroCopyDataFrameSource(provider, &result, FakeSendCallback); auto ret = frame_source->SelectPayloadLength(50); EXPECT_EQ(ret.first, 50); EXPECT_FALSE(ret.second); frame_source->Send("ninebyte1", ret.first); ret = frame_source->SelectPayloadLength(50); EXPECT_EQ(ret.first, 50); EXPECT_FALSE(ret.second); frame_source->Send("ninebyte2", ret.first); ret = frame_source->SelectPayloadLength(50); EXPECT_EQ(ret.first, 44); EXPECT_TRUE(ret.second); frame_source->Send("ninebyte3", ret.first); EXPECT_EQ(result, "ninebyte1<html><head><title>Example Page!</title></head><bo" "ninebyte2dy><div><span><table><tr><th><blink>Wow!!</blink><" "ninebyte3/th></tr></table></span></div></body></html>"); } } } } }
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/nghttp2_util.cc
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/nghttp2_util_test.cc
6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
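Aside (not part of the dataset record above): ToHttp2ErrorCode() in this record clamps unknown wire values to INTERNAL_ERROR rather than casting them blindly. A standalone sketch of that clamping logic; the enum below is a simplified stand-in for the real Http2ErrorCode in http2_protocol.h, not the quiche definition itself.

#include <cstdint>
#include <iostream>

enum class Http2ErrorCode : uint32_t {
  HTTP2_NO_ERROR = 0x0,
  INTERNAL_ERROR = 0x2,
  MAX_ERROR_CODE = 0xd,  // last code defined by RFC 9113 (HTTP_1_1_REQUIRED)
};

Http2ErrorCode ToHttp2ErrorCode(uint32_t wire_error_code) {
  if (wire_error_code > static_cast<uint32_t>(Http2ErrorCode::MAX_ERROR_CODE)) {
    return Http2ErrorCode::INTERNAL_ERROR;  // out-of-range codes collapse here
  }
  return static_cast<Http2ErrorCode>(wire_error_code);
}

int main() {
  std::cout << static_cast<uint32_t>(ToHttp2ErrorCode(0x2)) << '\n';   // 2
  std::cout << static_cast<uint32_t>(ToHttp2ErrorCode(0xff)) << '\n';  // 2 (clamped)
  return 0;
}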
5b273aa4-9a13-4db4-af7d-902975da9160
cpp
tensorflow/tensorflow
eager_service_impl
tensorflow/core/distributed_runtime/eager/eager_service_impl.cc
tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc
#include "tensorflow/core/distributed_runtime/eager/eager_service_impl.h" #include <functional> #include <memory> #include <optional> #include <string> #include <unordered_map> #include <utility> #include <vector> #include "absl/container/fixed_array.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/types/optional.h" #include "tensorflow/c/eager/immediate_execution_distributed_manager.h" #include "xla/tsl/distributed_runtime/preemption/preemption_notifier.h" #include "xla/tsl/protobuf/coordination_config.pb.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/eager/context.h" #include "tensorflow/core/common_runtime/eager/context_distributed_manager.h" #include "tensorflow/core/common_runtime/eager/eager_operation.h" #include "tensorflow/core/common_runtime/eager/execute.h" #include "tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.h" #include "tensorflow/core/distributed_runtime/eager/remote_mgr.h" #include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h" #include "tensorflow/core/distributed_runtime/message_wrappers.h" #include "tensorflow/core/distributed_runtime/rpc_collective_executor_mgr.h" #include "tensorflow/core/distributed_runtime/session_mgr.h" #include "tensorflow/core/distributed_runtime/worker_cache.h" #include "tensorflow/core/distributed_runtime/worker_env.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/rendezvous.h" #include "tensorflow/core/nccl/collective_communicator.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/host_info.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/refcount.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/stringprintf.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/profiler/lib/traceme.h" namespace tensorflow { namespace eager { namespace { Status GetNumRetvals(FunctionLibraryDefinition* func_lib_def, const string& op_name, const google::protobuf::Map<string, tensorflow::AttrValue>& attrs, int* num_retvals) { const tensorflow::OpRegistrationData* op_reg_data = nullptr; auto status = tensorflow::OpRegistry::Global()->LookUp(op_name, &op_reg_data); if (absl::IsNotFound(status)) { status = func_lib_def->LookUp(op_name, &op_reg_data); } TF_RETURN_IF_ERROR(status); const tensorflow::OpDef& op_def = op_reg_data->op_def; for (const auto& output_arg : op_def.output_arg()) { if (!output_arg.number_attr().empty()) { auto iter = attrs.find(output_arg.number_attr()); if (iter == attrs.end()) { return errors::InvalidArgument("Unable to find number_attr ", output_arg.number_attr(), " for Op: ", op_name); } *num_retvals += iter->second.i(); } else if (!output_arg.type_list_attr().empty()) { auto iter = attrs.find(output_arg.type_list_attr()); if (iter == attrs.end()) { return errors::InvalidArgument("Unable to find type_list_attr ", output_arg.type_list_attr(), " for Op: ", op_name); } *num_retvals += iter->second.list().type_size(); } else { *num_retvals += 1; } } return absl::OkStatus(); } Status GetEagerOperationAndNumRetvals(const Operation& operation, EagerContext* eager_context, EagerExecutor* eager_executor, EagerOperation* eager_op, int* num_retvals) { const char* name = operation.name().c_str(); std::optional<tensorflow::EagerFunctionParams> remote_func_params = std::nullopt; FunctionLibraryDefinition* func_lib_def; if (operation.is_function()) { if 
(operation.is_component_function()) { func_lib_def = eager_context->GetComponentFunctionFunctionLibraryDefinition( operation.name()); if (func_lib_def == nullptr) { return absl::InternalError( absl::StrCat("Could not find function library for registered " "component function: ", operation.name())); } remote_func_params = {operation.id(), true, operation.func_step_id(), func_lib_def}; } else { func_lib_def = eager_context->FuncLibDef(); remote_func_params = {operation.id(), false, std::nullopt, nullptr}; } } else { func_lib_def = eager_context->FuncLibDef(); } TF_RETURN_IF_ERROR(eager_op->Reset(name, operation.device().c_str(), false, eager_executor, remote_func_params)); { tsl::profiler::TraceMe activity("EagerService:RemoteTensorHandleInternal", tsl::profiler::TraceMeLevel::kVerbose); for (const auto& input : operation.op_inputs()) { tensorflow::TensorHandle* handle; if (input.has_remote_handle()) { TF_RETURN_IF_ERROR( eager_context->RemoteMgr()->DeserializeRemoteTensorHandle( input.remote_handle(), &handle)); TF_RETURN_IF_ERROR(eager_op->AddInput(handle)); } else { Tensor tensor; if (!ParseTensorProtoToTensor(input.tensor(), &tensor)) { return errors::InvalidArgument("Invalid TensorProto: ", input.tensor().DebugString()); } else { handle = TensorHandle::CreateLocalHandle(std::move(tensor), nullptr, nullptr, eager_context); TF_RETURN_IF_ERROR(eager_op->AddInput(handle)); } } handle->Unref(); } } for (const auto& attr : operation.attrs()) { eager_op->MutableAttrs()->Set(attr.first, attr.second); } return GetNumRetvals(func_lib_def, operation.name(), operation.attrs(), num_retvals); } Status TensorHandleProto(TensorHandle* handle, TensorProto* proto) { const tensorflow::Tensor* t = nullptr; TF_RETURN_IF_ERROR(handle->Tensor(&t)); t->AsProtoTensorContent(proto); return absl::OkStatus(); } Status TensorHandleShape(TensorHandle* handle, TensorShapeProto* proto) { const tensorflow::Tensor* t = nullptr; if (handle->Type() == TensorHandle::LOCAL) { TF_RETURN_IF_ERROR(handle->Tensor(&t)); t->shape().AsProto(proto); } else { TensorShape shape; TF_RETURN_IF_ERROR(handle->Shape(&shape)); shape.AsProto(proto); } return absl::OkStatus(); } Status AddOpRetvalsToResponse( EagerContext* eager_context, int op_id, int num_retvals, const std::vector<int32>& output_nums, TensorHandle** retvals, std::function<TensorProto*()> add_tensor_proto_fn, std::function<TensorShapeProto*()> add_shape_proto_fn, std::function<string*()> add_device_fn = nullptr) { StatusGroup sg; if (op_id == kInvalidOpId) { for (int i = 0; i < num_retvals; i++) { sg.Update(TensorHandleProto(retvals[i], add_tensor_proto_fn())); retvals[i]->Unref(); } } else { for (int i = 0; i < num_retvals; i++) { sg.Update(TensorHandleShape(retvals[i], add_shape_proto_fn())); if (add_device_fn) { Device* device = retvals[i]->device(); *add_device_fn() = device ? device->name() : ""; } if (retvals[i]->Type() == TensorHandle::REMOTE) { retvals[i]->Unref(); } else { const int output_num = output_nums.empty() ? 
i : output_nums.at(i); eager_context->RemoteMgr()->AddOperationOutput(retvals[i], op_id, output_num); } } } return sg.as_summary_status(); } Status ResetAgentAndConnectToCoordinationService( tsl::CoordinationServiceAgent* coord_agent) { if (coord_agent->IsError()) { const Status s = coord_agent->Reset(); if (!s.ok()) { LOG(ERROR) << "Coordination Service agent reset failed " << s; return s; } } if (!coord_agent->IsConnected()) { const Status s = coord_agent->Connect(); if (!s.ok()) { LOG(ERROR) << "Coordination Service agent connect failed " << s; return s; } } return absl::OkStatus(); } } Status EagerServiceImpl::CreateContext(const CreateContextRequest* request, CreateContextResponse* response) { bool update_collective_executor_mgr = false; { mutex_lock l(contexts_mu_); if (contexts_.empty()) { update_collective_executor_mgr = true; } else { auto context_it = contexts_.find(request->context_id()); if (context_it != contexts_.end()) { if (request->context_view_id() < context_it->second->Context()->GetContextViewId()) { return errors::InvalidArgument("EagerService:CreateContext failed. ", "Context id: <", request->context_id(), "> already exists."); } else { context_it->second->Unref(); contexts_.erase(context_it); } } } } if (env_ == nullptr || env_->rendezvous_mgr == nullptr) { return tensorflow::errors::Internal( "invalid eager env_ or env_->rendezvous_mgr."); } if (request->clear_existing_contexts()) { for (auto* device : env_->device_mgr->ListDevices()) { device->ClearResourceMgr(); } env_->rendezvous_mgr->CleanupAll(); env_->collective_executor_mgr->CleanupAll(); TF_RETURN_IF_ERROR(env_->session_mgr->DeleteAllSessions()); std::unordered_map<uint64, ServerContext*> tmp_contexts; { mutex_lock l(contexts_mu_); if (!contexts_.empty()) { std::swap(tmp_contexts, contexts_); } } for (auto& context : tmp_contexts) { context.second->Unref(); } } tsl::core::RefCountPtr<RemoteRendezvous> r = env_->rendezvous_mgr->Find(request->context_id()); auto session_name = tensorflow::strings::StrCat("eager_", request->context_id()); if (VLOG_IS_ON(2)) { VLOG(2) << "Creating context on /job:" << request->server_def().job_name() << "/task:" << request->server_def().task_index(); for (const auto& da : request->cluster_device_attributes()) { VLOG(2) << " " << da.name(); } } TF_RETURN_IF_ERROR(env_->session_mgr->CreateSession( session_name, request->server_def(), request->cluster_device_attributes(), request->server_def().default_session_config().isolate_session_state())); int64_t context_id = request->context_id(); std::function<void()> session_destroyer = [this, context_id, session_name]() { env_->rendezvous_mgr->Cleanup(context_id); auto s = env_->session_mgr->DeleteSession(session_name); if (!s.ok()) { LOG(WARNING) << "Failed to destroy worker session '" << session_name << "' due to " << s.message(); } }; std::shared_ptr<WorkerSession> worker_session; TF_RETURN_IF_ERROR(env_->session_mgr->WorkerSessionForSession( session_name, &worker_session)); tensorflow::DeviceMgr* device_mgr = worker_session->device_mgr(); TF_RETURN_IF_ERROR(r->Initialize(worker_session.get())); r->SetRemoteEagerContextDefault(); std::function<tsl::core::RefCountPtr<Rendezvous>(const int64_t)> rendezvous_creator = [worker_session, this](const int64_t step_id) { tsl::core::RefCountPtr<RemoteRendezvous> r = env_->rendezvous_mgr->Find(step_id); r->Initialize(worker_session.get()).IgnoreError(); return r; }; LOG(INFO) << "Creating " << (request->async() ? 
"async" : "sync") << " eager service context with rendezvous_id on host " << port::Hostname() << " " << worker_session->worker_name(); SessionOptions opts; opts.config = request->server_def().default_session_config(); LOG(INFO) << "SessionOptions: " << opts.config.DebugString(); if (update_collective_executor_mgr) { env_->collective_executor_mgr = CreateProdRpcCollectiveExecutorMgr( opts.config, device_mgr, MaybeCreateNcclCommunicator(opts.config), worker_session->worker_cache(), worker_session->worker_name()); } tensorflow::EagerContext* ctx = new tensorflow::EagerContext( opts, tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, request->async(), device_mgr, false, std::move(r), worker_session->cluster_flr(), env_->collective_executor_mgr.get()); core::ScopedUnref unref_ctx(ctx); std::vector<string> remote_workers; worker_session->worker_cache()->ListWorkers(&remote_workers); remote_workers.erase(std::remove(remote_workers.begin(), remote_workers.end(), worker_session->worker_name()), remote_workers.end()); std::unique_ptr<tensorflow::eager::EagerClientCache> remote_eager_workers; TF_RETURN_IF_ERROR(worker_session->worker_cache()->GetEagerClientCache( &remote_eager_workers)); DistributedFunctionLibraryRuntime* cluster_flr = eager::CreateClusterFLR(request->context_id(), ctx, worker_session.get()); auto remote_mgr = std::make_unique<tensorflow::eager::RemoteMgr>(false, ctx); Status s = ctx->InitializeRemoteWorker( std::move(remote_eager_workers), worker_session->remote_device_mgr(), remote_workers, request->context_id(), request->context_view_id(), std::move(rendezvous_creator), cluster_flr, std::move(remote_mgr), std::move(session_destroyer)); if (!s.ok()) { VLOG(1) << "EagerContext::InitializeRemoteWorker failed with " << s.ToString(); return s; } #if !defined(IS_MOBILE_PLATFORM) const auto& config = request->server_def().default_session_config(); const bool enable_coordination = !config.experimental().coordination_config().service_type().empty(); if (enable_coordination) { auto dist_mgr = std::make_unique<EagerContextDistributedManager>(ctx); auto coord_agent = env_->session_mgr->GetCoordinationServiceAgent(); dist_mgr->SetCoordinationServiceAgent(coord_agent); if (config.experimental().coordination_config().enable_health_check()) { TF_RETURN_IF_ERROR( ResetAgentAndConnectToCoordinationService(coord_agent)); } auto preemption_notifier = tsl::PreemptionNotifier::CreatePreemptionNotifier("sigterm", Env::Default()); preemption_notifier->WillBePreemptedAtAsync( [coord_agent](absl::StatusOr<absl::Time> time_or_status) { if (time_or_status.ok()) { const auto coord_task = coord_agent->GetOwnTask().value(); Status s = coord_agent->InsertKeyValue( "TF_DEFAULT_PREEMPTION_NOTICE_KEY", absl::StrCat("/job:", coord_task.job_name(), "/task:", coord_task.task_id())); if (!s.ok()) { VLOG(3) << "Preemption not exported to coordination service: " << s; } } }); dist_mgr->SetPreemptionNotifier(std::move(preemption_notifier)); ctx->SetDistributedManager(std::move(dist_mgr)); } #endif std::vector<DeviceAttributes> device_attributes; device_mgr->ListDeviceAttributes(&device_attributes); for (const auto& da : device_attributes) { *response->add_device_attributes() = da; } { mutex_lock l(contexts_mu_); auto context_it = contexts_.find(request->context_id()); if (context_it != contexts_.end()) { return errors::InvalidArgument("EagerService:CreateContext failed. 
", "Context id: <", request->context_id(), "> already exists."); } contexts_.emplace(request->context_id(), new ServerContext(ctx, request->keep_alive_secs(), env_)); } return absl::OkStatus(); } Status EagerServiceImpl::UpdateContext(const UpdateContextRequest* request, UpdateContextResponse* response) { if (env_ == nullptr || env_->rendezvous_mgr == nullptr) { return tensorflow::errors::Internal( "invalid eager env_ or env_->rendezvous_mgr."); } ServerContext* server_context = nullptr; TF_RETURN_IF_ERROR(GetServerContext(request->context_id(), &server_context)); core::ScopedUnref context_unref(server_context); tensorflow::EagerContext* ctx = server_context->Context(); if (request->context_view_id() != ctx->GetContextViewId() + 1) { return errors::InvalidArgument( "EagerService:UpdateContext failed. Context id: <", request->context_id(), "> currently at view #", ctx->GetContextViewId(), " but received update request at view #", request->context_view_id(), ". View id should only be continuously incremented."); } if (request->cluster_device_attributes_size() == 0) { ctx->IncrementContextViewId(); VLOG(1) << "Processing simplified UpdateContextRequest on " << ctx->HostCPU()->name(); return absl::OkStatus(); } auto session_name = tensorflow::strings::StrCat("eager_", request->context_id()); TF_RETURN_IF_ERROR( env_->session_mgr->UpdateSession(session_name, request->server_def(), request->cluster_device_attributes())); std::shared_ptr<WorkerSession> worker_session; TF_RETURN_IF_ERROR(env_->session_mgr->WorkerSessionForSession( session_name, &worker_session)); const tensorflow::DeviceMgr* device_mgr = worker_session->device_mgr(); std::vector<string> remote_workers; worker_session->worker_cache()->ListWorkers(&remote_workers); remote_workers.erase(std::remove(remote_workers.begin(), remote_workers.end(), worker_session->worker_name()), remote_workers.end()); VLOG(1) << "On existing server " << worker_session->worker_name() << " updating remote workers"; if (VLOG_IS_ON(2)) { for (const string& rw : remote_workers) { VLOG(2) << "Remote worker " << rw; } } std::unique_ptr<tensorflow::eager::EagerClientCache> remote_eager_workers; TF_RETURN_IF_ERROR(worker_session->worker_cache()->GetEagerClientCache( &remote_eager_workers)); ctx->ClearCachesAndThreadExecutors(); Status s = ctx->UpdateRemoteWorker(std::move(remote_eager_workers), remote_workers, request->context_id()); if (!s.ok()) { VLOG(1) << "EagerContext::UpdateRemoteWorker failed with " << s.ToString(); return s; } #if !defined(IS_MOBILE_PLATFORM) const auto& config = request->server_def().default_session_config(); const bool should_connect = !config.experimental().coordination_config().service_type().empty() && config.experimental().coordination_config().enable_health_check(); if (should_connect) { auto coord_agent = env_->session_mgr->GetCoordinationServiceAgent(); TF_RETURN_IF_ERROR(ResetAgentAndConnectToCoordinationService(coord_agent)); } #endif std::vector<DeviceAttributes> device_attributes; device_mgr->ListDeviceAttributes(&device_attributes); for (const auto& da : device_attributes) { *response->add_device_attributes() = da; } return absl::OkStatus(); } Status EagerServiceImpl::CreateMasterContext( const tensorflow::uint64 context_id, EagerContext* context) { { mutex_lock l(contexts_mu_); auto iter = contexts_.find(context_id); if (iter != contexts_.end()) { return errors::InvalidArgument( "EagerService:CreateMasterContext failed. 
", "Context id: <", context_id, "> already exists."); } } ServerContext* server_context = ServerContext::CreateMasterContext(context, env_); mutex_lock l(contexts_mu_); contexts_.emplace(context_id, server_context); return absl::OkStatus(); } void EagerServiceImpl::RunComponentFunction( CallOptions* call_opts, const RunComponentFunctionRequest* request, RunComponentFunctionResponse* response, StatusCallback done) { ServerContext* context = nullptr; Status s = GetServerContext(request->context_id(), &context); if (!s.ok()) { done(s); return; } core::ScopedUnref context_unref(context); auto& operation = request->operation(); if (!operation.is_function() || !operation.is_component_function()) { done(errors::Internal( "RunComponentFunction request can only be used to execute " "component functions.")); return; } EagerContext* eager_context = context->Context(); EagerExecutor* eager_executor = &eager_context->Executor(); EagerOperation* op = new EagerOperation(eager_context); int* num_retvals = new int(0); s = GetEagerOperationAndNumRetvals(operation, eager_context, eager_executor, op, num_retvals); if (!s.ok()) { delete num_retvals; delete op; done(s); return; } if (!op->IsLocal()) { delete num_retvals; delete op; done(errors::Internal( "Received RunComponentFunction request with remote function device. ")); return; } s = op->SetAttrBool("is_component_function", true); if (!s.ok()) { delete num_retvals; delete op; done(errors::Internal("Error setting is_component_function attribute: ", s.message())); return; } auto* retvals = new absl::FixedArray<TensorHandle*>(*num_retvals); VLOG(3) << "ServerContext: Calling EagerLocalExecuteAsync for op " << operation.id(); std::vector<int32> output_nums; for (const int32_t output_num : request->output_num()) { output_nums.push_back(output_num); } auto cm = std::make_shared<CancellationManager>(); op->SetCancellationManager(cm.get()); call_opts->SetCancelCallback([cm] { cm->StartCancel(); }); context->Ref(); EagerLocalExecuteAsync( op, retvals->data(), num_retvals, [op, op_id = operation.id(), num_retvals, retvals, output_nums, cm, call_opts, response, eager_context, context, done = std::move(done)](const Status& status) { call_opts->ClearCancelCallback(); auto wrapped_done = [&](const Status& status) { context->Unref(); done(status); delete op; delete num_retvals; delete retvals; }; if (!status.ok()) { wrapped_done(status); return; } wrapped_done(AddOpRetvalsToResponse( eager_context, op_id, *num_retvals, output_nums, retvals->data(), [response] { return response->add_tensor(); }, [response] { return response->add_shape(); })); }); } Status EagerServiceImpl::ExecuteOp(CallOptions* call_opts, const Operation& operation, EagerContext* eager_context, EagerExecutor* eager_executor, QueueResponse* queue_response) { tensorflow::EagerOperation op(eager_context); int num_retvals = 0; TF_RETURN_IF_ERROR(GetEagerOperationAndNumRetvals( operation, eager_context, eager_executor, &op, &num_retvals)); auto cm = std::make_shared<CancellationManager>(); if (call_opts) { op.SetCancellationManager(cm.get()); call_opts->SetCancelCallback([cm] { cm->StartCancel(); }); } absl::FixedArray<tensorflow::TensorHandle*> retvals(num_retvals); VLOG(3) << "ServerContext: Calling EagerExecute for op " << operation.id(); TF_RETURN_IF_ERROR(op.Execute( absl::MakeSpan( reinterpret_cast<tensorflow::AbstractTensorHandle**>(retvals.data()), num_retvals), &num_retvals)); std::function<string*()> add_device_fn = nullptr; if (op.is_function()) { add_device_fn = [queue_response] { return 
queue_response->add_device(); }; } return AddOpRetvalsToResponse( eager_context, operation.id(), num_retvals, {}, retvals.data(), [queue_response] { return queue_response->add_tensor(); }, [queue_response] { return queue_response->add_shape(); }, std::move(add_device_fn)); } Status EagerServiceImpl::Enqueue(CallOptions* call_opts, const EnqueueRequest* request, EnqueueResponse* response, uint64 stream_id) { tsl::profiler::TraceMe activity( [&] { return absl::StrCat( "EagerService:Enqueue#debug_str=", request->DebugString(), "#"); }, tsl::profiler::TraceMeLevel::kInfo); ServerContext* context = nullptr; TF_RETURN_IF_ERROR(GetServerContext(request->context_id(), &context)); core::ScopedUnref context_unref(context); EagerExecutor& executor = stream_id == kInvalidStreamId ? context->Context()->Executor() : context->Context()->RemoteMgr()->GetOrCreateExecutorForStream( stream_id); Status s; for (const auto& item : request->queue()) { auto* queue_response = response->add_queue_response(); if (item.has_operation()) { s = ExecuteOp(call_opts, item.operation(), context->Context(), &executor, queue_response); } else if (item.has_handle_to_decref()) { auto handle_to_decref = std::make_unique<RemoteTensorHandleInternal>( item.handle_to_decref()); auto node = std::make_unique<ClientTensorHandleDeleteNode>( context, std::move(handle_to_decref)); s = context->Context()->Executor().AddOrExecute(std::move(node)); } else if (item.has_send_tensor()) { s = SendTensor(item.send_tensor(), context->Context()); } else if (item.has_send_packed_handle()) { s = SendPackedHandle(item.send_packed_handle(), context->Context()); } else if (item.has_register_function()) { s = RegisterFunction(item.register_function(), context->Context()); } else if (item.has_remove_function()) { s = RemoveFunction(item.remove_function(), context->Context()); } else if (item.has_cleanup_function()) { s = CleanupFunction(item.cleanup_function()); } else { DCHECK(item.has_sync_remote_executor_for_stream()); s = executor.WaitForAllPendingNodes(); } if (!s.ok()) { if (stream_id != kInvalidStreamId) { context->Context()->RemoteMgr()->DeleteExecutorForStream(stream_id); } return s; } } return absl::OkStatus(); } Status EagerServiceImpl::WaitQueueDone(const WaitQueueDoneRequest* request, WaitQueueDoneResponse* response) { ServerContext* context = nullptr; TF_RETURN_IF_ERROR(GetServerContext(request->context_id(), &context)); core::ScopedUnref context_unref(context); if (request->op_id_size() > 0) { return errors::Unimplemented( "EagerServiceImpl::WaitQueueDone is not " "implemented for particular op IDs."); } return context->Context()->Executor().WaitForAllPendingNodes(); } Status EagerServiceImpl::KeepAlive(const KeepAliveRequest* request, KeepAliveResponse* response) { ServerContext* context = nullptr; TF_RETURN_IF_ERROR(GetServerContext(request->context_id(), &context)); core::ScopedUnref context_unref(context); tensorflow::EagerContext* ctx = context->Context(); response->set_context_view_id(ctx->GetContextViewId()); return absl::OkStatus(); } Status EagerServiceImpl::CloseContext(const CloseContextRequest* request, CloseContextResponse* response) { ServerContext* context = nullptr; if (!GetServerContext(request->context_id(), &context).ok()) { return absl::OkStatus(); } core::ScopedUnref context_unref(context); if (request->context_view_id() < context->Context()->GetContextViewId()) { LOG(INFO) << "Ignoring CloseContext request with a stale context_view_id " << request->context_view_id() << " for context_id " << request->context_id() << ". 
The current context_view_id is " << context->Context()->GetContextViewId() << "."; return absl::OkStatus(); } mutex_lock l(contexts_mu_); contexts_.erase(request->context_id()); context->Unref(); return absl::OkStatus(); } Status EagerServiceImpl::RegisterFunction( const RegisterFunctionOp& register_function, EagerContext* eager_context) { if (register_function.is_component_function()) { return eager_context->AddComponentFunction(register_function.function_def(), register_function.library()); } else { return eager_context->AddFunctionDef(register_function.function_def(), register_function.library(), false); } } Status EagerServiceImpl::RemoveFunction(const RemoveFunctionOp& remove_function, EagerContext* eager_context) { return eager_context->RemoveFunction(remove_function.function_name()); } Status EagerServiceImpl::CleanupFunction( const CleanupFunctionOp& cleanup_function) { env_->rendezvous_mgr->Cleanup(cleanup_function.step_id()); return absl::OkStatus(); } Status EagerServiceImpl::SendTensor(const SendTensorOp& send_tensor, EagerContext* eager_context) { absl::InlinedVector<tensorflow::TensorHandle*, 2UL> tensors; for (const auto& tensor_proto : send_tensor.tensors()) { Tensor tensor; if (!tensor.FromProto(tensor_proto)) { return errors::InvalidArgument("Unable to parse tensor proto"); } TensorHandle* tensor_handle = TensorHandle::CreateLocalHandle( std::move(tensor), nullptr, nullptr, eager_context); TensorHandle* copied_handle = nullptr; Device* device; TF_RETURN_IF_ERROR(eager_context->FindDeviceFromName( send_tensor.device_name().c_str(), &device)); TF_RETURN_IF_ERROR(EagerCopyToDevice(tensor_handle, eager_context, &eager_context->Executor(), device, false, &copied_handle)); tensors.push_back(copied_handle); tensor_handle->Unref(); } eager_context->RemoteMgr()->AddOperationOutputs(tensors, send_tensor.op_id()); return absl::OkStatus(); } Status EagerServiceImpl::SendPackedHandle( const SendPackedHandleOp& send_packed_handle, EagerContext* eager_context) { if (send_packed_handle.handles().empty()) { return errors::InvalidArgument("Handles should not be empty."); } std::vector<tensorflow::TensorHandle*> handles; handles.resize(send_packed_handle.handles_size()); for (int i = 0; i < send_packed_handle.handles_size(); ++i) { const auto& item = send_packed_handle.handles(i); if (item.has_local_handle()) { Tensor tensor; if (!ParseTensorProtoToTensor(item.local_handle().tensor(), &tensor)) { return errors::InvalidArgument( "Invalid TensorProto: ", item.local_handle().tensor().DebugString()); } Device* op_device = nullptr; TF_RETURN_IF_ERROR(eager_context->FindDeviceFromName( item.local_handle().device().c_str(), &op_device)); handles[i] = TensorHandle::CreateLocalHandle( std::move(tensor), nullptr, op_device, eager_context); } else { TF_RETURN_IF_ERROR( eager_context->RemoteMgr()->DeserializeRemoteTensorHandle( item.remote_handle(), &handles[i])); } } tensorflow::TensorHandle* packed_handle = nullptr; std::vector<tensorflow::TensorHandle*> handles_to_pack = handles; TF_RETURN_IF_ERROR(TensorHandle::CreatePackedHandle( std::move(handles_to_pack), handles.at(0)->dtype, TensorShape(), send_packed_handle.device_name(), eager_context, &packed_handle)); for (auto* h : handles) { h->Unref(); } eager_context->RemoteMgr()->AddOperationOutputs({packed_handle}, send_packed_handle.op_id()); return absl::OkStatus(); } tensorflow::Status EagerServiceImpl::GetServerContext( uint64 context_id, ServerContext** server_context) { tf_shared_lock l(contexts_mu_); auto iter = contexts_.find(context_id); if 
(iter == contexts_.end()) { *server_context = nullptr; return errors::Aborted(strings::Printf( "Unable to find a context_id matching the specified one " "(%llu). Perhaps the worker was restarted, or the context was GC'd?", static_cast<unsigned long long>(context_id))); } *server_context = iter->second; (*server_context)->Ref(); (*server_context)->RecordAccess(); return absl::OkStatus(); } } }
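Aside (not part of the dataset record above): GetServerContext() at the end of this file follows the common TensorFlow pattern of taking a reference on lookup and handing the caller a core::ScopedUnref to release it. A toy, self-contained sketch of that lifetime pattern; RefCounted, ScopedUnref, and ServerContext here are simplified single-threaded stand-ins, not the real tensorflow::core types (the real ones live in tensorflow/core/platform/refcount.h and use atomics).

class RefCounted {
 public:
  void Ref() { ++count_; }
  void Unref() {
    if (--count_ == 0) delete this;  // last reference deletes the object
  }

 protected:
  virtual ~RefCounted() = default;

 private:
  int count_ = 1;  // a freshly constructed object holds one reference
};

class ScopedUnref {
 public:
  explicit ScopedUnref(RefCounted* obj) : obj_(obj) {}
  ~ScopedUnref() { if (obj_ != nullptr) obj_->Unref(); }

 private:
  RefCounted* obj_;
};

class ServerContext : public RefCounted {};

int main() {
  ServerContext* ctx = new ServerContext;  // the registry's reference
  {
    ctx->Ref();              // lookup (GetServerContext) takes a reference
    ScopedUnref unref(ctx);  // the caller releases it on scope exit
    // ... ctx stays valid here even if the registry erases it concurrently ...
  }
  ctx->Unref();  // registry drops its reference; the object is deleted
  return 0;
}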
#include "tensorflow/core/distributed_runtime/eager/eager_service_impl.h" #include <cstdint> #include <functional> #include <memory> #include <optional> #include <unordered_map> #include <utility> #include <variant> #include <vector> #include "absl/status/status.h" #include "absl/types/optional.h" #include "absl/types/variant.h" #include "tensorflow/c/tf_tensor.h" #include "tensorflow/c/tf_tensor_internal.h" #include "tensorflow/core/common_runtime/eager/kernel_and_device.h" #include "tensorflow/core/common_runtime/eager/tensor_handle.h" #include "tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.h" #include "tensorflow/core/distributed_runtime/eager/remote_mgr.h" #include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h" #include "tensorflow/core/distributed_runtime/session_mgr.h" #include "tensorflow/core/distributed_runtime/test_utils.h" #include "tensorflow/core/distributed_runtime/worker_env.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/protobuf/eager_service.pb.h" #include "tensorflow/core/protobuf/error_codes.pb.h" #include "tensorflow/core/protobuf/remote_tensor_handle.pb.h" #include "tensorflow/core/protobuf/tensorflow_server.pb.h" namespace tensorflow { namespace eager { namespace { class TestEagerServiceImpl : public EagerServiceImpl { public: explicit TestEagerServiceImpl(WorkerEnv* env) : EagerServiceImpl(env) {} Status GetEagerContext(const uint64 context_id, EagerContext** ctx) { ServerContext* context = nullptr; TF_RETURN_IF_ERROR(GetServerContext(context_id, &context)); core::ScopedUnref context_unref(context); *ctx = context->Context(); return absl::OkStatus(); } Status GetTensorHandle(const uint64 context_id, const RemoteTensorHandleInternal& remote_handle, tensorflow::TensorHandle** handle) { ServerContext* context = nullptr; TF_RETURN_IF_ERROR(GetServerContext(context_id, &context)); core::ScopedUnref context_unref(context); return context->Context()->RemoteMgr()->GetTensorHandle(remote_handle, handle); } }; class FakeEagerClient : public EagerClient { public: FakeEagerClient() {} ~FakeEagerClient() override {} void SetServiceImpl(TestEagerServiceImpl* impl) { impl_ = impl; } #define CLIENT_METHOD(method) \ void method##Async(const method##Request* request, \ method##Response* response, StatusCallback done) \ override { \ done(impl_->method(request, response)); \ } CLIENT_METHOD(CreateContext); CLIENT_METHOD(UpdateContext); CLIENT_METHOD(WaitQueueDone); CLIENT_METHOD(KeepAlive); CLIENT_METHOD(CloseContext); #undef CLIENT_METHOD #define CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(method) \ void method##Async(const method##Request* request, \ method##Response* response, StatusCallback done, \ int64_t init_timeout_in_ms, int retries) override { \ done(impl_->method(request, response)); \ } CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES(CreateContext); #undef CLIENT_METHOD_WITH_TIMEOUT_AND_RETRIES void EnqueueAsync(CallOptions* call_opts, const EnqueueRequest* request, EnqueueResponse* response, StatusCallback done) override { done(impl_->Enqueue(call_opts, request, response)); } void RunComponentFunctionAsync(CallOptions* call_opts, const RunComponentFunctionRequest* request, RunComponentFunctionResponse* response, StatusCallback done) override { impl_->RunComponentFunction(call_opts, 
request, response, std::move(done)); } void StreamingEnqueueAsync(bool enable_streaming_enqueue, CallOptions* call_opts, const EnqueueRequest* request, EnqueueResponse* response, StatusCallback done) override { done(impl_->Enqueue(nullptr, request, response)); } bool allow_multiple_pending_requests() const override { return false; } private: TestEagerServiceImpl* impl_; }; class DummyEagerClientCache : public EagerClientCache { public: DummyEagerClientCache() : client_(new FakeEagerClient) {} Status GetClient(const string& target, core::RefCountPtr<EagerClient>* client) override { client->reset(client_.get()); client_->Ref(); return absl::OkStatus(); } private: core::RefCountPtr<EagerClient> client_; }; class FakeCache : public TestWorkerCache { Status GetEagerClientCache( std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override { *eager_client_cache = std::make_unique<DummyEagerClientCache>(); return absl::OkStatus(); } void ListWorkers(std::vector<string>* workers) const override { workers->push_back("/job:localhost/replica:0/task:0"); } }; class EagerServiceImplTest : public ::testing::Test { public: EagerServiceImplTest() : rendezvous_mgr_(&worker_env_), session_mgr_(new SessionMgr( &worker_env_, "/job:localhost/replica:0/task:0/device:CPU:0", std::unique_ptr<WorkerCacheInterface>(new FakeCache), [](const ServerDef& server_def, WorkerCacheInterface** worker_cache) { *worker_cache = new FakeCache; return absl::OkStatus(); }, nullptr)) { worker_env_.env = Env::Default(); worker_env_.rendezvous_mgr = &rendezvous_mgr_; worker_env_.session_mgr = session_mgr_.get(); device_mgr_ = std::make_unique<StaticDeviceMgr>( DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0")); worker_env_.device_mgr = device_mgr_.get(); } protected: WorkerEnv worker_env_; tensorflow::RpcRendezvousMgr rendezvous_mgr_; std::unique_ptr<SessionMgr> session_mgr_; std::unique_ptr<DynamicDeviceMgr> device_mgr_; }; void SetTensorProto(TensorProto* tensor_proto) { int64_t dims[] = {2, 2}; float data[] = {1.0f, 2.0f, 3.0f, 4.0f}; TF_Tensor* t = TF_AllocateTensor( TF_FLOAT, &dims[0], sizeof(dims) / sizeof(int64_t), sizeof(data)); memcpy(TF_TensorData(t), &data[0], TF_TensorByteSize(t)); tensorflow::Tensor tensor; TF_ASSERT_OK(tensorflow::TF_TensorToTensor(t, &tensor)); tensor.AsProtoTensorContent(tensor_proto); TF_DeleteTensor(t); } void BuildOperation( Operation* operation, int64_t id, const string& name, const std::vector<std::variant<TensorProto, std::pair<int64_t, int32>>>& inputs, const std::unordered_map<string, AttrValue>& attrs, const string& device) { operation->set_id(id); operation->set_name(name); operation->set_device(device); for (const auto& input : inputs) { if (input.index() == 0) { *operation->add_op_inputs()->mutable_tensor() = std::get<TensorProto>(input); } else { const auto& tensor_handle_pair = std::get<std::pair<int64_t, int32>>(input); auto* input = operation->add_op_inputs()->mutable_remote_handle(); input->set_op_id(tensor_handle_pair.first); input->set_output_num(tensor_handle_pair.second); input->set_op_device(device); input->set_device(device); } } for (const auto& attr_entry : attrs) { (*operation->mutable_attrs())[attr_entry.first] = attr_entry.second; } } void AddOperationToEnqueueRequest( int64_t id, const string& name, const std::vector<std::variant<TensorProto, std::pair<int64_t, int32>>>& inputs, const std::unordered_map<string, AttrValue>& attrs, const string& device, EnqueueRequest* request) { auto* operation = request->add_queue()->mutable_operation(); 
BuildOperation(operation, id, name, inputs, attrs, device); } void AddOperationToRunComponentFunctionRequest( int64_t id, const string& name, const std::vector<std::variant<TensorProto, std::pair<int64_t, int32>>>& inputs, const std::unordered_map<string, AttrValue>& attrs, const string& device, const int output_num, RunComponentFunctionRequest* request) { auto* operation = request->mutable_operation(); operation->set_is_function(true); operation->set_is_component_function(true); request->add_output_num(output_num); BuildOperation(operation, id, name, inputs, attrs, device); } tensorflow::NodeDef MatMulFunctionNodeDef() { tensorflow::NodeDef def; CHECK(tensorflow::protobuf::TextFormat::ParseFromString( " name: 'matmul_func'" " op: 'MatMulFunction'" " input: 'a'" " input: 'a'" " attr {" " key: 'T'" " value {" " type: DT_FLOAT" " }" " }", &def)); return def; } tensorflow::FunctionDef MatMulFunction() { tensorflow::FunctionDef def; CHECK(tensorflow::protobuf::TextFormat::ParseFromString( " signature {" " name: 'MatMulFunction'" " input_arg {" " name: 'a'" " type: DT_FLOAT" " }" " output_arg {" " name: 'm'" " type: DT_FLOAT" " }" " }" " node_def {" " name: 'matmul'" " op: 'MatMul'" " input: 'a'" " input: 'a'" " attr {" " key: 'T'" " value {" " type: DT_FLOAT" " }" " }" " attr {" " key: 'transpose_a'" " value {" " b: false" " }" " }" " }" " ret {" " key: 'm'" " value: 'matmul:product'" " }", &def)); return def; } tensorflow::FunctionDef MatMulTransposeFunction() { tensorflow::FunctionDef def; CHECK(tensorflow::protobuf::TextFormat::ParseFromString( " signature {" " name: 'MatMulFunction'" " input_arg {" " name: 'a'" " type: DT_FLOAT" " }" " output_arg {" " name: 'm'" " type: DT_FLOAT" " }" " }" " node_def {" " name: 'matmul'" " op: 'MatMul'" " input: 'a'" " input: 'a'" " attr {" " key: 'T'" " value {" " type: DT_FLOAT" " }" " }" " attr {" " key: 'transpose_a'" " value {" " b: true" " }" " }" " }" " ret {" " key: 'm'" " value: 'matmul:product'" " }", &def)); return def; } tensorflow::FunctionDef MatMulNestedFunction() { tensorflow::FunctionDef def; CHECK(tensorflow::protobuf::TextFormat::ParseFromString( " signature {" " name: 'MatMulNestedFunction'" " input_arg {" " name: 'a'" " type: DT_FLOAT" " }" " output_arg {" " name: 'matmul_nested'" " type: DT_FLOAT" " }" " }" " node_def {" " name: 'matmul_nested'" " op: 'MatMulFunction'" " input: 'a'" " attr {" " key: 'T'" " value {" " type: DT_FLOAT" " }" " }" " }" " ret {" " key: 'matmul_nested'" " value: 'matmul_nested:m:0'" " }", &def)); return def; } tensorflow::FunctionDef SingleRecvNodeFunction() { tensorflow::FunctionDef def; CHECK(tensorflow::protobuf::TextFormat::ParseFromString( " signature {" " name: 'SingleRecvNodeFunction'" " input_arg {" " name: 'a'" " type: DT_FLOAT" " }" " output_arg {" " name: 'recv_tensor'" " type: DT_FLOAT" " }" " }" " node_def {" " name: 'recv_node'" " op: '_Recv'" " device: '/job:localhost/replica:0/task:0/device:CPU:0'" " attr {" " key: 'client_terminated'" " value {" " b: true" " }" " }" " attr {" " key: 'recv_device'" " value {" " s: '/job:localhost/replica:0/task:0/device:CPU:0'" " }" " }" " attr {" " key: 'send_device'" " value {" " s: '/job:localhost/replica:0/task:0/device:CPU:0'" " }" " }" " attr {" " key: 'send_device_incarnation'" " value {" " i: 1" " }" " }" " attr {" " key: 'tensor_name'" " value {" " s: 't0'" " }" " }" " attr {" " key: 'tensor_type'" " value {" " type: DT_FLOAT" " }" " }" " }" " ret {" " key: 'recv_tensor'" " value: 'recv_node:tensor:0'" " }", &def)); return def; } 
TEST_F(EagerServiceImplTest, BasicTest) { TestEagerServiceImpl eager_service_impl(&worker_env_); uint64 context_id = random::New64(); CreateContextRequest request; request.mutable_server_def()->set_job_name("localhost"); request.mutable_server_def()->set_task_index(0); request.set_context_id(context_id); CreateContextResponse response; TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response)); EnqueueRequest remote_enqueue_request; remote_enqueue_request.set_context_id(context_id); EnqueueResponse remote_enqueue_response; std::unordered_map<string, AttrValue> const_attrs; AttrValue val; val.set_type(tensorflow::DataType::DT_FLOAT); const_attrs.insert({"dtype", val}); val.Clear(); SetTensorProto(val.mutable_tensor()); const_attrs.insert({"value", val}); AddOperationToEnqueueRequest(1, "Const", {}, const_attrs, "/job:localhost/replica:0/task:0/device:CPU:0", &remote_enqueue_request); std::unordered_map<string, AttrValue> attrs; val.Clear(); val.set_type(tensorflow::DataType::DT_FLOAT); attrs.insert({"T", val}); val.Clear(); val.set_b(false); attrs.insert({"transpose_a", val}); attrs.insert({"transpose_b", val}); AddOperationToEnqueueRequest( 2, "MatMul", {std::make_pair(1, 0), std::make_pair(1, 0)}, attrs, "/job:localhost/replica:0/task:0/device:CPU:0", &remote_enqueue_request); TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request, &remote_enqueue_response)); auto& matmul_result_shape = remote_enqueue_response.queue_response(1).shape(0); EXPECT_EQ(matmul_result_shape.dim(0).size(), 2); EXPECT_EQ(matmul_result_shape.dim(1).size(), 2); tensorflow::TensorHandle* tensor_handle; TF_ASSERT_OK(eager_service_impl.GetTensorHandle( context_id, RemoteTensorHandleInternal(2, 0), &tensor_handle)); const tensorflow::Tensor* t = nullptr; TF_ASSERT_OK(tensor_handle->Tensor(&t)); auto actual = t->flat<float>(); EXPECT_EQ(4, actual.size()); EXPECT_EQ(7, actual(0)); EXPECT_EQ(10, actual(1)); EXPECT_EQ(15, actual(2)); EXPECT_EQ(22, actual(3)); CloseContextRequest close_context_request; close_context_request.set_context_id(context_id); close_context_request.set_context_view_id(0); CloseContextResponse close_context_response; TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request, &close_context_response)); } class EagerServiceImplFunctionTest : public EagerServiceImplTest { public: EagerServiceImplFunctionTest() : EagerServiceImplTest() {} void TestFunction(const RegisterFunctionOp& register_op, const string& function_name, const bool local_inputs = false, const bool test_cancel = false) { TestEagerServiceImpl eager_service_impl(&worker_env_); uint64 context_id = random::New64(); CreateContextRequest request; request.mutable_server_def()->set_job_name("localhost"); request.mutable_server_def()->set_task_index(0); request.set_context_id(context_id); CreateContextResponse response; TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response)); EnqueueRequest enqueue_request; enqueue_request.set_context_id(context_id); *enqueue_request.add_queue()->mutable_register_function() = register_op; EnqueueResponse enqueue_response; TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request, &enqueue_response)); EnqueueRequest remote_enqueue_request; remote_enqueue_request.set_context_id(context_id); EnqueueResponse remote_enqueue_response; if (local_inputs) { TensorProto tensor_proto; SetTensorProto(&tensor_proto); AddOperationToEnqueueRequest( 2, function_name, {tensor_proto}, std::unordered_map<string, AttrValue>(), "/job:localhost/replica:0/task:0/device:CPU:0", 
&remote_enqueue_request); } else { std::unordered_map<string, AttrValue> const_attrs; AttrValue val; val.set_type(tensorflow::DataType::DT_FLOAT); const_attrs.insert({"dtype", val}); val.Clear(); SetTensorProto(val.mutable_tensor()); const_attrs.insert({"value", val}); AddOperationToEnqueueRequest( 1, "Const", {}, const_attrs, "/job:localhost/replica:0/task:0/device:CPU:0", &remote_enqueue_request); AddOperationToEnqueueRequest( 2, function_name, {std::make_pair(1, 0)}, std::unordered_map<string, AttrValue>(), "/job:localhost/replica:0/task:0/device:CPU:0", &remote_enqueue_request); } CallOptions call_opts; Status status; Notification n; Env::Default()->SchedClosure([&] { status = eager_service_impl.Enqueue(&call_opts, &remote_enqueue_request, &remote_enqueue_response); n.Notify(); }); if (test_cancel) { Env::Default()->SleepForMicroseconds(500000); call_opts.StartCancel(); n.WaitForNotification(); EXPECT_TRUE(absl::IsCancelled(status)) << status.message(); } else { n.WaitForNotification(); TF_ASSERT_OK(status); const tensorflow::Tensor* t = nullptr; tensorflow::TensorHandle* tensor_handle; TF_ASSERT_OK(eager_service_impl.GetTensorHandle( context_id, RemoteTensorHandleInternal(2, 0), &tensor_handle)); TF_ASSERT_OK(tensor_handle->Tensor(&t)); auto actual = t->flat<float>(); EXPECT_EQ(4, actual.size()); EXPECT_EQ(7, actual(0)); EXPECT_EQ(10, actual(1)); EXPECT_EQ(15, actual(2)); EXPECT_EQ(22, actual(3)); } CloseContextRequest close_context_request; close_context_request.set_context_id(context_id); close_context_request.set_context_view_id(0); CloseContextResponse close_context_response; TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request, &close_context_response)); } void TestComponentFunction(const RegisterFunctionOp& register_op, const string& function_name, const bool test_cancel) { TestEagerServiceImpl eager_service_impl(&worker_env_); uint64 context_id = random::New64(); CreateContextRequest request; request.mutable_server_def()->set_job_name("localhost"); request.mutable_server_def()->set_task_index(0); request.set_context_id(context_id); CreateContextResponse response; TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response)); EnqueueRequest enqueue_request; enqueue_request.set_context_id(context_id); *enqueue_request.add_queue()->mutable_register_function() = register_op; EnqueueResponse enqueue_response; TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request, &enqueue_response)); EnqueueRequest remote_enqueue_request; remote_enqueue_request.set_context_id(context_id); EnqueueResponse remote_enqueue_response; std::unordered_map<string, AttrValue> const_attrs; AttrValue val; val.set_type(tensorflow::DataType::DT_FLOAT); const_attrs.insert({"dtype", val}); val.Clear(); SetTensorProto(val.mutable_tensor()); const_attrs.insert({"value", val}); AddOperationToEnqueueRequest(1, "Const", {}, const_attrs, "/job:localhost/replica:0/task:0/device:CPU:0", &remote_enqueue_request); TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request, &remote_enqueue_response)); RunComponentFunctionRequest run_comp_func_request; run_comp_func_request.set_context_id(context_id); RunComponentFunctionResponse run_comp_func_response; const int output_num = 5; AddOperationToRunComponentFunctionRequest( 2, function_name, {std::make_pair(1, 0)}, std::unordered_map<string, AttrValue>(), "/job:localhost/replica:0/task:0/device:CPU:0", output_num, &run_comp_func_request); CallOptions call_opts; Notification n; Status status; 
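// RunComponentFunction (invoked below) is asynchronous: the service fires the
// done callback exactly once, so the test funnels the result into `status`
// and joins on the Notification before inspecting it. In the cancellation
// variant, StartCancel() is issued while the call is still in flight.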
eager_service_impl.RunComponentFunction(&call_opts, &run_comp_func_request, &run_comp_func_response, [&status, &n](const Status& s) { status.Update(s); n.Notify(); }); if (test_cancel) { call_opts.StartCancel(); } n.WaitForNotification(); if (test_cancel) { EXPECT_TRUE(absl::IsCancelled(status)) << status.message(); } else { TF_ASSERT_OK(status); const tensorflow::Tensor* t = nullptr; tensorflow::TensorHandle* tensor_handle; TF_ASSERT_OK(eager_service_impl.GetTensorHandle( context_id, RemoteTensorHandleInternal(2, output_num), &tensor_handle)); TF_ASSERT_OK(tensor_handle->Tensor(&t)); auto actual = t->flat<float>(); EXPECT_EQ(4, actual.size()); EXPECT_EQ(7, actual(0)); EXPECT_EQ(10, actual(1)); EXPECT_EQ(15, actual(2)); EXPECT_EQ(22, actual(3)); } CloseContextRequest close_context_request; close_context_request.set_context_id(context_id); close_context_request.set_context_view_id(0); CloseContextResponse close_context_response; TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request, &close_context_response)); } }; TEST_F(EagerServiceImplFunctionTest, BasicFunctionTest) { RegisterFunctionOp register_op; *register_op.mutable_function_def() = MatMulFunction(); TestFunction(register_op, "MatMulFunction"); } TEST_F(EagerServiceImplFunctionTest, FunctionWithLocalInputsTest) { RegisterFunctionOp register_op; *register_op.mutable_function_def() = MatMulFunction(); TestFunction(register_op, "MatMulFunction", true); } TEST_F(EagerServiceImplFunctionTest, NestedFunctionTest) { RegisterFunctionOp register_op; *register_op.mutable_function_def() = MatMulNestedFunction(); *register_op.mutable_library()->add_function() = MatMulFunction(); TestFunction(register_op, "MatMulNestedFunction"); } TEST_F(EagerServiceImplFunctionTest, FunctionCancellationTest) { RegisterFunctionOp register_op; *register_op.mutable_function_def() = SingleRecvNodeFunction(); TestFunction(register_op, "SingleRecvNodeFunction", false, true); } TEST_F(EagerServiceImplFunctionTest, ComponentFunctionTest) { RegisterFunctionOp register_op; *register_op.mutable_function_def() = MatMulFunction(); register_op.set_is_component_function(true); TestComponentFunction(register_op, "MatMulFunction", false); } TEST_F(EagerServiceImplFunctionTest, ComponentFunctionCancellationTest) { RegisterFunctionOp register_op; *register_op.mutable_function_def() = SingleRecvNodeFunction(); register_op.set_is_component_function(true); TestComponentFunction(register_op, "SingleRecvNodeFunction", true); } TEST_F(EagerServiceImplFunctionTest, ComponentNestedFunctionTest) { RegisterFunctionOp register_op; *register_op.mutable_function_def() = MatMulNestedFunction(); *register_op.mutable_library()->add_function() = MatMulFunction(); register_op.set_is_component_function(true); TestComponentFunction(register_op, "MatMulNestedFunction", false); } TEST_F(EagerServiceImplFunctionTest, ComponentNestedFunctionWithNameClashTest) { TestEagerServiceImpl eager_service_impl(&worker_env_); uint64 context_id = random::New64(); CreateContextRequest request; request.mutable_server_def()->set_job_name("localhost"); request.mutable_server_def()->set_task_index(0); request.set_context_id(context_id); CreateContextResponse response; TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response)); { EnqueueRequest enqueue_request; enqueue_request.set_context_id(context_id); RegisterFunctionOp* register_op = enqueue_request.add_queue()->mutable_register_function(); *register_op->mutable_function_def() = MatMulNestedFunction(); 
*register_op->mutable_library()->add_function() = MatMulFunction(); register_op->set_is_component_function(true); EnqueueResponse enqueue_response; TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request, &enqueue_response)); } { EnqueueRequest enqueue_request; enqueue_request.set_context_id(context_id); RegisterFunctionOp* register_op = enqueue_request.add_queue()->mutable_register_function(); *register_op->mutable_function_def() = MatMulNestedFunction(); register_op->mutable_function_def()->mutable_signature()->set_name( "MatMulNestedTransposeFunction"); *register_op->mutable_library()->add_function() = MatMulTransposeFunction(); register_op->set_is_component_function(true); EnqueueResponse enqueue_response; TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &enqueue_request, &enqueue_response)); } EnqueueRequest remote_enqueue_request; remote_enqueue_request.set_context_id(context_id); EnqueueResponse remote_enqueue_response; std::unordered_map<string, AttrValue> const_attrs; AttrValue val; val.set_type(tensorflow::DataType::DT_FLOAT); const_attrs.insert({"dtype", val}); val.Clear(); SetTensorProto(val.mutable_tensor()); const_attrs.insert({"value", val}); AddOperationToEnqueueRequest(1, "Const", {}, const_attrs, "/job:localhost/replica:0/task:0/device:CPU:0", &remote_enqueue_request); TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request, &remote_enqueue_response)); { RunComponentFunctionRequest run_comp_func_request; run_comp_func_request.set_context_id(context_id); RunComponentFunctionResponse run_comp_func_response; const int output_num = 5; AddOperationToRunComponentFunctionRequest( 2, "MatMulNestedFunction", {std::make_pair(1, 0)}, std::unordered_map<string, AttrValue>(), "/job:localhost/replica:0/task:0/device:CPU:0", output_num, &run_comp_func_request); CallOptions call_opts; Notification n; Status status; eager_service_impl.RunComponentFunction(&call_opts, &run_comp_func_request, &run_comp_func_response, [&status, &n](const Status& s) { status.Update(s); n.Notify(); }); n.WaitForNotification(); TF_ASSERT_OK(status); const tensorflow::Tensor* t = nullptr; tensorflow::TensorHandle* tensor_handle; TF_ASSERT_OK(eager_service_impl.GetTensorHandle( context_id, RemoteTensorHandleInternal(2, output_num), &tensor_handle)); TF_ASSERT_OK(tensor_handle->Tensor(&t)); auto actual = t->flat<float>(); EXPECT_EQ(4, actual.size()); EXPECT_EQ(7, actual(0)); EXPECT_EQ(10, actual(1)); EXPECT_EQ(15, actual(2)); EXPECT_EQ(22, actual(3)); } { RunComponentFunctionRequest run_comp_func_request; run_comp_func_request.set_context_id(context_id); RunComponentFunctionResponse run_comp_func_response; const int output_num = 5; AddOperationToRunComponentFunctionRequest( 3, "MatMulNestedTransposeFunction", {std::make_pair(1, 0)}, std::unordered_map<string, AttrValue>(), "/job:localhost/replica:0/task:0/device:CPU:0", output_num, &run_comp_func_request); CallOptions call_opts; Notification n; Status status; eager_service_impl.RunComponentFunction(&call_opts, &run_comp_func_request, &run_comp_func_response, [&status, &n](const Status& s) { status.Update(s); n.Notify(); }); n.WaitForNotification(); TF_ASSERT_OK(status); const tensorflow::Tensor* t = nullptr; tensorflow::TensorHandle* tensor_handle; TF_ASSERT_OK(eager_service_impl.GetTensorHandle( context_id, RemoteTensorHandleInternal(3, output_num), &tensor_handle)); TF_ASSERT_OK(tensor_handle->Tensor(&t)); auto actual = t->flat<float>(); EXPECT_EQ(4, actual.size()); EXPECT_EQ(10, actual(0)); EXPECT_EQ(14, actual(1)); EXPECT_EQ(14, 
actual(2)); EXPECT_EQ(20, actual(3)); } CloseContextRequest close_context_request; close_context_request.set_context_id(context_id); close_context_request.set_context_view_id(0); CloseContextResponse close_context_response; TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request, &close_context_response)); } class FunctionWithRemoteInputsTest : public EagerServiceImplTest { public: FunctionWithRemoteInputsTest() : EagerServiceImplTest(), eager_service_impl_(&worker_env_) { remote_device_mgr_ = std::make_unique<StaticDeviceMgr>( DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:1")); context_id_ = random::New64(); } class TestExecuteNodeArgs : public EagerKernelArgs { public: TestExecuteNodeArgs( absl::InlinedVector<TensorValue, 4UL>&& tensor_args, std::function<Status(const int, eager::RemoteTensorHandle*)> serialize_remote_handle) : EagerKernelArgs(std::move(tensor_args)), serialize_remote_handle_(std::move(serialize_remote_handle)) {} bool HasRemoteOrPackedInputs() const override { return true; } Status GetRemoteArg(const FunctionArgIndex& index, eager::RemoteTensorHandle* val) const override { return serialize_remote_handle_(index.index, val); } private: std::function<Status(const int, eager::RemoteTensorHandle*)> serialize_remote_handle_; }; bool MatMulHasAttrWithDefaultValue(const tensorflow::FunctionDef& fdef) { for (const auto& node : fdef.node_def()) { if (node.op() == "MatMul") { return node.attr().find("transpose_a") != node.attr().end(); } } return false; } void Init() { CreateContextRequest request; request.mutable_server_def()->set_job_name("localhost"); request.mutable_server_def()->set_task_index(0); request.set_context_id(context_id_); CreateContextResponse response; TF_ASSERT_OK(eager_service_impl_.CreateContext(&request, &response)); EagerContext* ctx = nullptr; TF_ASSERT_OK(eager_service_impl_.GetEagerContext(context_id_, &ctx)); Device* device; TF_ASSERT_OK(ctx->FindDeviceFromName(local_device_.c_str(), &device)); core::RefCountPtr<EagerClient> client; TF_ASSERT_OK(ctx->GetClient(device, &client)); FakeEagerClient* fake_client = static_cast<FakeEagerClient*>(client.get()); fake_client->SetServiceImpl(&eager_service_impl_); EnqueueRequest remote_enqueue_request; remote_enqueue_request.set_context_id(context_id_); EnqueueResponse remote_enqueue_response; std::unordered_map<string, AttrValue> const_attrs; AttrValue val; val.set_type(tensorflow::DataType::DT_FLOAT); const_attrs.insert({"dtype", val}); val.Clear(); SetTensorProto(val.mutable_tensor()); const_attrs.insert({"value", val}); AddOperationToEnqueueRequest(1, "Const", {}, const_attrs, local_device_, &remote_enqueue_request); TF_EXPECT_OK(eager_service_impl_.Enqueue(nullptr, &remote_enqueue_request, &remote_enqueue_response)); eager_cluster_flr_ = std::make_unique<EagerClusterFunctionLibraryRuntime>( context_id_, ctx, device_mgr_.get()); fdef_ = MatMulFunction(); TF_ASSERT_OK(func_lib_def_.AddFunctionDef(fdef_)); eager_pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>( remote_device_mgr_.get(), Env::Default(), nullptr, TF_GRAPH_DEF_VERSION, &func_lib_def_, OptimizerOptions(), nullptr, eager_cluster_flr_.get(), nullptr, Rendezvous::Factory{[this](const int64_t step_id, const DeviceMgr* device_mgr, tsl::core::RefCountPtr<Rendezvous>* r) { *r = tsl::core::RefCountPtr<Rendezvous>( worker_env_.rendezvous_mgr->Find(step_id).release()); return absl::OkStatus(); }}); } void CheckOutputTensorAndClose(const Tensor& tensor) { auto actual = tensor.flat<float>(); EXPECT_EQ(4, actual.size()); 
EXPECT_EQ(7, actual(0)); EXPECT_EQ(10, actual(1)); EXPECT_EQ(15, actual(2)); EXPECT_EQ(22, actual(3)); CloseContextRequest close_context_request; close_context_request.set_context_id(context_id_); close_context_request.set_context_view_id(0); CloseContextResponse close_context_response; TF_ASSERT_OK(eager_service_impl_.CloseContext(&close_context_request, &close_context_response)); } void CheckOutputsAndClose(const std::vector<FunctionRet>& outputs, const int64_t op_id) { const tensorflow::Tensor* t = nullptr; tensorflow::TensorHandle* tensor_handle; TF_ASSERT_OK(eager_service_impl_.GetTensorHandle( context_id_, RemoteTensorHandleInternal(2, 0), &tensor_handle)); TF_ASSERT_OK(tensor_handle->Tensor(&t)); EXPECT_EQ(outputs.size(), 1); EXPECT_EQ(outputs.at(0).index(), 1); const TensorShape& shape = std::get<TensorShape>(outputs.at(0)); EXPECT_EQ(shape, t->shape()); CheckOutputTensorAndClose(*t); } protected: const string local_device_ = "/job:localhost/replica:0/task:0/device:CPU:0"; const string remote_device_ = "/job:localhost/replica:0/task:1/device:CPU:0"; TestEagerServiceImpl eager_service_impl_; std::unique_ptr<DeviceMgr> remote_device_mgr_; uint64 context_id_; tensorflow::FunctionDef fdef_; std::unique_ptr<ProcessFunctionLibraryRuntime> eager_pflr_; std::unique_ptr<EagerClusterFunctionLibraryRuntime> eager_cluster_flr_; FunctionLibraryDefinition func_lib_def_{OpRegistry::Global(), FunctionDefLibrary()}; }; TEST_F(FunctionWithRemoteInputsTest, EagerPFLRTest) { Init(); FunctionLibraryRuntime::InstantiateOptions options; options.target = remote_device_; options.is_multi_device_function = true; options.input_devices.push_back(local_device_); FunctionLibraryRuntime::Handle handle; EXPECT_TRUE(MatMulHasAttrWithDefaultValue(fdef_)); TF_ASSERT_OK(eager_pflr_->Instantiate( fdef_.signature().name(), AttrSlice(&fdef_.attr()), options, &handle)); EagerContext* ctx = nullptr; TF_ASSERT_OK(eager_service_impl_.GetEagerContext(context_id_, &ctx)); for (const string& func_name : ctx->FuncLibDef()->ListFunctionNames()) { const FunctionDef* fdef = ctx->FuncLibDef()->Find(func_name); EXPECT_TRUE(fdef != nullptr); if (absl::StartsWith(func_name, "MatMulFunction")) { EXPECT_FALSE(MatMulHasAttrWithDefaultValue(*fdef)); } } bool is_cross_process = false; TF_CHECK_OK(eager_pflr_->IsCrossProcess(handle, &is_cross_process)); EXPECT_TRUE(is_cross_process); FunctionLibraryRuntime::Options opts; const uint64 op_id = 2; opts.op_id = op_id; Notification done; Status status; RemoteTensorHandle input; input.set_op_id(1); input.set_output_num(0); input.set_op_device(local_device_); input.set_device(local_device_); std::vector<RemoteTensorHandle> inputs = {input}; std::vector<FunctionRet> outputs; absl::InlinedVector<TensorValue, 4UL> tensor_args = {TensorValue()}; TestExecuteNodeArgs args( std::move(tensor_args), [&inputs](const int i, RemoteTensorHandle* handle) -> Status { *handle = inputs.at(i); return absl::OkStatus(); }); eager_pflr_->Run(opts, handle, args, &outputs, [&status, &done](const Status& s) { status = s; done.Notify(); }); done.WaitForNotification(); TF_ASSERT_OK(status); CheckOutputsAndClose(outputs, op_id); } TEST_F(FunctionWithRemoteInputsTest, EagerClusterFLRTestWithLocalInputAndOutput) { Init(); FunctionLibraryRuntime::Handle handle; EXPECT_TRUE(MatMulHasAttrWithDefaultValue(fdef_)); Status status; Notification instantiate_done; eager_cluster_flr_->Instantiate( fdef_.signature().name(), func_lib_def_, AttrSlice(&fdef_.attr()), FunctionLibraryRuntime::InstantiateOptions(), &handle, [&status, 
&instantiate_done](const Status& s) { status = s; instantiate_done.Notify(); }); instantiate_done.WaitForNotification(); TF_ASSERT_OK(status); EagerContext* ctx = nullptr; TF_ASSERT_OK(eager_service_impl_.GetEagerContext(context_id_, &ctx)); for (const string& func_name : ctx->FuncLibDef()->ListFunctionNames()) { const FunctionDef* fdef = ctx->FuncLibDef()->Find(func_name); EXPECT_TRUE(fdef != nullptr); if (absl::StartsWith(func_name, "MatMulFunction")) { EXPECT_FALSE(MatMulHasAttrWithDefaultValue(*fdef)); } } const tensorflow::Tensor* input_tensor = nullptr; tensorflow::TensorHandle* tensor_handle; TF_ASSERT_OK(eager_service_impl_.GetTensorHandle( context_id_, RemoteTensorHandleInternal(1, 0), &tensor_handle)); TF_ASSERT_OK(tensor_handle->Tensor(&input_tensor)); FunctionLibraryRuntime::Options opts; Notification execute_done; std::vector<Tensor> inputs = {*input_tensor}; std::vector<Tensor> outputs; eager_cluster_flr_->Run(opts, handle, inputs, &outputs, [&status, &execute_done](const Status& s) { status = s; execute_done.Notify(); }); execute_done.WaitForNotification(); TF_ASSERT_OK(status); EXPECT_EQ(outputs.size(), 1); CheckOutputTensorAndClose(outputs.at(0)); } TEST_F(FunctionWithRemoteInputsTest, KernelAndDeviceFuncTest) { Init(); Device* local_device; TF_ASSERT_OK(device_mgr_->LookupDevice(local_device_, &local_device)); std::vector<Device*> input_dev_ptrs; input_dev_ptrs.push_back(local_device); FunctionLibraryRuntime* flr = eager_pflr_->GetFLR(remote_device_); EagerContext* ctx = nullptr; TF_ASSERT_OK(eager_service_impl_.GetEagerContext(context_id_, &ctx)); core::RefCountPtr<KernelAndDeviceFunc> kernel = nullptr; const int64_t op_id = 2; kernel.reset(new KernelAndDeviceFunc( flr, eager_pflr_.get(), std::move(input_dev_ptrs), {}, {}, nullptr, nullptr, local_device, fdef_.signature().name(), false, false, false, true, false, std::nullopt, false, ctx->RendezvousFactory(), [=]() { return op_id; })); const NodeDef node_def = MatMulFunctionNodeDef(); TF_ASSERT_OK(kernel->InstantiateFunc({}, node_def, nullptr, std::nullopt)); absl::InlinedVector<TensorValue, 4UL> input_tensors = {TensorValue()}; RemoteTensorHandle input; input.set_op_id(1); input.set_output_num(0); input.set_op_device(local_device_); input.set_device(local_device_); std::vector<RemoteTensorHandle> remote_handles = {input}; TestExecuteNodeArgs inputs( std::move(input_tensors), [&remote_handles](const int index, RemoteTensorHandle* handle) -> Status { *handle = remote_handles.at(index); return absl::OkStatus(); }); std::vector<FunctionRet> outputs; TF_ASSERT_OK(kernel->Run(nullptr, inputs, &outputs, nullptr, std::nullopt, std::nullopt, nullptr)); CheckOutputsAndClose(outputs, op_id); } TEST_F(FunctionWithRemoteInputsTest, KernelAndDeviceFuncAsyncTest) { Init(); Device* local_device; TF_ASSERT_OK(device_mgr_->LookupDevice(local_device_, &local_device)); std::vector<Device*> input_dev_ptrs; input_dev_ptrs.push_back(local_device); FunctionLibraryRuntime* flr = eager_pflr_->GetFLR(remote_device_); EagerContext* ctx = nullptr; TF_ASSERT_OK(eager_service_impl_.GetEagerContext(context_id_, &ctx)); core::RefCountPtr<KernelAndDeviceFunc> kernel = nullptr; const int64_t op_id = 2; kernel.reset(new KernelAndDeviceFunc( flr, eager_pflr_.get(), std::move(input_dev_ptrs), {}, {}, nullptr, nullptr, local_device, fdef_.signature().name(), false, false, false, true, false, std::nullopt, false, ctx->RendezvousFactory(), [=]() { return op_id; })); const NodeDef node_def = MatMulFunctionNodeDef(); TF_ASSERT_OK(kernel->InstantiateFunc({}, 
node_def, nullptr, std::nullopt)); absl::InlinedVector<TensorValue, 4UL> input_tensors = {TensorValue()}; RemoteTensorHandle input; input.set_op_id(1); input.set_output_num(0); input.set_op_device(local_device_); input.set_device(local_device_); std::vector<RemoteTensorHandle> remote_handles = {input}; TestExecuteNodeArgs inputs( std::move(input_tensors), [&remote_handles](const int index, RemoteTensorHandle* handle) -> Status { *handle = remote_handles.at(index); return absl::OkStatus(); }); std::vector<FunctionRet> outputs; Status status; Notification n; kernel->RunAsync(nullptr, inputs, &outputs, nullptr, std::nullopt, nullptr, [&status, &n](const Status& s) { status = s; n.Notify(); }); n.WaitForNotification(); TF_ASSERT_OK(status); CheckOutputsAndClose(outputs, op_id); } TEST_F(EagerServiceImplTest, SendTensorTest) { TestEagerServiceImpl eager_service_impl(&worker_env_); uint64 context_id = random::New64(); CreateContextRequest request; request.mutable_server_def()->set_job_name("localhost"); request.mutable_server_def()->set_task_index(0); request.set_context_id(context_id); CreateContextResponse response; TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response)); EnqueueRequest remote_enqueue_request; remote_enqueue_request.set_context_id(context_id); EnqueueResponse remote_enqueue_response; auto* send_tensor = remote_enqueue_request.add_queue()->mutable_send_tensor(); send_tensor->set_op_id(1); SetTensorProto(send_tensor->add_tensors()); std::unordered_map<string, AttrValue> attrs; AttrValue val; val.Clear(); val.set_type(tensorflow::DataType::DT_FLOAT); attrs.insert({"T", val}); val.Clear(); val.set_b(false); attrs.insert({"transpose_a", val}); attrs.insert({"transpose_b", val}); AddOperationToEnqueueRequest( 2, "MatMul", {std::make_pair(1, 0), std::make_pair(1, 0)}, attrs, "/job:localhost/replica:0/task:0/device:CPU:0", &remote_enqueue_request); TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request, &remote_enqueue_response)); const tensorflow::Tensor* t = nullptr; tensorflow::TensorHandle* tensor_handle; TF_ASSERT_OK(eager_service_impl.GetTensorHandle( context_id, RemoteTensorHandleInternal(2, 0), &tensor_handle)); TF_ASSERT_OK(tensor_handle->Tensor(&t)); EXPECT_EQ(tensor_handle->device(), nullptr); auto actual = t->flat<float>(); EXPECT_EQ(4, actual.size()); EXPECT_EQ(7, actual(0)); EXPECT_EQ(10, actual(1)); EXPECT_EQ(15, actual(2)); EXPECT_EQ(22, actual(3)); CloseContextRequest close_context_request; close_context_request.set_context_id(context_id); close_context_request.set_context_view_id(0); CloseContextResponse close_context_response; TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request, &close_context_response)); } TEST_F(EagerServiceImplTest, SendPackedHandleTest) { TestEagerServiceImpl eager_service_impl(&worker_env_); const string device0 = "/job:localhost/replica:0/task:0/device:CPU:0"; const string device1 = "/job:localhost/replica:0/task:1/device:CPU:0"; const string device2 = "/job:localhost/replica:0/task:2/device:CPU:0"; const string composite_device = "/job:localhost/replica:0/task:0/device:COMPOSITE:0"; uint64 context_id = random::New64(); CreateContextRequest request; auto* server_def = request.mutable_server_def(); server_def->set_job_name("localhost"); server_def->set_task_index(0); request.add_cluster_device_attributes()->set_name(device0); request.add_cluster_device_attributes()->set_name(device1); request.add_cluster_device_attributes()->set_name(device2); request.set_context_id(context_id); 
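// The enqueue below packs three heterogeneous handles -- a remote handle on
// device0, an inline local tensor on device1, and a purely symbolic remote
// handle on device2 (op_id 2 is never actually produced) -- and expects the
// service to surface them as one TensorHandle::PACKED handle placed on the
// COMPOSITE device.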
CreateContextResponse response; TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response)); EnqueueRequest remote_enqueue_request; remote_enqueue_request.set_context_id(context_id); EnqueueResponse remote_enqueue_response; auto* send_tensor = remote_enqueue_request.add_queue()->mutable_send_tensor(); send_tensor->set_op_id(1); SetTensorProto(send_tensor->add_tensors()); auto* send_packed_handle = remote_enqueue_request.add_queue()->mutable_send_packed_handle(); send_packed_handle->set_op_id(3); RemoteTensorHandle* remote_handle = send_packed_handle->add_handles()->mutable_remote_handle(); remote_handle->set_op_id(send_tensor->op_id()); remote_handle->set_output_num(0); remote_handle->set_op_device(device0); remote_handle->set_device(device0); SendPackedHandleOp::LocalTensorHandle* local_handle = send_packed_handle->add_handles()->mutable_local_handle(); SetTensorProto(local_handle->mutable_tensor()); local_handle->set_device(device1); remote_handle = send_packed_handle->add_handles()->mutable_remote_handle(); remote_handle->set_op_id(2); remote_handle->set_output_num(5); remote_handle->set_op_device(device2); remote_handle->set_device(device2); TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request, &remote_enqueue_response)); tensorflow::TensorHandle* packed_handle; TF_ASSERT_OK(eager_service_impl.GetTensorHandle( context_id, RemoteTensorHandleInternal(3, 0), &packed_handle)); EXPECT_EQ(packed_handle->Type(), TensorHandle::PACKED); EXPECT_EQ(packed_handle->NumPackedHandles(), 3); EXPECT_EQ(packed_handle->device()->name(), composite_device); TensorHandle* handle0 = nullptr; TF_ASSERT_OK(packed_handle->ExtractPackedHandle(0, &handle0)); EXPECT_EQ(handle0->Type(), TensorHandle::LOCAL); EXPECT_EQ(handle0->op_device()->name(), device0); const Tensor* t0 = nullptr; TF_ASSERT_OK(handle0->Tensor(&t0)); auto actual = t0->flat<float>(); EXPECT_EQ(4, actual.size()); EXPECT_EQ(1.0, actual(0)); EXPECT_EQ(2.0, actual(1)); EXPECT_EQ(3.0, actual(2)); EXPECT_EQ(4.0, actual(3)); TensorHandle* handle1 = nullptr; TF_ASSERT_OK(packed_handle->ExtractPackedHandle(1, &handle1)); EXPECT_EQ(handle1->Type(), TensorHandle::LOCAL); EXPECT_EQ(handle1->op_device()->name(), device1); const Tensor* t1 = nullptr; TF_ASSERT_OK(handle0->Tensor(&t1)); EXPECT_EQ(t1, t0); TensorHandle* handle2 = nullptr; TF_ASSERT_OK(packed_handle->ExtractPackedHandle(2, &handle2)); EXPECT_EQ(handle2->Type(), TensorHandle::REMOTE); EXPECT_EQ(handle2->op_device()->name(), device2); int64_t op_id; int32_t output_num; TF_ASSERT_OK(handle2->RemoteAddress(handle2->device(), true, &op_id, &output_num)); EXPECT_EQ(op_id, 2); EXPECT_EQ(output_num, 5); CloseContextRequest close_context_request; close_context_request.set_context_id(context_id); close_context_request.set_context_view_id(0); CloseContextResponse close_context_response; TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request, &close_context_response)); } TEST_F(EagerServiceImplTest, RequestsToMasterTest) { tsl::core::RefCountPtr<tensorflow::Rendezvous> rendezvous = tsl::core::RefCountPtr<tensorflow::Rendezvous>( new tensorflow::IntraProcessRendezvous(device_mgr_.get())); tensorflow::EagerContext* ctx = new tensorflow::EagerContext( SessionOptions(), tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false, device_mgr_.get(), false, std::move(rendezvous), nullptr, nullptr, true); const uint64 context_id = random::New64(); auto remote_mgr = std::make_unique<tensorflow::eager::RemoteMgr>(true, ctx);
TF_ASSERT_OK(ctx->InitializeRemoteWorker( nullptr, nullptr, {}, context_id, 0, nullptr, nullptr, std::move(remote_mgr), nullptr)); TestEagerServiceImpl eager_service_impl(&worker_env_); EnqueueRequest remote_enqueue_request; remote_enqueue_request.set_context_id(context_id); EnqueueResponse remote_enqueue_response; auto* send_tensor = remote_enqueue_request.add_queue()->mutable_send_tensor(); send_tensor->set_op_id(1); SetTensorProto(send_tensor->add_tensors()); Status status = eager_service_impl.Enqueue(nullptr, &remote_enqueue_request, &remote_enqueue_response); EXPECT_EQ(error::ABORTED, status.code()); EXPECT_TRUE(absl::StrContains( status.message(), "Unable to find a context_id matching the specified one")); TF_ASSERT_OK(eager_service_impl.CreateMasterContext(context_id, ctx)); TF_ASSERT_OK(eager_service_impl.Enqueue(nullptr, &remote_enqueue_request, &remote_enqueue_response)); ctx->Unref(); } TEST_F(EagerServiceImplTest, KeepAliveTest) { TestEagerServiceImpl eager_service_impl(&worker_env_); uint64 context_id = random::New64(); CreateContextRequest request; request.mutable_server_def()->set_job_name("localhost"); request.mutable_server_def()->set_task_index(0); request.set_context_id(context_id); request.set_keep_alive_secs(3); CreateContextResponse response; TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response)); worker_env_.env->SleepForMicroseconds(5 * tensorflow::EnvTime::kSecondsToMicros); KeepAliveRequest keep_alive_request; KeepAliveResponse keep_alive_response; keep_alive_request.set_context_id(context_id); Status status = eager_service_impl.KeepAlive(&keep_alive_request, &keep_alive_response); EXPECT_EQ(status.code(), error::ABORTED); EXPECT_PRED_FORMAT2(::testing::IsSubstring, "Unable to find a context_id", std::string(status.message())); uint64 new_context_id = random::New64(); request.set_context_id(new_context_id); TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response)); worker_env_.env->SleepForMicroseconds(1 * tensorflow::EnvTime::kSecondsToMicros); keep_alive_request.set_context_id(new_context_id); TF_ASSERT_OK( eager_service_impl.KeepAlive(&keep_alive_request, &keep_alive_response)); } } } }
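The two cancellation tests above share one synchronization skeleton: run the blocking call on a background thread, cancel through CallOptions, and only read the shared Status after the Notification fires. Below is a minimal self-contained sketch of that skeleton using the same platform primitives the tests use; DoBlockingCall and CancelThenJoin are hypothetical names, and the stand-in simply blocks until the cancel callback fires rather than issuing a real RPC.

#include "absl/status/status.h"
#include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/status.h"

// Hypothetical stand-in for a cancellable RPC such as Enqueue or
// RunComponentFunction: blocks until the client's StartCancel() triggers the
// registered callback, then reports CANCELLED.
tensorflow::Status DoBlockingCall(tensorflow::CallOptions* opts) {
  tensorflow::Notification cancelled;
  opts->SetCancelCallback([&cancelled] { cancelled.Notify(); });
  cancelled.WaitForNotification();
  opts->ClearCancelCallback();
  return tensorflow::errors::Cancelled("Run call was cancelled");
}

void CancelThenJoin() {
  tensorflow::CallOptions call_opts;
  tensorflow::Status status;
  tensorflow::Notification done;
  tensorflow::Env::Default()->SchedClosure([&] {
    status = DoBlockingCall(&call_opts);
    done.Notify();
  });
  // As in FunctionCancellationTest, give the call time to install its cancel
  // callback first; CallOptions does not latch a cancel issued before then.
  tensorflow::Env::Default()->SleepForMicroseconds(500000);
  call_opts.StartCancel();
  done.WaitForNotification();  // never read `status` before this point
  CHECK(absl::IsCancelled(status));
}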
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
51ea8832-60b0-4030-bb24-35ccc1989653
cpp
tensorflow/tensorflow
direct_session
tensorflow/core/common_runtime/direct_session.cc
tensorflow/core/common_runtime/direct_session_test.cc
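The direct_session.cc listing below opens with the inter-op thread-pool plumbing. For orientation, here is a minimal client-side sketch of the ConfigProto fields that NewThreadPoolFromThreadPoolOptions() consumes; the function name MakeSessionWithSharedPool and the pool name "shared_pool" are invented for the example. A non-empty global_name is what routes a session onto the shared, process-wide pool path rather than an owned per-session pool.

#include <memory>

#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"

std::unique_ptr<tensorflow::Session> MakeSessionWithSharedPool() {
  tensorflow::SessionOptions options;
  auto* pool = options.config.add_session_inter_op_thread_pool();
  pool->set_num_threads(4);              // 0 means "derive from session options"
  pool->set_global_name("shared_pool");  // non-empty => shared across sessions
  return std::unique_ptr<tensorflow::Session>(tensorflow::NewSession(options));
}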
#include "tensorflow/core/common_runtime/direct_session.h" #include <algorithm> #include <atomic> #include <string> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/time/time.h" #include "absl/types/optional.h" #include "tensorflow/core/common_runtime/collective_executor_mgr.h" #include "tensorflow/core/common_runtime/collective_param_resolver_local.h" #include "tensorflow/core/common_runtime/constant_folding.h" #include "tensorflow/core/common_runtime/debugger_state_interface.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/device_resolver_local.h" #include "tensorflow/core/common_runtime/executor.h" #include "tensorflow/core/common_runtime/executor_factory.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/graph_optimizer.h" #include "tensorflow/core/common_runtime/local_session_selection.h" #include "tensorflow/core/common_runtime/memory_types.h" #include "tensorflow/core/common_runtime/optimization_registry.h" #include "tensorflow/core/common_runtime/process_util.h" #include "tensorflow/core/common_runtime/rendezvous_mgr.h" #include "tensorflow/core/common_runtime/scoped_allocator_mgr.h" #include "tensorflow/core/common_runtime/step_stats_collector.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/graph_def_util.h" #include "tensorflow/core/framework/log_memory.h" #include "tensorflow/core/framework/logging.h" #include "tensorflow/core/framework/metrics.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/run_handler.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/graph_partition.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/graph/tensor_id.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/notification.h" #include "tensorflow/core/lib/core/refcount.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/core/threadpool_options.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/lib/monitoring/counter.h" #include "tensorflow/core/lib/random/random.h" #include "tensorflow/core/lib/strings/numbers.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/nccl/collective_communicator.h" #include "tensorflow/core/platform/byte_order.h" #include "tensorflow/core/platform/cpu_info.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/profiler/lib/connected_traceme.h" #include "tensorflow/core/profiler/lib/device_profiler_session.h" #include "tensorflow/core/profiler/lib/traceme_encode.h" #include "tensorflow/core/protobuf/config.pb.h" #include "tensorflow/core/util/device_name_utils.h" #include "tensorflow/core/util/env_var.h" namespace tensorflow { namespace { auto* direct_session_runs = monitoring::Counter<0>::New( "/tensorflow/core/direct_session_runs", "The number of times DirectSession::Run() has been called."); Status NewThreadPoolFromThreadPoolOptions( const SessionOptions& options, 
const ThreadPoolOptionProto& thread_pool_options, int pool_number, thread::ThreadPool** pool, bool* owned) { int32_t num_threads = thread_pool_options.num_threads(); if (num_threads == 0) { num_threads = NumInterOpThreadsFromSessionOptions(options); } const string& name = thread_pool_options.global_name(); if (name.empty()) { VLOG(1) << "Direct session inter op parallelism threads for pool " << pool_number << ": " << num_threads; *pool = new thread::ThreadPool( options.env, ThreadOptions(), strings::StrCat("Compute", pool_number), num_threads, !options.config.experimental().disable_thread_spinning(), nullptr); *owned = true; return absl::OkStatus(); } typedef std::pair<int32, thread::ThreadPool*> MapValue; static std::map<string, MapValue>* global_pool_map = new std::map<string, MapValue>; static mutex* mu = new mutex(); mutex_lock l(*mu); MapValue* mvalue = &(*global_pool_map)[name]; if (mvalue->second == nullptr) { mvalue->first = thread_pool_options.num_threads(); mvalue->second = new thread::ThreadPool( options.env, ThreadOptions(), strings::StrCat("Compute", pool_number), num_threads, !options.config.experimental().disable_thread_spinning(), nullptr); } else { if (mvalue->first != thread_pool_options.num_threads()) { return errors::InvalidArgument( "Pool ", name, " configured previously with num_threads=", mvalue->first, "; cannot re-configure with num_threads=", thread_pool_options.num_threads()); } } *owned = false; *pool = mvalue->second; return absl::OkStatus(); } thread::ThreadPool* GlobalThreadPool(const SessionOptions& options, int32_t num_threads) { static thread::ThreadPool* const thread_pool = NewThreadPoolFromSessionOptions(options, num_threads); return thread_pool; } string GetRendezvousKey(const string& tensor_name, const DeviceAttributes& device_info, const FrameAndIter& frame_iter) { return strings::StrCat(device_info.name(), ";", strings::FpToString(device_info.incarnation()), ";", device_info.name(), ";", tensor_name, ";", frame_iter.frame_id, ":", frame_iter.iter_id); } } class DirectSessionFactory : public SessionFactory { public: DirectSessionFactory() {} bool AcceptsOptions(const SessionOptions& options) override { return options.target.empty() && !options.config.experimental().use_tfrt() && GetDefaultLocalSessionImpl() == LocalSessionImpl::kDirectSession; } Status NewSession(const SessionOptions& options, Session** out_session) override { const auto& experimental_config = options.config.experimental(); if (experimental_config.has_session_metadata()) { if (experimental_config.session_metadata().version() < 0) { return errors::InvalidArgument( "Session version shouldn't be negative: ", experimental_config.session_metadata().DebugString()); } const string key = GetMetadataKey(experimental_config.session_metadata()); mutex_lock l(sessions_lock_); if (!session_metadata_keys_.insert(key).second) { return errors::InvalidArgument( "A session with the same name and version has already been " "created: ", experimental_config.session_metadata().DebugString()); } } if (options.config.graph_options().build_cost_model() > 0) { EnableCPUAllocatorFullStats(); } std::vector<std::unique_ptr<Device>> devices; TF_RETURN_IF_ERROR(DeviceFactory::AddDevices( options, "/job:localhost/replica:0/task:0", &devices)); DirectSession* session = new DirectSession( options, new StaticDeviceMgr(std::move(devices)), this); { mutex_lock l(sessions_lock_); sessions_.push_back(session); } *out_session = session; return absl::OkStatus(); } Status Reset(const SessionOptions& options, const 
std::vector<string>& containers) override { std::vector<DirectSession*> sessions_to_reset; { mutex_lock l(sessions_lock_); std::swap(sessions_to_reset, sessions_); } Status s; for (auto session : sessions_to_reset) { s.Update(session->Reset(containers)); } for (auto session : sessions_to_reset) { s.Update(session->Close()); } return s; } void Deregister(const DirectSession* session) { mutex_lock l(sessions_lock_); sessions_.erase(std::remove(sessions_.begin(), sessions_.end(), session), sessions_.end()); if (session->options().config.experimental().has_session_metadata()) { session_metadata_keys_.erase(GetMetadataKey( session->options().config.experimental().session_metadata())); } } private: static string GetMetadataKey(const SessionMetadata& metadata) { return absl::StrCat(metadata.name(), "/", metadata.version()); } mutex sessions_lock_; std::vector<DirectSession*> sessions_ TF_GUARDED_BY(sessions_lock_); absl::flat_hash_set<string> session_metadata_keys_ TF_GUARDED_BY(sessions_lock_); }; class DirectSessionRegistrar { public: DirectSessionRegistrar() { SessionFactory::Register("DIRECT_SESSION", new DirectSessionFactory()); } }; static DirectSessionRegistrar registrar; std::atomic_int_fast64_t DirectSession::step_id_counter_(1); static RunHandlerPool* GetOrCreateRunHandlerPool( const SessionOptions& options) { int num_inter_threads = 0; int num_intra_threads = 0; static const int env_num_inter_threads = NumInterOpThreadsFromEnvironment(); static const int env_num_intra_threads = NumIntraOpThreadsFromEnvironment(); if (env_num_inter_threads > 0) { num_inter_threads = env_num_inter_threads; } if (env_num_intra_threads > 0) { num_intra_threads = env_num_intra_threads; } if (num_inter_threads == 0) { if (options.config.session_inter_op_thread_pool_size() > 0) { num_inter_threads = options.config.session_inter_op_thread_pool(0).num_threads(); } if (num_inter_threads == 0) { num_inter_threads = NumInterOpThreadsFromSessionOptions(options); } } if (num_intra_threads == 0) { num_intra_threads = options.config.intra_op_parallelism_threads(); if (num_intra_threads == 0) { num_intra_threads = port::MaxParallelism(); } } static RunHandlerPool* pool = [&]() { LOG(INFO) << "Creating run-handler pool with " "[num_inter_threads, num_intra_threads] as [" << num_inter_threads << "," << num_intra_threads << "]"; return new RunHandlerPool(num_inter_threads, num_intra_threads); }(); return pool; } bool DirectSession::ShouldUseRunHandlerPool( const RunOptions& run_options) const { if (options_.config.use_per_session_threads()) return false; if (options_.config.session_inter_op_thread_pool_size() > 0 && run_options.inter_op_thread_pool() > 0) return false; return true; } DirectSession::DirectSession(const SessionOptions& options, const DeviceMgr* device_mgr, DirectSessionFactory* const factory) : options_(options), device_mgr_(device_mgr), factory_(factory), cancellation_manager_(new CancellationManager()), operation_timeout_in_ms_(options_.config.operation_timeout_in_ms()) { const int thread_pool_size = options_.config.session_inter_op_thread_pool_size(); if (thread_pool_size > 0) { for (int i = 0; i < thread_pool_size; ++i) { thread::ThreadPool* pool = nullptr; bool owned = false; init_error_.Update(NewThreadPoolFromThreadPoolOptions( options_, options_.config.session_inter_op_thread_pool(i), i, &pool, &owned)); thread_pools_.emplace_back(pool, owned); } } else if (options_.config.use_per_session_threads()) { thread_pools_.emplace_back(NewThreadPoolFromSessionOptions(options_), true ); } else { static const 
int env_num_threads = NumInterOpThreadsFromEnvironment(); if (options_.config.inter_op_parallelism_threads() < 0 || (options_.config.inter_op_parallelism_threads() == 0 && env_num_threads < 0)) { run_in_caller_thread_ = true; } thread_pools_.emplace_back( GlobalThreadPool(options, run_in_caller_thread_ ? 1 : 0), false ); } const Status status = ReadBoolFromEnvVar("TF_SYNC_ON_FINISH", true, &sync_on_finish_); if (!status.ok()) { LOG(ERROR) << status.message(); } session_handle_ = strings::StrCat("direct", strings::FpToString(random::New64())); if (options.config.log_device_placement()) { const string mapping_str = device_mgr_->DeviceMappingString(); string msg; if (mapping_str.empty()) { msg = "Device mapping: no known devices."; } else { msg = strings::StrCat("Device mapping:\n", mapping_str); } if (!logging::LogToListeners(msg)) { LOG(INFO) << msg; } } device_set_.set_client_device(device_mgr_->HostCPU()); for (auto d : device_mgr_->ListDevices()) { devices_.push_back(d); device_set_.AddDevice(d); d->op_segment()->AddHold(session_handle_); } } DirectSession::~DirectSession() { if (!closed_) Close().IgnoreError(); for (auto& it : partial_runs_) { it.second.reset(nullptr); } for (auto& it : executors_) { it.second.reset(); } callables_.clear(); for (auto d : device_mgr_->ListDevices()) { d->op_segment()->RemoveHold(session_handle_); } functions_.clear(); delete cancellation_manager_; for (const auto& p_and_owned : thread_pools_) { if (p_and_owned.second) delete p_and_owned.first; } execution_state_.reset(nullptr); flib_def_.reset(nullptr); } Status DirectSession::Create(const GraphDef& graph) { return Create(GraphDef(graph)); } Status DirectSession::Create(GraphDef&& graph) { TF_RETURN_IF_ERROR(init_error_); if (graph.node_size() > 0) { mutex_lock l(graph_state_lock_); if (graph_created_) { return errors::AlreadyExists( "A Graph has already been created for this session."); } return ExtendLocked(std::move(graph)); } return absl::OkStatus(); } Status DirectSession::Extend(const GraphDef& graph) { return Extend(GraphDef(graph)); } Status DirectSession::Extend(GraphDef&& graph) { TF_RETURN_IF_ERROR(CheckNotClosed()); mutex_lock l(graph_state_lock_); return ExtendLocked(std::move(graph)); } Status DirectSession::ExtendLocked(GraphDef&& graph) { if (finalized_) { return errors::FailedPrecondition("Session has been finalized."); } if (!(flib_def_ && execution_state_)) { GraphExecutionStateOptions options; options.device_set = &device_set_; options.session_options = &options_; options.session_handle = session_handle_; TF_RETURN_IF_ERROR(GraphExecutionState::MakeForBaseGraph( std::move(graph), options, &execution_state_)); flib_def_.reset( new FunctionLibraryDefinition(execution_state_->flib_def())); graph_created_ = true; } else { std::unique_ptr<GraphExecutionState> state; TF_RETURN_IF_ERROR(execution_state_->Extend(graph, &state)); execution_state_.swap(state); TF_RETURN_IF_ERROR(flib_def_->AddLibrary(graph.library())); } return absl::OkStatus(); } Status DirectSession::Run(const NamedTensorList& inputs, const std::vector<string>& output_names, const std::vector<string>& target_nodes, std::vector<Tensor>* outputs) { RunMetadata run_metadata; return Run(RunOptions(), inputs, output_names, target_nodes, outputs, &run_metadata); } Status DirectSession::CreateDebuggerState( const CallableOptions& callable_options, int64_t global_step, int64_t session_run_index, int64_t executor_step_index, std::unique_ptr<DebuggerStateInterface>* debugger_state) { 
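// Builds a debugger state from the registry for this run's debug options and
// publishes the feed/fetch/target names together with the step indices, so an
// attached debugger can correlate this Run() call with its graph metadata.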
TF_RETURN_IF_ERROR(DebuggerStateRegistry::CreateState( callable_options.run_options().debug_options(), debugger_state)); std::vector<string> input_names(callable_options.feed().begin(), callable_options.feed().end()); std::vector<string> output_names(callable_options.fetch().begin(), callable_options.fetch().end()); std::vector<string> target_names(callable_options.target().begin(), callable_options.target().end()); TF_RETURN_IF_ERROR(debugger_state->get()->PublishDebugMetadata( global_step, session_run_index, executor_step_index, input_names, output_names, target_names)); return absl::OkStatus(); } Status DirectSession::DecorateAndPublishGraphForDebug( const DebugOptions& debug_options, Graph* graph, Device* device) { std::unique_ptr<DebugGraphDecoratorInterface> decorator; TF_RETURN_IF_ERROR( DebugGraphDecoratorRegistry::CreateDecorator(debug_options, &decorator)); TF_RETURN_IF_ERROR(decorator->DecorateGraph(graph, device)); TF_RETURN_IF_ERROR(decorator->PublishGraph(*graph, device->name())); return absl::OkStatus(); } Status DirectSession::RunInternal( int64_t step_id, const RunOptions& run_options, CallFrameInterface* call_frame, ExecutorsAndKeys* executors_and_keys, RunMetadata* run_metadata, const thread::ThreadPoolOptions& threadpool_options) { const uint64 start_time_usecs = options_.env->NowMicros(); const int64_t executor_step_count = executors_and_keys->step_count.fetch_add(1); RunState run_state(step_id, &devices_); const size_t num_executors = executors_and_keys->items.size(); tsl::profiler::TraceMeProducer activity( [&] { if (options_.config.experimental().has_session_metadata()) { const auto& model_metadata = options_.config.experimental().session_metadata(); string model_id = strings::StrCat(model_metadata.name(), ":", model_metadata.version()); return tsl::profiler::TraceMeEncode("SessionRun", {{"id", step_id}, {"_r", 1} , {"model_id", model_id}}); } else { return tsl::profiler::TraceMeEncode( "SessionRun", {{"id", step_id}, {"_r", 1} }); } }, tsl::profiler::ContextType::kTfExecutor, step_id, tsl::profiler::TraceMeLevel::kInfo); std::unique_ptr<DebuggerStateInterface> debugger_state; if (!run_options.debug_options().debug_tensor_watch_opts().empty()) { TF_RETURN_IF_ERROR( CreateDebuggerState(executors_and_keys->callable_options, run_options.debug_options().global_step(), step_id, executor_step_count, &debugger_state)); } if (run_metadata != nullptr && options_.config.experimental().has_session_metadata()) { *run_metadata->mutable_session_metadata() = options_.config.experimental().session_metadata(); } #ifndef __ANDROID__ if (executors_and_keys->collective_graph_key != BuildGraphOptions::kNoCollectiveGraphKey) { if (run_options.experimental().collective_graph_key() != BuildGraphOptions::kNoCollectiveGraphKey) { if (run_options.experimental().collective_graph_key() != executors_and_keys->collective_graph_key) { return errors::Internal( "collective_graph_key in RunOptions ", run_options.experimental().collective_graph_key(), " should match collective_graph_key from optimized graph ", executors_and_keys->collective_graph_key); } } if (!collective_executor_mgr_) { collective_executor_mgr_ = CreateProdLocalCollectiveExecutorMgr( options_.config, device_mgr_.get(), MaybeCreateNcclCommunicator(options_.config)); } run_state.collective_executor.reset(new CollectiveExecutor::Handle( collective_executor_mgr_->FindOrCreate(step_id), true )); } #endif thread::ThreadPool* pool; std::unique_ptr<thread::ThreadPool> threadpool_wrapper; const bool inline_execution_requested = 
run_in_caller_thread_ || run_options.inter_op_thread_pool() == -1; if (inline_execution_requested) { if (executors_and_keys->items.size() > 1) { pool = thread_pools_[0].first; } else { VLOG(1) << "Executing Session::Run() synchronously!"; pool = nullptr; } } else if (threadpool_options.inter_op_threadpool != nullptr) { threadpool_wrapper = std::make_unique<thread::ThreadPool>( threadpool_options.inter_op_threadpool); pool = threadpool_wrapper.get(); } else { if (run_options.inter_op_thread_pool() < -1 || run_options.inter_op_thread_pool() >= static_cast<int32>(thread_pools_.size())) { return errors::InvalidArgument("Invalid inter_op_thread_pool: ", run_options.inter_op_thread_pool()); } pool = thread_pools_[run_options.inter_op_thread_pool()].first; } const int64_t call_timeout = run_options.timeout_in_ms() > 0 ? run_options.timeout_in_ms() : operation_timeout_in_ms_; absl::optional<absl::Time> deadline; if (call_timeout > 0) { deadline = absl::Now() + absl::Milliseconds(call_timeout); } std::unique_ptr<RunHandler> handler; if (ShouldUseRunHandlerPool(run_options) && run_options.experimental().use_run_handler_pool()) { VLOG(1) << "Using RunHandler to schedule inter-op closures."; handler = GetOrCreateRunHandlerPool(options_)->Get( step_id, call_timeout, run_options.experimental().run_handler_pool_options()); if (!handler) { return errors::DeadlineExceeded( "Could not obtain RunHandler for request after waiting for ", call_timeout, "ms."); } } auto* handler_ptr = handler.get(); Executor::Args::Runner default_runner = nullptr; if (pool == nullptr) { default_runner = [](const Executor::Args::Closure& c) { c(); }; } else if (handler_ptr != nullptr) { default_runner = [handler_ptr](Executor::Args::Closure c) { handler_ptr->ScheduleInterOpClosure(std::move(c)); }; } else { default_runner = [pool](Executor::Args::Closure c) { pool->Schedule(std::move(c)); }; } const bool can_execute_synchronously = executors_and_keys->items.size() == 1 && call_timeout == 0; Executor::Args args; args.step_id = step_id; args.call_frame = call_frame; args.collective_executor = (run_state.collective_executor ?
run_state.collective_executor->get() : nullptr); args.session_config = &options_.config; args.session_state = &session_state_; args.session_handle = session_handle_; args.tensor_store = &run_state.tensor_store; args.step_container = &run_state.step_container; args.sync_on_finish = sync_on_finish_; args.user_intra_op_threadpool = threadpool_options.intra_op_threadpool; args.run_all_kernels_inline = pool == nullptr; args.start_time_usecs = start_time_usecs; args.deadline = deadline; const bool do_trace = (run_options.trace_level() > RunOptions::NO_TRACE); bool update_cost_model = false; if (options_.config.graph_options().build_cost_model() > 0) { const int64_t build_cost_model_every = options_.config.graph_options().build_cost_model(); const int64_t build_cost_model_after = options_.config.graph_options().build_cost_model_after(); int64_t measure_step_count = executor_step_count - build_cost_model_after; if (measure_step_count >= 0) { update_cost_model = ((measure_step_count + 1) % build_cost_model_every == 0); } } if (run_metadata != nullptr && (do_trace || update_cost_model || run_options.report_tensor_allocations_upon_oom())) { run_state.collector.reset( new StepStatsCollector(run_metadata->mutable_step_stats())); args.stats_collector = run_state.collector.get(); } std::unique_ptr<DeviceProfilerSession> device_profiler_session; if (run_options.trace_level() >= RunOptions::HARDWARE_TRACE) { device_profiler_session = DeviceProfilerSession::Create(); } CancellationManager step_cancellation_manager(cancellation_manager_); if (step_cancellation_manager.IsCancelled()) { return errors::Cancelled("Run call was cancelled"); } args.cancellation_manager = &step_cancellation_manager; Status run_status; auto set_threadpool_args_for_item = [&default_runner, &handler](const PerPartitionExecutorsAndLib& item, Executor::Args* args) { thread::ThreadPool* device_thread_pool = item.device->tensorflow_device_thread_pool(); if (!device_thread_pool) { args->runner = default_runner; } else { args->runner = [device_thread_pool](Executor::Args::Closure c) { device_thread_pool->Schedule(std::move(c)); }; } if (handler != nullptr) { args->user_intra_op_threadpool = handler->AsIntraThreadPoolInterface(); } }; if (can_execute_synchronously) { PrivateIntraProcessRendezvous rendezvous(device_mgr_.get()); args.rendezvous = &rendezvous; const auto& item = executors_and_keys->items[0]; set_threadpool_args_for_item(item, &args); run_status = item.executor->Run(args); } else { core::RefCountPtr<RefCountedIntraProcessRendezvous> rendezvous( new RefCountedIntraProcessRendezvous(device_mgr_.get())); args.rendezvous = rendezvous.get(); Notification executors_done; ExecutorBarrier* barrier = new ExecutorBarrier(num_executors, rendezvous.get(), [&run_state, &executors_done](const Status& ret) { { mutex_lock l(run_state.mu); run_state.status.Update(ret); } executors_done.Notify(); }); for (const auto& item : executors_and_keys->items) { set_threadpool_args_for_item(item, &args); item.executor->RunAsync(args, barrier->Get()); } WaitForNotification(&executors_done, &run_state, &step_cancellation_manager, call_timeout); { tf_shared_lock l(run_state.mu); run_status = run_state.status; } } if (step_cancellation_manager.IsCancelled()) { run_status.Update(errors::Cancelled("Run call was cancelled")); } if (run_metadata != nullptr && device_profiler_session) { TF_RETURN_IF_ERROR(device_profiler_session->CollectData( run_metadata->mutable_step_stats())); } TF_RETURN_IF_ERROR(run_status); if (!run_state.tensor_store.empty()) { 
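// Persist tensors stashed by GetSessionHandle-style ops during this step, so
// that later Run() calls on this session can retrieve them by handle.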
TF_RETURN_IF_ERROR(run_state.tensor_store.SaveTensors( {executors_and_keys->callable_options.fetch().begin(), executors_and_keys->callable_options.fetch().end()}, &session_state_)); } if (run_state.collector) { run_state.collector->Finalize(); } if (update_cost_model) { std::unordered_map<string, const Graph*> device_to_graph; for (const PerPartitionExecutorsAndLib& partition : executors_and_keys->items) { const Graph* graph = partition.graph.get(); const string& device = partition.flib->device()->name(); device_to_graph[device] = graph; } mutex_lock l(executor_lock_); run_state.collector->BuildCostModel(&cost_model_manager_, device_to_graph); if (run_metadata != nullptr) { CostGraphDef* cost_graph = run_metadata->mutable_cost_graph(); for (const auto& item : executors_and_keys->items) { TF_RETURN_IF_ERROR(cost_model_manager_.AddToCostGraphDef( item.graph.get(), cost_graph)); } } } if (run_options.output_partition_graphs()) { if (options_.config.experimental().disable_output_partition_graphs()) { return errors::InvalidArgument( "RunOptions.output_partition_graphs() is not supported when " "disable_output_partition_graphs is true."); } else if (run_metadata != nullptr) { protobuf::RepeatedPtrField<GraphDef>* partition_graph_defs = run_metadata->mutable_partition_graphs(); for (const PerPartitionExecutorsAndLib& exec_and_lib : executors_and_keys->items) { GraphDef* partition_graph_def = partition_graph_defs->Add(); exec_and_lib.graph->ToGraphDef(partition_graph_def); } } } metrics::UpdateGraphExecTime(options_.env->NowMicros() - start_time_usecs); return absl::OkStatus(); } Status DirectSession::Run(const RunOptions& run_options, const NamedTensorList& inputs, const std::vector<string>& output_names, const std::vector<string>& target_nodes, std::vector<Tensor>* outputs, RunMetadata* run_metadata) { return Run(run_options, inputs, output_names, target_nodes, outputs, run_metadata, thread::ThreadPoolOptions()); } Status DirectSession::Run(const RunOptions& run_options, const NamedTensorList& inputs, const std::vector<string>& output_names, const std::vector<string>& target_nodes, std::vector<Tensor>* outputs, RunMetadata* run_metadata, const thread::ThreadPoolOptions& threadpool_options) { TF_RETURN_IF_ERROR(CheckNotClosed()); TF_RETURN_IF_ERROR(CheckGraphCreated("Run()")); direct_session_runs->GetCell()->IncrementBy(1); std::vector<string> input_tensor_names; input_tensor_names.reserve(inputs.size()); size_t input_size = 0; for (const auto& it : inputs) { input_tensor_names.push_back(it.first); input_size += it.second.AllocatedBytes(); } metrics::RecordGraphInputTensors(input_size); ExecutorsAndKeys* executors_and_keys; RunStateArgs run_state_args(run_options.debug_options()); run_state_args.collective_graph_key = run_options.experimental().collective_graph_key(); TF_RETURN_IF_ERROR(GetOrCreateExecutors(input_tensor_names, output_names, target_nodes, &executors_and_keys, &run_state_args)); { mutex_lock l(collective_graph_key_lock_); collective_graph_key_ = executors_and_keys->collective_graph_key; } FunctionCallFrame call_frame(executors_and_keys->input_types, executors_and_keys->output_types); gtl::InlinedVector<Tensor, 4> feed_args(inputs.size()); for (const auto& it : inputs) { if (it.second.dtype() == DT_RESOURCE) { Tensor tensor_from_handle; TF_RETURN_IF_ERROR( ResourceHandleToInputTensor(it.second, &tensor_from_handle)); feed_args[executors_and_keys->input_name_to_index[it.first]] = tensor_from_handle; } else { feed_args[executors_and_keys->input_name_to_index[it.first]] = it.second; } 
} const Status s = call_frame.SetArgs(feed_args); if (errors::IsInternal(s)) { return errors::InvalidArgument(s.message()); } else if (!s.ok()) { return s; } const int64_t step_id = step_id_counter_.fetch_add(1); if (LogMemory::IsEnabled()) { LogMemory::RecordStep(step_id, run_state_args.handle); } TF_RETURN_IF_ERROR(RunInternal(step_id, run_options, &call_frame, executors_and_keys, run_metadata, threadpool_options)); if (outputs) { std::vector<Tensor> sorted_outputs; const Status s = call_frame.ConsumeRetvals( &sorted_outputs, false); if (errors::IsInternal(s)) { return errors::InvalidArgument(s.message()); } else if (!s.ok()) { return s; } const bool unique_outputs = output_names.size() == executors_and_keys->output_name_to_index.size(); std::vector<int> first_indices; if (!unique_outputs) { first_indices.reserve(output_names.size()); for (const auto& name : output_names) { first_indices.push_back( std::find(output_names.begin(), output_names.end(), name) - output_names.begin()); } } outputs->clear(); size_t output_size = 0; outputs->reserve(sorted_outputs.size()); for (int i = 0; i < output_names.size(); ++i) { const string& output_name = output_names[i]; if (first_indices.empty() || first_indices[i] == i) { outputs->emplace_back( std::move(sorted_outputs[executors_and_keys ->output_name_to_index[output_name]])); } else { outputs->push_back((*outputs)[first_indices[i]]); } output_size += outputs->back().AllocatedBytes(); } metrics::RecordGraphOutputTensors(output_size); } return absl::OkStatus(); } Status DirectSession::PRunSetup(const std::vector<string>& input_names, const std::vector<string>& output_names, const std::vector<string>& target_nodes, string* handle) { TF_RETURN_IF_ERROR(CheckNotClosed()); TF_RETURN_IF_ERROR(CheckGraphCreated("PRunSetup()")); thread::ThreadPool* pool = thread_pools_[0].first; ExecutorsAndKeys* executors_and_keys; DebugOptions debug_options; RunStateArgs run_state_args(debug_options); run_state_args.is_partial_run = true; TF_RETURN_IF_ERROR(GetOrCreateExecutors(input_names, output_names, target_nodes, &executors_and_keys, &run_state_args)); Executor::Args args; args.step_id = step_id_counter_.fetch_add(1); PartialRunState* run_state = new PartialRunState(input_names, output_names, args.step_id, &devices_); run_state->rendez.reset(new IntraProcessRendezvous(device_mgr_.get())); { mutex_lock l(executor_lock_); if (!partial_runs_ .emplace(run_state_args.handle, std::unique_ptr<PartialRunState>(run_state)) .second) { return errors::Internal("The handle '", run_state_args.handle, "' created for this partial run is not unique."); } } const size_t num_executors = executors_and_keys->items.size(); ExecutorBarrier* barrier = new ExecutorBarrier( num_executors, run_state->rendez.get(), [run_state](const Status& ret) { if (!ret.ok()) { mutex_lock l(run_state->mu); run_state->status.Update(ret); } run_state->executors_done.Notify(); }); args.rendezvous = run_state->rendez.get(); args.cancellation_manager = cancellation_manager_; args.collective_executor = nullptr; args.session_config = &options_.config; args.runner = [this, pool](Executor::Args::Closure c) { pool->Schedule(std::move(c)); }; args.session_state = &session_state_; args.session_handle = session_handle_; args.tensor_store = &run_state->tensor_store; args.step_container = &run_state->step_container; if (LogMemory::IsEnabled()) { LogMemory::RecordStep(args.step_id, run_state_args.handle); } args.sync_on_finish = sync_on_finish_; if (options_.config.graph_options().build_cost_model()) { 
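// build_cost_model is set in the session's GraphOptions, so give this
// partial run its own StepStatsCollector to record per-step stats.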
run_state->collector.reset(new StepStatsCollector(nullptr)); args.stats_collector = run_state->collector.get(); } for (auto& item : executors_and_keys->items) { item.executor->RunAsync(args, barrier->Get()); } *handle = run_state_args.handle; return absl::OkStatus(); } Status DirectSession::PRun(const string& handle, const NamedTensorList& inputs, const std::vector<string>& output_names, std::vector<Tensor>* outputs) { TF_RETURN_IF_ERROR(CheckNotClosed()); std::vector<string> parts = str_util::Split(handle, ';'); const string& key = parts[0]; ExecutorsAndKeys* executors_and_keys; PartialRunState* run_state; { mutex_lock l(executor_lock_); auto exc_it = executors_.find(key); if (exc_it == executors_.end()) { return errors::InvalidArgument( "Must run 'setup' before performing partial runs!"); } executors_and_keys = exc_it->second.get(); auto prun_it = partial_runs_.find(handle); if (prun_it == partial_runs_.end()) { return errors::InvalidArgument( "Must run 'setup' before performing partial runs!"); } run_state = prun_it->second.get(); for (const auto& input : inputs) { auto it = run_state->pending_inputs.find(input.first); if (it == run_state->pending_inputs.end()) { return errors::InvalidArgument( "The feed ", input.first, " was not specified in partial_run_setup."); } else if (it->second) { return errors::InvalidArgument("The feed ", input.first, " has already been fed."); } } for (const auto& output : output_names) { auto it = run_state->pending_outputs.find(output); if (it == run_state->pending_outputs.end()) { return errors::InvalidArgument( "The fetch ", output, " was not specified in partial_run_setup."); } else if (it->second) { return errors::InvalidArgument("The fetch ", output, " has already been fetched."); } } } TF_RETURN_IF_ERROR( CheckFetch(inputs, output_names, executors_and_keys, run_state)); Status s = SendPRunInputs(inputs, executors_and_keys, run_state->rendez.get()); if (s.ok()) { s = RecvPRunOutputs(output_names, executors_and_keys, run_state, outputs); } if (s.ok()) { s = run_state->tensor_store.SaveTensors(output_names, &session_state_); } { mutex_lock l(executor_lock_); bool done = true; if (s.ok()) { { mutex_lock l(run_state->mu); if (!run_state->status.ok()) { LOG(WARNING) << "An error unrelated to this prun has been detected. " << run_state->status; } } for (const auto& input : inputs) { auto it = run_state->pending_inputs.find(input.first); it->second = true; } for (const auto& name : output_names) { auto it = run_state->pending_outputs.find(name); it->second = true; } done = run_state->PendingDone(); } if (done) { WaitForNotification(&run_state->executors_done, run_state, cancellation_manager_, operation_timeout_in_ms_); partial_runs_.erase(handle); } } return s; } Status DirectSession::ResourceHandleToInputTensor(const Tensor& resource_tensor, Tensor* retrieved_tensor) { if (resource_tensor.dtype() != DT_RESOURCE) { return errors::InvalidArgument(strings::StrCat( "ResourceHandleToInputTensor() received non-DT_RESOURCE Tensor: ", resource_tensor.dtype())); } const ResourceHandle& resource_handle = resource_tensor.scalar<ResourceHandle>()(); if (resource_handle.container() == SessionState::kTensorHandleResourceTypeName) { return session_state_.GetTensor(resource_handle.name(), retrieved_tensor); } else { return errors::InvalidArgument(strings::StrCat( "Invalid resource type hash code: ", resource_handle.hash_code(), "(name: ", resource_handle.name(), " type: ", resource_handle.maybe_type_name(), "). Perhaps a resource tensor was being provided as a feed? 
That is " "not currently allowed. Please file an issue at " "https: "short code snippet that leads to this error message.")); } } Status DirectSession::SendPRunInputs(const NamedTensorList& inputs, const ExecutorsAndKeys* executors_and_keys, IntraProcessRendezvous* rendez) { Status s; Rendezvous::ParsedKey parsed; for (const auto& input : inputs) { auto it = executors_and_keys->input_name_to_rendezvous_key.find(input.first); if (it == executors_and_keys->input_name_to_rendezvous_key.end()) { return errors::Internal("'", input.first, "' is not a pre-defined feed."); } const string& input_key = it->second; s = Rendezvous::ParseKey(input_key, &parsed); if (!s.ok()) { rendez->StartAbort(s); return s; } if (input.second.dtype() == DT_RESOURCE) { Tensor tensor_from_handle; s = ResourceHandleToInputTensor(input.second, &tensor_from_handle); if (s.ok()) { s = rendez->Send(parsed, Rendezvous::Args(), tensor_from_handle, false); } } else { s = rendez->Send(parsed, Rendezvous::Args(), input.second, false); } if (!s.ok()) { rendez->StartAbort(s); return s; } } return absl::OkStatus(); } Status DirectSession::RecvPRunOutputs( const std::vector<string>& output_names, const ExecutorsAndKeys* executors_and_keys, PartialRunState* run_state, std::vector<Tensor>* outputs) { Status s; if (!output_names.empty()) { outputs->resize(output_names.size()); } Rendezvous::ParsedKey parsed; for (size_t output_offset = 0; output_offset < output_names.size(); ++output_offset) { const string& output_name = output_names[output_offset]; auto it = executors_and_keys->output_name_to_rendezvous_key.find(output_name); if (it == executors_and_keys->output_name_to_rendezvous_key.end()) { return errors::Internal("'", output_name, "' is not a pre-defined fetch."); } const string& output_key = it->second; Tensor output_tensor; bool is_dead; s = Rendezvous::ParseKey(output_key, &parsed); if (s.ok()) { s = run_state->rendez->Recv(parsed, Rendezvous::Args(), &output_tensor, &is_dead, operation_timeout_in_ms_); if (is_dead && s.ok()) { s = errors::InvalidArgument("The tensor returned for ", output_name, " was not valid."); } } if (!s.ok()) { run_state->rendez->StartAbort(s); outputs->clear(); return s; } (*outputs)[output_offset] = output_tensor; } return absl::OkStatus(); } Status DirectSession::CheckFetch(const NamedTensorList& feeds, const std::vector<string>& fetches, const ExecutorsAndKeys* executors_and_keys, const PartialRunState* run_state) { const Graph* graph = executors_and_keys->graph.get(); const NameNodeMap* name_to_node = &executors_and_keys->name_to_node; std::unordered_set<TensorId, TensorId::Hasher> pending_feeds; { mutex_lock l(executor_lock_); for (const auto& input : run_state->pending_inputs) { if (input.second) continue; TensorId id(ParseTensorName(input.first)); auto it = name_to_node->find(id.first); if (it == name_to_node->end()) { return errors::NotFound("Feed ", input.first, ": not found"); } pending_feeds.insert(id); } } for (const auto& it : feeds) { TensorId id(ParseTensorName(it.first)); pending_feeds.erase(id); } std::vector<const Node*> stack; for (const string& fetch : fetches) { TensorId id(ParseTensorName(fetch)); auto it = name_to_node->find(id.first); if (it == name_to_node->end()) { return errors::NotFound("Fetch ", fetch, ": not found"); } stack.push_back(it->second); } std::vector<bool> visited(graph->num_node_ids(), false); while (!stack.empty()) { const Node* n = stack.back(); stack.pop_back(); for (const Edge* in_edge : n->in_edges()) { const Node* in_node = in_edge->src(); if 
(pending_feeds.count({in_node->name(), in_edge->src_output()}) > 0) { return errors::InvalidArgument("Fetch ", in_node->name(), ":", in_edge->src_output(), " can't be computed from the feeds" " that have been fed so far."); } if (!visited[in_node->id()]) { visited[in_node->id()] = true; stack.push_back(in_node); } } } return absl::OkStatus(); } Status DirectSession::CreateExecutors( const CallableOptions& callable_options, std::unique_ptr<ExecutorsAndKeys>* out_executors_and_keys, std::unique_ptr<FunctionInfo>* out_func_info, RunStateArgs* run_state_args) { BuildGraphOptions options; options.callable_options = callable_options; options.use_function_convention = !run_state_args->is_partial_run; options.collective_graph_key = callable_options.run_options().experimental().collective_graph_key(); if (options_.config.experimental() .collective_deterministic_sequential_execution()) { options.collective_order = GraphCollectiveOrder::kEdges; } else if (options_.config.experimental().collective_nccl()) { options.collective_order = GraphCollectiveOrder::kAttrs; } std::unique_ptr<FunctionInfo> func_info(new FunctionInfo); std::unique_ptr<ExecutorsAndKeys> ek(new ExecutorsAndKeys); ek->callable_options = callable_options; std::unordered_map<string, std::unique_ptr<Graph>> graphs; TF_RETURN_IF_ERROR(CreateGraphs( options, &graphs, &func_info->flib_def, run_state_args, &ek->input_types, &ek->output_types, &ek->collective_graph_key)); if (run_state_args->is_partial_run) { ek->graph = std::move(run_state_args->graph); std::unordered_set<StringPiece, StringPieceHasher> names; for (const string& input : callable_options.feed()) { TensorId id(ParseTensorName(input)); names.emplace(id.first); } for (const string& output : callable_options.fetch()) { TensorId id(ParseTensorName(output)); names.emplace(id.first); } for (Node* n : ek->graph->nodes()) { if (names.count(n->name()) > 0) { ek->name_to_node.insert({n->name(), n}); } } } ek->items.reserve(graphs.size()); const auto& optimizer_opts = options_.config.graph_options().optimizer_options(); int graph_def_version = graphs.begin()->second->versions().producer(); const auto* session_metadata = options_.config.experimental().has_session_metadata() ? 
&options_.config.experimental().session_metadata() : nullptr; func_info->proc_flr.reset(new ProcessFunctionLibraryRuntime( device_mgr_.get(), options_.env, &options_.config, graph_def_version, func_info->flib_def.get(), optimizer_opts, thread_pools_[0].first, nullptr, session_metadata, Rendezvous::Factory{[](const int64_t, const DeviceMgr* device_mgr, tsl::core::RefCountPtr<Rendezvous>* r) { *r = tsl::core::RefCountPtr<Rendezvous>( new IntraProcessRendezvous(device_mgr)); return absl::OkStatus(); }})); GraphOptimizer optimizer(optimizer_opts); for (auto iter = graphs.begin(); iter != graphs.end(); ++iter) { const string& partition_name = iter->first; std::unique_ptr<Graph>& partition_graph = iter->second; Device* device; TF_RETURN_IF_ERROR(device_mgr_->LookupDevice(partition_name, &device)); ek->items.resize(ek->items.size() + 1); auto* item = &(ek->items.back()); auto lib = func_info->proc_flr->GetFLR(partition_name); if (lib == nullptr) { return errors::Internal("Could not find device: ", partition_name); } item->flib = lib; LocalExecutorParams params; params.device = device; params.session_metadata = session_metadata; params.function_library = lib; auto opseg = device->op_segment(); params.create_kernel = [this, lib, opseg](const std::shared_ptr<const NodeProperties>& props, OpKernel** kernel) { if (!OpSegment::ShouldOwnKernel(lib, props->node_def.op())) { return lib->CreateKernel(props, kernel); } auto create_fn = [lib, &props](OpKernel** kernel) { return lib->CreateKernel(props, kernel); }; return opseg->FindOrCreate(session_handle_, props->node_def.name(), kernel, create_fn); }; params.delete_kernel = [lib](OpKernel* kernel) { if (kernel && !OpSegment::ShouldOwnKernel(lib, kernel->type_string())) delete kernel; }; optimizer.Optimize(lib, options_.env, device, &partition_graph, GraphOptimizer::Options()); const DebugOptions& debug_options = options.callable_options.run_options().debug_options(); if (!debug_options.debug_tensor_watch_opts().empty()) { TF_RETURN_IF_ERROR(DecorateAndPublishGraphForDebug( debug_options, partition_graph.get(), params.device)); } TF_RETURN_IF_ERROR(EnsureMemoryTypes(DeviceType(device->device_type()), device->name(), partition_graph.get())); item->executor = nullptr; item->device = device; auto executor_type = options_.config.experimental().executor_type(); TF_RETURN_IF_ERROR( NewExecutor(executor_type, params, *partition_graph, &item->executor)); if (!options_.config.experimental().disable_output_partition_graphs() || options_.config.graph_options().build_cost_model() > 0) { item->graph = std::move(partition_graph); } } if (!run_state_args->is_partial_run) { for (int i = 0; i < callable_options.feed().size(); ++i) { const string& input = callable_options.feed(i); ek->input_name_to_index[input] = i; } for (int i = 0; i < callable_options.fetch().size(); ++i) { const string& output = callable_options.fetch(i); ek->output_name_to_index[output] = i; } } else { for (int i = 0; i < callable_options.feed().size(); ++i) { const string& input = callable_options.feed(i); ek->input_name_to_rendezvous_key[input] = GetRendezvousKey( input, device_set_.client_device()->attributes(), FrameAndIter(0, 0)); } for (int i = 0; i < callable_options.fetch().size(); ++i) { const string& output = callable_options.fetch(i); ek->output_name_to_rendezvous_key[output] = GetRendezvousKey(output, device_set_.client_device()->attributes(), FrameAndIter(0, 0)); } } *out_executors_and_keys = std::move(ek); *out_func_info = std::move(func_info); return absl::OkStatus(); } Status 
DirectSession::GetOrCreateExecutors( absl::Span<const string> inputs, absl::Span<const string> outputs, absl::Span<const string> target_nodes, ExecutorsAndKeys** executors_and_keys, RunStateArgs* run_state_args) { int64_t handle_name_counter_value = -1; if (LogMemory::IsEnabled() || run_state_args->is_partial_run) { handle_name_counter_value = handle_name_counter_.fetch_add(1); } string debug_tensor_watches_summary; if (!run_state_args->debug_options.debug_tensor_watch_opts().empty()) { debug_tensor_watches_summary = SummarizeDebugTensorWatches( run_state_args->debug_options.debug_tensor_watch_opts()); } const string key = strings::StrCat( absl::StrJoin(inputs, ","), "->", absl::StrJoin(outputs, ","), "/", absl::StrJoin(target_nodes, ","), "/", run_state_args->is_partial_run, "/", debug_tensor_watches_summary); if (handle_name_counter_value >= 0) { run_state_args->handle = strings::StrCat(key, ";", handle_name_counter_value); } { mutex_lock l(executor_lock_); auto it = executors_.find(key); if (it != executors_.end()) { *executors_and_keys = it->second.get(); return absl::OkStatus(); } } std::vector<string> inputs_sorted(inputs.begin(), inputs.end()); std::sort(inputs_sorted.begin(), inputs_sorted.end()); std::vector<string> outputs_sorted(outputs.begin(), outputs.end()); std::sort(outputs_sorted.begin(), outputs_sorted.end()); std::vector<string> tn_sorted(target_nodes.begin(), target_nodes.end()); std::sort(tn_sorted.begin(), tn_sorted.end()); const string sorted_key = strings::StrCat( absl::StrJoin(inputs_sorted, ","), "->", absl::StrJoin(outputs_sorted, ","), "/", absl::StrJoin(tn_sorted, ","), "/", run_state_args->is_partial_run, "/", debug_tensor_watches_summary); if (handle_name_counter_value >= 0) { run_state_args->handle = strings::StrCat(sorted_key, ";", handle_name_counter_value); } { mutex_lock l(executor_lock_); auto it = executors_.find(sorted_key); if (it != executors_.end()) { *executors_and_keys = it->second.get(); return absl::OkStatus(); } } CallableOptions callable_options; callable_options.mutable_feed()->Reserve(inputs_sorted.size()); for (const string& input : inputs_sorted) { callable_options.add_feed(input); } callable_options.mutable_fetch()->Reserve(outputs_sorted.size()); for (const string& output : outputs_sorted) { callable_options.add_fetch(output); } callable_options.mutable_target()->Reserve(tn_sorted.size()); for (const string& target : tn_sorted) { callable_options.add_target(target); } *callable_options.mutable_run_options()->mutable_debug_options() = run_state_args->debug_options; callable_options.mutable_run_options() ->mutable_experimental() ->set_collective_graph_key(run_state_args->collective_graph_key); std::unique_ptr<ExecutorsAndKeys> ek; std::unique_ptr<FunctionInfo> func_info; TF_RETURN_IF_ERROR( CreateExecutors(callable_options, &ek, &func_info, run_state_args)); mutex_lock l(executor_lock_); auto insert_result = executors_.emplace( sorted_key, std::shared_ptr<ExecutorsAndKeys>(std::move(ek))); if (insert_result.second) { functions_.push_back(std::move(func_info)); } executors_.emplace(key, insert_result.first->second); *executors_and_keys = insert_result.first->second.get(); return absl::OkStatus(); } Status DirectSession::CreateGraphs( const BuildGraphOptions& subgraph_options, std::unordered_map<string, std::unique_ptr<Graph>>* outputs, std::unique_ptr<FunctionLibraryDefinition>* flib_def, RunStateArgs* run_state_args, DataTypeVector* input_types, DataTypeVector* output_types, int64_t* collective_graph_key) { mutex_lock 
l(graph_state_lock_); if (finalized_) { return errors::FailedPrecondition("Session has been finalized."); } std::unique_ptr<ClientGraph> client_graph; std::unique_ptr<GraphExecutionState> temp_exec_state_holder; GraphExecutionState* execution_state = nullptr; if (options_.config.graph_options().place_pruned_graph()) { GraphExecutionStateOptions prune_options; prune_options.device_set = &device_set_; prune_options.session_options = &options_; prune_options.stateful_placements = stateful_placements_; prune_options.session_handle = session_handle_; TF_RETURN_IF_ERROR(GraphExecutionState::MakeForPrunedGraph( *execution_state_, prune_options, subgraph_options, &temp_exec_state_holder, &client_graph)); execution_state = temp_exec_state_holder.get(); } else { execution_state = execution_state_.get(); TF_RETURN_IF_ERROR( execution_state->BuildGraph(subgraph_options, &client_graph)); } *collective_graph_key = client_graph->collective_graph_key; if (subgraph_options.callable_options.feed_size() != client_graph->feed_types.size()) { return errors::Internal( "Graph pruning failed: requested number of feed endpoints = ", subgraph_options.callable_options.feed_size(), " versus number of pruned feed endpoints = ", client_graph->feed_types.size()); } if (subgraph_options.callable_options.fetch_size() != client_graph->fetch_types.size()) { return errors::Internal( "Graph pruning failed: requested number of fetch endpoints = ", subgraph_options.callable_options.fetch_size(), " versus number of pruned fetch endpoints = ", client_graph->fetch_types.size()); } auto current_stateful_placements = execution_state->GetStatefulPlacements(); for (const auto& placement_pair : current_stateful_placements) { const string& node_name = placement_pair.first; const string& placement = placement_pair.second; auto iter = stateful_placements_.find(node_name); if (iter == stateful_placements_.end()) { stateful_placements_.insert(std::make_pair(node_name, placement)); } else if (iter->second != placement) { return errors::Internal( "Stateful placement mismatch. " "Current assignment of ", node_name, " to ", iter->second, " does not match ", placement); } } stateful_placements_ = execution_state->GetStatefulPlacements(); if (run_state_args->is_partial_run) { run_state_args->graph.reset(new Graph(flib_def_.get())); CopyGraph(*execution_state->full_graph(), run_state_args->graph.get()); } PartitionOptions popts; popts.node_to_loc = [](const Node* node) { return node->assigned_device_name(); }; popts.new_name = [this](const string& prefix) { return strings::StrCat(prefix, "/_", edge_name_counter_.fetch_add(1)); }; popts.get_incarnation = [](const string& name) { return 1; }; popts.flib_def = flib_def->get(); popts.control_flow_added = false; std::unordered_map<string, GraphDef> partitions; TF_RETURN_IF_ERROR(Partition(popts, &client_graph->graph, &partitions)); std::vector<string> device_names; device_names.reserve(devices_.size()); for (auto device : devices_) { device_names.push_back(DeviceNameUtils::LocalName(device->name())); } for (const auto& partition : partitions) { const string local_partition_name = DeviceNameUtils::LocalName(partition.first); if (std::count(device_names.begin(), device_names.end(), local_partition_name) == 0) { return errors::InvalidArgument( "Creating a partition for ", local_partition_name, " which doesn't exist in the list of available devices. 
Available " "devices: ", absl::StrJoin(device_names, ",")); } } for (auto& partition : partitions) { std::unique_ptr<Graph> device_graph( new Graph(client_graph->flib_def.get())); device_graph->SetConstructionContext(ConstructionContext::kDirectSession); GraphConstructorOptions device_opts; device_opts.allow_internal_ops = true; device_opts.expect_device_spec = true; TF_RETURN_IF_ERROR(ConvertGraphDefToGraph( device_opts, std::move(partition.second), device_graph.get())); outputs->emplace(partition.first, std::move(device_graph)); } GraphOptimizationPassOptions optimization_options; optimization_options.session_options = &options_; optimization_options.flib_def = client_graph->flib_def.get(); optimization_options.partition_graphs = outputs; TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping( OptimizationPassRegistry::POST_PARTITIONING, optimization_options)); Status s; for (auto& partition : *outputs) { const string& partition_name = partition.first; std::unique_ptr<Graph>* graph = &partition.second; VLOG(2) << "Created " << DebugString(graph->get()) << " for " << partition_name; Device* d; s = device_mgr_->LookupDevice(partition_name, &d); if (!s.ok()) break; s = d->MaybeRewriteGraph(graph); if (!s.ok()) { break; } } *flib_def = std::move(client_graph->flib_def); std::swap(*input_types, client_graph->feed_types); std::swap(*output_types, client_graph->fetch_types); return s; } ::tensorflow::Status DirectSession::ListDevices( std::vector<DeviceAttributes>* response) { response->clear(); response->reserve(devices_.size()); for (Device* d : devices_) { const DeviceAttributes& attrs = d->attributes(); response->emplace_back(attrs); } return absl::OkStatus(); } ::tensorflow::Status DirectSession::Reset( const std::vector<string>& containers) { device_mgr_->ClearContainers(containers); return absl::OkStatus(); } ::tensorflow::Status DirectSession::Close() { cancellation_manager_->StartCancel(); { mutex_lock l(closed_lock_); if (closed_) return absl::OkStatus(); closed_ = true; } if (factory_ != nullptr) factory_->Deregister(this); return absl::OkStatus(); } DirectSession::RunState::RunState(int64_t step_id, const std::vector<Device*>* devices) : step_container(step_id, [devices, step_id](const string& name) { for (auto d : *devices) { if (!d->resource_manager()->Cleanup(name).ok()) { } ScopedAllocatorMgr* sam = d->GetScopedAllocatorMgr(); if (sam) sam->Cleanup(step_id); } }) {} DirectSession::PartialRunState::PartialRunState( const std::vector<string>& pending_input_names, const std::vector<string>& pending_output_names, int64_t step_id, const std::vector<Device*>* devices) : RunState(step_id, devices) { for (auto& name : pending_input_names) { pending_inputs[name] = false; } for (auto& name : pending_output_names) { pending_outputs[name] = false; } } DirectSession::PartialRunState::~PartialRunState() { if (rendez != nullptr) { rendez->StartAbort(errors::Cancelled("PRun cancellation")); executors_done.WaitForNotification(); } } bool DirectSession::PartialRunState::PendingDone() const { for (const auto& it : pending_inputs) { if (!it.second) return false; } for (const auto& it : pending_outputs) { if (!it.second) return false; } return true; } void DirectSession::WaitForNotification(Notification* n, RunState* run_state, CancellationManager* cm, int64_t timeout_in_ms) { const Status status = WaitForNotification(n, timeout_in_ms); if (!status.ok()) { { mutex_lock l(run_state->mu); run_state->status.Update(status); } cm->StartCancel(); n->WaitForNotification(); } } 
::tensorflow::Status DirectSession::WaitForNotification( Notification* notification, int64_t timeout_in_ms) { if (timeout_in_ms > 0) { const int64_t timeout_in_us = timeout_in_ms * 1000; const bool notified = WaitForNotificationWithTimeout(notification, timeout_in_us); if (!notified) { return Status(absl::StatusCode::kDeadlineExceeded, "Timed out waiting for notification"); } } else { notification->WaitForNotification(); } return absl::OkStatus(); } Status DirectSession::MakeCallable(const CallableOptions& callable_options, CallableHandle* out_handle) { TF_RETURN_IF_ERROR(CheckNotClosed()); TF_RETURN_IF_ERROR(CheckGraphCreated("MakeCallable()")); std::unique_ptr<ExecutorsAndKeys> ek; std::unique_ptr<FunctionInfo> func_info; RunStateArgs run_state_args(callable_options.run_options().debug_options()); TF_RETURN_IF_ERROR( CreateExecutors(callable_options, &ek, &func_info, &run_state_args)); { mutex_lock l(callables_lock_); *out_handle = next_callable_handle_++; callables_[*out_handle] = {std::move(ek), std::move(func_info)}; } return absl::OkStatus(); } class DirectSession::RunCallableCallFrame : public CallFrameInterface { public: RunCallableCallFrame(DirectSession* session, ExecutorsAndKeys* executors_and_keys, const std::vector<Tensor>* feed_tensors, std::vector<Tensor>* fetch_tensors) : session_(session), executors_and_keys_(executors_and_keys), feed_tensors_(feed_tensors), fetch_tensors_(fetch_tensors) {} size_t num_args() const override { return executors_and_keys_->input_types.size(); } size_t num_retvals() const override { return executors_and_keys_->output_types.size(); } Status GetArg(int index, const Tensor** val) override { if (TF_PREDICT_FALSE(index > feed_tensors_->size())) { return errors::Internal("Args index out of bounds: ", index); } else { *val = &(*feed_tensors_)[index]; } return absl::OkStatus(); } Status SetRetval(int index, const Tensor& val) override { if (index > fetch_tensors_->size()) { return errors::Internal("RetVal index out of bounds: ", index); } (*fetch_tensors_)[index] = val; return absl::OkStatus(); } private: DirectSession* const session_; ExecutorsAndKeys* const executors_and_keys_; const std::vector<Tensor>* const feed_tensors_; std::vector<Tensor>* const fetch_tensors_; }; ::tensorflow::Status DirectSession::RunCallable( CallableHandle handle, const std::vector<Tensor>& feed_tensors, std::vector<Tensor>* fetch_tensors, RunMetadata* run_metadata) { return RunCallable(handle, feed_tensors, fetch_tensors, run_metadata, thread::ThreadPoolOptions()); } ::tensorflow::Status DirectSession::RunCallable( CallableHandle handle, const std::vector<Tensor>& feed_tensors, std::vector<Tensor>* fetch_tensors, RunMetadata* run_metadata, const thread::ThreadPoolOptions& threadpool_options) { TF_RETURN_IF_ERROR(CheckNotClosed()); TF_RETURN_IF_ERROR(CheckGraphCreated("RunCallable()")); direct_session_runs->GetCell()->IncrementBy(1); std::shared_ptr<ExecutorsAndKeys> executors_and_keys; const int64_t step_id = step_id_counter_.fetch_add(1); { tf_shared_lock l(callables_lock_); if (handle >= next_callable_handle_) { return errors::InvalidArgument("No such callable handle: ", handle); } executors_and_keys = callables_[handle].executors_and_keys; } if (!executors_and_keys) { return errors::InvalidArgument( "Attempted to run callable after handle was released: ", handle); } DebugOptions debug_options; RunStateArgs run_state_args(debug_options); if (feed_tensors.size() != executors_and_keys->input_types.size()) { return errors::InvalidArgument( "Expected ", 
executors_and_keys->input_types.size(), " feed tensors, but got ", feed_tensors.size()); } if (fetch_tensors != nullptr) { fetch_tensors->resize(executors_and_keys->output_types.size()); } else if (!executors_and_keys->output_types.empty()) { return errors::InvalidArgument( "`fetch_tensors` must be provided when the callable has one or more " "outputs."); } size_t input_size = 0; bool any_resource_feeds = false; for (auto& tensor : feed_tensors) { input_size += tensor.AllocatedBytes(); any_resource_feeds = any_resource_feeds || tensor.dtype() == DT_RESOURCE; } metrics::RecordGraphInputTensors(input_size); std::unique_ptr<std::vector<Tensor>> converted_feed_tensors; const std::vector<Tensor>* actual_feed_tensors; if (TF_PREDICT_FALSE(any_resource_feeds)) { converted_feed_tensors = std::make_unique<std::vector<Tensor>>(); converted_feed_tensors->reserve(feed_tensors.size()); for (const Tensor& t : feed_tensors) { if (t.dtype() == DT_RESOURCE) { converted_feed_tensors->emplace_back(); Tensor* tensor_from_handle = &converted_feed_tensors->back(); TF_RETURN_IF_ERROR(ResourceHandleToInputTensor(t, tensor_from_handle)); } else { converted_feed_tensors->emplace_back(t); } } actual_feed_tensors = converted_feed_tensors.get(); } else { actual_feed_tensors = &feed_tensors; } RunCallableCallFrame call_frame(this, executors_and_keys.get(), actual_feed_tensors, fetch_tensors); if (LogMemory::IsEnabled()) { LogMemory::RecordStep(step_id, run_state_args.handle); } TF_RETURN_IF_ERROR(RunInternal( step_id, executors_and_keys->callable_options.run_options(), &call_frame, executors_and_keys.get(), run_metadata, threadpool_options)); if (fetch_tensors != nullptr) { size_t output_size = 0; for (auto& tensor : *fetch_tensors) { output_size += tensor.AllocatedBytes(); } metrics::RecordGraphOutputTensors(output_size); } return absl::OkStatus(); } ::tensorflow::Status DirectSession::ReleaseCallable(CallableHandle handle) { mutex_lock l(callables_lock_); if (handle >= next_callable_handle_) { return errors::InvalidArgument("No such callable handle: ", handle); } callables_.erase(handle); return absl::OkStatus(); } Status DirectSession::Finalize() { mutex_lock l(graph_state_lock_); if (finalized_) { return errors::FailedPrecondition("Session already finalized."); } if (!graph_created_) { return errors::FailedPrecondition("Session not yet created."); } execution_state_.reset(); flib_def_.reset(); finalized_ = true; return absl::OkStatus(); } DirectSession::Callable::~Callable() { executors_and_keys.reset(); function_info.reset(); } }
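// A minimal usage sketch (illustrative; not part of either file). A
// DirectSession is obtained through the generic Session factory when the
// SessionOptions target is empty, which is how the tests below get one:
//
//   SessionOptions options;
//   std::unique_ptr<Session> session(NewSession(options));
//   TF_CHECK_OK(session->Create(graph_def));  // graph_def: the caller's GraphDef
//   std::vector<Tensor> outputs;
//   TF_CHECK_OK(session->Run({}, {"y:0"}, {}, &outputs));  // "y:0": placeholder fetch name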
#include "tensorflow/core/common_runtime/direct_session.h" #include <map> #include <memory> #include <random> #include <string> #include <thread> #include <unordered_map> #include <vector> #include "absl/memory/memory.h" #include "absl/status/status.h" #include "absl/strings/match.h" #include "absl/strings/str_format.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/function_testlib.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/device_factory.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/costmodel.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/stacktrace.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" #include "tensorflow/core/protobuf/error_codes.pb.h" #include "tensorflow/core/protobuf/rewriter_config.pb.h" #include "tensorflow/core/public/session.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/util/device_name_utils.h" #include "tsl/platform/protobuf.h" #if GOOGLE_CUDA #include "third_party/gpus/cuda/include/cuda.h" #include "third_party/gpus/cuda/include/cuda_runtime_api.h" #elif TENSORFLOW_USE_ROCM #include "rocm/include/hip/hip_runtime.h" #endif namespace tensorflow { namespace { CallableOptions MakeCallableOptions(absl::Span<const string> feeds, absl::Span<const string> fetches, absl::Span<const string> targets) { CallableOptions ret; for (const string& feed : feeds) { ret.add_feed(feed); } for (const string& fetch : fetches) { ret.add_fetch(fetch); } for (const string& target : targets) { ret.add_target(target); } return ret; } SessionOptions DefaultSessionOptions() { SessionOptions options; (*options.config.mutable_device_count())["CPU"] = 2; return options; } std::unique_ptr<Session> CreateSession() { return std::unique_ptr<Session>(NewSession(DefaultSessionOptions())); } class DirectSessionMinusAXTest : public ::testing::Test { public: void Initialize(std::initializer_list<float> a_values) { Graph graph(OpRegistry::Global()); Tensor a_tensor(DT_FLOAT, TensorShape({2, 2})); test::FillValues<float>(&a_tensor, a_values); Node* a = test::graph::Constant(&graph, a_tensor); a->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0"); a_ = a->name(); Tensor x_tensor(DT_FLOAT, TensorShape({2, 1})); test::FillValues<float>(&x_tensor, {1, 1}); Node* x = test::graph::Constant(&graph, x_tensor); x->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1"); x_ = x->name(); Node* y = test::graph::Matmul(&graph, a, x, false, false); y->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0"); y_ = y->name(); Node* y_neg = test::graph::Unary(&graph, "Neg", y); y_neg_ = y_neg->name(); y_neg->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1"); 
Node* z = test::graph::Unary(&graph, "Identity", y_neg); z_ = z->name(); z->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1"); graph.ToGraphDef(&def_); } string a_; string x_; string y_; string y_neg_; string z_; GraphDef def_; }; TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork) { Initialize({3, 2, -1, 0}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); std::vector<std::pair<string, Tensor>> inputs; std::vector<string> output_names = {y_ + ":0"}; std::vector<string> target_nodes = {y_neg_}; std::vector<Tensor> outputs; Status s = session->Run(inputs, output_names, target_nodes, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(5.0, mat(0, 0)); } TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork_Callable) { Initialize({3, 2, -1, 0}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); for (int i = 0; i < 2; ++i) { Session::CallableHandle handle; TF_ASSERT_OK(session->MakeCallable( MakeCallableOptions({}, {y_ + ":0"}, {y_neg_}), &handle)); for (int i = 0; i < 2; ++i) { std::vector<Tensor> outputs; TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr)); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(5.0, mat(0, 0)); } Status s = session->RunCallable(handle, {}, nullptr, nullptr); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE( absl::StrContains(s.message(), "`fetch_tensors` must be provided")); TF_ASSERT_OK(session->ReleaseCallable(handle)); std::vector<Tensor> outputs; s = session->RunCallable(handle, {}, &outputs, nullptr); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE(absl::StrContains( s.message(), "Attempted to run callable after handle was released")); s = session->RunCallable(handle + 1, {}, &outputs, nullptr); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE(absl::StrContains(s.message(), "No such callable handle")); } } TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork_OptimizeForStaticGraph) { Initialize({3, 2, -1, 0}); SessionOptions options(DefaultSessionOptions()); options.config.mutable_experimental()->set_optimize_for_static_graph(true); auto session = absl::WrapUnique(NewSession(options)); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); std::vector<std::pair<string, Tensor>> inputs; std::vector<string> output_names = {y_ + ":0"}; std::vector<string> target_nodes = {y_neg_}; std::vector<Tensor> outputs; Status s = session->Run(inputs, output_names, target_nodes, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(5.0, mat(0, 0)); s = session->Extend({}); EXPECT_TRUE(errors::IsFailedPrecondition(s)); EXPECT_TRUE(absl::StrContains(s.message(), "optimize_for_static_graph")); } TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork_DisableOutputPartitionGraphs) { Initialize({3, 2, -1, 0}); SessionOptions options(DefaultSessionOptions()); options.config.mutable_experimental()->set_disable_output_partition_graphs( true); auto session = absl::WrapUnique(NewSession(options)); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); std::vector<std::pair<string, Tensor>> inputs; std::vector<string> output_names = {y_ + ":0"}; std::vector<string> target_nodes = {y_neg_}; std::vector<Tensor> outputs; Status s = 
session->Run(inputs, output_names, target_nodes, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(5.0, mat(0, 0)); RunOptions run_options; run_options.set_output_partition_graphs(true); RunMetadata run_metadata; s = session->Run(run_options, inputs, output_names, target_nodes, &outputs, &run_metadata); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE( absl::StrContains(s.message(), "disable_output_partition_graphs")); } TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork_FinalizeWithCallables) { Initialize({3, 2, -1, 0}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); Session::CallableHandle handle; TF_ASSERT_OK(session->MakeCallable( MakeCallableOptions({}, {y_ + ":0"}, {y_neg_}), &handle)); TF_ASSERT_OK(session->Finalize()); for (int i = 0; i < 2; ++i) { std::vector<Tensor> outputs; TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr)); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(5.0, mat(0, 0)); } TF_ASSERT_OK(session->ReleaseCallable(handle)); Status s = session->MakeCallable(MakeCallableOptions({}, {y_ + ":0"}, {}), &handle); EXPECT_TRUE(errors::IsFailedPrecondition(s)); EXPECT_TRUE(absl::StrContains(s.message(), "Session has been finalized.")); } TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork_FinalizeWithRun) { Initialize({3, 2, -1, 0}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); std::vector<Tensor> outputs; TF_ASSERT_OK(session->Run({}, {y_ + ":0"}, {y_neg_}, &outputs)); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(5.0, mat(0, 0)); TF_ASSERT_OK(session->Finalize()); TF_ASSERT_OK(session->Run({}, {y_ + ":0"}, {y_neg_}, &outputs)); ASSERT_EQ(1, outputs.size()); mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(5.0, mat(0, 0)); Status s = session->Run({}, {y_ + ":0"}, {}, &outputs); EXPECT_TRUE(errors::IsFailedPrecondition(s)); EXPECT_TRUE(absl::StrContains(s.message(), "Session has been finalized.")); } TEST_F(DirectSessionMinusAXTest, TestTensorConnection) { Initialize({3, 2, -1, 0}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); { CallableOptions callable_options; TensorConnection* c = callable_options.add_tensor_connection(); c->set_from_tensor(a_ + ":0"); c->set_to_tensor(y_ + ":0"); callable_options.add_fetch(y_neg_ + ":0"); Session::CallableHandle handle; TF_ASSERT_OK(session->MakeCallable(callable_options, &handle)); std::vector<Tensor> outputs; TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr)); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(-3.0, mat(0, 0)); EXPECT_FLOAT_EQ(-2.0, mat(0, 1)); EXPECT_FLOAT_EQ(1.0, mat(1, 0)); EXPECT_FLOAT_EQ(0.0, mat(1, 1)); TF_ASSERT_OK(session->ReleaseCallable(handle)); } { CallableOptions callable_options; TensorConnection* c = callable_options.add_tensor_connection(); c->set_from_tensor(a_ + ":0"); c->set_to_tensor(y_ + ":0"); callable_options.add_fetch(a_ + ":0"); callable_options.add_fetch(y_neg_ + ":0"); Session::CallableHandle handle; TF_ASSERT_OK(session->MakeCallable(callable_options, &handle)); std::vector<Tensor> outputs; 
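// Because a_:0 is connected to y_:0, the matmul output is replaced by A
// itself; the two fetches are therefore expected to be A and -A.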
TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr)); ASSERT_EQ(2, outputs.size()); auto mat_a = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(3.0, mat_a(0, 0)); EXPECT_FLOAT_EQ(2.0, mat_a(0, 1)); EXPECT_FLOAT_EQ(-1.0, mat_a(1, 0)); EXPECT_FLOAT_EQ(0.0, mat_a(1, 1)); auto mat_y_neg = outputs[1].matrix<float>(); ASSERT_TRUE(outputs[1].IsInitialized()); EXPECT_FLOAT_EQ(-3.0, mat_y_neg(0, 0)); EXPECT_FLOAT_EQ(-2.0, mat_y_neg(0, 1)); EXPECT_FLOAT_EQ(1.0, mat_y_neg(1, 0)); EXPECT_FLOAT_EQ(0.0, mat_y_neg(1, 1)); TF_ASSERT_OK(session->ReleaseCallable(handle)); } { CallableOptions callable_options; TensorConnection* c = callable_options.add_tensor_connection(); c->set_from_tensor(y_ + ":0"); c->set_to_tensor(a_ + ":0"); callable_options.add_fetch(y_ + ":0"); Session::CallableHandle handle; Status s = session->MakeCallable(callable_options, &handle); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE(absl::StrContains(s.message(), "would create a cycle")); } { CallableOptions callable_options; TensorConnection* c = callable_options.add_tensor_connection(); c->set_from_tensor("unknown_node:0"); c->set_to_tensor(y_ + ":0"); callable_options.add_fetch(y_ + ":0"); Session::CallableHandle handle; Status s = session->MakeCallable(callable_options, &handle); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE(absl::StrContains(s.message(), "unknown node")); } { CallableOptions callable_options; TensorConnection* c = callable_options.add_tensor_connection(); c->set_from_tensor(a_ + ":17"); c->set_to_tensor(y_ + ":0"); callable_options.add_fetch(y_ + ":0"); Session::CallableHandle handle; Status s = session->MakeCallable(callable_options, &handle); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE(absl::StrContains(s.message(), "unknown edge")); } { CallableOptions callable_options; TensorConnection* c = callable_options.add_tensor_connection(); c->set_from_tensor(a_ + ":0"); c->set_to_tensor("unknown_node:0"); callable_options.add_fetch(y_ + ":0"); Session::CallableHandle handle; Status s = session->MakeCallable(callable_options, &handle); EXPECT_TRUE(errors::IsNotFound(s)); EXPECT_TRUE(absl::StrContains(s.message(), "unable to find feed output")); } { CallableOptions callable_options; TensorConnection* c1 = callable_options.add_tensor_connection(); c1->set_from_tensor(a_ + ":0"); c1->set_to_tensor(y_neg_ + ":0"); TensorConnection* c2 = callable_options.add_tensor_connection(); c2->set_from_tensor(x_ + ":0"); c2->set_to_tensor(y_neg_ + ":0"); callable_options.add_fetch(z_ + ":0"); Session::CallableHandle handle; Status s = session->MakeCallable(callable_options, &handle); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE(absl::StrContains(s.message(), "fed more than once")); } { CallableOptions callable_options; TensorConnection* c = callable_options.add_tensor_connection(); c->set_from_tensor(a_ + ":0"); c->set_to_tensor(y_ + ":0"); callable_options.add_feed(y_ + ":0"); callable_options.add_fetch(y_neg_ + ":0"); Session::CallableHandle handle; Status s = session->MakeCallable(callable_options, &handle); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE(absl::StrContains(s.message(), "fed more than once")); } } TEST_F(DirectSessionMinusAXTest, TestFeed) { Initialize({1, 2, 3, 4}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); Tensor t(DT_FLOAT, TensorShape({2, 1})); t.matrix<float>()(0, 0) = 5; t.matrix<float>()(1, 0) = 6; std::vector<std::pair<string, Tensor>> inputs = {{x_, 
t}}; std::vector<string> output_names = {y_ + ":0"}; std::vector<Tensor> outputs; Status s = session->Run(inputs, output_names, {}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); EXPECT_FLOAT_EQ(17.0, mat(0, 0)); EXPECT_FLOAT_EQ(39.0, mat(1, 0)); } TEST_F(DirectSessionMinusAXTest, TestFeed_Callable) { Initialize({1, 2, 3, 4}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); CallableOptions callable_options; callable_options.add_feed(x_); callable_options.add_fetch(y_ + ":0"); Session::CallableHandle handle; TF_ASSERT_OK(session->MakeCallable(MakeCallableOptions({x_}, {y_ + ":0"}, {}), &handle)); Tensor t(DT_FLOAT, TensorShape({2, 1})); t.matrix<float>()(0, 0) = 5; t.matrix<float>()(1, 0) = 6; std::vector<Tensor> inputs = {t}; std::vector<Tensor> outputs; TF_ASSERT_OK(session->RunCallable(handle, inputs, &outputs, nullptr)); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); EXPECT_FLOAT_EQ(17.0, mat(0, 0)); EXPECT_FLOAT_EQ(39.0, mat(1, 0)); } TEST_F(DirectSessionMinusAXTest, TestConcurrency) { Initialize({1, 2, 3, 4}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4); std::vector<string> output_names = {y_ + ":0"}; auto fn = [&session, output_names]() { for (int i = 0; i < 1000; ++i) { std::vector<std::pair<string, Tensor>> inputs; std::vector<Tensor> outputs; Status s = session->Run(inputs, output_names, {}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); EXPECT_FLOAT_EQ(3.0, mat(0, 0)); } }; for (int i = 0; i < 4; ++i) { tp->Schedule(fn); } delete tp; } TEST_F(DirectSessionMinusAXTest, TestConcurrency_Callable) { Initialize({1, 2, 3, 4}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4); Session::CallableHandle handle; TF_ASSERT_OK( session->MakeCallable(MakeCallableOptions({}, {y_ + ":0"}, {}), &handle)); auto fn = [&session, handle]() { for (int i = 0; i < 1000; ++i) { std::vector<Tensor> outputs; TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr)); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); EXPECT_FLOAT_EQ(3.0, mat(0, 0)); } }; for (int i = 0; i < 4; ++i) { tp->Schedule(fn); } delete tp; } TEST_F(DirectSessionMinusAXTest, TestPerSessionThreads) { Initialize({1, 2, 3, 4}); SessionOptions options; options.config.set_use_per_session_threads(true); (*options.config.mutable_device_count())["CPU"] = 2; std::unique_ptr<Session> session(NewSession(options)); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4); std::vector<string> output_names = {y_ + ":0"}; auto fn = [&session, output_names]() { for (int i = 0; i < 1000; ++i) { std::vector<std::pair<string, Tensor>> inputs; std::vector<Tensor> outputs; Status s = session->Run(inputs, output_names, {}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); EXPECT_FLOAT_EQ(3.0, mat(0, 0)); } }; for (int i = 0; i < 4; ++i) { tp->Schedule(fn); } delete tp; } TEST_F(DirectSessionMinusAXTest, TwoCreateCallsFails) { Initialize({1, 2, 3, 4}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); 
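// A second Create() on an already-created session must fail; adding nodes
// to an existing graph goes through Session::Extend() instead.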
ASSERT_FALSE(session->Create(def_).ok()); } TEST_F(DirectSessionMinusAXTest, ForgetToCreate) { Initialize({1, 2, 3, 4}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); std::vector<std::pair<string, Tensor>> inputs; std::vector<Tensor> outputs; ASSERT_FALSE(session->Run(inputs, {y_ + ":0"}, {y_neg_}, &outputs).ok()); } TEST_F(DirectSessionMinusAXTest, InvalidDevice) { GraphDef def; Graph graph(OpRegistry::Global()); Tensor a_tensor(DT_FLOAT, TensorShape({2, 2})); a_tensor.flat<float>().setRandom(); Node* a = test::graph::Constant(&graph, a_tensor); a->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0"); Tensor x_tensor(DT_FLOAT, TensorShape({2, 1})); x_tensor.flat<float>().setRandom(); Node* x = test::graph::Constant(&graph, x_tensor); x->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1"); Node* y = test::graph::Matmul(&graph, a, x, false, false); y->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:2"); graph.ToGraphDef(&def); SessionOptions options; (*options.config.mutable_device_count())["CPU"] = 2; std::unique_ptr<Session> session(NewSession(options)); ASSERT_TRUE(session != nullptr); ASSERT_FALSE(session->Create(def).ok()); def.Clear(); y->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1"); graph.ToGraphDef(&def); session.reset(NewSession(options)); TF_ASSERT_OK(session->Create(def)); std::vector<Tensor> outputs; TF_ASSERT_OK(session->Run({}, {y->name() + ":0"}, {}, &outputs)); } TEST_F(DirectSessionMinusAXTest, RunSimpleNetworkWithOpts) { Initialize({3, 2, -1, 0}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); std::vector<std::pair<string, Tensor>> inputs; std::vector<string> output_names = {y_ + ":0"}; std::vector<string> target_nodes = {y_neg_}; std::vector<Tensor> outputs; RunOptions run_options; run_options.set_trace_level(RunOptions::SOFTWARE_TRACE); RunMetadata run_metadata; EXPECT_EQ(run_metadata.step_stats().dev_stats_size(), 0); Status s = session->Run(run_options, inputs, output_names, target_nodes, &outputs, &run_metadata); TF_ASSERT_OK(s); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(5.0, mat(0, 0)); ASSERT_TRUE(run_metadata.has_step_stats()); EXPECT_EQ(run_metadata.step_stats().dev_stats_size(), 2); } TEST_F(DirectSessionMinusAXTest, RunSimpleNetworkWithOpts_Callable) { Initialize({3, 2, -1, 0}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); Session::CallableHandle handle; CallableOptions callable_options = MakeCallableOptions({}, {y_ + ":0"}, {y_neg_}); callable_options.mutable_run_options()->set_trace_level( RunOptions::SOFTWARE_TRACE); TF_ASSERT_OK(session->MakeCallable(callable_options, &handle)); RunMetadata run_metadata; EXPECT_EQ(run_metadata.step_stats().dev_stats_size(), 0); std::vector<Tensor> outputs; TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, &run_metadata)); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(5.0, mat(0, 0)); ASSERT_TRUE(run_metadata.has_step_stats()); EXPECT_EQ(run_metadata.step_stats().dev_stats_size(), 2); } TEST_F(DirectSessionMinusAXTest, UseRunHandlerPool) { Initialize({3, 2, -1, 0}); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def_)); std::vector<std::pair<string, Tensor>> inputs; std::vector<string> output_names = {y_ + ":0"}; 
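// This test flips experimental().use_run_handler_pool below; the pool only
// changes how the step is scheduled, so the result is expected to match
// the plain Run() path.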
std::vector<string> target_nodes = {y_neg_}; std::vector<Tensor> outputs; RunOptions run_options; run_options.mutable_experimental()->set_use_run_handler_pool(true); Status s = session->Run(run_options, inputs, output_names, target_nodes, &outputs, nullptr); TF_ASSERT_OK(s); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(5.0, mat(0, 0)); } TEST(DirectSessionTest, KeepsStateAcrossRunsOfSession) { GraphDef def; Graph g(OpRegistry::Global()); Node* var = test::graph::Var(&g, DT_FLOAT, TensorShape({10})); var->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0"); Tensor twenty(DT_FLOAT, TensorShape({10})); for (int i = 0; i < 10; ++i) { twenty.flat<float>()(i) = 20.0; } Node* twenty_node = test::graph::Constant(&g, twenty); twenty_node->set_assigned_device_name( "/job:localhost/replica:0/task:0/cpu:0"); Node* init = test::graph::Assign(&g, var, twenty_node); init->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:0"); g.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); std::vector<std::pair<string, Tensor>> inputs; std::vector<Tensor> outputs; Status s = session->Run(inputs, {init->name()}, {}, &outputs); TF_ASSERT_OK(s); s = session->Run(inputs, {var->name() + ":0"}, {}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(1, outputs.size()); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_EQ(20.0, outputs[0].flat<float>()(0)); } TEST(DirectSessionTest, MultipleFeedTest) { GraphDef def; Graph g(OpRegistry::Global()); Tensor first_value(DT_FLOAT, TensorShape({})); first_value.scalar<float>()() = 1.0; Node* first_const = test::graph::Constant(&g, first_value); Node* first_identity = test::graph::Identity(&g, first_const); Tensor second_value(DT_FLOAT, TensorShape({})); second_value.scalar<float>()() = 2.0; Node* second_const = test::graph::Constant(&g, second_value); Node* second_identity = test::graph::Identity(&g, second_const); g.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); std::vector<Tensor> outputs; Status s = session->Run( {}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(1.0, outputs[0].flat<float>()(0)); ASSERT_EQ(2.0, outputs[1].flat<float>()(0)); s = session->Run( {}, {second_identity->name() + ":0", first_identity->name() + ":0"}, {}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(2.0, outputs[0].flat<float>()(0)); ASSERT_EQ(1.0, outputs[1].flat<float>()(0)); Tensor value_11(DT_FLOAT, TensorShape({})); value_11.scalar<float>()() = 11.0; Tensor value_22(DT_FLOAT, TensorShape({})); value_22.scalar<float>()() = 22.0; s = session->Run( {{first_const->name(), value_11}, {second_const->name(), value_22}}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(11.0, outputs[0].flat<float>()(0)); ASSERT_EQ(22.0, outputs[1].flat<float>()(0)); s = session->Run( {{second_const->name(), value_22}, {first_const->name(), value_11}}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(11.0, outputs[0].flat<float>()(0)); ASSERT_EQ(22.0, outputs[1].flat<float>()(0)); s = session->Run( {{first_const->name(), value_11}, {first_const->name(), value_22}}, {first_identity->name() + ":0", 
second_identity->name() + ":0"}, {}, &outputs); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE(absl::StrContains(s.message(), "fed more than once")); } TEST(DirectSessionTest, MultipleFeedTest_Callable) { GraphDef def; Graph g(OpRegistry::Global()); Tensor first_value(DT_FLOAT, TensorShape({})); first_value.scalar<float>()() = 1.0; Node* first_const = test::graph::Constant(&g, first_value); Node* first_identity = test::graph::Identity(&g, first_const); Tensor second_value(DT_FLOAT, TensorShape({})); second_value.scalar<float>()() = 2.0; Node* second_const = test::graph::Constant(&g, second_value); Node* second_identity = test::graph::Identity(&g, second_const); g.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); Session::CallableHandle handle; std::vector<Tensor> outputs; TF_ASSERT_OK(session->MakeCallable( MakeCallableOptions( {}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {}), &handle)); TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr)); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(1.0, outputs[0].flat<float>()(0)); ASSERT_EQ(2.0, outputs[1].flat<float>()(0)); TF_ASSERT_OK(session->MakeCallable( MakeCallableOptions( {}, {second_identity->name() + ":0", first_identity->name() + ":0"}, {}), &handle)); TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr)); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(2.0, outputs[0].flat<float>()(0)); ASSERT_EQ(1.0, outputs[1].flat<float>()(0)); Tensor value_11(DT_FLOAT, TensorShape({})); value_11.scalar<float>()() = 11.0; Tensor value_22(DT_FLOAT, TensorShape({})); value_22.scalar<float>()() = 22.0; TF_ASSERT_OK(session->MakeCallable( MakeCallableOptions( {first_const->name(), second_const->name()}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {}), &handle)); TF_ASSERT_OK( session->RunCallable(handle, {value_11, value_22}, &outputs, nullptr)); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(11.0, outputs[0].flat<float>()(0)); ASSERT_EQ(22.0, outputs[1].flat<float>()(0)); TF_ASSERT_OK(session->MakeCallable( MakeCallableOptions( {second_const->name(), first_const->name()}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {}), &handle)); TF_ASSERT_OK( session->RunCallable(handle, {value_22, value_11}, &outputs, nullptr)); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(11.0, outputs[0].flat<float>()(0)); ASSERT_EQ(22.0, outputs[1].flat<float>()(0)); Status s = session->MakeCallable( MakeCallableOptions( {first_const->name(), first_const->name()}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {}), &handle); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE(absl::StrContains(s.message(), "fed more than once")); } TEST(DirectSessionTest, TestTensorConnectionUseTwice) { Graph graph(OpRegistry::Global()); Tensor a_tensor(DT_FLOAT, TensorShape({2, 2})); test::FillValues<float>(&a_tensor, {1.0, 2.0, 3.0, 4.0}); Node* a = test::graph::Constant(&graph, a_tensor); Tensor dummy_tensor(DT_FLOAT, TensorShape({1})); test::FillValues<float>(&dummy_tensor, {-1.0}); Node* left = test::graph::Constant(&graph, dummy_tensor); Node* right = test::graph::Constant(&graph, dummy_tensor); Node* y = test::graph::Add(&graph, left, right); GraphDef def; graph.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); CallableOptions callable_options; TensorConnection* c_left = callable_options.add_tensor_connection(); c_left->set_from_tensor(a->name() + ":0"); 
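// Each TensorConnection below rewires a feedable tensor to an existing tensor in the graph: a:0 is connected to both addends, so the callable computes a + a without any feeds.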
c_left->set_to_tensor(left->name() + ":0"); TensorConnection* c_right = callable_options.add_tensor_connection(); c_right->set_from_tensor(a->name() + ":0"); c_right->set_to_tensor(right->name() + ":0"); callable_options.add_fetch(y->name() + ":0"); Session::CallableHandle handle; TF_ASSERT_OK(session->MakeCallable(callable_options, &handle)); std::vector<Tensor> outputs; TF_ASSERT_OK(session->RunCallable(handle, {}, &outputs, nullptr)); ASSERT_EQ(1, outputs.size()); auto mat = outputs[0].matrix<float>(); ASSERT_TRUE(outputs[0].IsInitialized()); EXPECT_FLOAT_EQ(2.0, mat(0, 0)); EXPECT_FLOAT_EQ(4.0, mat(0, 1)); EXPECT_FLOAT_EQ(6.0, mat(1, 0)); EXPECT_FLOAT_EQ(8.0, mat(1, 1)); TF_ASSERT_OK(session->ReleaseCallable(handle)); } TEST(DirectSessionTest, FetchMultipleTimes) { Graph g(OpRegistry::Global()); Tensor seven_tensor(DT_INT32, TensorShape()); seven_tensor.flat<int32>()(0) = 7; Node* seven_node = test::graph::Constant(&g, seven_tensor); GraphDef def; g.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); const std::vector<std::pair<string, Tensor>> inputs; std::vector<Tensor> outputs; auto seven = seven_node->name(); Status s = session->Run(inputs, {seven, seven}, {}, &outputs); TF_ASSERT_OK(s); EXPECT_EQ(2, outputs.size()); for (int i = 0; i < outputs.size(); ++i) { const Tensor& t = outputs[i]; ASSERT_TRUE(t.IsInitialized()) << i; EXPECT_EQ(7, t.flat<int32>()(0)) << i; } } TEST(DirectSessionTest, MultipleFeedTestSomeSyncRun) { GraphDef def; Graph g(OpRegistry::Global()); RunOptions run_options; run_options.set_inter_op_thread_pool(-1); Tensor first_value(DT_FLOAT, TensorShape({})); first_value.scalar<float>()() = 1.0; Node* first_const = test::graph::Constant(&g, first_value); Node* first_identity = test::graph::Identity(&g, first_const); Tensor second_value(DT_FLOAT, TensorShape({})); second_value.scalar<float>()() = 2.0; Node* second_const = test::graph::Constant(&g, second_value); Node* second_identity = test::graph::Identity(&g, second_const); g.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); std::vector<Tensor> outputs; Status s = session->Run( run_options, {}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {}, &outputs, nullptr); TF_ASSERT_OK(s); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(1.0, outputs[0].flat<float>()(0)); ASSERT_EQ(2.0, outputs[1].flat<float>()(0)); s = session->Run( {}, {second_identity->name() + ":0", first_identity->name() + ":0"}, {}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(2.0, outputs[0].flat<float>()(0)); ASSERT_EQ(1.0, outputs[1].flat<float>()(0)); Tensor value_11(DT_FLOAT, TensorShape({})); value_11.scalar<float>()() = 11.0; Tensor value_22(DT_FLOAT, TensorShape({})); value_22.scalar<float>()() = 22.0; s = session->Run( {{first_const->name(), value_11}, {second_const->name(), value_22}}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(11.0, outputs[0].flat<float>()(0)); ASSERT_EQ(22.0, outputs[1].flat<float>()(0)); s = session->Run( {{second_const->name(), value_22}, {first_const->name(), value_11}}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(11.0, outputs[0].flat<float>()(0)); ASSERT_EQ(22.0, outputs[1].flat<float>()(0)); s = session->Run( run_options, {{first_const->name(), value_11}, 
{first_const->name(), value_22}}, {first_identity->name() + ":0", second_identity->name() + ":0"}, {}, &outputs, nullptr); EXPECT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE(absl::StrContains(s.message(), "fed more than once")); } REGISTER_OP("SessionMetadataReader") .Input("x: int64") .Output("y: string") .SetIsStateful() .Doc(R"doc(SessionMetadataReader returns the session metadata. x: int64 y: string )doc"); class SessionMetadataReaderOp : public OpKernel { public: explicit SessionMetadataReaderOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { Tensor* out_tensor = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output("y", TensorShape({}), &out_tensor)); if (ctx->session_metadata() != nullptr) { out_tensor->scalar<tstring>()() = tsl::LegacyUnredactedDebugString(*ctx->session_metadata()); } else { out_tensor->scalar<tstring>()() = ""; } } }; REGISTER_KERNEL_BUILDER(Name("SessionMetadataReader").Device(DEVICE_CPU), SessionMetadataReaderOp); REGISTER_KERNEL_BUILDER(Name("SessionMetadataReader").Device(DEVICE_GPU), SessionMetadataReaderOp); FunctionDef SessionMetadataReaderOpFn() { return FunctionDefHelper::Define( "SessionMetadataReaderFn", {"x: int64"}, {"y: string"}, {}, {{{"y"}, "SessionMetadataReader", {"x"}, {}}}); } TEST(DirectSessionTest, SessionMetadataAbsent) { Graph g(OpRegistry::Global()); Tensor vx(DT_INT64, TensorShape({})); vx.scalar<int64_t>()() = 17; Node* x = test::graph::Constant(&g, vx); Node* y = test::graph::Unary(&g, "SessionMetadataReader", x); GraphDef def; g.ToGraphDef(&def); auto sess = CreateSession(); TF_ASSERT_OK(sess->Create(def)); std::vector<Tensor> outputs; RunOptions run_opts; run_opts.set_inter_op_thread_pool(-1); auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr); EXPECT_EQ("", outputs[0].scalar<tstring>()()); } TEST(DirectSessionTest, SessionMetadataAbsentViaFunction) { FunctionDefLibrary library_graph_def; *library_graph_def.add_function() = SessionMetadataReaderOpFn(); FunctionLibraryDefinition flib(OpRegistry::Global(), library_graph_def); Graph g(&flib); Tensor vx(DT_INT64, TensorShape({})); vx.scalar<int64_t>()() = 17; Node* x = test::graph::Constant(&g, vx); Node* y = test::graph::Unary(&g, "SessionMetadataReaderFn", x); GraphDef def; g.ToGraphDef(&def); *def.mutable_library() = library_graph_def; auto sess = CreateSession(); TF_ASSERT_OK(sess->Create(def)); std::vector<Tensor> outputs; RunOptions run_opts; run_opts.set_inter_op_thread_pool(-1); auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr); EXPECT_EQ("", outputs[0].scalar<tstring>()()); } TEST(DirectSessionTest, SessionMetadataPresent) { Graph g(OpRegistry::Global()); Tensor vx(DT_INT64, TensorShape({})); vx.scalar<int64_t>()() = 17; Node* x = test::graph::Constant(&g, vx); Node* y = test::graph::Unary(&g, "SessionMetadataReader", x); GraphDef def; g.ToGraphDef(&def); auto session_options = DefaultSessionOptions(); auto* session_metadata = session_options.config.mutable_experimental()->mutable_session_metadata(); session_metadata->set_name("name"); session_metadata->set_version(1); auto sess = std::unique_ptr<Session>(NewSession(session_options)); TF_ASSERT_OK(sess->Create(def)); std::vector<Tensor> outputs; RunOptions run_opts; run_opts.set_inter_op_thread_pool(-1); auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr ); SessionMetadata read_metadata; ASSERT_TRUE(protobuf::TextFormat::ParseFromString( outputs[0].scalar<tstring>()(), &read_metadata)); EXPECT_EQ("name", 
read_metadata.name()); EXPECT_EQ(1, read_metadata.version()); RunMetadata metadata; s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, &metadata ); ASSERT_TRUE(protobuf::TextFormat::ParseFromString( outputs[0].scalar<tstring>()(), &read_metadata)); EXPECT_EQ("name", read_metadata.name()); EXPECT_EQ(1, read_metadata.version()); EXPECT_EQ(session_metadata->name(), metadata.session_metadata().name()); EXPECT_EQ(session_metadata->version(), metadata.session_metadata().version()); } TEST(DirectSessionTest, SessionMetadataPresentViaFunction) { FunctionDefLibrary library_graph_def; *library_graph_def.add_function() = SessionMetadataReaderOpFn(); FunctionLibraryDefinition flib(OpRegistry::Global(), library_graph_def); Graph g(&flib); Tensor vx(DT_INT64, TensorShape({})); vx.scalar<int64_t>()() = 17; Node* x = test::graph::Constant(&g, vx); Node* y = test::graph::Unary(&g, "SessionMetadataReaderFn", x); GraphDef def; g.ToGraphDef(&def); *def.mutable_library() = library_graph_def; auto session_options = DefaultSessionOptions(); auto* session_metadata = session_options.config.mutable_experimental()->mutable_session_metadata(); session_metadata->set_name("name"); session_metadata->set_version(1); auto sess = std::unique_ptr<Session>(NewSession(session_options)); TF_ASSERT_OK(sess->Create(def)); std::vector<Tensor> outputs; RunOptions run_opts; run_opts.set_inter_op_thread_pool(-1); auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr ); SessionMetadata read_metadata; ASSERT_TRUE(protobuf::TextFormat::ParseFromString( outputs[0].scalar<tstring>()(), &read_metadata)); EXPECT_EQ("name", read_metadata.name()); EXPECT_EQ(1, read_metadata.version()); RunMetadata metadata; s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, &metadata ); ASSERT_TRUE(protobuf::TextFormat::ParseFromString( outputs[0].scalar<tstring>()(), &read_metadata)); EXPECT_EQ("name", read_metadata.name()); EXPECT_EQ(1, read_metadata.version()); EXPECT_EQ(session_metadata->name(), metadata.session_metadata().name()); EXPECT_EQ(session_metadata->version(), metadata.session_metadata().version()); } TEST(DirectSessionTest, SessionMetadataKey) { auto session_options0 = DefaultSessionOptions(); auto* session_metadata0 = session_options0.config.mutable_experimental() ->mutable_session_metadata(); session_metadata0->set_name("name"); Session* sess0_ptr; ASSERT_TRUE(NewSession(session_options0, &sess0_ptr).ok()); auto sess0 = absl::WrapUnique(sess0_ptr); Session* dup_ptr; EXPECT_TRUE( errors::IsInvalidArgument(NewSession(session_options0, &dup_ptr))); auto session_options1 = DefaultSessionOptions(); auto* session_metadata1 = session_options1.config.mutable_experimental() ->mutable_session_metadata(); session_metadata1->set_name("name"); session_metadata1->set_version(1); Session* sess1_ptr; EXPECT_TRUE(NewSession(session_options1, &sess1_ptr).ok()); auto sess1 = absl::WrapUnique(sess1_ptr); sess0 = nullptr; EXPECT_TRUE(NewSession(session_options0, &dup_ptr).ok()); auto dup = absl::WrapUnique(dup_ptr); auto sess_without_metadata0 = CreateSession(); EXPECT_NE(sess_without_metadata0, nullptr); auto sess_without_metadata1 = CreateSession(); EXPECT_NE(sess_without_metadata1, nullptr); } TEST(DirectSessionTest, SessionMetadataInvalid) { const auto valid_session_options = DefaultSessionOptions(); Session* sess_ptr; ASSERT_TRUE(NewSession(valid_session_options, &sess_ptr).ok()); auto sess = absl::WrapUnique(sess_ptr); auto invalid_session_options = valid_session_options; auto* invalid_metadata = 
invalid_session_options.config.mutable_experimental() ->mutable_session_metadata(); invalid_metadata->set_name("name"); invalid_metadata->set_version(-1); Session* error_sess_ptr; EXPECT_TRUE(errors::IsInvalidArgument( NewSession(invalid_session_options, &error_sess_ptr))); } REGISTER_OP("ThreadID").Input("x: int64").Output("y: int64").Doc(R"doc( ThreadID returns the thread ID that called compute. x: int64 y: int64 )doc"); class ThreadIDOp : public OpKernel { public: explicit ThreadIDOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { Tensor* out_tensor = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output("y", TensorShape({}), &out_tensor)); std::hash<std::thread::id> hasher; out_tensor->scalar<int64_t>()() = static_cast<int64_t>(hasher(std::this_thread::get_id())); } }; REGISTER_KERNEL_BUILDER(Name("ThreadID").Device(DEVICE_CPU), ThreadIDOp); TEST(DirectSessionTest, SessionSyncRun) { Graph g(OpRegistry::Global()); Tensor vx(DT_INT64, TensorShape({})); vx.scalar<int64_t>()() = 17; Node* x = test::graph::Constant(&g, vx); Node* y = test::graph::Unary(&g, "ThreadID", x); GraphDef def; g.ToGraphDef(&def); auto sess = CreateSession(); TF_ASSERT_OK(sess->Create(def)); std::vector<Tensor> outputs; RunOptions run_opts; run_opts.set_inter_op_thread_pool(-1); auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr); std::hash<std::thread::id> hasher; EXPECT_EQ(static_cast<int64_t>(hasher(std::this_thread::get_id())), static_cast<int64_t>(outputs[0].scalar<int64_t>()())); } REGISTER_OP("ExpensiveNoop").SetIsStateful(); class ExpensiveNoopOp : public OpKernel { public: using OpKernel::OpKernel; bool IsExpensive() override { return true; } void Compute(OpKernelContext* ctx) override { const string& stack_trace = tensorflow::CurrentStackTrace(); const string process_method = "ExecutorState::Process()"; size_t pos = 0; int frame_count = 0; while ((pos = stack_trace.find("ExecutorState::Process()", pos)) != string::npos) { ++frame_count; ++pos; } OP_REQUIRES(ctx, frame_count <= 1, errors::Internal( "Recursive call to ExecutorState::Process() detected.")); } }; REGISTER_KERNEL_BUILDER(Name("ExpensiveNoop").Device(DEVICE_CPU), ExpensiveNoopOp); TEST(DirectSessionTest, SessionSyncRun_DeepGraph) { Graph g(OpRegistry::Global()); std::vector<Node*> nodes; nodes.reserve(1024); auto make_expensive_noop = [&g](absl::Span<Node* const> control_deps) { Node* ret; auto builder = NodeBuilder(g.NewName("N"), "ExpensiveNoop"); for (Node* control_dep : control_deps) { builder = builder.ControlInput(control_dep); } TF_CHECK_OK(builder.Finalize(&g, &ret)); return ret; }; Node* base = make_expensive_noop({}); Node* child_1 = make_expensive_noop({base}); Node* child_2 = make_expensive_noop({base}); GraphDef def; g.ToGraphDef(&def); auto sess = CreateSession(); TF_ASSERT_OK(sess->Create(def)); std::vector<Tensor> outputs; RunOptions run_opts; run_opts.set_inter_op_thread_pool(-1); EXPECT_TRUE(sess->Run(run_opts, {}, {}, {child_1->name(), child_2->name()}, &outputs, nullptr) .ok()); } TEST(DirectSessionTest, SyncSession) { Graph g(OpRegistry::Global()); Tensor vx(DT_INT64, TensorShape({})); vx.scalar<int64_t>()() = 17; Node* x = test::graph::Constant(&g, vx); Node* y = test::graph::Unary(&g, "ThreadID", x); GraphDef def; g.ToGraphDef(&def); SessionOptions options; options.config.set_inter_op_parallelism_threads(-1); std::unique_ptr<Session> sess(NewSession(options)); TF_ASSERT_OK(sess->Create(def)); std::vector<Tensor> outputs; RunOptions run_opts; auto s = 
sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr); std::hash<std::thread::id> hasher; EXPECT_EQ(static_cast<int64_t>(hasher(std::this_thread::get_id())), static_cast<int64_t>(outputs[0].scalar<int64_t>()())); } REGISTER_OP("Darth").Input("x: float").Output("y: float").Doc(R"doc( Darth promises one return value. x: float y: float )doc"); class DarthOp : public OpKernel { public: explicit DarthOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override {} }; REGISTER_KERNEL_BUILDER(Name("Darth").Device(DEVICE_CPU), DarthOp); TEST(DirectSessionTest, DarthKernel) { Graph g(OpRegistry::Global()); Tensor vx(DT_FLOAT, TensorShape({})); vx.scalar<float>()() = 1.0; Node* x = test::graph::Constant(&g, vx); Node* y = test::graph::Unary(&g, "Darth", x); GraphDef def; g.ToGraphDef(&def); auto sess = CreateSession(); TF_ASSERT_OK(sess->Create(def)); std::vector<Tensor> outputs; auto s = sess->Run({}, {y->name() + ":0"}, {}, &outputs); EXPECT_TRUE(errors::IsInternal(s)); } TEST(DirectSessionTest, PlacePrunedGraph) { { Graph g(OpRegistry::Global()); Tensor vx(DT_FLOAT, TensorShape({})); vx.scalar<float>()() = 1.0; Node* x = test::graph::Constant(&g, vx); Node* y = test::graph::Unary(&g, "Darth", x); y->set_assigned_device_name("/job:localhost/replica:0/task:0/device:GPU:0"); GraphDef def; g.ToGraphDef(&def); SessionOptions options; std::unique_ptr<Session> sess(NewSession(options)); auto s = sess->Create(def); EXPECT_TRUE(errors::IsInvalidArgument(s)); } { Graph g(OpRegistry::Global()); Tensor vx(DT_FLOAT, TensorShape({})); vx.scalar<float>()() = 1.0; Node* x = test::graph::Constant(&g, vx); Node* y = test::graph::Unary(&g, "Darth", x); y->set_assigned_device_name("/job:localhost/replica:0/task:0/device:GPU:0"); GraphDef def; g.ToGraphDef(&def); SessionOptions options; options.config.mutable_graph_options()->set_place_pruned_graph(true); std::unique_ptr<Session> sess(NewSession(options)); TF_ASSERT_OK(sess->Create(def)); std::vector<Tensor> outputs; auto s = sess->Run({}, {x->name() + ":0"}, {}, &outputs); TF_EXPECT_OK(s); } } TEST(DirectSessionTest, PartialRunTest) { GraphDef def; Graph g(OpRegistry::Global()); Tensor first_value(DT_FLOAT, TensorShape({})); first_value.scalar<float>()() = 1.0; Node* first_const = test::graph::Constant(&g, first_value); Node* first_identity = test::graph::Identity(&g, first_const); Tensor second_value(DT_FLOAT, TensorShape({})); second_value.scalar<float>()() = 2.0; Node* second_const = test::graph::Constant(&g, second_value); Node* second_identity = test::graph::Identity(&g, second_const); Node* third = test::graph::Add(&g, first_identity, second_identity); Node* third_identity = test::graph::Identity(&g, third); g.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); std::vector<Tensor> outputs; string handle; Status s = session->PRunSetup( {first_const->name(), second_const->name()}, {first_identity->name() + ":0", second_identity->name() + ":0", third_identity->name() + ":0"}, {}, &handle); TF_ASSERT_OK(s); Tensor value_11(DT_FLOAT, TensorShape({})); value_11.scalar<float>()() = 11.0; Tensor value_22(DT_FLOAT, TensorShape({})); value_22.scalar<float>()() = 22.0; s = session->PRun(handle, {{first_const->name(), value_11}}, {first_identity->name() + ":0"}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(1, outputs.size()); ASSERT_EQ(11.0, outputs[0].flat<float>()(0)); s = session->PRun( handle, {{second_const->name(), value_22}}, {second_identity->name() + ":0", 
third_identity->name() + ":0"}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(2, outputs.size()); ASSERT_EQ(22.0, outputs[0].flat<float>()(0)); ASSERT_EQ(11.0 + 22.0, outputs[1].flat<float>()(0)); } TEST(DirectSessionTest, PartialRunMissingFeed) { GraphDef def; Graph g(OpRegistry::Global()); Tensor first_value(DT_FLOAT, TensorShape({})); first_value.scalar<float>()() = 1.0; Node* first_const = test::graph::Constant(&g, first_value); Node* first_identity = test::graph::Identity(&g, first_const); Tensor second_value(DT_FLOAT, TensorShape({})); second_value.scalar<float>()() = 2.0; Node* second_const = test::graph::Constant(&g, second_value); Node* second_identity = test::graph::Identity(&g, second_const); Node* third = test::graph::Add(&g, first_identity, second_identity); Node* third_identity = test::graph::Identity(&g, third); g.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); std::vector<Tensor> outputs; string handle; Status s = session->PRunSetup({first_const->name(), second_const->name()}, {third_identity->name() + ":0"}, {}, &handle); TF_ASSERT_OK(s); Tensor value_11(DT_FLOAT, TensorShape({})); value_11.scalar<float>()() = 11.0; s = session->PRun(handle, {{first_const->name(), value_11}}, {third_identity->name() + ":0"}, &outputs); ASSERT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE( absl::StrContains(s.message(), "can't be computed from the feeds")); } TEST(DirectSessionTest, PartialRunMultiOutputFeed) { GraphDef def; Graph g(OpRegistry::Global()); Tensor bool_value(DT_BOOL, TensorShape({})); bool_value.scalar<bool>()() = true; Node* bool_const = test::graph::Constant(&g, bool_value); Node* switch_node = test::graph::Switch(&g, bool_const, bool_const); Node* fourth_identity = test::graph::Identity(&g, switch_node, 1); g.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); std::vector<Tensor> outputs; string handle; Status s = session->PRunSetup({switch_node->name() + ":1"}, {fourth_identity->name() + ":0"}, {}, &handle); TF_ASSERT_OK(s); s = session->PRun(handle, {}, {fourth_identity->name() + ":0"}, &outputs); ASSERT_TRUE(errors::IsInvalidArgument(s)); EXPECT_TRUE( absl::StrContains(s.message(), "can't be computed from the feeds")); s = session->PRun(handle, {{switch_node->name() + ":1", bool_value}}, {fourth_identity->name() + ":0"}, &outputs); TF_ASSERT_OK(s); ASSERT_EQ(1, outputs.size()); ASSERT_EQ(true, outputs[0].flat<bool>()(0)); } TEST(DirectSessionTest, RunHandleTest) { GraphDef def; Graph g(OpRegistry::Global()); Tensor value0(DT_FLOAT, TensorShape({})); value0.scalar<float>()() = 1.0; Node* const0 = test::graph::Constant(&g, value0); Node* identity0 = test::graph::Identity(&g, const0); Tensor value1(DT_FLOAT, TensorShape({})); value1.scalar<float>()() = 2.0; Node* const1 = test::graph::Constant(&g, value1); Node* node3 = test::graph::Add(&g, identity0, const1); Node* node4 = test::graph::Unary(&g, "GetSessionHandleV2", node3); Tensor value2(DT_STRING, TensorShape({})); Node* const2 = test::graph::Constant(&g, value2); Node* node5 = test::graph::GetSessionTensor(&g, const2); Node* node6 = test::graph::Add(&g, node5, const1); Node* node7 = test::graph::Unary(&g, "DeleteSessionTensor", const2); g.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); std::vector<Tensor> outputs; Status s = session->Run({}, {node4->name() + ":0"}, {}, &outputs); ASSERT_TRUE(s.ok()); ASSERT_EQ(1, 
outputs.size()); const ResourceHandle& resource_handle = outputs[0].scalar<ResourceHandle>()(); Tensor string_handle(DT_STRING, {}); string_handle.flat<tstring>().setConstant(resource_handle.name()); std::vector<Tensor> outputs1; s = session->Run({{const2->name(), string_handle}}, {node6->name() + ":0"}, {}, &outputs1); ASSERT_TRUE(s.ok()); ASSERT_EQ(1, outputs1.size()); ASSERT_EQ(5.0, outputs1[0].flat<float>()(0)); std::vector<Tensor> outputs2; s = session->Run({{const2->name(), string_handle}}, {}, {node7->name()}, &outputs2); ASSERT_TRUE(s.ok()); } TEST(DirectSessionTest, RunHandleTest_Callable) { GraphDef def; Graph g(OpRegistry::Global()); Tensor value0(DT_FLOAT, TensorShape({})); value0.scalar<float>()() = 1.0; Node* const0 = test::graph::Constant(&g, value0); Node* identity0 = test::graph::Identity(&g, const0); Tensor value1(DT_FLOAT, TensorShape({})); value1.scalar<float>()() = 2.0; Node* const1 = test::graph::Constant(&g, value1); Node* node3 = test::graph::Add(&g, identity0, const1); Node* node4 = test::graph::Unary(&g, "GetSessionHandleV2", node3); Tensor value2(DT_STRING, TensorShape({})); Node* const2 = test::graph::Constant(&g, value2); Node* node5 = test::graph::GetSessionTensor(&g, const2); Node* node6 = test::graph::Add(&g, node5, const1); Node* node7 = test::graph::Unary(&g, "DeleteSessionTensor", const2); g.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); std::vector<Tensor> outputs; Status s = session->Run({}, {node4->name() + ":0"}, {}, &outputs); ASSERT_TRUE(s.ok()); ASSERT_EQ(1, outputs.size()); const ResourceHandle& resource_handle = outputs[0].scalar<ResourceHandle>()(); Tensor string_handle(DT_STRING, {}); string_handle.flat<tstring>().setConstant(resource_handle.name()); std::vector<Tensor> outputs1; s = session->Run({{const2->name(), string_handle}}, {node6->name() + ":0"}, {}, &outputs1); ASSERT_TRUE(s.ok()); ASSERT_EQ(1, outputs1.size()); ASSERT_EQ(5.0, outputs1[0].flat<float>()(0)); std::vector<Tensor> outputs2; s = session->Run({{const2->name(), string_handle}}, {}, {node7->name()}, &outputs2); ASSERT_TRUE(s.ok()); } TEST(DirectSessionTest, CreateGraphFailsWhenAssigningAFedVar) { Graph graph(OpRegistry::Global()); Node* a = test::graph::Var(&graph, DT_FLOAT, {}); Node* b = test::graph::Constant(&graph, {}); Tensor zero(DT_FLOAT, {}); test::FillValues<float>(&zero, {0}); Node* assign = test::graph::Assign(&graph, a, b); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); std::vector<Tensor> outputs; Status s = session->Run({{a->name(), zero}}, {assign->name()}, {}, &outputs); ASSERT_TRUE(errors::IsInvalidArgument(s)); } TEST(DirectSessionTest, TimeoutSession) { GraphDef graph; protobuf::TextFormat::ParseFromString(R"pb( node { name: 'fifo_queue' op: 'FIFOQueue' device: '/device:CPU:0' attr { key: 'capacity' value { i: 10 } } attr { key: 'component_types' value { list { type: DT_FLOAT } } } attr { key: 'container' value { s: '' } } attr { key: 'shapes' value { list {} } } attr { key: 'shared_name' value { s: '' } } } node { name: 'fifo_queue_Dequeue' op: 'QueueDequeue' input: 'fifo_queue' device: '/device:CPU:0' attr { key: 'component_types' value { list { type: DT_FLOAT } } } attr { key: 'timeout_ms' value { i: -1 } } } versions { producer: 9 } )pb", &graph); { SessionOptions options; (*options.config.mutable_device_count())["CPU"] = 2; options.config.set_operation_timeout_in_ms(100); std::unique_ptr<Session> session(NewSession(options)); ASSERT_TRUE(session != nullptr); 
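// The dequeue from the empty queue would block forever; the 100ms operation_timeout_in_ms set above should surface as DEADLINE_EXCEEDED instead of hanging the test.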
TF_ASSERT_OK(session->Create(graph)); Status s = session->Run({}, {}, {"fifo_queue_Dequeue"}, nullptr); ASSERT_EQ(error::DEADLINE_EXCEEDED, s.code()); TF_ASSERT_OK(session->Close()); } { auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(graph)); RunOptions run_options; run_options.set_timeout_in_ms(20); Status s2 = session->Run(run_options, {}, {}, {"fifo_queue_Dequeue"}, nullptr, nullptr); ASSERT_EQ(error::DEADLINE_EXCEEDED, s2.code()); TF_ASSERT_OK(session->Close()); } } class CancellationMgrPollingOp : public OpKernel { public: explicit CancellationMgrPollingOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { CancellationManager* cm = ctx->cancellation_manager(); while (!cm->IsCancelled()) { ctx->env()->SleepForMicroseconds(1000); } notification.Notify(); } static Notification notification; }; Notification CancellationMgrPollingOp::notification; REGISTER_KERNEL_BUILDER(Name("CancellationMgrPollingOp").Device(DEVICE_CPU), CancellationMgrPollingOp); REGISTER_OP("CancellationMgrPollingOp").Doc(""); TEST(DirectSessionTest, TestTimeoutCleanShutdown) { GraphDef graph; protobuf::TextFormat::ParseFromString(R"pb( node { name: 'cm_polling' op: 'CancellationMgrPollingOp' device: '/device:CPU:0' } versions { producer: 9 } )pb", &graph); SessionOptions options; options.config.set_operation_timeout_in_ms(100); std::unique_ptr<Session> session(NewSession(options)); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(graph)); Status s = session->Run({}, {}, {"cm_polling"}, nullptr); ASSERT_EQ(error::DEADLINE_EXCEEDED, s.code()); ASSERT_TRUE(CancellationMgrPollingOp::notification.HasBeenNotified()); TF_ASSERT_OK(session->Close()); } static void TestSessionInterOpThreadsImpl(bool use_function_lib, bool use_global_pools) { using test::function::blocking_op_state; using test::function::BlockingOpState; FunctionDefLibrary library_graph_def; if (use_function_lib) { *library_graph_def.add_function() = test::function::BlockingOpFn(); } FunctionLibraryDefinition flib(OpRegistry::Global(), library_graph_def); Graph g(&flib); Tensor t(DT_FLOAT, TensorShape({})); t.scalar<float>()() = {1.2f}; Node* x = test::graph::Constant(&g, t); Node* y; if (use_function_lib) { y = test::graph::Unary(&g, "BlockingOpFn", x); } else { y = test::graph::Unary(&g, "BlockingOp", x); } GraphDef def; g.ToGraphDef(&def); *def.mutable_library() = library_graph_def; SessionOptions options; options.config.mutable_graph_options() ->mutable_optimizer_options() ->set_opt_level(OptimizerOptions::L0); options.config.mutable_graph_options() ->mutable_rewrite_options() ->set_constant_folding(RewriterConfig::OFF); (*options.config.mutable_device_count())["CPU"] = 2; (*options.config.mutable_device_count())["GPU"] = 0; auto* p = options.config.add_session_inter_op_thread_pool(); if (use_global_pools) p->set_global_name("large pool"); p = options.config.add_session_inter_op_thread_pool(); if (use_global_pools) p->set_global_name("small pool"); p->set_num_threads(1); const int kSyncPool = -1; const int kLargePool = 0; const int kSmallPool = 1; std::vector<std::unique_ptr<Session>> sessions; if (!use_global_pools) { sessions.emplace_back(NewSession(options)); TF_ASSERT_OK(sessions.back()->Create(def)); } mutex sessions_mu; std::atomic<int32> num_done(0); auto add_session_run_call = [use_global_pools, &def, &options, &sessions, &sessions_mu, &num_done]( thread::ThreadPool* tp, Node* node, int inter_op_pool) { auto fn = [use_global_pools, &def, &options, 
&sessions, &sessions_mu, inter_op_pool, node, &num_done]() { RunOptions run_options; run_options.set_inter_op_thread_pool(inter_op_pool); std::vector<Tensor> outputs; Session* session; if (use_global_pools) { std::unique_ptr<Session> s(NewSession(options)); TF_ASSERT_OK(s->Create(def)); session = s.get(); mutex_lock l(sessions_mu); sessions.emplace_back(std::move(s)); } else { session = sessions[0].get(); } Status s = session->Run(run_options, {} , {node->name() + ":0"} , {}, &outputs, nullptr ); TF_CHECK_OK(s); ASSERT_EQ(1, outputs.size()); auto flat = outputs[0].flat<float>(); EXPECT_FLOAT_EQ(1.2, flat(0)); num_done.fetch_add(1); }; if (tp != nullptr) { tp->Schedule(fn); } else { fn(); } }; thread::ThreadPool* tp1 = new thread::ThreadPool(Env::Default(), "tp1", 1); blocking_op_state = new BlockingOpState(); add_session_run_call(tp1, y, kLargePool); blocking_op_state->AwaitState(1); blocking_op_state->MoveToState(1, 2); blocking_op_state->AwaitState(3); blocking_op_state->MoveToState(3, 0); delete tp1; num_done = 0; tp1 = new thread::ThreadPool(Env::Default(), "tp1", 5); add_session_run_call(tp1, y, kSmallPool); blocking_op_state->AwaitState(1); const int kBlockedThreads = 3; for (int i = 0; i < kBlockedThreads; ++i) { add_session_run_call(tp1, x, kSmallPool); } thread::ThreadPool* tp2 = new thread::ThreadPool(Env::Default(), "tp2", 3); const int kUnblockedThreads = 4; for (int i = 0; i < kUnblockedThreads; ++i) { add_session_run_call(tp2, x, kLargePool); } delete tp2; EXPECT_EQ(kUnblockedThreads, num_done.load()); add_session_run_call(nullptr, x, kSyncPool); blocking_op_state->MoveToState(1, 2); delete tp1; EXPECT_EQ(kUnblockedThreads + kBlockedThreads + 1 + 1, num_done.load()); delete blocking_op_state; blocking_op_state = nullptr; } TEST(DirectSessionTest, TestSessionInterOpThreads) { TestSessionInterOpThreadsImpl(false , false ); } TEST(DirectSessionTest, TestSessionInterOpThreadsWithFunctions) { TestSessionInterOpThreadsImpl(true , false ); } TEST(DirectSessionTest, TestSessionInterOpGlobalPools) { TestSessionInterOpThreadsImpl(false , true ); } TEST(DirectSessionTest, TestSessionInterOpGlobalPoolsWithFunctions) { TestSessionInterOpThreadsImpl(true , true ); } TEST(DirectSessionTest, TestSessionInterOpThreadsInvalidOptions) { Graph g(OpRegistry::Global()); Tensor t(DT_FLOAT, TensorShape({})); t.scalar<float>()() = {1.2f}; Node* x = test::graph::Constant(&g, t); GraphDef def; g.ToGraphDef(&def); SessionOptions options; options.config.mutable_graph_options() ->mutable_optimizer_options() ->set_opt_level(OptimizerOptions::L0); (*options.config.mutable_device_count())["CPU"] = 2; options.config.add_session_inter_op_thread_pool(); { std::unique_ptr<Session> session(NewSession(options)); TF_ASSERT_OK(session->Create(def)); for (int pool_num = -2; pool_num <= 1; pool_num += 3) { RunOptions run_options; run_options.set_inter_op_thread_pool(pool_num); std::vector<Tensor> outputs; Status s = session->Run(run_options, {} , {x->name() + ":0"} , {}, &outputs, nullptr ); EXPECT_EQ(s.code(), error::INVALID_ARGUMENT); EXPECT_TRUE(absl::StrContains( s.message(), strings::StrCat("Invalid inter_op_thread_pool: ", pool_num))); } } std::vector<std::unique_ptr<Session>> sessions; auto* pool_config = options.config.mutable_session_inter_op_thread_pool(0); pool_config->set_num_threads(0); pool_config->set_global_name("foo"); sessions.emplace_back(NewSession(options)); TF_ASSERT_OK(sessions.back()->Create(def)); sessions.emplace_back(NewSession(options)); TF_ASSERT_OK(sessions.back()->Create(def)); for (int 
pass = 0; pass < 2; ++pass) { for (int i = 1; i < 128; ++i) { pool_config->set_num_threads(i); sessions.emplace_back(NewSession(options)); auto status = sessions.back()->Create(def); ASSERT_FALSE(status.ok()) << status; } sessions.clear(); } } TEST(DirectSessionTest, TestDirectSessionRunClose) { Graph g(OpRegistry::Global()); Tensor t(DT_FLOAT, TensorShape({})); t.scalar<float>()() = {1.2f}; Node* var_val = test::graph::Constant(&g, t); Node* var = test::graph::Var(&g, DT_FLOAT, {}); Node* var_assign = test::graph::Assign(&g, var, var_val); GraphDef def; g.ToGraphDef(&def); SessionOptions options; (*options.config.mutable_device_count())["CPU"] = 2; std::unique_ptr<Session> session(NewSession(options)); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); TF_ASSERT_OK(session->Run({} , {}, {var_assign->name()} , nullptr)); std::vector<Tensor> outputs; TF_ASSERT_OK(session->Run( {} , {var->name() + ":0"} , {}, &outputs)); EXPECT_EQ(t.scalar<float>()(), outputs[0].scalar<float>()()); outputs.clear(); Session::CallableHandle handle; TF_ASSERT_OK(session->MakeCallable( MakeCallableOptions({}, {}, {var_assign->name()}), &handle)); TF_ASSERT_OK(session->Close()); Status s = session->Run({} , {}, {var_assign->name()} , nullptr); EXPECT_EQ(s.code(), error::CANCELLED); EXPECT_TRUE(absl::StrContains(s.message(), "Session has been closed.")); s = session->RunCallable(handle, {}, {}, nullptr); EXPECT_EQ(s.code(), error::CANCELLED); EXPECT_TRUE(absl::StrContains(s.message(), "Session has been closed.")); } TEST(DirectSessionTest, TestDirectSessionPRunClose) { GraphDef def; Graph g(OpRegistry::Global()); Tensor first_value(DT_FLOAT, TensorShape({})); first_value.scalar<float>()() = 1.0; Node* first_const = test::graph::Constant(&g, first_value); Node* first_identity = test::graph::Identity(&g, first_const); Tensor second_value(DT_FLOAT, TensorShape({})); second_value.scalar<float>()() = 2.0; Node* second_const = test::graph::Constant(&g, second_value); Node* second_identity = test::graph::Identity(&g, second_const); Node* third = test::graph::Add(&g, first_identity, second_identity); Node* third_identity = test::graph::Identity(&g, third); g.ToGraphDef(&def); auto session = CreateSession(); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); std::vector<Tensor> outputs; string handle; Status s = session->PRunSetup( {first_const->name(), second_const->name()}, {first_identity->name() + ":0", second_identity->name() + ":0", third_identity->name() + ":0"}, {}, &handle); TF_ASSERT_OK(s); Tensor value_11(DT_FLOAT, TensorShape({})); value_11.scalar<float>()() = 11.0; Tensor value_22(DT_FLOAT, TensorShape({})); value_22.scalar<float>()() = 22.0; TF_ASSERT_OK(session->Close()); s = session->PRun(handle, {{first_const->name(), value_11}}, {first_identity->name() + ":0"}, &outputs); EXPECT_EQ(s.code(), error::CANCELLED); EXPECT_TRUE(absl::StrContains(s.message(), "Session has been closed.")); } TEST(DirectSessionTest, TestDirectSessionReset) { Graph g(OpRegistry::Global()); Tensor t(DT_FLOAT, TensorShape({})); t.scalar<float>()() = {1.2f}; Node* var_val = test::graph::Constant(&g, t); Node* var = test::graph::Var(&g, DT_FLOAT, {}); Node* var_assign = test::graph::Assign(&g, var, var_val); GraphDef def; g.ToGraphDef(&def); SessionOptions options; (*options.config.mutable_device_count())["CPU"] = 2; std::unique_ptr<Session> session(NewSession(options)); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(def)); TF_ASSERT_OK(session->Run({} , {}, {var_assign->name()} , 
nullptr)); std::vector<Tensor> outputs; TF_ASSERT_OK(session->Run( {} , {var->name() + ":0"} , {}, &outputs)); EXPECT_EQ(t.scalar<float>()(), outputs[0].scalar<float>()()); outputs.clear(); TF_EXPECT_OK(Reset(options, {})); Status s = session->Run({} , {}, {var_assign->name()} , nullptr); EXPECT_EQ(s.code(), error::CANCELLED); EXPECT_TRUE(absl::StrContains(s.message(), "Session has been closed.")); } TEST(DirectSessionTest, LocalDeviceManager) { SessionOptions options; std::unique_ptr<Session> session(NewSession(options)); const DeviceMgr* mgr = nullptr; TF_ASSERT_OK(session->LocalDeviceManager(&mgr)); ASSERT_TRUE(mgr != nullptr); EXPECT_GT(mgr->ListDevices().size(), 0); } class FakeDevice : public Device { public: explicit FakeDevice(const DeviceAttributes& device_attributes) : Device(nullptr, device_attributes) {} Status Sync() override { return absl::UnimplementedError("FakeDevice::Sync()"); } }; template <char FirstLetter> class FakeFactory : public DeviceFactory { public: Status ListPhysicalDevices(std::vector<string>* devices) override { return absl::OkStatus(); } Status CreateDevices(const SessionOptions& options, const string& name_prefix, std::vector<std::unique_ptr<Device>>* devices) override { std::string name = absl::StrFormat("%cPU", FirstLetter); DeviceAttributes attr; attr.set_name( absl::StrFormat("/job:localhost/replica:0/task:0/device:%s:0", name)); attr.set_device_type(DeviceType(name).type()); devices->emplace_back(std::make_unique<FakeDevice>(attr)); return absl::OkStatus(); } }; REGISTER_LOCAL_DEVICE_FACTORY("APU", FakeFactory<'A'>); REGISTER_LOCAL_DEVICE_FACTORY("ZPU", FakeFactory<'Z'>); TEST(DirectSessionTest, FeedsAndFetchesGoToCpu) { auto session = CreateSession(); const DeviceMgr* mgr = nullptr; TF_ASSERT_OK(session->LocalDeviceManager(&mgr)); ASSERT_TRUE(mgr != nullptr); EXPECT_GT(mgr->ListDevices().size(), 2); GraphDef def; Graph graph(OpRegistry::Global()); Tensor a_tensor(DT_FLOAT, TensorShape({2, 2})); a_tensor.flat<float>().setRandom(); Node* a = test::graph::Constant(&graph, a_tensor); Tensor x_tensor(DT_FLOAT, TensorShape({2, 1})); x_tensor.flat<float>().setRandom(); Node* x = test::graph::Constant(&graph, x_tensor); Node* y = test::graph::Matmul(&graph, a, x, false, false); graph.ToGraphDef(&def); TF_ASSERT_OK(session->Create(def)); std::vector<Tensor> outputs; TF_ASSERT_OK(session->Run({}, {y->name() + ":0"}, {}, &outputs)); } GraphDef CreateGraphForYEqualsXSquared() { GraphDef graph_def; const char* text_proto = R"EOF( node { name: "x" op: "Placeholder" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "shape" value { shape { unknown_rank: true } } } } node { name: "y" op: "Square" input: "x" attr { key: "T" value { type: DT_FLOAT } } } versions { producer: 26 } )EOF"; QCHECK(protobuf::TextFormat::ParseFromString(text_proto, &graph_def)); return graph_def; } bool IsCUDATensor(const Tensor& t) { #ifdef GOOGLE_CUDA cudaPointerAttributes attributes; cudaError_t err = cudaPointerGetAttributes(&attributes, t.tensor_data().data()); if (err == cudaErrorInvalidValue) return false; CHECK_EQ(cudaSuccess, err) << cudaGetErrorString(err); return (attributes.type == cudaMemoryTypeDevice); #elif TENSORFLOW_USE_ROCM hipPointerAttribute_t attributes; hipError_t err = hipPointerGetAttributes(&attributes, t.tensor_data().data()); if (err == hipErrorInvalidValue) return false; CHECK_EQ(hipSuccess, err) << hipGetErrorString(err); return (attributes.memoryType == hipMemoryTypeDevice); #else return false; #endif } string GPUDeviceName(Session* session) { 
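// Returns the name of the first GPU device visible to the session, or an empty string when none is available; callers use this to skip GPU-only tests on CPU-only builds.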
std::vector<DeviceAttributes> devices; TF_CHECK_OK(session->ListDevices(&devices)); for (const DeviceAttributes& d : devices) { if (d.device_type() == "GPU" || d.device_type() == "gpu") { return d.name(); } } return ""; } TEST(DirectSessionTest, FeedAndFetchTensorsInDeviceMemory) { std::unique_ptr<Session> session(NewSession(SessionOptions())); const string gpu_device_name = GPUDeviceName(session.get()); if (gpu_device_name.empty()) { LOG(INFO) << "Skipping test since no GPU is available"; return; } TF_ASSERT_OK(session->Create(CreateGraphForYEqualsXSquared())); CallableOptions opts; opts.add_feed("x:0"); opts.add_fetch("y:0"); Tensor gpu_tensor; { Session::CallableHandle feed_cpu_fetch_gpu; opts.mutable_fetch_devices()->insert({"y:0", gpu_device_name}); opts.set_fetch_skip_sync(true); TF_ASSERT_OK(session->MakeCallable(opts, &feed_cpu_fetch_gpu)); Tensor input(DT_FLOAT, {}); input.scalar<float>()() = 2.0f; std::vector<Tensor> outputs; TF_ASSERT_OK( session->RunCallable(feed_cpu_fetch_gpu, {input}, &outputs, nullptr)); TF_ASSERT_OK(session->ReleaseCallable(feed_cpu_fetch_gpu)); ASSERT_EQ(1, outputs.size()); gpu_tensor = outputs[0]; ASSERT_TRUE(IsCUDATensor(gpu_tensor)); } { Session::CallableHandle feed_gpu_fetch_cpu; opts.clear_fetch_devices(); opts.mutable_feed_devices()->insert({"x:0", gpu_device_name}); TF_ASSERT_OK(session->MakeCallable(opts, &feed_gpu_fetch_cpu)); std::vector<Tensor> outputs; TF_ASSERT_OK(session->RunCallable(feed_gpu_fetch_cpu, {gpu_tensor}, &outputs, nullptr)); TF_ASSERT_OK(session->ReleaseCallable(feed_gpu_fetch_cpu)); ASSERT_EQ(1, outputs.size()); ASSERT_EQ(16.0, outputs[0].scalar<float>()()); } } GraphDef CreateIdentityGraphDef(DataType dtype) { GraphDef def; AttrValue dtype_attr; dtype_attr.set_type(dtype); AttrValue shape_attr; shape_attr.mutable_shape()->set_unknown_rank(true); auto* placeholder = def.add_node(); placeholder->set_name("x"); placeholder->set_op("Placeholder"); placeholder->mutable_attr()->insert({"dtype", dtype_attr}); placeholder->mutable_attr()->insert({"shape", shape_attr}); auto* identity = def.add_node(); identity->set_name("y"); identity->set_op("Identity"); identity->add_input("x"); identity->mutable_attr()->insert({"T", dtype_attr}); return def; } void TestFeedAndFetchTensorsInDeviceMemory( const SessionOptions& session_options, DataType dtype) { std::unique_ptr<Session> session(NewSession(session_options)); const string gpu_device_name = GPUDeviceName(session.get()); if (gpu_device_name.empty()) { LOG(INFO) << "Skipping test since no GPU is available"; return; } TF_ASSERT_OK(session->Create(CreateIdentityGraphDef(dtype))) << DataType_Name(dtype); CallableOptions opts; opts.add_feed("x:0"); opts.add_fetch("y:0"); Tensor gpu_tensor; Tensor host_tensor(dtype, {3}); { opts.mutable_fetch_devices()->insert({"y:0", gpu_device_name}); opts.set_fetch_skip_sync(true); Session::CallableHandle handle; TF_ASSERT_OK(session->MakeCallable(opts, &handle)) << DataType_Name(dtype); std::vector<Tensor> outputs; TF_ASSERT_OK(session->RunCallable(handle, {host_tensor}, &outputs, nullptr)) << DataType_Name(dtype); TF_ASSERT_OK(session->ReleaseCallable(handle)) << DataType_Name(dtype); ASSERT_EQ(1, outputs.size()) << DataType_Name(dtype); gpu_tensor = outputs[0]; ASSERT_TRUE(IsCUDATensor(gpu_tensor)) << DataType_Name(dtype); } { opts.clear_fetch_devices(); opts.mutable_feed_devices()->insert({"x:0", gpu_device_name}); Session::CallableHandle handle; TF_ASSERT_OK(session->MakeCallable(opts, &handle)) << DataType_Name(dtype); std::vector<Tensor> outputs; 
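// Feed the device-resident tensor produced above back through the identity graph; the bytes fetched on the host must match the original host tensor byte-for-byte.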
TF_ASSERT_OK(session->RunCallable(handle, {gpu_tensor}, &outputs, nullptr)) << DataType_Name(dtype); TF_ASSERT_OK(session->ReleaseCallable(handle)) << DataType_Name(dtype); ASSERT_EQ(1, outputs.size()); const StringPiece actual_data = outputs[0].tensor_data(); const StringPiece expected_data = host_tensor.tensor_data(); EXPECT_EQ(expected_data.size(), actual_data.size()) << DataType_Name(dtype); EXPECT_EQ(0, memcmp(expected_data.data(), actual_data.data(), std::min(expected_data.size(), actual_data.size()))) << DataType_Name(dtype); } } void TestFeedAndFetchTensorsInDeviceMemoryFailsToMakeCallable( const SessionOptions& session_options, DataType dtype) { std::unique_ptr<Session> session(NewSession(session_options)); const string gpu_device_name = GPUDeviceName(session.get()); if (gpu_device_name.empty()) { LOG(INFO) << "Skipping test since no GPU is available"; return; } TF_ASSERT_OK(session->Create(CreateIdentityGraphDef(dtype))) << DataType_Name(dtype); CallableOptions opts; opts.add_feed("x:0"); opts.add_fetch("y:0"); { opts.mutable_fetch_devices()->insert({"y:0", gpu_device_name}); opts.set_fetch_skip_sync(true); Session::CallableHandle handle; Status status = session->MakeCallable(opts, &handle); EXPECT_FALSE(status.ok()) << DataType_Name(dtype); EXPECT_TRUE(absl::StrContains( status.message(), strings::StrCat( "Cannot feed or fetch tensor 'y:0' from device ", gpu_device_name, " as feeding/fetching from GPU devices is not yet supported for ", DataTypeString(dtype), " tensors"))) << DataType_Name(dtype) << ", Status: " << status; } { opts.clear_feed_devices(); opts.mutable_feed_devices()->insert({"x:0", gpu_device_name}); Session::CallableHandle handle; Status status = session->MakeCallable(opts, &handle); EXPECT_FALSE(status.ok()); EXPECT_TRUE(absl::StrContains( status.message(), strings::StrCat( "Cannot feed or fetch tensor 'x:0' from device ", gpu_device_name, " as feeding/fetching from GPU devices is not yet supported for ", DataTypeString(dtype), " tensors"))) << DataType_Name(dtype) << ", Status: " << status; } } void TestFeedAndFetchTensorsInDeviceMemoryForAllDataTypes( const SessionOptions& opts) { for (int i = DataType_MIN; i <= DataType_MAX; ++i) { if (!DataType_IsValid(i)) continue; const DataType dtype = static_cast<DataType>(i); switch (dtype) { case DT_INVALID: break; case DT_BFLOAT16: case DT_BOOL: case DT_COMPLEX128: case DT_COMPLEX64: case DT_DOUBLE: case DT_FLOAT: case DT_HALF: case DT_INT16: case DT_INT64: case DT_INT8: case DT_UINT16: case DT_UINT8: case DT_INT4: case DT_UINT4: TestFeedAndFetchTensorsInDeviceMemory(opts, dtype); break; default: if (!IsRefType(dtype)) { TestFeedAndFetchTensorsInDeviceMemoryFailsToMakeCallable(opts, dtype); } break; } } } TEST(DirectSessionTest, FeedAndFetchTensorsInDeviceMemory_AllDataTypes) { SessionOptions opts; opts.config.set_allow_soft_placement(false); TestFeedAndFetchTensorsInDeviceMemoryForAllDataTypes(opts); } TEST(DirectSessionTest, FeedAndFetchTensorsInDeviceMemory_AllDataTypes_SoftPlacement) { SessionOptions opts; opts.config.set_allow_soft_placement(true); TestFeedAndFetchTensorsInDeviceMemoryForAllDataTypes(opts); } void FeedFetchBenchmarkHelper(::testing::benchmark::State& state, int num_feeds, bool use_make_callable, int inter_op_threads, bool use_single_threaded_executor) { Tensor value(DT_FLOAT, TensorShape()); value.flat<float>()(0) = 37.0; std::vector<std::pair<string, Tensor>> inputs; inputs.reserve(num_feeds); std::vector<string> outputs; Graph g(OpRegistry::Global()); for (int i = 0; i < num_feeds; ++i) { Node* 
placeholder; TF_CHECK_OK(NodeBuilder(g.NewName("Placeholder"), "Placeholder") .Attr("shape", TensorShape()) .Attr("dtype", DT_FLOAT) .Device("/cpu:0") .Finalize(&g, &placeholder)); Node* identity; TF_CHECK_OK(NodeBuilder(g.NewName("Identity"), "Identity") .Input(placeholder) .Attr("T", DT_FLOAT) .Device("/cpu:0") .Finalize(&g, &identity)); inputs.push_back({placeholder->name() + ":0", value}); outputs.push_back(identity->name() + ":0"); } GraphDef gd; g.ToGraphDef(&gd); SessionOptions opts; opts.config.set_inter_op_parallelism_threads(inter_op_threads); if (use_single_threaded_executor) { opts.config.mutable_experimental()->set_executor_type( "SINGLE_THREADED_EXECUTOR"); } std::unique_ptr<Session> session(NewSession(opts)); TF_CHECK_OK(session->Create(gd)); if (use_make_callable) { Session::CallableHandle handle; CallableOptions callable_options; std::vector<Tensor> input_tensors; for (const auto& input : inputs) { callable_options.add_feed(input.first); input_tensors.push_back(input.second); } for (const string& output : outputs) { callable_options.add_fetch(output); } TF_CHECK_OK(session->MakeCallable(callable_options, &handle)); for (auto s : state) { std::vector<Tensor> output_values; TF_CHECK_OK( session->RunCallable(handle, input_tensors, &output_values, nullptr)); } } else { { std::vector<Tensor> output_values; TF_CHECK_OK(session->Run(inputs, outputs, {}, &output_values)); } for (auto s : state) { std::vector<Tensor> output_values; TF_CHECK_OK(session->Run(inputs, outputs, {}, &output_values)); } } } void BM_FeedFetch(::testing::benchmark::State& state) { const int num_feeds = state.range(0); FeedFetchBenchmarkHelper(state, num_feeds, false, 0, false); } void BM_FeedFetchCallable(::testing::benchmark::State& state) { const int num_feeds = state.range(0); FeedFetchBenchmarkHelper(state, num_feeds, true, 0, false); } void BM_FeedFetchCallableSingleThread(::testing::benchmark::State& state) { const int num_feeds = state.range(0); FeedFetchBenchmarkHelper(state, num_feeds, true, -1, false); } void BM_FeedFetchCallableSingleThreadExecutor( ::testing::benchmark::State& state) { const int num_feeds = state.range(0); FeedFetchBenchmarkHelper(state, num_feeds, true, -1, true); } BENCHMARK(BM_FeedFetch)->Arg(1)->Arg(2)->Arg(5)->Arg(10); BENCHMARK(BM_FeedFetchCallable)->Arg(1)->Arg(2)->Arg(5)->Arg(10); BENCHMARK(BM_FeedFetchCallableSingleThread)->Arg(1)->Arg(2)->Arg(5)->Arg(10); BENCHMARK(BM_FeedFetchCallableSingleThreadExecutor) ->Arg(1) ->Arg(2) ->Arg(5) ->Arg(10); } class DirectSessionCollectiveTest : public ::testing::Test { public: Status RunGraphWithCollectiveFunctions(bool add_unused_function, int64_t* collective_graph_key) { GraphDef g = CreateGraph(add_unused_function); const Tensor t1 = test::AsTensor<float>({0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}); const Tensor t2 = test::AsTensor<float>({0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3}); auto session = CreateSession(); TF_RETURN_IF_ERROR(session->Create(g)); std::vector<Tensor> outputs; TF_RETURN_IF_ERROR( session->Run({{"input0:0", t1}, {"input1:0", t2}}, {}, {"collective_call0:0", "collective_call1:0"}, &outputs)); DirectSession* direct_session = static_cast<DirectSession*>(session.get()); { mutex_lock l(direct_session->collective_graph_key_lock_); *collective_graph_key = direct_session->collective_graph_key_; } return absl::OkStatus(); } private: FunctionDef CollectiveFunction(const string& function_name, int instance_key) { return FunctionDefHelper::Define( function_name, {"arg:float"}, {"reduce:float"}, {}, {{ {"reduce"}, 
"CollectiveReduce", {"arg"}, {{"group_size", 2}, {"group_key", 1}, {"instance_key", instance_key}, {"subdiv_offsets", absl::Span<const int32>({0})}, {"merge_op", "Add"}, {"final_op", "Div"}, {"T", DT_FLOAT}}, }}); } NodeDef Input(int id) { AttrValue dtype_attr; SetAttrValue(DT_FLOAT, &dtype_attr); NodeDef input; input.set_name(strings::StrCat("input", id)); input.set_op("Placeholder"); input.mutable_attr()->insert({"dtype", dtype_attr}); return input; } NodeDef CollectiveCall(const string& op, const string& input, int cpu_id) { NodeDef collective_call; collective_call.set_name(strings::StrCat("collective_call", cpu_id)); collective_call.set_op(op); collective_call.add_input(input); collective_call.set_device( strings::StrCat("/job:localhost/replica:0/task:0/device:CPU:", cpu_id)); return collective_call; } GraphDef CreateGraph(bool add_unused_function) { GraphDef g; FunctionDef collective_function = CollectiveFunction("CollectiveFunction1", 1); FunctionDefLibrary* lib = g.mutable_library(); *lib->add_function() = collective_function; if (add_unused_function) { FunctionDef unused_function = CollectiveFunction("CollectiveFunction2", 2); *lib->add_function() = unused_function; } *g.add_node() = Input(0); *g.add_node() = Input(1); *g.add_node() = CollectiveCall("CollectiveFunction1", "input0", 0); *g.add_node() = CollectiveCall("CollectiveFunction1", "input1", 1); return g; } }; TEST_F(DirectSessionCollectiveTest, TestCollectiveGraphKeyUsesOnlyCalledFunctions) { int64_t key1; TF_ASSERT_OK(RunGraphWithCollectiveFunctions(false, &key1)); int64_t key2; TF_ASSERT_OK(RunGraphWithCollectiveFunctions(true, &key2)); ASSERT_EQ(key1, key2); } class StatefulOutputRequiredOp : public OpKernel { public: explicit StatefulOutputRequiredOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { Tensor count_outputs_required_t(int64_t{0}); int64_t& count_outputs_required = count_outputs_required_t.scalar<int64_t>()(); for (int i = 0; i < num_outputs(); ++i) { if (ctx->output_required(i)) ++count_outputs_required; } for (int i = 0; i < num_outputs(); ++i) { if (ctx->output_required(i)) ctx->set_output(i, count_outputs_required_t); } } }; REGISTER_KERNEL_BUILDER(Name("StatefulOutputRequired").Device(DEVICE_CPU), StatefulOutputRequiredOp); REGISTER_OP("StatefulOutputRequired") .Output("results : num_outs * int64") .Attr("num_outs : int = 5") .SetIsStateful(); TEST(DirectSessionTest, TestStatefulOutputRequiredOp) { GraphDef graph; protobuf::TextFormat::ParseFromString( R"pb( node { name: 'n' op: 'StatefulOutputRequired' device: '/device:CPU:0' } versions { producer: 9 } )pb", &graph); std::unique_ptr<Session> session(NewSession(SessionOptions())); ASSERT_TRUE(session != nullptr); TF_ASSERT_OK(session->Create(std::move(graph))); for (int num_outputs_required = 1; num_outputs_required <= 5; ++num_outputs_required) { std::vector<string> fetch_tensor_names; fetch_tensor_names.reserve(num_outputs_required); for (int output_idx = 0; output_idx < num_outputs_required; ++output_idx) { fetch_tensor_names.push_back(strings::StrCat("n:", output_idx)); } std::vector<Tensor> fetch_tensors; TF_ASSERT_OK(session->Run({}, fetch_tensor_names, {}, &fetch_tensors)); ASSERT_EQ(num_outputs_required, fetch_tensors.size()); for (const Tensor& t : fetch_tensors) { ASSERT_EQ(num_outputs_required, t.scalar<int64_t>()()); } } TF_ASSERT_OK(session->Close()); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/direct_session.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/direct_session_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
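Annotation (not part of the record above): a minimal client-side sketch of the output_required mechanism that the StatefulOutputRequired test in this record exercises. It assumes a Session created over the test's single-node graph; fetching only two of the node's five outputs makes OpKernelContext::output_required(i) false for the other three, so the kernel counts two required outputs and each fetched scalar reads back 2.

    std::vector<Tensor> out;
    // Fetch just "n:0" and "n:1"; outputs 2..4 are never materialized.
    TF_CHECK_OK(session->Run({}, {"n:0", "n:1"}, {}, &out));
    // out[0].scalar<int64_t>()() == 2 and out[1].scalar<int64_t>()() == 2.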
f86148fb-f654-4d7d-b45a-5e7e21319870
cpp
tensorflow/tensorflow
runtime_client
tensorflow/core/function/runtime_client/runtime_client.cc
tensorflow/core/function/runtime_client/runtime_client_test.cc
#include "tensorflow/core/function/runtime_client/runtime_client.h" #include <memory> #include <string> #include <utility> #include <vector> #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Pass/PassManager.h" #include "mlir/Pass/PassRegistry.h" #include "mlir/Support/LogicalResult.h" #include "tensorflow/c/eager/abstract_tensor_handle.h" #include "tensorflow/c/eager/immediate_execution_context.h" #include "tensorflow/c/eager/immediate_execution_operation.h" #include "tensorflow/c/eager/immediate_execution_tensor_handle.h" #if !defined(DISABLE_MLIR) #include "tensorflow/compiler/mlir/python/mlir.h" #endif #include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h" #include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h" #include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/eager/context.h" #include "tensorflow/core/common_runtime/function_def_utils.h" #include "tensorflow/core/framework/device.h" #include "tensorflow/core/framework/device_factory.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/ir/importexport/graphdef_export.h" #include "tensorflow/core/ir/importexport/graphdef_import.h" #include "tensorflow/core/ir/ops.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/stringpiece.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/protobuf/error_codes.pb.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace core { namespace function { EagerContext& GlobalEagerContext() { static EagerContext* global_ctx = []() { SessionOptions opts; std::vector<std::unique_ptr<Device>> devices; Status&& device_init_status = DeviceFactory::AddDevices( opts, "/job:localhost/replica:0/task:0", &devices); CHECK(device_init_status.ok()); return new EagerContext( opts, ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false, new DynamicDeviceMgr(std::move(devices)), true, nullptr, nullptr, nullptr, true); }(); return *global_ctx; } EagerContext& GlobalPythonEagerContext() { EagerContext* ctx = reinterpret_cast<EagerContext*>(GetCEagerContext()); DCHECK(ctx) << "The Python eager context must be initialized first."; return *ctx; } absl::StatusOr<FunctionDef> Runtime::GetFunctionProto(StringPiece name) { EagerContext& ctx = this->eager_ctx_; const FunctionDef* f = ctx.FindFunctionDef(std::string(name)); if (f == nullptr) { return Status(absl::StatusCode::kInvalidArgument, absl::StrCat("Could not find an attribute for key ", name)); } return *f; } Status Runtime::CreateFunction(const FunctionDef& fdef) { const auto& fname = fdef.signature().name(); if (this->eager_ctx_.FindFunctionByName(fname)) { TF_RETURN_WITH_CONTEXT_IF_ERROR(this->eager_ctx_.RemoveFunction(fname), "removing function ", fname); } return this->eager_ctx_.AddFunctionDef(fdef); } Status Runtime::CreateFunction(OpaqueTfgGraphFuncOp* fop) { mlir::tfg::GraphFuncOp fop_proper = *reinterpret_cast<mlir::tfg::GraphFuncOp*>(fop); return mlir::tfg::ConvertToFunctionDef(fop_proper, *this->eager_ctx_.FuncLibDef()); } Status Runtime::CreateFunction(OpaqueTfFuncOp* fop) { mlir::func::FuncOp fop_proper = 
*reinterpret_cast<mlir::func::FuncOp*>(fop); const auto& fname = fop_proper.getName().str(); GraphExportConfig config; FunctionDef fdef; TF_RETURN_WITH_CONTEXT_IF_ERROR( tf2xla::v2::ConvertMlirFunctionToFunctionLibraryDef(fop_proper, config, &fdef), "creating function ", fname); return CreateFunction(fdef); } Status Runtime::TransformFunction(StringPiece name, StringPiece pipeline_name, Dialect dialect) { mlir::MLIRContext ctx; mlir::PassManager pm(&ctx); std::string error; llvm::raw_string_ostream error_stream(error); if (mlir::failed(mlir::parsePassPipeline(std::string(pipeline_name), pm, error_stream))) { return Status(absl::StatusCode::kInvalidArgument, absl::StrCat("locating pass pipeline ", pipeline_name, ": ", error_stream.str())); } auto fn = GetFunctionProto(name); TF_RETURN_WITH_CONTEXT_IF_ERROR(fn.status(), "loading function ", name); GraphDef graph; *graph.mutable_library()->add_function() = *fn; tensorflow::GraphDebugInfo debug_info; if (dialect == Dialect::TFG) { auto mlir_fn = mlir::tfg::ImportGraphDef(&ctx, debug_info, graph); TF_RETURN_WITH_CONTEXT_IF_ERROR(mlir_fn.status(), "importing function ", name); mlir::StatusScopedDiagnosticHandler diagnostics_handler(&ctx); if (failed(pm.run(mlir_fn->get()))) { return diagnostics_handler.Combine( Status(absl::StatusCode::kInvalidArgument, absl::StrCat("running pass pipeline ", pipeline_name, ": "))); } for (auto fn : mlir_fn->get().getBody()->getOps<mlir::tfg::GraphFuncOp>()) { TF_RETURN_WITH_CONTEXT_IF_ERROR( CreateFunction(reinterpret_cast<OpaqueTfgGraphFuncOp*>(&fn)), absl::StrCat("updating function ", fn.getName().str())); } return absl::OkStatus(); } if (dialect == Dialect::TF) { Status status; FunctionLibraryDefinition& flib_def = *this->eager_ctx_.FuncLibDef(); std::unique_ptr<FunctionBody> fbody; status = FunctionDefToBodyHelper(*fn, AttrSlice(), &flib_def, &fbody); TF_RETURN_WITH_CONTEXT_IF_ERROR(status, "importing function ", name); auto mlir_fn = ConvertFunctionToMlir(fbody.get(), flib_def, &ctx); TF_RETURN_WITH_CONTEXT_IF_ERROR(mlir_fn.status(), "importing function ", name); mlir::StatusScopedDiagnosticHandler diagnostics_handler(&ctx); if (failed(pm.run(mlir_fn->get()))) { return diagnostics_handler.Combine( Status(absl::StatusCode::kInvalidArgument, absl::StrCat("running pass pipeline ", pipeline_name, ": "))); } for (auto fn : mlir_fn->get().getBody()->getOps<mlir::func::FuncOp>()) { TF_RETURN_WITH_CONTEXT_IF_ERROR( CreateFunction(reinterpret_cast<OpaqueTfFuncOp*>(&fn)), absl::StrCat("updating function ", fn.getName().str())); } return absl::OkStatus(); } return Status( absl::StatusCode::kInvalidArgument, absl::StrCat("Unsupported dialect: ", dialect, ". 
Supported dialects are Dialect::TFG and Dialect::TF.")); } absl::StatusOr<ReturnValues> Runtime::CallFunction( StringPiece name, absl::Span<AbstractTensorHandle* const> args) { EagerContext& ctx = this->eager_ctx_; ImmediateOpPtr op(ctx.CreateOperation()); TF_RETURN_WITH_CONTEXT_IF_ERROR(op->Reset(name.data(), nullptr), "initializing call op for ", name); TF_RETURN_WITH_CONTEXT_IF_ERROR(op->AddInputList(args), "preparing call args for ", name); const FunctionDef* fn_def = ctx.GetFunctionDef(string(name)); int num_retvals = fn_def->signature().output_arg_size(); int actual_retvals = num_retvals; std::vector<ImmediateExecutionTensorHandle*> retvals(num_retvals); TF_RETURN_WITH_CONTEXT_IF_ERROR( op->Execute(absl::MakeSpan( reinterpret_cast<AbstractTensorHandle**>(retvals.data()), num_retvals), &actual_retvals), "executing call op for ", name); DCHECK(num_retvals == actual_retvals); ReturnValues final_returns; for (const auto& r : retvals) { final_returns.emplace_back(ImmediateTensorHandlePtr(r)); } return final_returns; } } } }
#include "tensorflow/core/function/runtime_client/runtime_client.h" #include <stdint.h> #include <memory> #include <utility> #include <vector> #include "absl/types/span.h" #include "mlir/Parser/Parser.h" #include "tensorflow/c/eager/immediate_execution_tensor_handle.h" #include "tensorflow/c/tensor_interface.h" #include "tensorflow/core/common_runtime/eager/context.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/function/testing/test_pass.h" #include "tensorflow/core/ir/dialect.h" #include "tensorflow/core/ir/ops.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace core { namespace function { namespace { EagerContextPtr TestingEagerCtx() { SessionOptions opts; std::vector<std::unique_ptr<Device>> devices; Status&& device_init_status = DeviceFactory::AddDevices( opts, "/job:localhost/replica:0/task:0", &devices); CHECK(device_init_status.ok()); return EagerContextPtr(new EagerContext( opts, ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false, new DynamicDeviceMgr(std::move(devices)), true, nullptr, nullptr, nullptr, true)); } int IntValue(ImmediateExecutionTensorHandle& h) { Status status; AbstractTensorPtr t(h.Resolve(&status)); DCHECK(status.ok()); switch (h.DataType()) { case DT_INT32: return *(static_cast<int32_t*>(t->Data())); case DT_INT64: return *(static_cast<int64_t*>(t->Data())); default: DCHECK(false) << "invalid data type"; return 0; } } ImmediateTensorHandlePtr IntScalarTensor(EagerContext& ctx, int value) { AbstractTensorPtr tensor(ctx.CreateInt32Scalar(value)); ImmediateTensorHandlePtr handle(ctx.CreateLocalHandle(tensor.get())); return handle; } FunctionDef MakeNullaryFunction() { FunctionDef fd; protobuf::TextFormat::Parser parser; CHECK(parser.ParseFromString( R"pb(signature { name: 'NullaryFunction' output_arg { name: 'o' type: DT_INT32 } } node_def { name: 'retval' op: 'Const' attr { key: 'dtype' value { type: DT_INT32 } } attr { key: 'value' value { tensor { dtype: DT_INT32 tensor_shape {} int_val: 1 } } } } ret { key: 'o' value: 'retval:output' })pb", &fd)); return fd; } FunctionDef MakeUnaryFunction() { FunctionDef fd; protobuf::TextFormat::Parser parser; CHECK(parser.ParseFromString( R"pb(signature { name: "UnaryFunction" input_arg { name: "x" type: DT_INT32 } output_arg { name: "ret" type: DT_INT32 } } node_def { name: "ret" op: "Identity" input: "x" attr { key: "T" value { type: DT_INT32 } } } ret { key: "ret" value: "ret:output:0" })pb", &fd)); return fd; } FunctionDef MakeBinaryFunction() { FunctionDef fd; protobuf::TextFormat::Parser parser; CHECK(parser.ParseFromString( R"pb(signature { name: "BinaryFunction" input_arg { name: "x" type: DT_INT32 } input_arg { name: "y" type: DT_INT32 } output_arg { name: "ret" type: DT_INT32 } } node_def { name: "x_plus_y" op: "AddV2" input: "x" input: "y" attr { key: "T" value { type: DT_INT32 } } } node_def { name: "ret" op: "Identity" input: "x_plus_y:z:0" attr { key: "T" value { type: DT_INT32 } } } ret { key: "ret" value: "ret:output:0" })pb", &fd)); return fd; } FunctionDef MakeMultiplyFunction() { FunctionDef fd; protobuf::TextFormat::Parser parser; CHECK(parser.ParseFromString( R"pb(signature { name: "MultiplyFunction" input_arg { name: "x" type: DT_INT32 } input_arg { 
name: "y" type: DT_INT32 } output_arg { name: "ret" type: DT_INT32 } } node_def { name: "x_times_y" op: "Mul" input: "x" input: "y" attr { key: "T" value { type: DT_INT32 } } } node_def { name: "ret" op: "Identity" input: "x_times_y:z:0" attr { key: "T" value { type: DT_INT32 } } } ret { key: "ret" value: "ret:output:0" })pb", &fd)); return fd; } TEST(GlobalContext, Basic) { Runtime rt(GlobalEagerContext()); TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction())); absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {}); TF_ASSERT_OK(rets.status()); ASSERT_EQ(rets->size(), 1); ASSERT_EQ(rets->at(0)->DataType(), DT_INT32); EXPECT_EQ(IntValue(*(rets->at(0))), 1); } TEST(CreateTest, Call) { EagerContextPtr ctx = TestingEagerCtx(); Runtime rt(*ctx); TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction())); absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {}); TF_ASSERT_OK(rets.status()); ASSERT_EQ(rets->size(), 1); ASSERT_EQ(rets->at(0)->DataType(), DT_INT32); EXPECT_EQ(IntValue(*(rets->at(0))), 1); } TEST(CreateTest, GetRoundtrip) { EagerContextPtr ctx = TestingEagerCtx(); Runtime rt(*ctx); TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction())); absl::StatusOr<FunctionDef> fdef_ret = rt.GetFunctionProto("NullaryFunction"); TF_ASSERT_OK(fdef_ret.status()); FunctionDef fdef = *fdef_ret; fdef.mutable_signature()->set_name("SecondFunction"); TF_ASSERT_OK(rt.CreateFunction(fdef)); absl::StatusOr<ReturnValues> rets = rt.CallFunction("SecondFunction", {}); TF_ASSERT_OK(rets.status()); ASSERT_EQ(rets->size(), 1); ASSERT_EQ(rets->at(0)->DataType(), DT_INT32); EXPECT_EQ(IntValue(*(rets->at(0))), 1); } TEST(CreateTest, MlirFromGraphDef) { mlir::MLIRContext mctx; mctx.getOrLoadDialect<mlir::tfg::TFGraphDialect>(); auto m = mlir::parseSourceString<mlir::ModuleOp>( R"mlir( module { tfg.func @NullaryFunction() -> (tensor<i32> {tfg.dtype = i32, tfg.name = "o"}) { %Const, %ctl = Const name("retval") {dtype = i32, value = dense<1> : tensor<i32>} : () -> (tensor<i32>) return(%Const) : tensor<i32> } } )mlir", &mctx); mlir::tfg::GraphFuncOp fop = *m->getBody()->op_begin<mlir::tfg::GraphFuncOp>(); EagerContextPtr ectx = TestingEagerCtx(); Runtime rt(*ectx); OpaqueTfgGraphFuncOp* opaque_fop = reinterpret_cast<OpaqueTfgGraphFuncOp*>(&fop); TF_ASSERT_OK(rt.CreateFunction(opaque_fop)); absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {}); TF_ASSERT_OK(rets.status()); ASSERT_EQ(rets->size(), 1); ASSERT_EQ(rets->at(0)->DataType(), DT_INT32); EXPECT_EQ(IntValue(*(rets->at(0))), 1); } TEST(CallTest, Nullary) { EagerContextPtr ctx = TestingEagerCtx(); Runtime rt(*ctx); TF_ASSERT_OK(rt.CreateFunction(MakeNullaryFunction())); absl::StatusOr<ReturnValues> rets = rt.CallFunction("NullaryFunction", {}); TF_ASSERT_OK(rets.status()); ASSERT_EQ(rets->size(), 1); ASSERT_EQ(rets->at(0)->DataType(), DT_INT32); EXPECT_EQ(IntValue(*(rets->at(0))), 1); } TEST(CallTest, Unary) { EagerContextPtr ctx = TestingEagerCtx(); Runtime rt(*ctx); TF_ASSERT_OK(rt.CreateFunction(MakeUnaryFunction())); auto x = IntScalarTensor(*ctx, 1); absl::StatusOr<ReturnValues> rets = rt.CallFunction("UnaryFunction", {x.get()}); TF_ASSERT_OK(rets.status()); ASSERT_EQ(rets->size(), 1); ASSERT_EQ(rets->at(0)->DataType(), DT_INT32); EXPECT_EQ(IntValue(*(rets->at(0))), 1); } TEST(CallTest, Binary) { EagerContextPtr ctx = TestingEagerCtx(); Runtime rt(*ctx); TF_ASSERT_OK(rt.CreateFunction(MakeBinaryFunction())); auto x = IntScalarTensor(*ctx, 1); auto y = IntScalarTensor(*ctx, 1); absl::StatusOr<ReturnValues> 
rets = rt.CallFunction("BinaryFunction", {x.get(), y.get()}); TF_ASSERT_OK(rets.status()); ASSERT_EQ(rets->size(), 1); ASSERT_EQ(rets->at(0)->DataType(), DT_INT32); EXPECT_EQ(IntValue(*(rets->at(0))), 2); } TEST(TransformTest, TestPassOnBinaryFunction) { EagerContextPtr ctx = TestingEagerCtx(); Runtime rt(*ctx); TF_ASSERT_OK(rt.CreateFunction(MakeBinaryFunction())); testing::RegisterTestPass(); TF_EXPECT_OK(rt.TransformFunction("BinaryFunction", "test-pass")); auto x = IntScalarTensor(*ctx, 2); auto y = IntScalarTensor(*ctx, 3); absl::StatusOr<ReturnValues> rets = rt.CallFunction("BinaryFunction", {x.get(), y.get()}); TF_ASSERT_OK(rets.status()); ASSERT_EQ(rets->size(), 1); ASSERT_EQ(rets->at(0)->DataType(), DT_INT32); EXPECT_EQ(IntValue(*(rets->at(0))), 6); } TEST(TransformTest, TestPassOnMultiplyFunction) { EagerContextPtr ctx = TestingEagerCtx(); Runtime rt(*ctx); TF_ASSERT_OK(rt.CreateFunction(MakeMultiplyFunction())); testing::RegisterTestPass(); TF_EXPECT_OK(rt.TransformFunction("MultiplyFunction", "test-pass-tf-dialect", Runtime::Dialect::TF)); auto x = IntScalarTensor(*ctx, 2); auto y = IntScalarTensor(*ctx, 3); absl::StatusOr<ReturnValues> rets = rt.CallFunction("MultiplyFunction", {x.get(), y.get()}); TF_ASSERT_OK(rets.status()); ASSERT_EQ(rets->size(), 1); ASSERT_EQ(rets->at(0)->DataType(), DT_INT32); EXPECT_EQ(IntValue(*(rets->at(0))), 5); } TEST(TransformTest, TestMixedPassesOnBinaryFunction) { EagerContextPtr ctx = TestingEagerCtx(); Runtime rt(*ctx); TF_ASSERT_OK(rt.CreateFunction(MakeBinaryFunction())); testing::RegisterTestPass(); TF_EXPECT_OK(rt.TransformFunction("BinaryFunction", "test-pass")); TF_EXPECT_OK(rt.TransformFunction("BinaryFunction", "test-pass-tf-dialect", Runtime::Dialect::TF)); auto x = IntScalarTensor(*ctx, 2); auto y = IntScalarTensor(*ctx, 3); absl::StatusOr<ReturnValues> rets = rt.CallFunction("BinaryFunction", {x.get(), y.get()}); TF_ASSERT_OK(rets.status()); ASSERT_EQ(rets->size(), 1); ASSERT_EQ(rets->at(0)->DataType(), DT_INT32); EXPECT_EQ(IntValue(*(rets->at(0))), 5); } } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/function/runtime_client/runtime_client.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/function/runtime_client/runtime_client_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
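Annotation (not part of the record above): a minimal sketch of the Runtime round trip the tests in this record exercise, reusing the test helpers TestingEagerCtx, MakeBinaryFunction, and IntScalarTensor defined above. CreateFunction registers a FunctionDef with the eager context (replacing any same-named function); CallFunction executes it eagerly and returns owned tensor handles.

    EagerContextPtr ctx = TestingEagerCtx();
    Runtime rt(*ctx);
    TF_CHECK_OK(rt.CreateFunction(MakeBinaryFunction()));
    auto x = IntScalarTensor(*ctx, 20);
    auto y = IntScalarTensor(*ctx, 22);
    absl::StatusOr<ReturnValues> rets =
        rt.CallFunction("BinaryFunction", {x.get(), y.get()});
    TF_CHECK_OK(rets.status());
    // rets->at(0) is a DT_INT32 scalar handle holding 20 + 22 = 42.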
e5bec101-66d0-4856-83f0-6cc668d1a910
cpp
tensorflow/tensorflow
pooling3d
tensorflow/lite/kernels/pooling3d.cc
tensorflow/lite/kernels/pooling3d_test.cc
#include <stddef.h> #include <stdint.h> #include <algorithm> #include <cstdlib> #include <string> #include "flatbuffers/flexbuffers.h" #include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/padding.h" namespace tflite { namespace ops { namespace custom { namespace pooling_3d { namespace { struct Pool3DParams { TfLiteFusedActivation activation; TfLitePadding padding_type; Padding3DValues padding_values; int stride_depth; int stride_height; int stride_width; int filter_depth; int filter_height; int filter_width; int32_t quantized_activation_min; int32_t quantized_activation_max; float float_activation_min; float float_activation_max; }; template <typename T, typename ActivationT> inline T RoundAndAverage(ActivationT sum, int count) { return sum > 0 ? (sum + count / 2) / count : (sum - count / 2) / count; } template <> inline float RoundAndAverage(float sum, int count) { return sum / count; } template <typename T, typename ActivationT> inline void AveragePool3D(const Pool3DParams& params, const RuntimeShape& input_shape, const T* input_data, const RuntimeShape& output_shape, T* output_data) { TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 5); TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 5); ActivationT activation_min, activation_max; GetActivationParams(params, &activation_min, &activation_max); const int batches = MatchingDim(input_shape, 0, output_shape, 0); const int channels = MatchingDim(input_shape, 4, output_shape, 4); const int in_spatial_dim_1 = input_shape.Dims(1); const int in_spatial_dim_2 = input_shape.Dims(2); const int in_spatial_dim_3 = input_shape.Dims(3); const int out_spatial_dim_1 = output_shape.Dims(1); const int out_spatial_dim_2 = output_shape.Dims(2); const int out_spatial_dim_3 = output_shape.Dims(3); const int stride_spatial_dim_1 = params.stride_depth; const int stride_spatial_dim_2 = params.stride_height; const int stride_spatial_dim_3 = params.stride_width; const int filter_spatial_dim_1 = params.filter_depth; const int filter_spatial_dim_2 = params.filter_height; const int filter_spatial_dim_3 = params.filter_width; const int padding_spatial_dim_1 = params.padding_values.depth; const int padding_spatial_dim_2 = params.padding_values.height; const int padding_spatial_dim_3 = params.padding_values.width; for (int batch = 0; batch < batches; ++batch) { for (int out_d1 = 0; out_d1 < out_spatial_dim_1; ++out_d1) { const int in_d1_origin = (out_d1 * stride_spatial_dim_1) - padding_spatial_dim_1; const int filter_d1_start = std::max(0, -in_d1_origin); const int filter_d1_end = std::min(filter_spatial_dim_1, in_spatial_dim_1 - in_d1_origin); for (int out_d2 = 0; out_d2 < out_spatial_dim_2; ++out_d2) { const int in_d2_origin = (out_d2 * stride_spatial_dim_2) - padding_spatial_dim_2; const int filter_d2_start = std::max(0, -in_d2_origin); const int filter_d2_end = std::min(filter_spatial_dim_2, in_spatial_dim_2 - in_d2_origin); for (int out_d3 = 0; out_d3 < out_spatial_dim_3; ++out_d3) { const int in_d3_origin = (out_d3 * stride_spatial_dim_3) - padding_spatial_dim_3; const int filter_d3_start = std::max(0, -in_d3_origin); const int filter_d3_end = std::min(filter_spatial_dim_3, in_spatial_dim_3 - in_d3_origin); for (int channel = 0; channel < channels; ++channel) { ActivationT 
total = 0; for (int filter_d1 = filter_d1_start; filter_d1 < filter_d1_end; ++filter_d1) { const int in_d1 = in_d1_origin + filter_d1; for (int filter_d2 = filter_d2_start; filter_d2 < filter_d2_end; ++filter_d2) { const int in_d2 = in_d2_origin + filter_d2; for (int filter_d3 = filter_d3_start; filter_d3 < filter_d3_end; ++filter_d3) { const int in_d3 = in_d3_origin + filter_d3; total += input_data[Offset(input_shape, batch, in_d1, in_d2, in_d3, channel)]; } } } const int filter_count = (filter_d1_end - filter_d1_start) * (filter_d2_end - filter_d2_start) * (filter_d3_end - filter_d3_start); T average = pooling_3d::RoundAndAverage<T, ActivationT>( total, filter_count); average = std::max<T>(average, activation_min); average = std::min<T>(average, activation_max); output_data[Offset(output_shape, batch, out_d1, out_d2, out_d3, channel)] = average; } } } } } } template <typename T, typename ActivationT> inline void MaxPool3D(const Pool3DParams& params, const RuntimeShape& input_shape, const T* input_data, const RuntimeShape& output_shape, T* output_data) { TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 5); TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 5); ActivationT activation_min, activation_max; GetActivationParams(params, &activation_min, &activation_max); const int batches = MatchingDim(input_shape, 0, output_shape, 0); const int channels = MatchingDim(input_shape, 4, output_shape, 4); const int in_spatial_dim_1 = input_shape.Dims(1); const int in_spatial_dim_2 = input_shape.Dims(2); const int in_spatial_dim_3 = input_shape.Dims(3); const int out_spatial_dim_1 = output_shape.Dims(1); const int out_spatial_dim_2 = output_shape.Dims(2); const int out_spatial_dim_3 = output_shape.Dims(3); const int stride_spatial_dim_1 = params.stride_depth; const int stride_spatial_dim_2 = params.stride_height; const int stride_spatial_dim_3 = params.stride_width; const int filter_spatial_dim_1 = params.filter_depth; const int filter_spatial_dim_2 = params.filter_height; const int filter_spatial_dim_3 = params.filter_width; const int padding_spatial_dim_1 = params.padding_values.depth; const int padding_spatial_dim_2 = params.padding_values.height; const int padding_spatial_dim_3 = params.padding_values.width; for (int batch = 0; batch < batches; ++batch) { for (int out_d1 = 0; out_d1 < out_spatial_dim_1; ++out_d1) { const int in_d1_origin = (out_d1 * stride_spatial_dim_1) - padding_spatial_dim_1; const int filter_d1_start = std::max(0, -in_d1_origin); const int filter_d1_end = std::min(filter_spatial_dim_1, in_spatial_dim_1 - in_d1_origin); for (int out_d2 = 0; out_d2 < out_spatial_dim_2; ++out_d2) { const int in_d2_origin = (out_d2 * stride_spatial_dim_2) - padding_spatial_dim_2; const int filter_d2_start = std::max(0, -in_d2_origin); const int filter_d2_end = std::min(filter_spatial_dim_2, in_spatial_dim_2 - in_d2_origin); for (int out_d3 = 0; out_d3 < out_spatial_dim_3; ++out_d3) { const int in_d3_origin = (out_d3 * stride_spatial_dim_3) - padding_spatial_dim_3; const int filter_d3_start = std::max(0, -in_d3_origin); const int filter_d3_end = std::min(filter_spatial_dim_3, in_spatial_dim_3 - in_d3_origin); for (int channel = 0; channel < channels; ++channel) { T max = std::numeric_limits<T>::lowest(); for (int filter_d1 = filter_d1_start; filter_d1 < filter_d1_end; ++filter_d1) { const int in_d1 = in_d1_origin + filter_d1; for (int filter_d2 = filter_d2_start; filter_d2 < filter_d2_end; ++filter_d2) { const int in_d2 = in_d2_origin + filter_d2; for (int filter_d3 = filter_d3_start; filter_d3 < 
filter_d3_end; ++filter_d3) { const int in_d3 = in_d3_origin + filter_d3; max = std::max(max, input_data[Offset(input_shape, batch, in_d1, in_d2, in_d3, channel)]); } } } max = std::max<T>(max, activation_min); max = std::min<T>(max, activation_max); output_data[Offset(output_shape, batch, out_d1, out_d2, out_d3, channel)] = max; } } } } } } } enum PoolType { kAverage, kMax, }; constexpr const char kPoolSizeStr[] = "ksize"; constexpr const char kStridesStr[] = "strides"; constexpr const char kPaddingStr[] = "padding"; constexpr const char kDataFormatStr[] = "data_format"; constexpr const char kPaddingSameStr[] = "SAME"; constexpr const char kPaddingValidStr[] = "VALID"; struct OpData { Pool3DParams params; }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { OpData* opdata = new OpData; opdata->params.activation = kTfLiteActNone; const flexbuffers::Map& m = flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(buffer), length) .AsMap(); const std::string data_format = m[kDataFormatStr].AsString().str(); TFLITE_CHECK_EQ(data_format, "NDHWC"); const std::string padding = m[kPaddingStr].AsString().str(); if (padding == kPaddingValidStr) { opdata->params.padding_type = kTfLitePaddingValid; } else if (padding == kPaddingSameStr) { opdata->params.padding_type = kTfLitePaddingSame; } else { opdata->params.padding_type = kTfLitePaddingUnknown; } const auto pool_size = m[kPoolSizeStr].AsTypedVector(); TFLITE_CHECK_EQ(pool_size.size(), 5); TFLITE_CHECK_EQ(pool_size[0].AsInt32(), 1); TFLITE_CHECK_EQ(pool_size[4].AsInt32(), 1); opdata->params.filter_depth = pool_size[1].AsInt32(); opdata->params.filter_height = pool_size[2].AsInt32(); opdata->params.filter_width = pool_size[3].AsInt32(); const auto strides = m[kStridesStr].AsTypedVector(); TFLITE_CHECK_EQ(strides.size(), 5); TFLITE_CHECK_EQ(strides[0].AsInt32(), 1); TFLITE_CHECK_EQ(strides[4].AsInt32(), 1); opdata->params.stride_depth = strides[1].AsInt32(); opdata->params.stride_height = strides[2].AsInt32(); opdata->params.stride_width = strides[3].AsInt32(); return opdata; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* opdata = reinterpret_cast<OpData*>(node->user_data); Pool3DParams& params = opdata->params; TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 5); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE_EQ(context, input->type == kTfLiteFloat32 || input->type == kTfLiteInt16 || input->type == kTfLiteInt8, true); int batches = input->dims->data[0]; int depth = input->dims->data[1]; int height = input->dims->data[2]; int width = input->dims->data[3]; int channels = input->dims->data[4]; TF_LITE_ENSURE(context, params.stride_depth > 0); TF_LITE_ENSURE(context, params.stride_height > 0); TF_LITE_ENSURE(context, params.stride_width > 0); int out_width, out_height, out_depth; params.padding_values = ComputePadding3DValues( params.stride_height, params.stride_width, params.stride_depth, 1, 1, 1, height, width, depth, params.filter_height, params.filter_width, params.filter_depth, params.padding_type, &out_height, &out_width, &out_depth); if (input->type == kTfLiteInt8) { 
TF_LITE_ENSURE_NEAR(context, input->params.scale, output->params.scale, 1.0e-6); TFLITE_DCHECK_EQ(input->params.zero_point, output->params.zero_point); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(5); output_size->data[0] = batches; output_size->data[1] = out_depth; output_size->data[2] = out_height; output_size->data[3] = out_width; output_size->data[4] = channels; return context->ResizeTensor(context, output, output_size); } TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { OpData* opdata = reinterpret_cast<OpData*>(node->user_data); Pool3DParams& params = opdata->params; TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); #define TF_LITE_AVERAGE_POOL_3D(type, activation_type) \ SetActivationParams(activation_min, activation_max, &params); \ AveragePool3D<type, activation_type>( \ params, GetTensorShape(input), GetTensorData<type>(input), \ GetTensorShape(output), GetTensorData<type>(output)) switch (input->type) { case kTfLiteFloat32: { float activation_min, activation_max; CalculateActivationRange(params.activation, &activation_min, &activation_max); TF_LITE_AVERAGE_POOL_3D(float, float); } break; case kTfLiteInt8: { int32_t activation_min; int32_t activation_max; CalculateActivationRangeQuantized(context, params.activation, output, &activation_min, &activation_max); TF_LITE_AVERAGE_POOL_3D(int8_t, int32_t); } break; case kTfLiteInt16: { int32_t activation_min; int32_t activation_max; CalculateActivationRangeQuantized(context, params.activation, output, &activation_min, &activation_max); TF_LITE_AVERAGE_POOL_3D(int16_t, int32_t); } break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } #undef TF_LITE_AVERAGE_POOL_3D return kTfLiteOk; } TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { OpData* opdata = reinterpret_cast<OpData*>(node->user_data); Pool3DParams& params = opdata->params; #define TF_LITE_MAX_POOL_3D(type, activation_type) \ SetActivationParams(activation_min, activation_max, &params); \ MaxPool3D<type, activation_type>( \ params, GetTensorShape(input), GetTensorData<type>(input), \ GetTensorShape(output), GetTensorData<type>(output)) TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); switch (input->type) { case kTfLiteFloat32: { float activation_min, activation_max; CalculateActivationRange(params.activation, &activation_min, &activation_max); TF_LITE_MAX_POOL_3D(float, float); } break; case kTfLiteInt8: { int32_t activation_min; int32_t activation_max; CalculateActivationRangeQuantized(context, params.activation, output, &activation_min, &activation_max); TF_LITE_MAX_POOL_3D(int8_t, int32_t); } break; case kTfLiteInt16: { int32_t activation_min; int32_t activation_max; CalculateActivationRangeQuantized(context, params.activation, output, &activation_min, &activation_max); TF_LITE_MAX_POOL_3D(int16_t, int32_t); } break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } #undef TF_LITE_MAX_POOL_3D return kTfLiteOk; } } TfLiteRegistration* Register_AVG_POOL_3D() { static TfLiteRegistration r = {pooling_3d::Init, pooling_3d::Free, pooling_3d::GenericPrepare, pooling_3d::AverageEval}; return &r; } 
TfLiteRegistration* Register_MAX_POOL_3D() { static TfLiteRegistration r = {pooling_3d::Init, pooling_3d::Free, pooling_3d::GenericPrepare, pooling_3d::MaxEval}; return &r; } } } }
#include <stdint.h> #include <initializer_list> #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "flatbuffers/flexbuffers.h" #include "tensorflow/lite/kernels/custom_ops_register.h" #include "tensorflow/lite/kernels/test_util.h" namespace tflite { using ::testing::ElementsAreArray; enum PoolType { kAverage, kMax, }; template <typename T> class BasePoolingOpModel : public SingleOpModel { public: BasePoolingOpModel(PoolType pool_type, TensorData input, int filter_d, int filter_h, int filter_w, TensorData output, TfLitePadding padding = kTfLitePaddingValid, int stride_d = 2, int stride_h = 2, int stride_w = 2) { if (input.type == TensorType_FLOAT32) { input.min = input.max = 0.f; output.min = output.max = 0.f; } input_ = AddInput(input); output_ = AddOutput(output); std::vector<uint8_t> custom_option = CreateCustomOptions( stride_d, stride_h, stride_w, filter_d, filter_h, filter_w, padding); if (pool_type == kAverage) { SetCustomOp("AveragePool3D", custom_option, ops::custom::Register_AVG_POOL_3D); } else { SetCustomOp("MaxPool3D", custom_option, ops::custom::Register_MAX_POOL_3D); } BuildInterpreter({GetShape(input_)}); } void SetInput(const std::vector<float>& data) { QuantizeAndPopulate<T>(input_, data); } std::vector<float> GetOutput() { return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_), GetZeroPoint(output_)); } protected: int input_; int output_; private: std::vector<uint8_t> CreateCustomOptions(int stride_depth, int stride_height, int stride_width, int filter_depth, int filter_height, int filter_width, TfLitePadding padding) { auto flex_builder = std::make_unique<flexbuffers::Builder>(); size_t map_start = flex_builder->StartMap(); flex_builder->String("data_format", "NDHWC"); if (padding == kTfLitePaddingValid) { flex_builder->String("padding", "VALID"); } else { flex_builder->String("padding", "SAME"); } auto start = flex_builder->StartVector("ksize"); flex_builder->Add(1); flex_builder->Add(filter_depth); flex_builder->Add(filter_height); flex_builder->Add(filter_width); flex_builder->Add(1); flex_builder->EndVector(start, true, false); auto strides_start = flex_builder->StartVector("strides"); flex_builder->Add(1); flex_builder->Add(stride_depth); flex_builder->Add(stride_height); flex_builder->Add(stride_width); flex_builder->Add(1); flex_builder->EndVector(strides_start, true, false); flex_builder->EndMap(map_start); flex_builder->Finish(); return flex_builder->GetBuffer(); } }; template <> void BasePoolingOpModel<float>::SetInput(const std::vector<float>& data) { PopulateTensor(input_, data); } template <> std::vector<float> BasePoolingOpModel<float>::GetOutput() { return ExtractVector<float>(output_); } #if GTEST_HAS_DEATH_TEST TEST(AveragePoolingOpTest, InvalidDimSize) { EXPECT_DEATH(BasePoolingOpModel<float> m( kAverage, {TensorType_FLOAT32, {1, 2, 4, 1}}, 2, 2, 2, {TensorType_FLOAT32, {}}, kTfLitePaddingValid, 1, 1, 1), "NumDimensions.input. 
!= 5 .4 != 5."); } TEST(AveragePoolingOpTest, ZeroStride) { EXPECT_DEATH(BasePoolingOpModel<float> m( kAverage, {TensorType_FLOAT32, {1, 2, 2, 4, 1}}, 2, 2, 2, {TensorType_FLOAT32, {}}, kTfLitePaddingValid, 0, 0, 0), "Cannot allocate tensors"); } #endif template <typename T> class AveragePoolingOpTest : public ::testing::Test {}; template <typename T> class MaxPoolingOpTest : public ::testing::Test {}; using DataTypes = ::testing::Types<float, int8_t, int16_t>; TYPED_TEST_SUITE(AveragePoolingOpTest, DataTypes); TYPED_TEST_SUITE(MaxPoolingOpTest, DataTypes); TYPED_TEST(AveragePoolingOpTest, AveragePool) { BasePoolingOpModel<TypeParam> m( kAverage, {GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375}, 2, 2, 2, {GetTensorType<TypeParam>(), {}, 0, 15.9375}); m.SetInput({0, 6, 2, 4, 4, 5, 1, 4, 3, 2, 10, 7, 2, 3, 5, 1}); ASSERT_EQ(m.Invoke(), kTfLiteOk); EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {3.125, 4.25})); } TYPED_TEST(AveragePoolingOpTest, AveragePoolFilterH1) { BasePoolingOpModel<TypeParam> m( kAverage, {GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375}, 2, 1, 2, {GetTensorType<TypeParam>(), {}, 0, 15.9375}); m.SetInput({0, 6, 2, 4, 4, 5, 1, 4, 3, 2, 10, 7, 2, 3, 5, 1}); ASSERT_EQ(m.Invoke(), kTfLiteOk); EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {2.75, 5.75})); } TYPED_TEST(AveragePoolingOpTest, AveragePoolPaddingSameStride1) { BasePoolingOpModel<TypeParam> m( kAverage, {GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375}, 2, 2, 2, {GetTensorType<TypeParam>(), {}, 0, 15.9375}, kTfLitePaddingSame, 1, 1, 1); m.SetInput({0, 6, 2, 4, 2, 5, 4, 3, 3, 2, 10, 7, 3, 2, 2, 4}); ASSERT_EQ(m.Invoke(), kTfLiteOk); EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {2.875, 4.125, 4.5, 4.5, 3.0, 3.25, 3.25, 3.5, 2.5, 4.0, 5.75, 5.5, 2.5, 2.0, 3.0, 4.0})); } TYPED_TEST(AveragePoolingOpTest, AveragePoolPaddingValidStride1) { BasePoolingOpModel<TypeParam> m( kAverage, {GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375}, 2, 2, 2, {GetTensorType<TypeParam>(), {}, 0, 15.9375}, kTfLitePaddingValid, 1, 1, 1); m.SetInput({0, 6, 2, 4, 2, 5, 4, 3, 3, 2, 10, 7, 3, 2, 2, 4}); ASSERT_EQ(m.Invoke(), kTfLiteOk); EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {2.875, 4.125, 4.5})); } TYPED_TEST(MaxPoolingOpTest, MaxPool) { BasePoolingOpModel<TypeParam> m( kMax, {GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375}, 2, 2, 2, {GetTensorType<TypeParam>(), {}, 0, 15.9375}); m.SetInput({0, 6, 2, 4, 4, 5, 1, 4, 3, 2, 10, 7, 2, 3, 5, 1}); ASSERT_EQ(m.Invoke(), kTfLiteOk); EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {6.0, 10.0})); } TYPED_TEST(MaxPoolingOpTest, MaxPoolFilterH1) { BasePoolingOpModel<TypeParam> m( kMax, {GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375}, 2, 1, 2, {GetTensorType<TypeParam>(), {}, 0, 15.9375}); m.SetInput({0, 6, 2, 4, 4, 5, 1, 4, 3, 2, 10, 7, 2, 3, 5, 1}); ASSERT_EQ(m.Invoke(), kTfLiteOk); EXPECT_THAT(m.GetOutput(), ElementsAreArray({6, 10})); } TYPED_TEST(MaxPoolingOpTest, MaxPoolPaddingSameStride1) { BasePoolingOpModel<TypeParam> m( kMax, {GetTensorType<TypeParam>(), {1, 2, 2, 4, 1}, 0, 15.9375}, 2, 2, 2, {GetTensorType<TypeParam>(), {}, 0, 15.9375}, kTfLitePaddingSame, 1, 1, 1); m.SetInput({0, 6, 2, 4, 2, 5, 4, 3, 3, 2, 10, 7, 3, 2, 2, 4}); ASSERT_EQ(m.Invoke(), kTfLiteOk); EXPECT_THAT(m.GetOutput(), ElementsAreArray({6, 10, 10, 7, 5, 5, 4, 4, 3, 10, 10, 7, 3, 2, 4, 4})); } TYPED_TEST(MaxPoolingOpTest, MaxPoolPaddingValidStride1) { BasePoolingOpModel<TypeParam> m( kMax, {GetTensorType<TypeParam>(), {1, 2, 
2, 4, 1}, 0, 15.9375}, 2, 2, 2, {GetTensorType<TypeParam>(), {}, 0, 15.9375}, kTfLitePaddingValid, 1, 1, 1); m.SetInput({0, 6, 2, 4, 2, 5, 4, 3, 3, 2, 10, 7, 3, 2, 2, 4}); ASSERT_EQ(m.Invoke(), kTfLiteOk); EXPECT_THAT(m.GetOutput(), Pointwise(FloatingPointEq(), {6.0, 10.0, 10.0})); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/pooling3d.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/pooling3d_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
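Annotation (not part of the record above): the integer rounding used by RoundAndAverage in the pooling kernel of this record. For quantized int8/int16 inputs the average is rounded half away from zero instead of being truncated toward zero by plain integer division, which keeps positive and negative sums symmetric:

    int32_t sum = 7, count = 2;
    int32_t avg = sum > 0 ? (sum + count / 2) / count   // (7 + 1) / 2 == 4
                          : (sum - count / 2) / count;  // sum = -7 gives -4
    // Plain truncating division would yield 3 and -3 respectively.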
4ebd16c3-9ade-4140-a597-d2ef6dfe067e
cpp
tensorflow/tensorflow
gemm_fusion
third_party/xla/xla/service/gpu/transforms/gemm_fusion.cc
third_party/xla/xla/service/gpu/transforms/gemm_fusion_test.cc
#include "xla/service/gpu/transforms/gemm_fusion.h" #include <array> #include <cstddef> #include <cstdint> #include <optional> #include <queue> #include <string> #include <tuple> #include <utility> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_padding_requirements.h" #include "xla/service/gpu/fusions/triton/triton_support.h" #include "xla/service/gpu/fusions/triton/triton_support_legacy.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/triton_fusion_analysis.h" #include "xla/service/gpu/triton_tiling_propagation.h" #include "xla/service/instruction_fusion.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using triton_fusion::CombineDotRequirements; using triton_fusion::DimensionOrder; using triton_fusion::DimOrderMap; using triton_fusion::DimOrdersAndReqs; using triton_fusion::DimOrdersAndReqsOrError; using triton_fusion::DotProperties; using triton_fusion::DotRequirements; using triton_fusion::DotRequirementsOrError; using triton_fusion::FusionContext; using triton_fusion::GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible; using triton_fusion::TransformDirection; class AdjacencyList { public: using NodeId = int64_t; NodeId AddNode() { adj_.emplace_back(); return adj_.size() - 1; } const std::vector<NodeId>& GetOutNeighbors(NodeId node_id) const { return adj_.at(node_id); } void ReserveSpaceForOutNeighbors(NodeId node_id, size_t count) { adj_.at(node_id).reserve(count); } void AddArc(NodeId from, NodeId to) { adj_.at(from).push_back(to); } NodeId GetRoot() const { CHECK(!adj_.empty()); return 0; } private: std::vector<std::vector<NodeId>> adj_; }; struct HloAndDimOrder { const HloInstruction* original_hlo = nullptr; DimensionOrder dim_order; }; struct HloAndIterSpec { const HloInstruction* original_hlo; TensorIterationSpec iter_spec; auto ToTuple() const { return std::make_tuple(original_hlo, iter_spec); } bool operator==(const HloAndIterSpec& other) const { return ToTuple() == other.ToTuple(); } template <typename H> friend H AbslHashValue(H h, const HloAndIterSpec& key) { return H::combine(std::move(h), key.ToTuple()); } }; struct NodeFusionPlan { const HloInstruction* original_hlo = nullptr; bool should_fuse = false; }; struct FusionPlan { AdjacencyList graph; absl::flat_hash_map<AdjacencyList::NodeId, NodeFusionPlan> map; }; struct FusionPlanAndRequirements { FusionPlan fusion_plan; DotRequirements requirements; }; struct HlosAndRequirements { const HloInstruction* original_hlo = nullptr; const HloInstruction* fused_hlo = nullptr; DotRequirements requirements; }; HloInstruction& FuseDot(const HloDotInstruction& dot, const HloInstruction& fused_lhs, const HloInstruction& fused_rhs, 
std::optional<const HloInstruction*> fused_meta, HloComputation::Builder& builder ) { VLOG(3) << "Fusing " << dot.ToString(); std::vector<HloInstruction*> hlo_new_operands = { const_cast<HloInstruction*>(&fused_lhs), const_cast<HloInstruction*>(&fused_rhs)}; if (fused_meta.has_value()) { hlo_new_operands.push_back(const_cast<HloInstruction*>(fused_meta.value())); } return *builder.AddInstruction( dot.CloneWithNewOperands(dot.shape(), hlo_new_operands)); } int64_t NumAddedParameters(const HloInstruction& hlo) { if (hlo.opcode() == HloOpcode::kParameter || (hlo.opcode() == HloOpcode::kConstant && !ShapeUtil::IsScalar(hlo.shape()))) { return 0; } return hlo.operand_count() - 1; } std::optional<DimOrdersAndReqs> GetOperandDimOrdersAndCombinedReqs( const HloInstruction& hlo, const DimensionOrder& dim_order, const DotProperties& properties, const se::GpuComputeCapability& gpu_version, const DotRequirements& requirements) { DimOrdersAndReqsOrError dim_orders_and_new_reqs = GetPropagatedDimOrdersAndRequirements( hlo, dim_order, TransformDirection::kOutputToInput, properties); if (std::holds_alternative<FusionDecision>(dim_orders_and_new_reqs)) { VLOG(5) << "Not fusing " << hlo.ToString() << " to the output due to the decision: " << std::get<FusionDecision>(dim_orders_and_new_reqs).Explain(); return std::nullopt; } DotRequirementsOrError combined_reqs = CombineDotRequirements( requirements, std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements); if (std::holds_alternative<FusionDecision>(combined_reqs)) { VLOG(5) << "Not fusing " << hlo.ToString() << " to the output due to the decision: " << std::get<FusionDecision>(combined_reqs).Explain(); return std::nullopt; } return DimOrdersAndReqs{ std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders, std::get<DotRequirements>(combined_reqs)}; } std::optional<DimOrdersAndReqs> GetOperandDimOrdersAndCombinedReqsIfProfitable( const HloInstruction& hlo, const DimensionOrder& dim_order, const DotProperties& properties, const se::GpuComputeCapability& gpu_version, const DotRequirements& requirements) { DimOrdersAndReqsOrError dim_orders_and_new_reqs = GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible( hlo, TransformDirection::kOutputToInput, std::nullopt, dim_order, gpu_version, properties); if (std::holds_alternative<FusionDecision>(dim_orders_and_new_reqs)) { VLOG(5) << "Not fusing " << hlo.ToString() << " to the output due to the decision: " << std::get<FusionDecision>(dim_orders_and_new_reqs).Explain(); return std::nullopt; } DotRequirementsOrError combined_reqs = CombineDotRequirements( requirements, std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements); if (std::holds_alternative<FusionDecision>(combined_reqs)) { VLOG(5) << "Not fusing " << hlo.ToString() << " to the output due to the decision: " << std::get<FusionDecision>(combined_reqs).Explain(); return std::nullopt; } return DimOrdersAndReqs{ std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders, std::get<DotRequirements>(combined_reqs)}; } std::optional<DimOrdersAndReqs> GetUserDimOrdersAndCombinedReqsIfProfitable( const HloInstruction& hlo, const DimensionOrder& hlo_dim_order, const HloInstruction& user, const DotProperties& properties, const se::GpuComputeCapability& gpu_version, const DotRequirements& requirements) { DimOrdersAndReqsOrError dim_orders_and_new_reqs = GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible( user, TransformDirection::kInputToOutput, user.operand_index(&hlo), hlo_dim_order, gpu_version, properties); if 
(std::holds_alternative<FusionDecision>(dim_orders_and_new_reqs)) { VLOG(5) << "Not fusing " << user.ToString() << " to the input due to the decision: " << std::get<FusionDecision>(dim_orders_and_new_reqs).Explain(); return std::nullopt; } DotRequirementsOrError combined_reqs = CombineDotRequirements( requirements, std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements); if (std::holds_alternative<FusionDecision>(combined_reqs)) { VLOG(5) << "Not fusing " << user.ToString() << " to the input due to the decision: " << std::get<FusionDecision>(combined_reqs).Explain(); return std::nullopt; } return DimOrdersAndReqs{ std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders, std::get<DotRequirements>(combined_reqs)}; } FusionPlanAndRequirements BuildFusionPlanTowardOperands( const HloInstruction& root_hlo, const DimensionOrder& root_dim_order, const std::optional<int>& max_params, const se::GpuComputeCapability& gpu_version, const DotProperties& properties, const DotRequirements& requirements_so_far) { CHECK(!max_params.has_value() || max_params.value() >= 1); AdjacencyList graph; absl::flat_hash_map<AdjacencyList::NodeId, HloAndDimOrder> hlo_and_dim_order_map; absl::flat_hash_map<AdjacencyList::NodeId, NodeFusionPlan> fusion_plan_map; absl::flat_hash_map<HloAndIterSpec, AdjacencyList::NodeId> node_reuse_map; DotRequirements combined_reqs = requirements_so_far; auto get_or_create_fusion_node = [&](const HloInstruction& hlo, const DimensionOrder& dim_order, bool* is_new_node = nullptr) -> AdjacencyList::NodeId { HloAndIterSpec reuse_key = {&hlo, dim_order.ToTensorIterationSpec()}; if (auto it = node_reuse_map.find(reuse_key); it != node_reuse_map.end()) { if (is_new_node != nullptr) { *is_new_node = false; } return it->second; } AdjacencyList::NodeId node_id = graph.AddNode(); CHECK(hlo_and_dim_order_map.insert({node_id, {&hlo, dim_order}}).second); CHECK(node_reuse_map.insert({reuse_key, node_id}).second); if (is_new_node != nullptr) { *is_new_node = true; } return node_id; }; AdjacencyList::NodeId root = get_or_create_fusion_node(root_hlo, root_dim_order); absl::flat_hash_set<AdjacencyList::NodeId> inputs({root}); std::queue<AdjacencyList::NodeId> queue({root}); int64_t num_requeued = 0; while (queue.size() > num_requeued) { AdjacencyList::NodeId node_id = queue.front(); queue.pop(); const HloAndDimOrder& hlo_and_dim_order = hlo_and_dim_order_map.at(node_id); const HloInstruction& original_hlo = *hlo_and_dim_order.original_hlo; const DimensionOrder& dim_order = hlo_and_dim_order.dim_order; if (max_params.has_value() && inputs.size() + NumAddedParameters(original_hlo) > max_params.value()) { queue.push(node_id); ++num_requeued; continue; } num_requeued = 0; if (original_hlo.opcode() == HloOpcode::kParameter) { CHECK(fusion_plan_map .insert({node_id, {&original_hlo, false}}) .second); continue; } auto opt_result = GetOperandDimOrdersAndCombinedReqsIfProfitable( original_hlo, dim_order, properties, gpu_version, combined_reqs); if (!opt_result.has_value()) { CHECK(fusion_plan_map .insert({node_id, {&original_hlo, false}}) .second); continue; } const DimOrderMap operand_dim_orders = std::move(opt_result->dim_orders); combined_reqs = std::move(opt_result->requirements); inputs.erase(node_id); graph.ReserveSpaceForOutNeighbors(node_id, original_hlo.operand_count()); for (int64_t i = 0; i < original_hlo.operand_count(); ++i) { const HloInstruction& operand = *original_hlo.operand(i); const DimensionOrder& operand_dim_order = operand_dim_orders.at(&operand); bool is_new_node = false; 
AdjacencyList::NodeId operand_node_id = get_or_create_fusion_node(operand, operand_dim_order, &is_new_node); graph.AddArc(node_id, operand_node_id); if (is_new_node) { VLOG(6) << "Enqueueing " << operand.ToString() << ":" << operand_dim_order.ToString(); inputs.insert(operand_node_id); queue.push(operand_node_id); } } CHECK( fusion_plan_map.insert({node_id, {&original_hlo, true}}) .second); } while (!queue.empty()) { AdjacencyList::NodeId node_id = queue.front(); queue.pop(); const HloAndDimOrder& hlo_and_dim_order = hlo_and_dim_order_map.at(node_id); CHECK(fusion_plan_map .insert({node_id, {hlo_and_dim_order.original_hlo, false}}) .second); } return {{std::move(graph), std::move(fusion_plan_map)}, std::move(combined_reqs)}; } HloInstruction& BuildFusionTowardOperandsImpl( AdjacencyList::NodeId node_id, const FusionPlan& fusion_plan, absl::flat_hash_map<AdjacencyList::NodeId, HloInstruction*>& fused_hlo_map, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { if (auto it = fused_hlo_map.find(node_id); it != fused_hlo_map.end()) { return *it->second; } const NodeFusionPlan& node_fusion_plan = fusion_plan.map.at(node_id); const bool should_fuse = node_fusion_plan.should_fuse; const HloInstruction& original_hlo = *node_fusion_plan.original_hlo; HloInstruction* fused_hlo = nullptr; if (should_fuse) { HloInstruction::InstructionVector new_operands; for (AdjacencyList::NodeId operand_id : fusion_plan.graph.GetOutNeighbors(node_id)) { new_operands.push_back(&BuildFusionTowardOperandsImpl( operand_id, fusion_plan, fused_hlo_map, builder, fusion_params)); } fused_hlo = builder.AddInstruction( original_hlo.CloneWithNewOperands(original_hlo.shape(), new_operands)); } else { fusion_params.push_back(const_cast<HloInstruction*>(&original_hlo)); fused_hlo = builder.AddInstruction(HloInstruction::CreateParameter( fusion_params.size() - 1, original_hlo.shape(), absl::StrCat("parameter_", fusion_params.size() - 1))); } CHECK(fused_hlo_map.insert({node_id, fused_hlo}).second); return *fused_hlo; } HloInstruction& BuildFusionTowardOperands( const FusionPlan& fusion_plan, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { absl::flat_hash_map<AdjacencyList::NodeId, HloInstruction*> fused_hlo_map; return BuildFusionTowardOperandsImpl(fusion_plan.graph.GetRoot(), fusion_plan, fused_hlo_map, builder, fusion_params); } HlosAndRequirements FuseTowardOperands( const HloInstruction& root_hlo, const DimensionOrder& root_dim_order, const std::optional<int>& max_params, const se::GpuComputeCapability& gpu_version, const DotProperties& properties, const DotRequirements& requirements_so_far, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { FusionPlanAndRequirements fusion_plan_and_reqs = BuildFusionPlanTowardOperands(root_hlo, root_dim_order, max_params, gpu_version, properties, requirements_so_far); HloInstruction& fused_hlo_or_param = BuildFusionTowardOperands( fusion_plan_and_reqs.fusion_plan, builder, fusion_params); return HlosAndRequirements{&root_hlo, &fused_hlo_or_param, fusion_plan_and_reqs.requirements}; } absl::StatusOr<HlosAndRequirements> FuseDotOperand( const HloInstruction& dot, int operand_index, const se::GpuComputeCapability& gpu_version, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { TF_ASSIGN_OR_RETURN(const FusionContext context, FusionContext::FromDotOperand(dot, operand_index)); const HloInstruction& operand = *dot.operand(operand_index); return FuseTowardOperands(operand, 
context.dim_orders().at(&operand), TritonFusionAnalysis::kMaxParameterPerDotOperand, gpu_version, context.dot_properties(), context.requirements(), builder, fusion_params); } HlosAndRequirements FuseTowardUsers( const HloInstruction& hlo, const HloInstruction& fused_hlo, const DimensionOrder& hlo_dim_order, const se::GpuComputeCapability& gpu_version, const DotProperties& properties, const DotRequirements& requirements, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { const HlosAndRequirements existing_hlos_and_requirements = {&hlo, &fused_hlo, requirements}; if (hlo.user_count() != 1) { return existing_hlos_and_requirements; } const HloInstruction& user = *hlo.users()[0]; if (!legacy_triton::IsDistributiveOverAddition(user)) { return existing_hlos_and_requirements; } auto opt_user_result = GetUserDimOrdersAndCombinedReqsIfProfitable( hlo, hlo_dim_order, user, properties, gpu_version, requirements); if (!opt_user_result.has_value()) { return existing_hlos_and_requirements; } DimensionOrder user_dim_order = opt_user_result->dim_orders.at(&user); DotRequirements combined_requirements = opt_user_result->requirements; HloInstruction::InstructionVector new_operands; if (user.operand_count() == 1) { new_operands.push_back(const_cast<HloInstruction*>(&fused_hlo)); } else { auto opt_operand_result = GetOperandDimOrdersAndCombinedReqs( user, user_dim_order, properties, gpu_version, combined_requirements); if (!opt_operand_result.has_value()) { return existing_hlos_and_requirements; } DimOrderMap operand_dim_orders = opt_operand_result->dim_orders; combined_requirements = opt_operand_result->requirements; for (int i = 0; i < user.operand_count(); ++i) { const HloInstruction& operand = *user.operand(i); if (&operand == &hlo) { new_operands.push_back(const_cast<HloInstruction*>(&fused_hlo)); } else { HlosAndRequirements hlos_and_requirements = FuseTowardOperands( operand, operand_dim_orders.at(&operand), std::nullopt, gpu_version, properties, combined_requirements, builder, fusion_params); new_operands.push_back( const_cast<HloInstruction*>(hlos_and_requirements.fused_hlo)); combined_requirements = hlos_and_requirements.requirements; } } } const HloInstruction& fused_user = *builder.AddInstruction( user.CloneWithNewOperands(user.shape(), new_operands)); return FuseTowardUsers(user, fused_user, user_dim_order, gpu_version, properties, combined_requirements, builder, fusion_params); } HlosAndRequirements FuseDotOutput( const HloInstruction& dot, const HloInstruction& fused_dot, const se::GpuComputeCapability& gpu_version, const DotRequirements& requirements, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { const auto context = FusionContext::FromDotOutput(dot, 1, requirements); return FuseTowardUsers(dot, fused_dot, context.dim_orders().at(&dot), gpu_version, context.dot_properties(), context.requirements(), builder, fusion_params); } namespace { class Decision { public: bool CanFuse() const { return fusing_decision_.CanFuse() || able_to_fuse_; } bool WantToFuse() const { return fusing_decision_.CanFuse(); } static Decision Allow() { return {FusionDecision::Allow(), true}; }; static Decision Deny(std::string_view value) { return {FusionDecision::Forbid(value), false}; } static Decision NotProfitable(std::string_view value) { return {FusionDecision::Forbid(value), true}; } private: Decision(FusionDecision decision, bool able_to_fuse) : fusing_decision_(std::move(decision)), able_to_fuse_(able_to_fuse) {} FusionDecision fusing_decision_; 
bool able_to_fuse_; }; } absl::StatusOr<Decision> CreateDotFusion( const HloDotInstruction& dot, const se::GpuComputeCapability gpu_version, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_inputs, HloInstruction** fusion_output_ptr) { VLOG(5) << dot.ToString(); if (CodegenDecision is_supported = legacy_triton::IsTritonSupportedInstruction(dot, gpu_version); !is_supported) { VLOG(3) << is_supported.Explain(); return Decision::Deny(is_supported.Explain()); } if (dot.sparse_operands()) { const SparsityDescriptor& descriptor = dot.sparsity().front(); if (dot.sparse_operands() != 1 || descriptor.index() != 0) { return InvalidArgument("Sparsity is only supported on left operand"); } if (descriptor.type() != SparsityType::SPARSITY_STRUCTURED_N_M || descriptor.n() != 2 || descriptor.m() != 4) { return InvalidArgument("Only 2:4 structured sparsity is supported"); } CHECK_EQ(descriptor.dimension(), dot.operand(0)->shape().rank() - 1); } TF_ASSIGN_OR_RETURN(HlosAndRequirements lhs_hlos_and_reqs, FuseDotOperand(dot, 0, gpu_version, builder, fusion_inputs)); TF_ASSIGN_OR_RETURN(HlosAndRequirements rhs_hlos_and_reqs, FuseDotOperand(dot, 1, gpu_version, builder, fusion_inputs)); std::optional<const HloInstruction*> meta_hlo; if (dot.sparse_operands()) { TF_ASSIGN_OR_RETURN(HlosAndRequirements meta_hlos_and_reqs, FuseDotOperand(dot, 2, gpu_version, builder, fusion_inputs)); meta_hlo.emplace(meta_hlos_and_reqs.fused_hlo); } HloInstruction& fused_dot = FuseDot(dot, *lhs_hlos_and_reqs.fused_hlo, *rhs_hlos_and_reqs.fused_hlo, meta_hlo, builder); HlosAndRequirements fused_output_and_reqs = FuseDotOutput(dot, fused_dot, gpu_version, lhs_hlos_and_reqs.requirements, builder, fusion_inputs); if (fusion_output_ptr != nullptr) { *fusion_output_ptr = const_cast<HloInstruction*>(fused_output_and_reqs.original_hlo); } bool has_int4_param = absl::c_any_of(fusion_inputs, [](const HloInstruction* hlo) { return hlo->shape().element_type() == PrimitiveType::S4; }); if (has_int4_param) { auto analysis_or = TritonFusionAnalysis::Execute(dot); if (analysis_or.ok()) { const auto& analysis = analysis_or.value(); if (!analysis.IsBatchDimMinorForInt4Parameter( dot, TritonFusionAnalysis::Scope::LHS) || !analysis.IsBatchDimMinorForInt4Parameter( dot, TritonFusionAnalysis::Scope::RHS)) { return Decision::Deny( "Fusion is not possible because the parameter with the type S4 has " "minor batch dimension."); } } } const PrecisionConfig::Algorithm algorithm = dot.precision_config().algorithm(); if (algorithm == PrecisionConfig::ALG_DOT_BF16_BF16_F32_X6 || algorithm == PrecisionConfig::ALG_DOT_BF16_BF16_F32_X3 || algorithm == PrecisionConfig::ALG_DOT_BF16_BF16_F32 || algorithm == PrecisionConfig::ALG_DOT_TF32_TF32_F32_X3 || dot.GetModule()->config().debug_options().xla_gpu_triton_gemm_any() || dot.sparse_operands()) { return Decision::Allow(); } bool is_pure_matmul = true; (void)builder.ForEachInstruction([&](const HloInstruction* fused_hlo) { static constexpr std::array<HloOpcode, 4> kPureOpcodes = { HloOpcode::kBitcast, HloOpcode::kDot, HloOpcode::kParameter, HloOpcode::kReshape}; if (absl::c_find(kPureOpcodes, fused_hlo->opcode()) == kPureOpcodes.end()) { is_pure_matmul = false; return absl::CancelledError(); } return absl::OkStatus(); }); if (is_pure_matmul) return Decision::NotProfitable("Pure Matmul"); return Decision::Allow(); } class GemmFusionVisitor : public DfsHloRewriteVisitor { public: explicit GemmFusionVisitor(const se::GpuComputeCapability& gpu_version) : gpu_version_(gpu_version) {} absl::Status 
HandleDot(HloInstruction* dot) override { CHECK_EQ(dot->opcode(), HloOpcode::kDot); int64_t gemm_rewrite_size_threshold = dot->GetModule() ->config() .debug_options() .xla_gpu_gemm_rewrite_size_threshold(); TF_ASSIGN_OR_RETURN(bool is_matmul_tiny, IsMatrixMultiplicationTooSmallForRewriting( *dot, gemm_rewrite_size_threshold)); if (is_matmul_tiny && IsDotSupportedByClassicalEmitters(*dot)) { return absl::OkStatus(); } std::string fusion_name = absl::StrCat("gemm_fusion_", dot->name()); HloComputation::Builder builder(absl::StrCat(fusion_name, "_computation")); std::vector<HloInstruction*> fusion_inputs; HloInstruction* fusion_output = nullptr; TF_ASSIGN_OR_RETURN( const Decision decision, CreateDotFusion(*Cast<HloDotInstruction>(dot), gpu_version_, builder, fusion_inputs, &fusion_output)); if (!decision.CanFuse()) { return absl::OkStatus(); } if (std::holds_alternative<se::CudaComputeCapability>(gpu_version_)) { if (!CublasRequiresPadding( *Cast<HloDotInstruction>(dot), std::get<se::CudaComputeCapability>(gpu_version_)) && !decision.WantToFuse()) { return absl::OkStatus(); } } HloComputation* computation = dot->GetModule()->AddComputationAndUnifyNamesAndIds(builder.Build(), false); HloInstruction* dot_fusion = dot->parent()->AddInstruction(HloInstruction::CreateFusion( computation->root_instruction()->shape(), HloInstruction::FusionKind::kCustom, fusion_inputs, computation)); dot_fusion->set_metadata(dot->metadata()); dot_fusion->GetModule()->SetAndUniquifyInstrName(dot_fusion, fusion_name); TF_ASSIGN_OR_RETURN(auto gpu_config, dot_fusion->backend_config<GpuBackendConfig>()); FusionBackendConfig& backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind(std::string(kTritonGemmFusionKind)); TF_RETURN_IF_ERROR(dot_fusion->set_backend_config(gpu_config)); if (fusion_output->IsRoot()) { fusion_output->parent()->set_root_instruction(dot_fusion); TF_RETURN_IF_ERROR( fusion_output->parent()->RemoveInstructionAndUnusedOperands( fusion_output)); MarkAsChanged(); } else { TF_RETURN_IF_ERROR(ReplaceInstruction(fusion_output, dot_fusion)); } XLA_VLOG_LINES(5, computation->ToString(HloPrintOptions::ShortParsable())); return absl::OkStatus(); } private: se::GpuComputeCapability gpu_version_; }; absl::StatusOr<bool> RunOnComputation( HloComputation* computation, const se::GpuComputeCapability& gpu_version) { GemmFusionVisitor visitor(gpu_version); TF_RETURN_IF_ERROR(computation->Accept(&visitor)); return visitor.changed(); } } bool ShouldTritonHandleGEMM(HloDotInstruction& dot, const se::GpuComputeCapability& gpu_version) { std::vector<HloInstruction*> fusion_inputs; HloComputation::Builder builder("disposable"); return CreateDotFusion(dot, gpu_version, builder, fusion_inputs, nullptr) ->WantToFuse(); } absl::StatusOr<bool> GemmFusion::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { TF_RETURN_IF_ERROR( EnsureTritonSupportsComputeCapability(compute_capability_)); bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation, compute_capability_)); changed |= result; } return changed; } } }
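The Decision class above deliberately separates "can fuse" from "want to fuse": Deny blocks fusion outright, NotProfitable permits fusion only when something else forces it (e.g. when cuBLAS would require padding), and Allow both permits and requests it. Below is a minimal, self-contained sketch of that tri-state pattern; the FusionDecision here is a reduced stand-in for the real xla::FusionDecision, which carries richer state.

#include <iostream>
#include <string>
#include <string_view>
#include <utility>

// Reduced stand-in for xla::FusionDecision: an empty reason means "allow".
class FusionDecision {
 public:
  static FusionDecision Allow() { return FusionDecision(""); }
  static FusionDecision Forbid(std::string_view reason) {
    return FusionDecision(std::string(reason));
  }
  bool CanFuse() const { return explanation_.empty(); }
  const std::string& Explain() const { return explanation_; }

 private:
  explicit FusionDecision(std::string reason)
      : explanation_(std::move(reason)) {}
  std::string explanation_;
};

// Mirrors the tri-state Decision above: Deny -> neither can nor want,
// NotProfitable -> can but does not want, Allow -> can and want.
class Decision {
 public:
  bool CanFuse() const { return fusing_decision_.CanFuse() || able_to_fuse_; }
  bool WantToFuse() const { return fusing_decision_.CanFuse(); }

  static Decision Allow() { return {FusionDecision::Allow(), true}; }
  static Decision Deny(std::string_view why) {
    return {FusionDecision::Forbid(why), false};
  }
  static Decision NotProfitable(std::string_view why) {
    return {FusionDecision::Forbid(why), true};
  }

 private:
  Decision(FusionDecision d, bool able)
      : fusing_decision_(std::move(d)), able_to_fuse_(able) {}
  FusionDecision fusing_decision_;
  bool able_to_fuse_;
};

void Report(std::string_view name, const Decision& d) {
  std::cout << name << ": CanFuse=" << d.CanFuse()
            << " WantToFuse=" << d.WantToFuse() << "\n";
}

int main() {
  Report("Allow", Decision::Allow());                        // 1 1
  Report("NotProfitable", Decision::NotProfitable("pure"));  // 1 0
  Report("Deny", Decision::Deny("unsupported"));             // 0 0
}

This is why HandleDot checks CanFuse() first and then consults WantToFuse() together with CublasRequiresPadding(): a not-profitable fusion is still created when skipping it would force cuBLAS padding.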
#include "xla/service/gpu/transforms/gemm_fusion.h" #include <memory> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "xla/autotuning.pb.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/cublas_padding_requirements.h" #include "xla/service/gpu/triton_fusion_analysis.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/verified_hlo_module.h" #include "xla/xla.pb.h" #include "xla/xla_data.pb.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using ::testing::ElementsAre; using ::testing::FieldsAre; namespace m = ::xla::match; class GemmFusionTest : public HloTestBase { public: GemmFusionTest() : HloTestBase(true, false) {} DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest(); debug_options.set_xla_gpu_triton_gemm_any(false); debug_options.set_xla_gpu_gemm_rewrite_size_threshold(0); return debug_options; } se::GpuComputeCapability gpu_version_{ se::CudaComputeCapability{se::CudaComputeCapability::AMPERE, 0}}; void MatchHloModule(HloModule& module, absl::string_view pattern) { TF_ASSERT_OK_AND_ASSIGN(bool filecheck_result, RunFileCheck(module.ToString(), pattern)); EXPECT_TRUE(filecheck_result); } }; TEST_F(GemmFusionTest, TransposeSubdimensionGroup) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f32[32,3] parameter(0) t1 = f32[3,32] transpose(p0), dimensions={1,0} r1 = f32[3,8,4] reshape(t1) r0 = f32[3,32] reshape(r1) p1 = f16[32,7] parameter(1) c1 = f32[32,7] convert(p1) ROOT d = f32[3,7] dot(r0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, UnsupportedTransposeIsNotFused) { auto module = ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f16[1,512,8,1024]{3,1,0,2} parameter(0) c = f16[1,512,8,1024]{3,2,1,0} copy(p0) b = f16[4096,1024]{1,0} bitcast(c) p1 = f16[128,1024]{1,0} parameter(1) ROOT d = f16[4096,128]{1,0} dot(b, p1), lhs_contracting_dims={1}, rhs_contracting_dims={1} })") .value(); EXPECT_FALSE(GemmFusion(gpu_version_).Run(module.get()).value()); } TEST_F(GemmFusionTest, BitcastChain) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = s8[60,5] parameter(0) r0 = s8[3,20,5] reshape(p0) c0 = f16[3,20,5] convert(r0) p1 = f16[3,200] parameter(1) r12 = f16[600] reshape(p1) r11 = f16[30,20] reshape(r12) r1 = f16[3,10,20] reshape(r11) ROOT d = f16[3,5,10] dot(c0, r1), lhs_contracting_dims={1}, rhs_contracting_dims={2}, lhs_batch_dims={0}, rhs_batch_dims={0} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, SplitDimensionTwice) { auto module = ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = s8[4,2,32,4,2] parameter(0) r1 = s8[8,32,8] reshape(p0) t1 = s8[32,8,8] transpose(r1), dimensions={1,0,2} r0 = 
s8[32,64] reshape(t1) p1 = s8[32,32] parameter(1) c0 = f16[32,32] convert(p1) ROOT d = f16[64,32] dot(r0, c0), lhs_contracting_dims={0}, rhs_contracting_dims={1} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, DoNotTriggerOnUnsupportedOutputConversions) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f16[128,256] parameter(0) p1 = f16[256,512] parameter(1) r = f16[128,512] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT c = u8[128,512] convert(r) })")); EXPECT_FALSE(GemmFusion(gpu_version_).Run(module.get()).value()); } TEST_F(GemmFusionTest, FuseDotWithTrivialNoncontractingDim) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = s8[60,5] parameter(0) r0 = s8[3,20,5] reshape(p0) c0 = f16[3,20,5] convert(r0) p1 = f16[3,1,20] parameter(1) ROOT d = f16[3,5,1] dot(c0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={2}, lhs_batch_dims={0}, rhs_batch_dims={0} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, HandleDotIfCublasRequiresPadding) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f16[5,3] parameter(0) p1 = f16[5,7] parameter(1) ROOT d = f16[3,7] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_TRUE(CublasRequiresPadding( *xla::Cast<HloDotInstruction>( module->entry_computation()->root_instruction()), cc)); EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value()); } TEST_F(GemmFusionTest, FuseSliceOfParameterWithOtherUsers) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[97,121] parameter(0) s0 = f32[7,101] slice(p0), slice={[3:10], [10:111]} p1 = f32[101,16] parameter(1) d = f32[16,7] dot(p1, s0), lhs_contracting_dims={0}, rhs_contracting_dims={1} s1 = f32[3,33] slice(p0), slice={[10:13], [20:53]} ROOT t = tuple(d, s1) })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value()); } TEST_F(GemmFusionTest, DoNotFuseSliceOfMixedDimensions) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = bf16[768,64] parameter(0) s0 = bf16[768,32] slice(p0), slice={[0:768], [0:32]} b0 = bf16[256,3,32] reshape(s0) b1 = bf16[256,96] reshape(b0) p1 = bf16[256,96] parameter(1) ROOT d = bf16[96,96] dot(b1, p1), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value()); } TEST_F(GemmFusionTest, DoNotFuseSlicesOfNonMajorFragments) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[2,2,256,256] parameter(0) s0 = f32[1,1,256,256] slice(p0), slice={[0:1], [0:1], [0:256], [0:256]} r0 = f32[256,256] reshape(s0) p1 = f16[2,2,256,256] parameter(1) s1 = f16[1,1,256,256] slice(p1), slice={[0:1], [0:1], [0:256], [0:256]} r1 = f16[256,256] reshape(s1) ROOT d = f32[256,256] dot(r0, 
r1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value()); } TEST_F(GemmFusionTest, DynamicSliceIsFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { dot_lhs = f32[2,18] parameter(0) dynamic_slice_input = f32[2,64,2] parameter(1) start_index0 = s32[] parameter(2) start_index1_2 = s32[] constant(0) dynamic_slice = f32[1,64,2] dynamic-slice(dynamic_slice_input, start_index0, start_index1_2, start_index1_2), dynamic_slice_sizes={1,64,2} reshape = f32[64,2] reshape(dynamic_slice) ROOT dot = f16[18,64] dot(dot_lhs, reshape), lhs_contracting_dims={0}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Constant())))); } TEST_F(GemmFusionTest, DynamicSlicesAreFusedEvenIfTheyShareIndices) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[2,64,2] parameter(0) p1 = s32[] parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) ds0 = f32[1,64,2] dynamic-slice(p0, p1, p2, p3), dynamic_slice_sizes={1,64,2} a = f32[64,2] reshape(ds0) ds1 = f32[1,64,2] dynamic-slice(p0, p3, p2, p1), dynamic_slice_sizes={1,64,2} b = f32[64,2] reshape(ds1) ROOT d = f16[64,64] dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter())))); } TEST_F(GemmFusionTest, DoNotFuseDynamicSliceOfNonMajorFragments) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { dot_lhs = f32[2,4]{1,0} parameter(0) dynamic_slice_input = f32[4,5,2]{2,1,0} parameter(1) c0 = s32[] constant(0) c2 = s32[] constant(2) dynamic_slice = f32[4,1,2]{2,1,0} dynamic-slice(dynamic_slice_input, c0, c2, c0), dynamic_slice_sizes={4,1,2} reshape = f32[4,2]{1,0} reshape(dynamic_slice) ROOT dot = f32[4,4]{1,0} dot(dot_lhs, reshape), lhs_contracting_dims={0}, rhs_contracting_dims={1} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value()); } TEST_F(GemmFusionTest, CanFuseDynamicSliceOfContractingDimIfItIsMajor) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { dot_lhs = f32[2,4]{1,0} parameter(0) dynamic_slice_input = f32[5,5]{1,0} parameter(1) start_index0 = s32[] constant(2) start_index1 = s32[] constant(0) dynamic_slice = f32[2,5]{1,0} dynamic-slice(dynamic_slice_input, start_index0, start_index1), dynamic_slice_sizes={2,5} ROOT d = f32[4,5]{1,0} dot(dot_lhs, dynamic_slice), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Constant(), m::Constant())))); } TEST_F(GemmFusionTest, 
SliceToDegenerateIsSkipped) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p = f32[3] parameter(0) s = f32[1] slice(p), slice={[2:3]} r = f32[] reshape(s) b = f32[3,3] broadcast(r), dimensions={} ROOT d = f32[3,3] dot(b, b), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; ASSERT_TRUE(GemmFusion(cc).Run(module.get()).value()); MatchHloModule(*module, R"( ; CHECK-NOT: slice ; CHECK: ENTRY ; CHECK: slice )"); } TEST_F(GemmFusionTest, MultipleUsesAreHandled) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { c = f32[] constant(1) b = f32[6,8] broadcast(c), dimensions={} p0 = f32[6,8] parameter(0) a1 = f32[6,8] add(p0, b) e = f32[6,8] exponential(a1) a2 = f32[6,8] add(e, b) d = f32[6,8] divide(b, a2) p2 = f16[8,6] parameter(1) cv = f32[8,6] convert(p2) ROOT r = f32[6,6] dot(d, cv), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, BinaryElementwiseOfBroadcastIsFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p2 = f32[3072] parameter(2) b = f32[8192,3072] broadcast(p2), dimensions={1} p0 = f16[8192,3072] parameter(0) p0c = f32[8192,3072] convert(p0) a = f32[8192,3072] add(p0c, b) p1 = f32[3072,768] parameter(1) ROOT r = f32[8192,768] dot(a, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, BinaryElementwiseOfUnsupportedBroadcastIsNotFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p2 = f32[768] parameter(2) b = f32[8192,768,4] broadcast(p2), dimensions={1} s = f32[8192,3072] bitcast(b) p0 = f16[8192,3072] parameter(0) p0c = f32[8192,3072] convert(p0) a = f32[8192,3072] add(p0c, s) p1 = f32[3072,768] parameter(1) ROOT r = f32[8192,768] dot(a, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value()); } class GemmFusionLevel2Test : public GemmFusionTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GemmFusionTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_triton_fusion_level(2); return debug_options; } }; TEST_F(GemmFusionTest, ConcatenationDivisibleBy64IsFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = bf16[8192,1]{1,0} parameter(0) p1 = bf16[2752,8192]{1,0} parameter(1) p2 = bf16[2752,8192]{1,0} parameter(2) concat = bf16[5504,8192]{1,0} concatenate(p1, p2), dimensions={0} bitcast = bf16[8192,5504]{0,1} bitcast(concat) ROOT r = f32[1,5504]{1,0} dot(p0, bitcast), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; 
EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionLevel2Test, ReshapeToScalarIsHandled) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = s8[5,3] parameter(0) c = f16[5,3] convert(p0) p1 = f16[1] parameter(1) r = f16[] reshape(p1) b = f16[5,7] broadcast(r) ROOT d = f16[3,7] dot(c, b), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionLevel2Test, DoNotFuseIncompatibleDimensionSplits) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p1 = s8[5,7,2,3]{3,2,1,0} parameter(1) t1 = s8[7,5,2,3]{3,2,1,0} transpose(p1), dimensions={1,0,2,3} r1 = s8[7,30]{1,0} reshape(t1) cvt = f16[7,30]{1,0} convert(r1) p2 = f16[2,7,5,3]{3,2,1,0} parameter(2) t2 = f16[7,2,5,3]{3,2,1,0} transpose(p2), dimensions={1,0,2,3} r2 = f16[7,30]{1,0} reshape(t2) a = f16[7,30]{1,0} add(cvt, r2) p0 = f16[7,79]{1,0} parameter(0) ROOT dot = f16[30,79]{1,0} dot(a, p0), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Transpose(), m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionLevel2Test, DoNotFuseTooManyParameters) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { tmp_0 = f32[] constant(1) tmp_1 = f32[3,49]{1,0} broadcast(tmp_0), dimensions={} tmp_2 = f32[3,49]{1,0} parameter(6) tmp_3 = f32[] constant(0) tmp_4 = f32[3,49]{1,0} broadcast(tmp_3), dimensions={} tmp_5 = pred[3,49]{1,0} compare(tmp_2, tmp_4), direction=GT tmp_6 = f32[3,49]{1,0} convert(tmp_5) tmp_7 = f32[3,49]{1,0} subtract(tmp_1, tmp_6) tmp_8 = s32[] parameter(13) tmp_9 = f32[] convert(tmp_8) tmp_10 = f32[] maximum(tmp_9, tmp_0) tmp_11 = f32[] divide(tmp_3, tmp_10) tmp_12 = f32[3,49]{1,0} broadcast(tmp_11), dimensions={} tmp_13 = pred[3,49]{1,0} parameter(7) tmp_14 = pred[3,49]{1,0} parameter(10) tmp_15 = pred[3,49]{1,0} and(tmp_13, tmp_14) tmp_16 = f32[3,49]{1,0} convert(tmp_15) tmp_17 = f32[3,49]{1,0} multiply(tmp_12, tmp_16) tmp_18 = f32[3,49]{1,0} negate(tmp_17) tmp_19 = f32[3,49]{1,0} multiply(tmp_7, tmp_18) tmp_20 = f32[3,49]{1,0} parameter(19) tmp_21 = f32[3,49]{1,0} subtract(tmp_1, tmp_20) tmp_22 = f32[3,49]{1,0} divide(tmp_19, tmp_21) tmp_23 = f32[3,49]{1,0} negate(tmp_22) tmp_24 = f32[3,49]{1,0} negate(tmp_6) tmp_25 = f32[3,49]{1,0} multiply(tmp_24, tmp_17) tmp_26 = f32[3,49]{1,0} divide(tmp_25, tmp_20) tmp_27 = f32[3,49]{1,0} add(tmp_23, tmp_26) tmp_28 = f32[3,49]{1,0} parameter(18) tmp_29 = f32[3,49]{1,0} multiply(tmp_27, tmp_28) tmp_30 = f32[3,49]{1,0} parameter(17) tmp_31 = f32[3,49]{1,0} multiply(tmp_29, tmp_30) tmp_32 = f32[3,49]{1,0} parameter(16) tmp_33 = f32[3,49]{1,0} multiply(tmp_31, tmp_32) tmp_34 = f32[3,49]{1,0} parameter(15) tmp_35 = f32[3,49]{1,0} add(tmp_33, tmp_34) tmp_36 = f32[3,49]{1,0} parameter(14) tmp_37 = f32[3,49]{1,0} add(tmp_35, tmp_36) tmp_38 = f32[1,1]{1,0} constant({ {0} }) tmp_39 = f32[1,1]{1,0} broadcast(tmp_38), dimensions={0,1} tmp_40 = f32[] reshape(tmp_39) tmp_41 = f32[3,32]{1,0} broadcast(tmp_40), dimensions={} 
tmp_42 = u32[48]{0} parameter(11) tmp_43 = u32[48]{0} parameter(5) tmp_44 = u32[96]{0} concatenate(tmp_42, tmp_43), dimensions={0} tmp_45 = u32[3,32]{1,0} reshape(tmp_44) tmp_46 = u32[96]{0} reshape(tmp_45) tmp_47 = u32[] constant(1) tmp_48 = u32[3,32]{1,0} broadcast(tmp_47), dimensions={} tmp_49 = u32[96]{0} reshape(tmp_48) tmp_50 = u32[96]{0} shift-right-logical(tmp_46, tmp_49) tmp_51 = u32[3,32]{1,0} reshape(tmp_50) tmp_52 = u32[3,32]{1,0} or(tmp_51, tmp_48) tmp_53 = f32[3,32]{1,0} bitcast-convert(tmp_52) tmp_54 = f32[3,32]{1,0} broadcast(tmp_0), dimensions={} tmp_55 = f32[3,32]{1,0} subtract(tmp_53, tmp_54) tmp_56 = f32[1,1]{1,0} constant({ {1} }) tmp_57 = f32[1,1]{1,0} broadcast(tmp_56), dimensions={0,1} tmp_58 = f32[] reshape(tmp_57) tmp_59 = f32[3,32]{1,0} broadcast(tmp_58), dimensions={} tmp_60 = f32[3,32]{1,0} multiply(tmp_55, tmp_59) tmp_61 = f32[3,32]{1,0} add(tmp_60, tmp_41) tmp_62 = f32[3,32]{1,0} maximum(tmp_41, tmp_61) tmp_63 = f32[3,32]{1,0} broadcast(tmp_3), dimensions={} tmp_64 = pred[3,32]{1,0} compare(tmp_62, tmp_63), direction=LT tmp_65 = f32[3,32]{1,0} convert(tmp_64) tmp_66 = f32[3,49]{1,0} parameter(9) tmp_67 = f32[49]{0} parameter(4) tmp_68 = f32[3,49]{1,0} broadcast(tmp_67), dimensions={1} tmp_69 = f32[3,49]{1,0} add(tmp_66, tmp_68) tmp_70 = f32[1,49]{1,0} parameter(12) tmp_71 = f32[1,49]{1,0} broadcast(tmp_0), dimensions={} tmp_72 = f32[1,49]{1,0} divide(tmp_70, tmp_71) tmp_73 = f32[1,49]{1,0} broadcast(tmp_72), dimensions={0,1} tmp_74 = f32[49]{0} reshape(tmp_73) tmp_75 = f32[3,49]{1,0} broadcast(tmp_74), dimensions={1} tmp_76 = f32[3,49]{1,0} subtract(tmp_69, tmp_75) tmp_77 = f32[1,49]{1,0} parameter(3) tmp_78 = f32[1,49]{1,0} parameter(8) tmp_79 = f32[1,49]{1,0} divide(tmp_78, tmp_71) tmp_80 = f32[1,49]{1,0} multiply(tmp_72, tmp_72) tmp_81 = f32[1,49]{1,0} subtract(tmp_79, tmp_80) tmp_82 = f32[1,49]{1,0} add(tmp_81, tmp_71) tmp_83 = f32[1,49]{1,0} rsqrt(tmp_82) tmp_84 = f32[1,49]{1,0} multiply(tmp_77, tmp_83) tmp_85 = f32[1,49]{1,0} broadcast(tmp_84), dimensions={0,1} tmp_86 = f32[49]{0} reshape(tmp_85) tmp_87 = f32[3,49]{1,0} broadcast(tmp_86), dimensions={1} tmp_88 = f32[3,49]{1,0} multiply(tmp_76, tmp_87) tmp_89 = f32[1,49]{1,0} parameter(2) tmp_90 = f32[1,49]{1,0} broadcast(tmp_89), dimensions={0,1} tmp_91 = f32[49]{0} reshape(tmp_90) tmp_92 = f32[3,49]{1,0} broadcast(tmp_91), dimensions={1} tmp_93 = f32[3,49]{1,0} add(tmp_88, tmp_92) tmp_94 = f32[49,32]{1,0} parameter(1) tmp_95 = f32[3,32]{1,0} dot(tmp_93, tmp_94), lhs_contracting_dims={1}, rhs_contracting_dims={0} tmp_96 = f32[32]{0} parameter(0) tmp_97 = f32[3,32]{1,0} broadcast(tmp_96), dimensions={1} tmp_98 = f32[3,32]{1,0} add(tmp_95, tmp_97) tmp_99 = f32[3,32]{1,0} multiply(tmp_65, tmp_98) tmp_100 = f32[3,32]{1,0} divide(tmp_99, tmp_63) tmp_101 = f32[3,32]{1,0} maximum(tmp_100, tmp_63) ROOT tmp_102 = f32[49,32]{1,0} dot(tmp_37, tmp_101), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(), HloOpcode::kFusion); EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(), HloInstruction::FusionKind::kCustom); EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(), TritonFusionAnalysis::kMaxParameterPerDotOperand * 2); } TEST_F(GemmFusionLevel2Test, DoNotFuseTooManyParametersWhenAnInstructionWouldAddMultipleParameters) { static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4, "We have to update this test."); 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[3,49]{1,0} parameter(0) b = f32[3,49]{1,0} parameter(1) c = pred[3,49]{1,0} parameter(2) d = f32[3,49]{1,0} parameter(3) e = f32[3,49]{1,0} parameter(4) add0 = f32[3,49]{1,0} add(a, b) select = f32[3,49]{1,0} select(c, d, e) add1 = f32[3,49]{1,0} add(add0, select) f = f32[3,32]{1,0} parameter(5) ROOT tmp_102 = f32[49,32]{1,0} dot(add1, f), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(), HloOpcode::kFusion); EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(), HloInstruction::FusionKind::kCustom); EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(), TritonFusionAnalysis::kMaxParameterPerDotOperand + 1); } TEST_F(GemmFusionLevel2Test, DoNotFuseTooManyParametersForConcat) { static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4, "We have to update this test."); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[3,3]{1,0} parameter(0) b = f32[3,3]{1,0} parameter(1) c = f32[3,3]{1,0} parameter(2) d = f32[3,3]{1,0} parameter(3) e = f32[3,3]{1,0} parameter(4) f = f16[3,3]{1,0} parameter(5) concat = f32[15,3]{1,0} concatenate(a, b, c, d, e), dimensions={0} convert = f32[3,3]{1,0} convert(f) ROOT dot = f32[15,3]{1,0} dot(concat, convert), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(), HloOpcode::kFusion); EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(), HloInstruction::FusionKind::kCustom); EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(), TritonFusionAnalysis::kMaxParameterPerDotOperand + 1); } TEST_F(GemmFusionLevel2Test, InstructionsReachableFromMultipleOperandsAreHandledCorrectly) { static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4, "We have to update this test."); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[2,4]{1,0} parameter(0) b = f32[2,4]{1,0} parameter(1) c = f32[2,4]{1,0} parameter(2) d = f32[2,4]{1,0} parameter(3) e = f32[2,4]{1,0} parameter(4) add0 = f32[2,4]{1,0} add(a, b) add1 = f32[2,4]{1,0} add(add0, c) add2 = f32[2,4]{1,0} add(add1, d) add3 = f32[2,4]{1,0} add(add2, e) ROOT r = f32[2,2]{1,0} dot(add3, add0), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); } TEST_F(GemmFusionLevel2Test, EachScopeIsFusedToASeparateSubgraph) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[2,4]{1,0} parameter(0) b = f32[2,4]{1,0} parameter(1) add = f32[2,4]{1,0} add(a, b) ROOT r = f32[2,2]{1,0} dot(add, add), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1) CHECK-DAG: %[[ADD0:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]]) CHECK-DAG: %[[P2:.*]] = f32[2,4]{1,0} parameter(2) CHECK-DAG: %[[P3:.*]] = f32[2,4]{1,0} parameter(3) CHECK-DAG: %[[ADD1:.*]] = f32[2,4]{1,0} 
add(f32[2,4]{1,0} %[[P2]], f32[2,4]{1,0} %[[P3]]) CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} dot(f32[2,4]{1,0} %[[ADD0]], f32[2,4]{1,0} %[[ADD1]]) CHECK: ENTRY CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1) CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} CHECK-SAME: fusion(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]], f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]]), CHECK-SAME: kind=kCustom CHECK-SAME: __triton_gemm })"); } TEST_F(GemmFusionLevel2Test, ParamNodesAreReusedIfTheyHaveTheSameIterSpec) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[2,4]{1,0} parameter(0) add = f32[2,4]{1,0} add(a, a) ROOT r = f32[2,2]{1,0} dot(add, add), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0) CHECK-DAG: %[[ADD0:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P0]]) CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1) CHECK-DAG: %[[ADD1:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P1]], f32[2,4]{1,0} %[[P1]]) CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} dot(f32[2,4]{1,0} %[[ADD0]], f32[2,4]{1,0} %[[ADD1]]) CHECK: ENTRY CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0) CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} CHECK-SAME: fusion(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P0]]) CHECK-SAME: kind=kCustom CHECK-SAME: __triton_gemm })"); } TEST_F(GemmFusionLevel2Test, NonParamNodesAreReusedIfTheyHaveTheSameIterSpec) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[4,4]{1,0} parameter(0) b = f32[4,4]{1,0} parameter(1) negate = f32[4,4]{1,0} negate(a) sine = f32[4,4]{1,0} sine(negate) add = f32[4,4]{1,0} add(negate, sine) ROOT r = f32[4,4]{1,0} dot(add, b), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1) CHECK-DAG: %[[NEGATE:.*]] = f32[4,4]{1,0} negate(f32[4,4]{1,0} %[[P0]]) CHECK-DAG: %[[SINE:.*]] = f32[4,4]{1,0} sine(f32[4,4]{1,0} %[[NEGATE]]) CHECK-DAG: %[[ADD:.*]] = f32[4,4]{1,0} add(f32[4,4]{1,0} %[[NEGATE]], f32[4,4]{1,0} %[[SINE]]) CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0} dot(f32[4,4]{1,0} %[[ADD]], f32[4,4]{1,0} %[[P1]]) CHECK: ENTRY CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1) CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0} CHECK-SAME: fusion(f32[4,4]{1,0} %[[P0]], f32[4,4]{1,0} %[[P1]]) CHECK-SAME: kind=kCustom CHECK-SAME: __triton_gemm })"); } TEST_F(GemmFusionLevel2Test, NodesAreNotReusedIfTheyHaveDifferentIterSpecs) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[4,4]{1,0} parameter(0) b = f32[4,4]{1,0} parameter(1) tr_a = f32[4,4]{1,0} transpose(a), dimensions={1,0} add = f32[4,4]{1,0} add(a, tr_a) ROOT r = f32[4,4]{1,0} dot(add, b), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1) CHECK-DAG: %[[P2:.*]] = f32[4,4]{1,0} parameter(2) CHECK-DAG: %[[TRANSPOSE:.*]] = f32[4,4]{1,0} transpose(f32[4,4]{1,0} %[[P1]]) CHECK-DAG: %[[ADD:.*]] = f32[4,4]{1,0} 
add(f32[4,4]{1,0} %[[P0]], f32[4,4]{1,0} %[[TRANSPOSE]]) CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0} dot(f32[4,4]{1,0} %[[ADD]], f32[4,4]{1,0} %[[P2]]) CHECK: ENTRY CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1) CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0} CHECK-SAME: fusion(f32[4,4]{1,0} %[[P0]], f32[4,4]{1,0} %[[P0]], f32[4,4]{1,0} %[[P1]]) CHECK-SAME: kind=kCustom CHECK-SAME: __triton_gemm })"); } TEST_F(GemmFusionLevel2Test, OperationsAddingMoreParametersGetMultipleTries) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = f32[2,2] parameter(0) c0 = f32[] constant(12345) b0 = f32[2,2] broadcast(c0), dimensions={} m0 = f32[2,2] multiply(p0, b0) c1 = f32[] constant(34567) b1 = f32[2,2] broadcast(c1), dimensions={} a0 = f32[2,2] add(m0, b1) b3 = f32[2,2,2] broadcast(a0), dimensions={0,1} p2 = f32[2,2,2] parameter(2) m2 = f32[2,2,2] multiply(p2, b3) p1 = f32[2]{0} parameter(1) c2 = f32[] constant(5678) b2 = f32[2] broadcast(c2), dimensions={} a1 = f32[2]{0} add(p1, b2) b4 = f32[2,2,2] broadcast(a1), dimensions={2} m1 = f32[2,2,2] multiply(m2, b4) b = f32[4,2] bitcast(m1) p3 = f16[2,2] parameter(3) p3c = f32[2,2] convert(p3) ROOT r = f32[4,2] dot(b, p3c), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter())))); } TEST_F(GemmFusionLevel2Test, GemmFusionBailsOutPreAmpere) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[2,53] parameter(0) p0e = f32[2,53] exponential(p0) p1 = s16[53,2] parameter(1) p1c = f32[53,2] convert(p1) ROOT dot = f32[2,2] dot(p0e, p1c), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_THAT( GemmFusion(se::CudaComputeCapability{se::CudaComputeCapability::VOLTA, 0}) .Run(module.get()), tsl::testing::StatusIs( absl::StatusCode::kFailedPrecondition, ::testing::HasSubstr("Triton support is only enabled for Ampere GPUs " "(compute capability 8.0) and up, but got"))); } TEST_F(GemmFusionLevel2Test, GemmFusionSucceedsOnNonCudaGpu) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[2,53] parameter(0) p0e = f32[2,53] exponential(p0) p1 = s16[53,2] parameter(1) p1c = f32[53,2] convert(p1) ROOT dot = f32[2,2] dot(p0e, p1c), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(se::RocmComputeCapability{}).Run(module.get()).ok()); } TEST_F(GemmFusionLevel2Test, ParameterUsedElementwiseTwiceIsFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( HloModule t ENTRY e { p0 = f32[2,35] parameter(0) p0n = f32[2,35] negate(p0) p0e = f32[2,35] exponential(p0) a = f32[2,35] add(p0e, p0n) p1 = f16[35,2] parameter(1) p1c = f32[35,2] convert(p1) ROOT dot = f32[2,2] dot(a, p1c), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter())))); TF_ASSERT_OK_AND_ASSIGN( const auto analysis, TritonFusionAnalysis::Execute(*module->entry_computation() ->root_instruction() ->called_computations()[0])); 
EXPECT_EQ(analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).size(), 1); EXPECT_EQ(analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).size(), 1); } TEST_F(GemmFusionLevel2Test, ParameterUsedNonElementwiseTwiceIsFusedOnBothPaths) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( HloModule t ENTRY e { p0 = f32[4,4] parameter(0) p0t = f32[4,4] transpose(p0), dimensions={1,0} a = f32[4,4] add(p0, p0t) p1 = f16[4,5] parameter(1) p1c = f32[4,5] convert(p1) ROOT dot = f32[4,5] dot(a, p1c), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter())))); } TEST_F(GemmFusionLevel2Test, ComputationParameterWithMultipleUsersIsNotTrivialToFuse) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[400,400] parameter(0) c0 = f16[400,400] convert(p0) p1 = f16[400,400] parameter(1) dot0 = f16[400,400] dot(c0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} c1 = f16[400,400] convert(p0) p2 = f16[400,400] parameter(2) dot1 = f16[400,400] dot(c1, p2), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT a = f16[400,400] add(dot0, dot1) })")); EXPECT_FALSE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); } TEST_F(GemmFusionLevel2Test, NarrowingConversionIsAlwaysBetterToFuse) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = s8[512,512] parameter(0) c0 = f16[512,512] convert(p0) p1 = f16[512,512] parameter(1) dot0 = f16[512,512] dot(c0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} n = f16[512,512] negate(c0) ROOT a = f16[512,512] add(dot0, n) })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Add(m::Fusion(m::Parameter(), m::Parameter()), m::Negate())))); } TEST_F(GemmFusionLevel2Test, NestedSlicingIsAnalyzedCorrectly) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( triton_gemm_d_computation { p0 = f32[6,24]{1,0} parameter(0) slice1 = f32[5,20]{1,0} slice(p0), slice={[1:6], [3:23]} n1 = f32[5,20]{1,0} negate(slice1) slice2 = f32[3,7]{1,0} slice(n1), slice={[1:4], [13:20]} p1 = f32[7,37]{1,0} parameter(1) ROOT d = f32[3,37]{1,0} dot(slice2, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[7,37]{1,0} parameter(0) p1 = f32[6,24]{1,0} parameter(1) ROOT triton_gemm_d = f32[3,37]{1,0} fusion(p1, p0), kind=kCustom, calls=triton_gemm_d_computation })")); const HloComputation* computation = module->entry_computation()->root_instruction()->called_computations()[0]; TF_ASSERT_OK_AND_ASSIGN(const auto analysis, TritonFusionAnalysis::Execute(*computation)); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, computation->parameter_instruction(0), 0), ElementsAre(FieldsAre(24, 6, 2, 3, ElementsAre(3)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, computation->parameter_instruction(0), 1), ElementsAre(FieldsAre(1, 24, 16, 7, ElementsAre(7)))); } TEST_F(GemmFusionLevel2Test, FusedConcatenationIsAnalyzedCorrectly) { 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = s8[153,1536] parameter(0) p1 = s8[153,128] parameter(1) p2 = s8[153,256] parameter(2) cat = s8[153,1920] concatenate(p0, p1, p2), dimensions={1} cvt = bf16[153,1920] convert(cat) p3 = bf16[16,153] parameter(3) ROOT d = bf16[16,1920] dot(p3, cvt), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter())))); const HloComputation* computation = module->entry_computation()->root_instruction()->called_computations()[0]; TF_ASSERT_OK_AND_ASSIGN(const auto analysis, TritonFusionAnalysis::Execute(*computation)); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(1), 0), ElementsAre(FieldsAre(1536, 153, 0, 153, ElementsAre(153)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(1), 1), ElementsAre(FieldsAre(1, 1536, 0, 1536, ElementsAre(1536)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(2), 0), ElementsAre(FieldsAre(128, 153, 0, 153, ElementsAre(153)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(2), 1), ElementsAre(FieldsAre(1, 128, -1536, 128, ElementsAre(128)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(3), 0), ElementsAre(FieldsAre(256, 153, 0, 153, ElementsAre(153)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(3), 1), ElementsAre(FieldsAre(1, 256, -1536 - 128, 256, ElementsAre(256)))); } TEST_F(GemmFusionLevel2Test, IndivisibleConcatenationIsNotFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = s8[124,1024] parameter(0) p1 = s8[124,1001] parameter(1) cat = s8[124,2025] concatenate(p0, p1), dimensions={1} cvt = f16[124,2025] convert(cat) p2 = f16[123,124] parameter(2) ROOT d = f16[2025,123] dot(cvt, p2), lhs_contracting_dims={0}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Concatenate(), m::Parameter())))); } TEST_F(GemmFusionLevel2Test, ConcatenationOfContractingIsNotFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = s8[124,1024] parameter(0) p1 = s8[124,1024] parameter(1) cat = s8[124,2048] concatenate(p0, p1), dimensions={1} cvt = f16[124,2048] convert(cat) p2 = f16[123,2048] parameter(2) ROOT d = f16[124,123] dot(cvt, p2), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Concatenate(), m::Parameter())))); } TEST_F(GemmFusionLevel2Test, ConcatenationOfBatchIsNotFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = s8[124,1024,50] parameter(0) p1 = s8[124,1024,50] parameter(1) cat = s8[124,2048,50] 
concatenate(p0, p1), dimensions={1} cvt = f16[124,2048,50] convert(cat) p2 = f16[123,2048,50] parameter(2) ROOT d = f16[2048,124,123] dot(cvt, p2), lhs_batch_dims={1}, rhs_batch_dims={1}, lhs_contracting_dims={2}, rhs_contracting_dims={2} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Concatenate(), m::Parameter())))); } TEST_F(GemmFusionLevel2Test, DifferentConcatenationOfSameParametersIsFusedViaNodeDuplication) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = s8[128,2] parameter(0) p1 = s8[128,2] parameter(1) cat0 = s8[256,2] concatenate(p0, p1), dimensions={0} cvt0 = f16[256,2] convert(cat0) cat1 = s8[256,2] concatenate(p1, p0), dimensions={0} n1 = s8[256,2] negate(cat1) cvt1 = f16[256,2] convert(n1) a = f16[256,2] add(cvt1, cvt0) p2 = f16[2,18] parameter(2) ROOT d = f16[18,256] dot(p2, a), lhs_contracting_dims={0}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter())))); } TEST_F(GemmFusionTest, CopiesDotMetadataToFusionOp) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f16[2,18] parameter(0) p1 = f16[256,2] parameter(1) ROOT d = f16[18,256] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={1}, metadata={op_name="foo"} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_EQ( module->entry_computation()->root_instruction()->metadata().op_name(), "foo"); } TEST_F(GemmFusionTest, FusesBroadcastOfScalarEpilogues) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f16[2,18] parameter(0) p1 = f16[256,2] parameter(1) d = f16[18,256] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={1} p2 = f16[1] parameter(2) p3 = f16[1] parameter(3) m0 = f16[1] multiply(f16[1] p2, f16[1] p3) bc = f16[] bitcast(m0) b = f16[18,256] broadcast(f16[] bc) ROOT m = f16[18,256] multiply(d, b) })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter())))); } class SmallDotGemmFusionTest : public GemmFusionTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GemmFusionTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_gemm_rewrite_size_threshold(100); return debug_options; } }; TEST_F(SmallDotGemmFusionTest, SkipSmallMatrixMultiplicationRewrite) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f16[2,10] parameter(0) p1 = f16[10,2] parameter(1) ROOT d = f16[10,10] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={1} })") .value(); EXPECT_FALSE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( ; CHECK-LABEL: ENTRY %e ({{.*}}: f16[2,10], {{.*}}: f16[10,2]) -> f16[10,10] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f16[2,10]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f16[10,2]{1,0} parameter(1) ; CHECK: ROOT {{.*}} = f16[10,10]{1,0} dot(f16[2,10]{1,0} [[P0]], f16[10,2]{1,0} [[P1]]) })"); } TEST_F(SmallDotGemmFusionTest, LargeMatrixMultiplicationIsRewritten) { auto 
module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f16[2,18] parameter(0) p1 = f16[50,2] parameter(1) ROOT d = f16[18,50] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={1} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( ; CHECK-LABEL: ENTRY %e ({{.*}}: f16[2,18], {{.*}}: f16[50,2]) -> f16[18,50] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f16[2,18]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f16[50,2]{1,0} parameter(1) ; CHECK: ROOT {{.*}} = f16[18,50]{1,0} ; CHECK: fusion(f16[2,18]{1,0} [[P0]], f16[50,2]{1,0} [[P1]]), ; CHECK: kind=kCustom ; CHECK: __triton_gemm })"); } class SparseDotTest : public GemmFusionTest {}; TEST_F(SparseDotTest, DotWithSparseLhsOperandIsRewritten) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test ENTRY main { lhs = f16[2,16] parameter(0) rhs = f16[32,2] parameter(1) meta = u16[2,2] parameter(2) ROOT dot = f32[2,2] dot(lhs, rhs, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4 })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( ; CHECK-LABEL: ENTRY %main ({{.*}}: f16[2,16], {{.*}}: f16[32,2], {{.*}}: u16[2,2]) -> f32[2,2] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f16[2,16]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f16[32,2]{1,0} parameter(1) ; CHECK-NEXT: [[META:%[^ ]+]] = u16[2,2]{1,0} parameter(2) ; CHECK: ROOT {{.*}} = f32[2,2]{1,0} ; CHECK-SAME: fusion(f16[2,16]{1,0} [[P0]], f16[32,2]{1,0} [[P1]], u16[2,2]{1,0} [[META]]), ; CHECK-SAME: kind=kCustom ; CHECK-SAME: __triton_gemm })"); } TEST_F(SparseDotTest, DotWithSparseRhsOperandIsNotSupported) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test ENTRY main { lhs = f16[2,32] parameter(0) rhs = f16[16,2] parameter(1) meta = u16[2,2] parameter(2) ROOT dot = f32[2,2] dot(lhs, rhs, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=R.0@2:4 })") .value(); auto result = GemmFusion(gpu_version_).Run(module.get()); EXPECT_FALSE(result.ok()); } TEST_F(SparseDotTest, UnsupportedSparsityType) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test ENTRY main { lhs = f16[2,8] parameter(0) rhs = f16[32,2] parameter(1) meta = u16[2,1] parameter(2) ROOT dot = f32[2,2] dot(lhs, rhs, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@1:4 })") .value(); auto result = GemmFusion(gpu_version_).Run(module.get()); EXPECT_FALSE(result.ok()); } TEST_F(SmallDotGemmFusionTest, Int4DotIsRewritten) { constexpr auto kInt4Dot = R"( ENTRY e { p0 = s8[16,16] parameter(0) p1 = s4[16,16] parameter(1) p1c = bf16[16,16] convert(p1) ROOT dot = bf16[16,16] dot(p0, p1c), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kInt4Dot)); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); } TEST_F(SmallDotGemmFusionTest, Int4ConcatPlusConvertIsRewritten) { const std::string kInt4Dot = R"( ENTRY main { lhs1 = s4[4,1024]{1,0} parameter(0) lhs2 = s4[4,1024]{1,0} parameter(1) rhs = bf16[1024,4]{1,0} parameter(2) lhs_concat = s4[8,1024]{1,0} concatenate(lhs1, lhs2), dimensions={0} lhs_converted = bf16[8,1024]{1,0} convert(lhs_concat) ROOT dot = bf16[8,4]{1,0} dot(lhs_converted, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kInt4Dot)); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); 
MatchHloModule(*module, R"( CHECK: gemm_fusion_dot_computation CHECK: %parameter_0 = s4[8,1024]{1,0} parameter(0) CHECK: ENTRY CHECK-DAG: ROOT {{.*}} = bf16[8,4]{1,0} fusion(s4[8,1024]{1,0} %lhs_concat, bf16[1024,4]{1,0} %rhs) })"); } TEST_F(SmallDotGemmFusionTest, Int4ConvertPlusNegateIsRewritten) { const std::string kInt4Dot = R"( ENTRY main { lhs = s4[8,1024]{1,0} parameter(0) rhs = f32[1024,4]{1,0} parameter(1) lhs_converted = f32[8,1024]{1,0} convert(lhs) lhs_negated = f32[8,1024]{1,0} negate(lhs_converted) ROOT dot = f32[8,4]{1,0} dot(lhs_negated, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kInt4Dot)); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( CHECK: gemm_fusion_dot_computation CHECK: %parameter_0 = s4[8,1024]{1,0} parameter(0) CHECK: ENTRY CHECK-DAG: ROOT {{.*}} = f32[8,4]{1,0} fusion(s4[8,1024]{1,0} %lhs, f32[1024,4]{1,0} %rhs) })"); } TEST_F(SmallDotGemmFusionTest, Int4WithMinorBatchDimIsNotRewritten) { const std::string kInt4Dot = R"( ENTRY main { lhs = s4[8,1024,16]{2,1,0} parameter(0) lhs_converted = bf16[8,1024,16]{2,1,0} convert(lhs) rhs = bf16[16,1024,64]{2,1,0} parameter(1) ROOT dot = bf16[16,8,64]{2,1,0} dot(lhs_converted, rhs), lhs_batch_dims={2}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kInt4Dot)); TF_ASSERT_OK_AND_ASSIGN(auto result, GemmFusion(gpu_version_).Run(module.get())); EXPECT_FALSE(result); } } } }
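Several of the tests above pin compile-time limits with static_assert (e.g. on TritonFusionAnalysis::kMaxParameterPerDotOperand) so that a change to the limit breaks the build instead of leaving stale hard-coded expectations. A minimal sketch of that guard pattern, assuming GoogleTest is available; the constant here is a local stand-in, not the real XLA one.

#include <gtest/gtest.h>

namespace {

// Local stand-in for the limit the real tests read from
// TritonFusionAnalysis::kMaxParameterPerDotOperand.
constexpr int kMaxParameterPerDotOperand = 4;

// If the limit ever changes, compilation fails here, forcing the hard-coded
// expectations below to be revisited instead of silently going stale.
static_assert(kMaxParameterPerDotOperand == 4,
              "We have to update this test.");

TEST(LimitGuardSketch, FusionOperandCountRespectsLimit) {
  // Stand-in for a fusion root's operand count: with one limit per dot
  // operand, two dot operands allow at most twice the limit.
  const int operand_count = 7;
  EXPECT_LE(operand_count, kMaxParameterPerDotOperand * 2);
}

}  // namespace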
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemm_fusion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemm_fusion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
36f457be-4d7f-4734-a657-1bf3fcf01002
cpp
tensorflow/tensorflow
spectral_ops
tensorflow/core/ops/spectral_ops.cc
tensorflow/core/ops/spectral_ops_test.cc
#include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" namespace tensorflow { using shape_inference::DimensionHandle; using shape_inference::InferenceContext; using shape_inference::ShapeHandle; REGISTER_OP("FFT") .Input("input: Tcomplex") .Output("output: Tcomplex") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 1); }); REGISTER_OP("IFFT") .Input("input: Tcomplex") .Output("output: Tcomplex") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 1); }); REGISTER_OP("FFT2D") .Input("input: Tcomplex") .Output("output: Tcomplex") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 2); }); REGISTER_OP("IFFT2D") .Input("input: Tcomplex") .Output("output: Tcomplex") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 2); }); REGISTER_OP("FFT3D") .Input("input: Tcomplex") .Output("output: Tcomplex") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 3); }); REGISTER_OP("IFFT3D") .Input("input: Tcomplex") .Output("output: Tcomplex") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 3); }); REGISTER_OP("FFTND") .Input("input: Tcomplex") .Input("fft_length: int32") .Input("axes: int32") .Output("output: Tcomplex") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 1); }); REGISTER_OP("IFFTND") .Input("input: Tcomplex") .Input("fft_length: int32") .Input("axes: int32") .Output("output: Tcomplex") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 1); }); Status RFFTShape(InferenceContext* c, const bool forward, const int rank) { ShapeHandle out; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), rank, &out)); ShapeHandle unused_shape; DimensionHandle unused_dim; ShapeHandle fft_length_input = c->input(1); TF_RETURN_IF_ERROR(c->WithRank(fft_length_input, 1, &unused_shape)); TF_RETURN_IF_ERROR( c->WithValue(c->Dim(fft_length_input, 0), rank, &unused_dim)); const Tensor* fft_length_tensor = c->input_tensor(1); if (fft_length_tensor == nullptr) { for (int i = 0; i < rank; ++i) { TF_RETURN_IF_ERROR(c->ReplaceDim(out, -rank + i, c->UnknownDim(), &out)); } } else { auto fft_length_as_vec = fft_length_tensor->vec<int32>(); for (int i = 0; i < rank; ++i) { auto dim = forward && i == rank - 1 && fft_length_as_vec(i) != 0 ? 
fft_length_as_vec(i) / 2 + 1 : fft_length_as_vec(i); TF_RETURN_IF_ERROR(c->ReplaceDim(out, -rank + i, c->MakeDim(dim), &out)); } } c->set_output(0, out); return absl::OkStatus(); } REGISTER_OP("RFFT") .Input("input: Treal") .Input("fft_length: int32") .Output("output: Tcomplex") .Attr("Treal: {float32, float64} = DT_FLOAT") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, true, 1); }); REGISTER_OP("IRFFT") .Input("input: Tcomplex") .Input("fft_length: int32") .Output("output: Treal") .Attr("Treal: {float32, float64} = DT_FLOAT") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, false, 1); }); REGISTER_OP("RFFT2D") .Input("input: Treal") .Input("fft_length: int32") .Output("output: Tcomplex") .Attr("Treal: {float32, float64} = DT_FLOAT") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, true, 2); }); REGISTER_OP("IRFFT2D") .Input("input: Tcomplex") .Input("fft_length: int32") .Output("output: Treal") .Attr("Treal: {float32, float64} = DT_FLOAT") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, false, 2); }); REGISTER_OP("RFFT3D") .Input("input: Treal") .Input("fft_length: int32") .Output("output: Tcomplex") .Attr("Treal: {float32, float64} = DT_FLOAT") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, true, 3); }); REGISTER_OP("IRFFT3D") .Input("input: Tcomplex") .Input("fft_length: int32") .Output("output: Treal") .Attr("Treal: {float32, float64} = DT_FLOAT") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return RFFTShape(c, false, 3); }); REGISTER_OP("RFFTND") .Input("input: Treal") .Input("fft_length: int32") .Input("axes: int32") .Output("output: Tcomplex") .Attr("Treal: {float32, float64} = DT_FLOAT") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 1); }); REGISTER_OP("IRFFTND") .Input("input: Tcomplex") .Input("fft_length: int32") .Input("axes: int32") .Output("output: Treal") .Attr("Treal: {float32, float64} = DT_FLOAT") .Attr("Tcomplex: {complex64, complex128} = DT_COMPLEX64") .SetShapeFn([](InferenceContext* c) { return shape_inference::UnchangedShapeWithRankAtLeast(c, 1); }); REGISTER_OP("BatchFFT") .Input("input: complex64") .Output("output: complex64") .SetShapeFn(shape_inference::UnknownShape) .Deprecated(15, "Use FFT"); REGISTER_OP("BatchIFFT") .Input("input: complex64") .Output("output: complex64") .SetShapeFn(shape_inference::UnknownShape) .Deprecated(15, "Use IFFT"); REGISTER_OP("BatchFFT2D") .Input("input: complex64") .Output("output: complex64") .SetShapeFn(shape_inference::UnknownShape) .Deprecated(15, "Use FFT2D"); REGISTER_OP("BatchIFFT2D") .Input("input: complex64") .Output("output: complex64") .SetShapeFn(shape_inference::UnknownShape) .Deprecated(15, "Use IFFT2D"); REGISTER_OP("BatchFFT3D") .Input("input: complex64") .Output("output: complex64") .SetShapeFn(shape_inference::UnknownShape) .Deprecated(15, "Use FFT3D"); REGISTER_OP("BatchIFFT3D") .Input("input: complex64") .Output("output: complex64") .SetShapeFn(shape_inference::UnknownShape) .Deprecated(15, "Use IFFT3D"); }
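RFFTShape above encodes the Hermitian-symmetry size rule: a forward real-to-complex transform keeps only the non-redundant half of the spectrum, so the innermost transformed dimension becomes fft_length/2 + 1, while every other transformed dimension, and every dimension of an inverse transform, is just fft_length. A self-contained sketch of that arithmetic (the helper name is hypothetical), matching the expectations in the shape-inference tests that follow:

#include <cstdint>
#include <iostream>
#include <vector>

// Expected sizes of the transformed dimensions, mirroring RFFTShape: only
// the innermost dimension of a forward transform is halved (plus one).
std::vector<int64_t> TransformedDims(const std::vector<int32_t>& fft_length,
                                     bool forward) {
  const int rank = static_cast<int>(fft_length.size());
  std::vector<int64_t> dims(rank);
  for (int i = 0; i < rank; ++i) {
    const bool innermost = (i == rank - 1);
    dims[i] = (forward && innermost && fft_length[i] != 0)
                  ? fft_length[i] / 2 + 1
                  : fft_length[i];
  }
  return dims;
}

int main() {
  // Matches the test expectations below: RFFT with fft_length={10} or {11}
  // yields [6], {12} yields [7]; IRFFT always keeps fft_length itself.
  for (int32_t n : {10, 11, 12}) {
    std::cout << "fft_length=" << n
              << " forward=" << TransformedDims({n}, true)[0]
              << " inverse=" << TransformedDims({n}, false)[0] << "\n";
  }
}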
#include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" namespace tensorflow { TEST(MathOpsTest, FFT_ShapeFn) { for (const auto* op_name : {"FFT", "IFFT"}) { ShapeInferenceTestOp op(op_name); INFER_OK(op, "?", "in0"); INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]"); INFER_OK(op, "[?]", "in0"); INFER_OK(op, "[1]", "in0"); INFER_OK(op, "[1,2,3,4,5,6,7]", "in0"); } for (const auto* op_name : {"FFT2D", "IFFT2D"}) { ShapeInferenceTestOp op(op_name); INFER_OK(op, "?", "in0"); INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1]"); INFER_OK(op, "[?,1]", "in0"); INFER_OK(op, "[1,2]", "in0"); INFER_OK(op, "[1,2,3,4,5,6,7]", "in0"); } for (const auto* op_name : {"FFT3D", "IFFT3D"}) { ShapeInferenceTestOp op(op_name); INFER_OK(op, "?", "in0"); INFER_ERROR("Shape must be at least rank 3 but is rank 2", op, "[1,2]"); INFER_OK(op, "[?,1,?]", "in0"); INFER_OK(op, "[1,2,3]", "in0"); INFER_OK(op, "[1,2,3,4,5,6,7]", "in0"); } } TEST(MathOpsTest, RFFT_ShapeFn) { for (const bool forward : {true, false}) { ShapeInferenceTestOp op(forward ? "RFFT" : "IRFFT"); INFER_OK(op, "?;?", "?"); INFER_OK(op, "?;[1]", "?"); INFER_OK(op, "[1];?", "[?]"); INFER_OK(op, "[1];[1]", "[?]"); INFER_OK(op, "[?];[1]", "[?]"); INFER_OK(op, "[1,2,3,4];[1]", "[d0_0,d0_1,d0_2,?]"); INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];?"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[1];[1,1]"); INFER_ERROR("Dimension must be 1 but is 2", op, "[1];[2]"); op.input_tensors.resize(2); Tensor fft_length = test::AsTensor<int32>({10}); op.input_tensors[1] = &fft_length; if (forward) { INFER_OK(op, "[?];[1]", "[6]"); INFER_OK(op, "[1];[1]", "[6]"); INFER_OK(op, "[1,1];[1]", "[d0_0,6]"); } else { INFER_OK(op, "[?];[1]", "[10]"); INFER_OK(op, "[1];[1]", "[10]"); INFER_OK(op, "[1,1];[1]", "[d0_0,10]"); } fft_length = test::AsTensor<int32>({11}); if (forward) { INFER_OK(op, "[?];[1]", "[6]"); INFER_OK(op, "[1];[1]", "[6]"); INFER_OK(op, "[1,1];[1]", "[d0_0,6]"); } else { INFER_OK(op, "[?];[1]", "[11]"); INFER_OK(op, "[1];[1]", "[11]"); INFER_OK(op, "[1,1];[1]", "[d0_0,11]"); } fft_length = test::AsTensor<int32>({12}); if (forward) { INFER_OK(op, "[?];[1]", "[7]"); INFER_OK(op, "[1];[1]", "[7]"); INFER_OK(op, "[1,1];[1]", "[d0_0,7]"); } else { INFER_OK(op, "[?];[1]", "[12]"); INFER_OK(op, "[1];[1]", "[12]"); INFER_OK(op, "[1,1];[1]", "[d0_0,12]"); } } for (const bool forward : {true, false}) { ShapeInferenceTestOp op(forward ? 
"RFFT2D" : "IRFFT2D"); INFER_OK(op, "?;?", "?"); INFER_OK(op, "?;[2]", "?"); INFER_OK(op, "[1,1];?", "[?,?]"); INFER_OK(op, "[1,1];[2]", "[?,?]"); INFER_OK(op, "[?,?];[2]", "[?,?]"); INFER_OK(op, "[1,2,3,4];[2]", "[d0_0,d0_1,?,?]"); INFER_ERROR("Shape must be at least rank 2 but is rank 0", op, "[];?"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[1,1];[1,1]"); INFER_ERROR("Dimension must be 2 but is 3", op, "[1,1];[3]"); op.input_tensors.resize(2); Tensor fft_length = test::AsTensor<int32>({9, 10}); op.input_tensors[1] = &fft_length; if (forward) { INFER_OK(op, "[?,?];[2]", "[9,6]"); INFER_OK(op, "[1,1];[2]", "[9,6]"); INFER_OK(op, "[1,1,1];[2]", "[d0_0,9,6]"); } else { INFER_OK(op, "[?,?];[2]", "[9,10]"); INFER_OK(op, "[1,1];[2]", "[9,10]"); INFER_OK(op, "[1,1,1];[2]", "[d0_0,9,10]"); } fft_length = test::AsTensor<int32>({10, 11}); if (forward) { INFER_OK(op, "[?,?];[2]", "[10,6]"); INFER_OK(op, "[1,1];[2]", "[10,6]"); INFER_OK(op, "[1,1,1];[2]", "[d0_0,10,6]"); } else { INFER_OK(op, "[?,?];[2]", "[10,11]"); INFER_OK(op, "[1,1];[2]", "[10,11]"); INFER_OK(op, "[1,1,1];[2]", "[d0_0,10,11]"); } fft_length = test::AsTensor<int32>({11, 12}); if (forward) { INFER_OK(op, "[?,?];[2]", "[11,7]"); INFER_OK(op, "[1,1];[2]", "[11,7]"); INFER_OK(op, "[1,1,1];[2]", "[d0_0,11,7]"); } else { INFER_OK(op, "[?,?];[2]", "[11,12]"); INFER_OK(op, "[1,1];[2]", "[11,12]"); INFER_OK(op, "[1,1,1];[2]", "[d0_0,11,12]"); } } for (const bool forward : {true, false}) { ShapeInferenceTestOp op(forward ? "RFFT3D" : "IRFFT3D"); INFER_OK(op, "?;?", "?"); INFER_OK(op, "?;[3]", "?"); INFER_OK(op, "[1,1,1];?", "[?,?,?]"); INFER_OK(op, "[1,1,1];[3]", "[?,?,?]"); INFER_OK(op, "[?,?,?];[3]", "[?,?,?]"); INFER_OK(op, "[1,2,3,4];[3]", "[d0_0,?,?,?]"); INFER_ERROR("Shape must be at least rank 3 but is rank 0", op, "[];?"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[1,1,1];[1,1]"); INFER_ERROR("Dimension must be 3 but is 4", op, "[1,1,1];[4]"); op.input_tensors.resize(2); Tensor fft_length = test::AsTensor<int32>({10, 11, 12}); op.input_tensors[1] = &fft_length; if (forward) { INFER_OK(op, "[?,?,?];[3]", "[10,11,7]"); INFER_OK(op, "[1,1,1];[3]", "[10,11,7]"); INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,10,11,7]"); } else { INFER_OK(op, "[?,?,?];[3]", "[10,11,12]"); INFER_OK(op, "[1,1,1];[3]", "[10,11,12]"); INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,10,11,12]"); } fft_length = test::AsTensor<int32>({11, 12, 13}); if (forward) { INFER_OK(op, "[?,?,?];[3]", "[11,12,7]"); INFER_OK(op, "[1,1,1];[3]", "[11,12,7]"); INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,11,12,7]"); } else { INFER_OK(op, "[?,?,?];[3]", "[11,12,13]"); INFER_OK(op, "[1,1,1];[3]", "[11,12,13]"); INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,11,12,13]"); } fft_length = test::AsTensor<int32>({12, 13, 14}); if (forward) { INFER_OK(op, "[?,?,?];[3]", "[12,13,8]"); INFER_OK(op, "[1,1,1];[3]", "[12,13,8]"); INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,12,13,8]"); } else { INFER_OK(op, "[?,?,?];[3]", "[12,13,14]"); INFER_OK(op, "[1,1,1];[3]", "[12,13,14]"); INFER_OK(op, "[1,1,1,1];[3]", "[d0_0,12,13,14]"); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/spectral_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/spectral_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea