Use upstream Unique
will-cromar committed Nov 10, 2023
1 parent 0146c1b commit 9b99aed
Showing 6 changed files with 5 additions and 67 deletions.
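The call sites changed below keep the same usage pattern; only the type moves from the local runtime::util::Unique helper to the upstream torch::lazy::Unique declared in torch/csrc/lazy/core/unique.h. As a reading aid, here is a minimal C++ sketch of that pattern, assuming the upstream class exposes the same set(), operator bool, and dereference interface the deleted helper did; GetUniqueDevice and the backend_device.h include path are illustrative, not part of this commit.

// Sketch only, not part of this commit: the device-deduplication pattern
// used by the call sites in this diff, written against the upstream
// torch::lazy::Unique. Assumes Unique<T> provides set(), operator bool,
// and operator*, matching how the changed code uses it.
#include <torch/csrc/lazy/backend/backend_device.h>  // assumed path for BackendDevice
#include <torch/csrc/lazy/core/unique.h>

#include <optional>
#include <vector>

std::optional<torch::lazy::BackendDevice> GetUniqueDevice(
    const std::vector<torch::lazy::BackendDevice>& devices) {
  torch::lazy::Unique<torch::lazy::BackendDevice> unique_device;
  for (const torch::lazy::BackendDevice& device : devices) {
    // set() records the first device and checks that every later device
    // compares equal to it.
    unique_device.set(device);
  }
  if (!unique_device) {
    return std::nullopt;  // empty input: no device was recorded
  }
  return *unique_device;
}

This mirrors how CollectSyncTensors feeds every tensor's device into unique_device.set(), relying on the class to flag any mismatch rather than comparing devices by hand.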
2 changes: 0 additions & 2 deletions torch_xla/csrc/BUILD
@@ -118,10 +118,8 @@ ptxla_cc_library(
":layout_manager",
":shape_builder",
":shape_helper",
"//torch_xla/csrc/runtime:async_task",
"//torch_xla/csrc/runtime",
"//torch_xla/csrc/runtime:stablehlo_helper",
"//torch_xla/csrc/runtime:unique",
"//torch_xla/csrc/runtime:xla_util",
"@com_google_absl//absl/hash",
"@com_google_absl//absl/memory",
6 changes: 3 additions & 3 deletions torch_xla/csrc/debug_util.cpp
@@ -1,6 +1,7 @@
#include "torch_xla/csrc/debug_util.h"

#include <torch/csrc/lazy/core/hash.h>
#include <torch/csrc/lazy/core/unique.h>
#include <torch/csrc/lazy/python/python_util.h>

#include <fstream>
@@ -17,7 +18,6 @@
#include "torch_xla/csrc/ir_dump_util.h"
#include "torch_xla/csrc/runtime/debug_macros.h"
#include "torch_xla/csrc/runtime/sys_util.h"
#include "torch_xla/csrc/runtime/unique.h"
#include "torch_xla/csrc/xla_graph_executor.h"

namespace torch_xla {
@@ -61,7 +61,7 @@ std::string DebugUtil::GetTensorsGraphHlo(
     absl::Span<const XLATensorPtr> tensors, const std::vector<size_t>* indices,
     bool dump_stablehlo) {
   std::vector<torch::lazy::Value> root_values;
-  runtime::util::Unique<torch::lazy::BackendDevice> unique_device;
+  torch::lazy::Unique<torch::lazy::BackendDevice> unique_device;
   if (indices != nullptr) {
     for (auto index : *indices) {
       const XLATensorPtr& tensor = tensors[index];
@@ -91,7 +91,7 @@ std::string DebugUtil::GetTensorsGraphInfo(
   std::vector<const torch::lazy::Node*> root_nodes;
   std::vector<torch::lazy::Value> root_values;
   std::vector<torch::lazy::hash_t> root_hashes;
-  runtime::util::Unique<torch::lazy::BackendDevice> unique_device;
+  torch::lazy::Unique<torch::lazy::BackendDevice> unique_device;
   if (indices != nullptr) {
     for (auto index : *indices) {
       const XLATensorPtr& tensor = tensors[index];
9 changes: 0 additions & 9 deletions torch_xla/csrc/runtime/BUILD
@@ -273,15 +273,6 @@ cc_library(
     ],
 )
 
-cc_library(
-    name = "unique",
-    hdrs = ["unique.h"],
-    deps = [
-        ":debug_macros",
-        "@com_google_absl//absl/types:optional",
-    ],
-)
-
 cc_library(
     name = "util",
     hdrs = ["util.h"],
50 changes: 0 additions & 50 deletions torch_xla/csrc/runtime/unique.h

This file was deleted.

1 change: 0 additions & 1 deletion torch_xla/csrc/tensor.cpp
@@ -38,7 +38,6 @@
#include "torch_xla/csrc/runtime/pjrt_computation_client.h"
#include "torch_xla/csrc/runtime/sys_util.h"
#include "torch_xla/csrc/runtime/thread_pool.h"
#include "torch_xla/csrc/runtime/unique.h"
#include "torch_xla/csrc/runtime/xla_util.h"
#include "torch_xla/csrc/tensor_util.h"
#include "torch_xla/csrc/torch_util.h"
4 changes: 2 additions & 2 deletions torch_xla/csrc/xla_graph_executor.cpp
@@ -8,6 +8,7 @@
 #include <torch/csrc/lazy/core/lazy_graph_executor.h>
 #include <torch/csrc/lazy/core/metrics.h>
 #include <torch/csrc/lazy/core/tensor_util.h>
+#include <torch/csrc/lazy/core/unique.h>
 #include <torch/csrc/lazy/core/util.h>
 
 #include <algorithm>
@@ -47,7 +48,6 @@
#include "torch_xla/csrc/runtime/stablehlo_helper.h"
#include "torch_xla/csrc/runtime/sys_util.h"
#include "torch_xla/csrc/runtime/thread_pool.h"
#include "torch_xla/csrc/runtime/unique.h"
#include "torch_xla/csrc/runtime/xla_util.h"
#include "torch_xla/csrc/shape_helper.h"
#include "torch_xla/csrc/tensor_util.h"
@@ -534,7 +534,7 @@ XLAGraphExecutor::SyncTensorCollection XLAGraphExecutor::CollectSyncTensors(
     const std::vector<XLATensorPtr>& tensors, const SyncTensorsConfig& config) {
   tsl::profiler::TraceMe activity("CollectSyncTensors",
                                   tsl::profiler::TraceMeLevel::kInfo);
-  runtime::util::Unique<torch::lazy::BackendDevice> unique_device;
+  torch::lazy::Unique<torch::lazy::BackendDevice> unique_device;
   for (size_t i = 0; i < tensors.size(); ++i) {
     unique_device.set(tensors[i]->GetDevice());
   }