update openxla-pin to nov24
ManfeiBai committed Nov 27, 2023
1 parent 3385bd6 commit 785da81
Showing 7 changed files with 27 additions and 214 deletions.
5 changes: 2 additions & 3 deletions WORKSPACE
@@ -42,11 +42,10 @@ http_archive(
"//openxla_patches:constexpr_return.diff",
"//openxla_patches:gpu_race_condition.diff",
"//openxla_patches:f16_abi_clang.diff",
"//openxla_patches:gpu_topk_rewriter.diff",
],
strip_prefix = "xla-4f8381651977dff16b1d86bb4b198eb733c5f478",
strip_prefix = "xla-8744c9a94782cd7804f015e6d29df253437af3cb",
urls = [
"https://github.com/openxla/xla/archive/4f8381651977dff16b1d86bb4b198eb733c5f478.tar.gz",
"https://github.com/openxla/xla/archive/8744c9a94782cd7804f015e6d29df253437af3cb.tar.gz",
],
)
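
For context, here is a minimal sketch of how the pinned OpenXLA http_archive rule in WORKSPACE reads after this change. Only the patch list entries, strip_prefix, and urls visible in the hunk above are taken from the diff; the repository name, the http_archive load, and the patches attribute spelling follow Bazel's stock rule and are assumptions for this repository.

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "xla",  # assumed repository name, for illustration only
    patches = [
        # ... patch entries above the visible hunk window are unchanged ...
        "//openxla_patches:constexpr_return.diff",
        "//openxla_patches:gpu_race_condition.diff",
        "//openxla_patches:f16_abi_clang.diff",
        # gpu_topk_rewriter.diff is dropped by this commit
    ],
    strip_prefix = "xla-8744c9a94782cd7804f015e6d29df253437af3cb",
    urls = [
        "https://github.com/openxla/xla/archive/8744c9a94782cd7804f015e6d29df253437af3cb.tar.gz",
    ],
)

Pinning both strip_prefix and the archive URL to the same commit hash keeps the checkout reproducible when the pin is bumped.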

33 changes: 16 additions & 17 deletions openxla_patches/cache_urls.diff
@@ -1,5 +1,19 @@
diff --git a/third_party/llvm/workspace.bzl b/third_party/llvm/workspace.bzl
index d7f3a8093..a7af9c68a 100644
--- a/third_party/llvm/workspace.bzl
+++ b/third_party/llvm/workspace.bzl
@@ -13,7 +13,9 @@ def repo(name):
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
+ "https://storage.googleapis.com/tpu-pytorch/llvm-raw/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
+ "https://storage.googleapis.com/tpu-pytorch/llvm-raw/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
diff --git a/xla/mlir_hlo/WORKSPACE b/xla/mlir_hlo/WORKSPACE
index cc9eeb64f..b290eb455 100644
index c3115e33d..d315ad745 100644
--- a/xla/mlir_hlo/WORKSPACE
+++ b/xla/mlir_hlo/WORKSPACE
@@ -35,7 +35,10 @@ http_archive(
@@ -13,19 +27,4 @@ index cc9eeb64f..b290eb455 100644
+ ],
)

load("@llvm-raw//utils/bazel:configure.bzl", "llvm_configure", "llvm_disable_optional_support_deps")
load("@llvm-raw//utils/bazel:configure.bzl", "llvm_configure", "llvm_disable_optional_support_deps")
diff --git a/third_party/llvm/workspace.bzl b/third_party/llvm/workspace.bzl
index a4574d75d..f9ce37094 100644
--- a/third_party/llvm/workspace.bzl
+++ b/third_party/llvm/workspace.bzl
@@ -13,7 +13,9 @@ def repo(name):
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
+ "https://storage.googleapis.com/tpu-pytorch/llvm-raw/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
+ "https://storage.googleapis.com/tpu-pytorch/llvm-raw/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
load("@llvm-raw//utils/bazel:configure.bzl", "llvm_configure")
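
Taken together, cache_urls.diff still adds the storage.googleapis.com/tpu-pytorch/llvm-raw mirror to the LLVM download URLs; this commit regenerates the patch against the new XLA pin and moves the third_party/llvm/workspace.bzl hunk to the top of the file. As a rough sketch of the net effect (a fragment only, with the surrounding repo() boilerplate omitted), the urls list in third_party/llvm/workspace.bzl ends up as below once the patch applies; LLVM_COMMIT is defined earlier in that file, and Bazel falls back through the URLs in order if a download fails.

urls = [
    "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
    "https://storage.googleapis.com/tpu-pytorch/llvm-raw/{commit}.tar.gz".format(commit = LLVM_COMMIT),
    "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
    "https://storage.googleapis.com/tpu-pytorch/llvm-raw/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
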
6 changes: 3 additions & 3 deletions openxla_patches/constexpr_return.diff
@@ -1,12 +1,12 @@
diff --git a/xla/primitive_util.h b/xla/primitive_util.h
index 696147844..dfea15a4d 100644
index 63fa4e193..ab352626c 100644
--- a/xla/primitive_util.h
+++ b/xla/primitive_util.h
@@ -748,6 +748,7 @@ inline bool FitsInIntegralType(int64_t x, PrimitiveType ty) {
@@ -706,6 +706,7 @@ inline bool FitsInIntegralType(int64_t x, PrimitiveType ty) {
std::numeric_limits<NativeT>::max() >= x;
}
LOG(FATAL) << "Invalid primitive type " << PrimitiveType_Name(ty);
+ return false;
+ return false;
},
ty);
}
3 changes: 1 addition & 2 deletions openxla_patches/f16_abi_clang.diff
@@ -1,4 +1,3 @@
upstream CI will fail without this
diff --git a/xla/service/cpu/runtime_fp16.h b/xla/service/cpu/runtime_fp16.h
index 3f7af5197..ce4491c5d 100644
--- a/xla/service/cpu/runtime_fp16.h
@@ -16,4 +15,4 @@ index 3f7af5197..ce4491c5d 100644
+#if defined(__x86_64__)
// Older versions of Clang don't have _Float16. Since both float and _Float16
// are passed in the same register we can use the wider type and careful casting
// to conform to x86_64 psABI. This only works with the assumption that we're
// to conform to x86_64 psABI. This only works with the assumption that we're
8 changes: 4 additions & 4 deletions openxla_patches/gpu_race_condition.diff
@@ -1,14 +1,14 @@
diff --git a/xla/service/gpu/gpu_executable.cc b/xla/service/gpu/gpu_executable.cc
index 242961dd1..787275868 100644
index 1f9903cb3..763b7fc23 100644
--- a/xla/service/gpu/gpu_executable.cc
+++ b/xla/service/gpu/gpu_executable.cc
@@ -563,8 +563,7 @@ StatusOr<ExecutionOutput> GpuExecutable::ExecuteAsyncOnStreamImpl(
@@ -589,8 +589,7 @@ StatusOr<ExecutionOutput> GpuExecutable::ExecuteAsyncOnStreamImpl(
}

// Force synchronous execution if the allocator requires it.
- const bool block_host_until_done =
- !memory_allocator->AllowsAsynchronousDeallocation();
+ const bool block_host_until_done = true;


// Lock the GPU with a shared lock so that we don't interfere with autotuning
// Lock the GPU with a shared lock so that we don't interfere with autotuning
// that may be running during JIT compilation while allowing multiple XLA
184 changes: 0 additions & 184 deletions openxla_patches/gpu_topk_rewriter.diff

This file was deleted.

2 changes: 1 addition & 1 deletion setup.py
@@ -72,7 +72,7 @@

base_dir = os.path.dirname(os.path.abspath(__file__))

-_libtpu_version = '0.1.dev20231022'
+_libtpu_version = '0.1.dev20231125'
_libtpu_storage_path = f'https://storage.googleapis.com/cloud-tpu-tpuvm-artifacts/wheels/libtpu-nightly/libtpu_nightly-{_libtpu_version}-py3-none-any.whl'


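
The setup.py change is a version bump only: _libtpu_version moves from 0.1.dev20231022 to 0.1.dev20231125, and the nightly libtpu wheel URL is derived from it by the f-string shown above. Purely as an illustration of how that URL resolves for the new pin (not new code in the repository):

# Illustration: resolving the libtpu nightly wheel URL for the new version.
_libtpu_version = '0.1.dev20231125'
_libtpu_storage_path = f'https://storage.googleapis.com/cloud-tpu-tpuvm-artifacts/wheels/libtpu-nightly/libtpu_nightly-{_libtpu_version}-py3-none-any.whl'

print(_libtpu_storage_path)
# https://storage.googleapis.com/cloud-tpu-tpuvm-artifacts/wheels/libtpu-nightly/libtpu_nightly-0.1.dev20231125-py3-none-any.whl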
