From 181281753917742656c05a2880c64bc01d701355 Mon Sep 17 00:00:00 2001
From: Rui <179625410+rpsilva-aws@users.noreply.github.com>
Date: Mon, 9 Dec 2024 16:34:39 -0800
Subject: [PATCH] Modify the deprecation plan for XLA_USE_BF16 (#8474)

---
 torch_xla/csrc/dtype.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/torch_xla/csrc/dtype.cpp b/torch_xla/csrc/dtype.cpp
index 8f7be2eacb2..759c045f8f2 100644
--- a/torch_xla/csrc/dtype.cpp
+++ b/torch_xla/csrc/dtype.cpp
@@ -11,7 +11,7 @@ bool ShouldUseBF16() {
   bool use_bf16 = runtime::sys_util::GetEnvBool("XLA_USE_BF16", false);
   if (use_bf16) {
     std::cout
-        << "XLA_USE_BF16 will be deprecated after the 2.5 release, please "
+        << "XLA_USE_BF16 will be deprecated after the 2.6 release, please "
           "convert your model to bf16 directly\n";
     TF_LOG(INFO) << "Using BF16 data type for floating point values";
   }
@@ -23,7 +23,7 @@ bool ShouldDowncastToBF16() {
       runtime::sys_util::GetEnvBool("XLA_DOWNCAST_BF16", false);
   if (downcast_bf16) {
     std::cout
-        << "XLA_DOWNCAST_BF16 will be deprecated after the 2.5 release, please "
+        << "XLA_DOWNCAST_BF16 will be deprecated after the 2.6 release, please "
           "downcast your model directly\n";
     TF_LOG(INFO) << "Downcasting floating point values, F64->F32, F32->BF16";
   }