From cc417a8563ae186c94884904e70152d74ef24291 Mon Sep 17 00:00:00 2001
From: YangFei1990
Date: Tue, 28 Nov 2023 18:24:29 -0800
Subject: [PATCH] formatting

---
 torch_xla/distributed/zero_redundancy_optimizer.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/torch_xla/distributed/zero_redundancy_optimizer.py b/torch_xla/distributed/zero_redundancy_optimizer.py
index 337e154bf30..f00929eeb86 100644
--- a/torch_xla/distributed/zero_redundancy_optimizer.py
+++ b/torch_xla/distributed/zero_redundancy_optimizer.py
@@ -91,7 +91,9 @@ def init_zero(self):
         group = list(group)
       self.local_rank = group.index(self.global_rank)
     if self.local_rank is None:
-      raise ValueError(f"Current rank {self.global_rank} is missing from the sharding_groups {self.sharding_groups}")
+      raise ValueError(
+          f"Current rank {self.global_rank} is missing from the sharding_groups {self.sharding_groups}"
+      )
     # Shard parameters for use in optimizer
     sharded_param_groups = self._shard_parameters()
     # Optimizer initialization