From da90a7ec5629fa324cc19a1d0a53e62023febe65 Mon Sep 17 00:00:00 2001
From: Will Cromar
Date: Tue, 31 Oct 2023 18:48:22 +0000
Subject: [PATCH] remove declarations of input/output handlers

---
 torch_xla/csrc/xla_sharding_util.h | 26 --------------------------
 1 file changed, 26 deletions(-)

diff --git a/torch_xla/csrc/xla_sharding_util.h b/torch_xla/csrc/xla_sharding_util.h
index 32060c7fc09..d93cffba719 100644
--- a/torch_xla/csrc/xla_sharding_util.h
+++ b/torch_xla/csrc/xla_sharding_util.h
@@ -58,32 +58,6 @@ class ShardingUtil {
       bool unroll_windowed_einsum = false,
       bool bidirectional_windowed_einsum = false);
 
-  // Reshuffles arguments (sharded or replicated) on the devices. The
-  // size of the arguments vector must match that of the sharding_specs.
-  // The the returned arguments will be in 1:1 correspondence with the `devices`
-  // vector, so the `i`th result will belong on the `i`th device.
-  // TODO(yeounoh) avoiding pre-loading of the unpartitioned input arguments
-  // might improve the performance and save the bandwidth.
-  static std::vector<std::vector<runtime::ComputationClient::DataPtr>>
-  InputHandler(std::vector<runtime::ComputationClient::DataPtr> arguments,
-               std::vector<std::string> devices);
-
-  // Processes replicated execution results, where `sharded_results` contains
-  // `PjRtData` handles and spans the number of devices (outer) and the number
-  // of arguments (innner). This requires `sharding_specs` of the same size as
-  // the number of arguments. `sharding_specs` can contain `nullptr` if the
-  // corresponding result argument is not sharded. The replicated execution
-  // `replicated_output=true` leaves the results in replicated states, which is
-  // aligned with the default exepctation of XLA compiler. However, we override
-  // the compiler's default behavior and allow the execution to return sharded
-  // results and wrap sharded arguments into `PjRtShardedData`. This returns a
-  // vector of size that is equal to the number of arguments.
-  static std::vector<runtime::ComputationClient::DataPtr> OutputHandler(
-      std::vector<std::vector<runtime::ComputationClient::DataPtr>>
-          sharded_results,
-      std::vector<XLATensor::ShardingSpecPtr> sharding_specs,
-      bool replicated_output = false);
-
   // Returns the shape of the resulting shards of `tensor` after applying
   // `sharding`. This assumes the shards will be padded to ensure they all
   // have the same shape.
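
For context, the deleted declarations described a two-step data-movement contract: InputHandler regroups per-argument handles into one vector per device (the i-th result belongs on the i-th device), and OutputHandler folds the per-device execution results back into one handle per argument, optionally wrapping them as sharded data. The sketch below only illustrates that contract under simplified, hypothetical types (Data, DataPtr, InputHandlerSketch, OutputHandlerSketch); it is not the torch_xla implementation that this patch removes.

// Hypothetical stand-ins for runtime::ComputationClient::DataPtr; illustration only.
#include <cstddef>
#include <memory>
#include <string>
#include <vector>

struct Data {
  std::string device;  // device the handle lives on
};
using DataPtr = std::shared_ptr<Data>;

// Regroup per-argument handles so that result[i] holds every argument
// destined for devices[i], matching the 1:1 device correspondence the
// removed comment describes.
std::vector<std::vector<DataPtr>> InputHandlerSketch(
    const std::vector<DataPtr>& arguments,
    const std::vector<std::string>& devices) {
  std::vector<std::vector<DataPtr>> per_device(devices.size());
  for (std::size_t d = 0; d < devices.size(); ++d) {
    per_device[d].reserve(arguments.size());
    for (const DataPtr& arg : arguments) {
      // A real implementation would select the shard (or replica) of `arg`
      // that already lives on devices[d]; the sketch just forwards the handle.
      per_device[d].push_back(arg);
    }
  }
  return per_device;
}

// Fold per-device results (outer index: device, inner index: argument) back
// into one handle per argument, as the removed comment describes.
std::vector<DataPtr> OutputHandlerSketch(
    const std::vector<std::vector<DataPtr>>& sharded_results) {
  std::vector<DataPtr> outputs;
  if (sharded_results.empty()) return outputs;
  const std::size_t num_arguments = sharded_results.front().size();
  outputs.reserve(num_arguments);
  for (std::size_t arg = 0; arg < num_arguments; ++arg) {
    // A real implementation would wrap all per-device pieces into a sharded
    // handle (e.g. PjRtShardedData); the sketch keeps the first device's copy.
    outputs.push_back(sharded_results.front()[arg]);
  }
  return outputs;
}

Keeping the outer index of sharded_results as the device dimension mirrors the removed documentation, which states the results span "the number of devices (outer) and the number of arguments (inner)".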