From 49ef73160579aa08f0dbc689c03db2d711bdf012 Mon Sep 17 00:00:00 2001
From: Thien Nguyen
Date: Fri, 22 Jul 2022 11:31:41 +0800
Subject: [PATCH] Move MPI Init/Finalize back to within HPC Virtualization

A top-level MPI_Init in xacc::Initialize() is not ideal, since we may want
to use an MPI-enabled backend (without HPC Virtualization) that manages MPI
initialization on its own. Hence, move the MPI Init/Finalize back into the
scope of the HPCVirt decorator.

This also fixes an MPI_Finalize race condition when ExaTN's MPI build is
present in the installation. ExaTN keeps an `exatnInitializedMPI` flag to
decide whether it should perform the MPI_Finalize step; HPC Virt now uses
the same mechanism, so it does not finalize MPI prematurely and cause MPI
errors during ExaTN's Finalize(), which may call MPI APIs.

Signed-off-by: Thien Nguyen
---
 .../hpc-virtualization/hpc_virt_decorator.cpp | 49 ++++++++++++++++++-
 .../hpc-virtualization/hpc_virt_decorator.hpp |  4 +-
 xacc/xacc.cpp                                 | 27 ----------
 3 files changed, 49 insertions(+), 31 deletions(-)

diff --git a/quantum/plugins/decorators/hpc-virtualization/hpc_virt_decorator.cpp b/quantum/plugins/decorators/hpc-virtualization/hpc_virt_decorator.cpp
index 92c896e13..80bba7652 100644
--- a/quantum/plugins/decorators/hpc-virtualization/hpc_virt_decorator.cpp
+++ b/quantum/plugins/decorators/hpc-virtualization/hpc_virt_decorator.cpp
@@ -14,13 +14,29 @@
 #include "hpc_virt_decorator.hpp"
 #include "InstructionIterator.hpp"
 #include "Utils.hpp"
-#include "xacc.hpp"
+#include "xacc_service.hpp"
 #include
+#include "TearDown.hpp"
+namespace {
+static bool hpcVirtDecoratorInitializedMpi = false;
+}
 
 namespace xacc {
 namespace quantum {
 
 void HPCVirtDecorator::initialize(const HeterogeneousMap &params) {
+  if (!qpuComm) {
+    // Initializing MPI here
+    int provided, isMPIInitialized;
+    MPI_Initialized(&isMPIInitialized);
+    if (!isMPIInitialized) {
+      MPI_Init_thread(0, NULL, MPI_THREAD_MULTIPLE, &provided);
+      hpcVirtDecoratorInitializedMpi = true;
+      if (provided != MPI_THREAD_MULTIPLE) {
+        xacc::warning("MPI_THREAD_MULTIPLE not provided.");
+      }
+    }
+  }
 
   decoratedAccelerator->initialize(params);
   if (params.keyExists("n-virtual-qpus")) {
@@ -261,6 +277,35 @@ void HPCVirtDecorator::execute(
   return;
 }
 
+void HPCVirtDecorator::finalize() {
+  if (qpuComm) {
+    // Make sure we explicitly release this so that MPICommProxy is destroyed
+    // before framework shutdown (MPI_Finalize if needed)
+    qpuComm.reset();
+  }
+}
+
+class HPCVirtTearDown : public xacc::TearDown {
+public:
+  virtual void tearDown() override {
+    auto c = xacc::getService("hpc-virtualization", false);
+    if (c) {
+      auto casted = std::dynamic_pointer_cast<HPCVirtDecorator>(c);
+      assert(casted);
+      casted->finalize();
+    }
+
+    int finalized, initialized;
+    MPI_Initialized(&initialized);
+    if (initialized) {
+      MPI_Finalized(&finalized);
+      if (!finalized && hpcVirtDecoratorInitializedMpi) {
+        MPI_Finalize();
+      }
+    }
+  }
+  virtual std::string name() const override { return "xacc-hpc-virt"; }
+};
 } // namespace quantum
 } // namespace xacc
 
@@ -279,9 +324,9 @@ class US_ABI_LOCAL HPCVirtActivator : public BundleActivator {
 
   void Start(BundleContext context) {
     auto c = std::make_shared<xacc::quantum::HPCVirtDecorator>();
 
-    context.RegisterService(c);
     context.RegisterService(c);
+    context.RegisterService<xacc::TearDown>(std::make_shared<xacc::quantum::HPCVirtTearDown>());
   }
 
   /**
diff --git a/quantum/plugins/decorators/hpc-virtualization/hpc_virt_decorator.hpp b/quantum/plugins/decorators/hpc-virtualization/hpc_virt_decorator.hpp
index 691597b90..0e164a925 100644
--- a/quantum/plugins/decorators/hpc-virtualization/hpc_virt_decorator.hpp
+++ b/quantum/plugins/decorators/hpc-virtualization/hpc_virt_decorator.hpp
@@ -45,8 +45,8 @@ class HPCVirtDecorator : public AcceleratorDecorator {
 
   const std::string name() const override { return "hpc-virtualization"; }
   const std::string description() const override { return ""; }
-
-  ~HPCVirtDecorator() override { }
+  void finalize();
+  ~HPCVirtDecorator() override {};
 
 private:
   template <typename T>
diff --git a/xacc/xacc.cpp b/xacc/xacc.cpp
index 55c0153e3..88eac856a 100644
--- a/xacc/xacc.cpp
+++ b/xacc/xacc.cpp
@@ -29,10 +29,6 @@
 #include
 #include "TearDown.hpp"
 
-#ifdef MPI_ENABLED
-#include "mpi.h"
-#endif
-
 using namespace cxxopts;
 
 namespace xacc {
@@ -49,10 +45,6 @@
 std::map<std::string, std::shared_ptr<AcceleratorBuffer>> allocated_buffers{};
 std::string rootPathString = "";
 
-#ifdef MPI_ENABLED
-int isMPIInitialized;
-#endif
-
 void set_verbose(bool v) { verbose = v; }
 
 int getArgc() { return argc; }
@@ -112,19 +104,6 @@ void Initialize(int arc, char **arv) {
   if (!optionExists("queue-preamble")) {
     XACCLogger::instance()->dumpQueue();
   }
-
-  // Initializing MPI here
-#ifdef MPI_ENABLED
-  int provided;
-  MPI_Initialized(&isMPIInitialized);
-  if (!isMPIInitialized) {
-    MPI_Init_thread(0, NULL, MPI_THREAD_MULTIPLE, &provided);
-    if (provided != MPI_THREAD_MULTIPLE) {
-      xacc::warning("MPI_THREAD_MULTIPLE not provided.");
-    }
-    isMPIInitialized = 1;
-  }
-#endif
 }
 
 void setIsPyApi() { isPyApi = true; }
@@ -856,12 +835,6 @@ void Finalize() {
   compilation_database.clear();
   allocated_buffers.clear();
   xacc::ServiceAPI_Finalize();
-  // This replaces the HPC virtualization TearDown
-#ifdef MPI_ENABLED
-  if (isMPIInitialized) {
-    MPI_Finalize();
-  }
-#endif
 }
 
 }
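
Note (not part of the patch): a minimal standalone sketch of the "only finalize
MPI if you initialized it" pattern that the decorator above and ExaTN both
follow. The function and flag names here are illustrative only, not XACC or
ExaTN APIs; only the MPI calls are real.

// guard_sketch.cpp -- illustrative only; names are hypothetical.
#include <mpi.h>

// Set only by the component that actually called MPI_Init_thread.
static bool weInitializedMpi = false;

// Initialize MPI lazily, remembering whether we were the initializer.
void ensureMpi() {
  int initialized = 0;
  MPI_Initialized(&initialized);
  if (!initialized) {
    int provided = 0;
    MPI_Init_thread(nullptr, nullptr, MPI_THREAD_MULTIPLE, &provided);
    weInitializedMpi = true;
  }
}

// Finalize MPI only if we initialized it and nobody else has finalized it yet.
void shutdownMpi() {
  int initialized = 0, finalized = 0;
  MPI_Initialized(&initialized);
  if (initialized) {
    MPI_Finalized(&finalized);
    if (!finalized && weInitializedMpi) {
      MPI_Finalize();
    }
  }
}

This mirrors hpcVirtDecoratorInitializedMpi in the decorator and
exatnInitializedMPI in ExaTN: whichever component performed the MPI_Init owns
the MPI_Finalize, so the other component's teardown becomes a no-op.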