From 5ae5c6c50bc30fbc09c228c11dae26253a262208 Mon Sep 17 00:00:00 2001 From: Mahesh Natarajan Date: Sat, 16 Nov 2024 12:31:55 -0800 Subject: [PATCH 1/4] Test: Writing CSV file for single proc works --- .../DarkMatterParticleContainer.cpp | 8 +++----- Exec/ParticleFilterTest/GNUmakefile | 12 ++++++++++-- Exec/ParticleFilterTest/Make.package | 1 + Exec/ParticleFilterTest/Nyx_advance.cpp | 12 ++++++++++++ Exec/ParticleFilterTest/temp.H | 1 + 5 files changed, 27 insertions(+), 7 deletions(-) create mode 100644 Exec/ParticleFilterTest/temp.H diff --git a/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp b/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp index 139f8829..e5a7e81c 100644 --- a/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp +++ b/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp @@ -7,7 +7,7 @@ using namespace amrex; #include - +#include /// These are helper functions used when initializing from a morton-ordered /// binary particle file. @@ -757,10 +757,8 @@ void store_dm_particle_single (amrex::ParticleContainer<1+AMREX_SPACEDIM, 0>::Su p2.pos(comp) = p.pos(comp)+(kdir)*(phi[comp]-plo[comp]); Real z1 = p2.pos(comp); Real vz = p.rdata(comp+1); - // printf("%0.15g, %0.15g, %0.15g, %0.15g, %0.15g, %0.15g \n", x1, y1, z1, vx, vy, vz); - - // Print()< #include #include +#include using namespace amrex; using std::string; +FILE *file_lightcone_csv; + Real Nyx::advance (Real time, Real dt, @@ -240,6 +243,13 @@ Nyx::advance_hydro_plus_particles (Real time, MultiFab::RegionTag amrMoveKickDrift_tag("MoveKickDrift_" + std::to_string(level)); + std::string filename=Concatenate("lightcone_", int(100*(1/a_old-1)), 7); + filename=filename+".csv"; + file_lightcone_csv = fopen(filename.c_str(),"w"); + if(ParallelDescriptor::IOProcessor()) { + fprintf(file_lightcone_csv,"%s, %s, %s, %s, %s, %s\n", "x", "y", "z", "vx", "vy", "vz"); + } + for (int lev = level; lev <= finest_level_to_advance; lev++) { // We need grav_n_grow grow cells to track boundary particles @@ -260,6 +270,8 @@ Nyx::advance_hydro_plus_particles (Real time, } Nyx::theActiveParticles()[i]->moveKickDrift(grav_vec_old, lev, time, dt, a_old, a_half, where_width, radius_inner, radius_outer); } + fclose(file_lightcone_csv); + // Only need the coarsest virtual particles here. 
if (lev == level && level < finest_level) diff --git a/Exec/ParticleFilterTest/temp.H b/Exec/ParticleFilterTest/temp.H new file mode 100644 index 00000000..91cac962 --- /dev/null +++ b/Exec/ParticleFilterTest/temp.H @@ -0,0 +1 @@ +extern FILE *file_lightcone_csv; From 6a55d0264aa4fca287c00c8183cd51ddc99ee065 Mon Sep 17 00:00:00 2001 From: Mahesh Natarajan Date: Sun, 17 Nov 2024 12:02:19 -0800 Subject: [PATCH 2/4] Simple binary files for particle data and python script to read --- .../DarkMatterParticleContainer.H | 5 +- .../DarkMatterParticleContainer.cpp | 38 +- Exec/ParticleFilterTest/GNUmakefile | 2 +- Exec/ParticleFilterTest/Nyx_advance.cpp | 172 ++++- .../Nyx_advance_TryMPIButFail.cpp | 719 ++++++++++++++++++ Exec/ParticleFilterTest/Read_BinarySimple.py | 52 ++ Exec/ParticleFilterTest/temp.H | 18 + 7 files changed, 984 insertions(+), 22 deletions(-) create mode 100644 Exec/ParticleFilterTest/Nyx_advance_TryMPIButFail.cpp create mode 100644 Exec/ParticleFilterTest/Read_BinarySimple.py diff --git a/Exec/ParticleFilterTest/DarkMatterParticleContainer.H b/Exec/ParticleFilterTest/DarkMatterParticleContainer.H index afa40bbf..638cab59 100644 --- a/Exec/ParticleFilterTest/DarkMatterParticleContainer.H +++ b/Exec/ParticleFilterTest/DarkMatterParticleContainer.H @@ -64,7 +64,10 @@ AMREX_GPU_HOST_DEVICE AMREX_INLINE void store_dm_particle_single (amrex::Particl amrex::GpuArray const& phi, amrex::GpuArray const& dxi, const amrex::Real& dt, const amrex::Real& a_prev, - const amrex::Real& a_cur, const int& do_move, const amrex::Real& radius_inner, const amrex::Real& radius_outer, int index); + const amrex::Real& a_cur, const int& do_move, + const amrex::Real& radius_inner, + const amrex::Real& radius_outer, + int index, const bool is_file_write=false); #endif /* _DarkMatterParticleContainer_H_ */ diff --git a/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp b/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp index e5a7e81c..d6f0dcb2 100644 --- a/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp +++ b/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp @@ -461,19 +461,19 @@ DarkMatterParticleContainer::moveKickDrift (amrex::MultiFab& acceleration, Array4 accel= accel_fab.array(); int nc=AMREX_SPACEDIM; - int num_output=0; + int num_output=0; for(int i=0;i::SuperParticleType p = pstruct2[i]; int mask=shell_filter_test(ptile.getConstParticleTileData(),i); if(mask>0) { for(int j=0;jWritePlotFile(dir, name, real_comp_names_shell); + if(write_hdf5!=1){ + //ShellPC->WritePlotFile(dir, name, real_comp_names_shell); + } #ifdef AMREX_USE_HDF5 - else + else{ ShellPC->WritePlotFileHDF5(dir, name, real_comp_names_shell, compression); + } #endif } Print()<<"After write\t"<TotalNumberOfParticles()<<"\t"<::Su amrex::GpuArray const& phi, amrex::GpuArray const& dxi, const amrex::Real& dt, const amrex::Real& a_prev, - const amrex::Real& a_cur, const int& do_move, const Real& radius_inner, const Real& radius_outer, int index) + const amrex::Real& a_cur, const int& do_move, const Real& radius_inner, + const Real& radius_outer, int index, const bool is_file_write) { amrex::Real half_dt = 0.5 * dt; amrex::Real a_cur_inv = 1.0 / a_cur; @@ -757,7 +760,16 @@ void store_dm_particle_single (amrex::ParticleContainer<1+AMREX_SPACEDIM, 0>::Su p2.pos(comp) = p.pos(comp)+(kdir)*(phi[comp]-plo[comp]); Real z1 = p2.pos(comp); Real vz = p.rdata(comp+1); - fprintf(file_lightcone_csv,"%0.15g, %0.15g, %0.15g, %0.15g, %0.15g, %0.15g \n", x1, y1, z1, vx, vy, vz); + + // There are two calls to this routine. 
Do the particle output writing only once + // based on the bool. + if(is_file_write) { + //fprintf(file_lightcone_csv,"%0.15g, %0.15g, %0.15g, %0.15g, %0.15g, %0.15g \n", x1, y1, z1, vx, vy, vz); + myParticle tmp; + tmp.x = x1; tmp.y = y1; tmp.z = z1; + tmp.vx = vx; tmp.vy = vy; tmp.vz = vz; + shell_particles.emplace_back(tmp); + } for (int comp=0; comp < nc; ++comp) { p2.rdata(comp+1+3)=p.pos(comp); diff --git a/Exec/ParticleFilterTest/GNUmakefile b/Exec/ParticleFilterTest/GNUmakefile index 194abf9f..d3aa0af2 100644 --- a/Exec/ParticleFilterTest/GNUmakefile +++ b/Exec/ParticleFilterTest/GNUmakefile @@ -8,7 +8,7 @@ TOP = ../.. COMP = gnu USE_MPI = TRUE # Use with Async IO -MPI_THREAD_MULTIPLE = FALSE +MPI_THREAD_MULTIPLE = TRUE USE_OMP = FALSE USE_CUDA = FALSE diff --git a/Exec/ParticleFilterTest/Nyx_advance.cpp b/Exec/ParticleFilterTest/Nyx_advance.cpp index 82ba2e82..dbd141e7 100644 --- a/Exec/ParticleFilterTest/Nyx_advance.cpp +++ b/Exec/ParticleFilterTest/Nyx_advance.cpp @@ -10,6 +10,157 @@ using namespace amrex; using std::string; FILE *file_lightcone_csv; +std::vector shell_particles; + + +void SwapEnd(float& val) { + // Swap endianess if necessary + char* bytes = reinterpret_cast(&val); + std::swap(bytes[0], bytes[3]); + std::swap(bytes[1], bytes[2]); +} + +void writeBinaryVTK(const std::string& filename, const std::vector& particles) { + int rank, size; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + size_t local_num_particles = particles.size(); + size_t total_num_particles = 0; + + // Get total particles across all ranks + MPI_Reduce(&local_num_particles, &total_num_particles, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD); + + // Compute offset for this rank's data + size_t offset = 0; + MPI_Exscan(&local_num_particles, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + + // Header handling + size_t header_size = 0; + + if (rank == 0) { + std::ofstream file(filename, std::ios::binary); + if (!file) { + std::cerr << "Error: Could not open file " << filename << "\n"; + MPI_Abort(MPI_COMM_WORLD, 1); + } + + // Write the header + file << "# vtk DataFile Version 2.0\n"; + file << "Particle Cloud Data\n"; + file << "BINARY\n"; + file << "DATASET POLYDATA\n"; + file << "POINTS " << total_num_particles << " float\n"; + + // Determine header size + file.seekp(0, std::ios::end); + header_size = file.tellp(); + file.close(); + } + + // Broadcast the header size to all ranks + MPI_Bcast(&header_size, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD); + + // Use MPI collective I/O for binary data + MPI_File mpi_file; + MPI_File_open(MPI_COMM_WORLD, filename.c_str(), + MPI_MODE_WRONLY | MPI_MODE_APPEND, MPI_INFO_NULL, &mpi_file); + + // Compute byte offset for this rank + size_t byte_offset = header_size + sizeof(float) * 3 * offset; + + // Prepare local data + std::vector data(3 * local_num_particles); + for (size_t i = 0; i < local_num_particles; ++i) { + data[3 * i] = particles[i].x; + data[3 * i + 1] = particles[i].y; + data[3 * i + 2] = particles[i].z; + + // Convert to big-endian if needed + SwapEnd(data[3 * i]); + SwapEnd(data[3 * i + 1]); + SwapEnd(data[3 * i + 2]); + } + + // Write particle data collectively + MPI_File_write_at_all(mpi_file, byte_offset, data.data(), data.size(), MPI_FLOAT, MPI_STATUS_IGNORE); + + MPI_File_close(&mpi_file); + + if (rank == 0) { + std::cout << "Successfully wrote VTK file: " << filename << "\n"; + } +} + + +void writeBinarySimple(const std::string& filename, const std::vector& particles) { + int rank, size; + 
MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + size_t local_num_particles = particles.size(); + size_t total_num_particles = 0; + + // Get total particles across all ranks + MPI_Reduce(&local_num_particles, &total_num_particles, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD); + + // Compute offset for this rank's data + size_t offset = 0; + MPI_Exscan(&local_num_particles, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + + // Header handling + size_t header_size = 0; + + if (rank == 0) { + std::ofstream file(filename, std::ios::binary); + if (!file) { + std::cerr << "Error: Could not open file " << filename << "\n"; + MPI_Abort(MPI_COMM_WORLD, 1); + } + file.seekp(0, std::ios::end); + header_size = file.tellp(); + file.close(); + } + + // Broadcast the header size to all ranks + MPI_Bcast(&header_size, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD); + + // Use MPI collective I/O for binary data + MPI_File mpi_file; + MPI_File_open(MPI_COMM_WORLD, filename.c_str(), + MPI_MODE_WRONLY | MPI_MODE_APPEND, MPI_INFO_NULL, &mpi_file); + + // Compute byte offset for this rank + size_t byte_offset = header_size + sizeof(float) * 6 * offset; + + // Prepare local data + std::vector data(6 * local_num_particles); + for (size_t i = 0; i < local_num_particles; ++i) { + data[6 * i] = particles[i].x; + data[6 * i + 1] = particles[i].y; + data[6 * i + 2] = particles[i].z; + data[6 * i + 3] = particles[i].vx; + data[6 * i + 4] = particles[i].vy; + data[6 * i + 5] = particles[i].vz; + + // Convert to big-endian if needed + SwapEnd(data[6 * i]); + SwapEnd(data[6 * i + 1]); + SwapEnd(data[6 * i + 2]); + SwapEnd(data[6 * i + 3]); + SwapEnd(data[6 * i + 4]); + SwapEnd(data[6 * i + 5]); + } + + // Write particle data collectively + MPI_File_write_at_all(mpi_file, byte_offset, data.data(), data.size(), MPI_FLOAT, MPI_STATUS_IGNORE); + + MPI_File_close(&mpi_file); + + if (rank == 0) { + std::cout << "Successfully wrote VTK file: " << filename << "\n"; + } +} Real Nyx::advance (Real time, @@ -243,12 +394,17 @@ Nyx::advance_hydro_plus_particles (Real time, MultiFab::RegionTag amrMoveKickDrift_tag("MoveKickDrift_" + std::to_string(level)); + shell_particles.clear(); + shell_particles.shrink_to_fit(); + std::string filename=Concatenate("lightcone_", int(100*(1/a_old-1)), 7); - filename=filename+".csv"; - file_lightcone_csv = fopen(filename.c_str(),"w"); - if(ParallelDescriptor::IOProcessor()) { - fprintf(file_lightcone_csv,"%s, %s, %s, %s, %s, %s\n", "x", "y", "z", "vx", "vy", "vz"); - } + //std::string filename_csv = filename+".csv"; + std::string filename_vtk=filename+".vtk"; + std::string filename_bin=filename+".bin"; + //file_lightcone_csv = fopen(filename_csv.c_str(),"w"); + //if(ParallelDescriptor::IOProcessor()) { + // fprintf(file_lightcone_csv,"%s, %s, %s, %s, %s, %s\n", "x", "y", "z", "vx", "vy", "vz"); + //} for (int lev = level; lev <= finest_level_to_advance; lev++) { @@ -270,8 +426,10 @@ Nyx::advance_hydro_plus_particles (Real time, } Nyx::theActiveParticles()[i]->moveKickDrift(grav_vec_old, lev, time, dt, a_old, a_half, where_width, radius_inner, radius_outer); } - fclose(file_lightcone_csv); - + //fclose(file_lightcone_csv); + writeBinaryVTK(filename_vtk, shell_particles); + writeBinarySimple(filename_bin, shell_particles); + // Only need the coarsest virtual particles here. 
if (lev == level && level < finest_level) diff --git a/Exec/ParticleFilterTest/Nyx_advance_TryMPIButFail.cpp b/Exec/ParticleFilterTest/Nyx_advance_TryMPIButFail.cpp new file mode 100644 index 00000000..2f4da350 --- /dev/null +++ b/Exec/ParticleFilterTest/Nyx_advance_TryMPIButFail.cpp @@ -0,0 +1,719 @@ + +#include +#include +#include +#include +#include + +using namespace amrex; + +using std::string; + +FILE *file_lightcone_csv; +std::vector shell_particles; + +void writeBinaryVTK(const std::string& filename, + const std::vector& shell_particles) +{ + + // Calculate the number of particles and total particles across all processes + size_t local_num_particles = shell_particles.size(); + size_t total_num_particles = 0; + MPI_Reduce(&local_num_particles, &total_num_particles, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD); + + // Compute offset for each rank + size_t offset = 0; + MPI_Exscan(&local_num_particles, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + + //std::cout << "I am rank " << rank << " " << total_num_particles << " " << offset << "\n"; + //exit(0); + + std::ofstream file(filename, std::ios::binary); + + if (!file) { + std::cerr << "Error: Could not open file " << filename << " for writing." << std::endl; + return; + } + + if (!file.is_open()) { + std::cerr << "Error: Unable to open file." << std::endl; + MPI_Abort(MPI_COMM_WORLD, 1); // Abort if the file cannot be opened + } + int myrank=amrex::ParallelDescriptor::MyProc(); + + MPI_Barrier(MPI_COMM_WORLD); // Ensure the file header is written before continuing + // VTK header for legacy format + if(myrank==0){ + std::cout << "Writing header ... " << "\n"; + file << "# vtk DataFile Version 2.0\n"; + file << "Particle Cloud Data\n"; + file << "BINARY\n"; + file << "DATASET POLYDATA\n"; + file << "POINTS " << total_num_particles << " float\n"; + //file.flush(); // Ensure data is written to disk + //std::fflush(nullptr); // Flush all system-level buffers + + + // Seek to the end of the header + file.seekp(0, std::ios::end); + } + MPI_Barrier(MPI_COMM_WORLD); // Ensure the file header is written before continuing + + // Write particles in binary format + // Compute the starting byte position for this rank's data + + // After writing the header, seek to the end of the file to determine the header size + file.seekp(0, std::ios::end); // Move the file pointer to the end + size_t header_size = file.tellp(); // Get the current file pointer position (i.e., header size) + + size_t byte_offset = header_size + sizeof(float) * 3 * offset; + + file.seekp(byte_offset, std::ios::beg); + for (const auto& point : shell_particles) { + float x = point.x, y = point.y, z = point.z; + + SwapEnd(x); + SwapEnd(y); + SwapEnd(z); + + // Write binary data + file.write(reinterpret_cast(&x), sizeof(float)); + file.write(reinterpret_cast(&y), sizeof(float)); + file.write(reinterpret_cast(&z), sizeof(float)); + } + + MPI_Barrier(MPI_COMM_WORLD); // Ensure the file header is written before continuing + + if(myrank==0){ + file.close(); + } + + if(myrank==0) { + if (!file.good()) { + std::cerr << "Error: Writing to file " << filename << " failed." << std::endl; + } else { + std::cout << "Successfully wrote " << filename << std::endl; + } + } +} + + +Real +Nyx::advance (Real time, + Real dt, + int iteration, + int ncycle) + + // Arguments: + // time : the current simulation time + // dt : the timestep to advance (e.g., go from time to + // time + dt) + // iteration : where we are in the current AMR subcycle. 
Each + // level will take a number of steps to reach the + // final time of the coarser level below it. This + // counter starts at 1 + // ncycle : the number of subcycles at this level + +{ + + const std::string region_name = ncycle > 1 ? "R::Nyx::advance" : "R::Nyx::advance::STEP1"; + BL_PROFILE_REGION(region_name); + MultiFab::RegionTag amrlevel_tag("AmrLevel_Level_" + std::to_string(level)); + +#ifdef NO_HYDRO + + return advance_particles_only(time, dt, iteration, ncycle); + +#else + if (do_hydro) + { + if (Nyx::theActiveParticles().size() > 0) + { +#ifndef AGN + if (do_dm_particles) +#endif + { + return advance_hydro_plus_particles(time, dt, iteration, ncycle); + } + } + else + { + return advance_hydro(time, dt, iteration, ncycle); + } + } + else if(do_dm_particles) + return advance_particles_only(time, dt, iteration, ncycle); + else + return advance_heatcool(time, dt, iteration, ncycle); +#endif + return 0; +} + +// +// This will advance the Nyx AmrLevel. +// If no subcycling is used, this will be a full multilevel advance at +// at level 0 and a finer levels will be skipped over. +// If subcycling is used, this will check if finer levels are subcycled +// relative to this level. All levels that are not subcycled will be +// advanced in a multilevel advance with this level and be skipped over +// subsequently. If the next level is subcycled relative to this one, +// then this will only be a single level advance. +// +#ifndef NO_HYDRO +#ifdef AMREX_PARTICLES +Real +Nyx::advance_hydro_plus_particles (Real time, + Real dt, + int iteration, + int ncycle) +{ + + // A particle in cell (i) can affect cell values in (i-1) to (i+1) + int stencil_deposition_width = 1; + + // A particle in cell (i) may need information from cell values in (i-1) to (i+1) + // to update its position (typically via interpolation of the acceleration from the grid) + int stencil_interpolation_width = 1; + + // A particle that starts in cell (i + ncycle) can reach + // cell (i) in ncycle number of steps .. after "iteration" steps + // the particle has to be within (i + ncycle+1-iteration) to reach cell (i) + // in the remaining (ncycle-iteration) steps + + // *** ghost_width *** is used + // *) to set how many cells are used to hold ghost particles i.e copies of particles + // that live on (level-1) can affect the grid over all of the ncycle steps. + // We define ghost cells at the coarser level to cover all iterations so + // we can't reduce this number as iteration increases. + + int ghost_width = ncycle + stencil_deposition_width; + + // *** where_width *** is used + // *) to set how many cells the Where call in moveKickDrift tests = + // ghost_width + (1-iteration) - 1: + // the minus 1 arises because this occurs *after* the move + + int where_width = ghost_width + (1-iteration) - 1; + + // *** grav_n_grow *** is used + // *) to determine how many ghost cells we need to fill in the MultiFab from + // which the particle interpolates its acceleration + // *) to set how many cells the Where call in moveKickDrift tests = (grav.nGrow()-2). 
+ // *) the (1-iteration) arises because the ghost particles are created on the coarser + // level which means in iteration 2 the ghost particles may have moved 1 additional cell along + + int grav_n_grow = ghost_width + (1-iteration) + (iteration-1) + + stencil_interpolation_width ; + + { + BL_PROFILE_REGION("R::Nyx::advance_hydro_plus_particles"); + BL_PROFILE("Nyx::advance_hydro_plus_particles()"); + // Sanity checks + if (!do_hydro) + amrex::Abort("In `advance_hydro_plus_particles` but `do_hydro` not true"); + + if (Nyx::theActiveParticles().size() <= 0) + amrex::Abort("In `advance_hydro_plus_particles` but no active particles"); + + const int finest_level = parent->finestLevel(); + int finest_level_to_advance; + bool nosub = !parent->subCycle(); + + if (nosub) + { + if (level > 0) + return dt; + + finest_level_to_advance = finest_level; + } + else + { + // This level was advanced by a previous multilevel advance. + if (level > 0 && ncycle == 1) + return dt; + + // Find the finest level to advance + int lev = level; + while(lev < finest_level && parent->nCycle(lev+1) == 1) + lev++; + finest_level_to_advance = lev; + + // We must setup virtual and Ghost Particles + // + // Setup the virtual particles that represent finer level particles + // + setup_virtual_particles(); + // + // Setup ghost particles for use in finer levels. Note that Ghost particles + // that will be used by this level have already been created, the + // particles being set here are only used by finer levels. + // + for(int lev = level; lev <= finest_level_to_advance && lev < finest_level; lev++) + { + get_level(lev).setup_ghost_particles(ghost_width); + } + } + + Real dt_lev; + + // + // Move current data to previous, clear current. + // Don't do this if a coarser level has done this already. + // + if (level == 0 || iteration > 1) + { + + for (int lev = level; lev <= finest_level; lev++) + { + dt_lev = parent->dtLevel(lev); + for (int k = 0; k < NUM_STATE_TYPE; k++) + { + get_level(lev).state[k].allocOldData(); + get_level(lev).state[k].swapTimeLevels(dt_lev); + } + } + } + + const Real prev_time = state[State_Type].prevTime(); + const Real cur_time = state[State_Type].curTime(); + const Real a_old = get_comoving_a(prev_time); + const Real a_new = get_comoving_a(cur_time); + + if (do_grav) { + // + // We now do a multilevel solve for old Gravity. This goes to the + // finest level regardless of subcycling behavior. Consequentially, + // If we are subcycling we skip this step on the first iteration of + // finer levels. + BL_PROFILE_VAR("solve_for_old_phi", solve_for_old_phi); + if (level == 0 || iteration > 1) + { + + MultiFab::RegionTag amrGrav_tag("Gravity_" + std::to_string(level)); + + // fix fluxes on finer grids + if (do_reflux) + { + for (int lev = level; lev < finest_level; lev++) + { + gravity->zero_phi_flux_reg(lev + 1); + } + } + + // swap grav data + for (int lev = level; lev <= finest_level; lev++) + get_level(lev).gravity->swap_time_levels(lev); + + // + // Solve for phi using the previous phi as a guess. 
+ // + int use_previous_phi_as_guess = 1; + int ngrow_for_solve = iteration + stencil_deposition_width; + gravity->multilevel_solve_for_old_phi(level, finest_level, + ngrow_for_solve, + use_previous_phi_as_guess); + } + BL_PROFILE_VAR_STOP(solve_for_old_phi); + + { + // + // Advance Particles + // + if (Nyx::theActiveParticles().size() > 0) + { + // Advance the particle velocities to the half-time and the positions to the new time + // We use the cell-centered gravity to correctly interpolate onto particle locations + const Real a_half = 0.5 * (a_old + a_new); + + if (particle_verbose && ParallelDescriptor::IOProcessor()) + std::cout << "moveKickDrift ... updating particle positions and velocity\n"; + + MultiFab::RegionTag amrMoveKickDrift_tag("MoveKickDrift_" + std::to_string(level)); + + shell_particles.clear(); + shell_particles.shrink_to_fit(); + + std::string filename=Concatenate("lightcone_", int(100*(1/a_old-1)), 7); + std::string filename_csv = filename+".csv"; + filename=filename+".vtk"; + file_lightcone_csv = fopen(filename_csv.c_str(),"w"); + if(ParallelDescriptor::IOProcessor()) { + fprintf(file_lightcone_csv,"%s, %s, %s, %s, %s, %s\n", "x", "y", "z", "vx", "vy", "vz"); + } + + for (int lev = level; lev <= finest_level_to_advance; lev++) + { + // We need grav_n_grow grow cells to track boundary particles + const auto& ba = get_level(lev).get_new_data(State_Type).boxArray(); + const auto& dm = get_level(lev).get_new_data(State_Type).DistributionMap(); + MultiFab grav_vec_old(ba, dm, AMREX_SPACEDIM, grav_n_grow); + get_level(lev).gravity->get_old_grav_vector(lev, grav_vec_old, time); + + for (int i = 0; i < Nyx::theActiveParticles().size(); i++) { + Real radius_inner = -1.e34; + Real radius_outer = -1.e34; + Real z_old = 1/a_old-1; + if(z_old=lightcone_end_z) { + integrate_distance_given_a(a_old, 1.0, radius_outer); + integrate_distance_given_a(a_new, 1.0, radius_inner); + Print()<moveKickDrift(grav_vec_old, lev, time, dt, a_old, a_half, where_width, radius_inner, radius_outer); + } + fclose(file_lightcone_csv); + writeBinaryVTK(filename, shell_particles); + + + // Only need the coarsest virtual particles here. 
+ if (lev == level && level < finest_level) + for (int i = 0; i < Nyx::theVirtualParticles().size(); i++) + Nyx::theVirtualParticles()[i]->moveKickDrift(grav_vec_old, lev, time, dt, a_old, a_half, where_width); + + // Miiiight need all Ghosts + for (int i = 0; i < Nyx::theGhostParticles().size(); i++) + Nyx::theGhostParticles()[i]->moveKickDrift(grav_vec_old, lev, time, dt, a_old, a_half, where_width); +#ifdef NEUTRINO_DARK_PARTICLES + for (int i = 0; i < Nyx::theActiveParticles().size() && neutrino_cfl >= 1; i++) + Nyx::theActiveParticles()[i]->Redistribute(); +#endif + } + } // if active particles + } // lsg + } // if (do_grav) + + // + // Call the hydro advance at each level to be advanced + // + BL_PROFILE_VAR("just_the_hydro", just_the_hydro); + { + + MultiFab::RegionTag amrhydro_tag("Hydro_" + std::to_string(level)); + + for (int lev = level; lev <= finest_level_to_advance; lev++) + { +#ifdef SDC + if (sdc_split > 0 && (strang_restart_from_sdc == 0)) { + get_level(lev).sdc_hydro(time, dt, a_old, a_new); + } else { + get_level(lev).strang_hydro(time, dt, a_old, a_new); + } +#else + get_level(lev).strang_hydro(time, dt, a_old, a_new); +#endif + } + } + BL_PROFILE_VAR_STOP(just_the_hydro); + + { + // + // We must reflux before doing the next gravity solve + // + if (do_reflux) + { + for (int lev = level; lev < finest_level_to_advance; lev++) + { + get_level(lev).reflux(); + } + } + + // Always average down the new state from finer to coarser. + for (int lev = finest_level_to_advance-1; lev >= level; lev--) + { + get_level(lev).average_down( State_Type); + get_level(lev).average_down(DiagEOS_Type); + } + + // + // Here we use the "old" phi from the current time step as a guess for this + // solve + // + if (do_grav) + { + for (int lev = level; lev <= finest_level_to_advance; lev++) + { + MultiFab::Copy(parent->getLevel(lev).get_new_data(PhiGrav_Type), + parent->getLevel(lev).get_old_data(PhiGrav_Type), + 0, 0, 1, 0); + } + + // Solve for new Gravity + BL_PROFILE_VAR("solve_for_new_phi", solve_for_new_phi); + int use_previous_phi_as_guess = 1; + if (finest_level_to_advance > level) + { + MultiFab::RegionTag amrGrav_tag("Gravity_" + std::to_string(level)); + // The particle may be as many as "iteration" ghost cells out + int ngrow_for_solve = iteration + stencil_deposition_width; + gravity->multilevel_solve_for_new_phi(level, finest_level_to_advance, + ngrow_for_solve, + use_previous_phi_as_guess); + } + else + { + MultiFab::RegionTag amrGrav_tag("Gravity_" + std::to_string(level)); + int fill_interior = 0; + gravity->solve_for_new_phi(level,get_new_data(PhiGrav_Type), + gravity->get_grad_phi_curr(level), + fill_interior, grav_n_grow); + } + BL_PROFILE_VAR_STOP(solve_for_new_phi); + + // Reflux + if (do_reflux) + { + MultiFab::RegionTag amrGrav_tag("Gravity_" + std::to_string(level)); + for (int lev = level; lev <= finest_level_to_advance; lev++) + { + gravity->add_to_fluxes(lev, iteration, ncycle); + } + } + + // + // Now do corrector part of source term update + // + for (int lev = level; lev <= finest_level_to_advance; lev++) + { + MultiFab::RegionTag amrGrav_tag("Gravity_" + std::to_string(lev)); + + // Now do corrector part of source term update + correct_gsrc(lev,time,prev_time,cur_time,dt); + + MultiFab& S_new = get_level(lev).get_new_data(State_Type); + MultiFab& D_new = get_level(lev).get_new_data(DiagEOS_Type); + + // First reset internal energy before call to compute_temp + MultiFab reset_e_src(S_new.boxArray(), S_new.DistributionMap(), 1, NUM_GROW); + reset_e_src.setVal(0.0); + 
get_level(lev).reset_internal_energy(S_new,D_new,reset_e_src); + + get_level(lev).compute_new_temp(S_new,D_new); + } + + // Must average down again after doing the gravity correction; + // always average down from finer to coarser. + // Here we average down both the new state and phi and gravity. + for (int lev = finest_level_to_advance-1; lev >= level; lev--) + get_level(lev).average_down(); + + if (Nyx::theActiveParticles().size() > 0) + { + // Advance the particle velocities by dt/2 to the new time. We use the + // cell-centered gravity to correctly interpolate onto particle + // locations. + MultiFab::RegionTag amrMoveKickDrift_tag("MoveKick_" + std::to_string(level)); + const Real a_half = 0.5 * (a_old + a_new); + + if (particle_verbose && ParallelDescriptor::IOProcessor()) + std::cout << "moveKick ... updating velocity only\n"; + + for (int lev = level; lev <= finest_level_to_advance; lev++) + { + const auto& ba = get_level(lev).get_new_data(State_Type).boxArray(); + const auto& dm = get_level(lev).get_new_data(State_Type).DistributionMap(); + MultiFab grav_vec_new(ba, dm, AMREX_SPACEDIM, grav_n_grow); + get_level(lev).gravity->get_new_grav_vector(lev, grav_vec_new, cur_time); + + for (int i = 0; i < Nyx::theActiveParticles().size(); i++) + Nyx::theActiveParticles()[i]->moveKick(grav_vec_new, lev, time, dt, a_new, a_half); + + // Virtual particles will be recreated, so we need not kick them. + + // Ghost particles need to be kicked except during the final iteration. + if (iteration != ncycle) + for (int i = 0; i < Nyx::theGhostParticles().size(); i++) + Nyx::theGhostParticles()[i]->moveKick(grav_vec_new, lev, time, dt, a_new, a_half); + } + } + } // do_grav + + // + // Synchronize Energies + // + for (int lev = level; lev <= finest_level_to_advance; lev++) + { + MultiFab::RegionTag amrReset_tag("Reset_" + std::to_string(lev)); + MultiFab& S_new = get_level(lev).get_new_data(State_Type); + MultiFab& D_new = get_level(lev).get_new_data(DiagEOS_Type); + + get_level(lev).reset_internal_energy_nostore(S_new,D_new); + + } + } + } + // BL_PROFILE_REGION_STOP("R::Nyx::advance_hydro_plus_particles"); + + // Redistribution happens in post_timestep + return dt; +} +#endif + +Real +Nyx::advance_heatcool (Real time, + Real dt, + int iteration, + int ncycle) +{ + BL_PROFILE("Nyx::advance_heatcool()"); +#ifdef HEATCOOL + amrex::Print()<<"Using advance_heatcool since hydro and dm_particles are both off"<finestLevel(); + if (do_reflux && level < finest_level) + gravity->zero_phi_flux_reg(level + 1); + gravity->swap_time_levels(level); + } + + if (do_forcing) + forcing->evolve(dt); + + // Call the hydro advance itself + BL_PROFILE_VAR("just_the_hydro", just_the_hydro); +#ifdef SDC + if (sdc_split > 0) + { + sdc_hydro(time, dt, a_old, a_new); + } else { + strang_hydro(time, dt, a_old, a_new); + } +#else + strang_hydro(time, dt, a_old, a_new); +#endif + BL_PROFILE_VAR_STOP(just_the_hydro); + + if (do_grav) + { + if (verbose && ParallelDescriptor::IOProcessor()) + std::cout << "\n... 
new-time level solve at level " << level << '\n'; + + // + // Solve for new phi + // Here we use the "old" phi from the current time step as a guess for this solve + // + MultiFab::Copy(get_new_data(PhiGrav_Type),get_old_data(PhiGrav_Type),0,0,1,0); + int fill_interior = 0; + gravity->solve_for_new_phi(level,get_new_data(PhiGrav_Type), + gravity->get_grad_phi_curr(level),fill_interior); + if (do_reflux) + gravity->add_to_fluxes(level,iteration,ncycle); + + // Now do corrector part of source term update + correct_gsrc(level,time,prev_time,cur_time,dt); + } + + MultiFab& S_new = get_new_data(State_Type); + MultiFab& D_new = get_new_data(DiagEOS_Type); + + MultiFab reset_e_src(S_new.boxArray(), S_new.DistributionMap(), 1, NUM_GROW); + reset_e_src.setVal(0.0); + + // First reset internal energy before call to compute_temp + reset_internal_energy(S_new,D_new,reset_e_src); + compute_new_temp(S_new,D_new); + + return dt; +} +#endif diff --git a/Exec/ParticleFilterTest/Read_BinarySimple.py b/Exec/ParticleFilterTest/Read_BinarySimple.py new file mode 100644 index 00000000..2c42a8cc --- /dev/null +++ b/Exec/ParticleFilterTest/Read_BinarySimple.py @@ -0,0 +1,52 @@ +import numpy as np + +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D + +def read_binary_points(filename): + with open(filename, 'rb') as file: + # Read the binary data directly, assuming big-endian float32 + data = np.fromfile(file, dtype='>f4') # '>f4' indicates big-endian float32 + + # Reshape the data to Nx3 (x, y, z for each point) + if len(data) % 6 != 0: + raise ValueError("Data size is not a multiple of 6. The file might be corrupted or improperly formatted.") + + points = data.reshape((-1, 6)) + + num_points = len(data) // 6 + print(f"Total number of points: {num_points}") + + return points, num_points + +def plot_points(x, y, z): + fig = plt.figure(figsize=(10, 7)) + ax = fig.add_subplot(111, projection='3d') + + # Scatter plot for points + ax.scatter(x, y, z, c='blue', marker='o', s=1, alpha=0.8) + + # Set labels for the axes + ax.set_xlabel('X Coordinate') + ax.set_ylabel('Y Coordinate') + ax.set_zlabel('Z Coordinate') + + ax.set_title('Lightcone shell') + plt.show() + + +# Example usage +filename = "lightcone_0000010.bin" +points, num_points = read_binary_points(filename) + +# Display the first few points +x = np.zeros(num_points); +y = np.zeros(num_points); +z = np.zeros(num_points); +for i, point in enumerate(points[:num_points]): # Adjust the slice as needed + x[i], y[i], z[i], vx, vy, vz = point + print(f"{x[i]:.15g}, {y[i]:.15g}, {z[i]:.15g}, {vx:.15g}, {vy:.15g}, {vz:.15g}") + +plot_points(x, y, z) + + diff --git a/Exec/ParticleFilterTest/temp.H b/Exec/ParticleFilterTest/temp.H index 91cac962..ec9d4585 100644 --- a/Exec/ParticleFilterTest/temp.H +++ b/Exec/ParticleFilterTest/temp.H @@ -1 +1,19 @@ +class myParticle { + public: + double x, y, z, vx, vy, vz; +}; extern FILE *file_lightcone_csv; +extern std::vector shell_particles; + +template +inline void SwapEnd(T& var) +{ + char* varArray = reinterpret_cast(&var); + for(long i = 0; i < static_cast(sizeof(var)/2); i++) + std::swap(varArray[sizeof(var) - 1 - i],varArray[i]); +} + +void writeBinaryVTK(const std::string& filename, + const std::vector& shell_particles); +void writeBinarySimple(const std::string& filename, + const std::vector& shell_particles); From 6b3bdccdcdfda1ffbcfa2e0450a6abd7f220e743 Mon Sep 17 00:00:00 2001 From: Mahesh Natarajan Date: Wed, 4 Dec 2024 09:52:06 -0800 Subject: [PATCH 3/4] Writing binary files for lightcones --- 
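With this patch the lightcone shell is written in two formats from Nyx_advance.cpp: a legacy binary VTK POLYDATA file holding positions only, and a headerless "simple" binary file in which each particle is six big-endian float32 values in the order x, y, z, vx, vy, vz, which is the layout Read_BinarySimple.py decodes with dtype '>f4'. For reference, below is a minimal serial C++ reader for that .bin layout. It is a sketch only: the file name is just an example from the Python script, and it assumes the file really is headerless, as writeBinarySimple produces when rank 0 pre-creates an empty file.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    struct ShellRecord { float x, y, z, vx, vy, vz; };

    // Decode one big-endian IEEE-754 float32 independent of host endianness.
    static float be_float(const unsigned char* b) {
        uint32_t u = (uint32_t(b[0]) << 24) | (uint32_t(b[1]) << 16) |
                     (uint32_t(b[2]) <<  8) |  uint32_t(b[3]);
        float f;
        std::memcpy(&f, &u, sizeof f);
        return f;
    }

    int main() {
        std::FILE* fp = std::fopen("lightcone_0000010.bin", "rb");   // example name
        if (!fp) { std::perror("fopen"); return 1; }
        std::vector<ShellRecord> recs;
        unsigned char buf[24];                    // 6 floats * 4 bytes per particle
        while (std::fread(buf, 1, sizeof buf, fp) == sizeof buf) {
            recs.push_back({ be_float(buf +  0), be_float(buf +  4), be_float(buf +  8),
                             be_float(buf + 12), be_float(buf + 16), be_float(buf + 20) });
        }
        std::fclose(fp);
        std::printf("read %zu particles\n", recs.size());
        return 0;
    }
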
.../DarkMatterParticleContainer.cpp | 127 +++++++------- Exec/ParticleFilterTest/LightConeParticle.H | 10 ++ Exec/ParticleFilterTest/LightConeParticle.cpp | 156 +++++++++++++++++ Exec/ParticleFilterTest/Make.package | 3 +- Exec/ParticleFilterTest/Nyx_advance.cpp | 161 +----------------- Exec/ParticleFilterTest/temp.H | 19 --- 6 files changed, 234 insertions(+), 242 deletions(-) create mode 100644 Exec/ParticleFilterTest/LightConeParticle.H create mode 100644 Exec/ParticleFilterTest/LightConeParticle.cpp delete mode 100644 Exec/ParticleFilterTest/temp.H diff --git a/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp b/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp index d6f0dcb2..c3979c3e 100644 --- a/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp +++ b/Exec/ParticleFilterTest/DarkMatterParticleContainer.cpp @@ -7,7 +7,7 @@ using namespace amrex; #include -#include +#include /// These are helper functions used when initializing from a morton-ordered /// binary particle file. @@ -579,6 +579,7 @@ DarkMatterParticleContainer::moveKickDrift (amrex::MultiFab& acceleration, Gpu::streamSynchronize(); } } + delete ShellPC; } void @@ -720,69 +721,69 @@ void store_dm_particle_single (amrex::ParticleContainer<1+AMREX_SPACEDIM, 0>::Su amrex::Real dt_a_cur_inv = dt * a_cur_inv; const GpuArray center({AMREX_D_DECL((phi[0]-plo[0])*0.5,(phi[1]-plo[1])*0.5,(phi[2]-plo[2])*0.5)}); - if (do_move == 1) - { - p2.rdata(0)=p.rdata(0); - bool result=false; - Real lenx = phi[0]-plo[0]; - Real leny = phi[1]-plo[1]; - Real lenz = phi[2]-plo[2]; - int maxind[3]; - maxind[0] = floor((radius_outer+lenx*0.5)/lenx); - maxind[1] = floor((radius_outer+leny*0.5)/leny); - maxind[2] = floor((radius_outer+lenz*0.5)/lenz); - - Real xlen, ylen, zlen; - //printf("Value is %d\n", maxind); - int local_index=-1; - for(int idir=-maxind[0];idir<=maxind[0];idir++) - for(int jdir=-maxind[1];jdir<=maxind[1];jdir++) - for(int kdir=-maxind[2];kdir<=maxind[2];kdir++) - { - xlen = p.pos(0)+(idir)*(phi[0]-plo[0]) - center[0]; - ylen = p.pos(1)+(jdir)*(phi[1]-plo[1]) - center[1]; - zlen = p.pos(2)+(kdir)*(phi[2]-plo[2]) - center[2]; - Real mag = sqrt(xlen*xlen+ylen*ylen+zlen*zlen); - result=result? 
true : (mag>radius_inner && magradius_inner && magradius_inner && magradius_inner && mag shell_particles; + +void writeBinaryVTK(const std::string& filename, + const std::vector& shell_particles); +void writeBinarySimple(const std::string& filename, + const std::vector& shell_particles); diff --git a/Exec/ParticleFilterTest/LightConeParticle.cpp b/Exec/ParticleFilterTest/LightConeParticle.cpp new file mode 100644 index 00000000..4ae779e5 --- /dev/null +++ b/Exec/ParticleFilterTest/LightConeParticle.cpp @@ -0,0 +1,156 @@ +#include +#include +#include +#include +#include + +#include + +void SwapEnd(float& val) { + // Swap endianess if necessary + char* bytes = reinterpret_cast(&val); + std::swap(bytes[0], bytes[3]); + std::swap(bytes[1], bytes[2]); +} + +void writeBinaryVTK(const std::string& filename, const std::vector& particles) { + int rank, size; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + size_t local_num_particles = particles.size(); + size_t total_num_particles = 0; + + // Get total particles across all ranks + MPI_Reduce(&local_num_particles, &total_num_particles, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD); + + // Compute offset for this rank's data + size_t offset = 0; + MPI_Exscan(&local_num_particles, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + + // Header handling + size_t header_size = 0; + + if (rank == 0) { + std::ofstream file(filename, std::ios::binary); + if (!file) { + std::cerr << "Error: Could not open file " << filename << "\n"; + MPI_Abort(MPI_COMM_WORLD, 1); + } + + // Write the header + file << "# vtk DataFile Version 2.0\n"; + file << "Particle Cloud Data\n"; + file << "BINARY\n"; + file << "DATASET POLYDATA\n"; + file << "POINTS " << total_num_particles << " float\n"; + + // Determine header size + file.seekp(0, std::ios::end); + header_size = file.tellp(); + file.close(); + } + + // Broadcast the header size to all ranks + MPI_Bcast(&header_size, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD); + + // Use MPI collective I/O for binary data + MPI_File mpi_file; + MPI_File_open(MPI_COMM_WORLD, filename.c_str(), + MPI_MODE_WRONLY | MPI_MODE_APPEND, MPI_INFO_NULL, &mpi_file); + + // Compute byte offset for this rank + size_t byte_offset = header_size + sizeof(float) * 3 * offset; + + // Prepare local data + std::vector data(3 * local_num_particles); + for (size_t i = 0; i < local_num_particles; ++i) { + data[3 * i] = particles[i].x; + data[3 * i + 1] = particles[i].y; + data[3 * i + 2] = particles[i].z; + + // Convert to big-endian if needed + SwapEnd(data[3 * i]); + SwapEnd(data[3 * i + 1]); + SwapEnd(data[3 * i + 2]); + } + + // Write particle data collectively + MPI_File_write_at_all(mpi_file, byte_offset, data.data(), data.size(), MPI_FLOAT, MPI_STATUS_IGNORE); + + MPI_File_close(&mpi_file); + + if (rank == 0) { + std::cout << "Successfully wrote VTK file: " << filename << "\n"; + } +} + +void writeBinarySimple(const std::string& filename, const std::vector& particles) { + int rank, size; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + + size_t local_num_particles = particles.size(); + size_t total_num_particles = 0; + + // Get total particles across all ranks + MPI_Reduce(&local_num_particles, &total_num_particles, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD); + + // Compute offset for this rank's data + size_t offset = 0; + MPI_Exscan(&local_num_particles, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); + + // Header handling + size_t header_size = 0; + + if 
(rank == 0) { + std::ofstream file(filename, std::ios::binary); + if (!file) { + std::cerr << "Error: Could not open file " << filename << "\n"; + MPI_Abort(MPI_COMM_WORLD, 1); + } + file.seekp(0, std::ios::end); + header_size = file.tellp(); + file.close(); + } + + // Broadcast the header size to all ranks + MPI_Bcast(&header_size, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD); + + // Use MPI collective I/O for binary data + MPI_File mpi_file; + MPI_File_open(MPI_COMM_WORLD, filename.c_str(), + MPI_MODE_WRONLY | MPI_MODE_APPEND, MPI_INFO_NULL, &mpi_file); + + // Compute byte offset for this rank + size_t byte_offset = header_size + sizeof(float) * 6 * offset; + + // Prepare local data + std::vector data(6 * local_num_particles); + for (size_t i = 0; i < local_num_particles; ++i) { + data[6 * i] = particles[i].x; + data[6 * i + 1] = particles[i].y; + data[6 * i + 2] = particles[i].z; + data[6 * i + 3] = particles[i].vx; + data[6 * i + 4] = particles[i].vy; + data[6 * i + 5] = particles[i].vz; + + // Convert to big-endian if needed + SwapEnd(data[6 * i]); + SwapEnd(data[6 * i + 1]); + SwapEnd(data[6 * i + 2]); + SwapEnd(data[6 * i + 3]); + SwapEnd(data[6 * i + 4]); + SwapEnd(data[6 * i + 5]); + } + + // Write particle data collectively + MPI_File_write_at_all(mpi_file, byte_offset, data.data(), data.size(), MPI_FLOAT, MPI_STATUS_IGNORE); + + MPI_File_close(&mpi_file); + + if (rank == 0) { + std::cout << "Successfully wrote VTK file: " << filename << "\n"; + } +} + diff --git a/Exec/ParticleFilterTest/Make.package b/Exec/ParticleFilterTest/Make.package index e784fe42..7ac175cd 100644 --- a/Exec/ParticleFilterTest/Make.package +++ b/Exec/ParticleFilterTest/Make.package @@ -1,3 +1,4 @@ CEXE_sources += Prob.cpp CEXE_sources += Prob_error.cpp -CEXE_headers += temp.H +CEXE_sources += LightConeParticle.cpp +CEXE_headers += LightConeParticle.H diff --git a/Exec/ParticleFilterTest/Nyx_advance.cpp b/Exec/ParticleFilterTest/Nyx_advance.cpp index dbd141e7..9c820350 100644 --- a/Exec/ParticleFilterTest/Nyx_advance.cpp +++ b/Exec/ParticleFilterTest/Nyx_advance.cpp @@ -3,164 +3,13 @@ #include #include #include -#include +#include using namespace amrex; using std::string; -FILE *file_lightcone_csv; -std::vector shell_particles; - - -void SwapEnd(float& val) { - // Swap endianess if necessary - char* bytes = reinterpret_cast(&val); - std::swap(bytes[0], bytes[3]); - std::swap(bytes[1], bytes[2]); -} - -void writeBinaryVTK(const std::string& filename, const std::vector& particles) { - int rank, size; - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - MPI_Comm_size(MPI_COMM_WORLD, &size); - - size_t local_num_particles = particles.size(); - size_t total_num_particles = 0; - - // Get total particles across all ranks - MPI_Reduce(&local_num_particles, &total_num_particles, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD); - - // Compute offset for this rank's data - size_t offset = 0; - MPI_Exscan(&local_num_particles, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); - - // Header handling - size_t header_size = 0; - - if (rank == 0) { - std::ofstream file(filename, std::ios::binary); - if (!file) { - std::cerr << "Error: Could not open file " << filename << "\n"; - MPI_Abort(MPI_COMM_WORLD, 1); - } - - // Write the header - file << "# vtk DataFile Version 2.0\n"; - file << "Particle Cloud Data\n"; - file << "BINARY\n"; - file << "DATASET POLYDATA\n"; - file << "POINTS " << total_num_particles << " float\n"; - - // Determine header size - file.seekp(0, std::ios::end); - header_size = file.tellp(); - file.close(); - 
} - - // Broadcast the header size to all ranks - MPI_Bcast(&header_size, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD); - - // Use MPI collective I/O for binary data - MPI_File mpi_file; - MPI_File_open(MPI_COMM_WORLD, filename.c_str(), - MPI_MODE_WRONLY | MPI_MODE_APPEND, MPI_INFO_NULL, &mpi_file); - - // Compute byte offset for this rank - size_t byte_offset = header_size + sizeof(float) * 3 * offset; - - // Prepare local data - std::vector data(3 * local_num_particles); - for (size_t i = 0; i < local_num_particles; ++i) { - data[3 * i] = particles[i].x; - data[3 * i + 1] = particles[i].y; - data[3 * i + 2] = particles[i].z; - - // Convert to big-endian if needed - SwapEnd(data[3 * i]); - SwapEnd(data[3 * i + 1]); - SwapEnd(data[3 * i + 2]); - } - - // Write particle data collectively - MPI_File_write_at_all(mpi_file, byte_offset, data.data(), data.size(), MPI_FLOAT, MPI_STATUS_IGNORE); - - MPI_File_close(&mpi_file); - - if (rank == 0) { - std::cout << "Successfully wrote VTK file: " << filename << "\n"; - } -} - - -void writeBinarySimple(const std::string& filename, const std::vector& particles) { - int rank, size; - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - MPI_Comm_size(MPI_COMM_WORLD, &size); - - size_t local_num_particles = particles.size(); - size_t total_num_particles = 0; - - // Get total particles across all ranks - MPI_Reduce(&local_num_particles, &total_num_particles, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD); - - // Compute offset for this rank's data - size_t offset = 0; - MPI_Exscan(&local_num_particles, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); - - // Header handling - size_t header_size = 0; - - if (rank == 0) { - std::ofstream file(filename, std::ios::binary); - if (!file) { - std::cerr << "Error: Could not open file " << filename << "\n"; - MPI_Abort(MPI_COMM_WORLD, 1); - } - file.seekp(0, std::ios::end); - header_size = file.tellp(); - file.close(); - } - - // Broadcast the header size to all ranks - MPI_Bcast(&header_size, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD); - - // Use MPI collective I/O for binary data - MPI_File mpi_file; - MPI_File_open(MPI_COMM_WORLD, filename.c_str(), - MPI_MODE_WRONLY | MPI_MODE_APPEND, MPI_INFO_NULL, &mpi_file); - - // Compute byte offset for this rank - size_t byte_offset = header_size + sizeof(float) * 6 * offset; - - // Prepare local data - std::vector data(6 * local_num_particles); - for (size_t i = 0; i < local_num_particles; ++i) { - data[6 * i] = particles[i].x; - data[6 * i + 1] = particles[i].y; - data[6 * i + 2] = particles[i].z; - data[6 * i + 3] = particles[i].vx; - data[6 * i + 4] = particles[i].vy; - data[6 * i + 5] = particles[i].vz; - - // Convert to big-endian if needed - SwapEnd(data[6 * i]); - SwapEnd(data[6 * i + 1]); - SwapEnd(data[6 * i + 2]); - SwapEnd(data[6 * i + 3]); - SwapEnd(data[6 * i + 4]); - SwapEnd(data[6 * i + 5]); - } - - // Write particle data collectively - MPI_File_write_at_all(mpi_file, byte_offset, data.data(), data.size(), MPI_FLOAT, MPI_STATUS_IGNORE); - - MPI_File_close(&mpi_file); - - if (rank == 0) { - std::cout << "Successfully wrote VTK file: " << filename << "\n"; - } -} +std::vector shell_particles; Real Nyx::advance (Real time, @@ -398,13 +247,8 @@ Nyx::advance_hydro_plus_particles (Real time, shell_particles.shrink_to_fit(); std::string filename=Concatenate("lightcone_", int(100*(1/a_old-1)), 7); - //std::string filename_csv = filename+".csv"; std::string filename_vtk=filename+".vtk"; std::string filename_bin=filename+".bin"; - //file_lightcone_csv = 
fopen(filename_csv.c_str(),"w"); - //if(ParallelDescriptor::IOProcessor()) { - // fprintf(file_lightcone_csv,"%s, %s, %s, %s, %s, %s\n", "x", "y", "z", "vx", "vy", "vz"); - //} for (int lev = level; lev <= finest_level_to_advance; lev++) { @@ -426,7 +270,6 @@ Nyx::advance_hydro_plus_particles (Real time, } Nyx::theActiveParticles()[i]->moveKickDrift(grav_vec_old, lev, time, dt, a_old, a_half, where_width, radius_inner, radius_outer); } - //fclose(file_lightcone_csv); writeBinaryVTK(filename_vtk, shell_particles); writeBinarySimple(filename_bin, shell_particles); diff --git a/Exec/ParticleFilterTest/temp.H b/Exec/ParticleFilterTest/temp.H deleted file mode 100644 index ec9d4585..00000000 --- a/Exec/ParticleFilterTest/temp.H +++ /dev/null @@ -1,19 +0,0 @@ -class myParticle { - public: - double x, y, z, vx, vy, vz; -}; -extern FILE *file_lightcone_csv; -extern std::vector shell_particles; - -template -inline void SwapEnd(T& var) -{ - char* varArray = reinterpret_cast(&var); - for(long i = 0; i < static_cast(sizeof(var)/2); i++) - std::swap(varArray[sizeof(var) - 1 - i],varArray[i]); -} - -void writeBinaryVTK(const std::string& filename, - const std::vector& shell_particles); -void writeBinarySimple(const std::string& filename, - const std::vector& shell_particles); From 2e5144adb0e79368edd0c39797b016415c70a935 Mon Sep 17 00:00:00 2001 From: Mahesh Natarajan Date: Wed, 4 Dec 2024 09:55:11 -0800 Subject: [PATCH 4/4] Removing unwanted file --- .../Nyx_advance_TryMPIButFail.cpp | 719 ------------------ 1 file changed, 719 deletions(-) delete mode 100644 Exec/ParticleFilterTest/Nyx_advance_TryMPIButFail.cpp diff --git a/Exec/ParticleFilterTest/Nyx_advance_TryMPIButFail.cpp b/Exec/ParticleFilterTest/Nyx_advance_TryMPIButFail.cpp deleted file mode 100644 index 2f4da350..00000000 --- a/Exec/ParticleFilterTest/Nyx_advance_TryMPIButFail.cpp +++ /dev/null @@ -1,719 +0,0 @@ - -#include -#include -#include -#include -#include - -using namespace amrex; - -using std::string; - -FILE *file_lightcone_csv; -std::vector shell_particles; - -void writeBinaryVTK(const std::string& filename, - const std::vector& shell_particles) -{ - - // Calculate the number of particles and total particles across all processes - size_t local_num_particles = shell_particles.size(); - size_t total_num_particles = 0; - MPI_Reduce(&local_num_particles, &total_num_particles, 1, MPI_UNSIGNED_LONG, MPI_SUM, 0, MPI_COMM_WORLD); - - // Compute offset for each rank - size_t offset = 0; - MPI_Exscan(&local_num_particles, &offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD); - - //std::cout << "I am rank " << rank << " " << total_num_particles << " " << offset << "\n"; - //exit(0); - - std::ofstream file(filename, std::ios::binary); - - if (!file) { - std::cerr << "Error: Could not open file " << filename << " for writing." << std::endl; - return; - } - - if (!file.is_open()) { - std::cerr << "Error: Unable to open file." << std::endl; - MPI_Abort(MPI_COMM_WORLD, 1); // Abort if the file cannot be opened - } - int myrank=amrex::ParallelDescriptor::MyProc(); - - MPI_Barrier(MPI_COMM_WORLD); // Ensure the file header is written before continuing - // VTK header for legacy format - if(myrank==0){ - std::cout << "Writing header ... 
" << "\n"; - file << "# vtk DataFile Version 2.0\n"; - file << "Particle Cloud Data\n"; - file << "BINARY\n"; - file << "DATASET POLYDATA\n"; - file << "POINTS " << total_num_particles << " float\n"; - //file.flush(); // Ensure data is written to disk - //std::fflush(nullptr); // Flush all system-level buffers - - - // Seek to the end of the header - file.seekp(0, std::ios::end); - } - MPI_Barrier(MPI_COMM_WORLD); // Ensure the file header is written before continuing - - // Write particles in binary format - // Compute the starting byte position for this rank's data - - // After writing the header, seek to the end of the file to determine the header size - file.seekp(0, std::ios::end); // Move the file pointer to the end - size_t header_size = file.tellp(); // Get the current file pointer position (i.e., header size) - - size_t byte_offset = header_size + sizeof(float) * 3 * offset; - - file.seekp(byte_offset, std::ios::beg); - for (const auto& point : shell_particles) { - float x = point.x, y = point.y, z = point.z; - - SwapEnd(x); - SwapEnd(y); - SwapEnd(z); - - // Write binary data - file.write(reinterpret_cast(&x), sizeof(float)); - file.write(reinterpret_cast(&y), sizeof(float)); - file.write(reinterpret_cast(&z), sizeof(float)); - } - - MPI_Barrier(MPI_COMM_WORLD); // Ensure the file header is written before continuing - - if(myrank==0){ - file.close(); - } - - if(myrank==0) { - if (!file.good()) { - std::cerr << "Error: Writing to file " << filename << " failed." << std::endl; - } else { - std::cout << "Successfully wrote " << filename << std::endl; - } - } -} - - -Real -Nyx::advance (Real time, - Real dt, - int iteration, - int ncycle) - - // Arguments: - // time : the current simulation time - // dt : the timestep to advance (e.g., go from time to - // time + dt) - // iteration : where we are in the current AMR subcycle. Each - // level will take a number of steps to reach the - // final time of the coarser level below it. This - // counter starts at 1 - // ncycle : the number of subcycles at this level - -{ - - const std::string region_name = ncycle > 1 ? "R::Nyx::advance" : "R::Nyx::advance::STEP1"; - BL_PROFILE_REGION(region_name); - MultiFab::RegionTag amrlevel_tag("AmrLevel_Level_" + std::to_string(level)); - -#ifdef NO_HYDRO - - return advance_particles_only(time, dt, iteration, ncycle); - -#else - if (do_hydro) - { - if (Nyx::theActiveParticles().size() > 0) - { -#ifndef AGN - if (do_dm_particles) -#endif - { - return advance_hydro_plus_particles(time, dt, iteration, ncycle); - } - } - else - { - return advance_hydro(time, dt, iteration, ncycle); - } - } - else if(do_dm_particles) - return advance_particles_only(time, dt, iteration, ncycle); - else - return advance_heatcool(time, dt, iteration, ncycle); -#endif - return 0; -} - -// -// This will advance the Nyx AmrLevel. -// If no subcycling is used, this will be a full multilevel advance at -// at level 0 and a finer levels will be skipped over. -// If subcycling is used, this will check if finer levels are subcycled -// relative to this level. All levels that are not subcycled will be -// advanced in a multilevel advance with this level and be skipped over -// subsequently. If the next level is subcycled relative to this one, -// then this will only be a single level advance. 
-// -#ifndef NO_HYDRO -#ifdef AMREX_PARTICLES -Real -Nyx::advance_hydro_plus_particles (Real time, - Real dt, - int iteration, - int ncycle) -{ - - // A particle in cell (i) can affect cell values in (i-1) to (i+1) - int stencil_deposition_width = 1; - - // A particle in cell (i) may need information from cell values in (i-1) to (i+1) - // to update its position (typically via interpolation of the acceleration from the grid) - int stencil_interpolation_width = 1; - - // A particle that starts in cell (i + ncycle) can reach - // cell (i) in ncycle number of steps .. after "iteration" steps - // the particle has to be within (i + ncycle+1-iteration) to reach cell (i) - // in the remaining (ncycle-iteration) steps - - // *** ghost_width *** is used - // *) to set how many cells are used to hold ghost particles i.e copies of particles - // that live on (level-1) can affect the grid over all of the ncycle steps. - // We define ghost cells at the coarser level to cover all iterations so - // we can't reduce this number as iteration increases. - - int ghost_width = ncycle + stencil_deposition_width; - - // *** where_width *** is used - // *) to set how many cells the Where call in moveKickDrift tests = - // ghost_width + (1-iteration) - 1: - // the minus 1 arises because this occurs *after* the move - - int where_width = ghost_width + (1-iteration) - 1; - - // *** grav_n_grow *** is used - // *) to determine how many ghost cells we need to fill in the MultiFab from - // which the particle interpolates its acceleration - // *) to set how many cells the Where call in moveKickDrift tests = (grav.nGrow()-2). - // *) the (1-iteration) arises because the ghost particles are created on the coarser - // level which means in iteration 2 the ghost particles may have moved 1 additional cell along - - int grav_n_grow = ghost_width + (1-iteration) + (iteration-1) + - stencil_interpolation_width ; - - { - BL_PROFILE_REGION("R::Nyx::advance_hydro_plus_particles"); - BL_PROFILE("Nyx::advance_hydro_plus_particles()"); - // Sanity checks - if (!do_hydro) - amrex::Abort("In `advance_hydro_plus_particles` but `do_hydro` not true"); - - if (Nyx::theActiveParticles().size() <= 0) - amrex::Abort("In `advance_hydro_plus_particles` but no active particles"); - - const int finest_level = parent->finestLevel(); - int finest_level_to_advance; - bool nosub = !parent->subCycle(); - - if (nosub) - { - if (level > 0) - return dt; - - finest_level_to_advance = finest_level; - } - else - { - // This level was advanced by a previous multilevel advance. - if (level > 0 && ncycle == 1) - return dt; - - // Find the finest level to advance - int lev = level; - while(lev < finest_level && parent->nCycle(lev+1) == 1) - lev++; - finest_level_to_advance = lev; - - // We must setup virtual and Ghost Particles - // - // Setup the virtual particles that represent finer level particles - // - setup_virtual_particles(); - // - // Setup ghost particles for use in finer levels. Note that Ghost particles - // that will be used by this level have already been created, the - // particles being set here are only used by finer levels. - // - for(int lev = level; lev <= finest_level_to_advance && lev < finest_level; lev++) - { - get_level(lev).setup_ghost_particles(ghost_width); - } - } - - Real dt_lev; - - // - // Move current data to previous, clear current. - // Don't do this if a coarser level has done this already. 
-    //
-    // Move current data to previous, clear current.
-    // Don't do this if a coarser level has done this already.
-    //
-    if (level == 0 || iteration > 1)
-    {
-
-        for (int lev = level; lev <= finest_level; lev++)
-        {
-            dt_lev = parent->dtLevel(lev);
-            for (int k = 0; k < NUM_STATE_TYPE; k++)
-            {
-                get_level(lev).state[k].allocOldData();
-                get_level(lev).state[k].swapTimeLevels(dt_lev);
-            }
-        }
-    }
-
-    const Real prev_time = state[State_Type].prevTime();
-    const Real cur_time  = state[State_Type].curTime();
-    const Real a_old = get_comoving_a(prev_time);
-    const Real a_new = get_comoving_a(cur_time);
-
-    if (do_grav) {
-    //
-    // We now do a multilevel solve for old Gravity. This goes to the
-    // finest level regardless of subcycling behavior. Consequently,
-    // if we are subcycling we skip this step on the first iteration of
-    // finer levels.
-    BL_PROFILE_VAR("solve_for_old_phi", solve_for_old_phi);
-    if (level == 0 || iteration > 1)
-    {
-
-        MultiFab::RegionTag amrGrav_tag("Gravity_" + std::to_string(level));
-
-        // fix fluxes on finer grids
-        if (do_reflux)
-        {
-            for (int lev = level; lev < finest_level; lev++)
-            {
-                gravity->zero_phi_flux_reg(lev + 1);
-            }
-        }
-
-        // swap grav data
-        for (int lev = level; lev <= finest_level; lev++)
-            get_level(lev).gravity->swap_time_levels(lev);
-
-        //
-        // Solve for phi using the previous phi as a guess.
-        //
-        int use_previous_phi_as_guess = 1;
-        int ngrow_for_solve = iteration + stencil_deposition_width;
-        gravity->multilevel_solve_for_old_phi(level, finest_level,
-                                              ngrow_for_solve,
-                                              use_previous_phi_as_guess);
-    }
-    BL_PROFILE_VAR_STOP(solve_for_old_phi);
-
-    {
-    //
-    // Advance Particles
-    //
-    if (Nyx::theActiveParticles().size() > 0)
-    {
-        // Advance the particle velocities to the half-time and the positions to the new time
-        // We use the cell-centered gravity to correctly interpolate onto particle locations
-        const Real a_half = 0.5 * (a_old + a_new);
-
-        if (particle_verbose && ParallelDescriptor::IOProcessor())
-            std::cout << "moveKickDrift ... updating particle positions and velocity\n";
-
-        MultiFab::RegionTag amrMoveKickDrift_tag("MoveKickDrift_" + std::to_string(level));
-
-        shell_particles.clear();
-        shell_particles.shrink_to_fit();
-
-        std::string filename=Concatenate("lightcone_", int(100*(1/a_old-1)), 7);
-        std::string filename_csv = filename+".csv";
-        filename=filename+".vtk";
-        file_lightcone_csv = fopen(filename_csv.c_str(),"w");
-        if(ParallelDescriptor::IOProcessor()) {
-            fprintf(file_lightcone_csv,"%s, %s, %s, %s, %s, %s\n", "x", "y", "z", "vx", "vy", "vz");
-        }
-
-        for (int lev = level; lev <= finest_level_to_advance; lev++)
-        {
-            // We need grav_n_grow grow cells to track boundary particles
-            const auto& ba = get_level(lev).get_new_data(State_Type).boxArray();
-            const auto& dm = get_level(lev).get_new_data(State_Type).DistributionMap();
-            MultiFab grav_vec_old(ba, dm, AMREX_SPACEDIM, grav_n_grow);
-            get_level(lev).gravity->get_old_grav_vector(lev, grav_vec_old, time);
-
-            for (int i = 0; i < Nyx::theActiveParticles().size(); i++) {
-                Real radius_inner = -1.e34;
-                Real radius_outer = -1.e34;
-                Real z_old = 1/a_old-1;
-                if(z_old <= lightcone_start_z && z_old >= lightcone_end_z) {
-                    integrate_distance_given_a(a_old, 1.0, radius_outer);
-                    integrate_distance_given_a(a_new, 1.0, radius_inner);
-                    Print() << radius_inner << "\t" << radius_outer << "\n";
-                }
-                Nyx::theActiveParticles()[i]->moveKickDrift(grav_vec_old, lev, time, dt, a_old, a_half, where_width, radius_inner, radius_outer);
-            }
-            fclose(file_lightcone_csv);
-            writeBinaryVTK(filename, shell_particles);
-
-
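The lightcone test above keys off z_old = 1/a_old - 1 and the two comoving distances returned by integrate_distance_given_a(): the distance from a_old to a = 1 is the outer shell radius for this step and the distance from a_new is the inner one, so the particles stored are those crossing that shell during the step. A rough stand-alone sketch of those radii (the cosmology, the units, and the distance_to_a1 name are assumptions for illustration; Nyx's integrate_distance_given_a() is the real routine):

#include <cmath>
#include <cstdio>

// Comoving distance from scale factor a to a = 1, D(a) = integral of c da / (a^2 H(a)),
// evaluated with a crude midpoint rule for a flat matter + Lambda cosmology.
double distance_to_a1 (double a, double H0 = 70.0, double Om = 0.3, double OL = 0.7)
{
    const double c  = 299792.458;            // km/s, so the distance comes out in Mpc
    const int    n  = 100000;
    const double da = (1.0 - a) / n;
    double d = 0.0;
    for (int i = 0; i < n; ++i) {
        double am = a + (i + 0.5) * da;
        double H  = H0 * std::sqrt(Om / (am*am*am) + OL);
        d += c * da / (am * am * H);
    }
    return d;
}

int main ()
{
    double a_old = 0.50, a_new = 0.505;                 // one coarse time step
    double z_old = 1.0 / a_old - 1.0;                   // redshift at the old time
    double radius_outer = distance_to_a1(a_old);        // shell radius at the start of the step
    double radius_inner = distance_to_a1(a_new);        // shell radius at the end of the step
    std::printf("z_old = %.3f  shell = [%.1f, %.1f] Mpc\n", z_old, radius_inner, radius_outer);
    return 0;
}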
-            // Only need the coarsest virtual particles here.
-            if (lev == level && level < finest_level)
-                for (int i = 0; i < Nyx::theVirtualParticles().size(); i++)
-                    Nyx::theVirtualParticles()[i]->moveKickDrift(grav_vec_old, lev, time, dt, a_old, a_half, where_width);
-
-            // Miiiight need all Ghosts
-            for (int i = 0; i < Nyx::theGhostParticles().size(); i++)
-                Nyx::theGhostParticles()[i]->moveKickDrift(grav_vec_old, lev, time, dt, a_old, a_half, where_width);
-#ifdef NEUTRINO_DARK_PARTICLES
-            for (int i = 0; i < Nyx::theActiveParticles().size() && neutrino_cfl >= 1; i++)
-                Nyx::theActiveParticles()[i]->Redistribute();
-#endif
-        }
-    } // if active particles
-    } // lsg
-    } // if (do_grav)
-
-    //
-    // Call the hydro advance at each level to be advanced
-    //
-    BL_PROFILE_VAR("just_the_hydro", just_the_hydro);
-    {
-
-    MultiFab::RegionTag amrhydro_tag("Hydro_" + std::to_string(level));
-
-    for (int lev = level; lev <= finest_level_to_advance; lev++)
-    {
-#ifdef SDC
-        if (sdc_split > 0 && (strang_restart_from_sdc == 0)) {
-            get_level(lev).sdc_hydro(time, dt, a_old, a_new);
-        } else {
-            get_level(lev).strang_hydro(time, dt, a_old, a_new);
-        }
-#else
-        get_level(lev).strang_hydro(time, dt, a_old, a_new);
-#endif
-    }
-    }
-    BL_PROFILE_VAR_STOP(just_the_hydro);
-
-    {
-    //
-    // We must reflux before doing the next gravity solve
-    //
-    if (do_reflux)
-    {
-        for (int lev = level; lev < finest_level_to_advance; lev++)
-        {
-            get_level(lev).reflux();
-        }
-    }
-
-    // Always average down the new state from finer to coarser.
-    for (int lev = finest_level_to_advance-1; lev >= level; lev--)
-    {
-        get_level(lev).average_down( State_Type);
-        get_level(lev).average_down(DiagEOS_Type);
-    }
-
-    //
-    // Here we use the "old" phi from the current time step as a guess for this
-    // solve
-    //
-    if (do_grav)
-    {
-        for (int lev = level; lev <= finest_level_to_advance; lev++)
-        {
-            MultiFab::Copy(parent->getLevel(lev).get_new_data(PhiGrav_Type),
-                           parent->getLevel(lev).get_old_data(PhiGrav_Type),
-                           0, 0, 1, 0);
-        }
-
-        // Solve for new Gravity
-        BL_PROFILE_VAR("solve_for_new_phi", solve_for_new_phi);
-        int use_previous_phi_as_guess = 1;
-        if (finest_level_to_advance > level)
-        {
-            MultiFab::RegionTag amrGrav_tag("Gravity_" + std::to_string(level));
-            // The particle may be as many as "iteration" ghost cells out
-            int ngrow_for_solve = iteration + stencil_deposition_width;
-            gravity->multilevel_solve_for_new_phi(level, finest_level_to_advance,
-                                                  ngrow_for_solve,
-                                                  use_previous_phi_as_guess);
-        }
-        else
-        {
-            MultiFab::RegionTag amrGrav_tag("Gravity_" + std::to_string(level));
-            int fill_interior = 0;
-            gravity->solve_for_new_phi(level,get_new_data(PhiGrav_Type),
-                                       gravity->get_grad_phi_curr(level),
-                                       fill_interior, grav_n_grow);
-        }
-        BL_PROFILE_VAR_STOP(solve_for_new_phi);
-
-        // Reflux
-        if (do_reflux)
-        {
-            MultiFab::RegionTag amrGrav_tag("Gravity_" + std::to_string(level));
-            for (int lev = level; lev <= finest_level_to_advance; lev++)
-            {
-                gravity->add_to_fluxes(lev, iteration, ncycle);
-            }
-        }
-
-        //
-        // Now do corrector part of source term update
-        //
-        for (int lev = level; lev <= finest_level_to_advance; lev++)
-        {
-            MultiFab::RegionTag amrGrav_tag("Gravity_" + std::to_string(lev));
-
-            // Now do corrector part of source term update
-            correct_gsrc(lev,time,prev_time,cur_time,dt);
-
-            MultiFab& S_new = get_level(lev).get_new_data(State_Type);
-            MultiFab& D_new = get_level(lev).get_new_data(DiagEOS_Type);
-
-            // First reset internal energy before call to compute_temp
-            MultiFab reset_e_src(S_new.boxArray(), S_new.DistributionMap(), 1, NUM_GROW);
-            reset_e_src.setVal(0.0);
-
-            get_level(lev).reset_internal_energy(S_new,D_new,reset_e_src);
-
-            get_level(lev).compute_new_temp(S_new,D_new);
-        }
-
-        // Must average down again after doing the gravity correction;
-        // always average down from finer to coarser.
-        // Here we average down both the new state and phi and gravity.
-        for (int lev = finest_level_to_advance-1; lev >= level; lev--)
-            get_level(lev).average_down();
-
-        if (Nyx::theActiveParticles().size() > 0)
-        {
-            // Advance the particle velocities by dt/2 to the new time. We use the
-            // cell-centered gravity to correctly interpolate onto particle
-            // locations.
-            MultiFab::RegionTag amrMoveKickDrift_tag("MoveKick_" + std::to_string(level));
-            const Real a_half = 0.5 * (a_old + a_new);
-
-            if (particle_verbose && ParallelDescriptor::IOProcessor())
-                std::cout << "moveKick ... updating velocity only\n";
-
-            for (int lev = level; lev <= finest_level_to_advance; lev++)
-            {
-                const auto& ba = get_level(lev).get_new_data(State_Type).boxArray();
-                const auto& dm = get_level(lev).get_new_data(State_Type).DistributionMap();
-                MultiFab grav_vec_new(ba, dm, AMREX_SPACEDIM, grav_n_grow);
-                get_level(lev).gravity->get_new_grav_vector(lev, grav_vec_new, cur_time);
-
-                for (int i = 0; i < Nyx::theActiveParticles().size(); i++)
-                    Nyx::theActiveParticles()[i]->moveKick(grav_vec_new, lev, time, dt, a_new, a_half);
-
-                // Virtual particles will be recreated, so we need not kick them.
-
-                // Ghost particles need to be kicked except during the final iteration.
-                if (iteration != ncycle)
-                    for (int i = 0; i < Nyx::theGhostParticles().size(); i++)
-                        Nyx::theGhostParticles()[i]->moveKick(grav_vec_new, lev, time, dt, a_new, a_half);
-            }
-        }
-    } // do_grav
-
-    //
-    // Synchronize Energies
-    //
-    for (int lev = level; lev <= finest_level_to_advance; lev++)
-    {
-        MultiFab::RegionTag amrReset_tag("Reset_" + std::to_string(lev));
-        MultiFab& S_new = get_level(lev).get_new_data(State_Type);
-        MultiFab& D_new = get_level(lev).get_new_data(DiagEOS_Type);
-
-        get_level(lev).reset_internal_energy_nostore(S_new,D_new);
-
-    }
-    }
-    }
-    // BL_PROFILE_REGION_STOP("R::Nyx::advance_hydro_plus_particles");
-
-    // Redistribution happens in post_timestep
-    return dt;
-}
-#endif
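The particle update in advance_hydro_plus_particles is a kick-drift-kick split: moveKickDrift applies a half-step velocity kick from the old-time gravity and then drifts the positions, and moveKick closes the step with a second half kick from the new-time gravity (Nyx additionally carries the comoving a_old / a_half / a_new factors). A bare-bones sketch of the same splitting for one particle, with those factors omitted:

#include <cstdio>

struct Particle { double x, v; };

// First half of the step: half kick with the old acceleration, then a full drift.
void move_kick_drift (Particle& p, double g_old, double dt)
{
    p.v += 0.5 * dt * g_old;
    p.x += dt * p.v;
}

// Second half of the step: closing half kick with the new acceleration.
void move_kick (Particle& p, double g_new, double dt)
{
    p.v += 0.5 * dt * g_new;
}

int main ()
{
    Particle p{0.0, 1.0};
    double dt = 0.1, g_old = -1.0, g_new = -0.9;
    move_kick_drift(p, g_old, dt);   // analogue of moveKickDrift above
    move_kick(p, g_new, dt);         // analogue of moveKick above
    std::printf("x = %g  v = %g\n", p.x, p.v);
    return 0;
}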
-
-Real
-Nyx::advance_heatcool (Real time,
-                       Real dt,
-                       int  iteration,
-                       int  ncycle)
-{
-    BL_PROFILE("Nyx::advance_heatcool()");
-#ifdef HEATCOOL
-    amrex::Print()<<"Using advance_heatcool since hydro and dm_particles are both off"<<std::endl;
...
-#endif
-    return dt;
-}
-
-Real
-Nyx::advance_hydro (Real time,
-                    Real dt,
-                    int  iteration,
-                    int  ncycle)
-{
...
-    if (do_grav)
-    {
-        const int finest_level = parent->finestLevel();
-        if (do_reflux && level < finest_level)
-            gravity->zero_phi_flux_reg(level + 1);
-        gravity->swap_time_levels(level);
-    }
-
-    if (do_forcing)
-        forcing->evolve(dt);
-
-    // Call the hydro advance itself
-    BL_PROFILE_VAR("just_the_hydro", just_the_hydro);
-#ifdef SDC
-    if (sdc_split > 0)
-    {
-        sdc_hydro(time, dt, a_old, a_new);
-    } else {
-        strang_hydro(time, dt, a_old, a_new);
-    }
-#else
-    strang_hydro(time, dt, a_old, a_new);
-#endif
-    BL_PROFILE_VAR_STOP(just_the_hydro);
-
-    if (do_grav)
-    {
-        if (verbose && ParallelDescriptor::IOProcessor())
-            std::cout << "\n... new-time level solve at level " << level << '\n';
-
-        //
-        // Solve for new phi
-        // Here we use the "old" phi from the current time step as a guess for this solve
-        //
-        MultiFab::Copy(get_new_data(PhiGrav_Type),get_old_data(PhiGrav_Type),0,0,1,0);
-        int fill_interior = 0;
-        gravity->solve_for_new_phi(level,get_new_data(PhiGrav_Type),
-                                   gravity->get_grad_phi_curr(level),fill_interior);
-        if (do_reflux)
-            gravity->add_to_fluxes(level,iteration,ncycle);
-
-        // Now do corrector part of source term update
-        correct_gsrc(level,time,prev_time,cur_time,dt);
-    }
-
-    MultiFab& S_new = get_new_data(State_Type);
-    MultiFab& D_new = get_new_data(DiagEOS_Type);
-
-    MultiFab reset_e_src(S_new.boxArray(), S_new.DistributionMap(), 1, NUM_GROW);
-    reset_e_src.setVal(0.0);
-
-    // First reset internal energy before call to compute_temp
-    reset_internal_energy(S_new,D_new,reset_e_src);
-    compute_new_temp(S_new,D_new);
-
-    return dt;
-}
-#endif
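correct_gsrc() above is the corrector half of a predictor-corrector treatment of the gravity source: the predictor applied the old-time source over the full step, and the corrector swaps half of it for the new-time value so that the accumulated source is time-centered. The sketch below shows that bookkeeping for a single cell's momentum; the actual Nyx routine also updates the energy and carries the comoving a factors, which are omitted here:

#include <cstdio>

int main ()
{
    const double dt  = 0.1;
    const double rho = 1.0;
    const double g_old = -1.0, g_new = -0.9;   // old- and new-time gravity in one cell

    // Predictor: advance the momentum with the old-time source over the whole step.
    double mom = 0.0;
    mom += dt * rho * g_old;

    // Corrector: replace half of the old-time contribution with the new-time one.
    mom += 0.5 * dt * rho * (g_new - g_old);

    // The net source is the time-centered 0.5*dt*rho*(g_old + g_new).
    std::printf("momentum source = %g (expected %g)\n",
                mom, 0.5 * dt * rho * (g_old + g_new));
    return 0;
}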