From 10cabe393b32a9a3d65c7ffb344dacf5c1cb2504 Mon Sep 17 00:00:00 2001 From: SamRaymond Date: Sun, 15 Sep 2024 13:47:49 -0400 Subject: [PATCH] iniital upload - working collision of two disks --- .DS_Store | Bin 0 -> 6148 bytes README.md | 111 ++- main.py | 123 +++ material.py | 27 + nodes.py | 57 ++ particle_shapes.py | 97 ++ particles.py | 115 +++ plotting.py | 98 ++ results.py | 38 + results_vis.py | 179 ++++ shape_function.py | 42 + simulation_output/.DS_Store | Bin 0 -> 6148 bytes simulation_output/step_00000.csv | 1609 ++++++++++++++++++++++++++++++ simulation_output/step_01000.csv | 1609 ++++++++++++++++++++++++++++++ solver.py | 170 ++++ 15 files changed, 4274 insertions(+), 1 deletion(-) create mode 100644 .DS_Store create mode 100644 main.py create mode 100644 material.py create mode 100644 nodes.py create mode 100644 particle_shapes.py create mode 100644 particles.py create mode 100644 plotting.py create mode 100644 results.py create mode 100644 results_vis.py create mode 100644 shape_function.py create mode 100644 simulation_output/.DS_Store create mode 100644 simulation_output/step_00000.csv create mode 100644 simulation_output/step_01000.csv create mode 100644 solver.py diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..c45e04ee81a7d5b485536cd324ae7c0040298a6f GIT binary patch literal 6148 zcmeHKQA@)x5WZ~FbqryGg1!ZO9k{7e#FsMXA8?`%Dz#+;i(NC;mOYF?-}MjqNBlkB zCE1|ZXAyS?$#==!rMVB9>V4h5aNCoKq8BC)vNjse{QK{Cpw(CyaX*$otS2Z7| zewrnHfA&bDJEfvvrTf8sIG%g0y=xVxei)BuIv^a4(dFqOj7MtTS6MttbnMU=oQBiz zT8E3p$>l{`o_5ccZMo=ny60`#yX-BO4QK!8`0CbsizcypH~b1piDbvXy8l>ftS|%2 z05iZ0Y%~M*qHvlU%>pOS3@`&_4AA)?u@Sl!bA$Tmz)qI{h&7znf;!a_6eBIV7ITAW zK@lbt(S! 0: + dt = cfl_factor * min_cell_size / max_wave_speed + else: + # Fallback if no valid wave speed could be calculated + dt = cfl_factor * min_cell_size + + return dt + + def set_object_velocity(particles, object_id, velocity): + for particle in particles.particles: + if particle['object_id'] == object_id: + particle['velocity'] = np.array(velocity) + + def assign_material_properties(self, material, object_id): + for particle in self.particles: + if particle['object_id'] == object_id: + particle['material'] = self.create_material(material) + particle['density'] = material['density'] + particle['volume'] = particle['mass'] / particle['density'] + diff --git a/plotting.py b/plotting.py new file mode 100644 index 0000000..dd77a81 --- /dev/null +++ b/plotting.py @@ -0,0 +1,98 @@ +import matplotlib.pyplot as plt +import matplotlib.patches as patches +import numpy as np + +def visualize_particles_and_grid(particles, grid, step): + fig, ax = plt.subplots(figsize=(5, 5)) + + # Extract positions and velocities + positions = np.array([p['position'] for p in particles.get_particles()]) + velocities = np.array([p['velocity'] for p in particles.get_particles()]) + + # Ensure velocities is 2D + if velocities.ndim == 1: + velocities = velocities.reshape(-1, 1) + + # Calculate velocity magnitudes + velocity_magnitudes = np.linalg.norm(velocities, axis=1) + + # Create color map for particles + cmap = plt.get_cmap('viridis') + norm = plt.Normalize(vmin=velocity_magnitudes.min(), vmax=velocity_magnitudes.max()) + + # Draw particles as squares + square_size = grid.cell_size / 2 + for pos, vel_mag in zip(positions, velocity_magnitudes): + color = cmap(norm(vel_mag)) + square = patches.Rectangle((pos[0] - square_size/2, pos[1] - square_size/2), + square_size, square_size, + facecolor=color, edgecolor='none', alpha=0.8) + 
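+        # Each particle is rendered as a filled square half a grid cell wide,
+        # colored by its speed using the viridis colormap normalized above.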
ax.add_patch(square) + + # Draw MPM grid + for i in range(grid.node_count[0] + 1): + ax.axvline(x=i * grid.cell_size, color='gray', linestyle=':', alpha=0.5) + for j in range(grid.node_count[1] + 1): + ax.axhline(y=j * grid.cell_size, color='gray', linestyle=':', alpha=0.5) + + # Set plot properties + ax.set_aspect('equal') + ax.set_xlim(0, grid.physical_size[0]) + ax.set_ylim(0, grid.physical_size[1]) + ax.set_xlabel('X (m)') + ax.set_ylabel('Y (m)') + ax.set_title(f'Particles and MPM Grid - Step {step}') + + # Add colorbar + sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) + sm.set_array([]) + cbar = fig.colorbar(sm, ax=ax, label='Velocity Magnitude') + + plt.tight_layout() + plt.show() + +def visualize_particles_with_object_id(particles, grid): + fig, ax = plt.subplots(figsize=(5, 5)) + # Extract positions and object IDs + positions = np.array([p['position'] for p in particles.get_particles()]) + object_ids = np.array([p['object_id'] for p in particles.get_particles()]) + + print(f"Total particles: {len(positions)}") + print(f"Unique object IDs: {np.unique(object_ids)}") + # print(f"Position range: x({positions[:, 0].min()}, {positions[:, 0].max()}), y({positions[:, 1].min()}, {positions[:, 1].max()})") + + # Create a color map for different object IDs + unique_ids = np.unique(object_ids) + colors = plt.cm.rainbow(np.linspace(0, 1, len(unique_ids))) + color_map = dict(zip(unique_ids, colors)) + + # Draw particles as squares + square_size = grid.cell_size / 2 + for pos, obj_id in zip(positions, object_ids): + color = color_map[obj_id] + square = patches.Rectangle((pos[0] - square_size/2, pos[1] - square_size/2), + square_size, square_size, + facecolor=color, edgecolor='none', alpha=0.8) + ax.add_patch(square) + + # Draw MPM grid + for i in range(grid.node_count[0] + 1): + ax.axvline(x=i * grid.cell_size, color='gray', linestyle=':', alpha=0.5) + for j in range(grid.node_count[1] + 1): + ax.axhline(y=j * grid.cell_size, color='gray', linestyle=':', alpha=0.5) + + # Set plot properties + ax.set_aspect('equal') + ax.set_xlim(0, grid.physical_size[0]) + ax.set_ylim(0, grid.physical_size[1]) + ax.set_xlabel('X (m)') + ax.set_ylabel('Y (m)') + ax.set_title('Particles with Object ID') + + # Add legend + legend_elements = [patches.Patch(facecolor=color_map[id], edgecolor='none', label=f'Object {id}') + for id in unique_ids] + ax.legend(handles=legend_elements, loc='upper right') + + plt.tight_layout() + plt.show() \ No newline at end of file diff --git a/results.py b/results.py new file mode 100644 index 0000000..c6823a8 --- /dev/null +++ b/results.py @@ -0,0 +1,38 @@ +import csv +import numpy as np + +def save_particles_to_csv(particles, output_file): + with open(output_file, 'w', newline='') as csvfile: + csv_writer = csv.writer(csvfile) + # Write header + csv_writer.writerow([ + 'particle_id', 'object_id', 'pos_x', 'pos_y', 'vel_x', 'vel_y', + 'stress_xx', 'stress_xy', 'stress_yx', 'stress_yy', + 'strain_xx', 'strain_xy', 'strain_yx', 'strain_yy', + 'volume', 'mass', 'density', + 'youngs_modulus', 'poisson_ratio' + ]) + + # Write particle data + for p_idx in range(particles.get_particle_count()): + particle = particles.get_particle(p_idx) + pos = particle['position'] + vel = particle['velocity'] + stress = particle['stress'] + strain = particle['strain'] + material = particle['material'] + + csv_writer.writerow([ + p_idx, + particle['object_id'], + pos[0], pos[1], + vel[0], vel[1], + stress[0,0], stress[0,1], stress[1,0], stress[1,1], + strain[0,0], strain[0,1], strain[1,0], strain[1,1], + 
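+                # Scalar per-particle fields and material constants follow; keep this
+                # column order in sync with the header row above, since results_vis.py
+                # reads these values back by column index.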
particle['volume'], + particle['mass'], + particle['density'], + material.youngs_modulus, + material.poisson_ratio + ]) + diff --git a/results_vis.py b/results_vis.py new file mode 100644 index 0000000..b2d45a2 --- /dev/null +++ b/results_vis.py @@ -0,0 +1,179 @@ +import os +import csv +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.animation import FuncAnimation +from matplotlib.colors import Normalize + +# Directory containing the CSV files +data_dir = "simulation_output" + +# Get all CSV files in the directory +csv_files = sorted([f for f in os.listdir(data_dir) if f.endswith('.csv')]) + +# Variables to store grid boundaries +grid_min_x = float('inf') +grid_max_x = float('-inf') +grid_min_y = float('inf') +grid_max_y = float('-inf') + +# Function to read a CSV file +def read_csv(filename): + global grid_min_x, grid_max_x, grid_min_y, grid_max_y + with open(os.path.join(data_dir, filename), 'r') as f: + reader = csv.reader(f) + header = next(reader) # Read the header + data = [list(map(float, row)) for row in reader if row] # Convert to float and skip empty rows + if not data: + return np.array([]), np.array([]), np.array([]), np.array([]), np.array([]) + data = np.array(data) + positions = data[:, 2:4] + velocities = data[:, 4:6] + stress = data[:, 6:10] + volumes = data[:, 14] + masses = data[:, 15] + + # Update grid boundaries + grid_min_x = min(grid_min_x, positions[:, 0].min()) + grid_max_x = max(grid_max_x, positions[:, 0].max()) + grid_min_y = min(grid_min_y, positions[:, 1].min()) + grid_max_y = max(grid_max_y, positions[:, 1].max()) + + return positions, velocities, stress, volumes, masses + +# Read the first file to get the number of particles and initialize the plot +initial_positions, initial_velocities, initial_stress, initial_volumes, initial_masses = read_csv(csv_files[0]) +num_particles = len(initial_positions) + +# Create the figure and axes +fig = plt.figure(figsize=(20, 15)) +gs = fig.add_gridspec(2, 2) +ax1 = fig.add_subplot(gs[0, 0]) +ax2 = fig.add_subplot(gs[0, 1]) +ax3 = fig.add_subplot(gs[1, :]) + +scatter1 = ax1.scatter([], [], s=10, c=[], cmap='viridis') +scatter2 = ax2.scatter([], [], s=10, c=[], cmap='plasma') +line1, = ax3.plot([], [], label='Kinetic Energy') +line2, = ax3.plot([], [], label='Elastic Energy') +line3, = ax3.plot([], [], label='Total Energy') + +ax1.set_aspect('equal') +ax2.set_aspect('equal') +ax1.set_title("Velocity Magnitude") +ax2.set_title("Von Mises Stress") +ax3.set_title("Energy Evolution") +ax3.set_xlabel("Step") +ax3.set_ylabel("Energy") +ax3.legend() + +title = fig.suptitle("Step: 0") + +# Add colorbars +cbar1 = plt.colorbar(scatter1, ax=ax1) +cbar2 = plt.colorbar(scatter2, ax=ax2) +cbar1.set_label('Velocity magnitude') +cbar2.set_label('Von Mises stress') + +# Initialize the color normalizations +norm1 = Normalize() +norm2 = Normalize() + +# Lists to store energy values and stress range +kinetic_energy = [] +elastic_energy = [] +total_energy = [] +stress_min = float('inf') +stress_max = float('-inf') + +# Function to calculate von Mises stress +def von_mises_stress(stress): + xx, xy, yx, yy = stress.T + return np.sqrt(0.5 * ((xx - yy)**2 + (yy - xx)**2 + 6 * (xy**2))) + +# Function to calculate kinetic energy +def calculate_kinetic_energy(velocities, masses): + return 0.5 * np.sum(masses[:, np.newaxis] * np.sum(velocities**2, axis=1)) + +# Function to calculate elastic energy +def calculate_elastic_energy(stress, volumes, youngs_modulus): + return 0.5 * np.sum(volumes[:, np.newaxis] * np.sum(stress**2, axis=1)) 
/ youngs_modulus + +# Animation update function +def update(frame): + global kinetic_energy, elastic_energy, total_energy, stress_min, stress_max + + # Reset energy lists and stress range if the animation is starting over + if frame == 0: + kinetic_energy = [] + elastic_energy = [] + total_energy = [] + stress_min = float('inf') + stress_max = float('-inf') + + positions, velocities, stress, volumes, masses = read_csv(csv_files[frame]) + if len(positions) == 0: + return scatter1, scatter2, line1, line2, line3, title + + scatter1.set_offsets(positions) + scatter2.set_offsets(positions) + + # Calculate velocity magnitudes + vel_mag = np.linalg.norm(velocities, axis=1) + + # Calculate von Mises stress + von_mises = von_mises_stress(stress) + + # Update stress range + stress_min = min(stress_min, von_mises.min()) + stress_max = max(stress_max, von_mises.max()) + + # Update color normalizations + norm1.autoscale(vel_mag) + norm2.vmin = stress_min + norm2.vmax = stress_max + + scatter1.set_array(vel_mag) + scatter2.set_array(von_mises) + + # Update colorbars + scatter1.set_norm(norm1) + scatter2.set_norm(norm2) + cbar1.update_normal(scatter1) + cbar2.update_normal(scatter2) + + # Calculate and store energies + youngs_modulus = float(csv_files[frame].split('_')[-1].split('.')[0]) # Extract Young's modulus from filename + ke = calculate_kinetic_energy(velocities, masses) + ee = calculate_elastic_energy(stress, volumes, youngs_modulus) + te = ke + ee + kinetic_energy.append(ke) + elastic_energy.append(ee) + total_energy.append(te) + + # Update energy plots + steps = range(len(kinetic_energy)) + line1.set_data(steps, kinetic_energy) + line2.set_data(steps, elastic_energy) + line3.set_data(steps, total_energy) + ax3.relim() + ax3.autoscale_view() + + # Update axes limits + ax1.set_xlim(grid_min_x, grid_max_x) + ax1.set_ylim(grid_min_y, grid_max_y) + ax2.set_xlim(grid_min_x, grid_max_x) + ax2.set_ylim(grid_min_y, grid_max_y) + + title.set_text(f"Step: {frame}") + return scatter1, scatter2, line1, line2, line3, title + +# Create the animation +anim = FuncAnimation(fig, update, frames=len(csv_files), interval=500, blit=False) + +# Save the animation (optional) +# anim.save('simulation.mp4', writer='ffmpeg', fps=30) + +# Show the plot +plt.tight_layout() +plt.show() \ No newline at end of file diff --git a/shape_function.py b/shape_function.py new file mode 100644 index 0000000..56e2c86 --- /dev/null +++ b/shape_function.py @@ -0,0 +1,42 @@ +import numpy as np + +def shape_function(particle_pos, node_pos, cell_size): + """ + Calculate the linear nodal shape function value (tent function). + + Args: + particle_pos (np.array): Position of the particle (2D) + node_pos (np.array): Position of the grid node (2D) + cell_size (float): Size of a grid cell + + Returns: + tuple: Shape function values (sx, sy) + """ + dx = (particle_pos[0] - node_pos[0]) / cell_size + dy = (particle_pos[1] - node_pos[1]) / cell_size + + sx = 1 - abs(dx) if abs(dx) < 1 else 0 + sy = 1 - abs(dy) if abs(dy) < 1 else 0 + + return sx, sy + +def gradient_shape_function(particle_pos, node_pos, cell_size): + """ + Calculate the gradient of the linear nodal shape function. 
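+
+    Each returned component is the derivative of the corresponding 1D tent function:
+    +/-1/cell_size inside the unit support (|dx| < 1, |dy| < 1) and zero outside.
+    The solver multiplies each component by the shape value in the other direction
+    to build the 2D gradient.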
+ + Args: + particle_pos (np.array): Position of the particle (2D) + node_pos (np.array): Position of the grid node (2D) + cell_size (float): Size of a grid cell + + Returns: + tuple: Gradient of the shape function (dsx_dx, dsy_dy) + """ + dx = (particle_pos[0] - node_pos[0]) / cell_size + dy = (particle_pos[1] - node_pos[1]) / cell_size + + dsx_dx = -1 if -1 < dx < 0 else (1 if 0 <= dx < 1 else 0) + dsy_dy = -1 if -1 < dy < 0 else (1 if 0 <= dy < 1 else 0) + + return dsx_dx / cell_size, dsy_dy / cell_size + diff --git a/simulation_output/.DS_Store b/simulation_output/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 0: + self.grid.node_particle_map[node_i][node_j].append(p_idx) + + def particle_to_grid(self): + for i in range(self.grid.node_count[0]): + for j in range(self.grid.node_count[1]): + node = self.grid.nodes[i, j] + node_pos = np.array([i * self.grid.cell_size, j * self.grid.cell_size]) + + node.force = np.zeros(2) + node.mass = 0 + node.momentum = np.zeros(2) + + particle_indices = self.grid.node_particle_map[i][j] + + for p_idx in particle_indices: + particle = self.particles.get_particle(p_idx) + particle_pos = particle['position'] + + shape_x, shape_y = shape_function(particle_pos, node_pos, self.grid.cell_size) + grad_shape_x, grad_shape_y = gradient_shape_function(particle_pos, node_pos, self.grid.cell_size) + + shape_value = shape_x * shape_y + + # Update node properties + node.mass += particle['mass'] * shape_value + node.momentum += particle['mass'] * particle['velocity'] * shape_value + + force_x = -(particle['mass'] / particle['density']) * ( + grad_shape_x * shape_y * particle['stress'][0, 0] + + grad_shape_y * shape_x * particle['stress'][0, 1] + ) + force_y = -(particle['mass'] / particle['density']) * ( + grad_shape_x * shape_y * particle['stress'][1, 0] + + grad_shape_y * shape_x * particle['stress'][1, 1] + ) + + node.force[0] += force_x + node.force[1] += force_y + + if node.mass > 0: + node.velocity = node.momentum / node.mass + else: + node.velocity = np.zeros(2) + + def update_grid(self): + dt = self.dt # Time step + for node in self.grid.nodes.flat: + if node.mass > 1e-9: + # Update momentum using explicit integration + node.momentum += node.force * dt + + # Update velocity based on new momentum + node.velocity = node.momentum / node.mass + + # Apply external forces (e.g., gravity) if needed + gravity = np.array([0, 0]) + for node in self.grid.nodes.flat: + if node.mass > 0: + node.velocity += gravity * dt + + # Clear forces for the next step + for node in self.grid.nodes.flat: + node.force = np.zeros(2) + + def grid_to_particle(self): + for p_idx, particle in enumerate(self.particles.particles): + particle_velocity_update = np.zeros(2) + strain_rate = np.zeros((2, 2)) + + nearby_nodes = self.grid.get_nearby_nodes(particle['position']) + + for node in nearby_nodes: + if node.mass > 1e-9: + shape_x, shape_y = shape_function(particle['position'], node.position, self.grid.cell_size) + grad_shape_x, grad_shape_y = gradient_shape_function(particle['position'], node.position, self.grid.cell_size) + + shape_value = shape_x * shape_y + particle_velocity_update += shape_value * node.velocity + + # Calculate strain rate components + strain_rate[0, 0] += grad_shape_x * shape_y * node.velocity[0] # exx + 
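+                    # exy/eyx/eyy below complete the symmetric part of the interpolated
+                    # nodal velocity gradient: strain_rate = 0.5 * (grad_v + grad_v^T)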
+                    strain_rate[0, 1] += 0.5 * (grad_shape_x * shape_y * node.velocity[1] +
+                                                grad_shape_y * shape_x * node.velocity[0])  # exy
+                    strain_rate[1, 0] = strain_rate[0, 1]  # eyx = exy
+                    strain_rate[1, 1] += grad_shape_y * shape_x * node.velocity[1]  # eyy
+
+            # Update particle velocity
+            particle['velocity'] = particle_velocity_update
+
+            # Store strain rate in particle
+            particle['strain_rate'] = strain_rate
+
+    def update_particles(self):
+        for particle in self.particles.particles:
+            # Update position using current velocity
+            particle['position'] += particle['velocity'] * self.dt
+
+            # Update deformation gradient (F)
+            identity = np.eye(2)
+            particle['F'] = np.dot(identity + self.dt * particle['strain_rate'], particle.get('F', identity))
+
+            # Update volume
+            J = np.linalg.det(particle['F'])
+            particle['volume'] = particle['initial_volume'] * J
+
+            # Update density
+            particle['density'] = particle['initial_density'] / J
+
+            # Update stress using the material model
+            material = particle['material']
+            stress_rate = material.compute_stress_rate(particle['strain_rate'])
+            particle['stress'] += stress_rate * self.dt
+
+    def apply_boundary_conditions(self):
+        # Implement boundary conditions here
+        pass
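Note: apply_boundary_conditions() is committed as a stub. A minimal sketch of what it could
look like, assuming rigid, frictionless walls on the four domain edges, is given below. It
reuses the grid/node attributes that appear elsewhere in this patch (grid.node_count,
grid.nodes[i, j], node.mass, node.velocity, node.momentum) and is illustrative only; it is
not part of the committed code.

    def apply_boundary_conditions(self):
        # Free-slip walls: zero the velocity/momentum component normal to each
        # domain edge so boundary grid nodes cannot carry material out of the domain.
        nx, ny = self.grid.node_count
        for i in range(nx):
            for j in range(ny):
                node = self.grid.nodes[i, j]
                if node.mass <= 0:
                    continue
                if i == 0 or i == nx - 1:   # left/right walls
                    node.velocity[0] = 0.0
                    node.momentum[0] = 0.0
                if j == 0 or j == ny - 1:   # bottom/top walls
                    node.velocity[1] = 0.0
                    node.momentum[1] = 0.0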