Added paraview converter
akaszynski committed Aug 18, 2017
1 parent 84c1f82 commit deb8722
Showing 8 changed files with 209 additions and 65 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -24,3 +24,4 @@ pyansys/Interface.py

# Testing
Testing/

4 changes: 2 additions & 2 deletions doc/conf.py
@@ -51,10 +51,10 @@
# built documents.
#
# The short X.Y version.
version = u'0.17'
version = u'0.18'

# The full version, including alpha/beta/rc tags.
release = u'0.17.1'
release = u'0.18.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
26 changes: 26 additions & 0 deletions doc/loading_results.rst
@@ -216,5 +216,31 @@ The phase of the result can be changed by modifying the ``phase`` option. See
``help(result.PlotCyclicNodalResult)`` for details on its implementation.

Exporting to Paraview
~~~~~~~~~~~~~~~~~~~~~
Paraview is a visualization application for the rapid generation of plots and
graphs using VTK through a GUI. ``pyansys`` can translate ANSYS result files
into Paraview-compatible files containing the geometry and nodal results from
the analysis:

.. code:: python

    import pyansys
    from pyansys import examples

    # load example beam result file
    result = pyansys.ResultReader(examples.rstfile)

    # save as a binary vtk xml file
    result.SaveAsVTK('beam.vtu')

The VTK XML file can now be loaded using Paraview. This screenshot shows the
nodal displacement of the first result from the result file plotted within
`Paraview <https://www.paraview.org/>`_. Within the vtk file are two point
arrays (``NodalResult`` and ``NodalStress``) for each result in the result
file. The nodal result values will depend on the analysis type, while the
nodal stress will always be the node-averaged stress in the Sx, Sy, Sz, Sxy,
Syz, and Sxz directions.
.. image:: paraview.jpg
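
The exported file can also be inspected outside of the Paraview GUI with the
``vtk`` Python module. This is a minimal sketch, assuming ``beam.vtu`` was
written as above and using the numbered array names (``NodalResult000``,
``NodalStress000``, ...) written by ``SaveAsVTK``:

.. code:: python

    import vtk
    from vtk.util.numpy_support import vtk_to_numpy

    # read the exported XML unstructured grid
    reader = vtk.vtkXMLUnstructuredGridReader()
    reader.SetFileName('beam.vtu')
    reader.Update()
    pdata = reader.GetOutput().GetPointData()

    # list the stored point arrays and convert the first nodal result to numpy
    names = [pdata.GetArrayName(i) for i in range(pdata.GetNumberOfArrays())]
    disp = vtk_to_numpy(pdata.GetArray('NodalResult000'))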
Binary file added doc/paraview.jpg
2 changes: 1 addition & 1 deletion pyansys/_version.py
@@ -1,5 +1,5 @@
# major, minor, patch
version_info = 0, 17, 2
version_info = 0, 18, 0

# Nice string for the version
__version__ = '.'.join(map(str, version_info))
6 changes: 3 additions & 3 deletions pyansys/archive_reader.py
@@ -250,7 +250,7 @@ def Plot(self):
grid = self.uGrid

else:
raise Exception('Unstructured grid not generated. Run ParseVTK or ParseFEM first.')
raise Exception('Unstructured grid not generated. Run ParseFEM first.')

if not grid.GetNumberOfCells():
raise Exception('Unstructured grid contains no cells')
@@ -275,7 +275,7 @@ def SaveAsVTK(self, filename, binary=True):
The file extension will select the type of writer to use. *.vtk will
use the legacy writer, while *.vtu will select the VTK XML writer.
Run ParseFEM or ParseVTK before running this to generate the vtk object
Run ParseFEM before running this to generate the vtk object
Parameters
----------
@@ -300,7 +300,7 @@ def SaveAsVTK(self, filename, binary=True):

# Check if the unstructured grid exists
if not hasattr(self, 'uGrid'):
raise Exception('Run ParseFEM or ParseVTK first.')
raise Exception('Run ParseFEM first.')

# Write the grid
self.uGrid.WriteGrid(filename, binary)
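
# Usage sketch for the archive writer above. The ``ReadArchive`` loader and the
# ``hexarchivefile`` example name are assumptions about the pyansys API of this
# era; only ParseFEM and SaveAsVTK are confirmed by this diff.
import pyansys
from pyansys import examples

archive = pyansys.ReadArchive(examples.hexarchivefile)  # assumed loader name
archive.ParseFEM()                    # generate the unstructured grid (uGrid)
archive.SaveAsVTK('archive.vtu')      # *.vtu selects the VTK XML writer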
186 changes: 131 additions & 55 deletions pyansys/binary_reader.py
Expand Up @@ -404,6 +404,7 @@ def PlotCyclicNodalResult(self, rnum, phase=0, comp='norm', as_abs=False, label=
"""
Plots a nodal result from a cyclic analysis.
Parameters
----------
rnum : integer
@@ -428,11 +429,13 @@ def PlotCyclicNodalResult(self, rnum, phase=0, comp='norm', as_abs=False, label=
default. When disabled, plots the maximum response of a single
sector of the cyclic solution in the component of interest.
Returns
-------
cpos : list
Camera position from vtk render window.
Notes
-----
None
@@ -642,17 +645,20 @@ def PlotNodalResult(self, rnum, comp='norm', as_abs=False, label=''):

def GetTimeValues(self):
"""
SUMMARY
Returns table of time values for results. For a modal analysis, this
corresponds to the frequencies of each mode.
INPUTS
Parameters
----------
None
OUTPUTS
tvalues (np.float64 array)
Returns
-------
tvalues : np.float64 array
Table of time values for results. For a modal analysis, this
corresponds to the frequencies of each mode.
"""

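# Usage sketch for GetTimeValues, reusing the example result file from the
# documentation above. For a modal analysis these values are the frequencies
# of each mode.
import pyansys
from pyansys import examples

result = pyansys.ResultReader(examples.rstfile)
tvalues = result.GetTimeValues()   # np.float64 array, one entry per result set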
@@ -677,6 +683,7 @@ def GetNodalResult(self, rnum, sort=True):
"""
Returns the nodal result for a result number
Parameters
----------
rnum : integer
@@ -687,11 +694,13 @@ def GetNodalResult(self, rnum, sort=True):
node numbering (self.nnum) (default). If left unsorted, results
correspond to the nodal equivalence array self.resultheader['neqv']
Returns
-------
result : numpy.float array
Result is (nnod x numdof), or number of nodes by degrees of freedom
Notes
-----
None
@@ -832,7 +841,6 @@ def StoreGeometry(self):
NODES - node numbers defining the element
"""


# allocate memory for this (a maximum of 21 points per element)
etype = np.empty(nelm, np.int32)

@@ -905,7 +913,7 @@ def NodalStress(self, rnum):
None
"""
#%% debug cell

# Get the header information from the header dictionary
endian = self.resultheader['endian']
rpointers = self.resultheader['rpointers']
@@ -939,6 +947,7 @@
ele_ind_table += element_rst_ptr

# Each element header contains 25 records for the individual results
# get the location of the nodal stress
table_index = e_table.index('ptrENS')

# check number of records to read (differs with each version)
@@ -948,57 +957,43 @@
nnode_elem = nodstr[etype[0]]
f.seek((ele_ind_table[0] + ptrENS - 2)*4)
nitem = np.fromfile(f, endian + 'i', 1)[0]/nnode_elem


nstresses = self.edge_idx.size
stresses = np.empty((nstresses, 6), np.float32)
f.close()

#%% debug cell 2
c = 0
for i in range(len(ele_ind_table)):
# Element nodal stresses, ptrENS, is the third item in the table
f.seek((ele_ind_table[i] + table_index)*4)
ptrENS = np.fromfile(f, endian + 'i', 1)[0]

# read the stresses evaluated at the integration points or nodes
nnode_elem = nodstr[etype[i]]

f.seek((ele_ind_table[i] + ptrENS)*4)
stress = np.fromfile(f, endian + 'f', nnode_elem*nitem).reshape((-1, nitem))#[:, sidx]

# store stresses
stresses[c:c + nnode_elem] = stress[:, :6]
c += nnode_elem

# close file
f.close()


# Average the stresses for each element at each node
# enode = self.edge_node_num_idx
# s_node = np.empty((enode.size, 6), np.float32)
# for i in range(6):
# s_node[:, i] = np.bincount(self.edge_idx, weights=stresses[:, i])[enode]
# ntimes = np.bincount(self.edge_idx)[enode]
# s_node /= ntimes.reshape((-1, 1))
# number of nodes
nnod = self.resultheader['nnod']

# different behavior depending on version of ANSYS
# v15 seems to use floating point while < v14.5 uses double and stores
# principal values
if nitem == 6: # single precision >= v14.5
ele_data_arr = np.zeros((nnod, 6), np.float32)
_rstHelper.LoadStress(self.filename, table_index, ele_ind_table,
nodstr, etype, nitem, ele_data_arr,
self.edge_idx)
elif nitem == 22: # double precision < v14.5
nitem = 11
ele_data_arr = np.zeros((nnod, 6), np.float64)
_rstHelper.LoadStressDouble(self.filename, table_index,
ele_ind_table, nodstr, etype, nitem,
ele_data_arr, self.edge_idx)

elif nitem == 11: # single precision < v14.5
ele_data_arr = np.zeros((nnod, 6), np.float32)
_rstHelper.LoadStress(self.filename, table_index, ele_ind_table,
nodstr, etype, nitem, ele_data_arr,
self.edge_idx)

else:
raise Exception('Invalid nitem. Unable to load nodal stresses')

# grab element results from binary
# Average based on the edges of elements
enode = self.edge_node_num_idx
ntimes = np.bincount(self.edge_idx)[enode]

nnod = self.resultheader['nnod']
ele_data_arr = np.zeros((nnod, 6), np.float32)
_rstHelper.LoadStress(self.filename, table_index,
ele_ind_table, nodstr, etype,
nitem, ele_data_arr, self.edge_idx)

s_node = ele_data_arr[enode]
s_node /= ntimes.reshape((-1, 1))

return s_node
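
# Illustration of the bincount-based nodal averaging above; a standalone
# sketch with made-up values, not taken from a result file.
import numpy as np

edge_idx = np.array([0, 1, 1, 2])        # node index of each stress sample
sx = np.array([1.0, 2.0, 4.0, 8.0])      # one stress component per sample
ntimes = np.bincount(edge_idx)           # samples contributing to each node
sx_node = np.bincount(edge_idx, weights=sx) / ntimes
print(sx_node)                           # [1. 3. 8.]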


def PlotNodalStress(self, rnum, stype):
"""
@@ -1010,26 +1005,25 @@ def PlotNodalStress(self, rnum, stype):
across elements, stresses will vary based on the element they are
evaluated from.
Parameters
----------
rnum : integer
Result set using zero based indexing.
stype : string
Stress type from the following list: [Sx Sy Sz Sxy Syz Sxz]
Returns
-------
None
Notes
-----
None
"""

stress_types = ['Sx', 'Sy', 'Sz', 'Sxy', 'Syz', 'Sxz', 'Seqv']
if stype not in stress_types:
raise Exception("Stress type not in \n ['Sx', 'Sy', 'Sz', 'Sxy', 'Syz', 'Sxz']")
raise Exception('Stress type not in \n' +\
"['Sx', 'Sy', 'Sz', 'Sxy', 'Syz', 'Sxz']")

sidx = ['Sx', 'Sy', 'Sz', 'Sxy', 'Syz', 'Sxz'].index(stype)

@@ -1052,6 +1046,53 @@
del plobj

return cpos
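
# Usage sketch for the nodal stress plot above, reusing the documented example
# file. Valid components are Sx, Sy, Sz, Sxy, Syz and Sxz; rnum is zero based.
import pyansys
from pyansys import examples

result = pyansys.ResultReader(examples.rstfile)
result.PlotNodalStress(0, 'Sx')   # x-direction nodal stress of the first result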


def SaveAsVTK(self, filename, binary=True):
"""
Appends all results to an unstructured grid and writes it to disk.
The file extension will select the type of writer to use. *.vtk will
use the legacy writer, while *.vtu will select the VTK XML writer.
Parameters
----------
filename : str
Filename of grid to be written. The file extension will select the
type of writer to use. *.vtk will use the legacy writer, while
*.vtu will select the VTK XML writer.
binary : bool, optional
Writes as a binary file by default. Set to False to write ASCII
Notes
-----
Binary files write much faster than ASCII, but binary files written on
one system may not be readable on other systems. Binary can only be
selected for the legacy writer.
"""

# Copy the grid so as not to write results to the original object
grid = self.uGrid.Copy()

for i in range(self.nsets):
# Nodal Results
val = self.GetNodalResult(i)
grid.AddPointScalars(val, 'NodalResult{:03d}'.format(i))

# Nodal stress values are only valid at the edge nodes
stress = self.NodalStress(i)
val = np.zeros((grid.GetNumberOfPoints(), stress.shape[1]))
val[self.edge_node_num_idx] = stress
grid.AddPointScalars(val, 'NodalStress{:03d}'.format(i))

# Write to file and clean up
grid.WriteGrid(filename)
del grid


def GetResultInfo(filename):
@@ -1205,4 +1246,39 @@ def delete_row_csc(mat, i):
mat.indptr[i:-1] = mat.indptr[i+1:]
mat.indptr[i:] -= n
mat.indptr = mat.indptr[:-1]
mat._shape = (mat._shape[0]-1, mat._shape[1])
mat._shape = (mat._shape[0]-1, mat._shape[1])


# =============================================================================
# load stress debug using numpy
# =============================================================================
# #%% numpy debug
# f = open(self.filename)
# nstresses = self.edge_idx.size
# stresses = np.empty((nstresses, 6), np.float32)
# c = 0
# for i in range(len(ele_ind_table)):
# # Element nodal stresses, ptrENS, is the third item in the table
# f.seek((ele_ind_table[i] + table_index)*4)
# ptrENS = np.fromfile(f, endian + 'i', 1)[0]
#
# # read the stresses evaluated at the integration points or nodes
# nnode_elem = nodstr[etype[i]]
#
# f.seek((ele_ind_table[i] + ptrENS)*4)
# stress = np.fromfile(f, endian + 'f', nnode_elem*nitem).reshape((-1, nitem))#[:, sidx]
#
# # store stresses
# stresses[c:c + nnode_elem] = stress[:, :6]
# c += nnode_elem
#
# # close file
# f.close()
#
# # Average the stresses for each element at each node
# enode = self.edge_node_num_idx
# s_node = np.empty((enode.size, 6), np.float32)
# for i in range(6):
# s_node[:, i] = np.bincount(self.edge_idx, weights=stresses[:, i])[enode]
# ntimes = np.bincount(self.edge_idx)[enode]
# s_node /= ntimes.reshape((-1, 1))