626 idea generate mc summary table #634

Open · wants to merge 3 commits into master
Changes from 2 commits
13 changes: 13 additions & 0 deletions pymchelper/estimator.py
@@ -62,6 +62,11 @@ def __init__(self):
self.z = self.x

self.number_of_primaries = 0 # number of histories simulated
self.run_time = 0 # runtime in seconds
self.run_time_sim = 0 # run time in seconds excluding initialization header
self.total_number_of_primaries = 0 # number of histories summed over all contributing files
self.total_run_time = 0 # total runtime in seconds, summed over all contributing files
self.total_run_time_sim = 0 # total run time in seconds excluding initialization, summed over all files
self.file_counter = 0 # number of files read
self.file_corename = "" # common core for paths of contributing files
self.file_format = "" # binary file format of the input files
@@ -131,9 +136,17 @@ def average_with_nan(estimator_list, error_estimate=ErrorEstimate.stderr):
# TODO add compatibility check
if not estimator_list:
return None

result = copy.deepcopy(estimator_list[0])

for n, estimator in enumerate(estimator_list, start=2):
result.total_number_of_primaries += estimator.number_of_primaries
result.total_run_time += estimator.run_time
result.total_run_time_sim += estimator.run_time_sim

for page_no, page in enumerate(result.pages):
page.data_raw = np.nanmean([estimator.pages[page_no].data_raw for estimator in estimator_list], axis=0)

result.file_counter = len(estimator_list)
if result.file_counter > 1 and error_estimate != ErrorEstimate.none:
# s = stddev = sqrt(1/(n-1)sum(x-<x>)**2)
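For context on the per-page averaging in `average_with_nan` above: `np.nanmean` with `axis=0` averages matching cells across the stacked per-file arrays while skipping NaN entries, so a file that did not score a given cell is simply ignored rather than dragging the mean down. A minimal standalone sketch with made-up arrays (not pymchelper code):

```python
import numpy as np

# Three hypothetical per-file result arrays for the same page; NaN marks cells
# a given file did not score.
data_from_files = [
    np.array([1.0, 2.0, np.nan]),
    np.array([3.0, np.nan, 6.0]),
    np.array([5.0, 4.0, 9.0]),
]

# Average matching cells across files, skipping NaNs cell by cell.
mean_per_cell = np.nanmean(data_from_files, axis=0)
print(mean_per_cell)  # [3.  3.  7.5]
```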
3 changes: 3 additions & 0 deletions pymchelper/input_output.py
@@ -92,6 +92,9 @@ def fromfilelist(input_file_list, error=ErrorEstimate.stderr, nan: bool = True):
# loop over all files with n running from 2
for n, filename in enumerate(input_file_list[1:], start=2):
current_estimator = fromfile(filename) # x
result.total_number_of_primaries += current_estimator.number_of_primaries
result.total_run_time += current_estimator.run_time
result.total_run_time_sim += current_estimator.run_time_sim

# Running variance algorithm based on algorithm by B. P. Welford,
# presented in Donald Knuth's Art of Computer Programming, Vol 2, page 232, 3rd edition.
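The loop above folds each newly read file into a running mean and variance. The Welford recurrence mentioned in the comment updates the mean and the sum of squared deviations (M2) one sample at a time, so the spread can be accumulated without a second pass over the files. A minimal standalone sketch of that recurrence (illustrative only, not the exact pymchelper implementation):

```python
import numpy as np

def welford_update(n, mean, m2, x):
    """One Welford step: fold sample number n (1-based) into the running mean and M2."""
    delta = x - mean
    mean = mean + delta / n
    m2 = m2 + delta * (x - mean)  # uses the already-updated mean
    return mean, m2

samples = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
mean, m2 = 0.0, 0.0
for n, x in enumerate(samples, start=1):
    mean, m2 = welford_update(n, mean, m2, x)

variance = m2 / (len(samples) - 1)  # sample variance, s**2
print(mean, variance)
print(np.mean(samples), np.var(samples, ddof=1))  # same values, computed in one pass above
```

Dividing M2 by n-1 at the end gives the sample variance, matching the stddev formula quoted in the estimator.py hunk.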
2 changes: 2 additions & 0 deletions pymchelper/readers/shieldhit/binary_spec.py
@@ -208,6 +208,8 @@ class SHBDOTagID(IntEnum):
SHBDOTagID.user: 'user',
SHBDOTagID.host: 'host',
SHBDOTagID.rt_nstat: 'number_of_primaries',
SHBDOTagID.rt_time: 'run_time',
SHBDOTagID.rt_timesim: 'run_time_sim',
SHBDOTagID.number_of_pages: 'page_count',
SHBDOTagID.geo_unit_ids: 'geo_unit_ids',
SHBDOTagID.geo_units: 'geo_units',
3 changes: 3 additions & 0 deletions pymchelper/readers/shieldhit/reader_bdo2019.py
@@ -176,6 +176,9 @@ def read_data(self, estimator, nscale=1):
page.error_raw /= np.float64(estimator.number_of_primaries)

estimator.file_format = 'bdo2019'
estimator.total_number_of_primaries = estimator.number_of_primaries
estimator.total_run_time = estimator.run_time
estimator.total_run_time_sim = estimator.run_time_sim

logger.debug("Done reading bdo file.")
return True
4 changes: 4 additions & 0 deletions pymchelper/run.py
@@ -81,6 +81,10 @@ def main(args=None):

parser_inspect = subparsers.add_parser(Converters.inspect.name, help='prints metadata')
add_default_options(parser_inspect)

parser_mcsum = subparsers.add_parser(Converters.mcsum.name, help='MC summary table after Sechopoulos et al (2018)')
add_default_options(parser_mcsum)

parser_inspect.add_argument('-d', '--details',
help='print detailed information about data attribute',
action="store_true")
5 changes: 4 additions & 1 deletion pymchelper/writers/common.py
@@ -9,6 +9,7 @@
from pymchelper.writers.trip98ddd import TRiP98DDDWriter
from pymchelper.writers.hdf import HdfWriter
from pymchelper.writers.json import JsonWriter
from pymchelper.writers.mcsum import MCSumWriter


class Converters(IntEnum):
@@ -25,6 +26,7 @@ class Converters(IntEnum):
inspect = 8
hdf = 9
json = 10
mcsum = 11

@classmethod
def _converter_mapping(cls, item):
@@ -38,7 +40,8 @@ def _converter_mapping(cls, item):
cls.sparse: SparseWriter,
cls.inspect: Inspector,
cls.hdf: HdfWriter,
cls.json: JsonWriter
cls.json: JsonWriter,
cls.mcsum: MCSumWriter
}.get(item)

@classmethod
42 changes: 42 additions & 0 deletions pymchelper/writers/mcsum.py
@@ -0,0 +1,42 @@
import logging
import time
import urllib.parse

logger = logging.getLogger(__name__)


class MCSumWriter:
[Review thread on class MCSumWriter]

Contributor:
Do we really need a new writer for that functionality? Could it be part of the inspect command? For example:
convertmc inspect blabla.bdo --summary
or
convertmc inspect --many "*.bdo" --summary

Member (author):
I thought of additional options for choosing which output format you would like to have. It may also collide with the inspect -d option, and it may get more complex if the scoring field is also filled out.

Another question: we could also hardcode SH references for convenience and add a reference list at the bottom.

Contributor:
pymchelper can also read Fluka binary files, so SH12A references would need to be saved in a smarter way.

def __init__(self, filename, options):
logger.debug("Initialising Inspector writer")
self.options = options

def write(self, estimator):
"""Print all keys and values from estimator structure

they include also a metadata read from binary output file
"""

s = estimator.total_run_time
if s < 60:
ts = time.strftime('%S seconds', time.gmtime(s))
elif s < 3600:
ts = time.strftime('%M minutes', time.gmtime(s))
else:
ts = time.strftime('%H hours %M minutes', time.gmtime(s))

print('| Item name | Description | References |')
print('|---|---|---|')
print(f'| Code Version | {estimator.mc_code_version} | |')
print('| Validation | | |')
print(f'| Timing | {estimator.file_counter} jobs, {ts} total runtime | |')
print('| Geometry | | |')
print('| Source | | |')
print('| Physics | | |')
print('| Scoring | | |')
print(f'| \\# histories | {estimator.total_number_of_primaries} | |')

url = urllib.parse.quote("https://doi.org/10.1002/mp.12702", safe="")
url = "https://doi.org/10.1002/mp.12702"
print('Table caption: Summary of Monte Carlo simulations parameters in AAPM TG268 format.')
print(f'[See Sechopoulos et al. (2018), {url}.]')
return 0
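For completeness, a rough sketch of how the new writer could be driven directly from Python, using the `fromfilelist` reader touched in this PR. The file names are placeholders, the output file name passed to the writer is unused by the code above, and this assumes `fromfilelist` returns the merged estimator:

```python
from pymchelper.input_output import fromfilelist
from pymchelper.writers.mcsum import MCSumWriter

# Placeholder names for per-job SHIELD-HIT12A output files.
bdo_files = ["dose_0001.bdo", "dose_0002.bdo", "dose_0003.bdo"]

# Merge the files; this also accumulates total_number_of_primaries,
# total_run_time and total_run_time_sim as added in this PR.
estimator = fromfilelist(bdo_files)

# The writer ignores the file name and prints the TG-268 style table to stdout.
writer = MCSumWriter("summary.txt", options=None)
writer.write(estimator)
```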