diff --git a/__version__.py b/__version__.py
index 03724dc3d0..52a18bd020 100644
--- a/__version__.py
+++ b/__version__.py
@@ -5,7 +5,7 @@
# @website https://github.com/stochss/stochss
# =============================================================================
-__version__ = '2.3.2'
+__version__ = '2.3.8'
__title__ = 'StochSS'
__description__ = 'StochSS is an integrated development environment (IDE) \
for simulation of biochemical networks.'
diff --git a/client/model-view/model-view.js b/client/model-view/model-view.js
index ab7d5a0129..47482848d6 100644
--- a/client/model-view/model-view.js
+++ b/client/model-view/model-view.js
@@ -47,7 +47,8 @@ module.exports = View.extend({
'change [data-hook=all-discrete]' : 'setDefaultMode',
'change [data-hook=advanced]' : 'setDefaultMode',
'change [data-hook=edit-volume]' : 'updateVolumeViewer',
- 'click [data-hook=collapse-mv-advanced-section]' : 'changeCollapseButtonText'
+ 'click [data-hook=collapse-mv-advanced-section]' : 'changeCollapseButtonText',
+ 'click [data-hook=collapse-system-volume]' : 'changeCollapseButtonText'
},
initialize: function(attrs, options) {
View.prototype.initialize.apply(this, arguments);
diff --git a/client/pages/loading-page.js b/client/pages/loading-page.js
index 1710f889b0..5bfde943b7 100644
--- a/client/pages/loading-page.js
+++ b/client/pages/loading-page.js
@@ -55,10 +55,9 @@ let LoadingPage = PageView.extend({
let queryStr = "?path=" + self.responsePath + "&cmd=read";
let endpoint = path.join(app.getApiPath(), 'file/upload-from-link') + queryStr;
let errorCB = function (err, response, body) {
- $(this.queryByHook("loading-spinner")).css("display", "none");
- let model = $(modals.projectExportErrorHtml(body.reason, body.message)).modal();
- let close = document.querySelector("button[data-dismiss=modal]");
- close.addEventListener("click", function (e) {
+ $(self.queryByHook("loading-spinner")).css("display", "none");
+ let modal = $(modals.projectExportErrorHtml(body.reason, body.message)).modal();
+ modal.on('hidden.bs.modal', function (e) {
window.history.back();
});
}
diff --git a/client/pages/model-editor.js b/client/pages/model-editor.js
index 2c6826b4c0..c9911cb6e7 100644
--- a/client/pages/model-editor.js
+++ b/client/pages/model-editor.js
@@ -47,8 +47,7 @@ let ModelEditor = PageView.extend({
'click [data-hook=project-breadcrumb-link]' : 'handleProjectBreadcrumbClick',
'click [data-hook=toggle-preview-plot]' : 'togglePreviewPlot',
'click [data-hook=toggle-preview-domain]' : 'toggleDomainPlot',
- 'click [data-hook=download-png]' : 'clickDownloadPNGButton',
- 'click [data-hook=collapse-system-volume]' : 'changeCollapseButtonText'
+ 'click [data-hook=download-png]' : 'clickDownloadPNGButton'
},
initialize: function (attrs, options) {
PageView.prototype.initialize.apply(this, arguments);
diff --git a/client/pages/model-presentation.js b/client/pages/model-presentation.js
index e052b2d31a..662b1dfd83 100644
--- a/client/pages/model-presentation.js
+++ b/client/pages/model-presentation.js
@@ -63,7 +63,7 @@ let ModelPresentationPage = PageView.extend({
this.renderModelView();
},
renderModelView: function () {
- let modelView = ModelView({
+ let modelView = new ModelView({
model: this.model,
readOnly: true
});
diff --git a/stochss/handlers/util/__init__.py b/stochss/handlers/util/__init__.py
index d7351abbfc..dc029a54a4 100644
--- a/stochss/handlers/util/__init__.py
+++ b/stochss/handlers/util/__init__.py
@@ -23,6 +23,8 @@
from .stochss_spatial_model import StochSSSpatialModel
from .stochss_sbml import StochSSSBMLModel
from .stochss_notebook import StochSSNotebook
+from .parameter_sweep_notebook import StochSSParamSweepNotebook
+from .sciope_notebook import StochSSSciopeNotebook
from .stochss_workflow import StochSSWorkflow
from .stochss_job import StochSSJob
from .stochss_project import StochSSProject
diff --git a/stochss/handlers/util/parameter_sweep_notebook.py b/stochss/handlers/util/parameter_sweep_notebook.py
new file mode 100644
index 0000000000..75f586e1c3
--- /dev/null
+++ b/stochss/handlers/util/parameter_sweep_notebook.py
@@ -0,0 +1,342 @@
+'''
+StochSS is a platform for simulating biochemical systems
+Copyright (C) 2019-2021 StochSS developers.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+'''
+
+from nbformat import v4 as nbf
+
+from .stochss_notebook import StochSSNotebook
+
+class StochSSParamSweepNotebook(StochSSNotebook):
+ '''
+ ################################################################################################
+ StochSS parameter sweep notebook object
+ ################################################################################################
+ '''
+ def __init__(self, path, new=False, models=None, settings=None):
+        '''Initialize a parameter sweep notebook object and
+ if its new create it on the users file system.
+
+ Attributes
+ ----------
+ path : str
+ Path to the notebook'''
+ super().__init__(path=path, new=new, models=models, settings=settings)
+
+
+ def __create_1d_class_cell(self):
+ pad = " "
+ run_str = self.__create_1d_run_str()
+ plt_strs = [f"{pad}def plot(c):", f"{pad*2}from matplotlib import pyplot as plt",
+ f"{pad*2}from mpl_toolkits.axes_grid1 import make_axes_locatable",
+ f"{pad*2}fig, ax = plt.subplots(figsize=(8, 8))",
+ pad * 2 + "plt.title(f'Parameter Sweep - Variable:{c.variable_of_interest}')",
+ f"{pad*2}plt.errorbar(c.p1_range, c.data[:, 0], c.data[:, 1])",
+ f"{pad*2}plt.xlabel(c.p1, fontsize=16, fontweight='bold')",
+ f"{pad*2}plt.ylabel('Population', fontsize=16, fontweight='bold')"]
+ pltly_str = self.__create_1d_plotly_str()
+ class_cell = ["class ParameterSweep1D():", "", run_str, "", "",
+ "\n".join(plt_strs), "", "", pltly_str]
+ return nbf.new_code_cell("\n".join(class_cell))
+
+
+ def __create_1d_config_cell(self):
+ pad = " "
+ if self.settings['solver'] == "SSACSolver":
+ model_str = f"{pad}model = {self.get_class_name()}()"
+ else:
+ model_str = f"{pad}ps_class = {self.get_class_name()}"
+ config_cell = ["class ParameterSweepConfig(ParameterSweep1D):",
+ f"{pad}# What class defines the GillesPy2 model", model_str]
+ settings = self.settings['parameterSweepSettings']
+ eval_str = "float(eval(model.get_parameter(p1).expression))"
+ number_of_trajectories = self.settings['simulationSettings']['realizations']
+ if not settings['parameters']:
+ param = self.s_model['parameters'][0]
+ p_min = f"0.5 * {eval_str}"
+ p_max = f"1.5 * {eval_str}"
+ p_steps = "11"
+ spec_of_interest = self.s_model['species'][0]
+ else:
+ param = settings['parameters'][0]
+ p_min = param['min']
+ p_max = param['max']
+ p_steps = param['steps']
+ spec_of_interest = settings['speciesOfInterest']
+ config_cell.extend([f"{pad}# ENTER PARAMETER HERE", f"{pad}p1 = '{param['name']}'",
+ f"{pad}# ENTER START VALUE FOR P1 RANGE HERE", f"{pad}p1_min = {p_min}",
+ f"{pad}# ENTER END VALUE FOR P1 RANGE HERE", f"{pad}p1_max = {p_max}",
+ f"{pad}# ENTER THE NUMBER OF STEPS FOR P1 HERE",
+ f"{pad}p1_steps = {p_steps}",
+ f"{pad}p1_range = np.linspace(p1_min, p1_max, p1_steps)",
+ f"{pad}# ENTER VARIABLE OF INTEREST HERE",
+ f"{pad}variable_of_interest = '{spec_of_interest['name']}'",
+ f"{pad}number_of_trajectories = {number_of_trajectories}",
+ f"{pad}# What feature of the simulation are we examining",
+ f"{pad}feature_extraction = population_at_last_timepoint",
+ f"{pad}# for ensemble resutls: how do we aggreggate the values",
+ f"{pad}ensemble_aggragator = mean_std_of_ensemble"])
+ return nbf.new_code_cell("\n".join(config_cell))
+
+
+ @classmethod
+ def __create_1d_plotly_str(cls):
+ pad = " "
+ trace_str = f"{pad*2}trace_list = [go.Scatter(x=c.p1_range, y=c.data[:, 0]"
+ trace_str += ", error_y=error_y)]"
+ title_str = f"{pad*2}title = dict(text=f'Parameter Sweep - Variable: "
+ title_str += "{c.variable_of_interest}', x=0.5)"
+ lyout_str = f"{pad*2}layout = go.Layout(title=title, xaxis=xaxis_label, yaxis=yaxis_label)"
+ pltly_strs = [f"{pad}def plotplotly(c, return_plotly_figure=False):",
+ f"{pad*2}from plotly.offline import iplot",
+ f"{pad*2}import plotly.graph_objs as go", "",
+ f"{pad*2}visible = c.number_of_trajectories > 1",
+ f"{pad*2}error_y = dict(type='data', array=c.data[:, 1], visible=visible)",
+ "", trace_str, "", title_str,
+ f"{pad*2}yaxis_label = dict(title='Population')",
+ pad * 2 + "xaxis_label = dict(title=f'{c.p1}')", "",
+ lyout_str, "", f"{pad*2}fig = dict(data=trace_list, layout=layout)", "",
+ f"{pad*2}if return_plotly_figure:",
+ f"{pad*3}return fig", f"{pad*2}iplot(fig)"]
+ return "\n".join(pltly_strs)
+
+
+ def __create_1d_run_str(self):
+ pad = " "
+ run_strs = [f"{pad}def run(c, kwargs, verbose=False):",
+ f"{pad*2}c.verbose = verbose",
+ f"{pad*2}fn = c.feature_extraction",
+ f"{pad*2}ag = c.ensemble_aggragator",
+ f"{pad*2}data = np.zeros((len(c.p1_range), 2)) # mean and std",
+ f"{pad*2}for i, v1 in enumerate(c.p1_range):"]
+ res_str = f"{pad*4}tmp_results = "
+ if self.settings['solver'] == "SSACSolver":
+ res_str += "model.run(**kwargs, variables={c.p1:v1})"
+ else:
+ res_str += "tmp_model.run(**kwargs)"
+ run_strs.extend([f"{pad*3}tmp_model = c.ps_class()",
+ f"{pad*3}tmp_model.listOfParameters[c.p1].set_expression(v1)"])
+ run_strs.extend([f"{pad*3}if c.verbose:",
+ pad * 4 + "print(f'running {c.p1}={v1}')",
+ f"{pad*3}if(c.number_of_trajectories > 1):",
+ res_str,
+ f"{pad*4}(m, s) = ag([fn(x) for x in tmp_results])",
+ f"{pad*4}data[i, 0] = m",
+ f"{pad*4}data[i, 1] = s",
+ f"{pad*3}else:",
+ res_str.replace("results", "result"),
+ f"{pad*4}data[i, 0] = c.feature_extraction(tmp_result)",
+ f"{pad*2}c.data = data"])
+ return "\n".join(run_strs)
+
+
+ def __create_2d_class_cell(self):
+ pad = " "
+ run_str = self.__create_2d_run_str()
+ plt_strs = [f"{pad}def plot(c):", f"{pad*2}from matplotlib import pyplot as plt",
+ f"{pad*2}from mpl_toolkits.axes_grid1 import make_axes_locatable",
+ f"{pad*2}fig, ax = plt.subplots(figsize=(8, 8))",
+ f"{pad*2}plt.imshow(c.data)",
+ f"{pad*2}ax.set_xticks(np.arange(c.data.shape[1]) + 0.5, minor=False)",
+ f"{pad*2}ax.set_yticks(np.arange(c.data.shape[0]) + 0.5, minor=False)",
+ pad * 2 + "plt.title(f'Parameter Sweep - Variable: {c.variable_of_interest}')",
+ f"{pad*2}ax.set_xticklabels(c.p1_range, minor=False, rotation=90)",
+ f"{pad*2}ax.set_yticklabels(c.p2_range, minor=False)",
+ f"{pad*2}ax.set_xlabel(c.p1, fontsize=16, fontweight='bold')",
+ f"{pad*2}ax.set_ylabel(c.p2, fontsize=16, fontweight='bold')",
+ f"{pad*2}divider = make_axes_locatable(ax)",
+ f"{pad*2}cax = divider.append_axes('right', size='5%', pad=0.2)",
+ f"{pad*2}_ = plt.colorbar(ax=ax, cax=cax)"]
+ pltly_str = self.__create_2d_plotly_str()
+ class_cell = ["class ParameterSweep2D():", "", run_str, "", "",
+ "\n".join(plt_strs), "", "", pltly_str]
+ return nbf.new_code_cell("\n".join(class_cell))
+
+
+ def __create_2d_config_cell(self):
+ pad = " "
+ if self.settings['solver'] == "SSACSolver":
+ model_str = f"{pad}model = {self.get_class_name()}()"
+ else:
+ model_str = f"{pad}ps_class = {self.get_class_name()}"
+ config_cell = ["class ParameterSweepConfig(ParameterSweep2D):",
+ f"{pad}# What class defines the GillesPy2 model", model_str]
+ settings = self.settings['parameterSweepSettings']
+ p1_eval_str = "float(eval(model.get_parameter(p1).expression))"
+ p2_eval_str = "float(eval(model.get_parameter(p2).expression))"
+ number_of_trajectories = self.settings['simulationSettings']['realizations']
+ if not settings['parameters']:
+ param1 = self.s_model['parameters'][0]
+ p1_min = f"0.5 * {p1_eval_str}"
+ p1_max = f"1.5 * {p1_eval_str}"
+ param2 = self.s_model['parameters'][1]
+ p2_min = f"0.5 * {p2_eval_str}"
+ p2_max = f"1.5 * {p2_eval_str}"
+ spec_of_interest = self.s_model['species'][0]
+ else:
+ param1 = settings['parameters'][0]
+ p1_min = param1['min']
+ p1_max = param1['max']
+ param2 = settings['parameters'][1]
+ p2_min = param2['min']
+ p2_max = param2['max']
+ spec_of_interest = settings['speciesOfInterest']
+ config_cell.extend([f"{pad}# ENTER PARAMETER 1 HERE", f"{pad}p1 = '{param1['name']}'",
+ f"{pad}# ENTER PARAMETER 2 HERE", f"{pad}p2 = '{param2['name']}'",
+ f"{pad}# ENTER START VALUE FOR P1 RANGE HERE",
+ f"{pad}p1_min = {p1_min}",
+ f"{pad}# ENTER END VALUE FOR P1 RANGE HERE", f"{pad}p1_max = {p1_max}",
+ f"{pad}# ENTER THE NUMBER OF STEPS FOR P1 HERE",
+ f"{pad}p1_steps = {param1['steps'] if settings['parameters'] else 11}",
+ f"{pad}p1_range = np.linspace(p1_min, p1_max, p1_steps)",
+ f"{pad}# ENTER START VALUE FOR P2 RANGE HERE",
+ f"{pad}p2_min = {p2_min}",
+ f"{pad}# ENTER END VALUE FOR P2 RANGE HERE", f"{pad}p2_max = {p2_max}",
+ f"{pad}# ENTER THE NUMBER OF STEPS FOR P2 HERE",
+ f"{pad}p2_steps = {param2['steps'] if settings['parameters'] else 11}",
+ f"{pad}p2_range = np.linspace(p2_min, p2_max, p2_steps)",
+ f"{pad}# ENTER VARIABLE OF INTEREST HERE",
+ f"{pad}variable_of_interest = '{spec_of_interest['name']}'",
+ f"{pad}number_of_trajectories = {number_of_trajectories}",
+ f"{pad}# What feature of the simulation are we examining",
+ f"{pad}feature_extraction = population_at_last_timepoint",
+ f"{pad}# for ensemble resutls: how do we aggreggate the values",
+ f"{pad}ensemble_aggragator = average_of_ensemble"])
+ return nbf.new_code_cell("\n".join(config_cell))
+
+
+ @classmethod
+ def __create_2d_plotly_str(cls):
+ pad = " "
+ title_str = f"{pad*2}title = dict(text=f'Parameter Sweep - Variable: "
+ title_str += "{c.variable_of_interest}', x=0.5)"
+ lyout_str = f"{pad*2}layout = go.Layout(title=title, xaxis=xaxis_label, yaxis=yaxis_label)"
+ pltly_strs = [f"{pad}def plotplotly(c, return_plotly_figure=False):",
+ f"{pad*2}from plotly.offline import init_notebook_mode, iplot",
+ f"{pad*2}import plotly.graph_objs as go", "",
+ f"{pad*2}xaxis_ticks = c.p1_range", f"{pad*2}yaxis_ticks = c.p2_range", "",
+ f"{pad*2}trace_list = [go.Heatmap(z=c.data, x=xaxis_ticks, y=yaxis_ticks)]",
+ title_str, pad * 2 + "xaxis_label = dict(title=f'{c.p1}')",
+ pad * 2 + "yaxis_label = dict(title=f'{c.p2}')", "",
+ lyout_str, "", f"{pad*2}fig = dict(data=trace_list, layout=layout)", "",
+ f"{pad*2}if return_plotly_figure:",
+ f"{pad*3}return fig", f"{pad*2}iplot(fig)"]
+ return "\n".join(pltly_strs)
+
+
+ def __create_2d_run_str(self):
+ pad = " "
+ run_strs = [f"{pad}def run(c, kwargs, verbose=False):",
+ f"{pad*2}c.verbose = verbose",
+ f"{pad*2}fn = c.feature_extraction",
+ f"{pad*2}ag = c.ensemble_aggragator",
+ f"{pad*2}data = np.zeros((len(c.p1_range), len(c.p2_range)))",
+ f"{pad*2}for i, v1 in enumerate(c.p1_range):",
+ f"{pad*3}for j, v2 in enumerate(c.p2_range):"]
+ res_str = f"{pad*5}tmp_results = "
+ if self.settings['solver'] == "SSACSolver":
+ res_str += "model.run(**kwargs, variables={c.p1:v1, c.p2:v2})"
+ else:
+ res_str += "tmp_model.run(**kwargs)"
+ run_strs.extend([f"{pad*4}tmp_model = c.ps_class()",
+ f"{pad*4}tmp_model.listOfParameters[c.p1].set_expression(v1)",
+ f"{pad*4}tmp_model.listOfParameters[c.p2].set_expression(v2)"])
+ run_strs.extend([f"{pad*4}if c.verbose:",
+ pad * 5 + "print(f'running {c.p1}={v1}, {c.p2}={v2}')",
+ f"{pad*4}if(c.number_of_trajectories > 1):",
+ res_str,
+ f"{pad*5}data[i, j] = ag([fn(x) for x in tmp_results])",
+ f"{pad*4}else:",
+ res_str.replace("results", "result"),
+ f"{pad*5}data[i, j] = c.feature_extraction(tmp_result)",
+ f"{pad*2}c.data = data"])
+ return "\n".join(run_strs)
+
+
+ def __create_post_process_cells(self):
+ pad = " "
+ fe_vbs_pnt = f"{pad*2}print(f'population_at_last_timepoint"
+ fe_vbs_pnt += " {c.variable_of_interest}={res[c.variable_of_interest][-1]}')"
+ # feature extraction cell
+ fe_cell = ["# What value(s) do you want to extract from the simulation trajectory",
+ "def population_at_last_timepoint(c, res):", f"{pad}if c.verbose:",
+ fe_vbs_pnt, f"{pad}return res[c.variable_of_interest][-1]"]
+ # mean std aggragator cell
+ msa_cell = ["# How do we combine the values from multiple trajectores",
+ "def mean_std_of_ensemble(c, data):", f"{pad}a = np.average(data)",
+ f"{pad}s = np.std(data)", f"{pad}if c.verbose:",
+ pad * 2 + "print(f'mean_std_of_ensemble m:{a} s:{s}')",
+ f"{pad}return (a, s)"]
+ # average aggragator cell
+ aa_cell = [msa_cell[0], "def average_of_ensemble(c, data):",
+ f"{pad}a = np.average(data)", f"{pad}if c.verbose:",
+ pad * 2 + "print(f'average_of_ensemble = {a}')",
+ f"{pad}return a"]
+ cells = [nbf.new_markdown_cell("# Post Processing"),
+ nbf.new_markdown_cell("## Feature extraction function"),
+ nbf.new_code_cell('\n'.join(fe_cell)),
+ nbf.new_markdown_cell("## Aggregation function")]
+ if self.nb_type == self.PARAMETER_SWEEP_1D:
+ cells.append(nbf.new_code_cell('\n'.join(msa_cell)))
+ else:
+ cells.append(nbf.new_code_cell('\n'.join(aa_cell)))
+ return cells
+
+
+ def create_1d_notebook(self):
+        '''Create a 1D parameter sweep Jupyter notebook for a StochSS model/workflow
+
+ Attributes
+ ----------'''
+ self.nb_type = self.PARAMETER_SWEEP_1D
+ self.settings['solver'] = self.get_gillespy2_solver_name()
+ run_strs = ["kwargs = configure_simulation()", "ps = ParameterSweepConfig()",
+ "%time ps.run(kwargs)"]
+ cells = self.create_common_cells()
+ cells.extend(self.__create_post_process_cells())
+ cells.extend([nbf.new_markdown_cell("# Parameter Sweep"),
+ self.__create_1d_class_cell(),
+ self.__create_1d_config_cell(),
+ nbf.new_code_cell("\n".join(run_strs)),
+ nbf.new_markdown_cell("# Visualization"),
+ nbf.new_code_cell("ps.plot()"),
+ nbf.new_code_cell("ps.plotplotly()")])
+
+ message = self.write_notebook_file(cells=cells)
+ return {"Message":message, "FilePath":self.get_path(), "File":self.get_file()}
+
+
+ def create_2d_notebook(self):
+        '''Create a 2D parameter sweep Jupyter notebook for a StochSS model/workflow
+
+ Attributes
+ ----------'''
+ self.nb_type = self.PARAMETER_SWEEP_2D
+ self.settings['solver'] = self.get_gillespy2_solver_name()
+ run_strs = ["kwargs = configure_simulation()", "ps = ParameterSweepConfig()",
+ "%time ps.run(kwargs)"]
+ cells = self.create_common_cells()
+ cells.extend(self.__create_post_process_cells())
+ cells.extend([nbf.new_markdown_cell("# Parameter Sweep"),
+ self.__create_2d_class_cell(),
+ self.__create_2d_config_cell(),
+ nbf.new_code_cell("\n".join(run_strs)),
+ nbf.new_markdown_cell("# Visualization"),
+ nbf.new_code_cell("ps.plot()"),
+ nbf.new_code_cell("ps.plotplotly()")])
+
+ message = self.write_notebook_file(cells=cells)
+ return {"Message":message, "FilePath":self.get_path(), "File":self.get_file()}
diff --git a/stochss/handlers/util/sciope_notebook.py b/stochss/handlers/util/sciope_notebook.py
new file mode 100644
index 0000000000..a3ec3b7625
--- /dev/null
+++ b/stochss/handlers/util/sciope_notebook.py
@@ -0,0 +1,219 @@
+'''
+StochSS is a platform for simulating biochemical systems
+Copyright (C) 2019-2021 StochSS developers.
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+'''
+
+from nbformat import v4 as nbf
+
+from .stochss_notebook import StochSSNotebook
+
+class StochSSSciopeNotebook(StochSSNotebook):
+ '''
+ ################################################################################################
+ StochSS sciope notebook object
+ ################################################################################################
+ '''
+ def __init__(self, path, new=False, models=None, settings=None):
+        '''Initialize a sciope notebook object and
+ if its new create it on the users file system.
+
+ Attributes
+ ----------
+ path : str
+ Path to the notebook'''
+ super().__init__(path=path, new=new, models=models, settings=settings)
+
+
+ @classmethod
+ def __create_me_expres_cells(cls):
+ # res conf cell
+ param_conf = "met.data.configurations['listOfParameters'] = "
+ param_conf += "list(model.listOfParameters.keys())"
+ rconf_strs = ["# First lets add some appropiate information about the model and features",
+ param_conf,
+ "met.data.configurations['listOfSpecies'] = list(model.listOfSpecies.keys())",
+ "met.data.configurations['listOfSummaries'] = met.summaries.features",
+ "met.data.configurations['timepoints'] = model.tspan"]
+ # met explore cell
+ mtexp_strs = ["# Here we use UMAP for dimension reduction", "met.explore(dr_method='umap')"]
+ # supervised train cell
+ sptrn_strs = ["from sciope.models.label_propagation import LPModel",
+ "# here lets use the dimension reduction embedding as input data",
+ "data = met.dr_model.embedding_", "",
+ "model_lp = LPModel()", "# train using basinhopping",
+ "model_lp.train(data, met.data.user_labels, min_=0.01, max_=10, niter=50)"]
+ # map label cell
+ cmt_str = "# just to vislualize the result we will map the label distribution "
+ cmt_str += "to the user_labels\n# (will enable us to see the LP model output "
+ cmt_str += "when using method 'explore')"
+ mplbl_strs = [cmt_str, "user_labels = np.copy(met.data.user_labels)",
+ "# takes the label corresponding to index 0",
+ "met.data.user_labels = model_lp.model.label_distributions_[:, 0]"]
+ cells = [nbf.new_markdown_cell("## Explore the result"),
+ nbf.new_code_cell("\n".join(rconf_strs)),
+ nbf.new_code_cell("\n".join(mtexp_strs)),
+ nbf.new_code_cell("\n".join(sptrn_strs)),
+ nbf.new_code_cell("\n".join(mplbl_strs)),
+ nbf.new_code_cell("met.explore(dr_method='umap')"),
+ nbf.new_code_cell("met.data.user_labels = user_labels")]
+ return cells
+
+
+ def __create_me_setup_cells(self):
+ spec_of_interest = list(self.model.get_all_species().keys())
+ # Wrapper cell
+ sim_str = "simulator = wrapper.get_simulator(gillespy_model=model, "
+ sim_str += f"run_settings=settings, species_of_interest={spec_of_interest})"
+ sim_strs = ["from sciope.utilities.gillespy2 import wrapper",
+ "settings = configure_simulation()", sim_str,
+ "expression_array = wrapper.get_parameter_expression_array(model)"]
+ # Dask cell
+ dask_strs = ["from dask.distributed import Client", "", "c = Client()"]
+ # lhc cell
+ lhc_str = "lhc = latin_hypercube_sampling.LatinHypercube("
+ lhc_str += "xmin=expression_array, xmax=expression_array*3)"
+ lhc_strs = ["from sciope.designs import latin_hypercube_sampling",
+ "from sciope.utilities.summarystats.auto_tsfresh import SummariesTSFRESH", "",
+ lhc_str, "lhc.generate_array(1000) # creates a LHD of size 1000", "",
+ "# will use default minimal set of features",
+ "summary_stats = SummariesTSFRESH()"]
+ # stochmet cell
+ ism_strs = ["from sciope.stochmet.stochmet import StochMET", "",
+ "met = StochMET(simulator, lhc, summary_stats)"]
+ cells = [nbf.new_markdown_cell("## Define simulator function (using gillespy2 wrapper)"),
+ nbf.new_code_cell("\n".join(sim_strs)),
+ nbf.new_markdown_cell("## Start local cluster using dask client"),
+ nbf.new_code_cell("\n".join(dask_strs)),
+ nbf.new_markdown_cell("## Define parameter sampler/design and summary statistics"),
+ nbf.new_code_cell("\n".join(lhc_strs)),
+ nbf.new_markdown_cell("## Initiate StochMET"),
+ nbf.new_code_cell("\n".join(ism_strs))]
+ return cells
+
+
+ def __create_mi_setup_cells(self):
+ pad = " "
+ priors = ["from sciope.utilities.priors import uniform_prior", "",
+ "# take default from mode 1 as reference",
+ "default_param = np.array(list(model.listOfParameters.items()))[:, 1]",
+ "", "bound = []", "for exp in default_param:",
+ f"{pad}bound.append(float(exp.expression))", "", "# Set the bounds",
+ "bound = np.array(bound)", "dmin = bound * 0.1", "dmax = bound * 2.0",
+ "", "# Here we use uniform prior",
+ "uni_prior = uniform_prior.UniformPrior(dmin, dmax)"]
+ stat_dist = ["from sciope.utilities.summarystats import auto_tsfresh",
+ "from sciope.utilities.distancefunctions import naive_squared", "",
+ "# Function to generate summary statistics",
+ "summ_func = auto_tsfresh.SummariesTSFRESH()", "",
+ "# Distance", "ns = naive_squared.NaiveSquaredDistance()"]
+ cells = [nbf.new_markdown_cell("## Define prior distribution"),
+ nbf.new_code_cell("\n".join(priors)),
+ nbf.new_markdown_cell("## Define simulator"),
+ self.__create_mi_simulator_cell(),
+ nbf.new_markdown_cell("## Define summary statistics and distance function"),
+ nbf.new_code_cell("\n".join(stat_dist))]
+ return cells
+
+
+ def __create_mi_simulator_cell(self):
+ pad = " "
+ comment = f"{pad}# params - array, need to have the same order as model.listOfParameters"
+ loop = f"{pad}for e, pname in enumerate(model.listOfParameters.keys()):"
+ if self.settings['solver'] == "SSACSolver":
+ comment += "\n"+ pad +"variables = {}"
+ func_def = "def get_variables(params, model):"
+ body = f"{pad*2}variables[pname] = params[e]"
+ return_str = f"{pad}return variables"
+ call = f"{pad}variables = get_variables(params, model)"
+ run = f"{pad}res = model.run(**kwargs, variables=variables)"
+ else:
+ func_def = "def set_model_parameters(params, model):"
+ body = f"{pad*2}model.get_parameter(pname).set_expression(params[e])"
+ return_str = f"{pad}return model"
+ call = f"{pad}model_update = set_model_parameters(params, model)"
+ run = f"{pad}res = model_update.run(**kwargs)"
+ sim_strs = [func_def, comment, loop, body, return_str, ""]
+ simulator = ["# Here we use the GillesPy2 Solver", "def simulator(params, model):",
+ call, "", run, f"{pad}res = res.to_array()",
+ f"{pad}tot_res = np.asarray([x.T for x in res]) # reshape to (N, S, T)",
+ f"{pad}# should not contain timepoints", f"{pad}tot_res = tot_res[:, 1:, :]",
+ "", f"{pad}return tot_res", ""]
+ sim_strs.extend(simulator)
+ sim2_com = "# Wrapper, simulator function to abc should should only take one argument "
+ sim2_com += "(the parameter point)"
+ simulator2 = [sim2_com, "def simulator2(x):", f"{pad}return simulator(x, model=model)"]
+ sim_strs.extend(simulator2)
+ return nbf.new_code_cell("\n".join(sim_strs))
+
+
+ def create_me_notebook(self):
+        '''Create a model exploration Jupyter notebook for a StochSS model/workflow
+
+ Attributes
+ ----------'''
+ self.nb_type = self.MODEL_EXPLORATION
+ self.settings['solver'] = self.get_gillespy2_solver_name()
+ cells = [nbf.new_code_cell("%matplotlib notebook")]
+ cells.extend(self.create_common_cells())
+ cells.append(nbf.new_markdown_cell("# Model Exploration"))
+ cells.extend(self.__create_me_setup_cells())
+ cells.extend([nbf.new_markdown_cell("## Run parameter sweep"),
+ nbf.new_code_cell("met.compute(n_points=500, chunk_size=10)")])
+ cells.extend(self.__create_me_expres_cells())
+
+ message = self.write_notebook_file(cells=cells)
+ return {"Message":message, "FilePath":self.get_path(), "File":self.get_file()}
+
+
+ def create_mi_notebook(self):
+        '''Create a model inference Jupyter notebook for a StochSS model/workflow
+
+ Attributes
+ ----------'''
+ self.nb_type = self.MODEL_INFERENCE
+ self.settings['solver'] = self.get_gillespy2_solver_name()
+ cells = [nbf.new_code_cell("%load_ext autoreload\n%autoreload 2")]
+ cells.extend(self.create_common_cells())
+ imports = "from tsfresh.feature_extraction.settings import MinimalFCParameters"
+ fd_header = "## Generate some fixed(observed) data based on default parameters of the model"
+ fd_str = "kwargs = configure_simulation()\nfixed_data = model.run(**kwargs)"
+ rshp_strs = ["# Reshape the data and remove timepoints array",
+ "fixed_data = fixed_data.to_array()",
+ "fixed_data = np.asarray([x.T for x in fixed_data])",
+ "fixed_data = fixed_data[:, 1:, :]"]
+ cells.extend([nbf.new_markdown_cell("# Model Inference"),
+ nbf.new_code_cell(imports), nbf.new_markdown_cell(fd_header),
+ nbf.new_code_cell(fd_str), nbf.new_code_cell("\n".join(rshp_strs))])
+ cells.extend(self.__create_mi_setup_cells())
+ # abc cell
+ abc_str = "from sciope.inference.abc_inference import ABC\n\n"
+ abc_str += "abc = ABC(fixed_data, sim=simulator2, prior_function=uni_prior, "
+ abc_str += "summaries_function=summ_func.compute, distance_function=ns)"
+ # compute fixed mean cell
+ fm_str = "# First compute the fixed(observed) mean\nabc.compute_fixed_mean(chunk_size=2)"
+ # run model inference cell
+ rmi_str = "res = abc.infer(num_samples=100, batch_size=10, chunk_size=2)"
+ # absolute error cell
+ abse_str = "from sklearn.metrics import mean_absolute_error\n\n"
+ abse_str += "mae_inference = mean_absolute_error(bound, abc.results['inferred_parameters'])"
+ cells.extend([nbf.new_markdown_cell("## Start local cluster using dask client"),
+ nbf.new_code_cell("from dask.distributed import Client\n\nc = Client()"),
+ nbf.new_markdown_cell("## Start abc instance"),
+ nbf.new_code_cell(abc_str), nbf.new_code_cell(fm_str),
+ nbf.new_code_cell(rmi_str), nbf.new_code_cell(abse_str)])
+
+ message = self.write_notebook_file(cells=cells)
+ return {"Message":message, "FilePath":self.get_path(), "File":self.get_file()}
diff --git a/stochss/handlers/util/stochss_folder.py b/stochss/handlers/util/stochss_folder.py
index 4bcfe716e9..51192689da 100644
--- a/stochss/handlers/util/stochss_folder.py
+++ b/stochss/handlers/util/stochss_folder.py
@@ -445,8 +445,11 @@ def upload_from_link(self, remote_path):
'''
ext = remote_path.split('.').pop()
body = requests.get(remote_path, allow_redirects=True).content
- if "presentation-download" in remote_path:
- file = f"{json.loads(body)['name']}.{ext}"
+ if "download-presentation" in remote_path:
+ if ext in ("mdl", "smdl"):
+ file = f"{json.loads(body)['name']}.{ext}"
+ elif ext == "ipynb":
+ file = json.loads(body)['file']
else:
file = self.get_file(path=remote_path)
path = self.get_new_path(dst_path=file)
diff --git a/stochss/handlers/util/stochss_model.py b/stochss/handlers/util/stochss_model.py
index 29e4d4b1ab..a2143f1b66 100644
--- a/stochss/handlers/util/stochss_model.py
+++ b/stochss/handlers/util/stochss_model.py
@@ -490,7 +490,7 @@ def publish_presentation(self):
present_link = f"https://live.stochss.org/stochss/present-model{query_str}"
downloadlink = os.path.join("https://live.stochss.org/stochss/download_presentation",
hostname, file)
- open_link = f"https://live.stochss.org?open={downloadlink}"
+ open_link = f"https://open.stochss.org?open={downloadlink}"
links = {"presentation": present_link, "download": downloadlink, "open": open_link}
return links, data
except PermissionError as err:
diff --git a/stochss/handlers/util/stochss_notebook.py b/stochss/handlers/util/stochss_notebook.py
index 2d2cbf8bd0..17189b85c6 100644
--- a/stochss/handlers/util/stochss_notebook.py
+++ b/stochss/handlers/util/stochss_notebook.py
@@ -76,6 +76,7 @@ def __init__(self, path, new=False, models=None, settings=None):
if changed:
self.path = n_path.replace(self.user_dir + '/', "")
+
def __create_boundary_condition_cells(self):
pad = " "
bc_cells = []
@@ -91,6 +92,7 @@ def __create_boundary_condition_cells(self):
message += f"are referenced incorrectly for notebooks: {str(err)}"
raise StochSSModelFormatError(message, traceback.format_exc()) from err
+
def __create_boundary_condition_string(self, model, pad):
if self.s_model['boundaryConditions']:
bound_conds = ["", f"{pad}# Boundary Conditions"]
@@ -104,14 +106,6 @@ def __create_boundary_condition_string(self, model, pad):
message += f"are referenced incorrectly for notebooks: {str(err)}"
raise StochSSModelFormatError(message, traceback.format_exc()) from err
- def __create_common_cells(self, interactive_backend=False):
- cells = [self.__create_import_cell(interactive_backend=interactive_backend),
- nbf.new_markdown_cell(f"# {self.get_name()}"),
- self.__create_model_cell(),
- nbf.new_code_cell(f'model = {self.__get_class_name()}()'),
- nbf.new_markdown_cell("# Simulation Parameters"),
- self.__create_configuration_cell()]
- return cells
def __create_configuration_cell(self):
pad = " "
@@ -158,6 +152,7 @@ def __create_configuration_cell(self):
config.extend([pad + "}", f"{pad}return kwargs"])
return nbf.new_code_cell("\n".join(config))
+
def __create_event_strings(self, model, pad):
if self.s_model['eventsCollection']:
triggers = ["", f"{pad}# Event Triggers"]
@@ -183,6 +178,7 @@ def __create_event_strings(self, model, pad):
message += f"are referenced incorrectly for notebooks: {str(err)}"
raise StochSSModelFormatError(message, traceback.format_exc()) from err
+
@classmethod
def __create_event_assignment_strings(cls, assignments, event, pad):
names = []
@@ -194,6 +190,7 @@ def __create_event_assignment_strings(cls, assignments, event, pad):
assignments.append(assign_str)
return ', '.join(names)
+
@classmethod
def __create_event_trigger_string(cls, triggers, event, pad):
name = f'{event["name"]}_trig'
@@ -202,6 +199,7 @@ def __create_event_trigger_string(cls, triggers, event, pad):
triggers.append(trig_str)
return name
+
def __create_function_definition_strings(self, model, pad):
if self.s_model['functionDefinitions']:
func_defs = ["", f"{pad}# Function Definitions"]
@@ -217,15 +215,11 @@ def __create_function_definition_strings(self, model, pad):
message += f"are referenced incorrectly for notebooks: {str(err)}"
raise StochSSModelFormatError(message, traceback.format_exc()) from err
- def __create_import_cell(self, interactive_backend=False):
+
+ def __create_import_cell(self):
try:
is_automatic = self.settings['simulationSettings']['isAutomatic']
- if self.nb_type == self.SPATIAL_SIMULATION:
- imports = ["%load_ext autoreload", "%autoreload 2", "", "import numpy as np"]
- else:
- imports = ["import numpy as np"]
- if interactive_backend:
- imports.append("%matplotlib notebook")
+ imports = ["import numpy as np"]
if self.s_model['is_spatial']:
imports.append("import spatialpy")
imports.append("from spatialpy import Model, Species, Parameter, Reaction, Mesh,\\")
@@ -258,6 +252,7 @@ def __create_import_cell(self, interactive_backend=False):
message += f"are referenced incorrectly for notebooks: {str(err)}"
raise StochSSModelFormatError(message, traceback.format_exc()) from err
+
def __create_initial_condition_strings(self, model, pad):
if self.s_model['initialConditions']:
ic_types = {"Place":"PlaceInitialCondition", "Scatter":"ScatterInitialCondition",
@@ -279,6 +274,7 @@ def __create_initial_condition_strings(self, model, pad):
message += f"are referenced incorrectly for notebooks: {str(err)}"
raise StochSSModelFormatError(message, traceback.format_exc()) from err
+
def __create_mesh_string(self, model, pad):
mesh = ["", f"{pad}# Domain",
f"{pad}mesh = Mesh.read_stochss_domain('{self.s_model['path']}')",
@@ -286,10 +282,11 @@ def __create_mesh_string(self, model, pad):
"", f"{pad}self.staticDomain = {self.s_model['domain']['static']}"]
model.extend(mesh)
+
def __create_model_cell(self):
pad = ' '
if self.s_model['is_spatial']:
- model = [f"class {self.__get_class_name()}(Model):",
+ model = [f"class {self.get_class_name()}(Model):",
" def __init__(self):",
f'{pad}Model.__init__(self, name="{self.get_name()}")']
self.__create_mesh_string(model=model, pad=pad)
@@ -299,7 +296,7 @@ def __create_model_cell(self):
self.__create_parameter_strings(model=model, pad=pad)
self.__create_reaction_strings(model=model, pad=pad)
else:
- model = [f"class {self.__get_class_name()}(Model):",
+ model = [f"class {self.get_class_name()}(Model):",
" def __init__(self, parameter_values=None):",
f'{pad}Model.__init__(self, name="{self.get_name()}")',
f"{pad}self.volume = {self.s_model['volume']}"]
@@ -312,6 +309,7 @@ def __create_model_cell(self):
self.__create_tspan_string(model=model, pad=pad)
return nbf.new_code_cell("\n".join(model))
+
def __create_parameter_strings(self, model, pad):
if self.s_model['parameters']:
parameters = ["", f"{pad}# Parameters"]
@@ -335,255 +333,6 @@ def __create_parameter_strings(self, model, pad):
message += f"are referenced incorrectly for notebooks: {str(err)}"
raise StochSSModelFormatError(message, traceback.format_exc()) from err
- def __create_ps_post_process_cells(self):
- pad = " "
- fe_vbs_pnt = f"{pad*2}print(f'population_at_last_timepoint"
- fe_vbs_pnt += " {c.variable_of_interest}={res[c.variable_of_interest][-1]}')"
- # feature extraction cell
- fe_cell = ["# What value(s) do you want to extract from the simulation trajectory",
- "def population_at_last_timepoint(c, res):", f"{pad}if c.verbose:",
- fe_vbs_pnt, f"{pad}return res[c.variable_of_interest][-1]"]
- # mean std aggragator cell
- msa_cell = ["# How do we combine the values from multiple trajectores",
- "def mean_std_of_ensemble(c, data):", f"{pad}a = np.average(data)",
- f"{pad}s = np.std(data)", f"{pad}if c.verbose:",
- pad * 2 + "print(f'mean_std_of_ensemble m:{a} s:{s}')",
- f"{pad}return (a, s)"]
- # average aggragator cell
- aa_cell = [msa_cell[0], "def average_of_ensemble(c, data):",
- f"{pad}a = np.average(data)", f"{pad}if c.verbose:",
- pad * 2 + "print(f'average_of_ensemble = {a}')",
- f"{pad}return a"]
- cells = [nbf.new_markdown_cell("# Post Processing"),
- nbf.new_markdown_cell("## Feature extraction function"),
- nbf.new_code_cell('\n'.join(fe_cell)),
- nbf.new_markdown_cell("## Aggregation function")]
- if self.nb_type == self.PARAMETER_SWEEP_1D:
- cells.append(nbf.new_code_cell('\n'.join(msa_cell)))
- else:
- cells.append(nbf.new_code_cell('\n'.join(aa_cell)))
- return cells
-
- def __create_ps1d_class_cell(self):
- pad = " "
- run_str = self.__create_ps1d_run_str()
- plt_strs = [f"{pad}def plot(c):", f"{pad*2}from matplotlib import pyplot as plt",
- f"{pad*2}from mpl_toolkits.axes_grid1 import make_axes_locatable",
- f"{pad*2}fig, ax = plt.subplots(figsize=(8, 8))",
- pad * 2 + "plt.title(f'Parameter Sweep - Variable:{c.variable_of_interest}')",
- f"{pad*2}plt.errorbar(c.p1_range, c.data[:, 0], c.data[:, 1])",
- f"{pad*2}plt.xlabel(c.p1, fontsize=16, fontweight='bold')",
- f"{pad*2}plt.ylabel('Population', fontsize=16, fontweight='bold')"]
- pltly_str = self.__create_ps1d_plotly_str()
- class_cell = ["class ParameterSweep1D():", "", run_str, "", "",
- "\n".join(plt_strs), "", "", pltly_str]
- return nbf.new_code_cell("\n".join(class_cell))
-
- def __create_ps1d_config_cell(self):
- pad = " "
- if self.settings['solver'] == "SSACSolver":
- model_str = f"{pad}model = {self.__get_class_name()}()"
- else:
- model_str = f"{pad}ps_class = {self.__get_class_name()}"
- config_cell = ["class ParameterSweepConfig(ParameterSweep1D):",
- f"{pad}# What class defines the GillesPy2 model", model_str]
- settings = self.settings['parameterSweepSettings']
- eval_str = "float(eval(model.get_parameter(p1).expression))"
- number_of_trajectories = self.settings['simulationSettings']['realizations']
- if not settings['parameters']:
- param = self.s_model['parameters'][0]
- p_min = f"0.5 * {eval_str}"
- p_max = f"1.5 * {eval_str}"
- p_steps = "11"
- spec_of_interest = self.s_model['species'][0]
- else:
- param = settings['parameters'][0]
- p_min = param['min']
- p_max = param['max']
- p_steps = param['steps']
- spec_of_interest = settings['speciesOfInterest']
- config_cell.extend([f"{pad}# ENTER PARAMETER HERE", f"{pad}p1 = '{param['name']}'",
- f"{pad}# ENTER START VALUE FOR P1 RANGE HERE", f"{pad}p1_min = {p_min}",
- f"{pad}# ENTER END VALUE FOR P1 RANGE HERE", f"{pad}p1_max = {p_max}",
- f"{pad}# ENTER THE NUMBER OF STEPS FOR P1 HERE",
- f"{pad}p1_steps = {p_steps}",
- f"{pad}p1_range = np.linspace(p1_min, p1_max, p1_steps)",
- f"{pad}# ENTER VARIABLE OF INTEREST HERE",
- f"{pad}variable_of_interest = '{spec_of_interest['name']}'",
- f"{pad}number_of_trajectories = {number_of_trajectories}",
- f"{pad}# What feature of the simulation are we examining",
- f"{pad}feature_extraction = population_at_last_timepoint",
- f"{pad}# for ensemble resutls: how do we aggreggate the values",
- f"{pad}ensemble_aggragator = mean_std_of_ensemble"])
- return nbf.new_code_cell("\n".join(config_cell))
-
- @classmethod
- def __create_ps1d_plotly_str(cls):
- pad = " "
- trace_str = f"{pad*2}trace_list = [go.Scatter(x=c.p1_range, y=c.data[:, 0]"
- trace_str += ", error_y=error_y)]"
- title_str = f"{pad*2}title = dict(text=f'Parameter Sweep - Variable: "
- title_str += "{c.variable_of_interest}', x=0.5)"
- lyout_str = f"{pad*2}layout = go.Layout(title=title, xaxis=xaxis_label, yaxis=yaxis_label)"
- pltly_strs = [f"{pad}def plotplotly(c, return_plotly_figure=False):",
- f"{pad*2}from plotly.offline import iplot",
- f"{pad*2}import plotly.graph_objs as go", "",
- f"{pad*2}visible = c.number_of_trajectories > 1",
- f"{pad*2}error_y = dict(type='data', array=c.data[:, 1], visible=visible)",
- "", trace_str, "", title_str,
- f"{pad*2}yaxis_label = dict(title='Population')",
- pad * 2 + "xaxis_label = dict(title=f'{c.p1}')", "",
- lyout_str, "", f"{pad*2}fig = dict(data=trace_list, layout=layout)", "",
- f"{pad*2}if return_plotly_figure:",
- f"{pad*3}return fig", f"{pad*2}iplot(fig)"]
- return "\n".join(pltly_strs)
-
- def __create_ps1d_run_str(self):
- pad = " "
- run_strs = [f"{pad}def run(c, kwargs, verbose=False):",
- f"{pad*2}c.verbose = verbose",
- f"{pad*2}fn = c.feature_extraction",
- f"{pad*2}ag = c.ensemble_aggragator",
- f"{pad*2}data = np.zeros((len(c.p1_range), 2)) # mean and std",
- f"{pad*2}for i, v1 in enumerate(c.p1_range):"]
- res_str = f"{pad*4}tmp_results = "
- if self.settings['solver'] == "SSACSolver":
- res_str += "model.run(**kwargs, variables={c.p1:v1})"
- else:
- res_str += "tmp_model.run(**kwargs)"
- run_strs.extend([f"{pad*3}tmp_model = c.ps_class()",
- f"{pad*3}tmp_model.listOfParameters[c.p1].set_expression(v1)"])
- run_strs.extend([f"{pad*3}if c.verbose:",
- pad * 4 + "print(f'running {c.p1}={v1}')",
- f"{pad*3}if(c.number_of_trajectories > 1):",
- res_str,
- f"{pad*4}(m, s) = ag([fn(x) for x in tmp_results])",
- f"{pad*4}data[i, 0] = m",
- f"{pad*4}data[i, 1] = s",
- f"{pad*3}else:",
- res_str.replace("results", "result"),
- f"{pad*4}data[i, 0] = c.feature_extraction(tmp_result)",
- f"{pad*2}c.data = data"])
- return "\n".join(run_strs)
-
- def __create_ps2d_class_cell(self):
- pad = " "
- run_str = self.__create_ps2d_run_str()
- plt_strs = [f"{pad}def plot(c):", f"{pad*2}from matplotlib import pyplot as plt",
- f"{pad*2}from mpl_toolkits.axes_grid1 import make_axes_locatable",
- f"{pad*2}fig, ax = plt.subplots(figsize=(8, 8))",
- f"{pad*2}plt.imshow(c.data)",
- f"{pad*2}ax.set_xticks(np.arange(c.data.shape[1]) + 0.5, minor=False)",
- f"{pad*2}ax.set_yticks(np.arange(c.data.shape[0]) + 0.5, minor=False)",
- pad * 2 + "plt.title(f'Parameter Sweep - Variable: {c.variable_of_interest}')",
- f"{pad*2}ax.set_xticklabels(c.p1_range, minor=False, rotation=90)",
- f"{pad*2}ax.set_yticklabels(c.p2_range, minor=False)",
- f"{pad*2}ax.set_xlabel(c.p1, fontsize=16, fontweight='bold')",
- f"{pad*2}ax.set_ylabel(c.p2, fontsize=16, fontweight='bold')",
- f"{pad*2}divider = make_axes_locatable(ax)",
- f"{pad*2}cax = divider.append_axes('right', size='5%', pad=0.2)",
- f"{pad*2}_ = plt.colorbar(ax=ax, cax=cax)"]
- pltly_str = self.__create_ps2d_plotly_str()
- class_cell = ["class ParameterSweep2D():", "", run_str, "", "",
- "\n".join(plt_strs), "", "", pltly_str]
- return nbf.new_code_cell("\n".join(class_cell))
-
- def __create_ps2d_config_cell(self):
- pad = " "
- if self.settings['solver'] == "SSACSolver":
- model_str = f"{pad}model = {self.__get_class_name()}()"
- else:
- model_str = f"{pad}ps_class = {self.__get_class_name()}"
- config_cell = ["class ParameterSweepConfig(ParameterSweep2D):",
- f"{pad}# What class defines the GillesPy2 model", model_str]
- settings = self.settings['parameterSweepSettings']
- p1_eval_str = "float(eval(model.get_parameter(p1).expression))"
- p2_eval_str = "float(eval(model.get_parameter(p2).expression))"
- number_of_trajectories = self.settings['simulationSettings']['realizations']
- if not settings['parameters']:
- param1 = self.s_model['parameters'][0]
- p1_min = f"0.5 * {p1_eval_str}"
- p1_max = f"1.5 * {p1_eval_str}"
- param2 = self.s_model['parameters'][1]
- p2_min = f"0.5 * {p2_eval_str}"
- p2_max = f"1.5 * {p2_eval_str}"
- spec_of_interest = self.s_model['species'][0]
- else:
- param1 = settings['parameters'][0]
- p1_min = param1['min']
- p1_max = param1['max']
- param2 = settings['parameters'][1]
- p2_min = param2['min']
- p2_max = param2['max']
- spec_of_interest = settings['speciesOfInterest']
- config_cell.extend([f"{pad}# ENTER PARAMETER 1 HERE", f"{pad}p1 = '{param1['name']}'",
- f"{pad}# ENTER PARAMETER 2 HERE", f"{pad}p2 = '{param2['name']}'",
- f"{pad}# ENTER START VALUE FOR P1 RANGE HERE",
- f"{pad}p1_min = {p1_min}",
- f"{pad}# ENTER END VALUE FOR P1 RANGE HERE", f"{pad}p1_max = {p1_max}",
- f"{pad}# ENTER THE NUMBER OF STEPS FOR P1 HERE",
- f"{pad}p1_steps = {param1['steps'] if settings['parameters'] else 11}",
- f"{pad}p1_range = np.linspace(p1_min, p1_max, p1_steps)",
- f"{pad}# ENTER START VALUE FOR P2 RANGE HERE",
- f"{pad}p2_min = {p2_min}",
- f"{pad}# ENTER END VALUE FOR P2 RANGE HERE", f"{pad}p2_max = {p2_max}",
- f"{pad}# ENTER THE NUMBER OF STEPS FOR P2 HERE",
- f"{pad}p2_steps = {param2['steps'] if settings['parameters'] else 11}",
- f"{pad}p2_range = np.linspace(p2_min, p2_max, p2_steps)",
- f"{pad}# ENTER VARIABLE OF INTEREST HERE",
- f"{pad}variable_of_interest = '{spec_of_interest['name']}'",
- f"{pad}number_of_trajectories = {number_of_trajectories}",
- f"{pad}# What feature of the simulation are we examining",
- f"{pad}feature_extraction = population_at_last_timepoint",
- f"{pad}# for ensemble resutls: how do we aggreggate the values",
- f"{pad}ensemble_aggragator = average_of_ensemble"])
- return nbf.new_code_cell("\n".join(config_cell))
-
- @classmethod
- def __create_ps2d_plotly_str(cls):
- pad = " "
- title_str = f"{pad*2}title = dict(text=f'Parameter Sweep - Variable: "
- title_str += "{c.variable_of_interest}', x=0.5)"
- lyout_str = f"{pad*2}layout = go.Layout(title=title, xaxis=xaxis_label, yaxis=yaxis_label)"
- pltly_strs = [f"{pad}def plotplotly(c, return_plotly_figure=False):",
- f"{pad*2}from plotly.offline import init_notebook_mode, iplot",
- f"{pad*2}import plotly.graph_objs as go", "",
- f"{pad*2}xaxis_ticks = c.p1_range", f"{pad*2}yaxis_ticks = c.p2_range", "",
- f"{pad*2}trace_list = [go.Heatmap(z=c.data, x=xaxis_ticks, y=yaxis_ticks)]",
- title_str, pad * 2 + "xaxis_label = dict(title=f'{c.p1}')",
- pad * 2 + "yaxis_label = dict(title=f'{c.p2}')", "",
- lyout_str, "", f"{pad*2}fig = dict(data=trace_list, layout=layout)", "",
- f"{pad*2}if return_plotly_figure:",
- f"{pad*3}return fig", f"{pad*2}iplot(fig)"]
- return "\n".join(pltly_strs)
-
- def __create_ps2d_run_str(self):
- pad = " "
- run_strs = [f"{pad}def run(c, kwargs, verbose=False):",
- f"{pad*2}c.verbose = verbose",
- f"{pad*2}fn = c.feature_extraction",
- f"{pad*2}ag = c.ensemble_aggragator",
- f"{pad*2}data = np.zeros((len(c.p1_range), len(c.p2_range)))",
- f"{pad*2}for i, v1 in enumerate(c.p1_range):",
- f"{pad*3}for j, v2 in enumerate(c.p2_range):"]
- res_str = f"{pad*5}tmp_results = "
- if self.settings['solver'] == "SSACSolver":
- res_str += "model.run(**kwargs, variables={c.p1:v1, c.p2:v2})"
- else:
- res_str += "tmp_model.run(**kwargs)"
- run_strs.extend([f"{pad*4}tmp_model = c.ps_class()",
- f"{pad*4}tmp_model.listOfParameters[c.p1].set_expression(v1)",
- f"{pad*4}tmp_model.listOfParameters[c.p2].set_expression(v2)"])
- run_strs.extend([f"{pad*4}if c.verbose:",
- pad * 5 + "print(f'running {c.p1}={v1}, {c.p2}={v2}')",
- f"{pad*4}if(c.number_of_trajectories > 1):",
- res_str,
- f"{pad*5}data[i, j] = ag([fn(x) for x in tmp_results])",
- f"{pad*4}else:",
- res_str.replace("results", "result"),
- f"{pad*5}data[i, j] = c.feature_extraction(tmp_result)",
- f"{pad*2}c.data = data"])
- return "\n".join(run_strs)
def __create_reaction_strings(self, model, pad):
if self.s_model['reactions']:
@@ -617,6 +366,7 @@ def __create_reaction_strings(self, model, pad):
message += f"are referenced incorrectly for notebooks: {str(err)}"
raise StochSSModelFormatError(message, traceback.format_exc()) from err
+
def __create_rules_strings(self, model, pad):
if self.s_model['rules']:
rate_rules = ["", f"{pad}# Rate Rules"]
@@ -641,120 +391,6 @@ def __create_rules_strings(self, model, pad):
message += f"are referenced incorrectly for notebooks: {str(err)}"
raise StochSSModelFormatError(message, traceback.format_exc()) from err
- @classmethod
- def __create_sme_expres_cells(cls):
- # res conf cell
- param_conf = "met.data.configurations['listOfParameters'] = "
- param_conf += "list(model.listOfParameters.keys())"
- rconf_strs = ["# First lets add some appropiate information about the model and features",
- param_conf,
- "met.data.configurations['listOfSpecies'] = list(model.listOfSpecies.keys())",
- "met.data.configurations['listOfSummaries'] = met.summaries.features",
- "met.data.configurations['timepoints'] = model.tspan"]
- # met explore cell
- mtexp_strs = ["# Here we use UMAP for dimension reduction", "met.explore(dr_method='umap')"]
- # supervised train cell
- sptrn_strs = ["from sciope.models.label_propagation import LPModel",
- "# here lets use the dimension reduction embedding as input data",
- "data = met.dr_model.embedding_", "",
- "model_lp = LPModel()", "# train using basinhopping",
- "model_lp.train(data, met.data.user_labels, min_=0.01, max_=10, niter=50)"]
- # map label cell
- cmt_str = "# just to vislualize the result we will map the label distribution "
- cmt_str += "to the user_labels\n# (will enable us to see the LP model output "
- cmt_str += "when using method 'explore')"
- mplbl_strs = [cmt_str, "user_labels = np.copy(met.data.user_labels)",
- "# takes the label corresponding to index 0",
- "met.data.user_labels = model_lp.model.label_distributions_[:, 0]"]
- cells = [nbf.new_markdown_cell("## Explore the result"),
- nbf.new_code_cell("\n".join(rconf_strs)),
- nbf.new_code_cell("\n".join(mtexp_strs)),
- nbf.new_code_cell("\n".join(sptrn_strs)),
- nbf.new_code_cell("\n".join(mplbl_strs)),
- nbf.new_code_cell("met.explore(dr_method='umap')"),
- nbf.new_code_cell("met.data.user_labels = user_labels")]
- return cells
-
- def __create_sme_setup_cells(self):
- spec_of_interest = list(self.model.get_all_species().keys())
- # Wrapper cell
- sim_str = "simulator = wrapper.get_simulator(gillespy_model=model, "
- sim_str += f"run_settings=settings, species_of_interest={spec_of_interest})"
- sim_strs = ["from sciope.utilities.gillespy2 import wrapper",
- "settings = configure_simulation()", sim_str,
- "expression_array = wrapper.get_parameter_expression_array(model)"]
- # Dask cell
- dask_strs = ["from dask.distributed import Client", "", "c = Client()"]
- # lhc cell
- lhc_str = "lhc = latin_hypercube_sampling.LatinHypercube("
- lhc_str += "xmin=expression_array, xmax=expression_array*3)"
- lhc_strs = ["from sciope.designs import latin_hypercube_sampling",
- "from sciope.utilities.summarystats.auto_tsfresh import SummariesTSFRESH", "",
- lhc_str, "lhc.generate_array(1000) # creates a LHD of size 1000", "",
- "# will use default minimal set of features",
- "summary_stats = SummariesTSFRESH()"]
- # stochmet cell
- ism_strs = ["from sciope.stochmet.stochmet import StochMET", "",
- "met = StochMET(simulator, lhc, summary_stats)"]
- cells = [nbf.new_markdown_cell("## Define simulator function (using gillespy2 wrapper)"),
- nbf.new_code_cell("\n".join(sim_strs)),
- nbf.new_markdown_cell("## Start local cluster using dask client"),
- nbf.new_code_cell("\n".join(dask_strs)),
- nbf.new_markdown_cell("## Define parameter sampler/design and summary statistics"),
- nbf.new_code_cell("\n".join(lhc_strs)),
- nbf.new_markdown_cell("## Initiate StochMET"),
- nbf.new_code_cell("\n".join(ism_strs))]
- return cells
-
- def __create_smi_setup_cells(self):
- pad = " "
- priors = ["# take default from mode 1 as reference",
- "default_param = np.array(list(model.listOfParameters.items()))[:, 1]",
- "", "bound = []", "for exp in default_param:",
- f"{pad}bound.append(float(exp.expression))", "", "# Set the bounds",
- "bound = np.array(bound)", "dmin = bound * 0.1", "dmax = bound * 2.0",
- "", "# Here we use uniform prior",
- "uni_prior = uniform_prior.UniformPrior(dmin, dmax)"]
- stat_dist = ["# Function to generate summary statistics",
- "summ_func = auto_tsfresh.SummariesTSFRESH()", "",
- "# Distance", "ns = naive_squared.NaiveSquaredDistance()"]
- cells = [nbf.new_markdown_cell("## Define prior distribution"),
- nbf.new_code_cell("\n".join(priors)),
- nbf.new_markdown_cell("## Define simulator"),
- self.__create_smi_simulator_cell(),
- nbf.new_markdown_cell("## Define summary statistics and distance function"),
- nbf.new_code_cell("\n".join(stat_dist))]
- return cells
-
- def __create_smi_simulator_cell(self):
- pad = " "
- comment = f"{pad}# params - array, need to have the same order as model.listOfParameters"
- loop = f"{pad}for e, pname in enumerate(model.listOfParameters.keys()):"
- if self.settings['solver'] == "SSACSolver":
- comment += "\n"+ pad +"variables = {}"
- func_def = "def get_variables(params, model):"
- body = f"{pad*2}variables[pname] = params[e]"
- return_str = f"{pad}return variables"
- call = f"{pad}variables = get_variables(params, model)"
- run = f"{pad}res = model.run(**kwargs, variables=variables)"
- else:
- func_def = "def set_model_parameters(params, model):"
- body = f"{pad*2}model.get_parameter(pname).set_expression(params[e])"
- return_str = f"{pad}return model"
- call = f"{pad}model_update = set_model_parameters(params, model)"
- run = f"{pad}res = model_update.run(**kwargs)"
- sim_strs = [func_def, comment, loop, body, return_str, ""]
- simulator = ["# Here we use the GillesPy2 Solver", "def simulator(params, model):",
- call, "", run, f"{pad}res = res.to_array()",
- f"{pad}tot_res = np.asarray([x.T for x in res]) # reshape to (N, S, T)",
- f"{pad}# should not contain timepoints", f"{pad}tot_res = tot_res[:, 1:, :]",
- "", f"{pad}return tot_res", ""]
- sim_strs.extend(simulator)
- sim2_com = "# Wrapper, simulator function to abc should should only take one argument "
- sim2_com += "(the parameter point)"
- simulator2 = [sim2_com, "def simulator2(x):", f"{pad}return simulator(x, model=model)"]
- sim_strs.extend(simulator2)
- return nbf.new_code_cell("\n".join(sim_strs))
def __create_species_strings(self, model, pad):
if self.s_model['species']:
@@ -785,6 +421,7 @@ def __create_species_strings(self, model, pad):
message += f"are referenced incorrectly for notebooks: {str(err)}"
raise StochSSModelFormatError(message, traceback.format_exc()) from err
+
def __create_stoich_spec_string(self, stoich_species):
species = {}
for stoich_spec in stoich_species:
@@ -800,6 +437,7 @@ def __create_stoich_spec_string(self, stoich_species):
spec_list.append(f"{name}: {ratio}")
return "{" + ", ".join(spec_list) + "}"
+
def __create_tspan_string(self, model, pad):
end = self.s_model['modelSettings']['endSim']
output_freq = self.s_model['modelSettings']['timeStep']
@@ -813,16 +451,6 @@ def __create_tspan_string(self, model, pad):
tspan.append(ts_str)
model.extend(tspan)
- def __get_class_name(self):
- name = self.get_name()
- for char in string.punctuation:
- name = name.replace(char, "")
- l_char = name[0]
- if l_char in string.digits:
- return f"M{name}"
- if l_char in string.ascii_lowercase:
- return name.replace(l_char, l_char.upper(), 1)
- return name
def __get_gillespy2_run_settings(self):
is_automatic = self.settings['simulationSettings']['isAutomatic']
@@ -852,6 +480,7 @@ def __get_gillespy2_run_settings(self):
run_settings.extend(algorithm_settings)
return run_settings
+
def __get_spatialpy_run_setting(self):
self.settings['simulationSettings']['realizations'] = 1
settings = self.settings['simulationSettings']
@@ -859,16 +488,6 @@ def __get_spatialpy_run_setting(self):
"seed":settings['seed'] if settings['seed'] != -1 else None}
return [f'"{key}":{val}' for key, val in settings_map.items()]
- def __get_gillespy2_solver_name(self):
- if self.settings['simulationSettings']['isAutomatic']:
- solver = self.model.get_best_solver().name
- self.settings['simulationSettings']['algorithm'] = self.SOLVER_MAP[solver]
- return solver
- algorithm_map = {'SSA': self.model.get_best_solver_algo("SSA").name,
- 'Tau-Leaping': self.model.get_best_solver_algo("Tau-Leaping").name,
- 'Hybrid-Tau-Leaping': 'TauHybridSolver',
- 'ODE': self.model.get_best_solver_algo("ODE").name}
- return algorithm_map[self.settings['simulationSettings']['algorithm']]
@classmethod
def __get_presentation_links(cls, hostname, file):
@@ -876,52 +495,20 @@ def __get_presentation_links(cls, hostname, file):
present_link = f"https://live.stochss.org/stochss/present-notebook{query_str}"
dl_link_base = "https://live.stochss.org/stochss/notebook/download_presentation"
download_link = os.path.join(dl_link_base, hostname, file)
- open_link = f"https://live.stochss.org?open={download_link}"
+ open_link = f"https://open.stochss.org?open={download_link}"
return {"presentation": present_link, "download": download_link, "open": open_link}
- def create_1dps_notebook(self):
- '''Create a 1D parameter sweep jupiter notebook for a StochSS model/workflow
- Attributes
- ----------'''
- self.nb_type = self.PARAMETER_SWEEP_1D
- self.settings['solver'] = self.__get_gillespy2_solver_name()
- run_strs = ["kwargs = configure_simulation()", "ps = ParameterSweepConfig()",
- "%time ps.run(kwargs)"]
- cells = self.__create_common_cells()
- cells.extend(self.__create_ps_post_process_cells())
- cells.extend([nbf.new_markdown_cell("# Parameter Sweep"),
- self.__create_ps1d_class_cell(),
- self.__create_ps1d_config_cell(),
- nbf.new_code_cell("\n".join(run_strs)),
- nbf.new_markdown_cell("# Visualization"),
- nbf.new_code_cell("ps.plot()"),
- nbf.new_code_cell("ps.plotplotly()")])
-
- message = self.write_notebook_file(cells=cells)
- return {"Message":message, "FilePath":self.get_path(), "File":self.get_file()}
-
- def create_2dps_notebook(self):
- '''Create a 2D parameter sweep jupiter notebook for a StochSS model/workflow
-
- Attributes
- ----------'''
- self.nb_type = self.PARAMETER_SWEEP_2D
- self.settings['solver'] = self.__get_gillespy2_solver_name()
- run_strs = ["kwargs = configure_simulation()", "ps = ParameterSweepConfig()",
- "%time ps.run(kwargs)"]
- cells = self.__create_common_cells()
- cells.extend(self.__create_ps_post_process_cells())
- cells.extend([nbf.new_markdown_cell("# Parameter Sweep"),
- self.__create_ps2d_class_cell(),
- self.__create_ps2d_config_cell(),
- nbf.new_code_cell("\n".join(run_strs)),
- nbf.new_markdown_cell("# Visualization"),
- nbf.new_code_cell("ps.plot()"),
- nbf.new_code_cell("ps.plotplotly()")])
+ def create_common_cells(self):
+ ''' Create the cells common to all notebook types. '''
+ cells = [self.__create_import_cell(),
+ nbf.new_markdown_cell(f"# {self.get_name()}"),
+ self.__create_model_cell(),
+ nbf.new_code_cell(f'model = {self.get_class_name()}()'),
+ nbf.new_markdown_cell("# Simulation Parameters"),
+ self.__create_configuration_cell()]
+ return cells
- message = self.write_notebook_file(cells=cells)
- return {"Message":message, "FilePath":self.get_path(), "File":self.get_file()}
def create_es_notebook(self):
'''Create an ensemble simulation jupiter notebook for a StochSS model/workflow
@@ -929,9 +516,9 @@ def create_es_notebook(self):
Attributes
----------'''
self.nb_type = self.ENSEMBLE_SIMULATION
- self.settings['solver'] = self.__get_gillespy2_solver_name()
+ self.settings['solver'] = self.get_gillespy2_solver_name()
run_str = "kwargs = configure_simulation()\nresults = model.run(**kwargs)"
- cells = self.__create_common_cells()
+ cells = self.create_common_cells()
cells.extend([nbf.new_code_cell(run_str),
nbf.new_markdown_cell("# Visualization"),
nbf.new_code_cell("results.plotplotly()")])
@@ -939,6 +526,7 @@ def create_es_notebook(self):
message = self.write_notebook_file(cells=cells)
return {"Message":message, "FilePath":self.get_path(), "File":self.get_file()}
+
def create_ses_notebook(self):
'''Create a spetial ensemble simulation jupiter notebook for a StochSS model/workflow
@@ -952,7 +540,8 @@ def create_ses_notebook(self):
plot_str = f"results.plot_species('{species}', animated=True, width=None, height=None)"
else:
plot_str = "results.plot_property('type', animated=True, width=None, height=None)"
- cells = self.__create_common_cells()
+ cells = [nbf.new_code_cell("%load_ext autoreload\n%autoreload 2")]
+ cells.extend(self.create_common_cells())
if 'boundaryConditions' in self.s_model.keys():
bc_cells = self.__create_boundary_condition_cells()
for i, bc_cell in enumerate(bc_cells):
@@ -964,66 +553,32 @@ def create_ses_notebook(self):
message = self.write_notebook_file(cells=cells)
return {"Message":message, "FilePath":self.get_path(), "File":self.get_file()}
- def create_sme_notebook(self):
- '''Create a model exploration jupiter notebook for a StochSS model/workflow
-
- Attributes
- ----------'''
- self.nb_type = self.MODEL_EXPLORATION
- self.settings['solver'] = self.__get_gillespy2_solver_name()
- cells = self.__create_common_cells(interactive_backend=True)
- cells.append(nbf.new_markdown_cell("# Model Exploration"))
- cells.extend(self.__create_sme_setup_cells())
- cells.extend([nbf.new_markdown_cell("## Run parameter sweep"),
- nbf.new_code_cell("met.compute(n_points=500, chunk_size=10)")])
- cells.extend(self.__create_sme_expres_cells())
- message = self.write_notebook_file(cells=cells)
- return {"Message":message, "FilePath":self.get_path(), "File":self.get_file()}
+ def get_class_name(self):
+ ''' Get the python style class name, '''
+ name = self.get_name()
+ for char in string.punctuation:
+ name = name.replace(char, "")
+ l_char = name[0]
+ if l_char in string.digits:
+ return f"M{name}"
+ if l_char in string.ascii_lowercase:
+ return name.replace(l_char, l_char.upper(), 1)
+ return name
- def create_smi_notebook(self):
- '''Create a model inference jupiter notebook for a StochSS model/workflow
- Attributes
- ----------'''
- self.nb_type = self.MODEL_INFERENCE
- self.settings['solver'] = self.__get_gillespy2_solver_name()
- cells = self.__create_common_cells()
- imports = ["%load_ext autoreload", "%autoreload 2", "",
- "from tsfresh.feature_extraction.settings import MinimalFCParameters",
- "from sciope.utilities.priors import uniform_prior",
- "from sciope.utilities.summarystats import auto_tsfresh",
- "from sciope.utilities.distancefunctions import naive_squared",
- "from sciope.inference.abc_inference import ABC",
- "from sklearn.metrics import mean_absolute_error",
- "from dask.distributed import Client"]
- fd_header = "## Generate some fixed(observed) data based on default parameters of the model"
- fd_str = "kwargs = configure_simulation()\nfixed_data = model.run(**kwargs)"
- rshp_strs = ["# Reshape the data and remove timepoints array",
- "fixed_data = fixed_data.to_array()",
- "fixed_data = np.asarray([x.T for x in fixed_data])",
- "fixed_data = fixed_data[:, 1:, :]"]
- cells.extend([nbf.new_markdown_cell("# Model Inference"),
- nbf.new_code_cell("\n".join(imports)), nbf.new_markdown_cell(fd_header),
- nbf.new_code_cell(fd_str), nbf.new_code_cell("\n".join(rshp_strs))])
- cells.extend(self.__create_smi_setup_cells())
- # abc cell
- abc_str = "abc = ABC(fixed_data, sim=simulator2, prior_function=uni_prior, "
- abc_str += "summaries_function=summ_func.compute, distance_function=ns)"
- # compute fixed mean cell
- fm_str = "# First compute the fixed(observed) mean\nabc.compute_fixed_mean(chunk_size=2)"
- # run model inference cell
- rmi_str = "res = abc.infer(num_samples=100, batch_size=10, chunk_size=2)"
- # absolute error cell
- abse_str = "mae_inference = mean_absolute_error(bound, abc.results['inferred_parameters'])"
- cells.extend([nbf.new_markdown_cell("## Start local cluster using dask client"),
- nbf.new_code_cell("c = Client()"),
- nbf.new_markdown_cell("## Start abc instance"),
- nbf.new_code_cell(abc_str), nbf.new_code_cell(fm_str),
- nbf.new_code_cell(rmi_str), nbf.new_code_cell(abse_str)])
+ def get_gillespy2_solver_name(self):
+ ''' Get the name of the gillespy2 solver. '''
+ if self.settings['simulationSettings']['isAutomatic']:
+ solver = self.model.get_best_solver().name
+ self.settings['simulationSettings']['algorithm'] = self.SOLVER_MAP[solver]
+ return solver
+ algorithm_map = {'SSA': self.model.get_best_solver_algo("SSA").name,
+ 'Tau-Leaping': self.model.get_best_solver_algo("Tau-Leaping").name,
+ 'Hybrid-Tau-Leaping': 'TauHybridSolver',
+ 'ODE': self.model.get_best_solver_algo("ODE").name}
+ return algorithm_map[self.settings['simulationSettings']['algorithm']]
- message = self.write_notebook_file(cells=cells)
- return {"Message":message, "FilePath":self.get_path(), "File":self.get_file()}
def load(self):
'''Read the notebook file and return as a dict'''
diff --git a/stochss/handlers/workflows.py b/stochss/handlers/workflows.py
index a7ec157b8b..daa572c3ed 100644
--- a/stochss/handlers/workflows.py
+++ b/stochss/handlers/workflows.py
@@ -28,7 +28,8 @@
# Use finish() for json, write() for text
from .util import StochSSJob, StochSSModel, StochSSSpatialModel, StochSSNotebook, StochSSWorkflow, \
- StochSSFolder, StochSSAPIError, report_error
+ StochSSParamSweepNotebook, StochSSSciopeNotebook, StochSSAPIError, report_error, \
+ StochSSFolder
log = logging.getLogger('stochss')
@@ -285,13 +286,18 @@ async def get(self):
else:
log.info("Creating notebook workflow for %s", file_obj.get_file())
log.debug("Type of workflow to be run: %s", wkfl_type)
- notebook = StochSSNotebook(**kwargs)
- notebooks = {"gillespy":notebook.create_es_notebook,
- "spatial":notebook.create_ses_notebook,
- "1d_parameter_sweep":notebook.create_1dps_notebook,
- "2d_parameter_sweep":notebook.create_2dps_notebook,
- "sciope_model_exploration":notebook.create_sme_notebook,
- "model_inference":notebook.create_smi_notebook}
+ if wkfl_type in ("1d_parameter_sweep", "2d_parameter_sweep"):
+ notebook = StochSSParamSweepNotebook(**kwargs)
+ notebooks = {"1d_parameter_sweep":notebook.create_1d_notebook,
+ "2d_parameter_sweep":notebook.create_2d_notebook}
+ elif wkfl_type in ("sciope_model_exploration", "model_inference"):
+ notebook = StochSSSciopeNotebook(**kwargs)
+ notebooks = {"sciope_model_exploration":notebook.create_me_notebook,
+ "model_inference":notebook.create_mi_notebook}
+ else:
+ notebook = StochSSNotebook(**kwargs)
+ notebooks = {"gillespy":notebook.create_es_notebook,
+ "spatial":notebook.create_ses_notebook}
resp = notebooks[wkfl_type]()
notebook.print_logs(log)
log.debug("Response: %s", resp)