diff --git a/pyproject.toml b/pyproject.toml
index e8e9a09bd0e..733ecc358f5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,8 +17,9 @@ dependencies=[
"tree-sitter",
"requests",
"beautifulsoup4", # used to remove comments etc from pMML before sending to MORAE
- "typing_extensions==4.5.0", # see https://github.com/pydantic/pydantic/issues/5821#issuecomment-1559196859
- "fastapi",
+ "typing_extensions", # see https://github.com/pydantic/pydantic/issues/5821#issuecomment-1559196859
+ "fastapi~=0.100.0",
+ "pydantic~=2.0.0",
"uvicorn",
"python-multipart"
]
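These pins track the pydantic v1 → v2 migration: with `pydantic~=2.0.0` and `fastapi~=0.100.0` held to compatible releases, `typing_extensions` no longer needs to be frozen at 4.5.0. A minimal sanity check for the pairing (a sketch; it relies only on the version attributes both packages expose):

```
import fastapi
import pydantic

# Confirm the compatible pairing pinned in pyproject.toml
assert pydantic.VERSION.startswith("2."), pydantic.VERSION
assert fastapi.__version__.startswith("0.100."), fastapi.__version__
```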
diff --git a/skema/img2mml/schema.py b/skema/img2mml/schema.py
index 17efba2d395..015975f82f1 100644
--- a/skema/img2mml/schema.py
+++ b/skema/img2mml/schema.py
@@ -25,7 +25,7 @@
class LatexEquation(BaseModel):
tex_src: str = Field(title="LaTeX equation", description="The LaTeX equation to process")
class Config:
- schema_extra = {
+ json_schema_extra = {
"example": {
"tex_src": "\\frac{\\partial x}{\\partial t} = {\\alpha x} - {\\beta x y}",
},
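`schema_extra` is the pydantic v1 spelling; v2 renamed the `Config` key to `json_schema_extra`, which is what makes this one-line change sufficient. A self-contained sketch of the renamed hook (the printout assumes pydantic v2's `model_json_schema`):

```
from pydantic import BaseModel, Field

class LatexEquation(BaseModel):
    tex_src: str = Field(title="LaTeX equation", description="The LaTeX equation to process")

    class Config:
        json_schema_extra = {
            "example": {
                "tex_src": "\\frac{\\partial x}{\\partial t} = {\\alpha x} - {\\beta x y}",
            },
        }

# The extra keys land in the generated JSON schema
print(LatexEquation.model_json_schema()["example"]["tex_src"])
```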
diff --git a/skema/rest/integrated_text_reading_proxy.py b/skema/rest/integrated_text_reading_proxy.py
index eb8f7dcd104..15f130747a0 100644
--- a/skema/rest/integrated_text_reading_proxy.py
+++ b/skema/rest/integrated_text_reading_proxy.py
@@ -236,7 +236,10 @@ def parquet_to_json(path):
def cosmos_client(name: str, data: BinaryIO):
- """ Posts a pdf to COSMOS and returns the JSON representation of the parquet file """
+ """
+ Posts a pdf to COSMOS and returns the JSON representation of the parquet file
+
+ """
# Create POST request to COSMOS server
# Prep the pdf data for upload
@@ -404,6 +407,21 @@ async def integrated_text_extractions(
annotate_skema: bool = True,
annotate_mit: bool = True,
) -> TextReadingAnnotationsOutput:
+ """
+ ### Python example
+ ```
+ params = {
+ "annotate_skema":True,
+ "annotate_mit": True
+ }
+
+ files = [("pdfs", ("paper.txt", open("paper.txt", "rb")))]
+
+ response = request.post(f"{URL}/text-reading/integrated-text-extractions", params=params, files=files)
+ if response.status_code == 200:
+ data = response.json()
+ ```
+ """
# Get the input plain texts
texts = texts.texts
@@ -429,6 +447,22 @@ async def integrated_pdf_extractions(
annotate_skema: bool = True,
annotate_mit: bool = True
) -> TextReadingAnnotationsOutput:
+ """
+
+ ### Python example
+ ```
+ params = {
+ "annotate_skema":True,
+ "annotate_mit": True
+ }
+
+ files = [("pdfs", ("ijerp.pdf", open("ijerp.pdf", "rb")))]
+
+    response = requests.post(f"{URL}/text-reading/integrated-pdf-extractions", params=params, files=files)
+ if response.status_code == 200:
+ data = response.json()
+ ```
+ """
# TODO: Make this handle multiple pdf files in parallel
# Call COSMOS on the pdfs
cosmos_data = list()
@@ -453,10 +487,19 @@ async def integrated_pdf_extractions(
@router.post(
"/cosmos_to_json",
status_code=200,
- description="Calls COSMOS on a pdf and converts the data into json"
)
async def cosmos_to_json(pdf: UploadFile) -> List[Dict]:
- """ Calls COSMOS on a pdf and converts the data into json """
+ """ Calls COSMOS on a pdf and converts the data into json
+
+ ### Python example
+ ```
+ response = requests.post(f"{endpoint}/text-reading/cosmos_to_json",
+ files=[
+ ("pdf", ("ijerp.pdf", open("ijerph-18-09027.pdf", 'rb')))
+ ]
+ )
+ ```
+ """
return cosmos_client(pdf.filename, pdf.file)
@@ -467,7 +510,18 @@ async def cosmos_to_json(pdf: UploadFile) -> List[Dict]:
)
async def ground_to_mira(k: int, queries: MiraGroundingInputs, response: Response) -> List[
List[MiraGroundingOutputItem]]:
- """ Proxy to the MIRA grounding functionality on the SKEMA TR service """
+ """ Proxy to the MIRA grounding functionality on the SKEMA TR service
+
+ ### Python example
+ ```
+ queries = {"queries": ["infected", "suceptible"]}
+ params = {"k": 5}
+ response = requests.post(f"{endpoint}/text-reading/ground_to_mira", params=params, json=queries)
+
+ if response.status_code == 200:
+ results = response.json()
+ ```
+ """
params = {
"k": k
}
@@ -488,7 +542,18 @@ async def ground_to_mira(k: int, queries: MiraGroundingInputs, response: Respons
@router.post("/cards/get_model_card")
async def get_model_card(text_file: UploadFile, code_file: UploadFile, response: Response):
- """ Calls the model card endpoint from MIT's pipeline """
+ """ Calls the model card endpoint from MIT's pipeline
+
+ ### Python example
+ ```
+ files = {
+ "text_file": ('text_file.txt", open("text_file.txt", 'rb')),
+ "code_file": ('code_file.py", open("code_file.py", 'rb')),
+ }
+
+ response = requests.post(f"{endpoint}/text-reading/cards/get_model_card", files=files)
+ ```
+ """
params = {
"gpt_key": OPENAI_KEY,
@@ -504,11 +569,29 @@ async def get_model_card(text_file: UploadFile, code_file: UploadFile, response:
return inner_response.json()
@router.post("/cards/get_data_card")
-async def get_model_card(csv_file: UploadFile, doc_file: UploadFile, response: Response):
- """ Calls the data card endpoint from MIT's pipeline """
+async def get_data_card(smart: bool, csv_file: UploadFile, doc_file: UploadFile, response: Response):
+    """
+    Calls the data card endpoint from MIT's pipeline.
+    A smart run provides better results but responds more slowly because it makes extra GPT calls.
+
+ ### Python example
+ ```
+ params = {
+ "smart": False
+ }
+
+ files = {
+ "csv_file": ('csv_file.csv", open("csv_file.csv", 'rb')),
+ "doc_file": ('doc_file.txt", open("doc_file.txt", 'rb')),
+ }
+
+ response = requests.post(f"{endpoint}/text-reading/cards/get_data_card", params=params files=files)
+ ```
+ """
params = {
"gpt_key": OPENAI_KEY,
+ "smart": smart
}
files = {
"csv_file": (csv_file.filename, csv_file.file, "text/csv"),
diff --git a/skema/rest/metal_proxy.py b/skema/rest/metal_proxy.py
index e20bc2d0d7b..a4429bbebb2 100644
--- a/skema/rest/metal_proxy.py
+++ b/skema/rest/metal_proxy.py
@@ -21,7 +21,24 @@ def link_amr(amr_type: str,
similarity_threshold: float = 0.5,
amr_file: UploadFile = File(...),
text_extractions_file: UploadFile = File(...)):
- """ Links an AMR to a text extractions file """
+ """ Links an AMR to a text extractions file
+
+ ### Python example
+ ```
+ params = {
+ "amr_type": "petrinet"
+ }
+
+ files = {
+ "amr_file": ("amr.json", open("amr.json"), "application/json"),
+ "text_extractions_file": ("extractions.json", open("extractions.json"), "application/json")
+ }
+
+ response = requests.post(f"{ENDPOINT}/metal/link_amr", params=params, files=files)
+ if response.status_code == 200:
+ enriched_amr = response.json()
+ ```
+ """
# Load the AMR
amr = json.load(amr_file.file)
diff --git a/skema/rest/proxies.py b/skema/rest/proxies.py
index ad77cbb834c..304acdbb55e 100644
--- a/skema/rest/proxies.py
+++ b/skema/rest/proxies.py
@@ -20,7 +20,7 @@
)
# Text Reading services
-MIT_TR_ADDRESS = os.environ.get("MIT_TR_ADDRESS", "https://mit-tr.askem.lum.ai")
+MIT_TR_ADDRESS = os.environ.get("MIT_TR_ADDRESS", "http://100.26.10.46")
SKEMA_TR_ADDRESS = os.environ.get("SKEMA_TR_ADDRESS", "http://hopper.sista.arizona.edu")
OPENAI_KEY = os.environ.get("OPENAI_KEY", "YOU_FORGOT_TO_SET_OPENAI_KEY")
COSMOS_ADDRESS = os.environ.get("COSMOS_ADDRESS", "http://cosmos0002.chtc.wisc.edu:8089")
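These defaults are resolved once at import time, so deployments redirect the proxies through the environment before the module is first imported; a sketch (the localhost address is illustrative):

```
import os

# Must be set before skema.rest.proxies is imported for the first time
os.environ["MIT_TR_ADDRESS"] = "http://localhost:8000"

from skema.rest.proxies import MIT_TR_ADDRESS
print(MIT_TR_ADDRESS)  # http://localhost:8000
```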
diff --git a/skema/rest/schema.py b/skema/rest/schema.py
index e1fba680116..47f810b5977 100644
--- a/skema/rest/schema.py
+++ b/skema/rest/schema.py
@@ -6,6 +6,7 @@
from askem_extractions.data_model import AttributeCollection
from pydantic import BaseModel, Field
+
# see https://github.com/pydantic/pydantic/issues/5821#issuecomment-1559196859
from typing_extensions import Literal
@@ -42,14 +43,36 @@ class HealthStatus(BaseModel):
class EquationImagesToAMR(BaseModel):
# FIXME: will this work or do we need base64?
images: List[eqn2mml_schema.ImageBytes]
- model: Literal["regnet", "petrinet"] = Field(description="The model type")
+ model: Literal["regnet", "petrinet"] = Field(
+ description="The model type", example="petrinet"
+ )
class EquationLatexToAMR(BaseModel):
- equations: List[str] = Field(description="Equations in LaTeX",
- example=["\\frac{\\partial x}{\\partial t} = {\\alpha x} - {\\beta x y}",
- "\\frac{\\partial y}{\\partial t} = {\\alpha x y} - {\\gamma y}"])
- model: Literal["regnet", "petrinet"] = Field(description="The model type", example="regnet")
+ equations: List[str] = Field(
+ description="Equations in LaTeX",
+ example=[
+ r"\frac{\partial x}{\partial t} = {\alpha x} - {\beta x y}",
+ r"\frac{\partial y}{\partial t} = {\alpha x y} - {\gamma y}",
+ ],
+ )
+ model: Literal["regnet", "petrinet"] = Field(
+ description="The model type", example="regnet"
+ )
+
+
+class MmlToAMR(BaseModel):
+ equations: List[str] = Field(
+ description="Equations in pMML",
+ example=[
+ "",
+ "",
+ "",
+ ],
+ )
+ model: Literal["regnet", "petrinet"] = Field(
+ description="The model type", example="petrinet"
+ )
class CodeSnippet(BaseModel):
@@ -74,29 +97,28 @@ class MiraGroundingInputs(BaseModel):
class MiraGroundingOutputItem(BaseModel):
class MiraDKGConcept(BaseModel):
- id: str = Field(
- description="DKG element id",
- example="apollosv:00000233"
- )
+ id: str = Field(description="DKG element id", example="apollosv:00000233")
name: str = Field(
- description="Canonical name of the concept",
- example="infected population"
+ description="Canonical name of the concept", example="infected population"
)
description: Optional[str] = Field(
description="Long winded description of the concept",
- example="A population of only infected members of one species."
+ example="A population of only infected members of one species.",
)
synonyms: List[str] = Field(
description="Any alternative name to the cannonical one for the concept",
- example=["Ill individuals", "The sick and ailing"]
+ example=[["Ill individuals", "The sick and ailing"]],
)
embedding: List[float] = Field(
description="Word embedding of the underlying model for the concept"
)
+ def __hash__(self):
+ return hash(tuple([self.id, tuple(self.synonyms), tuple(self.embedding)]))
+
score: float = Field(
description="Cosine similarity of the embedding representation of the input with that of the DKG element",
- example=0.7896
+ example=0.7896,
)
groundingConcept: MiraDKGConcept = Field(
description="DKG concept associated to the query",
@@ -109,8 +131,8 @@ class MiraDKGConcept(BaseModel):
0.01590670458972454,
0.03795482963323593,
-0.08787763118743896,
- ]
- )
+ ],
+ ),
)
@@ -136,6 +158,9 @@ class TextReadingError(BaseModel):
example="Out of memory error",
)
+ def __hash__(self):
+ return hash(f"{self.pipeline}-{self.message}")
+
class TextReadingDocumentResults(BaseModel):
data: Optional[AttributeCollection] = Field(
@@ -149,6 +174,11 @@ class TextReadingDocumentResults(BaseModel):
example=[TextReadingError(pipeline="MIT", message="Unauthorized API key")],
)
+ def __hash__(self):
+ return hash(
+ tuple([self.data, "NONE" if self.errors is None else tuple(self.errors)])
+ )
+
class TextReadingAnnotationsOutput(BaseModel):
"""Contains the TR document results for all the documents submitted for annotation"""
@@ -156,11 +186,14 @@ class TextReadingAnnotationsOutput(BaseModel):
outputs: List[TextReadingDocumentResults] = Field(
name="outputs",
description="Contains the results of TR annotations for each input document. There is one entry per input and "
- "inputs and outputs are matched by the same index in the list",
+ "inputs and outputs are matched by the same index in the list",
example=[
- TextReadingDocumentResults(data=AttributeCollection(attributes=[])),
TextReadingDocumentResults(
- errors=[TextReadingError(pipeline="SKEMA", message="Dummy error")]
+ data=AttributeCollection(attributes=[]), errors=None
+ ),
+ TextReadingDocumentResults(
+ data=AttributeCollection(attributes=[]),
+ errors=[TextReadingError(pipeline="SKEMA", message="Dummy error")],
),
],
)
@@ -168,5 +201,5 @@ class TextReadingAnnotationsOutput(BaseModel):
generalized_errors: Optional[List[TextReadingError]] = Field(
name="generalized_errors",
description="Any pipeline-wide errors, not specific to a particular input",
- example=[TextReadingError(pipeline="MIT", message="API quota exceeded")]
+ example=[TextReadingError(pipeline="MIT", message="API quota exceeded")],
)
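The `__hash__` methods added throughout this file make the result models usable in sets and as dict keys, which pydantic models are not by default; a minimal sketch of the effect on `TextReadingError`:

```
from skema.rest.schema import TextReadingError

errors = {
    TextReadingError(pipeline="MIT", message="API quota exceeded"),
    TextReadingError(pipeline="MIT", message="API quota exceeded"),
}
print(len(errors))  # 1: duplicates collapse once the model is hashable
```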
diff --git a/skema/rest/workflows.py b/skema/rest/workflows.py
index ac39477ad58..680c0148eb0 100644
--- a/skema/rest/workflows.py
+++ b/skema/rest/workflows.py
@@ -96,6 +96,21 @@ async def equations_to_amr(data: schema.EquationLatexToAMR):
)
return res.json()
+# pmml -> amr
+@router.post("/pmml/equations-to-amr", summary="Equations pMML → AMR")
+async def equations_to_amr_from_pmml(data: schema.MmlToAMR):
+
+ payload = {"mathml": data.equations, "model": data.model}
+ res = requests.put(f"{SKEMA_RS_ADDESS}/mathml/amr", json=payload)
+ if res.status_code != 200:
+ return JSONResponse(
+ status_code=400,
+ content={
+ "error": f"MORAE PUT /mathml/amr failed to process payload",
+ "payload": payload,
+ },
+ )
+ return res.json()
# code snippets -> fn -> petrinet amr
@router.post("/code/snippets-to-pn-amr", summary="Code snippets → PetriNet AMR")
diff --git a/skema/skema-rs/mathml/src/acset.rs b/skema/skema-rs/mathml/src/acset.rs
index 1670d00bcf1..c880b2fc02c 100644
--- a/skema/skema-rs/mathml/src/acset.rs
+++ b/skema/skema-rs/mathml/src/acset.rs
@@ -479,6 +479,15 @@ impl From> for PetriNet {
for term in terms.iter() {
println!("term: {:?}\n", term.clone());
+ for param in &term.parameters {
+ let parameters = Parameter {
+ id: param.clone(),
+ name: Some(param.clone()),
+ description: Some(format!("{} rate", param.clone())),
+ ..Default::default()
+ };
+ parameter_vec.push(parameters.clone());
+ }
}
        // now for polarity pairs of terms we need to construct the transitions
@@ -507,22 +516,28 @@ impl From> for PetriNet {
};
transitions_vec.insert(transitions.clone());
+ let mut expression_string = "".to_string();
+
+ for param in t.0.parameters.clone().iter() {
+ expression_string = format!("{}{}*", expression_string.clone(), param.clone());
+ }
+
+                    let exp_len = t.0.exp_states.len() - 1;
+ for (i, exp) in t.0.exp_states.clone().iter().enumerate() {
+ if i != exp_len {
+ expression_string =
+ format!("{}{}*", expression_string.clone(), exp.clone());
+ } else {
+ expression_string = format!("{}{}", expression_string.clone(), exp.clone());
+ }
+ }
+
let rate = Rate {
target: transitions.id.clone(),
- expression: "".to_string(), // the second term needs to be the product of the inputs
+ expression: expression_string.clone(), // the second term needs to be the product of the inputs
expression_mathml: Some(t.0.expression.clone()),
};
rate_vec.push(rate.clone());
-
- for param in &t.0.parameters {
- let parameters = Parameter {
- id: param.clone(),
- name: Some(param.clone()),
- description: Some(format!("{} rate", param.clone())),
- ..Default::default()
- };
- parameter_vec.push(parameters.clone());
- }
} else {
// construct transitions for complicated transitions
// mainly need to construct the output specially,
@@ -543,22 +558,28 @@ impl From> for PetriNet {
};
transitions_vec.insert(transitions.clone());
+ let mut expression_string = "".to_string();
+
+ for param in t.0.parameters.clone().iter() {
+ expression_string = format!("{}{}*", expression_string.clone(), param.clone());
+ }
+
+ let exp_len = t.0.exp_states.len() - 1;
+ for (i, exp) in t.0.exp_states.clone().iter().enumerate() {
+ if i != exp_len {
+ expression_string =
+ format!("{}{}*", expression_string.clone(), exp.clone());
+ } else {
+ expression_string = format!("{}{}", expression_string.clone(), exp.clone());
+ }
+ }
+
let rate = Rate {
target: transitions.id.clone(),
- expression: "".to_string(), // the second term needs to be the product of the inputs
+ expression: expression_string.clone(), // the second term needs to be the product of the inputs
expression_mathml: Some(t.0.expression.clone()),
};
rate_vec.push(rate.clone());
-
- for param in &t.0.parameters {
- let parameters = Parameter {
- id: param.clone(),
- name: Some(param.clone()),
- description: Some(format!("{} rate", param.clone())),
- ..Default::default()
- };
- parameter_vec.push(parameters.clone());
- }
}
}
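Both hunks above assemble the rate law as the product of a transition's parameters and its input states, dropping the trailing `*` after the final state (hence the `len() - 1` bound). A Python sketch of the same string assembly with illustrative inputs:

```
def rate_expression(parameters, exp_states):
    # Parameters first, each followed by '*'
    expression = "".join(f"{p}*" for p in parameters)
    # States next; the last one gets no trailing '*'
    last = len(exp_states) - 1
    for i, state in enumerate(exp_states):
        expression += state if i == last else f"{state}*"
    return expression

print(rate_expression(["β"], ["S", "I"]))  # β*S*I
```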
diff --git a/skema/skema-rs/mathml/src/ast/operator.rs b/skema/skema-rs/mathml/src/ast/operator.rs
index 59ca4c262a4..c2e0f35d092 100644
--- a/skema/skema-rs/mathml/src/ast/operator.rs
+++ b/skema/skema-rs/mathml/src/ast/operator.rs
@@ -20,6 +20,7 @@ pub enum Operator {
Rparen,
Compose,
Factorial,
+ Exp,
Derivative(Derivative),
// Catchall for operators we haven't explicitly defined as enum variants yet.
Other(String),
@@ -41,6 +42,7 @@ impl fmt::Display for Operator {
Operator::Derivative(Derivative { order, var_index }) => {
write!(f, "D({order}, {var_index})")
}
+ Operator::Exp => write!(f, "Exp"),
Operator::Other(op) => write!(f, "{op}"),
}
}
diff --git a/skema/skema-rs/mathml/src/bin/mml2pn.rs b/skema/skema-rs/mathml/src/bin/mml2pn.rs
index b4877311482..9ce268610a3 100644
--- a/skema/skema-rs/mathml/src/bin/mml2pn.rs
+++ b/skema/skema-rs/mathml/src/bin/mml2pn.rs
@@ -1,4 +1,4 @@
-///! Program to parse MathML and convert it to a Petri Net
+//! Program to parse MathML and convert it to a Petri Net
use clap::{Parser, ValueEnum};
use mathml::mml2pn::ACSet;
diff --git a/skema/skema-rs/mathml/src/parsers/first_order_ode.rs b/skema/skema-rs/mathml/src/parsers/first_order_ode.rs
index 4d1b43fc310..9c2e87f0acf 100644
--- a/skema/skema-rs/mathml/src/parsers/first_order_ode.rs
+++ b/skema/skema-rs/mathml/src/parsers/first_order_ode.rs
@@ -516,6 +516,15 @@ pub fn get_term_mult(sys_states: Vec, eq: Vec) -> Pn
Atom(_x) => variables.push(y[1].to_string()),
}
}
+ Add => {
+ if y.len() == 1 {
+ variables.push(y[0].to_string());
+ } else {
+ for var in y.iter() {
+ variables.push(var.to_string().clone());
+ }
+ }
+ }
_ => {
println!("Not expected operation inside Multiply")
}
diff --git a/skema/skema-rs/mathml/src/parsers/math_expression_tree.rs b/skema/skema-rs/mathml/src/parsers/math_expression_tree.rs
index 3f207265a10..54a2ebef702 100644
--- a/skema/skema-rs/mathml/src/parsers/math_expression_tree.rs
+++ b/skema/skema-rs/mathml/src/parsers/math_expression_tree.rs
@@ -87,6 +87,48 @@ impl MathExpressionTree {
}
content_mathml
}
+
+    /// Translates the expression tree to an infix expression string (e.g. ((α*ρ)*I)).
+    /// TA-4 uses these "string expressions" to display over the transitions in their visual front end.
+ pub fn to_infix_expression(&self) -> String {
+ let mut expression = String::new();
+ match self {
+ MathExpressionTree::Atom(i) => match i {
+ MathExpression::Ci(x) => {
+ expression.push_str(&format!("{}", x.content));
+ }
+ MathExpression::Mi(Mi(id)) => {
+ expression.push_str(&id.to_string());
+ }
+ MathExpression::Mn(number) => {
+ expression.push_str(&number.to_string());
+ }
+ MathExpression::Mrow(_) => {
+ panic!("All Mrows should have been removed by now!");
+ }
+ t => panic!("Unhandled MathExpression: {:?}", t),
+ },
+
+ MathExpressionTree::Cons(head, rest) => {
+ let mut operation = String::new();
+ match head {
+ Operator::Add => operation.push('+'),
+ Operator::Subtract => operation.push('-'),
+ Operator::Multiply => operation.push('*'),
+ Operator::Equals => operation.push('='),
+ Operator::Divide => operation.push('/'),
+ _ => {}
+ }
+ let mut component = Vec::new();
+ for s in rest {
+ component.push(s.to_infix_expression());
+ }
+                let math_exp = format!("({})", component.join(&operation));
+ expression.push_str(&math_exp);
+ }
+ }
+ expression
+ }
}
/// Represents a token for the Pratt parsing algorithm.
@@ -545,3 +587,56 @@ fn test_content_hackathon2_scenario1_eq8() {
"βκm"
);
}
+
+#[test]
+fn test_expression1() {
+ let input = "";
+    let exp = input.parse::<MathExpressionTree>().unwrap();
+ let math = exp.to_infix_expression();
+ assert_eq!(math, "(γ*I)");
+}
+
+#[test]
+fn test_expression2() {
+ let input = "
+
+ ";
+    let exp = input.parse::<MathExpressionTree>().unwrap();
+ let math = exp.to_infix_expression();
+ assert_eq!(math, "((α*ρ)*I)");
+}
+
+#[test]
+fn test_expression3() {
+ let input = "
+
+ ";
+    let exp = input.parse::<MathExpressionTree>().unwrap();
+ let math = exp.to_infix_expression();
+ assert_eq!(math, "((((β*I)*S)/N)-(δ*E))")
+}
+
+#[test]
+fn test_expression4() {
+ let input = "
+
+ ";
+    let exp = input.parse::<MathExpressionTree>().unwrap();
+ let math = exp.to_infix_expression();
+ assert_eq!(math, "((((1-α)*γ)*I)-(ϵ*R))")
+}
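The tests pin down the output shape of `to_infix_expression`: each `Cons` node becomes a parenthesized join of its children on the operator symbol. A Python sketch of the same traversal (tree encoding assumed: atoms are strings, `Cons` nodes are `(operator, children)` tuples):

```
def to_infix(tree):
    if isinstance(tree, str):  # Atom: identifier or number
        return tree
    op, children = tree        # Cons: operator over sub-expressions
    return "(" + op.join(to_infix(child) for child in children) + ")"

# Mirrors the right-hand side of test_expression3
tree = ("-", [("/", [("*", [("*", ["β", "I"]), "S"]), "N"]), ("*", ["δ", "E"])])
print(to_infix(tree))  # ((((β*I)*S)/N)-(δ*E))
```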
diff --git a/skema/skema-rs/skema/src/bin/morae.rs b/skema/skema-rs/skema/src/bin/morae.rs
index 178711cdc38..4b1cc8e8a57 100644
--- a/skema/skema-rs/skema/src/bin/morae.rs
+++ b/skema/skema-rs/skema/src/bin/morae.rs
@@ -2,20 +2,17 @@ use clap::Parser;
use mathml::mml2pn::get_mathml_asts_from_file;
pub use mathml::mml2pn::{ACSet, Term};
-use mathml::parsers::first_order_ode::{get_FirstOrderODE_vec_from_file};
-
-
+use mathml::parsers::first_order_ode::get_FirstOrderODE_vec_from_file;
#[cfg(test)]
use std::fs;
-
-
-
// new imports
use mathml::acset::{PetriNet, RegNet};
-use skema::model_extraction::{module_id2mathml_ast, subgraph2_core_dyn_ast};
+use skema::model_extraction::{
+ module_id2mathml_MET_ast, module_id2mathml_ast, subgraph2_core_dyn_ast,
+};
#[derive(Parser, Debug)]
struct Cli {
@@ -46,7 +43,7 @@ fn main() {
let host = "localhost";
- let math_content = module_id2mathml_ast(module_id, host);
+ let math_content = module_id2mathml_MET_ast(module_id, host);
let input_src = "../../data/mml2pn_inputs/testing_eqns/mml_list4.txt";
@@ -58,16 +55,11 @@ fn main() {
println!("\nmath_content: {:?}", math_content);
println!("\nmathml_ast: {:?}", odes);
- println!("\nPN from code: {:?}", ACSet::from(math_content.clone()));
-
println!(
"\nAMR from mathml: {}\n",
serde_json::to_string(&PetriNet::from(odes)).unwrap()
);
- println!(
- "\nAMR from code: {:?}",
- PetriNet::from(ACSet::from(math_content))
- );
+ println!("\nAMR from code: {:?}", PetriNet::from(math_content));
/*println!(
"\nAMR from mathml: {:?}\n",
PetriNet::from(ACSet::from(mathml_ast))
diff --git a/skema/skema-rs/skema/src/database.rs b/skema/skema-rs/skema/src/database.rs
index 087a1f9f343..593fd82fc1c 100644
--- a/skema/skema-rs/skema/src/database.rs
+++ b/skema/skema-rs/skema/src/database.rs
@@ -738,6 +738,21 @@ fn create_function_net_lib(gromet: &ModuleCollection, mut start: u32) -> Vec<String> {
+ (nodes, edges, meta_nodes) = create_att_primitive(
+ &gromet.clone(),
+ eboxf.clone(),
+ sboxf.clone(),
+ nodes.clone(),
+ edges.clone(),
+ n1.clone(),
+ idx,
+ box_counter,
+ bf_counter,
+ start,
+ meta_nodes.clone(),
+ );
+ }
_ => {}
}
box_counter += 1;
diff --git a/skema/skema-rs/skema/src/model_extraction.rs b/skema/skema-rs/skema/src/model_extraction.rs
index 3a7f8be3fac..8fc4a57fe66 100644
--- a/skema/skema-rs/skema/src/model_extraction.rs
+++ b/skema/skema-rs/skema/src/model_extraction.rs
@@ -8,9 +8,12 @@ use std::collections::HashMap;
use std::string::ToString;
// new imports
-
+use mathml::ast::Ci;
use mathml::ast::MathExpression::Mo;
+use mathml::ast::Type::Function;
use mathml::ast::{MathExpression, Mi, Mrow};
+use mathml::parsers::first_order_ode::{flatten_mults, FirstOrderODE};
+use mathml::parsers::math_expression_tree::MathExpressionTree;
use mathml::petri_net::recognizers::is_add_or_subtract_operator;
// struct for returning line spans
@@ -62,6 +65,26 @@ pub fn get_line_span(
}
}
+pub fn module_id2mathml_MET_ast(module_id: i64, host: &str) -> Vec<FirstOrderODE> {
+    let graph = subgraph2petgraph(module_id, host); // builds a petgraph of the module subgraph
+
+    let core_id = find_pn_dynamics(module_id, host); // gives back a list of function nodes that might contain the dynamics
+
+    let _line_span = get_line_span(core_id[0], graph); // gets the line span of the function id
+
+ //println!("\n{:?}", line_span);
+
+ //println!("function_core_id: {:?}", core_id[0].clone());
+ //println!("module_id: {:?}\n", module_id.clone());
+    // 4.5 now check whether those expressions are arithmetic in nature
+
+ // 5. pass id to subgrapg2_core_dyn to get core dynamics
+ let (core_dynamics_ast, _metadata_map_ast) =
+ subgrapg2_core_dyn_MET_ast(core_id[0], host).unwrap();
+
+ core_dynamics_ast
+}
+
pub fn module_id2mathml_ast(module_id: i64, host: &str) -> Vec