diff --git a/src/optigraph.jl b/src/optigraph.jl
index 730351d..9e21ab0 100644
--- a/src/optigraph.jl
+++ b/src/optigraph.jl
@@ -503,7 +503,8 @@ the optinode variable value obtained by solving `graph` which contains said opti
 function JuMP.value(graph::OptiGraph, var::JuMP.VariableRef)
     node_pointer = JuMP.backend(var.model).result_location[graph.id]
     var_idx = node_pointer.node_to_optimizer_map[index(var)]
-    return MOI.get(backend(graph).optimizer, MOI.VariablePrimal(), var_idx)
+    # return MOI.get(backend(graph).optimizer, MOI.VariablePrimal(), var_idx)
+    return MOI.get(node_pointer, MOI.VariablePrimal(), var_idx)
 end
 
 """
diff --git a/src/optimizer_interface.jl b/src/optimizer_interface.jl
index 8b9c39f..f9946d7 100644
--- a/src/optimizer_interface.jl
+++ b/src/optimizer_interface.jl
@@ -362,6 +362,9 @@ function JuMP.set_optimizer(node::OptiNode, optimizer_constructor)
     return nothing
 end
 
+# NOTE: this resets NLP data on every NodePointer, so graph solutions get cleared
+# This is currently a known limitation in Plasmo.jl. If you solve a node after a graph,
+# it will remove the graph solution.
 function JuMP.optimize!(node::OptiNode; kwargs...)
     JuMP.optimize!(jump_model(node); kwargs...)
     return nothing