
Fix for SIP routine
mewilhel committed Jul 8, 2019
1 parent 61b3c2a commit ee4f437
Showing 8 changed files with 84 additions and 26 deletions.
20 changes: 20 additions & 0 deletions News.md
@@ -0,0 +1,20 @@
# News for EAGO releases

## v0.1.1
- 4/12/2018: Initial release of combined EAGO packages v0.1.1.

## v0.1.2
- 6/20/2018: [EAGO v0.1.2 has been tagged](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.1.2). Significant speed and functionality updates.

## v0.2.0
- 6/14/2019: [EAGO v0.2.0 has been tagged](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.2.0). This update introduces a number of breaking changes to the EAGO API. Please review the use cases provided in the documentation to update your examples.
- Updated to support Julia 1.0+, MathOptInterface (MOI), and MOI construction of subproblems.
- Additional domain reduction routines available.
- Support for specialized handling of linear and quadratic terms.
- Significant performance improvements due to pre-allocation of Wengert tapes and MOI support.
- A more intuitive API for McCormick relaxation construction.

## v0.2.1
- 7/5/2019: **EAGO v0.2.1 has been tagged**. This contains fixes for a few minor issues.
- Bug fix for the explicit SIP solving routine, which failed for uncertainty sets of dimension greater than 1.
- Bug fix for maximization (MAX_SENSE) objectives.
9 changes: 6 additions & 3 deletions README.md
@@ -20,16 +20,19 @@ As a global optimization platform, EAGO's solvers can be used to find solutions

The EAGO package has numerous features: a solver accessible from JuMP/MathOptInterface, domain reduction routines, McCormick relaxations, and specialized non-convex semi-infinite program solvers. A full description of all EAGO features is available on the documentation website.

## News
## Recent News

- 4/12/2018: Initial release of combined EAGO packages.
- 6/20/2018: [EAGO v0.1.2 has been tagged](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.1.2). Significant speed and functionality updates.
- 6/14/2019: [EAGO v0.2.0 has been tagged](https://github.com/PSORLab/EAGO.jl/releases/tag/v0.2.0). This update introduces a number of breaking changes to the EAGO API. Please review the use cases provided in the documentation to update your examples.
- Updated to support Julia 1.0+, MathOptInterface (MOI), and MOI construction of subproblems.
- Additional domain reduction routines available.
- Support for specialized handling of linear and quadratic terms.
- Significant performance improvements due to pre-allocation of Wengert tapes and MOI support.
- A more intuitive API for McCormick relaxation construction.
- 7/5/2019: **EAGO v0.2.1 has been tagged**. This contains fixes for a few minor issues.
- Bug fix for the explicit SIP solving routine, which failed for uncertainty sets of dimension greater than 1.
- Bug fix for maximization (MAX_SENSE) objectives.

For a full list of EAGO release news, click [here](https://github.com/PSORLab/EAGO.jl/News.md).

## Related Packages

@@ -16,6 +16,7 @@ function midpoint_affine!(src::Optimizer, trg, n::NodeBB, r, xpnt::Vector{Float6

# Add objective
if src.working_evaluator_block.has_objective

# Calculates relaxation and subgradient
df = zeros(Float64, np)
f = MOI.eval_objective(src.working_evaluator_block.evaluator, xpnt)
@@ -29,8 +30,8 @@
saf_const -= xpnt_c*grad_c
end
saf = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(df, var[(1+nx):ngrad]), saf_const)
#println("objective cut: saf $saf")
MOI.set(trg, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), saf)

# Add objective cut if nonlinear (if bound is finite)
if src.global_upper_bound < Inf
cut_saf = MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(df, var[(1+nx):ngrad]), 0.0)
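The hunk above evaluates the relaxation and its subgradient at the reference point, then assembles a supporting affine objective (and, when the global upper bound is finite, an objective cut). A minimal self-contained sketch of that construction follows; the helper name `affine_cut` and its arguments are illustrative stand-ins, not EAGO's API.

```julia
# Minimal sketch (illustrative, not EAGO's API): assemble the affine support
#   f(xbar) + grad' * (x - xbar)
# as a MOI.ScalarAffineFunction, mirroring the saf_const loop in the diff above.
using MathOptInterface
const MOI = MathOptInterface

function affine_cut(vars::Vector{MOI.VariableIndex}, fval::Float64,
                    grad::Vector{Float64}, xbar::Vector{Float64})
    saf_const = fval
    for i in eachindex(grad)
        saf_const -= xbar[i] * grad[i]   # constant term: f(xbar) - grad' * xbar
    end
    terms = MOI.ScalarAffineTerm.(grad, vars)   # coefficient/variable pairs
    return MOI.ScalarAffineFunction(terms, saf_const)
end
```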
47 changes: 37 additions & 10 deletions src/eago_optimizer/default_optimizer/standard_forms/quadratic.jl
@@ -50,14 +50,9 @@ function quadratic_convexity!(src::Optimizer)
end

"""
relax_convex_quadratic_inner!
Default routine for relaxing convex quadratic constraint `lower` < `func` < `upper`
on node `n`. Takes affine bounds of convex part at point `x0`.
relax_convex_kernel
"""
function relax_convex_quadratic_inner!(trg, src::Optimizer, func::MOI.ScalarQuadraticFunction{Float64},
lower::Float64, upper::Float64, n::NodeBB, x0::Vector{Float64})

function relax_convex_kernel(func::MOI.ScalarQuadraticFunction{Float64}, n::NodeBB, src::Optimizer, x0::Vector{Float64})
VarNum = length(n)
temp_sto = zeros(Float64,VarNum)
terms_coeff = Float64[]
@@ -89,6 +84,20 @@ function relax_convex_quadratic_inner!(trg, src::Optimizer, func::MOI.ScalarQuad

varIndx = [MOI.VariableIndex(src.variable_index_to_storage[i]) for i in terms_index]

return varIndx, terms_coeff, term_constant
end
"""
relax_convex_quadratic_inner!
Default routine for relaxing convex quadratic constraint `lower` < `func` < `upper`
on node `n`. Takes affine bounds of convex part at point `x0`.
"""
function relax_convex_quadratic_inner!(trg, src::Optimizer, func::MOI.ScalarQuadraticFunction{Float64},
lower::Float64, upper::Float64, n::NodeBB, x0::Vector{Float64})


varIndx, terms_coeff, term_constant = relax_convex_kernel(func, n, src, x0)

if (lower == upper)
saf_term = MOI.ScalarAffineTerm{Float64}.(terms_coeff,varIndx)
saf = MOI.ScalarAffineFunction{Float64}(saf_term,term_constant)
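The refactor above extracts the affine-relaxation assembly into `relax_convex_kernel`, which returns variable indices, coefficients, and a constant that the caller wraps in an MOI constraint. The diff is truncated before EAGO's actual set choices, so the following is only a plausible sketch of that final step, moving the constant into the set as MOI convention prefers.

```julia
# Plausible sketch only: turn the kernel's (varIndx, terms_coeff, term_constant)
# triple into a constraint on the relaxed model `trg`. EAGO's actual branches
# are cut off in the diff above.
using MathOptInterface
const MOI = MathOptInterface

function add_relaxed_constraint!(trg, varIndx, terms_coeff, term_constant,
                                 lower::Float64, upper::Float64)
    saf_term = MOI.ScalarAffineTerm{Float64}.(terms_coeff, varIndx)
    saf = MOI.ScalarAffineFunction{Float64}(saf_term, 0.0)
    if lower == upper
        MOI.add_constraint(trg, saf, MOI.EqualTo{Float64}(upper - term_constant))
    else
        MOI.add_constraint(trg, saf, MOI.LessThan{Float64}(upper - term_constant))
    end
end
```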
@@ -269,10 +278,28 @@ function relax_quadratic!(trg, src::Optimizer, n::NodeBB, r::RelaxationScheme)

# Relax quadratic objective
if isa(src.objective, MOI.ScalarQuadraticFunction{Float64}) # quadratic objective
if (src.objective_convexity)
relax_convex_quadratic_inner!(trg, src, src.objective, set.value, set.value, n, x0)
if (src.optimization_sense == MOI.MAX_SENSE)
objf = src.objective
m_objf = MOI.ScalarQuadraticFunction{Float64}(copy(objf.affine_terms), copy(objf.quadratic_terms), -objf.constant) # copy so src.objective is not mutated below
for i in 1:length(m_objf.affine_terms)
m_objf.affine_terms[i] = MOI.ScalarAffineTerm{Float64}(-m_objf.affine_terms[i].coefficient, m_objf.affine_terms[i].variable_index)
end
for i in 1:length(m_objf.quadratic_terms)
m_objf.quadratic_terms[i] = MOI.ScalarQuadraticTerm{Float64}(-m_objf.quadratic_terms[i].coefficient, m_objf.quadratic_terms[i].variable_index_1, m_objf.quadratic_terms[i].variable_index_2)
end
varIndx, terms_coeff, quadratic_constant = relax_nonconvex_kernel(m_objf, n, src, x0)
saf_term = MOI.ScalarAffineTerm{Float64}.(terms_coeff, varIndx)
obj_func = MOI.ScalarAffineFunction{Float64}(saf_term, quadratic_constant)
MOI.set(trg, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), obj_func)
else
relax_nonconvex_quadratic!(trg, src, src.objective, set.value, set.value, n, x0)
if (src.objective_convexity)
varIndx, terms_coeff, quadratic_constant = relax_convex_kernel(src.objective, n, src, x0)
else
varIndx, terms_coeff, quadratic_constant = relax_nonconvex_kernel(src.objective, n, src, x0)
end
saf_term = MOI.ScalarAffineTerm{Float64}.(terms_coeff, varIndx)
obj_func = MOI.ScalarAffineFunction{Float64}(saf_term, quadratic_constant)
MOI.set(trg, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), obj_func)
end
end
#MOI.set(trg, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), QuadMidPoint)
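The MAX_SENSE branch above relies on max f = -min(-f): every affine term, quadratic term, and the constant are negated before the relaxation kernel is applied. A self-contained sketch of that transformation, assuming the MOI 0.8-era field names this commit uses (`variable_index`, `variable_index_1/2`) and building fresh term vectors so the original function is left intact:

```julia
# Minimal sketch, assuming MOI 0.8-era field names: negate a quadratic function
# term-by-term so a minimizing relaxation handles a maximization objective.
using MathOptInterface
const MOI = MathOptInterface

function negate_quadratic(f::MOI.ScalarQuadraticFunction{Float64})
    aff = [MOI.ScalarAffineTerm{Float64}(-t.coefficient, t.variable_index)
           for t in f.affine_terms]
    quad = [MOI.ScalarQuadraticTerm{Float64}(-t.coefficient,
                                             t.variable_index_1,
                                             t.variable_index_2)
            for t in f.quadratic_terms]
    return MOI.ScalarQuadraticFunction{Float64}(aff, quad, -f.constant)
end
```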
@@ -41,7 +41,7 @@ function default_upper_bounding!(x::Optimizer,y::NodeBB)

if is_feasible_solution(termination_status, result_status)
x.current_upper_info.feasibility = true
mult = (x.optimization_sense == MOI.MIN_SENSE) ? 1.0 : -1.0
mult = (x.optimization_sense == MOI.MAX_SENSE && x.objective == nothing) ? -1.0 : 1.0
x.current_upper_info.value = mult*MOI.get(x.working_upper_optimizer, MOI.ObjectiveValue())
x.current_upper_info.solution[1:end] = MOI.get(x.working_upper_optimizer, MOI.VariablePrimal(), x.upper_variables)
else
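The one-line change above fixes the sign convention for maximization: the reported value needs flipping only when the sense is MAX_SENSE and the objective lives in the NLP block (`x.objective == nothing`), because only that path negates the evaluator's objective upstream. A sketch with illustrative names:

```julia
# Minimal sketch (illustrative names): recover the objective value in the
# user's original sense. Only an NLP-block objective under MAX_SENSE was
# negated upstream, so only that case flips the sign back.
using MathOptInterface
const MOI = MathOptInterface

function recover_objective(reported::Float64, sense::MOI.OptimizationSense,
                           has_moi_objective::Bool)
    flip = (sense == MOI.MAX_SENSE && !has_moi_objective)
    return flip ? -reported : reported
end
```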
13 changes: 9 additions & 4 deletions src/eago_optimizer/default_optimizer/subroutines/utilities.jl
@@ -208,7 +208,7 @@ function set_local_nlp!(m::Optimizer)

# Add objective sense
#MOI.set(m.InitialUpperOptimizer, MOI.ObjectiveSense(), m.OptimizationSense)
MOI.set(m.initial_upper_optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE)
#MOI.set(m.initial_upper_optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE)

# Specifies variables in upper problem
m.upper_variables = MOI.VariableIndex.(1:m.variable_number)
@@ -217,6 +217,7 @@

# Add objective function (if any)
if (m.objective != nothing)
MOI.set(m.initial_upper_optimizer, MOI.ObjectiveSense(), m.optimization_sense)
if isa(m.objective, MOI.SingleVariable)
if (m.optimization_sense == MOI.MIN_SENSE)
MOI.set(m.initial_upper_optimizer, MOI.ObjectiveFunction{MOI.SingleVariable}(), m.objective)
@@ -240,11 +241,12 @@
error("Objective sense must be MOI.MinSense or MOI.MaxSense")
end
elseif isa(m.objective, MOI.ScalarQuadraticFunction{Float64})
if (m.optimization_sense == MOI.MIN_SENSE)
if (m.optimization_sense == MOI.MIN_SENSE) || (m.optimization_sense == MOI.MAX_SENSE)
MOI.set(m.initial_upper_optimizer, MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), m.objective)
#=
elseif (m.optimization_sense == MOI.MAX_SENSE)
neg_obj_qda_terms = []
for term in m.Objective.affine_terms
for term in m.objective.affine_terms
push!(neg_obj_aff_terms,MOI.ScalarAffineTerm{Float64}(-term.coefficient,term.variable_index))
end
neg_obj_qdq_terms = []
@@ -253,14 +255,17 @@
end
neg_obj_qd = ScalarQuadraticFunction{Float64}(neg_obj_qda_terms,neg_obj_qdq_terms,-m.objective.constant)
MOI.set(m.initial_upper_optimizer, MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), neg_obj_qd)
=#
else
error("Objective sense must be MOI.MIN_SENSE or MOI.MAX_SENSE")
end
end
else
@assert m.nlp_data != empty_nlp_data()
MOI.set(m.initial_upper_optimizer, MOI.ObjectiveSense(), m.optimization_sense)
if (m.optimization_sense == MOI.MAX_SENSE)
minus_objective!(m.nlp_data.evaluator, false) # try just disallowing hessian storage request?
#println("ran minus objective")
#minus_objective!(m.nlp_data.evaluator, false)
elseif (m.optimization_sense != MOI.MIN_SENSE)
error("Objective sense must be MOI.MIN_SENSE or MOI.MAX_SENSE")
end
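For NLP-block objectives under MAX_SENSE the code calls `minus_objective!` on the evaluator rather than negating an MOI function. One way to picture what such a transformation accomplishes (a hypothetical wrapper, not EAGO's implementation) is an evaluator that forwards to the original but negates the objective and its gradient:

```julia
# Hypothetical wrapper, not EAGO's minus_objective!: negate an NLP evaluator's
# objective and objective gradient so that a minimizing solver maximizes the
# original objective.
using MathOptInterface
const MOI = MathOptInterface

struct MinusEvaluator{E<:MOI.AbstractNLPEvaluator} <: MOI.AbstractNLPEvaluator
    inner::E
end

MOI.initialize(d::MinusEvaluator, features) = MOI.initialize(d.inner, features)
MOI.features_available(d::MinusEvaluator) = MOI.features_available(d.inner)
MOI.eval_objective(d::MinusEvaluator, x) = -MOI.eval_objective(d.inner, x)

function MOI.eval_objective_gradient(d::MinusEvaluator, g, x)
    MOI.eval_objective_gradient(d.inner, g, x)   # g <- grad f(x)
    g .*= -1.0                                   # flip the sign in place
    return nothing
end
```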
2 changes: 0 additions & 2 deletions src/eago_optimizer/moi_wrapper/optimize.jl
@@ -141,8 +141,6 @@ function MOI.optimize!(m::Optimizer; custom_mod! = triv_function, custom_mod_arg

(~m.use_upper_factory) && bld_user_upper_fact!(m) # if optimizer type is supplied for upper, build factory

#println("start nlp solve")
#println("start nlp solve m.fixed_variable = $(m.fixed_variable)")
# Runs the branch and bound routine
solve_nlp!(m)
end
14 changes: 9 additions & 5 deletions src/eago_semiinfinite/sip_explicit.jl
@@ -11,14 +11,16 @@ function explicit_llp(xbar::Vector{Float64}, sip_storage::SIP_Result, problem_st
pU = problem_storage.p_u

model_llp = deepcopy(problem_storage.opts.model)
g(p) = problem_storage.gSIP(xbar, p)
register(model_llp, :g, np, g, autodiff=true)
@variable(model_llp, pL[i] <= p[i=1:np] <= pU[i])

if np == 1
g(p) = problem_storage.gSIP(xbar, p)
register(model_llp, :g, np, g, autodiff=true)
@variable(model_llp, pL[i] <= p[i=1:np] <= pU[i])
@NLobjective(model_llp, Min, -g(p[1]))
else
@NLobjective(model_llp, Min, -g(p...))
gmulti(p...) = problem_storage.gSIP(xbar, p)
register(model_llp, :gmulti, np, gmulti, autodiff=true)
@variable(model_llp, pL[i] <= p[i=1:np] <= pU[i])
@NLobjective(model_llp, Min, -gmulti(p...))
end
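
This hunk contains the actual SIP fix. JuMP's `register` treats a user function of `np` arguments as taking `np` scalars, so for a multi-dimensional uncertainty set the function must accept splatted arguments and be called as `gmulti(p...)`; the old code registered a single-argument `g` and splatted `np` scalars into it, which failed whenever `np > 1`. A runnable sketch of the multivariate pattern, using a hypothetical `gSIP` and bounds and the JuMP 0.19-era syntax of this commit:

```julia
# Minimal sketch of the np > 1 registration pattern above. gSIP, xbar, pL, and
# pU are illustrative stand-ins; Ipopt is just an example NLP solver.
using JuMP, Ipopt

gSIP(x, p) = p[1]^2 + p[2]^2 - x[1]         # hypothetical constraint function
xbar = [0.5]                                 # current upper-level iterate
pL, pU = [-1.0, -1.0], [1.0, 1.0]
np = length(pL)

model = Model(with_optimizer(Ipopt.Optimizer))   # JuMP 0.19-era constructor
gmulti(p...) = gSIP(xbar, p)                     # accepts splatted scalars
register(model, :gmulti, np, gmulti, autodiff=true)
@variable(model, pL[i] <= p[i=1:np] <= pU[i])
@NLobjective(model, Min, -gmulti(p...))          # maximize the violation
optimize!(model)
```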


@@ -138,6 +140,8 @@ function explicit_sip_solve(f::Function, gSIP::Function, x_l::Vector{Float64},
n_p = length(p_l)
n_x = length(x_l)

println("ran update 1")

if opts == nothing
opts = SIP_Options()
opts.model = m

2 comments on commit ee4f437

@mewilhel
Member Author


@JuliaRegistrator register()

@JuliaRegistrator

Error while trying to register: Version 0.2.0 already exists
