Merge pull request #44 from killah-t-cell/gb-add-normalization
normalization
killah-t-cell authored Jan 12, 2022
2 parents ef69898 + c4f73b9 commit ee7a361
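
A usage sketch of the keyword arguments this commit introduces (`normalized` and the two-element `maxiters`) together with the new `StochasticTraining` default. The module names are assumed and the construction of `plasma` is elided, since this commit does not touch it:

    using Plasma, NeuralPDE   # module names assumed; StochasticTraining comes from NeuralPDE

    # `plasma` is a CollisionlessPlasma or ElectrostaticPlasma built elsewhere (not shown here).
    sol = solve(plasma;
        lb=0.0, ub=1.0,
        GPU=false,                         # CPU is enough for a quick check
        strategy=StochasticTraining(100),  # new default training strategy
        maxiters=[30, 100],                # stage-1 (BFGS) and stage-2 (ADAM) iteration counts
        normalized=true)                   # new flag: charges, masses, and ϵ_0 set to 1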
Showing 2 changed files with 21 additions and 12 deletions.
Binary file modified .DS_Store
Binary file not shown.
33 changes: 21 additions & 12 deletions src/solve.jl
@@ -47,9 +47,9 @@ strategy – what NeuralPDE training strategy should be used
"""
function solve(plasma::CollisionlessPlasma;
    lb=0.0, ub=1.0, time_lb=lb, time_ub=ub,
-   GPU=true, inner_layers=16, strategy=QuadratureTraining(),
+   GPU=true, inner_layers=16, strategy=StochasticTraining(100),
    E_bcs=Neumann,
-   f_bcs=(a=1, g=0), maxiters=10000)
+   f_bcs=(a=1, g=0), maxiters=[30,100], normalized=true)
    if lb > ub
        error("lower bound must be smaller than upper bound")
    end
@@ -79,6 +79,13 @@ function solve(plasma::CollisionlessPlasma;
    end
    Ps = [d.P for d in dis]

+   if normalized
+       qs .= 1
+       ms .= 1
+       ϵ_0 = 1
+       μ_0 = 0
+   end
+
    # variables
    fs = Symbolics.variables(:f, eachindex(species); T=SymbolicUtils.FnType{Tuple,Real})
    Es = Symbolics.variables(:E, 1:dim; T=SymbolicUtils.FnType{Tuple,Real})
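
For context, setting the charges, masses, and ϵ_0 to one corresponds to the usual nondimensionalization of the kinetic system. As a sketch, for a single species with q = m = ϵ_0 = 1 the Vlasov equation and Gauss's law reduce to the form below (B = 0 in the electrostatic case; the μ_0 = 0 choice above is specific to this commit and is not restated here):

    % Sketch: single-species Vlasov equation and Gauss's law in normalized units (q = m = \epsilon_0 = 1)
    \partial_t f + \mathbf{v}\cdot\nabla_{\mathbf{x}} f
        + (\mathbf{E} + \mathbf{v}\times\mathbf{B})\cdot\nabla_{\mathbf{v}} f = 0,
    \qquad
    \nabla\cdot\mathbf{E} = \int f \,\mathrm{d}\mathbf{v}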
@@ -161,11 +168,9 @@ function solve(plasma::CollisionlessPlasma;

    # solve
    opt = Optim.BFGS()
-   res = GalacticOptim.solve(prob, opt, cb = print_loss(timeCounter, startTime, times, losses), maxiters=200)
-   prob = remake(prob, u0=res.minimizer)
-   res = GalacticOptim.solve(prob, ADAM(0.01), cb = print_loss(timeCounter, startTime, times, losses), maxiters=maxiters)
+   res = GalacticOptim.solve(prob, opt, cb = print_loss(timeCounter, startTime, times, losses), maxiters=maxiters[1])
    prob = remake(prob, u0=res.minimizer)
-   res = GalacticOptim.solve(prob, opt, cb = print_loss(timeCounter, startTime, times, losses), maxiters=200)
+   res = GalacticOptim.solve(prob, ADAM(0.01), cb = print_loss(timeCounter, startTime, times, losses), maxiters=maxiters[2])
    phi = discretization.phi


@@ -187,8 +192,8 @@ strategy – what NeuralPDE training strategy should be used
"""
function solve(plasma::ElectrostaticPlasma;
    lb=0.0, ub=1.0, time_lb=lb, time_ub=ub,
-   dim=3, GPU=true, inner_layers=16, strategy=QuadratureTraining(),
-   E_bcs=Neumann,f_bcs=(a=1, g=0), maxiters=10000)
+   dim=3, GPU=true, inner_layers=16, strategy=StochasticTraining(80*dim),
+   E_bcs=Neumann,f_bcs=(a=1, g=0), maxiters=[30,100], normalized=true)

    if lb > ub
        error("lower bound must be smaller than upper bound")
@@ -218,6 +223,12 @@ function solve(plasma::ElectrostaticPlasma;
    end
    Ps = [d.P for d in dis]

+   if normalized
+       qs .= 1
+       ms .= 1
+       ϵ_0 = 1
+   end
+
    # variables
    fs = Symbolics.variables(:f, eachindex(species); T=SymbolicUtils.FnType{Tuple,Real})
    Es = Symbolics.variables(:E, 1:dim; T=SymbolicUtils.FnType{Tuple,Real})
@@ -298,11 +309,9 @@ function solve(plasma::ElectrostaticPlasma;

    # solve
    opt = Optim.BFGS()
-   res = GalacticOptim.solve(prob, opt, cb = print_loss(timeCounter, startTime, times, losses), maxiters=200)
-   prob = remake(prob, u0=res.minimizer)
-   res = GalacticOptim.solve(prob, ADAM(0.01), cb = print_loss(timeCounter, startTime, times, losses), maxiters=maxiters)
+   res = GalacticOptim.solve(prob, opt, cb = print_loss(timeCounter, startTime, times, losses), maxiters=maxiters[1])
    prob = remake(prob, u0=res.minimizer)
-   res = GalacticOptim.solve(prob, opt, cb = print_loss(timeCounter, startTime, times, losses), maxiters=200)
+   res = GalacticOptim.solve(prob, ADAM(0.01), cb = print_loss(timeCounter, startTime, times, losses), maxiters=maxiters[2])
    phi = discretization.phi

    return PlasmaSolution(plasma, vars, dict_vars, phi, res, initθ, domains, losses, times)
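
Both methods now use the same two-stage training schedule: a short BFGS run for maxiters[1] iterations, then ADAM restarted from the BFGS minimizer for maxiters[2] iterations. A minimal sketch of that schedule, factored into a hypothetical helper (`two_stage_train` is not part of the package; it reuses only the calls that appear in the diff, with `prob` an OptimizationProblem and `cb` a callback built as in solve() above):

    using GalacticOptim, Optim, Flux   # imports assumed by the surrounding package

    function two_stage_train(prob, cb; maxiters=[30, 100])
        # Stage 1: BFGS warm-up for maxiters[1] iterations.
        res = GalacticOptim.solve(prob, Optim.BFGS(), cb=cb, maxiters=maxiters[1])
        # Stage 2: restart from the BFGS minimizer and run ADAM for maxiters[2] iterations.
        # remake is re-exported by the SciML stack, as used in solve() above.
        prob = remake(prob, u0=res.minimizer)
        return GalacticOptim.solve(prob, Flux.ADAM(0.01), cb=cb, maxiters=maxiters[2])
    end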