
Commit 52da385

Revert "adjustments for FMISensitivity.jl"
This reverts commit 8684a66.
ThummeTo committed Oct 9, 2023
1 parent 8684a66 commit 52da385
Showing 18 changed files with 403 additions and 750 deletions.
14 changes: 6 additions & 8 deletions Project.toml
@@ -1,14 +1,13 @@
 name = "FMIFlux"
 uuid = "fabad875-0d53-4e47-9446-963b74cae21f"
-version = "0.11.0"
+version = "0.10.6"
 
 [deps]
 Colors = "5ae59095-9a9b-59fe-a467-6f913c188581"
 DiffEqCallbacks = "459566f4-90b8-5000-8ac3-15dfb0a30def"
 DifferentiableEigen = "73a20539-4e65-4dcb-a56d-dc20f210a01b"
 DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
 FMIImport = "9fcbc62e-52a0-44e9-a616-1359a0008194"
-FMISensitivity = "3e748fe5-cd7f-4615-8419-3159287187d2"
 Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
 Optim = "429524aa-4258-5aef-a3af-852621145aeb"
 Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
@@ -19,14 +18,13 @@ ThreadPools = "b189fb0b-2eb5-4ed4-bc0c-d34c51242431"

 [compat]
 Colors = "0.12.8"
-DiffEqCallbacks = "2.33.0"
+DiffEqCallbacks = "2.26.0"
 DifferentiableEigen = "0.2.0"
-DifferentialEquations = "7.10.0"
-FMIImport = "0.16.0"
-FMISensitivity = "0.1.0"
-Flux = "0.14"
+DifferentialEquations = "7.8.0"
+FMIImport = "0.15.8"
+Flux = "0.13, 0.14"
 Optim = "1.7.0"
-ProgressMeter = "1.9.0"
+ProgressMeter = "1.7.0"
 Requires = "1.3.0"
 ThreadPools = "2.1.1"
 julia = "1.6"
12 changes: 2 additions & 10 deletions examples/src/juliacon_2023.ipynb
@@ -6,19 +6,11 @@
"source": [
"# Using NeuralODEs in real life applications\n",
"-----\n",
"Tutorial by Tobias Thummerer, Lars Mikelsons | Last edit: 08-04-2023\n",
"Tutorial by Tobias Thummerer | Last edit: 08-04-2023\n",
"\n",
"This workshop was held at the JuliaCon 2023 | 07-25-2023 | MIT (Boston, USA)\n",
"\n",
"Keywords: *#NeuralODE, #NeuralFMU, #PeNODE, #HybridModeling, #SciML*"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Workshop Video\n",
"[![Preview image](https://img.youtube.com/vi/X_u0KlZizD4/0.jpg)](https://www.youtube.com/watch?v=X_u0KlZizD4)"
"Keywords: *#NeuralODE, #NeuralFMU, #PeNODE, #HybridModeling*"
]
},
{
64 changes: 50 additions & 14 deletions src/FMIFlux.jl
@@ -4,24 +4,63 @@
 #
 
 module FMIFlux
-import FMISensitivity
-
-import FMISensitivity.ForwardDiff
-import FMISensitivity.Zygote
-import FMISensitivity.ReverseDiff
-import FMISensitivity.FiniteDiff
 
 @debug "Debugging messages enabled for FMIFlux ..."
 
 if VERSION < v"1.7.0"
-    @warn "Training under Julia 1.6 is very slow, please consider using Julia 1.7 or newer." maxlog=1
+    @warn "Training under Julia 1.6 is very slow, please consider using Julia 1.7 or newer."
 end
 
-import FMIImport.FMICore: hasCurrentComponent, getCurrentComponent, unsense
-import FMIImport.FMICore.ChainRulesCore: ignore_derivatives
+# Overwrite tag printing and limit partials length from ForwardDiff.jl
+# import FMIImport.ForwardDiff
+# function Base.show(io::IO, d::ForwardDiff.Dual{T,V,N}) where {T,V,N}
+#     print(io, "Dual(", ForwardDiff.value(d))
+#     for i in 1:min(N, 5)
+#         print(io, ", ", ForwardDiff.partials(d, i))
+#     end
+#     if N > 5
+#         print(io, ", [$(N-5) more]...")
+#     end
+#     print(io, ")")
+# end
+
+# ToDo: Quick-fixes until patch release SciMLSensitivity v0.7.29
+import FMIImport.SciMLSensitivity: FakeIntegrator, u_modified!, TrackedAffect
+import FMIImport.SciMLSensitivity.DiffEqBase: set_u!
+function u_modified!(::FakeIntegrator, ::Bool)
+    return nothing
+end
+function set_u!(::FakeIntegrator, u)
+    return nothing
+end
 
-using Requires
-import Flux
+# ToDo: Quick-fixes until patch release SciMLSensitivity v0.7.28
+# function Base.hasproperty(f::TrackedAffect, s::Symbol)
+#     if hasfield(TrackedAffect, s)
+#         return true
+#     else
+#         _affect = getfield(f, :affect!)
+#         return hasfield(typeof(_affect), s)
+#     end
+# end
+# function Base.getproperty(f::TrackedAffect, s::Symbol)
+#     if hasfield(TrackedAffect, s)
+#         return getfield(f, s)
+#     else
+#         _affect = getfield(f, :affect!)
+#         return getfield(_affect, s)
+#     end
+# end
+# function Base.setproperty!(f::TrackedAffect, s::Symbol, value)
+#     if hasfield(TrackedAffect, s)
+#         return setfield!(f, s, value)
+#     else
+#         _affect = getfield(f, :affect!)
+#         return setfield!(_affect, s, value)
+#     end
+# end
+
+using Requires, Flux
 
 using FMIImport
 using FMIImport: fmi2ValueReference, FMU, FMU2, FMU2Component
@@ -32,9 +32,6 @@ using FMIImport: fmi2SetTime, fmi2CompletedIntegratorStep, fmi2GetEventIndicator
 using FMIImport: fmi2SampleJacobian, fmi2GetDirectionalDerivative, fmi2GetJacobian, fmi2GetJacobian!
 using FMIImport: fmi2True, fmi2False
 
-import FMIImport.FMICore: fmi2ValueReferenceFormat
-
-include("hotfixes.jl")
 include("convert.jl")
 include("flux_overload.jl")
 include("neural.jl")
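Note on the restored, commented-out `Base.show` overload above: a `ForwardDiff.Dual` prints all of its partials by default, which quickly floods the terminal when FMU signals carry many sensitivities. A minimal sketch of the effect (assuming only that ForwardDiff is installed; the variable name is illustrative):

import ForwardDiff

# A Dual number with 8 partials: the default `show` prints every partial,
# while the overload above would print at most 5 and summarize the rest.
d = ForwardDiff.Dual(1.0, ntuple(_ -> 0.5, 8)...)
println(d)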
3 changes: 3 additions & 0 deletions src/batch.jl
@@ -4,7 +4,10 @@
 #
 
 import FMIImport: fmi2Real, fmi2FMUstate, fmi2EventInfo, fmi2ComponentState
+import FMIImport.ChainRulesCore: ignore_derivatives
 using DiffEqCallbacks: FunctionCallingCallback
+using FMIImport.ForwardDiff
+import FMIImport: unsense
 
 struct FMULoss{T}
     loss::T
4 changes: 3 additions & 1 deletion src/convert.jl
@@ -3,6 +3,8 @@
 # Licensed under the MIT license. See LICENSE file in the project root for details.
 #
 
+import Flux
+
 function is64(model::Flux.Chain)
     params = Flux.params(model)
 
@@ -18,5 +20,5 @@ function is64(model::Flux.Chain)
 end
 
 function convert64(model::Flux.Chain)
-    Flux.fmap(Flux.f64, model)
+    fmap(f64, model)
 end
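The restored unqualified call works because `fmap` and `f64` are brought into scope by the module-level `using Requires, Flux` in FMIFlux.jl. A short usage sketch of the two helpers (assuming only an installed Flux; `is64`/`convert64` are the functions from the diff above):

using Flux

model   = Chain(Dense(2 => 4, tanh), Dense(4 => 1))  # Flux initializes Float32 by default
model64 = fmap(f64, model)                           # what convert64(model) does internally
# is64(model) == false, is64(model64) == true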
51 changes: 50 additions & 1 deletion src/flux_overload.jl
@@ -3,5 +3,54 @@
 # Licensed under the MIT license. See LICENSE file in the project root for details.
 #
 
+import Flux
+import FMIImport.ChainRulesCore
+import Flux.Random: AbstractRNG
+import Flux.LinearAlgebra: I
+
 # feed through
-params = Flux.params
+params = Flux.params
+
+# # exports
+# export Adam
+# export Parallel
+
+# #
+# Chain = Flux.Chain
+# export Chain
+
+# # Float64 version of the Flux.glorot_uniform
+# function glorot_uniform_64(rng::AbstractRNG, dims::Integer...; gain::Real=1)
+#     scale = Float64(gain) * sqrt(24.0 / sum(Flux.nfan(dims...)))
+#     (rand(rng, Float64, dims...) .- 0.5) .* scale
+# end
+# glorot_uniform_64(dims::Integer...; kw...) = glorot_uniform_64(Flux.default_rng_value(), dims...; kw...)
+# glorot_uniform_64(rng::AbstractRNG=Flux.default_rng_value(); init_kwargs...) = (dims...; kwargs...) -> glorot_uniform_64(rng, dims...; init_kwargs..., kwargs...)
+# ChainRulesCore.@non_differentiable glorot_uniform_64(::Any...)
+
+# # Float64 version of the Flux.identity_init
+# identity_init_64(cols::Integer; gain::Real=1, shift=0) = zeros(Float64, cols) # Assume bias
+# identity_init_64(rows::Integer, cols::Integer; gain::Real=1, shift=0) = circshift(Matrix{Float64}(I * gain, rows, cols), shift)
+# function identity_init_64(dims::Integer...; gain::Real=1, shift=0)
+#     nin, nout = dims[end-1], dims[end]
+#     centers = map(d -> cld(d, 2), dims[1:end-2])
+#     weights = zeros(Float64, dims...)
+#     for i in 1:min(nin, nout)
+#         weights[centers..., i, i] = gain
+#     end
+#     return circshift(weights, shift)
+# end
+# ChainRulesCore.@non_differentiable identity_init_64(::Any...)
+
+# """
+# Wrapper for Flux.Dense, that converts all parameters to Float64.
+# """
+# function Dense(args...; init=glorot_uniform_64, kwargs...)
+#     return Flux.Dense(args...; init=init, kwargs...)
+# end
+# function Dense(W::AbstractMatrix, args...; init=glorot_uniform_64, kwargs...)
+#     W = Matrix{Float64}(W)
+#     return Flux.Dense(W, args...; init=init, kwargs...)
+# end
+# Dense(in::Integer, out::Integer, σ = identity; init=glorot_uniform_64, kwargs...) = Dense(in => out, σ; init=init, kwargs...)
+# export Dense
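The commented block above keeps Float64 variants of Flux's initializers around, since the stock `Flux.glorot_uniform` produces Float32 weights. A hedged sketch of the difference (the `glorot_uniform_64` body is copied from the comments above and uncommented purely for illustration; `Flux.nfan` is an internal Flux helper):

using Flux, Random

# Float64 Glorot init, as in the commented-out block above.
function glorot_uniform_64(rng::AbstractRNG, dims::Integer...; gain::Real=1)
    scale = Float64(gain) * sqrt(24.0 / sum(Flux.nfan(dims...)))
    (rand(rng, Float64, dims...) .- 0.5) .* scale
end

W32 = Flux.glorot_uniform(4, 3)                      # eltype(W32) == Float32
W64 = glorot_uniform_64(Random.default_rng(), 4, 3)  # eltype(W64) == Float64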
19 changes: 0 additions & 19 deletions src/hotfixes.jl

This file was deleted.

8 changes: 4 additions & 4 deletions src/layers.jl
@@ -19,8 +19,8 @@ struct FMUParameterRegistrator{T}
     function FMUParameterRegistrator{T}(fmu::FMU2, p_refs::fmi2ValueReferenceFormat, p::AbstractArray{T}) where {T}
         @assert length(p_refs) == length(p) "`p_refs` and `p` need to be the same length!"
         p_refs = prepareValueReference(fmu, p_refs)
-        fmu.default_p_refs = p_refs
-        fmu.default_p = p
+        fmu.optim_p_refs = p_refs
+        fmu.optim_p = p
         return new(fmu, p_refs, p)
     end
 
@@ -31,8 +31,8 @@
 export FMUParameterRegistrator
 
 function (l::FMUParameterRegistrator)(x)
-    l.fmu.default_p = l.p
-    l.fmu.default_p_refs = l.p_refs
+    l.fmu.optim_p = l.p
+    l.fmu.optim_p_refs = l.p_refs
     return x
 end

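`FMUParameterRegistrator` is an identity layer for the signal itself; it only re-registers the tunable FMU parameters on every forward pass, and after this revert they are written to `fmu.optim_p`/`fmu.optim_p_refs` again instead of `default_p`/`default_p_refs`. A usage sketch under stated assumptions — an already-loaded `FMU2` bound to `fmu`, a hypothetical value reference "p.gain", and an outer constructor mirroring the inner one shown above:

using Flux

p0  = [1.0]                                         # initial values for the tuned parameters
reg = FMUParameterRegistrator(fmu, ["p.gain"], p0)  # "p.gain" is a hypothetical reference
model = Chain(reg, Dense(1 => 1))                   # `reg` passes its input through unchanged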
1 change: 1 addition & 0 deletions src/losses.jl
@@ -7,6 +7,7 @@ module Losses

 using Flux
 import ..FMIFlux: FMU2BatchElement, NeuralFMU, loss!, run!, ME_NeuralFMU, FMU2Solution
+import FMIImport: unsense
 
 mse = Flux.Losses.mse
 mae = Flux.Losses.mae
2 changes: 2 additions & 0 deletions src/misc.jl
@@ -3,6 +3,8 @@
 # Licensed under the MIT license. See LICENSE file in the project root for details.
 #
 
+using Flux
+
 """
 Compares non-equidistant (or equidistant) datapoints by linear interpolating and comparing at given interpolation points `t_comp`.
 (Zygote-friendly: Zygote can differentiate through via AD.)
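The docstring shown above describes comparing two (possibly) non-equidistant series by linear interpolation at common points `t_comp`. A minimal, Zygote-friendly sketch of that idea (illustrative names, not the actual function from misc.jl):

# Linearly interpolate the series (t, x) at the point ti, clamping at the ends.
function lin_interp(t, x, ti)
    i = searchsortedlast(t, ti)
    i < 1 && return x[1]
    i >= length(t) && return x[end]
    w = (ti - t[i]) / (t[i+1] - t[i])
    return (1 - w) * x[i] + w * x[i+1]
end

# Squared-error comparison of two series evaluated on the common points t_comp.
compare(t1, x1, t2, x2, t_comp) =
    sum(abs2, [lin_interp(t1, x1, τ) - lin_interp(t2, x2, τ) for τ in t_comp])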