Fixes to copy functions and minimal_copy rhs functions #50

Merged · 2 commits · Sep 11, 2024
33 changes: 15 additions & 18 deletions simulations/NavierStokes_2D/scripts/NeuralClosure+SciML.jl
@@ -66,11 +66,12 @@ function create_io_arrays(data, setups)
(; u = reshape(u, N..., D, :), c = reshape(c, N..., D, :)) # squeeze time and sample dimensions
end
end
include("../../../src/equations/NavierStokes_utils.jl")

# Create input/output arrays for a-priori training (ubar vs c)
io = create_io_arrays(data, setups); # modified version that also has a placeholder for boundary conditions
io_nc = NC.create_io_arrays(data, setups) # original Syver version, without extra padding
ig = 1 # index of the LES grid to use
io_nc = IO_padded_to_IO_nopad(io, setups) # original Syver version, without extra padding
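# Shape intuition (an assumption, based on the one boundary layer per side that
# NN_padded_to_NN_nopad strips): io[ig].u has size (nx + 2, ny + 2, D, nt * nsample),
# while io_nc[ig].u has size (nx, ny, D, nt * nsample).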

# check that the data is correctly reshaped
a = data[1].data[ig].u[2][1]
@@ -79,16 +80,13 @@ b = io[ig].u[:, :, 1, 2]
# io[igrid].u[nx, ny, dim as ux:1, time*nsample]
a == b
c = io_nc[ig].u[:, :, 1, 2]

include("../../../src/equations/NavierStokes_utils.jl")
NN_padded_to_INS(io[ig].u[:, :, :, 2:2], setups[ig])[1]
INS_to_NN(data[1].data[ig].u[2], setups[ig])[:, :, 1, 1]
NN_padded_to_NN_nopad(io[ig].u[:, :, :, 2:2], setups[ig])

u0_NN = io[ig].u[:, :, :, 2:2]
u0_INS = CN_NS.NN_padded_to_INS(u0_NN, setups[ig])
zu0_NN = io[ig].u[:, :, :, 2:2]

# * Creation of the model: NN closure
include("../../../src/models/cnn.jl")
import CoupledNODE: cnn
closure, θ, st = cnn(;
setup = setups[ig],
radii = [3, 3],
@@ -116,20 +114,17 @@ size(train_data[1]) # bar{u} filtered
size(train_data[2]) # c commutator error

# * loss a priori (similar to Syver's)
include("../../../src/loss/loss_priori.jl")
#import CoupledNODE: create_loss_priori, mean_squared_error
import CoupledNODE: create_loss_priori, mean_squared_error
loss_priori = create_loss_priori(mean_squared_error, closure)
# the resulting function can be called as loss_priori((x, y), θ, st), where x is the model input (\bar{u}), y the label (c), θ the NN parameters, and st the NN state.
loss_priori(closure, θ, st, train_data) # check that the loss is working
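# For intuition, a minimal sketch of such an a priori loss (assuming a normalized
# MSE; the actual create_loss_priori may differ in details):
import Lux
function sketch_loss_priori(model, θ, st, (x, y))
    c_pred, _ = Lux.apply(model, x, θ, st) # predicted commutator error
    return sum(abs2, c_pred .- y) / sum(abs2, y) # relative squared error
end
sketch_loss_priori(closure, θ, st, train_data) # same call signature as above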

# let's define a loss that is computed correctly and follows the Lux format
#import CoupledNODE: loss_priori_lux
import CoupledNODE: loss_priori_lux
loss_priori_lux(closure, θ, st, train_data)

## old way of training
include("../../../src/utils.jl")
using Plots: plot!
#import CoupledNODE: callback
import CoupledNODE: callback
import Optimization, OptimizationOptimisers
optf = Optimization.OptimizationFunction(
(u, p) -> loss_priori(closure, u, st, train_data), # u here is the optimization variable (θ params of NN)
@@ -145,8 +140,8 @@ result_priori = Optimization.solve(
θ_priori = result_priori.u
# with this approach, we have the problem that we cannot loop through the data.

include("../../../src/train.jl")
#import CoupledNODE: train
#include("../../../src/train.jl")
import CoupledNODE: train
import Optimization, OptimizationOptimisers
loss, tstate = train(closure, θ, st, dataloader, loss_priori_lux_style;
nepochs = 10, ad_type = Optimization.AutoZygote(),
@@ -226,7 +221,8 @@ size(dataloader_post().u[1][1])
dudt_nn2 = create_right_hand_side_with_closure_minimal_copy(
setups[ig], INS.psolver_spectral(setups[ig]), closure, st)
example2 = dataloader_luisa()
dudt_nn2(example2.u[:, :, :, 1], θ, example2.t[1]) # compatibility trick: always keep the last dimension (time*sample)
example2.u
dudt_nn2(example2.u[:, :, :, 1:1], θ, example2.t[1]) # compatibility trick: always keep the last dimension (time*sample)
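# Why 1:1 rather than 1: indexing with a range keeps the trailing singleton
# (time*sample) dimension, while an integer index drops it:
A = rand(4, 4, 2, 3)
size(A[:, :, :, 1])   # (4, 4, 2)    -- last dimension dropped
size(A[:, :, :, 1:1]) # (4, 4, 2, 1) -- last dimension kept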

# Define the loss (a-posteriori)
import Zygote
@@ -243,7 +239,8 @@ function loss_posteriori(model, p, st, data)
prob = ODEProblem(dudt_nn2, x, tspan, p)
pred = Array(solve(prob, Tsit5(); u0 = x, p = p, dt = dt, adaptive = false))
# remember that the first element of pred is the initial condition (SciML)
return T(sum(abs2, y - pred[:, :, :, 1, 2:end]) / sum(abs2, y))
return T(sum(abs2, y[:, :, :, 1:(size(pred, 5) - 1)] - pred[:, :, :, 1, 2:end]) /
sum(abs2, y))
end
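# Shape bookkeeping for the comparison above (assuming x = u[:, :, :, 1:1], so of
# size (nx, ny, D, 1)): solve stacks time as a trailing axis, giving pred of size
# (nx, ny, D, 1, nt + 1); pred[:, :, :, 1, 2:end] skips the initial condition and
# drops the singleton sample axis, and y is truncated to the same number of steps.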

# train a-posteriori: single data point
@@ -257,7 +254,7 @@ result_posteriori = Optimization.solve(
optprob,
OptimizationOptimisers.Adam(0.1);
callback = callback,
maxiters = 50,
maxiters = 5,
progress = true
)
θ_posteriori = result_posteriori.u
73 changes: 36 additions & 37 deletions src/equations/NavierStokes_utils.jl
@@ -76,21 +76,24 @@ Right hand side with closure, tries to minimize data copying (more work can be d
"""
function create_right_hand_side_with_closure_minimal_copy(setup, psolver, closure, st)
function right_hand_side(u, p, t)
# TODO: are we allowed to modify u itself? If not, do:
# u = deepcopy(u)
# otherwise u, u_nopad, and u_INS all refer to the same memory location
u_nopad = NN_padded_to_NN_nopad(u, setup)
u_INS = NN_padded_to_INS(u, setup)
INS.apply_bc_u!(u_INS, t, setup)

copy_INS_to_INS_inplace!(INS.apply_bc_u(u_INS, t, setup), u_INS)

F = INS.momentum(u_INS, nothing, t, setup)
u_nopad = NN_padded_to_NN_nopad(u, setup)
u_lux = Lux.apply(closure, u_nopad, p, st)[1][:, :, :, 1:1]
u_nopad = NN_padded_to_NN_nopad(u, setup)
u_nopad .= u_lux
# assert_pad_nopad_similar(u, u_nopad, setup)
u_lux = Lux.apply(closure, u_nopad, p, st)[1]

F = F .+ NN_padded_to_INS(u, setup)
copy_INS_to_INS_inplace!(F, u_INS)
u_nopad .= u_nopad .+ u_lux

F = INS.apply_bc_u(F, t, setup; dudt = true)
PF = INS.project(F, setup; psolver)
PF = INS.apply_bc_u(PF, t, setup; dudt = true)
cat(PF[1], PF[2]; dims = 3)
copy_INS_to_INS_inplace!(INS.apply_bc_u(u_INS, t, setup; dudt = true), u_INS)
copy_INS_to_INS_inplace!(INS.project(u_INS, setup; psolver), u_INS)
copy_INS_to_INS_inplace!(INS.apply_bc_u(u_INS, t, setup; dudt = true), u_INS)
u
end
end
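# Note on the TODO above: u_nopad and u_INS are non-copying views into u, so
# writing through either of them mutates u as well. Minimal illustration:
#   A = zeros(4, 4)
#   V = @view A[2:3, 2:3]
#   V .= 1.0
#   A[2, 2] == 1.0 # true: the parent array was modified through the view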

@@ -107,7 +110,7 @@ to a format suitable for neural network training `u[n, n, D, batch]`.
# Returns
- `u`: Velocity field converted to a tensor format suitable for neural network training.
"""
function INS_to_NN(u, setup)
#= function INS_to_NN(u, setup)
(; dimension, Iu) = setup.grid
D = dimension()
if D == 2
@@ -116,9 +119,21 @@ function INS_to_NN(u, setup)
elseif D == 3
u = cat(u[1][Iu[1]], u[2][Iu[2]], u[3][Iu[3]]; dims = 4)
u = reshape(u, size(u)..., 1) # One sample

else
error("Unsupported dimension: $D. Only 2D and 3D are supported.")
end
end =#

"""
copy_INS_to_INS_inplace

Helper function to assign in-place to a tuple, because I am no Julia guru who can one-line this.
"""
function copy_INS_to_INS_inplace!(u_source, u_dest)
for (u_s, u_d) in zip(u_source, u_dest)
u_d .= u_s
end
end
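# Example usage (hypothetical 2D arrays):
#   src = (ones(4, 4), ones(4, 4))
#   dst = (zeros(4, 4), zeros(4, 4))
#   copy_INS_to_INS_inplace!(src, dst) # dst components now hold src's values,
#                                      # while keeping their memory locations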

"""
@@ -136,35 +151,19 @@ to the IncompressibleNavierStokes.jl style `u[time step]=(ux, uy)`.
"""

function NN_padded_to_INS(u, setup)
(; grid, boundary_conditions) = setup
(; dimension) = grid
D = dimension()
# Not really sure about the necessity of the distinction.
# We just want a placeholder for the boundaries; they will in any case be recalculated via INS.apply_bc_u!
if D == 2
(@view(u[:, :, 1, 1]), @view(u[:, :, 1, 1]))
elseif D == 3
(@view(u[:, :, :, 1, 1]), @view(u[:, :, :, 2, 1]), @view(u[:, :, :, 3, 1]))
else
error("Unsupported dimension: $D. Only 2D and 3D are supported.")
end
emptydims = ((:) for _ in 1:(ndims(u) - 2))
dimdim = ndims(u) - 1
Tuple(@view(u[emptydims..., i, 1]) for i in 1:size(u, dimdim))
end
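# Example: for a 2D field stored as u of size (nx, ny, 2, 1), this returns
#   (@view(u[:, :, 1, 1]), @view(u[:, :, 2, 1]))
# one non-copying view per velocity component, matching the INS tuple layout.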

function NN_padded_to_NN_nopad(u, setup)
(; grid, boundary_conditions) = setup
(; dimension) = grid
D = dimension()
# Not really sure about the necessity of the distinction.
# We just want a placeholder for the boundaries; they will in any case be recalculated via INS.apply_bc_u!
if D == 2
x, y, _... = size(u)
@view u[2:(x - 1), 2:(y - 1), :, :]
elseif D == 3
x, y, z, _, _ = size(u)
@view u[2:(x - 1), 2:(y - 1), 2:(z - 1), :, :]
else
error("Unsupported dimension: $D. Only 2D and 3D are supported.")
end
(; Iu) = grid

# Iu has multiple (but similar) entries, while there is only one grid; we choose the first one
Iu = Iu[1]
dimdiff = ((:) for _ in 1:(ndims(u) - ndims(Iu))) # one colon per remaining trailing dimension (a bare integer would iterate only once)
@view u[Iu, dimdiff...]
end
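# Example (assuming one boundary layer per side, so Iu[1] spans the interior):
# for u of size (nx, ny, D, nsample) this returns
#   @view(u[2:(nx - 1), 2:(ny - 1), :, :])
# i.e. the interior points for all components and samples, without copying.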

function IO_padded_to_IO_nopad(io, setups)