Use Adam instead of ADAM (#247)
lorenzoh committed Jul 11, 2022
1 parent c0cbc53 commit dabd150
Showing 8 changed files with 14 additions and 14 deletions.
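For reference, a minimal sketch of the rename this commit applies across docs, notebooks, source, and tests — assuming a Flux version that exposes the CamelCase optimiser name `Adam` (the all-caps `ADAM` is the older spelling):

```julia
using Flux.Optimise: Adam  # same import the updated test suite uses below

# Construct the optimiser as the updated files do; 0.001 is the default
# learning rate shown in the siamese notebook output further down.
optimizer = Adam(0.001)
```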
2 changes: 1 addition & 1 deletion docs/introduction.md
@@ -90,7 +90,7 @@ The customizable, expanded version of the code looks like this:
dls = taskdataloaders(data, task)
model = taskmodel(task, Models.xresnet18())
lossfn = tasklossfn(task)
-learner = Learner(model, dls, ADAM(), lossfn, ToGPU(), Metrics(accuracy))
+learner = Learner(model, dls, Adam(), lossfn, ToGPU(), Metrics(accuracy))
```

At this step, we can also pass in any number of [callbacks](https://fluxml.ai/FluxTraining.jl/dev/docs/callbacks/reference.md.html) to customize the training. Here [`ToGPU`](#) ensures an available GPU is used, and [`Metrics`](#) adds additional metrics to track during training.
2 changes: 1 addition & 1 deletion docs/learning_methods.md
@@ -176,7 +176,7 @@ model = Chain(
Dense(512, length(task.classes)),
)
)
-optimizer = ADAM()
+optimizer = Adam()
lossfn = Flux.Losses.logitcrossentropy

learner = Learner(model, lossfn; data = (traindl, valdl), optimizer)
2 changes: 1 addition & 1 deletion docs/notebooks/imagesegmentation.ipynb
@@ -479,7 +479,7 @@
],
"source": [
"traindl, validdl = taskdataloaders(data, task, 16)\n",
"optimizer = ADAM()\n",
"optimizer = Adam()\n",
"learner = Learner(model, lossfn; data=(traindl, validdl), optimizer, callbacks=[ToGPU()])"
]
},
2 changes: 1 addition & 1 deletion docs/notebooks/keypointregression.ipynb
@@ -508,7 +508,7 @@
" model,\n",
" tasklossfn(task);\n",
" data=(traindl, validdl),\n",
" optimizer=Flux.ADAM(),\n",
" optimizer=Flux.Adam(),\n",
" callbacks=[ToGPU()])"
]
},
6 changes: 3 additions & 3 deletions docs/notebooks/siamese.ipynb
@@ -872,7 +872,7 @@
{
"data": {
"text/plain": [
"ADAM(0.001, (0.9, 0.999), IdDict{Any, Any}())"
"Adam(0.001, (0.9, 0.999), IdDict{Any, Any}())"
]
},
"execution_count": 24,
@@ -882,7 +882,7 @@
],
"source": [
"lossfn = Flux.Losses.logitcrossentropy\n",
"optimizer = Flux.ADAM()"
"optimizer = Flux.Adam()"
]
},
{
@@ -965,7 +965,7 @@
"h, w, ch, b = Flux.outputsize(encoder, (128, 128, 3, 1))\n",
"head = Models.visionhead(2ch, 2)\n",
"model = SiameseModel(encoder, head);\n",
"learner = Learner(model, (traindl, valdl), ADAM(0.01), lossfn, callbacks...)"
"learner = Learner(model, (traindl, valdl), Adam(0.01), lossfn, callbacks...)"
]
},
{
4 changes: 2 additions & 2 deletions src/learner.jl
@@ -13,7 +13,7 @@ Create a [`Learner`](#) to train a model for learning task `task` using
- `backbone = nothing`: Backbone model to construct task-specific model from using
[`taskmodel`](#)`(task, backbone)`.
- `model = nothing`: Complete model to use. If given, the `backbone` argument is ignored.
-- `optimizer = ADAM()`: Optimizer passed to `Learner`.
+- `optimizer = Adam()`: Optimizer passed to `Learner`.
- `lossfn = `[`tasklossfn`](#)`(task)`: Loss function passed to `Learner`.
Any other keyword arguments will be passed to [`taskdataloaders`](#).
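As a hedged usage sketch of the keyword arguments documented above, not taken verbatim from this diff — `task` and `data` are hypothetical placeholders assumed to come from the usual FastAI.jl task and data constructors:

```julia
# Sketch only: `data` and `task` are assumed to exist already.
learner = tasklearner(task, data;
                      backbone = Models.xresnet18(),  # backbone borrowed from docs/introduction.md above
                      batchsize = 16,                 # default listed in the hunk below
                      optimizer = Adam(),             # the new default after this change
                      callbacks = [ToGPU()])
```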
@@ -51,7 +51,7 @@ function tasklearner(task::LearningTask,
callbacks = [],
pctgval = 0.2,
batchsize = 16,
-optimizer = ADAM(),
+optimizer = Adam(),
lossfn = tasklossfn(task),
kwargs...)
if isnothing(model)
8 changes: 4 additions & 4 deletions src/training/onecycle.jl
@@ -54,11 +54,11 @@ end
# ## Tests

@testset "decay_optim" begin
-optim = ADAM()
+optim = Adam()
@test decay_optim(optim, 0.1) isa Optimiser
-@test decay_optim(Optimiser(ADAM(), ADAM()), 0.1) isa Optimiser
+@test decay_optim(Optimiser(Adam(), Adam()), 0.1) isa Optimiser
@test decay_optim(optim, 0.1).os[1] isa WeightDecay
-o = decay_optim(Optimiser(ADAM(), WeightDecay(0.5)), 0.1)
+o = decay_optim(Optimiser(Adam(), WeightDecay(0.5)), 0.1)
@test o.os[1] isa WeightDecay
-@test o.os[2] isa ADAM
+@test o.os[2] isa Adam
end
2 changes: 1 addition & 1 deletion test/runtests.jl
@@ -12,7 +12,7 @@ using InlineTest
using ..FastAI
import ..FastAI: Block, Encoding, encodedblock, decodedblock, encode, decode,
testencoding, test_task_show, checkblock
-using Flux.Optimise: Optimiser, ADAM, apply!
+using Flux.Optimise: Optimiser, Adam, apply!

ENV["DATADEPS_ALWAYS_ACCEPT"] = "true"
include("testdata.jl")
