Commit

fix: remove deprecated deps
avik-pal committed Aug 27, 2024
1 parent e70c68d commit 94c0ae2
Showing 6 changed files with 54 additions and 79 deletions.
2 changes: 1 addition & 1 deletion .buildkite/pipeline.yml
@@ -1,5 +1,5 @@
steps:
- label: "Julia 1"
- label: "Julia 1 (CUDA)"
plugins:
- JuliaCI/julia#v1:
version: "1.10"
2 changes: 0 additions & 2 deletions .github/workflows/CI.yml
@@ -46,8 +46,6 @@ jobs:
coverage: false
env:
GROUP: ${{ matrix.group }}
RETESTITEMS_NWORKERS: 0
RETESTITEMS_TESTITEM_TIMEOUT: 3600
- uses: julia-actions/julia-processcoverage@v1
- uses: codecov/codecov-action@v4
with:
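The two deleted lines were ReTestItems.jl settings: `RETESTITEMS_NWORKERS: 0` runs test items in the current process rather than on worker processes, and `RETESTITEMS_TESTITEM_TIMEOUT: 3600` gives each item up to an hour. A minimal sketch of the equivalent in-code configuration, assuming the `nworkers` and `testitem_timeout` keywords of `ReTestItems.runtests` (this is not the repo's actual test invocation):

```julia
using ReTestItems

# Sketch: run all test items in-process, allowing 3600 s per item,
# from within the package's test environment.
runtests(; nworkers = 0, testitem_timeout = 3600)
```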
3 changes: 1 addition & 2 deletions docs/Project.toml
@@ -14,10 +14,10 @@ IterTools = "c8e1da08-722c-5040-9ed9-7db0dc04731e"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Lux = "b2108857-7c20-44ae-9111-449ecde12c47"
LuxCUDA = "d0bbae9a-e099-4d5b-a835-1c6931763bda"
MLDataUtils = "cc2ba9b6-d476-5e6d-8eaf-a92d5412d41d"
MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
OneHotArrays = "0b1bfda6-eb8a-41d2-88d8-f5af5cad476f"
Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2"
Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
@@ -52,7 +52,6 @@ IterTools = "1"
LinearAlgebra = "1"
Lux = "0.5.5"
LuxCUDA = "0.3"
MLDataUtils = "0.5"
MLDatasets = "0.7"
MLUtils = "0.4"
NNlib = "0.9"
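The docs drop the deprecated MLDataUtils in favor of OneHotArrays for label encoding (MLUtils, used for batching, was already a dependency). A minimal sketch of the one-hot replacement, with hypothetical labels:

```julia
using OneHotArrays: onehotbatch

labels_raw = [0, 3, 9, 1]  # hypothetical MNIST-style integer labels

# Old (MLDataUtils, removed):
#   convertlabel(LabelEnc.OneOfK, labels_raw, LabelEnc.NativeLabels(collect(0:9)))
# New (OneHotArrays): a 10×4 one-hot matrix over the classes 0:9.
y = onehotbatch(labels_raw, 0:9)
```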
39 changes: 16 additions & 23 deletions docs/src/examples/mnist_conv_neural_ode.md
@@ -8,10 +8,9 @@ using Fully Connected Layers.

```@example mnist_cnn
using DiffEqFlux, Statistics, ComponentArrays, CUDA, Zygote, MLDatasets, OrdinaryDiffEq,
Printf, Test, LuxCUDA, Random
Printf, Test, LuxCUDA, Random, MLUtils, OneHotArrays
using Optimization, OptimizationOptimisers
using MLDatasets: MNIST
using MLDataUtils: LabelEnc, convertlabel, stratifiedobs, batchview
const cdev = cpu_device()
const gdev = gpu_device()
@@ -22,26 +21,21 @@ ENV["DATADEPS_ALWAYS_ACCEPT"] = true
logitcrossentropy(ŷ, y) = mean(-sum(y .* logsoftmax(ŷ; dims = 1); dims = 1))
function loadmnist(batchsize = bs)
# Use MLDataUtils LabelEnc for natural onehot conversion
function onehot(labels_raw)
convertlabel(LabelEnc.OneOfK, labels_raw, LabelEnc.NativeLabels(collect(0:9)))
end
# Load MNIST
mnist = MNIST(; split = :train)
imgs, labels_raw = mnist.features, mnist.targets
dataset = MNIST(; split=:train)
imgs = dataset.features
labels_raw = dataset.targets
# Process images into (H,W,C,BS) batches
x_train = Float32.(reshape(imgs, size(imgs, 1), size(imgs, 2), 1, size(imgs, 3))) |>
gdev
x_train = batchview(x_train, batchsize)
# Onehot and batch the labels
y_train = onehot(labels_raw) |> gdev
y_train = batchview(y_train, batchsize)
return x_train, y_train
x_data = Float32.(reshape(imgs, size(imgs, 1), size(imgs, 2), 1, size(imgs, 3)))
y_data = onehotbatch(labels_raw, 0:9)
return DataLoader((x_data, y_data); batchsize, shuffle=true)
end
# Main
const bs = 32
x_train, y_train = loadmnist(bs)
dataloader = loadmnist(bs)
down = Chain(Conv((3, 3), 1 => 64, relu; stride = 1), GroupNorm(64, 64),
Conv((4, 4), 64 => 64, relu; stride = 2, pad = 1),
@@ -56,9 +50,7 @@ fc = Chain(GroupNorm(64, 64), x -> relu.(x), MeanPool((6, 6)),
nn_ode = NeuralODE(dudt, (0.0f0, 1.0f0), Tsit5(); save_everystep = false,
reltol = 1e-3, abstol = 1e-3, save_start = false)
function DiffEqArray_to_Array(x)
xarr = gdev(x.u[1])
end
DiffEqArray_to_Array(x) = x.u[end]
# Build our over-all model topology
m = Chain(down, # (28, 28, 1, BS) -> (6, 6, 64, BS)
@@ -70,8 +62,9 @@ ps = ComponentArray(ps) |> gdev
st = st |> gdev
# To understand the intermediate NN-ODE layer, we can examine its dimensionality
img = x_train[1][:, :, :, 1:1] |> gdev
lab = y_train[1][:, 1:1] |> gdev
x_train1, y_train1 = first(dataloader)
img = x_train1[:, :, :, 1:1] |> gdev
lab = y_train1[:, 1:1] |> gdev
x_m, _ = m(img, ps, st)
@@ -91,15 +84,15 @@ function accuracy(model, data, ps, st; n_batches = 10)
end
# burn in accuracy
accuracy(m, zip(x_train, y_train), ps, st)
accuracy(m, ((x_train1, y_train1),), ps, st)
function loss_function(ps, x, y)
pred, st_ = m(x, ps, st)
return logitcrossentropy(pred, y), pred
end
#burn in loss
loss_function(ps, x_train[1], y_train[1])
loss_function(ps, x_train1, y_train1)
opt = OptimizationOptimisers.Adam(0.05)
iter = 0
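Across both MNIST examples the hand-rolled `batchview` pipeline is replaced by MLUtils' `DataLoader`, which keeps the full dataset on the CPU and yields shuffled minibatches; batches are moved to the device as needed, as with `first(dataloader)` above. A sketch of the iteration pattern with hypothetical stand-in data:

```julia
using MLUtils: DataLoader

# Stand-in arrays with MNIST-like shapes (28×28 grayscale, 10 classes).
x_data = rand(Float32, 28, 28, 1, 256)
y_data = rand(Float32, 10, 256)

dataloader = DataLoader((x_data, y_data); batchsize = 32, shuffle = true)

for (x, y) in dataloader
    # x is 28×28×1×32, y is 10×32; move each batch to the GPU when one is
    # present, e.g. x, y = gdev(x), gdev(y) with the example's gdev.
end
```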
86 changes: 36 additions & 50 deletions docs/src/examples/mnist_neural_ode.md
@@ -6,10 +6,10 @@ on **GPUs** with **minibatching**.
(Step-by-step description below)

```@example mnist
using DiffEqFlux, CUDA, Zygote, MLDataUtils, NNlib, OrdinaryDiffEq, Test, Lux, Statistics,
ComponentArrays, Random, Optimization, OptimizationOptimisers, LuxCUDA
using DiffEqFlux, CUDA, Zygote, NNlib, OrdinaryDiffEq, Test, Lux, Statistics,
ComponentArrays, Random, Optimization, OptimizationOptimisers, LuxCUDA,
MLUtils, OneHotArrays
using MLDatasets: MNIST
using MLDataUtils: LabelEnc, convertlabel, stratifiedobs
CUDA.allowscalar(false)
ENV["DATADEPS_ALWAYS_ACCEPT"] = true
@@ -20,26 +20,21 @@ const gdev = gpu_device()
logitcrossentropy(ŷ, y) = mean(-sum(y .* logsoftmax(ŷ; dims = 1); dims = 1))
function loadmnist(batchsize = bs)
# Use MLDataUtils LabelEnc for natural onehot conversion
function onehot(labels_raw)
convertlabel(LabelEnc.OneOfK, labels_raw, LabelEnc.NativeLabels(collect(0:9)))
end
# Load MNIST
mnist = MNIST(; split = :train)
imgs, labels_raw = mnist.features, mnist.targets
dataset = MNIST(; split=:train)
imgs = dataset.features
labels_raw = dataset.targets
# Process images into (H,W,C,BS) batches
x_train = Float32.(reshape(imgs, size(imgs, 1), size(imgs, 2), 1, size(imgs, 3))) |>
gdev
x_train = batchview(x_train, batchsize)
# Onehot and batch the labels
y_train = onehot(labels_raw) |> gdev
y_train = batchview(y_train, batchsize)
return x_train, y_train
x_data = Float32.(reshape(imgs, size(imgs, 1), size(imgs, 2), 1, size(imgs, 3)))
y_data = onehotbatch(labels_raw, 0:9)
return DataLoader((x_data, y_data); batchsize, shuffle=true)
end
# Main
const bs = 128
x_train, y_train = loadmnist(bs)
const bs = 32
dataloader = loadmnist(bs)
down = Lux.Chain(Lux.FlattenLayer(), Lux.Dense(784, 20, tanh))
nn = Lux.Chain(Lux.Dense(20, 10, tanh), Lux.Dense(10, 10, tanh), Lux.Dense(10, 20, tanh))
@@ -48,10 +43,7 @@ fc = Lux.Dense(20, 10)
nn_ode = NeuralODE(nn, (0.0f0, 1.0f0), Tsit5(); save_everystep = false,
reltol = 1e-3, abstol = 1e-3, save_start = false)
function DiffEqArray_to_Array(x)
xarr = gdev(x.u[1])
return xarr
end
DiffEqArray_to_Array(x) = x.u[end]
#Build our over-all model topology
m = Lux.Chain(; down, nn_ode, convert = Lux.WrappedFunction(DiffEqArray_to_Array), fc)
@@ -65,13 +57,15 @@ ps_no_ode, st_no_ode = Lux.setup(Xoshiro(0), m_no_ode)
ps_no_ode = ComponentArray(ps_no_ode) |> gdev
st_no_ode = st_no_ode |> gdev
x_train1, y_train1 = first(dataloader)
#To understand the intermediate NN-ODE layer, we can examine its dimensionality
x_d = first(down(x_train[1], ps.down, st.down))
x_d = first(down(x_train1, ps.down, st.down))
# We can see that we can compute the forward pass through the NN topology featuring an NNODE layer.
x_m = first(m(x_train[1], ps, st))
x_m = first(m(x_train1, ps, st))
#Or without the NN-ODE layer.
x_m = first(m_no_ode(x_train[1], ps_no_ode, st_no_ode))
x_m = first(m_no_ode(x_train1, ps_no_ode, st_no_ode))
classify(x) = argmax.(eachcol(x))
@@ -88,15 +82,15 @@ function accuracy(model, data, ps, st; n_batches = 100)
return total_correct / total
end
#burn in accuracy
accuracy(m, zip(x_train, y_train), ps, st)
accuracy(m, ((x_train1, y_train1),), ps, st)
function loss_function(ps, x, y)
pred, st_ = m(x, ps, st)
return logitcrossentropy(pred, y), pred
end
#burn in loss
loss_function(ps, x_train[1], y_train[1])
loss_function(ps, x_train1, y_train1)
opt = OptimizationOptimisers.Adam(0.05)
iter = 0
@@ -125,10 +119,10 @@ res = Optimization.solve(opt_prob, opt, zip(x_train, y_train); callback)
### Load Packages

```@example mnist
using DiffEqFlux, CUDA, Zygote, MLDataUtils, NNlib, OrdinaryDiffEq, Test, Lux, Statistics,
ComponentArrays, Random, Optimization, OptimizationOptimisers, LuxCUDA
using DiffEqFlux, CUDA, Zygote, NNlib, OrdinaryDiffEq, Test, Lux, Statistics,
ComponentArrays, Random, Optimization, OptimizationOptimisers, LuxCUDA,
MLUtils, OneHotArrays
using MLDatasets: MNIST
using MLDataUtils: LabelEnc, convertlabel, stratifiedobs
```

### GPU
@@ -163,30 +157,25 @@ meaning that every minibatch will contain 128 images with a single color channel
logitcrossentropy(ŷ, y) = mean(-sum(y .* logsoftmax(ŷ; dims = 1); dims = 1))
function loadmnist(batchsize = bs)
# Use MLDataUtils LabelEnc for natural onehot conversion
function onehot(labels_raw)
convertlabel(LabelEnc.OneOfK, labels_raw, LabelEnc.NativeLabels(collect(0:9)))
end
# Load MNIST
mnist = MNIST(; split = :train)
imgs, labels_raw = mnist.features, mnist.targets
dataset = MNIST(; split=:train)
imgs = dataset.features
labels_raw = dataset.targets
# Process images into (H,W,C,BS) batches
x_train = Float32.(reshape(imgs, size(imgs, 1), size(imgs, 2), 1, size(imgs, 3))) |>
gdev
x_train = batchview(x_train, batchsize)
# Onehot and batch the labels
y_train = onehot(labels_raw) |> gdev
y_train = batchview(y_train, batchsize)
return x_train, y_train
x_data = Float32.(reshape(imgs, size(imgs, 1), size(imgs, 2), 1, size(imgs, 3)))
y_data = onehotbatch(labels_raw, 0:9)
return DataLoader((x_data, y_data); batchsize, shuffle=true)
end
```

and then loaded from main:

```@example mnist
# Main
const bs = 128
x_train, y_train = loadmnist(bs)
const bs = 32
dataloader = loadmnist(bs)
```
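Note that the batch size also changes from 128 to 32 here. Each element the loader yields is a `(features, labels)` tuple; a quick shape check, as a sketch:

```julia
x1, y1 = first(dataloader)
size(x1)  # (28, 28, 1, 32): height × width × channels × batch
size(y1)  # (10, 32): one-hot classes × batch
```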

### Layers
@@ -222,10 +211,7 @@ a Matrix (CuArray), and reduces the matrix from 3 to 2 dimensions for use in the
nn_ode = NeuralODE(nn, (0.0f0, 1.0f0), Tsit5(); save_everystep = false,
reltol = 1e-3, abstol = 1e-3, save_start = false)
function DiffEqArray_to_Array(x)
xarr = gdev(x.u[1])
return xarr
end
DiffEqArray_to_Array(x) = x.u[end]
```
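The helper shrinks to a one-liner because the `NeuralODE` above is built with `save_everystep = false` and `save_start = false`: the solution stores a single saved state, already on whatever device the solve ran on, so the explicit `gdev` copy is no longer needed. As a sketch:

```julia
# Previously: DiffEqArray_to_Array(x) = gdev(x.u[1])
# x.u is the vector of saved states; with save_everystep = false and
# save_start = false it holds only the final-time state, so index it directly.
DiffEqArray_to_Array(x) = x.u[end]
```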

For CPU: If this function does not automatically fall back to CPU when no GPU is present, we can
@@ -291,7 +277,7 @@ function loss_function(ps, x, y)
end
#burn in loss
loss_function(ps, x_train[1], y_train[1])
loss_function(ps, x_train1, y_train1)
```

#### Optimizer
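With the pre-batched arrays gone, the training call presumably iterates the `DataLoader` instead of `zip(x_train, y_train)` (that hunk is collapsed above). A sketch under that assumption, mirroring the example's existing Optimization.jl pattern:

```julia
# Hypothetical: substitutes the loader for the old zip(x_train, y_train).
optfun = OptimizationFunction((ps, _, x, y) -> loss_function(ps, x, y),
    Optimization.AutoZygote())
optprob = OptimizationProblem(optfun, ps)
res = Optimization.solve(optprob, opt, dataloader; callback)
```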
1 change: 0 additions & 1 deletion test/neural_de_tests.jl
@@ -277,7 +277,6 @@ end
pd = ComponentArray(pd) |> gdev
st = st |> gdev
broken = hasfield(typeof(kwargs), :sensealg) &&
ndims(u0) == 2 &&
kwargs.sensealg isa TrackerAdjoint
@test begin
grads = Zygote.gradient(sum ∘ last ∘ first ∘ node, u0, pd, st)
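The composed function in the `@test` above flattens the layer output to a scalar so it can be differentiated. Spelled out, assuming `node` is the neural-DE layer under test:

```julia
# (sum ∘ last ∘ first ∘ node)(u0, pd, st) is equivalent to:
function scalar_loss(u0, pd, st)
    sol, _ = node(u0, pd, st)  # first(...): the ODE solution, dropping state
    return sum(last(sol))      # last(...): final saved state; sum -> scalar
end
grads = Zygote.gradient(scalar_loss, u0, pd, st)
```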
