From 58369a514c429f2d8052d8ca66925019c4165e3b Mon Sep 17 00:00:00 2001
From: Avik Pal
Date: Mon, 4 Nov 2024 18:29:25 -0500
Subject: [PATCH] fix: update to latest Reactant changes

---
 .buildkite/testing.yml                  |  6 +-----
 docs/src/manual/compiling_lux_models.md | 18 ++++++++++--------
 lib/LuxCore/Project.toml                |  2 +-
 test/runtests.jl                        |  2 --
 4 files changed, 12 insertions(+), 16 deletions(-)

diff --git a/.buildkite/testing.yml b/.buildkite/testing.yml
index 642dfdc75..935e95fe3 100644
--- a/.buildkite/testing.yml
+++ b/.buildkite/testing.yml
@@ -1,7 +1,7 @@
 steps:
   - group: ":julia: (Lux) CUDA GPU"
     steps:
-      - label: ":julia: Julia {{matrix.julia}} + CUDA GPU + {{matrix.group}}"
+      - label: ":julia: Julia {{matrix.julia}} + CUDA GPU"
         plugins:
           - JuliaCI/julia#v1:
               version: "{{matrix.julia}}"
@@ -45,7 +45,6 @@ steps:
             include(joinpath(dir, "../test/runtests.jl"))'
         env:
           BACKEND_GROUP: "CUDA"
-          LUX_TEST_GROUP: "{{matrix.group}}"
         if: build.message !~ /\[skip tests\]/ && build.message !~ /\[skip ci\]/
         timeout_in_minutes: 60
         matrix:
@@ -53,9 +52,6 @@ steps:
           setup:
             julia:
              - "1.10"
              - "1"
-            group:
-              - "!reactant"
-              - "reactant"
   - group: ":julia: (Lux) AMD GPU"
     steps:
diff --git a/docs/src/manual/compiling_lux_models.md b/docs/src/manual/compiling_lux_models.md
index 44e7fc27c..a153954f0 100644
--- a/docs/src/manual/compiling_lux_models.md
+++ b/docs/src/manual/compiling_lux_models.md
@@ -21,6 +21,11 @@ using Lux, Reactant, Enzyme, Random, Zygote
 using Functors, Optimisers, Printf
 ```
 
+!!! tip "Running on alternate accelerators"
+
+    `Reactant.set_default_backend("gpu")` sets the default backend to CUDA and
+    `Reactant.set_default_backend("tpu")` sets the default backend to TPU.
+
 !!! tip "Using the `TrainState` API"
 
     If you are using the [`Training.TrainState`](@ref) API, skip to the
@@ -149,15 +154,12 @@ function train_model(model, ps, st, dataloader)
     train_state = Training.TrainState(model, ps, st, Adam(0.001f0))
 
     for iteration in 1:1000
-        for (xᵢ, yᵢ) in dataloader
-            grads, loss, stats, train_state = Training.single_train_step!(
+        for (i, (xᵢ, yᵢ)) in enumerate(dataloader)
+            _, loss, _, train_state = Training.single_train_step!(
                 AutoEnzyme(), MSELoss(), (xᵢ, yᵢ), train_state)
-        end
-        if iteration % 100 == 0 || iteration == 1
-            # We need to do this since scalar outputs are currently expressed as a zero-dim
-            # array
-            loss = Array(loss)[]
-            @printf("Iter: [%4d/%4d]\tLoss: %.8f\n", iteration, 1000, loss)
+            if (iteration % 100 == 0 || iteration == 1) && i == 1
+                @printf("Iter: [%4d/%4d]\tLoss: %.8f\n", iteration, 1000, loss)
+            end
         end
     end
 
diff --git a/lib/LuxCore/Project.toml b/lib/LuxCore/Project.toml
index e38e5b6eb..eec8280b5 100644
--- a/lib/LuxCore/Project.toml
+++ b/lib/LuxCore/Project.toml
@@ -38,7 +38,7 @@ EnzymeCore = "0.8.5"
 Functors = "0.4.12"
 MLDataDevices = "1"
 Random = "1.10"
-Reactant = "0.2.3"
+Reactant = "0.2.4"
 ReverseDiff = "1.15"
 Setfield = "1"
 Tracker = "0.2.34"
diff --git a/test/runtests.jl b/test/runtests.jl
index 1353e5c91..1709d0352 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -124,8 +124,6 @@ if ("all" in LUX_TEST_GROUP || "misc" in LUX_TEST_GROUP)
     set_preferences!(Lux, "eltype_mismatch_handling" => "none"; force=true)
 end
 
-Lux.set_dispatch_doctor_preferences!(; luxcore="error", luxlib="error")
-
 const RETESTITEMS_NWORKERS = parse(
     Int, get(ENV, "RETESTITEMS_NWORKERS", string(min(Hwloc.num_physical_cores(), 4))))
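
The tip added to `compiling_lux_models.md` only names the backend switch. For orientation, here is a minimal sketch of the workflow that tip slots into, assuming the API used elsewhere on that manual page (`Reactant.to_rarray`, `Reactant.@compile`, `Lux.testmode`); it is illustrative, not part of the patch:

```julia
using Lux, Reactant, Random

# Pick where compiled executables run. "cpu" is always available; per the
# tip above, "gpu" selects CUDA and "tpu" selects TPU hardware.
Reactant.set_default_backend("cpu")

model = Chain(Dense(2 => 4, gelu), Dense(4 => 2))
ps, st = Lux.setup(Random.default_rng(), model)

# Reactant traces over its own array type, so inputs, parameters, and
# states are first converted to Reactant arrays.
x_ra = Reactant.to_rarray(randn(Float32, 2, 32))
ps_ra = Reactant.to_rarray(ps)
st_ra = Reactant.to_rarray(st)

# Compile the forward pass once, then call the compiled object like the
# original model.
forward = @compile model(x_ra, ps_ra, Lux.testmode(st_ra))
y_ra, _ = forward(x_ra, ps_ra, Lux.testmode(st_ra))
```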
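The revised `train_model` hunk moves the logging inside the batch loop and prints only on the first batch of selected iterations. A self-contained usage sketch follows, with hypothetical toy data and a single-batch vector standing in for a real dataloader; it assumes the `train_model` definition from the hunk above and that parameters and states are converted to Reactant arrays before the call, as the manual does elsewhere:

```julia
using Lux, Reactant, Enzyme, Optimisers, Random

# Hypothetical toy regression data, purely illustrative.
x = randn(Float32, 2, 32)
y = x .^ 2 .- 1

model = Chain(Dense(2 => 4, gelu), Dense(4 => 2))
ps, st = Lux.setup(Random.default_rng(), model)

# A one-batch "dataloader"; any iterable of (x, y) batches works.
dataloader = [(Reactant.to_rarray(x), Reactant.to_rarray(y))]

# Runs 1000 iterations, printing the loss at iteration 1, 100, 200, ...
train_model(model, Reactant.to_rarray(ps), Reactant.to_rarray(st), dataloader)
```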