diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 1c874478..5d6214e8 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -21,8 +21,37 @@ steps:
       setup:
         julia:
           - "1"
-          - "1.6"
-          - "1.9-nightly"
+          - "nightly"
+      adjustments:
+        - with:
+            julia: "nightly"
+          soft_fail: true
+  - label: ":julia: Julia: {{matrix.julia}} + AMD GPU"
+    plugins:
+      - JuliaCI/julia#v1:
+          version: "{{matrix.julia}}"
+      - JuliaCI/julia-test#v1:
+          test_args: "--quickfail"
+      - JuliaCI/julia-coverage#v1:
+          codecov: true
+          dirs:
+            - src
+            - ext
+    env:
+      JULIA_AMDGPU_CORE_MUST_LOAD: "1"
+      JULIA_AMDGPU_HIP_MUST_LOAD: "1"
+      JULIA_AMDGPU_DISABLE_ARTIFACTS: "1"
+      GROUP: "AMDGPU"
+    agents:
+      queue: "juliagpu"
+      rocm: "*"
+      rocmgpu: "*"
+    if: build.message !~ /\[skip tests\]/
+    timeout_in_minutes: 60
+    matrix:
+      setup:
+        julia:
+          - "1"
           - "nightly"
       adjustments:
         - with:
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 79a134d9..e91619f2 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -19,8 +19,6 @@ jobs:
       matrix:
         version:
           - "1"
-          - "1.6"
-          - "~1.9.0-0"
     steps:
       - uses: actions/checkout@v3
       - uses: julia-actions/setup-julia@v1
@@ -46,4 +44,3 @@ jobs:
       - uses: codecov/codecov-action@v3
         with:
           files: lcov.info
-          flags: ${{ matrix.group }}
diff --git a/Project.toml b/Project.toml
index 7587eccf..eb6379a8 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "LuxLib"
 uuid = "82251201-b29d-42c6-8e01-566dec8acb11"
 authors = ["Avik Pal and contributors"]
-version = "0.2.1"
+version = "0.2.2"
 
 [deps]
 ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
diff --git a/ext/LuxLibTrackerExt.jl b/ext/LuxLibTrackerExt.jl
index 6fa96dca..8e50f9f0 100644
--- a/ext/LuxLibTrackerExt.jl
+++ b/ext/LuxLibTrackerExt.jl
@@ -102,10 +102,12 @@ end
                                 epsilon::Real) where {T <: FP_32_64}
     LuxLib._assert_same_backend(data(x), data(scale), data(bias))
     if length(scale) != length(bias) != size(x, 3)
-        throw(ArgumentError("Length of `scale` and `bias` must be equal to the number of channels (N - 1 dim of the input array)."))
+        throw(ArgumentError("Length of `scale` and `bias` must be equal to the number of \
+                             channels (N - 1 dim of the input array)."))
     end
     if size(x, 3) % groups != 0
-        throw(ArgumentError("Number of channels $(size(x, 3)) must be divisible by the number of groups $groups."))
+        throw(ArgumentError("Number of channels $(size(x, 3)) must be divisible by the \
+                             number of groups $groups."))
     end
 
     y, mu, rsig = LuxLib._groupnorm(data(x), groups, data(scale), data(bias), epsilon)
diff --git a/src/api/dropout.jl b/src/api/dropout.jl
index 83bd760f..cd741865 100644
--- a/src/api/dropout.jl
+++ b/src/api/dropout.jl
@@ -66,9 +66,7 @@ function dropout(rng::AbstractRNG,
                  ::Val{false}; dims, invp::T=inv(p)) where {T, T1, T2, N}
-    if size(x) != size(mask)
-        return dropout(rng, x, p, Val(true); dims, invp)
-    end
+    size(x) != size(mask) && return dropout(rng, x, p, Val(true); dims, invp)
     return x .* ignore_derivatives(mask), mask, rng
 end
diff --git a/test/Project.toml b/test/Project.toml
index ab18c6c8..4b10768a 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -1,6 +1,7 @@
 [deps]
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
+LuxAMDGPU = "83120cb1-ca15-4f04-bf3b-6967d2e6b60b"
 LuxCUDA = "d0bbae9a-e099-4d5b-a835-1c6931763bda"
 LuxTestUtils = "ac9de150-d08f-4546-94fb-7472b5760531"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
diff --git a/test/api/dropout.jl b/test/api/dropout.jl
index 580c30cd..c941a4c6 100644
--- a/test/api/dropout.jl
+++ b/test/api/dropout.jl
@@ -1,5 +1,4 @@
-using LuxCUDA, Statistics, Test
-using LuxLib
+using Statistics, Test, LuxLib
 
 include("../test_utils.jl")
 
@@ -145,6 +144,7 @@ end
         @test y isa aType{T, length(x_shape)}
         @test size(y) == x_shape
         @test rng != rng_
+        @test_broken isapprox(std(y), std(x); atol=1.0f-2, rtol=1.0f-2)
 
         __f = x -> sum(first(alpha_dropout(rng, x, T(0.5), Val(true))))
 
diff --git a/test/test_utils.jl b/test/test_utils.jl
index c600840d..65112493 100644
--- a/test/test_utils.jl
+++ b/test/test_utils.jl
@@ -1,21 +1,23 @@
 using LuxLib, LuxTestUtils, StableRNGs, Test, Zygote
-using LuxCUDA # CUDA Support
+using LuxCUDA, LuxAMDGPU
 using LuxTestUtils: @jet, @test_gradients, check_approx
 
 const GROUP = get(ENV, "GROUP", "All")
 
 cpu_testing() = GROUP == "All" || GROUP == "CPU"
 cuda_testing() = (GROUP == "All" || GROUP == "CUDA") && LuxCUDA.functional()
-amdgpu_testing() = (GROUP == "All" || GROUP == "AMDGPU") # && LuxAMDGPU.functional()
+amdgpu_testing() = (GROUP == "All" || GROUP == "AMDGPU") && LuxAMDGPU.functional()
 
 const MODES = begin
     # Mode, Array Type, GPU?
     cpu_mode = ("CPU", Array, false)
     cuda_mode = ("CUDA", CuArray, true)
+    amdgpu_mode = ("AMDGPU", ROCArray, true)
 
     modes = []
     cpu_testing() && push!(modes, cpu_mode)
     cuda_testing() && push!(modes, cuda_mode)
+    amdgpu_testing() && push!(modes, amdgpu_mode)
     modes
 end
 