This repository has been archived by the owner on Nov 4, 2024. It is now read-only.

Finite Differences on x is a bit janky
avik-pal committed Apr 25, 2023
1 parent dd0c303 commit 5768fe5
Showing 4 changed files with 16 additions and 28 deletions.
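
Every file in this commit follows the same pattern: the input `x` is captured inside the test closure so that `@test_gradients` no longer runs a finite-difference check over the full input array, only over the small `scale` and `bias` parameters. Below is a minimal sketch of the before/after shape, assuming the LuxLib `batchnorm` call signature and the repo's `@test_gradients` test-helper macro as they appear in the diffs; the array sizes and values are made up for illustration and the macro calls are left as comments since they need the test utilities to run.

    using LuxLib  # assumed package under test; call signature copied from the diff below

    T = Float32
    x = randn(T, 4, 4, 3, 2)                 # input, captured by the new-style closure
    scale, bias = randn(T, 3), randn(T, 3)   # parameters whose gradients are still checked
    rm, rv = zeros(T, 3), ones(T, 3)         # running mean / variance
    epsilon, training = T(1e-5), Val(true)

    # Before: x, scale, and bias are all splatted in, so the gradient test also
    # finite-differences through the full input array.
    __f_old = (args...) -> sum(first(batchnorm(args..., rm, rv; epsilon, training,
                                               momentum=T(0.9))))
    # @test_gradients __f_old x scale bias ...

    # After: x is fixed inside the closure; only scale and bias are differentiated.
    __f_new = (args...) -> sum(first(batchnorm(x, args..., rm, rv; epsilon,
                                               training, momentum=T(0.9))))
    # @test_gradients __f_new scale bias ...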
11 changes: 3 additions & 8 deletions test/api/batchnorm.jl
@@ -48,14 +48,9 @@ end
         if __istraining(training)
             fp16 = T == Float16
             if affine
-                __f = (args...) -> sum(first(batchnorm(args..., rm, rv; epsilon, training,
-                                                        momentum=T(0.9))))
-                @eval @test_gradients $__f $x $scale $bias gpu_testing=$on_gpu soft_fail=$fp16 atol=1.0f-2 rtol=1.0f-2
-            else
-                __f = (args...) -> sum(first(batchnorm(args..., scale, bias, rm, rv;
-                                                        epsilon, training, momentum=T(0.9))))
-
-                @eval @test_gradients $__f $x gpu_testing=$on_gpu soft_fail=$fp16 atol=1.0f-2 rtol=1.0f-2
+                __f = (args...) -> sum(first(batchnorm(x, args..., rm, rv; epsilon,
+                                                        training, momentum=T(0.9))))
+                @eval @test_gradients $__f $scale $bias gpu_testing=$on_gpu soft_fail=$fp16 atol=1.0f-2 rtol=1.0f-2
             end
         end
     end
18 changes: 9 additions & 9 deletions test/api/groupnorm.jl
@@ -29,7 +29,7 @@ function _groupnorm_generic_fallback(x, scale, bias, running_mean, running_var,
     return reshape(x_, sz)
 end
 
-@testset "GroupNorm KernelAbstractions" begin for (mode, aType, on_gpu) in MODES
+@testset "$mode: GroupNorm KernelAbstractions" for (mode, aType, on_gpu) in MODES
     for T in (Float32, Float64),
         sz in ((16, 16, 6, 4), (32, 32, 6, 4), (64, 64, 12, 4)),
         groups in (2, 3)
@@ -66,12 +66,12 @@ end
         @test check_approx(gs_bias, gs_bias_; atol=1.0f-3, rtol=1.0f-3)
 
         fp16 = T == Float16
-        __f = sum ∘ _f
-        @eval @test_gradients $__f $x $scale $bias gpu_testing=$on_gpu atol=1.0f-3 rtol=1.0f-3 soft_fail=$fp16
+        __f = (args...) -> sum(groupnorm(x, args...; groups, epsilon))
+        @eval @test_gradients $__f $scale $bias gpu_testing=$on_gpu atol=1.0f-3 rtol=1.0f-3 soft_fail=$fp16
     end
-end end
+end
 
-@testset "GroupNorm Generic Fallback" begin for (mode, aType, on_gpu) in MODES
+@testset "$mode: GroupNorm Generic Fallback" for (mode, aType, on_gpu) in MODES
     for T in (Float16, Float32, Float64),
         sz in ((4, 4, 6, 2), (8, 8, 6, 2), (16, 16, 12, 2)),
         groups in (2, 3),
@@ -93,8 +93,8 @@ end end
         @test size(nt.running_var) == (groups,)
 
         fp16 = T == Float16
-        __f = (args...) -> sum(first(groupnorm(args..., rm, rv; groups, epsilon, training,
-                                                momentum=T(0.9))))
-        @eval @test_gradients $__f $x $scale $bias gpu_testing=$on_gpu atol=1.0f-2 rtol=1.0f-2 soft_fail=$fp16
+        __f = (args...) -> sum(first(groupnorm(x, args..., rm, rv; groups, epsilon,
+                                                training, momentum=T(0.9))))
+        @eval @test_gradients $__f $scale $bias gpu_testing=$on_gpu atol=1.0f-2 rtol=1.0f-2 soft_fail=$fp16
     end
-end end
+end
8 changes: 2 additions & 6 deletions test/api/instancenorm.jl
@@ -38,12 +38,8 @@ end
         if __istraining(training)
             fp16 = T == Float16
             if affine
-                __f = (args...) -> sum(first(instancenorm(args...; epsilon, training)))
-                @eval @test_gradients $__f $x $scale $bias soft_fail=$fp16 atol=1.0f-2 rtol=1.0f-2 gpu_testing=$on_gpu
-            else
-                __f = (args...) -> sum(first(instancenorm(args..., scale, bias; epsilon,
-                                                           training)))
-                @eval @test_gradients $__f $x soft_fail=$fp16 atol=1.0f-2 rtol=1.0f-2 gpu_testing=$on_gpu
+                __f = (args...) -> sum(first(instancenorm(x, args...; epsilon, training)))
+                @eval @test_gradients $__f $scale $bias soft_fail=$fp16 atol=1.0f-2 rtol=1.0f-2 gpu_testing=$on_gpu
             end
         end
     end
7 changes: 2 additions & 5 deletions test/api/layernorm.jl
@@ -40,11 +40,8 @@ end

         fp16 = T == Float16
         if affine_shape === nothing
-            __f = x -> sum(_f(x, nothing, nothing))
-            @eval @test_gradients $__f $x soft_fail=$fp16 atol=1.0f-2 rtol=1.0f-2 gpu_testing=$on_gpu
-        else
-            __f = sum ∘ _f
-            @eval @test_gradients $__f $x $scale $bias soft_fail=$fp16 atol=1.0f-2 rtol=1.0f-2 gpu_testing=$on_gpu
+            __f = (sc, b) -> sum(_f(x, sc, b))
+            @eval @test_gradients $__f $scale $bias soft_fail=$fp16 atol=1.0f-2 rtol=1.0f-2 gpu_testing=$on_gpu
         end
     end
 end
