Commit 530457c: move GraphNeuralNetworks.jl to TestItems.jl (#517)

Authored by CarloLucibello on Nov 12, 2024
1 parent: e1910ca
Showing 10 changed files with 568 additions and 599 deletions.
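
For context: the commit replaces plain @testset-based test files with TestItems.jl test items, which can be discovered and run individually. Below is a minimal sketch of the pattern used throughout the hunks that follow, assuming TestItems.jl's @testmodule/@testitem macros; the contents of the shared TestModule and the GRAPH_TYPES constant are illustrative assumptions, not copied from this commit.

using TestItems

# Shared setup, evaluated once and imported by test items via setup=[TestModule].
@testmodule TestModule begin
    using GraphNeuralNetworks, Graphs
    # Assumed: the graph backends iterated over by the test items below.
    const GRAPH_TYPES = [:coo, :dense, :sparse]
end

# Each @testitem is an independent unit that the runner can execute in isolation.
@testitem "smoke test" setup=[TestModule] begin
    using .TestModule
    using GraphNeuralNetworks
    g = rand_graph(4, 6)   # small random GNNGraph: 4 nodes, 6 edges
    @test g.num_nodes == 4
end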
10 changes: 5 additions & 5 deletions GraphNeuralNetworks/Project.toml
@@ -21,24 +21,22 @@ Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[weakdeps]
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"

# [extensions]
# GraphNeuralNetworksCUDAExt = "CUDA"

[compat]
CUDA = "4, 5"
ChainRulesCore = "1"
Flux = "0.14"
Functors = "0.4.1"
Graphs = "1.12"
GNNGraphs = "1.0"
GNNlib = "0.2"
Graphs = "1.12"
LinearAlgebra = "1"
MLUtils = "0.4"
MacroTools = "0.5"
NNlib = "0.9"
Random = "1"
Reexport = "1"
Statistics = "1"
TestItemRunner = "1.0.5"
cuDNN = "1"
julia = "1.10"

@@ -53,8 +51,10 @@ InlineStrings = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48"
MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
TestItemRunner = "f8b46487-2199-4994-9208-9a1283c18c0a"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd"

[targets]
test = ["Test", "MLDatasets", "Adapt", "DataFrames", "InlineStrings", "SparseArrays", "Graphs", "Zygote", "FiniteDifferences", "ChainRulesTestUtils", "CUDA", "cuDNN"]
test = ["Test", "TestItemRunner", "MLDatasets", "Adapt", "DataFrames", "InlineStrings",
"SparseArrays", "Graphs", "Zygote", "FiniteDifferences", "ChainRulesTestUtils", "CUDA", "cuDNN"]
186 changes: 95 additions & 91 deletions GraphNeuralNetworks/test/examples/node_classification_cora.jl
@@ -1,107 +1,111 @@
using Flux
using Flux: onecold, onehotbatch
using Flux.Losses: logitcrossentropy
using GraphNeuralNetworks
using MLDatasets: Cora
using Statistics, Random
using CUDA
CUDA.allowscalar(false)
@testitem "Training Example" setup=[TestModule] begin
using .TestModule
using Flux
using Flux: onecold, onehotbatch
using Flux.Losses: logitcrossentropy
using GraphNeuralNetworks
using MLDatasets: Cora
using Statistics, Random
using CUDA
CUDA.allowscalar(false)

function eval_loss_accuracy(X, y, ids, model, g)
ŷ = model(g, X)
l = logitcrossentropy(ŷ[:, ids], y[:, ids])
acc = mean(onecold(ŷ[:, ids]) .== onecold(y[:, ids]))
return (loss = round(l, digits = 4), acc = round(acc * 100, digits = 2))
end
function eval_loss_accuracy(X, y, ids, model, g)
ŷ = model(g, X)
l = logitcrossentropy(ŷ[:, ids], y[:, ids])
acc = mean(onecold(ŷ[:, ids]) .== onecold(y[:, ids]))
return (loss = round(l, digits = 4), acc = round(acc * 100, digits = 2))
end

# arguments for the `train` function
Base.@kwdef mutable struct Args
η = 5.0f-3 # learning rate
epochs = 10 # number of epochs
seed = 17 # set seed > 0 for reproducibility
usecuda = false # if true use cuda (if available)
nhidden = 64 # dimension of hidden features
end
# arguments for the `train` function
Base.@kwdef mutable struct Args
η = 5.0f-3 # learning rate
epochs = 10 # number of epochs
seed = 17 # set seed > 0 for reproducibility
usecuda = false # if true use cuda (if available)
nhidden = 64 # dimension of hidden features
end

function train(Layer; verbose = false, kws...)
args = Args(; kws...)
args.seed > 0 && Random.seed!(args.seed)
function train(Layer; verbose = false, kws...)
args = Args(; kws...)
args.seed > 0 && Random.seed!(args.seed)

if args.usecuda && CUDA.functional()
device = Flux.gpu
args.seed > 0 && CUDA.seed!(args.seed)
else
device = Flux.cpu
end
if args.usecuda && CUDA.functional()
device = Flux.gpu
args.seed > 0 && CUDA.seed!(args.seed)
else
device = Flux.cpu
end

# LOAD DATA
dataset = Cora()
classes = dataset.metadata["classes"]
g = mldataset2gnngraph(dataset) |> device
X = g.ndata.features
y = onehotbatch(g.ndata.targets |> cpu, classes) |> device # remove when https://github.com/FluxML/Flux.jl/pull/1959 tagged
train_mask = g.ndata.train_mask
test_mask = g.ndata.test_mask
ytrain = y[:, train_mask]
# LOAD DATA
dataset = Cora()
classes = dataset.metadata["classes"]
g = mldataset2gnngraph(dataset) |> device
X = g.ndata.features
y = onehotbatch(g.ndata.targets |> cpu, classes) |> device # remove when https://github.com/FluxML/Flux.jl/pull/1959 tagged
train_mask = g.ndata.train_mask
test_mask = g.ndata.test_mask
ytrain = y[:, train_mask]

nin, nhidden, nout = size(X, 1), args.nhidden, length(classes)
nin, nhidden, nout = size(X, 1), args.nhidden, length(classes)

## DEFINE MODEL
model = GNNChain(Layer(nin, nhidden),
# Dropout(0.5),
Layer(nhidden, nhidden),
Dense(nhidden, nout)) |> device
## DEFINE MODEL
model = GNNChain(Layer(nin, nhidden),
# Dropout(0.5),
Layer(nhidden, nhidden),
Dense(nhidden, nout)) |> device

opt = Flux.setup(Adam(args.η), model)
opt = Flux.setup(Adam(args.η), model)

## TRAINING
function report(epoch)
train = eval_loss_accuracy(X, y, train_mask, model, g)
test = eval_loss_accuracy(X, y, test_mask, model, g)
println("Epoch: $epoch Train: $(train) Test: $(test)")
end
## TRAINING
function report(epoch)
train = eval_loss_accuracy(X, y, train_mask, model, g)
test = eval_loss_accuracy(X, y, test_mask, model, g)
println("Epoch: $epoch Train: $(train) Test: $(test)")
end

verbose && report(0)
@time for epoch in 1:(args.epochs)
grad = Flux.gradient(model) do model
ŷ = model(g, X)
logitcrossentropy(ŷ[:, train_mask], ytrain)
verbose && report(0)
@time for epoch in 1:(args.epochs)
grad = Flux.gradient(model) do model
ŷ = model(g, X)
logitcrossentropy(ŷ[:, train_mask], ytrain)
end
Flux.update!(opt, model, grad[1])
verbose && report(epoch)
end
Flux.update!(opt, model, grad[1])
verbose && report(epoch)
end

train_res = eval_loss_accuracy(X, y, train_mask, model, g)
test_res = eval_loss_accuracy(X, y, test_mask, model, g)
return train_res, test_res
end
train_res = eval_loss_accuracy(X, y, train_mask, model, g)
test_res = eval_loss_accuracy(X, y, test_mask, model, g)
return train_res, test_res
end

function train_many(; usecuda = false)
for (layer, Layer) in [
("GCNConv", (nin, nout) -> GCNConv(nin => nout, relu)),
("ResGatedGraphConv", (nin, nout) -> ResGatedGraphConv(nin => nout, relu)),
("GraphConv", (nin, nout) -> GraphConv(nin => nout, relu, aggr = mean)),
("SAGEConv", (nin, nout) -> SAGEConv(nin => nout, relu)),
("GATConv", (nin, nout) -> GATConv(nin => nout, relu)),
("GINConv", (nin, nout) -> GINConv(Dense(nin, nout, relu), 0.01, aggr = mean)),
("TransformerConv",
(nin, nout) -> TransformerConv(nin => nout, concat = false,
add_self_loops = true, root_weight = false,
heads = 2)),
## ("ChebConv", (nin, nout) -> ChebConv(nin => nout, 2)), # not working on gpu
## ("NNConv", (nin, nout) -> NNConv(nin => nout)), # needs edge features
## ("GatedGraphConv", (nin, nout) -> GatedGraphConv(nout, 2)), # needs nin = nout
## ("EdgeConv",(nin, nout) -> EdgeConv(Dense(2nin, nout, relu))), # Fits the training set but does not generalize well
]
@show layer
@time train_res, test_res = train(Layer; usecuda, verbose = false)
# @show train_res, test_res
@test train_res.acc > 94
@test test_res.acc > 69
function train_many(; usecuda = false)
for (layer, Layer) in [
("GCNConv", (nin, nout) -> GCNConv(nin => nout, relu)),
("ResGatedGraphConv", (nin, nout) -> ResGatedGraphConv(nin => nout, relu)),
("GraphConv", (nin, nout) -> GraphConv(nin => nout, relu, aggr = mean)),
("SAGEConv", (nin, nout) -> SAGEConv(nin => nout, relu)),
("GATConv", (nin, nout) -> GATConv(nin => nout, relu)),
("GINConv", (nin, nout) -> GINConv(Dense(nin, nout, relu), 0.01, aggr = mean)),
("TransformerConv",
(nin, nout) -> TransformerConv(nin => nout, concat = false,
add_self_loops = true, root_weight = false,
heads = 2)),
## ("ChebConv", (nin, nout) -> ChebConv(nin => nout, 2)), # not working on gpu
## ("NNConv", (nin, nout) -> NNConv(nin => nout)), # needs edge features
## ("GatedGraphConv", (nin, nout) -> GatedGraphConv(nout, 2)), # needs nin = nout
## ("EdgeConv",(nin, nout) -> EdgeConv(Dense(2nin, nout, relu))), # Fits the training set but does not generalize well
]
@show layer
@time train_res, test_res = train(Layer; usecuda, verbose = false)
# @show train_res, test_res
@test train_res.acc > 94
@test test_res.acc > 69
end
end
end

train_many(usecuda = false)
if TEST_GPU
train_many(usecuda = true)
train_many(usecuda = false)
# #TODO
# if TEST_GPU
# train_many(usecuda = true)
# end
end
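
Because the example above is now a self-contained @testitem, it can be selected and run on its own through TestItemRunner's filter keyword. The sketch below assumes the filter argument exposes the item name as described in the TestItemRunner.jl documentation; it is an illustration, not part of this commit.

using TestItemRunner

# Run only the "Training Example" item, e.g. while iterating on this file locally.
@run_package_tests filter = ti -> ti.name == "Training Example"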
103 changes: 54 additions & 49 deletions GraphNeuralNetworks/test/layers/basic.jl
@@ -1,57 +1,60 @@
@testset "GNNChain" begin
n, din, d, dout = 10, 3, 4, 2
deg = 4

g = GNNGraph(random_regular_graph(n, deg),
graph_type = GRAPH_T,
ndata = randn(Float32, din, n))
x = g.ndata.x

gnn = GNNChain(GCNConv(din => d),
LayerNorm(d),
x -> tanh.(x),
GraphConv(d => d, tanh),
Dropout(0.5),
Dense(d, dout))

testmode!(gnn)

test_layer(gnn, g, rtol = 1e-5, exclude_grad_fields = [:μ, :σ²])

@testset "constructor with names" begin
m = GNNChain(GCNConv(din => d),
@testitem "GNNChain" setup=[TestModule] begin
using .TestModule
@testset "GNNChain $GRAPH_T" for GRAPH_T in GRAPH_TYPES
n, din, d, dout = 10, 3, 4, 2
deg = 4

g = GNNGraph(random_regular_graph(n, deg),
graph_type = GRAPH_T,
ndata = randn(Float32, din, n))
x = g.ndata.x

gnn = GNNChain(GCNConv(din => d),
LayerNorm(d),
x -> tanh.(x),
GraphConv(d => d, tanh),
Dropout(0.5),
Dense(d, dout))

m2 = GNNChain(enc = m,
dec = DotDecoder())
Flux.testmode!(gnn)

@test m2[:enc] === m
@test m2(g, x) == m2[:dec](g, m2[:enc](g, x))
end
test_layer(gnn, g, rtol = 1e-5, exclude_grad_fields = [:μ, :σ²])

@testset "constructor with vector" begin
m = GNNChain(GCNConv(din => d),
LayerNorm(d),
x -> tanh.(x),
Dense(d, dout))
m2 = GNNChain([m.layers...])
@test m2(g, x) == m(g, x)
end
@testset "constructor with names" begin
m = GNNChain(GCNConv(din => d),
LayerNorm(d),
x -> tanh.(x),
Dense(d, dout))

@testset "Parallel" begin
AddResidual(l) = Parallel(+, identity, l)
m2 = GNNChain(enc = m,
dec = DotDecoder())

gnn = GNNChain(GraphConv(din => d, tanh),
LayerNorm(d),
AddResidual(GraphConv(d => d, tanh)),
BatchNorm(d),
Dense(d, dout))
@test m2[:enc] === m
@test m2(g, x) == m2[:dec](g, m2[:enc](g, x))
end

@testset "constructor with vector" begin
m = GNNChain(GCNConv(din => d),
LayerNorm(d),
x -> tanh.(x),
Dense(d, dout))
m2 = GNNChain([m.layers...])
@test m2(g, x) == m(g, x)
end

@testset "Parallel" begin
AddResidual(l) = Parallel(+, identity, l)

gnn = GNNChain(GraphConv(din => d, tanh),
LayerNorm(d),
AddResidual(GraphConv(d => d, tanh)),
BatchNorm(d),
Dense(d, dout))

trainmode!(gnn)
Flux.trainmode!(gnn)

test_layer(gnn, g, rtol = 1e-4, atol=1e-4, exclude_grad_fields = [:μ, :σ²])
test_layer(gnn, g, rtol = 1e-4, atol=1e-4, exclude_grad_fields = [:μ, :σ²])
end
end

@testset "Only graph input" begin
@@ -67,27 +70,29 @@
end
end

@testset "WithGraph" begin
@testitem "WithGraph" setup=[TestModule] begin
using .TestModule
x = rand(Float32, 2, 3)
g = GNNGraph([1, 2, 3], [2, 3, 1], ndata = x)
model = SAGEConv(2 => 3)
wg = WithGraph(model, g)
# No need to feed the graph to `wg`
@test wg(x) == model(g, x)
@test Flux.params(wg) == Flux.params(model)
@test Flux.trainables(wg) == Flux.trainables(model)
g2 = GNNGraph([1, 1, 2, 3], [2, 4, 1, 1])
x2 = rand(Float32, 2, 4)
# WithGraph will ignore the internal graph if fed with a new one.
@test wg(g2, x2) == model(g2, x2)

wg = WithGraph(model, g, traingraph = false)
@test length(Flux.params(wg)) == length(Flux.params(model))
@test length(Flux.trainables(wg)) == length(Flux.trainables(model))

wg = WithGraph(model, g, traingraph = true)
@test length(Flux.params(wg)) == length(Flux.params(model)) + length(Flux.params(g))
@test length(Flux.trainables(wg)) == length(Flux.trainables(model)) + length(Flux.trainables(g))
end

@testset "Flux restructure" begin
@testitem "Flux.restructure" setup=[TestModule] begin
using .TestModule
chain = GNNChain(GraphConv(2 => 2))
params, restructure = Flux.destructure(chain)
@test restructure(params) isa GNNChain
(diff truncated; remaining changed files not shown)
