CompatHelper: bump compat for "LoopVectorization" to "0.11" #54

Merged
2 changes: 1 addition & 1 deletion Project.toml
@@ -35,7 +35,7 @@
 DataFrames = "0.21, 0.22"
 DataStructures = "0.17, 0.18"
 LightGraphs = "1.3"
 LogicCircuits = "0.2.2"
-LoopVectorization = "0.8.20"
+LoopVectorization = "0.8.20, 0.11"
 MLDatasets = "0.4, 0.5"
 MetaGraphs = "0.6"
 Metis = "1.0"
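A note on the compat change: in a Julia `[compat]` section, a comma-separated entry is the union of caret-style ranges, so `"0.8.20, 0.11"` accepts any LoopVectorization version in `[0.8.20, 0.9.0)` or `[0.11.0, 0.12.0)`, while 0.9.x and 0.10.x stay excluded. A quick way to check this is sketched below, using `Pkg.Types.semver_spec`, an internal Pkg helper rather than a stable public API:

```julia
using Pkg

# The compat entry "0.8.20, 0.11" parses as a union of two caret ranges.
spec = Pkg.Types.semver_spec("0.8.20, 0.11")

@assert v"0.8.25" in spec    # inside [0.8.20, 0.9.0)
@assert v"0.11.2" in spec    # inside [0.11.0, 0.12.0)
@assert v"0.9.0" ∉ spec      # between the two ranges: excluded
@assert v"0.10.5" ∉ spec
```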
5 changes: 2 additions & 3 deletions src/Logistic/parameters.jl
@@ -1,7 +1,6 @@
 export learn_parameters, to_onehot

 using CUDA
-using LoopVectorization: @avx, vifelse

 """
     LogisticCircuit parameter learning through gradient descent
@@ -49,9 +48,9 @@ function update_parameters_cpu(bc, data, labels, cl, step_size)

     @inline function on_edge_float(flows, values, prime, sub, element, grandpa, single_child, weights)
         lock(params_lock) do # TODO: move lock to inner loop?
-            @avx for i = 1:size(flows, 1)
+            @simd for i = 1:size(flows, 1) # adding @avx here might give incorrect results
                 @inbounds edge_flow = values[i, prime] * values[i, sub] / values[i, grandpa] * flows[i, grandpa]
-                edge_flow = vifelse(isfinite(edge_flow), edge_flow, zero(eltype(flows)))
+                edge_flow = ifelse(isfinite(edge_flow), edge_flow, zero(eltype(flows)))
                 for class = 1:nc
                     @inbounds bc.params[element, class] -= (cl[i, class] - labels[i, class]) * edge_flow * step_size
                 end
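Why the switch away from `@avx` here: each iteration of the `i` loop accumulates into the same `bc.params[element, class]` slots, and `@avx` assumes iterations can be reordered and vectorized independently, so this scatter-style reduction is a plausible source of the incorrect results the inline comment warns about; `@simd` makes a much weaker promise and keeps the accumulation correct. With `@avx` gone, LoopVectorization's `vifelse` is no longer needed and `Base.ifelse` suffices. Below is a minimal standalone sketch of the resulting pattern; `accumulate_flows!` and its arguments are illustrative names, not code from the package:

```julia
# Sketch of the guarded-flow pattern; only the loop structure mirrors the PR.
function accumulate_flows!(acc, values, flows, prime, sub, grandpa)
    @simd for i = 1:size(flows, 1)
        @inbounds edge_flow = values[i, prime] * values[i, sub] /
                              values[i, grandpa] * flows[i, grandpa]
        # 0/0 or x/0 above yields NaN/Inf; ifelse evaluates both branches
        # eagerly, which zeroes those entries and keeps the loop branch-free.
        edge_flow = ifelse(isfinite(edge_flow), edge_flow, zero(eltype(flows)))
        @inbounds acc[i] += edge_flow
    end
    return acc
end

# Usage on toy data: a zero denominator produces a non-finite flow, zeroed out.
values = [1.0 2.0 0.0; 0.5 0.5 2.0]
flows  = [1.0 1.0 1.0; 1.0 1.0 1.0]
acc    = zeros(2)
accumulate_flows!(acc, values, flows, 1, 2, 3)  # acc == [0.0, 0.125]
```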
6 changes: 3 additions & 3 deletions src/Logistic/queries.jl
@@ -1,7 +1,7 @@
 export class_likelihood_per_instance, class_weights_per_instance

 using CUDA
-using LoopVectorization: @avx, vifelse
+using LoopVectorization: @avx


 """
@@ -59,9 +59,9 @@ function class_weights_per_instance_cpu(bc, data)

     @inline function on_edge_float(flows, values, prime, sub, element, grandpa, single_child, weights)
         lock(cw_lock) do # TODO: move lock to inner loop?
-            @avx for i = 1:size(flows, 1)
+            @simd for i = 1:size(flows, 1) # adding @avx here might give incorrect results
                 @inbounds edge_flow = values[i, prime] * values[i, sub] / values[i, grandpa] * flows[i, grandpa]
-                edge_flow = vifelse(isfinite(edge_flow), edge_flow, zero(eltype(flows)))
+                edge_flow = ifelse(isfinite(edge_flow), edge_flow, zero(eltype(flows)))
                 for class = 1:nc
                     @inbounds cw[i, class] += edge_flow * bc.params[element, class]
                 end
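Here queries.jl keeps `@avx` for its other loops, so only `vifelse` drops out of the import. For reference, `vifelse` is the SIMD-aware select that LoopVectorization expects inside `@avx` bodies, where lanes are processed as vectors and the condition is a mask; outside `@avx`, the scalar `Base.ifelse` does the same job. A small side-by-side sketch, assuming `vifelse` is importable as it is in the versions this PR spans:

```julia
using LoopVectorization: @avx, vifelse

x = [1.0, 0.0, 2.0]
y = similar(x)

# Inside @avx the condition is a SIMD mask, so the vectorized select is used:
@avx for i in eachindex(x)
    r = 1.0 / x[i]
    y[i] = vifelse(isfinite(r), r, 0.0)
end

# In a plain or @simd loop the scalar Base.ifelse suffices:
@simd for i in eachindex(x)
    r = 1.0 / x[i]
    y[i] = ifelse(isfinite(r), r, 0.0)
end
```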
24 changes: 18 additions & 6 deletions test/queries/marginal_flow_tests.jl
@@ -3,11 +3,14 @@
 using LogicCircuits
 using ProbabilisticCircuits
 using DataFrames: DataFrame
 using CUDA
+using Random
+using Suppressor

 include("../helper/gpu.jl")

 @testset "Marginals" begin
+    EPS = 1e-5
+
     prob_circuit = zoo_psdd("little_4var.psdd");
     @test prob_circuit(false, false, false, missing) ≈ -1.0498221

@@ -22,16 +25,21 @@
                  0.3499999999999; 0.1; 1.0; 0.8]

     calc_prob = exp.(MAR(prob_circuit, data_marg))
-    @test true_prob ≈ calc_prob atol=1e-7
+    @test true_prob ≈ calc_prob atol=EPS
     @test marginal_log_likelihood_avg(prob_circuit, data_marg) ≈ sum(log.(true_prob))/7
-    @test marginal_all(prob_circuit, data_marg) ≈
-        [0.0 -Inf -Inf -Inf -Inf -Inf 0.0 0.0 0.0 0.0 -0.356675 -2.30259 -2.65926
+    marginal_all_result = marginal_all(prob_circuit, data_marg);
+
+    marginal_all_true_answer = [0.0 -Inf -Inf -Inf -Inf -Inf 0.0 0.0 0.0 0.0 -0.356675 -2.30259 -2.65926
         0.0 -Inf -Inf 0.0 0.0 -Inf 0.0 -Inf -Inf 0.0 -2.30259 -1.20397 -3.50656
         0.0 -Inf -Inf -Inf 0.0 0.0 0.0 0.0 -Inf -Inf -0.356675 -1.60944 -1.96611
         0.0 -Inf -Inf -Inf -Inf 0.0 0.0 0.0 0.0 0.0 -0.356675 -0.693147 -1.04982
         0.0 -Inf 0.0 0.0 -Inf 0.0 0.0 -Inf 0.0 0.0 -1.60944 -0.693147 -2.30259
         0.0 -Inf 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -2.98023f-8 -7.45058f-9 -3.72529f-8
-        0.0 -Inf -Inf 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.223144 -7.45058f-9 -0.223144]
+        0.0 -Inf -Inf 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.223144 -7.45058f-9 -0.223144];
+
+    for pair in zip(marginal_all_result, marginal_all_true_answer)
+        @test pair[1] ≈ pair[2] atol=EPS
+    end

     cpu_gpu_agree_approx(data_marg) do d
         marginal_all(prob_circuit, d)
@@ -64,11 +72,15 @@
     end

     # Strudel Marginal Flow Test
-    samples, _ = sample(prob_circuit, 100000)
+    rng = MersenneTwister(100003); # Fix the seed
+    samples, _ = sample(prob_circuit, 100000; rng)
     mix, weights, _ = learn_strudel(DataFrame(convert(BitArray, samples)); num_mix = 10,
                                     init_maxiter = 20, em_maxiter = 100, verbose = false)
     mix_calc_prob = exp.(MAR(mix, data_marg, weights))
-    @test true_prob ≈ mix_calc_prob atol = 0.1
+    for mix_pair in zip(true_prob, mix_calc_prob)
+        @test mix_pair[1] ≈ mix_pair[2] atol=0.1
+    end

     test_complete_mar(mix, data_full, weights, 0.1)
 end
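Two things are worth noting about the test changes. First, seeding a `MersenneTwister` and passing it to `sample` makes the Strudel test reproducible, so the loose `atol = 0.1` tolerance is checked against the same learned mixture on every run. Second, the switch from whole-array `≈` to elementwise loops is not just cosmetic: the expected matrix contains `-Inf` entries, and for arrays `isapprox` compares `norm(x - y)`, where `-Inf - (-Inf)` is `NaN`, so the array-level test could never pass; elementwise, `-Inf ≈ -Inf` short-circuits on equality. A minimal demonstration, using only Base semantics:

```julia
x = [0.0, -Inf, -0.356675]
y = [0.0, -Inf, -0.356676]

x ≈ y                            # false: norm(x - y) is NaN
all(isapprox.(x, y; atol=1e-5))  # true: -Inf == -Inf short-circuits, and
                                 # finite entries are compared with atol
```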