-
Notifications
You must be signed in to change notification settings - Fork 10
/
demo.R
executable file
·73 lines (56 loc) · 2.37 KB
/
demo.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
source("bayesian_multitask_multiple_kernel_learning_train.R")
source("bayesian_multitask_multiple_kernel_learning_test.R")
# Initialize the algorithm parameters.
# For the gamma priors below, three (alpha, beta) settings are worth trying:
#   (1, 1)         => default priors
#   (1e-10, 1e+10) => good for obtaining sparsity
#   (1e-10, 1e-10) => good for small sample size problems (like in Nature Biotechnology paper)
parameters <- list(
  # gamma prior on sample weights
  alpha_lambda = 1,
  beta_lambda = 1,
  # gamma prior on intermediate noise
  alpha_upsilon = 1,
  beta_upsilon = 1,
  # gamma prior on bias
  alpha_gamma = 1,
  beta_gamma = 1,
  # gamma prior on kernel weights
  alpha_omega = 1,
  beta_omega = 1,
  # gamma prior on output noise
  alpha_epsilon = 1,
  beta_epsilon = 1,
  # number of variational iterations
  iteration = 200,
  # set to 1 to calculate and store the lower bound values during training
  progress = 0,
  # seed for the random number generator used to initialize random variables
  seed = 1606
)
#set the number of tasks (e.g., the number of compounds in Nature Biotechnology paper)
#NOTE: a descriptive name is used instead of "T", which would mask R's shorthand for TRUE
number_of_tasks <- ?? #replace ?? with an integer, e.g., number_of_tasks <- 10
#set the number of kernels (e.g., the number of views in Nature Biotechnology paper)
number_of_kernels <- ?? #replace ?? with an integer, e.g., number_of_kernels <- 3
#initialize the kernels and outputs of each task for training
Ktrain <- vector("list", number_of_tasks)
ytrain <- vector("list", number_of_tasks)
#seq_len() is used instead of 1:n so the loop body is skipped cleanly when the count is 0
for (t in seq_len(number_of_tasks)) {
  Ktrain[[t]] <- ?? #should be an Ntra x Ntra x P matrix containing similarity values between training samples of task t
  ytrain[[t]] <- ?? #should be an Ntra x 1 matrix containing target outputs of task t
}
#perform training (function defined in the sourced training script)
state <- bayesian_multitask_multiple_kernel_learning_train(Ktrain, ytrain, parameters)
#display the kernel weights (the kernel-weight entries follow the task entries in state$be$mu)
print(state$be$mu[(number_of_tasks + 1):(number_of_tasks + number_of_kernels)])
#initialize the kernels of each task for testing
Ktest <- vector("list", number_of_tasks)
for (t in seq_len(number_of_tasks)) {
  Ktest[[t]] <- ?? #should be an Ntra x Ntest x P matrix containing similarity values between training and test samples of task t
}
#perform prediction (function defined in the sourced test script)
prediction <- bayesian_multitask_multiple_kernel_learning_test(Ktest, state)
#display the predictions for each task
for (t in seq_len(number_of_tasks)) {
  print(prediction$y[[t]]$mu)
}