Create S3 generic return_() #133

Closed
wants to merge 3 commits into from
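The hunks below only show call sites being switched from return() to return_(); the definition of the generic itself is not part of the diff shown here. As a rough, hypothetical sketch (not taken from this PR), an S3 generic of this kind could be declared with UseMethod() and a default method that simply yields its argument:

# Hypothetical sketch, not part of the diff below: an S3 generic dispatching
# on the class of its first argument, with a default method that returns the value.
return_ <- function(x, ...) {
  UseMethod("return_")
}

return_.default <- function(x, ...) {
  x
}

Note that an ordinary function cannot exit its caller's frame the way return() does, so a wrapper like this only acts as a drop-in replacement when return_() is the last expression evaluated in a function.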
8 changes: 4 additions & 4 deletions R/absolute_error.R
@@ -31,7 +31,7 @@ ae_median_sample <- function(true_values, predictions) {
 
 ae_median <- abs(true_values - median_predictions)
 
-return(ae_median)
+return_(ae_median)
 }
 
 
@@ -66,7 +66,7 @@ ae_median_quantile <- function(true_values, predictions, quantiles = NULL,
 verbose = TRUE) {
 if (!is.null(quantiles)) {
 if (!any(quantiles == 0.5) && !any(is.na(quantiles))) {
-return(NA_real_)
+return_(NA_real_)
 if (verbose) {
 warning("in order to compute the absolute error of the median, `0.5` must be among the quantiles given. Maybe you want to use `abs_error()`?")
 }
@@ -75,7 +75,7 @@ ae_median_quantile <- function(true_values, predictions, quantiles = NULL,
 predictions <- predictions[quantiles == 0.5]
 }
 abs_error_median <- abs(true_values - predictions)
-return(abs_error_median)
+return_(abs_error_median)
 }
 
 
@@ -101,5 +101,5 @@ ae_median_quantile <- function(true_values, predictions, quantiles = NULL,
 
 
 abs_error <- function(true_values, predictions) {
-return(abs(true_values - predictions))
+return_(abs(true_values - predictions))
 }
13 changes: 5 additions & 8 deletions R/bias.R
@@ -100,7 +100,7 @@ bias <- function(true_values, predictions) {
 
 if (continuous_predictions) {
 res <- 1 - 2 * P_x
-return(res)
+return_(res)
 } else {
 # for integer case also calculate empirical cdf for (y-1)
 P_xm1 <- vapply(seq_along(true_values),
@@ -110,7 +110,7 @@
 .0)
 
 res <- 1 - (P_x + P_xm1)
-return(res)
+return_(res)
 }
 }
 
@@ -195,7 +195,7 @@ quantile_bias <- function(range, lower, upper,
 
 # deal with the point forecast case where inputs may be NA
 if (length(range) == 0 | length(lower_predictions) == 0 | length(upper_predictions) == 0) {
-return(NA_real_)
+return_(NA_real_)
 }
 
 }
@@ -217,23 +217,20 @@
 
 if (true_value == median_prediction) {
 bias <- 0
-return(bias)
 } else if (true_value < min(lower_predictions)) {
 lower <- 0
 bias <- 1 - 2 * lower
-return(bias)
 } else if (true_value > max(upper_predictions)) {
 upper <- 1
 bias <- 1 - 2 * upper
-return(bias)
 } else if (any(lower_predictions >= true_value)) {
 lower <- max(lower_quantiles[lower_predictions <= true_value])
 bias <- 1 - 2 * lower
-return(bias)
 } else if (any(upper_predictions <= true_value)){
 upper <- min(upper_quantiles[upper_predictions >= true_value])
 bias <- 1 - 2 * upper
-return(bias)
 }
+
+return_(bias)
 }
 
2 changes: 1 addition & 1 deletion R/brier_score.R
@@ -56,7 +56,7 @@ brier_score <- function (true_values, predictions) {
 # ============================================
 
 brierscore <- (sum((true_values - predictions)^2) ) / n
-return(brierscore)
+return_(brierscore)
 }
 
 
10 changes: 5 additions & 5 deletions R/eval_forecasts.R
@@ -228,7 +228,7 @@ eval_forecasts <- function(data = NULL,
 if (verbose) {
 warning("After attempting to merge, only an empty data.table was left")
 }
-return(data)
+return_(data)
 }
 }
 
@@ -346,7 +346,7 @@ eval_forecasts <- function(data = NULL,
 if (verbose) {
 message("After removing all NA true values and predictions, there were no observations left")
 }
-return(data)
+return_(data)
 }
 
 
@@ -360,7 +360,7 @@ eval_forecasts <- function(data = NULL,
 sd = sd,
 summarised = summarised,
 verbose = verbose)
-return(res)
+return_(res)
 }
 
 # Score quantile predictions -------------------------------------------------
@@ -378,7 +378,7 @@ eval_forecasts <- function(data = NULL,
 compute_relative_skill = compute_relative_skill,
 rel_skill_metric = rel_skill_metric,
 baseline = baseline)
-return(res)
+return_(res)
 }
 
 
@@ -396,7 +396,7 @@ eval_forecasts <- function(data = NULL,
 pit_plots = pit_plots,
 summarised = summarised,
 verbose = verbose)
-return(res)
+return_(res)
 }
 }
 
1 change: 1 addition & 0 deletions R/eval_forecasts_binary.R
@@ -45,6 +45,7 @@ eval_forecasts_binary <- function(data,
 by = summarise_by]
 
 }
+return_(res)
 }
 
 
2 changes: 1 addition & 1 deletion R/eval_forecasts_continuous_integer.R
@@ -169,5 +169,5 @@ eval_forecasts_sample <- function(data,
 pit_plots = pit_histograms)
 }
 
-return(res)
+return_(res)
 }
4 changes: 2 additions & 2 deletions R/eval_forecasts_helper.R
@@ -20,7 +20,7 @@ add_quantiles <- function(dt, varnames, quantiles, by) {
 na.rm = TRUE)),
 by = c(by)]
 }
-return(dt)
+return_(dt)
 }
 
 
@@ -41,5 +41,5 @@ add_sd <- function(dt, varnames, by) {
 for (varname in varnames) {
 dt[, paste0(varname, "_sd") := sd(get(varname), na.rm = TRUE), by = by]
 }
-return(dt)
+return_(dt)
 }
2 changes: 1 addition & 1 deletion R/eval_forecasts_quantile.R
@@ -190,5 +190,5 @@ eval_forecasts_quantile <- function(data,
 res[, c("quantile_coverage") := NULL]
 }
 
-return(res)
+return_(res)
 }
10 changes: 5 additions & 5 deletions R/interval_score.R
@@ -106,12 +106,12 @@ interval_score <- function(true_values,
 score <- sharpness + underprediction + overprediction
 
 if (separate_results) {
-return(list(interval_score = score,
-sharpness = sharpness,
-underprediction = underprediction,
-overprediction = overprediction))
+return_(list(interval_score = score,
+sharpness = sharpness,
+underprediction = underprediction,
+overprediction = overprediction))
 } else {
-return(score)
+return_(score)
 }
 }
 
2 changes: 1 addition & 1 deletion R/metrics_point_forecasts.R
@@ -19,7 +19,7 @@
 
 mse <- function(true_values, predictions) {
 mse <- mean((true_values - predictions)^2)
-return(mse)
+return_(mse)
 }
 
 
18 changes: 9 additions & 9 deletions R/pairwise-comparisons.R
@@ -96,7 +96,7 @@ pairwise_comparison <- function(scores,
 summarise_by = summarise_by)
 })
 
-out <- data.table::rbindlist(results)
+return_(data.table::rbindlist(results))
 }
 
 
@@ -172,7 +172,7 @@ add_rel_skill_to_eval_forecasts <- function(unsummarised_scores,
 # also delete skill metric from output
 out[, eval(rel_skill_metric) := NULL]
 
-return(out)
+return_(out)
 }
 
 
@@ -210,7 +210,7 @@ pairwise_comparison_one_group <- function(scores,
 
 # if there aren't enough models to do any comparison, return NULL
 if (length(models) < 2) {
-return(NULL)
+return_(NULL)
 }
 
 # the overlap is obtained by merging the available data for one model with
@@ -303,7 +303,7 @@ pairwise_comparison_one_group <- function(scores,
 data.table::setnames(out, old = c("ratio", "theta", "rel_to_baseline"),
 new = c("mean_scores_ratio", "relative_skill", "scaled_rel_skill"))
 
-return(out)
+return_(out)
 }
 
 
@@ -355,7 +355,7 @@ compare_two_models <- function(scores,
 unique(overlap)
 
 if (nrow(overlap) == 0) {
-return(list(ratio = NA_real_, pval = NA_real_))
+return_(list(ratio = NA_real_, pval = NA_real_))
 }
 
 values_x <- overlap[[paste0(metric, ".x")]]
@@ -381,8 +381,8 @@ unique(overlap)
 # alternative: do a paired t-test on ranks?
 pval <- wilcox.test(values_x, values_y, paired = TRUE)$p.value
 }
-return(list(mean_scores_ratio = ratio,
-pval = pval))
+return_(list(mean_scores_ratio = ratio,
+pval = pval))
 }
 
 
@@ -471,7 +471,7 @@ plot_pairwise_comparison <- function(comparison_result,
 right = FALSE,
 labels = plot_scales)
 # scale[is.na(scale)] <- 0
-return(as.numeric(as.character(scale)))
+return_(as.numeric(as.character(scale)))
 }
 
 if (type[1] == "together") {
@@ -646,7 +646,7 @@ plot_pairwise_comparison <- function(comparison_result,
 }
 }
 
-return(plot)
+return_(plot)
 }
 
 
10 changes: 5 additions & 5 deletions R/pit.R
@@ -220,7 +220,7 @@ pit <- function(true_values,
 }
 }
 
-return(out)
+return_(out)
 }
 
 
@@ -308,7 +308,7 @@ pit_df <- function(data,
 out$u <- res$u
 }
 
-return(out)
+return_(out)
 }
 
 
@@ -377,7 +377,7 @@ pit_df_fast <- function(data,
 value.name = "prediction")
 
 
-return(data)
+return_(data)
 }
 
 
@@ -421,7 +421,7 @@ hist_PIT <- function(PIT_samples,
 ggplot2::labs(caption = paste0("p-value of Andersen-Darling test for uniformity: ",
 round(caption, 3)))
 
-return(hist_PIT)
+return_(hist_PIT)
 }
 
 
@@ -458,6 +458,6 @@ hist_PIT_quantile <- function(PIT_samples,
 ggplot2::ylab("Frequency") +
 ggplot2::labs()
 
-return(hist_PIT)
+return_(hist_PIT)
 }
 