Commit ecab122

replace windows_with_hadoop, is_windows

felixcheung committed Jun 11, 2017
1 parent 34a3b27 commit ecab122

Showing 12 changed files with 35 additions and 39 deletions.
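
In short, this commit removes the is_cran() helper, renames not_cran_or_windows_with_hadoop() to windows_with_hadoop(), and routes direct .Platform$OS.type == "windows" checks through the existing is_windows() helper, so the affected test guards no longer depend on the NOT_CRAN environment variable. For reference, a consolidated sketch of the helpers as they read after this commit (pieced together from the R/pkg/R/utils.R hunk below, not a literal hunk):

# R/pkg/R/utils.R after this commit (consolidated view)
is_windows <- function() {
  .Platform$OS.type == "windows"
}

hadoop_home_set <- function() {
  !identical(Sys.getenv("HADOOP_HOME"), "")
}

# TRUE everywhere except on Windows with no HADOOP_HOME set
windows_with_hadoop <- function() {
  !is_windows() || hadoop_home_set()
}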
2 changes: 1 addition & 1 deletion R/pkg/R/install.R
@@ -267,7 +267,7 @@ hadoopVersionName <- function(hadoopVersion) {
# The implementation refers to appdirs package: https://pypi.python.org/pypi/appdirs and
# adapt to Spark context
sparkCachePath <- function() {
if (.Platform$OS.type == "windows") {
if (is_windows()) {
winAppPath <- Sys.getenv("LOCALAPPDATA", unset = NA)
if (is.na(winAppPath)) {
stop(paste("%LOCALAPPDATA% not found.",
8 changes: 2 additions & 6 deletions R/pkg/R/utils.R
@@ -908,10 +908,6 @@ isAtomicLengthOne <- function(x) {
is.atomic(x) && length(x) == 1
}

is_cran <- function() {
!identical(Sys.getenv("NOT_CRAN"), "true")
}

is_windows <- function() {
.Platform$OS.type == "windows"
}
@@ -920,6 +916,6 @@ hadoop_home_set <- function() {
!identical(Sys.getenv("HADOOP_HOME"), "")
}

not_cran_or_windows_with_hadoop <- function() {
!is_cran() && (!is_windows() || hadoop_home_set())
windows_with_hadoop <- function() {
!is_windows() || hadoop_home_set()
}
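
The practical effect on each test file is a one-line change per guard. A minimal illustrative sketch (not one of the hunks in this commit; the tempfile pattern and model are placeholders):

# Before: body ran only when NOT_CRAN was "true" and, on Windows, HADOOP_HOME was set
if (not_cran_or_windows_with_hadoop()) {
  modelPath <- tempfile(pattern = "spark-example", fileext = ".tmp")  # placeholder pattern
  write.ml(model, modelPath)
  expect_error(write.ml(model, modelPath))
}

# After: only the Windows/Hadoop condition remains
if (windows_with_hadoop()) {
  modelPath <- tempfile(pattern = "spark-example", fileext = ".tmp")
  write.ml(model, modelPath)
  expect_error(write.ml(model, modelPath))
}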
2 changes: 1 addition & 1 deletion R/pkg/tests/fulltests/test_Windows.R
@@ -17,7 +17,7 @@
context("Windows-specific tests")

test_that("sparkJars tag in SparkContext", {
if (.Platform$OS.type != "windows") {
if (!is_windows()) {
skip("This test is only for Windows, skipped")
}

8 changes: 4 additions & 4 deletions R/pkg/tests/fulltests/test_mllib_classification.R
@@ -49,7 +49,7 @@ test_that("spark.svmLinear", {
expect_equal(sort(as.list(take(select(prediction, "prediction"), 10))[[1]]), expected)

# Test model save and load
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-svm-linear", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -129,7 +129,7 @@ test_that("spark.logit", {
expect_true(all(abs(setosaCoefs - setosaCoefs) < 0.1))

# Test model save and load
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-logit", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -246,7 +246,7 @@ test_that("spark.mlp", {
expect_equal(head(mlpPredictions$prediction, 6), c("1.0", "0.0", "0.0", "0.0", "0.0", "0.0"))

# Test model save/load
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-mlp", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -359,7 +359,7 @@ test_that("spark.naiveBayes", {
"Yes", "Yes", "No", "No"))

# Test model save/load
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-naiveBayes", fileext = ".tmp")
write.ml(m, modelPath)
expect_error(write.ml(m, modelPath))
8 changes: 4 additions & 4 deletions R/pkg/tests/fulltests/test_mllib_clustering.R
@@ -53,7 +53,7 @@ test_that("spark.bisectingKmeans", {
c(0, 1, 2, 3))

# Test model save/load
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-bisectingkmeans", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -127,7 +127,7 @@ test_that("spark.gaussianMixture", {
expect_equal(p$prediction, c(0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1))

# Test model save/load
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-gaussianMixture", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -175,7 +175,7 @@ test_that("spark.kmeans", {
expect_true(class(summary.model$coefficients[1, ]) == "numeric")

# Test model save/load
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-kmeans", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -242,7 +242,7 @@ test_that("spark.lda with libsvm", {
expect_true(logPrior <= 0 & !is.na(logPrior))

# Test model save/load
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-lda", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
2 changes: 1 addition & 1 deletion R/pkg/tests/fulltests/test_mllib_fpm.R
@@ -62,7 +62,7 @@ test_that("spark.fpGrowth", {

expect_equivalent(expected_predictions, collect(predict(model, new_data)))

if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-fpm", fileext = ".tmp")
write.ml(model, modelPath, overwrite = TRUE)
loaded_model <- read.ml(modelPath)
2 changes: 1 addition & 1 deletion R/pkg/tests/fulltests/test_mllib_recommendation.R
@@ -37,7 +37,7 @@ test_that("spark.als", {
tolerance = 1e-4)

# Test model save/load
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-als", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
4 changes: 2 additions & 2 deletions R/pkg/tests/fulltests/test_mllib_regression.R
@@ -389,7 +389,7 @@ test_that("spark.isoreg", {
expect_equal(predict_result$prediction, c(7.0, 7.0, 6.0, 5.5, 5.0, 4.0, 1.0))

# Test model save/load
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-isoreg", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -440,7 +440,7 @@ test_that("spark.survreg", {
2.390146, 2.891269, 2.891269), tolerance = 1e-4)

# Test model save/load
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-survreg", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
18 changes: 9 additions & 9 deletions R/pkg/tests/fulltests/test_mllib_tree.R
@@ -44,7 +44,7 @@ test_that("spark.gbt", {
expect_equal(stats$numFeatures, 6)
expect_equal(length(stats$treeWeights), 20)

if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-gbtRegression", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -78,7 +78,7 @@ test_that("spark.gbt", {
expect_equal(length(grep("setosa", predictions)), 50)
expect_equal(length(grep("versicolor", predictions)), 50)

if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-gbtClassification", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -103,7 +103,7 @@ test_that("spark.gbt", {
expect_equal(stats$maxDepth, 5)

# spark.gbt classification can work on libsvm data
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
data <- read.df(absoluteSparkPath("data/mllib/sample_binary_classification_data.txt"),
source = "libsvm")
model <- spark.gbt(data, label ~ features, "classification")
@@ -142,7 +142,7 @@ test_that("spark.randomForest", {
expect_equal(stats$numTrees, 20)
expect_equal(stats$maxDepth, 5)

if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-randomForestRegression", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -176,7 +176,7 @@ test_that("spark.randomForest", {
expect_equal(length(grep("setosa", predictions)), 50)
expect_equal(length(grep("versicolor", predictions)), 50)

if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-randomForestClassification", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -213,7 +213,7 @@ test_that("spark.randomForest", {
expect_equal(length(grep("2.0", predictions)), 50)

# spark.randomForest classification can work on libsvm data
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
data <- read.df(absoluteSparkPath("data/mllib/sample_multiclass_classification_data.txt"),
source = "libsvm")
model <- spark.randomForest(data, label ~ features, "classification")
@@ -238,7 +238,7 @@ test_that("spark.decisionTree", {
expect_error(capture.output(stats), NA)
expect_true(length(capture.output(stats)) > 6)

if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-decisionTreeRegression", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -269,7 +269,7 @@ test_that("spark.decisionTree", {
expect_equal(length(grep("setosa", predictions)), 50)
expect_equal(length(grep("versicolor", predictions)), 50)

if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
modelPath <- tempfile(pattern = "spark-decisionTreeClassification", fileext = ".tmp")
write.ml(model, modelPath)
expect_error(write.ml(model, modelPath))
@@ -305,7 +305,7 @@ test_that("spark.decisionTree", {
expect_equal(length(grep("2.0", predictions)), 50)

# spark.decisionTree classification can work on libsvm data
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
data <- read.df(absoluteSparkPath("data/mllib/sample_multiclass_classification_data.txt"),
source = "libsvm")
model <- spark.decisionTree(data, label ~ features, "classification")
16 changes: 8 additions & 8 deletions R/pkg/tests/fulltests/test_sparkSQL.R
@@ -61,7 +61,7 @@ unsetHiveContext <- function() {
# Tests for SparkSQL functions in SparkR

filesBefore <- list.files(path = sparkRDir, all.files = TRUE)
sparkSession <- if (not_cran_or_windows_with_hadoop()) {
sparkSession <- if (windows_with_hadoop()) {
sparkR.session(master = sparkRTestMaster)
} else {
sparkR.session(master = sparkRTestMaster, enableHiveSupport = FALSE)
@@ -100,7 +100,7 @@ mockLinesMapType <- c("{\"name\":\"Bob\",\"info\":{\"age\":16,\"height\":176.5}}
mapTypeJsonPath <- tempfile(pattern = "sparkr-test", fileext = ".tmp")
writeLines(mockLinesMapType, mapTypeJsonPath)

if (.Platform$OS.type == "windows") {
if (is_windows()) {
Sys.setenv(TZ = "GMT")
}

@@ -320,7 +320,7 @@ test_that("createDataFrame uses files for large objects", {
})

test_that("read/write csv as DataFrame", {
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
csvPath <- tempfile(pattern = "sparkr-test", fileext = ".csv")
mockLinesCsv <- c("year,make,model,comment,blank",
"\"2012\",\"Tesla\",\"S\",\"No comment\",",
@@ -589,7 +589,7 @@ test_that("Collect DataFrame with complex types", {
})

test_that("read/write json files", {
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
# Test read.df
df <- read.df(jsonPath, "json")
expect_is(df, "SparkDataFrame")
@@ -720,7 +720,7 @@ test_that("test cache, uncache and clearCache", {
})

test_that("insertInto() on a registered table", {
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
df <- read.df(jsonPath, "json")
write.df(df, parquetPath, "parquet", "overwrite")
dfParquet <- read.df(parquetPath, "parquet")
@@ -928,7 +928,7 @@ test_that("cache(), storageLevel(), persist(), and unpersist() on a DataFrame",
})

test_that("setCheckpointDir(), checkpoint() on a DataFrame", {
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
checkpointDir <- file.path(tempdir(), "cproot")
expect_true(length(list.files(path = checkpointDir, all.files = TRUE)) == 0)

@@ -1305,7 +1305,7 @@ test_that("column calculation", {
})

test_that("test HiveContext", {
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
setHiveContext(sc)

schema <- structType(structField("name", "string"), structField("age", "integer"),
@@ -2394,7 +2394,7 @@ test_that("read/write ORC files - compression option", {
})

test_that("read/write Parquet files", {
if (not_cran_or_windows_with_hadoop()) {
if (windows_with_hadoop()) {
df <- read.df(jsonPath, "json")
# Test write.df and read.df
write.df(df, parquetPath, "parquet", mode = "overwrite")
2 changes: 1 addition & 1 deletion R/pkg/tests/fulltests/test_streaming.R
@@ -24,7 +24,7 @@ context("Structured Streaming")
sparkSession <- sparkR.session(master = sparkRTestMaster, enableHiveSupport = FALSE)

jsonSubDir <- file.path("sparkr-test", "json", "")
if (.Platform$OS.type == "windows") {
if (is_windows()) {
# file.path removes the empty separator on Windows, adds it back
jsonSubDir <- paste0(jsonSubDir, .Platform$file.sep)
}
2 changes: 1 addition & 1 deletion R/pkg/tests/run-all.R
@@ -21,7 +21,7 @@ library(SparkR)
# Turn all warnings into errors
options("warn" = 2)

if (.Platform$OS.type == "windows") {
if (is_windows()) {
Sys.setenv(TZ = "GMT")
}

