
Commit 9919fbc

Fix bulk import for Postgres on MacOs.
schuemie committed Nov 28, 2023
1 parent 59c8d38 commit 9919fbc
Showing 4 changed files with 23 additions and 14 deletions.

.settings/org.eclipse.jdt.core.prefs: 3 additions & 0 deletions
@@ -8,5 +8,8 @@ org.eclipse.jdt.core.compiler.debug.lineNumber=generate
 org.eclipse.jdt.core.compiler.debug.localVariable=generate
 org.eclipse.jdt.core.compiler.debug.sourceFile=generate
 org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
+org.eclipse.jdt.core.compiler.problem.enablePreviewFeatures=disabled
 org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
+org.eclipse.jdt.core.compiler.problem.reportPreviewFeatures=warning
+org.eclipse.jdt.core.compiler.release=disabled
 org.eclipse.jdt.core.compiler.source=1.8

NEWS.md: 2 additions & 0 deletions
@@ -5,6 +5,8 @@ Bugfixes:
 
 1. Fixed `dbFetch()` for DBI drivers, no longer ignoring `n` argument.
 
+2. Fix bulk import for Postgres on MacOs.
+
 
 DatabaseConnector 6.3.0
 =======================
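The first entry above concerns the `n` argument of `dbFetch()`. A minimal sketch of the fixed behaviour, assuming an already opened DatabaseConnector connection `conn` to a DBI-backed database and a hypothetical table `main.cars`:

result <- dbSendQuery(conn, "SELECT * FROM main.cars")
firstRows <- dbFetch(result, n = 100)  # with the fix, at most 100 rows are returned
rest <- dbFetch(result)                # default n = -1 fetches the remaining rows
dbClearResult(result)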

R/BulkLoad.R: 5 additions & 7 deletions
@@ -286,7 +286,6 @@ bulkLoadHive <- function(connection, sqlTableName, sqlFieldNames, data) {
   )
 }
 
-
 bulkLoadPostgres <- function(connection, sqlTableName, sqlFieldNames, sqlDataTypes, data) {
   logTrace(sprintf("Inserting %d rows into table '%s' using PostgreSQL bulk load", nrow(data), sqlTableName))
   startTime <- Sys.time()

@@ -306,13 +305,12 @@ bulkLoadPostgres <- function(connection, sqlTableName, sqlFieldNames, sqlDataTyp
   password <- attr(connection, "password")()
 
   if (.Platform$OS.type == "windows") {
-    winPsqlPath <- Sys.getenv("POSTGRES_PATH")
-    command <- file.path(winPsqlPath, "psql.exe")
-    if (!file.exists(command)) {
-      abort(paste("Could not find psql.exe in ", winPsqlPath))
-    }
+    command <- file.path(Sys.getenv("POSTGRES_PATH"), "psql.exe")
   } else {
-    command <- "psql"
+    command <- file.path(Sys.getenv("POSTGRES_PATH"), "psql")
   }
+  if (!file.exists(command)) {
+    abort(paste("Could not find psql.exe in ", Sys.getenv("POSTGRES_PATH")))
+  }
   headers <- paste0("(", sqlFieldNames, ")")
   if (is.null(port)) {
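In effect, the patched bulkLoadPostgres() now resolves psql from the POSTGRES_PATH environment variable on every platform (previously only on Windows) and checks for the binary afterwards. A condensed sketch of that logic; psqlName is an illustrative name, not a variable in the package:

psqlName <- if (.Platform$OS.type == "windows") "psql.exe" else "psql"
command <- file.path(Sys.getenv("POSTGRES_PATH"), psqlName)  # e.g. /Library/PostgreSQL/16/bin/psql
if (!file.exists(command)) {
  stop("Could not find ", psqlName, " in ", Sys.getenv("POSTGRES_PATH"))
}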

R/InsertTable.R: 13 additions & 7 deletions
@@ -130,7 +130,8 @@ validateInt64Insert <- function() {
 #'
 #' PostgreSQL:
 #' Uses the 'psql' executable to upload. Set the POSTGRES_PATH environment variable to the Postgres
-#' binary path, e.g. 'C:/Program Files/PostgreSQL/11/bin'.
+#' binary path, e.g. 'C:/Program Files/PostgreSQL/11/bin' on Windows or '/Library/PostgreSQL/16/bin'
+#' on MacOs.
 #'
 #' @examples
 #' \dontrun{
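The updated documentation suggests a macOS workflow along these lines; the server, credentials, schema, and table below are placeholders, and it assumes insertTable()'s bulkLoad argument to trigger the psql-based upload:

Sys.setenv(POSTGRES_PATH = "/Library/PostgreSQL/16/bin")
connection <- connect(dbms = "postgresql",
                      server = "localhost/ohdsi",
                      user = "postgres",
                      password = Sys.getenv("POSTGRES_PASSWORD"))
insertTable(connection,
            databaseSchema = "scratch",
            tableName = "cars_copy",
            data = cars,          # built-in example data frame
            bulkLoad = TRUE)
disconnect(connection)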

@@ -233,21 +234,26 @@ insertTable.default <- function(connection,
   if (!is.null(databaseSchema)) {
     tableName <- paste(databaseSchema, tableName, sep = ".")
   }
+  if (Andromeda::isAndromedaTable(data)) {
+    warn("Batch-wise uploading of Andromeda tables currently not supported. Loading entire table in memory.",
+      .frequency = "regularly",
+      .frequency_id = "useMppBulkLoad"
+    )
+    data <- collect(data)
+  }
   if (is.vector(data) && !is.list(data)) {
     data <- data.frame(x = data)
   }
-  if (length(data) < 1) {
+  if (ncol(data) < 1) {
     abort("data must have at least one column")
   }
   if (is.null(names(data))) {
     names(data) <- paste("V", 1:length(data), sep = "")
   }
-  if (length(data[[1]]) > 0) {
-    if (!is.data.frame(data)) {
+  if (!is.data.frame(data)) {
+    if (nrow(data) > 0) {
       data <- as.data.frame(data, row.names = 1:length(data[[1]]))
-    }
-  } else {
-    if (!is.data.frame(data)) {
+    } else {
       data <- as.data.frame(data)
     }
   }
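The reworked input handling first materializes Andromeda (disk-backed) tables into memory, then validates columns with ncol() instead of length(). A toy illustration of the two paths; the objects andr, vec, and df are invented for this sketch:

library(Andromeda)
andr <- andromeda(cars = cars)             # a disk-backed Andromeda table
if (isAndromedaTable(andr$cars)) {
  inMemory <- dplyr::collect(andr$cars)    # pulled into RAM before upload
}
close(andr)

vec <- 1:3                                 # a plain vector...
df <- data.frame(x = vec)                  # ...is wrapped as a one-column data frame
stopifnot(ncol(df) >= 1)                   # passes the new column check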