diff --git a/CMakeLists.txt b/CMakeLists.txt
index 18b17d30e965..688dd42c54fe 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -898,19 +898,11 @@ if(INSTALL_PYTHON_VERSIONS)
   endforeach()
 endif()
 
-if(USE_CPP_PACKAGE)
-  add_subdirectory(cpp-package)
-  target_compile_definitions(mxnet PUBLIC MXNET_USE_CPP_PACKAGE=1)
-  if(BUILD_CPP_EXAMPLES)
-    add_subdirectory(example/image-classification/predict-cpp)
-  endif()
-endif()
-
 add_subdirectory(tests)
 
 # ---[ Linter target
 find_package(Python3)
-set(LINT_DIRS "include src plugin cpp-package tests")
+set(LINT_DIRS "include src plugin tests")
 set(EXCLUDE_PATH "src/operator/contrib/ctc_include")
 add_custom_target(mxnet_lint COMMAND ${CMAKE_COMMAND} -DMSVC=${MSVC} -DPYTHON_EXECUTABLE=${Python3_EXECUTABLE} -DLINT_DIRS=${LINT_DIRS} -DPROJECT_SOURCE_DIR=${CMAKE_CURRENT_SOURCE_DIR} -DPROJECT_NAME=mxnet -DEXCLUDE_PATH=${EXCLUDE_PATH} -P ${CMAKE_CURRENT_SOURCE_DIR}/3rdparty/dmlc-core/cmake/lint.cmake)
diff --git a/Makefile b/Makefile
index 51930c782391..0bf856f677bb 100644
--- a/Makefile
+++ b/Makefile
@@ -48,10 +48,6 @@ ifndef DLPACK_PATH
 	DLPACK_PATH = $(ROOTDIR)/3rdparty/dlpack
 endif
 
-ifndef AMALGAMATION_PATH
-	AMALGAMATION_PATH = $(ROOTDIR)/amalgamation
-endif
-
 ifndef TVM_PATH
 	TVM_PATH = $(TPARTYDIR)/tvm
 endif
@@ -463,7 +459,7 @@ ifeq ($(USE_DIST_KVSTORE), 1)
 	LDFLAGS += $(PS_LDFLAGS_A)
 endif
 
-.PHONY: clean all extra-packages test lint clean_all rcpplint rcppexport roxygen\
+.PHONY: clean all extra-packages test lint clean_all roxygen\
 	cython3 cython cyclean
 
 all: lib/libmxnet.a lib/libmxnet.so $(BIN) extra-packages extension_libs
@@ -663,12 +659,6 @@ $(BIN) :
 	@mkdir -p $(@D)
 	$(CXX) $(CFLAGS) -std=c++17 -o $@ $(filter %.cpp %.o %.c %.a %.cc, $^) $(LDFLAGS)
 
-# CPP Package
-ifeq ($(USE_CPP_PACKAGE), 1)
-include cpp-package/cpp-package.mk
-CFLAGS += -DMXNET_USE_CPP_PACKAGE=1
-endif
-
 include mkldnn.mk
 include tests/cpp/unittest.mk
 
@@ -676,10 +666,10 @@ extra-packages: $(EXTRA_PACKAGES)
 
 test: $(TEST)
 
-lint: cpplint rcpplint jnilint pylint
+lint: cpplint pylint
 
 cpplint:
-	3rdparty/dmlc-core/scripts/lint.py mxnet cpp include src plugin cpp-package tests \
+	3rdparty/dmlc-core/scripts/lint.py mxnet cpp include src plugin tests \
 	--exclude_path src/operator/contrib/ctc_include include/mkldnn
 
 pylint:
@@ -727,28 +717,6 @@ cython3:
 cyclean:
 	rm -rf python/mxnet/*/*.so python/mxnet/*/*.cpp
 
-scalaclean:
-	(cd $(ROOTDIR)/scala-package && mvn clean)
-
-scalapkg:
-	(cd $(ROOTDIR)/scala-package && mvn install -DskipTests)
-
-scalainstall:
-	(cd $(ROOTDIR)/scala-package && mvn install)
-
-scalaunittest:
-	(cd $(ROOTDIR)/scala-package && mvn install)
-
-scalaintegrationtest:
-	(cd $(ROOTDIR)/scala-package && mvn integration-test -DskipTests=false)
-
-jnilint:
-	3rdparty/dmlc-core/scripts/lint.py mxnet-jnicpp cpp scala-package/native/src --exclude_path scala-package/native/src/main/native/org_apache_mxnet_native_c_api.h
-
-rclean:
-	$(RM) -r R-package/src/image_recordio.h R-package/NAMESPACE R-package/man R-package/R/mxnet_generated.R \
-		R-package/inst R-package/src/*.o R-package/src/*.so mxnet_*.tar.gz
-
 build/rat/apache-rat-0.13/apache-rat-0.13.jar:
 	mkdir -p build/rat
 	cd build/rat; \
@@ -770,25 +738,23 @@ ratcheck: build/rat/apache-rat-0.13/apache-rat-0.13.jar
 
 ifneq ($(EXTRA_OPERATORS),)
-clean: rclean cyclean $(EXTRA_PACKAGES_CLEAN)
+clean: cyclean $(EXTRA_PACKAGES_CLEAN)
 	$(RM) -r build lib bin deps *~ */*~ */*/*~ */*/*/*~
 	(cd scala-package && mvn clean) || true
 	cd $(DMLC_CORE); $(MAKE) clean; cd -
 	cd $(PS_PATH); $(MAKE) clean; cd -
 	cd $(NNVM_PATH); $(MAKE) clean; cd -
 	cd $(TVM_PATH); $(MAKE) clean; cd -
-	cd $(AMALGAMATION_PATH); $(MAKE) clean; cd -
 	$(RM) -r $(patsubst %, %/*.d, $(EXTRA_OPERATORS)) $(patsubst %, %/*/*.d, $(EXTRA_OPERATORS))
 	$(RM) -r $(patsubst %, %/*.o, $(EXTRA_OPERATORS)) $(patsubst %, %/*/*.o, $(EXTRA_OPERATORS))
 else
-clean: rclean mkldnn_clean cyclean testclean $(EXTRA_PACKAGES_CLEAN)
+clean: mkldnn_clean cyclean testclean $(EXTRA_PACKAGES_CLEAN)
 	$(RM) -r build lib bin *~ */*~ */*/*~ */*/*/*~
 	(cd scala-package && mvn clean) || true
 	cd $(DMLC_CORE); $(MAKE) clean; cd -
 	cd $(PS_PATH); $(MAKE) clean; cd -
 	cd $(NNVM_PATH); $(MAKE) clean; cd -
 	cd $(TVM_PATH); $(MAKE) clean; cd -
-	cd $(AMALGAMATION_PATH); $(MAKE) clean; cd -
 endif
 
 clean_all: clean
diff --git a/R-package/.Rbuildignore b/R-package/.Rbuildignore
deleted file mode 100644
index d0da835e2c00..000000000000
--- a/R-package/.Rbuildignore
+++ /dev/null
@@ -1,8 +0,0 @@
-\.o$
-src/*.so$
-\.dll$
-^.*\.Rproj$
-^\.Rproj\.user$
-dummy.NAMESPACE
-
-README.md
diff --git a/R-package/.gitignore b/R-package/.gitignore
deleted file mode 100644
index 22e260ba48f2..000000000000
--- a/R-package/.gitignore
+++ /dev/null
@@ -1,10 +0,0 @@
-.Rhistory
-.RData
-.Ruserdata
-*.Rproj*
-*.o
-*.so
-*.html
-inst/doc
-NAMESPACE
-man
diff --git a/R-package/DESCRIPTION b/R-package/DESCRIPTION
deleted file mode 100644
index ab2a0b5cbd2c..000000000000
--- a/R-package/DESCRIPTION
+++ /dev/null
@@ -1,36 +0,0 @@
-Package: mxnet
-Type: Package
-Title: MXNet: A Flexible and Efficient Machine Learning Library for Heterogeneous Distributed Systems
-Version: 2.0.0
-Date: 2017-06-27
-Author: Tianqi Chen, Qiang Kou, Tong He, Anirudh Acharya
-Maintainer: Qiang Kou
-Repository: apache/incubator-mxnet
-Description: MXNet is a deep learning framework designed for both efficiency
-    and flexibility. It allows you to mix the flavours of deep learning programs
-    together to maximize the efficiency and your productivity.
-License: Apache License (== 2.0)
-URL: https://github.com/apache/incubator-mxnet/tree/master/R-package
-BugReports: https://github.com/apache/incubator-mxnet/issues
-Imports:
-    methods,
-    Rcpp (>= 0.12.1),
-    DiagrammeR (>= 0.9.0),
-    visNetwork (>= 1.0.3),
-    data.table,
-    jsonlite,
-    magrittr,
-    stringr
-Suggests:
-    testthat,
-    mlbench,
-    knitr,
-    rmarkdown,
-    imager,
-    covr
-Depends:
-    R (>= 3.4.4)
-LinkingTo: Rcpp
-VignetteBuilder: knitr
-RoxygenNote: 6.1.1
-Encoding: UTF-8
diff --git a/R-package/LICENSE b/R-package/LICENSE
deleted file mode 100644
index 0ebc218b32ba..000000000000
--- a/R-package/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright (c) 2015 by Contributors
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
diff --git a/R-package/Makefile b/R-package/Makefile
deleted file mode 100644
index 5a8bc42aca93..000000000000
--- a/R-package/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
-rcpplint:
-	./3rdparty/dmlc-core/scripts/lint.py mxnet-rcpp all R-package/src
-
-rpkg:
-	mkdir -p R-package/inst/libs
-	cp src/io/image_recordio.h R-package/src
-	if [ -d "lib" ]; then \
-		cp -rf lib/libmxnet.so R-package/inst/libs; \
-		if [ -e "lib/libtvm_runtime.so" ]; then \
-			cp -rf lib/libtvm_runtime.so R-package/inst/libs; \
-		fi; \
-	else \
-		cp -rf build/libmxnet.so R-package/inst/libs; \
-		if [ -e "build/libtvm_runtime.so" ]; then \
-			cp -rf build/libtvm_runtime.so R-package/inst/libs; \
-		fi; \
-	fi
-
-	mkdir -p R-package/inst/include
-	cp -rl include/* R-package/inst/include
-	Rscript -e "if(!require(devtools)){install.packages('devtools', repo = 'https://cloud.r-project.org/')}"
-	Rscript -e "if(!require(roxygen2)||packageVersion('roxygen2') < '6.1.1'){install.packages('roxygen2', repo = 'https://cloud.r-project.org/')}"
-	Rscript -e "library(devtools); library(methods); options(repos=c(CRAN='https://cloud.r-project.org/')); install_deps(pkg='R-package', dependencies = TRUE)"
-	cp R-package/dummy.NAMESPACE R-package/NAMESPACE # NAMESPACE will be replaced by devtools::document later
-	echo "import(Rcpp)" >> R-package/NAMESPACE
-	R CMD INSTALL R-package
-	Rscript -e "require(mxnet); mxnet:::mxnet.export('R-package'); warnings()"
-	Rscript -e "devtools::document('R-package');warnings()"
-	R CMD INSTALL R-package
-
-rpkgtest:
-	Rscript -e 'require(testthat);res<-test_dir("R-package/tests/testthat");if(!testthat:::all_passed(res)){stop("Test failures", call. = FALSE)}'
-	Rscript -e 'res<-covr:::package_coverage("R-package");fileConn<-file(paste("r-package_coverage_",toString(runif(1)),".json"));writeLines(covr:::to_codecov(res), fileConn);close(fileConn)'
diff --git a/R-package/R/callback.R b/R-package/R/callback.R
deleted file mode 100644
index 04f31b727ed2..000000000000
--- a/R-package/R/callback.R
+++ /dev/null
@@ -1,176 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#' @export mx.metric.logger
-library(methods)
-mx.metric.logger <- setRefClass("mx.metric.logger", fields = list(train = "numeric", eval="numeric"))
-
-#' Log the training metric every period batches
-#'
-#' @param period The number of batches after which to log the training evaluation metric
-#' @param logger The logger class
-#'
-#' @export
-mx.callback.log.train.metric <- function(period, logger=NULL) {
-  function(iteration, nbatch, env, verbose=TRUE) {
-    if (nbatch %% period == 0 && !is.null(env$metric)) {
-      result <- env$metric$get(env$train.metric)
-      if (nbatch != 0 && verbose)
-        message("Batch [", nbatch, "] Train-", result$name, "=", result$value)
-      if (!is.null(logger)) {
-        if (class(logger) != "mx.metric.logger") {
-          stop("Invalid mx.metric.logger.")
-        }
-        logger$train <- c(logger$train, result$value)
-        if (!is.null(env$eval.metric)) {
-          result <- env$metric$get(env$eval.metric)
-          if (nbatch != 0 && verbose)
-            message("Batch [", nbatch, "] Validation-", result$name, "=", result$value)
-          logger$eval <- c(logger$eval, result$value)
-        }
-      }
-    }
-    return(TRUE)
-  }
-}
-
-#' Calculate the training speed
-#'
-#' @param batch.size The batch size
-#' @param frequency The frequency of the training speed update
-#'
-#' @export
-mx.callback.log.speedometer <- function(batch.size, frequency=50){
-  function(iteration, nbatch, env, verbose=TRUE) {
-    count <- nbatch
-    if(is.null(env$count)) env$count <- 0
-    if(is.null(env$init)) env$init <- FALSE
-    if (env$count > count) env$init <- FALSE
-    env$count = count
-    if(env$init){
-      if (count %% frequency == 0 && !is.null(env$metric)){
-        time <- as.double(difftime(Sys.time(), env$tic, units = "secs"))
-        speed <- frequency*batch.size/time
-        result <- env$metric$get(env$train.metric)
-        if (nbatch != 0 && verbose)
-          message("Batch [", nbatch, "] Speed: ", speed, " samples/sec Train-",
-                  result$name, "=", result$value)
-        env$tic = Sys.time()
-      }
-    } else {
-      env$init <- TRUE
-      env$tic <- Sys.time()
-    }
-  }
-}
-
-#' Save a checkpoint to files every period iterations.
-#'
-#' @param prefix The prefix of the model checkpoint.
-#' @param period How often (in iterations) to save the checkpoint.
-#'
-#' @export
-mx.callback.save.checkpoint <- function(prefix, period=1) {
-  function(iteration, nbatch, env, verbose=TRUE) {
-    if (iteration %% period == 0) {
-      mx.model.save(env$model, prefix, iteration)
-      if(verbose) message(sprintf("Model checkpoint saved to %s-%04d.params\n", prefix, iteration))
-    }
-    return(TRUE)
-  }
-}
-
-#' Early stop with different conditions
-#'
-#' Early stopping based on different conditions: hard thresholds, or the number of epochs since the best score. Tested with the "epoch.end.callback" function.
-#'
-#' @param train.metric Numeric. Hard threshold for the metric of the training data set (optional)
-#' @param eval.metric Numeric. Hard threshold for the metric of the evaluation data set (if set, optional)
-#' @param bad.steps Integer. How many epochs to wait after the best score before stopping. Use this option together with an evaluation data set
-#' @param maximize Logical. Whether the metric is maximized (TRUE) or minimized (FALSE)
-#' @param verbose Logical
-#'
-#' @export
-#'
-mx.callback.early.stop <- function(train.metric = NULL, eval.metric = NULL, bad.steps = NULL, maximize = FALSE, verbose = FALSE) {
-
-  function(iteration, nbatch, env, verbose = verbose) {
-
-    # hard threshold for train metric
-    if (!is.null(env$metric)) {
-      if (!is.null(train.metric)) {
-        result <- env$metric$get(env$train.metric)
-        if ((!maximize && result$value < train.metric) || (maximize && result$value > train.metric)) {
-          return(FALSE)
-        }
-      }
-
-      # hard threshold for test metric
-      if (!is.null(eval.metric)) {
-        if (!is.null(env$eval.metric)) {
-          result <- env$metric$get(env$eval.metric)
-          if ((!maximize && result$value < eval.metric) || (maximize && result$value > eval.metric)) {
-            return(FALSE)
-          }
-        }
-      }
-    }
-
-    # not worse than previous X steps
-    if (!is.null(bad.steps)) {
-
-      # set / reset iteration variables
-      # it may not be the best practice to use global variables,
-      # but let's not touch the "model.r" file
-      if (iteration == 1){
-        # reset iterator
-        mx.best.iter <<- 1
-
-        # reset best score
-        if (maximize) {
-          mx.best.score <<- 0
-        }
-        else {
-          mx.best.score <<- Inf
-        }
-      }
-
-      # test early stop round
-      if (!is.null(env$eval.metric)) {
-
-        result <- env$metric$get(env$eval.metric)
-
-        if ((!maximize && result$value > mx.best.score) || (maximize && result$value < mx.best.score)) {
-
-          if (mx.best.iter == bad.steps) {
-            if (verbose) {
-              message("Best score=", mx.best.score, ", iteration [", iteration - bad.steps, "]")
-            }
-            return(FALSE)
-          } else {
-            mx.best.iter <<- mx.best.iter + 1
-          }
-
-        } else {
-          mx.best.score <<- result$value
-          mx.best.iter <<- 1
-        }
-      }
-    }
-
-    return(TRUE)
-  }
-}
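For context on what this removal takes away: a minimal, hypothetical sketch of how the logging and early-stopping callbacks above were typically wired into training. It assumes a pre-removal mxnet R build; `net`, `train.x`, `train.y`, `val.x`, and `val.y` are placeholder names, not part of the diff.

```r
# Hypothetical usage of the removed R callback API (pre-removal mxnet R package).
library(mxnet)

logger <- mx.metric.logger$new()   # collects train/eval metric histories
model <- mx.model.FeedForward.create(
  net, X = train.x, y = train.y,
  eval.data = list(data = val.x, label = val.y),
  num.round = 50, array.batch.size = 32,
  eval.metric = mx.metric.rmse,
  # stop once the validation score has not improved for 5 epochs
  epoch.end.callback = mx.callback.early.stop(bad.steps = 5,
                                              maximize = FALSE, verbose = TRUE),
  # log the train metric every 100 batches into `logger`
  batch.end.callback = mx.callback.log.train.metric(100, logger))
```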
diff --git a/R-package/R/context.R b/R-package/R/context.R
deleted file mode 100644
index 1c5a56ed919e..000000000000
--- a/R-package/R/context.R
+++ /dev/null
@@ -1,64 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Initialize the global context
-init.context.default <- function() {
-  .MXNetEnv[["mx.ctx.internal.default.value"]] <- mx.cpu()
-}
-
-#' Set/Get the default context for array creation.
-#'
-#' @param new optional, takes \code{mx.cpu()} or \code{mx.gpu(id)}, the new default ctx.
-#' @return The default context.
-#'
-#' @export
-mx.ctx.default <- function(new = NULL) {
-  if (!is.null(new)) {
-    .MXNetEnv[["mx.ctx.internal.default.value"]] <- new
-  }
-  return (.MXNetEnv$mx.ctx.internal.default.value)
-}
-
-#' Check if the type is an mxnet context.
-#'
-#' @param x The object to check.
-#' @return Logical indicator
-#'
-#' @export
-is.mx.context <- function(x) {
-  class(x) == "MXContext"
-}
-
-
-#' Create an mxnet CPU context.
-#'
-#' @param dev.id optional, default=0
-#'     The device ID; this is meaningless for CPU and included for interface compatibility.
-#' @return The CPU context.
-#' @name mx.cpu
-#'
-#' @export
-NULL
-
-#' Create an mxnet GPU context.
-#'
-#' @param dev.id optional, default=0
-#'     The GPU device ID, starting from 0.
-#' @return The GPU context.
-#' @name mx.gpu
-#'
-#' @export
-NULL
diff --git a/R-package/R/executor.R b/R-package/R/executor.R
deleted file mode 100644
index 552a7e5c9718..000000000000
--- a/R-package/R/executor.R
+++ /dev/null
@@ -1,89 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#' Simple-bind the symbol to an executor,
-#' with the remaining shapes inferred from the input shapes.
-#'
-#' @export
-mx.simple.bind <- function(symbol, ctx, grad.req = "null", fixed.param = NULL, ...) {
-  if (!is.MXSymbol(symbol)) stop("symbol needs to be MXSymbol")
-  slist <- symbol$infer.shape(list(...))
-
-  if (is.null(slist)) {
-    stop("Need more shape information to decide the shapes of arguments")
-  }
-  arg.arrays <- sapply(slist$arg.shapes, function(shape) {
-    mx.nd.zeros(shape, ctx)
-  }, simplify = FALSE, USE.NAMES = TRUE)
-  aux.arrays <- sapply(slist$aux.shapes, function(shape) {
-    mx.nd.zeros(shape, ctx)
-  }, simplify = FALSE, USE.NAMES = TRUE)
-  grad.reqs <- lapply(names(slist$arg.shapes), function(nm) {
-    if (nm %in% fixed.param) {
-      "null"
-    } else if (!endsWith(nm, "label") && !endsWith(nm, "data")) {
-      grad.req
-    } else {
-      "null"
-    }
-  })
-  mx.symbol.bind(symbol, ctx,
-                 arg.arrays=arg.arrays,
-                 aux.arrays=aux.arrays,
-                 grad.reqs = grad.reqs)
-}
-
-#' Update the executors with new arrays
-#' This function will MUTATE the state of exec
-#'
-#' @export
-mx.exec.update.arg.arrays <- function(exec, arg.arrays, match.name=FALSE, skip.null=FALSE) {
-  exec$update.arg.arrays(arg.arrays, match.name, skip.null)
-}
-
-#' Update the executors with new arrays
-#' This function will MUTATE the state of exec
-#'
-#' @export
-mx.exec.update.aux.arrays <- function(exec, arg.arrays, match.name=FALSE, skip.null=FALSE) {
-  exec$update.aux.arrays(arg.arrays, match.name, skip.null)
-}
-
-#' Update the executors with new arrays
-#' This function will MUTATE the state of exec
-#'
-#' @export
-mx.exec.update.grad.arrays <- function(exec, arg.arrays, match.name=FALSE, skip.null=FALSE) {
-  exec$update.grad.arrays(arg.arrays, match.name, skip.null)
-}
-
-
-#' Perform a forward pass on the executor
-#' This function will MUTATE the state of exec
-#'
-#' @export
-mx.exec.forward <- function(exec, is.train=TRUE) {
-  exec$forward(is.train, list())
-}
-
-#' Perform a backward pass on the executor
-#' This function will MUTATE the state of exec
-#'
-#' @export
-mx.exec.backward <- function(exec, ...) {
-  exec$backward(list(...))
-}
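A minimal sketch of the executor workflow removed above, assuming a pre-removal mxnet R build. The network, shapes, and array values are illustrative placeholders; shapes are column-major, so the last dimension is the batch dimension.

```r
# Hypothetical sketch of the removed executor API.
library(mxnet)

net  <- mx.symbol.Variable("data")
net  <- mx.symbol.FullyConnected(net, num_hidden = 10, name = "fc")
net  <- mx.symbol.LinearRegressionOutput(net, name = "lro")

# Bind with a known data shape; label/weight shapes are inferred.
exec <- mx.simple.bind(net, ctx = mx.cpu(), grad.req = "write",
                       data = c(20, 8))          # 20 features, batch of 8

# Feed inputs, then run forward and backward; both calls mutate `exec`.
mx.exec.update.arg.arrays(exec, list(data = mx.nd.ones(c(20, 8)),
                                     lro_label = mx.nd.zeros(c(8))),
                          match.name = TRUE)
mx.exec.forward(exec, is.train = TRUE)   # outputs land in exec$ref.outputs
mx.exec.backward(exec)                   # gradients land in exec$ref.grad.arrays
```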
diff --git a/R-package/R/initializer.R b/R-package/R/initializer.R
deleted file mode 100644
index 6e1ea02cc8df..000000000000
--- a/R-package/R/initializer.R
+++ /dev/null
@@ -1,118 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#' Internal default value initialization scheme.
-#'
-#' @param name the name of the variable.
-#' @param shape the shape of the array to be generated.
-#'
-mx.init.internal.default <- function(name, shape, ctx, allow.unknown = FALSE) {
-  if (endsWith(name, "bias")) return (mx.nd.zeros(shape))
-  if (endsWith(name, "gamma")) return (mx.nd.ones(shape))
-  if (endsWith(name, "beta")) return (mx.nd.zeros(shape))
-  if (endsWith(name, "moving_mean")) return (mx.nd.zeros(shape))
-  if (endsWith(name, "moving_var")) return (mx.nd.ones(shape))
-  if (allow.unknown) return(NULL)
-  stop(paste("Unknown initialization pattern for ", name))
-}
-
-#' Create an initializer that initializes weights with uniform [-scale, scale]
-#'
-#' @param scale The scale of the uniform distribution
-#'
-#' @export
-mx.init.uniform <- function(scale) {
-  function(name, shape, ctx, allow.unknown = FALSE) {
-    if (!endsWith(name, "weight")) {
-      return (mx.init.internal.default(name = name, shape = shape, allow.unknown = allow.unknown))
-    }
-    return (mx.nd.random.uniform(low = -scale, high = scale, shape = shape))
-  }
-}
-
-#' Create an initializer that initializes weights with normal(0, sd)
-#'
-#' @param sd The standard deviation of the normal distribution
-#'
-#' @export
-mx.init.normal <- function(sd) {
-  function(name, shape, ctx, allow.unknown = FALSE) {
-    if (!endsWith(name, "weight")) {
-      return (mx.init.internal.default(name = name, shape = shape, allow.unknown = allow.unknown))
-    }
-    return (mx.nd.random.normal(loc = 0, scale = sd, shape = shape))
-  }
-}
-
-#' @title Xavier initializer
-#'
-#' @description Create an initializer that initializes weights with the Xavier or
-#' a similar initialization scheme.
-#'
-#' @param rnd_type A string of \code{character} indicating the type of
-#' distribution from which the weights are initialized.
-#' @param factor_type A string of \code{character}.
-#' @param magnitude A \code{numeric} number indicating the scale of the random
-#' number range.
-#' @export
-mx.init.Xavier <- function(rnd_type = "uniform", factor_type = "avg",
-                           magnitude = 3){
-  function(name, shape, ctx, allow.unknown = FALSE){
-    if (!endsWith(name, "weight")) {
-      return (mx.init.internal.default(name = name, shape = shape, allow.unknown = allow.unknown))
-    }
-
-    fan_out <- shape[length(shape)]
-    fan_in <- prod(shape[-length(shape)])
-    factor_val <- switch(factor_type,
-                         "avg" = (fan_in + fan_out) / 2,
-                         "in" = fan_in,
-                         "out" = fan_out,
-                         stop("Not supported factor type. See usage of function mx.init.Xavier"))
-
-    scale <- sqrt(magnitude / factor_val)
-
-    if (rnd_type == "uniform"){
-      return(mx.nd.random.uniform(low = -scale, high = scale, shape = shape))
-    } else if (rnd_type == "gaussian"){
-      return(mx.nd.random.normal(loc = 0, scale = scale, shape = shape))
-    } else {
-      stop("Not supported random type. See usage of function mx.init.Xavier")
-    }
-  }
-}
-
-
-#' Create initialization of arguments like arg.array
-#'
-#' @param initializer The initializer.
-#' @param shape.array A named list that represents the shapes of the weights
-#' @param ctx mx.context The context of the weights
-#' @param skip.unknown Whether to skip unknown weight types
-#' @export
-mx.init.create <- function(initializer, shape.array, ctx = NULL, skip.unknown = TRUE) {
-  if (length(shape.array) == 0) return(list())
-  names <- names(shape.array)
-  ret <- lapply(
-    seq_along(names),
-    function(i) initializer(names[[i]], shape.array[[i]], ctx, allow.unknown = skip.unknown))
-  names(ret) <- names
-  if (skip.unknown) {
-    ret <- mx.util.filter.null(ret)
-  }
-  return(ret)
-}
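A worked example of the scale computed by mx.init.Xavier above, using plain R arithmetic (no mxnet required). The weight shape is an illustrative placeholder; note that with R's column-major shapes, fan_out is the last dimension and fan_in is the product of the rest.

```r
# Worked example of the Xavier scale formula: scale = sqrt(magnitude / factor_val).
shape   <- c(3, 3, 64, 128)               # hypothetical conv weight, 128 output filters
fan_out <- shape[length(shape)]           # 128
fan_in  <- prod(shape[-length(shape)])    # 3 * 3 * 64 = 576
factor_val <- (fan_in + fan_out) / 2      # factor_type = "avg" -> 352
scale   <- sqrt(3 / factor_val)           # magnitude = 3 -> ~0.0923
print(scale)                              # weights drawn from U(-0.0923, 0.0923)
```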
diff --git a/R-package/R/io.R b/R-package/R/io.R
deleted file mode 100644
index b749a413c5b6..000000000000
--- a/R-package/R/io.R
+++ /dev/null
@@ -1,75 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#' Check whether an object is an mx.dataiter
-#'
-#' @param x The object to check.
-#' @return Logical indicator
-#'
-#' @export
-is.mx.dataiter <- function(x) {
-  inherits(x, "Rcpp_MXNativeDataIter") ||
-    inherits(x, "Rcpp_MXArrayDataIter")
-}
-
-#' Extract a certain field from a DataIter.
-#'
-#' @export
-mx.io.extract <- function(iter, field) {
-  packer <- mx.nd.arraypacker()
-  iter$reset()
-  while (iter$iter.next()) {
-    dlist <- iter$value()
-    padded <- iter$num.pad()
-    data <- dlist[[field]]
-    oshape <- dim(data)
-    ndim <- length(oshape)
-    packer$push(mx.nd.slice(data, 0, oshape[[ndim]] - padded))
-  }
-  iter$reset()
-  return(packer$get())
-}
-
-#
-#' Create an MXDataIter-compatible iterator from R's array
-#'
-#' @param data The data array.
-#' @param label The label array.
-#' @param batch.size The batch size used to pack the array.
-#' @param shuffle Whether to shuffle the data
-#'
-#' @export
-mx.io.arrayiter <- function(data, label,
-                            batch.size=128,
-                            shuffle=FALSE) {
-  if (shuffle) {
-    shape <- dim(data)
-    if (is.null(shape)) {
-      num.data <- length(data)
-    } else {
-      ndim <- length(shape)
-      num.data <- shape[[ndim]]
-    }
-    unif.rnds <- as.array(mx.runif(c(num.data), ctx=mx.cpu()));
-  } else {
-    unif.rnds <- as.array(0)
-  }
-  mx.io.internal.arrayiter(as.array(data),
-                           as.array(label),
-                           unif.rnds,
-                           batch.size,
-                           shuffle)
-}
diff --git a/R-package/R/kvstore.R b/R-package/R/kvstore.R
deleted file mode 100644
index e785852b92a0..000000000000
--- a/R-package/R/kvstore.R
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-is.MXKVStore <- function(x) {
-  inherits(x, "Rcpp_MXKVStore")
-}
-
-#' Create an mxnet KVStore.
-#'
-#' @param type string(default="local") The type of kvstore.
-#' @return The kvstore.
-#'
-#' @name mx.kv.create
-#' @export
-NULL
diff --git a/R-package/R/lr_scheduler.R b/R-package/R/lr_scheduler.R
deleted file mode 100644
index e5086e146447..000000000000
--- a/R-package/R/lr_scheduler.R
+++ /dev/null
@@ -1,92 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#' Learning rate scheduler. Reduction based on a factor value.
-#'
-#' @param step (integer)
-#'     Schedule the learning rate after n updates
-#' @param factor_val (double)
-#'     The factor for reducing the learning rate
-#' @return scheduler function
-#'
-#' @export
-mx.lr_scheduler.FactorScheduler <- function(step, factor_val, stop_factor_lr=1e-8, verbose=TRUE) {
-  if(step < 1) stop("Schedule step must be greater than or equal to 1 round")
-  if(factor_val > 1) stop("Factor must be no more than 1 to make lr reduce")
-  function(optimizerEnv){
-    num_update <- optimizerEnv$num_update
-    count <- optimizerEnv$count
-    lr <- optimizerEnv$lr
-    if (num_update > count + step){
-      count <- count + step
-      lr <- lr * factor_val
-      if(lr < stop_factor_lr){
-        lr <- stop_factor_lr
-        if(verbose) message("Update[", num_update, "]: now learning rate arrived at ",
-                            lr, ", will not change in the future")
-      } else{
-        if(verbose) message("Update[", num_update, "]: learning rate is changed to ", lr)
-      }
-      optimizerEnv$lr <- lr
-      optimizerEnv$count <- count
-
-    }
-  }
-}
-
-#' Multifactor learning rate scheduler. Reduction based on a factor value at different steps.
-#'
-#' @param step (array of integer)
-#'     Schedule the learning rate after n updates
-#' @param factor_val (double)
-#'     The factor for reducing the learning rate
-#' @return scheduler function
-#'
-#' @export
-mx.lr_scheduler.MultiFactorScheduler <- function(step, factor_val, stop_factor_lr=1e-8, verbose=TRUE) {
-  if(!all(step == cummax(step))) stop("Schedule step must be an increasing integer list")
-  if(any(step < 1)) stop("Schedule step must be greater than or equal to 1 round")
-  if(factor_val > 1) stop("Factor must be no more than 1 to make lr reduce")
-  function(optimizerEnv){
-    if(is.null(optimizerEnv$cur_step_ind)){
-      cur_step_ind <- 1
-    } else{
-      cur_step_ind <- optimizerEnv$cur_step_ind
-    }
-    num_update <- optimizerEnv$num_update
-    lr <- optimizerEnv$lr
-    count <- optimizerEnv$count
-    if(cur_step_ind < length(step)){
-      if(num_update > step[cur_step_ind]){
-        count <- step[cur_step_ind]
-        cur_step_ind <- cur_step_ind + 1
-        lr <- lr * factor_val
-        if(lr < stop_factor_lr){
-          lr <- stop_factor_lr
-          if(verbose) message("Update[", num_update, "]: now learning rate arrived at ",
-                              lr, ", will not change in the future")
-        } else{
-          if(verbose) message("Update[", num_update, "]: learning rate is changed to ", lr)
-
-        }
-        optimizerEnv$lr <- lr
-        optimizerEnv$count <- count
-        optimizerEnv$cur_step_ind <- cur_step_ind
-      }
-    }
-  }
-}
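To make the FactorScheduler condition above concrete, here is a pure-R trace of the same update rule (`num_update > count + step`), with illustrative step/factor values; it needs no mxnet install.

```r
# Pure-R trace of the FactorScheduler logic: lr halves every 1000 updates.
lr <- 0.1; count <- 0; step <- 1000; factor_val <- 0.5
for (num_update in c(500, 1000, 1500, 2500)) {
  if (num_update > count + step) {   # same condition as the scheduler closure
    count <- count + step
    lr <- lr * factor_val
  }
  cat(num_update, "->", lr, "\n")
}
# Prints: 500 -> 0.1, 1000 -> 0.1, 1500 -> 0.05, 2500 -> 0.025
```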
diff --git a/R-package/R/metric.R b/R-package/R/metric.R
deleted file mode 100644
index 8715ccfb3a1a..000000000000
--- a/R-package/R/metric.R
+++ /dev/null
@@ -1,134 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#' Helper function to create a customized metric
-#'
-#' @export
-mx.metric.custom <- function(name, feval) {
-  init <- function() {
-    c(0, 0)
-  }
-  update <- function(label, pred, state) {
-    m <- feval(label, pred)
-    state <- c(state[[1]] + 1, state[[2]] + m)
-    return(state)
-  }
-  get <- function(state) {
-    list(name=name, value=(state[[2]]/state[[1]]))
-  }
-  ret <- (list(init=init, update=update, get=get))
-  class(ret) <- "mx.metric"
-  return(ret)
-}
-
-#' Accuracy metric for classification
-#'
-#' @export
-mx.metric.accuracy <- mx.metric.custom("accuracy", function(label, pred) {
-  pred <- mx.nd.argmax(data = pred, axis = 1, keepdims = F)
-  res <- mx.nd.mean(label == pred)
-  return(as.array(res))
-})
-
-#' Top-k accuracy metric for classification
-#'
-#' @export
-mx.metric.top_k_accuracy <- mx.metric.custom("top_k_accuracy", function(label, pred, top_k = 5) {
-  label <- mx.nd.reshape(data = label, shape = c(1,0))
-  pred <- mx.nd.topk(data = pred, axis = 1, k = top_k, ret_typ = "indices")
-  pred <- mx.nd.broadcast.equal(lhs = pred, rhs = label)
-  res <- mx.nd.mean(mx.nd.sum(data = pred, axis = 1, keepdims = F))
-  return(as.array(res))
-})
-
-#' MSE (Mean Squared Error) metric for regression
-#'
-#' @export
-mx.metric.mse <- mx.metric.custom("mse", function(label, pred) {
-  label <- mx.nd.reshape(label, shape = -1)
-  pred <- mx.nd.reshape(pred, shape = -1)
-  res <- mx.nd.mean(mx.nd.square(label-pred))
-  return(as.array(res))
-})
-
-#' RMSE (Root Mean Squared Error) metric for regression
-#'
-#' @export
-mx.metric.rmse <- mx.metric.custom("rmse", function(label, pred) {
-  label <- mx.nd.reshape(label, shape = -1)
-  pred <- mx.nd.reshape(pred, shape = -1)
-  res <- mx.nd.sqrt(mx.nd.mean(mx.nd.square(label-pred)))
-  return(as.array(res))
-})
-
-#' MAE (Mean Absolute Error) metric for regression
-#'
-#' @export
-mx.metric.mae <- mx.metric.custom("mae", function(label, pred) {
-  label <- mx.nd.reshape(label, shape = -1)
-  pred <- mx.nd.reshape(pred, shape = -1)
-  res <- mx.nd.mean(mx.nd.abs(label-pred))
-  return(as.array(res))
-})
-
-#' RMSLE (Root Mean Squared Logarithmic Error) metric for regression
-#'
-#' @export
-mx.metric.rmsle <- mx.metric.custom("rmsle", function(label, pred) {
-  label <- mx.nd.reshape(label, shape = -1)
-  pred <- mx.nd.reshape(pred, shape = -1)
-  res <- mx.nd.sqrt(mx.nd.mean(mx.nd.square(mx.nd.log1p(pred) - mx.nd.log1p(label))))
-  return(as.array(res))
-})
-
-#' Perplexity metric for language model
-#'
-#' @export
-mx.metric.Perplexity <- mx.metric.custom("Perplexity", function(label, pred, mask_element = -1) {
-
-  label <- mx.nd.reshape(label, shape = -1)
-  pred_probs <- mx.nd.pick(data = pred, index = label, axis = 1)
-
-  mask <- label != mask_element
-  mask_length <- mx.nd.sum(mask)
-
-  NLL <- -mx.nd.sum(mx.nd.log(pred_probs) * mask) / mask_length
-  res <- mx.nd.exp(NLL)
-  return(as.array(res))
-})
-
-#' LogLoss metric for logistic regression
-#'
-#' @export
-mx.metric.logloss <- mx.metric.custom("logloss", function(label, pred) {
-  label <- mx.nd.reshape(label, shape = -1)
-  pred <- mx.nd.reshape(pred, shape = -1)
-  pred <- mx.nd.clip(pred, a_min = 1e-15, a_max = 1-1e-15)
-  res <- -mx.nd.mean(label * mx.nd.log(pred) + (1-label) * mx.nd.log(1-pred))
-  return(as.array(res))
-})
-
-#' Accuracy metric for logistic regression
-#'
-#' @export
-mx.metric.logistic_acc <- mx.metric.custom("accuracy", function(label, pred) {
-  label <- mx.nd.reshape(label, shape = -1)
-  pred <- mx.nd.reshape(pred, shape = -1) > 0.5
-  res <- mx.nd.mean(label == pred)
-  return(as.array(res))
-})
-
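All of the built-in metrics above are themselves built with mx.metric.custom, so user-defined metrics followed the same pattern. A hypothetical example (assumes a pre-removal mxnet R build; "mape" is not one of the shipped metrics):

```r
# Hypothetical custom metric via the removed mx.metric.custom helper:
# mean absolute percentage error, in the same style as the shipped metrics.
library(mxnet)

mx.metric.mape <- mx.metric.custom("mape", function(label, pred) {
  label <- mx.nd.reshape(label, shape = -1)
  pred  <- mx.nd.reshape(pred,  shape = -1)
  res   <- mx.nd.mean(mx.nd.abs((label - pred) / label))
  as.array(res)
})

# Then pass it to training as: eval.metric = mx.metric.mape
```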
diff --git a/R-package/R/model.R b/R-package/R/model.R
deleted file mode 100644
index 51d1705ba5f0..000000000000
--- a/R-package/R/model.R
+++ /dev/null
@@ -1,716 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# slice the shape on the highest dimension
-mx.model.slice.shape <- function(shape, nsplit) {
-  if (is.numeric(shape)) {
-    ndim <- length(shape)
-    batchsize <- shape[[ndim]]
-    step <- as.integer((batchsize + nsplit - 1) / nsplit)
-    lapply(seq_len(nsplit) - 1, function(k) {
-      begin = min(k * step, batchsize)
-      end = min((k + 1) * step, batchsize)
-      s <- shape
-      s[[ndim]] = end - begin
-      return(list(begin=begin, end=end, shape=s))
-    })
-  } else if (is.list(shape)) {
-    shape.names = names(shape)
-    ndim <- length(shape[[1]])
-    batchsize <- shape[[1]][[ndim]]
-    step <- as.integer((batchsize + nsplit - 1) / nsplit)
-    lapply(seq_len(nsplit) - 1, function(k) {
-      begin = min(k * step, batchsize)
-      end = min((k + 1) * step, batchsize)
-      s <- lapply(shape, function(s) {
-        s[[ndim]] = end - begin
-        return(s)
-      })
-      return(list(begin=begin, end=end, shape=s))
-    })
-  }
-}
-
-# get the argument name of data and label
-mx.model.check.arguments <- function(symbol) {
-  data <- NULL
-  label <- NULL
-  for (nm in arguments(symbol)) {
-    if (endsWith(nm, "data")) {
-      if (!is.null(data)) {
-        stop("Multiple fields contain the suffix data")
-      } else {
-        data <- nm
-      }
-    }
-    if (endsWith(nm, "label")) {
-      if (!is.null(label)) {
-        stop("Multiple fields contain the suffix label")
-      } else {
-        label <- nm
-      }
-    }
-  }
-  return(c(data, label))
-}
-
-
-# Extract model from executors
-mx.model.extract.model <- function(symbol, train.execs) {
-  reduce.sum <- function(x) Reduce("+", x)
-  # Get the parameters
-  ndevice <- length(train.execs)
-  narg <- length(train.execs[[1]]$ref.arg.arrays)
-  arg.params <- lapply(seq_len(narg), function(k) {
-    if (is.null(train.execs[[1]]$ref.grad.arrays[[k]])) {
-      result <- NULL
-    } else {
-      result <- reduce.sum(lapply(train.execs, function(texec) {
-        mx.nd.copyto(texec$ref.arg.arrays[[k]], mx.cpu())
-      })) / ndevice
-    }
-    return(result)
-  })
-  names(arg.params) <- names(train.execs[[1]]$ref.arg.arrays)
-  arg.params <- mx.util.filter.null(arg.params)
-  # Get the auxiliary
-  naux <- length(train.execs[[1]]$ref.aux.arrays)
-  if (naux != 0) {
-    aux.params <- lapply(seq_len(naux), function(k) {
-      reduce.sum(lapply(train.execs, function(texec) {
-        mx.nd.copyto(texec$ref.aux.arrays[[k]], mx.cpu())
-      })) / ndevice
-    })
-    names(aux.params) <- names(train.execs[[1]]$ref.aux.arrays)
-  } else {
-    aux.params <- list()
-  }
-  # Get the model
-  model <- list(symbol=symbol, arg.params=arg.params, aux.params=aux.params)
-  return(structure(model, class="MXFeedForwardModel"))
-}
-
-# decide what type of kvstore to use
-mx.model.create.kvstore <- function(kvstore, arg.params, ndevice, verbose=TRUE) {
-  if (is.MXKVStore(kvstore)) return (kvstore)
-  if (!is.character(kvstore)) {
-    stop("kvstore must be either MXKVStore or a string")
-  }
-  if (ndevice == 1) return (NULL)
-  if (kvstore == "local") {
-    max.size <- max(lengths(arg.params))
-    if (max.size < 1024 * 1024 * 16) {
-      kvstore <- 'local_update_cpu'
-    } else {
-      kvstore <- 'local_allreduce_cpu'
-    }
-    if(verbose) message("Auto-select kvstore type = ", kvstore)
-  }
-  return(mx.kv.create(kvstore))
-}
-
-# Internal function to do multiple device training.
-mx.model.train <- function(symbol, ctx, input.shape, output.shape,
-                           arg.params, aux.params,
-                           begin.round, end.round, optimizer,
-                           train.data, eval.data, metric,
-                           epoch.end.callback, batch.end.callback,
-                           kvstore, fixed.param, verbose,
-                           metric_cpu) {
-  ndevice <- length(ctx)
-  if(verbose) message("Start training with ", ndevice, " devices")
-  # create the executors
-  input_slice <- mx.model.slice.shape(input.shape, ndevice)
-  output_slice <- mx.model.slice.shape(output.shape, ndevice)
-
-  arg_names <- arguments(symbol)
-  output.names <- names(output.shape)
-  #label_name <- arg_names[endsWith(arg_names, "label")]
-  train.execs <- lapply(seq_len(ndevice), function(i) {
-    arg_lst <- list(symbol = symbol, ctx = ctx[[i]], grad.req = "write")
-    arg_lst <- append(arg_lst, input_slice[[i]]$shape)
-    arg_lst <- append(arg_lst, output_slice[[i]]$shape)
-    arg_lst[["fixed.param"]] = unique(c(fixed.param, names(input.shape), names(output.shape)))
-    do.call(mx.simple.bind, arg_lst)
-  })
-  # set the parameters into executors
-  for (texec in train.execs) {
-    mx.exec.update.arg.arrays(texec, arg.params, match.name=TRUE)
-    mx.exec.update.aux.arrays(texec, aux.params, match.name=TRUE)
-  }
-  # KVStore related setup
-  params.index <-
-    as.integer(mx.util.filter.null(
-      lapply(seq_along(train.execs[[1]]$ref.grad.arrays), function(k) {
-        if (!is.null(train.execs[[1]]$ref.grad.arrays[[k]])) k else NULL
-      })))
-  update.on.kvstore <- FALSE
-  if (!is.null(kvstore) && kvstore$update.on.kvstore) {
-    update.on.kvstore <- TRUE
-    kvstore$set.optimizer(optimizer)
-  } else {
-    updaters <- lapply(seq_len(ndevice), function(i) {
-      mx.opt.get.updater(optimizer, train.execs[[i]]$ref.arg.arrays, ctx = ctx[[i]])
-    })
-  }
-  if (!is.null(kvstore)) {
-    kvstore$init(params.index, train.execs[[1]]$ref.arg.arrays[params.index])
-  }
-  # Get the input names
-
-  for (iteration in begin.round:end.round) {
-    # reset training data
-    train.data$reset()
-    nbatch <- 0
-    if (!is.null(metric)) {
-      train.metric <- metric$init()
-    }
-    while (train.data$iter.next()) {
-      # Get input data slice
-      dlist <- train.data$value()
-      slices <- lapply(seq_len(ndevice), function(i) {
-        s <- input_slice[[i]]
-        ret <- sapply(names(dlist), function(n) {mx.nd.slice(dlist[[n]], s$begin, s$end)})
-        return(ret)
-      })
-      # copy data to executor
-      for (i in seq_len(ndevice)) {
-        s <- slices[[i]]
-        if (endsWith(output.names, "label")) {
-          names(s)[endsWith(names(s), "label")] = output.names
-        }
-        mx.exec.update.arg.arrays(train.execs[[i]], s, match.name=TRUE)
-      }
-
-      # forward pass
-      for (texec in train.execs) {
-        mx.exec.forward(texec, is.train=TRUE)
-      }
-
-      # copy of preds and labels for metric
-      if (!is.null(metric)) {
-        preds <- lapply(train.execs, function(texec) {texec$ref.outputs[[1]]})
-        labels <- lapply(train.execs, function(texec) {texec$ref.arg.arrays[[output.names[length(output.names)]]]})
-        if (metric_cpu) {
-          preds <- lapply(seq_along(train.execs), function(i) {mx.nd.copyto(preds[[i]], mx.cpu())})
-          labels <- lapply(seq_along(train.execs), function(i) {mx.nd.copyto(labels[[i]], mx.cpu())})
-        }
-      }
-
-      # backward pass
-      for (texec in train.execs) {
-        mx.exec.backward(texec)
-      }
-
-      if (!is.null(kvstore)) {
-        # push the gradient
-        kvstore$push(params.index, lapply(train.execs, function(texec) {
-          texec$ref.grad.arrays[params.index]
-        }), -params.index)
-      }
-      if (update.on.kvstore) {
-        # pull back weight
-        kvstore$pull(params.index, lapply(train.execs, function(texec) {
-          texec$ref.arg.arrays[params.index]
-        }), -params.index)
-      } else {
-        # pull back gradient sums
-        if (!is.null(kvstore)) {
-          kvstore$pull(params.index, lapply(train.execs, function(texec) {
-            texec$ref.grad.arrays[params.index]
-          }), -params.index)
-        }
-        arg.blocks <- lapply(seq_len(ndevice), function(i) {
-          updaters[[i]](train.execs[[i]]$ref.arg.arrays, train.execs[[i]]$ref.grad.arrays)
-        })
-        for (i in seq_len(ndevice)) {
-          mx.exec.update.arg.arrays(train.execs[[i]], arg.blocks[[i]], skip.null=TRUE)
-        }
-      }
-      # Update the evaluation metrics
-      if (!is.null(metric)) {
-        for (i in seq_len(ndevice)) {
-          train.metric <- metric$update(label = labels[[i]],
-                                        pred = preds[[i]],
-                                        state = train.metric)
-        }
-      }
-      nbatch <- nbatch + 1
-      if (!is.null(batch.end.callback)) {
-        batch.end.callback(iteration, nbatch, environment())
-      }
-    }
-
-    if (!is.null(metric)) {
-      result <- metric$get(train.metric)
-      if(verbose) message("[", iteration, "] Train-", result$name, "=", result$value)
-    }
-    if (!is.null(eval.data)) {
-      # reset eval data
-      eval.data$reset()
-      if (!is.null(metric)) {
-        eval.metric <- metric$init()
-      }
-      while (eval.data$iter.next()) {
-        dlist <- eval.data$value()
-        slices <- lapply(seq_len(ndevice), function(i) {
-          s <- input_slice[[i]]
-          ret <- sapply(names(dlist), function(n) {mx.nd.slice(dlist[[n]], s$begin, s$end)})
-          return(ret)
-        })
-        for (i in seq_len(ndevice)) {
-          s <- slices[[i]]
-          if (endsWith(output.names, "label")) {
-            names(s)[endsWith(names(s), "label")] = output.names
-          }
-          mx.exec.update.arg.arrays(train.execs[[i]], s, match.name=TRUE)
-        }
-        for (texec in train.execs) {
-          mx.exec.forward(texec, is.train=FALSE)
-        }
-
-        # copy of preds and labels for metric and update metric
-        if (!is.null(metric)) {
-          preds <- lapply(train.execs, function(texec) {texec$ref.outputs[[1]]})
-          labels <- lapply(train.execs, function(texec) {texec$ref.arg.arrays[[output.names[length(output.names)]]]})
-          if (metric_cpu) {
-            preds <- lapply(seq_along(train.execs), function(i) {mx.nd.copyto(preds[[i]], mx.cpu())})
-            labels <- lapply(seq_along(train.execs), function(i) {mx.nd.copyto(labels[[i]], mx.cpu())})
-          }
-          for (i in seq_len(ndevice)) {
-            eval.metric <- metric$update(label = labels[[i]],
-                                         pred = preds[[i]],
-                                         state = eval.metric)
-          }
-        }
-      }
-      if (!is.null(metric)) {
-        result <- metric$get(eval.metric)
-        if(verbose) message("[", iteration, "] Validation-", result$name, "=", result$value)
-      }
-    } else {
-      eval.metric <- NULL
-    }
-    # get the model out
-    model <- mx.model.extract.model(symbol, train.execs)
-
-    epoch_continue <- TRUE
-    if (!is.null(epoch.end.callback)) {
-      epoch_continue <- epoch.end.callback(iteration, 0, environment(), verbose = verbose)
-    }
-
-    if (!epoch_continue) {
-      break
-    }
-  }
-  return(model)
-}
-
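The multi-device loop above relies on the internal mx.model.slice.shape helper (defined near the top of this file) to partition each batch across devices. A small illustration, assuming a pre-removal mxnet R build and `:::` access since the helper is not exported:

```r
# Pure illustration of the internal batch-slicing helper.
# Shapes are column-major, so the last dimension (100) is the batch dimension.
slices <- mxnet:::mx.model.slice.shape(c(784, 100), nsplit = 4)
str(slices[[1]])
# Expected: list(begin = 0, end = 25, shape = c(784, 25))
# i.e. each of the 4 devices gets a 25-example slice of the 100-example batch.
```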
-#' @param initializer, initializer object. The initialization scheme for parameters. -#' @param ctx mx.context. The devices used to perform initialization. -#' @export -mx.model.init.params <- function(symbol, input.shape, output.shape, initializer, ctx) { - if (!is.MXSymbol(symbol)) stop("symbol needs to be MXSymbol") - - arg_lst <- list(symbol = symbol) - arg_lst <- append(arg_lst, input.shape) - arg_lst <- append(arg_lst, output.shape) - - slist <- do.call(mx.symbol.infer.shape, arg_lst) - if (is.null(slist)) stop("Not enough information to get shapes") - arg.params <- mx.init.create(initializer, slist$arg.shapes, ctx, skip.unknown=TRUE) - aux.params <- mx.init.create(initializer, slist$aux.shapes, ctx, skip.unknown=FALSE) - return(list(arg.params=arg.params, aux.params=aux.params)) -} - -# Initialize the data iter -mx.model.init.iter <- function(X, y, batch.size, is.train) { - if (is.mx.dataiter(X)) return(X) - if (is.null(y)) { - if (is.train) stop("Need to provide parameter y for training with R arrays.") - shape <- dim(X) - ndim <- length(shape) - y <- rep.int(0, times = shape[[ndim]]) - } - batch.size <- min(length(y), batch.size) - return(mx.io.arrayiter(X, y, batch.size=batch.size, shuffle=is.train)) -} - -# select layout by matching shape, report error if nothing matches up. -mx.model.select.layout.train <- function(X, y) { - if (is.null(y)) stop("Need to provide y for training") - y <- as.array(y) - dimX <- dim(X) - dimy <- dim(y) - if (length(dimX) != 2) return("colmajor") - rowmajor <- 0 - colmajor <- 0 - if (dimX[[1]] == dimy[[1]]) rowmajor <- 1 - if (dimX[[length(dimX)]] == dimy[[length(dimy)]]) colmajor <- 1 - if (rowmajor + colmajor != 1) { - stop("Cannot auto select array.layout, please specify this parameter") - } - if (rowmajor == 1) { - warning("Auto detect layout of input matrix, use rowmajor..\n") - return("rowmajor") - } else{ - warning("Auto detect layout input matrix, use colmajor..\n") - return("colmajor") - } -} - -# select layout by matching shape, report error if nothing matches up. -mx.model.select.layout.predict <- function(X, model) { - dimX <- dim(X) - if (length(dimX) != 2) return("colmajor") - rowmajor <- 1 - colmajor <- 1 - # try row major - ret <- mx.symbol.infer.shape(model$symbol, data=c(dimX[[2]], 1)) - if (!is.null(ret)) { - names = names(model$arg.params) - if (any(vapply(seq_along(names), - function(i) any(ret$arg.shapes[[names[i]]] != dim(model$arg.params[[i]])), - logical(1)))) rowmajor <- 0 - } - # try col major - ret <- mx.symbol.infer.shape(model$symbol, data=c(dimX[[1]], 1)) - if (!is.null(ret)) { - if (any(vapply(seq_along(names), - function(i) any(ret$arg.shapes[[names[i]]] != dim(model$arg.params[[i]])), - logical(1)))) colmajor <- 0 - } - if (rowmajor + colmajor != 1) { - stop("Cannot auto select array.layout, please specify this parameter") - } - if (rowmajor == 1) { - warning("Auto detect layout of input matrix, use rowmajor..\n") - return("rowmajor") - } else{ - warning("Auto detect layout input matrix, use colmajor..\n") - return("colmajor") - } -} - - -#' Create a MXNet Feedforward neural net model with the specified training. -#' -#' @param symbol The symbolic configuration of the neural network. -#' @param X mx.io.DataIter or R array/matrix -#' The training data. -#' @param y R array, optional label of the data -#' This is only used when X is R array. -#' @param ctx mx.context or list of mx.context, optional -#' The devices used to perform training. 
-#' @param begin.round integer (default=1) -#' The initial iteration over the training data to train the model. -#' @param num.round integer (default=10) -#' The number of iterations over training data to train the model. -#' @param optimizer string, default="sgd" -#' The optimization method. -#' @param initializer initializer object. default=mx.init.uniform(0.01) -#' The initialization scheme for parameters. -#' @param eval.data mx.io.DataIter or list(data=R.array, label=R.array), optional -#' The validation set used for evaluation during training. -#' @param eval.metric function, optional -#' The evaluation function on the results. -#' @param epoch.end.callback function, optional -#' The callback when an iteration ends. -#' @param batch.end.callback function, optional -#' The callback when one mini-batch iteration ends. -#' @param array.batch.size integer (default=128) -#' The batch size used for R array training. -#' @param array.layout can be "auto", "colmajor", "rowmajor", (default=auto) -#' The layout of array. "rowmajor" is only supported for two dimensional array. -#' For matrix, "rowmajor" means dim(X) = c(nexample, nfeatures), -#' "colmajor" means dim(X) = c(nfeatures, nexample) -#' "auto" will auto detect the layout by matching the feature size, -#' and will report an error when X is a square matrix, asking the user to explicitly specify the layout. -#' @param kvstore string (default="local") -#' The parameter synchronization scheme in multiple devices. -#' @param verbose logical (default=TRUE) -#' Specifies whether to print information on the iterations during training. -#' @param arg.params list, optional -#' Model parameter, list of name to NDArray of net's weights. -#' @param aux.params list, optional -#' Model parameter, list of name to NDArray of net's auxiliary states. -#' @param input.names optional -#' The names of the input symbols. -#' @param output.names optional -#' The names of the output symbols. -#' @param fixed.param -#' The parameters to be fixed during training. For these parameters, no gradients -#' will be calculated and thus no space will be allocated for the gradient. -#' @param allow.extra.params -#' Whether to allow extra parameters that are not needed by symbol. -#' If this is TRUE, no error will be thrown when arg_params or aux_params -#' contain extra parameters that are not needed by the executor. -#' @return model A trained mxnet model. -#' -#' @export - -mx.model.FeedForward.create <- - function(symbol, X, y=NULL, ctx=NULL, begin.round=1, - num.round=10, optimizer="sgd", - initializer=mx.init.uniform(0.01), - eval.data=NULL, eval.metric=NULL, - epoch.end.callback=NULL, batch.end.callback=NULL, - array.batch.size=128, array.layout="auto", - kvstore = "local", verbose = TRUE, - arg.params = NULL, aux.params = NULL, - input.names=NULL, output.names = NULL, - fixed.param = NULL, allow.extra.params = FALSE, - metric_cpu = TRUE, - ...)
{ - if (is.array(X) || is.matrix(X)) { - if (array.layout == "auto") { - array.layout <- mx.model.select.layout.train(X, y) - } - if (array.layout == "rowmajor") { - X <- t(X) - } - } - X <- mx.model.init.iter(X, y, batch.size=array.batch.size, is.train=TRUE) - if (!X$iter.next()) { - X$reset() - if (!X$iter.next()) stop("Empty input") - } - if (is.null(input.names)) { - input.names <- "data" - } - input.shape <- sapply(input.names, function(n){dim(X$value()[[n]])}, simplify = FALSE) - if (is.null(output.names)) { - arg_names <- arguments(symbol) - output.names <- arg_names[endsWith(arg_names, "label")] - output.shape <- list() - output.shape[[output.names]] <- dim((X$value())$label) - } else { - output.shape <- sapply(output.names, function(n){dim(X$value()[[n]])}, simplify = FALSE) - } - params <- mx.model.init.params(symbol, input.shape, output.shape, initializer, mx.cpu()) - if (!is.null(arg.params)) params$arg.params <- arg.params - if (!is.null(aux.params)) params$aux.params <- aux.params - if (allow.extra.params) { - params$arg.params[!names(params$arg.params) %in% arguments(symbol)] <- NULL - } - if (is.null(ctx)) ctx <- mx.ctx.default() - if (is.mx.context(ctx)) { - ctx <- list(ctx) - } - if (!is.list(ctx)) stop("ctx must be mx.context or list of mx.context") - if (is.character(optimizer)) { - if (is.numeric(input.shape)) { - ndim <- length(input.shape) - batchsize = input.shape[[ndim]] - } else { - ndim <- length(input.shape[[1]]) - batchsize = input.shape[[1]][[ndim]] - } - optimizer <- mx.opt.create(optimizer, rescale.grad=(1/batchsize), ...) - } - if (!is.null(eval.data) && !is.list(eval.data) && !is.mx.dataiter(eval.data)) { - stop("The validation set should be either an mx.io.DataIter or an R list") - } - if (is.list(eval.data)) { - if (is.null(eval.data$data) || is.null(eval.data$label)){ - stop("Please provide the validation set as list(data=R.array, label=R.array)") - } - if (is.array(eval.data$data) || is.matrix(eval.data$data)) { - if (array.layout == "auto") { - array.layout <- mx.model.select.layout.train(eval.data$data, eval.data$label) - } - if (array.layout == "rowmajor") { - eval.data$data <- t(eval.data$data) - } - } - eval.data <- mx.model.init.iter(eval.data$data, eval.data$label, batch.size=array.batch.size, is.train = TRUE) - } - kvstore <- mx.model.create.kvstore(kvstore, params$arg.params, length(ctx), verbose=verbose) - model <- mx.model.train(symbol, ctx, input.shape, output.shape, - params$arg.params, params$aux.params, - begin.round, num.round, optimizer=optimizer, - train.data=X, eval.data=eval.data, - metric=eval.metric, - epoch.end.callback=epoch.end.callback, - batch.end.callback=batch.end.callback, - kvstore=kvstore, - fixed.param = fixed.param, - verbose=verbose, - metric_cpu = metric_cpu) - return (model) - } - -#' Predict the outputs given a model and dataset. -#' -#' @param model The MXNet Model. -#' @param X The dataset to predict. -#' @param ctx mx.cpu() or mx.gpu(). The device used to generate the prediction. -#' @param array.batch.size The batch size used in batching. Only used when X is R's array. -#' @param array.layout can be "auto", "colmajor", "rowmajor", (default=auto) -#' The layout of array. "rowmajor" is only supported for two dimensional array. -#' For matrix, "rowmajor" means dim(X) = c(nexample, nfeatures), -#' "colmajor" means dim(X) = c(nfeatures, nexample) -#' "auto" will auto detect the layout by matching the feature size, -#' and will report an error when X is a square matrix, asking the user to explicitly specify the layout.
-#' @param allow.extra.params -#' Whether to allow extra parameters that are not needed by symbol. -#' If this is TRUE, no error will be thrown when arg_params or aux_params -#' contain extra parameters that are not needed by the executor. -#' @export -predict.MXFeedForwardModel <- function(model, X, ctx = NULL, array.batch.size = 128, - array.layout = "auto", allow.extra.params = FALSE) { - if (is.serialized(model)) model <- mx.unserialize(model) - if (is.null(ctx)) ctx <- mx.ctx.default() - if (is.array(X) || is.matrix(X)) { - if (array.layout == "auto") { - array.layout <- mx.model.select.layout.predict(X, model) - } - if (array.layout == "rowmajor") { - X <- t(X) - } - } - X <- mx.model.init.iter(X, NULL, batch.size=array.batch.size, is.train=FALSE) - X$reset() - if (!X$iter.next()) stop("Cannot predict on empty iterator") - dlist = X$value() - arg_lst <- list(symbol = model$symbol, ctx = ctx, data = dim(dlist$data), grad.req="null") - - pexec <- do.call(mx.simple.bind, arg_lst) - if (allow.extra.params) { - model$arg.params[!names(model$arg.params) %in% arguments(model$symbol)] <- NULL - } - mx.exec.update.arg.arrays(pexec, model$arg.params, match.name=TRUE) - mx.exec.update.aux.arrays(pexec, model$aux.params, match.name=TRUE) - packer <- mx.nd.arraypacker() - X$reset() - while (X$iter.next()) { - dlist = X$value() - mx.exec.update.arg.arrays(pexec, list(data=dlist$data), match.name=TRUE) - mx.exec.forward(pexec, is.train=FALSE) - out.pred <- mx.nd.copyto(pexec$ref.outputs[[1]], mx.cpu()) - padded <- X$num.pad() - oshape <- dim(out.pred) - ndim <- length(oshape) - packer$push(mx.nd.slice(out.pred, 0, oshape[[ndim]] - padded)) - } - X$reset() - return(packer$get()) -} - -#' Load model checkpoint from file. -#' -#' @param prefix string prefix of the model name -#' @param iteration integer Iteration number of model we would like to load. -#' -#' @export -mx.model.load <- function(prefix, iteration) { - symbol <- mx.symbol.load(path.expand(paste0(prefix, "-symbol.json"))) - save.dict <- mx.nd.load(path.expand(sprintf("%s-%04d.params", prefix, iteration))) - nms <- names(save.dict) - - arg.index <- startsWith(nms, "arg:") - aux.index <- startsWith(nms, "aux:") - - if (any(arg.index)) { - arg.params <- save.dict[arg.index] - names(arg.params) <- substr(nms[arg.index], 5, nchar(nms[arg.index])) - } else { - arg.params <- list() - } - if (any(aux.index)) { - aux.params <- save.dict[aux.index] - names(aux.params) <- substr(nms[aux.index], 5, nchar(nms[aux.index])) - } else { - aux.params <- list() - } - model <- list(symbol=symbol, arg.params=arg.params, aux.params=aux.params) - return(structure(model, class="MXFeedForwardModel")) -} - -#' Save model checkpoint into file. -#' -#' @param model The feedforward model to be saved. -#' @param prefix string prefix of the model name -#' @param iteration integer Iteration number of model we would like to save. -#' -#' @export -mx.model.save <- function(model, prefix, iteration) { - arg.params <- model$arg.params - aux.params <- model$aux.params - names(arg.params) <- as.character(lapply(names(arg.params), function(nm) { - paste0("arg:", nm) - })) - names(aux.params) <- as.character(lapply(names(aux.params), function(nm) { - paste0("aux:", nm) - })) - save.dict <- append(arg.params, aux.params) - mx.symbol.save(model$symbol, path.expand(paste0(prefix, "-symbol.json"))) - mx.nd.save(save.dict, path.expand(sprintf("%s-%04d.params", prefix, iteration))) -} - -#' Check if the model has been serialized into RData-compatible format.
-#' -#' @return Logical indicator -#' -#' @export -is.serialized <- function(model) { - if (!is.null(model[['is.serialized']])) { - return(model[['is.serialized']]) - } else { - return(FALSE) - } -} - -#' Serialize MXNet model into RData-compatible format. -#' -#' @param model The mxnet model -#' -#' @export -mx.serialize <- function(model) { - if (!is.serialized(model)) { - model_rdata <- list() - model_rdata[['symbol_json']] <- model$symbol$as.json() - model_rdata[['arg.params']] <- lapply(model$arg.params, as.array) - model_rdata[['aux.params']] <- lapply(model$aux.params, as.array) - model_rdata[['is.serialized']] <- TRUE - class(model_rdata) <- "MXFeedForwardModel" - return(model_rdata) - } else { - return(model) - } -} - -#' Unserialize MXNet model from R object. -#' -#' @param model The mxnet model loaded from RData files. -#' -#' @export -mx.unserialize <- function(model) { - if (!is.serialized(model)) { - return(model) - } else { - symbol <- mx.symbol.load.json(model$symbol_json) - arg.params <- lapply(model$arg.params, mx.nd.array) - aux.params <- lapply(model$aux.params, mx.nd.array) - model <- list(symbol=symbol, arg.params=arg.params, aux.params=aux.params) - return(structure(model, class="MXFeedForwardModel")) - } -} diff --git a/R-package/R/model.rnn.R b/R-package/R/model.rnn.R deleted file mode 100644 index db55bd25f1fe..000000000000 --- a/R-package/R/model.rnn.R +++ /dev/null @@ -1,370 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License.
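The checkpoint and serialization helpers removed above were typically used together. Below is a minimal usage sketch, assuming a trained MXFeedForwardModel named `model`; the "mymodel" prefix and iteration 10 are illustrative only, not taken from this change:

# Sketch only: checkpoint to disk, then an RData-compatible round trip.
mx.model.save(model, prefix = "mymodel", iteration = 10)  # writes mymodel-symbol.json and mymodel-0010.params
model2 <- mx.model.load("mymodel", iteration = 10)        # restores symbol plus arg/aux params

model_r <- mx.serialize(model)         # NDArrays converted to plain R arrays
save(model_r, file = "mymodel.RData")  # now safe to save/load like any R object
load("mymodel.RData")
model3 <- mx.unserialize(model_r)      # back to NDArray-backed parameters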
- -# Internal function to do multiple device training on RNN -mx.model.train.buckets <- function(symbol, ctx, train.data, eval.data, - dlist, arg.params, aux.params, - grad.req, arg.update.idx, - begin.round, end.round, optimizer, metric, metric_cpu, - epoch.end.callback, batch.end.callback, kvstore, verbose) { - - ndevice <- length(ctx) - if (verbose) - message("Start training with ", ndevice, " devices") - - input.names <- names(dlist) - arg.params.names <- names(arg.params) - - if (is.list(symbol)) sym_ini <- symbol[[names(train.data$bucketID)]] else sym_ini <- symbol - - slices <- lapply(seq_len(ndevice), function(i) { - sapply(names(dlist), function(n) mx.nd.split(data=dlist[[n]], num_outputs = ndevice, axis = 0, squeeze_axis = FALSE)) - }) - - train.execs <- lapply(seq_len(ndevice), function(i) { - s <- slices[[i]] - mx.symbol.bind(symbol = sym_ini, arg.arrays = c(s, arg.params)[arg.update.idx], - aux.arrays = aux.params, ctx = ctx[[i]], grad.req = grad.req) - }) - - # KVStore related stuffs - params.index <- as.integer( - mx.util.filter.null( - lapply(seq_along(train.execs[[1]]$ref.grad.arrays), function(k) { - if (!is.null(train.execs[[1]]$ref.grad.arrays[[k]])) k else NULL} - ))) - - update.on.kvstore <- FALSE - if (!is.null(kvstore) && kvstore$update.on.kvstore) { - update.on.kvstore <- TRUE - kvstore$set.optimizer(optimizer) - } else { - updaters <- lapply(seq_len(ndevice), function(i) { - mx.opt.get.updater(optimizer, train.execs[[i]]$ref.arg.arrays, ctx = ctx[[i]]) - }) - } - - if (!is.null(kvstore)) { - kvstore$init(params.index, train.execs[[1]]$ref.arg.arrays[params.index]) - } - - # train over specified number of epochs - for (iteration in begin.round:end.round) { - nbatch <- 0 - gc() - if (!is.null(metric)) { - train.metric <- metric$init() - } - train.data$reset() - while (train.data$iter.next()) { - - # Get iterator data - dlist <- train.data$value()[input.names] - - # Slice inputs for multi-devices - slices <- lapply(seq_len(ndevice), function(i) { - sapply(names(dlist), function(n) mx.nd.split(data=dlist[[n]], num_outputs = ndevice, axis = 0, squeeze_axis = F)) - }) - - # Assign input to each executor - bug on inference if using BatchNorm - if (is.list(symbol)) { - train.execs <- lapply(seq_len(ndevice), function(i) { - s <- slices[[i]] - mx.symbol.bind(symbol = symbol[[names(train.data$bucketID)]], - arg.arrays = c(s, train.execs[[i]]$arg.arrays[arg.params.names])[arg.update.idx], - aux.arrays = train.execs[[i]]$aux.arrays, ctx = ctx[[i]], grad.req = grad.req) - }) - } else { - for (i in seq_len(ndevice)) { - s <- slices[[i]] - mx.exec.update.arg.arrays(train.execs[[i]], s, match.name=TRUE) - } - } - - # forward pass - for (texec in train.execs) { - mx.exec.forward(texec, is.train = TRUE) - } - - # copy of preds and labels for metric - if (!is.null(metric)) { - preds <- lapply(train.execs, function(texec) {texec$ref.outputs[[1]]}) - labels <- lapply(train.execs, function(texec) {texec$ref.arg.arrays[[input.names[length(input.names)]]]}) - if (metric_cpu) { - preds <- lapply(seq_along(train.execs), function(i) {mx.nd.copyto(preds[[i]], mx.cpu())}) - labels <- lapply(seq_along(train.execs), function(i) {mx.nd.copyto(labels[[i]], mx.cpu())}) - } - } - - # backward pass - for (texec in train.execs) { - mx.exec.backward(texec) - } - - if (!is.null(kvstore)) { - # push the gradient - kvstore$push(params.index, lapply(train.execs, function(texec) { - texec$ref.grad.arrays[params.index] - }), -params.index) - } - if (update.on.kvstore) { - # pull back weight - 
kvstore$pull(params.index, lapply(train.execs, function(texec) { - texec$ref.arg.arrays[params.index] - }), -params.index) - } else { - # pull back gradient sums - if (!is.null(kvstore)) { - kvstore$pull(params.index, lapply(train.execs, function(texec) { - texec$ref.grad.arrays[params.index] - }), -params.index) - } - arg.blocks <- lapply(seq_len(ndevice), function(i) { - updaters[[i]](train.execs[[i]]$ref.arg.arrays, train.execs[[i]]$ref.grad.arrays) - }) - for (i in seq_len(ndevice)) { - mx.exec.update.arg.arrays(train.execs[[i]], arg.blocks[[i]], skip.null = TRUE) - } - } - - # Update the evaluation metrics - if (!is.null(metric)) { - for (i in seq_len(ndevice)) { - train.metric <- metric$update(label = labels[[i]], - pred = preds[[i]], - state = train.metric) - } - } - - nbatch <- nbatch + 1 - - if (!is.null(batch.end.callback)) { - batch.end.callback(iteration, nbatch, environment()) - } - } - - if (!is.null(metric)) { - result <- metric$get(train.metric) - if (verbose) - message("[", iteration, "] Train-", result$name, "=", result$value) - } - - if (!is.null(eval.data)) { - if (!is.null(metric)) { - eval.metric <- metric$init() - } - eval.data$reset() - while (eval.data$iter.next()) { - - # Get iterator data - dlist <- eval.data$value()[input.names] - - # Slice input to multiple devices - slices <- lapply(seq_len(ndevice), function(i) { - sapply(names(dlist), function(n) mx.nd.split(data=dlist[[n]], num_outputs = ndevice, axis = 0, squeeze_axis = FALSE)) - }) - - # Assign input to each executor - bug on inference if using BatchNorm - if (is.list(symbol)) { - train.execs <- lapply(seq_len(ndevice), function(i) { - s <- slices[[i]] - mx.symbol.bind(symbol = symbol[[names(eval.data$bucketID)]], - arg.arrays = c(s, train.execs[[i]]$arg.arrays[arg.params.names])[arg.update.idx], - aux.arrays = train.execs[[i]]$aux.arrays, ctx = ctx[[i]], grad.req = grad.req) - }) - } else { - for (i in seq_len(ndevice)) { - s <- slices[[i]] - mx.exec.update.arg.arrays(train.execs[[i]], s, match.name=TRUE) - } - } - - # forward pass - for (texec in train.execs) { - mx.exec.forward(texec, is.train = FALSE) - } - - # copy of preds and labels for metric and update metric - if (!is.null(metric)) { - preds <- lapply(train.execs, function(texec) {texec$ref.outputs[[1]]}) - labels <- lapply(train.execs, function(texec) {texec$ref.arg.arrays[[input.names[length(input.names)]]]}) - if (metric_cpu) { - preds <- lapply(seq_along(train.execs), function(i) {mx.nd.copyto(preds[[i]], mx.cpu())}) - labels <- lapply(seq_along(train.execs), function(i) {mx.nd.copyto(labels[[i]], mx.cpu())}) - } - for (i in seq_len(ndevice)) { - eval.metric <- metric$update(label = labels[[i]], - pred = preds[[i]], - state = eval.metric) - } - } - } - - if (!is.null(metric)) { - result <- metric$get(eval.metric) - if (verbose) { - message("[", iteration, "] Validation-", result$name, "=", - result$value) - } - } - } else { - eval.metric <- NULL - } - # get the model out - model <- mx.model.extract.model(sym_ini, train.execs) - - epoch_continue <- TRUE - if (!is.null(epoch.end.callback)) { - epoch_continue <- epoch.end.callback(iteration, 0, environment(), verbose = verbose) - } - - if (!epoch_continue) { - break - } - } - return(model) -} - - -# -#' Train RNN with bucket support -#' -#' @param symbol Symbol or list of Symbols representing the model -#' @param train.data Training data created by mx.io.bucket.iter -#' @param eval.data Evaluation data created by mx.io.bucket.iter -#' @param num.round int, number of epoch -#' @param initializer 
-#' @param optimizer -#' @param batch.end.callback -#' @param epoch.end.callback -#' @param begin.round -#' @param metric -#' @param ctx -#' @param kvstore -#' @param verbose -#' @param ... additional arguments passed to \code{mx.opt.create} when optimizer is given as a string -#' -#' @export -mx.model.buckets <- function(symbol, train.data, eval.data = NULL, metric = NULL, - arg.params = NULL, aux.params = NULL, fixed.params = NULL, - num.round = 1, begin.round = 1, - initializer = mx.init.uniform(0.01), optimizer = "sgd", ctx = NULL, - batch.end.callback = NULL, epoch.end.callback = NULL, - kvstore = "local", verbose = TRUE, metric_cpu = TRUE, ...) { - - if (!train.data$iter.next()) { - train.data$reset() - if (!train.data$iter.next()) - stop("Empty train.data") - } - - if (!is.null(eval.data)) { - if (!eval.data$iter.next()) { - eval.data$reset() - if (!eval.data$iter.next()) - stop("Empty eval.data") - } - } - - if (is.null(ctx)) - ctx <- mx.ctx.default() - if (is.mx.context(ctx)) { - ctx <- list(ctx) - } - if (!is.list(ctx)) - stop("ctx must be mx.context or list of mx.context") - - sym_ini <- if (is.list(symbol)) symbol[[names(train.data$bucketID)]] else symbol - - arguments <- sym_ini$arguments - input.names <- intersect(names(train.data$value()), arguments) - - input.shape <- sapply(input.names, function(n) { - dim(train.data$value()[[n]]) - }, simplify = FALSE) - - # create the optimizer once input.shape (and hence the batch size) is known - if (is.character(optimizer)) { - if (is.numeric(input.shape)) { - ndim <- length(input.shape) - batchsize <- input.shape[[ndim]] - } else { - ndim <- length(input.shape[[1]]) - batchsize <- input.shape[[1]][[ndim]] - } - optimizer <- mx.opt.create(optimizer, rescale.grad = (1/batchsize), ...) - } - - shapes <- sym_ini$infer.shape(input.shape) - - # assign arg.params and aux.params arguments to arg.params.input and aux.params.input - arg.params.input <- arg.params - aux.params.input <- aux.params - - # initialize all arguments with zeros - arg.params <- lapply(shapes$arg.shapes, function(shape) { - mx.nd.zeros(shape = shape, ctx = mx.cpu()) - }) - - # initialize input parameters - dlist <- arg.params[input.names] - - # initialize parameters - only argument ending with _weight and _bias are initialized - arg.params.ini <- mx.init.create(initializer = initializer, shape.array = shapes$arg.shapes, ctx = mx.cpu(), skip.unknown = TRUE) - - # assign initialized parameters to arg.params - arg.params[names(arg.params.ini)] <- arg.params.ini - - # assign input params to arg.params - arg.params[names(arg.params.input)] <- arg.params.input - - # remove input params from arg.params - arg.params[input.names] <- NULL - - # Grad request - grad.req <- rep("null", length(arguments)) - grad.req.write <- arguments %in% setdiff(names(arg.params.ini), fixed.params) - grad.req[grad.req.write] <- "write" - - # Arg array order - update_names <- c(input.names, names(arg.params)) - arg.update.idx <- match(arguments, update_names) - - # aux parameters setup - aux.params <- lapply(shapes$aux.shapes, function(shape) { - mx.nd.zeros(shape = shape, ctx = mx.cpu()) - }) - - aux.params.ini <- mx.init.create(initializer, shapes$aux.shapes, ctx = mx.cpu(), skip.unknown = FALSE) - if (length(aux.params) > 0) { - aux.params[names(aux.params.ini)] <- aux.params.ini - } else aux.params <- NULL - - aux.params[names(aux.params.input)] <- aux.params.input - - # kvstore initialization - kvstore <- mx.model.create.kvstore(kvstore, arg.params, length(ctx), - verbose = verbose) - - ### Execute training - model <- mx.model.train.buckets(symbol = symbol, ctx = ctx, train.data = train.data, eval.data = eval.data, - dlist = dlist, arg.params = arg.params, aux.params =
aux.params, - grad.req = grad.req, arg.update.idx = arg.update.idx, - optimizer = optimizer, metric = metric, - begin.round = begin.round, end.round = num.round, - batch.end.callback = batch.end.callback, epoch.end.callback = epoch.end.callback, - kvstore = kvstore, verbose = verbose, metric_cpu = metric_cpu) - - return(model) -} diff --git a/R-package/R/mx.io.bucket.iter.R b/R-package/R/mx.io.bucket.iter.R deleted file mode 100644 index 400baf3062a1..000000000000 --- a/R-package/R/mx.io.bucket.iter.R +++ /dev/null @@ -1,122 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -BucketIter <- setRefClass("BucketIter", fields = c("buckets", "bucket.names", "batch.size", - "data.mask.element", "shuffle", "bucket.plan", "bucketID", "epoch", "batch", "batch.per.bucket", - "last.batch.pad", "batch.per.epoch", "seed"), - methods = list( - initialize = function(buckets, - batch.size, data.mask.element = 0, shuffle = FALSE, seed = 123) { - .self$buckets <- buckets - .self$bucket.names <- names(.self$buckets) - .self$batch.size <- batch.size - .self$data.mask.element <- data.mask.element - .self$epoch <- 0 - .self$batch <- 0 - .self$shuffle <- shuffle - .self$batch.per.bucket <- 0 - .self$batch.per.epoch <- 0 - .self$bucket.plan <- NULL - .self$bucketID <- NULL - .self$seed <- seed - .self - }, reset = function() { - buckets_nb <- length(bucket.names) - buckets_id <- seq_len(buckets_nb) - buckets.size <- sapply(.self$buckets, function(x) { - tail(dim(x$data), 1) - }) - .self$batch.per.bucket <- ceiling(buckets.size/.self$batch.size) - .self$last.batch.pad <- .self$batch.size - buckets.size %% .self$batch.size - .self$last.batch.pad[.self$last.batch.pad == .self$batch.size] <- 0 - - .self$batch.per.epoch <- sum(.self$batch.per.bucket) - # Number of batches per epoch given the batch.size - .self$batch.per.epoch <- sum(.self$batch.per.bucket) - .self$epoch <- .self$epoch + 1 - .self$batch <- 0 - - if (.self$shuffle) { - set.seed(.self$seed) - bucket_plan_names <- sample(rep(names(.self$batch.per.bucket), times = .self$batch.per.bucket)) - .self$bucket.plan <- ave(bucket_plan_names == bucket_plan_names, bucket_plan_names, - FUN = cumsum) - names(.self$bucket.plan) <- bucket_plan_names - # Return first BucketID at reset for initialization of the model - .self$bucketID <- .self$bucket.plan[1] - - .self$buckets <- lapply(.self$buckets, function(x) { - shuffle_id <- sample(tail(dim(x$data), 1)) - if (length(dim(x$label)) == 0) { - list(data = x$data[, shuffle_id], label = x$label[shuffle_id]) - } else { - list(data = x$data[, shuffle_id], label = x$label[, shuffle_id]) - } - }) - } else { - bucket_plan_names <- rep(names(.self$batch.per.bucket), times = .self$batch.per.bucket) - .self$bucket.plan <- ave(bucket_plan_names == bucket_plan_names, bucket_plan_names, - FUN = 
cumsum) - names(.self$bucket.plan) <- bucket_plan_names - } - }, iter.next = function() { - .self$batch <- .self$batch + 1 - .self$bucketID <- .self$bucket.plan[batch] - return(.self$batch <= .self$batch.per.epoch) - }, value = function() { - # bucketID is a named integer: the integer indicates the batch id for the given - # bucket (used to fetch appropriate samples within the bucket) the name is a - # character containing the sequence length of the bucket (used to unroll the rnn - # to appropriate sequence length) - idx <- (.self$bucketID - 1) * (.self$batch.size) + seq_len(batch.size) - - # Reuse first idx for padding - if (bucketID == .self$batch.per.bucket[names(.self$bucketID)] & !.self$last.batch.pad[names(.self$bucketID)] == 0) { - idx <- c(idx[seq_len(.self$batch.size - .self$last.batch.pad[names(.self$bucketID)])], seq_len(.self$last.batch.pad[names(.self$bucketID)])) - } - - data <- .self$buckets[[names(.self$bucketID)]]$data[, idx, drop = F] - seq.mask <- as.integer(names(bucketID)) - apply(data==.self$data.mask.element, 2, sum) - if (length(dim(.self$buckets[[names(.self$bucketID)]]$label)) == 0) { - label <- .self$buckets[[names(.self$bucketID)]]$label[idx] - } else { - label <- .self$buckets[[names(.self$bucketID)]]$label[, idx, drop = F] - } - return(list(data = mx.nd.array(data), seq.mask = mx.nd.array(seq.mask), - label = mx.nd.array(label))) - }, num.pad = function() { - if (bucketID == .self$batch.per.bucket[names(.self$bucketID)] & !.self$last.batch.pad[names(.self$bucketID)] == 0){ - return(.self$last.batch.pad[names(.self$bucketID)]) - } else return(0) - }, finalize = function() { - })) - -# -#' Create Bucket Iter -#' -#' @param buckets The data array. -#' @param batch.size The batch size used to pack the array. -#' @param data.mask.element The element to mask -#' @param shuffle Whether shuffle the data -#' @param seed The random seed -#' -#' @export -mx.io.bucket.iter <- function(buckets, batch.size, data.mask.element = 0, shuffle = FALSE, - seed = 123) { - return(BucketIter$new(buckets = buckets, batch.size = batch.size, data.mask.element = data.mask.element, - shuffle = shuffle, seed = seed)) -} \ No newline at end of file diff --git a/R-package/R/ndarray.R b/R-package/R/ndarray.R deleted file mode 100644 index 18c005fce193..000000000000 --- a/R-package/R/ndarray.R +++ /dev/null @@ -1,217 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
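As a usage note for the bucket iterator removed above: `buckets` is expected to be a named list keyed by sequence length, each element holding a data array with samples in the last dimension plus an aligned label vector. A minimal sketch follows; the bucket names, shapes, and batch size are illustrative, not taken from this change:

# Sketch only: two buckets of sequence length 3 and 5.
buckets <- list(
  "3" = list(data = matrix(sample(1:20, 3 * 40, replace = TRUE), nrow = 3),
             label = sample(0:1, 40, replace = TRUE)),
  "5" = list(data = matrix(sample(1:20, 5 * 60, replace = TRUE), nrow = 5),
             label = sample(0:1, 60, replace = TRUE)))
iter <- mx.io.bucket.iter(buckets, batch.size = 16, shuffle = TRUE)
iter$reset()                 # builds the per-epoch bucket plan
while (iter$iter.next()) {
  batch <- iter$value()      # list(data, seq.mask, label) of mx.nd.array
  # names(iter$bucketID) is the sequence length of the current bucket
}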
- -#' Load an mx.nd.array object from disk -#' -#' @param filename the filename (including the path) -#' -#' @examples -#' mat = mx.nd.array(1:3) -#' mx.nd.save(mat, 'temp.mat') -#' mat2 = mx.nd.load('temp.mat') -#' as.array(mat) -#' as.array(mat2) -#' -#' @export -mx.nd.load <- function(filename) { - filename <- path.expand(filename) - return(mx.nd.internal.load(filename)) -} - -#' Save an mx.nd.array object -#' -#' @param ndarray the \code{mx.nd.array} object -#' @param filename the filename (including the path) -#' -#' @examples -#' mat = mx.nd.array(1:3) -#' mx.nd.save(mat, 'temp.mat') -#' mat2 = mx.nd.load('temp.mat') -#' as.array(mat) -#' as.array(mat2[[1]]) -#' -#' @export -mx.nd.save <- function(ndarray, filename) { - filename <- path.expand(filename) - if (!is.list(ndarray)) { - mx.nd.internal.save(list(ndarray), filename) - } else { - mx.nd.internal.save(ndarray, filename) - } -} - -mx.nd.internal.empty <- function(shape, ctx=NULL) { - if (is.null(ctx)) ctx <- mx.ctx.default() - if (!is.mx.context(ctx)) stop("wrong mx.context object, please specify with mx.cpu() or mx.gpu()") - return (mx.nd.internal.empty.array(shape, ctx)) -} - -#' Generate an mx.nd.array object with zeros -#' -#' @param shape the dimension of the \code{mx.nd.array} -#' @param ctx optional The context device of the array. mx.ctx.default() will be used by default. -#' -#' @examples -#' mat = mx.nd.zeros(10) -#' as.array(mat) -#' mat2 = mx.nd.zeros(c(5,5)) -#' as.array(mat2) -#' mat3 = mx.nd.zeros(c(3,3,3)) -#' as.array(mat3) -#' -#' @export -mx.nd.zeros <- function(shape, ctx=NULL) { - ret <- mx.nd.internal.empty(shape, ctx) - return (mx.nd.internal.set.value(0.0, out=ret)) -} - -#' Generate an mx.ndarray object with ones -#' -#' @param shape the dimension of the \code{mx.ndarray} -#' @param ctx optional The context device of the array. mx.ctx.default() will be used by default. -#' -#' @examples -#' mat = mx.nd.ones(10) -#' as.array(mat) -#' mat2 = mx.nd.ones(c(5,5)) -#' as.array(mat2) -#' mat3 = mx.nd.ones(c(3,3,3)) -#' as.array(mat3) -#' -#' @export -mx.nd.ones <- function(shape, ctx=NULL) { - ret <- mx.nd.internal.empty(shape, ctx) - return (mx.nd.internal.set.value(1.0, out=ret)) -} - -#' Generate an mx.ndarray object on ctx, with data copied from src -#' -#' @param src The source mx.ndarray object. -#' @param ctx The target context. -#' -#' @export -mx.nd.copyto <- function(src, ctx) { - ret <- mx.nd.internal.empty(dim(src), ctx) - return (mx.nd.internal.copyto(src, out=ret)) -} - -#' Create a new \code{mx.ndarray} that copies the content from src on ctx. -#' -#' @param src.array Source array data of class \code{array}, \code{vector} or \code{matrix}. -#' @param ctx optional The context device of the array. mx.ctx.default() will be used by default.
-#' -#' @return An \code{mx.ndarray} (an Rcpp_MXNDArray object) -#' -#' @rdname mx.nd.array -#' -#' @examples -#' mat = mx.nd.array(1:10) -#' mat = 1 - mat + (2 * mat)/(mat + 0.5) -#' as.array(mat) -#' -#' @export -mx.nd.array <- function(src.array, ctx=NULL) { - if (is.null(ctx)) ctx <- mx.ctx.default() - if (!is.array(src.array)) { - if (!is.vector(src.array) && !is.matrix(src.array)) { - stop("mx.nd.array takes an object of class array, vector or matrix only.") - } else { - src.array <- as.array(src.array) - } - } - return (mx.nd.internal.array(src.array, ctx)) -} - -is.MXNDArray <- function(nd) { - class(nd) == "MXNDArray" -} - -#' Check if src.array is an mx.ndarray -#' -#' @return Logical indicator -#' -#' @examples -#' mat = mx.nd.array(1:10) -#' is.mx.ndarray(mat) -#' mat2 = 1:10 -#' is.mx.ndarray(mat2) -#' -#' @export -is.mx.ndarray <- function(src.array) { - is.MXNDArray(src.array) -} - -#' Binary operator overloading of mx.ndarray -#' @param e1 The first operand -#' @param e2 The second operand -#' @export -Ops.MXNDArray <- function(e1, e2) { - if (missing(e2)) { - mx.nd.internal.dispatch.Ops(.Generic, 0, e1) - } else { - mx.nd.internal.dispatch.Ops(.Generic, e1, e2) - } -} - -#' Dimension operator overload of mx.ndarray -#' @param nd The mx.ndarray -#' @export -dim.MXNDArray <- function(nd) { - mx.nd.internal.dim(nd) -} - -#' Length operator overload of mx.ndarray -#' @param nd The mx.ndarray -#' @export -length.MXNDArray <- function(nd) { - mx.nd.internal.length(nd) -} - -#' as.array operator overload of mx.ndarray -#' @param nd The mx.ndarray -#' @export -as.array.MXNDArray <- function(nd) { - mx.nd.internal.as.array(nd) -} - -#' as.matrix operator overload of mx.ndarray -#' @param nd The mx.ndarray -#' @export -as.matrix.MXNDArray <- function(nd) { - if (length(dim(nd)) != 2) { - stop("The input argument is not a two dimensional matrix.") - } - as.matrix(as.array(nd)) -} - -#' print operator overload of mx.ndarray -#' @param nd The mx.ndarray -#' @export -print.MXNDArray <- function(nd) { - print(as.array(nd)) -} - -# TODO(KK) use generics? - -#' Get the context of mx.ndarray -#' @param nd The mx.ndarray -#' @export -ctx <- function(nd) { - mx.nd.internal.ctx(nd) -} diff --git a/R-package/R/optimizer.R b/R-package/R/optimizer.R deleted file mode 100644 index be8d977b2a98..000000000000 --- a/R-package/R/optimizer.R +++ /dev/null @@ -1,608 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -#' Create an SGD optimizer with respective parameters. -#' Perform SGD with momentum update -#' -#' @param learning.rate float, default=0.01 -#' The initial learning rate. -#' @param momentum float, default=0 -#' The momentum value -#' @param wd float, default=0.0 -#' L2 regularization coefficient added to all the weights.
-#' @param rescale.grad float, default=1.0 -#' rescaling factor of gradient. -#' @param clip_gradient float, optional, default=-1 (no clipping if < 0) -#' clip gradient in range [-clip_gradient, clip_gradient]. -#' @param lr_scheduler function, optional -#' The learning rate scheduler. -mx.opt.sgd <- function(learning.rate = 0.01, - momentum = 0, - wd = 0, - rescale.grad = 1, - clip_gradient = -1, - lr_scheduler = NULL) { - - lr <- learning.rate - count <- 0 - num_update <- 0 - - sgd <- new.env() - sgd$lr <- lr - sgd$count <- 0 - sgd$num_update <- 0 - - create_exec <- function(index, weight_dim, ctx) { - - if (momentum == 0) { - - weight <- mx.symbol.Variable("weight") - grad <- mx.symbol.Variable("grad") - - sym <- mx.symbol.sgd_update(weight, - grad, - lr = lr, - wd = wd, - rescale_grad = rescale.grad, - clip_gradient = clip_gradient, - name = "w") - } else { - - weight <- mx.symbol.Variable("weight") - grad <- mx.symbol.Variable("grad") - mom <- mx.symbol.Variable("mom") - - sym <- mx.symbol.sgd_mom_update(weight, - grad, - mom, - lr = lr, - wd = wd, - momentum= momentum, - rescale_grad = rescale.grad, - clip_gradient = clip_gradient, - name = "w") - } - exec <- mx.simple.bind(symbol = sym, weight = weight_dim, ctx = ctx, grad.req = "null") - return(exec) - } - - update <- function(index, exec_w, weight, grad) { - - if (!is.null(lr_scheduler)){ - lr_scheduler(sgd) ## changing lr - lr <- sgd$lr - ## update count - indexKey <- paste0('ik', index) - if (!exists(envir = sgd, x = indexKey, inherits = FALSE)){ - sgd[[indexKey]] <- 0 - } else { - indexValue <- sgd[[indexKey]] - sgd[[indexKey]] <- indexValue + 1 - sgd$num_update <- max(sgd$num_update, sgd[[indexKey]]) - } - } - - mx.exec.update.arg.arrays(exec_w, arg.arrays = list(weight = weight,grad = grad), match.name = T) - mx.exec.forward(exec_w, is.train = F) - return(exec_w$ref.outputs$w_output) - } - return(list(create_exec = create_exec, update = update)) -} - -#' Create an RMSProp optimizer with respective parameters. -#' Reference: Tieleman T, Hinton G. Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude[J]. COURSERA: Neural Networks for Machine Learning, 2012, 4(2). -#' The code follows: http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013. -#' -#' @param learning.rate float, default=0.002 -#' The initial learning rate. -#' @param rho float, default=0.95 -#' decay factor of moving average for gradient, gradient^2. -#' @param momentum float, default=0.9 -#' "momentum" factor. -#' @param epsilon float, default=1e-4 -#' @param wd float, default=0.0 -#' L2 regularization coefficient add to all the weights. -#' @param rescale.grad float, default=1.0 -#' rescaling factor of gradient. -#' @param clip_gradient float, optional, default=-1 (no clipping if < 0) -#' clip gradient in range [-clip_gradient, clip_gradient]. -#' @param lr_scheduler function, optional -#' The learning rate scheduler. 
-#' -mx.opt.rmsprop <- function(learning.rate = 0.002, - centered = TRUE, - rho = 0.95, - momentum = 0.9, - epsilon = 1e-4, - wd = 0, - rescale.grad = 1, - clip_gradient = -1, - lr_scheduler = NULL) { - - lr <- learning.rate - count <- 0 - num_update <- 0 - - rmsprop <- new.env() - rmsprop$lr <- lr - rmsprop$count <- 0 - rmsprop$num_update <- 0 - - create_exec <- function(index, weight_dim, ctx) { - - if (centered) { - - weight <- mx.symbol.Variable("weight") - grad <- mx.symbol.Variable("grad") - n <- mx.symbol.Variable("n") - g <- mx.symbol.Variable("g") - delta <- mx.symbol.Variable("delta") - - sym <- mx.symbol.rmspropalex_update(weight, - grad, - n, - g, - delta, - lr = lr, - rho = rho, - momentum = momentum, - epsilon = epsilon, - wd = wd, - rescale_grad = rescale.grad, - clip_gradient = clip_gradient, - name = "w") - } else { - weight <- mx.symbol.Variable("weight") - grad <- mx.symbol.Variable("grad") - n <- mx.symbol.Variable("n") - - sym <- mx.symbol.rmsprop_update(weight, - grad, - n, - lr = lr, - rho = rho, - epsilon = epsilon, - wd = wd, - rescale_grad = rescale.grad, - clip_gradient = clip_gradient, - name = "w") - } - - exec <- mx.simple.bind(symbol = sym, weight = weight_dim, ctx = ctx, grad.req = "null") - return(exec) - } - - update <- function(index, exec_w, weight, grad) { - if (!is.null(lr_scheduler)){ - lr_scheduler(rmsprop) ## changing lr - lr <- rmsprop$lr - ## update count - indexKey <- paste0('ik', index) - if (!exists(envir = rmsprop, x = indexKey, inherits = FALSE)) { - rmsprop[[indexKey]] <- 0 - } else { - indexValue <- rmsprop[[indexKey]] - rmsprop[[indexKey]] <- indexValue + 1 - rmsprop$num_update <- max(rmsprop$num_update, rmsprop[[indexKey]]) - } - } - - mx.exec.update.arg.arrays(exec_w, arg.arrays = list(weight = weight,grad = grad), match.name = T) - mx.exec.forward(exec_w, is.train = F) - return(exec_w$ref.outputs$w_output) - } - return(list(create_exec = create_exec, update = update)) -} - -#' Create an Adam optimizer with respective parameters. -#' Adam optimizer as described in [King2014]. -#' -#' [King2014] Diederik Kingma, Jimmy Ba, -#' Adam: A Method for Stochastic Optimization, -#' http://arxiv.org/abs/1412.6980 -#' -#' @param learning.rate float, default=1e-3 -#' The initial learning rate. -#' @param beta1 float, default=0.9 -#' Exponential decay rate for the first moment estimates. -#' @param beta2 float, default=0.999 -#' Exponential decay rate for the second moment estimates. -#' @param epsilon float, default=1e-8 -#' @param wd float, default=0.0 -#' L2 regularization coefficient add to all the weights. -#' @param rescale.grad float, default=1.0 -#' rescaling factor of gradient. -#' @param clip_gradient float, optional, default=-1 (no clipping if < 0) -#' clip gradient in range [-clip_gradient, clip_gradient]. -#' @param lr_scheduler function, optional -#' The learning rate scheduler. 
-#' -mx.opt.adam <- function(learning.rate = 1e-3, - beta1 = 0.9, - beta2 = 0.999, - epsilon = 1e-8, - wd = 0, - rescale.grad = 1, - clip_gradient = -1, - lr_scheduler = NULL) { - - lr <- learning.rate - count <- 0 - num_update <- 0 - - adam <- new.env() - adam$lr <- lr - adam$count <- 0 - adam$num_update <- 0 - - create_exec <- function(index, weight_dim, ctx) { - - weight <- mx.symbol.Variable("weight") - grad <- mx.symbol.Variable("grad") - mean <- mx.symbol.Variable("mean") - var <- mx.symbol.Variable("var") - - sym <- mx.symbol.adam_update(weight, - grad, - mean, - var, - lr = lr, - beta1 = beta1, - beta2 = beta2, - epsilon = epsilon, - wd = wd, - rescale_grad = rescale.grad, - clip_gradient = clip_gradient, - name = "w") - - exec <- mx.simple.bind(symbol = sym, weight = weight_dim, ctx = ctx, grad.req = "null") - return(exec) - } - - update <- function(index, exec_w, weight, grad) { - if (!is.null(lr_scheduler)){ - lr_scheduler(adam) ## changing lr - lr <- adam$lr - ## update count - indexKey <- paste0('ik', index) - if (!exists(envir = adam, x = indexKey, inherits = FALSE)) { - adam[[indexKey]] <- 0 - } else { - indexValue <- adam[[indexKey]] - adam[[indexKey]] <- indexValue + 1 - adam$num_update <- max(adam$num_update, adam[[indexKey]]) - } - } - - mx.exec.update.arg.arrays(exec_w, arg.arrays = list(weight = weight,grad = grad), match.name = T) - mx.exec.forward(exec_w, is.train = F) - return(exec_w$ref.outputs$w_output) - } - return(list(create_exec = create_exec, update = update)) -} - - - -#' Create an AdaGrad optimizer with respective parameters. -#' AdaGrad optimizer of Duchi et al., 2011. -#' -#' This code follows the version in http://arxiv.org/pdf/1212.5701v1.pdf Eq(5) -#' by Matthew D. Zeiler, 2012. AdaGrad will help the network to converge faster -#' in some cases. -#' -#' @param learning.rate float, default=0.05 -#' Step size. -#' @param epsilon float, default=1e-8 -#' @param wd float, default=0.0 -#' L2 regularization coefficient added to all the weights. -#' @param rescale.grad float, default=1.0 -#' rescaling factor of gradient. -#' @param clip_gradient float, default=-1.0 (no clipping if < 0) -#' clip gradient in range [-clip_gradient, clip_gradient]. -#' @param lr_scheduler function, optional -#' The learning rate scheduler. -#' -mx.opt.adagrad <- function(learning.rate = 0.05, - epsilon = 1e-8, - wd = 0, - rescale.grad = 1, - clip_gradient = -1, - lr_scheduler = NULL) { - # use lr as short for learning rate.
- lr <- learning.rate - count <- 0 - num_update <- 0 - - adagrad <- new.env() - adagrad$lr <- lr - adagrad$count <- 0 - adagrad$num_update <- 0 - - create_exec <- function(index, weight_dim, ctx) { - - weight <- mx.symbol.Variable("weight") - grad <- mx.symbol.Variable("grad") - history <- mx.symbol.Variable("history") - - grad <- grad * rescale.grad - if (!is.null(clip_gradient)) { - if (clip_gradient >= 0) { - grad <- mx.symbol.clip(data = grad, a.min = -clip_gradient, a.max = clip_gradient) - } - } - - history <- history + (grad * grad) - weight <- weight - lr * (grad / mx.symbol.sqrt(history + epsilon) + wd * weight) - - w <- mx.symbol.identity(weight, name = "w") - h <- mx.symbol.identity(history, name = "h") - sym <- mx.symbol.Group(c(w, h)) - - exec <- mx.simple.bind(symbol = sym, weight = weight_dim, ctx = ctx, grad.req = "null") - return(exec) - } - - update <- function(index, exec_w, weight, grad) { - if (!is.null(lr_scheduler)) { - lr_scheduler(adagrad) ## changing lr - lr <- adagrad$lr - ## update count - indexKey <- paste0('ik', index) - if (!exists(envir = adagrad, x = indexKey, inherits = FALSE)) { - adagrad[[indexKey]] <- 0 - } else { - indexValue <- adagrad[[indexKey]] - adagrad[[indexKey]] <- indexValue + 1 - adagrad$num_update <- max(adagrad$num_update, adagrad[[indexKey]]) - } - } - - mx.exec.update.arg.arrays(exec_w, arg.arrays = list(weight = weight,grad = grad), match.name = T) - mx.exec.forward(exec_w, is.train = F) - - # update state - mx.exec.update.arg.arrays(exec_w, arg.arrays = list(history = exec_w$ref.outputs$h_output), match.name = T) - - return(exec_w$ref.outputs$w_output) - } - return(list(create_exec = create_exec, update = update)) -} - - -#' Create an AdaDelta optimizer with respective parameters. -#' -#' AdaDelta optimizer as described in Zeiler, M. D. (2012). -#' *ADADELTA: An adaptive learning rate method.* -#' http://arxiv.org/abs/1212.5701 -#' -#' @param rho float, default=0.90 -#' Decay rate for both squared gradients and delta x. -#' @param epsilon float, default=1e-5 -#' The constant as described in the thesis. -#' @param wd float, default=0.0 -#' L2 regularization coefficient add to all the weights. -#' @param rescale.grad float, default=1 -#' rescaling factor of gradient. -#' @param clip_gradient float, default=-1 (no clipping if < 0) -#' clip gradient in range [-clip_gradient, clip_gradient]. 
-#' -mx.opt.adadelta <- function(rho = 0.90, - epsilon = 1e-5, - wd = 0, - rescale.grad = 1, - clip_gradient = -1) { - adadelta <- new.env() - - create_exec <- function(index, weight_dim, ctx) { - weight <- mx.symbol.Variable("weight") - grad <- mx.symbol.Variable("grad") - acc.g <- mx.symbol.Variable("acc.g") - acc.delta <- mx.symbol.Variable("acc.delta") - - grad <- grad * rescale.grad - if (!is.null(clip_gradient)) { - if (clip_gradient >= 0) { - grad <- mx.symbol.clip(data = grad, a.min = -clip_gradient, a.max = clip_gradient) - } - } - - # update state (acc.g, acc.delta) - acc.g <- rho * acc.g + (1 - rho) * (grad * grad) - current.delta <- mx.symbol.sqrt(acc.delta + epsilon) / mx.symbol.sqrt(acc.g + epsilon) * grad - acc.delta <- rho * acc.delta + (1 - rho) * (current.delta * current.delta) - weight <- weight - current.delta - wd * weight - - w <- mx.symbol.identity(weight, name = "w") - g <- mx.symbol.identity(acc.g, name = "g") - delta <- mx.symbol.identity(acc.delta, name = "delta") - sym <- mx.symbol.Group(c(w, g, delta)) - - exec <- mx.simple.bind(symbol = sym, weight = weight_dim, ctx = ctx, grad.req = "null") - return(exec) - } - - update <- function(index, exec_w, weight, grad) { - - mx.exec.update.arg.arrays(exec_w, arg.arrays = list(weight = weight,grad = grad), match.name = T) - mx.exec.forward(exec_w, is.train = F) - - # update state - mx.exec.update.arg.arrays(exec_w, - arg.arrays = list( - acc.g = exec_w$ref.outputs$g_output, - acc.delta = exec_w$ref.outputs$delta_output), - match.name = T) - - return(exec_w$ref.outputs$w_output) - } - return(list(create_exec = create_exec, update = update)) -} - - -#' Create a Nesterov Accelerated SGD( NAG) optimizer. -#' -#' NAG optimizer is described in Aleksandar Botev. et al (2016). -#' *NAG: A Nesterov accelerated SGD.* -#' https://arxiv.org/pdf/1607.01981.pdf -#' -#' @param learning.rate float, default=0.01 -#' The initial learning rate. -#' @param momentum float, default=0 -#' The momentum value -#' @param wd float, default=0.0 -#' L2 regularization coefficient added to all the weights. -#' @param rescale.grad float, default=1.0 -#' rescaling factor of gradient. -#' @param clip_gradient float, optional, default=-1 (no clipping if < 0) -#' clip gradient in range [-clip_gradient, clip_gradient]. -#' @param lr_scheduler function, optional -#' The learning rate scheduler. 
-#' -mx.opt.nag <- function(learning.rate = 0.01, - momentum = 0, - wd = 0, - rescale.grad = 1, - clip_gradient = -1, - lr_scheduler = NULL) { - - lr <- learning.rate - count <- 0 - num_update <- 0 - - nag <- new.env() - nag$lr <- learning.rate - nag$count <- 0 - nag$num_update <- 0 - - create_exec <- function(index, weight_dim, ctx) { - - weight <- mx.symbol.Variable("weight") - grad <- mx.symbol.Variable("grad") - mom <- mx.symbol.Variable("mom") - grad <- grad * rescale.grad - - if (!is.null(clip_gradient)) { - if (clip_gradient >= 0) { - grad <- mx.symbol.clip(data = grad, a.min = -clip_gradient, a.max = clip_gradient) - } - } - - if (momentum == 0) { - - weight <- weight - lr * (grad + (wd * weight)) - w <- mx.symbol.identity(weight, name = "w") - sym <- mx.symbol.Group(c(w)) - - } else { - - mom <- momentum * mom + grad + wd * weight - grad <- momentum * mom + grad - weight <- weight - lr * grad - - w <- mx.symbol.identity(weight, name = "w") - m <- mx.symbol.identity(mom, name = "m") - sym <- mx.symbol.Group(c(w, m)) - - } - - exec <- mx.simple.bind(symbol = sym, weight = weight_dim, ctx = ctx, grad.req = "null") - return(exec) - } - - update <- function(index, exec_w, weight, grad) { - - if (!is.null(lr_scheduler)){ - lr_scheduler(nag) ## changing lr - lr <- nag$lr - ## update count - indexKey <- paste0('ik', index) - if (!exists(envir = nag, x = indexKey, inherits = FALSE)){ - nag[[indexKey]] <- 0 - } else { - indexValue <- nag[[indexKey]] - nag[[indexKey]] <- indexValue + 1 - nag$num_update <- max(nag$num_update, nag[[indexKey]]) - } - } - - mx.exec.update.arg.arrays(exec_w, - arg.arrays = list(weight = weight,grad = grad), - match.name = T) - mx.exec.forward(exec_w, is.train = F) - - # update state - if (!is.null(exec_w$ref.outputs$m_output)){ - mx.exec.update.arg.arrays(exec_w, - arg.arrays = list(mom = exec_w$ref.outputs$m_output), - match.name = T) - } - - return(exec_w$ref.outputs$w_output) - } - return(list(create_exec = create_exec, update = update)) -} - - -#' Create an optimizer by name and parameters -#' -#' @param name The name of the optimizer -#' @param ... Additional arguments -#' -#' @export -mx.opt.create <- function(name, ...) { - switch(name, - "sgd" = mx.opt.sgd(...), - "rmsprop" = mx.opt.rmsprop(...), - "adam" = mx.opt.adam(...), - "adagrad" = mx.opt.adagrad(...), - "adadelta" = mx.opt.adadelta(...), - "nag" = mx.opt.nag(...), - stop("Unknown optimizer ", name)) -} - -#' Get an updater closure that can take list of weight and gradient -#' and return updated list of weight. -#' -#' @param optimizer The optimizer -#' @param weights The weights to be optimized -#' -#' @export -mx.opt.get.updater <- function(optimizer, weights, ctx) { - - exec_list <- lapply(seq_along(weights), function(i) { - if (is.null(weights[[i]])) { - return(NULL) - } else { - optimizer$create_exec(index = i, weight_dim = dim(weights[[i]]), ctx = ctx) - } - }) - - update <- optimizer$update - - update.closure <- function(weight, grad) { - - weight_list <- lapply(seq_along(weight), function(i) { - if (!is.null(grad[[i]])) { - return(update(i, exec_list[[i]], weight[[i]], grad[[i]])) - } else { - return(NULL) - } - }) - return(weight_list) - } - return(update.closure) -} diff --git a/R-package/R/profiler.R b/R-package/R/profiler.R deleted file mode 100644 index bed7ff81cc87..000000000000 --- a/R-package/R/profiler.R +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# profiler setting methods -# - -#' @export -MX.PROF.STATE <- list(STOP = 0L, RUN = 1L) - -#' Set up the configuration of profiler. -#' -#' @param params list of key/value pairs indicating configuration parameters: -#' profile_symbolic : boolean, whether to profile symbolic operators -#' profile_imperative : boolean, whether to profile imperative operators -#' profile_memory : boolean, whether to profile memory usage -#' profile_api : boolean, whether to profile the C API -#' file_name : string, output file for profile data -#' continuous_dump : boolean, whether to periodically dump profiling data to file -#' dump_period : float, seconds between profile data dumps -#' @export -mx.profiler.config <- function(params) { - mx.internal.profiler.config(params) -} - -#' Set up the profiler state to record operators. -#' -#' @param state Indicating whether to run the profiler, can be 'MX.PROF.STATE$RUN' or 'MX.PROF.STATE$STOP'. Default is `MX.PROF.STATE$STOP`. -#' -#' @export -mx.profiler.state <- function(state = MX.PROF.STATE$STOP) { - mx.internal.profiler.state(state) -} diff --git a/R-package/R/random.R b/R-package/R/random.R deleted file mode 100644 index 173f777bee2d..000000000000 --- a/R-package/R/random.R +++ /dev/null @@ -1,94 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# TODO(Tong, KK) check style to make it more like R.. - -#' Set the seed used by mxnet device-specific random number generators. -#' -#' @details -#' We have a specific reason why \code{mx.set.seed} is introduced, -#' instead of simply using \code{set.seed}. -#' -#' The reason is that most mxnet random number generators -#' can run on different devices, such as GPU. -#' We need to use massively parallel PRNGs on GPUs to get fast -#' random number generation. It can also be quite costly to seed these PRNGs. -#' So we introduced \code{mx.set.seed} for mxnet-specific device random numbers. -#' -#' @param seed the seed value for the device random number generators.
-#' -#' @examples -#' -#' mx.set.seed(0) -#' as.array(mx.runif(2)) -#' # 0.5488135 0.5928446 -#' mx.set.seed(0) -#' as.array(mx.rnorm(2)) -#' # 2.212206 1.163079 -#' -#' @export -mx.set.seed <- function(seed) { - mx.internal.set.seed(seed) -} - -#' Generate a uniform distribution in [min, max) with the specified shape. -#' -#' @param shape Dimension, The shape (dimension) of the result. -#' @param min numeric, The lower bound of the distribution. -#' @param max numeric, The upper bound of the distribution. -#' @param ctx optional The context device of the array. mx.ctx.default() will be used by default. -#' -#' @examples -#' -#' mx.set.seed(0) -#' as.array(mx.runif(2)) -#' # 0.5488135 0.5928446 -#' mx.set.seed(0) -#' as.array(mx.rnorm(2)) -#' # 2.212206 1.163079 -#' -#' @export -mx.runif <- function(shape, min=0, max=1, ctx=NULL) { - if (!is.numeric(min)) stop("mx.runif only accepts numeric min") - if (!is.numeric(max)) stop("mx.runif only accepts numeric max") - ret <- mx.nd.internal.empty(shape, ctx) - return (mx.nd.internal.random.uniform(min, max, shape=shape, out=ret)) -} - -#' Generate a normal distribution with mean and sd. -#' -#' @param shape Dimension, The shape (dimension) of the result. -#' @param mean numeric, The mean of the distribution. -#' @param sd numeric, The standard deviation. -#' @param ctx optional The context device of the array. mx.ctx.default() will be used by default. -#' -#' @examples -#' -#' mx.set.seed(0) -#' as.array(mx.runif(2)) -#' # 0.5488135 0.5928446 -#' mx.set.seed(0) -#' as.array(mx.rnorm(2)) -#' # 2.212206 1.163079 -#' -#' @export -mx.rnorm <- function(shape, mean=0, sd=1, ctx=NULL) { - if (!is.numeric(mean)) stop("mx.rnorm only accepts numeric mean") - if (!is.numeric(sd)) stop("mx.rnorm only accepts numeric sd") - ret <- mx.nd.internal.empty(shape, ctx) - return (mx.nd.internal.random.normal(mean, sd, shape=shape, out=ret)) -} diff --git a/R-package/R/rnn.infer.R b/R-package/R/rnn.infer.R deleted file mode 100644 index d9e14d4ccafc..000000000000 --- a/R-package/R/rnn.infer.R +++ /dev/null @@ -1,286 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License.
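To make the seeding contract of the removed random.R concrete: reseeding with the same value reproduces the same device-side draws, independently of R's own set.seed. A minimal sketch, with shapes and seed values illustrative only:

# Sketch only: reproducible device-side sampling.
mx.set.seed(42)
a <- as.array(mx.runif(c(2, 3), min = 0, max = 1))
mx.set.seed(42)
b <- as.array(mx.runif(c(2, 3), min = 0, max = 1))
stopifnot(identical(a, b))  # same seed, same stream
n <- as.array(mx.rnorm(4, mean = 0, sd = 2))  # normal draws with sd = 2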
diff --git a/R-package/R/rnn.infer.R b/R-package/R/rnn.infer.R
deleted file mode 100644
index d9e14d4ccafc..000000000000
--- a/R-package/R/rnn.infer.R
+++ /dev/null
@@ -1,286 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#' Inference of RNN model
-#'
-#' @param infer.data DataIter over the inference data
-#' @param model Model used for inference
-#' @param ctx mx.context or list of mx.context on which to run the inference. Default is mx.cpu().
-#'
-#' @export
-mx.infer.rnn <- function(infer.data, model, ctx = mx.cpu()) {
-
-  ### Initialise the iterator
-  infer.data$reset()
-  infer.data$iter.next()
-
-  if (is.null(ctx))
-    ctx <- mx.ctx.default()
-  if (is.mx.context(ctx)) {
-    ctx <- list(ctx)
-  }
-  if (!is.list(ctx))
-    stop("ctx must be mx.context or list of mx.context")
-
-  ndevice <- length(ctx)
-  symbol <- model$symbol
-  if (is.list(symbol)) sym_ini <- symbol[[names(infer.data$bucketID)]] else sym_ini <- symbol
-
-  arguments <- sym_ini$arguments
-  input.names <- intersect(names(infer.data$value()), arguments)
-
-  input.shape <- sapply(input.names, function(n) {
-    dim(infer.data$value()[[n]])
-  }, simplify = FALSE)
-
-  shapes <- sym_ini$infer.shape(input.shape)
-
-  # initialize all arguments with zeros
-  arguments.ini <- lapply(shapes$arg.shapes, function(shape) {
-    mx.nd.zeros(shape = shape, ctx = mx.cpu())
-  })
-
-  arg.params <- model$arg.params
-  arg.params.names <- names(arg.params)
-  aux.params <- model$aux.params
-
-  # Initial binding
-  dlist <- arguments.ini[input.names]
-
-  # Assign fixed parameters to their value and keep non-initialized arguments at zero
-  arg.params.fix.names <- setdiff(arguments, c(arg.params.names, input.names))
-
-  # Assign zeros to non-initialized arg parameters
-  arg.params.fix <- arguments.ini[arg.params.fix.names]
-
-  # Grad request
-  grad.req <- rep("null", length(arguments))
-
-  # Arg array order
-  update_names <- c(input.names, arg.params.fix.names, arg.params.names)
-  arg_update_idx <- match(arguments, update_names)
-
-  execs <- mx.symbol.bind(symbol = symbol, arg.arrays = c(dlist, arg.params.fix, arg.params)[arg_update_idx],
-    aux.arrays = aux.params, ctx = ctx[[1]], grad.req = grad.req)
-
-  # Initial input shapes - to support multiple devices, the highest
-  # dimension would need to be divided by the number of devices
-
-  packer <- mx.nd.arraypacker()
-  infer.data$reset()
-  while (infer.data$iter.next()) {
-
-    # Get input data slice
-    dlist <- infer.data$value()[input.names]
-
-    execs <- mx.symbol.bind(symbol = symbol, arg.arrays = c(dlist, execs$arg.arrays[arg.params.fix.names], execs$arg.arrays[arg.params.names])[arg_update_idx],
-      aux.arrays = execs$aux.arrays, ctx = ctx[[1]], grad.req = grad.req)
-
-    mx.exec.forward(execs, is.train = FALSE)
-
-    out.pred <- mx.nd.copyto(execs$ref.outputs[[1]], mx.cpu())
-    padded <- infer.data$num.pad()
-    oshape <- dim(out.pred)
-    ndim <- length(oshape)
-    packer$push(mx.nd.slice.axis(data = out.pred, axis = 0, begin = 0, end = oshape[[ndim]] - padded))
-
-  }
-  infer.data$reset()
-  return(packer$get())
-}
-
-
-#' Inference for one-to-one fusedRNN (CUDA) models
-#'
-#' @param infer.data Data iterator created by mx.io.bucket.iter
-#' @param symbol Symbol used for inference
-#' @param arg.params List of the model's argument parameter NDArrays
-#' @param aux.params List of the model's auxiliary state NDArrays
-#' @param input.params Optional named list of NDArrays used to preset the input-state arguments
-#' @param ctx mx.context or list of mx.context on which to run the inference. Default is mx.cpu().
-#'
-#' @export
-mx.infer.rnn.one <- function(infer.data,
-                             symbol,
-                             arg.params,
-                             aux.params,
-                             input.params = NULL,
-                             ctx = mx.cpu()) {
-
-  ### Initialise the iterator
-  infer.data$reset()
-  infer.data$iter.next()
-
-  if (is.null(ctx))
-    ctx <- mx.ctx.default()
-  if (is.mx.context(ctx)) {
-    ctx <- list(ctx)
-  }
-  if (!is.list(ctx))
-    stop("ctx must be mx.context or list of mx.context")
-
-  ndevice <- length(ctx)
-
-  arguments <- symbol$arguments
-  input.names <- intersect(names(infer.data$value()), arguments)
-
-  input.shape <- sapply(input.names, function(n) {
-    dim(infer.data$value()[[n]])
-  }, simplify = 
FALSE) - - shapes <- symbol$infer.shape(input.shape) - - # initialize all arguments with zeros - arguments.ini <- lapply(shapes$arg.shapes, function(shape) { - mx.nd.zeros(shape = shape, ctx = mx.cpu()) - }) - - arg.params <- arg.params - arg.params.names <- names(arg.params) - - dlist <- arguments.ini[input.names] - - # Assign fixed parameters to their value and keep non initialized arguments to zero - arg.params.fix.names <- unique(c(names(input.params), setdiff(arguments, c(arg.params.names, input.names)))) - - # Assign zeros to non initialized arg parameters - arg.params.fix <- arguments.ini[arg.params.fix.names] - # Assign weights to arguments specifies by input.params - arg.params.fix[names(input.params)] <- input.params - - aux.params <- aux.params - - # Grad request - grad.req <- rep("null", length(arguments)) - - # Arg array order - update_names <- c(input.names, arg.params.fix.names, arg.params.names) - arg_update_idx <- match(arguments, update_names) - - # Initial binding - execs <- mx.symbol.bind(symbol = symbol, - arg.arrays = c(dlist, arg.params.fix, arg.params)[arg_update_idx], - aux.arrays = aux.params, ctx = ctx[[1]], grad.req = grad.req) - - # Initial input shapes - need to be adapted for multi-devices - divide highest - # dimension by device nb - - infer.data$reset() - while (infer.data$iter.next()) { - - # Get input data slice - dlist <- infer.data$value()[input.names] - - execs <- mx.symbol.bind(symbol = symbol, - arg.arrays = c(dlist, execs$arg.arrays[arg.params.fix.names], execs$arg.arrays[arg.params.names])[arg_update_idx], - aux.arrays = execs$aux.arrays, ctx = ctx[[1]], grad.req = grad.req) - - mx.exec.forward(execs, is.train = FALSE) - - out.pred <- mx.nd.copyto(execs$ref.outputs[[1]], mx.cpu()) - state <- mx.nd.copyto(execs$ref.outputs[[2]], mx.cpu()) - state_cell <- mx.nd.copyto(execs$ref.outputs[[3]], mx.cpu()) - - out <- lapply(execs$ref.outputs, function(out) { - mx.nd.copyto(out, mx.cpu()) - }) - } - infer.data$reset() - return(out) -} - - -#' Inference for one-to-one unroll models -#' -#' @param infer.data NDArray -#' @param symbol Model used for inference -#' @param num_hidden -#' @param arg.params -#' @param aux.params -#' @param init_states -#' @param ctx -#' -#' @export -mx.infer.rnn.one.unroll <- function(infer.data, - symbol, - num_hidden, - arg.params, - aux.params, - init_states = NULL, - ctx = mx.cpu()) { - - if (is.null(ctx)) - ctx <- mx.ctx.default() - if (is.mx.context(ctx)) { - ctx <- list(ctx) - } - - if (!is.list(ctx)) - stop("ctx must be mx.context or list of mx.context") - - ndevice <- length(ctx) - - arguments <- symbol$arguments - input.names <- intersect(c("data", "label"), arguments) - - input.shape <- list("data" = dim(infer.data), "label" = dim(infer.data)) - - # init_state_shapes - init_states_names <- arguments[startsWith(arguments, "init_")] - init_states_shapes <- lapply(init_states_names, function(x) c(num_hidden, tail(input.shape[[1]], 1))) - names(init_states_shapes) <- init_states_names - - shapes <- symbol$infer.shape(c(input.shape, init_states_shapes)) - - # initialize all arguments with zeros - arguments.ini <- lapply(shapes$arg.shapes, function(shape) { - mx.nd.zeros(shape = shape, ctx = mx.cpu()) - }) - - dlist <- list("data" = infer.data, "label" = infer.data) - - if (is.null(init_states)) { - init_states <- arguments.ini[init_states_names] - } else { - names(init_states) <- init_states_names - } - - # remove potential duplicates arguments - if inference on CUDA RNN symbol - arg.params <- 
arg.params[setdiff(names(arg.params), c(input.names, init_states_names))]
-  arg.params.names <- names(arg.params)
-
-  # Aux params
-  aux.params <- aux.params
-
-  # Grad request
-  grad.req <- rep("null", length(arguments))
-
-  # Arg array order
-  update_names <- c(input.names, init_states_names, arg.params.names)
-  arg_update_idx <- match(arguments, update_names)
-
-  # Bind to exec
-  execs <- mxnet:::mx.symbol.bind(symbol = symbol,
-    arg.arrays = c(dlist, init_states, arg.params)[arg_update_idx],
-    aux.arrays = aux.params, ctx = ctx[[1]], grad.req = grad.req)
-
-  mx.exec.forward(execs, is.train = FALSE)
-
-  out <- lapply(execs$ref.outputs, function(out) mx.nd.copyto(out, mx.cpu()))
-
-  return(out)
-}
diff --git a/R-package/R/symbol.R b/R-package/R/symbol.R
deleted file mode 100644
index cbfd4c07e59d..000000000000
--- a/R-package/R/symbol.R
+++ /dev/null
@@ -1,264 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#' Create a symbolic variable with specified name.
-#'
-#' @param name string
-#' The name of the result symbol.
-#' @return The result symbol
-#' @name mx.symbol.Variable
-#'
-#' @export
-NULL
-
-#' Create a symbol that groups symbols together.
-#'
-#' @param ...
-#' Variable number of symbols or a list of symbols.
-#' @return The result symbol
-#'
-#' @export
-mx.symbol.Group <- function(...) {
-  mx.varg.symbol.internal.Group(list(...))
-}
-
-#' Perform a feature concat on the channel dimension (dim 1) over all the inputs.
-#'
-#' @param data list, required
-#' List of tensors to concatenate
-#' @param num.args int, required
-#' Number of inputs to be concatenated.
-#' @param dim int, optional, default='1'
-#' the dimension along which to concatenate.
-#' @param name string, optional
-#' Name of the resulting symbol.
-#' @return out The result mx.symbol
-#'
-#' @export
-mx.symbol.concat <- function(data, num.args, dim = NULL, name = NULL) {
-  data[['num.args']] <- num.args
-
-  if(!is.null(dim)) data[['dim']] <- dim
-
-  if(!is.null(name)) data[['name']] <- name
-
-  mx.varg.symbol.concat(data)
-}
-
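As a usage note for the wrapper above, a minimal sketch (symbol names are arbitrary):

```r
library(mxnet)
a <- mx.symbol.Variable("a")
b <- mx.symbol.Variable("b")
# concatenate the two inputs along the channel dimension (dim 1)
out <- mx.symbol.concat(data = list(a, b), num.args = 2, dim = 1, name = "ab")
```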
-#' Perform a feature concat on the channel dimension (dim 1) over all the inputs.
-#'
-#' @param data list, required
-#' List of tensors to concatenate
-#' @param num.args int, required
-#' Number of inputs to be concatenated.
-#' @param dim int, optional, default='1'
-#' the dimension along which to concatenate.
-#' @param name string, optional
-#' Name of the resulting symbol.
-#' @return out The result mx.symbol
-#'
-#' @export
-mx.symbol.Concat <- function(data, num.args, dim = NULL, name = NULL) {
-  warning("mx.symbol.Concat is deprecated. Use mx.symbol.concat instead.")
-  mx.symbol.concat(data, num.args, dim, name)
-}
-
-#' @export
-mx.symbol.min <- function(e1, e2) {
-  if (is.mx.symbol(e1) && is.mx.symbol(e2)) {
-    mx.varg.symbol.internal.minimum(list(e1, e2))
-  } else if (is.mx.symbol(e1)) {
-    mx.varg.symbol.internal.minimum_scalar(list(e1, scalar = e2))
-  } else if (is.mx.symbol(e2)) {
-    mx.varg.symbol.internal.minimum_scalar(list(e2, scalar = e1))
-  }
-}
-
-#' Save an mx.symbol object
-#'
-#' @param symbol the \code{mx.symbol} object
-#' @param filename the filename (including the path)
-#'
-#' @examples
-#' data = mx.symbol.Variable('data')
-#' mx.symbol.save(data, 'temp.symbol')
-#' data2 = mx.symbol.load('temp.symbol')
-#'
-#' @export
-mx.symbol.save <- function(symbol, filename) {
-  filename <- path.expand(filename)
-  symbol$save(filename)
-}
-
-#' Load an mx.symbol object
-#'
-#' @param filename the filename (including the path)
-#'
-#' @examples
-#' data = mx.symbol.Variable('data')
-#' mx.symbol.save(data, 'temp.symbol')
-#' data2 = mx.symbol.load('temp.symbol')
-#'
-#' @export
-mx.symbol.load <- function(filename) {
-  filename <- path.expand(filename)
-  mx.symbol.load(filename)
-}
-
-#' Load an mx.symbol object from a JSON string
-#'
-#' @param str the JSON string representing an mx.symbol
-#'
-#' @export
-#' @name mx.symbol.load.json
-NULL
-
-
-#' Infer the shape of arguments, outputs, and auxiliary states.
-#'
-#' @param symbol The \code{mx.symbol} object
-#'
-#' @export
-mx.symbol.infer.shape <- function(symbol, ...) {
-  symbol$infer.shape(list(...))
-}
-
-is.MXSymbol <- function(x) {
-  inherits(x, "Rcpp_MXSymbol")
-}
-
-#' Check whether an object is an mx.symbol
-#'
-#' @return Logical indicator
-#'
-#' @export
-is.mx.symbol <- is.MXSymbol
-
-
-#' Get the arguments of a symbol.
-#' @param x The input symbol
-#'
-#' @export
-arguments <- function(x) {
-  if (!is.MXSymbol(x))
-    stop("only for MXSymbol type")
-  x$arguments
-}
-
-#' Apply a symbol to the inputs.
-#' @param x The symbol to be applied
-#' @param ... The keyword arguments to the symbol
-#'
-#' @export
-mx.apply <- function(x, ...) {
-  if (!is.MXSymbol(x)) stop("only for MXSymbol type")
-  x$apply(list(...))
-}
-
-#' Get a symbol that contains all the internals
-#' @param x The input symbol
-#'
-#' @export
-internals <- function(x) {
-  if (!is.MXSymbol(x)) stop("only for MXSymbol type")
-  x$get.internals()
-}
-
-#' Gets a new grouped symbol whose output contains inputs to output nodes of the original symbol.
-#' @param x The input symbol
-#'
-#' @export
-children <- function(x) {
-  if (!is.MXSymbol(x)) stop("only for MXSymbol type")
-  x$get.children()
-}
-
-#' Get the outputs of a symbol.
-#' @param x The input symbol -#' -#' @export -outputs <- function(x) { - if (!is.MXSymbol(x)) stop("only for MXSymbol type") - x$outputs -} - -init.symbol.methods <- function() { - # Think of what is the best naming - setMethod("+", signature(e1 = "Rcpp_MXSymbol", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.Plus(list(e1, e2)) - }) - setMethod("+", signature(e1 = "Rcpp_MXSymbol", e2 = "numeric"), function(e1, e2) { - mx.varg.symbol.internal.PlusScalar(list(e1, scalar = e2)) - }) - setMethod("+", signature(e1 = "numeric", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.PlusScalar(list(e2, scalar = e1)) - }) - setMethod("-", signature(e1 = "Rcpp_MXSymbol", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.Minus(list(e1, e2)) - }) - setMethod("-", signature(e1 = "Rcpp_MXSymbol", e2 = "numeric"), function(e1, e2) { - mx.varg.symbol.internal.MinusScalar(list(e1, scalar = e2)) - }) - setMethod("-", signature(e1 = "numeric", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.rminus_scalar(list(e2, scalar = e1)) - }) - setMethod("*", signature(e1 = "Rcpp_MXSymbol", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.Mul(list(e1, e2)) - }) - setMethod("*", signature(e1 = "Rcpp_MXSymbol", e2 = "numeric"), function(e1, e2) { - mx.varg.symbol.internal.MulScalar(list(e1, scalar = e2)) - }) - setMethod("*", signature(e1 = "numeric", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.MulScalar(list(e2, scalar = e1)) - }) - setMethod("/", signature(e1 = "Rcpp_MXSymbol", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.Div(list(e1, e2)) - }) - setMethod("/", signature(e1 = "Rcpp_MXSymbol", e2 = "numeric"), function(e1, e2) { - mx.varg.symbol.internal.DivScalar(list(e1, scalar = e2)) - }) - setMethod("/", signature(e1 = "numeric", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.rdiv_scalar(list(e2, scalar = e1)) - }) - setMethod("%%", signature(e1 = "Rcpp_MXSymbol", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.Mod(list(e1, e2)) - }) - setMethod("%%", signature(e1 = "Rcpp_MXSymbol", e2 = "numeric"), function(e1, e2) { - mx.varg.symbol.internal.ModScalar(list(e1, scalar = e2)) - }) - setMethod("%%", signature(e1 = "numeric", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.RModScalar(list(e2, scalar = e1)) - }) - setMethod("%/%", signature(e1 = "Rcpp_MXSymbol", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.Mod(list(e1, e2)) - }) - setMethod("%/%", signature(e1 = "Rcpp_MXSymbol", e2 = "numeric"), function(e1, e2) { - mx.varg.symbol.internal.ModScalar(list(e1, scalar = e2)) - }) - setMethod("%/%", signature(e1 = "numeric", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.RModScalar(list(e2, scalar = e1)) - }) - setMethod("^", signature(e1 = "Rcpp_MXSymbol", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.power(list(e1, e2)) - }) - setMethod("^", signature(e1 = "Rcpp_MXSymbol", e2 = "numeric"), function(e1, e2) { - mx.varg.symbol.internal.power_scalar(list(e1, scalar = e2)) - }) - setMethod("^", signature(e1 = "numeric", e2 = "Rcpp_MXSymbol"), function(e1, e2) { - mx.varg.symbol.internal.rpower_scalar(list(e2, scalar = e1)) - }) -} diff --git a/R-package/R/util.R b/R-package/R/util.R deleted file mode 100644 index dd2ca6183bf2..000000000000 --- a/R-package/R/util.R +++ /dev/null @@ -1,77 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or 
more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# filter out null, keep the names
-mx.util.filter.null <- function(lst) {
-  Filter(Negate(is.null), lst)
-}
-
-#' Internal function to generate mxnet_generated.R
-#' Users do not need to call this function.
-#' @param path The path to the root of the package.
-#'
-#' @export
-mxnet.export <- function(path) {
-  mx.internal.export(path.expand(path))
-}
-
-#' Convert images into the image RecordIO format
-#' @param image_lst
-#' The image lst file
-#' @param root
-#' The root folder for image files
-#' @param output_rec
-#' The output rec file
-#' @param label_width
-#' The label width in the list file. Default is 1.
-#' @param pack_label
-#' Whether to also pack a multi-dimensional label in the record file. Default is 0.
-#' @param new_size
-#' The shorter edge of the image will be resized to the new size.
-#' Original images will be packed by default.
-#' @param nsplit
-#' Used for part generation; logically splits image.lst into NSPLIT parts by position.
-#' Default is 1.
-#' @param partid
-#' Used for part generation; packs the images from the specified part of image.lst.
-#' Default is 0.
-#' @param center_crop
-#' Whether to center-crop the image to make it square. Default is 0.
-#' @param quality
-#' JPEG quality for encoding (1-100, default: 95) or PNG compression for encoding (1-9, default: 3).
-#' @param color_mode
-#' Force color (1), gray image (0) or keep source unchanged (-1). Default is 1.
-#' @param unchanged
-#' Keep the original image encoding, size and color. If set to 1, the other parameters are ignored.
-#' @param inter_method
-#' NN(0), BILINEAR(1), CUBIC(2), AREA(3), LANCZOS4(4), AUTO(9), RAND(10). Default is 1.
-#' @param encoding
-#' The encoding type for images. It can be '.jpg' or '.png'. Default is '.jpg'.
-#' @export
-im2rec <- function(image_lst, root, output_rec, label_width = 1L,
-                   pack_label = 0L, new_size = -1L, nsplit = 1L,
-                   partid = 0L, center_crop = 0L, quality = 95L,
-                   color_mode = 1L, unchanged = 0L, inter_method = 1L,
-                   encoding = ".jpg") {
-  image_lst <- path.expand(image_lst)
-  root <- path.expand(root)
-  output_rec <- path.expand(output_rec)
-  mx.internal.im2rec(image_lst, root, output_rec, label_width,
-                     pack_label, new_size, nsplit, partid,
-                     center_crop, quality, color_mode, unchanged,
-                     inter_method, encoding)
-}
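A hedged sketch of a typical im2rec call (the list file and image folder here are hypothetical):

```r
library(mxnet)
# pack the images listed in train.lst (rooted at images/) into a RecordIO file,
# resizing the shorter edge to 224 pixels and center-cropping to a square
im2rec("train.lst", "images/", "train.rec",
       new_size = 224L, center_crop = 1L, quality = 95L)
```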
diff --git a/R-package/R/viz.graph.R b/R-package/R/viz.graph.R
deleted file mode 100644
index 488ed24818a1..000000000000
--- a/R-package/R/viz.graph.R
+++ /dev/null
@@ -1,167 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-#'
-#' Convert a symbol to a Graphviz or visNetwork visualisation.
-#'
-#' @importFrom magrittr %>%
-#' @importFrom stringr str_extract_all
-#' @importFrom stringr str_replace_all
-#' @importFrom stringr str_replace_na
-#' @importFrom stringr str_trim
-#' @importFrom jsonlite fromJSON
-#' @importFrom DiagrammeR create_graph
-#' @importFrom DiagrammeR add_global_graph_attrs
-#' @importFrom DiagrammeR create_node_df
-#' @importFrom DiagrammeR create_edge_df
-#' @importFrom DiagrammeR render_graph
-#' @importFrom visNetwork visHierarchicalLayout
-#'
-#' @param symbol an \code{mx.symbol} object representing the model.
-#' @param shape a \code{numeric} representing the input dimensions to the symbol.
-#' @param direction a \code{string} representing the direction of the graph, either TD or LR.
-#' @param type a \code{string} representing the rendering engine of the graph, either graph or vis.
-#' @param graph.width.px a \code{numeric} representing the width of the graph, in pixels.
-#' @param graph.height.px a \code{numeric} representing the height of the graph, in pixels.
-#'
-#' @return a graph object ready to be displayed with the \code{print} function.
-#'
-#' @export
-graph.viz <- function(symbol, shape=NULL, direction="TD", type="graph", graph.width.px=NULL, graph.height.px=NULL){
-
-  # generate color code for each type of node.
- get.color <- function(type) { - switch( - EXPR = type, - "data" = "#8dd3c7", - "FullyConnected" = , - "Convolution" = "#fb8072", - "LeakyReLU" = , - "Activation" = "#ffffb3", - "BatchNorm" = "#bebada", - "Pooling" = "#80b1d3", - "Flatten" = , - "Reshape" = , - "Concat" = "#fdb462", - "MakeLoss"=, - "#fccde5" # default value - ) - } - - get.shape <- function(type) { - switch( - EXPR = type, - "data" = "oval", - "Pooling" = "oval", - "Flatten" = "oval", - "Reshape" = "oval", - "Concat" = "oval", - "box" # default value - ) - } - - model_list <- fromJSON(symbol$as.json()) - model_nodes <- model_list$nodes - model_nodes$id <- seq_len(nrow(model_nodes))-1 - model_nodes$level <- model_nodes$ID - - # extract IDs from string list - tuple_str <- function(str) vapply(str_extract_all(str, "\\d+"), - function(x) paste0(x, collapse="X"), - character(1)) - - ### substitute op for heads - op_id <- sort(unique(model_list$heads[1,]+1)) - op_null <- which(model_nodes$op=="null") - op_substitute <- intersect(op_id, op_null) - model_nodes$op[op_substitute] <- model_nodes$name[op_substitute] - - model_nodes$color <- apply(model_nodes["op"], 1, get.color) - model_nodes$shape <- apply(model_nodes["op"], 1, get.shape) - - label_paste <- paste0(model_nodes$op, - "\n", - model_nodes$name, - "\n", - model_nodes$attr$num_hidden %>% str_replace_na() %>% str_replace_all(pattern = "NA", ""), - model_nodes$attr$act_type %>% str_replace_na() %>% str_replace_all(pattern = "NA", ""), - model_nodes$attr$pool_type %>% str_replace_na() %>% str_replace_all(pattern = "NA", ""), - model_nodes$attr$kernel %>% tuple_str %>% str_replace_na() %>% str_replace_all(pattern = "NA", ""), - " / ", - model_nodes$attr$stride %>% tuple_str %>% str_replace_na() %>% str_replace_all(pattern = "NA", ""), - ", ", - model_nodes$attr$num_filter %>% str_replace_na() %>% str_replace_all(pattern = "NA", "")) %>% - str_replace_all(pattern = "[^[:alnum:]]+$", "") %>% - str_trim - - model_nodes$label <- label_paste - - id.to.keep <- model_nodes$id[!model_nodes$op=="null"] - nodes_df <- model_nodes[model_nodes$id %in% id.to.keep, c("id", "label", "shape", "color")] - - ### remapping for DiagrammeR convention - nodes_df$id <- nodes_df$id - nodes_df$id_graph <- seq_len(nrow(nodes_df)) - id_dic <- nodes_df$id_graph - names(id_dic) <- as.character(nodes_df$id) - - edges_id <- model_nodes$id[lengths(model_nodes$inputs)!=0 & model_nodes$op!="null"] - edges_id <- id_dic[as.character(edges_id)] - edges <- model_nodes$inputs[lengths(model_nodes$inputs)!=0 & model_nodes$op!="null"] - edges <- sapply(edges, function(x)intersect(as.numeric(x[, 1]), id.to.keep), simplify = FALSE) - names(edges) <- edges_id - - edges_df <- data.frame(from=unlist(edges), - to=rep(names(edges), time=lengths(edges)), - arrows = "to", - color="black", - from_name_output=paste0(model_nodes$name[unlist(edges)+1], "_output"), - stringsAsFactors=FALSE) - edges_df$from <- id_dic[as.character(edges_df$from)] - - nodes_df_new <- create_node_df(n = nrow(nodes_df), label=nodes_df$label, shape=nodes_df$shape, type="base", penwidth=2, color=nodes_df$color, style="filled", - fillcolor=adjustcolor(nodes_df$color, alpha.f = 1), fontcolor = "black") - edge_df_new <- create_edge_df(from = edges_df$from, to=edges_df$to, color="black", fontcolor = "black") - - if (!is.null(shape)) { - if (is.list(shape)) { - edges_labels_raw <- symbol$get.internals()$infer.shape(shape)$out.shapes - } else edges_labels_raw <- symbol$get.internals()$infer.shape(list(data=shape))$out.shapes - if (!is.null(edges_labels_raw)) { 
- edge_label_str <- function(x) paste0(x, collapse="X") - edges_labels_raw <- vapply(edges_labels_raw, edge_label_str, character(1)) - names(edges_labels_raw)[names(edges_labels_raw)=="data"] <- "data_output" - edge_df_new$label <- edges_labels_raw[edges_df$from_name_output] - edge_df_new$rel <- edge_df_new$label - } - } - - graph <- create_graph(nodes_df = nodes_df_new, edges_df = edge_df_new, directed = TRUE, attr_theme = NULL) %>% - add_global_graph_attrs("layout", value = "dot", attr_type = "graph") %>% - add_global_graph_attrs("rankdir", value = direction, attr_type = "graph") - - if (type=="vis"){ - graph_render <- render_graph(graph = graph, output = "visNetwork", width = graph.width.px, height = graph.height.px) %>% - visHierarchicalLayout(direction = direction, sortMethod = "directed") - } else { - graph_render <- render_graph(graph = graph, output = "graph", width = graph.width.px, height = graph.height.px) - } - - return(graph_render) -} - -globalVariables(c("color", "shape", "label", "id", ".", "op")) diff --git a/R-package/R/zzz.R b/R-package/R/zzz.R deleted file mode 100644 index 1b185978b8e2..000000000000 --- a/R-package/R/zzz.R +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -#' MXNet: Flexible and Efficient GPU computing and Deep Learning. -#' -#' MXNet is a flexible and efficient GPU computing and deep learning framework. -#' -#' It enables you to write seamless tensor/matrix computation with multiple GPUs in R. -#' -#' It also enables you construct and customize the state-of-art deep learning models in R, -#' and apply them to tasks such as image classification and data science challenges. -#' -#' @docType package -#' @name mxnet -#' @import methods Rcpp -NULL - -.MXNetEnv <- new.env() - -.onLoad <- function(libname, pkgname) { - # Require methods for older versions of R - require(methods) - tryCatch(library.dynam("libmxnet", pkgname, libname, local=FALSE), error = function(e) { - print('Loading libmxnet.so failed. Error:') - print(e) - print('Falling back to loading local: inst/libs/libmxnet.so') - dyn.load("R-package/inst/libs/libmxnet.so", local=FALSE) - }) - tryCatch(library.dynam("mxnet", pkgname, libname), error = function(e) { - print('Loading mxnet.so failed. Error:') - print(e) - print('Falling back to loading local: src/mxnet.so') - dyn.load("R-package/src/mxnet.so") - }) - loadModule("mxnet", TRUE) - init.symbol.methods() - init.context.default() -} - -.onUnload <- function(libpath) { - message("Start unload") - mx.internal.notify.shutdown() - library.dynam.unload("mxnet", libpath) - library.dynam.unload("libmxnet", libpath) - message("MXNet shutdown") -} - -.onAttach <- function(...) { - if (!interactive() || stats::runif(1) > 0.1) return() - - tips <- c( - "Need help? 
Feel free to open an issue on https://github.com/dmlc/mxnet/issues",
-    "For more documentation, please visit https://mxnet.io",
-    "Use suppressPackageStartupMessages() to eliminate package startup messages."
-  )
-
-  tip <- sample(tips, 1)
-  packageStartupMessage(paste(strwrap(tip), collapse = "\n"))
-}
diff --git a/R-package/README.md b/R-package/README.md
deleted file mode 100644
index 68996b06191e..000000000000
--- a/R-package/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
- Deep Learning for R
-==========================
-
-You have found the MXNet R package! The MXNet R package brings flexible and efficient GPU
-computing and state-of-the-art deep learning to R.
-
-- It enables you to write seamless tensor/matrix computation with multiple GPUs in R.
-- It also enables you to construct and customize state-of-the-art deep learning models in R,
-  and apply them to tasks such as image classification and data science challenges.
-
-Sounds exciting? This page contains links to all the related documentation of the R package.
-
-
-Installation
-------------
-
-We provide pre-built binary packages for Windows/OSX users.
-You can install the CPU package directly from the R console:
-
-```r
-cran <- getOption("repos")
-cran["dmlc"] <- "https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/CRAN/"
-options(repos = cran)
-install.packages("mxnet")
-```
-
-To use the GPU version or to use it on Linux, please follow the [Installation Guide](https://mxnet.io/install/index.html).
-
-License
--------
-MXNet R-package is licensed under the [Apache-2.0](./LICENSE) license.
diff --git a/R-package/demo/00Index b/R-package/demo/00Index
deleted file mode 100644
index f467bbdb3880..000000000000
--- a/R-package/demo/00Index
+++ /dev/null
@@ -1,6 +0,0 @@
-basic_bench Basic benchmark
-basic_executor Basic executor operations
-basic_kvstore Basic kvstore operations
-basic_ndarray Basic ndarray operations
-basic_random Basic random number generators
-basic_symbol Basic symbol operations
diff --git a/R-package/demo/basic_bench.R b/R-package/demo/basic_bench.R
deleted file mode 100644
index c81a08cf491f..000000000000
--- a/R-package/demo/basic_bench.R
+++ /dev/null
@@ -1,35 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-require(mxnet)
-require(methods)
-
-shape <- c(1, 1)
-lr <- 0.01
-x <- mx.nd.ones(shape)
-y <- mx.nd.zeros(shape)
-print(x)
-n <- 1000
-
-
-tic <- proc.time()
-for (i in 1:n) {
-  y <- y + x * lr
-}
-toc <- proc.time() - tic
-as.array(y)
-print(toc)
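One caveat worth recording alongside the removed benchmark: MXNet queues NDArray operations asynchronously, and the demo above reads the timer before copying `y` back to R, so it can under-measure the compute itself. A hedged variant that synchronizes before stopping the clock:

```r
library(mxnet)
x <- mx.nd.ones(c(1, 1))
y <- mx.nd.zeros(c(1, 1))
tic <- proc.time()
for (i in 1:1000) {
  y <- y + x * 0.01
}
y_r <- as.array(y)        # copying to R blocks until pending operations finish
print(proc.time() - tic)  # timer now includes the actual compute
```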
diff --git a/R-package/demo/basic_executor.R b/R-package/demo/basic_executor.R
deleted file mode 100644
index 53f352300dca..000000000000
--- a/R-package/demo/basic_executor.R
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-require(mxnet)
-# TODO(KK, tong) think about the setter/getter interface (which breaks immutability) vs. the current set-and-move interface.
-# We need to make a choice between:
-# exec_old = exec
-# exec$arg.arrays = some.array, this changes exec_old$arg.arrays as well; the user won't be aware
-# V.S.
-# exec_old = exec
-# exec = mx.exec.set.arg.arrays(exec, some.array)
-# exec_old is moved; the user gets an error when using exec_old
-
-A <- mx.symbol.Variable('A')
-B <- mx.symbol.Variable('B')
-C <- A + B
-a <- mx.nd.zeros(c(2), mx.cpu())
-b <- mx.nd.array(as.array(c(1, 2)), mx.cpu())
-
-exec <- mxnet:::mx.symbol.bind(
-  symbol = C,
-  ctx = mx.cpu(),
-  arg.arrays = list(A = a, B = b),
-  aux.arrays = list(),
-  grad.reqs = list("null", "null"))
-
-# calculate outputs
-mx.exec.forward(exec)
-out <- as.array(exec$outputs[[1]])
-print(out)
-
-mx.exec.update.arg.arrays(exec, list(A = b, B = b))
-mx.exec.forward(exec)
-
-out <- as.array(exec$outputs[[1]])
-print(out)
diff --git a/R-package/demo/basic_kvstore.R b/R-package/demo/basic_kvstore.R
deleted file mode 100644
index cfd5aec7f107..000000000000
--- a/R-package/demo/basic_kvstore.R
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-require(mxnet)
-
-kv <- mx.kv.create()
-
-dlist <- lapply(1:3, function(i) {
-  x = as.array(c(i, i + 1))
-  mat = mx.nd.array(x, mx.cpu(i))
-  list(x = mat)
-})
-kv$init(c(0), dlist[[1]])
-kv$push(c(0), dlist, 0)
-kv$pull(c(0), dlist, 0)
-
-print(as.array(dlist[[1]][[1]]))
diff --git a/R-package/demo/basic_ndarray.R b/R-package/demo/basic_ndarray.R
deleted file mode 100644
index 9ace6344b327..000000000000
--- a/R-package/demo/basic_ndarray.R
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -require(mxnet) - -x <- 1:3 -mat <- mx.nd.array(x) - -mat <- mat + 1.0 -mat <- mat + mat -mat <- mat - 5 -mat <- 10 / mat -mat <- 7 * mat -mat <- 1 - mat + (2 * mat) / (mat + 0.5) -as.array(mat) - -x <- as.array(matrix(1:4, 2, 2)) - -mx.ctx.default(mx.cpu(1)) -print(mx.ctx.default()) -print(is.mx.context(mx.cpu())) -mat <- mx.nd.array(x) -mat <- (mat * 3 + 5) / 10 -as.array(mat) diff --git a/R-package/demo/basic_random.R b/R-package/demo/basic_random.R deleted file mode 100644 index 480aaa1f5a0c..000000000000 --- a/R-package/demo/basic_random.R +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -require(mxnet) - -mx.set.seed(10) - -print(mx.runif(c(2,2), -10, 10)) - -# Test initialization module for neural nets. -uinit <- mx.init.uniform(0.1) -print(uinit("fc1_weight", c(2, 2), mx.cpu())) -print(uinit("fc1_gamma", c(2, 2), mx.cpu())) diff --git a/R-package/demo/basic_symbol.R b/R-package/demo/basic_symbol.R deleted file mode 100644 index 7bdeb00d7f81..000000000000 --- a/R-package/demo/basic_symbol.R +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -require(mxnet) - -data <- mx.symbol.Variable('data') -net1 <- mx.symbol.FullyConnected(data = data, name = 'fc1', num_hidden = 10) -net1 <- mx.symbol.FullyConnected(data = net1, name = 'fc2', num_hidden = 100) - -all.equal(arguments(net1), c('data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias')) - -net2 <- mx.symbol.FullyConnected(name = 'fc3', num_hidden = 10) -net2 <- mx.symbol.Activation(data = net2, act_type = 'relu') -net2 <- mx.symbol.FullyConnected(data = net2, name = 'fc4', num_hidden = 20) - -composed <- mx.apply(net2, fc3_data = net1, name = 'composed') diff --git a/R-package/dummy.NAMESPACE b/R-package/dummy.NAMESPACE deleted file mode 100644 index 6225fbf702e2..000000000000 --- a/R-package/dummy.NAMESPACE +++ /dev/null @@ -1,16 +0,0 @@ -# Generated by roxygen2: do not edit by hand - -import(Rcpp) -import(methods) -importFrom(DiagrammeR,add_global_graph_attrs) -importFrom(DiagrammeR,create_edge_df) -importFrom(DiagrammeR,create_graph) -importFrom(DiagrammeR,create_node_df) -importFrom(DiagrammeR,render_graph) -importFrom(jsonlite,fromJSON) -importFrom(magrittr,"%>%") -importFrom(stringr,str_extract_all) -importFrom(stringr,str_replace_all) -importFrom(stringr,str_replace_na) -importFrom(stringr,str_trim) -importFrom(visNetwork,visHierarchicalLayout) diff --git a/R-package/src/Makevars b/R-package/src/Makevars deleted file mode 100644 index 099471541b37..000000000000 --- a/R-package/src/Makevars +++ /dev/null @@ -1,4 +0,0 @@ -CXX_STD = CXX11 -PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) `(pkg-config --libs opencv || pkg-config --libs opencv4)` -PKG_CFLAGS = `(pkg-config --cflags opencv || pkg-config --cflags opencv4)` -PKG_CPPFLAGS = -I../inst/include `(pkg-config --cflags opencv || pkg-config --cflags opencv4)` `Rscript -e 'Rcpp:::CxxFlags()'` diff --git a/R-package/src/Makevars.win b/R-package/src/Makevars.win deleted file mode 100644 index 7be3d8f935b1..000000000000 --- a/R-package/src/Makevars.win +++ /dev/null @@ -1,2 +0,0 @@ -PKG_CPPFLAGS = -I../inst/include -PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) -L../inst/libs/x64/ -llibmxnet diff --git a/R-package/src/base.h b/R-package/src/base.h deleted file mode 100644 index 8645d8576b0e..000000000000 --- a/R-package/src/base.h +++ /dev/null @@ -1,397 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file base.h - * \brief Rcpp interface of MXNet - * All the interface is done through C API, - * to achieve maximum portability when we need different compiler for libmxnet. - */ -#ifndef MXNET_RCPP_BASE_H_ -#define MXNET_RCPP_BASE_H_ - -#include -#include -#include -#include -#include -#include -#include -#include - -/*! \brief namespace of mxnet */ -namespace mxnet { -/*! 
\brief namespace of R package */ -namespace R { - -/*! \brief macro to be compatible with non c++11 env */ -#if DMLC_USE_CXX11 == 0 -#ifndef nullptr -#define nullptr NULL -#endif -#endif - -/*! - * \brief Log that enables Stop and print message to R console - */ -class RLogFatal { - public: - RLogFatal(const char* file, int line) { - log_stream_ << file << ":" - << line << ": "; - } - inline std::ostringstream &stream() { - return log_stream_; - } - ~RLogFatal() -#if DMLC_USE_CXX11 - noexcept(false) -#endif - { - std::string msg = log_stream_.str() + '\n'; - throw Rcpp::exception(msg.c_str()); - } - - private: - std::ostringstream log_stream_; -}; - -/*! - * \brief LOG FATAL to report error to R console - * Need to append newline to it. - */ -#define RLOG_FATAL ::mxnet::R::RLogFatal(__FILE__, __LINE__).stream() - -/*! \brief LOG INFO to report message to R console, need to append newline */ -#define RLOG_INFO ::Rcpp::Rcout - -/*! - * \brief Checking macro for Rcpp code, report error ro R console - * \code - * RCHECK(data.size() == 1) << "Data size must be 1"; - * \endcode - */ -#define RCHECK(x) \ - if (!(x)) RLOG_FATAL << "RCheck failed: " #x << ' ' /* NOLINT(*) */ - -/*! - * \brief protected MXNet C API call, report R error if happens. - * \param func Expression to call. - */ -#define MX_CALL(func) \ - { \ - int e = (func); \ - if (e != 0) { \ - throw Rcpp::exception(MXGetLastError()); \ - } \ - } -/*! - * \brief set seed to the random number generator - * \param seed the seed to set. - */ -void SetSeed(int seed); - -/*! - * \brief Base Movable class of MXNet Module object. - * This class will define several common functions. - * \tparam Class The class name of subclass - */ -template -class MXNetMovable { - public: - /*! \brief The type of Class in R's side */ - typedef Rcpp::RObject RObjectType; - /*! - * \brief Get a pointer representation of obj. - * \param obj The R object. - * \return The pointer of the object. - * \throw Rcpp::exception if the object is moved. - */ - inline static Class* XPtr(const Rcpp::RObject& obj) { - Class* ptr = Rcpp::as(obj); - bool has_been_moved = static_cast*>(ptr)->moved_; - RCHECK(!has_been_moved) - << "Passed in a moved " << Class::TypeName() << " as parameter." - << " Moved parameters should no longer be used"; - return ptr; - } - - protected: - /*! \brief default constructor */ - MXNetMovable() : moved_(false) {} - /*! - * \brief Default implement to Move a existing R Class object to a new one. - * \param src The source R Object. - * \return A new R object containing moved information as old one. - */ - inline static RObjectType Move(const Rcpp::RObject& src) { - Class* old = Class::XPtr(src); - Class* moved = old->CreateMoveObject(); - static_cast*>(old)->moved_ = true; - return Rcpp::internal::make_new_object(moved); - } - - /*! \brief Whether the object has been moved */ - bool moved_; -}; - -/*! \brief Context of device enviroment */ -struct Context { - /*! \brief The device ID of the context */ - int dev_type; - /*! \brief The device ID of the context */ - int dev_id; - /*! \brief The R object type of the context */ - typedef Rcpp::List RObjectType; - /*! \brief default constructor */ - Context() {} - /*! - * \brief Constructor - * \param src source R representation. - */ - explicit Context(const Rcpp::RObject& src) { - Rcpp::List list(src); - this->dev_id = list[1]; - this->dev_type = list[2]; - } - /*! 
\return R object representation of the context */
-  inline RObjectType RObject() const {
-    const char *dev_name = "cpu";
-    if (dev_type == kGPU) dev_name = "gpu";
-    Rcpp::List ret = Rcpp::List::create(
-        Rcpp::Named("device") = dev_name,
-        Rcpp::Named("device_id") = dev_id,
-        Rcpp::Named("device_typeid") = dev_type);
-    ret.attr("class") = "MXContext";
-    return ret;
-  }
-  /*!
-   * Create a CPU context.
-   * \param dev_id the device id.
-   * \return CPU Context.
-   */
-  inline static RObjectType CPU(int dev_id = 0) {
-    Context ctx;
-    ctx.dev_type = kCPU;
-    ctx.dev_id = dev_id;
-    return ctx.RObject();
-  }
-  /*!
-   * Create a GPU context.
-   * \param dev_id the device id.
-   * \return GPU Context.
-   */
-  inline static RObjectType GPU(int dev_id) {
-    Context ctx;
-    ctx.dev_type = kGPU;
-    ctx.dev_id = dev_id;
-    return ctx.RObject();
-  }
-  /*! \brief initialize all the Rcpp module functions */
-  inline static void InitRcppModule() {
-    using namespace Rcpp;  // NOLINT(*);
-    function("mx.cpu", &CPU,
-             List::create(_["dev.id"] = 0),
-             "Create a CPU context.");
-    function("mx.gpu", &GPU,
-             List::create(_["dev.id"] = 0),
-             "Create a GPU context with specific device_id.");
-  }
-  /*! \brief the device type id for CPU */
-  static const int kCPU = 1;
-  /*! \brief the device type id for GPU */
-  static const int kGPU = 2;
-};
-
-/*!
- * \brief Get a C char pointer vector representation of keys.
- * The keys must stay alive when using c_keys.
- * \param keys the string vector to get keys from
- * \return the C char pointer
- */
-inline std::vector<const char*> CKeys(const std::vector<std::string> &keys) {
-  std::vector<const char*> c_keys(keys.size());
-  for (size_t i = 0; i < keys.size(); ++i) {
-    c_keys[i] = keys[i].c_str();
-  }
-  return c_keys;
-}
-
-/*!
- * \return whether the expression is a simple argument,
- * that is, not a module object and convertible to string.
- */
-inline const char* TypeName(const Rcpp::RObject& args) {
-  switch (TYPEOF(args)) {
-    case REALSXP: return "numeric";
-    case VECSXP: return "list";
-    case INTSXP: return "integer";
-    case CPLXSXP: return "complex";
-    case LGLSXP: return "logical";
-    case STRSXP: return "string";
-    default: return "object type";
-  }
-}
-
-/*!
- * \brief A simple function to convert a value of known type to string.
- * \param val the value
- * \return the corresponding string
- */
-template<typename T>
-inline std::string toString(const Rcpp::RObject& val) {
-  std::ostringstream os;
-  os << Rcpp::as<T>(val);
-  return os.str();
-}
-
-/*!
- * \brief Check whether the value is a simple parameter.
- * \param val The value to check.
- */
-inline bool isSimple(const Rcpp::RObject& val) {
-  switch (TYPEOF(val)) {
-    case STRSXP:
-    case INTSXP:
-    case REALSXP:
-    case LGLSXP: return true;
-    default: return false;
-  }
-}
-
-/*!
- * \brief Create an API-compatible string representation of the value.
- * \param key The key name of the parameter
- * \param val The value of the parameter
- * \return A python string representation of val
- */
-inline std::string toPyString(const std::string &key, const Rcpp::RObject& val) {
-  std::ostringstream os;
-  int len = Rf_length(val);
-  if (len != 1 ||
-      key.substr(std::max(5, static_cast<int>(key.size())) - 5) == std::string("shape")) {
-    RCHECK(TYPEOF(val) == INTSXP || TYPEOF(val) == REALSXP)
-        << "Only accept integer vectors or simple types";
-    // Do shape conversion back to reversed shape.
-    Rcpp::IntegerVector vec(val);
-    os << "(";
-    for (size_t i = 0; i < vec.size(); ++i) {
-      int value = vec[vec.size() - i - 1];
-      if (i != 0) os << ", ";
-      os << value;
-    }
-    if (vec.size() == 1) os << ",";
-    os << ")";
-    return os.str();
-  }
-  switch (TYPEOF(val)) {
-    case STRSXP: return Rcpp::as<std::string>(val);
-    case INTSXP: return toString<int>(val);
-    case REALSXP: return toString<double>(val);
-    case LGLSXP: return toString<bool>(val);
-    default: {
-      RLOG_FATAL << "Unsupported parameter type " << TypeName(val)
-                 << " for argument " << key
-                 << ", expect integer, logical, or string.";
-    }
-  }
-  return os.str();
-}
-
-/*!
- * \brief Convert the dot . style separator into underscore _,
- * so num.hidden -> num_hidden.
- * This allows R users to use the dot-style separators.
- * \param src the source key
- * \return the converted key
- */
-inline std::string FormatParamKey(std::string src) {
-  for (size_t i = 0; i < src.size(); ++i) {
-    if (src[i] == '.') src[i] = '_';
-  }
-  return src;
-}
-
-/*! \return whether the list has names */
-inline bool HasName(const Rcpp::List& src) {
-  Rcpp::RObject obj = src.names();
-  return obj != R_NilValue;
-}
-
-/*!
- * \brief Get names from a list; returns a vector of empty strings if names are not present.
- * \param src the source list
- * \return a vector of strings of the same length as src.
- */
-inline std::vector<std::string> SafeGetListNames(const Rcpp::List& src) {
-  if (!HasName(src)) {
-    return std::vector<std::string>(src.size(), std::string());
-  } else {
-    return src.names();
-  }
-}
-
-/*!
- * \brief Convert Rcpp's Dimension to the internal shape vector.
- * This will reverse the shape layout internally.
- * \param rshape The dimension in R
- * \return An internal vector representation of shapes in mxnet.
- */
-inline std::vector<mx_uint> Dim2InternalShape(const Rcpp::Dimension &rshape) {
-  std::vector<mx_uint> shape(rshape.size());
-  for (size_t i = 0; i < rshape.size(); ++i) {
-    shape[rshape.size() - i - 1] = rshape[i];
-  }
-  return shape;
-}
-
-class NDArray;
-class Symbol;
-class Executor;
-class KVStore;
-}  // namespace R
-}  // namespace mxnet
-
-// This is Rcpp namespace, contains patches to Rcpp
-// The following section follows style of Rcpp
-namespace Rcpp {
-  namespace internal {  // NOLINT(*)
-    inline bool is_module_object_internal_fix(SEXP obj, const char* clazz) {
-      Environment env(obj);
-      SEXP sexp = env.get(".cppclass");
-      if (TYPEOF(sexp) != EXTPTRSXP) return false;
-      XPtr<class_Base> xp(sexp);
-      return xp->has_typeinfo_name(clazz);
-    }
-    template<typename T> bool is__module__object_fix(SEXP x) {
-      typedef typename Rcpp::traits::un_pointer<T>::type CLASS;
-      if (!is__simple(x)) return false;
-      return is_module_object_internal_fix(x, typeid(CLASS).name());
-    }
-  }  // namespace internal NOLINT(*)
-
-  template<>
-  inline bool is<mxnet::R::NDArray>(SEXP x);
-  template<>
-  inline bool is<mxnet::R::Symbol>(SEXP x);
-  template<>
-  inline bool is<mxnet::R::Executor>(SEXP x);
-}  // namespace Rcpp
-#endif  // MXNET_RCPP_BASE_H_
diff --git a/R-package/src/executor.cc b/R-package/src/executor.cc
deleted file mode 100644
index 20380b4d986d..000000000000
--- a/R-package/src/executor.cc
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file executor.h - * \brief Rcpp Symbol of MXNet. - */ -#include -#include -#include -#include "./base.h" -#include "./executor.h" -#include "./ndarray.h" -#include "./symbol.h" - -namespace mxnet { -namespace R { - -void Executor::UpdateArgArray(const Rcpp::List& array, - bool match_name, - bool skip_null) { - UpdateArray("arg.arrays", array, arg_arrays_, match_name, skip_null); -} - -void Executor::UpdateAuxArray(const Rcpp::List& array, - bool match_name, - bool skip_null) { - UpdateArray("aux.arrays", array, aux_arrays_, match_name, skip_null); -} - -void Executor::UpdateGradArray(const Rcpp::List& array, - bool match_name, - bool skip_null) { - UpdateArray("grad.arrays", array, grad_arrays_, match_name, skip_null); -} - -void Executor::UpdateArray(const char* array_name, - const Rcpp::List& from, - Rcpp::List* to, - bool match_name, - bool skip_null) { - if (!match_name) { - RCHECK(from.size() == to->size()) - << "Update array list must contain names"; - for (size_t i = 0; i < from.size(); ++i) { - if (to->at(i) != R_NilValue) { - if (from[i] != R_NilValue) { - NDArray dst = NDArray::FromRObject(to->at(i)); - NDArray::CopyFromTo(NDArray::FromRObject(from[i]), &dst); - } else { - RCHECK(skip_null) - << "Position " << i << " expected to be not NULL"; - } - } else { - RCHECK(from[i] == R_NilValue) - << "Position " << i << " expected to be NULL"; - } - } - } else { - if (from.size() == 0) return; - RCHECK(HasName(from)) - << "match.name is set to TRUE, the input list must have names in all elements"; - std::vector names = from.names(); - for (size_t i = 0; i < names.size(); ++i) { - RCHECK(names[i].length() != 0) - << "match.name is set to TRUE, the input list must have names in all elements"; - RCHECK(to->containsElementNamed(names[i].c_str())) - << "cannot find key " << names[i] << " in the array " << array_name; - int index = to->findName(names[i]); - if (to->at(index) != R_NilValue) { - if (from[i] != R_NilValue) { - NDArray dst = NDArray::FromRObject(to->at(index)); - NDArray::CopyFromTo(NDArray::FromRObject(from[i]), &dst); - } else { - RCHECK(skip_null) - << "Element " << names[i] << " expected to be not NULL"; - } - } else { - RCHECK(from[i] == R_NilValue) - << "Element " << names[i] << " expected to be NULL"; - } - } - } -} - -Rcpp::List Executor::CloneArray(const Rcpp::List& src) { - Rcpp::List ret(src.size()); - ret.names() = src.names(); - for (size_t i = 0; i < src.size(); ++i) { - if (src[i] != R_NilValue) { - RCHECK(Rcpp::is(src[i])) - << "Expected exec to be "<< Executor::TypeName(); - ret[i] = NDArray::FromRObject(src[i]).Clone().RObject(); - } else { - ret[i] = R_NilValue; - } - } - return ret; -} - -void Executor::Forward(bool is_train, - const Rcpp::List& kwargs) { - MX_CALL(MXExecutorForward(handle_, is_train)); -} - -void Executor::Backward(const Rcpp::List &output_grads) { - RCHECK(grad_arrays_ != nullptr) - << "This executor has not been bound with req.grad"; - std::vector grad_handles - = NDArray::GetHandles(output_grads, "output_grads", false); - MX_CALL(MXExecutorBackward(handle_, - 
static_cast(grad_handles.size()), - dmlc::BeginPtr(grad_handles))); -} - -inline Rcpp::List* CreateArrayList(const Rcpp::List& source_array, - const std::string& key, - const std::vector& names, - const Context::RObjectType& ctx, - std::vector* handles) { - Rcpp::List* ret = new Rcpp::List(source_array.size()); - try { - ret->names() = names; - handles->resize(source_array.size()); - for (size_t i = 0; i < source_array.size(); ++i) { - RCHECK(Rcpp::is(source_array[i])) - << "Expect input " << key << " to be list of " << NDArray::TypeName(); - NDArray src = NDArray::FromRObject(source_array[i]); - ret->at(i) = NDArray::Empty(src.dim(), ctx); - NDArray dst = NDArray::FromRObject(ret->at(i)); - handles->at(i) = dst->handle; - NDArray::CopyFromTo(src, &dst); - } - } catch(const Rcpp::exception& ex) { - delete ret; - throw ex; - } - return ret; -} - -inline Rcpp::List* CreateGradList(const Rcpp::List& source_array, - const Rcpp::List& grad_reqs, - const std::vector& names, - const Context::RObjectType& ctx, - std::vector *handles, - std::vector *grad_req_type) { - Rcpp::List* ret = new Rcpp::List(grad_reqs.size(), R_NilValue); - try { - ret->names() = names; - handles->resize(grad_reqs.size(), nullptr); - grad_req_type->resize(grad_reqs.size(), 0); - std::map req_map; - req_map["null"] = 0; - req_map["write"] = 1; - req_map["add"] = 3; - - for (size_t i = 0; i < grad_reqs.size(); ++i) { - if (Rcpp::as(grad_reqs[i]) != "null" - && Rcpp::as(grad_reqs[i]) != "write" - && Rcpp::as(grad_reqs[i]) != "add") { - RLOG_FATAL << "grad_req must be one of 'null', 'write' or 'add'"; - } - - if (Rcpp::as(grad_reqs[i]) != "null") { - ret->at(i) = NDArray::Empty(NDArray::FromRObject(source_array[i]).dim(), ctx); - handles->at(i) = NDArray::FromRObject(ret->at(i))->handle; - grad_req_type->at(i) = req_map[Rcpp::as(grad_reqs[i])]; - } - } - } catch(const Rcpp::exception& ex) { - delete ret; - throw ex; - } - return ret; -} - -inline Rcpp::List* CreateOutList(mx_uint out_size, - NDArrayHandle *out_arr, - const std::vector& names) { - Rcpp::List* ret = new Rcpp::List(out_size); - try { - ret->names() = names; - for (size_t i = 0; i < out_size; ++i) { - ret->at(i) = NDArray::RObject(out_arr[i], false); - } - } catch(const Rcpp::exception& ex) { - delete ret; - throw ex; - } - return ret; -} - -Executor::RObjectType Executor::Bind(const Symbol::RObjectType& symbol, - const Context::RObjectType& context, - const Rcpp::List& arg_arrays, - const Rcpp::List& aux_arrays, - const Rcpp::List& grad_reqs) { - Executor* exec = new Executor(); - try { - Symbol *sym = Symbol::XPtr(symbol); - // handles - std::vector grad_req_type; - std::vector arg_handles, grad_handles, aux_handles; - // for failure handling - exec->arg_arrays_ = CreateArrayList( - arg_arrays, "arg_arrays", - sym->ListArguments(), - context, &arg_handles); - exec->aux_arrays_ = CreateArrayList( - aux_arrays, "aux_arrays", - sym->ListAuxiliaryStates(), - context, &aux_handles); - exec->grad_arrays_ = CreateGradList( - arg_arrays, grad_reqs, - sym->ListArguments(), - context, &grad_handles, &grad_req_type); - Context ctx(context); - MX_CALL(MXExecutorBind( - sym->handle_, - ctx.dev_type, ctx.dev_id, - static_cast(arg_handles.size()), dmlc::BeginPtr(arg_handles), - dmlc::BeginPtr(grad_handles), dmlc::BeginPtr(grad_req_type), - static_cast(aux_handles.size()), dmlc::BeginPtr(aux_handles), - &(exec->handle_))); - mx_uint out_size; - NDArrayHandle *out_arr; - MX_CALL(MXExecutorOutputs(exec->handle_, &out_size, &out_arr)); - exec->out_arrays_ = CreateOutList( - 
out_size, out_arr, sym->ListOuputs()); - } catch(const Rcpp::exception& ex) { - delete exec; - throw ex; - } - return Rcpp::internal::make_new_object<Executor>(exec); -} -void Executor::InitRcppModule() { - using namespace Rcpp; // NOLINT(*) - class_<Executor>("MXExecutor") - .method("update.aux.arrays", - &Executor::UpdateAuxArray, - "Update auxiliary states array of executor, this will mutate the executor") - .method("update.arg.arrays", - &Executor::UpdateArgArray, - "Update arguments array of executor, this will mutate the executor") - .method("update.grad.arrays", - &Executor::UpdateGradArray, - "Update gradient array of executor, this will mutate the executor") - .method("forward", - &Executor::Forward, - "Perform a forward operation on exec, this will set the outputs.") - .method("backward", - &Executor::Backward, - "Perform a backward operation on exec, this will set the gradients requested.") - .property("ref.arg.arrays", &Executor::arg_arrays) - .property("ref.grad.arrays", &Executor::grad_arrays) - .property("ref.aux.arrays", &Executor::aux_arrays) - .property("ref.outputs", &Executor::out_arrays) - .property("arg.arrays", &Executor::GetArgArrays) - .property("grad.arrays", &Executor::GetGradArrays) - .property("aux.arrays", &Executor::GetAuxArrays) - .property("outputs", &Executor::GetOuputArrays); - function("mx.symbol.bind", - &Executor::Bind, - List::create(_["symbol"], _["ctx"], - _["arg.arrays"], _["aux.arrays"], _["grad.reqs"]), - "Bind the symbol on argument arrays, generate gradient arrays according to grad.reqs"); -} - -} // namespace R -} // namespace mxnet diff --git a/R-package/src/executor.h b/R-package/src/executor.h deleted file mode 100644 index a2394433f77c..000000000000 --- a/R-package/src/executor.h +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file executor.h - * \brief Rcpp Symbolic Execution interface of MXNet - */ -#ifndef MXNET_RCPP_EXECUTOR_H_ -#define MXNET_RCPP_EXECUTOR_H_ - -#include -#include -#include -#include "./base.h" -#include "./symbol.h" - -namespace mxnet { -namespace R { -/*! \brief The Rcpp Executor class of MXNet */ -class Executor : public MXNetMovable<Executor> { - public: - /*! \return typename from R side. */ - inline static const char* TypeName() { - return "MXExecutor"; - } - /*! - * \return Get reference of the arg arrays of executor. - */ - const Rcpp::List& arg_arrays() const { - return *arg_arrays_; - } - /*! - * \return Get reference of the aux arrays of executor. - */ - const Rcpp::List& aux_arrays() const { - return *aux_arrays_; - } - /*! - * \return Get reference of gradient arrays of executor. - */ - const Rcpp::List& grad_arrays() const { - return *grad_arrays_; - } - /*!
- * \return Get reference of the output arrays of executor. - */ - const Rcpp::List& out_arrays() const { - return *out_arrays_; - } - /*! - * \return Get the arg arrays of executor. - */ - Rcpp::List GetArgArrays() const { - return CloneArray(*arg_arrays_); - } - /*! - * \return Get the grad arrays of executor. - */ - Rcpp::List GetGradArrays() const { - return CloneArray(*grad_arrays_); - } - /*! - * \return Get the auxiliary arrays of executor. - */ - Rcpp::List GetAuxArrays() const { - return CloneArray(*aux_arrays_); - } - /*! - * \return Get the output arrays of executor. - */ - Rcpp::List GetOuputArrays() const { - return CloneArray(*out_arrays_); - } - /*! - * \brief Update the arg_arrays of executor, based on name-matching. - * \param array The array to update - * \param match_name whether to use name to match the input, instead of index. - * \param allow_null Whether NULL entries are allowed; entries that are NULL are simply skipped. - */ - void UpdateArgArray(const Rcpp::List& array, - bool match_name, - bool allow_null); - /*! - * \brief Update the aux_arrays of executor, based on name-matching. - * \param array The array to update - * \param match_name whether to use name to match the input, instead of index. - * \param allow_null Whether NULL entries are allowed; entries that are NULL are simply skipped. - */ - void UpdateAuxArray(const Rcpp::List& array, - bool match_name, - bool allow_null); - /*! - * \brief Update the grad_arrays of executor, based on name-matching. - * \param array The array to update - * \param match_name whether to use name to match the input, instead of index. - * \param allow_null Whether NULL entries are allowed; entries that are NULL are simply skipped. - */ - void UpdateGradArray(const Rcpp::List& array, - bool match_name, - bool allow_null); - /*! - * \brief Perform a forward operation on exec, this will set the out_arrays. - * \param is_train whether it is training phase. - * \param kwargs additional parameters (currently unused). - */ - void Forward(bool is_train, - const Rcpp::List& kwargs); - /*! - * \brief Perform a backward operation on exec, this will set the grad_arrays. - * \param output_grads the gradient on outputs, to be propagated back. - */ - void Backward(const Rcpp::List& output_grads); - /*! - * \brief Create a new R Executor by binding on a symbol - * \param symbol The R symbol to bind. - * \param context The device to bind. - * \param arg_arrays The argument arrays giving the initial value of arguments. - * \param aux_arrays The auxiliary state arrays giving the initial value of auxiliary states. - * \param grad_reqs Array of gradient request strings ('null', 'write' or 'add'), one per argument. - */ - static RObjectType Bind(const Symbol::RObjectType& symbol, - const Context::RObjectType& context, - const Rcpp::List& arg_arrays, - const Rcpp::List& aux_arrays, - const Rcpp::List& grad_reqs); - /*!
\brief static function to initialize the Rcpp functions */ - static void InitRcppModule(); - // destructor - ~Executor() { - delete out_arrays_; - delete arg_arrays_; - delete grad_arrays_; - delete aux_arrays_; - - if (!this->moved_) { - MX_CALL(MXExecutorFree(handle_)); - } - } - - private: - // friend with symbol - friend class Symbol; - // internal constructor, enable trivial operator= - Executor() - : out_arrays_(nullptr), - arg_arrays_(nullptr), - grad_arrays_(nullptr), - aux_arrays_(nullptr) {} - - /*! \return a new Object that is moved from current one */ - inline Executor* CreateMoveObject() { - Executor *moved = new Executor(); - *moved = *this; - out_arrays_ = nullptr; - arg_arrays_ = nullptr; - grad_arrays_ = nullptr; - aux_arrays_ = nullptr; - return moved; - } - /*! - * \brief Clone src into a new space. - * \param src source list of arrays to clone. - * \return A cloned list of arrays under same context. - */ - static Rcpp::List CloneArray(const Rcpp::List& src); - /*! - * \brief Copy arrays from one list (from) to another (to). - * \param array_name The name of the array, used for error message. - * \param from source list to copy from. - * \param to target list to copy to. - * \param match_name whether to use name to match the input, instead of index. - * \param skip_null Whether NULL entries are allowed; entries that are NULL are simply skipped. - */ - static void UpdateArray(const char* array_name, - const Rcpp::List& from, Rcpp::List *to, - bool match_name, bool skip_null); - /*! \brief output arrays of Executor */ - Rcpp::List *out_arrays_; - /*! \brief argument arrays of Executor */ - Rcpp::List *arg_arrays_; - /*! \brief gradient arrays of Executor */ - Rcpp::List *grad_arrays_; - /*! \brief auxiliary arrays of Executor */ - Rcpp::List *aux_arrays_; - /*! \brief internal executor handle */ - ExecutorHandle handle_; -}; -} // namespace R -} // namespace mxnet - -RCPP_EXPOSED_CLASS_NODECL(::mxnet::R::Executor); - -namespace Rcpp { - template<> - inline bool is<mxnet::R::Executor>(SEXP x) { - return internal::is__module__object_fix<mxnet::R::Executor>(x); - } -} - -#endif // MXNET_RCPP_EXECUTOR_H_ diff --git a/R-package/src/export.cc b/R-package/src/export.cc deleted file mode 100644 index ae64a1761966..000000000000 --- a/R-package/src/export.cc +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file export.cc - * \brief Exporter module to export document and wrapper functions. - */ -#include -#include -#include -#include "./base.h" -#include "./export.h" - -namespace mxnet { -namespace R { -// docstring related function.
-std::string MakeDocString(mx_uint num_args, - const char **arg_names, - const char **arg_type_infos, - const char **arg_descriptions, - bool remove_dup) { - std::set<std::string> visited; - std::ostringstream os; - for (mx_uint i = 0; i < num_args; ++i) { - std::string arg = arg_names[i]; - if (visited.count(arg) != 0 && remove_dup) continue; - for (size_t j = 0; j < arg.length(); ++j) { - if (arg[j] == '_') arg[j] = '.'; - } - visited.insert(arg); - os << "@param " << arg << " " << arg_type_infos[i] << "\n" - << " " << arg_descriptions[i] << "\n"; - } - return os.str(); -} - - -Exporter* Exporter::Get() { - static Exporter inst; - return &inst; -} - -void Exporter::InitRcppModule() { - using namespace Rcpp; // NOLINT(*) - Exporter::Get()->scope_ = ::getCurrentScope(); - function("mx.internal.export", &Exporter::Export, - Rcpp::List::create(_["path"]), - "Internal function of mxnet, used to export generated functions file."); -} - -std::string ExportDocString(const std::string& docstring) { - std::ostringstream os; - std::istringstream is(docstring); - std::string line; - line.resize(1024); - while (is.getline(&line[0], line.length())) { - os << "#' " << line.c_str() << "\n"; - } - return os.str(); -} - -std::string ReplaceAll(std::string str, const std::string& from, const std::string& to) { - size_t start_pos = 0; - while ((start_pos = str.find(from, start_pos)) != std::string::npos) { - str.replace(start_pos, from.length(), to); - start_pos += to.length(); // advance past the replacement, in case 'to' contains 'from' - } - return str; -}
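ExportDocString above is the step that turns a generated docstring into roxygen2 comment lines. A self-contained sketch of the same transformation, with an illustrative input string that is not taken from the patch:

    #include <iostream>
    #include <sstream>
    #include <string>

    int main() {
      std::string doc = "@param x The input array\n@return The result\n@export";
      std::istringstream is(doc);
      std::string line;
      while (std::getline(is, line)) {
        std::cout << "#' " << line << "\n";  // prefix every line, as above
      }
      // Prints:
      // #' @param x The input array
      // #' @return The result
      // #' @export
      return 0;
    }
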
-void ExportVArgFunction(std::ostream& os, // NOLINT(*) - const std::string& func_name, - const std::string& docstr) { - std::string prefix = "mx.varg."; - std::string new_name = std::string("mx.") + (func_name.c_str() + prefix.length()); - os << "\n" << ExportDocString(docstr) - << new_name << " <- function(...) {\n" - << " " << func_name << "(list(...))\n" - << "}\n"; - RLOG_INFO << "Exporting " << func_name << " as " << new_name << "\n"; -} - -void ExportNormalFunction(std::ostream& os, // NOLINT(*) - const std::string& func_name, - const std::string& docstr) { - os << "\n" - << ExportDocString(docstr) - << "#' @name " << func_name << "\n" - << "NULL\n"; - RLOG_INFO << "Exporting " << func_name << " docstring\n"; -} - -void Exporter::Export(const std::string& path) { - std::string filename = path + "/R/mxnet_generated.R"; - std::ofstream script(filename.c_str()); - RLOG_INFO << "Start to generate "<< path << " ...\n"; - script << "######\n" - << "# Generated by mxnet.export, do not edit by hand.\n" - << "######\n"; - Rcpp::Module *scope = Exporter::Get()->scope_; - Rcpp::CharacterVector func_names = scope->functions_names(); - - for (size_t i = 0; i < func_names.size(); ++i) { - std::string fname = Rcpp::as<std::string>(func_names[i]); - // skip internal functions - if (fname.find("internal.") != std::string::npos) continue; - if (fname == "mx.varg.symbol.Concat" - || fname == "mx.varg.symbol.concat" - || fname == "mx.varg.symbol.min_axis" - || fname == "mx.varg.symbol.min") continue; - Rcpp::List func_info(scope->get_function(fname)); - std::string docstr = Rcpp::as<std::string>(func_info[2]); - - docstr = ReplaceAll(docstr, std::string("\a"), std::string("\\a")); - docstr = ReplaceAll(docstr, std::string("\b"), std::string("\\b")); - - if (docstr.find("@export") == std::string::npos) continue; - if (fname.find("mx.varg.") == 0) { - ExportVArgFunction(script, fname, docstr); - } else { - ExportNormalFunction(script, fname, docstr); - } - } - RLOG_INFO << "All generation finished on "<< path << " ...\n"; -} -} // namespace R -} // namespace mxnet diff --git a/R-package/src/export.h b/R-package/src/export.h deleted file mode 100644 index 4be6e15fce0b..000000000000 --- a/R-package/src/export.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file export.h - * \brief Export module that takes charge of code and documentation - * generation for functions exported to the R side - */ -#ifndef MXNET_RCPP_EXPORT_H_ -#define MXNET_RCPP_EXPORT_H_ - -#include -#include - -namespace mxnet { -namespace R { -/*! \brief exporter class*/ -class Exporter { - public: - /*! - * \brief Export the generated file into path. - * \param path The path to be exported. - */ - static void Export(const std::string& path); - // initialize the Rcpp module - static void InitRcppModule(); - - public: - // get the singleton of exporter - static Exporter* Get(); - /*! \brief The scope of current module to export */ - Rcpp::Module* scope_; -}; -
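To make the declaration that follows concrete: MakeDocString joins one @param block per argument, rewriting underscores to dots so the names match R conventions. A hedged sketch with invented operator metadata (the argument values are illustrative only, not from the patch):

    #include <string>
    #include "./export.h"  // for MakeDocString, declared just below

    std::string BuildExampleDoc() {
      // Hypothetical operator metadata, for illustration only.
      const char *names[] = {"num_hidden", "no_bias"};
      const char *types[] = {"int, required", "boolean, optional, default=False"};
      const char *descs[] = {"Number of hidden nodes.", "Whether to disable bias."};
      return mxnet::R::MakeDocString(2, names, types, descs);
      // Result, roughly:
      //   @param num.hidden int, required
      //     Number of hidden nodes.
      //   @param no.bias boolean, optional, default=False
      //     Whether to disable bias.
    }
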
-/*! - * \brief Get human-readable roxygen-style function information. - * \param num_args number of arguments. - * \param arg_names names of arguments. - * \param arg_type_infos type information of arguments. - * \param arg_descriptions descriptions of arguments. - * \param remove_dup Whether to remove duplications. - */ -std::string MakeDocString(mx_uint num_args, - const char **arg_names, - const char **arg_type_infos, - const char **arg_descriptions, - bool remove_dup = true); -} // namespace R -} // namespace mxnet -#endif // MXNET_RCPP_EXPORT_H_ diff --git a/R-package/src/im2rec.cc b/R-package/src/im2rec.cc deleted file mode 100644 index 324ec3c1e452..000000000000 --- a/R-package/src/im2rec.cc +++ /dev/null @@ -1,288 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2017 by Contributors - * \file im2rec.cc - * \brief Image list to RecordIO conversion for the MXNet R package - */ - -#include -#include -#include -#include -#include -#include -#include -#include "dmlc/base.h" -#include "dmlc/io.h" -#include "dmlc/timer.h" -#include "dmlc/logging.h" -#include "dmlc/recordio.h" -#include -#include "image_recordio.h" -#include "base.h" -#include "im2rec.h" - -namespace mxnet { -namespace R { - -int GetInterMethod(int inter_method, int old_width, int old_height, - int new_width, int new_height, std::mt19937& prnd) { // NOLINT(*) - if (inter_method == 9) { - if (new_width > old_width && new_height > old_height) { - return 2; // CV_INTER_CUBIC for enlarge - } else if (new_width < old_width && new_height < old_height) { - return 3; // CV_INTER_AREA for shrink - } else { - return 1; // CV_INTER_LINEAR for others - } - } else if (inter_method == 10) { - std::uniform_int_distribution<int> rand_uniform_int(0, 4); - return rand_uniform_int(prnd); - } else { - return inter_method; - } -} - -IM2REC* IM2REC::Get() { - static IM2REC inst; - return &inst; -} - -void IM2REC::InitRcppModule() { - using namespace Rcpp; // NOLINT(*) - IM2REC::Get()->scope_ = ::getCurrentScope(); - function("mx.internal.im2rec", &IM2REC::im2rec, - Rcpp::List::create(_["image_lst"], - _["root"], - _["output_rec"], - _["label_width"], - _["pack_label"], - _["new_size"], - _["nsplit"], - _["partid"], - _["center_crop"], - _["quality"], - _["color_mode"], - _["unchanged"], - _["inter_method"], - _["encoding"]), - ""); -} - -void IM2REC::im2rec(const std::string & image_lst, const std::string & root, - const std::string & output_rec, - int label_width, int pack_label, int new_size, int nsplit, - int partid, int center_crop, int quality, - int color_mode, int unchanged, - int inter_method, std::string encoding) { - // Check parameters ranges - if (color_mode != -1 && color_mode != 0 && color_mode != 1) { - Rcpp::stop("Color mode must be -1, 0 or 1."); - } - if (encoding != std::string(".jpg") && encoding != std::string(".png")) { - Rcpp::stop("Encoding mode must be .jpg or .png."); - } - if (label_width <= 1 && pack_label) { -
Rcpp::stop("pack_label can only be used when label_width > 1"); - } - if (new_size > 0) { - LOG(INFO) << "New Image Size: Short Edge " << new_size; - } else { - LOG(INFO) << "Keep origin image size"; - } - if (center_crop) { - LOG(INFO) << "Center cropping to square"; - } - if (color_mode == 0) { - LOG(INFO) << "Use gray images"; - } - if (color_mode == -1) { - LOG(INFO) << "Keep original color mode"; - } - LOG(INFO) << "Encoding is " << encoding; - - if (encoding == std::string(".png") && quality > 9) { - quality = 3; - } - if (inter_method != 1) { - switch (inter_method) { - case 0: - LOG(INFO) << "Use inter_method CV_INTER_NN"; - break; - case 2: - LOG(INFO) << "Use inter_method CV_INTER_CUBIC"; - break; - case 3: - LOG(INFO) << "Use inter_method CV_INTER_AREA"; - break; - case 4: - LOG(INFO) << "Use inter_method CV_INTER_LANCZOS4"; - break; - case 9: - LOG(INFO) << "Use inter_method mod auto(cubic for enlarge, area for shrink)"; - break; - case 10: - LOG(INFO) << "Use inter_method mod rand(nn/bilinear/cubic/area/lanczos4)"; - break; - } - } - std::random_device rd; - std::mt19937 prnd(rd()); - using namespace dmlc; - static const size_t kBufferSize = 1 << 20UL; - mxnet::io::ImageRecordIO rec; - size_t imcnt = 0; - double tstart = dmlc::GetTime(); - dmlc::InputSplit *flist = - dmlc::InputSplit::Create(image_lst.c_str(), partid, nsplit, "text"); - std::ostringstream os; - if (nsplit == 1) { - os << output_rec; - } else { - os << output_rec << ".part" << std::setw(3) << std::setfill('0') << partid; - } - LOG(INFO) << "Write to output: " << os.str(); - dmlc::Stream *fo = dmlc::Stream::Create(os.str().c_str(), "w"); - LOG(INFO) << "Output: " << os.str(); - dmlc::RecordIOWriter writer(fo); - std::string fname, path, blob; - std::vector decode_buf; - std::vector encode_buf; - std::vector encode_params; - if (encoding == std::string(".png")) { - encode_params.push_back(CV_IMWRITE_PNG_COMPRESSION); - encode_params.push_back(quality); - LOG(INFO) << "PNG encoding compression: " << quality; - } else { - encode_params.push_back(CV_IMWRITE_JPEG_QUALITY); - encode_params.push_back(quality); - LOG(INFO) << "JPEG encoding quality: " << quality; - } - dmlc::InputSplit::Blob line; - std::vector label_buf(label_width, 0.f); - - while (flist->NextRecord(&line)) { - std::string sline(static_cast(line.dptr), line.size); - std::istringstream is(sline); - if (!(is >> rec.header.image_id[0] >> rec.header.label)) continue; - label_buf[0] = rec.header.label; - for (int k = 1; k < label_width; ++k) { - RCHECK(is >> label_buf[k]) - << "Invalid ImageList, did you provide the correct label_width?"; - } - if (pack_label) rec.header.flag = label_width; - rec.SaveHeader(&blob); - if (pack_label) { - size_t bsize = blob.size(); - blob.resize(bsize + label_buf.size()*sizeof(float)); - memcpy(BeginPtr(blob) + bsize, - BeginPtr(label_buf), label_buf.size()*sizeof(float)); - } - RCHECK(std::getline(is, fname)); - // eliminate invalid chars in the end - while (fname.length() != 0 && - (isspace(*fname.rbegin()) || !isprint(*fname.rbegin()))) { - fname.resize(fname.length() - 1); - } - // eliminate invalid chars in beginning. 
- const char *p = fname.c_str(); - while (isspace(*p)) ++p; - path = root + p; - // use "r" is equal to rb in dmlc::Stream - dmlc::Stream *fi = dmlc::Stream::Create(path.c_str(), "r"); - decode_buf.clear(); - size_t imsize = 0; - while (true) { - decode_buf.resize(imsize + kBufferSize); - size_t nread = fi->Read(BeginPtr(decode_buf) + imsize, kBufferSize); - imsize += nread; - decode_buf.resize(imsize); - if (nread != kBufferSize) break; - } - delete fi; - - - if (unchanged != 1) { - cv::Mat img = cv::imdecode(decode_buf, color_mode); - RCHECK(img.data != NULL) << "OpenCV decode fail:" << path; - cv::Mat res = img; - if (new_size > 0) { - if (center_crop) { - if (img.rows > img.cols) { - int margin = (img.rows - img.cols)/2; - img = img(cv::Range(margin, margin+img.cols), cv::Range(0, img.cols)); - } else { - int margin = (img.cols - img.rows)/2; - img = img(cv::Range(0, img.rows), cv::Range(margin, margin + img.rows)); - } - } - int interpolation_method = 1; - if (img.rows > img.cols) { - if (img.cols != new_size) { - interpolation_method = GetInterMethod(inter_method, img.cols, img.rows, - new_size, - img.rows * new_size / img.cols, prnd); - cv::resize(img, res, cv::Size(new_size, - img.rows * new_size / img.cols), - 0, 0, interpolation_method); - } else { - res = img.clone(); - } - } else { - if (img.rows != new_size) { - interpolation_method = GetInterMethod(inter_method, img.cols, - img.rows, new_size * img.cols / img.rows, - new_size, prnd); - cv::resize(img, res, cv::Size(new_size * img.cols / img.rows, - new_size), 0, 0, interpolation_method); - } else { - res = img.clone(); - } - } - } - encode_buf.clear(); - RCHECK(cv::imencode(encoding, res, encode_buf, encode_params)); - - // write buffer - size_t bsize = blob.size(); - blob.resize(bsize + encode_buf.size()); - memcpy(BeginPtr(blob) + bsize, - BeginPtr(encode_buf), encode_buf.size()); - } else { - size_t bsize = blob.size(); - blob.resize(bsize + decode_buf.size()); - memcpy(BeginPtr(blob) + bsize, - BeginPtr(decode_buf), decode_buf.size()); - } - writer.WriteRecord(BeginPtr(blob), blob.size()); - // write header - ++imcnt; - if (imcnt % 1000 == 0) { - LOG(INFO) << imcnt << " images processed, " << GetTime() - tstart << " sec elapsed"; - } - } - LOG(INFO) << "Total: " << imcnt << " images processed, " << GetTime() - tstart << " sec elapsed"; - delete fo; - delete flist; -} -} // namespace R -} // namespace mxnet diff --git a/R-package/src/im2rec.h b/R-package/src/im2rec.h deleted file mode 100644 index b9e82da0f079..000000000000 --- a/R-package/src/im2rec.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! 
- * Copyright (c) 2017 by Contributors - * \file im2rec.h - * \brief Image list to RecordIO conversion for the MXNet R package - */ - -#ifndef MXNET_RCPP_IM2REC_H_ -#define MXNET_RCPP_IM2REC_H_ - -#include -#include -#include -#if CV_VERSION_MAJOR >= 4 -#include -#define CV_IMWRITE_PNG_COMPRESSION cv::IMWRITE_PNG_COMPRESSION -#define CV_IMWRITE_JPEG_QUALITY cv::IMWRITE_JPEG_QUALITY -#endif // CV_VERSION_MAJOR >= 4 - -namespace mxnet { -namespace R { - -class IM2REC { - public: - /*! - * \brief Convert the images listed in image_lst (paths relative to root) - * into a RecordIO file written to output_rec. - */ - static void im2rec(const std::string & image_lst, const std::string & root, - const std::string & output_rec, - int label_width = 1, int pack_label = 0, int new_size = -1, int nsplit = 1, - int partid = 0, int center_crop = 0, int quality = 95, - int color_mode = 1, int unchanged = 0, - int inter_method = 1, std::string encoding = ".jpg"); - // initialize the Rcpp module - static void InitRcppModule(); - - public: - // get the singleton of IM2REC - static IM2REC* Get(); - /*! \brief The scope of current module to export */ - Rcpp::Module* scope_; -}; - -} // namespace R -} // namespace mxnet - -#endif // MXNET_RCPP_IM2REC_H_
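Read together with the declaration above, a direct C++ invocation would look like the following sketch. The three paths are placeholders, and the values differ from the documented defaults only for new_size and center_crop:

    #include "./im2rec.h"  // the header above

    void ConvertExample() {
      // "train.lst" / "images/" / "train.rec" are placeholder paths.
      mxnet::R::IM2REC::im2rec("train.lst", "images/", "train.rec",
                               /*label_width=*/1, /*pack_label=*/0,
                               /*new_size=*/256, /*nsplit=*/1, /*partid=*/0,
                               /*center_crop=*/1, /*quality=*/95,
                               /*color_mode=*/1, /*unchanged=*/0,
                               /*inter_method=*/1, /*encoding=*/".jpg");
    }
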
diff --git a/R-package/src/io.cc b/R-package/src/io.cc deleted file mode 100644 index c72ea92ce30f..000000000000 --- a/R-package/src/io.cc +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file io.cc - * \brief Rcpp IO module of mxnet. - */ -#include -#include -#include "./base.h" -#include "./io.h" -#include "./export.h" -#include "./ndarray.h" - -namespace mxnet { -namespace R { - -void MXDataIter::Reset() { - MX_CALL(MXDataIterBeforeFirst(handle_)); -} - -bool MXDataIter::Next() { - int ret; - MX_CALL(MXDataIterNext(handle_, &ret)); - return ret != 0; -} - -int MXDataIter::NumPad() const { - int pad; - MX_CALL(MXDataIterGetPadNum(handle_, &pad)); - return pad; -} - -Rcpp::List MXDataIter::Value() const { - NDArrayHandle data, label; - MX_CALL(MXDataIterGetData(handle_, &data)); - MX_CALL(MXDataIterGetLabel(handle_, &label)); - return Rcpp::List::create( - Rcpp::Named("data") = NDArray::RObject(data, false), - Rcpp::Named("label") = NDArray::RObject(label, false)); -} - -ArrayDataIter::ArrayDataIter(const Rcpp::NumericVector& data, - const Rcpp::NumericVector& label, - const Rcpp::NumericVector& unif_rnds, - int batch_size, - bool shuffle) : counter_(0) { - Rcpp::IntegerVector dshape = data.attr("dim"); - Rcpp::IntegerVector lshape = label.attr("dim"); - if (dshape[dshape.size() - 1] != lshape[lshape.size() - 1]) { - if (dshape[0] == lshape[0]) { - RLOG_FATAL << "It seems X, y were passed in a row-major way; " - << "MXNetR adopts a column-major convention.\n" - << "Please pass in the transpose of X instead"; - } else { - RLOG_FATAL << "Data and label shapes are inconsistent"; - } - } - num_data = lshape[lshape.size() - 1]; - std::vector<size_t> order(num_data); - for (size_t i = 0; i < order.size(); ++i) { - order[i] = i; - } - - if (shuffle) { - RCHECK(unif_rnds.size() == num_data); - for (size_t i = order.size() - 1; i != 0; --i) { - size_t idx = static_cast<size_t>(unif_rnds[i] * (i + 1)); - if (idx < i) { - std::swap(order[i], order[idx]); - } - } - } - ArrayDataIter::Convert(data, order, batch_size, &data_); - ArrayDataIter::Convert(label, order, batch_size, &label_); - num_pad_ = (batch_size - (order.size() % batch_size)) % batch_size; - RCHECK(label_.size() == data_.size()) - << "Data and label sizes are inconsistent"; -} - -void ArrayDataIter::Convert(const Rcpp::NumericVector& src, - const std::vector<size_t>& order, - size_t batch_size, - std::vector<NDArray> *out) { - Rcpp::RObject dim = src.attr("dim"); - Rcpp::Dimension rshape(dim); - size_t ndim = rshape.size(); - std::vector<mx_float> temp(src.size()), batch; - std::copy(src.begin(), src.end(), temp.begin()); - out->clear(); - out->reserve(rshape[ndim - 1] / batch_size + 1); - size_t line_size = 1; - for (size_t i = 0; i < rshape.size() - 1; ++i) { - line_size *= rshape[i]; - } - rshape[ndim - 1] = batch_size; - batch.resize(batch_size * line_size, 0.0f); - - for (size_t begin = 0; begin < order.size(); begin += batch_size) { - size_t end = std::min(order.size(), begin + batch_size); - for (size_t i = begin; i < end; ++i) { - std::memcpy(&batch[(i - begin) * line_size], - &temp[order[i] * line_size], - sizeof(mx_float) * line_size); - } - NDArray::RObjectType ret = NDArray::Empty(rshape, Context::CPU()); - MX_CALL(MXNDArraySyncCopyFromCPU( - NDArray(ret)->handle, - dmlc::BeginPtr(batch), batch.size())); - out->push_back(NDArray(ret)); - } -} - -Rcpp::List ArrayDataIter::Value() const { - RCHECK(counter_ != 0 && counter_ <= num_data) - << "Read Iter at end or before iter.next is called"; - return Rcpp::List::create( - Rcpp::Named("data") = data_[counter_ - 1].RObject(), - Rcpp::Named("label") = label_[counter_ - 1].RObject()); -} - -bool ArrayDataIter::Next() { - if (counter_ < data_.size()) { - ++counter_; return true; - } else { - return false; - } -} - -int ArrayDataIter::NumPad() const { - if (counter_ ==
label_.size()) { - return static_cast(num_pad_); - } else { - return 0; - } -} - -Rcpp::RObject ArrayDataIter::Create(const Rcpp::NumericVector& data, - const Rcpp::NumericVector& label, - const Rcpp::NumericVector& unif_rnds, - int batch_size, - bool shuffle) { - return Rcpp::internal::make_new_object( - new ArrayDataIter(data, label, unif_rnds, batch_size, shuffle)); -} - -DataIterCreateFunction::DataIterCreateFunction -(DataIterCreator handle) - : handle_(handle) { - const char* name; - const char* description; - mx_uint num_args; - const char **arg_names; - const char **arg_type_infos; - const char **arg_descriptions; - - MX_CALL(MXDataIterGetIterInfo( - handle_, &name, &description, &num_args, - &arg_names, &arg_type_infos, &arg_descriptions)); - - if (name[0] == '_') { - name_ = std::string("mx.varg.io.internal.") + (name + 1); - } else { - name_ = std::string("mx.varg.io.") + name; - } - std::ostringstream os; - os << description << "\n\n" - << MakeDocString(num_args, arg_names, arg_type_infos, arg_descriptions) - << "@return iter The result mx.dataiter\n\n" - << "@export\n"; - this->docstring = os.str(); -} - -SEXP DataIterCreateFunction::operator() (SEXP* args) { - BEGIN_RCPP; - Rcpp::List kwargs(args[0]); - std::vector keys = SafeGetListNames(kwargs); - std::vector str_keys(keys.size()); - std::vector str_vals(keys.size()); - for (size_t i = 0; i < kwargs.size(); ++i) { - RCHECK(keys[i].length() != 0) - << name_ << " only accept key=value style arguments"; - str_keys[i] = FormatParamKey(keys[i]); - str_vals[i] = toPyString(keys[i], kwargs[i]); - } - DataIterHandle out; - std::vector c_str_keys = CKeys(str_keys); - std::vector c_str_vals = CKeys(str_vals); - - MX_CALL(MXDataIterCreateIter( - handle_, static_cast(str_keys.size()), - dmlc::BeginPtr(c_str_keys), - dmlc::BeginPtr(c_str_vals), - &out)); - return MXDataIter::RObject(out); - END_RCPP; -} - -void DataIter::InitRcppModule() { - using namespace Rcpp; // NOLINT(*) - class_("MXDataIter") - .method("iter.next", &DataIter::Next) - .method("reset", &DataIter::Reset) - .method("value", &DataIter::Value) - .method("num.pad", &DataIter::NumPad); - - class_("MXNativeDataIter") - .derives("MXDataIter"); - - class_("MXArrayDataIter") - .derives("MXDataIter"); - - function("mx.io.internal.arrayiter", &ArrayDataIter::Create); -} - -void DataIterCreateFunction::InitRcppModule() { - Rcpp::Module* scope = ::getCurrentScope(); - RCHECK(scope != nullptr) - << "Init Module need to be called inside scope"; - mx_uint out_size; - DataIterCreator *arr; - MX_CALL(MXListDataIters(&out_size, &arr)); - for (int i = 0; i < out_size; ++i) { - DataIterCreateFunction *f = new DataIterCreateFunction(arr[i]); - scope->Add(f->get_name(), f); - } -} -} // namespace R -} // namespace mxnet diff --git a/R-package/src/io.h b/R-package/src/io.h deleted file mode 100644 index 238c1bc3da05..000000000000 --- a/R-package/src/io.h +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file io.h - * \brief Rcpp Data Loading and Iteration Interface of MXNet. - */ -#ifndef MXNET_RCPP_IO_H_ -#define MXNET_RCPP_IO_H_ - -#include -#include -#include -#include -#include "./base.h" -#include "./ndarray.h" - -namespace mxnet { -namespace R { -// creator function of DataIter -class DataIterCreateFunction; - -/*! \brief Base iterator interface */ -class DataIter { - public: - virtual ~DataIter() {} - /*! \return typename from R side. */ - inline static const char* TypeName() { - return "DataIter"; - } - /*! \brief Reset the iterator */ - virtual void Reset() = 0; - /*! - * \brief Move to next position. - * \return whether the move is successful. - */ - virtual bool Next() = 0; - /*! - * \brief number of padding examples. - * \return number of padding examples. - */ - virtual int NumPad() const = 0; - /*! - * \brief Get the Data Element - * \return List of NDArray of elements in this value. - */ - virtual Rcpp::List Value() const = 0; - /*! \brief initialize the R cpp Module */ - static void InitRcppModule(); -}; - -/*! - * \brief MXNet's internal data iterator. - */ -class MXDataIter : public DataIter { - public: - /*! \return typename from R side. */ - inline static const char* TypeName() { - return "MXNativeDataIter"; - } - // implement the interface - virtual void Reset(); - virtual bool Next(); - virtual int NumPad() const; - virtual Rcpp::List Value() const; - virtual ~MXDataIter() { - MX_CALL(MXDataIterFree(handle_)); - } - - private: - friend class DataIter; - friend class DataIterCreateFunction; - // constructor - MXDataIter() {} - explicit MXDataIter(DataIterHandle handle) - : handle_(handle) {} - /*! - * \brief create an R object that corresponds to the class - * \param handle the Handle needed for output. - */ - inline static Rcpp::RObject RObject(DataIterHandle handle) { - return Rcpp::internal::make_new_object<MXDataIter>(new MXDataIter(handle)); - } - /*! \brief internal data iter handle */ - DataIterHandle handle_; -};
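The base interface above fixes the iteration protocol that both concrete iterators implement. A minimal consumption loop, assuming `it` points at an already-constructed concrete iterator:

    #include <Rcpp.h>
    #include "./io.h"  // the interface above

    // Illustrative only: the consumption protocol that DataIter defines.
    void Consume(mxnet::R::DataIter *it) {
      it->Reset();                          // rewind to the first batch
      while (it->Next()) {                  // advance; returns false when done
        Rcpp::List batch = it->Value();     // list(data = ..., label = ...)
        int pad = it->NumPad();             // padded examples in the final batch
        (void)batch; (void)pad;             // consume the batch here
      }
    }
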
- -/*! - * \brief A data iterator that takes a NumericVector, - * shuffles it, and iterates over its content. - * TODO(KK, tq) implement this when we have time. - * c.f. python/io.py:NDArrayIter - */ -class ArrayDataIter : public DataIter { - public: - /*! \return typename from R side. */ - inline static const char* TypeName() { - return "MXArrayDataIter"; - } - /*! - * \brief Construct an ArrayDataIter from data and label. - * \param data The data array. - * \param label The label array. - * \param unif_rnds Uniform [0,1] random numbers of the same length as label. - * Only needed when shuffle=TRUE - * \param batch_size The size of the batch. - * \param shuffle Whether to shuffle the data. - */ - ArrayDataIter(const Rcpp::NumericVector& data, - const Rcpp::NumericVector& label, - const Rcpp::NumericVector& unif_rnds, - int batch_size, - bool shuffle); - virtual void Reset() { - counter_ = 0; - } - virtual bool Next(); - virtual int NumPad() const; - virtual Rcpp::List Value() const; - static Rcpp::RObject Create(const Rcpp::NumericVector& data, - const Rcpp::NumericVector& label, - const Rcpp::NumericVector& unif_rnds, - int batch_size, - bool shuffle); - - private: - friend class DataIter; - // create internal representation - static void Convert(const Rcpp::NumericVector &src, - const std::vector<size_t> &order, - size_t batch_size, - std::vector<NDArray> *out); - /*! \brief The counter */ - size_t counter_; - /*! \brief number of padded instances */ - size_t num_pad_; - /*! \brief number of data */ - size_t num_data; - /*! \brief The data list of each batch */ - std::vector<NDArray> data_; - /*! \brief The label list of each batch */ - std::vector<NDArray> label_; -}; - - -/*! \brief The DataIterCreate functions to be invoked */ -class DataIterCreateFunction : public ::Rcpp::CppFunction { - public: - virtual SEXP operator() (SEXP* args); - - virtual int nargs() { - return 1; - } - - virtual bool is_void() { - return false; - } - - virtual void signature(std::string& s, const char* name) { // NOLINT(*) - ::Rcpp::signature< SEXP, ::Rcpp::List >(s, name); - } - - virtual const char* get_name() { - return name_.c_str(); - } - - virtual SEXP get_formals() { - return Rcpp::List::create(Rcpp::_["alist"]); - } - - virtual DL_FUNC get_function_ptr() { - return (DL_FUNC)NULL; // NOLINT(*) - } - /*! \brief static function to initialize the Rcpp functions */ - static void InitRcppModule(); - - private: - // make constructor private - explicit DataIterCreateFunction(DataIterCreator handle); - /*! \brief internal creator handle. */ - DataIterCreator handle_; - // name of the function - std::string name_; -}; - - -} // namespace R -} // namespace mxnet - -RCPP_EXPOSED_CLASS_NODECL(::mxnet::R::MXDataIter); -RCPP_EXPOSED_CLASS_NODECL(::mxnet::R::ArrayDataIter); - -namespace Rcpp { - template<> - inline bool is<mxnet::R::MXDataIter>(SEXP x) { - return internal::is__module__object_fix<mxnet::R::MXDataIter>(x); - } - template<> - inline bool is<mxnet::R::ArrayDataIter>(SEXP x) { - return internal::is__module__object_fix<mxnet::R::ArrayDataIter>(x); - } - // This patch needs to be kept even after the Rcpp update is merged in. - template<> - inline bool is<mxnet::R::DataIter>(SEXP x) { - return is<mxnet::R::MXDataIter>(x) || - is<mxnet::R::ArrayDataIter>(x); - } -} // namespace Rcpp -#endif // MXNET_RCPP_IO_H_ - diff --git a/R-package/src/kvstore.cc b/R-package/src/kvstore.cc deleted file mode 100644 index fe8de4d512ba..000000000000 --- a/R-package/src/kvstore.cc +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*!
- * Copyright (c) 2015 by Contributors - * \file kvstore.cc - * \brief Rcpp NDArray of MXNet. - */ -#include -#include -#include -#include "./base.h" -#include "./kvstore.h" -#include "./ndarray.h" - -namespace mxnet { -namespace R { - -void KVStore::Init(const std::vector& keys, const Rcpp::List& weights) { - RCHECK(keys.size() == weights.size()) - << "The length of keys should be same as length of weights"; - std::vector handles = NDArray::GetHandles(weights, "weights"); - MX_CALL(MXKVStoreInit( - handle_, static_cast(handles.size()), - dmlc::BeginPtr(keys), dmlc::BeginPtr(handles))); -} - -void KVStore::Push(const std::vector& keys, - const Rcpp::List& weight_lists, - const std::vector& priority) { - RCHECK(keys.size() == priority.size() || priority.size() == 0) - << "The length of keys should be same as length of priority"; - - std::vector > vec(weight_lists.size()); - for (size_t i = 0; i < weight_lists.size(); ++i) { - RCHECK(Rcpp::is(weight_lists[i])) - << "Expect weight_lists to be list(list(ndarray))"; - Rcpp::List list = Rcpp::as(weight_lists[i]); - RCHECK(list.size() == keys.size()) - << "Expect length of keys to be same as each weight_list"; - vec[i] = NDArray::GetHandles(list, "weight_list"); - } - // do push - std::vector group_keys(vec.size()); - std::vector vals(vec.size()); - for (size_t i = 0; i < keys.size(); ++i) { - for (size_t j = 0; j < vec.size(); ++j) { - vals[j] = vec[j][i]; - } - std::fill(group_keys.begin(), group_keys.end(), keys[i]); - MX_CALL(MXKVStorePush(handle_, - static_cast(vals.size()), - dmlc::BeginPtr(group_keys), - dmlc::BeginPtr(vals), - priority.size() == 0 ? 0 : priority[i])); - } -} - -void KVStore::Pull(const std::vector& keys, - const Rcpp::List& out_lists, - const std::vector& priority) { - RCHECK(keys.size() == priority.size() || priority.size() == 0) - << "The length of keys should be same as length of priority"; - std::vector > vec(out_lists.size()); - for (size_t i = 0; i < out_lists.size(); ++i) { - RCHECK(Rcpp::is(out_lists[i])) - << "Expect out_lists to be list(list(ndarray))"; - Rcpp::List src = Rcpp::as(out_lists[i]); - RCHECK(src.size() == keys.size()) - << "Expect length of keys to be same as each out_lists"; - vec[i] = NDArray::GetHandles(src, "out_list"); - } - // do pull - std::vector group_keys(vec.size()); - std::vector vals(vec.size()); - for (size_t i = 0; i < keys.size(); ++i) { - for (size_t j = 0; j < vec.size(); ++j) { - vals[j] = vec[j][i]; - } - std::fill(group_keys.begin(), group_keys.end(), keys[i]); - MX_CALL(MXKVStorePull(handle_, static_cast(vals.size()), - dmlc::BeginPtr(group_keys), - dmlc::BeginPtr(vals), - priority.size() == 0 ? 
0 : priority[i])); - } -} - -std::string KVStore::type() const { - const char* stype; - MX_CALL(MXKVStoreGetType(handle_, &stype)); - return std::string(stype); -} - -bool KVStore::update_on_kvstore() const { - std::string type = this->type(); - return type != "local_allreduce_cpu" && type != "local_allreduce_device"; -} - -extern "C" void KVUpdaterCallback(int key, NDArrayHandle recv, NDArrayHandle local, void* handle) { - NDArray weight(local, true), grad(recv, true); - static_cast(handle)->Update(key, grad, &weight); -} - -void KVStore::SetOptimizer(const Rcpp::List& optimizer) { - std::vector names = optimizer.names(); - RCHECK(names.size() == 2 && - names[0] == "create.state" && - names[1] == "update") - << "Invalid optimizer"; - fcreate_state_ = optimizer[0]; - fupdate_ = optimizer[1]; - optimizer_set_ = true; - MX_CALL(MXKVStoreSetUpdater(handle_, - KVUpdaterCallback, - this)); -} - -Rcpp::List KVStore::CreateState(int index, const NDArray& weight) const { - RCHECK(optimizer_set_) - << "Need to call set.optimizer for KVStore " << type(); - // Use R Internal API here - Rcpp::Shield call(Rf_lang3(fcreate_state_, Rcpp::wrap(index), weight.RObject())); - SEXP ret = Rcpp_eval(call); - if (Rf_isNull(ret)) { - return Rcpp::List::create(); - } else if (TYPEOF(ret) == EXTPTRSXP) { - return Rcpp::List::create(Rcpp::Named("state") = ret); - } else { - return ret; - } -} - -void KVStore::Update(int index, const NDArray& grad, NDArray *weight) { - RCHECK(optimizer_set_) - << "Need to call set.optimizer for KVStore " << type(); - std::map::iterator it = states_.find(index); - Rcpp::List state_lst = this->CreateState(index, *weight); - if (it == states_.end()) { - if (state_lst.size() != 0) { - states_.insert(std::make_pair(index, state_lst)); - it = states_.find(index); - } - } - - Rcpp::List rlist; - if (state_lst.size() == 0) { - Rcpp::Shield call(Rf_lang5(fupdate_, Rcpp::wrap(index), - weight->RObject(), grad.RObject(), - R_NilValue)); - rlist = Rcpp_eval(call); - } else if (state_lst.size() == 1) { - Rcpp::Shield call(Rf_lang5(fupdate_, Rcpp::wrap(index), - weight->RObject(), grad.RObject(), - it->second[0])); - rlist = Rcpp_eval(call); - } else { - // Use R Internal API here - Rcpp::Shield call(Rf_lang5(fupdate_, Rcpp::wrap(index), - weight->RObject(), grad.RObject(), - it->second)); - rlist = Rcpp_eval(call); - } - NDArray::CopyFromTo(NDArray::FromRObject(rlist["weight"]), weight); -} - - -Rcpp::RObject KVStore::Create(const char *type) { - KVStoreHandle handle; - MX_CALL(MXKVStoreCreate(type, &handle)); - return Rcpp::internal::make_new_object(new KVStore(handle)); -} - -void KVStore::InitRcppModule() { - using namespace Rcpp; // NOLINT(*) - class_("MXKVStore") - .method("init", &KVStore::Init) - .method("push", &KVStore::Push) - .method("pull", &KVStore::Pull) - .method("set.optimizer", &KVStore::SetOptimizer) - .property("type", &KVStore::type) - .property("update.on.kvstore", &KVStore::update_on_kvstore); - - function("mx.kv.create", &KVStore::Create, - List::create(_["type"] = "local"), - "Create a new kvstore"); -} -} // namespace R -} // namespace mxnet diff --git a/R-package/src/kvstore.h b/R-package/src/kvstore.h deleted file mode 100644 index 5f668c19a388..000000000000 --- a/R-package/src/kvstore.h +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file kvstore.h - * \brief Rcpp Parameter Store interface of MXNet - */ -#ifndef MXNET_RCPP_KVSTORE_H_ -#define MXNET_RCPP_KVSTORE_H_ - -#include -#include -#include -#include -#include -#include "./base.h" - -namespace mxnet { -namespace R { -/*! - * \brief MXNet's Parameter store interface. - */ -class KVStore { - public: - /*! - * \brief initialize all the weights - * \param keys The keys of each weight. - * \param weights the weights NDArray list. - */ - void Init(const std::vector<int>& keys, const Rcpp::List& weights); - /*! - * \brief Push the weights to the KVStore. - * - * This operation will first aggregate over weight_lists, then push the result out: - * - * sum_list[i] = sum(list[i] for list in weight_lists) - * Then push(keys[i], sum_list[i]) for each i. - * - * \param keys list of keys, corresponds to key of each location. - * \param weight_lists List of Rcpp::List. - * \param priority The priority of each key. - */ - void Push(const std::vector<int>& keys, - const Rcpp::List& weight_lists, - const std::vector<int>& priority); - /*! - * \brief Pull the data back. - * This operation will MUTATE the content of out_lists. - * - * \param keys List of keys, corresponds to key of each location. - * \param out_lists List of Rcpp::List - * \param priority The priority of each key. - */ - void Pull(const std::vector<int>& keys, - const Rcpp::List& out_lists, - const std::vector<int>& priority); - /*! \return The type of KVStore */ - std::string type() const; - /*! \brief Whether to perform update on KVStore */ - bool update_on_kvstore() const; - /*! \brief Setup optimizer */ - void SetOptimizer(const Rcpp::List& optimizer); - // update function - void Update(int index, const NDArray& grad, NDArray *weight); - /*! - * \brief create a KVStore - * \return the created KVStore - */ - static Rcpp::RObject Create(const char *type); - /*! \brief initialize the R cpp Module */ - static void InitRcppModule(); - // destructor - ~KVStore() { - MX_CALL(MXKVStoreFree(handle_)); - } - - private: - explicit KVStore(KVStoreHandle handle) - : handle_(handle), optimizer_set_(false) {} - // the internal callback to kvstore. This might return NULL - Rcpp::List CreateState(int index, const NDArray& weight) const; - /*! \brief internal KVStore handle */ - KVStoreHandle handle_; - /*! \brief Whether optimizer is set */ - bool optimizer_set_; - /*! \brief The internal state */ - std::map<int, Rcpp::List> states_; - /*! \brief Function to create state */ - Rcpp::RObject fcreate_state_; - /*!
\brief Function to perform update */ - Rcpp::RObject fupdate_; -}; - -} // namespace R -} // namespace mxnet -#endif // MXNET_RCPP_KVSTORE_H_ diff --git a/R-package/src/mxnet.cc b/R-package/src/mxnet.cc deleted file mode 100644 index abd1b827b05e..000000000000 --- a/R-package/src/mxnet.cc +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file mxnet.cc - * \brief The registry of all module functions and objects - */ -#include -#include "./base.h" -#include "./ndarray.h" -#include "./symbol.h" -#include "./executor.h" -#include "./io.h" -#include "./kvstore.h" -#include "./export.h" -#include "./im2rec.h" - -namespace mxnet { -namespace R { -void SetSeed(int seed) { - MX_CALL(MXRandomSeed(seed)); -} - -void NotifyShutdown() { - MX_CALL(MXNotifyShutdown()); -} - -void ProfilerSetConfig(SEXP params) { - Rcpp::List kwargs(params); - std::vector keys = SafeGetListNames(kwargs); - std::vector str_keys(keys.size()); - std::vector str_vals(keys.size()); - for (size_t i = 0; i < kwargs.size(); ++i) { - RCHECK(keys[i].length() != 0) - << "Profiler::SetConfig only accepts key=value style arguments"; - str_keys[i] = FormatParamKey(keys[i]); - str_vals[i] = toPyString(keys[i], kwargs[i]); - } - std::vector c_str_keys = CKeys(str_keys); - std::vector c_str_vals = CKeys(str_vals); - - MX_CALL(MXSetProfilerConfig(static_cast(str_keys.size()), - dmlc::BeginPtr(c_str_keys), dmlc::BeginPtr(c_str_vals))); -} - -void ProfilerSetState(int state) { - MX_CALL(MXSetProfilerState(state)); -} - -// init rcpp module in base -void InitRcppModule() { - using namespace Rcpp; // NOLINT(*) - function("mx.internal.set.seed", &SetSeed); - function("mx.internal.notify.shutdown", &NotifyShutdown); - function("mx.internal.profiler.config", &ProfilerSetConfig); - function("mx.internal.profiler.state", &ProfilerSetState); -} -} // namespace R -} // namespace mxnet - - -RCPP_MODULE(mxnet) { - using namespace mxnet::R; // NOLINT(*) - mxnet::R::InitRcppModule(); - Context::InitRcppModule(); - NDArray::InitRcppModule(); - NDArrayFunction::InitRcppModule(); - Symbol::InitRcppModule(); - SymbolFunction::InitRcppModule(); - Executor::InitRcppModule(); - DataIter::InitRcppModule(); - DataIterCreateFunction::InitRcppModule(); - KVStore::InitRcppModule(); - Exporter::InitRcppModule(); - IM2REC::InitRcppModule(); -} - diff --git a/R-package/src/name.h b/R-package/src/name.h deleted file mode 100644 index cbc9a665e9cb..000000000000 --- a/R-package/src/name.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file name.h - * \brief Name manager to get default names. - */ -#ifndef MXNET_RCPP_NAME_H_ -#define MXNET_RCPP_NAME_H_ - -#include -#include - -namespace mxnet { -namespace R { - -/*! - * \brief A name manager to attach names. - * This is a very simple implementation. - */ -class NameManager { - public: - /*! - * \brief Get a canonical name given name and hint - * \param name The name passed in from parameter - * \param hint The hint used to generate the name. - */ - virtual std::string GetName(const std::string& name, - const std::string& hint) { - if (name.length() != 0) return name; - if (counter_.count(hint) == 0) { - counter_[hint] = 0; - } - size_t cnt = counter_[hint]++; - std::ostringstream os; - os << hint << cnt; - return os.str(); - } - /*! \return global singleton of the manager */ - static NameManager *Get(); - - private: - // internal counter - std::map<std::string, size_t> counter_; -}; -} // namespace R -} // namespace mxnet -#endif // MXNET_RCPP_NAME_H_
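The counter logic above yields hint-based default names, with an explicitly passed name always taking precedence. For illustration:

    #include <iostream>
    #include "./name.h"  // the header above

    void Demo() {
      using mxnet::R::NameManager;
      std::cout << NameManager::Get()->GetName("", "conv") << "\n";        // "conv0"
      std::cout << NameManager::Get()->GetName("", "conv") << "\n";        // "conv1"
      std::cout << NameManager::Get()->GetName("myconv", "conv") << "\n";  // "myconv"
    }
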
- */ -#include -#include "./base.h" -#include "./export.h" -#include "./ndarray.h" - -namespace mxnet { -namespace R { -template -inline void ConvertLayout(InputIter it, - const mx_uint *ishape, - const size_t *ostride, - int dim, - size_t size, - mx_float *out_data) { - for (size_t i = 0; i < size; ++i, ++it) { - size_t offset = 0; - size_t counter = i; - for (int k = 0; k < dim; ++k) { - size_t idx = counter % ishape[k]; - offset += idx * ostride[k]; - counter /= ishape[k]; - } - out_data[offset] = *it; - } -} - -template -inline void ConvertLayout(const mx_float *in_data, - const mx_uint *ishape, - const size_t *ostride, - int dim, - size_t size, - OutputIter it) { - for (size_t i = 0; i < size; ++i, ++it) { - size_t offset = 0; - size_t counter = i; - for (int k = 0; k < dim; ++k) { - size_t idx = counter % ishape[k]; - offset += idx * ostride[k]; - counter /= ishape[k]; - } - RCHECK(offset < size) - << "offset=" << offset << ", size=" << size; - *it = in_data[offset]; - } -} - -inline std::vector GetReverseStride(const std::vector& ishape) { - std::vector stride(ishape.size()); - size_t prod = 1; - int ndim = static_cast(ishape.size()); - for (int k = ndim - 1; k >= 0 ; --k) { - stride[k] = prod; - prod *= ishape[k]; - } - return stride; -} - -template -inline void ColToRowMajor(InputIter begin, - const std::vector& ishape, - size_t size, - mx_float *out_data) { - int ndim = static_cast(ishape.size()); - std::vector out_stride = GetReverseStride(ishape); - // manual unroll special constants - const mx_uint *shape = dmlc::BeginPtr(ishape); - const size_t *stride = dmlc::BeginPtr(out_stride); - switch (ndim) { - case 1: { - ConvertLayout(begin, shape, stride, 1, size, out_data); - break; - } - case 2: { - ConvertLayout(begin, shape, stride, 2, size, out_data); - break; - } - case 3: { - ConvertLayout(begin, shape, stride, 3, size, out_data); - break; - } - default: { - ConvertLayout(begin, shape, stride, ndim, size, out_data); - break; - } - } -} - -template -inline void RowToColMajor(const mx_float *in_data, - const std::vector& ishape, - size_t size, - OutputIter begin) { - int ndim = static_cast(ishape.size()); - std::vector out_stride = GetReverseStride(ishape); - // manual unroll special constants - const mx_uint *shape = dmlc::BeginPtr(ishape); - const size_t *stride = dmlc::BeginPtr(out_stride); - switch (ndim) { - case 1: { - ConvertLayout(in_data, shape, stride, 1, size, begin); - break; - } - case 2: { - ConvertLayout(in_data, shape, stride, 2, size, begin); - break; - } - case 3: { - ConvertLayout(in_data, shape, stride, 3, size, begin); - break; - } - default: { - ConvertLayout(in_data, shape, stride, ndim, size, begin); - break; - } - } -} - -void NDArrayPacker::Push(const NDArray::RObjectType& nd) { - NDArray arr(nd); - Rcpp::Dimension rshape = arr.dim(); - if (shape_.size() == 0) { - shape_.resize(rshape.size()); - for (size_t i = 0; i < shape_.size(); ++i) { - shape_[i] = rshape[i]; - } - } else { - RCHECK(shape_.size() == rshape.size()) - << "The number of dimension need to be matched"; - for (size_t i = 0; i < shape_.size() - 1; ++i) { - RCHECK(shape_[i] == rshape[i]) - << "The dimension besides last need to be consistent for arrays pushed"; - } - shape_.back() += rshape[shape_.size() - 1]; - } - size_t begin = data_.size(); - size_t size = rshape.prod(); - data_.resize(begin + size); - MX_CALL(MXNDArraySyncCopyToCPU( - arr->handle, dmlc::BeginPtr(data_) + begin, size)); -} - -Rcpp::NumericVector NDArrayPacker::Get() const { - Rcpp::IntegerVector sp(shape_.begin(), 
shape_.end()); - Rcpp::RObject sexp = sp; - Rcpp::Dimension dim(sexp); - Rcpp::NumericVector ret(dim); - RCHECK(ret.size() == data_.size()); - std::copy(data_.begin(), data_.end(), ret.begin()); - return ret; -} - -Rcpp::RObject NDArrayPacker::CreateNDArrayPacker() { - return Rcpp::internal::make_new_object(new NDArrayPacker()); -} - -Rcpp::Dimension NDArray::dim() const { - int ndim; - const int *pshape; - MX_CALL(MXNDArrayGetShapeEx( - ptr_->handle, &ndim, &pshape)); - Rcpp::IntegerVector dat(pshape, pshape + ndim); - std::reverse(dat.begin(), dat.end()); - Rcpp::RObject ret = dat; - return Rcpp::Dimension(ret); -} - -NDArray NDArray::Clone() const { - std::vector shape = Dim2InternalShape(this->dim()); - Context ctx = this->ctx(); - NDArrayHandle handle; - MX_CALL(MXNDArrayCreate(dmlc::BeginPtr(shape), - static_cast(shape.size()), - ctx.dev_type, ctx.dev_id, true, &handle)); - NDArray ret(handle, true); - CopyFromTo(*this, &ret); - return ret; -} - -Context NDArray::ctx() const { - Context ctx; - MX_CALL(MXNDArrayGetContext(ptr_->handle, &ctx.dev_type, &ctx.dev_id)); - return ctx; -} - -size_t NDArray::Size() const { - Rcpp::Dimension dim = this->dim(); - size_t sz = 1; - for (size_t i = 0; i < dim.size(); ++i) { - sz *= dim[i]; - } - return sz; -} - -NDArray NDArray::Slice(mx_uint begin, mx_uint end) const { - NDArrayHandle out; - MX_CALL(MXNDArraySlice(ptr_->handle, begin, end, &out)); - return NDArray(out, ptr_->writable); -} - -Rcpp::NumericVector NDArray::AsNumericVector() const { - Rcpp::Dimension rshape = this->dim(); - std::vector temp(rshape.prod()); - MX_CALL(MXNDArraySyncCopyToCPU( - ptr_->handle, dmlc::BeginPtr(temp), temp.size())); - Rcpp::NumericVector ret(rshape); - std::copy(temp.begin(), temp.end(), ret.begin()); - return ret; -} - -void NDArray::Save(const Rcpp::List& data_lst, - const std::string& filename) { - std::vector lst_names; - if (HasName(data_lst)) { - lst_names = Rcpp::as >(data_lst.names()); - } - size_t num_args = data_lst.size(); - std::vector handles(num_args); - - for (int i = 0 ; i < data_lst.size(); ++i) { - Rcpp::RObject obj = data_lst[i]; - handles[i] = NDArray(obj)->handle; - } - std::vector keys = CKeys(lst_names); - MX_CALL(MXNDArraySave(filename.c_str(), num_args, - dmlc::BeginPtr(handles), - dmlc::BeginPtr(keys))); -} - -Rcpp::List NDArray::Load(const std::string& filename) { - mx_uint out_size; - NDArrayHandle* out_arr; - mx_uint out_name_size; - const char** out_names; - MX_CALL(MXNDArrayLoad(filename.c_str(), - &out_size, &out_arr, - &out_name_size, &out_names)); - Rcpp::List out(out_size); - for (mx_uint i = 0; i < out_size; ++i) { - out[i] = NDArray::RObject(out_arr[i], true); - } - if (out_name_size != 0) { - std::vector lst_names(out_size); - for (mx_uint i = 0; i < out_size; ++i) { - lst_names[i] = out_names[i]; - } - out.names() = lst_names; - } - return out; -} - -NDArray::RObjectType NDArray::Empty( - const Rcpp::Dimension& rshape, - const Context::RObjectType& rctx) { - std::vector shape = Dim2InternalShape(rshape); - Context ctx(rctx); - NDArrayHandle handle; - MX_CALL(MXNDArrayCreate(dmlc::BeginPtr(shape), - static_cast(shape.size()), - ctx.dev_type, ctx.dev_id, false, &handle)); - return NDArray::RObject(handle, true); -} - -std::vector NDArray::GetHandles(const Rcpp::List& array_list, - const std::string& list_name, - bool allow_null, - bool move_old_array) { - std::vector ret(array_list.size()); - for (size_t i = 0; i < ret.size(); ++i) { - if (array_list[i] == R_NilValue) { - RCHECK(allow_null) - << "Expect " << list_name << 
" to be list of non-NULL " << NDArray::TypeName(); - ret[i] = nullptr; - } else { - RCHECK(TYPEOF(array_list[i]) == EXTPTRSXP) - << "Expect " << list_name << " to be list of " << NDArray::TypeName(); - Rcpp::RObject obj = array_list[i]; - Rcpp::XPtr ptr(obj); - Rcpp::RObject attr = ptr.attr("class"); - RCHECK(attr != R_NilValue && Rcpp::as(attr) == "MXNDArray") - << "Expect " << list_name << " to be list of " << NDArray::TypeName(); - if (move_old_array) { - RCHECK(ptr->writable) - << "Passing a read only NDArray to mutate function"; - ptr->moved = true; - } - ret[i] = ptr->handle; - } - } - return ret; -} - -void NDArray::CopyFromTo(const NDArray& from, NDArray* to) { - static OpHandle copy_handle = NDArrayFunction::FindHandle("_copyto"); - NDArrayHandle from_handle = from->handle; - NDArrayHandle to_handle = (*to)->handle; - RCHECK(from_handle != to_handle) - << "Attempt to copy NDArray to itself"; - NDArrayHandle* p_output_vars = &to_handle; - int num_output = 1; - MX_CALL(MXImperativeInvoke(copy_handle, 1, &from_handle, - &num_output, &p_output_vars, - 0, nullptr, nullptr)); -} - -NDArray::RObjectType NDArray::Array( - const Rcpp::RObject& src, - const Context::RObjectType& ctx) { - Rcpp::NumericVector rdata(src); - Rcpp::RObject dim = rdata.attr("dim"); - Rcpp::Dimension rshape(dim); - RObjectType ret = NDArray::Empty(rshape, ctx); - std::vector temp(rdata.size()); - std::copy(rdata.begin(), rdata.end(), temp.begin()); - MX_CALL(MXNDArraySyncCopyFromCPU( - NDArray(ret)->handle, - dmlc::BeginPtr(temp), rdata.size())); - return ret; -} - -NDArrayFunction::NDArrayFunction(OpHandle handle, std::string name) - : handle_(handle) { - // initialize the docstring - const char* real_name; - const char* description; - mx_uint num_args; - const char **arg_names; - const char **arg_type_infos; - const char **arg_descriptions; - const char *key_var_num_args; - const char *ret_type; - - MX_CALL(MXSymbolGetAtomicSymbolInfo( - handle_, &real_name, &description, &num_args, - &arg_names, &arg_type_infos, &arg_descriptions, - &key_var_num_args, &ret_type)); - if (key_var_num_args != nullptr) { - key_var_num_args_ = key_var_num_args; - } - - if (name[0] == '_') { - name_ = std::string("mx.nd.internal.") + (name.c_str() + 1); - } else { - name_ = std::string("mx.nd.") + name; - } - for (size_t i = 0; i < name_.length(); ++i) { - if (name_[i] == '_') name_[i] = '.'; - } - - // dostring: generate python style for now, change to R style later - std::ostringstream os; - std::string descp = description; - if (descp.length() == 0) { - os << name; - } else { - os << description; - } - os << "\n\n" - << MakeDocString(num_args, arg_names, arg_type_infos, arg_descriptions) - << "@return out The result mx.ndarray\n\n" - << "@export\n"; - this->docstring = os.str(); - - Rcpp::List arg_values(num_args + 1); - arg_names_.resize(num_args + 1); - arg_nd_array_.resize(num_args + 1, false); - - for (mx_uint i = 0; i < num_args; ++i) { - arg_names_[i] = arg_names[i]; - std::string dtype = arg_type_infos[i]; - // check data type. 
- if (dtype.substr(0, 7) == "NDArray" || - dtype.substr(0, 6) == "Symbol") { - arg_nd_array_[i] = true; - } else { - // all kwargs are optional in front-end - arg_values[i] = R_NilValue; - } - } - arg_names_[num_args + 0] = "out"; - // out is are optional in front-end - arg_values[num_args + 0] = R_NilValue; - formals_ = arg_values; - formals_.attr("names") = arg_names_; -} - -SEXP NDArrayFunction::operator() (SEXP* args) { - BEGIN_RCPP; - - std::vector nd_args; - std::vector sparam_vals; - std::vector param_keys; - std::vector param_vals; - std::vector out_args; - - for (mx_uint i = 0; i < arg_names_.size() - 1; ++i) { - if (arg_nd_array_[i]) { - if (TYPEOF(args[i]) == 22) { - nd_args.push_back(NDArray(args[i])->handle); - } else if (TYPEOF(args[i]) == 19) { - Rcpp::List data_lst = Rcpp::as(args[i]); - for (size_t k = 0; k < data_lst.size(); k++) { - nd_args.push_back(NDArray((SEXP)data_lst[k])->handle); - } - } - } else { - if (args[i] != R_NilValue) { - param_keys.push_back(arg_names_[i].c_str()); - sparam_vals.push_back(toPyString(arg_names_[i], args[i])); - } - } - } - param_vals.resize(sparam_vals.size()); - for (size_t i = 0; i < sparam_vals.size(); ++i) { - param_vals[i] = sparam_vals[i].c_str(); - } - // contain out - if (args[arg_names_.size()-1] != R_NilValue) { - SEXP old_output = args[arg_names_.size() - 1]; - if (TYPEOF(old_output) == VECSXP) { - out_args = NDArray::GetHandles(old_output, "out", false, true); - } else { - out_args.push_back(NDArray(old_output)->handle); - } - } - - int num_output = static_cast(out_args.size()); - NDArrayHandle* p_output_vars = nullptr; - - if (num_output != 0) { - p_output_vars = &out_args[0]; - } - - MXImperativeInvoke( - handle_, - static_cast(nd_args.size()), - dmlc::BeginPtr(nd_args), - &num_output, - &p_output_vars, - static_cast(param_keys.size()), - dmlc::BeginPtr(param_keys), - dmlc::BeginPtr(param_vals)); - - if (num_output == 1) { - if (out_args.size() != 0) { - return NDArray(args[arg_names_.size() - 1]).Move().RObject(); - } else { - return NDArray(p_output_vars[0], true).RObject(); - } - } else { - Rcpp::List olist(num_output); - for (int i = 0; i < num_output; ++i) { - olist[i] = NDArray(p_output_vars[i], true).RObject(); - } - return olist; - } - - END_RCPP; -} - -OpHandle NDArrayFunction::FindHandle(const std::string& hname) { - OpHandle h; - if (NNGetOpHandle(hname.c_str(), &h) == 0 && h != nullptr) { - return h; - } - RLOG_FATAL << "FindHandle: cannot find function " << hname; - return nullptr; -} - - -// internal namespace of functions inside -namespace ndarray { -/*! - * \brief internal function to parse NDArray arguments - * \param sexp The soure value - * \param handle the output handle, if it is NDArray type. - * \param value the output value, if it is numeric type. 
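[Reviewer note] The operator() body in this hunk compares TYPEOF(args[i]) against the bare constants 22 and 19; those are the SEXPTYPE codes for external pointers (a wrapped NDArray) and generic vectors (an R list). A self-contained reminder, compiled against R's headers:

#include <Rinternals.h>  // R's SEXPTYPE constants; needs R's include dir on -I
#include <cstdio>

int main() {
  // The magic numbers above correspond to these named constants, which
  // read much better in new code.
  std::printf("EXTPTRSXP=%d VECSXP=%d\n", (int)EXTPTRSXP, (int)VECSXP);  // 22 19
}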
- * \return whether it is NDArray type -*/ -inline bool ParseNDArrayArg(SEXP sexp, NDArrayHandle *handle, std::string *value) { - switch (TYPEOF(sexp)) { - case REALSXP: { - *value = toString(sexp); - return false; - } - case INTSXP: { - *value = toString(sexp); - return false; - } - case EXTPTRSXP: { - Rcpp::XPtr ptr(sexp); - Rcpp::RObject attr = ptr.attr("class"); - RCHECK(attr != R_NilValue && Rcpp::as(attr) == "MXNDArray") - << "MXNDArray binary operations only support NDArray and numeric values"; - RCHECK(!ptr->moved) - << "Passing in an NDArray that has been moved"; - *handle = ptr->handle; - return true; - } - default: { - RLOG_FATAL << "MXNDArray binary operations only support " - << "NDArray and numeric values as operands"; - } - } - return true; -} - -inline NDArrayHandle BinaryOp(OpHandle op, NDArrayHandle* handles) { - int num_output = 0; - NDArrayHandle* p_output_vars = nullptr; - MX_CALL(MXImperativeInvoke(op, 2, handles, - &num_output, &p_output_vars, - 0, nullptr, nullptr)); - RCHECK(num_output == 1); - return p_output_vars[0]; -} - -inline NDArrayHandle BinaryScalarOp( - OpHandle op, NDArrayHandle handle, const std::string &scalar) { - int num_output = 0; - NDArrayHandle* p_output_vars = nullptr; - const char* skey = "scalar"; - const char* svalue = scalar.c_str(); - - MX_CALL(MXImperativeInvoke(op, 1, &handle, - &num_output, &p_output_vars, - 1, &skey, &svalue)); - RCHECK(num_output == 1); - return p_output_vars[0]; -} - -// dispatch the binary ops of MXNDArray -NDArray::RObjectType DispatchOps(SEXP op, SEXP lhs, SEXP rhs) { - // function handles - static OpHandle plus = NDArrayFunction::FindHandle("_plus"); - static OpHandle plus_scalar = NDArrayFunction::FindHandle("_plus_scalar"); - static OpHandle minus = NDArrayFunction::FindHandle("_minus"); - static OpHandle minus_scalar = NDArrayFunction::FindHandle("_minus_scalar"); - static OpHandle rminus_scalar = NDArrayFunction::FindHandle("_rminus_scalar"); - static OpHandle mul = NDArrayFunction::FindHandle("_mul"); - static OpHandle mul_scalar = NDArrayFunction::FindHandle("_mul_scalar"); - static OpHandle div = NDArrayFunction::FindHandle("_div"); - static OpHandle div_scalar = NDArrayFunction::FindHandle("_div_scalar"); - static OpHandle rdiv_scalar = NDArrayFunction::FindHandle("_rdiv_scalar"); - static OpHandle mod = NDArrayFunction::FindHandle("_mod"); - static OpHandle mod_scalar = NDArrayFunction::FindHandle("_mod_scalar"); - static OpHandle rmod_scalar = NDArrayFunction::FindHandle("_rmod_scalar"); - static OpHandle equal = NDArrayFunction::FindHandle("_equal"); - static OpHandle equal_scalar = NDArrayFunction::FindHandle("_equal_scalar"); - static OpHandle not_equal = NDArrayFunction::FindHandle("_not_equal"); - static OpHandle not_equal_scalar = NDArrayFunction::FindHandle("_not_equal_scalar"); - static OpHandle greater = NDArrayFunction::FindHandle("_greater"); - static OpHandle greater_scalar = NDArrayFunction::FindHandle("_greater_scalar"); - static OpHandle greater_equal = NDArrayFunction::FindHandle("_greater_equal"); - static OpHandle greater_equal_scalar = NDArrayFunction::FindHandle("_greater_equal_scalar"); - static OpHandle lesser = NDArrayFunction::FindHandle("_lesser"); - static OpHandle lesser_scalar = NDArrayFunction::FindHandle("_lesser_scalar"); - static OpHandle lesser_equal = NDArrayFunction::FindHandle("_lesser_equal"); - static OpHandle lesser_equal_scalar = NDArrayFunction::FindHandle("_lesser_equal_scalar"); - // parse the arguments - std::string values[2]; - NDArrayHandle handles[2]; - 
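[Reviewer note] BinaryScalarOp below is, at its core, one imperative-invoke call. Stripped of the R plumbing it looks like this; the include paths assume a normal MXNet checkout, and error handling is reduced to early returns:

#include <mxnet/c_api.h>  // MXImperativeInvoke
#include <nnvm/c_api.h>   // NNGetOpHandle

// "_plus_scalar" applied to one input. num_outputs = 0 with a null output
// array asks the engine to allocate the result; on success exactly one
// output handle comes back.
NDArrayHandle PlusScalar(NDArrayHandle in, const char* scalar_value) {
  OpHandle op = nullptr;
  if (NNGetOpHandle("_plus_scalar", &op) != 0) return nullptr;
  int num_outputs = 0;
  NDArrayHandle* outputs = nullptr;
  const char* key = "scalar";
  if (MXImperativeInvoke(op, 1, &in, &num_outputs, &outputs,
                         1, &key, &scalar_value) != 0) return nullptr;
  return num_outputs == 1 ? outputs[0] : nullptr;
}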
NDArrayHandle out = nullptr; - bool lhs_nd = ParseNDArrayArg(lhs, &handles[0], &values[0]); - bool rhs_nd = ParseNDArrayArg(rhs, &handles[1], &values[1]); - RCHECK(lhs_nd || rhs_nd); - // create output and dispatch. - std::string sop = Rcpp::as(op); - switch (sop[0]) { - case '+': { - if (lhs_nd && rhs_nd) { - out = BinaryOp(plus, handles); - } else if (lhs_nd && !rhs_nd) { - out = BinaryScalarOp(plus_scalar, handles[0], values[1]); - } else { - out = BinaryScalarOp(plus_scalar, handles[1], values[0]); - } - break; - } - case '-': { - if (lhs_nd && rhs_nd) { - out = BinaryOp(minus, handles); - } else if (lhs_nd && !rhs_nd) { - out = BinaryScalarOp(minus_scalar, handles[0], values[1]); - } else { - out = BinaryScalarOp(rminus_scalar, handles[1], values[0]); - } - break; - } - case '*': { - if (lhs_nd && rhs_nd) { - out = BinaryOp(mul, handles); - } else if (lhs_nd && !rhs_nd) { - out = BinaryScalarOp(mul_scalar, handles[0], values[1]); - } else { - out = BinaryScalarOp(mul_scalar, handles[1], values[0]); - } - break; - } - case '/': { - if (lhs_nd && rhs_nd) { - out = BinaryOp(div, handles); - } else if (lhs_nd && !rhs_nd) { - out = BinaryScalarOp(div_scalar, handles[0], values[1]); - } else { - out = BinaryScalarOp(rdiv_scalar, handles[1], values[0]); - } - break; - } - case '%': { - if (lhs_nd && rhs_nd) { - out = BinaryOp(mod, handles); - } else if (lhs_nd && !rhs_nd) { - out = BinaryScalarOp(mod_scalar, handles[0], values[1]); - } else { - out = BinaryScalarOp(rmod_scalar, handles[1], values[0]); - } - break; - } - case '=': { - if (lhs_nd && rhs_nd) { - out = BinaryOp(equal, handles); - } else if (lhs_nd && !rhs_nd) { - out = BinaryScalarOp(equal_scalar, handles[0], values[1]); - } else { - out = BinaryScalarOp(equal_scalar, handles[1], values[0]); - } - break; - } - case '!': { - if (lhs_nd && rhs_nd) { - out = BinaryOp(not_equal, handles); - } else if (lhs_nd && !rhs_nd) { - out = BinaryScalarOp(not_equal_scalar, handles[0], values[1]); - } else { - out = BinaryScalarOp(not_equal_scalar, handles[1], values[0]); - } - break; - } - case '>': { - if (sop == ">=") { - if (lhs_nd && rhs_nd) { - out = BinaryOp(greater_equal, handles); - } else if (lhs_nd && !rhs_nd) { - out = BinaryScalarOp(greater_equal_scalar, handles[0], values[1]); - } else { - out = BinaryScalarOp(lesser_equal_scalar, handles[1], values[0]); - } - } else { - if (lhs_nd && rhs_nd) { - out = BinaryOp(greater, handles); - } else if (lhs_nd && !rhs_nd) { - out = BinaryScalarOp(greater_scalar, handles[0], values[1]); - } else { - out = BinaryScalarOp(lesser_scalar, handles[1], values[0]); - } - } - break; - } - case '<': { - if (sop == "<=") { - if (lhs_nd && rhs_nd) { - out = BinaryOp(lesser_equal, handles); - } else if (lhs_nd && !rhs_nd) { - out = BinaryScalarOp(lesser_equal_scalar, handles[0], values[1]); - } else { - out = BinaryScalarOp(greater_equal_scalar, handles[1], values[0]); - } - } else { - if (lhs_nd && rhs_nd) { - out = BinaryOp(lesser, handles); - } else if (lhs_nd && !rhs_nd) { - out = BinaryScalarOp(lesser_scalar, handles[0], values[1]); - } else { - out = BinaryScalarOp(greater_scalar, handles[1], values[0]); - } - } - break; - } - default: { - RLOG_FATAL << "Operator " << sop << " not supported for MXNDArray"; - } - } - return NDArray::RObject(out, true); -} - -Rcpp::Dimension dim(const NDArray::RObjectType& src) { - return NDArray(src).dim(); -} - -Context::RObjectType ctx(const NDArray::RObjectType& src) { - return NDArray(src).ctx().RObject(); -} - -unsigned long Size(const NDArray::RObjectType& 
src) { // NOLINT(*) - return NDArray(src).Size(); -} - -Rcpp::NumericVector AsNumericVector(const NDArray::RObjectType& src) { - return NDArray(src).AsNumericVector(); -} - -NDArray::RObjectType Slice(const NDArray::RObjectType& src, - mx_uint begin, mx_uint end) { - NDArray nd(src); - Rcpp::Dimension dim = nd.dim(); - size_t ndim = dim.size(); - RCHECK(dim[ndim - 1] >= end) - << "end=" << end << ", max-dim=" << dim[ndim - 1]; - return nd.Slice(begin, end).RObject(); -} -} // namespace ndarray - -// initialize the Rcpp module functions. -void NDArray::InitRcppModule() { - using namespace Rcpp; // NOLINT(*) - function("mx.nd.slice", &ndarray::Slice); - function("mx.nd.internal.load", &NDArray::Load); - function("mx.nd.internal.save", &NDArray::Save); - function("mx.nd.internal.array", &NDArray::Array); - function("mx.nd.internal.empty.array", &NDArray::Empty); - function("mx.nd.internal.dispatch.Ops", &ndarray::DispatchOps); - // exposing members - function("mx.nd.internal.dim", &ndarray::dim); - function("mx.nd.internal.ctx", &ndarray::ctx); - function("mx.nd.internal.length", &ndarray::Size); - function("mx.nd.internal.as.array", &ndarray::AsNumericVector); - - class_("NDArrayPacker") - .method("push", &NDArrayPacker::Push) - .method("get", &NDArrayPacker::Get); - function("mx.nd.arraypacker", &NDArrayPacker::CreateNDArrayPacker); -} - -void NDArrayFunction::InitRcppModule() { - Rcpp::Module* scope = ::getCurrentScope(); - RCHECK(scope != nullptr) - << "Init Module need to be called inside scope"; - - mx_uint out_size; - const char** op_name_ptrs; - std::vector op_names; - MX_CALL(MXListAllOpNames(&out_size, &op_name_ptrs)); - for (size_t i = 0; i < out_size; ++i) { - op_names.push_back(std::string(op_name_ptrs[i])); - } - - for (int i = 0; i < out_size; ++i) { - OpHandle handle; - MX_CALL(NNGetOpHandle(op_names[i].c_str(), &handle)); - NDArrayFunction *f = new NDArrayFunction(handle, op_names[i]); - scope->Add(f->get_name(), f); - } -} -} // namespace R -} // namespace mxnet diff --git a/R-package/src/ndarray.h b/R-package/src/ndarray.h deleted file mode 100644 index fb0551ee1c32..000000000000 --- a/R-package/src/ndarray.h +++ /dev/null @@ -1,325 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file ndarray.h - * \brief Rcpp NDArray interface of MXNet - */ -#ifndef MXNET_RCPP_NDARRAY_H_ -#define MXNET_RCPP_NDARRAY_H_ - -#include -#include -#include -#include -#include -#include -#include "./base.h" - -namespace mxnet { -namespace R { -// forward declare NDArrayFunction -class NDArrayFunction; - -/*! \brief Back-end chunk of NDArray */ -struct NDBlob { - public: - /*! 
- * \brief constructor - * \param handle The handle - */ - NDBlob(NDArrayHandle handle, bool writable) - : handle(handle), writable(writable), moved(false) { - } - /*! \brief destructor */ - ~NDBlob() { - if (!moved) { - MX_CALL(MXNDArrayFree(handle)); - } - } - /*! \brief The internal handle of NDArray */ - NDArrayHandle handle; - /*! \brief whether the Blob is writable */ - bool writable; - /*! \brief whether if the */ - bool moved; -}; - -/*! - * \brief Rcpp NDArray object of MXNet. - * We use lightweight Rcpp external ptr and S3 type object. - * For efficiently expose the object to R side. - */ -class NDArray { - public: - /*! \return typename from R side. */ - inline static const char* TypeName() { - return "MXNDArray"; - } - /*! \brief The returning type of new NDArray */ - typedef Rcpp::XPtr RObjectType; - /*! - * \brief copy constructor - * \param other Another NDArray to be copied from. - */ - NDArray(const NDArray& other) - : ptr_(other.ptr_) {} - /*! - * \brief constructor from R SEXP - * \param src The source SEXP - */ - explicit NDArray(SEXP src) - : ptr_(src) {} - /*! - * \brief Constructor - * \param handle The handle - */ - NDArray(NDArrayHandle handle, bool writable) - : ptr_(new NDBlob(handle, writable)) { - ptr_.attr("class") = "MXNDArray"; - } - /*! \return RObject representation */ - inline RObjectType RObject() const { - return ptr_; - } - /*! - * \brief Create a new moved NDArray - */ - inline NDArray Move() const { - RCHECK(ptr_->writable && !ptr_->moved) - << "Passing a read only NDArray to mutate function"; - ptr_->moved = true; - return NDArray(ptr_->handle, ptr_->writable); - } - // operator overloading - inline NDArray& operator=(const NDArray& other) { - ptr_ = other.ptr_; - return *this; - } - inline NDBlob* operator->() { - return ptr_.get(); - } - inline const NDBlob* operator->() const { - return ptr_.get(); - } - /*! - * \param src The source array. - * \return The dimension of the array - */ - Rcpp::Dimension dim() const; - /*! - * \brief Return a clone of NDArray. - * Do not expose this to R side. - * \return src The source NDArray. - * \return a new cloned NDArray. - */ - NDArray Clone() const; - /*! - * \return The context of NDArray. - */ - Context ctx() const; - /*! - * \brief Return a slice of NDArray. - * \param begin The begin of the slice. - * \param end The end of the slice. - * \return a sliced NDArray. - */ - NDArray Slice(mx_uint begin, mx_uint end) const; - /*! - * \return The number of elements in the array - */ - size_t Size() const; - /*! - * \return convert the NDArray to R's Array - */ - Rcpp::NumericVector AsNumericVector() const; - /*! - * \brief Create NDArray from RObject - * \param src Source object. - * \return The created NDArray - */ - inline static NDArray FromRObject(const Rcpp::RObject& src) { - return NDArray(src); - } - /*! - * \brief Create RObject NDArray. - * \param handle The source handle. - * \param writable Whether the NDArray is writable. - * \return The created NDArray - */ - inline static RObjectType RObject(NDArrayHandle handle, bool writable) { - return NDArray(handle, writable).RObject(); - } - /*! - * \brief Move the NDArray. - * \param src The source RObject. - * \return The moved NDArray - */ - inline static RObjectType Move(const Rcpp::RObject& src) { - return NDArray(src).Move().RObject(); - } - /*! 
- * \brief function to create an empty array - * \param shape The shape of the Array - * \return a new created MX.NDArray - */ - static RObjectType Empty(const Rcpp::Dimension& shape, - const Context::RObjectType& ctx); - /*! - * \brief Create a MX.NDArray by copy data from src R array. - * \param src the source R array - * \param ctx The context where - */ - static RObjectType Array(const Rcpp::RObject& src, - const Context::RObjectType& ctx); - /*! - * \brief internal function to copy NDArray from to to - * Do not expose this to R side. - * \param from The source NDArray. - * \param to The target NDArray. - */ - static void CopyFromTo(const NDArray& from, NDArray *to); - /*! - * \brief Load a list of ndarray from the file. - * \param filename the name of the file. - * \return R List of NDArrays - */ - static Rcpp::List Load(const std::string& filename); - /*! - * \brief Save a list of NDArray to file. - * \param data R List of NDArrays - * \param filename The name of the file to be saved. - */ - static void Save(const Rcpp::List& data, - const std::string& filename); - /*! - * \brief Extract NDArrayHandles from List. - * \param array_list The NDArray list. - * \param list_name The name of the list, used for error message. - * \param allow_null If set to True, allow null in the list. - * \param move_old_array If set to true, move the old ndarrays - */ - static std::vector GetHandles(const Rcpp::List& array_list, - const std::string& list_name, - bool allow_null = false, - bool move_old_array = false); - /*! \brief static function to initialize the Rcpp functions */ - static void InitRcppModule(); - - private: - /*! \brief internal pointer */ - Rcpp::XPtr ptr_; -}; - -/*! \brief The NDArray functions to be invoked */ -class NDArrayFunction : public ::Rcpp::CppFunction { - public: - virtual SEXP operator() (SEXP * args); - - virtual int nargs() { - return static_cast(arg_names_.size()); - } - - virtual bool is_void() { - return false; - } - - virtual void signature(std::string& s, const char* name) { // NOLINT(*) - ::Rcpp::signature< ::Rcpp::void_type >(s, name); - } - - virtual const char* get_name() { - return name_.c_str(); - } - - virtual SEXP get_formals() { - return formals_; - } - - virtual DL_FUNC get_function_ptr() { - return (DL_FUNC)NULL; // NOLINT(*) - } - /*! \brief static function to initialize the Rcpp functions */ - static void InitRcppModule(); - - // internal helper function to search function handle - static OpHandle FindHandle(const std::string& hname); - - private: - // make constructor private - explicit NDArrayFunction(OpHandle handle, std::string name); - /*! \brief internal functioon handle. */ - OpHandle handle_; - // name of the function - std::string name_; - // keyword arguments. - std::string key_var_num_args_; - // name of arguments - std::vector arg_names_; - // check - std::vector arg_nd_array_; - // ther formals of arguments - Rcpp::List formals_; -}; - -/*! - * \brief An array packer that packs NDArray array together on - * slowest changing dimension. - */ -class NDArrayPacker { - public: - // constructor - NDArrayPacker() {} - /*! - * \brief Push the array to the packer - * \param nd The array to push the data into. - */ - void Push(const NDArray::RObjectType& nd); - /*! - * \brief Get the R array out from packed data. - * \return The packed data. - */ - Rcpp::NumericVector Get() const; - /*! \return constructor */ - static Rcpp::RObject CreateNDArrayPacker(); - - private: - /*! \brief The internal data */ - std::vector data_; - /*! 
\brief The shape of data */ - std::vector shape_; -}; -} // namespace R -} // namespace mxnet - -RCPP_EXPOSED_CLASS_NODECL(::mxnet::R::NDArrayPacker); - -namespace Rcpp { - template<> - inline bool is(SEXP x) { - if (TYPEOF(x) != EXTPTRSXP) return false; - Rcpp::XPtr ptr(x); - SEXP attr = ptr.attr("class"); - return attr != R_NilValue && - Rcpp::as(attr) == "MXNDArray"; - return true; - } -} // namespace Rcpp -#endif // MXNET_RCPP_NDARRAY_H_ diff --git a/R-package/src/symbol.cc b/R-package/src/symbol.cc deleted file mode 100644 index 317e82568012..000000000000 --- a/R-package/src/symbol.cc +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file symbol.cc - * \brief Rcpp Symbol of MXNet. - */ -#include -#include -#include -#include "./base.h" -#include "./symbol.h" -#include "./name.h" -#include "./export.h" - -namespace mxnet { -namespace R { - -NameManager* NameManager::Get() { - static NameManager inst; - return &inst; -} - -inline Symbol::RObjectType Symbol::Clone() const { - SymbolHandle ohandle; - MX_CALL(MXSymbolCopy(handle_, &ohandle)); - return Symbol::RObject(ohandle); -} - -Symbol::RObjectType Symbol::Apply(const Rcpp::List& kwargs) const { - RObjectType ret = this->Clone(); - if (kwargs.containsElementNamed("name")) { - int index = kwargs.findName("name"); - std::string name = kwargs[index]; - Rcpp::List kw(kwargs); - kw.erase(index); - Symbol::XPtr(ret)->Compose(kw, name); - } else { - std::string name; - Symbol::XPtr(ret)->Compose(kwargs, name); - } - return ret; -} - -std::string Symbol::DebugStr() const { - const char *str; - MX_CALL(MXSymbolPrint(handle_, &str)); - return str; -} - -void Symbol::Compose(const Rcpp::List& kwargs, const std::string &name) { - std::string target_name; - std::vector keys = SafeGetListNames(kwargs); - // get names - bool positional = keys.size() == 0 || keys[0].length() == 0; - for (size_t i = 0; i < keys.size(); ++i) { - RCHECK((keys[i].length() == 0) == positional) - << "Input symbols need to be either positional or key=value style, not both\n"; - } - if (positional) keys.resize(0); - - // string parameter keys - std::vector c_keys = CKeys(keys); - // string parameter values - std::vector handles(kwargs.size()); - for (size_t i = 0; i < kwargs.size(); ++i) { - handles[i] = Symbol::XPtr(kwargs[i])->handle_; - } - MX_CALL(NNSymbolCompose( - handle_, name.c_str(), - static_cast(handles.size()), - dmlc::BeginPtr(c_keys), dmlc::BeginPtr(handles))); -} - -std::vector Symbol::ListArguments() const { - mx_uint size; - const char **ret; - MX_CALL(MXSymbolListArguments(handle_, &size, &ret)); - return std::vector(ret, ret + size); -} - -std::vector Symbol::ListAuxiliaryStates() const { - mx_uint size; - const char 
**ret; - MX_CALL(MXSymbolListAuxiliaryStates(handle_, &size, &ret)); - return std::vector(ret, ret + size); -} - -std::vector Symbol::ListOuputs() const { - mx_uint size; - const char **ret; - MX_CALL(MXSymbolListOutputs(handle_, &size, &ret)); - return std::vector(ret, ret + size); -} - -Rcpp::List Symbol::getAttrs() const { - mx_uint size; - const char **ret; - MX_CALL(MXSymbolListAttrShallow(handle_, &size, &ret)); - std::vector key_values(ret, ret + 2*size); - - // fill return list - Rcpp::List list; - for (size_t i = 0; i < size; i++) { - list[key_values[2*i]] = key_values[2*i+1]; - } - return list; -} - -void Symbol::setAttrs(Rcpp::List attr) { - RCHECK(HasName(attr)) - << "Need to pass parameters in list of key=value style.\n"; - std::vector keys = attr.names(); - for (size_t i = 0; i < attr.size(); i++) { - RCHECK(TYPEOF(attr[i]) == STRSXP) << "Attribute values must be characters.\n"; - } - for (size_t i = 0; i < attr.size(); i++) { - MX_CALL(MXSymbolSetAttr(handle_, keys[i].c_str(), - Rcpp::as(attr[i]).c_str() )); - } -} - -void Symbol::Save(const std::string& fname) const { - MX_CALL(MXSymbolSaveToFile(handle_, fname.c_str())); -} - -std::string Symbol::AsJSON() const { - const char *json; - MX_CALL(MXSymbolSaveToJSON(handle_, &json)); - return json; -} - -Symbol::RObjectType Symbol::GetInternals() const { - SymbolHandle out; - MX_CALL(MXSymbolGetInternals(handle_, &out)); - return Symbol::RObject(out); -} - -Symbol::RObjectType Symbol::GetChildren() const { - SymbolHandle out; - MX_CALL(MXSymbolGetChildren(handle_, &out)); - return Symbol::RObject(out); -} - -Symbol::RObjectType Symbol::GetOutput(mx_uint index) const { - SymbolHandle out; - MX_CALL(MXSymbolGetOutput(handle_, index - 1, &out)); - return Symbol::RObject(out); -} - -// helper function to convert shape into Rcpp vector -inline Rcpp::List BuildShapeData(mx_uint shape_size, - const int *shape_ndim, - const int **shape_data, - const std::vector &names) { - Rcpp::List ret(shape_size); - for (mx_uint i = 0; i < shape_size; ++i) { - Rcpp::IntegerVector dim(shape_data[i], shape_data[i] + shape_ndim[i]); - std::reverse(dim.begin(), dim.end()); - ret[i] = dim; - } - ret.names() = names; - return ret; -} - -SEXP Symbol::InferShape(const Rcpp::List& kwargs) const { - RCHECK(HasName(kwargs)) - << "Need to pass parameters in key=value style.\n"; - std::vector keys = kwargs.names(); - std::vector arg_ind_ptr(1, 0); - std::vector arg_shape_data; - - for (size_t i = 0; i < kwargs.size(); ++i) { - RCHECK(keys[i].length() != 0) - << "Need to pass parameters in key=value style.\n"; - std::vector dim = Dim2InternalShape(kwargs[i]); - arg_shape_data.insert(arg_shape_data.end(), dim.begin(), dim.end()); - arg_ind_ptr.push_back(static_cast(arg_shape_data.size())); - } - std::vector c_keys = CKeys(keys); - - mx_uint in_shape_size; - const int *in_shape_ndim; - const int **in_shape_data; - mx_uint out_shape_size; - const int *out_shape_ndim; - const int **out_shape_data; - mx_uint aux_shape_size; - const int *aux_shape_ndim; - const int **aux_shape_data; - int complete; - - MX_CALL(MXSymbolInferShapeEx( - handle_, static_cast(kwargs.size()), dmlc::BeginPtr(c_keys), - dmlc::BeginPtr(arg_ind_ptr), dmlc::BeginPtr(arg_shape_data), - &in_shape_size, &in_shape_ndim, &in_shape_data, - &out_shape_size, &out_shape_ndim, &out_shape_data, - &aux_shape_size, &aux_shape_ndim, &aux_shape_data, - &complete)); - - if (complete != 0) { - return Rcpp::List::create( - Rcpp::Named("arg.shapes") = BuildShapeData( - in_shape_size, in_shape_ndim, in_shape_data, 
ListArguments()), - Rcpp::Named("out.shapes") = BuildShapeData( - out_shape_size, out_shape_ndim, out_shape_data, ListOuputs()), - Rcpp::Named("aux.shapes") = BuildShapeData( - aux_shape_size, aux_shape_ndim, aux_shape_data, ListAuxiliaryStates())); - } else { - return R_NilValue; - } -} - -Symbol::RObjectType Symbol::Variable(const std::string& name) { - SymbolHandle out; - MX_CALL(MXSymbolCreateVariable(name.c_str(), &out)); - return Symbol::RObject(out); -} - -Symbol::RObjectType Symbol::Load(const std::string& filename) { - SymbolHandle out; - MX_CALL(MXSymbolCreateFromFile(filename.c_str(), &out)); - return Symbol::RObject(out); -} - -Symbol::RObjectType Symbol::LoadJSON(const std::string& json) { - SymbolHandle out; - MX_CALL(MXSymbolCreateFromJSON(json.c_str(), &out)); - return Symbol::RObject(out); -} - -Symbol::RObjectType Symbol::Group(const Rcpp::List& symbols) { - // allow pass in single list - Rcpp::List kwargs = symbols; - if (symbols.size() == 1 && Rcpp::is(symbols[0])) { - kwargs = symbols[0]; - } - - std::vector handles(kwargs.size()); - for (size_t i = 0; i < kwargs.size(); ++i) { - RCHECK(Rcpp::is(kwargs[i])) - << "Group only accept MXSymbol as input\n"; - handles[i] = Symbol::XPtr(kwargs[i])->handle_; - } - SymbolHandle out; - MX_CALL(MXSymbolCreateGroup(static_cast(handles.size()), - dmlc::BeginPtr(handles), &out)); - return Symbol::RObject(out); -} - -SymbolFunction::SymbolFunction(OpHandle handle, std::string name) - : handle_(handle) { - const char* real_name; - const char* description; - mx_uint num_args; - const char **arg_names; - const char **arg_type_infos; - const char **arg_descriptions; - const char *key_var_num_args; - const char *ret_type; - - MX_CALL(MXSymbolGetAtomicSymbolInfo( - handle_, &real_name, &description, &num_args, - &arg_names, &arg_type_infos, &arg_descriptions, - &key_var_num_args, &ret_type)); - if (key_var_num_args != nullptr) { - key_var_num_args_ = key_var_num_args; - } - name_hint_ = name; - std::transform(name_hint_.begin(), name_hint_.end(), - name_hint_.begin(), ::tolower); - if (name[0] == '_') { - name_ = std::string("mx.varg.symbol.internal.") + (name.c_str() + 1); - } else { - name_ = std::string("mx.varg.symbol.") + name; - } - std::ostringstream os; - os << name << ':' << description << "\n\n" - << MakeDocString(num_args, arg_names, arg_type_infos, arg_descriptions) - << "@param name string, optional\n" - << " Name of the resulting symbol.\n" - << "@return out The result mx.symbol\n\n" - << "@export\n"; - this->docstring = os.str(); -} - -SEXP SymbolFunction::operator() (SEXP* args) { - BEGIN_RCPP; - Rcpp::List kwargs(args[0]); - std::vector keys = SafeGetListNames(kwargs); - // string key and values - std::vector str_keys; - std::vector str_vals; - // symbol key and values - std::vector sym_keys; - std::vector sym_vals; - // name of the result - std::string name; - - // classify keys - for (size_t i = 0; i < kwargs.size(); ++i) { - if (keys[i] == "name") { - name = Rcpp::as(kwargs[i]); - continue; - } - if (!isSimple(kwargs[i]) && Rcpp::is(kwargs[i])) { - sym_keys.push_back(keys[i]); - sym_vals.push_back(kwargs[i]); - } else { - RCHECK(keys[i].length() != 0) - << "Non Symbol parameters is only accepted via key=value style."; - str_keys.push_back(FormatParamKey(keys[i])); - str_vals.push_back(toPyString(keys[i], kwargs[i])); - } - } - - SymbolHandle shandle; - std::vector c_str_keys = CKeys(str_keys); - std::vector c_str_vals = CKeys(str_vals); - MX_CALL(NNSymbolCreateAtomicSymbol( - handle_, static_cast(str_keys.size()), - 
dmlc::BeginPtr(c_str_keys), - dmlc::BeginPtr(c_str_vals), - &shandle)); - Symbol::RObjectType ret = Symbol::RObject(shandle); - Rcpp::List compose_args = Rcpp::wrap(sym_vals); - compose_args.names() = sym_keys; - name = NameManager::Get()->GetName(name, name_hint_); - Symbol::XPtr(ret)->Compose(compose_args, name); - return ret; - END_RCPP; -} - -void Symbol::InitRcppModule() { - using namespace Rcpp; // NOLINT(*) - class_("MXSymbol") - .method("debug.str", &Symbol::DebugStr, - "Return the debug string of internals of symbol") - .method("apply", &Symbol::Apply, - "Return a new Symbol by applying current symbols into input") - .method("as.json", &Symbol::AsJSON, - "Return a json string representation of symbol") - .method("save", &Symbol::Save, - "Save symbol to file") - .property("arguments", &Symbol::ListArguments, - "List the arguments names of the symbol") - .property("attributes", &Symbol::getAttrs, &Symbol::setAttrs, - "Attributes of the symbol. Specified as named list.") - .property("outputs", &Symbol::ListOuputs, - "List the outputs names of the symbol") - .property("auxiliary.states", &Symbol::ListAuxiliaryStates, - "List the auxiliary state names of the symbol") - .method("get.internals", &Symbol::GetInternals, - "Get a symbol that contains all the internals") - .method("get.children", &Symbol::GetChildren, - "Get a symbol that contains all the children") - .method("get.output", &Symbol::GetOutput, - "Get index-th output symbol of current one") - .method("[[", &Symbol::GetOutput, - "Get index-th output symbol of current one") - .method("infer.shape", &Symbol::InferShape, - "Inference the shape information given unknown ones"); - - function("mx.symbol.Variable", - &Symbol::Variable, - List::create(_["name"]), - "Create a symbolic variable with specified name."); - function("mx.symbol.load", - &Symbol::Load, - List::create(_["file.name"]), - "Load a symbol from file."); - function("mx.symbol.load.json", - &Symbol::LoadJSON, - List::create(_["json.str"]), - "Load a symbol from json string."); - function("mx.varg.symbol.internal.Group", - &Symbol::Group, - List::create(_["slist"]), - "Create a symbol that groups symbols together."); -} - -void SymbolFunction::InitRcppModule() { - Rcpp::Module* scope = ::getCurrentScope(); - RCHECK(scope != nullptr) - << "Init Module need to be called inside scope"; - mx_uint out_size; - const char** op_name_ptrs; - std::vector op_names; - MX_CALL(MXListAllOpNames(&out_size, &op_name_ptrs)); - for (size_t i = 0; i < out_size; ++i) { - op_names.push_back(std::string(op_name_ptrs[i])); - } - - for (int i = 0; i < out_size; ++i) { - OpHandle handle; - MX_CALL(NNGetOpHandle(op_names[i].c_str(), &handle)); - SymbolFunction *f = new SymbolFunction(handle, op_names[i]); - scope->Add(f->get_name(), f); - } -} -} // namespace R -} // namespace mxnet diff --git a/R-package/src/symbol.h b/R-package/src/symbol.h deleted file mode 100644 index fb128fa91053..000000000000 --- a/R-package/src/symbol.h +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
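[Reviewer note] Symbol::InitRcppModule above uses Rcpp's class_<T> facility to publish a C++ class to R. A minimal sketch of that exposure pattern; the Counter class and module name are invented for illustration:

#include <Rcpp.h>

// Methods and properties declared on class_<T> become R methods on the
// reference object the module hands back.
class Counter {
 public:
  Counter() : value_(0) {}
  void add(int x) { value_ += x; }
  int value() const { return value_; }

 private:
  int value_;
};

RCPP_MODULE(counter_mod) {
  using namespace Rcpp;  // NOLINT(*)
  class_<Counter>("Counter")
      .constructor()
      .method("add", &Counter::add, "Add x to the current value")
      .property("value", &Counter::value, "Read-only current value");
}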
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file symbol.h - * \brief Rcpp Symbolic construction interface of MXNet - */ -#ifndef MXNET_RCPP_SYMBOL_H_ -#define MXNET_RCPP_SYMBOL_H_ - -#include -#include -#include -#include -#include -#include - -namespace mxnet { -namespace R { -// forward declare symbol functiono -class SymbolFunction; - -/*! \brief The Rcpp Symbol class of MXNet */ -class Symbol { - public: - // typedef RObjectType - typedef Rcpp::RObject RObjectType; - /*! \return typename from R side. */ - inline static const char* TypeName() { - return "MXSymbol"; - } - /*! - * \brief Apply the symbol as function on kwargs - * \param kwargs keyword arguments to the data - * \return A resulting symbol. - */ - RObjectType Apply(const Rcpp::List& kwargs) const; - /*! - * \brief Print the debug string of symbol - * \return the debug string. - */ - std::string DebugStr() const; - /*! \return the arguments in the symbol */ - std::vector ListArguments() const; - /*! \return the auxiliary states symbol */ - std::vector ListAuxiliaryStates() const; - /*! \return the outputs in the symbol */ - std::vector ListOuputs() const; - - /*! \return the attributes of the symbol */ - Rcpp::List getAttrs() const; - /*! - * \brief sets the attributes of the symbol - * \param attr list of keyword arguments - */ - void setAttrs(Rcpp::List attr); - - /*! - * \brief Save the symbol to file - * \param fname the file name we need to save to - */ - void Save(const std::string& fname) const; - /*! - * \brief save the symbol to json string - * \return a JSON string representation of symbol. - */ - std::string AsJSON() const; - /*! - * \brief Get a new grouped symbol whose output contains all the - * internal outputs of this symbol. - * \return The internal of the symbol. - */ - RObjectType GetInternals() const; - /*! - * \brief Gets a new grouped symbol whose output contains - * inputs to output nodes of the original symbol. - * \return The children of the symbol. - */ - RObjectType GetChildren() const; - /*! - * \brief Get index-th outputs of the symbol. - * \param symbol The symbol - * \param index the Index of the output. - * \param out The output symbol whose outputs are the index-th symbol. - */ - RObjectType GetOutput(mx_uint index) const; - /*! \brief Infer the shapes of arguments, outputs, and auxiliary states */ - SEXP InferShape(const Rcpp::List& kwargs) const; - /*! - * \brief Create a symbolic variable with specified name. - * - * \param name string, Name of the variable. - * \return The created variable symbol. - */ - static RObjectType Variable(const std::string& name); - /*! - * \brief Load a symbol variable from filename. - * - * \param filename string, the path to the symbol file. - * \return The loaded corresponding symbol. - */ - static RObjectType Load(const std::string& filename); - /*! - * \brief Load a symbol variable from json string - * - * \param json string, json string of symbol. - * \return The loaded corresponding symbol. - */ - static RObjectType LoadJSON(const std::string& json); - /*! - * \brief Create a symbol that groups symbols together. 
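[Reviewer note] These static constructors are thin shims over the MXNet C API; the two simplest reduce to the following (return-code checks omitted for brevity; both calls come straight from mxnet/c_api.h):

#include <mxnet/c_api.h>

// mx.symbol.Variable: a named placeholder symbol.
SymbolHandle MakeVariable(const char* name) {
  SymbolHandle out = nullptr;
  MXSymbolCreateVariable(name, &out);  // returns 0 on success
  return out;
}

// mx.varg.symbol.internal.Group: one symbol whose outputs are the
// concatenated outputs of every input symbol.
SymbolHandle MakeGroup(SymbolHandle* symbols, mx_uint n) {
  SymbolHandle out = nullptr;
  MXSymbolCreateGroup(n, symbols, &out);
  return out;
}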
- * - * \param ... List of symbols to be grouped. - * \return The created grouped symbol. - */ - static RObjectType Group(const Rcpp::List& symbols); - /*! \brief static function to initialize the Rcpp functions */ - static void InitRcppModule(); - // destructor - ~Symbol() { - MX_CALL(MXSymbolFree(handle_)); - } - // get external pointer of Symbol - inline static Symbol* XPtr(const Rcpp::RObject& obj) { - return Rcpp::as(obj); - } - - private: - // friend with SymbolFunction - friend class SymbolFunction; - friend class Executor; - // enable trivial copy constructors etc. - Symbol() {} - // constructor - explicit Symbol(SymbolHandle handle) - : handle_(handle) {} - /*! - * \brief create a R object that correspond to the Class - * \param handle the Handle needed for output. - */ - inline static Rcpp::RObject RObject(SymbolHandle handle) { - return Rcpp::internal::make_new_object(new Symbol(handle)); - } - /*! - * \brief Return a clone of Symbol - * Do not expose to R side - * \param obj The source to be cloned from - * \return a Cloned Symbol - */ - inline RObjectType Clone() const; - /*! - * \brief Compose the symbol with kwargs - * \param kwargs keyword arguments to the data - * \param name name of the symbol. - */ - void Compose(const Rcpp::List& kwargs, const std::string &name); - - /*! \brief internal executor handle */ - SymbolHandle handle_; -}; - -/*! \brief The Symbol functions to be invoked */ -class SymbolFunction : public ::Rcpp::CppFunction { - public: - virtual SEXP operator() (SEXP* args); - - virtual int nargs() { - return 1; - } - - virtual bool is_void() { - return false; - } - - virtual void signature(std::string& s, const char* name) { // NOLINT(*) - ::Rcpp::signature< SEXP, ::Rcpp::List >(s, name); - } - - virtual const char* get_name() { - return name_.c_str(); - } - - virtual SEXP get_formals() { - return Rcpp::List::create(Rcpp::_["alist"]); - } - - virtual DL_FUNC get_function_ptr() { - return (DL_FUNC)NULL; // NOLINT(*) - } - /*! \brief static function to initialize the Rcpp functions */ - static void InitRcppModule(); - - private: - // make constructor private - explicit SymbolFunction(OpHandle handle, std::string name); - /*! \brief internal creator handle. */ - OpHandle handle_; - // name of the function - std::string name_; - // hint used to generate the names - std::string name_hint_; - // key to variable size arguments, if any. - std::string key_var_num_args_; -}; -} // namespace R -} // namespace mxnet - -RCPP_EXPOSED_CLASS_NODECL(::mxnet::R::Symbol); - -namespace Rcpp { - template<> - inline bool is(SEXP x) { - return internal::is__module__object_fix(x); - } -} -#endif // MXNET_RCPP_SYMBOL_H_ diff --git a/R-package/tests/testthat/get_data.R b/R-package/tests/testthat/get_data.R deleted file mode 100644 index 9bcacdb46ac8..000000000000 --- a/R-package/tests/testthat/get_data.R +++ /dev/null @@ -1,117 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
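[Reviewer note] Both bindings deleted here (NDBlob earlier, Symbol above) follow the same ownership rule: the R-side external pointer owns the C handle and releases it when R garbage-collects the wrapper, unless ownership was "moved" first. A standalone sketch of that pattern; Blob and WrapHandle are invented names, and the actual free call is left as a comment so the sketch compiles on its own:

#include <Rcpp.h>

struct Blob {
  explicit Blob(void* handle) : handle(handle), moved(false) {}
  ~Blob() {
    if (!moved) {
      // The real binding calls MXNDArrayFree / MXSymbolFree here; the
      // "moved" flag lets mutating ops steal the handle without a double free.
    }
  }
  void* handle;
  bool moved;
};

// Rcpp::XPtr installs a finalizer that deletes the Blob (and thus releases
// the C handle) when R garbage-collects the pointer.
SEXP WrapHandle(void* handle) {
  Rcpp::XPtr<Blob> ptr(new Blob(handle));
  ptr.attr("class") = "MXNDArray";  // the tag the is<> specializations check
  return ptr;
}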
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -GetMNIST_ubyte <- function() { - if (!dir.exists("data")) { - dir.create("data/") - } - if (!file.exists("data/train-images-idx3-ubyte") | !file.exists("data/train-labels-idx1-ubyte") | - !file.exists("data/t10k-images-idx3-ubyte") | !file.exists("data/t10k-labels-idx1-ubyte")) { - download.file("http://data.mxnet.io/mxnet/data/mnist.zip", destfile = "data/mnist.zip") - unzip("data/mnist.zip", exdir = "data/") - file.remove("data/mnist.zip") - } -} - -GetMNIST_csv <- function() { - if (!dir.exists("data")) { - dir.create("data/") - } - if (!file.exists("data/train.csv") | !file.exists("data/test.csv")) { - download.file("https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/mnist_csv.zip", - destfile = "data/mnist_csv.zip") - unzip("data/mnist_csv.zip", exdir = "data/") - file.remove("data/mnist_csv.zip") - } -} - -GetCifar10 <- function() { - if (!dir.exists("data")) { - dir.create("data/") - } - if (!file.exists("data/cifar/train.rec") | !file.exists("data/cifar/test.rec") | - !file.exists("data/cifar/train.lst") | !file.exists("data/cifar/test.lst")) { - download.file("http://data.mxnet.io/mxnet/data/cifar10.zip", destfile = "data/cifar10.zip") - unzip("data/cifar10.zip", exdir = "data/") - file.remove("data/cifar10.zip") - } -} - -GetInception <- function() { - if (!dir.exists("model")) { - dir.create("model/") - } - - if (!file.exists("model/Inception-BN-0126.params")) { - download.file( - "http://data.mxnet.io/mxnet/models/imagenet/inception-bn/Inception-BN-0126.params?raw=true", - destfile = "model/Inception-BN-0126.params") - } - if (!file.exists("model/Inception-BN-symbol.json")) { - download.file( - "http://data.mxnet.io/mxnet/models/imagenet/inception-bn/Inception-BN-symbol.json", - destfile = "model/Inception-BN-symbol.json") - } -} - -GetCatDog <- function() { - if (!dir.exists("data")) { - dir.create("data/") - } - if (!file.exists("data/cats_dogs/cats_dogs_train.rec") | !file.exists("data/cats_dogs/cats_dogs_val.rec")) { - download.file("https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/cats_dogs.zip", - destfile = "data/cats_dogs.zip") - unzip("data/cats_dogs.zip", exdir = "data/") - file.remove("data/cats_dogs.zip") - } -} - -GetMovieLens <- function() { - if (!dir.exists("data")) { - dir.create("data/") - } - if (!file.exists("data/ml-100k/u.data")) { - download.file("http://files.grouplens.org/datasets/movielens/ml-100k.zip", - destfile = "data/ml-100k.zip") - unzip("data/ml-100k.zip", exdir = "data/") - file.remove("data/ml-100k.zip") - } -} - -GetISBI_data <- function() { - if (!dir.exists("data")) { - dir.create("data/") - } - if (!file.exists("data/ISBI/train-volume.tif") | !file.exists("data/ISBI/train-labels.tif")) { - download.file("https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/ISBI.zip", - destfile = "data/ISBI.zip") - unzip("data/ISBI.zip", exdir = "data/") - file.remove("data/ISBI.zip") - } -} - -GetCaptcha_data <- function() { - if (!dir.exists("data")) { - dir.create("data/") - } - if (!file.exists("data/captcha_example/captcha_train.rec") | !file.exists("data/captcha_example/captcha_test.rec")) { - 
download.file("https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/R/data/captcha_example.zip", - destfile = "data/captcha_example.zip") - unzip("data/captcha_example.zip", exdir = "data/") - file.remove("data/captcha_example.zip") - } -} diff --git a/R-package/tests/testthat/test_initializer.R b/R-package/tests/testthat/test_initializer.R deleted file mode 100644 index 7bb9455445ab..000000000000 --- a/R-package/tests/testthat/test_initializer.R +++ /dev/null @@ -1,131 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -require(mxnet) - -context("initializer") - -test_that("mx.init.uniform", { - uniform_init <- mx.init.uniform(scale = 1) - expect_equal(typeof(uniform_init), "closure") - - X_bias <- uniform_init("X_bias", c(1, 100), ctx = mx.ctx.default()) - expect_equal(X_bias, mx.nd.zeros(c(1, 100))) - - X_weight <- uniform_init("X_weight", c(5, 10, 1000), ctx = mx.ctx.default()) - expect_equal(X_weight >= -1, mx.nd.ones(c(5, 10, 1000))) - expect_equal(X_weight <= 1, mx.nd.ones(c(5, 10, 1000))) - mean_weight <- mean(as.array(X_weight)) - expect_equal(mean_weight, 0, tolerance = 0.01) -}) - -test_that("mx.init.normal", { - normal_init <- mx.init.normal(sd = 0.1) - expect_equal(typeof(normal_init), "closure") - - X_bias <- normal_init("X_bias", c(1, 100), ctx = mx.ctx.default()) - expect_equal(X_bias, mx.nd.zeros(c(1, 100))) - - X_weight <- normal_init("X_weight", c(5, 10, 1000), ctx = mx.ctx.default()) - weight_mean <- mean(as.array(X_weight)) - weight_sd <- sd(as.array(X_weight)) - expect_equal(weight_mean, 0, tolerance = 0.01) - expect_equal(weight_sd, 0.1, tolerance = 0.01) -}) - -test_that("mx.init.Xavier", { - xavier_init <- mx.init.Xavier() - expect_equal(typeof(xavier_init), "closure") - - # default parameters - shape <- c(2, 3, 324, 324) - fan_out <- shape[length(shape)] - fan_in <- prod(shape[-length(shape)]) - - X_bias <- xavier_init("X_bias", shape = shape, ctx = mx.ctx.default()) - expect_equal(X_bias, mx.nd.zeros(shape)) - - X_weight <- xavier_init("X_weight", shape = shape, ctx = mx.ctx.default()) - scale <- sqrt(3/((fan_in + fan_out)/2)) - expect_equal(X_weight >= -scale, mx.nd.ones(shape)) - expect_equal(X_weight <= scale, mx.nd.ones(shape)) - weight_mean <- mean(as.array(X_weight)) - expect_equal(weight_mean, 0, tolerance = 0.01) - - for (dist_type in c("gaussian", "uniform")) { - for (factor_type in c("in", "out", "avg")) { - xavier_init <- mx.init.Xavier(rnd_type = dist_type, factor_type = factor_type, - magnitude = 200) - expect_equal(typeof(xavier_init), "closure") - - X_weight <- xavier_init("X_weight", shape = shape, ctx = mx.ctx.default()) - factor_val <- switch(factor_type, avg = (fan_in + fan_out)/2, `in` = fan_in, - out = fan_out) - scale <- sqrt(200/factor_val) - - if (dist_type == "gaussian") { - weight_mean <- 
mean(as.array(X_weight)) - weight_sd <- sd(as.array(X_weight)) - expect_equal(weight_mean, 0, tolerance = 0.01) - expect_equal(weight_sd, scale, tolerance = 0.01) - } else { - expect_equal(X_weight >= -scale, mx.nd.ones(shape)) - expect_equal(X_weight <= scale, mx.nd.ones(shape)) - weight_mean <- mean(as.array(X_weight)) - expect_equal(weight_mean, 0, tolerance = 0.01) - } - } - } -}) - -test_that("mx.init.internal.default", { - sample_bias <- mxnet:::mx.init.internal.default("X_bias", c(5, 10, 100), ctx = mx.ctx.default()) - expect_equal(sample_bias, mx.nd.zeros(c(5, 10, 100))) - - sample_gamma <- mxnet:::mx.init.internal.default("X_gamma", c(5, 10, 100), ctx = mx.ctx.default()) - expect_equal(sample_gamma, mx.nd.ones(c(5, 10, 100))) - - sample_beta <- mxnet:::mx.init.internal.default("X_beta", c(5, 10, 100), ctx = mx.ctx.default()) - expect_equal(sample_beta, mx.nd.zeros(c(5, 10, 100))) - - sample_moving_mean <- mxnet:::mx.init.internal.default("X_moving_mean", c(5, - 10, 100), ctx = mx.ctx.default()) - expect_equal(sample_moving_mean, mx.nd.zeros(c(5, 10, 100))) - - sample_moving_var <- mxnet:::mx.init.internal.default("X_moving_var", c(5, 10, - 100), ctx = mx.ctx.default()) - expect_equal(sample_moving_var, mx.nd.ones(c(5, 10, 100))) - - expect_error(mxnet:::mx.init.internal.default("X", c(5, 10, 100), ctx = mx.ctx.default()), - "Unkown initialization pattern for X") -}) - -test_that("mx.init.create", { - uniform_init <- mx.init.uniform(scale = 1) - expect_equal(typeof(uniform_init), "closure") - arrs <- setNames(as.list(c(50000, 100)), c("X_weight", "X_bias")) - arr_init <- mx.init.create(uniform_init, arrs, ctx = mx.ctx.default()) - - X_bias <- arr_init$X_bias - expect_equal(X_bias, mx.nd.zeros(c(100))) - - X_weight <- arr_init$X_weight - expect_equal(X_weight >= -1, mx.nd.ones(c(50000))) - expect_equal(X_weight <= 1, mx.nd.ones(c(50000))) - mean_weight <- mean(as.array(X_weight)) - expect_equal(mean_weight, 0, tolerance = 0.01) -}) diff --git a/R-package/tests/testthat/test_io.R b/R-package/tests/testthat/test_io.R deleted file mode 100644 index 06b8e90c1392..000000000000 --- a/R-package/tests/testthat/test_io.R +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -require(mxnet) - -context("io") - -source("get_data.R") - -test_that("MNISTIter", { - GetMNIST_ubyte() - batch.size <- 100 - train_dataiter <- mx.io.MNISTIter(image = "data/train-images-idx3-ubyte", label = "data/train-labels-idx1-ubyte", - data.shape = c(784), batch.size = batch.size, shuffle = TRUE, flat = TRUE, - silent = 0, seed = 10) - train_dataiter$reset() - batch_count <- 0 - while (train_dataiter$iter.next()) { - batch_count <- batch_count + 1 - } - nbatch <- 60000/batch.size - expect_equal(batch_count, nbatch) - train_dataiter$reset() - train_dataiter$iter.next() - label_0 <- as.array(train_dataiter$value()$label) - train_dataiter$iter.next() - train_dataiter$iter.next() - train_dataiter$iter.next() - train_dataiter$iter.next() - train_dataiter$reset() - train_dataiter$iter.next() - label_1 <- as.array(train_dataiter$value()$label) - expect_equal(label_0, label_1) -}) - -test_that("Cifar10Rec", { - GetCifar10() - dataiter <- mx.io.ImageRecordIter(path.imgrec = "./data/cifar/train.rec", path.imglist = "./data/cifar/train.lst", - mean.img = "./data/cifar/cifar10_mean.bin", batch.size = 100, data.shape = c(28, - 28, 3), rand.crop = TRUE, rand.mirror = TRUE) - labelcount <- rep(0, 10) - dataiter$reset() - while (dataiter$iter.next()) { - label <- as.array(dataiter$value()$label) - for (i in label) { - labelcount[i + 1] <- labelcount[i + 1] + 1 - } - } - - expect_equal(labelcount, rep(5000, 10)) -}) - -test_that("mx.io.arrayiter", { - X <- matrix(c(1:10000), 100, 100) - y <- c(1:100) - dataiter <- mx.io.arrayiter(X, y, batch.size = 20, shuffle = FALSE) - dataiter$reset() - batch_count <- 0 - while (dataiter$iter.next()) { - batch_count <- batch_count + 1 - } - expect_equal(batch_count, 100/20) - - y <- round(y/10) - dataiter <- mx.io.arrayiter(X, y, batch.size = 30, shuffle = FALSE) - labelcount <- rep(0, 11) - dataiter$reset() - while (dataiter$iter.next()) { - label <- as.array(dataiter$value()$label) - for (i in label) { - labelcount[i + 1] <- labelcount[i + 1] + 1 - } - } - - expect_equal(labelcount, c(5, 9, 11, 9, 11, 9, 11, 13, 22, 14, 6)) -}) diff --git a/R-package/tests/testthat/test_ndarray.R b/R-package/tests/testthat/test_ndarray.R deleted file mode 100644 index adddae7c6ebb..000000000000 --- a/R-package/tests/testthat/test_ndarray.R +++ /dev/null @@ -1,218 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -require(mxnet) - -context("ndarray") - -if (Sys.getenv("R_GPU_ENABLE") != "" & as.integer(Sys.getenv("R_GPU_ENABLE")) == - 1) { - mx.ctx.default(new = mx.gpu()) - message("Using GPU for testing.") -} - -test_that("element-wise calculation for vector", { - x <- 1:10 - mat <- mx.nd.array(as.array(x), mx.ctx.default()) - expect_equal(x, as.array(mat)) - expect_equal(x + 1, as.array(mat + 1)) - expect_equal(x - 10, as.array(mat - 10)) - expect_equal(x * 20, as.array(mat * 20)) - expect_equal(x/3, as.array(mat/3), tolerance = 1e-05) - expect_equal(-1 - x, as.array(-1 - mat)) - expect_equal(-5/x, as.array(-5/mat), tolerance = 1e-05) - expect_equal(x + x, as.array(mat + mat)) - expect_equal(x/x, as.array(mat/mat)) - expect_equal(x * x, as.array(mat * mat)) - expect_equal(x - x, as.array(mat - mat)) - expect_equal(1 - x, as.array(1 - mat)) - - x <- runif(10, -10, 10) - nd <- mx.nd.array(as.array(x)) - expect_equal(sqrt(abs(x)), as.array(mx.nd.sqrt(mx.nd.abs(nd))), tolerance = 1e-06) - expect_equal(x^2, as.array(mx.nd.square(nd)), tolerance = 1e-06) -}) - -test_that("element-wise calculation for matrix", { - x <- matrix(1:4, 2, 2) - mat <- mx.nd.array(as.array(x), mx.ctx.default()) - expect_equal(x, as.array(mat)) - expect_equal(x + 1, as.array(mat + 1)) - expect_equal(x - 10, as.array(mat - 10)) - expect_equal(x * 20, as.array(mat * 20)) - expect_equal(x/3, as.array(mat/3), tolerance = 1e-05) - expect_equal(-1 - x, as.array(-1 - mat)) - expect_equal(-5/x, as.array(-5/mat), tolerance = 1e-05) - expect_equal(x + x, as.array(mat + mat)) - expect_equal(x/x, as.array(mat/mat)) - expect_equal(x * x, as.array(mat * mat)) - expect_equal(x - x, as.array(mat - mat)) - expect_equal(1 - x, as.array(1 - mat)) -}) - -test_that("ndarray ones, zeros, save and load", { - expect_equal(rep(0, 10), as.array(mx.nd.zeros(10))) - expect_equal(matrix(0, 10, 5), as.array(mx.nd.zeros(c(10, 5)))) - expect_equal(rep(1, 10), as.array(mx.nd.ones(10))) - expect_equal(matrix(1, 10, 5), as.array(mx.nd.ones(c(10, 5)))) - mat <- mx.nd.array(1:20) - mx.nd.save(mat, "temp.mat") - mat2 <- mx.nd.load("temp.mat") - expect_true(is.mx.ndarray(mat2[[1]])) - expect_equal(as.array(mat), as.array(mat2[[1]])) - file.remove("temp.mat") -}) - -test_that("ndarray concatenate", { - shapes <- matrix(c(2, 3, 4, 2, 2, 2, 4, 2, 2, 1, 4, 2), nrow = 3, byrow = TRUE) - array_r <- apply(shapes, 2, function(s) { - runif(s, -10, 10) - }) - array_nd <- apply(array_r, 1, function(s) { - mx.nd.array(matrix(s, nrow = 1)) - }) - array_nd_concat <- mx.nd.concat(data = array_nd, num_args = 3, dim = 1) - expect_equal(array_r, as.matrix(array_nd_concat), tolerance = 1e-06) - - x1 <- mx.nd.array(c(1:24)) - x2 <- mx.nd.array(c(25:48)) - x3 <- mx.nd.concat(data = c(x1, x2), num_args = 2, dim = 0) - expect_equal(c(1:48), as.array(x3)) - expect_equal(dim(x3), 48) - - x1 <- array(1:24, dim = c(4, 3, 2)) - x2 <- array(25:48, dim = c(4, 3, 2)) - x3 <- c(1:4, 25:28, 5:8, 29:32, 9:12, 33:36, 13:16, 37:40, 17:20, 41:44, 21:24, - 45:48) - y1 <- mx.nd.array(x1) - y2 <- mx.nd.array(x2) - y3 <- mx.nd.concat(data = c(y1, y2), num_args = 2, dim = 2) - expect_equal(dim(y3), c(8, 3, 2)) - expect_equal(as.array(y3), array(x3, dim = c(8, 3, 2))) -}) - -test_that("ndarray clip", { - nd <- mx.nd.array(runif(10, -10, 10)) - nd2 <- mx.nd.clip(nd, -2, 3) - arr <- as.array(nd2) - expect_equal(arr >= -2 & arr <= 3, rep(TRUE, length(arr))) -}) - -test_that("ndarray dot", { - a <- matrix(runif(12), nrow = 3) - b <- matrix(runif(20), nrow = 4) - c <- a %*% b - - A <- 
mx.nd.array(t(a)) - B <- mx.nd.array(t(b)) - C <- mx.nd.dot(A, B) - - expect_equal(c, t(as.matrix(C)), tolerance = 1e-06) -}) - -test_that("ndarray crop", { - x <- mx.nd.ones(c(2, 3, 4)) - y <- mx.nd.crop(x, begin = c(0, 0, 0), end = c(2, 1, 3)) - expect_equal(array(1, dim = c(2, 1, 3)), as.array(y)) - - z <- mx.nd.zeros(c(2, 1, 3)) - x <- mxnet:::mx.nd.internal.crop.assign(x, z, begin = c(0, 0, 0), end = c(2, - 1, 3)) - arr_x <- array(1, dim = dim(x)) - arr_x[c(1:2), 1, c(1:3)] <- 0 - - expect_equal(as.array(x), arr_x) -}) - -test_that("ndarray negate", { - arr <- array(runif(24, -10, 10), dim = c(2, 3, 4)) - nd <- mx.nd.array(arr) - - expect_equal(arr, as.array(nd), tolerance = 1e-06) - expect_equal(-arr, as.array(-nd), tolerance = 1e-06) - expect_equal(arr, as.array(nd), tolerance = 1e-06) -}) - -test_that("ndarray equal", { - x <- mx.nd.zeros(c(2, 3)) - y <- mx.nd.ones(c(2, 3)) - z <- x == y - expect_equal(as.array(z), array(0, c(2, 3))) - - z <- 0 == x - expect_equal(as.array(z), array(1, c(2, 3))) -}) - -test_that("ndarray not equal", { - x <- mx.nd.zeros(c(2, 3)) - y <- mx.nd.ones(c(2, 3)) - z <- x != y - expect_equal(as.array(z), array(1, c(2, 3))) - - z <- 0 != x - expect_equal(as.array(z), array(0, c(2, 3))) -}) - -test_that("ndarray greater", { - x <- mx.nd.zeros(c(2, 3)) - y <- mx.nd.ones(c(2, 3)) - z <- x > y - expect_equal(as.array(z), array(0, c(2, 3))) - - z <- y > 0 - expect_equal(as.array(z), array(1, c(2, 3))) - - z <- 0 > y - expect_equal(as.array(z), array(0, c(2, 3))) - - z <- x >= y - expect_equal(as.array(z), array(0, c(2, 3))) - - z <- y >= 0 - expect_equal(as.array(z), array(1, c(2, 3))) - - z <- 0 >= y - expect_equal(as.array(z), array(0, c(2, 3))) - - z <- y >= 1 - expect_equal(as.array(z), array(1, c(2, 3))) -}) - -test_that("ndarray lesser", { - x <- mx.nd.zeros(c(2, 3)) - y <- mx.nd.ones(c(2, 3)) - z <- x < y - expect_equal(as.array(z), array(1, c(2, 3))) - - z <- y < 0 - expect_equal(as.array(z), array(0, c(2, 3))) - - z <- 0 < y - expect_equal(as.array(z), array(1, c(2, 3))) - - z <- x <= y - expect_equal(as.array(z), array(1, c(2, 3))) - - z <- y <= 0 - expect_equal(as.array(z), array(0, c(2, 3))) - - z <- 0 <= y - expect_equal(as.array(z), array(1, c(2, 3))) - - z <- y <= 1 - expect_equal(as.array(z), array(1, c(2, 3))) -}) diff --git a/R-package/tests/testthat/test_random.R b/R-package/tests/testthat/test_random.R deleted file mode 100644 index 4b1e19ecfcc4..000000000000 --- a/R-package/tests/testthat/test_random.R +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -require(mxnet) - -context("random") - -test_that("mx.runif", { - X <- mx.runif(shape = 50000, min = 0, max = 1, ctx = mx.ctx.default()) - expect_equal(X >= 0, mx.nd.ones(50000)) - expect_equal(X <= 1, mx.nd.ones(50000)) - sample_mean <- mean(as.array(X)) - expect_equal(sample_mean, 0.5, tolerance = 0.01) -}) - -test_that("mx.rnorm", { - X <- mx.rnorm(shape = 50000, mean = 5, sd = 0.1, ctx = mx.ctx.default()) - sample_mean <- mean(as.array(X)) - sample_sd <- sd(as.array(X)) - expect_equal(sample_mean, 5, tolerance = 0.01) - expect_equal(sample_sd, 0.1, tolerance = 0.01) -}) diff --git a/R-package/tests/testthat/test_symbol.R b/R-package/tests/testthat/test_symbol.R deleted file mode 100644 index acad98ac7b1f..000000000000 --- a/R-package/tests/testthat/test_symbol.R +++ /dev/null @@ -1,119 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -require(mxnet) - -context("symbol") - -test_that("basic symbol operation", { - data <- mx.symbol.Variable("data") - net1 <- mx.symbol.FullyConnected(data = data, name = "fc1", num_hidden = 10) - net1 <- mx.symbol.FullyConnected(data = net1, name = "fc2", num_hidden = 100) - - expect_equal(arguments(net1), c("data", "fc1_weight", "fc1_bias", "fc2_weight", - "fc2_bias")) - expect_equal(outputs(net1), "fc2_output") - - net2 <- mx.symbol.FullyConnected(name = "fc3", num_hidden = 10) - net2 <- mx.symbol.Activation(data = net2, act_type = "relu") - net2 <- mx.symbol.FullyConnected(data = net2, name = "fc4", num_hidden = 20) - - composed <- mx.apply(net2, fc3_data = net1, name = "composed") - - expect_equal(arguments(composed), c("data", "fc1_weight", "fc1_bias", "fc2_weight", - "fc2_bias", "fc3_weight", "fc3_bias", "fc4_weight", "fc4_bias")) - expect_equal(outputs(composed), "composed_output") - - multi_out <- mx.symbol.Group(c(composed, net1)) - expect_equal(outputs(multi_out), c("composed_output", "fc2_output")) -}) - -test_that("symbol internal", { - data <- mx.symbol.Variable("data") - oldfc <- mx.symbol.FullyConnected(data = data, name = "fc1", num_hidden = 10) - net1 <- mx.symbol.FullyConnected(data = oldfc, name = "fc2", num_hidden = 100) - - expect_equal(arguments(net1), c("data", "fc1_weight", "fc1_bias", "fc2_weight", - "fc2_bias")) - - internal <- internals(net1) - fc1 <- internal[[match("fc1_output", internal$outputs)]] - - expect_equal(arguments(fc1), arguments(oldfc)) -}) - -test_that("symbol children", { - data <- mx.symbol.Variable("data") - oldfc <- mx.symbol.FullyConnected(data = data, name = "fc1", num_hidden = 10) - net1 <- mx.symbol.FullyConnected(data = oldfc, name = "fc2", num_hidden = 100) - - expect_equal(outputs(children(net1)), c("fc1_output", "fc2_weight", "fc2_bias")) - expect_equal(outputs(children(children(net1))), c("data", "fc1_weight", "fc1_bias")) - - net2 <- net1$get.children() - 
expect_equal(net2[[match("fc2_weight", net2$outputs)]]$arguments, "fc2_weight") - - data <- mx.symbol.Variable("data") - sliced <- mx.symbol.SliceChannel(data, num_outputs = 3, name = "slice") - expect_equal(outputs(children(sliced)), "data") -}) - -test_that("symbol infer type", { - num_hidden <- 128 - num_dim <- 64 - num_sample <- 10 - - data <- mx.symbol.Variable("data") - prev <- mx.symbol.Variable("prevstate") - x2h <- mx.symbol.FullyConnected(data = data, name = "x2h", num_hidden = num_hidden) - h2h <- mx.symbol.FullyConnected(data = prev, name = "h2h", num_hidden = num_hidden) - - out <- mx.symbol.Activation(data = mx.symbol.elemwise_add(x2h, h2h), name = "out", - act_type = "relu") - - # shape inference will fail because information is not available for h2h - ret <- mx.symbol.infer.shape(out, data = c(num_dim, num_sample)) - - expect_equal(ret, NULL) -}) - -test_that("symbol attributes access", { - str <- "(1, 1, 1, 1)" - x <- mx.symbol.Variable("x") - x$attributes <- list(`__shape__` = str) - - expect_equal(x$attributes$`__shape__`, str) - - y <- mx.symbol.Variable("y") - y$attributes$`__shape__` <- str - - expect_equal(y$attributes$`__shape__`, str) -}) - -test_that("symbol concat", { - s1 <- mx.symbol.Variable("data1") - s2 <- mx.symbol.Variable("data2") - s3 <- mx.symbol.concat(data = c(s1, s2), num.args = 2, name = "concat") - expect_equal(outputs(s3), "concat_output") - expect_equal(outputs(children(s3)), c("data1", "data2")) - expect_equal(arguments(s3), c("data1", "data2")) - - s4 <- mx.symbol.concat(data = c(s1, s2), num.args = 2, name = "concat") - expect_equal(outputs(s3), outputs(s4)) - expect_equal(outputs(children(s3)), outputs(children(s4))) - expect_equal(arguments(s3), arguments(s4)) -}) diff --git a/R-package/vignettes/CharRnnModel.Rmd b/R-package/vignettes/CharRnnModel.Rmd deleted file mode 100644 index 3c302bb5bf10..000000000000 --- a/R-package/vignettes/CharRnnModel.Rmd +++ /dev/null @@ -1,293 +0,0 @@ - -# Character-level Language Model using RNN - -This tutorial demonstrates how to create a character-level language model with an RNN using the MXNet-R package. You will need the following R packages to run this tutorial: - - - readr - - stringr - - stringi - - mxnet - -We will use the [tinyshakespeare](https://github.com/dmlc/web-data/tree/master/mxnet/tinyshakespeare) dataset to build this model. - - -```R -library("readr") -library("stringr") -library("stringi") -library("mxnet") -``` - -## Preprocess and prepare the data - -Download the data: - - -```R -download.data <- function(data_dir) { - dir.create(data_dir, showWarnings = FALSE) - if (!file.exists(paste0(data_dir,'input.txt'))) { - download.file(url='https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/tinyshakespeare/input.txt', - destfile=paste0(data_dir,'input.txt'), method='wget') - } -} -``` - -Next we transform the text into feature vectors that are fed into the RNN model. The `make_data` function reads the dataset, cleans it of any non-printable characters, splits it into individual characters, and groups them into sequences of length `seq.len`. 
- - -```R -make_data <- function(path, seq.len = 32, dic=NULL) { - - text_vec <- read_file(file = path) - text_vec <- stri_enc_toascii(str = text_vec) - text_vec <- str_replace_all(string = text_vec, pattern = "[^[:print:]]", replacement = "") - text_vec <- strsplit(text_vec, '') %>% unlist - - if (is.null(dic)) { - char_keep <- sort(unique(text_vec)) - } else char_keep <- names(dic)[!dic == 0] - - # Remove terms not part of dictionary - text_vec <- text_vec[text_vec %in% char_keep] - - # Build dictionary - dic <- 1:length(char_keep) - names(dic) <- char_keep - - # reverse dictionary - rev_dic <- names(dic) - names(rev_dic) <- dic - - # Adjust by -1 to have a 1-lag for labels - num.seq <- (length(text_vec) - 1) %/% seq.len - - features <- dic[text_vec[1:(seq.len * num.seq)]] - labels <- dic[text_vec[1:(seq.len * num.seq)+1]] - - features_array <- array(features, dim = c(seq.len, num.seq)) - labels_array <- array(labels, dim = c(seq.len, num.seq)) - - return (list(features_array = features_array, labels_array = labels_array, dic = dic, rev_dic = rev_dic)) -} - - -seq.len <- 100 -data_prep <- make_data(path = "input.txt", seq.len = seq.len, dic = NULL) -``` - -Fetch the features and labels for training the model, and split the data into training and evaluation sets in a 9:1 ratio. - - -```R -X <- data_prep$features_array -Y <- data_prep$labels_array -dic <- data_prep$dic -rev_dic <- data_prep$rev_dic -vocab <- length(dic) - -samples <- tail(dim(X), 1) -train.val.fraction <- 0.9 - -X.train.data <- X[, 1:as.integer(samples * train.val.fraction)] -X.val.data <- X[, -(1:as.integer(samples * train.val.fraction))] - -X.train.label <- Y[, 1:as.integer(samples * train.val.fraction)] -X.val.label <- Y[, -(1:as.integer(samples * train.val.fraction))] - -train_buckets <- list("100" = list(data = X.train.data, label = X.train.label)) -eval_buckets <- list("100" = list(data = X.val.data, label = X.val.label)) - -train_buckets <- list(buckets = train_buckets, dic = dic, rev_dic = rev_dic) -eval_buckets <- list(buckets = eval_buckets, dic = dic, rev_dic = rev_dic) -``` - -Create iterators for the training and evaluation datasets. - - -```R -vocab <- length(eval_buckets$dic) - -batch.size <- 32 - -train.data <- mx.io.bucket.iter(buckets = train_buckets$buckets, batch.size = batch.size, - data.mask.element = 0, shuffle = TRUE) - -eval.data <- mx.io.bucket.iter(buckets = eval_buckets$buckets, batch.size = batch.size, - data.mask.element = 0, shuffle = FALSE) -``` - -## Train the Model - - -This model is a multi-layer RNN for sampling from character-level language models. It has a one-to-one model configuration since, for each character, we want to predict the next one. For a sequence of length 100, there are also 100 labels, corresponding to the same sequence of characters but offset by one position. The parameter `output_last_state` is set to TRUE in order to access the state of the RNN cells when performing inference. 
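To make the one-position offset concrete, here is a minimal sketch using a hypothetical five-character toy string (not part of the tutorial data); it mirrors what `make_data` produces:

```r
# Toy illustration of the 1-lag feature/label construction used by make_data.
text_vec <- c("h", "e", "l", "l", "o")
dic <- 1:4
names(dic) <- c("e", "h", "l", "o")  # character -> integer id
seq.len <- 2
num.seq <- (length(text_vec) - 1) %/% seq.len  # 2 full sequences
features <- dic[text_vec[1:(seq.len * num.seq)]]    # ids of "h","e","l","l"
labels <- dic[text_vec[1:(seq.len * num.seq) + 1]]  # ids of "e","l","l","o"
matrix(features, nrow = seq.len)  # each column is one input sequence
matrix(labels, nrow = seq.len)    # the same columns shifted forward by one character
```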
- - -```R -rnn_graph_one_one <- rnn.graph(num_rnn_layer = 3, - num_hidden = 96, - input_size = vocab, - num_embed = 64, - num_decode =vocab, - dropout = 0.2, - ignore_label = 0, - cell_type = "lstm", - masking = F, - output_last_state = T, - loss_output = "softmax", - config = "one-to-one") - -graph.viz(rnn_graph_one_one, type = "graph", direction = "LR", - graph.height.px = 180, shape=c(100, 64)) - -devices <- mx.cpu() - -initializer <- mx.init.Xavier(rnd_type = "gaussian", factor_type = "avg", magnitude = 3) - -optimizer <- mx.opt.create("adadelta", rho = 0.9, eps = 1e-5, wd = 1e-8, - clip_gradient = 5, rescale.grad = 1/batch.size) - -logger <- mx.metric.logger() -epoch.end.callback <- mx.callback.log.train.metric(period = 1, logger = logger) -batch.end.callback <- mx.callback.log.train.metric(period = 50) - -mx.metric.custom_nd <- function(name, feval) { - init <- function() { - c(0, 0) - } - update <- function(label, pred, state) { - m <- feval(label, pred) - state <- c(state[[1]] + 1, state[[2]] + m) - return(state) - } - get <- function(state) { - list(name = name, value = (state[[2]]/state[[1]])) - } - ret <- (list(init = init, update = update, get = get)) - class(ret) <- "mx.metric" - return(ret) -} - -mx.metric.Perplexity <- mx.metric.custom_nd("Perplexity", function(label, pred) { - label <- mx.nd.reshape(label, shape = -1) - label_probs <- as.array(mx.nd.choose.element.0index(pred, label)) - batch <- length(label_probs) - NLL <- -sum(log(pmax(1e-15, as.array(label_probs)))) / batch - Perplexity <- exp(NLL) - return(Perplexity) -}) - -model <- mx.model.buckets(symbol = rnn_graph_one_one, - train.data = train.data, eval.data = eval.data, - num.round = 20, ctx = devices, verbose = TRUE, - metric = mx.metric.Perplexity, - initializer = initializer, optimizer = optimizer, - batch.end.callback = NULL, - epoch.end.callback = epoch.end.callback) - -mx.model.save(model, prefix = "one_to_one_seq_model", iteration = 20) -``` - - Start training with 1 devices - [1] Train-Perplexity=13.7040474322178 - [1] Validation-Perplexity=7.94617194460922 - [2] Train-Perplexity=6.57039815554525 - [2] Validation-Perplexity=6.60806110658011 - [3] Train-Perplexity=5.65360504501481 - [3] Validation-Perplexity=6.18932770630876 - [4] Train-Perplexity=5.32547285727298 - [4] Validation-Perplexity=6.02198756798859 - [5] Train-Perplexity=5.14373631472579 - [5] Validation-Perplexity=5.8095658243407 - [6] Train-Perplexity=5.03077673487379 - [6] Validation-Perplexity=5.72582993567431 - [7] Train-Perplexity=4.94453383291536 - [7] Validation-Perplexity=5.6445258528126 - [8] Train-Perplexity=4.88635290100261 - [8] Validation-Perplexity=5.6730024536433 - [9] Train-Perplexity=4.84205646230548 - [9] Validation-Perplexity=5.50960780230982 - [10] Train-Perplexity=4.80441673535513 - [10] Validation-Perplexity=5.57002263750006 - [11] Train-Perplexity=4.77763413242626 - [11] Validation-Perplexity=5.55152143269169 - [12] Train-Perplexity=4.74937775290777 - [12] Validation-Perplexity=5.44968305351486 - [13] Train-Perplexity=4.72824849541467 - [13] Validation-Perplexity=5.50889348298234 - [14] Train-Perplexity=4.70980846981694 - [14] Validation-Perplexity=5.51473225859859 - [15] Train-Perplexity=4.69685776886122 - [15] Validation-Perplexity=5.45391985233811 - [16] Train-Perplexity=4.67837107034824 - [16] Validation-Perplexity=5.46636764997829 - [17] Train-Perplexity=4.66866961934873 - [17] Validation-Perplexity=5.44267086113492 - [18] Train-Perplexity=4.65611469144194 - [18] Validation-Perplexity=5.4290169469462 - [19] 
Train-Perplexity=4.64614689879405 - [19] Validation-Perplexity=5.44221549833917 - [20] Train-Perplexity=4.63764001963654 - [20] Validation-Perplexity=5.42114250842862 - - -## Inference on the Model - -We now use the saved model to run inference, sampling text character by character to produce output that resembles the original training data. - - -```R -set.seed(0) -model <- mx.model.load(prefix = "one_to_one_seq_model", iteration = 20) - -internals <- model$symbol$get.internals() -sym_state <- internals$get.output(which(internals$outputs %in% "RNN_state")) -sym_state_cell <- internals$get.output(which(internals$outputs %in% "RNN_state_cell")) -sym_output <- internals$get.output(which(internals$outputs %in% "loss_output")) -symbol <- mx.symbol.Group(sym_output, sym_state, sym_state_cell) - -infer_raw <- c("Thou ") -infer_split <- dic[strsplit(infer_raw, '') %>% unlist] -infer_length <- length(infer_split) - -# accumulator for the sampled character ids -predict <- numeric() - -infer.data <- mx.io.arrayiter(data = matrix(infer_split), label = matrix(infer_split), - batch.size = 1, shuffle = FALSE) - -infer <- mx.infer.rnn.one(infer.data = infer.data, - symbol = symbol, - arg.params = model$arg.params, - aux.params = model$aux.params, - input.params = NULL, - ctx = devices) - -pred_prob <- as.numeric(as.array(mx.nd.slice.axis( - infer$loss_output, axis=0, begin = infer_length-1, end = infer_length))) -pred <- sample(length(pred_prob), prob = pred_prob, size = 1) - 1 -predict <- c(predict, pred) - -for (i in 1:200) { - - infer.data <- mx.io.arrayiter(data = as.matrix(pred), label = as.matrix(pred), - batch.size = 1, shuffle = FALSE) - - infer <- mx.infer.rnn.one(infer.data = infer.data, - symbol = symbol, - arg.params = model$arg.params, - aux.params = model$aux.params, - input.params = list(rnn.state = infer[[2]], - rnn.state.cell = infer[[3]]), - ctx = devices) - - pred_prob <- as.numeric(as.array(infer$loss_output)) - pred <- sample(length(pred_prob), prob = pred_prob, size = 1, replace = T) - 1 - predict <- c(predict, pred) -} - -predict_txt <- paste0(rev_dic[as.character(predict)], collapse = "") -predict_txt_tot <- paste0(infer_raw, predict_txt, collapse = "") -print(predict_txt_tot) -``` - - [1] "Thou NAknowledge thee my Comfort and his late she.FRIAR LAURENCE:Nothing a groats waterd forth. The lend he thank that;When she I am brother draw London: and not hear that know.BENVOLIO:How along, makes your " - - - diff --git a/R-package/vignettes/MultidimLstm.Rmd b/R-package/vignettes/MultidimLstm.Rmd deleted file mode 100644 index c726557677a3..000000000000 --- a/R-package/vignettes/MultidimLstm.Rmd +++ /dev/null @@ -1,302 +0,0 @@ -LSTM time series example -============================================= - -This tutorial shows how to use an LSTM model with multivariate data and generate predictions from it. For demonstration purposes, we use the open-source [pollution dataset](https://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data). -The tutorial is an illustration of how to use LSTM models with MXNet-R. We forecast air pollution using data recorded at the US embassy in Beijing, China over five years. - -Dataset Attribution: -"PM2.5 data of US Embassy in Beijing" -We want to predict pollution levels (PM2.5 concentration) in the city given the above dataset. 
- -```r -Dataset description: -No: row number -year: year of data in this row -month: month of data in this row -day: day of data in this row -hour: hour of data in this row -pm2.5: PM2.5 concentration -DEWP: Dew Point -TEMP: Temperature -PRES: Pressure -cbwd: Combined wind direction -Iws: Cumulated wind speed -Is: Cumulated hours of snow -Ir: Cumulated hours of rain -``` - -We use past PM2.5 concentration, dew point, temperature, pressure, wind speed, snow and rain to predict -PM2.5 concentration levels. - -Load and pre-process the data ---------- -The first step is to load the data and preprocess it. It is assumed that the data has been downloaded as a .csv file, data.csv, from the [pollution dataset](https://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data) - - ```r -## Loading required packages -library("readr") -library("dplyr") -library("mxnet") -library("abind") - ``` - - - - ```r -## Preprocessing steps -Data <- read.csv(file = "/Users/khedia/Downloads/data.csv", - header = TRUE, - sep = ",") - -## Extract specific features from the dataset as variables for the time series. We extract -## pollution, temperature, pressure, wind speed, snowfall and rainfall information from the dataset -df <- data.frame(Data$pm2.5, - Data$DEWP, - Data$TEMP, - Data$PRES, - Data$Iws, - Data$Is, - Data$Ir) -df[is.na(df)] <- 0 - -## Now we normalise each feature to the range (0, 1) -df <- matrix(as.matrix(df), - ncol = ncol(df), - dimnames = NULL) - -rangenorm <- function(x) { - (x - min(x))/(max(x) - min(x)) -} -df <- apply(df, 2, rangenorm) -df <- t(df) - ``` -To use multidimensional data with MXNet-R, we need to convert the training data to the form -(n_dim x seq_len x num_samples). For the one-to-one RNN flavour, labels should be of the form (seq_len x num_samples), while for the many-to-one flavour the labels should be of the form (1 x num_samples). Please note that MXNet-R currently supports only these two flavours of RNN. -We use n_dim = 7, seq_len = 100, and num_samples = 430: the dataset yields 430 samples, each 100 time stamps long, and we have seven time series as input features, so each input has dimension seven at each time step. - - -```r -n_dim <- 7 -seq_len <- 100 -num_samples <- 430 - -## extract only required data from dataset -trX <- df[1:n_dim, 25:(24 + (seq_len * num_samples))] - -## the label data (next PM2.5 concentration) should be one time step -## ahead of the current PM2.5 concentration -trY <- df[1, 26:(25 + (seq_len * num_samples))] - -## reshape the matrices in the format acceptable by MXNetR RNNs -trainX <- trX -dim(trainX) <- c(n_dim, seq_len, num_samples) -trainY <- trY -dim(trainY) <- c(seq_len, num_samples) -``` - - - -Defining and training the network ---------- - -```r -batch.size <- 32 - -# take first 300 samples for training - remaining 100 for evaluation -train_ids <- 1:300 -eval_ids <- 301:400 - -## The number of samples used for training and evaluation is arbitrary. 
I have kept aside a few -## samples for testing purposes. Create the data iterators: -train.data <- mx.io.arrayiter(data = trainX[, , train_ids, drop = F], - label = trainY[, train_ids], - batch.size = batch.size, shuffle = TRUE) - -eval.data <- mx.io.arrayiter(data = trainX[, , eval_ids, drop = F], - label = trainY[, eval_ids], - batch.size = batch.size, shuffle = FALSE) - -## Create the symbol for RNN -symbol <- rnn.graph(num_rnn_layer = 1, - num_hidden = 5, - input_size = NULL, - num_embed = NULL, - num_decode = 1, - masking = F, - loss_output = "linear", - dropout = 0.2, - ignore_label = -1, - cell_type = "lstm", - output_last_state = T, - config = "one-to-one") - - - -mx.metric.mse.seq <- mx.metric.custom("MSE", function(label, pred) { - label = mx.nd.reshape(label, shape = -1) - pred = mx.nd.reshape(pred, shape = -1) - res <- mx.nd.mean(mx.nd.square(label - pred)) - return(as.array(res)) -}) - - - -ctx <- mx.cpu() - -initializer <- mx.init.Xavier(rnd_type = "gaussian", - factor_type = "avg", - magnitude = 3) - -optimizer <- mx.opt.create("adadelta", - rho = 0.9, - eps = 1e-05, - wd = 1e-06, - clip_gradient = 1, - rescale.grad = 1/batch.size) - -logger <- mx.metric.logger() -epoch.end.callback <- mx.callback.log.train.metric(period = 10, - logger = logger) - -## train the network -system.time(model <- mx.model.buckets(symbol = symbol, - train.data = train.data, - eval.data = eval.data, - num.round = 100, - ctx = ctx, - verbose = TRUE, - metric = mx.metric.mse.seq, - initializer = initializer, - optimizer = optimizer, - batch.end.callback = NULL, - epoch.end.callback = epoch.end.callback)) -``` -Output: -``` -Start training with 1 devices -[1] Train-MSE=0.197570244409144 -[1] Validation-MSE=0.0153861071448773 -[2] Train-MSE=0.0152517843060195 -[2] Validation-MSE=0.0128299412317574 -[3] Train-MSE=0.0124418652616441 -[3] Validation-MSE=0.010827143676579 -[4] Train-MSE=0.0105128229130059 -[4] Validation-MSE=0.00940261723008007 -[5] Train-MSE=0.00914482437074184 -[5] Validation-MSE=0.00830172537826002 -[6] Train-MSE=0.00813581114634871 -[6] Validation-MSE=0.00747016374953091 -[7] Train-MSE=0.00735094994306564 -[7] Validation-MSE=0.00679832429159433 -[8] Train-MSE=0.00672049634158611 -[8] Validation-MSE=0.00623159145470709 -[9] Train-MSE=0.00620287149213254 -[9] Validation-MSE=0.00577476259786636 -[10] Train-MSE=0.00577280316501856 -[10] Validation-MSE=0.00539038667920977 -.......... -.......... -[91] Train-MSE=0.00177705133100972 -[91] Validation-MSE=0.00154715491225943 -[92] Train-MSE=0.00177639147732407 -[92] Validation-MSE=0.00154592350008897 -[93] Train-MSE=0.00177577760769054 -[93] Validation-MSE=0.00154474508599378 -[94] Train-MSE=0.0017752077546902 -[94] Validation-MSE=0.0015436161775142 -[95] Train-MSE=0.00177468206966296 -[95] Validation-MSE=0.00154253660002723 -[96] Train-MSE=0.00177419915562496 -[96] Validation-MSE=0.00154150440357625 -[97] Train-MSE=0.0017737578949891 -[97] Validation-MSE=0.00154051734716631 -[98] Train-MSE=0.00177335749613121 -[98] Validation-MSE=0.00153957353904843 -[99] Train-MSE=0.00177299699280411 -[99] Validation-MSE=0.00153867155313492 -[100] Train-MSE=0.00177267640829086 -[100] Validation-MSE=0.00153781197150238 - - user system elapsed - 21.937 1.914 13.402 -``` -We can see how mean squared error varies with epochs below. - -![png](https://github.com/dmlc/web-data/blob/master/mxnet/doc/tutorials/r/images/loss.png?raw=true) - -Inference on the network ---------- -Now we have trained the network. Let's use it for inference. 
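One practical detail before the inference code: since every feature was scaled to (0, 1) with `rangenorm`, the model's predictions also come out on that scale. A minimal sketch of how you might map them back to raw PM2.5 units; the names `pm25_min`, `pm25_max` and `unnorm` are hypothetical helpers, and strictly the min/max should be taken after the NA-to-zero fill that the preprocessing applies:

```r
# Sketch: invert the (0, 1) range normalisation for the PM2.5 column.
# Compute these from the raw data before rangenorm is applied.
pm25_min <- min(Data$pm2.5, na.rm = TRUE)
pm25_max <- max(Data$pm2.5, na.rm = TRUE)

unnorm <- function(x, lo, hi) x * (hi - lo) + lo

# After the inference loop below has filled `predicted`:
# predicted_raw <- unnorm(predicted, pm25_min, pm25_max)
```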
- -```r -## We extract the state symbols for the RNN -internals <- model$symbol$get.internals() -sym_state <- internals$get.output(which(internals$outputs %in% "RNN_state")) -sym_state_cell <- internals$get.output(which(internals$outputs %in% "RNN_state_cell")) -sym_output <- internals$get.output(which(internals$outputs %in% "loss_output")) -symbol <- mx.symbol.Group(sym_output, sym_state, sym_state_cell) - -## We will predict 100 time stamps for the 401st sample (the first of the test samples) -pred_length <- 100 -predicted <- numeric() - -## We pass the 400th sample through the network to get the RNN states and use them for predicting -## the next 100 time stamps. -data <- mx.nd.array(trainX[, , 400, drop = F]) -label <- mx.nd.array(trainY[, 400, drop = F]) - - -## We create data iterators for the input; note that a label is required to create the -## iterator but is not used in the inference. You can use dummy values in the label. -infer.data <- mx.io.arrayiter(data = data, - label = label, - batch.size = 1, - shuffle = FALSE) - -infer <- mx.infer.rnn.one(infer.data = infer.data, - symbol = symbol, - arg.params = model$arg.params, - aux.params = model$aux.params, - input.params = NULL, - ctx = ctx) -## Once we get the states for the above time series, we try to predict the next 100 steps for -## this time series, which is technically our 401st time series. - -actual <- trainY[, 401] - -## Now we iterate one by one to generate each of the next time stamp pollution values - -for (i in 1:pred_length) { - - data <- mx.nd.array(trainX[, i, 401, drop = F]) - label <- mx.nd.array(trainY[i, 401, drop = F]) - infer.data <- mx.io.arrayiter(data = data, - label = label, - batch.size = 1, - shuffle = FALSE) - ## note that we use rnn state values from previous iterations here - infer <- mx.infer.rnn.one(infer.data = infer.data, - symbol = symbol, - ctx = ctx, - arg.params = model$arg.params, - aux.params = model$aux.params, - input.params = list(rnn.state = infer[[2]], - rnn.state.cell = infer[[3]])) - - pred <- infer[[1]] - predicted <- c(predicted, as.numeric(as.array(pred))) - -} - -``` -Now `predicted` contains the 100 predicted values. We use ggplot to plot the actual and predicted values as shown below. - -![png](https://github.com/dmlc/web-data/blob/master/mxnet/doc/tutorials/r/images/sample_401.png?raw=true) - -We also repeated the above experiment to generate the next 100 samples for the 301st time series and got the following results. - -![png](https://github.com/dmlc/web-data/blob/master/mxnet/doc/tutorials/r/images/sample_301.png?raw=true) - -The above tutorial is just for demonstration purposes and has not been tuned extensively for accuracy. - -For more tutorials on MXNet-R, head over to [MXNet-R tutorials](https://mxnet.apache.org/tutorials/r/index.html) \ No newline at end of file diff --git a/R-package/vignettes/classifyRealImageWithPretrainedModel.Rmd b/R-package/vignettes/classifyRealImageWithPretrainedModel.Rmd deleted file mode 100644 index 9cfdd5a5473f..000000000000 --- a/R-package/vignettes/classifyRealImageWithPretrainedModel.Rmd +++ /dev/null @@ -1,164 +0,0 @@ -# Classify Real-world Images with Pre-trained Model - - -MXNet is a flexible and efficient deep learning framework. One of the cool things that a deep learning -algorithm can do is to classify real-world images. - -In this example we will show how to use a pretrained Inception-BatchNorm network to predict the content of -a real-world image. The network architecture is described in [1]. 
- -The pre-trained Inception-BatchNorm network can be downloaded from [this link](http://data.mxnet.io/mxnet/data/Inception.zip). -This model gives state-of-the-art prediction accuracy on the ImageNet dataset. - -## Package Loading - -To get started, we load the `mxnet` package first. - -```{r} -require(mxnet) -``` - -In this example, we also need the `imager` package to load and preprocess the images in R. - -```{r} -require(imager) -``` - -## Load the Pretrained Model - - -Make sure you unzip the pre-trained model in the current folder. Then we can use the model-loading -function to load the model into R. - -```{r} -download.file('http://data.mxnet.io/mxnet/data/Inception.zip', destfile = 'Inception.zip') -unzip("Inception.zip") -model <- mx.model.load("Inception/Inception_BN", iteration = 39) -``` - -We also need to load the mean image, which is used for preprocessing, with ```mx.nd.load```. - -```{r} -mean.img <- as.array(mx.nd.load("Inception/mean_224.nd")[["mean_img"]]) -``` - -## Load and Preprocess the Image - -Now we are ready to classify a real image. In this example, we simply take the parrots image -from the `imager` package, but you can always change it to other images. - -Load and plot the image: - -```{r, fig.align='center'} -im <- load.image(system.file("extdata/parrots.png", package = "imager")) -plot(im) -``` - -Before feeding the image to the deep net, we need to do some preprocessing -to make the image fit the input requirements of the deep net. The preprocessing -includes cropping and subtracting the mean. -Because mxnet is deeply integrated with R, we can do all the processing in R functions. - -The preprocessing function: - -```{r} -preproc.image <- function(im, mean.image) { - # crop the image - shape <- dim(im) - short.edge <- min(shape[1:2]) - xx <- floor((shape[1] - short.edge) / 2) - yy <- floor((shape[2] - short.edge) / 2) - cropped <- crop.borders(im, xx, yy) - # resize to 224 x 224, as required by the model input - resized <- resize(cropped, 224, 224) - # convert to array (x, y, channel) - arr <- as.array(resized) * 255 - dim(arr) <- c(224, 224, 3) - # subtract the mean - normed <- arr - mean.img - # Reshape to format needed by mxnet (width, height, channel, num) - dim(normed) <- c(224, 224, 3, 1) - return(normed) -} -``` - -We use the defined preprocessing function to get the normalized image. - -```{r} -normed <- preproc.image(im, mean.img) -``` - -## Classify the Image - -Now we are ready to classify the image! We can use the predict function -to get the probability over classes. - -```{r} -prob <- predict(model, X = normed) -dim(prob) -``` - -As you can see, ```prob``` is a 1-by-1000 array, which gives the probability -over the 1000 image classes of the input. - -We can use ```max.col``` on the transpose of ```prob``` to get the class index. - -```{r} -max.idx <- max.col(t(prob)) -max.idx -``` - -The index alone does not make much sense, so let us see what it really corresponds to. -We can read the names of the classes from the following file. - -```{r} -synsets <- readLines("Inception/synset.txt") -``` - -And let us see what it really is: - -```{r} -print(paste0("Predicted Top-class: ", synsets[[max.idx]])) -``` - -Actually, I did not know what the word meant when I first saw it, -so I searched the web to check it out... and it does get the right answer :) - -## Extract features - - -Besides the final classification results, we can also extract the internal features. -We need to get the feature-layer symbol out of the internals first. 
Here we use `global_pool_output` -as an example. - -```{r} -internals = model$symbol$get.internals() -fea_symbol = internals[[match("global_pool_output", internals$outputs)]] -``` - -Next, we build a new model from the feature symbol - -```{r} -model2 <- list(symbol = fea_symbol, - arg.params = model$arg.params, - aux.params = model$aux.params) - -class(model2) <- "MXFeedForwardModel" -``` - -Then we can call `predict` with the new model to get the internal results. -You need to set `allow.extra.params = TRUE` since some parameters are not used this time. - -```{r} -global_pooling_feature <- predict(model2, X = normed, allow.extra.params = TRUE) -dim(global_pooling_feature) -``` - - -## Reference - - -[1] Ioffe, Sergey, and Christian Szegedy. "Batch normalization: Accelerating deep network training by reducing internal covariate shift." arXiv preprint arXiv:1502.03167 (2015). - - - diff --git a/R-package/vignettes/ndarray.Rmd b/R-package/vignettes/ndarray.Rmd deleted file mode 100644 index 08786b25fa86..000000000000 --- a/R-package/vignettes/ndarray.Rmd +++ /dev/null @@ -1,148 +0,0 @@ -# NDArray: Vectorized Tensor Computations on CPUs and GPUs - -`NDArray` is the basic vectorized operation unit in MXNet for matrix and tensor computations. -Users can perform the usual calculations, as on an R array, but with two additional features: - -- Multiple devices: All operations can be run on various devices including -CPUs and GPUs. - -- Automatic parallelization: All operations are automatically executed in - parallel with each other. - -## Create and Initialize - -Let's create an `NDArray` on either a GPU or a CPU: - -```{r} -require(mxnet) -a <- mx.nd.zeros(c(2, 3)) # create a 2-by-3 matrix on cpu -b <- mx.nd.zeros(c(2, 3), mx.cpu()) # create a 2-by-3 matrix on cpu -c <- mx.nd.zeros(c(2, 3), mx.gpu(0)) # create a 2-by-3 matrix on gpu 0, if you have CUDA enabled. -``` - -Typically for CUDA-enabled devices, the device id of a GPU starts from 0. -That's why we passed in 0 to the GPU id. - -We can initialize an `NDArray` object in various ways: - - -```{r} -a <- mx.nd.ones(c(4, 4)) -b <- mx.rnorm(c(4, 5)) -c <- mx.nd.array(1:5) -``` - -To check the numbers in an `NDArray`, we can simply run: - - -```{r} -a <- mx.nd.ones(c(2, 3)) -b <- as.array(a) -class(b) -``` - -```{r} -b -``` - -## Performing Basic Operations - -### Element-wise Operations - -You can perform element-wise operations on `NDArray` objects, as follows: - - -```{r} -a <- mx.nd.ones(c(2, 4)) * 2 -b <- mx.nd.ones(c(2, 4)) / 8 -as.array(a) -``` - -```{r} -as.array(b) -``` - -```{r} -c <- a + b -as.array(c) -``` - -```{r} -d <- c / a - 5 -as.array(d) -``` - -If two `NDArray`s are located on different devices, we need to explicitly move them to the same one. For instance: - - -```{r} -a <- mx.nd.ones(c(2, 3)) * 2 -b <- mx.nd.ones(c(2, 3), mx.gpu()) / 8 -c <- mx.nd.copyto(a, mx.gpu()) * b -as.array(c) -``` - -### Loading and Saving - -You can save a list of `NDArray` objects to your disk with `mx.nd.save`: - - -```{r} -a <- mx.nd.ones(c(2, 3)) -mx.nd.save(list(a), "temp.ndarray") -``` - -You can load it back easily: - - -```{r} -a <- mx.nd.load("temp.ndarray") -as.array(a[[1]]) -``` - -We can directly save data to and load it from a distributed file system, such as Amazon S3 and HDFS: - - -```{r, eval=FALSE} -mx.nd.save(list(a), "s3://mybucket/mydata.bin") -mx.nd.save(list(a), "hdfs///users/myname/mydata.bin") -``` - -## Automatic Parallelization - -`NDArray` can automatically execute operations in parallel. 
Automatic parallelization is useful when -using multiple resources, such as CPU cards, GPU cards, and CPU-to-GPU memory bandwidth. - -For example, if we write `a <- a + 1` followed by `b <- b + 1`, and `a` is on a CPU and -`b` is on a GPU, executing them in parallel improves -efficiency. Furthermore, because copying data between CPUs and GPUs is also expensive, running it in parallel with other computations further increases efficiency. - -It's hard to find by eye the code that can be executed in parallel. In the -following example, `a <- a + 1` and `c <- c * 3` can be executed in parallel, but `a <- a + 1` and -`b <- b * 3` must be executed sequentially, because `b` was created as an alias of `a`. - - -```{r} -a <- mx.nd.ones(c(2,3)) -b <- a -c <- mx.nd.copyto(a, mx.cpu()) -a <- a + 1 -b <- b * 3 -c <- c * 3 -``` - -Luckily, MXNet can automatically resolve the dependencies and -execute operations in parallel accurately. This allows us to write our program assuming there is only a single thread. MXNet will -automatically dispatch the program to multiple devices. - -MXNet achieves this with lazy evaluation. Each operation is issued to an -internal engine, and then returns. For example, if we run `a <- a + 1`, it -returns immediately after pushing the plus operator to the engine. This -asynchronous processing allows us to push more operators to the engine. It determines -the read and write dependencies and the best way to execute them in -parallel. - -The actual computations are finished only when we need the results, for example when copying them someplace else with `as.array(a)` or `mx.nd.save(a, "temp.dat")`. To write highly parallelized code, we only need to postpone when we need -the results. - - diff --git a/README.md b/README.md index e6af7721e24f..9f1d59e7acd4 100644 --- a/README.md +++ b/README.md @@ -76,7 +76,6 @@ What's New * [MXNet.js: Javascript Package for Deep Learning in Browser (without server)](https://github.com/dmlc/mxnet.js/) * [Guide to Creating New Operators (Layers)](https://mxnet.apache.org/api/faq/new_op) * [Go binding for inference](https://github.com/songtianyi/go-mxnet-predictor) -* [Amalgamation and Go Binding for Predictors](https://github.com/jdeng/gomxnet/) - Outdated * [Large Scale Image Classification](https://github.com/apache/incubator-mxnet/tree/master/example/image-classification) Contents diff --git a/amalgamation/.gitignore b/amalgamation/.gitignore deleted file mode 100644 index 318284280c8a..000000000000 --- a/amalgamation/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*-all.cc diff --git a/amalgamation/Makefile b/amalgamation/Makefile deleted file mode 100644 index 55aad1d470a2..000000000000 --- a/amalgamation/Makefile +++ /dev/null @@ -1,143 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements.  See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.  The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License.  You may obtain a copy of the License at -# -#   http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied.  See the License for the -# specific language governing permissions and limitations -# under the License. - -export MXNET_ROOT=`pwd`/..
-export TPARTYDIR=`pwd`/../3rdparty - -# Change this to your path or specify it in the make command -ifndef OPENBLAS_ROOT - export OPENBLAS_ROOT=/usr/local/opt/openblas -endif - -# Whether to use the minimum build without BLAS and SSE; this will make the library super slow -ifndef MIN - export MIN=0 - DEFS=-DMSHADOW_USE_CBLAS=1 -else - DEFS=-DMSHADOW_USE_CBLAS=0 -endif - -ifndef ANDROID - export ANDROID=0 -else - DEFS+=-DMSHADOW_USE_SSE=0 -endif - -# Use locally installed emscripten if not specified -ifndef EMCC - EMCC=emcc -endif - -ifndef DISABLE_OPENMP - DEFS+=-DDISABLE_OPENMP=1 -endif - -.PHONY: all clean - -DEFS+=-DMSHADOW_USE_CUDA=0 -DMSHADOW_USE_MKL=0 -DMSHADOW_RABIT_PS=0 -DMSHADOW_DIST_PS=0 -DDMLC_LOG_STACK_TRACE=0 -DEFS+=-DMSHADOW_FORCE_STREAM -DMXNET_USE_OPENCV=0 -DMXNET_PREDICT_ONLY=1 -DEFS+=-DDMLC_USE_CXX11=1 -DDMLC_USE_CXX11=1 -DDMLC_USE_CXX14=1 -CFLAGS=-std=c++17 -Wno-unknown-pragmas -Wall $(DEFS) - -# If the CPU architecture supports the F16C instruction set, enable USE_F16C for fast fp16 computation on the CPU -ifeq ($(USE_F16C), 1) - CFLAGS+=-mf16c -else - DEFS+=-DMSHADOW_USE_F16C=0 -endif - -ifneq ($(MIN), 1) - CFLAGS += -I${OPENBLAS_ROOT} -I${OPENBLAS_ROOT}/include - LDFLAGS+= -L${OPENBLAS_ROOT} -L${OPENBLAS_ROOT}/lib - -# Define which BLAS is installed. Uses OpenBLAS by default. - ifeq ($(USE_BLAS), atlas) - LDFLAGS += -lcblas - else ifeq ($(USE_BLAS), blas) - LDFLAGS += -lblas - else - LDFLAGS += -lopenblas - endif -endif - - -all: android libmxnet_predict.a ${MXNET_ROOT}/lib/libmxnet_predict.so - -nnvm.d: - ./prep_nnvm.sh - -dmlc.d: dmlc-minimum0.cc - ${CXX} ${CFLAGS} -M -MT dmlc-minimum0.o \ - -I ${TPARTYDIR}/dmlc-core/include \ - -D__MIN__=$(MIN) $+ > dmlc.d - - -mxnet_predict0.d: mxnet_predict0.cc nnvm.d dmlc.d - ${CXX} ${CFLAGS} -M -MT mxnet_predict0.o \ - -I ${MXNET_ROOT}/ -I ${TPARTYDIR}/mshadow/ -I ${TPARTYDIR}/dmlc-core/include -I ${TPARTYDIR}/dmlc-core/src \ - -I ${TPARTYDIR}/tvm/nnvm/include \ - -I ${MXNET_ROOT}/3rdparty/dlpack/include \ - -I ${MXNET_ROOT}/include \ - -D__MIN__=$(MIN) mxnet_predict0.cc > mxnet_predict0.d - cat dmlc.d >> mxnet_predict0.d - cat nnvm.d >> mxnet_predict0.d - -mxnet_predict-all.cc: mxnet_predict0.d dmlc-minimum0.cc nnvm.cc mxnet_predict0.cc - @echo "Generating amalgamation to " $@ - python ./amalgamation.py $+ $@ $(MIN) $(ANDROID) - -mxnet_predict-all.o: mxnet_predict-all.cc - ${CXX} ${CFLAGS} -fPIC -o $@ -c $+ - -libmxnet_predict.a: mxnet_predict-all.o - ar rcs libmxnet_predict.a $+ - -jni_libmxnet_predict.o: mxnet_predict-all.cc jni/predictor.cc - ${CXX} ${CFLAGS} -fPIC -o $@ -c jni/predictor.cc - -jni_libmxnet_predict.so: jni_libmxnet_predict.o - ${CXX} ${CFLAGS} -shared -o $@ $(filter %.o %.a, $^) $(LDFLAGS) - -ifneq ($(ANDROID), 1) - android: -else - CFLAGS+= -O3 - LDFLAGS+= -Wl,--no-warn-mismatch -lm_hard - android: jni_libmxnet_predict.so -endif - -libmxnet_predict.js: mxnet_predict-all.cc - ${EMCC} -std=c++17 -O2 $(DEFS) -DMSHADOW_USE_SSE=0 -D__MXNET_JS__ -o $@ $+ \ - -s EXPORTED_FUNCTIONS="['_MXPredCreate', \ - '_MXPredGetOutputShape', \ - '_MXPredSetInput', \ - '_MXPredForward', \ - '_MXPredPartialForward', \ - '_MXPredGetOutput', \ - '_MXPredFree', \ - '_MXNDListCreate', \ - '_MXNDListGet', \ - '_MXNDListFree']" \ - -s ALLOW_MEMORY_GROWTH=1 - -${MXNET_ROOT}/lib/libmxnet_predict.so: mxnet_predict-all.o - @mkdir -p ${MXNET_ROOT}/lib - ${CXX} ${CFLAGS} -shared -o $@ $(filter %.o %.a, $^) $(LDFLAGS) - ls -alh $@ - -clean: - rm -f *.d *.o *.so *.a *.js *.js.mem mxnet_predict-all.cc nnvm.cc diff --git a/amalgamation/README.md 
b/amalgamation/README.md deleted file mode 100644 index b58776e372aa..000000000000 --- a/amalgamation/README.md +++ /dev/null @@ -1,161 +0,0 @@ - - - - - - - - - - - - - - - - - -MXNet Amalgamation -================== -This folder contains an amalgamation generation script that combines the entire mxnet library into one file. -Currently it supports generation for the [predict API](../include/mxnet/c_predict_api.h), -which allows you to run prediction in a platform-independent way. - -How to Generate the Amalgamation --------------------------------- -Typing ```make``` will generate the following files: -- mxnet_predict-all.cc - - The file you can use to compile the predict API -- ../lib/libmxnet_predict.so - - The dynamic library generated for prediction. - -You can also check out the [Makefile](Makefile) - -Dependency ----------- -The only dependency is a BLAS library. - -Make sure to disable all other dependencies in the `config.mk` file. - -Acknowledgement ---------------- -This module was created by [Jack Deng](https://github.com/jdeng). - -Android ---------------- -Set up the NDK and build your standalone toolchain. [Instructions](http://developer.android.com/ndk/guides/standalone_toolchain.html#itc) Use the Advanced Method!!! In particular set PATH, CC and CXX. The minimum API level required is 16. - -Example: -``` -export PATH=/tmp/my-android-toolchain/bin:$PATH -export CC=arm-linux-androideabi-gcc # or export CC=arm-linux-androideabi-clang -export CXX=arm-linux-androideabi-g++ # or export CXX=arm-linux-androideabi-clang++ -``` - -Build OpenBLAS for Android: [Build OpenBLAS](https://github.com/xianyi/OpenBLAS/wiki/How-to-build-OpenBLAS-for-Android) Please put the OpenBLAS source code outside the mxnet directory. -Modify OPENBLAS_ROOT in the Makefile. -Type ```make ANDROID=1``` - -In most cases you will want to use jni_libmxnet_predict.so. It contains the JNIs. In case you want to build your own JNI, link with libmxnet_predict.o - -You can use the generated library in the [Leliana WhatsThis Android app](https://github.com/Leliana/WhatsThis). Rename jni_libmxnet_predict.so to libmxnet_predict.so and overwrite the default library to use an up-to-date mxnet version. - -Javascript ---------------- -The JS version uses [emscripten](http://kripken.github.io/emscripten-site/) to cross-compile the amalgamation source file into a Javascript library that can be integrated into client-side applications. If you already have emscripten installed, then - -```make clean libmxnet_predict.js MIN=1``` - -otherwise you can use the [emscripten docker image](https://hub.docker.com/r/apiaryio/emcc/) to compile in the following way: - -```make clean libmxnet_predict.js MIN=1 EMCC="docker run -v ${PWD}:/src apiaryio/emcc emcc"``` - -An example WebApp that uses the generated JS library can be found at [mxnet.js](https://github.com/dmlc/mxnet.js) - -iOS ---------------- -[Chinese guide](http://www.liuxiao.org/2015/12/ios-mxnet-%E7%9A%84-ios-%E7%89%88%E6%9C%AC%E7%BC%96%E8%AF%91/) - -Build OpenBLAS for the host machine [Instructions](https://github.com/xianyi/OpenBLAS/wiki/Installation-Guide) -Modify OPENBLAS_ROOT in the Makefile. 
-Type ```make``` -If the build process is successful you will see the following output: -```ar rcs libmxnet_predict.a mxnet_predict-all.o``` - -Modify mxnet_predict-all.cc: - -If present, comment out -``` -#include <cblas.h> -``` - -Add -``` -#include <Accelerate/Accelerate.h> -``` - -Comment all occurrences of -``` -#include <emmintrin.h> -``` - -Change -``` -#if defined(__ANDROID__) || defined(__MXNET_JS__) -#define MSHADOW_USE_SSE 0 -#endif -``` - -To -``` -#define MSHADOW_USE_SSE 0 -``` - -Change -``` -#ifdef __GNUC__ - #define MX_THREAD_LOCAL __thread -#elif __STDC_VERSION__ >= 201112L - #define MX_THREAD_LOCAL _Thread_local -#elif defined(_MSC_VER) - #define MX_THREAD_LOCAL __declspec(thread) -#endif -``` - -To -``` -#define MX_THREAD_LOCAL __declspec(thread) -``` - -**To build arm32 compatible version (e.g. iPhone 5):** - -Change -``` -typedef mxnet::common::ThreadLocalStore MXAPIErrorStore; - -const char *MXGetLastError() { - return MXAPIErrorStore::Get()->last_error.c_str(); -} - -void MXAPISetLastError(const char* msg) { - MXAPIErrorStore::Get()->last_error = msg; -} -``` - -To -``` -//typedef mxnet::common::ThreadLocalStore MXAPIErrorStore; - -const char *MXGetLastError() { - //return MXAPIErrorStore::Get()->last_error.c_str(); - return ""; -} - -void MXAPISetLastError(const char* msg) { - //MXAPIErrorStore::Get()->last_error = msg; - (void) msg; -} -``` - -You can use the modified mxnet_predict-all.cc in the [PPPOE WhatsThis iOS app](https://github.com/pppoe/WhatsThis-iOS). - diff --git a/amalgamation/amalgamation.py b/amalgamation/amalgamation.py deleted file mode 100644 index cb961c699fe8..000000000000 --- a/amalgamation/amalgamation.py +++ /dev/null @@ -1,236 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -from __future__ import print_function - -import sys -import os.path, re -from io import BytesIO, StringIO -import platform - -blacklist = [ - 'Windows.h', 'cublas_v2.h', 'cuda/tensor_gpu-inl.cuh', 'cuda_runtime.h', 'cudnn.h', - 'cudnn_lrn-inl.h', 'curand.h', 'curand_kernel.h', 'glog/logging.h', 'io/azure_filesys.h', - 'io/hdfs_filesys.h', 'io/s3_filesys.h', 'kvstore_dist.h', 'mach/clock.h', 'mach/mach.h', - 'malloc.h', 'mkl.h', 'mkl_cblas.h', 'mkl_vsl.h', 'mkl_vsl_functions.h', 'NvInfer.h', 'nvml.h', - 'opencv2/opencv.hpp', 'sys/stat.h', 'sys/types.h', 'cuda.h', 'cuda_fp16.h', 'omp.h', - 'onnx/onnx.pb.h', 'execinfo.h', 'packet/sse-inl.h', 'emmintrin.h', 'thrust/device_vector.h', - 'cusolverDn.h', 'internal/concurrentqueue_internal_debug.h', 'relacy/relacy_std.hpp', - 'relacy_shims.h', 'ittnotify.h', 'nvToolsExt.h', 'dmlc/build_config.h', - 'sys/isa_defs.h' - ] - -minimum = int(sys.argv[6]) if len(sys.argv) > 6 else 0 -android = int(sys.argv[7]) if len(sys.argv) > 7 else 0 - -# blacklist linear algebra headers when building without blas. 
diff --git a/amalgamation/amalgamation.py b/amalgamation/amalgamation.py deleted file mode 100644 index cb961c699fe8..000000000000 --- a/amalgamation/amalgamation.py +++ /dev/null @@ -1,236 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -from __future__ import print_function - -import sys -import os.path, re -from io import BytesIO, StringIO -import platform - -blacklist = [ - 'Windows.h', 'cublas_v2.h', 'cuda/tensor_gpu-inl.cuh', 'cuda_runtime.h', 'cudnn.h', - 'cudnn_lrn-inl.h', 'curand.h', 'curand_kernel.h', 'glog/logging.h', 'io/azure_filesys.h', - 'io/hdfs_filesys.h', 'io/s3_filesys.h', 'kvstore_dist.h', 'mach/clock.h', 'mach/mach.h', - 'malloc.h', 'mkl.h', 'mkl_cblas.h', 'mkl_vsl.h', 'mkl_vsl_functions.h', 'NvInfer.h', 'nvml.h', - 'opencv2/opencv.hpp', 'sys/stat.h', 'sys/types.h', 'cuda.h', 'cuda_fp16.h', 'omp.h', - 'onnx/onnx.pb.h', 'execinfo.h', 'packet/sse-inl.h', 'emmintrin.h', 'thrust/device_vector.h', - 'cusolverDn.h', 'internal/concurrentqueue_internal_debug.h', 'relacy/relacy_std.hpp', - 'relacy_shims.h', 'ittnotify.h', 'nvToolsExt.h', 'dmlc/build_config.h', - 'sys/isa_defs.h' - ] - -minimum = int(sys.argv[6]) if len(sys.argv) > 6 else 0 -android = int(sys.argv[7]) if len(sys.argv) > 7 else 0 - -# blacklist linear algebra headers when building without blas. -if minimum != 0: - blacklist.append('linalg.h') - -if platform.system() != 'Darwin': - blacklist.append('TargetConditionals.h') - -if platform.system() != 'Windows': - blacklist.append('windows.h') - blacklist.append('process.h') - blacklist.append('Shlwapi.h') - -if platform.system() == 'Windows': - blacklist.append('unistd.h') - -if 'freebsd' not in sys.platform: - blacklist.append('sys/endian.h') - - - -def get_sources(def_file): - sources = [] - files = [] - visited = set() - mxnet_path = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) - for line in open(def_file): - files = files + line.strip().split(' ') - - for f in files: - f = f.strip() - if not f or f.endswith('.o:') or f == '\\': continue - f = os.path.realpath(f) - fn = os.path.relpath(f) - if f.startswith(mxnet_path) and fn not in visited: - sources.append(fn) - visited.add(fn) - return sources - - -sources = get_sources(sys.argv[1]) - - -def find_source(name, start, stage): - candidates = [] - for x in sources: - if x == name: - candidates.append(x) - elif name.endswith(".cc") and x.endswith('/' + name): - if x.startswith("../" + stage): - candidates.append(x) - elif x.endswith('/' + name): - candidates.append(x) - #if x == name or x.endswith('/' + name): candidates.append(x) - if not candidates: return '' - if len(candidates) == 1: return candidates[0] - for x in candidates: - if '3rdparty' in x: - # make sure to compare the directory name after 3rdparty - if x.split('/')[2] == start.split('/')[2]: return x - else: - if x.split('/')[1] == start.split('/')[1]: return x - return '' - - -re1 = re.compile('<([./a-zA-Z0-9_-]*)>') -re2 = re.compile('"([./a-zA-Z0-9_-]*)"') -re3 = re.compile('DMLC_EXECINFO_H') - -sysheaders = [] -history = set([]) -out = BytesIO() - - -def expand(x, pending, stage): - """ - Expand the pending files in the current stage. - - Parameters - ---------- - x: str - The file to expand. - pending : list of str - The list of pending files to expand. - stage: str - The current stage for file expansion, used for matching the prefix of files.
- """ - if x in history and x not in ['mshadow/mshadow/expr_scalar-inl.h']: # MULTIPLE includes - return - - if x in pending: - #print('loop found: {} in {}'.format(x, pending)) - return - - whtspace = ' ' * expand.treeDepth - expand.fileCount += 1 - comment = u"//=====[{:3d}] STAGE:{:>4} {}EXPANDING: {} =====\n\n".format(expand.fileCount, stage, whtspace, x) - out.write(comment.encode('ascii')) - print(comment) - - with open(x, 'rb') as x_h: - for line in x_h.readlines(): - uline = line.decode('utf-8') - if '#define DMLC_LOG_STACK_TRACE 1' in uline.strip(): - # Do not enable stacktrace logging - continue - if uline.find('#include') < 0: - out.write(line) - continue - if uline.strip().find('#include') > 0: - print(uline) - continue - m = re1.search(uline) - if not m: - m = re2.search(uline) - if m: - path = m.groups()[0] - else: - m = re3.search(uline) - if m: - path = 'execinfo.h' - else: - print(uline + ' not found') - continue - h = path.strip('./') if "../3rdparty/" not in path else path - if h.endswith('complex.h') and x.endswith('openblas_config.h'): - source = '' - elif h.startswith('ps/'): - source = '../3rdparty/ps-lite/include/' + h - else: - source = find_source(h, x, stage) - if not source: - if (h not in blacklist and - h not in sysheaders and - 'mkl' not in h and - 'nnpack' not in h and - 'tensorrt' not in h and - not h.endswith('.cuh')): sysheaders.append(h) - else: - expand.treeDepth += 1 - expand(source, pending + [x], stage) - expand.treeDepth -= 1 - - out.write(u"//===== EXPANDED : {} =====\n\n".format(x).encode('ascii')) - history.add(x) - - -# Vars to keep track of number of files expanded. -# Used in printing informative comments. -expand.treeDepth = 0 -expand.fileCount = 0 - -# Expand the stages -expand(sys.argv[2], [], "3rdparty/dmlc-core") -expand(sys.argv[3], [], "3rdparty/tvm/nnvm") -expand(sys.argv[4], [], "src") - -# Write to amalgamation file -with open(sys.argv[5], 'wb') as f: - - if minimum != 0: - sysheaders.remove('cblas.h') - f.write(b"#define MSHADOW_STAND_ALONE 1\n") - f.write(b"#define MSHADOW_USE_SSE 0\n") - f.write(b"#define MSHADOW_USE_CBLAS 0\n") - - f.write( - b""" -#if defined(__MACH__) -#include -#include -#endif - -#if !defined(__WIN32__) -#include -#include - -#if !defined(__ANDROID__) && (!defined(MSHADOW_USE_SSE) || MSHADOW_USE_SSE == 1) -#include -#endif - -#endif -\n""" - ) - - if minimum != 0 and android != 0 and 'complex.h' not in sysheaders: - sysheaders.append('complex.h') - - for k in sorted(sysheaders): - f.write("#include <{}>\n".format(k).encode('ascii')) - - f.write(b'\n') - f.write(out.getvalue()) - f.write(b'\n') - -for src in sources: - if src not in history and not src.endswith('.o'): - print('Not processed:', src) diff --git a/amalgamation/dmlc-minimum0.cc b/amalgamation/dmlc-minimum0.cc deleted file mode 100644 index 2fe629b20ed3..000000000000 --- a/amalgamation/dmlc-minimum0.cc +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
diff --git a/amalgamation/dmlc-minimum0.cc b/amalgamation/dmlc-minimum0.cc deleted file mode 100644 index 2fe629b20ed3..000000000000 --- a/amalgamation/dmlc-minimum0.cc +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright 2015 by Contributors. - * \brief Minimum DMLC library amalgamation, used for easy plugin of the dmlc lib. - * Normally this is not needed. - */ -#include "../3rdparty/dmlc-core/src/io/line_split.cc" -#include "../3rdparty/dmlc-core/src/io/recordio_split.cc" -#include "../3rdparty/dmlc-core/src/io/indexed_recordio_split.cc" -#include "../3rdparty/dmlc-core/src/io/input_split_base.cc" -#include "../3rdparty/dmlc-core/src/io/local_filesys.cc" -#include "../3rdparty/dmlc-core/src/data.cc" -#include "../3rdparty/dmlc-core/src/io.cc" -#include "../3rdparty/dmlc-core/src/io/filesys.cc" -#include "../3rdparty/dmlc-core/src/recordio.cc" - - diff --git a/amalgamation/jni/org/dmlc/mxnet/MxnetException.java b/amalgamation/jni/org/dmlc/mxnet/MxnetException.java deleted file mode 100644 index 08d80d683a4a..000000000000 --- a/amalgamation/jni/org/dmlc/mxnet/MxnetException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.dmlc.mxnet; - -public class MxnetException extends Exception { - public MxnetException(){} - public MxnetException(String txt) { - super(txt); - } -} - diff --git a/amalgamation/jni/org/dmlc/mxnet/Predictor.java b/amalgamation/jni/org/dmlc/mxnet/Predictor.java deleted file mode 100644 index 53152dcf7436..000000000000 --- a/amalgamation/jni/org/dmlc/mxnet/Predictor.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
*/ - -package org.dmlc.mxnet; - -import android.graphics.Bitmap; -import android.graphics.Color; - -public class Predictor { - static { - System.loadLibrary("mxnet_predict"); - } - - public static class InputNode { - String key; - int[] shape; - public InputNode(String key, int[] shape) { - this.key = key; - this.shape = shape; - } - } - - public static class Device { - public enum Type { - CPU, GPU, CPU_PINNED - } - - public Device(Type t, int i) { - this.type = t; - this.id = i; - } - - Type type; - int id; - int ctype() { - return this.type == Type.CPU? 1: this.type == Type.GPU? 2: 3; - } - } - - private long handle = 0; - - public Predictor(byte[] symbol, byte[] params, Device dev, InputNode[] input) { - String[] keys = new String[input.length]; - int[][] shapes = new int[input.length][]; - for (int i = 0; i < input.length; ++i) { - keys[i] = input[i].key; - shapes[i] = input[i].shape; - } - this.handle = createPredictor(symbol, params, dev.ctype(), dev.id, keys, shapes); - } - - private native static long createPredictor(byte[] symbol, byte[] params, int devType, int devId, String[] keys, int[][] shapes); - private native static void nativeFree(long handle); - private native static float[] nativeGetOutput(long handle, int index); - private native static void nativeForward(long handle, String key, float[] input); -} - diff --git a/amalgamation/jni/org_dmlc_mxnet_Predictor.h b/amalgamation/jni/org_dmlc_mxnet_Predictor.h deleted file mode 100644 --- a/amalgamation/jni/org_dmlc_mxnet_Predictor.h +++ /dev/null -/* DO NOT EDIT THIS FILE - it is machine generated */ -#include <jni.h> -/* Header for class org_dmlc_mxnet_Predictor */ - -#ifndef _Included_org_dmlc_mxnet_Predictor -#define _Included_org_dmlc_mxnet_Predictor -#ifdef __cplusplus -extern "C" { -#endif -/* - * Class: org_dmlc_mxnet_Predictor - * Method: createPredictor - * Signature: ([B[BII[Ljava/lang/String;[[I)J - */ -JNIEXPORT jlong JNICALL Java_org_dmlc_mxnet_Predictor_createPredictor - (JNIEnv *, jclass, jbyteArray, jbyteArray, jint, jint, jobjectArray, jobjectArray); - -/* - * Class: org_dmlc_mxnet_Predictor - * Method: nativeFree - * Signature: (J)V - */ -JNIEXPORT void JNICALL Java_org_dmlc_mxnet_Predictor_nativeFree - (JNIEnv *, jclass, jlong); - -/* - * Class: org_dmlc_mxnet_Predictor - * Method: nativeGetOutput - * Signature: (JI)[F - */ -JNIEXPORT jfloatArray JNICALL Java_org_dmlc_mxnet_Predictor_nativeGetOutput - (JNIEnv *, jclass, jlong, jint); - -/* - * Class: org_dmlc_mxnet_Predictor - * Method: nativeForward - * Signature: (JLjava/lang/String;[F)V - */ -JNIEXPORT void JNICALL Java_org_dmlc_mxnet_Predictor_nativeForward - (JNIEnv *, jclass, jlong, jstring, jfloatArray); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/amalgamation/jni/predictor.cc b/amalgamation/jni/predictor.cc deleted file mode 100644 index 1936daf99f3d..000000000000 --- a/amalgamation/jni/predictor.cc +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -#include -#include "org_dmlc_mxnet_Predictor.h" - -#include "../mxnet_predict-all.cc" - -JNIEXPORT jlong JNICALL Java_org_dmlc_mxnet_Predictor_createPredictor - (JNIEnv *env, jclass, jbyteArray jsymbol, jbyteArray jparams, jint devType, jint devId, jobjectArray jkeys, jobjectArray jshapes) -{ - jbyte* symbol = env->GetByteArrayElements(jsymbol, 0); - jbyte* params = env->GetByteArrayElements(jparams, 0); - jsize params_len = env->GetArrayLength(jparams); - - std::vector> track; - std::vector keys; - for (int i=0; iGetArrayLength(jkeys); i++) { - jstring js = (jstring) env->GetObjectArrayElement(jkeys, i); - const char *s = env->GetStringUTFChars(js, 0); - keys.emplace_back(s); - track.emplace_back(js, s); - } - - std::vector index; - std::vector shapes; - mx_uint prev = 0; - index.emplace_back(prev); - for (int i=0; iGetArrayLength(jshapes); i++) { - jintArray jshape = (jintArray) env->GetObjectArrayElement(jshapes, i); - jsize shape_len = env->GetArrayLength(jshape); - jint *shape = env->GetIntArrayElements(jshape, 0); - - prev += shape_len; - index.emplace_back(prev); - for (int j=0; jReleaseIntArrayElements(jshape, shape, 0); - } - - PredictorHandle handle = 0; - if (MXPredCreate((const char *)symbol, (const char *)params, params_len, devType, devId, (mx_uint)keys.size(), &(keys[0]), &(index[0]), &(shapes[0]), &handle) < 0) { - jclass MxnetException = env->FindClass("org/dmlc/mxnet/MxnetException"); - env->ThrowNew(MxnetException, MXGetLastError()); - } - - env->ReleaseByteArrayElements(jsymbol, symbol, 0); - env->ReleaseByteArrayElements(jparams, params, 0); - for (auto& t: track) { - env->ReleaseStringUTFChars(t.first, t.second); - } - - return (jlong)handle; -} - -JNIEXPORT void JNICALL Java_org_dmlc_mxnet_Predictor_nativeFree - (JNIEnv *, jclass, jlong h) -{ - PredictorHandle handle = (PredictorHandle)h; - MXPredFree(handle); -} - -JNIEXPORT jfloatArray JNICALL Java_org_dmlc_mxnet_Predictor_nativeGetOutput - (JNIEnv *env, jclass, jlong h, jint index) -{ - PredictorHandle handle = (PredictorHandle)h; - - mx_uint *shape = 0; - mx_uint shape_len; - if (MXPredGetOutputShape(handle, index, &shape, &shape_len) < 0) { - jclass MxnetException = env->FindClass("org/dmlc/mxnet/MxnetException"); - env->ThrowNew(MxnetException, MXGetLastError()); - } - - size_t size = 1; - for (mx_uint i=0; i data(size); - if (MXPredGetOutput(handle, index, &(data[0]), size) < 0) { - jclass MxnetException = env->FindClass("org/dmlc/mxnet/MxnetException"); - env->ThrowNew(MxnetException, MXGetLastError()); - } - - jfloatArray joutput = env->NewFloatArray(size); - jfloat *out = env->GetFloatArrayElements(joutput, NULL); - - for (int i=0; iReleaseFloatArrayElements(joutput, out, 0); - - return joutput; -} - -JNIEXPORT void JNICALL Java_org_dmlc_mxnet_Predictor_nativeForward - (JNIEnv *env, jclass, jlong h, jstring jkey, jfloatArray jinput) -{ - PredictorHandle handle = (PredictorHandle)h; - const char *key = env->GetStringUTFChars(jkey, 0); - jfloat* input = env->GetFloatArrayElements(jinput, 0); - jsize input_len = env->GetArrayLength(jinput); - - if (MXPredSetInput(handle, key, input, input_len) < 0) { - jclass MxnetException = env->FindClass("org/dmlc/mxnet/MxnetException"); - env->ThrowNew(MxnetException, MXGetLastError()); - } - - env->ReleaseStringUTFChars(jkey, key); - env->ReleaseFloatArrayElements(jinput, input, 0); - if (MXPredForward(handle) < 0) { - jclass MxnetException = env->FindClass("org/dmlc/mxnet/MxnetException"); - env->ThrowNew(MxnetException, MXGetLastError()); - } -} - - diff --git 
a/amalgamation/mxnet_predict0.cc b/amalgamation/mxnet_predict0.cc deleted file mode 100644 index f9bf45adfa19..000000000000 --- a/amalgamation/mxnet_predict0.cc +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -// mxnet.cc - -#define MSHADOW_FORCE_STREAM - -#ifndef MSHADOW_USE_CBLAS -#if (__MIN__ == 1) -#define MSHADOW_USE_CBLAS 0 -#else -#define MSHADOW_USE_CBLAS 1 -#endif -#endif - -#define MSHADOW_USE_CUDA 0 -#define MSHADOW_USE_MKL 0 -#define MSHADOW_RABIT_PS 0 -#define MSHADOW_DIST_PS 0 - -#if defined(__ANDROID__) || defined(__MXNET_JS__) -#define MSHADOW_USE_SSE 0 -#endif - -#define MXNET_USE_OPENCV 0 -#define MXNET_PREDICT_ONLY 1 -#define DISABLE_OPENMP 1 -#define DMLC_LOG_STACK_TRACE 0 - -#include "src/common/utils.cc" - -#include "src/ndarray/ndarray_function.cc" -#include "src/ndarray/ndarray.cc" - -#include "src/imperative/imperative.cc" -#include "src/imperative/imperative_utils.cc" -#include "src/imperative/cached_op.cc" - -#include "src/engine/engine.cc" -#include "src/engine/naive_engine.cc" -#include "src/engine/openmp.cc" - -#include "src/profiler/profiler.cc" -#include "src/profiler/aggregate_stats.cc" - -#include "src/executor/graph_executor.cc" -#include "src/executor/attach_op_execs_pass.cc" -#include "src/executor/attach_op_resource_pass.cc" -#include "src/executor/inplace_addto_detect_pass.cc" -#include "src/executor/infer_graph_attr_pass.cc" - -#include "src/nnvm/legacy_json_util.cc" -#include "src/nnvm/legacy_op_util.cc" -#include "src/nnvm/graph_editor.cc" - -#include "src/operator/operator.cc" -#include "src/operator/operator_util.cc" -#include "src/operator/nn/activation.cc" -#include "src/operator/nn/batch_norm.cc" -#include "src/operator/nn/concat.cc" -#include "src/operator/nn/convolution.cc" -#include "src/operator/nn/deconvolution.cc" -#include "src/operator/nn/dropout.cc" -#include "src/operator/nn/fully_connected.cc" -#include "src/operator/leaky_relu.cc" -#include "src/operator/nn/pooling.cc" -#include "src/operator/nn/softmax_activation.cc" -#include "src/operator/tensor/elemwise_binary_broadcast_op_basic.cc" -#include "src/operator/tensor/elemwise_binary_op.cc" -#include "src/operator/tensor/elemwise_binary_op_basic.cc" -#include "src/operator/tensor/elemwise_binary_scalar_op_basic.cc" -#include "src/operator/tensor/elemwise_unary_op_basic.cc" -#include "src/operator/tensor/elemwise_unary_op_trig.cc" -#include "src/operator/tensor/matrix_op.cc" - -#include "src/storage/storage.cc" - -#include "src/resource.cc" -#include "src/initialize.cc" - -#include "src/c_api/c_predict_api.cc" -#include "src/c_api/c_api_symbolic.cc" -#include "src/c_api/c_api_ndarray.cc" -#include "src/c_api/c_api_error.cc" -#include "src/c_api/c_api_profile.cc" - diff --git 
a/amalgamation/prep_nnvm.sh b/amalgamation/prep_nnvm.sh deleted file mode 100755 index a8f63b6b4b1a..000000000000 --- a/amalgamation/prep_nnvm.sh +++ /dev/null @@ -1,45 +0,0 @@ -#! /bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -DMLC_CORE=$(pwd)/../3rdparty/dmlc-core -cd ../3rdparty/tvm/nnvm/amalgamation -make clean -make DMLC_CORE_PATH=$DMLC_CORE nnvm.d -cp nnvm.d ../../../../amalgamation/ -echo '#define MSHADOW_FORCE_STREAM - -#ifndef MSHADOW_USE_CBLAS -#if (__MIN__ == 1) -#define MSHADOW_USE_CBLAS 0 -#else -#define MSHADOW_USE_CBLAS 1 -#endif -#endif -#define MSHADOW_USE_CUDA 0 -#define MSHADOW_USE_MKL 0 -#define MSHADOW_RABIT_PS 0 -#define MSHADOW_DIST_PS 0 -#define DMLC_LOG_STACK_TRACE 0 - -#include "mshadow/tensor.h" -#include "mxnet/base.h" -#include "dmlc/json.h" -#include "mxnet/tensor_blob.h"' > temp -cat nnvm.cc >> temp -mv temp ../../../../amalgamation/nnvm.cc diff --git a/amalgamation/python/mxnet_predict.py b/amalgamation/python/mxnet_predict.py deleted file mode 100644 index 4f896846d92f..000000000000 --- a/amalgamation/python/mxnet_predict.py +++ /dev/null @@ -1,377 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# coding: utf-8 -# pylint: disable=invalid-name, too-many-arguments -"""Lightweight API for mxnet prediction. - -This is for prediction only, use mxnet python package instead for most tasks. -""" -from __future__ import absolute_import - -import os -import sys -from array import array -import ctypes -import logging -import numpy as np - -# pylint: disable= no-member -_DTYPE_NP_TO_MX = { - None: -1, - np.float32: 0, - np.float64: 1, - np.float16: 2, - np.uint8: 3, - np.int32: 4, - np.int8: 5, - np.int64: 6, -} - -_DTYPE_MX_TO_NP = { - -1: None, - 0: np.float32, - 1: np.float64, - 2: np.float16, - 3: np.uint8, - 4: np.int32, - 5: np.int8, - 6: np.int64, -} - -__all__ = ["Predictor", "load_ndarray_file"] - - -py_str = lambda x: x.decode('utf-8') - - -def c_str_array(strings): - """Create ctypes const char ** from a list of Python strings. 
- - Parameters - ---------- - strings : list of string - Python strings. - - Returns - ------- - (ctypes.c_char_p * len(strings)) - A const char ** pointer that can be passed to C API. - """ - arr = (ctypes.c_char_p * len(strings))() - arr[:] = [s.encode('utf-8') for s in strings] - return arr - - -def c_str(string): - """Convert a Python string to a C string.""" - if not isinstance(string, str): - string = string.decode('ascii') - return ctypes.c_char_p(string.encode('utf-8')) - - -def c_array(ctype, values): - """Create a ctypes array from a Python array.""" - return (ctype * len(values))(*values) - -def c_array_buf(ctype, buf): - """Create a ctypes array from a Python buffer.""" - return (ctype * len(buf)).from_buffer(buf) - - - -def _find_lib_path(): - """Find the mxnet library.""" - curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) - amalgamation_lib_path = os.path.join(curr_path, '../../lib/libmxnet_predict.so') - if os.path.exists(amalgamation_lib_path) and os.path.isfile(amalgamation_lib_path): - lib_path = [amalgamation_lib_path] - return lib_path - else: - logging.info('Cannot find libmxnet_predict.so; will search for the MXNet library via libinfo.py instead.') - try: - from mxnet.libinfo import find_lib_path - lib_path = find_lib_path() - return lib_path - except ImportError: - libinfo_path = os.path.join(curr_path, '../../python/mxnet/libinfo.py') - if os.path.exists(libinfo_path) and os.path.isfile(libinfo_path): - libinfo = {'__file__': libinfo_path} - exec(compile(open(libinfo_path, "rb").read(), libinfo_path, 'exec'), libinfo, libinfo) - lib_path = libinfo['find_lib_path']() - return lib_path - else: - raise RuntimeError('Cannot find libinfo.py at %s.' % libinfo_path) - - -def _load_lib(): - """Load the library by searching the possible paths.""" - lib_path = _find_lib_path() - lib = ctypes.cdll.LoadLibrary(lib_path[0]) - # declare the return type of the error-reporting call - lib.MXGetLastError.restype = ctypes.c_char_p - return lib - - -def _check_call(ret): - """Check the return value of a C API call.""" - if ret != 0: - raise RuntimeError(py_str(_LIB.MXGetLastError())) - - -def _monitor_callback_wrapper(callback): - """A wrapper for the user-defined handle.""" - def callback_handle(name, array, _): - """ ctypes function """ - callback(name, array) - return callback_handle - -_LIB = _load_lib() -# type definitions -mx_uint = ctypes.c_uint -mx_int = ctypes.c_int -mx_float = ctypes.c_float -mx_float_p = ctypes.POINTER(mx_float) -PredictorHandle = ctypes.c_void_p -NDListHandle = ctypes.c_void_p - -devstr2type = {'cpu': 1, 'gpu': 2, 'cpu_pinned': 3}
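Before the Predictor class that follows, a quick hypothetical sketch (not part of the original module) of how the helpers above combine to describe one float32 input named 'data'; the name, dtype and 1x3x224x224 shape are illustrative:

```
# Illustrative only: encode one input the way Predictor.__init__ below builds
# its keys/indptr/sdata and provided-type arguments.
example_keys = c_str_array(['data'])                # const char **
example_indptr = c_array(mx_uint, [0, 4])           # offsets into the flat shape data
example_sdata = c_array(mx_uint, [1, 3, 224, 224])  # flattened shape data
example_types = c_array_buf(ctypes.c_int, array('i', [0]))  # 0 == np.float32 in _DTYPE_NP_TO_MX
```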
-class Predictor(object): - """A predictor class that runs prediction. - - Parameters - ---------- - symbol_file : str - The symbol file's JSON content (a JSON string, not a path). - - param_raw_bytes : str, bytes - The raw parameter bytes. - - input_shapes : dict of str to tuple - The shape of the input data. - - dev_type : str, optional - The device type of the predictor. - - dev_id : int, optional - The device id of the predictor. - - type_dict : dict of str to numpy.dtype - Input type dictionary, name->dtype - """ - def __init__(self, symbol_file, - param_raw_bytes, input_shapes, - dev_type="cpu", dev_id=0, type_dict=None): - dev_type = devstr2type[dev_type] - indptr = [0] - sdata = [] - keys = [] - for k, v in input_shapes.items(): - if not isinstance(v, tuple): - raise ValueError("Expect input_shapes to be dict str->tuple") - keys.append(c_str(k)) - sdata.extend(v) - indptr.append(len(sdata)) - handle = PredictorHandle() - param_raw_bytes = bytearray(param_raw_bytes) - ptr = (ctypes.c_char * len(param_raw_bytes)).from_buffer(param_raw_bytes) - - # data types - num_provided_arg_types = 0 - # provided type argument names - provided_arg_type_names = ctypes.POINTER(ctypes.c_char_p)() - # provided types - provided_arg_type_data = ctypes.POINTER(mx_uint)() - if type_dict is not None: - provided_arg_type_names = [] - provided_arg_type_data = [] - for k, v in type_dict.items(): - v = np.dtype(v).type - if v in _DTYPE_NP_TO_MX: - provided_arg_type_names.append(k) - provided_arg_type_data.append(_DTYPE_NP_TO_MX[v]) - num_provided_arg_types = mx_uint(len(provided_arg_type_names)) - provided_arg_type_names = c_str_array(provided_arg_type_names) - provided_arg_type_data = c_array_buf(ctypes.c_int, array('i', provided_arg_type_data)) - - _check_call(_LIB.MXPredCreateEx( - c_str(symbol_file), - ptr, len(param_raw_bytes), - ctypes.c_int(dev_type), ctypes.c_int(dev_id), - mx_uint(len(indptr) - 1), - c_array(ctypes.c_char_p, keys), - c_array(mx_uint, indptr), - c_array(mx_uint, sdata), - num_provided_arg_types, - provided_arg_type_names, - provided_arg_type_data, - ctypes.byref(handle))) - self.type_dict = type_dict - self.handle = handle - - def __del__(self): - _check_call(_LIB.MXPredFree(self.handle)) - - def forward(self, **kwargs): - """Perform a forward pass to get the output. - - Parameters - ---------- - **kwargs - Keyword arguments of input variable name to data. - - Examples - -------- - >>> predictor.forward(data=mydata) - >>> out = predictor.get_output(0) - """ - if self.type_dict and len(self.type_dict) != len(kwargs.items()): - raise ValueError("The number of kwargs should be the same as the length of type_dict. " - "Please check your forward-pass inputs " - "or the type_dict passed to the Predictor instantiation.") - - for k, v in kwargs.items(): - if not isinstance(v, np.ndarray): - raise ValueError("Expect numpy ndarray as input") - if self.type_dict and k in self.type_dict: - v = np.asarray(v, dtype=self.type_dict[k], order='C') - else: - v = np.asarray(v, dtype=np.float32, order='C') - _check_call(_LIB.MXPredSetInput( - self.handle, c_str(k), - v.ctypes.data_as(mx_float_p), - mx_uint(v.size))) - _check_call(_LIB.MXPredForward(self.handle)) - - def reshape(self, input_shapes): - """Change the input shape of the predictor. - - Parameters - ---------- - input_shapes : dict of str to tuple - The new shape of the input data.
- - Examples - -------- - >>> predictor.reshape({'data':data_shape_tuple}) - """ - indptr = [0] - sdata = [] - keys = [] - for k, v in input_shapes.items(): - if not isinstance(v, tuple): - raise ValueError("Expect input_shapes to be dict str->tuple") - keys.append(c_str(k)) - sdata.extend(v) - indptr.append(len(sdata)) - - new_handle = PredictorHandle() - _check_call(_LIB.MXPredReshape( - mx_uint(len(indptr) - 1), - c_array(ctypes.c_char_p, keys), - c_array(mx_uint, indptr), - c_array(mx_uint, sdata), - self.handle, - ctypes.byref(new_handle))) - _check_call(_LIB.MXPredFree(self.handle)) - self.handle = new_handle - - def get_output(self, index): - """Get the index-th output. - - Parameters - ---------- - index : int - The index of output. - - Returns - ------- - out : numpy array. - The output array. - """ - pdata = ctypes.POINTER(mx_uint)() - ndim = mx_uint() - out_type = mx_int() - _check_call(_LIB.MXPredGetOutputShape( - self.handle, index, - ctypes.byref(pdata), - ctypes.byref(ndim))) - _check_call(_LIB.MXPredGetOutputType( - self.handle, index, - ctypes.byref(out_type))) - shape = tuple(pdata[:ndim.value]) - data = np.empty(shape, dtype=_DTYPE_MX_TO_NP[out_type.value]) - _check_call(_LIB.MXPredGetOutput( - self.handle, mx_uint(index), - data.ctypes.data_as(mx_float_p), - mx_uint(data.size))) - return data - - def set_monitor_callback(self, callback, monitor_all=False): - cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p) - self._monitor_callback = cb_type(_monitor_callback_wrapper(callback)) - _check_call(_LIB.MXPredSetMonitorCallback(self.handle, - self._monitor_callback, - None, - ctypes.c_int(monitor_all))) - - -def load_ndarray_file(nd_bytes): - """Load ndarray file and return as list of numpy array. - - Parameters - ---------- - nd_bytes : str or bytes - The internal ndarray bytes - - Returns - ------- - out : dict of str to numpy array or list of numpy array - The output list or dict, depending on whether the saved type is list or dict. 
- """ - handle = NDListHandle() - olen = mx_uint() - nd_bytes = bytearray(nd_bytes) - ptr = (ctypes.c_char * len(nd_bytes)).from_buffer(nd_bytes) - _check_call(_LIB.MXNDListCreate( - ptr, len(nd_bytes), - ctypes.byref(handle), ctypes.byref(olen))) - keys = [] - arrs = [] - - for i in range(olen.value): - key = ctypes.c_char_p() - cptr = mx_float_p() - pdata = ctypes.POINTER(mx_uint)() - ndim = mx_uint() - _check_call(_LIB.MXNDListGet( - handle, mx_uint(i), ctypes.byref(key), - ctypes.byref(cptr), ctypes.byref(pdata), ctypes.byref(ndim))) - shape = tuple(pdata[:ndim.value]) - dbuffer = (mx_float * np.prod(shape)).from_address(ctypes.addressof(cptr.contents)) - ret = np.frombuffer(dbuffer, dtype=np.float32).reshape(shape) - ret = np.array(ret, dtype=np.float32) - keys.append(py_str(key.value)) - arrs.append(ret) - _check_call(_LIB.MXNDListFree(handle)) - - if len(keys) == 0 or len(keys[0]) == 0: - return arrs - else: - return {keys[i] : arrs[i] for i in range(len(keys)) - } diff --git a/ci/dev_menu.py b/ci/dev_menu.py index 365d2b8e9cfc..c471302c76d7 100644 --- a/ci/dev_menu.py +++ b/ci/dev_menu.py @@ -127,18 +127,6 @@ def provision_virtualenv(venv_path=DEFAULT_PYENV): "ci/build.py --platform ubuntu_cpu_jekyll /work/runtime_functions.sh build_jekyll_docs"), ('[Docker] Build the Python API docs - outputs to "docs/python_docs/python/build/_build/html/"', "ci/build.py --platform ubuntu_cpu_python /work/runtime_functions.sh build_python_docs"), - ('[Docker] Build the C++ API docs - outputs to "docs/cpp_docs/build/html/html/"', - "ci/build.py --platform ubuntu_cpu_c /work/runtime_functions.sh build_c_docs"), - ('[Docker] Build the Clojure API docs - outputs to "contrib/clojure-package/target/doc"', - "ci/build.py --platform ubuntu_cpu_scala /work/runtime_functions.sh build_clojure_docs"), - ('[Docker] Build the Java API docs - outputs to "docs/scala-package/build/docs/java"', - "ci/build.py --platform ubuntu_cpu_scala /work/runtime_functions.sh build_java_docs"), - ('[Docker] Build the Julia API docs - outputs to "julia/docs/site/"', - "ci/build.py --platform ubuntu_cpu_julia /work/runtime_functions.sh build_julia_docs"), - ('[Docker] Build the R API docs - outputs to "R-package/build/mxnet-r-reference-manual.pdf"', - "ci/build.py --platform ubuntu_cpu_r /work/runtime_functions.sh build_r_docs"), - ('[Docker] Build the Scala API docs - outputs to "scala-package/docs/build/docs/scala"', - "ci/build.py --platform ubuntu_cpu_scala /work/runtime_functions.sh build_scala_docs"), ('[Docker] sanity_check. 
diff --git a/ci/dev_menu.py b/ci/dev_menu.py index 365d2b8e9cfc..c471302c76d7 100644 --- a/ci/dev_menu.py +++ b/ci/dev_menu.py @@ -127,18 +127,6 @@ def provision_virtualenv(venv_path=DEFAULT_PYENV): "ci/build.py --platform ubuntu_cpu_jekyll /work/runtime_functions.sh build_jekyll_docs"), ('[Docker] Build the Python API docs - outputs to "docs/python_docs/python/build/_build/html/"', "ci/build.py --platform ubuntu_cpu_python /work/runtime_functions.sh build_python_docs"), -('[Docker] Build the C++ API docs - outputs to "docs/cpp_docs/build/html/html/"', - "ci/build.py --platform ubuntu_cpu_c /work/runtime_functions.sh build_c_docs"), -('[Docker] Build the Clojure API docs - outputs to "contrib/clojure-package/target/doc"', - "ci/build.py --platform ubuntu_cpu_scala /work/runtime_functions.sh build_clojure_docs"), -('[Docker] Build the Java API docs - outputs to "docs/scala-package/build/docs/java"', - "ci/build.py --platform ubuntu_cpu_scala /work/runtime_functions.sh build_java_docs"), -('[Docker] Build the Julia API docs - outputs to "julia/docs/site/"', - "ci/build.py --platform ubuntu_cpu_julia /work/runtime_functions.sh build_julia_docs"), -('[Docker] Build the R API docs - outputs to "R-package/build/mxnet-r-reference-manual.pdf"', - "ci/build.py --platform ubuntu_cpu_r /work/runtime_functions.sh build_r_docs"), -('[Docker] Build the Scala API docs - outputs to "scala-package/docs/build/docs/scala"', - "ci/build.py --platform ubuntu_cpu_scala /work/runtime_functions.sh build_scala_docs"), ('[Docker] sanity_check. Check for linting and code formatting and licenses.', [ "ci/build.py --platform ubuntu_cpu /work/runtime_functions.sh sanity_check", diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 3886d1134987..666c59e6e4aa 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -793,29 +793,6 @@ build_ubuntu_gpu_cuda101_cudnn7_mkldnn_cpp_test() { make cython PYTHON=python3 } -build_ubuntu_amalgamation() { - set -ex - # Amalgamation can not be run with -j nproc - export CC=gcc-7 - export CXX=g++-7 - build_ccache_wrappers - make -C amalgamation/ clean - make -C amalgamation/ \ - USE_BLAS=openblas -} - -build_ubuntu_amalgamation_min() { - set -ex - # Amalgamation can not be run with -j nproc - export CC=gcc-7 - export CXX=g++-7 - build_ccache_wrappers - make -C amalgamation/ clean - make -C amalgamation/ \ - USE_BLAS=openblas \ - MIN=1 -} - build_ubuntu_gpu_cmake() { set -ex cd /work/build @@ -901,8 +878,7 @@ build_ubuntu_blc() { sanity_check() { set -ex tools/license_header.py check - make cpplint jnilint - make -f R-package/Makefile rcpplint + make cpplint make pylint pytest -n 4 tests/tutorials/test_sanity_tutorials.py } @@ -1041,121 +1017,11 @@ unittest_ubuntu_python3_gpu_nocudnn() { pytest -m 'serial' --durations=50 --cov-report xml:tests_gpu.xml --cov-append --verbose tests/python/gpu } -unittest_centos7_cpu_scala() { - set -ex - source /opt/rh/devtoolset-7/enable - source /opt/rh/rh-maven35/enable - cd /work/mxnet - scala_prepare - cd scala-package - mvn -B integration-test -} - -unittest_ubuntu_cpu_clojure() { - set -ex - scala_prepare - cd scala-package - mvn -B install - cd .. - ./contrib/clojure-package/ci-test.sh -} - -unittest_ubuntu_cpu_clojure_integration() { - set -ex - cd scala-package - mvn -B install - cd ..
- ./contrib/clojure-package/integration-tests.sh -} - - unittest_cpp() { set -ex build/tests/mxnet_unit_tests } -unittest_ubuntu_cpu_R() { - set -ex - mkdir -p /tmp/r-site-library - # build R packages in parallel - mkdir -p ~/.R/ - export CC=gcc-7 - export CXX=g++-7 - build_ccache_wrappers - echo "MAKEFLAGS = -j"$(nproc) > ~/.R/Makevars - # make -j not supported - make -f R-package/Makefile rpkg \ - R_LIBS=/tmp/r-site-library - - R CMD INSTALL --library=/tmp/r-site-library R-package - make -f R-package/Makefile rpkgtest R_LIBS=/tmp/r-site-library -} - -unittest_ubuntu_minimal_R() { - set -ex - mkdir -p /tmp/r-site-library - # build R packages in parallel - mkdir -p ~/.R/ - echo "MAKEFLAGS = -j"$(nproc) > ~/.R/Makevars - export CC=gcc-7 - export CXX=g++-7 - build_ccache_wrappers - # make -j not supported - make -f R-package/Makefile rpkg \ - R_LIBS=/tmp/r-site-library - - R CMD INSTALL --library=/tmp/r-site-library R-package -} - -unittest_ubuntu_gpu_R() { - set -ex - mkdir -p /tmp/r-site-library - # build R packages in parallel - mkdir -p ~/.R/ - export CC=gcc-7 - export CXX=g++-7 - build_ccache_wrappers - echo "MAKEFLAGS = -j"$(nproc) > ~/.R/Makevars - # make -j not supported - make -f R-package/Makefile rpkg \ - R_LIBS=/tmp/r-site-library - R CMD INSTALL --library=/tmp/r-site-library R-package - make -f R-package/Makefile rpkgtest R_LIBS=/tmp/r-site-library R_GPU_ENABLE=1 -} - -unittest_ubuntu_cpu_julia() { - set -ex - export PATH="$1/bin:$PATH" - export MXNET_HOME='/work/mxnet' - export JULIA_DEPOT_PATH='/work/julia-depot' - export INTEGRATION_TEST=1 - - julia -e 'using InteractiveUtils; versioninfo()' - - # FIXME - export LD_PRELOAD='/usr/lib/x86_64-linux-gnu/libjemalloc.so' - export LD_LIBRARY_PATH=/work/mxnet/lib:$LD_LIBRARY_PATH - - # use the prebuilt binary from $MXNET_HOME/lib - julia --project=./julia -e 'using Pkg; Pkg.build("MXNet")' - - # run the script `julia/test/runtests.jl` - julia --project=./julia -e 'using Pkg; Pkg.test("MXNet")' - - # See https://github.com/dmlc/MXNet.jl/pull/303#issuecomment-341171774 - julia --project=./julia -e 'using MXNet; mx._sig_checker()' -} - -unittest_ubuntu_cpu_julia07() { - set -ex - unittest_ubuntu_cpu_julia /work/julia07 -} - -unittest_ubuntu_cpu_julia10() { - set -ex - unittest_ubuntu_cpu_julia /work/julia10 -} - unittest_centos7_cpu() { set -ex source /opt/rh/rh-python36/enable @@ -1191,12 +1057,6 @@ integrationtest_ubuntu_cpu_onnx() { pytest -n 4 tests/python/unittest/onnx/test_node.py } -integrationtest_ubuntu_gpu_cpp_package() { - set -ex - export DMLC_LOG_STACK_TRACE_DEPTH=10 - cpp-package/tests/ci_test.sh -} - integrationtest_ubuntu_cpu_dist_kvstore() { set -ex pushd . @@ -1218,23 +1078,6 @@ integrationtest_ubuntu_cpu_dist_kvstore() { popd } -integrationtest_ubuntu_cpu_scala() { - set -ex - export DMLC_LOG_STACK_TRACE_DEPTH=10 - scala_prepare - cd scala-package - mvn -B verify -DskipTests=false -} - -integrationtest_ubuntu_gpu_scala() { - set -ex - export DMLC_LOG_STACK_TRACE_DEPTH=10 - scala_prepare - cd scala-package - export SCALA_TEST_ON_GPU=1 - mvn -B verify -DskipTests=false -} - integrationtest_ubuntu_gpu_dist_kvstore() { set -ex pushd . 
@@ -1333,16 +1176,6 @@ nightly_test_rat_check() { popd } -# Runs Imagenet inference -nightly_test_imagenet_inference() { - set -ex - export DMLC_LOG_STACK_TRACE_DEPTH=10 - echo $PWD - cp /work/mxnet/build/cpp-package/example/inference/imagenet_inference /work/mxnet/cpp-package/example/inference/ - cd /work/mxnet/cpp-package/example/inference/ - ./unit_test_imagenet_inference.sh -} - #Single Node KVStore Test nightly_test_KVStore_singleNode() { set -ex @@ -1371,31 +1204,6 @@ nightly_test_large_vector() { pytest tests/nightly/test_large_vector.py::test_basic } -#Tests Amalgamation Build with 5 different sets of flags -nightly_test_amalgamation() { - set -ex - export DMLC_LOG_STACK_TRACE_DEPTH=10 - export CC=gcc-7 - export CXX=g++-7 - # Amalgamation can not be run with -j nproc - make -C amalgamation/ clean - make -C amalgamation/ ${1} ${2} -} - -#Tests Amalgamation Build for Javascript -nightly_test_javascript() { - set -ex - export LLVM=/work/deps/emscripten-fastcomp/build/bin - export DMLC_LOG_STACK_TRACE_DEPTH=10 - export CC=gcc-7 - export CXX=g++-7 - # This part is needed to run emcc correctly - cd /work/deps/emscripten - ./emcc - touch ~/.emscripten - make -C /work/mxnet/amalgamation libmxnet_predict.js MIN=1 EMCC=/work/deps/emscripten/emcc -} - #Tests Model backwards compatibility on MXNet nightly_model_backwards_compat_test() { set -ex @@ -1427,22 +1235,6 @@ nightly_tutorial_test_ubuntu_python3_gpu() { pytest --durations=50 --cov-report xml:tests_tutorials.xml --capture=no test_tutorials.py } -nightly_java_demo_test_cpu() { - set -ex - cd /work/mxnet/scala-package/mxnet-demo/java-demo - mvn -B -Pci-nightly install - bash bin/java_sample.sh - bash bin/run_od.sh -} - -nightly_scala_demo_test_cpu() { - set -ex - cd /work/mxnet/scala-package/mxnet-demo/scala-demo - mvn -B -Pci-nightly install - bash bin/demo.sh - bash bin/run_im.sh -} - nightly_estimator() { set -ex export DMLC_LOG_STACK_TRACE_DEPTH=10 @@ -1457,18 +1249,6 @@ deploy_docs() { set -ex pushd . - # Setup for Julia docs - export PATH="/work/julia10/bin:$PATH" - export MXNET_HOME='/work/mxnet' - export JULIA_DEPOT_PATH='/work/julia-depot' - - julia -e 'using InteractiveUtils; versioninfo()' - - # FIXME - export LD_PRELOAD='/usr/lib/x86_64-linux-gnu/libjemalloc.so' - export LD_LIBRARY_PATH=/work/mxnet/lib:$LD_LIBRARY_PATH - # End Julia setup - export CC="ccache gcc" export CXX="ccache g++" @@ -1568,187 +1348,12 @@ build_c_docs() { } -build_r_docs() { - set -ex - pushd . - - build_docs_setup - r_root='R-package' - r_pdf='mxnet-r-reference-manual.pdf' - r_build='build' - docs_build_path="$r_root/$r_build/$r_pdf" - artifacts_path='docs/_build/r-artifacts.tgz' - - mkdir -p $r_root/$r_build - - unittest_ubuntu_minimal_R - - pushd $r_root - - R_LIBS=/tmp/r-site-library R CMD Rd2pdf . --no-preview --encoding=utf8 -o $r_build/$r_pdf - - popd - - GZIP=-9 tar zcvf $artifacts_path $docs_build_path - - popd -} - - -build_scala() { - set -ex - pushd . - - cd scala-package - mvn -B install -DskipTests - - popd -} - - -build_scala_docs() { - set -ex - pushd . - build_docs_setup - build_scala - - scala_path='scala-package' - docs_build_path='scala-package/docs/build/docs/scala' - artifacts_path='docs/_build/scala-artifacts.tgz' - - pushd $scala_path - - scala_doc_sources=`find . 
-type f -name "*.scala" | egrep "./core|./infer" | egrep -v "/javaapi" | egrep -v "Suite" | egrep -v "CancelTestUtil" | egrep -v "/mxnetexamples"` - jar_native=`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" ` - jar_macros=`find macros -name "*.jar" | tr "\\n" ":" ` - jar_core=`find core -name "*.jar" | tr "\\n" ":" ` - jar_infer=`find infer -name "*.jar" | tr "\\n" ":" ` - scala_doc_classpath=$jar_native:$jar_macros:$jar_core:$jar_infer - - scala_ignore_errors='' - legacy_ver=".*1.2|1.3.*" - # BUILD_VER needs to be pull from environment vars - if [[ $_BUILD_VER =~ $legacy_ver ]] - then - # There are unresolvable errors on mxnet 1.2.x. We are ignoring those - # errors while aborting the ci on newer versions - echo "We will ignoring unresolvable errors on MXNet 1.2/1.3." - scala_ignore_errors='; exit 0' - fi - - scaladoc $scala_doc_sources -classpath $scala_doc_classpath $scala_ignore_errors -doc-title MXNet - popd - - # Clean-up old artifacts - rm -rf $docs_build_path - mkdir -p $docs_build_path - - for doc_file in index index.html org lib index.js package.html; do - mv $scala_path/$doc_file $docs_build_path - done - - GZIP=-9 tar -zcvf $artifacts_path -C $docs_build_path . - - popd -} - - -build_julia_docs() { - set -ex - pushd . - - build_docs_setup - # Setup environment for Julia docs - export PATH="/work/julia10/bin:$PATH" - export MXNET_HOME='/work/mxnet' - export JULIA_DEPOT_PATH='/work/julia-depot' - export LD_PRELOAD='/usr/lib/x86_64-linux-gnu/libjemalloc.so' - export LD_LIBRARY_PATH=/work/mxnet/lib:$LD_LIBRARY_PATH - - julia_doc_path='julia/docs/site/' - julia_doc_artifact='docs/_build/julia-artifacts.tgz' - - echo "Julia will check for MXNet in $MXNET_HOME/lib" - - - make -C julia/docs - - GZIP=-9 tar -zcvf $julia_doc_artifact -C $julia_doc_path . - - popd -} - - -build_java_docs() { - set -ex - pushd . - - build_docs_setup - build_scala - - # Re-use scala-package build artifacts. - java_path='scala-package' - docs_build_path='docs/scala-package/build/docs/java' - artifacts_path='docs/_build/java-artifacts.tgz' - - pushd $java_path - - java_doc_sources=`find . -type f -name "*.scala" | egrep "./core|./infer" | egrep "/javaapi" | egrep -v "Suite" | egrep -v "CancelTestUtil" | egrep -v "/mxnetexamples"` - jar_native=`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" ` - jar_macros=`find macros -name "*.jar" | tr "\\n" ":" ` - jar_core=`find core -name "*.jar" | tr "\\n" ":" ` - jar_infer=`find infer -name "*.jar" | tr "\\n" ":" ` - java_doc_classpath=$jar_native:$jar_macros:$jar_core:$jar_infer - - scaladoc $java_doc_sources -classpath $java_doc_classpath -feature -deprecation -doc-title MXNet - popd - - # Clean-up old artifacts - rm -rf $docs_build_path - mkdir -p $docs_build_path - - for doc_file in index index.html org lib index.js package.html; do - mv $java_path/$doc_file $docs_build_path - done - - GZIP=-9 tar -zcvf $artifacts_path -C $docs_build_path . - - popd -} - - -build_clojure_docs() { - set -ex - pushd . - - build_docs_setup - build_scala - - clojure_path='contrib/clojure-package' - clojure_doc_path='contrib/clojure-package/target/doc' - clojure_doc_artifact='docs/_build/clojure-artifacts.tgz' - - pushd $clojure_path - lein codox - popd - - GZIP=-9 tar -zcvf $clojure_doc_artifact -C $clojure_doc_path . 
- - popd -} - build_docs() { pushd docs/_build tar -xzf jekyll-artifacts.tgz api_folder='html/api' # Python has its own landing page/site so we don't put it in /docs/api mkdir -p $api_folder/python/docs && tar -xzf python-artifacts.tgz --directory $api_folder/python/docs - mkdir -p $api_folder/cpp/docs/api && tar -xzf c-artifacts.tgz --directory $api_folder/cpp/docs/api - mkdir -p $api_folder/r/docs/api && tar -xzf r-artifacts.tgz --directory $api_folder/r/docs/api - mkdir -p $api_folder/julia/docs/api && tar -xzf julia-artifacts.tgz --directory $api_folder/julia/docs/api - mkdir -p $api_folder/scala/docs/api && tar -xzf scala-artifacts.tgz --directory $api_folder/scala/docs/api - mkdir -p $api_folder/java/docs/api && tar -xzf java-artifacts.tgz --directory $api_folder/java/docs/api - mkdir -p $api_folder/clojure/docs/api && tar -xzf clojure-artifacts.tgz --directory $api_folder/clojure/docs/api GZIP=-9 tar -zcvf full_website.tgz -C html . popd } @@ -1865,18 +1470,6 @@ cd_s3_publish() { aws s3 cp ${filepath} s3://apache-mxnet/dist/python/${variant}/${filename} --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=43f628fab72838a4f0b929d7f1993b14411f4b0294b011261bc6bd3e950a6822 } -build_static_scala_cpu() { - set -ex - pushd . - scala_prepare - export MAVEN_PUBLISH_OS_TYPE=linux-x86_64-cpu - export mxnet_variant=cpu - source /opt/rh/devtoolset-7/enable - source /opt/rh/rh-maven35/enable - ./ci/publish/scala/build.sh - popd -} - build_static_python_cpu() { set -ex pushd . @@ -1898,36 +1491,7 @@ build_static_python_cu92() { popd } -publish_scala_build() { - set -ex - pushd . - scala_prepare - source /opt/rh/devtoolset-7/enable - source /opt/rh/rh-maven35/enable - export USE_SYSTEM_CUDA=1 - ./ci/publish/scala/build.sh - popd -} - -publish_scala_test() { - set -ex - pushd . - scala_prepare - source /opt/rh/rh-maven35/enable - ./ci/publish/scala/test.sh - popd -} - -publish_scala_deploy() { - set -ex - pushd .
- scala_prepare - ./ci/publish/scala/deploy.sh - popd -} - # broken_link_checker - broken_link_checker() { set -ex ./tests/nightly/broken_link_checker_test/broken_link_checker.sh diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy index 200ce9ca880e..923d41b4c383 100644 --- a/ci/jenkins/Jenkins_steps.groovy +++ b/ci/jenkins/Jenkins_steps.groovy @@ -36,11 +36,11 @@ mx_cmake_lib_debug = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, b mx_mkldnn_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, build/3rdparty/openmp/runtime/src/libomp.so, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so' mx_mkldnn_lib_make = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, lib/libtvmop.so, lib/tvmop.conf, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a' mx_tensorrt_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, build/3rdparty/openmp/runtime/src/libomp.so, lib/libnvonnxparser_runtime.so.0, lib/libnvonnxparser.so.0, lib/libonnx_proto.so, lib/libonnx.so' -mx_lib_cpp_examples = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, build/3rdparty/openmp/runtime/src/libomp.so, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, build/cpp-package/example/**, python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so' -mx_lib_cpp_examples_make = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, lib/libtvmop.so, lib/tvmop.conf, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, build/cpp-package/example/**, python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so' -mx_lib_cpp_capi_make = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, lib/libtvmop.so, lib/tvmop.conf, libsample_lib.so, lib/libmkldnn.so.1, lib/libmklml_intel.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, build/cpp-package/example/**, python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so, build/tests/cpp/mxnet_unit_tests' -mx_lib_cpp_examples_no_tvm_op = 'build/libmxnet.so, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, build/3rdparty/openmp/runtime/src/libomp.so, build/cpp-package/example/**, python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so' -mx_lib_cpp_examples_cpu = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, build/3rdparty/openmp/runtime/src/libomp.so, build/cpp-package/example/**' +mx_lib_cpp_examples = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, build/3rdparty/openmp/runtime/src/libomp.so, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so' +mx_lib_cpp_examples_make = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, lib/libtvmop.so, lib/tvmop.conf, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, python/mxnet/_cy3/*.so, 
python/mxnet/_ffi/_cy3/*.so' +mx_lib_cpp_capi_make = 'lib/libmxnet.so, lib/libmxnet.a, lib/libtvm_runtime.so, lib/libtvmop.so, lib/tvmop.conf, libsample_lib.so, lib/libmkldnn.so.1, lib/libmklml_intel.so, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so, build/tests/cpp/mxnet_unit_tests' +mx_lib_cpp_examples_no_tvm_op = 'build/libmxnet.so, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, build/3rdparty/openmp/runtime/src/libomp.so, python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so' +mx_lib_cpp_examples_cpu = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, build/3rdparty/openmp/runtime/src/libomp.so' mx_cd_lib = 'lib/libmxnet.so, licenses/*, lib/libgfortran.so.4, lib/libquadmath.so.0, lib/libopenblas.so.0, include/mkldnn/dnnl_version.h, include/mkldnn/dnnl_config.h' // Python unittest for CPU @@ -612,32 +612,6 @@ def compile_unix_clang10_cuda_werror(lib_name) { }] } -def compile_unix_amalgamation_min() { - return ['Amalgamation MIN': { - node(NODE_LINUX_CPU) { - ws('workspace/amalgamationmin') { - timeout(time: max_time, unit: 'MINUTES') { - utils.init_git() - utils.docker_run('ubuntu_cpu', 'build_ubuntu_amalgamation_min', false) - } - } - } - }] -} - -def compile_unix_amalgamation() { - return ['Amalgamation': { - node(NODE_LINUX_CPU) { - ws('workspace/amalgamation') { - timeout(time: max_time, unit: 'MINUTES') { - utils.init_git() - utils.docker_run('ubuntu_cpu', 'build_ubuntu_amalgamation', false) - } - } - } - }] -} - def compile_windows_cpu(lib_name) { return ['Build CPU windows':{ node(NODE_WINDOWS_CPU) { @@ -722,19 +696,6 @@ def compile_windows_gpu_mkldnn(lib_name) { }] } -def compile_static_scala_cpu() { - return ['Static build CPU CentOS7 Scala' : { - node(NODE_LINUX_CPU) { - ws('workspace/ut-publish-scala-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.init_git() - utils.docker_run('centos7_cpu', 'build_static_scala_cpu', false) - } - } - } - }] -} - def compile_static_python_cpu() { return ['Static build CPU CentOS7 Python' : { node(NODE_LINUX_CPU) { @@ -937,185 +898,6 @@ def test_unix_python3_mkldnn_nocudnn_gpu(lib_name) { }] } -def test_unix_cpp_package_gpu(lib_name) { - return ['cpp-package GPU Makefile': { - node(NODE_LINUX_GPU_G4) { - ws('workspace/it-cpp-package') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_lib_cpp_examples_make) - utils.docker_run('ubuntu_gpu_cu101', 'integrationtest_ubuntu_gpu_cpp_package', true) - utils.publish_test_coverage() - } - } - } - }] -} - -def test_unix_scala_cpu(lib_name) { - return ['Scala: CPU Makefile': { - node(NODE_LINUX_CPU) { - ws('workspace/ut-scala-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_lib_make) - utils.docker_run('ubuntu_cpu', 'integrationtest_ubuntu_cpu_scala', false) - utils.publish_test_coverage() - } - } - } - }] -} - -def test_unix_scala_mkldnn_cpu(lib_name){ - return ['Scala: MKLDNN-CPU Makefile': { - node(NODE_LINUX_CPU) { - ws('workspace/ut-scala-mkldnn-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_mkldnn_lib_make) - utils.docker_run('ubuntu_cpu', 'integrationtest_ubuntu_cpu_scala', false) - utils.publish_test_coverage() - } - } - } - }] -} - -def test_unix_scala_gpu(lib_name) { - return ['Scala: GPU Makefile': { - node(NODE_LINUX_GPU_G4) { - 
ws('workspace/ut-scala-gpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_lib_make) - utils.docker_run('ubuntu_gpu_cu101', 'integrationtest_ubuntu_gpu_scala', true) - utils.publish_test_coverage() - } - } - } - }] -} - -def test_unix_clojure_cpu(lib_name) { - return ['Clojure: CPU Makefile': { - node(NODE_LINUX_CPU) { - ws('workspace/ut-clojure-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_lib_make) - utils.docker_run('ubuntu_cpu', 'unittest_ubuntu_cpu_clojure', false) - utils.publish_test_coverage() - } - } - } - }] -} - -def test_unix_clojure_integration_cpu(lib_name) { - return ['Clojure: CPU Integration Makefile': { - node(NODE_LINUX_CPU) { - ws('workspace/ut-clojure-integration-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_lib_make) - utils.docker_run('ubuntu_cpu', 'unittest_ubuntu_cpu_clojure_integration', false) - } - } - } - }] -} - -def test_unix_r_cpu(lib_name) { - return ['R: CPU': { - node(NODE_LINUX_CPU) { - ws('workspace/ut-r-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_lib, true) - utils.docker_run('ubuntu_cpu', 'unittest_ubuntu_cpu_R', false) - utils.publish_test_coverage() - } - } - } - }] -} - -def test_unix_r_mkldnn_cpu(lib_name) { - return ['R: MKLDNN-CPU': { - node(NODE_LINUX_CPU) { - ws('workspace/ut-r-mkldnn-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_mkldnn_lib, true) - utils.docker_run('ubuntu_cpu', 'unittest_ubuntu_minimal_R', false) - utils.publish_test_coverage() - } - } - } - }] -} - -def test_unix_cpp_gpu(lib_name) { - return ['Cpp: GPU': { - node(NODE_LINUX_GPU_G4) { - ws('workspace/ut-cpp-gpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_cmake_lib) - utils.docker_run('ubuntu_gpu_cu101', 'unittest_cpp', true) - utils.publish_test_coverage() - } - } - } - }] -} - -def test_unix_cpp_cpu(lib_name) { - return ['Cpp: CPU': { - node(NODE_LINUX_CPU) { - ws('workspace/ut-cpp-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_cmake_lib_debug, true) - utils.docker_run('ubuntu_cpu', 'unittest_cpp', false) - utils.publish_test_coverage() - } - } - } - }] -} - -def test_unix_r_gpu(lib_name) { - return ['R: GPU': { - node(NODE_LINUX_GPU_G4) { - ws('workspace/ut-r-gpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_lib) - utils.docker_run('ubuntu_gpu_cu101', 'unittest_ubuntu_gpu_R', true) - utils.publish_test_coverage() - } - } - } - }] -} - -def test_unix_julia07_cpu(lib_name) { - return ['Julia 0.7: CPU': { - node(NODE_LINUX_CPU) { - ws('workspace/ut-it-julia07-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_lib, true) - utils.docker_run('ubuntu_cpu', 'unittest_ubuntu_cpu_julia07', false) - } - } - } - }] -} - -def test_unix_julia10_cpu(lib_name) { - return ['Julia 1.0: CPU': { - node(NODE_LINUX_CPU) { - ws('workspace/ut-it-julia10-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_lib, true) - utils.docker_run('ubuntu_cpu', 'unittest_ubuntu_cpu_julia10', false) - } - } - } - }] -} - def test_unix_onnx_cpu(lib_name) { return ['Onnx: CPU Makefile': { node(NODE_LINUX_CPU) { @@ -1261,20 +1043,6 @@ def test_centos7_pypi_package_cd_gpu(lib_name) { }] } -def test_centos7_scala_cpu(lib_name) { - return ['Scala: CentOS CPU Makefile': { - node(NODE_LINUX_CPU) { - 
ws('workspace/ut-scala-centos7-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, mx_lib_make) - utils.docker_run('centos7_cpu', 'unittest_centos7_cpu_scala', false) - utils.publish_test_coverage() - } - } - } - }] -} - def test_windows_python3_gpu(lib_name) { return ['Python 3: GPU Win':{ node(NODE_WINDOWS_GPU) { @@ -1331,34 +1099,6 @@ def test_windows_python3_cpu(lib_name) { }] } -def test_windows_julia07_cpu(lib_name) { - return ['Julia 0.7: CPU Win': { - node(NODE_WINDOWS_CPU) { - ws('workspace/ut-julia07-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.init_git_win() - unstash lib_name - powershell 'ci/windows/test_jl07_cpu.ps1' - } - } - } - }] -} - -def test_windows_julia10_cpu(lib_name) { - return ['Julia 1.0: CPU Win': { - node(NODE_WINDOWS_CPU) { - ws('workspace/ut-julia10-cpu') { - timeout(time: max_time, unit: 'MINUTES') { - utils.init_git_win() - unstash lib_name - powershell 'ci/windows/test_jl10_cpu.ps1' - } - } - } - }] -} - def test_qemu_armv7_cpu(lib_name) { return ['ARMv7 QEMU': { node(NODE_LINUX_CPU) { @@ -1433,117 +1173,6 @@ def docs_python(lib_name) { } -// Call this function from Jenkins to generate just the C and C++ API microsite artifacts. -def docs_c(lib_name) { - return ['C Docs': { - node(NODE_LINUX_CPU) { - ws('workspace/docs') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, 'lib/libmxnet.so', false) - utils.docker_run('ubuntu_cpu_c', 'build_c_docs', false) - if (should_pack_website()) { - utils.pack_lib('c-artifacts', 'docs/_build/c-artifacts.tgz', false) - } - } - } - } - }] -} - - -// Call this function from Jenkins to generate just the Julia API microsite artifacts. -def docs_julia(lib_name) { - return ['Julia Docs': { - node(NODE_LINUX_CPU) { - ws('workspace/docs') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, 'lib/libmxnet.so', false) - utils.docker_run('ubuntu_cpu_julia', 'build_julia_docs', false) - if (should_pack_website()) { - utils.pack_lib('julia-artifacts', 'docs/_build/julia-artifacts.tgz', false) - } - } - } - } - }] -} - - -// Call this function from Jenkins to generate just the R API PDF artifact. -def docs_r(lib_name) { - return ['R Docs': { - node(NODE_LINUX_CPU) { - ws('workspace/docs') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, 'lib/libmxnet.so', false) - utils.docker_run('ubuntu_cpu_r', 'build_r_docs', false) - if (should_pack_website()) { - utils.pack_lib('r-artifacts', 'docs/_build/r-artifacts.tgz', false) - } - } - } - } - }] -} - - -// Call this function from Jenkins to generate just the Scala API microsite artifacts. -// It will also generate the Scala package. -def docs_scala(lib_name) { - return ['Scala Docs': { - node(NODE_LINUX_CPU) { - ws('workspace/docs') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, 'lib/libmxnet.so', false) - utils.docker_run('ubuntu_cpu_scala', 'build_scala_docs', false) - if (should_pack_website()) { - utils.pack_lib('scala-artifacts', 'docs/_build/scala-artifacts.tgz', false) - } - } - } - } - }] -} - - -// Call this function from Jenkins to generate just the Java API microsite artifacts. -// It will also generate the Scala package. 
-def docs_java(lib_name) { - return ['Java Docs': { - node(NODE_LINUX_CPU) { - ws('workspace/docs') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, 'lib/libmxnet.so', false) - utils.docker_run('ubuntu_cpu_scala', 'build_java_docs', false) - if (should_pack_website()) { - utils.pack_lib('java-artifacts', 'docs/_build/java-artifacts.tgz', false) - } - } - } - } - }] -} - - -// Call this function from Jenkins to generate just the Clojure API microsite artifacts. -// It will also generate the Scala package. -def docs_clojure(lib_name) { - return ['Clojure Docs': { - node(NODE_LINUX_CPU) { - ws('workspace/docs') { - timeout(time: max_time, unit: 'MINUTES') { - utils.unpack_and_init(lib_name, 'lib/libmxnet.so', false) - utils.docker_run('ubuntu_cpu_scala', 'build_clojure_docs', false) - if (should_pack_website()) { - utils.pack_lib('clojure-artifacts', 'docs/_build/clojure-artifacts.tgz', false) - } - } - } - } - }] -} - - // Call this function from Jenkins to generate just the main website artifacts. def docs_jekyll() { return ['Main Jekyll Website': { @@ -1573,13 +1202,7 @@ def docs_prepare() { utils.init_git() unstash 'jekyll-artifacts' - unstash 'c-artifacts' unstash 'python-artifacts' - unstash 'r-artifacts' - unstash 'julia-artifacts' - unstash 'scala-artifacts' - unstash 'java-artifacts' - unstash 'clojure-artifacts' utils.docker_run('ubuntu_cpu_jekyll', 'build_docs', false) diff --git a/ci/jenkins/Jenkinsfile_centos_cpu b/ci/jenkins/Jenkinsfile_centos_cpu index 406c607882ae..f652478a6f32 100644 --- a/ci/jenkins/Jenkinsfile_centos_cpu +++ b/ci/jenkins/Jenkinsfile_centos_cpu @@ -37,14 +37,12 @@ core_logic: { custom_steps.compile_centos7_cpu('centos7_cpu'), custom_steps.compile_centos7_cpu_make('centos7_cpu_make'), custom_steps.compile_centos7_cpu_mkldnn(), - custom_steps.compile_static_scala_cpu(), custom_steps.compile_static_python_cpu(), custom_steps.compile_static_cd_cpu('centos7_cpu_cd') ]) utils.parallel_stage('Tests', [ custom_steps.test_centos7_python3_cpu('centos7_cpu'), - custom_steps.test_centos7_scala_cpu('centos7_cpu_make'), custom_steps.test_centos7_python3_cd_cpu('centos7_cpu_cd'), custom_steps.test_centos7_pypi_package_cd_cpu('centos7_cpu_cd') ]) diff --git a/ci/jenkins/Jenkinsfile_miscellaneous b/ci/jenkins/Jenkinsfile_miscellaneous index eb6b632cdbd3..cb25801d15df 100644 --- a/ci/jenkins/Jenkinsfile_miscellaneous +++ b/ci/jenkins/Jenkinsfile_miscellaneous @@ -38,9 +38,7 @@ core_logic: { custom_steps.compile_unix_asan_cpu('cpu_asan'), custom_steps.compile_unix_gcc8_werror('cpu_gcc8'), custom_steps.compile_unix_clang10_werror('cpu_clang10'), - custom_steps.compile_unix_clang10_cuda_werror('gpu_clang10'), - custom_steps.compile_unix_amalgamation_min(), - custom_steps.compile_unix_amalgamation() + custom_steps.compile_unix_clang10_cuda_werror('gpu_clang10') ]) utils.parallel_stage('Tests', [ diff --git a/ci/jenkins/Jenkinsfile_unix_cpu b/ci/jenkins/Jenkinsfile_unix_cpu index 86498f0c3d1f..0909fc5139fe 100644 --- a/ci/jenkins/Jenkinsfile_unix_cpu +++ b/ci/jenkins/Jenkinsfile_unix_cpu @@ -50,16 +50,7 @@ core_logic: { custom_steps.test_unix_python3_mkl_cpu('cpu_mkl'), custom_steps.test_unix_python3_mkldnn_cpu('mkldnn_cpu'), custom_steps.test_unix_python3_mkldnn_mkl_cpu('mkldnn_mkl_cpu'), - custom_steps.test_unix_scala_cpu('cpu_make'), - custom_steps.test_unix_scala_mkldnn_cpu('mkldnn_cpu_make'), - custom_steps.test_unix_clojure_cpu('cpu_make'), - custom_steps.test_unix_clojure_integration_cpu('cpu_make'), - custom_steps.test_unix_r_cpu('cpu'), - 
custom_steps.test_unix_r_mkldnn_cpu('mkldnn_cpu'), - custom_steps.test_unix_julia07_cpu('cpu'), - custom_steps.test_unix_julia10_cpu('cpu'), custom_steps.test_unix_onnx_cpu('cpu_make'), - custom_steps.test_unix_cpp_cpu('cpu_debug'), /* Disabled due to master build failure: * http://jenkins.mxnet-ci.amazon-ml.com/blue/organizations/jenkins/incubator-mxnet/detail/master/1221/pipeline/ * https://github.com/apache/incubator-mxnet/issues/11801 diff --git a/ci/jenkins/Jenkinsfile_unix_gpu b/ci/jenkins/Jenkinsfile_unix_gpu index 8ea598e37882..1fe96bf690df 100644 --- a/ci/jenkins/Jenkinsfile_unix_gpu +++ b/ci/jenkins/Jenkinsfile_unix_gpu @@ -50,10 +50,6 @@ core_logic: { custom_steps.test_unix_python3_gpu('gpu'), custom_steps.test_unix_python3_mkldnn_gpu('mkldnn_gpu'), custom_steps.test_unix_python3_mkldnn_nocudnn_gpu('mkldnn_gpu_nocudnn'), - custom_steps.test_unix_r_gpu('gpu'), - custom_steps.test_unix_cpp_gpu('cmake_gpu'), - custom_steps.test_unix_cpp_package_gpu('gpu_make'), - custom_steps.test_unix_scala_gpu('gpu_make'), // TODO(szha): fix and reenable the hanging issue. tracked in #18098 // custom_steps.test_unix_distributed_kvstore_gpu('gpu'), custom_steps.test_unix_byteps_gpu('gpu'), diff --git a/ci/jenkins/Jenkinsfile_website_c_docs b/ci/jenkins/Jenkinsfile_website_c_docs index 5c6988d01f7a..caddbae8c620 100644 --- a/ci/jenkins/Jenkinsfile_website_c_docs +++ b/ci/jenkins/Jenkinsfile_website_c_docs @@ -37,10 +37,6 @@ core_logic: { custom_steps.compile_unix_lite('libmxnet') ]) - utils.parallel_stage('C Docs', [ - custom_steps.docs_c('libmxnet') - ]) - } , failure_handler: { diff --git a/ci/jenkins/Jenkinsfile_website_clojure_docs b/ci/jenkins/Jenkinsfile_website_clojure_docs index d19650528c1c..caddbae8c620 100644 --- a/ci/jenkins/Jenkinsfile_website_clojure_docs +++ b/ci/jenkins/Jenkinsfile_website_clojure_docs @@ -37,11 +37,6 @@ core_logic: { custom_steps.compile_unix_lite('libmxnet') ]) - utils.parallel_stage('Clojure Docs', [ - custom_steps.docs_clojure('libmxnet') - - ]) - } , failure_handler: { diff --git a/ci/jenkins/Jenkinsfile_website_full b/ci/jenkins/Jenkinsfile_website_full index 9eadf7b98e09..ca05c822b55e 100644 --- a/ci/jenkins/Jenkinsfile_website_full +++ b/ci/jenkins/Jenkinsfile_website_full @@ -40,13 +40,7 @@ core_logic: { utils.parallel_stage('Build Docs', [ custom_steps.docs_jekyll(), - custom_steps.docs_c('libmxnet'), custom_steps.docs_python('libmxnet'), - custom_steps.docs_julia('libmxnet'), - custom_steps.docs_r('libmxnet'), - custom_steps.docs_scala('libmxnet'), - custom_steps.docs_java('libmxnet'), - custom_steps.docs_clojure('libmxnet') ]) utils.parallel_stage('Prepare', [ diff --git a/ci/jenkins/Jenkinsfile_website_full_pr b/ci/jenkins/Jenkinsfile_website_full_pr index a5ec4124ebed..7b78f9d190cd 100644 --- a/ci/jenkins/Jenkinsfile_website_full_pr +++ b/ci/jenkins/Jenkinsfile_website_full_pr @@ -40,13 +40,7 @@ core_logic: { utils.parallel_stage('Build Docs', [ // Optimization would be to flag these not to stash if not previewing them custom_steps.docs_jekyll(), - custom_steps.docs_c('libmxnet'), custom_steps.docs_python('libmxnet'), - custom_steps.docs_julia('libmxnet'), - custom_steps.docs_r('libmxnet'), - custom_steps.docs_scala('libmxnet'), - custom_steps.docs_java('libmxnet'), - custom_steps.docs_clojure('libmxnet') ]) // TODO: add a website preview function diff --git a/ci/jenkins/Jenkinsfile_website_java_docs b/ci/jenkins/Jenkinsfile_website_java_docs index 89c5d8cd536a..03d160009740 100644 --- a/ci/jenkins/Jenkinsfile_website_java_docs +++ 
b/ci/jenkins/Jenkinsfile_website_java_docs @@ -36,12 +36,6 @@ core_logic: { utils.parallel_stage('Build', [ custom_steps.compile_unix_lite('libmxnet') ]) - - utils.parallel_stage('Java Docs', [ - custom_steps.docs_java('libmxnet') - - ]) - } , failure_handler: { diff --git a/ci/jenkins/Jenkinsfile_website_julia_docs b/ci/jenkins/Jenkinsfile_website_julia_docs index 0301490a0b96..8a2528f89ce7 100644 --- a/ci/jenkins/Jenkinsfile_website_julia_docs +++ b/ci/jenkins/Jenkinsfile_website_julia_docs @@ -37,11 +37,6 @@ core_logic: { custom_steps.compile_unix_lite('libmxnet') ]) - utils.parallel_stage('Julia Docs', [ - custom_steps.docs_julia('libmxnet') - - ]) - } , failure_handler: { diff --git a/ci/jenkins/Jenkinsfile_website_nightly b/ci/jenkins/Jenkinsfile_website_nightly index e92a4472200d..39f7c48a924b 100644 --- a/ci/jenkins/Jenkinsfile_website_nightly +++ b/ci/jenkins/Jenkinsfile_website_nightly @@ -40,13 +40,7 @@ core_logic: { utils.parallel_stage('Build Docs', [ custom_steps.docs_jekyll(), - custom_steps.docs_c('libmxnet'), custom_steps.docs_python('libmxnet'), - custom_steps.docs_julia('libmxnet'), - custom_steps.docs_r('libmxnet'), - custom_steps.docs_scala('libmxnet'), - custom_steps.docs_java('libmxnet'), - custom_steps.docs_clojure('libmxnet') ]) utils.parallel_stage('Prepare', [ diff --git a/ci/jenkins/Jenkinsfile_website_r_docs b/ci/jenkins/Jenkinsfile_website_r_docs deleted file mode 100644 index 8967a892f907..000000000000 --- a/ci/jenkins/Jenkinsfile_website_r_docs +++ /dev/null @@ -1,53 +0,0 @@ -// -*- mode: groovy -*- - -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -// -// Jenkins pipeline -// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ - -// timeout in minutes -max_time = 60 - -node('utility') { - // Loading the utilities requires a node context unfortunately - checkout scm - utils = load('ci/Jenkinsfile_utils.groovy') - custom_steps = load('ci/jenkins/Jenkins_steps.groovy') -} -utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') - -utils.main_wrapper( -core_logic: { - utils.parallel_stage('Build', [ - custom_steps.compile_unix_lite('libmxnet') - ]) - - utils.parallel_stage('R Docs', [ - custom_steps.docs_r('libmxnet') - - ]) - -} -, -failure_handler: { - // Only send email if master or release branches failed - if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { - emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. 
Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' - } -} -) diff --git a/ci/jenkins/Jenkinsfile_website_scala_docs b/ci/jenkins/Jenkinsfile_website_scala_docs deleted file mode 100644 index cf3303dfb340..000000000000 --- a/ci/jenkins/Jenkinsfile_website_scala_docs +++ /dev/null @@ -1,53 +0,0 @@ -// -*- mode: groovy -*- - -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -// -// Jenkins pipeline -// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ - -// timeout in minutes -max_time = 20 - -node('utility') { - // Loading the utilities requires a node context unfortunately - checkout scm - utils = load('ci/Jenkinsfile_utils.groovy') - custom_steps = load('ci/jenkins/Jenkins_steps.groovy') -} -utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') - -utils.main_wrapper( -core_logic: { - utils.parallel_stage('Build', [ - custom_steps.compile_unix_lite('libmxnet') - ]) - - utils.parallel_stage('Scala Docs', [ - custom_steps.docs_scala('libmxnet') - - ]) - -} -, -failure_handler: { - // Only send email if master or release branches failed - if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { - emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. 
Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' - } -} -) diff --git a/ci/jenkins/Jenkinsfile_windows_cpu b/ci/jenkins/Jenkinsfile_windows_cpu index 26e2ba25575e..2592f65c6b90 100644 --- a/ci/jenkins/Jenkinsfile_windows_cpu +++ b/ci/jenkins/Jenkinsfile_windows_cpu @@ -42,8 +42,6 @@ core_logic: { utils.parallel_stage('Tests', [ custom_steps.test_windows_python3_cpu('windows_package_cpu'), - custom_steps.test_windows_julia07_cpu('windows_package_cpu'), - custom_steps.test_windows_julia10_cpu('windows_package_cpu') ]) } , diff --git a/ci/publish/Jenkinsfile b/ci/publish/Jenkinsfile index b522bb008c60..ee3655bcb136 100644 --- a/ci/publish/Jenkinsfile +++ b/ci/publish/Jenkinsfile @@ -20,9 +20,6 @@ // Jenkins pipeline // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ -//mxnet libraries -mx_scala_pub = 'lib/**, 3rdparty/dmlc-core/libdmlc.a, 3rdparty/tvm/nnvm/lib/libnnvm.a, 3rdparty/ps-lite/build/libps.a, deps/lib/libprotobuf-lite.a, deps/lib/libzmq.a, config.mk, scala-package/pom.xml, scala-package/**/pom.xml, scala-package/**/target/**, scala-package/*/target/repo/**' - // timeout in minutes max_time = 120 @@ -50,45 +47,6 @@ def wrapStep(nodeToRun, workspaceName, step) { } } -def toBuild = [:] -def labels = ['cpu', 'gpu'] -for (x in labels) { - def label = x // Required due to language - toBuild["Scala Build ${label}"] = wrapStep(nodeMap['cpu'], "build-scala-${label}") { - withEnv(["MAVEN_PUBLISH_OS_TYPE=${scalaOSMap[label]}", "mxnet_variant=${scalaVariantMap[label]}"]) { - utils.init_git() - utils.docker_run("centos7_cpu", 'publish_scala_build', false, '500m', 'MAVEN_PUBLISH_OS_TYPE mxnet_variant') - utils.pack_lib("scala_${label}", mx_scala_pub, false) - } - } -} - -def toTest = [:] -def systems = ['ubuntu1604', 'ubuntu1804', 'centos7'] -for (x in labels) { - def label = x // Required due to language - for (y in systems) { - def system = y // Required due to language - toTest["Scala Test ${system} ${label}"] = wrapStep(nodeMap[label], "test-scala-${system}-${label}") { - withEnv(["mxnet_variant=${scalaVariantMap[label]}"]) { - utils.unpack_and_init("scala_${label}", mx_scala_pub, false) - utils.docker_run("publish.test.${system}_${label}", 'publish_scala_test', label == 'gpu', '500m', 'mxnet_variant') - } - } - } -} - -def toDeploy = [:] -for (x in labels) { - def label = x // Required due to language - toDeploy["Scala Deploy ${label}"] = wrapStep(nodeMap[label], "deploy-scala-${label}") { - withEnv(["MAVEN_PUBLISH_OS_TYPE=${scalaOSMap[label]}", "mxnet_variant=${scalaVariantMap[label]}"]) { - utils.unpack_and_init("scala_${label}", mx_scala_pub, false) - utils.docker_run("publish.ubuntu1604_${label}", 'publish_scala_deploy', label == 'gpu' ? 
true : false, '500m', 'MAVEN_PUBLISH_OS_TYPE MAVEN_PUBLISH_SECRET_ENDPOINT_URL MAVEN_PUBLISH_SECRET_NAME_CREDENTIALS MAVEN_PUBLISH_SECRET_NAME_GPG DOCKERHUB_SECRET_ENDPOINT_REGION mxnet_variant') - } - } -} - utils.main_wrapper( core_logic: { stage('Build Packages') { diff --git a/ci/publish/website/deploy.sh b/ci/publish/website/deploy.sh index 3309d852f77f..8b89415e001d 100644 --- a/ci/publish/website/deploy.sh +++ b/ci/publish/website/deploy.sh @@ -33,7 +33,7 @@ set -ex # Configuration for artifacts version=$2 -api_list=("cpp" "clojure" "java" "julia" "python" "r" "scala") +api_list=("python") jekyll_fork=ThomasDelteil diff --git a/contrib/clojure-package/.gitignore b/contrib/clojure-package/.gitignore deleted file mode 100644 index c304e7926588..000000000000 --- a/contrib/clojure-package/.gitignore +++ /dev/null @@ -1,51 +0,0 @@ -/target -/classes -/checkouts -pom.xml -pom.xml.asc -*.jar -*.class -/.lein-* -/.nrepl-port -.hgignore -.hg/ -data/* -model/* -*~ -*.params -*.states -*.json -examples/module/data/* -examples/module/target/* -examples/rnn/data/char_lstm.zip -examples/rnn/data/obama.txt -examples/pre-trained-models/caltech-256/caltech-256-60-train.rec -examples/pre-trained-models/caltech-256/caltech-256-60-val.rec -examples/pre-trained-models/model/synset.txt -examples/pre-trained-models/test-image.jpg -examples/imclassification/data/* -examples/gan/data/* -examples/gan/results/* -examples/cnn-text-classification/data/glove/* -examples/cnn-text-classification/data/mr-data/* -examples/multi-label/data/mnist.zip -examples/multi-label/data/t10k-images-idx3-ubyte -examples/multi-label/data/t10k-labels-idx1-ubyte -examples/multi-label/data/train-images-idx3-ubyte -examples/multi-label/data/train-labels-idx1-ubyte -examples/visualization/test-vis/* -examples/visualization/test-vis.pdf -.DS_Store -src/.DS_Store -src/org/.DS_Store -test/test-ndarray.clj -test/test-ndarray-random.clj -test/test-ndarray-random-api.clj -test/test-ndarray-api.clj -test/test-symbol.clj -test/test-symbol-random.clj -test/test-symbol-random-api.clj -test/test-symbol-api.clj -test/test-images/* -src/org/apache/clojure_mxnet/gen/* - diff --git a/contrib/clojure-package/LICENSE b/contrib/clojure-package/LICENSE deleted file mode 100644 index 26c4e047be5f..000000000000 --- a/contrib/clojure-package/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2018 by Contributors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/contrib/clojure-package/README.md b/contrib/clojure-package/README.md deleted file mode 100644 index 313dd6d60332..000000000000 --- a/contrib/clojure-package/README.md +++ /dev/null @@ -1,191 +0,0 @@ - - - - - - - - - - - - - - - - - -# Clojure MXNet - -A Clojure Package Built on the MXNet Deep Learning Library - -## Introduction - -MXNet is a flexible and efficient deep learning library. While its core is built in C++ for maximum performance, support for multiple programming languages in intermediate and high-level APIs is a first-class feature. MXNet is currently an incubating Apache project. - -The motivation for creating a Clojure package was to give Clojurians access to a world-class deep learning platform, thereby building bridges for future development and innovation in the community. 
The Clojure package provides all the essential tools, including low-level and high-level APIs, dynamic graphs, etc., and enables building advanced architectures like GANs or LSTMs to tackle challenging applications such as image recognition or natural language processing. - -To maximize leverage, the Clojure package has been built on the existing Scala package using [Java Interop](https://clojure.org/reference/java_interop). This approach has allowed rapid initial development and close parity with the Scala package functionality. It also leaves the door open to incrementally developing Clojure code that directly interfaces MXNet core using [JNI](https://en.wikipedia.org/wiki/Java_Native_Interface). (A short sketch of what this layering looks like in practice is included in the Getting Involved section below.) - -For a **video introduction**, see [Clojure MXNet with Carin Meier - Clojure Virtual Meetup](https://www.crowdcast.io/e/clojure-mxnet-with-carin) (setup instructions from 20:49). - -## Current State and Plans - -The Clojure MXNet package is currently treated as *user-contributed code* within MXNet, as can be seen from its placement under `contrib` in the source tree. This means that it should first undergo a stabilization period and receive feedback from users before it can graduate to a fully integrated and supported part of MXNet. - -That said, because it closely tracks the Scala package, Clojure MXNet can be expected to have a similar level of maturity and stability regarding the low-level functionality. It is mostly in the hand-written Java interop part of the Clojure wrapper where bugs are more likely to be encountered. Such bugs tend to be fixed rather quickly once they are known and their origin is clear (see also [Getting Involved](#getting-involved)). - -For an overview of the development status and open problems, please refer to [Clojure Package Contribution Needs](https://cwiki.apache.org/confluence/display/MXNET/Clojure+Package+Contribution+Needs). - -## Getting Involved - -By far the best way to get involved with this project is to install the Clojure MXNet package, run the examples, play around, build new things with it, and get back to the development team with feedback! Your input can not only help to identify current issues, but also guide the future development of the Clojure package by pointing out must-have features that are currently missing, or by identifying usability or performance problems of high impact. - -There are two main ways of reaching out to other users and the package maintainers: - -- If you have a question or general feedback, or you encountered a problem but are not sure if it's a bug or a misunderstanding, then the *Apache Slack* (channels `#mxnet` and `#mxnet-scala`) is the best place to turn. To join, [ask for an invitation](https://mxnet.apache.org/community/contribute.html#slack) at `dev@mxnet.apache.org`. -- If you found a bug, are missing an important feature, or want to give feedback directly relevant to development, please head over to the MXNet [GitHub issue page](https://github.com/apache/incubator-mxnet/issues) and create a new issue. If the issue is specific to the Clojure package, consider using a title starting with `[Clojure]` to make it easily discoverable among the many other, mostly generic issues. - -Of course, contributions to code or documentation are also more than welcome! Please check out the [Clojure Package Contribution Needs](https://cwiki.apache.org/confluence/display/MXNET/Clojure+Package+Contribution+Needs) to get an idea about where and how to contribute code. 
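As promised above, here is a minimal sketch of the Clojure-over-Scala layering in action. It is assembled from this package's own infer example (see `examples/infer/objectdetector`); the model prefix and image path are placeholders for files fetched by that example's `get_ssd_data.sh` script.

```clojure
;; A minimal sketch, assembled from the package's infer example; the model
;; prefix and image path below are placeholders for files fetched by the
;; example's get_ssd_data.sh script.
(ns sketch.detect
  (:require [org.apache.clojure-mxnet.context :as context]
            [org.apache.clojure-mxnet.dtype :as dtype]
            [org.apache.clojure-mxnet.infer :as infer]
            [org.apache.clojure-mxnet.layout :as layout]))

(defn top-detections
  "Run SSD object detection on one image and return the top-k predictions.
  Each wrapper call delegates to the Scala inference API through Java interop."
  [model-prefix image-path topk]
  (let [descriptors [{:name "data"          ; the model's single input
                      :shape [1 3 512 512]  ; a batch of one 512x512 RGB image
                      :layout layout/NCHW
                      :dtype dtype/FLOAT32}]
        factory  (infer/model-factory model-prefix descriptors)
        detector (infer/create-object-detector
                   factory
                   {:contexts [(context/default-context)]})]
    (infer/detect-objects detector
                          (infer/load-image-from-file image-path)
                          topk)))

;; e.g. (top-detections "models/resnet50_ssd/resnet50_ssd_model" "images/dog.jpg" 3)
```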
- -For a more comprehensive overview of different ways to contribute, see [Contributing to MXNet](https://mxnet.apache.org/community/contribute.html). - -## Getting Started - -The Clojure MXNet framework consists of a core C library, a Scala API that talks to the core through [JNI (Java Native Interface)](https://en.wikipedia.org/wiki/Java_Native_Interface) bindings, and finally a Clojure wrapper around the Scala API. - -Since the core contains native (compiled) code and is bundled with the language bindings, your hardware and OS determine which choices need to be made during installation. The following combinations of operating system and compute device are supported: - -- Linux CPU -- Linux GPU -- OSX CPU - -Currently there are no pre-built jars available. The jars that were previously available have been removed due to their inclusion of [ASF Category-X dependencies](https://www.apache.org/legal/resolved.html#category-x). Thus you should build MXNet and the Clojure package from source. - -**Note:** This guide assumes that you are familiar with the basics of creating Clojure projects and managing dependencies. See [here](https://github.com/technomancy/leiningen/blob/stable/doc/TUTORIAL.md) for the official Leiningen tutorial. - -### Building MXNet and the Clojure package from source - -The first step is to download the latest version from the [Download page](https://mxnet.apache.org/get_started/download). - -#### Building the core library - -Detailed instructions for building MXNet core from source can be found [in the MXNet installation documentation](https://mxnet.apache.org/install/index.html). The relevant sections are: - -- For Ubuntu Linux: [CUDA Dependencies](https://mxnet.apache.org/get_started/ubuntu_setup#cuda-dependencies) and [Building MXNet from Source](https://mxnet.apache.org/get_started/ubuntu_setup#build-mxnet-from-source) -- For Mac OSX: [Build the Shared Library](https://mxnet.apache.org/get_started/osx_setup.html#build-the-shared-library) - -In particular, ignore all of the language-interface-specific sections. - -The outcome of this step will be a shared library `lib/libmxnet.so` that is used in the next step. - -#### Building the Scala jar - -- Ensure you have JDK 8 on your system. Later versions may produce cryptic build errors mentioning `scala.reflect.internal.MissingRequirementError`. -- Build and install the Scala package in your local Maven directory using the following commands: - - ```bash - cd scala-package - mvn install - ``` - -#### Building the Clojure jar - -- Enter the `contrib/clojure-package` directory. -- Run `lein test`. All the tests should run without error. -- Run `lein install` to build and install the Clojure jar locally. - -To run examples, you can now use `lein run` in any of the example directories, e.g., `examples/imclassification`. You can also specify the compute device, e.g., `lein run :cpu 2` (for 2 CPUs) or `lein run :gpu` (for 1 GPU). - -## Docker Files - -There are Dockerfiles available as well. - -- [Community Provided by Magnet](https://hub.docker.com/u/magnetcoop/) -- [MXNet CI](https://github.com/apache/incubator-mxnet/blob/master/ci/docker/Dockerfile.build.ubuntu_cpu) and the install scripts - - [Ubuntu core](https://github.com/apache/incubator-mxnet/blob/master/ci/docker/install/ubuntu_core.sh) - - [Ubuntu Scala](https://github.com/apache/incubator-mxnet/blob/master/ci/docker/install/ubuntu_scala.sh) - - [Ubuntu Clojure](https://github.com/apache/incubator-mxnet/blob/master/ci/docker/install/ubuntu_clojure.sh) - -## Need Help? 
- -If you are having trouble getting started or have a question, feel free to reach out at: - -- Clojurian Slack #mxnet channel. To join, go to [http://clojurians.net/](http://clojurians.net/). -- Apache Slack #mxnet and #mxnet-scala channels. To join this Slack, send an email to dev@mxnet.apache.org. -- Create an issue on [https://github.com/apache/incubator-mxnet/issues](https://github.com/apache/incubator-mxnet/issues). - - -## Examples -There are quite a few examples in the examples directory. To use them, run `lein install` in the main project, then `cd` into the example project of interest. - -There is a README in every directory outlining instructions. - -A good place to get started is the module example. -Do `lein run` for the CPU version or `lein run :gpu` for GPU. - -## Generating documentation - -To generate API docs, run `lein codox`. The HTML docs will be generated in the `target/docs` directory. - -## Code Coverage - -To run the code coverage tool, run `lein cloverage`. - -## Tools to keep style consistent - -To keep the style consistent for the project, we include scripts that make it easier. -There are two scripts in the base of the project and in each example. - -To run them, see the following files: `lein-cljfmt-check` and `lein-cljfmt-fix`. -The first command checks whether the code needs to be updated to reflect the community style guide. -The second command applies the changes and fixes any inconsistent indentation in place. Running it is recommended -before submitting a new pull request so we can keep the style consistent throughout the project. - -## FAQ - -**Why build on the Scala package?** - -The motivation section addresses this, but the main reason is leverage: building on the great work that the Scala package has already done. - -**How can I tell if the GPU is being used?** - -As long as a `Context.gpu()` is passed in the code as the context, the GPU should be used (CUDA then selects the best algorithms for it). - -This command can be very handy too: - -`nvidia-smi --query-gpu=timestamp,name,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used --format=csv -l 5 -timestamp, name, utilization.gpu [%], utilization.memory [%], memory.total [MiB], memory.free [MiB], memory.used [MiB]` - -**Supported APIs** -There are 3 high-level APIs supported in MXNet: Model/FeedForward, Module, and Gluon. The Module API is supported in the Clojure package because of the existing support for it in the Scala package. The Module API is very similar to the Gluon API, and examples of its usage can be found in the examples directory (a schematic sketch also appears just before the Special Thanks section below). The Model/FeedForward API is deprecated. - -Gluon support will come later and may or may not be built on the Scala Gluon API (when it lands there). - -## Architecture & Design - -See the Confluence page: https://cwiki.apache.org/confluence/display/MXNET/MXNet+Clojure - -## Building and Deploying Jars - -The release process for deploying the Clojure jars is on the [Apache MXNet developer wiki](https://cwiki.apache.org/confluence/display/MXNET/Clojure+Release+Process). 
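To make the Supported APIs answer above more concrete, the following is a schematic sketch of the Module API workflow. The `symbol` and `module` namespaces are the ones used by the package's module example; the exact option keys are illustrative, and `train-iter`/`eval-iter` are assumed to be MXNet data iterators created elsewhere.

```clojure
;; A schematic sketch of the Module API, following the package's module
;; example; option keys are illustrative, and train-iter/eval-iter are
;; assumed to be MXNet data iterators created elsewhere.
(ns sketch.module
  (:require [org.apache.clojure-mxnet.module :as m]
            [org.apache.clojure-mxnet.symbol :as sym]))

(def net
  ;; A small multi-layer perceptron built with the symbolic API.
  (as-> (sym/variable "data") data
    (sym/fully-connected "fc1" {:data data :num-hidden 128})
    (sym/activation "act1" {:data data :act-type "relu"})
    (sym/fully-connected "fc2" {:data data :num-hidden 10})
    (sym/softmax-output "softmax" {:data data})))

;; Wrap the symbol in a module and train it:
;; (-> (m/module net)
;;     (m/fit {:train-data train-iter :eval-data eval-iter :num-epoch 3}))
```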
- - -## Special Thanks -Special thanks to the people who provided testing and feedback to make this possible: - -- Chris Hodapp -- Iñaki Arenaza & Magnet Coop -- r0man -- Ben Kamphaus -- Sivaram Konanki -- Rustam Gilaztdinov -- Kamil Hryniewicz -- Christian Weilbach -- Burin Choomnuan -- Avram Aelony -- Jim Dunn -- Kovas Boguta diff --git a/contrib/clojure-package/ci-test.sh b/contrib/clojure-package/ci-test.sh deleted file mode 100755 index ba2d258e12d0..000000000000 --- a/contrib/clojure-package/ci-test.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -evx - -MXNET_HOME=${PWD} -cd ${MXNET_HOME}/contrib/clojure-package -lein test -lein cloverage --codecov diff --git a/contrib/clojure-package/examples/infer/objectdetector/.gitignore b/contrib/clojure-package/examples/infer/objectdetector/.gitignore deleted file mode 100644 index a1f0468035fe..000000000000 --- a/contrib/clojure-package/examples/infer/objectdetector/.gitignore +++ /dev/null @@ -1,13 +0,0 @@ -/target -/classes -/checkouts -/images -pom.xml -pom.xml.asc -*.jar -*.class -/.lein-* -/.nrepl-port -.hgignore -.hg/ -results \ No newline at end of file diff --git a/contrib/clojure-package/examples/infer/objectdetector/README.md b/contrib/clojure-package/examples/infer/objectdetector/README.md deleted file mode 100644 index 3a38e7d3907e..000000000000 --- a/contrib/clojure-package/examples/infer/objectdetector/README.md +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - - -# objectdetector - -Run object detection on images using the Clojure infer package. - -## Installation - -Before you run this example, make sure that you have the Clojure package installed. -In the main Clojure package directory, do `lein install`. Then you can run -`lein install` in this directory. - -## Usage - -``` -$ chmod +x scripts/get_ssd_data.sh -$ ./scripts/get_ssd_data.sh -$ -$ lein run -- --help -$ lein run -- -m models/resnet50_ssd/resnet50_ssd_model -i images/dog.jpg -d images/ -$ -$ # or the available lein alias -$ lein run-detector -$ -$ lein uberjar -$ java -jar target/objectdetector-0.1.0-SNAPSHOT-standalone.jar --help -$ java -jar target/objectdetector-0.1.0-SNAPSHOT-standalone.jar \ - -m models/resnet50_ssd/resnet50_ssd_model -i images/dog.jpg -d images/ -``` diff --git a/contrib/clojure-package/examples/infer/objectdetector/project.clj b/contrib/clojure-package/examples/infer/objectdetector/project.clj deleted file mode 100644 index 6b24a4b4f9b1..000000000000 --- a/contrib/clojure-package/examples/infer/objectdetector/project.clj +++ /dev/null @@ -1,27 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. 
See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(defproject objectdetector "0.1.0-SNAPSHOT" - :description "Object detection using infer with MXNet" - :repositories [["vendredi" "https://repository.hellonico.info/repository/hellonico/"]] - :plugins [[lein-cljfmt "0.5.7"]] - :aliases {"run-detector" ["run" "--" "-m" "models/resnet50_ssd/resnet50_ssd_model" "-i" "images/dog.jpg" "-d" "images/"]} - :dependencies [[org.clojure/clojure "1.9.0"] - [org.clojure/tools.cli "0.4.1"] - [org.apache.mxnet.contrib.clojure/clojure-mxnet "2.0.0-SNAPSHOT"]] - :main ^:skip-aot infer.objectdetector-example - :profiles {:uberjar {:aot :all}}) diff --git a/contrib/clojure-package/examples/infer/objectdetector/scripts/get_ssd_data.sh b/contrib/clojure-package/examples/infer/objectdetector/scripts/get_ssd_data.sh deleted file mode 100755 index 06440a28452e..000000000000 --- a/contrib/clojure-package/examples/infer/objectdetector/scripts/get_ssd_data.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -set -e - -MXNET_ROOT=$(cd "$(dirname $0)/.."; pwd) - -data_path=$MXNET_ROOT/models/resnet50_ssd - -image_path=$MXNET_ROOT/images - -if [ ! -d "$data_path" ]; then - mkdir -p "$data_path" -fi - -if [ ! -d "$image_path" ]; then - mkdir -p "$image_path" -fi - -if [ ! -f "$data_path/resnet50_ssd_model-0000.params" ]; then - wget https://s3.amazonaws.com/model-server/models/resnet50_ssd/resnet50_ssd_model-symbol.json -P $data_path - wget https://s3.amazonaws.com/model-server/models/resnet50_ssd/resnet50_ssd_model-0000.params -P $data_path - wget https://s3.amazonaws.com/model-server/models/resnet50_ssd/synset.txt -P $data_path -fi - -if [ ! 
-f "$image_path/000001.jpg" ]; then - cd $image_path - wget https://cloud.githubusercontent.com/assets/3307514/20012566/cbb53c76-a27d-11e6-9aaa-91939c9a1cd5.jpg -O 000001.jpg - wget https://cloud.githubusercontent.com/assets/3307514/20012567/cbb60336-a27d-11e6-93ff-cbc3f09f5c9e.jpg -O dog.jpg - wget https://cloud.githubusercontent.com/assets/3307514/20012563/cbb41382-a27d-11e6-92a9-18dab4fd1ad3.jpg -O person.jpg -fi - diff --git a/contrib/clojure-package/examples/infer/objectdetector/src/infer/objectdetector_example.clj b/contrib/clojure-package/examples/infer/objectdetector/src/infer/objectdetector_example.clj deleted file mode 100644 index 65d822ff36aa..000000000000 --- a/contrib/clojure-package/examples/infer/objectdetector/src/infer/objectdetector_example.clj +++ /dev/null @@ -1,158 +0,0 @@ -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns infer.objectdetector-example - (:require [org.apache.clojure-mxnet.context :as context] - [org.apache.clojure-mxnet.dtype :as dtype] - [org.apache.clojure-mxnet.image :as image] - [org.apache.clojure-mxnet.infer :as infer] - [org.apache.clojure-mxnet.layout :as layout] - [clojure.java.io :as io] - [clojure.string :as string] - [clojure.tools.cli :refer [parse-opts]]) - (:gen-class) - (:import (javax.imageio ImageIO) - (java.io File))) - -(defn check-valid-dir - "Check that the input directory exists" - [input-dir] - (let [dir (io/file input-dir)] - (and - (.exists dir) - (.isDirectory dir)))) - -(defn check-valid-file - "Check that the file exists" - [input-file] - (.exists (io/file input-file))) - -(def cli-options - [["-m" "--model-path-prefix PREFIX" "Model path prefix" - :default "models/resnet50_ssd/resnet50_ssd_model" - :validate [#(check-valid-file (str % "-symbol.json")) - "Model path prefix is invalid"]] - ["-i" "--input-image IMAGE" "Input image" - :default "images/dog.jpg" - :validate [check-valid-file "Input file not found"]] - ["-o" "--output-dir IMAGE_DIR" "Output directory. Defaults to results" - :default "results/" - :validate [check-valid-dir "Output directory not found"]] - ["-d" "--input-dir IMAGE_DIR" "Input directory" - :default "images/" - :validate [check-valid-dir "Input directory not found"]] - ["-h" "--help"]]) - - -(defn process-result! [output-dir image-path predictions] - (println "looking at image" image-path) - (println "predictions: " predictions) - (let [buf (ImageIO/read (new File image-path)) - width (.getWidth buf) - height (.getHeight buf) - names (mapv :class predictions) - coords (mapv (fn [prediction] - (-> prediction - (update :x-min #(* width %)) - (update :x-max #(* width %)) - (update :y-min #(* height %)) - (update :y-max #(* height %)))) - predictions) - new-img (-> (ImageIO/read (new File image-path)) - (image/draw-bounding-box! 
coords - {:stroke 2 - :names (mapv #(str (:class %) "-" (:prob %)) - predictions) - :transparency 0.5 - :font-size-mult 1.0}))] - (->> (string/split image-path #"\/") - last - (io/file output-dir) - (ImageIO/write new-img "jpg")))) - -(defn process-results [images results output-dir] - (doall (map (partial process-result! output-dir) images results))) - -(defn detect-single-image - "Detect objects in a single image and print the top-3 predictions" - ([detector input-image] (detect-single-image detector input-image "results")) - ([detector input-image output-dir] - (.mkdir (io/file output-dir)) - (let [image (infer/load-image-from-file input-image) - topk 3 - res (infer/detect-objects detector image topk)] - (process-results - [input-image] - res - output-dir) - (first res)))) - -(defn detect-images-in-dir - "Detect objects in all jpg images in the directory" - ([detector input-dir] (detect-images-in-dir detector input-dir "results")) - ([detector input-dir output-dir] - (.mkdir (io/file output-dir)) - (let [batch-size 20 - image-file-batches (->> input-dir - io/file - file-seq - sort - (filter #(.isFile %)) - (filter #(re-matches #".*\.jpg$" (.getPath %))) - (mapv #(.getPath %)) - (partition-all batch-size))] - (apply concat - (for [image-files image-file-batches] - (let [image-batch (infer/load-image-paths image-files) - topk 3 - res (infer/detect-objects-batch detector image-batch topk)] - (process-results - image-files - res - output-dir) - res)))))) - -(defn run-detector - "Runs an image detector based on the options provided" - [options] - (let [{:keys [model-path-prefix input-image input-dir output-dir - device device-id]} options - width 512 height 512 - descriptors [{:name "data" - :shape [1 3 height width] - :layout layout/NCHW - :dtype dtype/FLOAT32}] - factory (infer/model-factory model-path-prefix descriptors) - detector (infer/create-object-detector - factory - {:contexts [(context/default-context)]})] - (println "Output results to:" output-dir ":") - (println "Object detection on a single image") - (detect-single-image detector input-image output-dir) - (println "Object detection on images in a directory") - (detect-images-in-dir detector input-dir output-dir))) - -(defn -main - [& args] - (let [{:keys [options summary errors] :as opts} - (parse-opts args cli-options)] - (cond - (:help options) (println summary) - (some? errors) (println (string/join "\n" errors)) - :else (run-detector options)))) diff --git a/contrib/clojure-package/examples/infer/objectdetector/test/infer/objectdetector_example_test.clj b/contrib/clojure-package/examples/infer/objectdetector/test/infer/objectdetector_example_test.clj deleted file mode 100644 index 3d20c614918f..000000000000 --- a/contrib/clojure-package/examples/infer/objectdetector/test/infer/objectdetector_example_test.clj +++ /dev/null @@ -1,68 +0,0 @@ -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns infer.objectdetector-example-test - (:require [infer.objectdetector-example :refer [detect-single-image - detect-images-in-dir]] - [org.apache.clojure-mxnet.context :as context] - [org.apache.clojure-mxnet.dtype :as dtype] - [org.apache.clojure-mxnet.infer :as infer] - [org.apache.clojure-mxnet.layout :as layout] - [clojure.java.io :as io] - [clojure.java.shell :refer [sh]] - [clojure.test :refer :all])) - -(def model-dir "models/") -(def image-dir "images/") -(def model-path-prefix (str model-dir "resnet50_ssd/resnet50_ssd_model")) -(def image-file (str image-dir "dog.jpg")) - -(when-not (.exists (io/file (str model-path-prefix "-symbol.json"))) - (sh "./scripts/get_ssd_data.sh")) - -(defn create-detector [] - (let [descriptors [{:name "data" - :shape [1 3 512 512] - :layout layout/NCHW - :dtype dtype/FLOAT32}] - factory (infer/model-factory model-path-prefix descriptors)] - (infer/create-object-detector factory))) - -(deftest test-single-detection - (let [detector (create-detector) - predictions (detect-single-image detector image-file) - {:keys [class prob x-min x-max y-min y-max] :as pred} (first predictions)] - (clojure.pprint/pprint predictions) - (is (some? predictions)) - (is (= 3 (count predictions))) - (is (string? class)) - (is (< 0.8 prob)) - (is (every? #(< 0 % 1) [x-min x-max y-min y-max])) - (is (= #{"dog" "bicycle" "car"} (set (mapv :class predictions)))))) - -(deftest test-batch-detection - (let [detector (create-detector) - batch-predictions (detect-images-in-dir detector image-dir) - _ (clojure.pprint/pprint batch-predictions) - predictions (first batch-predictions) - {:keys [class prob x-min x-max y-min y-max] :as pred} (first predictions)] - (is (some? batch-predictions)) - (is (= 3 (count predictions))) - (is (string? class)) - (is (< 0.8 prob)) - (println [x-min x-max y-min y-max]) - (is (every? #(< 0 % 1) [x-min x-max y-min y-max])) - (is (= #{"dog" "person"} (set (mapv :class predictions)))))) diff --git a/contrib/clojure-package/examples/infer/predictor/.gitignore b/contrib/clojure-package/examples/infer/predictor/.gitignore deleted file mode 100644 index 35491f1a084a..000000000000 --- a/contrib/clojure-package/examples/infer/predictor/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -/target -/classes -/checkouts -/images -pom.xml -pom.xml.asc -*.jar -*.class -/.lein-* -/.nrepl-port -.hgignore -.hg/ diff --git a/contrib/clojure-package/examples/infer/predictor/README.md b/contrib/clojure-package/examples/infer/predictor/README.md deleted file mode 100644 index 9a844c4b9353..000000000000 --- a/contrib/clojure-package/examples/infer/predictor/README.md +++ /dev/null @@ -1,41 +0,0 @@ - - - - - - - - - - - - - - - - - -# predictor - -Run model prediction using the Clojure infer package. - -## Installation - -Before you run this example, make sure that you have the Clojure package installed. -In the main Clojure package directory, do `lein install`. Then you can run -`lein install` in this directory. 
- -## Usage - -``` -$ chmod +x scripts/get_resnet_18_data.sh -$ ./scripts/get_resnet_18_data.sh -$ -$ lein run -- --help -$ lein run -- -m models/resnet-18/resnet-18 -i images/kitten.jpg -$ -$ lein uberjar -$ java -jar target/predictor-0.1.0-SNAPSHOT-standalone.jar --help -$ java -jar target/predictor-0.1.0-SNAPSHOT-standalone.jar \ - -m models/resnet-18/resnet-18 -i images/kitten.jpg -``` diff --git a/contrib/clojure-package/examples/infer/predictor/project.clj b/contrib/clojure-package/examples/infer/predictor/project.clj deleted file mode 100644 index bd05805aa6d4..000000000000 --- a/contrib/clojure-package/examples/infer/predictor/project.clj +++ /dev/null @@ -1,25 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(defproject predictor "0.1.0-SNAPSHOT" - :description "Model prediction using infer with MXNet" - :plugins [[lein-cljfmt "0.5.7"]] - :dependencies [[org.clojure/clojure "1.9.0"] - [org.clojure/tools.cli "0.4.1"] - [org.apache.mxnet.contrib.clojure/clojure-mxnet "2.0.0-SNAPSHOT"]] - :main ^:skip-aot infer.predictor-example - :profiles {:uberjar {:aot :all}}) diff --git a/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_18_data.sh b/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_18_data.sh deleted file mode 100755 index cf85355fae2d..000000000000 --- a/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_18_data.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -evx - -MXNET_ROOT=$(cd "$(dirname $0)/.."; pwd) - -data_path=$MXNET_ROOT/models/resnet-18/ - -image_path=$MXNET_ROOT/images/ - -if [ ! -d "$data_path" ]; then - mkdir -p "$data_path" -fi - -if [ ! -d "$image_path" ]; then - mkdir -p "$image_path" -fi - -if [ ! 
-f "$data_path/resnet-18-0000.params" ]; then - wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/resnet-18-symbol.json -P $data_path - wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/resnet-18-0000.params -P $data_path - wget https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/synset.txt -P $data_path -fi - -if [ ! -f "$image_path/kitten.jpg" ]; then - wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/kitten.jpg -P $image_path -fi diff --git a/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_data.sh b/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_data.sh deleted file mode 100755 index fcef59bacc6f..000000000000 --- a/contrib/clojure-package/examples/infer/predictor/scripts/get_resnet_data.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -e - -MXNET_ROOT=$(cd "$(dirname $0)/.."; pwd) - -data_path=$MXNET_ROOT/models/resnet-152/ - -image_path=$MXNET_ROOT/images/ - -if [ ! -d "$data_path" ]; then - mkdir -p "$data_path" -fi - -if [ ! -d "$image_path" ]; then - mkdir -p "$image_path" -fi - -if [ ! -f "$data_path/resnet-152-0000.params" ]; then - wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/resnet-152-0000.params -P $data_path - wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/resnet-152-symbol.json -P $data_path - wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/synset.txt -P $data_path -fi - -if [ ! -f "$image_path/kitten.jpg" ]; then - wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/kitten.jpg -P $image_path -fi diff --git a/contrib/clojure-package/examples/infer/predictor/src/infer/predictor_example.clj b/contrib/clojure-package/examples/infer/predictor/src/infer/predictor_example.clj deleted file mode 100644 index 41a003a86ce0..000000000000 --- a/contrib/clojure-package/examples/infer/predictor/src/infer/predictor_example.clj +++ /dev/null @@ -1,107 +0,0 @@ -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns infer.predictor-example - (:require [org.apache.clojure-mxnet.context :as context] - [org.apache.clojure-mxnet.dtype :as dtype] - [org.apache.clojure-mxnet.image :as image] - [org.apache.clojure-mxnet.infer :as infer] - [org.apache.clojure-mxnet.layout :as layout] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [clojure.java.io :as io] - [clojure.string :refer [join split]] - [clojure.tools.cli :refer [parse-opts]]) - (:gen-class)) - -(defn check-valid-file - "Check that the file exists" - [input-file] - (.exists (io/file input-file))) - -(def cli-options - [["-m" "--model-path-prefix PREFIX" "Model path prefix" - :default "models/resnet-18/resnet-18" - :validate [#(check-valid-file (str % "-symbol.json")) - "Model path prefix is invalid"]] - ["-i" "--input-image IMAGE" "Image path" - :default "images/kitten.jpg" - :validate [check-valid-file "Input image path not found"]] - ["-h" "--help"]]) - -(defn print-prediction - [prediction] - (println (apply str (repeat 80 "="))) - (println prediction) - (println (apply str (repeat 80 "=")))) - -(defn preprocess - "Preprocesses image to make it ready for prediction" - [image-path width height] - (-> image-path - infer/load-image-from-file - (infer/reshape-image width height) - (infer/buffered-image-to-pixels [3 width height]) - (ndarray/expand-dims 0))) - -(defn do-inference - "Run inference using given predictor" - [predictor image] - (let [predictions (infer/predict-with-ndarray predictor [image])] - (first predictions))) - -(defn postprocess - [model-path-prefix predictions] - (let [synset-file (-> model-path-prefix - io/file - (.getParent) - (io/file "synset.txt")) - synset-names (split (slurp synset-file) #"\n") - [max-idx] (ndarray/->int-vec (ndarray/argmax predictions 1))] - (synset-names max-idx))) - -(defn run-predictor - "Runs an image classifier based on options provided" - [options] - (let [{:keys [model-path-prefix input-image]} options - width 224 - height 224 - descriptors [{:name "data" - :shape [1 3 height width] - :layout layout/NCHW - :dtype dtype/FLOAT32}] - factory (infer/model-factory model-path-prefix descriptors) - predictor (infer/create-predictor - factory - {:contexts [(context/default-context)]}) - image-ndarray (preprocess input-image width height) - predictions (do-inference predictor image-ndarray) - best-prediction (postprocess model-path-prefix predictions)] - (print-prediction best-prediction))) - -(defn -main - [& args] - (let [{:keys [options summary errors] :as opts} - (parse-opts args cli-options)] - (cond - (:help options) (println summary) - (some? 
errors) (println (join "\n" errors))
-      :else (run-predictor options))))
-
-(comment
-  (run-predictor {:model-path-prefix "models/resnet-18/resnet-18"
-                  :input-image "images/kitten.jpg"})
-
-  )
diff --git a/contrib/clojure-package/examples/neural-style/.gitignore b/contrib/clojure-package/examples/neural-style/.gitignore
deleted file mode 100644
index 4ec03eb2c0a3..000000000000
--- a/contrib/clojure-package/examples/neural-style/.gitignore
+++ /dev/null
@@ -1,13 +0,0 @@
-/target
-/classes
-/checkouts
-pom.xml
-pom.xml.asc
-*.jar
-*.class
-/.lein-*
-/.nrepl-port
-.hgignore
-.hg/
-output/*
-input/*
diff --git a/contrib/clojure-package/examples/neural-style/README.md b/contrib/clojure-package/examples/neural-style/README.md
deleted file mode 100644
index bbf1c737be84..000000000000
--- a/contrib/clojure-package/examples/neural-style/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# neural-style
-
-An example of neural style transfer.
-
-
-## Installation
-
-Before you run this example, make sure that you have the Clojure package installed.
-In the main clojure package directory, do `lein install`. Then you can run
-`lein install` in this directory.
-
-## Usage
-
-Use the `download.sh` script to get the params file and the input and output files.
-
-Then use `lein run`.
-
-The output images will be stored in the output directory. Please feel free to play with the params at the top of the file.
-
-
-This example currently only works on one device (CPU).
-
-If you are running on AWS you will need to set up X11 for graphics:
-`sudo apt install xauth x11-apps`
-
-Then log in again with `ssh -X -i creds ubuntu@yourinstance`.
-
-
-_Note: This example is not working all the way - it needs some debugging help_
-
-
diff --git a/contrib/clojure-package/examples/neural-style/download.sh b/contrib/clojure-package/examples/neural-style/download.sh
deleted file mode 100755
index 393d03b6163e..000000000000
--- a/contrib/clojure-package/examples/neural-style/download.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-set -evx
-
-mkdir -p model
-cd model
-wget https://github.com/dmlc/web-data/raw/master/mxnet/neural-style/model/vgg19.params
-cd ..
-
-mkdir -p input
-cd input
-wget https://github.com/dmlc/web-data/raw/master/mxnet/neural-style/input/IMG_4343.jpg
-wget https://github.com/dmlc/web-data/raw/master/mxnet/neural-style/input/starry_night.jpg
-cd ..
- -mkdir -p output diff --git a/contrib/clojure-package/examples/neural-style/project.clj b/contrib/clojure-package/examples/neural-style/project.clj deleted file mode 100644 index ff8784263207..000000000000 --- a/contrib/clojure-package/examples/neural-style/project.clj +++ /dev/null @@ -1,25 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(defproject neural-style "0.1.0-SNAPSHOT" - :description "Neural Style Transfer with MXNet" - :plugins [[lein-cljfmt "0.5.7"]] - :repositories [["vendredi" {:url "https://repository.hellonico.info/repository/hellonico/"}]] - :dependencies [[org.clojure/clojure "1.9.0"] - [org.apache.mxnet.contrib.clojure/clojure-mxnet "2.0.0-SNAPSHOT"] - [origami "4.0.0-3"]] - :main neural-style.core) diff --git a/contrib/clojure-package/examples/neural-style/src/neural_style/core.clj b/contrib/clojure-package/examples/neural-style/src/neural_style/core.clj deleted file mode 100644 index aa4c44717561..000000000000 --- a/contrib/clojure-package/examples/neural-style/src/neural_style/core.clj +++ /dev/null @@ -1,239 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns neural-style.core - (:require [org.apache.clojure-mxnet.context :as context] - [org.apache.clojure-mxnet.executor :as executor] - [org.apache.clojure-mxnet.lr-scheduler :as lr-scheduler] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.optimizer :as opt] - [org.apache.clojure-mxnet.random :as random] - [org.apache.clojure-mxnet.shape :as mx-shape] - [org.apache.clojure-mxnet.symbol :as sym] - [clojure.java.io :as io] - [clojure.java.shell :refer [sh]] - [opencv4.core :as cv] - [opencv4.utils :as cvu] - [neural-style.model-vgg-19 :as model-vgg-19]) - (:gen-class));; An Implementation of the paper A Neural Algorithm of Artistic Style - ;;by Leon A. Gatys, Alexander S. 
Ecker, and Matthias Bethge - -(when-not (.exists (io/file "input")) - (do (println "Retrieving data...") (sh "./download.sh"))) - -(def content-image "input/IMG_4343.jpg") -(def style-image "input/starry_night.jpg") -(def model-path "model/vgg19.params") -(def max-long-edge 600) ;; resize the content image -(def style-weight 1) ;; the weight for the style image -(def content-weight 3) ;; the weight for the content image -(def blur-radius 5) ;; the blur filter radius -(def output-dir "output") -(def lr 10.0) ;; the learning rate -(def tv-weight 0.01) ;; the magnitude on the tv loss -(def num-epochs 1000) - -;;;; -; IMAGE MANIPULATION -;;;; - -(defn image->ndarray - "normalize the pixels for vgg19" - [simg] - (let [h (.height simg) w (.width simg)] - (println "The nd image size is:" {:height h :width w}) - (-> simg - (cv/convert-to! cv/CV_8SC3 0.5) - (cv/add! (cv/new-scalar -103.939 -116.779 -123.68) ) - (cvu/mat->flat-rgb-array) - (ndarray/array [1 (.channels simg) h w])))) - -(defn ndarray->image [img] - (let [nd (ndarray/->vec img) - [_ _ h w] (mx-shape/->vec (ndarray/shape img)) - to-cv1 (fn [bytes h w] (cv/>> (cv/new-mat h w cv/CV_8S) (byte-array bytes))) - byte-arrays (reverse (partition (* h w) nd)) - mats (map #(to-cv1 % h w) byte-arrays)] - (-> mats - (cv/merge! (cv/new-mat h w cv/CV_8SC3)) - (cv/add! (cv/new-scalar 103.939 116.779 123.68)) - (cv/convert-to! cv/CV_8UC3 2)))) - -(defn preprocess-content-image [path short-edge] - (-> path - (cv/imread) - (#(cvu/resize-by % (/ short-edge (.width %)))) - (image->ndarray))) - -(defn preprocess-style-image [path shape-vec] - (let [[_ _ h w] shape-vec] - (println "The style image is size " {:height h :width w}) - (-> path - (cv/imread) - (cv/resize! (cv/new-size w h)) - (image->ndarray)))) - -(defn save-image [img filename radius blur?] - (println "Saving image:" filename) - (-> img - (ndarray->image) - (#(if blur? (cv/blur! % (cv/new-size blur-radius blur-radius)) %)) - (cv/imwrite filename))) - -;;;; -; TRAINING -;;;; - -(defn style-gram-symbol [input-size style] - (let [[_ output-shape _] (sym/infer-shape style {:data [1 3 (first input-size) (second input-size)]}) - output-shapes (mx-shape/->vec output-shape) - {:keys [gram-list grad-scale]} (doall (reduce - (fn [result i] - (let [shape (get output-shapes i) - [s0 s1 s2 s3] shape - x (sym/reshape {:data (sym/get style i) :target-shape [s1 (* s2 s3)]}) - ;; use fully connected to quickly do dot(x x^T) - gram (sym/fully-connected {:data x :weight x :no-bias true :num-hidden s1})] - (-> result - (update :gram-list conj gram) - (update :grad-scale conj (* s1 s2 s3 s1))))) - {:gram-list [] :grad-scale []} - (range (count (sym/list-outputs style)))))] - {:gram (sym/group (into [] gram-list)) :g-scale grad-scale})) - -(defn get-loss [gram content] - (let [gram-loss (doall (mapv (fn [i] - (let [gvar (sym/variable (str "target_gram_" i))] - (sym/sum (sym/square (sym/- gvar (sym/get gram i)))))) - (range (count (sym/list-outputs gram))))) - cvar (sym/variable "target_content") - content-loss (sym/sum (sym/square (sym/- cvar content)))] - {:style-loss (sym/group gram-loss) :content-loss content-loss})) - -(defn get-tv-grad-executor [img ctx tv-weight] - (when (pos? 
tv-weight) - (let [img-shape (mx-shape/->vec (ndarray/shape img)) - n-channel (get img-shape 1) - s-img (sym/variable "img") - s-kernel (sym/variable "kernel") - channels (sym/split {:data s-img :axis 1 :num-outputs n-channel}) - out (sym/concat (doall (mapv (fn [i] - (sym/convolution {:data (sym/get channels i) :weight s-kernel - :num-filter 1 :kernel [3 3] :pad [1 1] :no-bias true :stride [1 1]})) - (range n-channel)))) - kernel (ndarray/* (ndarray/array [0 -1 0 -1 4 -1 0 -1 0] [1 1 3 3] {:ctx ctx}) - 0.8) - out (ndarray/* out tv-weight)] - (sym/bind out ctx {"img" img "kernel" kernel})))) - -(defn train - ([devs] (train devs 30)) - ([devs n-epochs] - (let [dev (first devs) - content-np (preprocess-content-image content-image max-long-edge) - content-np-shape (mx-shape/->vec (ndarray/shape content-np)) - style-np (preprocess-style-image style-image content-np-shape) - size [(get content-np-shape 2) (get content-np-shape 3)] - {:keys [style content]} (model-vgg-19/get-symbol) - {:keys [gram g-scale]} (style-gram-symbol size style) - model-executor (model-vgg-19/get-executor gram content model-path size dev) - - _ (ndarray/set (:data model-executor) style-np) - _ (executor/forward (:executor model-executor)) - - style-array (mapv #(ndarray/copy %) (:style model-executor)) - - mode-executor nil - _ (ndarray/set (:data model-executor) content-np) - _ (executor/forward (:executor model-executor)) - content-array (ndarray/copy (:content model-executor)) - - {:keys [style-loss content-loss]} (get-loss gram content) - model-executor (model-vgg-19/get-executor style-loss content-loss model-path size dev) - - grad-array (-> (doall (mapv (fn [i] - (do - (ndarray/set (get (:arg-map model-executor) (str "target_gram_" i)) (get style-array i)) - (ndarray/* (ndarray/ones [1] {:ctx dev}) (/ style-weight (get g-scale i))))) - (range (count style-array)))) - (conj (ndarray/* (ndarray/ones [1] {:ctx dev}) content-weight))) - - _ (ndarray/copy-to content-array (get (:arg-map model-executor) "target_content")) - - ;;;train - - ;;initialize with random noise - img (ndarray/- (random/uniform 0 255 content-np-shape {:ctx dev}) 128) - ;;; img (random/uniform -0.1 0.1 content-np-shape dev) - ;; img content-np - lr-sched (lr-scheduler/factor-scheduler 10 0.9) - - _ (save-image content-np (str output-dir "/input.png") blur-radius false) - _ (save-image style-np (str output-dir "/style.png") blur-radius false) - - optimizer (opt/adam {:learning-rate lr - :wd 0.005 - :lr-scheduler lr-sched}) - optim-state (opt/create-state optimizer 0 img) - - _ (println "Starting training....") - old-img (ndarray/copy-to img dev) - clip-norm (apply * (mx-shape/->vec (ndarray/shape img))) - tv-grad-executor (get-tv-grad-executor img dev tv-weight) - eps 0.0 - e 0] - (doseq [i (range n-epochs)] - (ndarray/set (:data model-executor) img) - (-> (:executor model-executor) - (executor/forward) - (executor/backward grad-array)) - - (let [g-norm (ndarray/to-scalar (ndarray/norm (:data-grad model-executor)))] - (if (> g-norm clip-norm) - (ndarray/set (:data-grad model-executor) (ndarray/* (:data-grad model-executor) (/ clip-norm g-norm))))) - - (if tv-grad-executor - (do - (executor/forward tv-grad-executor) - (opt/update optimizer 0 - img - (ndarray/+ (:data-grad model-executor) (first (executor/outputs tv-grad-executor))) - optim-state)) - (opt/update optimizer 0 img (:data-grad model-executor) optim-state)) - - (let [eps (ndarray/to-scalar - (ndarray/div (ndarray/norm (ndarray/- old-img img)) - (ndarray/norm img)))] - (println "Epoch " i 
"relative change " eps) - (when (zero? (mod i 2)) - (save-image (ndarray/copy img) (str output-dir "/out_" i ".png") blur-radius true))) - (ndarray/set old-img img)) - (save-image (ndarray/copy img) (str output-dir "/final.png") 0 false) - (ndarray->image img)))) - -(defn -main [& args] - ;;; Note this only works on cpu right now - (let [[dev dev-num] args - devs (if (= dev ":gpu") - (mapv #(context/gpu %) (range (Integer/parseInt (or dev-num "1")))) - (mapv #(context/cpu %) (range (Integer/parseInt (or dev-num "1")))))] - (println "Running with context devices of" devs) - (train devs))) - -(comment - - (train [(context/cpu)])) diff --git a/contrib/clojure-package/examples/neural-style/src/neural_style/model_vgg_19.clj b/contrib/clojure-package/examples/neural-style/src/neural_style/model_vgg_19.clj deleted file mode 100644 index e7efa8c93ebb..000000000000 --- a/contrib/clojure-package/examples/neural-style/src/neural_style/model_vgg_19.clj +++ /dev/null @@ -1,98 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - -(ns neural-style.model-vgg-19 - (:require [org.apache.clojure-mxnet.executor :as executor] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.symbol :as sym])) - -(defn get-symbol [] - (let [data (sym/variable "data") - conv1-1 (sym/convolution "conv1_1" {:data data :num-filter 64 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu1-1 (sym/activation "relu1_1" {:data conv1-1 :act-type "relu"}) - conv1-2 (sym/convolution "conv1-2" {:data relu1-1 :num-filter 64 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu1-2 (sym/activation "relu1_2" {:data conv1-2 :act-type "relu"}) - pool1 (sym/pooling "pool1" {:data relu1-2 :pad [0 0] :kernel [2 2] :stride [2 2] :pool-type "avg"}) - conv2-1 (sym/convolution "conv2_1" {:data pool1 :num-filter 128 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu2-1 (sym/activation "relu2_1" {:data conv2-1 :act-type "relu"}) - conv2-2 (sym/convolution "conv2_2" {:data relu2-1 :num-filter 128 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu2-2 (sym/activation "relu2_2" {:data conv2-2 :act-type "relu"}) - pool2 (sym/pooling "pool2" {:data relu2-2 :pad [0 0] :kernel [2 2] :stride [2 2] :pool-type "avg"}) - conv3-1 (sym/convolution "conv3_1" {:data pool2 :num-filter 256 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu3-1 (sym/activation "relu3_1" {:data conv3-1 :act-type "relu"}) - conv3-2 (sym/convolution "conv3_2" {:data relu3-1 :num-filter 256 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu3-2 (sym/activation "relu3_2" {:data conv3-2 :act-type "relu"}) - conv3-3 (sym/convolution "conv3_3" {:data relu3-2 :num-filter 256 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu3-3 (sym/activation "relu3_3" {:data conv3-3 :act-type "relu"}) - conv3-4 (sym/convolution "conv3_4" {:data relu3-3 :num-filter 256 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu3-4 (sym/activation "relu3_4" {:data conv3-4 :act-type "relu"}) - pool3 (sym/pooling "pool3" {:data relu3-4 :pad [0 0] :kernel [2 2] :stride [2 2] :pool-type "avg"}) - conv4-1 (sym/convolution "conv4_1" {:data pool3 :num-filter 512 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu4-1 (sym/activation "relu4_1" {:data conv4-1 :act-type "relu"}) - conv4-2 (sym/convolution "conv4_2" {:data relu4-1 :num-filter 512 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu4-2 (sym/activation "relu4_2" {:data conv4-2 :act-type "relu"}) - conv4-3 (sym/convolution "conv4_3" {:data relu4-2 :num-filter 512 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu4-3 (sym/activation "relu4_3" {:data conv4-3 :act-type "relu"}) - conv4-4 (sym/convolution "conv4_4" {:data relu4-3 :num-filter 512 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu4-4 (sym/activation "relu4_4" {:data conv4-4 :act-type "relu"}) - pool4 (sym/pooling "pool4" {:data relu4-4 :pad [0 0] :kernel [2 2] :stride [2 2] :pool-type "avg"}) - conv5-1 (sym/convolution "conv5_1" {:data pool4 :num-filter 512 :pad [1 1] :kernel [3 3] :stride [1 1] - :no-bias false :workspace 1024}) - relu5-1 (sym/activation "relu5_1" {:data conv5-1 :act-type "relu"}) - - ;;; style and content layers - style (sym/group [relu1-1 relu2-1 relu3-1 relu4-1 relu5-1]) - content (sym/group [relu1-1])] - {:style style :content 
content})) - -(defn get-executor [style content model-path input-size ctx] - (let [out (sym/group [style content]) - ;; make executor - [arg-shapes output-shapes aux-shapes] (sym/infer-shape out {:data [1 3 (first input-size) (second input-size)]}) - arg-names (sym/list-arguments out) - arg-map (zipmap arg-names (map #(ndarray/zeros % {:ctx ctx}) arg-shapes)) - grad-map {"data" (ndarray/copy-to (get arg-map "data") ctx)} - ;; init with pre-training weights - ;;; I'm not sure this is being set properly - pretrained (do (ndarray/load model-path)) - arg-map (into {} (mapv (fn [[k v]] - (let [pretrained-key (str "arg:" k)] - (if (and (get pretrained pretrained-key) (not= "data" k)) - (do (ndarray/set v (get pretrained pretrained-key)) - [k v]) - [k v]))) - arg-map)) - exec (sym/bind out ctx arg-map grad-map) - outs (executor/outputs exec)] - {:executor exec - :data (get arg-map "data") - :data-grad (get grad-map "data") - :style (into [] (butlast outs)) - :content (last outs) - :arg-map arg-map})) diff --git a/contrib/clojure-package/examples/neural-style/test/neural_style/vgg_19_test.clj b/contrib/clojure-package/examples/neural-style/test/neural_style/vgg_19_test.clj deleted file mode 100644 index 83be4a88bb3b..000000000000 --- a/contrib/clojure-package/examples/neural-style/test/neural_style/vgg_19_test.clj +++ /dev/null @@ -1,50 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns neural-style.vgg-19-test - (:require - [clojure.test :refer :all] - [opencv4.core :as cv] - [clojure.java.io :as io] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.context :as context] - [neural-style.core :as neural])) - -(defn pic-to-ndarray-vec[path] - (-> path - cv/imread - neural/image->ndarray)) - -(defn last-modified-check[x] - (let [t (- (System/currentTimeMillis) (.lastModified x)) ] - (if (> 10000 t) ; 10 seconds - x - (throw (Exception. (str "Generated File Too Old: (" t " ms) [" x "]")))))) - -(defn latest-pic-to-ndarray-vec[folder] - (->> folder - io/as-file - (.listFiles) - (sort-by #(.lastModified %)) - last - (last-modified-check) - (.getPath) - pic-to-ndarray-vec)) - -(deftest vgg-19-test - (neural/train [(context/cpu)] 3) - (is (not (nil? 
(latest-pic-to-ndarray-vec "output")))))
\ No newline at end of file
diff --git a/contrib/clojure-package/examples/pre-trained-models/.gitignore b/contrib/clojure-package/examples/pre-trained-models/.gitignore
deleted file mode 100644
index c53038ec0e3d..000000000000
--- a/contrib/clojure-package/examples/pre-trained-models/.gitignore
+++ /dev/null
@@ -1,11 +0,0 @@
-/target
-/classes
-/checkouts
-pom.xml
-pom.xml.asc
-*.jar
-*.class
-/.lein-*
-/.nrepl-port
-.hgignore
-.hg/
diff --git a/contrib/clojure-package/examples/pre-trained-models/README.md b/contrib/clojure-package/examples/pre-trained-models/README.md
deleted file mode 100644
index 6a3b25e3388d..000000000000
--- a/contrib/clojure-package/examples/pre-trained-models/README.md
+++ /dev/null
@@ -1,68 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# pre-trained-models
-
-This shows examples of how to use the pretrained models. MXNet comes with a number of pretrained models;
-see https://mxnet.apache.org/model_zoo/index.html
-
-
-## Installation
-
-Before you run this example, make sure that you have the Clojure package installed.
-In the main clojure package directory, do `lein install`. Then you can run
-`lein install` in this directory.
-
-
-## Predict Image from pretrained models
-
-From the example on https://mxnet.apache.org/tutorials/python/predict_image.html
-
-
-The `predict-image.clj` file loads up the pre-trained resnet-152 model and uses it to predict the classifications from images on the internet.
-
-*To use, run `download-resnet-152.sh` to get the model params and json.*
-
-Run the example with the available Leiningen alias:
-
-```
-$ lein predict-image
-#
-# or with your own image:
-#
-$ lein predict-image <image-url>
-```
-
-
-## Fine Tune from pretrained models
-
-From the finetune example https://mxnet.apache.org/faq/finetune.html
-
-The `fine-tune.clj` file loads up the smaller resnet-50 model and adds a fine-tune layer to reclassify the Caltech image set.
-
-*To use, run `download-resnet-50.sh` to get the model params and json, and `download-caltech.sh` to get the pregenerated rec files.*
-
-You can run the fine-tune example by doing `lein run` (CPU).
-
-You can control the devices you run on by doing:
-
-`lein run :cpu 2` - This will run on 2 cpu devices
-`lein run :gpu 1` - This will run on 1 gpu device
-`lein run :gpu 2` - This will run on 2 gpu devices
-
-
-
diff --git a/contrib/clojure-package/examples/pre-trained-models/download-caltech.sh b/contrib/clojure-package/examples/pre-trained-models/download-caltech.sh
deleted file mode 100755
index 8ad8acaffe56..000000000000
--- a/contrib/clojure-package/examples/pre-trained-models/download-caltech.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
- -set -evx - -mkdir -p caltech-256 -cd caltech-256 -wget http://data.mxnet.io/data/caltech-256/caltech-256-60-train.rec -wget http://data.mxnet.io/data/caltech-256/caltech-256-60-val.rec -cd .. diff --git a/contrib/clojure-package/examples/pre-trained-models/download-resnet-152.sh b/contrib/clojure-package/examples/pre-trained-models/download-resnet-152.sh deleted file mode 100755 index b3aa7668f751..000000000000 --- a/contrib/clojure-package/examples/pre-trained-models/download-resnet-152.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -evx - -mkdir -p model -cd model -wget http://data.mxnet.io/models/imagenet-11k/resnet-152/resnet-152-symbol.json -wget http://data.mxnet.io/models/imagenet-11k/resnet-152/resnet-152-0000.params -wget http://data.mxnet.io/models/imagenet-11k/synset.txt -cd .. - diff --git a/contrib/clojure-package/examples/pre-trained-models/download-resnet-50.sh b/contrib/clojure-package/examples/pre-trained-models/download-resnet-50.sh deleted file mode 100755 index 3286f51e8e18..000000000000 --- a/contrib/clojure-package/examples/pre-trained-models/download-resnet-50.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -evx - -mkdir -p model -cd model -wget http://data.mxnet.io/models/imagenet/resnet/50-layers/resnet-50-symbol.json -wget http://data.mxnet.io/models/imagenet/resnet/50-layers/resnet-50-0000.params -cd .. - diff --git a/contrib/clojure-package/examples/pre-trained-models/project.clj b/contrib/clojure-package/examples/pre-trained-models/project.clj deleted file mode 100644 index 6eb387db6862..000000000000 --- a/contrib/clojure-package/examples/pre-trained-models/project.clj +++ /dev/null @@ -1,26 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. 
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(defproject pre-trained-models "0.1.0-SNAPSHOT"
-  :description "Example of using pre-trained models with MXNet"
-  :plugins [[lein-cljfmt "0.5.7"]]
-  :repositories [["vendredi" {:url "https://repository.hellonico.info/repository/hellonico/"}]]
-  :aliases {"predict-image" ["run" "-m" "pre-trained-models.predict-image"]}
-  :dependencies [[org.clojure/clojure "1.9.0"]
-                 [org.apache.mxnet.contrib.clojure/clojure-mxnet "2.0.0-SNAPSHOT"]
-                 [origami "4.0.0-3"]]
-  :main pre-trained-models.fine-tune)
diff --git a/contrib/clojure-package/examples/pre-trained-models/src/pre_trained_models/predict_image.clj b/contrib/clojure-package/examples/pre-trained-models/src/pre_trained_models/predict_image.clj
deleted file mode 100644
index 4df641da70c1..000000000000
--- a/contrib/clojure-package/examples/pre-trained-models/src/pre_trained_models/predict_image.clj
+++ /dev/null
@@ -1,112 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns pre-trained-models.predict-image
-  (:require [clojure.java.io :as io]
-            [clojure.string :as string]
-            [org.apache.clojure-mxnet.module :as m]
-            [org.apache.clojure-mxnet.ndarray :as ndarray]
-            [org.apache.clojure-mxnet.shape :as mx-shape]
-            [org.apache.clojure-mxnet.symbol :as sym]
-            [opencv4.core :as cv]
-            [opencv4.utils :as cvu]))
-
-;; based on https://mxnet.incubator.apache.org/tutorials/python/predict_image.html
-
-;; run download-resnet-152.sh to get the model params and json
-
-(def model-dir "model")
-(def num-channels 3)
-(def h 224)
-(def w 224)
-
-(defn download [uri file]
-  (with-open [in (io/input-stream uri)
-              out (io/output-stream file)]
-    (io/copy in out)))
-
-(defn get-image [url show?]
-  (-> url
-      (cvu/mat-from-url)
-      (cv/resize! (cv/new-size h w))
-      (#(do (if show? (cvu/imshow %)) %))
-      (cv/convert-to! cv/CV_8SC3 0.5)
-      (cvu/mat->flat-rgb-array)
-      (ndarray/array [1 num-channels h w])))
-
-(defn predict [img-url show?]
-  (let [mod (m/load-checkpoint {:prefix (str model-dir "/resnet-152") :epoch 0})
-        labels (-> (slurp (str model-dir "/synset.txt"))
-                   (string/split #"\n"))
-        nd-img (get-image img-url show?)
- prob (-> mod - (m/bind {:for-training false :data-shapes [{:name "data" :shape [1 num-channels h w]}]}) - (m/forward {:data [nd-img]}) - (m/outputs) - (ffirst)) - prob-with-labels (mapv (fn [p l] {:prob p :label l}) - (ndarray/->vec prob) - labels)] - (->> (sort-by :prob prob-with-labels) - (reverse) - (take 5)))) - -(defn feature-extraction [] - (let [nd-img (get-image "http://animalsbirds.com/wp-content/uploads/2016/07/Animal-Cat-HD-Wallpapers.jpg" false) - mod (-> (m/load-checkpoint {:prefix (str model-dir "/resnet-152") :epoch 0}) - (m/bind {:for-training false :data-shapes [{:name "data" :shape [1 num-channels h w]}]})) - fe-sym (-> (m/symbol mod) - (sym/get-internals) - (sym/get "flatten0_output")) - fe-mod (-> (m/module fe-sym {:label-names nil}) - (m/bind {:for-training false :data-shapes [{:name "data" :shape [1 num-channels h w]}]}) - (m/init-params {:arg-params (m/arg-params mod) :aux-params (m/aux-params mod)}))] - (-> fe-mod - (m/forward {:data [nd-img]}) - (m/outputs) - (ffirst) - (ndarray/shape) - (mx-shape/->vec)))) - -(defn -main [& args] - (println - (predict - (or (first args) - "https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/doc/tutorials/python/predict_image/cat.jpg" ) - true))) - -(comment - - (predict "https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/doc/tutorials/python/predict_image/cat.jpg" true) - ;; ({:prob 0.69066674, :label "n02122948 kitten, kitty"} - ;; {:prob 0.04466057, :label "n01323155 kit"} - ;; {:prob 0.029682875, :label "n01318894 pet"} - ;; {:prob 0.028944906, :label "n02122878 tabby, queen"} - ;; {:prob 0.027530408, :label "n01322221 baby"}) - - (predict "http://thenotoriouspug.com/wp-content/uploads/2015/01/Pug-Cookie-1920x1080-1024x576.jpg" true) - ;; ({:prob 0.44412872, :label "n02110958 pug, pug-dog"} - ;; {:prob 0.093773685, - ;; :label "n13905792 wrinkle, furrow, crease, crinkle, seam, line"} - ;; {:prob 0.02395489, :label "n01318894 pet"} - ;; {:prob 0.023736171, - ;; :label "n02084732 pooch, doggie, doggy, barker, bow-wow"} - ;; {:prob 0.023329297, :label "n02083346 canine, canid"}) - - (feature-extraction) ;=> [1 2048] -) - diff --git a/contrib/clojure-package/examples/profiler/.gitignore b/contrib/clojure-package/examples/profiler/.gitignore deleted file mode 100644 index c53038ec0e3d..000000000000 --- a/contrib/clojure-package/examples/profiler/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -/target -/classes -/checkouts -pom.xml -pom.xml.asc -*.jar -*.class -/.lein-* -/.nrepl-port -.hgignore -.hg/ diff --git a/contrib/clojure-package/examples/profiler/README.md b/contrib/clojure-package/examples/profiler/README.md deleted file mode 100644 index 162d012cf5dc..000000000000 --- a/contrib/clojure-package/examples/profiler/README.md +++ /dev/null @@ -1,32 +0,0 @@ - - - - - - - - - - - - - - - - - -# profiler - -An example of using the profiler. - - -## Installation - -Before you run this example, make sure that you have the clojure package installed. -In the main clojure package directory, do `lein install`. Then you can run -`lein install` in this directory. 
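-
-The example drives the profiler through the `org.apache.clojure-mxnet.profiler`
-namespace. A minimal sketch of the calls it makes (names taken from
-`src/profiler/core.clj`; `:profile-symbolic` corresponds to the default
-"symbolic" profiler mode set at the top of that file):
-
-```
-(require '[org.apache.clojure-mxnet.profiler :as profiler])
-
-;; point the profiler at an output file and choose what to record,
-;; then bracket the iterations of interest with "run"/"stop"
-(profiler/profiler-set-config {:filename "profile-matmul-20iter.json"
-                               :profile-symbolic 1})
-(profiler/profiler-set-state "run")
-;; ... do forward passes here ...
-(profiler/profiler-set-state "stop")
-```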
-
-## Usage
-
-To run, use `lein run`.
-A file named `profile-matmul-20iter.json` will be generated in the directory afterwards.
diff --git a/contrib/clojure-package/examples/profiler/project.clj b/contrib/clojure-package/examples/profiler/project.clj
deleted file mode 100644
index 8428c5f200fd..000000000000
--- a/contrib/clojure-package/examples/profiler/project.clj
+++ /dev/null
@@ -1,22 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(defproject profiler "0.1.0-SNAPSHOT"
-  :plugins [[lein-cljfmt "0.5.7"]]
-  :dependencies [[org.clojure/clojure "1.9.0"]
-                 [org.apache.mxnet.contrib.clojure/clojure-mxnet "2.0.0-SNAPSHOT"]]
-  :main profiler.core)
diff --git a/contrib/clojure-package/examples/profiler/src/profiler/core.clj b/contrib/clojure-package/examples/profiler/src/profiler/core.clj
deleted file mode 100644
index 67ba0feb8a9b..000000000000
--- a/contrib/clojure-package/examples/profiler/src/profiler/core.clj
+++ /dev/null
@@ -1,59 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns profiler.core
-  (:require [org.apache.clojure-mxnet.context :as context]
-            [org.apache.clojure-mxnet.executor :as executor]
-            [org.apache.clojure-mxnet.ndarray :as ndarray]
-            [org.apache.clojure-mxnet.profiler :as profiler]
-            [org.apache.clojure-mxnet.random :as random]
-            [org.apache.clojure-mxnet.symbol :as sym])
-  (:gen-class))
-
-(def profiler-mode "symbolic") ;; can be symbolic, imperative, api, mem
-(def output-path ".") ;; the profile file output directory
-(def profiler-name "profile-matmul-20iter.json")
-(def iter-num 5)
-(def begin-profiling-iter 0)
-(def end-profiling-iter 1)
-(def gpu? false)
-
-(defn run []
-  (let [shape [4096 4096]
-        path (str output-path "/" profiler-name)
-        ctx (if gpu?
(context/gpu) (context/cpu)) - kwargs {:filename path - (keyword (str "profile-" profiler-mode)) 1} - C (sym/dot "dot" [(sym/variable "A") (sym/variable "B")]) - a (random/uniform -1.0 1.0 shape {:ctx ctx}) - b (random/uniform -1.0 1.0 shape {:ctx ctx}) - exec (sym/bind C ctx {"A" [a] "B" [b]})] - - (profiler/profiler-set-config kwargs) - (doseq [i (range iter-num)] - (when (= i begin-profiling-iter) - (profiler/profiler-set-state "run")) - (when (= i end-profiling-iter) - (profiler/profiler-set-state "stop")) - (-> exec - (executor/forward) - (executor/outputs) - (first) - (ndarray/wait-to-read))))) - -(defn -main [& args] - (run)) diff --git a/contrib/clojure-package/examples/profiler/test/core_test.clj b/contrib/clojure-package/examples/profiler/test/core_test.clj deleted file mode 100644 index 9f03c94713b9..000000000000 --- a/contrib/clojure-package/examples/profiler/test/core_test.clj +++ /dev/null @@ -1,30 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns core_test - (:require - [profiler.core :as profiler] - [clojure.java.io :as io] - [clojure.test :refer :all])) - -(defn count-lines[file] - (count (line-seq (io/reader (io/as-file file))))) - -(deftest run-profiler - (profiler/run) - (let [new-file (clojure.java.io/as-file profiler/profiler-name)] - (is (.exists new-file)))) diff --git a/contrib/clojure-package/examples/scripts/get_cifar_data.sh b/contrib/clojure-package/examples/scripts/get_cifar_data.sh deleted file mode 100755 index 12b3770c2700..000000000000 --- a/contrib/clojure-package/examples/scripts/get_cifar_data.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -set -evx - -if [ ! -z "$MXNET_HOME" ]; then - data_path="$MXNET_HOME" -else - data_path="./data" -fi - -if [ ! -d "$data_path" ]; then - mkdir -p "$data_path" -fi - -cifar_data_path="$data_path/cifar10.zip" -if [ ! 
-f "$cifar_data_path" ]; then - wget http://data.mxnet.io/mxnet/data/cifar10.zip -P $data_path - cd $data_path - unzip -u cifar10.zip -fi diff --git a/contrib/clojure-package/examples/scripts/get_mnist_data.sh b/contrib/clojure-package/examples/scripts/get_mnist_data.sh deleted file mode 100755 index 703ece207a1f..000000000000 --- a/contrib/clojure-package/examples/scripts/get_mnist_data.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -set -evx - -if [ ! -z "$MXNET_HOME" ]; then - data_path="$MXNET_HOME" -else - data_path="./data" -fi - -if [ ! -d "$data_path" ]; then - mkdir -p "$data_path" -fi - -mnist_data_path="$data_path/mnist.zip" -if [ ! -f "$mnist_data_path" ]; then - wget http://data.mxnet.io/mxnet/data/mnist.zip -P $data_path - cd $data_path - unzip -u mnist.zip -fi diff --git a/contrib/clojure-package/examples/tutorial/.gitignore b/contrib/clojure-package/examples/tutorial/.gitignore deleted file mode 100644 index 338927e78384..000000000000 --- a/contrib/clojure-package/examples/tutorial/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -/target -/classes -/checkouts -pom.xml -pom.xml.asc -*.jar -*.class -/.lein-* -/.nrepl-port -.hgignore -.hg/ -filename \ No newline at end of file diff --git a/contrib/clojure-package/examples/tutorial/README.md b/contrib/clojure-package/examples/tutorial/README.md deleted file mode 100644 index 27a9f54155ae..000000000000 --- a/contrib/clojure-package/examples/tutorial/README.md +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - - - - - - - - - - - - -# tutorial - - -## Installation - -Before you run this example, make sure that you have the clojure package installed. -In the main clojure package directory, do `lein install`. Then you can run -`lein install` in this directory. - -## Usage - -Tutorials are based on the Scala api examples here https://mxnet.apache.org/api/scala/ndarray.html - -Start with ndarray then move onto symbol and module diff --git a/contrib/clojure-package/examples/tutorial/project.clj b/contrib/clojure-package/examples/tutorial/project.clj deleted file mode 100644 index 743df5e62e7b..000000000000 --- a/contrib/clojure-package/examples/tutorial/project.clj +++ /dev/null @@ -1,27 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. 
You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(defproject tutorial "0.1.0-SNAPSHOT" - :description "MXNET tutorials" - :plugins [[lein-cljfmt "0.5.7"]] - :dependencies [[org.clojure/clojure "1.9.0"] - [org.apache.mxnet.contrib.clojure/clojure-mxnet "2.0.0-SNAPSHOT"] - - ;; Uncomment the one appropriate for your machine & configuration: - #_[org.apache.mxnet.contrib.clojure/clojure-mxnet-linux-cpu "1.4.0"] - #_[org.apache.mxnet.contrib.clojure/clojure-mxnet-linux-gpu "1.4.0"] - #_[org.apache.mxnet.contrib.clojure/clojure-mxnet-osx-cpu "1.4.0"]]) diff --git a/contrib/clojure-package/examples/tutorial/src/tutorial/introduction.clj b/contrib/clojure-package/examples/tutorial/src/tutorial/introduction.clj deleted file mode 100644 index 9b2bf7533572..000000000000 --- a/contrib/clojure-package/examples/tutorial/src/tutorial/introduction.clj +++ /dev/null @@ -1,34 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns tutorial.introduction - (:require [org.apache.clojure-mxnet.ndarray :as ndarray])) - -;; MXNet supports the Clojure programming language. The MXNet Clojure package brings flexible and efficient GPU computing and state-of-art deep learning to Clojure. It enables you to write seamless tensor/matrix computation with multiple GPUs in Clojure. It also lets you construct and customize the state-of-art deep learning models in Clojure, and apply them to tasks, such as image classification and data science challenges. - -;; You can perform tensor or matrix computation in pure Clojure: - -(def arr (ndarray/ones [2 3])) - -arr ;=> #object[org.apache.mxnet.NDArray 0x597d72e "org.apache.mxnet.NDArray@e35c3ba9"] - -(ndarray/shape-vec arr) ;=> [2 3] - -(-> (ndarray/* arr 2) - (ndarray/->vec)) ;=> [2.0 2.0 2.0 2.0 2.0 2.0] - -(ndarray/shape-vec (ndarray/* arr 2)) ;=> [2 3] diff --git a/contrib/clojure-package/examples/tutorial/src/tutorial/kvstore.clj b/contrib/clojure-package/examples/tutorial/src/tutorial/kvstore.clj deleted file mode 100644 index b0795f7d256c..000000000000 --- a/contrib/clojure-package/examples/tutorial/src/tutorial/kvstore.clj +++ /dev/null @@ -1,94 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. 
-;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns tutorial.kvstore - "A REPL tutorial of the MXNet Clojure API for KVStore, based on - https://mxnet.apache.org/api/clojure/kvstore.html" - (:require [org.apache.clojure-mxnet.kvstore :as kvstore] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.context :as context])) - - -;;;; Basic Push and Pull - -;; KVStore provides basic push and pull operations over multiple -;; devices (GPUs or CPUs) on a single machine. - -;;; Initialization -;; Let’s consider a simple example. It initializes a (`string`, -;; `NDArray`) pair into the store, and then pulls the value out. - -(def kv (kvstore/create "local")) ; create a local kvstore -(def shape [2 3]) -;; init the kvstore with a vector of keys (strings) and ndarrays -(kvstore/init kv ["3"] [(ndarray/* (ndarray/ones shape) 2)]) -(def a (ndarray/zeros shape)) -(kvstore/pull kv ["3"] [a]) -(ndarray/->vec a) ;=> [2.0 2.0 2.0 2.0 2.0 2.0] - - -;;; Push, Aggregation, and Updater -;; For any key that’s been initialized, you can push a new value with -;; the same shape to the key, as follows: -(kvstore/push kv ["3"] [(ndarray/* (ndarray/ones shape) 8)]) -(kvstore/pull kv ["3"] [a]) -(ndarray/->vec a) ;=> [8.0 8.0 8.0 8.0 8.0 8.0] - -;; The data that you want to push can be stored on any -;; device. Furthermore, you can push multiple values into the same -;; key, where KVStore first sums all of these values, and then pushes -;; the aggregated value, as follows: - -;; (Here we use multiple CPUs.) -(def cpus [(context/cpu 0) (context/cpu 1) (context/cpu 2)]) -(def b [(ndarray/ones shape {:ctx (nth cpus 0)}) - (ndarray/ones shape {:ctx (nth cpus 1)}) - (ndarray/ones shape {:ctx (nth cpus 2)})]) -(kvstore/push kv ["3" "3" "3"] b) -(kvstore/pull kv "3" a) -(ndarray/->vec a) ;=> [3.0 3.0 3.0 3.0 3.0 3.0] - -;;; Pull -;; You’ve already seen how to pull a single key-value pair. Similar to -;; the way that you use the push command, you can pull the value into -;; several devices with a single call. -(def b [(ndarray/ones shape {:ctx (context/cpu 0)}) - (ndarray/ones shape {:ctx (context/cpu 1)})]) -(kvstore/pull kv ["3" "3"] b) -(map ndarray/->vec b) ;=> ([3.0 3.0 3.0 3.0 3.0 3.0] [3.0 3.0 3.0 3.0 3.0 3.0])
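- -;; A quick sanity check of the push aggregation described above (a -;; sketch; the result assumes the default local updater, which stores -;; the sum of all arrays pushed to a key): -(kvstore/push kv ["3" "3"] [(ndarray/* (ndarray/ones shape) 4) - (ndarray/* (ndarray/ones shape) 4)]) -(kvstore/pull kv ["3"] [a]) -(ndarray/->vec a) ;=> [8.0 8.0 8.0 8.0 8.0 8.0]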
- - -;;;; List Key-Value Pairs - -;; All of the operations that we’ve discussed so far are performed on -;; a single key. KVStore also provides an interface for generating a -;; list of key-value pairs. For a single device, use the following: - -(def ks ["5" "7" "9"]) -(kvstore/init kv ks [(ndarray/ones shape) - (ndarray/ones shape) - (ndarray/ones shape)]) -(kvstore/push kv ks [(ndarray/ones shape) - (ndarray/ones shape) - (ndarray/ones shape)]) -(def b [(ndarray/zeros shape) - (ndarray/zeros shape) - (ndarray/zeros shape)]) -(kvstore/pull kv ks b) -(map ndarray/->vec b) ;=> ([1.0 1.0 1.0 1.0 1.0 1.0] [1.0 1.0 1.0 1.0 1.0 1.0] [1.0 1.0 1.0 1.0 1.0 1.0]) - - diff --git a/contrib/clojure-package/examples/tutorial/src/tutorial/ndarray.clj b/contrib/clojure-package/examples/tutorial/src/tutorial/ndarray.clj deleted file mode 100644 index cf9de98e775c..000000000000 --- a/contrib/clojure-package/examples/tutorial/src/tutorial/ndarray.clj +++ /dev/null @@ -1,123 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns tutorial.ndarray - "A REPL tutorial of the MXNet Clojure API for NDArray, based on - https://mxnet.apache.org/api/clojure/ndarray.html" - (:require [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.context :as context])) - -;; The NDArray API contains tensor operations similar to -;; `numpy.ndarray`. The syntax is also similar, except for some -;; additional calls for dealing with I/O and multiple devices. - - -;;;; Create NDArray - -;; Create an MXNet NDArray as follows: -(def a (ndarray/zeros [100 50])) ; all-zero array of dimension 100 x 50 -(def b (ndarray/ones [256 32 128 1])) ; all-one array of the given dimensions -(def c (ndarray/array [1 2 3 4 5 6] [2 3])) ; array with the given contents in shape 2 x 3 - -;;; There are also ways to convert an NDArray to a vec or to get the -;;; shape as an object or vec: -(ndarray/->vec c) ;=> [1.0 2.0 3.0 4.0 5.0 6.0] -(ndarray/shape c) ;=> #object[org.apache.mxnet.Shape 0x583c865 "(2,3)"] -(ndarray/shape-vec c) ;=> [2 3] - - - -;; There are some basic NDArray operations, like arithmetic and slice -;; operations. - - -;;;; NDArray Operations: Arithmetic - -(def a (ndarray/ones [1 5])) -(def b (ndarray/ones [1 5])) -(ndarray/->vec (ndarray/+ a b)) ;=> [2.0 2.0 2.0 2.0 2.0] - -;; the original ndarrays are unchanged -(ndarray/->vec a) ;=> [1.0 1.0 1.0 1.0 1.0] -(ndarray/->vec b) ;=> [1.0 1.0 1.0 1.0 1.0] - -;; in-place operators -(ndarray/+= a b) -(ndarray/->vec a) ;=> [2.0 2.0 2.0 2.0 2.0] - -;; Other arithmetic operations are similar.
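- -;; For instance (a sketch following the same pattern; the operator names -;; come from the same ndarray namespace): -(ndarray/->vec (ndarray/- a b)) ;=> [1.0 1.0 1.0 1.0 1.0] ; a is all 2.0 after the += above -(ndarray/->vec (ndarray/* a b)) ;=> [2.0 2.0 2.0 2.0 2.0]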
- - -;;;; NDArray Operations: Slice - -(def a (ndarray/array [1 2 3 4 5 6] [3 2])) -(def a1 (ndarray/slice a 1)) -(ndarray/shape-vec a1) ;=> [1 2] -(ndarray/->vec a1) ;=> [3.0 4.0] - -(def a2 (ndarray/slice a 1 3)) -(ndarray/shape-vec a2) ;=> [2 2] -(ndarray/->vec a2) ;=> [3.0 4.0 5.0 6.0] - - -;;;; NDArray Operations: Dot Product - -(def arr1 (ndarray/array [1 2] [1 2])) -(def arr2 (ndarray/array [3 4] [2 1])) -(def res (ndarray/dot arr1 arr2)) -(ndarray/shape-vec res) ;=> [1 1] -(ndarray/->vec res) ;=> [11.0] - - -;;;; Save and Load NDArray - -;; You can use MXNet functions to save and load a map of NDArrays from -;; file systems, as follows: - -(ndarray/save "filename" {"arr1" arr1 "arr2" arr2}) -;; (you can also use an "s3://path" or an "hdfs" path) - -;; (ndarray/save "/Users/daveliepmann/src/coursework/mxnet-clj-tutorials/abc" -;; {"arr1" arr1 "arr2" arr2}) - -;; To load: -(def from-file (ndarray/load "filename")) - -from-file ;=> {"arr1" #object[org.apache.mxnet.NDArray 0x6115ba61 "org.apache.mxnet.NDArray@43d85753"], "arr2" #object[org.apache.mxnet.NDArray 0x374b5eff "org.apache.mxnet.NDArray@5c93def4"]} - -;; A benefit of the `save` and `load` interface is that the same format -;; works across all MXNet language bindings. It also already supports -;; Amazon S3 and HDFS. - - -;;;; Multi-Device Support - -;; Device information is stored in the `mxnet.Context` structure. When -;; creating an NDArray in MXNet, you can use the context argument (the -;; default is the CPU context) to create arrays on specific devices, as -;; follows: - -(def cpu-a (ndarray/zeros [100 200])) -(ndarray/context cpu-a) ;=> #object[org.apache.mxnet.Context 0x3f376123 "cpu(0)"] - -(comment - (def gpu-b (ndarray/zeros [100 200] {:ctx (context/gpu 0)})) ;; to use with gpu -) - -;; Currently, operations among arrays that live in different contexts -;; are not allowed. To combine such arrays, first use the `copy-to` -;; function to copy the content onto the same device, then continue the -;; computation.
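- -;; For example (a sketch; it assumes a GPU build and device, and that -;; `copy-to` is the generated wrapper for the Scala `copyTo`): -(comment - (def gpu-a (ndarray/copy-to cpu-a (context/gpu 0))) - (ndarray/context gpu-a) ;=> gpu(0) -)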
diff --git a/contrib/clojure-package/examples/tutorial/src/tutorial/symbol.clj b/contrib/clojure-package/examples/tutorial/src/tutorial/symbol.clj deleted file mode 100644 index 0dc45dc095ef..000000000000 --- a/contrib/clojure-package/examples/tutorial/src/tutorial/symbol.clj +++ /dev/null @@ -1,106 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns tutorial.symbol - "A REPL tutorial of the MXNet Clojure Symbolic API, based on - https://mxnet.apache.org/api/clojure/symbol.html" - (:require [org.apache.clojure-mxnet.executor :as executor] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.symbol :as sym] - [org.apache.clojure-mxnet.context :as context])) - - -;;;; How to Compose Symbols - -;; The symbolic API provides a way to configure computation -;; graphs. You can configure the graphs either at the level of neural -;; network layer operations or as fine-grained operations. - -;; The basic arithmetic operators (plus, minus, div, multiplication) -;; work as expected. The following example creates a computation graph -;; that adds two inputs together. - -(def a (sym/variable "a")) -(def b (sym/variable "b")) -(def c (sym/+ a b)) - - -;;;; More Complicated Compositions - -;; MXNet provides well-optimized symbols for layers commonly used in -;; deep learning (see src/operator). We can also define new operators -;; in Python. The following example first performs an element-wise add -;; between two symbols, then feeds the result to the fully connected -;; operator: - -(def lhs (sym/variable "data1")) -(def rhs (sym/variable "data2")) -(def net (sym/fully-connected "fc1" {:data (sym/+ lhs rhs) - :num-hidden 128})) -(sym/list-arguments net) ;=> ["data1" "data2" "fc1_weight" "fc1_bias"] - - -;;;; Group Multiple Symbols - -;; To construct neural networks with multiple loss layers, we can use -;; `group` to group multiple symbols together (see the sketch at the -;; end of this tutorial). - -;;;; Serialization - -;; You can use the `save` and `load` functions to serialize Symbol -;; objects as JSON. These functions have the advantage of being -;; language-agnostic and cloud-friendly. You can also get a JSON -;; string directly using `to-json`. - -;; The following example shows how to save a symbol to a file, load it -;; back, and compare two symbols using a JSON string. You can also -;; save to S3. - -(def a (sym/variable "a")) -(def b (sym/variable "b")) -(def c (sym/+ a b)) -(sym/save c "symbol-c.json") -(def c2 (sym/load "symbol-c.json")) -(= (sym/to-json c) (sym/to-json c2)) ;=> true - - -;;;; Executing Symbols - -;; To execute symbols, we first need to define the data that they -;; should run on. We can do this with the `bind` function, which -;; returns an executor. We then use `forward` to evaluate and -;; `outputs` to get the results. - -(def a (sym/variable "a")) -(def b (sym/variable "b")) -(def c (sym/+ a b)) - -(def ex - (sym/bind c {"a" (ndarray/ones [2 2]) - "b" (ndarray/ones [2 2])})) - -(-> (executor/forward ex) - (executor/outputs) - (first) - (ndarray/->vec)) ;=> [2.0 2.0 2.0 2.0] - -(comment - ;; We can evaluate the same symbol on a GPU with different data. - ;; (To do this you must have the correct native library jar defined as a dependency.) - (def ex (sym/bind c (context/gpu 0) {"a" (ndarray/ones [2 2]) - "b" (ndarray/ones [2 2])})) -)
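- -;; A sketch of the `group` call mentioned above (the exact arity is an -;; assumption here: we assume `group` accepts a vector of symbols, -;; mirroring the Scala `Symbol.Group`): -(comment - (def fc1 (sym/fully-connected "fc1" {:data (sym/variable "data") :num-hidden 128})) - (def fc2 (sym/fully-connected "fc2" {:data (sym/variable "data") :num-hidden 10})) - (def multi-out (sym/group [fc1 fc2])) ; one symbol carrying the outputs of both -)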
diff --git a/contrib/clojure-package/integration-tests.sh b/contrib/clojure-package/integration-tests.sh deleted file mode 100755 index 39cea6536c83..000000000000 --- a/contrib/clojure-package/integration-tests.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -evx - -MXNET_HOME=${PWD} -cd ${MXNET_HOME}/contrib/clojure-package -# first build the package and install it -lein install - -# then run through the examples -EXAMPLES_HOME=${MXNET_HOME}/contrib/clojure-package/examples -# use an AWK pattern to skip the blacklisted examples -TEST_CASES=`find ${EXAMPLES_HOME} -name test | awk '!/dontselect1|cnn-text-classification|gan|neural-style|pre-trained-models|profiler/'` -for i in $TEST_CASES ; do - cd ${i} && lein test -done diff --git a/contrib/clojure-package/lein-cljfmt-check b/contrib/clojure-package/lein-cljfmt-check deleted file mode 100755 index 93f2d3ca822d..000000000000 --- a/contrib/clojure-package/lein-cljfmt-check +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env sh - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -vx - -lein cljfmt check `find ./src/org/apache/clojure_mxnet -depth 1 -type f -iname "*.clj" | grep -v /gen/` -lein cljfmt check `find ./test -type f -iname "*.clj" | grep -v /test/good-test` -lein cljfmt check `find ./examples -type f -iname "*.clj" | grep -v /scripts/` diff --git a/contrib/clojure-package/lein-cljfmt-fix b/contrib/clojure-package/lein-cljfmt-fix deleted file mode 100755 index 8c35892d3beb..000000000000 --- a/contrib/clojure-package/lein-cljfmt-fix +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env sh - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License.
- -set -evx - -lein cljfmt fix `find ./src/org/apache/clojure_mxnet -type f -iname "*.clj" | grep -v /gen/` -lein cljfmt fix `find ./test -type f -iname "*.clj" | grep -v /test/good-test` -lein cljfmt fix `find ./examples -type f -iname "*.clj" | grep -v /scripts/` diff --git a/contrib/clojure-package/project.clj b/contrib/clojure-package/project.clj deleted file mode 100644 index 0b0cf0b5a91d..000000000000 --- a/contrib/clojure-package/project.clj +++ /dev/null @@ -1,47 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(defproject org.apache.mxnet.contrib.clojure/clojure-mxnet "2.0.0-SNAPSHOT" - :description "Clojure package for MXNet" - :url "https://github.com/apache/incubator-mxnet" - :license {:name "Apache License" - :url "http://www.apache.org/licenses/LICENSE-2.0"} - :dependencies [[org.clojure/clojure "1.9.0"] - [t6/from-scala "0.3.0"] - - ;; To use with nightly snapshot - ;[org.apache.mxnet/mxnet-full_2.11-osx-x86_64-cpu ""] - ;[org.apache.mxnet/mxnet-full_2.11-linux-x86_64-cpu ""] - ;[org.apache.mxnet/mxnet-full_2.11-linux-x86_64-gpu " string - (clojure.string/replace #"(\s+)([A-Z][a-z]+)" "$1-$2") - (clojure.string/replace #"([A-Z]+)([A-Z][a-z]+)" "$1-$2") - (clojure.string/replace #"([a-z0-9])([A-Z])" "$1-$2") - (clojure.string/lower-case) - (clojure.string/replace #"\_" "-") - (clojure.string/replace #"\/" "div"))) - -(defn transform-param-names [coerce-fn parameter-types] - (->> parameter-types - (map str) - (map (fn [x] (or (coerce-fn x) x))) - (map (fn [x] (last (clojure.string/split x #"\.")))))) - -(defn symbol-transform-param-name [parameter-types] - (transform-param-names util/symbol-param-coerce parameter-types)) - -(defn ndarray-transform-param-name [parameter-types] - (transform-param-names util/ndarray-param-coerce parameter-types)) - -(defn has-variadic? 
[params] - (->> params - (map str) - (filter (fn [s] (re-find #"\&" s))) - count - pos?)) - -(defn increment-param-name [pname] - (if-let [num-str (re-find #"-\d" pname)] - (str - (first (clojure.string/split pname #"-")) - "-" - (inc (Integer/parseInt (last (clojure.string/split num-str #"-"))))) - (str pname "-" 1))) - -(defn rename-duplicate-params [pnames] - (->> (reduce - (fn [pname-counts n] - (let [rn (if (pname-counts n) (str n "-" (pname-counts n)) n) - inc-pname-counts (update-in pname-counts [n] (fnil inc 0))] - (update-in inc-pname-counts [:params] conj rn))) - {:params []} - pnames) - :params)) - -(defn get-public-no-default-methods [obj] - (->> (r/reflect obj) - :members - (map #(into {} %)) - (filter #(-> % :flags :public)) - (remove #(re-find #"org\$apache\$mxnet" (str (:name %)))) - (remove #(re-find #"\$default" (str (:name %)))))) - -(defn get-public-to-gen-methods [public-to-hand-gen public-no-default] - (let [public-to-hand-gen-names - (into #{} (mapv (comp str :name) public-to-hand-gen))] - (remove #(-> % :name str public-to-hand-gen-names) public-no-default))) - -(defn public-by-name-and-param-count [public-reflect-info] - (->> public-reflect-info - (group-by :name) - (map (fn [[k v]] [k (group-by #(count (:parameter-types %)) v)])) - (into {}))) - -(def license - (str - ";; Licensed to the Apache Software Foundation (ASF) under one or more\n" - ";; contributor license agreements. See the NOTICE file distributed with\n" - ";; this work for additional information regarding copyright ownership.\n" - ";; The ASF licenses this file to You under the Apache License, Version 2.0\n" - ";; (the \"License\"); you may not use this file except in compliance with\n" - ";; the License. You may obtain a copy of the License at\n" - ";;\n" - ";; http://www.apache.org/licenses/LICENSE-2.0\n" - ";;\n" - ";; Unless required by applicable law or agreed to in writing, software\n" - ";; distributed under the License is distributed on an \"AS IS\" BASIS,\n" - ";; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" - ";; See the License for the specific language governing permissions and\n" - ";; limitations under the License.\n" - ";;\n")) - -(defn write-to-file [functions ns-gen fname] - (with-open [w (clojure.java.io/writer fname)] - (.write w ns-gen) - (.write w "\n\n") - (.write w ";; Do not edit - this is auto-generated") - (.write w "\n\n") - (.write w license) - (.write w "\n\n") - (.write w "\n\n") - (doseq [f functions] - (let [fstr (-> f - clojure.pprint/pprint - with-out-str - (clojure.string/replace #"\\n\\n" "\n"))] - (.write w fstr)) - (.write w "\n")))) - -(defn remove-prefix - [prefix s] - (let [regex (re-pattern (str prefix "(.*)")) - replacement "$1"] - (clojure.string/replace s regex replacement))) - -(defn in-namespace-random? [op-name] - (or (clojure.string/includes? op-name "random_") - (clojure.string/includes? op-name "sample_")))
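- -;; A few REPL sanity checks for the helpers above (a sketch; the results -;; follow directly from the definitions): -(comment - (clojure-case "FullyConnected") ;=> "fully-connected" - (increment-param-name "data") ;=> "data-1" - (increment-param-name "data-1") ;=> "data-2" - (rename-duplicate-params ["data" "label" "data"]) ;=> ["data" "label" "data-1"] - (in-namespace-random? "sample_normal") ;=> true -)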
op-name "sample_") :random - :else :core)) - -;;;;;;; Common operations - -(def libinfo (Base/_LIB)) - -(def op-names - (let [l ($ ListBuffer/empty)] - (.mxListAllOpNames libinfo l) - (->> l - (util/buffer->vec) - (remove #(or (= "Custom" %) (re-matches #"^_.*" %)))))) - -(defn- parse-arg-type [s] - (let [[_ var-arg-type _ set-arg-type arg-spec _ type-req _ default-val] (re-find #"(([\w-\[\]\s]+)|\{([^}]+)\})\s*(\([^)]+\))?(,\s*(optional|required)(,\s*default=(.*))?)?" s)] - {:type (clojure.string/trim (or set-arg-type var-arg-type)) - :spec arg-spec - :optional? (or (= "optional" type-req) - (= "boolean" var-arg-type)) - :default default-val - :orig s})) - -(defn- get-op-handle [op-name] - (let [ref (new Base$RefLong 0)] - (do (.nnGetOpHandle libinfo op-name ref) - (.value ref)))) - -(defn gen-op-info [op-name] - (let [handle (get-op-handle op-name) - name (new Base$RefString nil) - desc (new Base$RefString nil) - key-var-num-args (new Base$RefString nil) - num-args (new Base$RefInt 0) - arg-names ($ ListBuffer/empty) - arg-types ($ ListBuffer/empty) - arg-descs ($ ListBuffer/empty)] - (do (.mxSymbolGetAtomicSymbolInfo libinfo - handle - name - desc - num-args - arg-names - arg-types - arg-descs - key-var-num-args) - {:fn-name (clojure-case (.value name)) - :fn-description (.value desc) - :args (mapv (fn [t n d] (assoc t :name n :description d)) - (mapv parse-arg-type (util/buffer->vec arg-types)) - (mapv clojure-case (util/buffer->vec arg-names)) - (util/buffer->vec arg-descs)) - :key-var-num-args (clojure-case (.value key-var-num-args))}))) - -;;;;;;; Symbol - -(def symbol-public-no-default - (get-public-no-default-methods Symbol)) - -(into #{} (mapcat :parameter-types symbol-public-no-default)) -;; #{java.lang.Object scala.collection.Seq scala.Option long double scala.collection.immutable.Map int ml.dmlc.mxnet.Executor float ml.dmlc.mxnet.Context java.lang.String scala.Enumeration$Value ml.dmlc.mxnet.Symbol int<> ml.dmlc.mxnet.Symbol<> ml.dmlc.mxnet.Shape java.lang.String<>} - -(def symbol-hand-gen-set - #{"scala.Option" - "scala.Enumeration$Value" - "org.apache.mxnet.Context" - "scala.Tuple2" - "scala.collection.Traversable"}) - -;;; min and max have a conflicting arity of 2 with the auto gen signatures -(def symbol-filter-name-set #{"max" "min"}) - -(defn is-symbol-hand-gen? [info] - (or - (->> (:name info) - str - (get symbol-filter-name-set)) - (->> (map str (:parameter-types info)) - (into #{}) - (clojure.set/intersection symbol-hand-gen-set) - count - pos?))) - -(def symbol-public-to-hand-gen - (filter is-symbol-hand-gen? symbol-public-no-default)) -(def symbol-public-to-gen - (get-public-to-gen-methods symbol-public-to-hand-gen - symbol-public-no-default)) - - -(count symbol-public-to-hand-gen) ;=> 35 mostly bind! -(count symbol-public-to-gen) ;=> 307 - -(into #{} (map :name symbol-public-to-hand-gen)) -;;=> #{arange bind ones zeros simpleBind Variable} - - - -(defn symbol-vector-args [] - `(if (map? ~'kwargs-map-or-vec-or-sym) - (~'util/empty-list) - (~'util/coerce-param ~'kwargs-map-or-vec-or-sym #{"scala.collection.Seq"}))) - -(defn symbol-map-args [] - `(if (map? 
~'kwargs-map-or-vec-or-sym) - (util/convert-symbol-map ~'kwargs-map-or-vec-or-sym) - nil)) - - -(defn add-symbol-arities [params function-name] - (if (= ["sym-name" "kwargs-map" "symbol-list" "kwargs-map-1"] - (mapv str params)) - [`([~'sym-name ~'attr-map ~'kwargs-map] - (~function-name ~'sym-name (~'util/convert-symbol-map ~'attr-map) (~'util/empty-list) (~'util/convert-symbol-map ~'kwargs-map))) - `([~'sym-name ~'kwargs-map-or-vec-or-sym] - (~function-name ~'sym-name nil ~(symbol-vector-args) ~(symbol-map-args))) - `([~'kwargs-map-or-vec-or-sym] - (~function-name nil nil ~(symbol-vector-args) ~(symbol-map-args)))])) - -(defn gen-symbol-function-arity [op-name op-values function-name] - (mapcat - (fn [[param-count info]] - (let [targets (->> (mapv :parameter-types info) - (apply interleave) - (mapv str) - (partition (count info)) - (mapv set)) - pnames (->> (mapv :parameter-types info) - (mapv symbol-transform-param-name) - (apply interleave) - (partition (count info)) - (mapv #(clojure.string/join "-or-" %)) - (rename-duplicate-params) - (mapv symbol)) - coerced-params (mapv (fn [p t] `(~'util/nil-or-coerce-param ~(symbol (clojure.string/replace p #"\& " "")) ~t)) pnames targets) - params (if (= #{:public :static} (:flags (first info))) - pnames - (into ['sym] pnames)) - function-body (if (= #{:public :static} (:flags (first info))) - `(~'util/coerce-return (~(symbol (str "Symbol/" op-name)) ~@coerced-params)) - `(~'util/coerce-return (~(symbol (str "." op-name)) ~'sym ~@coerced-params) - ))] - (when (not (and (> param-count 1) (has-variadic? params))) - `[( - ~params - ~function-body - ) - ~@(add-symbol-arities params function-name)]))) - op-values)) - - -(def all-symbol-functions - (for [operation (sort (public-by-name-and-param-count symbol-public-to-gen))] - (let [[op-name op-values] operation - function-name (-> op-name - str - scala/decode-scala-symbol - clojure-case - symbol)] - `(~'defn ~function-name - ~@(remove nil? (gen-symbol-function-arity op-name op-values function-name)))))) - -(def symbol-gen-ns "(ns org.apache.clojure-mxnet.symbol - (:refer-clojure :exclude [* - + > >= < <= / cast concat identity flatten load max - min repeat reverse set sort take to-array empty sin - get apply shuffle ref]) - (:require [org.apache.clojure-mxnet.util :as util]) - (:import (org.apache.mxnet Symbol)))") - - -(defn generate-symbol-file [] - (println "Generating symbol file") - (write-to-file all-symbol-functions - symbol-gen-ns - "src/org/apache/clojure_mxnet/gen/symbol.clj")) - -;;;;;;; NDArray - - -(def ndarray-public-no-default - (get-public-no-default-methods NDArray)) - -(def ndarray-hand-gen-set - #{"org.apache.mxnet.NDArrayFuncReturn" - "org.apache.mxnet.Context" - "scala.Enumeration$Value" - "scala.Tuple2" - "scala.collection.Traversable"}) - -(defn is-ndarray-hand-gen? [info] - (->> (map str (:parameter-types info)) - (into #{}) - (clojure.set/intersection ndarray-hand-gen-set) - count - pos?)) - -(def ndarray-public-to-hand-gen - (filter is-ndarray-hand-gen? 
ndarray-public-no-default)) -(def ndarray-public-to-gen - (get-public-to-gen-methods ndarray-public-to-hand-gen - ndarray-public-no-default)) - -(count ndarray-public-to-hand-gen) ;=> 15 -(count ndarray-public-to-gen) ;=> 486 - -(->> ndarray-public-to-hand-gen (map :name) (into #{})) - -(defn gen-ndarray-function-arity [op-name op-values] - (for [[param-count info] op-values] - (let [targets (->> (mapv :parameter-types info) - (apply interleave) - (mapv str) - (partition (count info)) - (mapv set)) - pnames (->> (mapv :parameter-types info) - (mapv ndarray-transform-param-name) - (apply interleave) - (partition (count info)) - (mapv #(clojure.string/join "-or-" %)) - (rename-duplicate-params) - (mapv symbol)) - coerced-params (mapv (fn [p t] `(~'util/coerce-param ~(symbol (clojure.string/replace p #"\& " "")) ~t)) pnames targets) - params (if (= #{:public :static} (:flags (first info))) - pnames - (into ['ndarray] pnames)) - function-body (if (= #{:public :static} (:flags (first info))) - `(~'util/coerce-return (~(symbol (str "NDArray/" op-name)) ~@coerced-params)) - `(~'util/coerce-return (~(symbol (str "." op-name)) ~'ndarray ~@coerced-params) - ))] - (when (not (and (> param-count 1) (has-variadic? params))) - `( - ~params - ~function-body - ))))) - - -(defn gen-ndarray-functions [public-to-gen-methods] - (for [operation (sort (public-by-name-and-param-count public-to-gen-methods))] - (let [[op-name op-values] operation - function-name (-> op-name - str - scala/decode-scala-symbol - clojure-case - symbol)] - `(~'defn ~function-name - ~@(remove nil? (gen-ndarray-function-arity op-name op-values)))))) - -(def all-ndarray-functions - (gen-ndarray-functions ndarray-public-to-gen)) - -(def ndarray-gen-ns - "(ns org.apache.clojure-mxnet.ndarray - (:refer-clojure :exclude [* - + > >= < <= / cast concat flatten identity load max - min repeat reverse set sort take to-array empty shuffle - ref]) - (:import (org.apache.mxnet NDArray Shape)))") - - -(defn generate-ndarray-file [] - (println "Generating ndarray file") - (write-to-file all-ndarray-functions - ndarray-gen-ns - "src/org/apache/clojure_mxnet/gen/ndarray.clj")) - -;;;;;;; SymbolAPI - -(defn fn-name->random-fn-name - [fn-name] - (cond - (clojure.string/starts-with? fn-name "-random-") - (remove-prefix "-random-" fn-name) - - (clojure.string/starts-with? fn-name "-sample-") - (str (remove-prefix "-sample-" fn-name) "-like") - - :else fn-name)) - -(defn symbol-api-coerce-param - [{:keys [name sym type optional?]}] - (let [coerced-param (case type - "Shape" `(when ~sym (~'mx-shape/->shape ~sym)) - "NDArray-or-Symbol[]" `(~'clojure.core/into-array ~sym) - "Map[String, String]" - `(when ~sym - (->> ~sym - (mapv (fn [[~'k ~'v]] [~'k (str ~'v)])) - (into {}) - ~'util/convert-map)) - sym) - nil-param-allowed? (#{"name" "attr"} name)] - (if (and optional? (not nil-param-allowed?)) - `(~'util/->option ~coerced-param) - coerced-param))) - -(defn gen-symbol-api-doc [fn-description params] - (let [param-descriptions (mapv (fn [{:keys [name description optional?]}] - (str "`" name "`: " - description - (when optional? " (optional)") - "\n")) - params)] - (str fn-description "\n\n" - (apply str param-descriptions)))) - -(defn gen-symbol-api-default-arity [op-name params] - (let [opt-params (filter :optional? 
params) - coerced-params (mapv symbol-api-coerce-param params) - default-args (array-map :keys (mapv :sym params) - :or (into {} - (mapv (fn [{:keys [sym]}] [sym nil]) - opt-params)) - :as 'opts)] - `([~default-args] - (~'util/coerce-return - (~(symbol (str "SymbolAPI/" op-name)) - ~@coerced-params))))) - -(defn symbol-api-gen-ns - [random-namespace?] - (str - "(ns\n" - " ^{:doc \"Experimental\"}\n" - (if random-namespace? - " org.apache.clojure-mxnet.symbol-random-api\n" - " org.apache.clojure-mxnet.symbol-api\n") - " (:refer-clojure :exclude [* - + > >= < <= / cast concat identity flatten load max\n" - " min repeat reverse set sort take to-array empty sin\n" - " get apply shuffle ref])\n" - " (:require [org.apache.clojure-mxnet.util :as util]\n" - " [org.apache.clojure-mxnet.shape :as mx-shape])\n" - " (:import (org.apache.mxnet SymbolAPI)))")) - -(defn make-gen-symbol-api-function - [{:keys [fn-name->fn-name] :or {fn-name->fn-name identity}}] - (fn [op-name] - (let [{:keys [fn-name fn-description args]} - (-> op-name (gen-op-info) (update :fn-name fn-name->fn-name)) - params (mapv (fn [{:keys [name type optional?] :as opts}] - (assoc opts - :sym (symbol name) - :optional? (or optional? - (= "NDArray-or-Symbol" type)))) - (conj args - {:name "name" - :type "String" - :optional? true - :description "Name of the symbol"} - {:name "attr" - :type "Map[String, String]" - :optional? true - :description "Attributes of the symbol"})) - doc (clojure.string/join - "\n\n " - (-> (gen-symbol-api-doc fn-description params) - (clojure.string/split #"\n"))) - default-call (gen-symbol-api-default-arity op-name params)] - `(~'defn ~(symbol fn-name) - ~doc - ~@default-call)))) - -(def gen-symbol-api-function - (make-gen-symbol-api-function {})) - -(def gen-symbol-random-api-function - (make-gen-symbol-api-function {:fn-name->fn-name fn-name->random-fn-name})) - -(defn all-symbol-api-functions [op-names] - (->> op-names - (filter #(= :core (op-name->namespace-type %))) - (mapv gen-symbol-api-function))) - -(count (all-symbol-api-functions op-names)) ;215 - -(defn all-symbol-random-api-functions [op-names] - (->> op-names - (filter #(= :random (op-name->namespace-type %))) - (mapv gen-symbol-random-api-function))) - -(count (all-symbol-random-api-functions op-names)) ;16 - -(defn generate-symbol-api-file [op-names] - (println "Generating symbol-api file") - (write-to-file (all-symbol-api-functions op-names) - (symbol-api-gen-ns false) - "src/org/apache/clojure_mxnet/gen/symbol_api.clj")) - -(defn generate-symbol-random-api-file [op-names] - (println "Generating symbol-random-api file") - (write-to-file (all-symbol-random-api-functions op-names) - (symbol-api-gen-ns true) - "src/org/apache/clojure_mxnet/gen/symbol_random_api.clj")) - -;;;;;;; NDArrayAPI - -(defn ndarray-api-coerce-param - [{:keys [sym type optional?]}] - (let [coerced-param (case type - "Shape" `(when ~sym (~'mx-shape/->shape ~sym)) - "NDArray-or-Symbol[]" `(~'clojure.core/into-array ~sym) - sym)] - (if optional? - `(~'util/->option ~coerced-param) - coerced-param))) - -(defn gen-ndarray-api-doc [fn-description params] - (let [param-descriptions (mapv (fn [{:keys [name description optional?]}] - (str "`" name "`: " - description - (when optional? " (optional)") - "\n")) - params)] - (str fn-description "\n\n" - (apply str param-descriptions)))) - -(defn gen-ndarray-api-default-arity [op-name params] - (let [opt-params (filter :optional? 
params) - coerced-params (mapv ndarray-api-coerce-param params) - default-args (array-map :keys (mapv :sym params) - :or (into {} - (mapv (fn [{:keys [sym]}] [sym nil]) - opt-params)) - :as 'opts)] - `([~default-args] - (~'util/coerce-return - (~(symbol (str "NDArrayAPI/" op-name)) - ~@coerced-params))))) - -(defn gen-ndarray-api-required-arity [fn-name req-params] - (let [req-args (->> req-params - (mapv (fn [{:keys [sym]}] [(keyword sym) sym])) - (into {}))] - `(~(mapv :sym req-params) - (~(symbol fn-name) ~req-args)))) - -(defn make-gen-ndarray-api-function - [{:keys [fn-name->fn-name] :or {fn-name->fn-name identity}}] - (fn [op-name] - (let [{:keys [fn-name fn-description args]} - (-> op-name (gen-op-info) (update :fn-name fn-name->fn-name)) - params (mapv (fn [{:keys [name] :as opts}] - (assoc opts :sym (symbol name))) - (conj args {:name "out" - :type "NDArray-or-Symbol" - :optional? true - :description "Output array."})) - doc (clojure.string/join - "\n\n " - (-> (gen-ndarray-api-doc fn-description params) - (clojure.string/split #"\n"))) - opt-params (filter :optional? params) - req-params (remove :optional? params) - req-call (gen-ndarray-api-required-arity fn-name req-params) - default-call (gen-ndarray-api-default-arity op-name params)] - (if (= 1 (count req-params)) - `(~'defn ~(symbol fn-name) - ~doc - ~@default-call) - `(~'defn ~(symbol fn-name) - ~doc - ~req-call - ~default-call))))) - -(def gen-ndarray-api-function - (make-gen-ndarray-api-function {})) - -(def gen-ndarray-random-api-function - (make-gen-ndarray-api-function {:fn-name->fn-name fn-name->random-fn-name})) - -(defn all-ndarray-api-functions [op-names] - (->> op-names - (filter #(= :core (op-name->namespace-type %))) - (mapv gen-ndarray-api-function))) - -(count (all-ndarray-api-functions op-names)) ; 213 - -(defn all-ndarray-random-api-functions [op-names] - (->> op-names - (filter #(= :random (op-name->namespace-type %))) - (mapv gen-ndarray-random-api-function))) - -(count (all-ndarray-random-api-functions op-names)) ;16 - -(defn ndarray-api-gen-ns [random-namespace?] - (str - "(ns\n" - " ^{:doc \"Experimental\"}\n" - (if random-namespace? 
- " org.apache.clojure-mxnet.ndarray-random-api\n" - " org.apache.clojure-mxnet.ndarray-api\n") - " (:refer-clojure :exclude [* - + > >= < <= / cast concat flatten identity load max\n" - " min repeat reverse set sort take to-array empty shuffle\n" - " ref])\n" - " (:require [org.apache.clojure-mxnet.shape :as mx-shape]\n" - " [org.apache.clojure-mxnet.util :as util])\n" - " (:import (org.apache.mxnet NDArrayAPI)))")) - -(defn generate-ndarray-api-file [op-names] - (println "Generating ndarray-api file") - (write-to-file (all-ndarray-api-functions op-names) - (ndarray-api-gen-ns false) - "src/org/apache/clojure_mxnet/gen/ndarray_api.clj")) - -(defn generate-ndarray-random-api-file [op-names] - (println "Generating ndarray-random-api file") - (write-to-file (all-ndarray-random-api-functions op-names) - (ndarray-api-gen-ns true) - "src/org/apache/clojure_mxnet/gen/ndarray_random_api.clj")) - - -;;; autogen the files -(do - (generate-ndarray-file) - - ;; NDArrayAPI - (generate-ndarray-api-file op-names) - (generate-ndarray-random-api-file op-names) - - (generate-symbol-file) - - ;; SymbolAPI - (generate-symbol-api-file op-names) - (generate-symbol-random-api-file op-names)) - - -(comment - - (gen-op-info "ElementWiseSum") - - (gen-ndarray-api-function "Activation") - - (gen-symbol-api-function "Activation") - - (gen-ndarray-random-api-function "random_randint") - - (gen-ndarray-random-api-function "sample_normal") - - (gen-symbol-random-api-function "random_poisson") - - ;; This generates a file with the bulk of the nd-array functions - (generate-ndarray-file) - - ;; This generates a file with the bulk of the symbol functions - (generate-symbol-file)) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/base.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/base.clj deleted file mode 100644 index 41ef821cd63b..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/base.clj +++ /dev/null @@ -1,21 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.base - (:import (org.apache.mxnet Base))) - -(def MX_REAL_TYPE (Base/MX_REAL_TYPE)) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/callback.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/callback.clj deleted file mode 100644 index 3809c73c5fd1..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/callback.clj +++ /dev/null @@ -1,38 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. 
-;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.callback - (:require [org.apache.clojure-mxnet.eval-metric :as em]) - (:import (org.apache.mxnet Callback$Speedometer))) - -;;; used to track status during an epoch - -(defn speedometer - ([batch-size frequent] - (proxy [Callback$Speedometer] [(int batch-size) (int frequent)] - (invoke [epoch batch-count eval-metric] - (proxy-super invoke epoch batch-count eval-metric) - ;;; so that it prints to the REPL as well - (when (and (zero? (mod batch-count frequent)) - (pos? batch-count)) - (println "Speedometer: epoch " epoch " count " batch-count " metric " (em/get eval-metric)))))) - ([batch-size] - (speedometer batch-size 50))) - -(defn invoke [callback epoch nbatch metric] - (doto callback - (.invoke (int epoch) (int nbatch) metric))) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/context.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/context.clj deleted file mode 100644 index 126838cea4c2..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/context.clj +++ /dev/null @@ -1,42 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.context - (:import (org.apache.mxnet Context))) - -(defn cpu - ([device-id] - (new Context "cpu" device-id)) - ([] - (cpu 0))) - -(defn gpu - ([device-id] - (new Context "gpu" device-id)) - ([] - (gpu 0))) - -(defn cpu-context [] - (cpu)) - -(defn default-context [] (cpu-context)) - -(defn device-type [context] - (.deviceType context)) - -(defn device-id [context] - (.deviceId context)) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/dtype.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/dtype.clj deleted file mode 100644 index 1d96c2247ec6..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/dtype.clj +++ /dev/null @@ -1,26 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.dtype - (:import (org.apache.mxnet DType))) - -(def UINT8 (DType/UInt8)) -(def INT32 (DType/Int32)) -(def FLOAT16 (DType/Float16)) -(def FLOAT32 (DType/Float32)) -(def FLOAT64 (DType/Float64)) - diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/eval_metric.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/eval_metric.clj deleted file mode 100644 index f1fe2d18bd35..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/eval_metric.clj +++ /dev/null @@ -1,110 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.eval-metric - (:refer-clojure :exclude [get update]) - (:require [org.apache.clojure-mxnet.util :as util]) - (:import (org.apache.mxnet Accuracy TopKAccuracy F1 Perplexity MAE MSE RMSE CustomMetric CompositeEvalMetric))) - -(defn accuracy - "Basic Accuracy Metric" - [] - (new Accuracy)) - -(defn top-k-accuracy - "Calculate top-k predictions accuracy - - `top-k`: number of top predictions to consider (int)" - [top-k] - (new TopKAccuracy (int top-k))) - -(defn f1 - "Calculate the F1 score of a binary classification problem." - [] - (new F1)) - -(defn perplexity - "Calculate perplexity - - opts - :ignore-label Index of invalid label to ignore when counting. Usually should be -1. Include - all entries if nil. - :axis The axis from prediction that was used to - compute softmax. Default is -1, which means use the last axis." - ([{:keys [ignore-label axis] :as opts - :or {axis -1}}] - (new Perplexity - (if ignore-label (util/->option (int ignore-label)) (util/->option nil)) - (int axis))) - ([] - (perplexity {}))) - -(defn mae - "Calculate Mean Absolute Error loss" - [] - (new MAE)) - -(defn mse - "Calculate Mean Squared Error loss" - [] - (new MSE)) - -(defn rmse - "Calculate Root Mean Squared Error loss" - [] - (new RMSE))
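- -;; Typical usage at the REPL (a sketch; `labels` and `preds` stand for -;; vectors of NDArrays produced elsewhere, and the reported value is -;; hypothetical): -(comment - (def metric (accuracy)) - (update metric labels preds) - (get-and-reset metric) ;=> ["accuracy" 0.5] -)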
- -(defmacro custom-metric - "Custom evaluation metric that takes an NDArray function. - - f-eval Customized evaluation function that takes two ndarrays and returns a number - function must be in the form of (fn [] ) Clojure style - - mname The name of the metric" - [f-eval mname] - `(new CustomMetric (util/scala-fn ~f-eval) ~mname)) - -(defn comp-metric - "Create a metric instance composed out of several metrics" - [metrics] - (let [cm (CompositeEvalMetric.)] - (doseq [m metrics] (.add cm m)) - cm)) - -(defn get - "Get the values of the metric as a map of {name value} pairs" - [metric] - (let [m (apply zipmap (-> (.get metric) - util/tuple->vec))] - (if-not (instance? CompositeEvalMetric metric) - (first m) - m))) - -(defn reset - "Clear the internal statistics to an initial state" - [metric] - (doto metric - (.reset))) - -(defn update - "Update the internal evaluation" - [metric labels preds] - (doto metric - (.update (util/vec->indexed-seq labels) (util/vec->indexed-seq preds)))) - -(defn get-and-reset - "Get the values and then reset the metric" - [metric] - (let [v (get metric)] - (reset metric) - v)) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/executor.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/executor.clj deleted file mode 100644 index b9883f77d560..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/executor.clj +++ /dev/null @@ -1,102 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.executor - (:require [org.apache.clojure-mxnet.util :as util] - [clojure.reflect :as r] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.shape :as mx-shape])) - -;; need to revisit to get all functions - -(defn ->vec [nd-array] - (vec (.toArray nd-array))) - -(defn forward - "* Calculate the outputs specified by the bound symbol. - * @param is-train whether this forward pass is run in training mode. - * @param kwargs Additional specification of input arguments." - ([executor] - (do (.forward executor) - executor)) - ([executor is-train kwargs] - (do (.forward executor is-train (util/map->scala-tuple-seq kwargs)) - executor)))
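- -;; For example (a sketch; `ex` stands for an executor obtained from -;; `sym/bind`, as in the symbol tutorial): -(comment - (-> (forward ex) - (outputs) - (first) - (->vec)) ; first output as a Clojure vector -)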
- -(defn backward - "* Do backward pass to get the gradient of the arguments. - * @param ndarray-or-vec Gradient on the outputs to be propagated back. - * This parameter is only needed when bind is called - * on outputs that are not a loss function." - ([executor] - (do (.backward executor) - executor)) - ([executor ndarray-or-vec] - (do (.backward executor (if (vector? ndarray-or-vec) (into-array ndarray-or-vec) ndarray-or-vec)) - executor))) - -(defn outputs - "List all the output ndarrays" - [executor] - (.outputs executor)) - -(defn grad-arrays - "List all the gradient ndarrays" - [executor] - (.gradArrays executor)) - -(defn arg-arrays - "List all the argument ndarrays" - [executor] - (.argArrays executor)) - -(defn grad-map [executor] - (util/scala-map->map (.gradDict executor))) - -(defn arg-map [executor] - (util/scala-map->map (.argDict executor))) - -(defn set-arg [executor arg-name arg-val-or-vec] - (-> executor - (arg-map) - (get arg-name) - (ndarray/set arg-val-or-vec))) - -(defn set-arg-arrays [executor vec-of-ndarray-or-val] - (doall (map (fn [arg-array v] (ndarray/set arg-array v)) (vec (arg-arrays executor)) vec-of-ndarray-or-val))) - -(defn get-grad [executor grad-name] - (-> executor - (grad-map) - (get grad-name))) - -(defn reshape - " * Return a new executor with the same symbol and shared memory, - * but different input/output shapes. - * For runtime reshaping, variable length sequences, etc. - * The returned executor shares state with the current one, - * and cannot be used in parallel with it. - * @param kwargs Map of string to shape-vec - the new shapes for the arguments. - * @param opts map with :partial-shaping Whether to allow changing the shape of unspecified arguments, - * and :allow-up-sizing Whether to allow allocating new ndarrays that are larger than the original." - ([executor kwargs {:keys [partial-shaping allow-up-sizing] - :or {partial-shaping false allow-up-sizing false}}] - (do - (let [kwargs-shapes (zipmap (keys kwargs) - (mapv (fn [v] (if (vector? v) (mx-shape/->shape v) v)) (vals kwargs)))] - (.reshape executor partial-shaping allow-up-sizing (util/convert-map kwargs-shapes))) - executor)) - ([executor kwargs] - (reshape executor kwargs {}))) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/gen/.gitignore b/contrib/clojure-package/src/org/apache/clojure_mxnet/gen/.gitignore deleted file mode 100644 index 76bedaeabbaf..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/gen/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -# Ignore everything in this directory -* -# Except this file -!.gitignore - diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/image.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/image.clj deleted file mode 100644 index 68dcbfec5850..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/image.clj +++ /dev/null @@ -1,369 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.image - "Image API of Clojure package."
- (:refer-clojure :exclude [read]) - (:require [t6.from-scala.core :refer [$ $$] :as $] - [org.apache.clojure-mxnet.dtype :as dtype] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.util :as util] - [clojure.spec.alpha :as s]) - (:import (org.apache.mxnet Image NDArray) - (java.awt.image BufferedImage) - (java.io InputStream))) - -;; Flags for conversion of images -(def GRAYSCALE 0) -(def COLOR 1) - -(s/def ::input-stream #(instance? InputStream %)) -(s/def ::color-flag #{GRAYSCALE COLOR}) -(s/def ::to-rgb boolean?) -(s/def ::ndarray #(instance? NDArray %)) -(s/def ::output (s/or :empty nil? :ndarray ::ndarray)) -(s/def ::decode-image-opts - (s/keys :opt-un [::color-flag ::to-rgb ::output])) - -(defn ^:deprecated decode-image - "DEPRECATED: use `decode` instead. - - Decodes an image from an input stream with OpenCV - `input-stream`: `InputStream` - Contains the binary encoded image - `color-flag`: 0 or 1 - Convert decoded image to grayscale (0) or color (1) - `to-rgb`: boolean - Whether to convert decoded image to mxnet's default RGB - format (instead of opencv's default BGR) - `output`: nil or `NDArray` - returns: `NDArray` with dtype uint8 - - Ex: - (decode-image input-stream) - (decode-image input-stream {:color-flag 1}) - (decode-image input-stream {:color-flag 0 :output nd})" - ([input-stream {:keys [color-flag to-rgb output] - :or {color-flag COLOR to-rgb true output nil} - :as opts}] - (util/validate! ::input-stream input-stream "Invalid input stream") - (util/validate! ::decode-image-opts opts "Invalid options for decoding") - (Image/imDecode input-stream color-flag to-rgb ($/option output))) - ([input-stream] - (decode-image input-stream {}))) - -(s/def ::color #{:grayscale :color}) -(s/def ::decode-image-opts-2 (s/keys :opt-un [::color ::to-rgb ::output])) - -(defn- color->int [color] - (case color - :grayscale 0 - :color 1)) - -(defn decode - "Decodes an image from an input stream with OpenCV. - `input-stream`: `InputStream` - Contains the binary encoded image - `color`: keyword in `#{:color :grayscale}` - Convert decoded image to - grayscale or color - `to-rgb`: boolean - Whether to convert decoded image to mxnet's default RGB - format (instead of opencv's default BGR) - `output`: nil or `NDArray` - returns: `NDArray` with dtype uint8 - - Ex: - (decode input-stream) - (decode input-stream {:color :color}) - (decode input-stream {:color :grayscale :output nd})" - ([input-stream {:keys [color to-rgb output] - :or {color :color to-rgb true output nil} - :as opts}] - (util/validate! ::input-stream input-stream "Invalid input stream") - (util/validate! ::decode-image-opts-2 opts "Invalid options for decoding") - (Image/imDecode input-stream (color->int color) to-rgb ($/option output))) - ([input-stream] - (decode input-stream {}))) - -(s/def ::filename string?) -(s/def ::optional-color-flag - (s/or :none nil? :some ::color-flag)) -(s/def ::optional-to-rgb - (s/or :none nil? :some ::to-rgb)) - -(defn ^:deprecated read-image - "DEPRECATED: use `read` instead. - - Reads an image file and returns an ndarray with OpenCV. It returns image in - RGB by default instead of OpenCV's default BGR. 
- `filename`: string - Name of the image file to be loaded - `color-flag`: 0 or 1 - Convert decoded image to grayscale (0) or color (1) - `to-rgb`: boolean - Whether to convert decoded image to mxnet's default RGB - format (instead of opencv's default BGR) - `output`: nil or `NDArray` - returns: `NDArray` with dtype uint8 - - Ex: - (read-image \"cat.jpg\") - (read-image \"cat.jpg\" {:color-flag 0}) - (read-image \"cat.jpg\" {:color-flag 1 :output nd})" - ([filename {:keys [color-flag to-rgb output] - :or {color-flag nil to-rgb nil output nil} - :as opts}] - (util/validate! ::filename filename "Invalid filename") - (util/validate! ::optional-color-flag color-flag "Invalid color flag") - (util/validate! ::optional-to-rgb to-rgb "Invalid conversion flag") - (util/validate! ::output output "Invalid output") - (Image/imRead - filename - ($/option color-flag) - ($/option to-rgb) - ($/option output))) - ([filename] - (read-image filename {}))) - -(defn read - "Reads an image file and returns an ndarray with OpenCV. It returns the image in - RGB by default instead of OpenCV's default BGR. - `filename`: string - Name of the image file to be loaded - `color`: keyword in `#{:color :grayscale}` - Convert decoded image to - grayscale or color - `to-rgb`: boolean - Whether to convert decoded image to mxnet's default RGB - format (instead of opencv's default BGR) - `output`: nil or `NDArray` - returns: `NDArray` with dtype uint8 - - Ex: - (read \"cat.jpg\") - (read \"cat.jpg\" {:color :grayscale}) - (read \"cat.jpg\" {:color :color :output nd})" - ([filename {:keys [color to-rgb output] - :or {color :color to-rgb nil output nil} - :as opts}] - (util/validate! ::filename filename "Invalid filename") - (util/validate! ::color color "Invalid color") - (util/validate! ::optional-to-rgb to-rgb "Invalid conversion flag") - (util/validate! ::output output "Invalid output") - (Image/imRead - filename - ($/option (when color (color->int color))) - ($/option to-rgb) - ($/option output))) - ([filename] - (read filename {}))) - -(s/def ::int int?) -(s/def ::optional-int (s/or :none nil? :some int?)) - -(defn ^:deprecated resize-image - "DEPRECATED: use `resize` instead. - - Resizes the image array to (width, height) - `input`: `NDArray` - source image in NDArray - `w`: int - Width of resized image - `h`: int - Height of resized image - `interpolation`: Interpolation method. Default is INTER_LINEAR - `output`: nil or `NDArray` - returns: `NDArray` - - Ex: - (resize-image nd-img 300 300) - (resize-image nd-img 28 28 {:output nd})" - ([input w h {:keys [interpolation output] - :or {interpolation nil output nil} - :as opts}] - (util/validate! ::ndarray input "Invalid input array") - (util/validate! ::int w "Invalid width") - (util/validate! ::int h "Invalid height") - (util/validate! ::optional-int interpolation "Invalid interpolation") - (util/validate! ::output output "Invalid output") - (Image/imResize input w h ($/option interpolation) ($/option output))) - ([input w h] - (resize-image input w h {}))) - -(defn resize - "Resizes the image array to (width, height) - `input`: `NDArray` - source image in NDArray - `w`: int - Width of resized image - `h`: int - Height of resized image - `interpolation`: Interpolation method. Default is INTER_LINEAR - `output`: nil or `NDArray` - returns: `NDArray` - - Ex: - (resize nd-img 300 300) - (resize nd-img 28 28 {:output nd})" - ([input w h {:keys [interpolation output] - :or {interpolation nil output nil} - :as opts}] - (util/validate!
::ndarray input "Invalid input array") - (util/validate! ::int w "Invalid width") - (util/validate! ::int h "Invalid height") - (util/validate! ::optional-int interpolation "Invalid interpolation") - (util/validate! ::output output "Invalid output") - (Image/imResize input w h ($/option interpolation) ($/option output))) - ([input w h] - (resize input w h {}))) - -(defn apply-border - "Pad image border with OpenCV. - `input`: `NDArray` - source image in NDArray - `top`: int - Top margin - `bottom`: int - Bottom margin - `left`: int - Left margin - `right`: int - Right margin - `fill-type`: nil or Filling type - Default BORDER_CONSTANT - `value`: nil or double - Deprecated, use `values` instead - `values`: Fill with value (RGB or gray), up to 4 channels - `output`: nil or `NDArray` - returns: `NDArray` - - Ex: - (apply-border img-nd 1 1 1 1) - (apply-border img-nd 3 3 0 0)" - ([input top bottom left right - {:keys [fill-type value values output] - :or {fill-type nil value nil values nil output nil} - :as opts}] - (util/validate! ::ndarray input "Invalid input array") - (util/validate! ::int top "Invalid top margin") - (util/validate! ::int bottom "Invalid bottom margin") - (util/validate! ::int left "Invalid left margin") - (util/validate! ::int right "Invalid right margin") - (util/validate! ::optional-int fill-type "Invalid fill type") - (util/validate! ::output output "Invalid output") - (Image/copyMakeBorder input top bottom left right - ($/option fill-type) - ($/option value) - ($/option values) - ($/option output))) - ([input top bottom left right] - (apply-border input top bottom left right {}))) - -(defn fixed-crop - "Return a fixed crop of the image. - `input`: `NDArray` - Source image in NDArray - `x0`: int - Starting x point - `y0`: int - Starting y point - `w`: int - Width of the image - `h`: int - Height of the image - returns: cropped `NDArray` - - Ex: - (fixed-crop nd-img 0 0 28 28) - (fixed-crop nd-img 10 0 100 300)" - [input x0 y0 w h] - (util/validate! ::ndarray input "Invalid input array") - (util/validate! ::int x0 "Invalid starting x coordinate") - (util/validate! ::int y0 "Invalid starting y coordinate") - (util/validate! ::int w "Invalid width") - (util/validate! ::int h "Invalid height") - (Image/fixedCrop input x0 y0 w h)) - -(defn rgb-array? - "Returns whether the ndarray is in the RGB format - `input`: `NDArray` - Source image in NDArray - returns: boolean" - [input] - (util/validate! ::ndarray input "Invalid input array") - (let [shape (ndarray/shape-vec input)] - (and - (= 3 (count shape)) - (= 3 (shape 2))))) - -(s/def ::all-bytes #(= dtype/UINT8 (ndarray/dtype %))) -(s/def ::rgb-array rgb-array?) -(s/def ::to-image-ndarray - (s/and ::ndarray ::all-bytes ::rgb-array)) - -(defn ^:deprecated to-image - "DEPRECATED: use `ndarray->image` instead. - - Convert a NDArray image in RGB format to a real image. - `input`: `NDArray` - Source image in NDArray - returns: `BufferedImage`" - [input] - (util/validate! ::to-image-ndarray input "Invalid input array") - (Image/toImage input)) - -(defn ndarray->image - "Convert a NDArray image in RGB format to a real image. - `input`: `NDArray` - Source image in NDArray - returns: `BufferedImage`" - [input] - (util/validate! ::to-image-ndarray input "Invalid input array") - (Image/toImage input)) - -(s/def ::buffered-image #(instance? BufferedImage %)) -(s/def ::x-min number?) -(s/def ::x-max number?) -(s/def ::y-min number?) -(s/def ::y-max number?) 
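-;; Illustrative sketch tying the image functions above together; the file name, -;; sizes, and crop box here are hypothetical stand-ins, not fixtures: -(comment - (let [img (read "cat.jpg" {:color :color}) - small (resize img 224 224) - crop (fixed-crop small 0 0 200 200)] - ;; convert the uint8 RGB NDArray back to a BufferedImage for display - (ndarray->image crop))) -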
-(s/def ::coordinate (s/keys :req-un [::x-min ::x-max ::y-min ::y-max])) -(s/def ::coordinates (s/coll-of ::coordinate)) -(s/def ::names (s/nilable (s/coll-of string?))) -(s/def ::stroke (s/and integer? pos?)) -(s/def ::font-size-mult (s/and float? pos?)) -(s/def ::transparency (s/and float? #(<= 0.0 % 1.0))) -(s/def ::coordinates-names - (fn [[coordinates names]] (= (count coordinates) (count names)))) - -(defn- convert-coordinate - "Convert a bounding box coordinate map to the correct Scala types." - [{:keys [x-min x-max y-min y-max]}] - {:xmin (int x-min) - :xmax (int x-max) - :ymin (int y-min) - :ymax (int y-max)}) - -(defn draw-bounding-box! - "Draws bounding boxes on `buffered-image`, mutating the input image. - `buffered-image`: BufferedImage - `coordinates`: collection of {:xmin int :xmax int :ymin int :ymax int} - `font-size-mult`: positive float - Font size multiplier - `names`: collection of strings - List of names for the bounding boxes - `stroke`: positive integer - thickness of the bounding box - `transparency`: float in [0.0, 1.0] - Transparency of the bounding box - returns: Modified `buffered-image` - Ex: - (draw-bounding-box! img [{:x-min 0 :x-max 100 :y-min 0 :y-max 100}]) - (draw-bounding-box! img - [{:x-min 190 :x-max 850 :y-min 50 :y-max 450} - {:x-min 200 :x-max 350 :y-min 440 :y-max 530}] - {:stroke 2 - :names [\"pug\" \"cookie\"] - :transparency 0.8 - :font-size-mult 2.0})" - ([buffered-image coordinates] - (draw-bounding-box! buffered-image coordinates {})) - ([buffered-image coordinates - {:keys [names stroke font-size-mult transparency] - :or {stroke 3 font-size-mult 1.0 transparency 1.0} - :as opts}] - (util/validate! ::buffered-image buffered-image "Invalid input image") - (util/validate! ::coordinates coordinates "Invalid input coordinates") - (util/validate! ::names names "Invalid input names") - (util/validate! ::stroke stroke "Invalid input stroke") - (util/validate! ::font-size-mult font-size-mult "Invalid input font-size-mult") - (util/validate! ::transparency transparency "Invalid input transparency") - (when (pos? (count names)) - (util/validate! ::coordinates-names [coordinates names] "Invalid number of names")) - (Image/drawBoundingBox - buffered-image - (->> coordinates - (map convert-coordinate) - (map util/convert-map) - (into-array)) - (util/->option (into-array names)) - (util/->option (int stroke)) - (util/->option (float font-size-mult)) - (util/->option (float transparency))) - buffered-image)) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/infer.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/infer.clj deleted file mode 100644 index 09edf15b4288..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/infer.clj +++ /dev/null @@ -1,372 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.infer - (:refer-clojure :exclude [type]) - (:require [org.apache.clojure-mxnet.context :as context] - [org.apache.clojure-mxnet.dtype :as dtype] - [org.apache.clojure-mxnet.io :as mx-io] - [org.apache.clojure-mxnet.shape :as shape] - [org.apache.clojure-mxnet.util :as util] - [clojure.spec.alpha :as s] - [org.apache.clojure-mxnet.shape :as mx-shape]) - (:import (java.awt.image BufferedImage) - (org.apache.mxnet NDArray) - (org.apache.mxnet.infer Classifier ImageClassifier - ObjectDetector Predictor))) - -(s/def ::predictor #(instance? Predictor %)) -(s/def ::classifier #(instance? Classifier %)) -(s/def ::image-classifier #(instance? ImageClassifier %)) -(s/def ::object-detector #(instance? ObjectDetector %)) - -(defrecord WrappedPredictor [predictor]) -(defrecord WrappedClassifier [classifier]) -(defrecord WrappedImageClassifier [image-classifier]) -(defrecord WrappedObjectDetector [object-detector]) - -(s/def ::ndarray #(instance? NDArray %)) -(s/def ::number-array (s/coll-of number? :kind vector?)) -(s/def ::vvec-of-numbers (s/coll-of ::number-array :kind vector?)) -(s/def ::vec-of-ndarrays (s/coll-of ::ndarray :kind vector?)) -(s/def ::image #(instance? BufferedImage %)) -(s/def ::batch-images (s/coll-of ::image :kind vector?)) - -(s/def ::wrapped-predictor (s/keys :req-un [::predictor])) -(s/def ::wrapped-classifier (s/keys :req-un [::classifier])) -(s/def ::wrapped-image-classifier (s/keys :req-un [::image-classifier])) -(s/def ::wrapped-detector (s/keys :req-un [::object-detector])) - -(defn- format-detection-predictions [predictions] - (mapv (fn [[c p]] - (let [[prob xmin ymin xmax ymax] (mapv float p)] - {:class c :prob prob :x-min xmin :y-min ymin :x-max xmax :y-max ymax})) - predictions)) - -(defn- format-classification-predictions [predictions] - (mapv (fn [[c p]] {:class c :prob p}) predictions)) - -(defprotocol APredictor - (predict [wrapped-predictor inputs]) - (predict-with-ndarray [wrapped-predictor input-arrays])) - -(defprotocol AClassifier - (classify - [wrapped-classifier inputs] - [wrapped-classifier inputs topk]) - (classify-with-ndarray - [wrapped-classifier inputs] - [wrapped-classifier inputs topk])) - -(defprotocol AImageClassifier - (classify-image - [wrapped-image-classifier image] - [wrapped-image-classifier image topk] - [wrapped-image-classifier image topk dtype]) - (classify-image-batch - [wrapped-image-classifier images] - [wrapped-image-classifier images topk] - [wrapped-image-classifier images topk dtype])) - -(defprotocol AObjectDetector - (detect-objects - [wrapped-detector image] - [wrapped-detector image topk]) - (detect-objects-batch - [wrapped-detector images] - [wrapped-detector images topk]) - (detect-objects-with-ndarrays - [wrapped-detector input-arrays] - [wrapped-detector input-arrays topk])) - -(extend-protocol APredictor - WrappedPredictor - (predict - [wrapped-predictor inputs] - (util/validate! ::wrapped-predictor wrapped-predictor - "Invalid predictor") - (util/validate! ::vvec-of-numbers inputs - "Invalid inputs") - (->> (.predict (:predictor wrapped-predictor) - (util/vec->indexed-seq (mapv float-array inputs))) - (util/coerce-return-recursive) - (mapv #(mapv float %)))) - (predict-with-ndarray [wrapped-predictor input-arrays] - (util/validate! ::wrapped-predictor wrapped-predictor - "Invalid predictor") - (util/validate! 
::vec-of-ndarrays input-arrays - "Invalid input arrays") - (-> (.predictWithNDArray (:predictor wrapped-predictor) - (util/vec->indexed-seq input-arrays)) - (util/coerce-return-recursive)))) - -(s/def ::nil-or-int (s/nilable int?)) - -(extend-protocol AClassifier - WrappedClassifier - (classify - ([wrapped-classifier inputs] - (classify wrapped-classifier inputs nil)) - ([wrapped-classifier inputs topk] - (util/validate! ::wrapped-classifier wrapped-classifier - "Invalid classifier") - (util/validate! ::vvec-of-numbers inputs - "Invalid inputs") - (util/validate! ::nil-or-int topk "Invalid top-K") - (->> (.classify (:classifier wrapped-classifier) - (util/vec->indexed-seq (mapv float-array inputs)) - (util/->int-option topk)) - (util/coerce-return-recursive) - (format-classification-predictions)))) - (classify-with-ndarray - ([wrapped-classifier inputs] - (classify-with-ndarray wrapped-classifier inputs nil)) - ([wrapped-classifier inputs topk] - (util/validate! ::wrapped-classifier wrapped-classifier - "Invalid classifier") - (util/validate! ::vec-of-ndarrays inputs - "Invalid inputs") - (util/validate! ::nil-or-int topk "Invalid top-K") - (->> (.classifyWithNDArray (:classifier wrapped-classifier) - (util/vec->indexed-seq inputs) - (util/->int-option topk)) - (util/coerce-return-recursive) - (mapv format-classification-predictions)))) - WrappedImageClassifier - (classify - ([wrapped-image-classifier inputs] - (classify wrapped-image-classifier inputs nil)) - ([wrapped-image-classifier inputs topk] - (util/validate! ::wrapped-image-classifier wrapped-image-classifier - "Invalid classifier") - (util/validate! ::vvec-of-numbers inputs - "Invalid inputs") - (util/validate! ::nil-or-int topk "Invalid top-K") - (->> (.classify (:image-classifier wrapped-image-classifier) - (util/vec->indexed-seq (mapv float-array inputs)) - (util/->int-option topk)) - (util/coerce-return-recursive) - (format-classification-predictions)))) - (classify-with-ndarray - ([wrapped-image-classifier inputs] - (classify-with-ndarray wrapped-image-classifier inputs nil)) - ([wrapped-image-classifier inputs topk] - (util/validate! ::wrapped-image-classifier wrapped-image-classifier - "Invalid classifier") - (util/validate! ::vec-of-ndarrays inputs - "Invalid inputs") - (util/validate! ::nil-or-int topk "Invalid top-K") - (->> (.classifyWithNDArray (:image-classifier wrapped-image-classifier) - (util/vec->indexed-seq inputs) - (util/->int-option topk)) - (util/coerce-return-recursive) - (mapv format-classification-predictions))))) - -(s/def ::image #(instance? BufferedImage %)) -(s/def ::dtype #{dtype/UINT8 dtype/INT32 dtype/FLOAT16 dtype/FLOAT32 dtype/FLOAT64}) - -(extend-protocol AImageClassifier - WrappedImageClassifier - (classify-image - ([wrapped-image-classifier image] - (classify-image wrapped-image-classifier image nil dtype/FLOAT32)) - ([wrapped-image-classifier image topk] - (classify-image wrapped-image-classifier image topk dtype/FLOAT32)) - ([wrapped-image-classifier image topk dtype] - (util/validate! ::wrapped-image-classifier wrapped-image-classifier - "Invalid classifier") - (util/validate! ::image image "Invalid image") - (util/validate! ::nil-or-int topk "Invalid top-K") - (util/validate! 
::dtype dtype "Invalid dtype") - (->> (.classifyImage (:image-classifier wrapped-image-classifier) - image - (util/->int-option topk) - dtype) - (util/coerce-return-recursive) - (mapv format-classification-predictions)))) - (classify-image-batch - ([wrapped-image-classifier images] - (classify-image-batch wrapped-image-classifier images nil dtype/FLOAT32)) - ([wrapped-image-classifier images topk] - (classify-image-batch wrapped-image-classifier images topk dtype/FLOAT32)) - ([wrapped-image-classifier images topk dtype] - (util/validate! ::wrapped-image-classifier wrapped-image-classifier - "Invalid classifier") - (util/validate! ::batch-images images "Invalid Batch Images") - (util/validate! ::nil-or-int topk "Invalid top-K") - (util/validate! ::dtype dtype "Invalid dtype") - (->> (.classifyImageBatch (:image-classifier wrapped-image-classifier) - (util/vec->indexed-seq images) - (util/->int-option topk) - dtype) - (util/coerce-return-recursive) - (mapv format-classification-predictions))))) - -(extend-protocol AObjectDetector - WrappedObjectDetector - (detect-objects - ([wrapped-detector image] - (detect-objects wrapped-detector image nil)) - ([wrapped-detector image topk] - (util/validate! ::wrapped-detector wrapped-detector - "Invalid object detector") - (util/validate! ::image image "Invalid image") - (util/validate! ::nil-or-int topk "Invalid top-K") - (->> (.imageObjectDetect (:object-detector wrapped-detector) - image - (util/->int-option topk)) - (util/coerce-return-recursive) - (mapv format-detection-predictions)))) - (detect-objects-batch - ([wrapped-detector images] - (detect-objects-batch wrapped-detector images nil)) - ([wrapped-detector images topk] - (util/validate! ::wrapped-detector wrapped-detector - "Invalid object detector") - (util/validate! ::nil-or-int topk "Invalid top-K") - (util/validate! ::batch-images images "Invalid Batch Images") - (->> (.imageBatchObjectDetect (:object-detector wrapped-detector) - (util/vec->indexed-seq images) - (util/->int-option topk)) - (util/coerce-return-recursive) - (mapv format-detection-predictions)))) - (detect-objects-with-ndarrays - ([wrapped-detector input-arrays] - (detect-objects-with-ndarrays wrapped-detector input-arrays nil)) - ([wrapped-detector input-arrays topk] - (util/validate! ::wrapped-detector wrapped-detector - "Invalid object detector") - (util/validate! ::vec-of-ndarrays input-arrays - "Invalid inputs") - (util/validate! 
::nil-or-int topk "Invalid top-K") - (->> (.objectDetectWithNDArray (:object-detector wrapped-detector) - (util/vec->indexed-seq input-arrays) - (util/->int-option topk)) - (util/coerce-return-recursive) - (mapv format-detection-predictions))))) - -(defprotocol AInferenceFactory - (create-predictor [factory] [factory opts]) - (create-classifier [factory] [factory opts]) - (create-image-classifier [factory] [factory opts]) - (create-object-detector [factory] [factory opts])) - -(defn convert-descriptors - [descriptors] - (util/vec->indexed-seq - (into [] (map mx-io/data-desc descriptors)))) - -(defrecord InferenceFactory [model-path-prefix input-descriptors] - AInferenceFactory - (create-predictor - [factory] - (create-predictor factory {})) - (create-predictor - [factory opts] - (let [{:keys [contexts epoch] - :or {contexts [(context/cpu)] epoch 0}} opts] - (->WrappedPredictor - (new Predictor - model-path-prefix - (convert-descriptors input-descriptors) - (into-array contexts) - (util/->int-option epoch))))) - (create-classifier - [factory] - (create-classifier factory {})) - (create-classifier - [factory opts] - (let [{:keys [contexts epoch] - :or {contexts [(context/cpu)] epoch 0}} opts] - (->WrappedClassifier - (new Classifier - model-path-prefix - (convert-descriptors input-descriptors) - (into-array contexts) - (util/->int-option epoch))))) - (create-image-classifier - [factory] - (create-image-classifier factory {})) - (create-image-classifier - [factory opts] - (let [{:keys [contexts epoch] - :or {contexts [(context/cpu)] epoch 0}} opts] - (->WrappedImageClassifier - (new ImageClassifier - model-path-prefix - (convert-descriptors input-descriptors) - (into-array contexts) - (util/->int-option epoch))))) - (create-object-detector - [factory] - (create-object-detector factory {})) - (create-object-detector - [factory opts] - (let [{:keys [contexts epoch] - :or {contexts [(context/cpu)] epoch 0}} opts] - (->WrappedObjectDetector - (new ObjectDetector - model-path-prefix - (convert-descriptors input-descriptors) - (into-array contexts) - (util/->int-option epoch)))))) - -(s/def ::model-path-prefix string?) -(s/def ::input-descriptors (s/coll-of ::mx-io/data-desc)) - -(defn model-factory - "Creates a factory that can be used to instantiate a predictor, - classifier, image classifier, or object detector" - [model-path-prefix input-descriptors] - (util/validate! ::model-path-prefix model-path-prefix - "Invalid model path prefix") - (util/validate! ::input-descriptors input-descriptors - "Invalid input descriptors") - (->InferenceFactory model-path-prefix input-descriptors)) - -(defn reshape-image - "Reshape an image to a new shape" - [image width height] - (util/validate! ::image image "Invalid image") - (util/validate! int? width "Invalid width") - (util/validate! int? height "Invalid height") - (ImageClassifier/reshapeImage image width height)) - -(defn buffered-image-to-pixels - "Convert input BufferedImage to NDArray of input shape" - ([image input-shape-vec] - (buffered-image-to-pixels image input-shape-vec dtype/FLOAT32)) - ([image input-shape-vec dtype] - (util/validate! ::image image "Invalid image") - (util/validate! (s/coll-of int?) input-shape-vec "Invalid shape vector") - (ImageClassifier/bufferedImageToPixels image (shape/->shape input-shape-vec) dtype))) - -(s/def ::image-path string?) -(s/def ::image-paths (s/coll-of ::image-path)) - -(defn load-image-from-file - "Loads an input image given a file name" - [image-path] - (util/validate! 
::image-path image-path "Invalid image path") - (ImageClassifier/loadImageFromFile image-path)) - -(defn load-image-paths - "Loads images from a list of file names" - [image-paths] - (util/validate! ::image-paths image-paths "Invalid image paths") - (util/scala-vector->vec - (ImageClassifier/loadInputBatch (util/convert-vector image-paths)))) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/initializer.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/initializer.clj deleted file mode 100644 index a161759b76be..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/initializer.clj +++ /dev/null @@ -1,57 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.initializer - (:refer-clojure :exclude [apply]) - (:import (org.apache.mxnet Uniform Normal Xavier))) - -(defn uniform - "Initialize the weight with uniform [-scale, scale] - scale - The scale of uniform distribution" - ([scale] - (new Uniform (float scale))) - ([] - (uniform 0.07))) - -(defn normal - "Initialize the weight with normal(0, sigma) - sigma - Standard deviation for gaussian distribution." - ([sigma] - (new Normal (float sigma))) - ([] - (normal 0.01))) - -(defn xavier - "Initialize the weight with Xavier or similar initialization scheme - rand-type - 'gaussian' or 'uniform' - factor-type - 'avg' 'in' or 'out' - magnitude - scale of random number range" - ([{:keys [rand-type factor-type magnitude] - :or {rand-type "uniform" - factor-type "avg" - magnitude 3} - :as opts}] - (new Xavier rand-type factor-type (float magnitude))) - ([] - (xavier {}))) - -(defn apply [initializer name arr] - (let [r (.apply initializer name arr)] - arr)) - -(defn init-weight [initializer name arr] - (doto initializer - (.initWeight name arr))) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/io.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/io.clj deleted file mode 100644 index a2b639934f49..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/io.clj +++ /dev/null @@ -1,349 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.io - (:refer-clojure :exclude [next]) - (:require [clojure.spec.alpha :as s] - [org.apache.clojure-mxnet.base :as base] - [org.apache.clojure-mxnet.shape :as mx-shape] - [org.apache.clojure-mxnet.util :as util] - [org.apache.clojure-mxnet.dtype :as dtype] - [org.apache.clojure-mxnet.layout :as layout] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.random :as random]) - (:import (org.apache.mxnet IO DataDesc DataBatch NDArray) - (org.apache.mxnet.io ResizeIter PrefetchingIter NDArrayIter MXDataIter))) - -(defn batches - "Convert the data-pack to a batch seq" - [data-pack] - (util/scala-iterator->seq (.toIterator data-pack))) - -(defn batch-label - "Returns the vector of ndarrays that represents the label" - [batch] - (util/scala-vector->vec (.label batch))) - -(defn batch-data - "Returns the vector of ndarrays that represents the data" - [batch] - (util/scala-vector->vec (.data batch))) - -(defn batch-index - "Returns the vector of ints that represents the index" - [batch] - (util/scala-vector->vec (.index batch))) - -(defn batch-pad - "Returns the pad of the batch" - [batch] - (.pad batch)) - -(defn iterator [data-pack] - (.iterator data-pack)) - -(defn provide-data - "Provides the description of the data iterator in the form of - [{:name name :shape shape-vec}]" - [pack-iterator] - (->> pack-iterator - (.provideData) - (util/scala-map->map) - (mapv (fn [[k v]] {:name k :shape (mx-shape/->vec v)})))) - -(defn provide-label - "Provides the description of the label iterator in the form of - [{:name name :shape shape-vec}]" - [pack-iterator] - (->> pack-iterator - (.provideLabel) - (util/scala-map->map) - (mapv (fn [[k v]] {:name k :shape (mx-shape/->vec v)})))) - -(defn data-desc->map [dd] - {:name (.name dd) - :shape (mx-shape/->vec (.shape dd)) - :dtype (.dtype dd) - :layout (.layout dd)}) - -(defn provide-data-desc - "Provides the Data Desc of the data iterator in the form of - [{:name name :shape shape-vec :dtype dtype :layout layout}]" - [pack-iterator] - (->> pack-iterator - (.provideDataDesc) - (util/scala-vector->vec) - (mapv data-desc->map))) - -(defn provide-label-desc - "Provides the Data Desc of the label iterator in the form of - [{:name name :shape shape-vec :dtype dtype :layout layout}]" - [pack-iterator] - (->> pack-iterator - (.provideLabelDesc) - (util/scala-vector->vec) - (mapv data-desc->map))) - -(defn reset [iterator] - (.reset iterator)) - -(defn has-next? [iterator] - (.hasNext iterator)) - -(defn next [iterator] - (.next iterator)) - -(defn iter-label [iterator] - (util/scala-vector->vec (.getLabel iterator))) - -(defn iter-data [iterator] - (util/scala-vector->vec (.getData iterator))) - -(defn iter-init-label [iterator] - (util/scala-vector->vec (.initLabel iterator))) - -(defmacro do-batches - "Takes an iterator and a function of one argument. The iterator will be reset and run through all the batches, with each batch passed to the function argument. nil is returned." - [iter f] - `(do - (reset ~iter) - (loop [it# ~iter] - (when (has-next? it#) - (let [b# (next it#)] - (do (~f b#)) - (recur it#)))))) - -(defmacro for-batches - "Takes an iterator and a function of one argument. The iterator will be reset and run through all the batches, with each batch passed to the function argument. 
The result of the function will be conj'd onto a vector of results for all the batches and returned at the end." - [iter f] - `(do - (reset ~iter) - (loop [it# ~iter - result# []] - (if (has-next? it#) - (let [b# (next it#)] - (recur it# (conj result# (do (~f b#))))) - result#)))) - -(defmacro reduce-batches - "Takes an iterator and a function of two arguments. The iterator will be reset and run through all the batches, with the batch passed to the function argument. The result of the function will be the result of the reduce" - ([iter f initial-val] - `(do - (reset ~iter) - (loop [it# ~iter - result# ~initial-val] - (if (has-next? it#) - (let [b# (next it#) - r# (do (~f result# b#))] - (recur it# r#)) - result#)))) - ([iter f] - `(reduce-batches ~iter ~f 0))) - -(defn - csv-iter - ([kwargs] - (util/apply-scala-fn (IO/CSVIter) (util/convert-io-map kwargs)))) - -(defn - csv-pack - ([kwargs] - (util/apply-scala-fn (IO/CSVPack) (util/convert-io-map kwargs)))) - -(defn - image-recode-pack - ([kwargs] - (util/apply-scala-fn - (IO/ImageRecodePack) - (util/convert-io-map kwargs)))) - -(defn - image-record-iter - ([kwargs] - (util/apply-scala-fn - (IO/ImageRecordIter) - (util/convert-io-map kwargs)))) - -(defn - mnist-iter - ([kwargs] - (util/apply-scala-fn (IO/MNISTIter) (util/convert-io-map kwargs)))) - -(defn - mnist-pack - ([kwargs] - (util/apply-scala-fn (IO/MNISTPack) (util/convert-io-map kwargs)))) - -(defn - create-iterator - ([iter-name kwargs-map] - (util/coerce-return (IO/createIterator iter-name (util/convert-io-map kwargs-map))))) - -(defn - create-mx-data-pack - ([pack-name kwargs-map] - (util/coerce-return (IO/createMXDataPack pack-name (util/convert-io-map kwargs-map))))) - -(defn resize-iter - "Resize a data iterator to a given number of batches per epoch. - May produce an incomplete batch in the middle of an epoch due - to padding from the internal iterator. - - `data-iter`: internal data iterator - `resize`: number of batches per epoch to resize to - `reset-internal`: whether to reset the internal iterator with reset" - [data-iter resize reset-internal] - (new ResizeIter data-iter resize reset-internal)) - -(defn prefetching-iter - "Takes one or more data iterators and combines them with pre-fetching" - [iters data-names label-names] - (new PrefetchingIter - (util/vec->indexed-seq iters) - (->> data-names - (mapv util/convert-map) - (util/vec->indexed-seq)) - (->> label-names - (mapv util/convert-map) - (util/vec->indexed-seq)))) - -(defn ndarray-iter - "NDArrayIter object in mxnet. Takes NDArrays to build a data iterator. - `data`: vector of NDArrays - Can either be in the form [ndarray ...] or - {data-desc0 ndarray0 data-desc1 ndarray1 ...} - `opts` map of: - :label Same as data, but is not fed to the model during testing. - :data-batch-size Batch size (default 1) - :shuffle Whether to shuffle the data (default false) - :last-batch-handle pad, discard, or rollover (default pad) - :data-name String of data name (default data) - :label-name String of label name (default label) - This iterator will pad, discard or roll over the last batch if - the size of data does not match batch-size. Roll over is intended - for training and can cause problems if used for prediction." - ([data {:keys [label data-batch-size shuffle last-batch-handle data-name label-name] :as opts - :or {label nil - data-batch-size 1 - shuffle false - last-batch-handle "pad" - data-name "data" - label-name "label"}}] - (if (map? 
data) - (new NDArrayIter - (.toIndexedSeq (util/list-map data)) - (if label - (.toIndexedSeq (util/list-map label)) - (util/empty-indexed-seq)) - (int data-batch-size) - shuffle - last-batch-handle) - (new NDArrayIter - (util/vec->indexed-seq data) - (if label (util/vec->indexed-seq label) (util/empty-indexed-seq)) - (int data-batch-size) - shuffle - last-batch-handle - data-name - label-name))) - ([data] - (ndarray-iter data {}))) - -(defn dispose [iterator] - (.dispose iterator)) - -(s/def ::name string?) -(s/def ::shape vector?) -(s/def ::dtype #{dtype/UINT8 dtype/INT32 dtype/FLOAT16 dtype/FLOAT32 dtype/FLOAT64}) -(s/def ::layout (s/or :custom string? :standard #{layout/UNDEFINED - layout/NCHW - layout/NTC - layout/NT - layout/N})) -(s/def ::data-desc (s/keys :req-un [::name ::shape] :opt-un [::dtype ::layout])) - -(defn data-desc - ([{:keys [name shape dtype layout] :as opts - :or {dtype base/MX_REAL_TYPE - layout layout/UNDEFINED}}] - (util/validate! ::data-desc opts "Invalid data description") - (new DataDesc name (mx-shape/->shape shape) dtype layout)) - ([name shape] - (data-desc {:name name :shape shape}))) - -(s/def ::ndarray #(instance? NDArray %)) -(s/def ::data vector?) -(s/def ::label (s/nilable (s/coll-of ::ndarray :kind vector?))) -(s/def ::index (s/nilable (s/coll-of int? :kind vector?))) -(s/def ::pad integer?) -(s/def ::bucket-key string?) -(s/def ::provided-data ::data-desc) -(s/def ::provided-label ::data-desc) -(s/def ::data-batch-class #(instance? DataBatch %)) - -(s/def ::data-batch - (s/or - :data-batch-class - ::data-batch-class - :data-batch-map - (s/keys :req-un [::data] :opt-un [::label ::index ::pad ::bucket-key ::provided-data ::provided-label]))) - -(defn data-batch - [{:keys [data label index pad bucket-key provided-data provided-label] :as info - :or {data [] label [] index [] pad 0}}] - ;; provided-data and provided-label are maps of name to shape, indicating the order of the data/label loading - (util/validate! 
::data-batch info "Invalid data batch") - (new DataBatch - (util/vec->indexed-seq data) - (util/vec->indexed-seq label) - (util/vec->indexed-seq index) - (int pad) - bucket-key - (when provided-data (util/list-map provided-data)) - (when provided-label (util/list-map provided-label)))) - -(defn rand-iter - "An implementation of a random noise iterator. - Instead of data, pass in the shape vector of the noise shape" - ([shape-vec {:keys [label data-batch-size shuffle last-batch-handle data-name label-name] :as opts - :or {label nil - data-batch-size 1 - shuffle false - last-batch-handle "pad" - data-name "rand" - label-name "label"}}] - (let [data [(ndarray/ones shape-vec)]] - (proxy [NDArrayIter] - [(util/vec->indexed-seq data) - (if label (util/vec->indexed-seq label) (util/empty-indexed-seq)) - (int data-batch-size) - shuffle - last-batch-handle - data-name - label-name] - (provideData [] - (util/list-map {data-name (mx-shape/->vec (ndarray/shape (first data)))})) - (provideLabel [] (util/empty-list-map)) - (hasNext [] true) - (getData - ([] (util/vec->indexed-seq [(random/normal 0 1 (mx-shape/->vec (ndarray/shape (first data))))]))) - (getLabel - ([] (util/vec->indexed-seq [])))))) - ([shape-vec] - (rand-iter shape-vec {}))) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/kvstore.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/kvstore.clj deleted file mode 100644 index ea7e7f933cdd..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/kvstore.clj +++ /dev/null @@ -1,203 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.kvstore - (:refer-clojure :exclude [type]) - (:require [clojure.spec.alpha :as spec] - [org.apache.clojure-mxnet.util :as util] - [clojure.spec.alpha :as s]) - (:import (org.apache.mxnet KVStore NDArray))) - -(defn create - " Create a new KVStore - WARNING: it is your responsibility to clear this object through dispose. - - name : #{local, dist} (default is local) - The type of KVStore - - local works for multiple devices on a single machine (single process) - - dist works for multi-machines (multiple processes)" - ([name] - (KVStore/create name)) - ([] - (create "local"))) - -(defn dispose - "Release the native memory. - The object shall never be used after it is disposed." - [kvstore] - (.dispose kvstore)) - -(s/def ::ks (s/or :string string? - :vec-of-string (s/coll-of string? :kind vector?))) -(s/def ::ndarray #(instance? NDArray %)) -(s/def ::vs (s/or :ndarray ::ndarray - :vec-of-ndarray (s/coll-of ::ndarray :kind vector?))) - -(defn init - "Initialize a single or a sequence of key-value pairs into the store. - For each key, one must init it before push and pull. - Only worker 0's (rank == 0) data are used. 
- This function returns after data have been initialized successfully. - kvstore - KVStore - ks - keys (vec of strings or a single string) - vs - values (vec of NDArrays or a single NDArray)" - [kvstore ks vs] - (util/validate! ::ks ks "Invalid keys") - (util/validate! ::vs vs "Invalid values") - (doto kvstore - (.init (into-array (if (vector? ks) ks [ks])) - (into-array (if (vector? vs) vs [vs]))))) - -(s/def ::priority int?) - -(defn push - " Push a single or a sequence of key-value pairs into the store. - Data consistency: - 1. this function returns after adding an operator to the engine. - 2. push is always called after all previous push and pull on the same key are finished - 3. there is no synchronization between workers. One can use `barrier` to sync all workers - - - ks - keys - - vs - corresponding values - - priority - The priority of the push operation. - The higher the priority, the faster this action is likely - to be executed before other push actions." - ([kvstore ks vs priority] - (util/validate! ::ks ks "Invalid keys") - (util/validate! ::vs vs "Invalid values") - (util/validate! ::priority priority "Invalid priority") - (let [store-vals (if (vector? vs) vs [vs]) - store-keys (if (vector? ks) ks (into [] (repeat (count store-vals) ks)))] - (doto kvstore - (.push (into-array store-keys) - (into-array store-vals) - (int priority))))) - ([kvstore ks vs] - (push kvstore ks vs 0))) - -(s/def ::outs (s/or :ndarray ::ndarray - :vec-of-ndarray (s/coll-of ::ndarray :kind vector?))) - -(defn pull - " Pull a single value or a sequence of values from the store. - Data consistency: - 1. this function returns after adding an operator to the engine. But any - further read on out will be blocked until it is finished. - 2. pull is always called after all previous push and pull on the same key are finished - 3. It pulls the newest value from the store. - - kvstore - - ks single key or vector of keys (strings) - - outs single out or vector of outs (NDArrays) - - priority - The priority of the pull operation. - The higher the priority, the faster this action is likely - to be executed before other pull actions." - ([kvstore ks outs priority] - (util/validate! ::ks ks "Invalid keys") - (util/validate! ::outs outs "Invalid outs") - (util/validate! ::priority priority "Invalid priority") - (let [store-vals (if (vector? outs) outs [outs]) - store-keys (if (vector? ks) ks (into [] (repeat (count store-vals) ks)))] - (doto kvstore - (.pull (into-array store-keys) - (into-array store-vals) - (int priority))))) - ([kvstore ks outs] - (pull kvstore ks outs 0))) - -(defn type - "Get the type of the kvstore" - [kvstore] - (.type kvstore)) - -(defn num-workers - "Get the number of worker nodes" - [kvstore] - (.numWorkers kvstore)) - -(defn rank - "Get the rank of this worker node - returns: The rank of this node, which is in [0, num-workers)" - [kvstore] - (.rank kvstore)) - -(defn set-optimizer - "Register an optimizer to the store - If there are multiple machines, this process (should be a worker node) - will pack this optimizer and send it to all servers. It returns after - this action is done" - [kvstore optimizer] - (doto kvstore - (.setOptimizer optimizer))) - -(defn barrier - "Global barrier among all worker nodes - For example, assume there are n machines, we want to let machine 0 first - init the values, and then pull the inited value to all machines. Before - pulling, we can place a barrier to guarantee that the initialization is - finished." 
- [kvstore] - (doto kvstore - (.barrier))) - -(defn num-dead-node [kvstore node-id] - (.numDeadNode kvstore (int node-id))) - -(defn set-barrier-before-exit - " Whether to do barrier when the kvstore finalizes - - kvstore - - barrier-before-exit boolean" - [kvstore barrier-before-exit] - (doto kvstore - (.setBarrierBeforeExit barrier-before-exit))) - -(s/def ::head int?) -(s/def ::body string?) - -(defn send-command-to-servers - "Send a command to all server nodes - Send a command to all server nodes, which will make each server node run - KVStoreServer.controller - This function returns after the command has been executed in all server nodes - - kvstore - - head the head of the command - - body the body of the command" - [kvstore head body] - (util/validate! ::head head "Invalid head") - (util/validate! ::body body "Invalid body") - (doto kvstore - (.sendCommandToServers (int head) body))) - -(s/def ::fname string?) - -(defn save-optimizer-states - "Save optimizer (updater) state to file - - kvstore - - fname Path to output states file." - [kvstore fname] - (util/validate! ::fname fname "Invalid filename") - (doto kvstore - (.saveOptimizerStates fname))) - -(defn load-optimizer-states - "Load optimizer (updater) state from file - - kvstore - - fname Path to input states file." - [kvstore fname] - (util/validate! ::fname fname "Invalid filename") - (doto kvstore - (.loadOptimizerStates fname))) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/kvstore_server.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/kvstore_server.clj deleted file mode 100644 index ffce70a24d59..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/kvstore_server.clj +++ /dev/null @@ -1,37 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.kvstore-server - (:require [clojure.spec.alpha :as spec] - [org.apache.clojure-mxnet.util :as util] - [clojure.spec.alpha :as s]) - (:import (org.apache.mxnet KVStoreServer))) - -(s/def ::env-map (s/map-of string? string?)) - -(defn init [env-map] - (util/validate! ::env-map env-map "Invalid environment map") - (KVStoreServer/init (util/convert-map env-map))) - -(s/def ::die-if-others-go-out-timeout int?) - -(defn start - ([die-if-others-go-out-timeout] - (util/validate! 
::die-if-others-go-out-timeout die-if-others-go-out-timeout "Invalid setting") - (KVStoreServer/start die-if-others-go-out-timeout)) - ([] - (start 0))) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/layout.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/layout.clj deleted file mode 100644 index f379a7a02d28..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/layout.clj +++ /dev/null @@ -1,35 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.layout - (:import (org.apache.mxnet Layout))) - -;; -;; Layout definition of DataDesc -;; N Batch size -;; C channels -;; H Height -;; W Width -;; T sequence length -;; __undefined__ default value of Layout -;; - -(def UNDEFINED (Layout/UNDEFINED)) ;=> "__UNDEFINED__" -(def NCHW (Layout/NCHW)) ;=> "NCHW" -(def NTC (Layout/NTC)) ;=> "NTC" -(def NT (Layout/NT)) ;=> "NT" -(def N (Layout/N)) ;=> "N" diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/lr_scheduler.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/lr_scheduler.clj deleted file mode 100644 index d3965401af95..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/lr_scheduler.clj +++ /dev/null @@ -1,27 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - -(ns org.apache.clojure-mxnet.lr-scheduler - (:import (org.apache.mxnet FactorScheduler))) - -(defn factor-scheduler - "Assume the weight has been updated n times, then the learning rate will - be base_lr * factor^(floor(n/step)) - - step int, schedule learning rate after n updates - - factor number, the factor for reducing the learning rate" - [step factor] - (new FactorScheduler (int step) (float factor))) diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/module.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/module.clj deleted file mode 100644 index 09f17e5d81f4..000000000000 --- a/contrib/clojure-package/src/org/apache/clojure_mxnet/module.clj +++ /dev/null @@ -1,773 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.module - "Module API for Clojure package." - (:refer-clojure :exclude [update symbol]) - (:require [org.apache.clojure-mxnet.callback :as callback] - [org.apache.clojure-mxnet.context :as context] - [org.apache.clojure-mxnet.eval-metric :as eval-metric] - [org.apache.clojure-mxnet.initializer :as initializer] - [org.apache.clojure-mxnet.io :as mx-io] - [org.apache.clojure-mxnet.optimizer :as optimizer] - [org.apache.clojure-mxnet.shape :as mx-shape] - [org.apache.clojure-mxnet.util :as util] - [clojure.java.io :as io] - [clojure.spec.alpha :as s] - [org.apache.clojure-mxnet.ndarray :as ndarray]) - (:import (org.apache.mxnet.module Module FitParams BaseModule) - (org.apache.mxnet.io MXDataIter NDArrayIter) - (org.apache.mxnet Initializer Optimizer NDArray DataBatch - Context EvalMetric Monitor Callback$Speedometer - DataDesc))) - -(defn module - "Module is a basic module that wraps a `symbol`. - `sym`: Symbol definition. - `opts-map` { - `data-names`: vector of strings - Default is [\"data\"] - Input data names - `label-names`: vector of strings - Default is [\"softmax_label\"] - Input label names - `contexts`: Context - Default is `context/cpu`. - `workload-list`: Default nil - Indicating uniform workload. - `fixed-param-names`: Default nil - Indicating no network parameters are fixed. 
- } - Ex: - (module sym) - (module sym {:data-names [\"data\"] - :label-names [\"linear_regression_label\"]})" - ([sym {:keys [data-names label-names contexts - workload-list fixed-param-names] :as opts - :or {data-names ["data"] - label-names ["softmax_label"] - contexts [(context/default-context)]}}] - (new Module - sym - (util/vec->indexed-seq data-names) - (util/vec->indexed-seq label-names) - (into-array contexts) - (util/->option (when workload-list (util/vec->indexed-seq workload-list))) - (util/->option (when fixed-param-names (util/vec->set fixed-param-names))))) - ([sym data-names label-names contexts] - (module sym {:data-names data-names :label-names label-names :contexts contexts})) - ([sym] - (module sym {}))) - -(defn data-names [mod] - (.dataNames mod)) - -(defn data-shapes [mod] - (.dataShapes mod)) - -(defn label-shapes [mod] - (.labelShapes mod)) - -(defn output-names [mod] - (.outputNames mod)) - -(defn output-shapes [mod] - (.outputShapes mod)) - -(s/def ::data-shapes (s/coll-of ::mx-io/data-desc)) -(s/def ::label-shapes (s/coll-of ::mx-io/data-desc)) -(s/def ::for-training boolean?) -(s/def ::inputs-need-grad boolean?) -(s/def ::force-rebind boolean?) -(s/def ::shared-module #(instance? Module %)) -(s/def ::grad-req string?) -(s/def ::bind-opts - (s/keys :req-un [::data-shapes] - :opt-un [::label-shapes ::for-training ::inputs-need-grad - ::force-rebind ::shared-module ::grad-req])) - -(defn bind - "Bind the symbols to construct executors. This is necessary before one - can perform computation with the module. - `mod`: module - `opts-map` { - `data-shapes`: map of `:name`, `:shape`, `:dtype`, and `:layout` - Typically is `(provide-data-desc data-iter)`. Data shape must be in the - form of `io/data-desc` - `label-shapes`: map of `:name` `:shape` `:dtype` and `:layout` - Typically is `(provide-label-desc data-iter)`. - `for-training`: boolean - Default is `true` - Whether the executors should be bound for training. - `inputs-need-grad`: boolean - Default is `false`. - Whether the gradients to the input data need to be computed. - Typically this is not needed. But this might be needed when - implementing composition of modules. - `force-rebind`: boolean - Default is `false`. - This function does nothing if the executors are already bound. But - with this `true`, the executors will be forced to rebind. - `shared-module`: Default is nil. - This is used in bucketing. When not `nil`, the shared module - essentially corresponds to a different bucket -- a module with - different symbol but with the same sets of parameters (e.g. unrolled - RNNs with different lengths). - } - Ex: - (bind mod {:data-shapes (mx-io/provide-data train-iter) - :label-shapes (mx-io/provide-label train-iter)})" - [mod {:keys [data-shapes label-shapes for-training inputs-need-grad - force-rebind shared-module grad-req] :as opts - :or {for-training true - inputs-need-grad false - force-rebind false - grad-req "write"}}] - (util/validate! ::bind-opts opts "Incorrect bind options") - (doto mod - (.bind - (->> data-shapes - (map mx-io/data-desc) - (util/vec->indexed-seq)) - (util/->option (some->> label-shapes - (map mx-io/data-desc) - (util/vec->indexed-seq))) - for-training - inputs-need-grad - force-rebind - (util/->option shared-module) - grad-req))) - -(s/def ::initializer #(instance? Initializer %)) -(s/def ::arg-params map?) -(s/def ::aux-params map?) -(s/def ::force-init boolean?) -(s/def ::allow-extra boolean?) 
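-;; Illustrative sketch of typical module setup using the pieces above; `net` and -;; `train-iter` are hypothetical stand-ins for a symbol and a data iterator: -(comment - (-> (module net {:data-names ["data"] :label-names ["softmax_label"]}) - (bind {:data-shapes (mx-io/provide-data train-iter) - :label-shapes (mx-io/provide-label train-iter)}))) -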
-(s/def ::init-params-opts - (s/keys :opt-un [::initializer ::arg-params ::aux-params - ::force-init ::allow-extra])) - -(defn init-params - "Initialize the parameters and auxiliary states. - `opts-map` { - `initializer`: Initializer - Default is `uniform` - Called to initialize parameters if needed. - `arg-params`: map - If not nil, should be a map of existing arg-params. Initialization - will be copied from that. - `aux-params`: map - If not nil, should be a map of existing aux-params. Initialization - will be copied from that. - `allow-missing`: boolean - Default is `false` - If true, params could contain missing values, and the initializer will - be called to fill those missing params. - `force-init`: boolean - Default is `false` - If true, will force re-initialize even if already initialized. - `allow-extra`: boolean - Default is `false` - Whether to allow extra parameters that are not needed by the symbol. - If this is `true`, no error will be thrown when `arg-params` or - `aux-params` contain extra parameters that are not needed by the - executor. - } - Ex: - (init-params mod {:initializer (initializer/xavier)}) - (init-params mod {:force-init true :allow-extra true})" - ([mod {:keys [initializer arg-params aux-params allow-missing force-init - allow-extra] :as opts - :or {initializer (initializer/uniform 0.01) - allow-missing false - force-init false - allow-extra false}}] - (util/validate! ::init-params-opts opts "Invalid init-params opts") - (doto mod - (.initParams - initializer - (some-> arg-params (util/convert-map)) - (some-> aux-params (util/convert-map)) - allow-missing - force-init - allow-extra))) - ([mod] - (init-params mod {}))) - -(s/def ::optimizer #(instance? Optimizer %)) -(s/def ::kvstore string?) -(s/def ::reset-optimizer boolean?) -(s/def ::force-init boolean?) -(s/def ::init-optimizer-opts - (s/keys :opt-un [::optimizer ::kvstore ::reset-optimizer ::force-init])) - -(defn init-optimizer - "Install and initialize optimizers. - `mod`: Module - `opts-map` { - `kvstore`: string - Default is \"local\" - `optimizer`: Optimizer - Default is `sgd` - `reset-optimizer`: boolean - Default is `true` - Indicating whether we should set `rescaleGrad` & `idx2name` for - optimizer according to executorGroup. - `force-init`: boolean - Default is `false` - Indicating whether we should force re-initializing the optimizer - in the case an optimizer is already installed. - } - Ex: - (init-optimizer mod {:optimizer (optimizer/sgd {:learning-rate 0.1})})" - ([mod {:keys [kvstore optimizer reset-optimizer force-init] :as opts - :or {kvstore "local" - optimizer (optimizer/sgd) - reset-optimizer true - force-init false}}] - (util/validate! ::init-optimizer-opts opts "Invalid init-optimizer options") - (doto mod - (.initOptimizer kvstore optimizer reset-optimizer force-init))) - ([mod] - (init-optimizer mod {}))) - -(defn forward - "Forward computation. - `data-batch`: Either map or DataBatch - Input data of form `io/data-batch`. - `is-train`: Default is nil - Which means `is_train` takes the value of `for_training`." - ([mod data-batch is-train] - (util/validate! ::mx-io/data-batch data-batch "Invalid data batch") - (doto mod - (.forward - (if (map? data-batch) - (mx-io/data-batch data-batch) - data-batch) - (util/->option is-train)))) - ([mod data-batch-map] - (forward mod data-batch-map nil))) - -(s/def ::ndarray #(instance? NDArray %)) -(s/def ::out-grads (s/nilable (s/coll-of ::ndarray))) - -(defn backward - "Backward computation. 
- `out-grads`: collection of NDArrays - Gradient on the outputs to be propagated back. This parameter is only - needed when bind is called on outputs that are not a loss function." - ([mod out-grads] - (util/validate! ::out-grads out-grads "Invalid out-grads") - (doto mod - (.backward (some-> out-grads into-array)))) - ([mod] - (backward mod nil))) - -(defn forward-backward - "A convenient function that calls both `forward` and `backward`." - [mod data-batch] - (util/validate! ::mx-io/data-batch data-batch "Invalid data-batch") - (doto mod - (.forwardBackward data-batch))) - -(defn outputs - "Get outputs of the previous forward computation. - In the case when data-parallelism is used, the outputs will be collected from - multiple devices. The results will look like - `[[out1_dev1, out1_dev2], [out2_dev1, out2_dev2]]`. - Those `NDArray`s might live on different devices." - [mod] - (->> (.getOutputs mod) - (util/scala-vector->vec) - (mapv util/scala-vector->vec))) - -(defn update - "Update parameters according to the installed optimizer and the gradients - computed in the previous forward-backward batch." - [mod] - (doto mod - (.update))) - -(defn outputs-merged - "Get outputs of the previous forward computation. - In the case when data-parallelism is used, the outputs will be merged from - multiple devices, as they look like from a single executor. - The results will look like `[out1, out2]`." - [mod] - (->> (.getOutputsMerged mod) - (util/scala-vector->vec))) - -(defn input-grads - "Get the gradients to the inputs, computed in the previous backward computation. - In the case when data-parallelism is used, the outputs will be collected from - multiple devices. The results will look like - `[[grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2]]`. - Those `NDArray`s might live on different devices." - [mod] - (->> (.getInputGrads mod) - (util/scala-vector->vec) - (mapv util/scala-vector->vec))) - -(defn input-grads-merged - "Get the gradients to the inputs, computed in the previous backward computation. - In the case when data-parallelism is used, the outputs will be merged from - multiple devices, as they look like from a single executor. - The results will look like `[grad1, grad2]`." - [mod] - (->> (.getInputGradsMerged mod) - (util/scala-vector->vec))) - -(s/def ::prefix string?) -(s/def ::epoch int?) -(s/def ::save-opt-states boolean?) -(s/def ::save-checkpoint-opts - (s/keys :req-un [::prefix ::epoch] - :opt-un [::save-opt-states])) - -(defn save-checkpoint - "Save current progress to checkpoint. - Use mx.callback.module_checkpoint as epoch_end_callback to save during - training. - `mod`: Module - `opts-map` { - `prefix`: string - The file prefix to checkpoint to - `epoch`: int - The current epoch number - `save-opt-states`: boolean - Default is `false` - Whether to save optimizer states for continued training - } - Ex: - (save-checkpoint mod {:prefix \"saved_model\" :epoch 0 :save-opt-states true})" - ([mod {:keys [prefix epoch save-opt-states] :as opts - :or {save-opt-states false}}] - (util/validate! ::save-checkpoint-opts opts "Invalid save checkpoint opts") - (doto mod - (.saveCheckpoint prefix (int epoch) save-opt-states))) - ([mod prefix epoch] - (save-checkpoint mod {:prefix prefix :epoch epoch}))) - -(s/def ::load-optimizer-states boolean?) -(s/def ::data-names (s/coll-of string? :kind vector?)) -(s/def ::label-names (s/coll-of string? :kind vector?)) -(s/def ::context #(instance? 
-(s/def ::load-optimizer-states boolean?)
-(s/def ::data-names (s/coll-of string? :kind vector?))
-(s/def ::label-names (s/coll-of string? :kind vector?))
-(s/def ::context #(instance? Context %))
-(s/def ::contexts (s/coll-of ::context :kind vector?))
-(s/def ::workload-list (s/coll-of number? :kind vector?))
-(s/def ::fixed-param-names (s/coll-of string? :kind vector?))
-(s/def ::load-checkpoint-opts
-  (s/keys :req-un [::prefix ::epoch]
-          :opt-un [::load-optimizer-states ::data-names ::label-names
-                   ::contexts ::workload-list ::fixed-param-names]))
-
-(defn load-checkpoint
-  "Create a model from a previously saved checkpoint.
-   `opts-map` {
-     `prefix`: string
-         Path prefix of saved model files. You should have prefix-symbol.json,
-         prefix-xxxx.params, and optionally prefix-xxxx.states, where xxxx is
-         the epoch number.
-     `epoch`: int - Epoch to load.
-     `load-optimizer-states`: boolean - Default is false
-         Whether to load optimizer states. The checkpoint needs to have been
-         made with `save-opt-states` = `true`.
-     `data-names`: vector of strings - Default is [\"data\"]
-         Input data names.
-     `label-names`: vector of strings - Default is [\"softmax_label\"]
-         Input label names.
-     `contexts`: Context - Default is `context/cpu`
-     `workload-list`: Default nil, indicating uniform workload.
-     `fixed-param-names`: Default nil, indicating no network parameters are fixed.
-   Ex:
-     (load-checkpoint {:prefix \"my-model\" :epoch 1 :load-optimizer-states true})"
-  ([{:keys [prefix epoch load-optimizer-states data-names label-names contexts
-            workload-list fixed-param-names] :as opts
-     :or {load-optimizer-states false
-          data-names ["data"]
-          label-names ["softmax_label"]
-          contexts [(context/cpu)]
-          workload-list nil
-          fixed-param-names nil}}]
-   (util/validate! ::load-checkpoint-opts opts "Invalid load-checkpoint opts")
-   (Module/loadCheckpoint
-    prefix
-    (int epoch)
-    load-optimizer-states
-    (util/vec->indexed-seq data-names)
-    (util/vec->indexed-seq label-names)
-    (into-array contexts)
-    (util/->option (when workload-list (util/vec->indexed-seq workload-list)))
-    (util/->option (when fixed-param-names (util/vec->set fixed-param-names)))))
-  ([prefix epoch]
-   (load-checkpoint {:prefix prefix :epoch epoch})))
-
-(defn symbol [mod]
-  (.getSymbol mod))
-
-(defn params [mod]
-  (map util/scala-map->map (util/coerce-return (.getParams mod))))
-
-(defn arg-params [mod]
-  (util/scala-map->map (.argParams mod)))
-
-(defn aux-params [mod]
-  (util/scala-map->map (.auxParams mod)))
-
-(defn reshape
-  "Reshapes the module for new input shapes.
-   `mod`: Module
-   `data-shapes`: Typically `(provide-data data-iter)`
-   `label-shapes`: Typically `(provide-label data-iter)`"
-  ([mod data-shapes label-shapes]
-   (util/validate! ::data-shapes data-shapes "Invalid data-shapes")
-   (util/validate! (s/nilable ::label-shapes) label-shapes "Invalid label-shapes")
-   (doto mod
-     (.reshape
-      (->> data-shapes
-           (map mx-io/data-desc)
-           (util/vec->indexed-seq))
-      (util/->option (some->> label-shapes
-                              (map mx-io/data-desc)
-                              (util/vec->indexed-seq))))))
-  ([mod data-shapes]
-   (reshape mod data-shapes nil)))
-
-(s/def ::set-param-opts
-  (s/keys :opt-un [::arg-params ::aux-params ::allow-missing
-                   ::force-init ::allow-extra]))
-
-(defn get-params [mod]
-  (.getParams mod))
-
-(defn set-params
-  "Assign parameter and aux state values.
-   `mod`: Module
-   `opts-map` {
-     `arg-params`: map - map of name to value (`NDArray`) mapping.
-     `aux-params`: map - map of name to value (`NDArray`) mapping.
-     `allow-missing`: boolean
-         If true, params could contain missing values, and the initializer will
-         be called to fill those missing params.
-     `force-init`: boolean - Default is `false`
-         If true, will force re-initialize even if already initialized.
-     `allow-extra`: boolean - Default is `false`
-         Whether to allow extra parameters that are not needed by the symbol.
-         If this is `true`, no error will be thrown when arg-params or
-         aux-params contain extra parameters that are not needed by the
-         executor.
-   }
-   Ex:
-     (set-params mod
-       {:arg-params {\"fc_0_weight\" (ndarray/array [0.15 0.2 0.25 0.3] [2 2])}
-        :allow-missing true})"
-  [mod {:keys [arg-params aux-params allow-missing force-init
-               allow-extra] :as opts
-        :or {allow-missing false force-init true allow-extra false}}]
-  (util/validate! ::set-param-opts opts "Invalid set-params")
-  (doto mod
-    (.setParams
-     (util/convert-symbol-map arg-params)
-     (when aux-params (util/convert-symbol-map aux-params))
-     allow-missing
-     force-init
-     allow-extra)))
-
-(defn install-monitor
-  "Install a monitor on all executors."
-  [mod monitor]
-  (doto mod
-    (.installMonitor monitor)))
-
-(defn borrow-optimizer
-  "Borrow an optimizer from a shared module. Used in bucketing, where exactly
-   the same optimizer (esp. kvstore) is used.
-   `mod`: Module
-   `shared-module`"
-  [mod shared-module]
-  (doto mod
-    (.borrowOptimizer shared-module)))
-
-(defn save-optimizer-states
-  "Save optimizer (updater) state to a file.
-   `mod`: Module
-   `fname`: string - Path to the output states file."
-  [mod fname]
-  (doto mod
-    (.saveOptimizerStates fname)))
-
-(defn load-optimizer-states
-  "Load optimizer (updater) state from a file.
-   `mod`: Module
-   `fname`: string - Path to the input states file."
-  [mod fname]
-  (doto mod
-    (.loadOptimizerStates fname)))
-
-(s/def ::eval-metric #(instance? EvalMetric %))
-(s/def ::labels (s/coll-of ::ndarray :kind vector?))
-
-(defn update-metric
-  "Evaluate and accumulate the evaluation metric on outputs of the last forward
-   computation.
-   `mod`: module
-   `eval-metric`: EvalMetric
-   `labels`: collection of NDArrays
-   Ex:
-     (update-metric mod (eval-metric/mse) labels)"
-  [mod eval-metric labels]
-  (util/validate! ::eval-metric eval-metric "Invalid eval metric")
-  (util/validate! ::labels labels "Invalid labels")
-  (doto mod
-    (.updateMetric eval-metric (util/vec->indexed-seq labels))))
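A short sketch of persisting updater state with the two functions above; the "my-model.states" path is hypothetical:

(comment
  ;; after (or alongside) a parameter checkpoint
  (save-optimizer-states mod "my-model.states")
  ;; later, on a module with the same optimizer installed
  (load-optimizer-states mod "my-model.states"))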
-(s/def ::begin-epoch int?)
-(s/def ::validation-metric ::eval-metric)
-(s/def ::monitor #(instance? Monitor %))
-(s/def ::batch-end-callback #(instance? Callback$Speedometer %))
-(s/def ::fit-params-opts
-  (s/keys :opt-un [::eval-metric ::kvstore ::optimizer ::initializer
-                   ::arg-params ::aux-params ::allow-missing ::force-rebind
-                   ::force-init ::begin-epoch ::validation-metric ::monitor
-                   ::batch-end-callback]))
-
-;; callbacks are not supported for now
-(defn fit-params
-  "Initialize FitParams with the provided parameters.
-   `eval-metric`: EvalMetric - Default is `accuracy`
-   `kvstore`: String - Default is \"local\"
-   `optimizer`: Optimizer - Default is `sgd`
-   `initializer`: Initializer - Default is `uniform`
-       Called to initialize parameters if needed.
-   `arg-params`: map
-       If not nil, should be a map of existing `arg-params`. Initialization
-       will be copied from that.
-   `aux-params`: map
-       If not nil, should be a map of existing `aux-params`. Initialization
-       will be copied from that.
-   `allow-missing`: boolean - Default is `false`
-       If `true`, params could contain missing values, and the initializer will
-       be called to fill those missing params.
-   `force-rebind`: boolean - Default is `false`
-       This function does nothing if the executors are already bound. But with
-       this `true`, the executors will be forced to rebind.
-   `force-init`: boolean - Default is `false`
-       If `true`, will force re-initialize even if already initialized.
-   `begin-epoch`: int - Default is 0
-   `validation-metric`: EvalMetric
-   `monitor`: Monitor
-   Ex:
-     (fit-params {:force-init true :force-rebind true :allow-missing true})
-     (fit-params
-       {:batch-end-callback (callback/speedometer batch-size 100)
-        :initializer (initializer/xavier)
-        :optimizer (optimizer/sgd {:learning-rate 0.01})
-        :eval-metric (eval-metric/mse)})"
-  ([{:keys [eval-metric kvstore optimizer
-            initializer arg-params aux-params
-            allow-missing force-rebind force-init begin-epoch
-            validation-metric monitor batch-end-callback] :as opts
-     :or {eval-metric (eval-metric/accuracy)
-          kvstore "local"
-          optimizer (optimizer/sgd)
-          initializer (initializer/uniform 0.01)
-          allow-missing false
-          force-rebind false
-          force-init false
-          begin-epoch 0}}]
-   (util/validate! ::fit-params-opts opts "Invalid fit param opts")
-   (doto (new FitParams)
-     (.setEvalMetric eval-metric)
-     (.setKVStore kvstore)
-     (.setOptimizer optimizer)
-     (.setInitializer initializer)
-     (.setArgParams (some-> arg-params (util/convert-map)))
-     (.setAuxParams (some-> aux-params (util/convert-map)))
-     (.setAllowMissing allow-missing)
-     (.setForceRebind force-rebind)
-     (.setForceInit force-init)
-     (.setBeginEpoch (int begin-epoch))
-     (.setValidationMetric validation-metric)
-     (.setMonitor monitor)
-     (.setBatchEndCallback batch-end-callback)))
-  ([]
-   (new FitParams)))
-
-(s/def ::mx-data-iter #(instance? MXDataIter %))
-(s/def ::ndarray-iter #(instance? NDArrayIter %))
-(s/def ::train-data (s/or :mx-iter ::mx-data-iter :ndarray-iter ::ndarray-iter))
-(s/def ::eval-data ::train-data)
-(s/def ::num-epoch (s/and int? pos?))
-(s/def ::fit-params #(instance? FitParams %))
-(s/def ::fit-options
-  (s/keys :req-un [::train-data]
-          :opt-un [::eval-data ::num-epoch ::fit-params]))
-
-;;; High Level API
-
-(defn score
-  "Run prediction on `eval-data` and evaluate the performance according to
-   `eval-metric`.
-   `mod`: module
-   `opts-map` {
-     `eval-data`: DataIter
-     `eval-metric`: EvalMetric
-     `num-batch`: int - Default is `Integer/MAX_VALUE`,
-         the number of batches to run, meaning run until the `DataIter`
-         finishes.
-     `batch-end-callback`: not supported yet.
-     `reset`: boolean - Default is `true`,
-         indicating whether we should reset `eval-data` before starting
-         to evaluate.
-     `epoch`: int - Default is 0.
-         For compatibility, this will be passed to callbacks (if any). During
-         training, this will correspond to the training epoch number.
-   }
-   Ex:
-     (score mod {:eval-data data-iter :eval-metric (eval-metric/accuracy)})
-     (score mod {:eval-data data-iter
-                 :eval-metric (eval-metric/mse) :num-batch 10})"
-  [mod {:keys [eval-data eval-metric num-batch reset epoch] :as opts
-        :or {num-batch Integer/MAX_VALUE
-             reset true
-             epoch 0}}]
-  (util/validate! ::score-opts opts "Invalid score options")
-  (do (eval-metric/reset eval-metric)
-      (eval-metric/get
-       (.score mod
-               eval-data
-               eval-metric
-               (int num-batch)
-               (util/->option nil)
-               (util/->option nil)
-               reset
-               (int epoch)))))
-
-(defn fit
-  "Train the module parameters.
-   `mod`: Module
-   `opts-map` {
-     `train-data`: DataIter
-     `eval-data`: DataIter
-         If not nil, will be used as a validation set to evaluate the
-         performance after each epoch.
-     `num-epoch`: int - Number of epochs to run training.
-     `fit-params`: FitParams - Extra parameters for training (see fit-params).
-   }
-   Ex:
-     (fit mod {:train-data train-iter :eval-data test-iter :num-epoch 100})
-     (fit mod {:train-data train-iter
-               :eval-data test-iter
-               :num-epoch 5
-               :fit-params
-               (fit-params {:batch-end-callback (callback/speedometer 128 100)
-                            :initializer (initializer/xavier)
-                            :optimizer (optimizer/sgd {:learning-rate 0.01})
-                            :eval-metric (eval-metric/mse)})})"
-  [mod {:keys [train-data eval-data num-epoch fit-params] :as opts
-        :or {num-epoch 1
-             fit-params (new FitParams)}}]
-  (util/validate! ::fit-options opts "Invalid options for fit")
-  (doto mod
-    (.fit
-     train-data
-     (util/->option eval-data)
-     (int num-epoch)
-     fit-params)))
-
-(s/def ::num-batch integer?)
-(s/def ::reset boolean?)
-(s/def ::predict-opts
-  (s/keys :req-un [::eval-data] :opt-un [::num-batch ::reset]))
-
-(defn predict-batch
-  "Run prediction on a data batch.
-   `mod`: Module
-   `data-batch`: data-batch"
-  [mod data-batch]
-  (util/validate! ::mx-io/data-batch data-batch "Invalid data batch")
-  (util/coerce-return (.predict mod (if (map? data-batch)
-                                      (mx-io/data-batch data-batch)
-                                      data-batch))))
-
-(defn predict
-  "Run prediction and collect the outputs.
-   `mod`: Module
-   `opts-map` {
-     `eval-data`: DataIter
-     `num-batch`: int - Default is `-1`,
-         indicating running all the batches in the data iterator.
-     `reset`: boolean - Default is `true`,
-         indicating whether we should reset the data iter before starting
-         prediction.
-   }
-   returns: vector of NDArrays `[out1, out2, out3]` where each element is the
-   concatenation of the outputs for all the mini-batches.
-   Ex:
-     (predict mod {:eval-data test-iter})
-     (predict mod {:eval-data test-iter :num-batch 10 :reset false})"
-  [mod {:keys [eval-data num-batch reset] :as opts
-        :or {num-batch -1
-             reset true}}]
-  (util/validate! ::predict-opts opts "Invalid opts for predict")
-  (util/scala-vector->vec (.predict mod eval-data (int num-batch) reset)))
-
-(s/def ::predict-every-batch-opts
-  (s/keys :req-un [::eval-data] :opt-un [::num-batch ::reset]))
-
-(defn predict-every-batch
-  "Run prediction and collect the outputs.
-   `mod`: Module
-   `opts-map` {
-     `eval-data`: DataIter
-     `num-batch`: int - Default is `-1`,
-         indicating running all the batches in the data iterator.
-     `reset`: boolean - Default is `true`,
-         indicating whether we should reset the data iter before starting
-         prediction.
-   }
-   returns: nested list like this
-   `[[out1_batch1, out2_batch1, ...], [out1_batch2, out2_batch2, ...]]`
-
-   Note: This mode is useful because in some cases (e.g. bucketing), the module
-   does not necessarily produce the same number of outputs.
-   Ex:
-     (predict-every-batch mod {:eval-data test-iter})"
-  [mod {:keys [eval-data num-batch reset] :as opts
-        :or {num-batch -1
-             reset true}}]
-  (util/validate!
-   ::predict-every-batch-opts
-   opts
-   "Invalid opts for predict-every-batch")
-  (mapv util/scala-vector->vec
-        (util/scala-vector->vec
-         (.predictEveryBatch mod eval-data (int num-batch) reset))))
-
-(s/def ::score-opts
-  (s/keys :req-un [::eval-data ::eval-metric]
-          :opt-un [::num-batch ::reset ::epoch]))
-
-(defn exec-group [mod]
-  (.execGroup mod))
-
-(defn grad-arrays [mod]
-  (mapv vec (util/buffer->vec (.gradArrays (.execGroup mod)))))
-
-(comment
-  (require '[clojure.reflect :as r])
-  (r/reflect DataDesc)
-  (new DataDesc)
-
-  (.setEpochEndCallback (if epoch-end-callback
-                          (util/->option epoch-end-callback)
-                          (util/->option nil)))
-  (.setBatchEndCallback (if batch-end-callback
-                          (util/->option batch-end-callback)
-                          (util/->option nil)))
-
-  (fit-params {:allow-missing true})
-  (fit-params {}))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/monitor.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/monitor.clj
deleted file mode 100644
index cbe04843db02..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/monitor.clj
+++ /dev/null
@@ -1,42 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.monitor
-  (:require [org.apache.clojure-mxnet.util :as util])
-  (:import (org.apache.mxnet Monitor)))
-
-(defmacro monitor
-  "Monitor outputs, weights, and gradients for debugging.
-   - interval: Number of batches between printing.
-   - stat-fun: A function that computes statistics of tensors.
-     Takes an NDArray and returns an NDArray. Defaults to the mean
-     absolute value |x|/size(x). Must be a Clojure function of the
-     form (fn [x] ...)."
-  [interval stat-fun]
-  `(new Monitor (int ~interval) (util/scala-fn ~stat-fun)))
-
-(defn tic
-  "Start collecting stats for the current batch.
-   Call before `forward`."
-  [monitor]
-  (doto monitor
-    (.tic)))
-
-(defn toc
-  "End collecting for the current batch and return the results.
-   Call after computation of the current batch."
-  [monitor]
-  (map util/tuple->vec (util/scala-vector->vec (.toVector (.toc monitor)))))
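A usage sketch for the monitor, assuming `m` aliases the module namespace and that `mod` and `batch` already exist; `(fn [x] x)` stands in here as a pass-through stat function:

(comment
  (let [mon (monitor 100 (fn [x] x))]
    (m/install-monitor mod mon)
    (tic mon)
    (m/forward mod batch)
    (toc mon)))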
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray.clj
deleted file mode 100644
index 9caa00d49010..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray.clj
+++ /dev/null
@@ -1,236 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.ndarray
-  "NDArray API for Clojure package."
-  (:refer-clojure :exclude [* - + > >= < <= / cast concat flatten identity load max
-                            min repeat reverse set sort take to-array empty shuffle
-                            ref])
-  (:require
-   [clojure.spec.alpha :as s]
-   [org.apache.clojure-mxnet.base :as base]
-   [org.apache.clojure-mxnet.context :as mx-context]
-   [org.apache.clojure-mxnet.shape :as mx-shape]
-   [org.apache.clojure-mxnet.util :as util]
-   [t6.from-scala.core :refer [$] :as $])
-  (:import (org.apache.mxnet NDArray)))
-
-;; loads the generated functions into the namespace
-(do (clojure.core/load "gen/ndarray"))
-
-(defn ->vec
-  "Converts an NDArray to a one-dimensional vector."
-  [ndarray]
-  (-> ndarray to-array aclone vec))
-
-(defn empty
-  "Create an uninitialized NDArray with the specified shape."
-  ([shape-vec {:keys [ctx dtype]
-               :or {ctx (mx-context/default-context) dtype base/MX_REAL_TYPE}
-               :as opts}]
-   (NDArray/empty (mx-shape/->shape shape-vec) ctx dtype))
-  ([shape-vec]
-   (empty shape-vec {})))
-
-(defn zeros
-  "Create a new NDArray filled with 0, with the specified shape."
-  ([shape-vec {:keys [ctx dtype]
-               :or {ctx (mx-context/default-context) dtype base/MX_REAL_TYPE}
-               :as opts}]
-   (NDArray/zeros (mx-shape/->shape shape-vec) ctx dtype))
-  ([shape-vec]
-   (zeros shape-vec {})))
-
-(defn ones
-  "Create a new NDArray filled with 1, with the specified shape."
-  ([shape-vec {:keys [ctx dtype]
-               :or {ctx (mx-context/default-context) dtype base/MX_REAL_TYPE}
-               :as opts}]
-   (NDArray/ones (mx-shape/->shape shape-vec) ctx dtype))
-  ([shape-vec]
-   (ones shape-vec {})))
-
-(defn full
-  "Create a new NDArray filled with the given value, with the specified shape."
-  ([shape-vec value {:keys [ctx]
-                     :or {ctx (mx-context/default-context)}
-                     :as opts}]
-   (NDArray/full (mx-shape/->shape shape-vec) value ctx))
-  ([shape-vec value]
-   (full shape-vec value {})))
-
-(defn array
-  "Create a new NDArray that copies its content from a source vector."
-  ([source-vec shape-vec {:keys [ctx]
-                          :or {ctx (mx-context/default-context)}
-                          :as opts}]
-   (NDArray/array (float-array source-vec) (mx-shape/->shape shape-vec) ctx))
-  ([source-vec shape-vec]
-   (array source-vec shape-vec {})))
-
-(defn arange
-  "Returns evenly spaced values within a given interval.
-   Values are generated within the half-open interval [`start`, `stop`). In other
-   words, the interval includes `start` but excludes `stop`."
-  ([start stop {:keys [step repeat ctx dtype]
-                :or {step (float 1) repeat (int 1) ctx (mx-context/default-context) dtype base/MX_REAL_TYPE}
-                :as opts}]
-   (NDArray/arange (float start) ($/option (float stop)) step repeat ctx dtype))
-  ([start stop]
-   (arange start stop {})))
-
-(defn ->ndarray
-  "Creates a new NDArray based on the given n-dimensional vector of numbers.
-   `nd-vec`: n-dimensional vector with numbers.
-   `opts-map` {
-     `ctx`: Context of the output ndarray; will use the default context if
-            unspecified.
-   }
-   returns: `ndarray` with the given values and matching the shape of the input
-   vector.
-   Ex:
-     (->ndarray [5.0 -4.0])
-     (->ndarray [5 -4] {:ctx (context/cpu)})
-     (->ndarray [[1 2 3] [4 5 6]])
-     (->ndarray [[[1.0] [2.0]]])"
-  ([nd-vec {:keys [ctx]
-            :or {ctx (mx-context/default-context)}
-            :as opts}]
-   (array (vec (clojure.core/flatten nd-vec))
-          (util/nd-seq-shape nd-vec)
-          {:ctx ctx}))
-  ([nd-vec] (->ndarray nd-vec {})))
-
-(defn slice
-  "Return a sliced NDArray that shares memory with the current one."
-  ([ndarray i]
-   (.slice ndarray (int i)))
-  ([ndarray start stop]
-   (.slice ndarray (int start) (int stop))))
-
-(defn copy-to
-  "Copy the content of the current array to the other."
-  [source-ndarray target-ndarray]
-  (.copyTo source-ndarray target-ndarray))
-
-(defn save
-  "Save a map of name->NDArray to a binary file.
-   `fname`: the name of the file. Can be an S3 or HDFS address (this requires a
-   build with S3/HDFS support).
-   Examples of fname:
-   * `s3://my-bucket/path/my-s3-ndarray`
-   * `hdfs://my-bucket/path/my-hdfs-ndarray`
-   * `/path-to/my-local-ndarray`"
-  [fname map-of-name-to-ndarray]
-  (NDArray/save fname (util/coerce-param map-of-name-to-ndarray #{"scala.collection.immutable.Map"})))
-
-(defn load
-  "Takes a filename and returns a map of ndarray-name to ndarray."
-  [filename]
-  (let [info (NDArray/load filename)
-        [names ndarrays] (util/tuple->vec info)]
-    (into {} (map (fn [n a] {(str n) a}) names ndarrays))))
-
-(defn save-to-file
-  "Save one ndarray to a file."
-  [fname ndarray]
-  (save fname {"default" ndarray}))
-
-(defn load-from-file
-  "Load one ndarray from a file."
-  [fname]
-  (first (load2-array fname)))
-
-(defn as-in-context
-  "Return an `NDArray` that lives in the target context. If the array
-   is already in that context, `self` is returned. Otherwise, a copy is made."
-  [ndarray ctx]
-  (.asInContext ndarray ctx))
-
-(defn as-type
-  "Return a copy of the current array with the specified dtype."
-  [ndarray dtype]
-  (.asType ndarray dtype))
-
-(defn / [ndarray num-or-NDArray]
-  (div ndarray num-or-NDArray))
-
-(defn concatenate
-  ([ndarrays {:keys [axis always-copy] :or {axis 1 always-copy true}}]
-   (NDArray/concatenate (apply $/immutable-list ndarrays) (int axis) always-copy))
-  ([ndarrays]
-   (NDArray/concatenate (apply $/immutable-list ndarrays))))
-
-(defn ->raw [ndarray]
-  (-> ndarray internal .getRaw))
-
-(defn ->float-vec [ndarray]
-  (-> ndarray internal .toFloatArray vec))
-
-(defn ->int-vec [ndarray]
-  (-> ndarray internal .toIntArray vec))
-
-(defn ->double-vec [ndarray]
-  (-> ndarray internal .toDoubleArray vec))
-
-(defn ->byte-vec [ndarray]
-  (-> ndarray internal .toByteArray vec))
-
-(defn shape-vec [ndarray]
-  (mx-shape/->vec (shape ndarray)))
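A round-trip sketch using the helpers above; the /tmp path is hypothetical:

(comment
  (let [a (array [1 2 3 4 5 6] [2 3])]
    (shape-vec a)    ;=> [2 3]
    (->float-vec a)  ;=> [1.0 2.0 3.0 4.0 5.0 6.0]
    (save "/tmp/arrays.nd" {"a" a})
    (load "/tmp/arrays.nd")))  ;=> {"a" ...}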
-(s/def ::ndarray #(instance? NDArray %))
-(s/def ::vector vector?)
-(s/def ::sequential sequential?)
-(s/def ::shape-vec-match-vec
-  (fn [[v vec-shape]] (= (count v) (reduce clojure.core/* 1 vec-shape))))
-
-(s/fdef vec->nd-vec
-        :args (s/cat :v ::sequential :shape-vec ::sequential)
-        :ret ::vector)
-
-(defn- vec->nd-vec
-  "Convert a vector `v` into an n-dimensional vector given the `shape-vec`.
-   Ex:
-     (vec->nd-vec [1 2 3] [1 1 3])       ;[[[1 2 3]]]
-     (vec->nd-vec [1 2 3 4 5 6] [2 3 1]) ;[[[1] [2] [3]] [[4] [5] [6]]]
-     (vec->nd-vec [1 2 3 4 5 6] [1 2 3]) ;[[[1 2 3] [4 5 6]]]
-     (vec->nd-vec [1 2 3 4 5 6] [3 1 2]) ;[[[1 2]] [[3 4]] [[5 6]]]
-     (vec->nd-vec [1 2 3 4 5 6] [3 2])   ;[[1 2] [3 4] [5 6]]"
-  [v [s1 & ss :as shape-vec]]
-  (util/validate! ::sequential v "Invalid input vector `v`")
-  (util/validate! ::sequential shape-vec "Invalid input vector `shape-vec`")
-  (util/validate! ::shape-vec-match-vec
-                  [v shape-vec]
-                  "Mismatch between vector `v` and vector `shape-vec`")
-  (if-not (seq ss)
-    (vec v)
-    (->> v
-         (partition (clojure.core// (count v) s1))
-         vec
-         (mapv #(vec->nd-vec % ss)))))
-
-(s/fdef ->nd-vec :args (s/cat :ndarray ::ndarray) :ret ::vector)
-
-(defn ->nd-vec
-  "Convert an ndarray `ndarray` into an n-dimensional Clojure vector.
-   Ex:
-     (->nd-vec (array [1] [1 1 1]))             ;[[[1.0]]]
-     (->nd-vec (array [1 2 3] [3 1 1]))         ;[[[1.0]] [[2.0]] [[3.0]]]
-     (->nd-vec (array [1 2 3 4 5 6] [3 1 2]))   ;[[[1.0 2.0]] [[3.0 4.0]] [[5.0 6.0]]]"
-  [ndarray]
-  (util/validate! ::ndarray ndarray "Invalid input array")
-  (vec->nd-vec (->vec ndarray) (shape-vec ndarray)))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray_api.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray_api.clj
deleted file mode 100644
index e222775c60f6..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray_api.clj
+++ /dev/null
@@ -1,32 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.ndarray-api
-  "Experimental NDArray API"
-  (:refer-clojure
-   :exclude [* - + > >= < <= / cast concat flatten identity load max
-             min repeat reverse set sort take to-array empty shuffle
-             ref])
-  (:require [org.apache.clojure-mxnet.base :as base]
-            [org.apache.clojure-mxnet.context :as mx-context]
-            [org.apache.clojure-mxnet.shape :as mx-shape]
-            [org.apache.clojure-mxnet.util :as util]
-            [clojure.reflect :as r]
-            [t6.from-scala.core :refer [$] :as $])
-  (:import (org.apache.mxnet NDArrayAPI)))
-
-;; loads the generated functions into the namespace
-(do (clojure.core/load "gen/ndarray_api"))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray_random_api.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray_random_api.clj
deleted file mode 100644
index 1f45b6d4d646..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/ndarray_random_api.clj
+++ /dev/null
@@ -1,28 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.ndarray-random-api
-  "Experimental NDArray Random API"
-  (:require [org.apache.clojure-mxnet.base :as base]
-            [org.apache.clojure-mxnet.context :as mx-context]
-            [org.apache.clojure-mxnet.shape :as mx-shape]
-            [org.apache.clojure-mxnet.util :as util]
-            [clojure.reflect :as r]
-            [t6.from-scala.core :refer [$] :as $])
-  (:import (org.apache.mxnet NDArrayAPI)))
-
-;; loads the generated functions into the namespace
-(do (clojure.core/load "gen/ndarray_random_api"))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/optimizer.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/optimizer.clj
deleted file mode 100644
index e94a59879466..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/optimizer.clj
+++ /dev/null
@@ -1,219 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.optimizer
-  (:refer-clojure :exclude [update])
-  (:require
-   [clojure.spec.alpha :as s]
-   [org.apache.clojure-mxnet.util :as util])
-  (:import
-   (org.apache.mxnet.optimizer SGD DCASGD NAG AdaDelta RMSProp AdaGrad Adam SGLD)
-   (org.apache.mxnet FactorScheduler)))
-
-(s/def ::learning-rate number?)
-(s/def ::momentum number?)
-(s/def ::wd number?)
-(s/def ::clip-gradient number?)
-(s/def ::lr-scheduler #(instance? FactorScheduler %))
-(s/def ::sgd-opts (s/keys :opt-un [::learning-rate ::momentum ::wd ::clip-gradient ::lr-scheduler]))
-
-(defn sgd
-  "A very simple SGD optimizer with momentum and weight regularization."
-  ([{:keys [learning-rate momentum wd clip-gradient lr-scheduler] :as opts
-     :or {learning-rate 0.01
-          momentum 0.0
-          wd 0.0001
-          clip-gradient 0}}]
-   (util/validate! ::sgd-opts opts "Incorrect sgd optimizer options")
-   (new SGD (float learning-rate) (float momentum) (float wd) (float clip-gradient) lr-scheduler))
-  ([]
-   (sgd {})))
-
-(s/def ::lambda number?)
-(s/def ::dcasgd-opts (s/keys :opt-un [::learning-rate ::momentum ::lambda ::wd ::clip-gradient ::lr-scheduler]))
-
-(defn dcasgd
-  "DCASGD optimizer with momentum and weight regularization.
-   Implementation of the paper 'Asynchronous Stochastic Gradient Descent with
-   Delay Compensation for Distributed Deep Learning'."
-  ([{:keys [learning-rate momentum lambda wd clip-gradient lr-scheduler] :as opts
-     :or {learning-rate 0.01
-          momentum 0.0
-          lambda 0.04
-          wd 0.0
-          clip-gradient 0}}]
-   (util/validate!
-    ::dcasgd-opts opts "Incorrect dcasgd optimizer options")
-   (new DCASGD (float learning-rate) (float lambda) (float momentum) (float wd) (float clip-gradient) lr-scheduler))
-  ([]
-   (dcasgd {})))
-
-(s/def ::nag-opts (s/keys :opt-un [::learning-rate ::momentum ::wd ::clip-gradient ::lr-scheduler]))
-
-(defn nag
-  "SGD with Nesterov momentum.
-   It is implemented according to
-   https://github.com/torch/optim/blob/master/sgd.lua"
-  ([{:keys [learning-rate momentum wd clip-gradient lr-scheduler] :as opts
-     :or {learning-rate 0.01
-          momentum 0.0
-          wd 0.0001
-          clip-gradient 0}}]
-   (util/validate! ::nag-opts opts "Incorrect nag optimizer options")
-   (new NAG (float learning-rate) (float momentum) (float wd) (float clip-gradient) lr-scheduler))
-  ([]
-   (nag {})))
-
-(s/def ::rho number?)
-(s/def ::rescale-gradient number?)
-(s/def ::epsilon number?)
-(s/def ::ada-delta-opts (s/keys :opt-un [::rho ::rescale-gradient ::epsilon ::wd ::clip-gradient]))
-
-(defn ada-delta
-  "AdaDelta optimizer as described in Matthew D. Zeiler, 2012.
-   http://arxiv.org/abs/1212.5701"
-  ([{:keys [rho rescale-gradient epsilon wd clip-gradient] :as opts
-     :or {rho 0.05
-          rescale-gradient 1.0
-          epsilon 1e-8
-          wd 0.0
-          clip-gradient 0}}]
-   (util/validate! ::ada-delta-opts opts "Incorrect ada-delta optimizer options")
-   (new AdaDelta (float rho) (float rescale-gradient) (float epsilon) (float wd) (float clip-gradient)))
-  ([]
-   (ada-delta {})))
-
-(s/def ::rms-prop-opts (s/keys :opt-un [::learning-rate ::rescale-gradient ::rho ::momentum ::wd ::clip-gradient]))
-
-(defn rms-prop
-  "RMSProp optimizer as described in Tieleman & Hinton, 2012.
-   http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013.
-   - learning-rate: Step size.
-   - rho: decay factor of the moving average for gradient and gradient^2.
-   - momentum: momentum factor of the moving average for the gradient.
-   - rescale-gradient: rescaling factor of the gradient.
-   - wd: L2 regularization coefficient added to all the weights.
-   - clip-gradient: clip the gradient to the range [-clip_gradient, clip_gradient].
-   - lr-scheduler: The learning rate scheduler."
-  ([{:keys [learning-rate rescale-gradient rho momentum wd lr-scheduler clip-gradient] :as opts
-     :or {learning-rate 0.002
-          rescale-gradient 1.0
-          rho 0.95
-          momentum 0.9
-          wd 0.0
-          clip-gradient 0}}]
-   (util/validate! ::rms-prop-opts opts "Incorrect rms-prop optimizer options")
-   (new RMSProp (float learning-rate) (float rescale-gradient) (float rho)
-        (float momentum) (float wd) lr-scheduler (float clip-gradient)))
-  ([]
-   (rms-prop {})))
-
-(s/def ::ada-grad-opts (s/keys :opt-un [::learning-rate ::rescale-gradient ::epsilon ::wd]))
-
-(defn ada-grad
-  "AdaGrad optimizer as described in Duchi, Hazan and Singer, 2011.
-   http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf
-
-   - learning-rate: Step size.
-   - epsilon: A small number to keep the update numerically stable.
-     Default value is set to 1e-7.
-   - rescale-gradient: rescaling factor of the gradient.
-   - wd: L2 regularization coefficient added to all the weights."
-  ([{:keys [learning-rate rescale-gradient epsilon wd] :as opts
-     :or {learning-rate 0.05
-          rescale-gradient 1.0
-          epsilon 1e-7
-          wd 0.0}}]
-   (util/validate! ::ada-grad-opts opts "Incorrect ada-grad optimizer options")
-   (new AdaGrad (float learning-rate) (float rescale-gradient) (float epsilon) (float wd)))
-  ([]
-   (ada-grad {})))
-
-(s/def ::beta1 number?)
-(s/def ::beta2 number?)
-(s/def ::adam-opts (s/keys :opt-un [::learning-rate ::beta1 ::beta2 ::epsilon ::decay-factor ::wd ::clip-gradient ::lr-scheduler]))
-
-(defn adam
-  "Adam optimizer as described in [King2014]
-
-   [King2014] Diederik Kingma, Jimmy Ba,
-   Adam: A Method for Stochastic Optimization,
-   http://arxiv.org/abs/1412.6980
-
-   - learning-rate: Step size.
-   - beta1: Exponential decay rate for the first moment estimates.
-   - beta2: Exponential decay rate for the second moment estimates.
-   - epsilon
-   - decay-factor
-   - wd: L2 regularization coefficient added to all the weights.
-   - clip-gradient: clip the gradient to the range [-clip_gradient, clip_gradient].
-   - lr-scheduler: The learning rate scheduler."
-  ([{:keys [learning-rate beta1 beta2 epsilon decay-factor wd clip-gradient lr-scheduler] :as opts
-     :or {learning-rate 0.002
-          beta1 0.9
-          beta2 0.999
-          epsilon 1e-8
-          decay-factor (- 1 1e-8)
-          wd 0
-          clip-gradient 0}}]
-   (util/validate! ::adam-opts opts "Incorrect adam optimizer options")
-   (new Adam (float learning-rate) (float beta1) (float beta2) (float epsilon)
-        (float decay-factor) (float wd) (float clip-gradient) lr-scheduler))
-  ([]
-   (adam {})))
-
-(s/def ::sgld-opts (s/keys :opt-un [::learning-rate ::rescale-gradient ::wd ::clip-gradient ::lr-scheduler]))
-
-(defn sgld
-  "Stochastic Langevin Dynamics updater to sample from a distribution.
-
-   - learning-rate: Step size.
-   - rescale-gradient: rescaling factor of the gradient.
-   - wd: L2 regularization coefficient added to all the weights.
-   - clip-gradient: Float; clip the gradient to the range [-clip_gradient, clip_gradient].
-   - lr-scheduler: The learning rate scheduler."
-  ([{:keys [learning-rate rescale-gradient wd clip-gradient lr-scheduler] :as opts
-     :or {learning-rate 0.01
-          rescale-gradient 1
-          wd 0.0001
-          clip-gradient 0}}]
-   (util/validate! ::sgld-opts opts "Incorrect sgld optimizer options")
-   (new SGLD (float learning-rate) (float rescale-gradient) (float wd)
-        (float clip-gradient) lr-scheduler))
-  ([]
-   (sgld {})))
-
-(defn update
-  "Update the parameters.
-   - optimizer: the optimizer.
-   - index: A unique integer key used to index the parameters.
-   - weight: the weight NDArray.
-   - grad: the gradient NDArray.
-   - state: NDArray or other object returned by create-state.
-     The auxiliary state used in optimization."
-  [optimizer index weight grad state]
-  (doto optimizer
-    (.update (int index) weight grad state)))
-
-(defn create-state
-  "Create additional optimizer state such as momentum."
-  [optimizer index weight]
-  (.createState optimizer (int index) weight))
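A sketch of the low-level update cycle these two functions expose; the `weight` and `grad` NDArrays are assumed to exist:

(comment
  (let [opt (sgd {:learning-rate 0.1})
        state (create-state opt 0 weight)]
    ;; one optimizer step for the parameter at index 0
    (update opt 0 weight grad state)))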
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/primitives.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/primitives.clj
deleted file mode 100644
index 0967df2289d8..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/primitives.clj
+++ /dev/null
@@ -1,46 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.primitives
-  (:import (org.apache.mxnet MX_PRIMITIVES$MX_FLOAT MX_PRIMITIVES$MX_Double
-                             MX_PRIMITIVES$MX_PRIMITIVE_TYPE)))
-
-;;; Defines custom MXNet primitives that can be used for mathematical
-;;; computations in NDArrays to control precision. Currently Float and Double
-;;; are supported.
-
-;;; For purposes of automatic conversion in ndarray functions, doubles are the
-;;; default; to specify floats you must use a Float (see `mx-float`).
-
-(defn mx-float
-  "Creates an MXNet float primitive."
-  [num]
-  (new MX_PRIMITIVES$MX_FLOAT num))
-
-(defn mx-double
-  "Creates an MXNet double primitive."
-  [num]
-  (new MX_PRIMITIVES$MX_Double num))
-
-(defn ->num
-  "Returns the underlying number value."
-  [primitive]
-  (.data primitive))
-
-(defn primitive? [x]
-  (instance? MX_PRIMITIVES$MX_PRIMITIVE_TYPE x))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/profiler.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/profiler.clj
deleted file mode 100644
index 5b4f9b198131..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/profiler.clj
+++ /dev/null
@@ -1,47 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.profiler
-  (:import (org.apache.mxnet Profiler))
-  (:require [org.apache.clojure-mxnet.util :as util]))
-
-(defn profiler-set-config
-  "Set up the profiler configuration.
-   - mode, optional: indicating what to profile; can be symbolic or all.
-     Default is symbolic.
-   - fileName, optional: the name of the output trace file. Default is
-     profile.json."
-  [kwargs]
-  (Profiler/profilerSetConfig
-   (util/convert-io-map kwargs)))
-
-(defn profiler-set-state
-  "Set up the profiler state to record operators.
-   - state, optional: indicating whether to run the profiler; can
-     be stop or run. Default is stop."
-  ([state]
-   (Profiler/profilerSetState state))
-  ([]
-   (profiler-set-state "stop")))
-
-(defn dump-profile
-  "Dump the profile and stop the profiler. Use this to save the profile
-   in advance in case your program cannot exit normally."
-  ([finished]
-   (Profiler/dumpProfile (int finished)))
-  ([]
-   (dump-profile 1)))
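A sketch of a typical profiling session; the exact kwarg names passed to profiler-set-config follow the docstring above and are assumptions:

(comment
  (profiler-set-config {:mode "symbolic" :filename "profile.json"}) ; key names assumed
  (profiler-set-state "run")
  ;; ... run forward/backward work here ...
  (profiler-set-state "stop")
  (dump-profile))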
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/random.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/random.clj
deleted file mode 100644
index 1261e659e6dc..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/random.clj
+++ /dev/null
@@ -1,99 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.random
-  "Random Number interface of mxnet."
-  (:require
-   [clojure.spec.alpha :as s]
-   [org.apache.clojure-mxnet.context :as context]
-   [org.apache.clojure-mxnet.shape :as mx-shape]
-   [org.apache.clojure-mxnet.util :as util])
-  (:import (org.apache.mxnet Context Random)))
-
-(s/def ::low number?)
-(s/def ::high number?)
-(s/def ::low-high (fn [[low high]] (<= low high)))
-(s/def ::shape-vec (s/coll-of pos-int? :kind vector?))
-(s/def ::ctx #(instance? Context %))
-(s/def ::uniform-opts (s/keys :opt-un [::ctx]))
-
-(defn uniform
-  "Generate a uniform distribution in [`low`, `high`) with the given shape.
-   `low`: The lower bound of the distribution.
-   `high`: The upper bound of the distribution.
-   `shape-vec`: vector shape of the ndarray generated.
-   `opts-map` {
-     `ctx`: Context of the output ndarray; will use the default context if not specified.
-     `out`: Output placeholder}
-   returns: The result ndarray with the generated values.
-   Ex:
-     (uniform 0 1 [1 10])
-     (uniform -10 10 [100 100])"
-  ([low high shape-vec {:keys [ctx out] :as opts}]
-   (util/validate! ::uniform-opts opts "Incorrect random uniform parameters")
-   (util/validate! ::low low "Incorrect random uniform parameters")
-   (util/validate! ::high high "Incorrect random uniform parameters")
-   (util/validate! ::low-high [low high] "Incorrect random uniform parameters")
-   (util/validate! ::shape-vec shape-vec "Incorrect random uniform parameters")
-   (Random/uniform (float low) (float high) (mx-shape/->shape shape-vec) ctx out))
-  ([low high shape-vec]
-   (uniform low high shape-vec {})))
-
-(s/def ::loc number?)
-(s/def ::scale (s/and number? pos?))
-(s/def ::normal-opts (s/keys :opt-un [::ctx]))
-
-(defn normal
-  "Generate a normal (Gaussian) distribution N(mean, stdvar^2) with the given shape.
-   `loc`: Mean (centre) of the distribution.
-   `scale`: Standard deviation (spread or width) of the distribution.
-   `shape-vec`: vector shape of the ndarray generated.
-   `opts-map` {
-     `ctx`: Context of the output ndarray; will use the default context if not specified.
-     `out`: Output placeholder}
-   returns: The result ndarray with the generated values.
-   Ex:
-     (normal 0 1 [10 10])
-     (normal -5 4 [2 3])"
-  ([loc scale shape-vec {:keys [ctx out] :as opts}]
-   (util/validate! ::normal-opts opts "Incorrect random normal parameters")
-   (util/validate! ::loc loc "Incorrect random normal parameters")
-   (util/validate! ::scale scale "Incorrect random normal parameters")
-   (util/validate! ::shape-vec shape-vec "Incorrect random normal parameters")
-   (Random/normal (float loc)
-                  (float scale)
-                  (mx-shape/->shape shape-vec) ctx out))
-  ([loc scale shape-vec]
-   (normal loc scale shape-vec {})))
-
-(s/def ::seed-state number?)
-(defn seed
-  "Seed the random number generators in mxnet.
-   This seed will affect the behavior of functions in this module,
-   as well as results from executors that contain random-number
-   operators such as Dropout.
-   `seed-state`: The random number seed to set on all devices.
-   Note: mxnet's random number generator is device specific by default,
-   so with the same seed the random number sequence generated on GPU0
-   can differ from the one generated on CPU.
-   Ex:
-     (seed 42)
-     (seed 42.0)"
-  [seed-state]
-  (util/validate! ::seed-state seed-state "Incorrect seed parameters")
-  (Random/seed (int seed-state)))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/resource_scope.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/resource_scope.clj
deleted file mode 100644
index 26673485e54c..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/resource_scope.clj
+++ /dev/null
@@ -1,53 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.resource-scope
-  (:require [org.apache.clojure-mxnet.util :as util])
-  (:import (org.apache.mxnet ResourceScope)))
-
-(defmacro using
-  "Uses a ResourceScope for all forms. This is a way to manage Native
-   Resources like NDArray and Symbol: it deallocates them by calling close
-   on them automatically when the scope exits. It will not call close on
-   Native Resources returned from the form.
-   Example:
-     (resource-scope/using
-       (let [temp-x (ndarray/ones [3 1])
-             temp-y (ndarray/ones [3 1])]
-         (ndarray/+ temp-x temp-y)))"
-  [& forms]
-  `(ResourceScope/using (new ResourceScope) (util/forms->scala-fn ~@forms)))
-
-(defmacro with-do
-  "Alias for a do within a resource-scope using.
-   Example:
-     (resource-scope/with-do
-       (ndarray/ones [3 1])
-       :all-cleaned-up)"
-  [& forms]
-  `(using (do ~@forms)))
-
-(defmacro with-let
-  "Alias for a let within a resource-scope using.
-   Example:
-     (resource-scope/with-let [temp-x (ndarray/ones [3 1])
-                               temp-y (ndarray/ones [3 1])]
-       (ndarray/+ temp-x temp-y))"
-  [& forms]
-  `(using (let ~@forms)))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/shape.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/shape.clj
deleted file mode 100644
index 01bedeff8a9c..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/shape.clj
+++ /dev/null
@@ -1,34 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License.
-;; You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.shape
-  (:require [t6.from-scala.core :refer [$] :as $])
-  (:import (org.apache.mxnet Shape)))
-
-(defn ->shape [v]
-  (new Shape (apply $/immutable-list (map int v))))
-
-(defn ->vec [shape-obj]
-  (-> shape-obj
-      .toArray
-      vec))
-
-(defn length [shape-obj]
-  (.length shape-obj))
-
-(defn product [shape]
-  (.product shape))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol.clj
deleted file mode 100644
index 0951655829d9..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol.clj
+++ /dev/null
@@ -1,247 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.symbol
-  (:refer-clojure :exclude [* - + > >= < <= / cast concat identity flatten load max
-                            min repeat reverse set sort take to-array empty sin
-                            get apply shuffle ref])
-  (:require [org.apache.clojure-mxnet.base :as base]
-            [org.apache.clojure-mxnet.context :as mx-context]
-            [org.apache.clojure-mxnet.executor :as ex]
-            [org.apache.clojure-mxnet.shape :as mx-shape]
-            [org.apache.clojure-mxnet.util :as util]
-            [t6.from-scala.core :refer [$] :as $]
-            [org.apache.clojure-mxnet.ndarray :as ndarray])
-  (:import (org.apache.mxnet Symbol)))
-
-;; loads the generated functions into the namespace
-(do (clojure.core/load "gen/symbol"))
-
-;;;;;;
-
-(defn variable
-  "Create a symbolic variable with a specified name.
-   `attrs`: map - Additional attributes to set on the variable.
-   `shape`: vector - The shape of the variable. If specified, this will be
-            used during shape inference.
-   `lr-mult`: The learning rate multiplier.
-   `wd-mult`: The weight decay multiplier for the input variable.
-   `dtype`: The dtype for the input variable.
-   `kwargs`: map - Additional attributes, whose keys must start and end with
-             double underscores."
-  ([var-name]
-   (variable var-name {}))
-  ([var-name {:keys [attrs shape lr-mult wd-mult dtype kwargs] :as opts}]
-   (Symbol/Variable var-name
-                    (when attrs (util/convert-symbol-map attrs))
-                    (when shape (mx-shape/->shape shape))
-                    (if lr-mult (float lr-mult) ($/option nil))
-                    (if wd-mult (float wd-mult) ($/option nil))
-                    dtype
-                    (if kwargs (util/convert-symbol-map kwargs) (util/empty-map)))))
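A minimal end-to-end sketch with `variable`, the generated elementwise `+`, and `bind` (defined just below); the `ex/forward` and `ex/outputs` helpers from the executor namespace are assumed:

(comment
  (let [a (variable "a")
        b (variable "b")
        c (+ a b)
        exec (bind c (mx-context/default-context)
                   {"a" (ndarray/ones [2 2]) "b" (ndarray/ones [2 2])})]
    (-> exec ex/forward ex/outputs first ndarray/->vec)))  ;=> [2.0 2.0 2.0 2.0]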
-(defn bind
-  "Bind the current symbol to get an executor.
-   `sym`: symbol
-   `ctx`: the device context the generated executor will run on
-   `bind-map-or-vec`: map of str to ndarray (or a seq of ndarrays)
-   `bind-grads-map-or-vec`: map of str to ndarray (or a seq of ndarrays)"
-  ([sym ctx bind-map-or-vec bind-grads-map-or-vec grad-req bind-aux-map-or-vec]
-   (.bind sym
-          ctx
-          (util/coerce-param bind-map-or-vec #{"scala.collection.immutable.Map" "scala.collection.Seq"})
-          (util/coerce-param bind-grads-map-or-vec #{"scala.collection.immutable.Map" "scala.collection.Seq"})
-          grad-req
-          (util/coerce-param bind-aux-map-or-vec #{"scala.collection.immutable.Map" "scala.collection.Seq"})
-          nil
-          nil))
-  ([sym ctx bind-map-or-vec bind-grads-map-or-vec]
-   (.bind sym
-          ctx
-          (util/coerce-param bind-map-or-vec #{"scala.collection.immutable.Map" "scala.collection.Seq"})
-          (util/coerce-param bind-grads-map-or-vec #{"scala.collection.immutable.Map" "scala.collection.Seq"})))
-  ([sym ctx bind-map-or-vec]
-   (.bind sym
-          ctx
-          (util/coerce-param bind-map-or-vec #{"scala.collection.immutable.Map" "scala.collection.Seq"})
-          nil))
-  ([sym bind-map-or-vec]
-   (.bind sym
-          (mx-context/default-context)
-          (util/coerce-param bind-map-or-vec #{"scala.collection.immutable.Map" "scala.collection.Seq"}))))
-
-(defn simple-bind
-  "Bind the current symbol to get an executor, allocating all the ndarrays
-   needed. Allows specifying data types.
-   This function lets the user bind NDArrays to specific positions, and it
-   automatically allocates the ndarrays for any arguments and auxiliary states
-   the user did not specify explicitly.
-
-   `ctx`: The device context the generated executor will run on.
-   `shape-vec-map`: map of name->shape
-   `opt-map`: options map of:
-     :grad-req {'write', 'add', 'null'}, or list of str or dict of str to str, optional
-         Specifies how we should update the gradient to the args_grad.
-         - 'write' means the gradient is written to the specified args_grad
-           NDArray every time.
-         - 'add' means the gradient is added to the specified NDArray every time.
-         - 'null' means no action is taken; the gradient may not be calculated.
-     :type-map map of name->dtype.
-   Returns the executor."
-  ([sym ctx shape-vec-map {:keys [grad-req type-map] :as opts
-                           :or {grad-req "write"}}]
-   (let [shape-map (->> shape-vec-map
-                        (map (fn [[k v]] [k (mx-shape/->shape v)]))
-                        (into {}))]
-     (.simpleBind sym ctx grad-req
-                  (util/nil-or-coerce-param shape-map #{"scala.collection.immutable.Map"})
-                  (util/nil-or-coerce-param type-map #{"scala.collection.immutable.Map"}))))
-  ([sym ctx shape-vec-map]
-   (simple-bind sym ctx shape-vec-map {}))
-  ([sym ctx]
-   (.simpleBind sym ctx "write" (util/empty-map) nil)))
-
-(defn ones
-  "Returns a new symbol of the given shape and type, filled with ones."
-  ([shape-vec {:keys [ctx dtype] :as opts
-               :or {ctx nil dtype base/MX_REAL_TYPE}}]
-   (Symbol/ones (mx-shape/->shape shape-vec) dtype ctx))
-  ([shape-vec]
-   (ones shape-vec {})))
-
-(defn zeros
-  "Returns a new symbol of the given shape and type, filled with zeros."
-  ([shape-vec {:keys [ctx dtype] :as opts
-               :or {ctx nil dtype base/MX_REAL_TYPE}}]
-   (Symbol/zeros (mx-shape/->shape shape-vec) dtype ctx))
-  ([shape-vec]
-   (zeros shape-vec {})))
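The same graph via `simple-bind`, which allocates the argument NDArrays from shapes instead of taking them explicitly (`ex/forward` assumed, as above):

(comment
  (let [c (+ (variable "a") (variable "b"))
        exec (simple-bind c (mx-context/default-context) {"a" [2 2] "b" [2 2]})]
    (ex/forward exec)))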
-(defn arange
-  "Returns evenly spaced values within a given interval.
-   Values are generated within the half-open interval [`start`, `stop`). In other
-   words, the interval includes `start` but excludes `stop`."
-  ([start stop {:keys [step repeat dtype]
-                :or {step (float 1) repeat (int 1) dtype base/MX_REAL_TYPE}
-                :as opts}]
-   (Symbol/arange (float start) ($/option (float stop)) step repeat false nil dtype))
-  ([start stop]
-   (arange start stop {})))
-
-(defn arange-with-inference
-  "Behaves like the arange operator, but infers the stop value from the output
-   shape, which must be known from the rest of the net."
-  ([start {:keys [step repeat dtype]
-           :or {step (float 1) repeat (int 1) dtype base/MX_REAL_TYPE}
-           :as opts}]
-   (Symbol/arange (float start) ($/option nil) step repeat true nil dtype))
-  ([start]
-   (arange-with-inference start {})))
-
-;;; manually defined because of a conflicting arity of 2 with the auto-gen
-(defn min
-  ([sym-name kwargs-map symbol-list kwargs-map-1]
-   (util/coerce-return
-    (Symbol/min
-     (util/nil-or-coerce-param sym-name #{"java.lang.String"})
-     (util/nil-or-coerce-param
-      kwargs-map
-      #{"scala.collection.immutable.Map"})
-     (util/nil-or-coerce-param symbol-list #{"scala.collection.Seq"})
-     (util/nil-or-coerce-param
-      kwargs-map-1
-      #{"scala.collection.immutable.Map"}))))
-  ([sym-name attr-map kwargs-map]
-   (min sym-name attr-map (util/empty-list) kwargs-map))
-  ([kwargs-map] (min nil nil (util/empty-list) kwargs-map))
-  ([sym1 sym2]
-   (util/coerce-return
-    (Symbol/min
-     (util/nil-or-coerce-param
-      sym1
-      #{"org.apache.mxnet.Symbol" "java.lang.Object"})
-     (util/nil-or-coerce-param
-      sym2
-      #{"org.apache.mxnet.Symbol" "java.lang.Object"})))))
-
-;;; manually defined because of a conflicting arity of 2 with the auto-gen
-
-(defn max
-  ([sym1 sym2]
-   (util/coerce-return
-    (Symbol/max
-     (util/nil-or-coerce-param
-      sym1
-      #{"org.apache.mxnet.Symbol" "java.lang.Object"})
-     (util/nil-or-coerce-param
-      sym2
-      #{"org.apache.mxnet.Symbol" "java.lang.Object"}))))
-  ([sym-name kwargs-map symbol-list kwargs-map-1]
-   (util/coerce-return
-    (Symbol/max
-     (util/nil-or-coerce-param sym-name #{"java.lang.String"})
-     (util/nil-or-coerce-param
-      kwargs-map
-      #{"scala.collection.immutable.Map"})
-     (util/nil-or-coerce-param symbol-list #{"scala.collection.Seq"})
-     (util/nil-or-coerce-param
-      kwargs-map-1
-      #{"scala.collection.immutable.Map"}))))
-  ([sym-name attr-map kwargs-map]
-   (max sym-name attr-map (util/empty-list) kwargs-map))
-  ([kwargs-map] (max nil nil (util/empty-list) kwargs-map)))
-
-;;; redefining to make it easier to work with
-
-(defn- coerce-infer-shape-return [ret]
-  (->> ret
-       (map util/scala-vector->vec)
-       (map (fn [shapes] (map mx-shape/->vec shapes)))))
-
-(defn infer-shape
-  ([sym vec-or-strings vec-of-ints vec-of-ints-1]
-   (let [ret (util/coerce-return
-              (.inferShape
-               sym
-               (util/nil-or-coerce-param vec-or-strings #{"java.lang.String<>"})
-               (util/nil-or-coerce-param vec-of-ints #{"int<>"})
-               (util/nil-or-coerce-param vec-of-ints-1 #{"int<>"})))]
-     (coerce-infer-shape-return ret)))
-  ([sym symbol-list-or-kwargs-map]
-   (let [ret (util/coerce-return
-              (.inferShape
-               sym
-               (if (map?
-(defn save-checkpoint
-  "Checkpoints the model: saves the symbol to `prefix`-symbol.json and the
-   arg/aux parameters to `prefix`-`epoch`.params (ported from Model's
-   save-checkpoint)."
-  [prefix epoch sym arg-params aux-params]
-  (save sym (str prefix "-symbol.json"))
-  (let [save-map (merge (->> arg-params
-                             (mapv (fn [[k v]] [(str "arg:" k) v]))
-                             (into {}))
-                        (->> aux-params
-                             (mapv (fn [[k v]] [(str "aux:" k) v]))
-                             (into {})))
-        param-name (format "%s-%04d.params" prefix epoch)]
-    (ndarray/save param-name save-map)
-    (println "Saved checkpoint to" param-name)))
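;; [Editor's sketch] Checkpointing as implemented above: for prefix "my-model"
;; and epoch 2 this writes "my-model-symbol.json" and "my-model-0002.params".
;; The network `net` and the parameter NDArrays `w` and `b` are illustrative.
(comment
  (save-checkpoint "my-model" 2 net
                   {"fc1_weight" w, "fc1_bias" b} ;; arg-params, name -> NDArray
                   {}))                           ;; aux-params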
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol_api.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol_api.clj
deleted file mode 100644
index 69cc8136d500..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol_api.clj
+++ /dev/null
@@ -1,32 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.symbol-api
-  "Experimental Symbol API"
-  (:refer-clojure :exclude [* - + > >= < <= / cast concat identity flatten load max
-                            min repeat reverse set sort take to-array empty sin
-                            get apply shuffle ref])
-  (:require [org.apache.clojure-mxnet.base :as base]
-            [org.apache.clojure-mxnet.context :as mx-context]
-            [org.apache.clojure-mxnet.executor :as ex]
-            [org.apache.clojure-mxnet.shape :as mx-shape]
-            [org.apache.clojure-mxnet.util :as util]
-            [t6.from-scala.core :refer [$] :as $]
-            [org.apache.clojure-mxnet.ndarray :as ndarray])
-  (:import (org.apache.mxnet SymbolAPI)))
-
-;; loads the generated functions into the namespace
-(do (clojure.core/load "gen/symbol_api"))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol_random_api.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol_random_api.clj
deleted file mode 100644
index 76f6fdefc334..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/symbol_random_api.clj
+++ /dev/null
@@ -1,32 +0,0 @@
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.symbol-random-api
-  "Experimental Symbol Random API"
-  (:refer-clojure :exclude [* - + > >= < <= / cast concat identity flatten load max
-                            min repeat reverse set sort take to-array empty sin
-                            get apply shuffle ref])
-  (:require [org.apache.clojure-mxnet.base :as base]
-            [org.apache.clojure-mxnet.context :as mx-context]
-            [org.apache.clojure-mxnet.executor :as ex]
-            [org.apache.clojure-mxnet.shape :as mx-shape]
-            [org.apache.clojure-mxnet.util :as util]
-            [t6.from-scala.core :refer [$] :as $]
-            [org.apache.clojure-mxnet.ndarray :as ndarray])
-  (:import (org.apache.mxnet SymbolAPI)))
-
-;; loads the generated functions into the namespace
-(do (clojure.core/load "gen/symbol_random_api"))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/util.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/util.clj
deleted file mode 100644
index 9dc6c8f88ddd..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/util.clj
+++ /dev/null
@@ -1,278 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;; - -(ns org.apache.clojure-mxnet.util - (:require [clojure.spec.alpha :as s] - [t6.from-scala.core :refer [$ $$] :as $] - [clojure.string :as string] - [org.apache.clojure-mxnet.primitives :as primitives] - [org.apache.clojure-mxnet.shape :as mx-shape]) - (:import (org.apache.mxnet NDArray) - (scala Product Tuple2 Tuple3) - (scala.collection.immutable List IndexedSeq ListMap) - (scala.collection JavaConversions Map) - (scala Option))) - -(def ndarray-param-coerce {"float" "num" - "int" "num" - "boolean" "bool" - "scala.collection.immutable.Map" "kwargs-map" - "scala.collection.Seq" "& nd-array-and-params" - "int<>" "vec-of-ints" - "float<>" "vec-of-floats" - "byte<>" "byte-array" - "org.apache.mxnet.NDArray" "ndarray" - "org.apache.mxnet.Symbol" "sym" - "org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE" "double-or-float"}) - -(def symbol-param-coerce {"java.lang.String" "sym-name" - "float" "num" - "int" "num" - "boolean" "bool" - "scala.collection.immutable.Map" "kwargs-map" - "scala.collection.Seq" "symbol-list" - "int<>" "vec-of-ints" - "float<>" "vec-of-floats" - "byte<>" "byte-array" - "java.lang.String<>" "vec-of-strings" - "org.apache.mxnet.Symbol" "sym" - "java.lang.Object" "object"}) - -(defn empty-list [] - ($ List/empty)) - -(defn empty-map [] - ($ Map/empty)) - -(defn empty-indexed-seq [] - ($ IndexedSeq/empty)) - -(defn empty-list-map [] - ($ ListMap/empty)) - -(defn ->option [v] - ($ Option v)) - -(defn ->int-option [v] - (->option (when v (int v)))) - -(defn option->value [opt] - ($/view opt)) - -(defn keyword->snake-case - "Transforms a keyword `kw` into a snake-case string. - `kw`: keyword - returns: string - Ex: - (keyword->snake-case :foo-bar) ;\"foo_bar\" - (keyword->snake-case :foo) ;\"foo\"" - [kw] - (if (keyword? kw) - (string/replace (name kw) "-" "_") - kw)) - -(defn convert-tuple [param] - (apply $/tuple param)) - -(def tuple-param-names #{"kernel" "stride" "pad" "target-shape" "shape"}) - -(defn convert-by-shape [param] - (into {} (mapv (fn [[k v]] - [k (if (vector? v) (mx-shape/->shape v) v)]) - param))) - -(defn tuple-convert-by-param-name [param] - (into {} (mapv (fn [[k v]] - (if (or (get tuple-param-names k) - (get tuple-param-names (name k))) - [k (str (if (vector? v) (mx-shape/->shape v) v))] - [k v])) - param))) - -(def io-param-names #{"input-shape" "data-shape" "label-shape"}) - -(defn io-convert-by-param-name [param] - (into {} (mapv (fn [[k v]] (cond - (or (get io-param-names k) - (get io-param-names (name k))) [k (str (if (vector? v) (mx-shape/->shape v) v))] - (true? v) [k "True"] - (false? v) [k "False"] - :else [k (str v)])) - param))) - -(defn convert-map [param] - (if (empty? param) - (empty-map) - (apply $/immutable-map (->> param - (into []) - (flatten) - (mapv keyword->snake-case))))) - -(defn convert-symbol-map [param] - (convert-map (tuple-convert-by-param-name param))) - -(defn convert-io-map [param] - (convert-map (io-convert-by-param-name param))) - -(defn convert-shape-map [param] - (convert-map (convert-by-shape param))) - -(defn convert-vector [param] - (apply $/immutable-list param)) - -(defn vec->set [param] - (apply $/immutable-set param)) - -(defn vec->indexed-seq [x] - (.toIndexedSeq (convert-vector x))) - -(defn apply-scala-fn [f args] - (.apply f args)) - -(defn coerce-param [param targets] - (cond - (and (get targets "scala.collection.immutable.Map") (map? param)) (convert-map param) - (and (get targets "float") (number? param)) (float param) - (and (get targets "scala.collection.Seq") (instance? 
org.apache.mxnet.NDArray param)) ($/immutable-list param)
-    (and (get targets "scala.collection.Seq") (instance? org.apache.mxnet.Symbol param)) ($/immutable-list param)
-    (and (get targets "scala.collection.Seq") (and (or (vector? param) (seq? param)) (empty? param))) (empty-list)
-    (and (get targets "scala.collection.Seq") (or (vector? param) (seq? param))) (apply $/immutable-list param)
-    (and (get targets "org.apache.mxnet.Shape") (or (vector? param) (seq? param) (empty? param))) (mx-shape/->shape param)
-    (and (get targets "int<>") (vector? param)) (int-array param)
-    (and (get targets "float<>") (vector? param)) (float-array param)
-    (and (get targets "java.lang.String<>") (vector? param)) (into-array param)
-    (and (get targets "org.apache.mxnet.NDArray<>") (vector? param)) (into-array param)
-    (and (get targets "org.apache.mxnet.Symbol<>") (vector? param)) (into-array param)
-    (and (get targets "org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE") (instance? Float param)) (primitives/mx-float param)
-    (and (get targets "org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE") (number? param)) (primitives/mx-double param)
-    :else param))
-
-(defn nil-or-coerce-param [param targets]
-  (when param
-    (coerce-param param targets)))
-
-(defn scala-map->map
-  [^Map m]
-  (into {} (JavaConversions/mapAsJavaMap m)))
-
-(defn buffer->vec [b]
-  (into [] (JavaConversions/bufferAsJavaList b)))
-
-(defn scala-vector->vec [x]
-  (into [] (JavaConversions/asJavaCollection x)))
-
-(defn scala-iterator->seq [x]
-  (iterator-seq (JavaConversions/asJavaIterator x)))
-
-(defn tuple->vec [^Product p]
-  (->> (.productArity p)
-       (range)
-       (map #(.productElement p %))
-       (into [])))
-
-(defn coerce-return [return-val]
-  (cond
-    (instance? scala.collection.mutable.ArrayBuffer return-val) (buffer->vec return-val)
-    (instance? scala.collection.immutable.Vector return-val) (scala-vector->vec return-val)
-    (instance? org.apache.mxnet.NDArrayFuncReturn return-val) (.head return-val)
-    (instance? Map return-val) (scala-map->map return-val)
-    (instance? Tuple2 return-val) (tuple->vec return-val)
-    (instance? Tuple3 return-val) (tuple->vec return-val)
-    (primitives/primitive? return-val) (primitives/->num return-val)
-    :else return-val))

-(defn coerce-return-recursive [return-val]
-  (let [coerced-val (coerce-return return-val)]
-    (if (vector? coerced-val)
-      (into [] (map coerce-return-recursive coerced-val))
-      coerced-val)))
-
-(defmacro scala-fn
-  "Creates a scala fn from an anonymous clojure fn of the form (fn [x] body)"
-  [f]
-  `($/fn ~@(drop-last (rest f)) ~(last f)))
-
-(defn translate-keyword-shape [[k v]]
-  [(if (keyword? k) (string/replace (name k) "-" "_") k)
-   (if (vector? v) (mx-shape/->shape v) v)])
-
-(defn map->tuple [m]
-  (->> m
-       (into [])
-       (map translate-keyword-shape)
-       (map convert-tuple)))
-
-(defn list-map [m]
-  (loop [lm ($ ListMap/empty)
-         tuples (map->tuple m)]
-    (if (seq tuples)
-      (recur ($ lm "+" (first tuples)) (rest tuples))
-      lm)))
-
-(defn validate! [spec value error-msg]
-  (when-not (s/valid? spec value)
-    (s/explain spec value)
-    (throw (ex-info error-msg
-                    (s/explain-data spec value)))))
-
-(s/def ::non-empty-seq (s/and sequential? not-empty))
-(defn to-array-nd
-  "Converts any N-D sequential structure to an array
-   with the same dimensions."
-  [nd-seq]
-  (validate! ::non-empty-seq nd-seq "Invalid N-D sequence")
-  (if (sequential? (first nd-seq))
-    (to-array (mapv to-array-nd nd-seq))
-    (to-array nd-seq)))
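;; [Editor's sketch] The N-D helpers at a glance: `to-array-nd` (above) turns a
;; nested sequence into correspondingly nested Java arrays, and `nd-seq-shape`
;; (defined just below) recovers the dimensions of such a structure.
(comment
  (util/nd-seq-shape [[1 2 3] [4 5 6]])      ;=> [2 3]
  (alength (util/to-array-nd [[1 2] [3 4]])) ;=> 2
  )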
-(defn nd-seq-shape
-  "Computes the shape of an n-dimensional sequential structure"
-  [nd-seq]
-  (validate! ::non-empty-seq nd-seq "Invalid N-D sequence")
-  (loop [s nd-seq
-         shape [(count s)]]
-    (if (sequential? (first s))
-      (recur (first s) (conj shape (count (first s))))
-      shape)))
-
-(defn map->scala-tuple-seq
-  "* Converts a map to a scala-Seq of scala-Tuple.
-   * Also works if a seq of seqs of 2 things is passed.
-   * Otherwise the value is passed through unchanged."
-  [map-or-tuple-seq]
-  (letfn [(key->name [k]
-            (if (or (keyword? k) (string? k) (symbol? k))
-              (string/replace (name k) "-" "_")
-              k))
-          (->tuple [kvp-or-tuple]
-            (if (coll? kvp-or-tuple)
-              (let [[k v] kvp-or-tuple]
-                ($/tuple (key->name k) v))
-              ;; pass-through
-              kvp-or-tuple))]
-    (if (coll? map-or-tuple-seq)
-      (->> map-or-tuple-seq
-           (map ->tuple)
-           (apply $/immutable-list))
-      ;; pass-through
-      map-or-tuple-seq)))
-
-(defmacro forms->scala-fn
-  "Creates a scala fn of zero args from forms"
-  [& forms]
-  `($/fn []
-     (do ~@forms)))
diff --git a/contrib/clojure-package/src/org/apache/clojure_mxnet/visualization.clj b/contrib/clojure-package/src/org/apache/clojure_mxnet/visualization.clj
deleted file mode 100644
index 37d141bb6548..000000000000
--- a/contrib/clojure-package/src/org/apache/clojure_mxnet/visualization.clj
+++ /dev/null
@@ -1,60 +0,0 @@
-;;
-;; Licensed to the Apache Software Foundation (ASF) under one or more
-;; contributor license agreements. See the NOTICE file distributed with
-;; this work for additional information regarding copyright ownership.
-;; The ASF licenses this file to You under the Apache License, Version 2.0
-;; (the "License"); you may not use this file except in compliance with
-;; the License. You may obtain a copy of the License at
-;;
-;; http://www.apache.org/licenses/LICENSE-2.0
-;;
-;; Unless required by applicable law or agreed to in writing, software
-;; distributed under the License is distributed on an "AS IS" BASIS,
-;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-;; See the License for the specific language governing permissions and
-;; limitations under the License.
-;;
-
-(ns org.apache.clojure-mxnet.visualization
-  (:require [org.apache.clojure-mxnet.util :as util]
-            [org.apache.clojure-mxnet.shape :as mx-shape])
-  (:import (org.apache.mxnet Visualization)))
-
-(defn plot-network
-  "Converts a symbol to a Dot object for visualization.
-   - symbol       symbol to be visualized
-   - title        title of the dot graph
-   - shape-map    map of shapes, str -> shape, giving the input shapes
-   - node-attrs   map of node attributes,
-                  for example: {:shape \"oval\" :fixedsize \"false\"}
-   - hide-weights if true (default) then inputs with names like `*_weight`
-                  or `*_bias` will be hidden
-   returns the Dot object of the symbol"
-  ([sym shape-map {:keys [title node-attrs hide-weights] :as opts
-                   :or {title "plot"
-                        hide-weights true}}]
-   (Visualization/plotNetwork sym
-                              title
-                              (->> shape-map
-                                   (map (fn [[k v]] [k (mx-shape/->shape v)]))
-                                   (into {})
-                                   (util/convert-map))
-                              (util/convert-map node-attrs)
-                              hide-weights))
-  ([sym shape-map]
-   (plot-network sym shape-map {})))
-
-(defn render
-  "Renders a file with the Graphviz engine into a format.
-   - dot      the Dot object from the plot-network function
-   - engine   the layout command used for rendering ('dot', 'neato', ...)
-   - format   the output format used for rendering ('pdf', 'png', ...)
-   - filename name of the DOT source file to render
-   - path     path to save the Dot source file"
-  ([dot engine format filename path]
-   (doto dot
-     (.render engine format filename path)))
-  ([dot filename path]
-   (render dot "dot" "pdf" filename path)))
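;; [Editor's sketch] Plotting a network with the two functions above, assuming
;; `viz` aliases org.apache.clojure-mxnet.visualization and `sym` the symbol
;; namespace; this writes "net.pdf" under "/tmp" with the default "dot" engine
;; (Graphviz must be on the PATH). The symbol and shapes are illustrative.
(comment
  (let [data (sym/variable "data")
        net  (sym/fully-connected {:data data :num-hidden 10})
        dot  (viz/plot-network net
                               {"data" [1 28 28]}
                               {:title "net"
                                :node-attrs {:shape "oval" :fixedsize "false"}})]
    (viz/render dot "net" "/tmp")))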
- " - ([dot engine format filename path] - (doto dot - (.render engine format filename path))) - ([dot filename path] - (render dot "dot" "pdf" filename path))) diff --git a/contrib/clojure-package/test/dev/generator_test.clj b/contrib/clojure-package/test/dev/generator_test.clj deleted file mode 100644 index 3d7d41f79d18..000000000000 --- a/contrib/clojure-package/test/dev/generator_test.clj +++ /dev/null @@ -1,396 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns dev.generator-test - (:require [clojure.test :refer :all] - [dev.generator :as gen] - [clojure.string :as string])) - -(defn file-function-name [f] - (->> (string/split (slurp f) #"\n") - (take 33) - last - (string/trim))) - -(deftest test-clojure-case - (is (= "foo-bar" (gen/clojure-case "FooBar"))) - (is (= "foo-bar-baz" (gen/clojure-case "FooBarBaz"))) - (is (= "foo-bar-baz" (gen/clojure-case "FOOBarBaz"))) - (is (= "foo-bar" (gen/clojure-case "foo_bar"))) - (is (= "foo-bar" (gen/clojure-case "Foo_Bar"))) - (is (= "div+" (gen/clojure-case "/+")))) - -(deftest fn-name->random-fn-name - (is (= "poisson" (gen/fn-name->random-fn-name "-random-poisson"))) - (is (= "poisson-like" (gen/fn-name->random-fn-name "-sample-poisson")))) - -(deftest remove-prefix - (is (= "randint" (gen/remove-prefix "-random-" "-random-randint"))) - (is (= "exponential" (gen/remove-prefix "-sample-" "-sample-exponential")))) - -(deftest in-namespace-random? - (is (gen/in-namespace-random? "random_randint")) - (is (gen/in-namespace-random? "sample_poisson")) - (is (not (gen/in-namespace-random? "rnn"))) - (is (not (gen/in-namespace-random? "activation")))) - -(defn ndarray-reflect-info [name] - (->> gen/ndarray-public-no-default - (filter #(= name (str (:name %)))) - first)) - -(defn symbol-reflect-info [name] - (->> gen/symbol-public-no-default - (filter #(= name (str (:name %)))) - first)) - -(deftest test-symbol-transform-param-name - (let [params ["java.lang.String" - "scala.collection.immutable.Map" - "scala.collection.Seq" - "scala.collection.immutable.Map"] - transformed-params ["sym-name" - "kwargs-map" - "symbol-list" - "kwargs-map"]] - (is (= transformed-params (gen/symbol-transform-param-name params))) - (is (= transformed-params (gen/symbol-transform-param-name - (:parameter-types (symbol-reflect-info "floor"))))))) - -(deftest test-gen-op-info - (testing "activation" - (let [activation-info (gen/gen-op-info "Activation")] - (is (= "activation" (:fn-name activation-info))) - (is (string? (:fn-description activation-info))) - (is (= 2 (-> activation-info :args count))) - (is (= "" (:key-var-num-args activation-info))) - - (is (= "data" (-> activation-info :args first :name))) - (is (= "NDArray-or-Symbol" (-> activation-info :args first :type))) - (is (false? 
(-> activation-info :args first :optional?))) - (is (nil? (-> activation-info :args first :default))) - (is (string? (-> activation-info :args first :description))) - - (is (= "act-type" (-> activation-info :args second :name))) - (is (= "'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'" (-> activation-info :args second :type))) - (is (false? (-> activation-info :args second :optional?))) - (is (nil? (-> activation-info :args second :default))) - (is (string? (-> activation-info :args second :description))))) - - (testing "argmin" - (let [argmin-info (gen/gen-op-info "argmin")] - (is (= "argmin" (:fn-name argmin-info))) - (is (= 3 (-> argmin-info :args count))) - - (is (= "data" (-> argmin-info :args (nth 0) :name))) - (is (= "NDArray-or-Symbol" (-> argmin-info :args (nth 0) :type))) - (is (false? (-> argmin-info :args (nth 0) :optional?))) - - (is (= "axis" (-> argmin-info :args (nth 1) :name))) - (is (= "int or None" (-> argmin-info :args (nth 1) :type))) - (is (= "'None'" (-> argmin-info :args (nth 1) :default))) - (is (true? (-> argmin-info :args (nth 1) :optional?))) - - (is (= "keepdims" (-> argmin-info :args (nth 2) :name))) - (is (= "boolean" (-> argmin-info :args (nth 2) :type))) - (is (= "0" (-> argmin-info :args (nth 2) :default))) - (is (true? (-> argmin-info :args (nth 2) :optional?))))) - - (testing "concat" - (let [concat-info (gen/gen-op-info "Concat")] - (is (= "concat" (:fn-name concat-info))) - (is (= 3 (-> concat-info :args count))) - (is (= "num-args" (:key-var-num-args concat-info))) - - (is (= "data" (-> concat-info :args (nth 0) :name))) - (is (= "NDArray-or-Symbol[]" (-> concat-info :args (nth 0) :type))) - (is (false? (-> concat-info :args (nth 0) :optional?))) - - (is (= "num-args" (-> concat-info :args (nth 1) :name))) - (is (= "int" (-> concat-info :args (nth 1) :type))) - (is (false? (-> concat-info :args (nth 1) :optional?))) - - (is (= "dim" (-> concat-info :args (nth 2) :name))) - (is (= "int" (-> concat-info :args (nth 2) :type))) - (is (= "'1'" (-> concat-info :args (nth 2) :default))) - (is (true? (-> concat-info :args (nth 2) :optional?))))) - - (testing "convolution" - (let [convolution-info (gen/gen-op-info "Convolution")] - - (is (= "convolution" (:fn-name convolution-info))) - (is (= 14 (-> convolution-info :args count))) - (is (= "" (:key-var-num-args convolution-info))) - - (is (= "data" (-> convolution-info :args (nth 0) :name))) - (is (= "NDArray-or-Symbol" (-> convolution-info :args (nth 0) :type))) - (is (false? (-> convolution-info :args (nth 0) :optional?))) - - (is (= "weight" (-> convolution-info :args (nth 1) :name))) - (is (= "NDArray-or-Symbol" (-> convolution-info :args (nth 1) :type))) - (is (false? (-> convolution-info :args (nth 1) :optional?))) - - (is (= "kernel" (-> convolution-info :args (nth 3) :name))) - (is (= "Shape" (-> convolution-info :args (nth 3) :type))) - (is (= "(tuple)" (-> convolution-info :args (nth 3) :spec))) - (is (false? (-> convolution-info :args (nth 3) :optional?))) - - (is (= "stride" (-> convolution-info :args (nth 4) :name))) - (is (= "Shape" (-> convolution-info :args (nth 4) :type))) - (is (= "(tuple)" (-> convolution-info :args (nth 4) :spec))) - (is (= "[]" (-> convolution-info :args (nth 4) :default))) - (is (true? (-> convolution-info :args (nth 4) :optional?))) - - (is (= "num-filter" (-> convolution-info :args (nth 7) :name))) - (is (= "int" (-> convolution-info :args (nth 7) :type))) - (is (= "(non-negative)" (-> convolution-info :args (nth 7) :spec))) - (is (false? 
(-> convolution-info :args (nth 7) :optional?))) - - (is (= "num-group" (-> convolution-info :args (nth 8) :name))) - (is (= "int" (-> convolution-info :args (nth 8) :type))) - (is (= "(non-negative)" (-> convolution-info :args (nth 8) :spec))) - (is (= "1" (-> convolution-info :args (nth 8) :default))) - (is (true? (-> convolution-info :args (nth 8) :optional?))) - - (is (= "workspace" (-> convolution-info :args (nth 9) :name))) - (is (= "long" (-> convolution-info :args (nth 9) :type))) - (is (= "(non-negative)" (-> convolution-info :args (nth 9) :spec))) - (is (= "1024" (-> convolution-info :args (nth 9) :default))) - (is (true? (-> convolution-info :args (nth 9) :optional?))) - - (is (= "no-bias" (-> convolution-info :args (nth 10) :name))) - (is (= "boolean" (-> convolution-info :args (nth 10) :type))) - (is (= "0" (-> convolution-info :args (nth 10) :default))) - (is (true? (-> convolution-info :args (nth 10) :optional?))) - - (is (= "layout" (-> convolution-info :args (nth 13) :name))) - (is (= "None, 'NCDHW', 'NCHW', 'NCW', 'NDHWC', 'NHWC'" (-> convolution-info :args (nth 13) :type))) - (is (= "'None'" (-> convolution-info :args (nth 13) :default))) - (is (true? (-> convolution-info :args (nth 13) :optional?))))) - - (testing "element wise sum" - (let [element-wise-sum-info (gen/gen-op-info "ElementWiseSum")] - (is (= "add-n" (:fn-name element-wise-sum-info))) - (is (= 1 (-> element-wise-sum-info :args count))) - (is (= "num-args" (:key-var-num-args element-wise-sum-info))) - - (is (= "args" (-> element-wise-sum-info :args (nth 0) :name))) - (is (= "NDArray-or-Symbol[]" (-> element-wise-sum-info :args (nth 0) :type))) - (is (false? (-> element-wise-sum-info :args (nth 0) :optional?)))))) - -(deftest test-ndarray-transform-param-name - (let [params ["scala.collection.immutable.Map" - "scala.collection.Seq"] - transformed-params ["kwargs-map" "& nd-array-and-params"]] - (is (= transformed-params (gen/ndarray-transform-param-name params))) - (is (= transformed-params (gen/ndarray-transform-param-name - (:parameter-types (ndarray-reflect-info "sqrt"))))))) - -(deftest test-has-variadic? - (is (false? (gen/has-variadic? ["sym-name" "kwargs-map" "symbol-list" "kwargs-map-1"]))) - (is (true? (gen/has-variadic? ["kwargs-map" "& nd-array-and-params"])))) - -(deftest test-increment-param-name - (is (= "foo-1" (gen/increment-param-name "foo"))) - (is (= "foo-2" (gen/increment-param-name "foo-1")))) - -(deftest test-rename-duplicate-params - (is (= ["foo" "bar" "baz"] (gen/rename-duplicate-params ["foo" "bar" "baz"]))) - (is (= ["foo" "bar" "bar-1"] (gen/rename-duplicate-params ["foo" "bar" "bar"]))) - (is (= ["foo" "bar" "bar-1" "foo-1"] (gen/rename-duplicate-params ["foo" "bar" "bar" "foo"]))) - (is (= ["foo" "bar" "bar-1" "bar-2"] (gen/rename-duplicate-params ["foo" "bar" "bar" "bar"]))) - (is (= ["foo" "bar" "bar-1" "bar-2" "foo-1" "baz"] (gen/rename-duplicate-params ["foo" "bar" "bar" "bar" "foo" "baz"])))) - -(deftest test-is-symbol-hand-gen? - (is (not (false? (gen/is-symbol-hand-gen? (symbol-reflect-info "max"))))) - (is (not (false? (gen/is-symbol-hand-gen? (symbol-reflect-info "Variable"))))) - (is (false? (gen/is-symbol-hand-gen? (symbol-reflect-info "sqrt"))))) - -(deftest test-is-ndarray-hand-gen? - (is (not (false? (gen/is-ndarray-hand-gen? (ndarray-reflect-info "zeros"))))) - (is (false? (gen/is-ndarray-hand-gen? 
(ndarray-reflect-info "sqrt"))))) - -(deftest test-public-by-name-and-param-count - (let [lrn-info (get (gen/public-by-name-and-param-count gen/symbol-public-to-gen) - (symbol "LRN"))] - (is (= 4 (-> lrn-info keys first))) - (is (= "LRN" (-> lrn-info vals ffirst :name str))))) - -(deftest test-symbol-vector-args - (is (= '(if (clojure.core/map? kwargs-map-or-vec-or-sym) - (util/empty-list) - (util/coerce-param - kwargs-map-or-vec-or-sym - #{"scala.collection.Seq"})) - (gen/symbol-vector-args)))) - -(deftest test-symbol-map-args - (is (= '(if (clojure.core/map? kwargs-map-or-vec-or-sym) - (org.apache.clojure-mxnet.util/convert-symbol-map - kwargs-map-or-vec-or-sym) - nil) - (gen/symbol-map-args)))) - -(deftest test-add-symbol-arities - (let [params (map symbol ["sym-name" "kwargs-map" "symbol-list" "kwargs-map-1"]) - function-name (symbol "foo") - [ar1 ar2 ar3] (gen/add-symbol-arities params function-name)] - (is (= '([sym-name attr-map kwargs-map] - (foo - sym-name - (util/convert-symbol-map attr-map) - (util/empty-list) - (util/convert-symbol-map kwargs-map))) - ar1)) - (is (= '([sym-name kwargs-map-or-vec-or-sym] - (foo - sym-name - nil - (if - (clojure.core/map? kwargs-map-or-vec-or-sym) - (util/empty-list) - (util/coerce-param - kwargs-map-or-vec-or-sym - #{"scala.collection.Seq"})) - (if - (clojure.core/map? kwargs-map-or-vec-or-sym) - (org.apache.clojure-mxnet.util/convert-symbol-map - kwargs-map-or-vec-or-sym) - nil))) - ar2)) - (is (= '([kwargs-map-or-vec-or-sym] - (foo - nil - nil - (if - (clojure.core/map? kwargs-map-or-vec-or-sym) - (util/empty-list) - (util/coerce-param - kwargs-map-or-vec-or-sym - #{"scala.collection.Seq"})) - (if - (clojure.core/map? kwargs-map-or-vec-or-sym) - (org.apache.clojure-mxnet.util/convert-symbol-map - kwargs-map-or-vec-or-sym) - nil))) - ar3)))) - -(deftest test-gen-symbol-function-arity - (let [op-name (symbol "$div") - op-values {1 [{:name (symbol "$div") - :return-type "org.apache.mxnet.Symbol," - :declaring-class "org.apache.mxnet.Symbol," - :parameter-types ["org.apache.mxnet.Symbol"], - :exception-types [], - :flags #{:public}} - {:name (symbol "$div") :return-type "org.apache.mxnet.Symbol," - :declaring-class "org.apache.mxnet.Symbol," - :parameter-types ["java.lang.Object"], - :exception-types [], - :flags #{:public}}]} - function-name (symbol "div")] - (is (= '(([sym sym-or-object] - (util/coerce-return - (.$div - sym - (util/nil-or-coerce-param - sym-or-object - #{"org.apache.mxnet.Symbol" "java.lang.Object"}))))) - (gen/gen-symbol-function-arity op-name op-values function-name))))) - -(deftest test-gen-ndarray-function-arity - (let [op-name (symbol "$div") - op-values {1 [{:name (symbol "$div") - :return-type "org.apache.mxnet.NDArray," - :declaring-class "org.apache.mxnet.NDArray," - :parameter-types ["float"], - :exception-types [], - :flags #{:public}} - {:name (symbol "$div") - :return-type "org.apache.mxnet.NDArray," - :declaring-class "org.apache.mxnet.NDArray," - :parameter-types ["org.apache.mxnet.NDArray"], - :exception-types [], - :flags #{:public}}]}] - (is (= '(([ndarray num-or-ndarray] - (util/coerce-return - (.$div - ndarray - (util/coerce-param - num-or-ndarray - #{"float" "org.apache.mxnet.NDArray"}))))) - (gen/gen-ndarray-function-arity op-name op-values))))) - -(deftest test-write-to-file - (testing "symbol-api" - (let [fname "test/test-symbol-api.clj" - fns (gen/all-symbol-api-functions gen/op-names) - _ (gen/write-to-file [(first fns) (second fns)] - (gen/symbol-api-gen-ns false) - fname)] - (is (= "activation" - 
(file-function-name "test/good-test-symbol-api.clj") - (file-function-name fname))))) - - (testing "symbol-random-api" - (let [fname "test/test-symbol-random-api.clj" - fns (gen/all-symbol-random-api-functions gen/op-names) - _ (gen/write-to-file [(first fns) (second fns)] - (gen/symbol-api-gen-ns true) - fname)] - (is (= "exponential" - (file-function-name "test/good-test-symbol-random-api.clj") - (file-function-name fname))))) - - - (testing "symbol" - (let [fname "test/test-symbol.clj" - _ (gen/write-to-file [(first gen/all-symbol-functions)] - gen/symbol-gen-ns - fname) - good-contents (slurp "test/good-test-symbol.clj") - contents (slurp fname)] - (is (= good-contents contents)))) - - (testing "ndarray-api" - (let [fname "test/test-ndarray-api.clj" - fns (gen/all-ndarray-api-functions gen/op-names) - _ (gen/write-to-file [(first fns) (second fns)] - (gen/ndarray-api-gen-ns false) - fname)] - (is (= "activation" - (file-function-name "test/good-test-ndarray-api.clj") - (file-function-name fname))))) - - (testing "ndarray-random-api" - (let [fname "test/test-ndarray-random-api.clj" - fns (gen/all-ndarray-random-api-functions gen/op-names) - _ (gen/write-to-file [(first fns) (second fns)] - (gen/ndarray-api-gen-ns true) - fname)] - (is (= "exponential" - (file-function-name "test/good-test-ndarray-random-api.clj") - (file-function-name fname))))) - - (testing "ndarray" - (let [fname "test/test-ndarray.clj" - _ (gen/write-to-file [(first gen/all-ndarray-functions)] - gen/ndarray-gen-ns - fname) - good-contents (slurp "test/good-test-ndarray.clj") - contents (slurp fname)] - (is (= good-contents contents))))) diff --git a/contrib/clojure-package/test/good-test-ndarray-api.clj b/contrib/clojure-package/test/good-test-ndarray-api.clj deleted file mode 100644 index f7f58f8f7c88..000000000000 --- a/contrib/clojure-package/test/good-test-ndarray-api.clj +++ /dev/null @@ -1,170 +0,0 @@ -(ns - ^{:doc "Experimental"} - org.apache.clojure-mxnet.ndarray-api - (:refer-clojure :exclude [* - + > >= < <= / cast concat flatten identity load max - min repeat reverse set sort take to-array empty shuffle - ref]) - (:require [org.apache.clojure-mxnet.shape :as mx-shape] - [org.apache.clojure-mxnet.util :as util]) - (:import (org.apache.mxnet NDArrayAPI))) - -;; Do not edit - this is auto-generated - -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - - - - -(defn - activation - "Applies an activation function element-wise to the input. 
- - The following activation functions are supported: - - - `relu`: Rectified Linear Unit, :math:`y = max(x, 0)` - - `sigmoid`: :math:`y = \\frac{1}{1 + exp(-x)}` - - `tanh`: Hyperbolic tangent, :math:`y = \\frac{exp(x) - exp(-x)}{exp(x) + exp(-x)}` - - `softrelu`: Soft ReLU, or SoftPlus, :math:`y = log(1 + exp(x))` - - `softsign`: :math:`y = \\frac{x}{1 + abs(x)}` - - - - Defined in src/operator/nn/activation.cc:L167 - - `data`: The input array. - `act-type`: Activation function to be applied. - `out`: Output array. (optional)" - ([data act-type] (activation {:data data, :act-type act-type})) - ([{:keys [data act-type out], :or {out nil}, :as opts}] - (util/coerce-return - (NDArrayAPI/Activation data act-type (util/->option out))))) - -(defn - batch-norm - "Batch normalization. - - Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as - well as offset ``beta``. - - Assume the input has more than one dimension and we normalize along axis 1. - We first compute the mean and variance along this axis: - - .. math:: - - data\\_mean[i] = mean(data[:,i,:,...]) \\\\ - data\\_var[i] = var(data[:,i,:,...]) - - Then compute the normalized output, which has the same shape as input, as following: - - .. math:: - - out[:,i,:,...] = \\frac{data[:,i,:,...] - data\\_mean[i]}{\\sqrt{data\\_var[i]+\\epsilon}} * gamma[i] + beta[i] - - Both *mean* and *var* returns a scalar by treating the input as a vector. - - Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` - have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and - the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these - two outputs are blocked. - - Besides the inputs and the outputs, this operator accepts two auxiliary - states, ``moving_mean`` and ``moving_var``, which are *k*-length - vectors. They are global statistics for the whole dataset, which are updated - by:: - - moving_mean = moving_mean * momentum + data_mean * (1 - momentum) - moving_var = moving_var * momentum + data_var * (1 - momentum) - - If ``use_global_stats`` is set to be true, then ``moving_mean`` and - ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute - the output. It is often used during inference. - - The parameter ``axis`` specifies which axis of the input shape denotes - the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel - axis to be the last item in the input shape. - - Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, - then set ``gamma`` to 1 and its gradient to 0. - - .. Note:: - When ``fix_gamma`` is set to True, no sparse support is provided. If ``fix_gamma is`` set to False, - the sparse tensors will fallback. - - - - Defined in src/operator/nn/batch_norm.cc:L572 - - `data`: Input data to batch normalization - `gamma`: gamma array - `beta`: beta array - `moving-mean`: running mean of input - `moving-var`: running variance of input - `eps`: Epsilon to prevent div 0. Must be no less than CUDNN_BN_MIN_EPSILON defined in cudnn.h when using cudnn (usually 1e-5) (optional) - `momentum`: Momentum for moving average (optional) - `fix-gamma`: Fix gamma while training (optional) - `use-global-stats`: Whether use global moving statistics instead of local batch-norm. This will force change batch-norm into a scale shift operator. 
(optional) - `output-mean-var`: Output the mean and inverse std (optional) - `axis`: Specify which shape axis the channel is specified (optional) - `cudnn-off`: Do not select CUDNN operator, if available (optional) - `out`: Output array. (optional)" - ([data gamma beta moving-mean moving-var] - (batch-norm - {:data data, - :gamma gamma, - :beta beta, - :moving-mean moving-mean, - :moving-var moving-var})) - ([{:keys - [data - gamma - beta - moving-mean - moving-var - eps - momentum - fix-gamma - use-global-stats - output-mean-var - axis - cudnn-off - out], - :or - {eps nil, - momentum nil, - fix-gamma nil, - use-global-stats nil, - output-mean-var nil, - axis nil, - cudnn-off nil, - out nil}, - :as opts}] - (util/coerce-return - (NDArrayAPI/BatchNorm - data - gamma - beta - moving-mean - moving-var - (util/->option eps) - (util/->option momentum) - (util/->option fix-gamma) - (util/->option use-global-stats) - (util/->option output-mean-var) - (util/->option axis) - (util/->option cudnn-off) - (util/->option out))))) - diff --git a/contrib/clojure-package/test/good-test-ndarray-random-api.clj b/contrib/clojure-package/test/good-test-ndarray-random-api.clj deleted file mode 100644 index 230e1033c008..000000000000 --- a/contrib/clojure-package/test/good-test-ndarray-random-api.clj +++ /dev/null @@ -1,95 +0,0 @@ -(ns - ^{:doc "Experimental"} - org.apache.clojure-mxnet.ndarray-random-api - (:refer-clojure :exclude [* - + > >= < <= / cast concat flatten identity load max - min repeat reverse set sort take to-array empty shuffle - ref]) - (:require [org.apache.clojure-mxnet.shape :as mx-shape] - [org.apache.clojure-mxnet.util :as util]) - (:import (org.apache.mxnet NDArrayAPI))) - -;; Do not edit - this is auto-generated - -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - - - - -(defn - exponential - "Draw random samples from an exponential distribution. - - Samples are distributed according to an exponential distribution parametrized by *lambda* (rate). - - Example:: - - exponential(lam=4, shape=(2,2)) = [[ 0.0097189 , 0.08999364], - [ 0.04146638, 0.31715935]] - - - Defined in src/operator/random/sample_op.cc:L137 - - `lam`: Lambda parameter (rate) of the exponential distribution. (optional) - `shape`: Shape of the output. (optional) - `ctx`: Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. (optional) - `dtype`: DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). (optional) - `out`: Output array. 
(optional)" - ([] (exponential {})) - ([{:keys [lam shape ctx dtype out], - :or {lam nil, shape nil, ctx nil, dtype nil, out nil}, - :as opts}] - (util/coerce-return - (NDArrayAPI/random_exponential - (util/->option lam) - (util/->option (clojure.core/when shape (mx-shape/->shape shape))) - (util/->option ctx) - (util/->option dtype) - (util/->option out))))) - -(defn - gamma - "Draw random samples from a gamma distribution. - - Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). - - Example:: - - gamma(alpha=9, beta=0.5, shape=(2,2)) = [[ 7.10486984, 3.37695289], - [ 3.91697288, 3.65933681]] - - - Defined in src/operator/random/sample_op.cc:L125 - - `alpha`: Alpha parameter (shape) of the gamma distribution. (optional) - `beta`: Beta parameter (scale) of the gamma distribution. (optional) - `shape`: Shape of the output. (optional) - `ctx`: Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. (optional) - `dtype`: DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). (optional) - `out`: Output array. (optional)" - ([] (gamma {})) - ([{:keys [alpha beta shape ctx dtype out], - :or {alpha nil, beta nil, shape nil, ctx nil, dtype nil, out nil}, - :as opts}] - (util/coerce-return - (NDArrayAPI/random_gamma - (util/->option alpha) - (util/->option beta) - (util/->option (clojure.core/when shape (mx-shape/->shape shape))) - (util/->option ctx) - (util/->option dtype) - (util/->option out))))) - diff --git a/contrib/clojure-package/test/good-test-ndarray.clj b/contrib/clojure-package/test/good-test-ndarray.clj deleted file mode 100644 index b048a819c642..000000000000 --- a/contrib/clojure-package/test/good-test-ndarray.clj +++ /dev/null @@ -1,38 +0,0 @@ -(ns org.apache.clojure-mxnet.ndarray - (:refer-clojure :exclude [* - + > >= < <= / cast concat flatten identity load max - min repeat reverse set sort take to-array empty shuffle - ref]) - (:import (org.apache.mxnet NDArray Shape))) - -;; Do not edit - this is auto-generated - -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - - - - -(defn - div - ([ndarray ndarray-or-double-or-float] - (util/coerce-return - (.$div - ndarray - (util/coerce-param - ndarray-or-double-or-float - #{"org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE" - "org.apache.mxnet.NDArray"}))))) - diff --git a/contrib/clojure-package/test/good-test-symbol-api.clj b/contrib/clojure-package/test/good-test-symbol-api.clj deleted file mode 100644 index 3081304ebdb3..000000000000 --- a/contrib/clojure-package/test/good-test-symbol-api.clj +++ /dev/null @@ -1,192 +0,0 @@ -(ns - ^{:doc "Experimental"} - org.apache.clojure-mxnet.symbol-api - (:refer-clojure :exclude [* - + > >= < <= / cast concat identity flatten load max - min repeat reverse set sort take to-array empty sin - get apply shuffle ref]) - (:require [org.apache.clojure-mxnet.util :as util] - [org.apache.clojure-mxnet.shape :as mx-shape]) - (:import (org.apache.mxnet SymbolAPI))) - -;; Do not edit - this is auto-generated - -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - - - - -(defn - activation - "Applies an activation function element-wise to the input. - - The following activation functions are supported: - - - `relu`: Rectified Linear Unit, :math:`y = max(x, 0)` - - `sigmoid`: :math:`y = \\frac{1}{1 + exp(-x)}` - - `tanh`: Hyperbolic tangent, :math:`y = \\frac{exp(x) - exp(-x)}{exp(x) + exp(-x)}` - - `softrelu`: Soft ReLU, or SoftPlus, :math:`y = log(1 + exp(x))` - - `softsign`: :math:`y = \\frac{x}{1 + abs(x)}` - - - - Defined in src/operator/nn/activation.cc:L167 - - `data`: The input array. (optional) - `act-type`: Activation function to be applied. - `name`: Name of the symbol (optional) - `attr`: Attributes of the symbol (optional)" - [{:keys [data act-type name attr], - :or {data nil, name nil, attr nil}, - :as opts}] - (util/coerce-return - (SymbolAPI/Activation - (util/->option data) - act-type - name - (clojure.core/when - attr - (clojure.core/->> - attr - (clojure.core/mapv - (clojure.core/fn [[k v]] [k (clojure.core/str v)])) - (clojure.core/into {}) - util/convert-map))))) - -(defn - batch-norm - "Batch normalization. - - Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as - well as offset ``beta``. - - Assume the input has more than one dimension and we normalize along axis 1. - We first compute the mean and variance along this axis: - - .. math:: - - data\\_mean[i] = mean(data[:,i,:,...]) \\\\ - data\\_var[i] = var(data[:,i,:,...]) - - Then compute the normalized output, which has the same shape as input, as following: - - .. math:: - - out[:,i,:,...] = \\frac{data[:,i,:,...] - data\\_mean[i]}{\\sqrt{data\\_var[i]+\\epsilon}} * gamma[i] + beta[i] - - Both *mean* and *var* returns a scalar by treating the input as a vector. 
- - Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` - have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and - the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these - two outputs are blocked. - - Besides the inputs and the outputs, this operator accepts two auxiliary - states, ``moving_mean`` and ``moving_var``, which are *k*-length - vectors. They are global statistics for the whole dataset, which are updated - by:: - - moving_mean = moving_mean * momentum + data_mean * (1 - momentum) - moving_var = moving_var * momentum + data_var * (1 - momentum) - - If ``use_global_stats`` is set to be true, then ``moving_mean`` and - ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute - the output. It is often used during inference. - - The parameter ``axis`` specifies which axis of the input shape denotes - the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel - axis to be the last item in the input shape. - - Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, - then set ``gamma`` to 1 and its gradient to 0. - - .. Note:: - When ``fix_gamma`` is set to True, no sparse support is provided. If ``fix_gamma is`` set to False, - the sparse tensors will fallback. - - - - Defined in src/operator/nn/batch_norm.cc:L572 - - `data`: Input data to batch normalization (optional) - `gamma`: gamma array (optional) - `beta`: beta array (optional) - `moving-mean`: running mean of input (optional) - `moving-var`: running variance of input (optional) - `eps`: Epsilon to prevent div 0. Must be no less than CUDNN_BN_MIN_EPSILON defined in cudnn.h when using cudnn (usually 1e-5) (optional) - `momentum`: Momentum for moving average (optional) - `fix-gamma`: Fix gamma while training (optional) - `use-global-stats`: Whether use global moving statistics instead of local batch-norm. This will force change batch-norm into a scale shift operator. 
(optional) - `output-mean-var`: Output the mean and inverse std (optional) - `axis`: Specify which shape axis the channel is specified (optional) - `cudnn-off`: Do not select CUDNN operator, if available (optional) - `name`: Name of the symbol (optional) - `attr`: Attributes of the symbol (optional)" - [{:keys - [data - gamma - beta - moving-mean - moving-var - eps - momentum - fix-gamma - use-global-stats - output-mean-var - axis - cudnn-off - name - attr], - :or - {output-mean-var nil, - axis nil, - cudnn-off nil, - fix-gamma nil, - eps nil, - data nil, - attr nil, - beta nil, - name nil, - use-global-stats nil, - moving-mean nil, - moving-var nil, - momentum nil, - gamma nil}, - :as opts}] - (util/coerce-return - (SymbolAPI/BatchNorm - (util/->option data) - (util/->option gamma) - (util/->option beta) - (util/->option moving-mean) - (util/->option moving-var) - (util/->option eps) - (util/->option momentum) - (util/->option fix-gamma) - (util/->option use-global-stats) - (util/->option output-mean-var) - (util/->option axis) - (util/->option cudnn-off) - name - (clojure.core/when - attr - (clojure.core/->> - attr - (clojure.core/mapv - (clojure.core/fn [[k v]] [k (clojure.core/str v)])) - (clojure.core/into {}) - util/convert-map))))) - diff --git a/contrib/clojure-package/test/good-test-symbol-random-api.clj b/contrib/clojure-package/test/good-test-symbol-random-api.clj deleted file mode 100644 index 7202d2e27d12..000000000000 --- a/contrib/clojure-package/test/good-test-symbol-random-api.clj +++ /dev/null @@ -1,118 +0,0 @@ -(ns - ^{:doc "Experimental"} - org.apache.clojure-mxnet.symbol-random-api - (:refer-clojure :exclude [* - + > >= < <= / cast concat identity flatten load max - min repeat reverse set sort take to-array empty sin - get apply shuffle ref]) - (:require [org.apache.clojure-mxnet.util :as util] - [org.apache.clojure-mxnet.shape :as mx-shape]) - (:import (org.apache.mxnet SymbolAPI))) - -;; Do not edit - this is auto-generated - -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - - - - -(defn - exponential - "Draw random samples from an exponential distribution. - - Samples are distributed according to an exponential distribution parametrized by *lambda* (rate). - - Example:: - - exponential(lam=4, shape=(2,2)) = [[ 0.0097189 , 0.08999364], - [ 0.04146638, 0.31715935]] - - - Defined in src/operator/random/sample_op.cc:L137 - - `lam`: Lambda parameter (rate) of the exponential distribution. (optional) - `shape`: Shape of the output. (optional) - `ctx`: Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. (optional) - `dtype`: DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). 
(optional) - `name`: Name of the symbol (optional) - `attr`: Attributes of the symbol (optional)" - [{:keys [lam shape ctx dtype name attr], - :or {lam nil, shape nil, ctx nil, dtype nil, name nil, attr nil}, - :as opts}] - (util/coerce-return - (SymbolAPI/random_exponential - (util/->option lam) - (util/->option (clojure.core/when shape (mx-shape/->shape shape))) - (util/->option ctx) - (util/->option dtype) - name - (clojure.core/when - attr - (clojure.core/->> - attr - (clojure.core/mapv - (clojure.core/fn [[k v]] [k (clojure.core/str v)])) - (clojure.core/into {}) - util/convert-map))))) - -(defn - gamma - "Draw random samples from a gamma distribution. - - Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). - - Example:: - - gamma(alpha=9, beta=0.5, shape=(2,2)) = [[ 7.10486984, 3.37695289], - [ 3.91697288, 3.65933681]] - - - Defined in src/operator/random/sample_op.cc:L125 - - `alpha`: Alpha parameter (shape) of the gamma distribution. (optional) - `beta`: Beta parameter (scale) of the gamma distribution. (optional) - `shape`: Shape of the output. (optional) - `ctx`: Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. (optional) - `dtype`: DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). (optional) - `name`: Name of the symbol (optional) - `attr`: Attributes of the symbol (optional)" - [{:keys [alpha beta shape ctx dtype name attr], - :or - {alpha nil, - beta nil, - shape nil, - ctx nil, - dtype nil, - name nil, - attr nil}, - :as opts}] - (util/coerce-return - (SymbolAPI/random_gamma - (util/->option alpha) - (util/->option beta) - (util/->option (clojure.core/when shape (mx-shape/->shape shape))) - (util/->option ctx) - (util/->option dtype) - name - (clojure.core/when - attr - (clojure.core/->> - attr - (clojure.core/mapv - (clojure.core/fn [[k v]] [k (clojure.core/str v)])) - (clojure.core/into {}) - util/convert-map))))) - diff --git a/contrib/clojure-package/test/good-test-symbol.clj b/contrib/clojure-package/test/good-test-symbol.clj deleted file mode 100644 index 947d9262d38d..000000000000 --- a/contrib/clojure-package/test/good-test-symbol.clj +++ /dev/null @@ -1,38 +0,0 @@ -(ns org.apache.clojure-mxnet.symbol - (:refer-clojure :exclude [* - + > >= < <= / cast concat identity flatten load max - min repeat reverse set sort take to-array empty sin - get apply shuffle ref]) - (:require [org.apache.clojure-mxnet.util :as util]) - (:import (org.apache.mxnet Symbol))) - -;; Do not edit - this is auto-generated - -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - - - - -(defn - div - ([sym sym-or-object] - (util/coerce-return - (.$div - sym - (util/nil-or-coerce-param - sym-or-object - #{"org.apache.mxnet.Symbol" "java.lang.Object"}))))) - diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/callback_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/callback_test.clj deleted file mode 100644 index 13932ae1ee5e..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/callback_test.clj +++ /dev/null @@ -1,34 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.callback-test - (:require [org.apache.clojure-mxnet.callback :as callback] - [clojure.test :refer :all] - [org.apache.clojure-mxnet.eval-metric :as eval-metric] - [org.apache.clojure-mxnet.ndarray :as ndarray])) - -(deftest test-speedometer - (let [speedometer (callback/speedometer 1) - metric (eval-metric/accuracy)] - (eval-metric/update metric [(ndarray/ones [2])] [(ndarray/ones [2 3])]) - ;;; only side effects of logging - (callback/invoke speedometer 0 1 metric) - (callback/invoke speedometer 0 2 metric) - (callback/invoke speedometer 0 3 metric) - (callback/invoke speedometer 0 10 metric) - (callback/invoke speedometer 0 50 metric) - (callback/invoke speedometer 0 100 metric))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/eval_metric_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/eval_metric_test.clj deleted file mode 100644 index 1f4dba35fa7a..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/eval_metric_test.clj +++ /dev/null @@ -1,68 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - -(ns org.apache.clojure-mxnet.eval-metric-test - (:require [org.apache.clojure-mxnet.eval-metric :as eval-metric] - [clojure.test :refer :all] - [org.apache.clojure-mxnet.ndarray :as ndarray])) - -(defn test-eval-metric [test-metric metric-name labels preds metric-val] - (println "Testing eval metric" metric-name) - (let [metric test-metric] - (eval-metric/update metric labels preds) - (is (= [metric-name metric-val] (eval-metric/get metric))) - - (testing "get does not reset the metric" - (is (= [metric-name metric-val] (eval-metric/get metric)))) - - (testing "resetting the metric" - (eval-metric/reset metric) - (is (= [metric-name "NaN"] (map str (eval-metric/get metric))))) - - (testing "get-and-reset gets the metric and then resets it" - (eval-metric/update metric labels preds) - (is (= [metric-name metric-val] (eval-metric/get-and-reset metric))) - (is (= [metric-name "NaN"] (map str (eval-metric/get metric))))))) - -(deftest test-metrics - (doseq [[metric-fn metric-name labels preds metric-val] - [[(eval-metric/accuracy) "accuracy" [(ndarray/zeros [2])] [(ndarray/zeros [2 3])] 1.0] - [(eval-metric/top-k-accuracy 2) "top_k_accuracy" [(ndarray/zeros [2])] [(ndarray/zeros [2 3])] 1.0] - [(eval-metric/f1) "f1" [(ndarray/zeros [2])] [(ndarray/zeros [2 3])] 0.0] - [(eval-metric/perplexity) "Perplexity" [(ndarray/ones [2])] [(ndarray/ones [2 3])] 1.0] - [(eval-metric/mae) "mae" [(ndarray/ones [2])] [(ndarray/ones [2])] 0.0] - [(eval-metric/mse) "mse" [(ndarray/ones [2])] [(ndarray/ones [2])] 0.0] - [(eval-metric/rmse) "rmse" [(ndarray/ones [2])] [(ndarray/ones [2])] 0.0]]] - (test-eval-metric metric-fn metric-name labels preds metric-val))) - -(deftest test-custom-metric - (let [metric (eval-metric/custom-metric (fn [label pred] - (float - (- (apply + (ndarray/->vec label)) - (apply + (ndarray/->vec pred))))) - "my-metric")] - (eval-metric/update metric [(ndarray/ones [2])] [(ndarray/ones [2])]) - (is (= ["my-metric" 0.0] (eval-metric/get metric))))) - -(deftest test-comp-metric - (let [metric (eval-metric/comp-metric [(eval-metric/accuracy) - (eval-metric/f1) - (eval-metric/top-k-accuracy 2)])] - (eval-metric/update metric [(ndarray/ones [2])] [(ndarray/ones [2 3])]) - (is (= {"accuracy" 0.0 - "f1" 0.0 - "top_k_accuracy" 1.0} (eval-metric/get metric))))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/executor_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/executor_test.clj deleted file mode 100644 index ebd1a9d061a4..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/executor_test.clj +++ /dev/null @@ -1,96 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - -(ns org.apache.clojure-mxnet.executor-test - (:require [org.apache.clojure-mxnet.context :as context] - [org.apache.clojure-mxnet.executor :as executor] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.random :as random] - [org.apache.clojure-mxnet.symbol :as sym] - [org.apache.clojure-mxnet.test-util :as test-util] - [clojure.test :refer :all])) - -(deftest test-bind - (let [shape [100 30] - lhs (sym/variable "lhs") - rhs (sym/variable "rhs") - ret (sym/+ lhs rhs)] - (is (= ["lhs" "rhs"] (sym/list-arguments ret))) - - (let [lhs-arr (random/uniform -10 10 shape) - rhs-arr (random/uniform -10 10 shape) - lhs-grad (ndarray/empty shape) - rhs-grad (ndarray/empty shape) - exec (sym/bind ret (context/default-context) [lhs-arr rhs-arr] [lhs-grad rhs-grad]) - exec2 (sym/bind ret (context/default-context) [lhs-arr rhs-arr]) - exec3 (sym/bind ret (context/default-context) {"rhs" rhs-arr "lhs" lhs-arr} {"lhs" lhs-grad "rhs" rhs-grad})] - (executor/forward exec) - (executor/forward exec2) - (executor/forward exec3) - (is (test-util/approx= 1e-6 (-> (ndarray/+ lhs-arr rhs-arr) ndarray/->vec) (-> (executor/outputs exec) first ndarray/->vec))) - (is (test-util/approx= 1e-6 (-> (ndarray/+ lhs-arr rhs-arr) ndarray/->vec) (-> (executor/outputs exec2) first ndarray/->vec))) - (is (test-util/approx= 1e-6 (-> (ndarray/+ lhs-arr rhs-arr) ndarray/->vec) (-> (executor/outputs exec3) first ndarray/->vec))) - - ;; test gradient - (let [out-grad (ndarray/ones shape) - lhs-grad2 out-grad - rhs-grad2 out-grad] - (executor/backward exec out-grad) - (is (test-util/approx= 1e-6 (ndarray/->vec lhs-grad) (ndarray/->vec lhs-grad2))) - (is (test-util/approx= 1e-6 (ndarray/->vec rhs-grad) (ndarray/->vec rhs-grad2))))))) - -(deftest test-reshape - (let [x (sym/variable "x") - y (sym/fully-connected {:data x :num-hidden 4}) - exec (sym/simple-bind y (context/default-context) {"x" [5 4]}) - _ (executor/set-arg-arrays exec [1 1 0]) - new-exec (executor/reshape exec {"x" [3 4]})] - (executor/forward new-exec) - ;; test sub exec forward - (is (every? #(= 4.0 %) (->> (executor/outputs new-exec) - (map ndarray/->vec) - first))) - ;; test shared memory - (is (= [4.0 4.0 4.0] (->> (executor/outputs exec) - (map ndarray/->vec) - first - (take 3)))) - ;; test base exec forward - (executor/forward exec) - (is (every? 
#(= 4.0 %) (->> (executor/outputs exec) - (map ndarray/->vec) - first))))) - -(deftest test-forward - (let [a (sym/variable "a") - b (sym/variable "b") - c (sym/+ a b) - ex (sym/bind c {:a (ndarray/* (ndarray/ones [1 2]) 2) - :b (ndarray/* (ndarray/ones [1 2]) 3)})] - ;; test forward with bound values - (executor/forward ex) - (is (= [5.0 5.0] (-> ex executor/outputs first ndarray/->vec))) - ;; test forward with new a (b is still [3.0 3.0]) - (executor/forward ex false {:a (ndarray/* (ndarray/ones [1 2]) 4)}) - (is (= [7.0 7.0] (-> ex executor/outputs first ndarray/->vec))) - ;; test forward with new b (a is still [4.0 4.0]) - (executor/forward ex false {:b (ndarray/* (ndarray/ones [1 2]) 5)}) - (is (= [9.0 9.0] (-> ex executor/outputs first ndarray/->vec))) - ;; test forward with new a & b - (executor/forward ex false {:a (ndarray/* (ndarray/ones [1 2]) 6) - :b (ndarray/* (ndarray/ones [1 2]) 7)}) - (is (= [13.0 13.0] (-> ex executor/outputs first ndarray/->vec))))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/image_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/image_test.clj deleted file mode 100644 index a5b68033678d..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/image_test.clj +++ /dev/null @@ -1,124 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.image-test - (:require [org.apache.clojure-mxnet.image :as image] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [clojure.java.io :as io] - [clojure.test :refer [deftest is use-fixtures run-tests]] - [test-helper]) - (:import (javax.imageio ImageIO) - (java.io File))) - - -(test-helper/load-test-images) - -(def tmp-dir (System/getProperty "java.io.tmpdir")) - (def image-path (.getAbsolutePath (io/file tmp-dir "Pug-Cookie.jpg"))) -(def image-src-path "test/test-images/Pug-Cookie.jpg") - -(defn- cp - "Copy from filepath `from` to filepath `to`." - [from to] - (with-open [in (io/input-stream (io/file from)) - out (io/output-stream (io/file to))] - (io/copy in out))) - -(defn- rm - "Removes `filepath`." - [filepath] - (io/delete-file filepath)) - -(defn- with-file - "Provides `src-path` in `dest-path` for the test function `f` to use."
- [src-path dest-path] - (fn [f] - (cp src-path dest-path) - (f) - (rm dest-path))) - -(use-fixtures :once (with-file image-src-path image-path)) - -(deftest test-decode-image - (let [img-arr (image/decode-image (io/input-stream image-path)) - img-arr-2 (image/decode-image (io/input-stream image-path) - {:color-flag image/GRAYSCALE})] - (is (= [576 1024 3] (ndarray/shape-vec img-arr))) - (is (= [576 1024 1] (ndarray/shape-vec img-arr-2))))) - -(deftest test-decode - (let [img-arr (image/decode (io/input-stream image-path)) - img-arr-2 (image/decode (io/input-stream image-path) - {:color :grayscale})] - (is (= [576 1024 3] (ndarray/shape-vec img-arr))) - (is (= [576 1024 1] (ndarray/shape-vec img-arr-2))))) - -(deftest test-read-image - (let [img-arr (image/read-image image-path) - img-arr-2 (image/read-image image-path {:color-flag image/GRAYSCALE})] - (is (= [576 1024 3] (ndarray/shape-vec img-arr))) - (is (= [576 1024 1] (ndarray/shape-vec img-arr-2))))) - -(deftest test-read - (let [img-arr (image/read image-path) - img-arr-2 (image/read image-path {:color :grayscale})] - (is (= [576 1024 3] (ndarray/shape-vec img-arr))) - (is (= [576 1024 1] (ndarray/shape-vec img-arr-2))))) - -(deftest test-resize-image - (let [img-arr (image/read image-path) - resized-arr (image/resize-image img-arr 224 224)] - (is (= [224 224 3] (ndarray/shape-vec resized-arr))))) - -(deftest test-resize - (let [img-arr (image/read image-path) - resized-arr (image/resize img-arr 224 224)] - (is (= [224 224 3] (ndarray/shape-vec resized-arr))))) - -(deftest test-fixed-crop - (let [img-arr (image/read image-path) - cropped-arr (image/fixed-crop img-arr 0 0 224 224)] - (is (= [224 224 3] (ndarray/shape-vec cropped-arr))))) - -(deftest test-apply-border - (let [img-arr (image/read image-path) - padded-arr (image/apply-border img-arr 1 1 1 1)] - (is (= [578 1026 3] (ndarray/shape-vec padded-arr))))) - -(deftest test-to-image - (let [img-arr (image/read image-path) - resized-arr (image/resize img-arr 224 224) - new-img (image/to-image resized-arr)] - (is (ImageIO/write new-img "png" (io/file tmp-dir "out.png"))))) - -(deftest test-ndarray->image - (let [img-arr (image/read image-path) - resized-arr (image/resize img-arr 224 224) - new-img (image/ndarray->image resized-arr)] - (is (ImageIO/write new-img "png" (io/file tmp-dir "out.png"))))) - -(deftest test-draw-bounding-box! - (let [orig-img (ImageIO/read (new File image-path)) - new-img (image/draw-bounding-box! - orig-img - [{:x-min 190 :x-max 850 :y-min 50 :y-max 450} - {:x-min 200 :x-max 350 :y-min 440 :y-max 530}] - {:stroke 2 - :names ["pug" "cookie"] - :transparency 0.8 - :font-size-mult 2.0})] - (is (ImageIO/write new-img "png" (io/file tmp-dir "out.png"))))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/objectdetector_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/objectdetector_test.clj deleted file mode 100644 index e2b9579c7000..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/infer/objectdetector_test.clj +++ /dev/null @@ -1,82 +0,0 @@ -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. 
You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.infer.objectdetector-test - (:require [org.apache.clojure-mxnet.context :as context] - [org.apache.clojure-mxnet.dtype :as dtype] - [org.apache.clojure-mxnet.infer :as infer] - [org.apache.clojure-mxnet.layout :as layout] - [clojure.java.io :as io] - [clojure.java.shell :refer [sh]] - [clojure.test :refer :all] - [org.apache.clojure-mxnet.ndarray :as ndarray])) - -(def model-dir "data/") -(def model-path-prefix (str model-dir "resnet50_ssd/resnet50_ssd_model")) - -(when-not (.exists (io/file (str model-path-prefix "-symbol.json"))) - (sh "./scripts/infer/get_ssd_data.sh")) - -(defn create-detector [] - (let [descriptors [{:name "data" - :shape [1 3 512 512] - :layout layout/NCHW - :dtype dtype/FLOAT32}] - factory (infer/model-factory model-path-prefix descriptors)] - (infer/create-object-detector factory))) - -(deftest test-single-detection - (let [detector (create-detector) - image (infer/load-image-from-file "test/test-images/kitten.jpg") - [predictions-all] (infer/detect-objects detector image) - [predictions] (infer/detect-objects detector image 5) - {:keys [class prob x-min x-max y-min y-max] :as pred} (first predictions)] - (is (some? predictions)) - (is (= 5 (count predictions))) - (is (= 13 (count predictions-all))) - (is (= "cat" class)) - (is (< 0.8 prob)) - (is (every? #(< 0 % 1) [x-min x-max y-min y-max])))) - -(deftest test-batch-detection - (let [detector (create-detector) - image-batch (infer/load-image-paths ["test/test-images/kitten.jpg" - "test/test-images/Pug-Cookie.jpg"]) - [batch-predictions-all] (infer/detect-objects-batch detector image-batch) - [predictions] (infer/detect-objects-batch detector image-batch 5) - {:keys [class prob x-min x-max y-min y-max] :as pred} (first predictions)] - (is (some? predictions)) - (is (= 13 (count batch-predictions-all))) - (is (= 5 (count predictions))) - (is (= "cat" class)) - (is (< 0.8 prob)) - (is (every? #(< 0 % 1) [x-min x-max y-min y-max])))) - -(deftest test-detection-with-ndarrays - (let [detector (create-detector) - image (-> (infer/load-image-from-file "test/test-images/kitten.jpg") - (infer/reshape-image 512 512) - (infer/buffered-image-to-pixels [3 512 512] dtype/FLOAT32) - (ndarray/expand-dims 0)) - [predictions-all] (infer/detect-objects-with-ndarrays detector [image]) - [predictions] (infer/detect-objects-with-ndarrays detector [image] 1) - {:keys [class prob x-min x-max y-min y-max] :as pred} (first predictions)] - (is (some? predictions-all)) - (is (= 1 (count predictions))) - (is (= "cat" class)) - (is (< 0.8 prob)) - (is (every? #(< 0 % 1) [x-min x-max y-min y-max])))) - diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/initializer_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/initializer_test.clj deleted file mode 100644 index 288a41496f0b..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/initializer_test.clj +++ /dev/null @@ -1,45 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements.
See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.initializer-test - (:require [org.apache.clojure-mxnet.initializer :as initializer] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [clojure.test :refer :all])) - -(defn exercise-initializer [init] - (-> init - (initializer/init-weight "test-weight" (ndarray/zeros [3 3]))) - - (is (number? - (-> init - (initializer/apply "test-weight" (ndarray/zeros [3 3])) - (ndarray/->vec) - (first))))) - -(deftest test-uniform - (exercise-initializer (initializer/uniform)) - (exercise-initializer (initializer/uniform 0.8))) - -(deftest test-normal - (exercise-initializer (initializer/normal)) - (exercise-initializer (initializer/normal 0.2))) - -(deftest test-xavier - (exercise-initializer (initializer/xavier)) - (exercise-initializer (initializer/xavier {:rand-type "gaussian" - :factor-type "in" - :magnitude 2}))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/io_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/io_test.clj deleted file mode 100644 index 9babf1e22536..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/io_test.clj +++ /dev/null @@ -1,214 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - -(ns org.apache.clojure-mxnet.io-test - (:require [clojure.java.io :as io] - [clojure.java.shell :refer [sh]] - [org.apache.clojure-mxnet.io :as mx-io] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.util :as util] - [org.apache.clojure-mxnet.shape :as mx-shape] - [clojure.test :refer :all] - [org.apache.clojure-mxnet.dtype :as dtype] - [org.apache.clojure-mxnet.layout :as layout])) - -(deftest test-mnist-iter-and-mnist-pack - (let [_ (when-not (.exists (io/file "data/train-images-idx3-ubyte")) - (sh "scripts/get_mnist_data.sh")) - params {:image "data/train-images-idx3-ubyte" - :label "data/train-labels-idx1-ubyte" - :data-shape [784] - :batch-size 100 - :shuffle 1 - :flat 1 - :silent 0 - :seed 10} - mnist-pack (mx-io/mnist-pack params)] - (is (= 600 (count (mx-io/batches mnist-pack)))) - - (let [mnist-iter (mx-io/iterator mnist-pack) - provide-data (mx-io/provide-data mnist-iter) - provide-label (mx-io/provide-label mnist-iter)] - (is (= [100 784] (-> provide-data first :shape))) - (is (= [100] (-> provide-label first :shape))) - (is (= 600 (mx-io/reduce-batches mnist-iter (fn [result batch] (inc result))))) - ;; test reset - (let [_ (mx-io/reset mnist-iter) - _ (mx-io/next mnist-iter) - label0 (-> (mx-io/iter-label mnist-iter) first (ndarray/->vec)) - data0 (-> (mx-io/iter-data mnist-iter) first (ndarray/->vec)) - _ (mx-io/next mnist-iter) - _ (mx-io/next mnist-iter) - _ (mx-io/next mnist-iter) - _ (mx-io/reset mnist-iter) - _ (mx-io/next mnist-iter) - label1 (-> (mx-io/iter-label mnist-iter) first (ndarray/->vec)) - data1 (-> (mx-io/iter-data mnist-iter) first (ndarray/->vec))] - (is (= label1 label0)) - (is (= data1 data0)))))) - -(deftest test-provide-data-and-label - (let [test-data (mx-io/mnist-iter {:image "data/train-images-idx3-ubyte" - :label "data/train-labels-idx1-ubyte" - :label-name "softmax_label" - :data-shape [1 28 28] - :label-shape [1 1 10] - :batch-size 100 - :shuffle true - :flat false - :silent false - :seed 10})] - (is (= [{:name "data", :shape [100 1 28 28]}] - (mx-io/provide-data test-data))) - (is (= [{:name "softmax_label", :shape [100]}] - (mx-io/provide-label test-data))) - (is (= [{:name "data", :shape [100 1 28 28] - :dtype dtype/FLOAT32 - :layout layout/UNDEFINED}] - (mx-io/provide-data-desc test-data))) - (is (= [{:name "softmax_label" - :shape [100] - :dtype dtype/FLOAT32 - :layout layout/UNDEFINED}] - (mx-io/provide-label-desc test-data))))) - -(deftest test-image-record-iter - (let [_ (when-not (.exists (io/file "data/cifar/train.rec")) - (sh "scripts/get_cifar_data.sh")) - params {:path-imgrec "data/cifar/train.rec" - :mean-img "data/cifar/cifar10_mean.bin" - :rand-crop false - :rand-mirror false - :shuffle false - :data-shape [3 28 28] - :batch-size 100 - :preprocess-threads 4 - :prefetch-buffer 1} - img-rec-iter (mx-io/image-record-iter params) - nbatch 500] - (is (= [100 3 28 28] (-> (mx-io/provide-data img-rec-iter) first :shape))) - (is (= [100] (-> (mx-io/provide-label img-rec-iter) first :shape))) - (is (= nbatch (mx-io/reduce-batches img-rec-iter (fn [result batch] (inc result))))))) - -(deftest test-resize-iter - (let [_ (when-not (.exists (io/file "data/train-images-idx3-ubyte")) - (sh "scripts/get_mnist_data.sh")) - params {:image "data/train-images-idx3-ubyte" - :label "data/train-labels-idx1-ubyte" - :data-shape [784] - :batch-size 100 - :shuffle 1 - :flat 1 - :silent 0 - :seed 10} - mnist-iter (mx-io/mnist-iter params) - nbatch 400 - resize-iter (mx-io/resize-iter mnist-iter nbatch false)] - (is (= nbatch
(mx-io/reduce-batches resize-iter (fn [result batch] (inc result))))) - (mx-io/reset resize-iter) - (is (= nbatch (mx-io/reduce-batches resize-iter (fn [result batch] (inc result))))))) - -(deftest test-prefetching-iter - (let [_ (when-not (.exists (io/file "data/train-images-idx3-ubyte")) - (sh "scripts/get_mnist_data.sh")) - params {:image "data/train-images-idx3-ubyte" - :label "data/train-labels-idx1-ubyte" - :data-shape [784] - :batch-size 100 - :shuffle 1 - :flat 1 - :silent 0 - :seed 10} - mnist-iter1 (mx-io/mnist-iter params) - mnist-iter2 (mx-io/mnist-iter params) - nbatch 600 - prefetch-iter (mx-io/prefetching-iter [mnist-iter1 mnist-iter2] - [{"data" "data1"} {"data" "data2"}] - [{"label" "label1"} {"label" "label2"}])] - (is (= nbatch (mx-io/reduce-batches prefetch-iter (fn [result batch] (inc result))))) - (let [provide-data (mx-io/provide-data prefetch-iter) - provide-label (mx-io/provide-label prefetch-iter)] - (is (= #{[100 784]} (into #{} (map :shape provide-data)))) - (is (= #{[100]} (into #{} (map :shape provide-label)))) - (mx-io/dispose prefetch-iter)))) - -(deftest test-ndarray-iter - (let [shape0 [1000 2 2] - data [(ndarray/ones shape0) (ndarray/zeros shape0)] - shape1 [1000 1] - label [(ndarray/ones shape1)] - batch-data0 (ndarray/ones [128 2 2]) - batch-data1 (ndarray/zeros [128 2 2]) - batch-label (ndarray/ones [128 1])] - - ;; test pad - (let [data-iter0 (mx-io/ndarray-iter data {:label label - :data-batch-size 128 - :shuffle false - :last-batch-handle "pad"}) - nbatch0 8] - (is (= nbatch0 (count (mx-io/for-batches data-iter0 (fn [batch] 1))))) - (is (every? true? (mx-io/for-batches data-iter0 - (fn [batch] - (= batch-data0 - (first (mx-io/batch-data batch))))))) - (is (every? true? (mx-io/for-batches data-iter0 - (fn [batch] - (= batch-data1 - (second (mx-io/batch-data batch))))))) - (is (every? true? 
(mx-io/for-batches data-iter0 - (fn [batch] - (= batch-label - (first (mx-io/batch-label batch)))))))) - - ;; test discard - (let [data-iter1 (mx-io/ndarray-iter data {:label label - :data-batch-size 128 - :shuffle false - :last-batch-handle "discard"}) - nbatch1 7] - (is (= nbatch1 (mx-io/reduce-batches data-iter1 (fn [result batch] (inc result)))))) - - ;; test empty label for prediction - (let [data-iter2 (mx-io/ndarray-iter data {:data-batch-size 128 - :shuffle false - :last-batch-handle "discard"}) - nbatch2 7] - (is (= nbatch2 (mx-io/reduce-batches data-iter2 (fn [result batch] (inc result))))) - (is (= [] (mx-io/iter-init-label data-iter2)))) - - ;;; testing with a specified layout - (let [label-desc (mx-io/data-desc {:name "label" - :shape [2 2] - :dtype dtype/INT32 - :layout layout/NT}) - data-desc (mx-io/data-desc {:name "data" - :shape [2 2 2] - :dtype dtype/FLOAT32 - :layout layout/NTC}) - label (ndarray/ones [2 2] {:dtype dtype/INT32}) - data (ndarray/ones [2 2 2] {:dtype dtype/FLOAT32}) - data-iter3 (mx-io/ndarray-iter {data-desc data} - {:label {label-desc label}})] - (is (= {:dtype dtype/FLOAT32 :layout layout/NTC} - (-> (mx-io/provide-data-desc data-iter3) - first - (select-keys [:dtype :layout])))) - (is (= {:dtype dtype/INT32 :layout layout/NT} - (-> (mx-io/provide-label-desc data-iter3) - first - (select-keys [:dtype :layout]))))))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/kvstore_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/kvstore_test.clj deleted file mode 100644 index a01f94960d12..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/kvstore_test.clj +++ /dev/null @@ -1,81 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - -(ns org.apache.clojure-mxnet.kvstore-test - (:require [org.apache.clojure-mxnet.kvstore :as kvstore] - [clojure.test :refer :all] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.context :as context])) - -(deftest test-init-and-pull - (let [kv (kvstore/create) - shape [2 1] - out (ndarray/zeros shape)] - (-> kv - (kvstore/init "3" (ndarray/ones shape)) - (kvstore/pull "3" out)) - (is (= [1.0 1.0] (ndarray/->vec out))))) - -(deftest test-push-and-pull - (let [kv (kvstore/create) - shape [2 1] - out (ndarray/zeros shape)] - (-> kv - (kvstore/init "3" (ndarray/ones shape)) - (kvstore/push "3" (ndarray/* (ndarray/ones shape) 4)) - (kvstore/pull "3" out)) - (is (= [4.0 4.0] (ndarray/->vec out))))) - -(deftest test-aggregate - (let [shape [4 4] - ks ["b" "c" "d"] - kv (kvstore/create) - num-devs 4 - devs (mapv (fn [_] (context/cpu)) (range num-devs)) - vals (mapv #(ndarray/ones shape {:ctx %}) devs)] - (-> kv - (kvstore/init "a" (ndarray/zeros shape)) - (kvstore/init ks [(ndarray/zeros shape) (ndarray/zeros shape) (ndarray/zeros shape)]) - (kvstore/push "a" vals) - (kvstore/pull "a" vals)) - (is (= 0.0 (->> vals - (mapv ndarray/->vec) - flatten - (map #(- % num-devs)) - (apply +)))) - (let [result (for [k ks] - (let [tmp-vals (mapv #(ndarray/* (ndarray/ones shape {:ctx %}) 2.0) devs)] - (-> kv - (kvstore/push k tmp-vals) - (kvstore/pull k tmp-vals)) - (map ndarray/->vec tmp-vals)))] - (is (= 0.0 (->> result - (flatten) - (map #(- % (* num-devs 2))) - (apply +))))))) - -(deftest test-type - (is (= "local" (-> (kvstore/create "local") - (kvstore/type))))) - -(deftest test-get-numworkers - (is (= 1 (-> (kvstore/create "local") - (kvstore/num-workers))))) - -(deftest test-get-rank - (is (= 0 (-> (kvstore/create "local") - (kvstore/rank))))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/lr_scheduler_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/lr_scheduler_test.clj deleted file mode 100644 index c60389a87020..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/lr_scheduler_test.clj +++ /dev/null @@ -1,24 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - -(ns org.apache.clojure-mxnet.lr-scheduler-test - (:require [org.apache.clojure-mxnet.lr-scheduler :as lr-scheduler] - [clojure.test :refer :all])) - -(deftest test-factor-scheduler - ;; just exercising - (lr-scheduler/factor-scheduler 2 0.3)) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/ndarray_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/ndarray_test.clj deleted file mode 100644 index 13209e609a1d..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/ndarray_test.clj +++ /dev/null @@ -1,499 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.ndarray-test - (:require [org.apache.clojure-mxnet.base :as base] - [org.apache.clojure-mxnet.context :as ctx] - [org.apache.clojure-mxnet.dtype :as dtype] - [org.apache.clojure-mxnet.ndarray :as ndarray :refer [->vec zeros ones += -= *= full shape]] - [org.apache.clojure-mxnet.shape :as mx-shape] - [org.apache.clojure-mxnet.test-util :as test-util] - [clojure.test :refer :all])) - -(deftest test->vec - (is (= [0.0 0.0 0.0 0.0] (->vec (zeros [2 2]))))) - -(deftest test-to-array - (is (= [0.0 0.0 0.0 0.0] (vec (ndarray/to-array (zeros [2 2])))))) - -(deftest test-to-scalar - (is (= 0.0 (ndarray/to-scalar (zeros [1])))) - (is (= 1.0 (ndarray/to-scalar (ones [1])))) - (is (thrown-with-msg?
Exception #"The current array is not a scalar" - (ndarray/to-scalar (zeros [1 1]))))) - -(deftest test-size-and-shape - (let [m (zeros [4 1])] - (is (= (mx-shape/->shape [4 1]) (ndarray/shape m))) - (is (= 4 (ndarray/size m))))) - -(deftest test-dtype - (is (= base/MX_REAL_TYPE (ndarray/dtype (zeros [3 2]))))) - -(deftest test-set-scalar-value - (is (= [10.0 10.0] (-> (ndarray/empty [2 1]) - (ndarray/set 10) - (->vec))))) - -(deftest test-copy-from-vector - (is (= [1.0 2.0 3.0 4.0] (-> (ndarray/empty [4 1]) - (ndarray/set [1 2 3 4]) - (->vec))))) - -(deftest test-plus - (let [ndzeros (zeros [2 1]) - ndones (ndarray/+ ndzeros 1)] - (is (= [1.0 1.0] (->vec ndones))) - (is (= [2.0 2.0] (->vec (ndarray/+ ndones 1)))) - (is (= [1.0 1.0] (->vec ndones))) - ;;; += mutuates - (is (= [2.0 2.0] (->vec (+= ndones 1)))) - (is (= [2.0 2.0] (->vec ndones))))) - -(deftest test-minus - (let [ndones (ones [2 1]) - ndzeros (ndarray/- ndones 1)] - (is (= [0.0 0.0] (->vec ndzeros))) - (is (= [-1.0 -1.0] (->vec (ndarray/- ndzeros 1)))) - (is (= [0.0 0.0] (->vec ndzeros))) - ;;; += mutuates - (is (= [-1.0 -1.0] (->vec (-= ndzeros 1)))) - (is (= [-1.0 -1.0] (->vec ndzeros))))) - -(deftest test-multiplication - (let [ndones (ones [2 1]) - ndtwos (ndarray/* ndones 2)] - (is (= [2.0 2.0] (->vec ndtwos))) - (is (= [1.0 1.0] (->vec (ndarray/* ndones ndones)))) - (is (= [4.0 4.0] (->vec (ndarray/* ndtwos ndtwos)))) - ;; *= mutates - (is (= [4.0 4.0] (->vec (*= ndtwos ndtwos)))) - (is (= [4.0 4.0] (->vec ndtwos))))) - -(deftest test-division - (let [ndones (ones [2 1]) - ndzeros (ndarray/- ndones 1) - ndhalves (ndarray/div ndones 2)] - (is (= [0.5 0.5] (->vec ndhalves))) - (is (= [1.0 1.0] (->vec (ndarray/div ndhalves ndhalves)))) - (is (= [1.0 1.0] (->vec (ndarray/div ndones ndones)))) - (is (= [0.0 0.0] (->vec (ndarray/div ndzeros ndones)))) - ;; div= mutates - (is (= [1.0 1.0] (->vec (ndarray/div= ndhalves ndhalves)))) - (is (= [1.0 1.0] (->vec ndhalves))))) - -(deftest test-full - (let [nda (full [1 2] 3.0)] - (is (= (shape nda) (mx-shape/->shape [1 2]))) - (is (= [3.0 3.0] (->vec nda))))) - -(deftest test-clip - (let [nda (-> (ndarray/empty [3 2]) - (ndarray/set [1 2 3 4 5 6]))] - (is (= [2.0 2.0 3.0 4.0 5.0 5.0] (->vec (ndarray/clip nda 2 5)))))) - -(deftest test-sqrt - (let [nda (-> (ndarray/empty [4 1]) - (ndarray/set [0 1 4 9]))] - (is (= [0.0 1.0 2.0 3.0] (->vec (ndarray/sqrt nda)))))) - -(deftest test-rsqrt - (let [nda (ndarray/array [1.0 4.0] [2 1])] - (is (= [1.0 0.5] (->vec (ndarray/rsqrt nda)))))) - -(deftest test-norm - (let [nda (-> (ndarray/empty [3 1]) - (ndarray/set [1 2 3])) - normed (ndarray/norm nda)] - (is (= [1] (mx-shape/->vec (shape normed)))) - (is (test-util/approx= 1e-4 (Math/sqrt 14.0) (ndarray/to-scalar normed))))) - -(deftest test-one-hot-encode - (let [nda1 (ndarray/array [1 0 2] [3]) - nda2 (ndarray/empty [3 3]) - res (ndarray/onehot-encode nda1 nda2)] - (is (= [3 3] (mx-shape/->vec (shape res)))) - (is (= [0.0 1.0 0.0 - 1.0 0.0 0.0 - 0.0 0.0 1.0] (->vec res))))) - -(deftest test-dot - (let [nda1 (ndarray/array [1 2] [1 2]) - nda2 (ndarray/array [3 4] [2 1]) - res (ndarray/dot nda1 nda2)] - (is (= [1 1] (mx-shape/->vec (shape res)))) - (is (= [11.0] (->vec res))))) - -(deftest test-arrange - (let [start 0 - stop 5 - step 0.5 - repeat 2] - (is (= [0.0 0.0 0.5 0.5 1.0 1.0 1.5 1.5 2.0 2.0 2.5 2.5 3.0 3.0 3.5 3.5 4.0 4.0 4.5 4.5] - (->vec (ndarray/arange start stop {:step step :repeat repeat})))))) - -(deftest test->ndarray - (let [nda1 (ndarray/->ndarray [5.0 -4.0]) - nda2 
(ndarray/->ndarray [[1 2 3] - [4 5 6]]) - nda3 (ndarray/->ndarray [[[7.0] [8.0]]])] - (is (= [5.0 -4.0] (->vec nda1))) - (is (= [2] (mx-shape/->vec (shape nda1)))) - (is (= [1.0 2.0 3.0 4.0 5.0 6.0] (->vec nda2))) - (is (= [2 3] (mx-shape/->vec (shape nda2)))) - (is (= [7.0 8.0] (->vec nda3))) - (is (= [1 2 1] (mx-shape/->vec (shape nda3)))))) - -(deftest test-power - (let [nda (ndarray/array [3 5] [2 1])] - - (let [nda-power-1 (ndarray/power 2 nda)] - (is (= [2 1] (-> nda-power-1 shape mx-shape/->vec))) - (is (= [8.0 32.0] (->vec nda-power-1)))) - - (let [nda-power-2 (ndarray/power nda 2)] - (is (= [2 1] (-> nda-power-2 shape mx-shape/->vec))) - (is (= [9.0 25.0] (->vec nda-power-2)))) - - (let [nda-power-3 (ndarray/power nda nda)] - (is (= [2 1] (-> nda-power-3 shape mx-shape/->vec))) - (is (= [27.0 3125.0] (->vec nda-power-3)))) - - (let [nda-power-4 (ndarray/** nda 2)] - (is (= [2 1] (-> nda-power-4 shape mx-shape/->vec))) - (is (= [9.0 25.0] (->vec nda-power-4)))) - - (let [nda-power-5 (ndarray/** nda nda)] - (is (= [2 1] (-> nda-power-5 shape mx-shape/->vec))) - (is (= [27.0 3125.0] (->vec nda-power-5)))) - - (let [_ (ndarray/**= nda 2)] - (is (= [2 1] (-> nda shape mx-shape/->vec))) - (is (= [9.0 25.0] (->vec nda)))) - - (let [_ (ndarray/set nda [3 5]) - _ (ndarray/**= nda nda)] - (is (= [2 1] (-> nda shape mx-shape/->vec))) - (is (= [27.0 3125.0] (->vec nda)))))) - -(deftest test-equal - (let [nda1 (ndarray/array [1 2 3 5] [2 2]) - nda2 (ndarray/array [1 4 3 6] [2 2])] - - (is (= [2 2] (-> (ndarray/equal nda1 nda2) shape mx-shape/->vec))) - (is (= [1.0 0.0 1.0 0.0] (->vec (ndarray/equal nda1 nda2)))) - - (is (= [2 2] (-> (ndarray/equal nda1 3) shape mx-shape/->vec))) - (is (= [0.0 0.0 1.0 0.0] (->vec (ndarray/equal nda1 3)))))) - -(deftest test-not-equal - (let [nda1 (ndarray/array [1 2 3 5] [2 2]) - nda2 (ndarray/array [1 4 3 6] [2 2])] - - (is (= [2 2] (-> (ndarray/not-equal nda1 nda2) shape mx-shape/->vec))) - (is (= [0.0 1.0 0.0 1.0] (->vec (ndarray/not-equal nda1 nda2)))) - - (is (= [2 2] (-> (ndarray/not-equal nda1 3) shape mx-shape/->vec))) - (is (= [1.0 1.0 0.0 1.0] (->vec (ndarray/not-equal nda1 3)))))) - -(deftest test-greater - (let [nda1 (ndarray/array [1 2 4 5] [2 2]) - nda2 (ndarray/array [1 4 3 6] [2 2])] - - (is (= [2 2] (-> (ndarray/> nda1 nda2) shape mx-shape/->vec))) - (is (= [0.0 0.0 1.0 0.0] (->vec (ndarray/> nda1 nda2)))) - - (is (= [2 2] (-> (ndarray/> nda1 2) shape mx-shape/->vec))) - (is (= [0.0 0.0 1.0 1.0] (->vec (ndarray/> nda1 2)))))) - -(deftest test-greater-equal - (let [nda1 (ndarray/array [1 2 4 5] [2 2]) - nda2 (ndarray/array [1 4 3 6] [2 2])] - - (is (= [2 2] (-> (ndarray/>= nda1 nda2) shape mx-shape/->vec))) - (is (= [1.0 0.0 1.0 0.0] (->vec (ndarray/>= nda1 nda2)))) - - (is (= [2 2] (-> (ndarray/>= nda1 2) shape mx-shape/->vec))) - (is (= [0.0 1.0 1.0 1.0] (->vec (ndarray/>= nda1 2)))))) - -(deftest test-lesser - (let [nda1 (ndarray/array [1 2 4 5] [2 2]) - nda2 (ndarray/array [1 4 3 6] [2 2])] - - (is (= [2 2] (-> (ndarray/< nda1 nda2) shape mx-shape/->vec))) - (is (= [0.0 1.0 0.0 1.0] (->vec (ndarray/< nda1 nda2)))) - - (is (= [2 2] (-> (ndarray/< nda1 2) shape mx-shape/->vec))) - (is (= [1.0 0.0 0.0 0.0] (->vec (ndarray/< nda1 2)))))) - -(deftest test-lesser-equal - (let [nda1 (ndarray/array [1 2 4 5] [2 2]) - nda2 (ndarray/array [1 4 3 6] [2 2])] - - (is (= [2 2] (-> (ndarray/<= nda1 nda2) shape mx-shape/->vec))) - (is (= [1.0 1.0 0.0 1.0] (->vec (ndarray/<= nda1 nda2)))) - - (is (= [2 2] (-> (ndarray/<= nda1 2) shape mx-shape/->vec))) - (is
(= [1.0 1.0 0.0 0.0] (->vec (ndarray/<= nda1 2)))))) - -(deftest test-choose-element-0index - (let [nda (ndarray/array [1 2 3 4 6 5] [2 3]) - indices (ndarray/array [0 1] [2]) - res (ndarray/choose-element-0index nda indices)] - (is (= [1.0 6.0] (->vec res))))) - -(deftest test-copy-to - (let [source (ndarray/array [1 2 3] [1 3]) - dest (ndarray/empty [1 3]) - _ (ndarray/copy-to source dest)] - (is (= [1 3] (-> dest shape mx-shape/->vec))) - (is (= [1.0 2.0 3.0] (->vec dest))))) - -(deftest test-abs - (let [nda (ndarray/array [-1 -2 3] [3 1])] - (is (= [1.0 2.0 3.0] (->vec (ndarray/abs nda)))))) - -(deftest test-sign - (let [nda (ndarray/array [-1 -2 3] [3 1])] - (is (= [-1.0 -1.0 1.0] (->vec (ndarray/sign nda)))))) - -(deftest test-round - (let [nda (ndarray/array [1.5 2.1 3.7] [3 1])] - (is (= [2.0 2.0 4.0] (->vec (ndarray/round nda)))))) - -(deftest test-ceil - (let [nda (ndarray/array [1.5 2.1 3.7] [3 1])] - (is (= [2.0 3.0 4.0] (->vec (ndarray/ceil nda)))))) - -(deftest test-floor - (let [nda (ndarray/array [1.5 2.1 3.7] [3 1])] - (is (= [1.0 2.0 3.0] (->vec (ndarray/floor nda)))))) - -(deftest test-square - (let [nda (ndarray/array [1 2 3] [3 1])] - (is (= [1.0 4.0 9.0] (->vec (ndarray/square nda)))))) - -(deftest test-exp - (let [nda (ones [1])] - (is (test-util/approx= 1e-3 2.71828 (ndarray/to-scalar (ndarray/exp nda)))))) - -(deftest test-log - (let [nda (-> (ndarray/empty [1]) - (ndarray/set 10))] - (is (test-util/approx= 1e-3 2.30258 (ndarray/to-scalar (ndarray/log nda)))))) - -(deftest test-cos - (let [nda (-> (ndarray/empty [1]) - (ndarray/set 12))] - (is (test-util/approx= 1e-3 0.8438539 (ndarray/to-scalar (ndarray/cos nda)))))) - -(deftest test-sin - (let [nda (-> (ndarray/empty [1]) - (ndarray/set 12))] - (is (test-util/approx= 1e-3 -0.536572918 (ndarray/to-scalar (ndarray/sin nda)))))) - -(deftest test-max - (let [nda (ndarray/array [1.5 2.1 3.7] [3 1])] - (is (test-util/approx= 1e-3 3.7 (ndarray/to-scalar (ndarray/max nda)))))) - -(deftest test-maximum - (let [nda1 (ndarray/array [1.5 2.1 3.7] [3 1]) - nda2 (ndarray/array [4 1 3.5] [3 1]) - res (ndarray/maximum nda1 nda2)] - (is (= [3 1] (-> res shape mx-shape/->vec))) - (is (test-util/approx= 1e-3 [4.0 2.1 3.7] (->vec res))))) - -(deftest test-min - (let [nda (ndarray/array [1.5 2.1 3.7] [3 1])] - (is (test-util/approx= 1e-3 1.5 (ndarray/to-scalar (ndarray/min nda)))))) - -(deftest test-minimum - (let [nda1 (ndarray/array [1.5 2.1 3.7] [3 1]) - nda2 (ndarray/array [4 1 3.5] [3 1]) - res (ndarray/minimum nda1 nda2)] - (is (= [3 1] (-> res shape mx-shape/->vec))) - (is (test-util/approx= 1e-3 [1.5 1.0 3.5] (->vec res))))) - -(deftest test-sum - (let [nda (ndarray/array [1 2 3 4] [2 2])] - (is (test-util/approx= 1e-3 10.0 (ndarray/to-scalar (ndarray/sum nda)))))) - -(deftest test-argmax-channel - (let [nda (ndarray/array [1 2 4 3] [2 2]) - argmax (ndarray/argmax-channel nda)] - (is (= [2] (-> argmax shape mx-shape/->vec))) - (is (= [1.0 0.0] (->vec argmax))))) - -(deftest test-concatenate-axis-0 - (let [nda1 (ndarray/array [1 2 4 3 3 3] [2 3]) - nda2 (ndarray/array [8 7 6] [1 3]) - res (ndarray/concatenate [nda1 nda2])] - (is (= [3 3] (-> res shape mx-shape/->vec))) - (is (= [1.0 2.0 4.0 3.0 3.0 3.0 8.0 7.0 6.0] (->vec res))))) - -(deftest test-concatenate-axis-1 - (let [nda1 (ndarray/array [1 2 3 4] [2 2]) - nda2 (ndarray/array [5 6] [2 1]) - res (ndarray/concatenate [nda1 nda2] {:axis 1})] - (is (= [2 3] (-> res shape mx-shape/->vec))) - (is (= [1.0 2.0 5.0 3.0 4.0 6.0] (->vec res))))) - -(deftest test-transpose - (let 
[nda (ndarray/array [1 2 4 3 3 3] [2 3])] - (is (= [1.0 2.0 4.0 3.0 3.0 3.0] (->vec nda))) - (is (= [3 2] (-> (ndarray/t nda) shape mx-shape/->vec))) - (is (= [1.0 3.0 2.0 3.0 4.0 3.0] (->vec (ndarray/t nda)))))) - -(def file-seq-num (atom 0)) - -(deftest test-save-and-load-with-names - (let [filename (str (System/getProperty "java.io.tmpdir") "/ndarray" (swap! file-seq-num inc) ".bin") - nda (ndarray/array [1 2 3] [3 1]) - _ (ndarray/save filename {"local" nda}) - load-map (ndarray/load filename)] - (is (= ["local"] (keys load-map))) - (is (= 1 (count (vals load-map)))) - (is (= [3 1] (-> (get load-map "local") shape mx-shape/->vec))) - (is (= [1.0 2.0 3.0] (->vec (get load-map "local")))))) - -(deftest test-save-to-file-and-load-from-file - (let [filename (str (System/getProperty "java.io.tmpdir") "/ndarray" (swap! file-seq-num inc) ".bin") - nda (ndarray/array [1 2 3] [3 1]) - _ (ndarray/save-to-file filename nda) - load-nda (ndarray/load-from-file filename)] - (is (= [3 1] (-> load-nda shape mx-shape/->vec))) - (is (= [1.0 2.0 3.0] (->vec load-nda))))) - -(deftest test-get-context - (let [nda (ones [3 2]) - ctx (ndarray/context nda)] - (is (= "cpu" (ctx/device-type ctx))) - (is (= 0 (ctx/device-id ctx))))) - -(deftest test-equals - (let [nda1 (ndarray/array [1 2 3] [3 1]) - nda2 (ndarray/array [1 2 3] [3 1]) - nda3 (ndarray/array [1 2 3] [1 3]) - nda4 (ndarray/array [3 2 3] [3 1])] - (is (= nda1 nda2)) - (is (not= nda1 nda3)) - (is (not= nda1 nda4)))) - -(deftest test-slice - (let [nda (ndarray/array [1 2 3 4 5 6] [3 2])] - - (let [nda1 (ndarray/slice nda 1)] - (is (= [1 2] (-> nda1 shape mx-shape/->vec))) - (is (= [3.0 4.0] (->vec nda1)))) - - (let [nda2 (ndarray/slice nda 1 3)] - (is (= [2 2] (-> nda2 shape mx-shape/->vec))) - (is (= [3.0 4.0 5.0 6.0] (->vec nda2)))))) - -(deftest test-at - (let [nda (ndarray/array [1 2 3 4 5 6] [3 2]) - res (ndarray/at nda 1)] - (is (= [2] (-> res shape mx-shape/->vec))) - (is (= [3 4] (-> res ndarray/->int-vec))))) - -(deftest test-reshape - (let [nda (ndarray/array [1 2 3 4 5 6] [3 2]) - nda1 (ndarray/reshape nda [2 3])] - (is (= [2 3] (-> nda1 shape mx-shape/->vec))) - (is (= [1.0 2.0 3.0 4.0 5.0 6.0] (->vec nda1))))) - -(deftest test-dispose-deps - (let [nda1 (ones [1 2]) - nda2 (ones [1 2]) - nda3 (ones [1 2]) - nda-with-deps (ndarray/+ nda3 (ndarray/+ nda1 nda2))] - (is (= 4 (ndarray/size (ndarray/dependencies nda-with-deps)))) - (is (contains? (-> (ndarray/dependencies nda-with-deps) keys set) (ndarray/handle nda1))) - (is (contains? (-> (ndarray/dependencies nda-with-deps) keys set) (ndarray/handle nda2))) - (is (contains? (-> (ndarray/dependencies nda-with-deps) keys set) (ndarray/handle nda3))) - (is (not (ndarray/is-disposed nda1))) - (is (not (ndarray/is-disposed nda2))) - (is (not (ndarray/is-disposed nda3))) - - (let [nda-no-deps (ndarray/dispose-deps nda-with-deps)] - (is (= 0 (ndarray/size (ndarray/dependencies nda-no-deps)))) - (is (ndarray/is-disposed nda1)) - (is (ndarray/is-disposed nda2)) - (is (ndarray/is-disposed nda3))))) - -(deftest test-dispose-deps-except - (let [nda1 (ones [1 2]) - nda2 (ones [1 2]) - nda3 (ones [1 2]) - nda1-2 (ndarray/+ nda1 nda2)] - - (let [res (-> (ndarray/+ nda1 nda2) - (ndarray/+ nda1-2) - (ndarray/+ nda3) - (ndarray/dispose-deps-except nda1-2))] - (is (= 3 (ndarray/size (ndarray/dependencies res)))) - (is (contains? (-> (ndarray/dependencies res) keys set) (ndarray/handle nda1))) - (is (contains? (-> (ndarray/dependencies res) keys set) (ndarray/handle nda2))) - (is (contains? 
(-> (ndarray/dependencies res) keys set) (ndarray/handle nda1-2))) - (is (not (ndarray/is-disposed nda1))) - (is (not (ndarray/is-disposed nda2))) - (is (ndarray/is-disposed nda3))))) - -(deftest test-serialize-deserialize - (let [nda (ndarray/* (ndarray/ones [1 2]) 3) - nda-bytes (ndarray/serialize nda) - nda-copy (ndarray/deserialize nda-bytes)] - (is (= nda nda-copy)))) - -(deftest test-dtype-int32 - (let [nda (ndarray/* (ones [1 2] {:dtype dtype/INT32}) 2)] - (is (= dtype/INT32 (ndarray/dtype nda))) - (is (= 8 (count (ndarray/->raw nda)))) - (is (= [2.0 2.0] (ndarray/->float-vec nda))) - (is (= [2 2] (ndarray/->int-vec nda))) - (is (= [2.0 2.0] (ndarray/->double-vec nda))) - (is (= [(byte 2) (byte 2)] (ndarray/->byte-vec nda))))) - -(deftest test-dtype-uint8 - (let [nda (ndarray/* (ones [1 2] {:dtype dtype/UINT8}) 2)] - (is (= dtype/UINT8 (ndarray/dtype nda))) - (is (= 2 (count (ndarray/->raw nda)))) - (is (= [2.0 2.0] (ndarray/->float-vec nda))) - (is (= [2 2] (ndarray/->int-vec nda))) - (is (= [2.0 2.0] (ndarray/->double-vec nda))) - (is (= [(byte 2) (byte 2)] (ndarray/->byte-vec nda))))) - -(deftest test-dtype-float64 - (let [nda (ndarray/* (ones [1 2] {:dtype dtype/FLOAT64}) 2)] - (is (= dtype/FLOAT64 (ndarray/dtype nda))) - (is (= 16 (count (ndarray/->raw nda)))) - (is (= [2.0 2.0] (ndarray/->float-vec nda))) - (is (= [2 2] (ndarray/->int-vec nda))) - (is (= [2.0 2.0] (ndarray/->double-vec nda))) - (is (= [(byte 2) (byte 2)] (ndarray/->byte-vec nda))))) - -(deftest test->nd-vec - (is (= [[[1.0]]] - (ndarray/->nd-vec (ndarray/array [1] [1 1 1])))) - (is (= [[[1.0]] [[2.0]] [[3.0]]] - (ndarray/->nd-vec (ndarray/array [1 2 3] [3 1 1])))) - (is (= [[[1.0 2.0]] [[3.0 4.0]] [[5.0 6.0]]] - (ndarray/->nd-vec (ndarray/array [1 2 3 4 5 6] [3 1 2])))) - (is (= [[[1.0] [2.0]] [[3.0] [4.0]] [[5.0] [6.0]]] - (ndarray/->nd-vec (ndarray/array [1 2 3 4 5 6] [3 2 1])))) - (is (thrown-with-msg? Exception #"Invalid input array" - (ndarray/->nd-vec [1 2 3 4 5])))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/operator_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/operator_test.clj deleted file mode 100644 index 2498e3027bcf..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/operator_test.clj +++ /dev/null @@ -1,588 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - -(ns org.apache.clojure-mxnet.operator-test - (:require [org.apache.clojure-mxnet.context :as context] - [org.apache.clojure-mxnet.executor :as executor] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.random :as random] - [org.apache.clojure-mxnet.shape :as mx-shape] - [org.apache.clojure-mxnet.symbol :as sym] - [org.apache.clojure-mxnet.util :as util] - [org.apache.clojure-mxnet.test-util :as test-util] - [clojure.test :refer :all]) - (:import (org.apache.mxnet NDArray))) - -(defn approx= [tolerance x y] - (test-util/approx= tolerance - (if (instance? NDArray x) (ndarray/->vec x) x) - (if (instance? NDArray y) (ndarray/->vec y) y))) - -(deftest test-elementwise-sum - (let [n 4 - shape-vec [5 5 3] - inputs (mapv (fn [i] (sym/variable (str "arg" i))) (range n)) - out (sym/element-wise-sum "esum" inputs) - arr (into [] (repeatedly n #(random/uniform -10 10 shape-vec))) - arr-grad (into [] (repeatedly n #(ndarray/empty shape-vec))) - exec (sym/bind out (context/default-context) arr arr-grad) - forward-output (-> exec (executor/forward) (executor/outputs) first) - forward-output-expected (reduce ndarray/+ arr)] - (is (approx= 1e-4 forward-output-expected forward-output)) - - ;; backward - (let [out-grad (random/uniform -10 10 shape-vec) - _ (executor/backward exec out-grad)] - (doseq [grad arr-grad] - (is (= out-grad grad)))))) - -(deftest test-concat - (let [a (sym/variable "a") - b (sym/variable "b") - c (sym/concat "conc" nil [a b] {:dim 0}) - exec (sym/bind c (context/default-context) {"a" (ndarray/array [1 2] [2 1]) - "b" (ndarray/array [3 4] [2 1])}) - output (-> (executor/forward exec) - (executor/outputs) - (first))] - (is (= [1.0 2.0 3.0 4.0] (ndarray/->vec output))) - (is (= [4 1] (ndarray/shape-vec output))))) - -(defn check-regression [model forward-fn backward-fn] - (let [shape-vec [3 1] - arr-data (random/uniform -1 1 shape-vec) - arr-label (random/uniform -1 1 [(first shape-vec)]) - arr-grad (ndarray/empty shape-vec) - exec1 (sym/bind model (context/default-context) [arr-data arr-label] {:data arr-grad}) - out1 (-> exec1 (executor/forward) (executor/outputs) first) - np-out (map forward-fn - (ndarray/->vec arr-data))] - (is (= shape-vec (-> out1 ndarray/shape mx-shape/->vec))) - (is (approx= 1e-6 np-out out1)) - - ;; backward - (executor/backward exec1) - (let [npout-back (mapv backward-fn - np-out (ndarray/->vec arr-label))] - (is (approx= 1e-6 npout-back arr-grad))))) - -(deftest swap-axes - (let [data (sym/variable "data") - shape-vec [2 3 4] - arr-data (ndarray/ones shape-vec)] - - (-> (ndarray/slice arr-data 0) - (ndarray/set 1)) - - (-> (ndarray/slice arr-data 1) - (ndarray/set 2)) - - ;; [[[ 1., 1., 1., 1.], - ;; [ 1., 1., 1., 1.], - ;; [ 1., 1., 1., 1.]], - ;; - ;; [[ 2., 2., 2., 2.], - ;; [ 2., 2., 2., 2.], - ;; [ 2., 2., 2., 2.]]] - - (let [swap0 (sym/swap-axis {:data data :dim1 0 :dim2 2}) - swap (sym/swap-axis {:data swap0 :dim1 1 :dim2 2}) - exec (sym/bind swap (context/default-context) arr-data) - out (-> (executor/forward exec) - (executor/outputs) - first)] - ;; After swapaxes(swapaxes(arrData, 0, 2), 1, 2) - ;; out should be - ;; [[[ 1., 1., 1.], - ;; [ 2., 2., 2.]], - ;; - ;; [[ 1., 1., 1.], - ;; [ 2., 2., 2.]], - ;; - ;; [[ 1., 1., 1.], - ;; [ 2., 2., 2.]], - ;; - ;; [[ 1., 1., 1.], - ;; [ 2., 2., 2.]]] - (is (= [4 2 3] (mx-shape/->vec (ndarray/shape out)))) - (doseq [i (range 4)] - (let [val (ndarray/->vec (ndarray/slice out i))] - (is (approx= 1e-6 [1 1 1 2 2 2] val))))))) - -(defn check-symbolic-forward [test-sym location expected
tolerance] - (let [arr-data (mapv #(ndarray/copy %) location) - arr-grad (mapv #(ndarray/empty (mx-shape/->vec (ndarray/shape %))) location) - exec (sym/bind test-sym (context/default-context) arr-data arr-grad) - outputs (-> exec - (executor/forward) - (executor/outputs))] - (is (every? true? (map - (fn [x y] - #_(println "expected " (ndarray/->vec x)) - #_(println "actual " (ndarray/->vec y)) - (approx= tolerance x y)) - expected - outputs))))) - -(defn check-symbolic-backward [test-sym location grad expected tolerance] - (let [arr-data (mapv #(ndarray/copy %) location) - arr-grad (mapv #(ndarray/empty (mx-shape/->vec (ndarray/shape %))) location) - out-grad (mapv #(ndarray/copy %) grad) - exec (sym/bind test-sym (context/default-context) arr-data arr-grad) - exec (-> exec - (executor/forward) - (executor/backward out-grad)) - grad-arrays (executor/grad-arrays exec)] - (is (every? true? (map - (fn [x y] - #_(println "expected " (ndarray/->vec x)) - #_(println "actual " (ndarray/->vec y)) - (approx= tolerance x y)) - expected - grad-arrays))))) - -(deftest test-scalar-op - (let [data (sym/variable "data") - shape-vec [3 4] - data-tmp (ndarray/* (ndarray/ones shape-vec) 5) - ;; (4x + 2)/2 - test (-> (sym/* data 4) - (sym/+ 2) - (sym/div 2)) - npout (-> (ndarray/* data-tmp 4) - (ndarray/+ 2) - (ndarray/div 2)) - ;; backward deriv is 2 - np-out-grad (ndarray/* (ndarray/ones shape-vec) 2)] - - (check-symbolic-forward test [data-tmp] [npout] 1e-5) - (check-symbolic-backward test [data-tmp] [(ndarray/ones shape-vec)] [np-out-grad] 1e-5))) - -(deftest ones - (let [ones (sym/ones [2 2]) - exec (sym/simple-bind ones (context/default-context))] - (is (approx= 1e-4 - [1 1 1 1] - (-> exec (executor/forward) (executor/outputs) (first)))))) - -(deftest zeros - (let [zeros (sym/zeros [2 2]) - exec (sym/simple-bind zeros (context/default-context))] - (is (approx= 1e-4 - [0 0 0 0] - (-> exec (executor/forward) (executor/outputs) (first)))))) - -(deftest test-arange - (let [start 1 - stop 100 - step 2 - result (range start stop step) - x (sym/arange start stop {:step step}) - exec (sym/simple-bind x (context/default-context))] - (executor/forward exec) - (is (= 0 (count (executor/grad-arrays exec)))) - (is (approx= 1e-4 result (-> (executor/outputs exec) (first)))))) - -(deftest test-arange-with-inference - (let [arange (sym/arange-with-inference 0) - data (sym/variable "data") - added (sym/+ arange data) - result (range 0 4) - data-tmp (ndarray/zeros [4]) - exec (sym/bind added (context/default-context) {"data" data-tmp})] - (executor/forward exec) - (is (= 0 (count (executor/grad-arrays exec)))) - (is (approx= 1e-4 result (-> (executor/outputs exec) (first)))))) - -(deftest test-scalar-pow - (let [data (sym/variable "data") - shape-vec [1 1] - data-tmp (ndarray/* (ndarray/ones shape-vec) 3) - data-tmp-powered (ndarray/* (ndarray/ones shape-vec) 9) - test (sym/** data 2)] - (check-symbolic-forward test [data-tmp] [data-tmp-powered] 1e-5) - (check-symbolic-backward test [data-tmp] [(ndarray/ones shape-vec)] [(ndarray/* data-tmp 2)] 1e-5))) - -(deftest test-symbol-pow - (let [shape-vec [1 1] - data (sym/variable "data") - data-tmp (ndarray/* (ndarray/ones shape-vec) 2) - exp (sym/variable "exp") - exp-tmp (ndarray/* (ndarray/ones shape-vec) 3) - test (sym/** data exp)] - (check-symbolic-forward test [data-tmp exp-tmp] [(ndarray/* (ndarray/ones shape-vec) 8)] 1e-5) - (let [data-deriv (ndarray/* (ndarray/* (ndarray/ones shape-vec) 4) exp-tmp) - exp-deriv (ndarray/* (ndarray/* (ndarray/ones shape-vec) 8) - 
(ndarray/* (ndarray/ones shape-vec) (Math/log 2)))] - (check-symbolic-backward test - [data-tmp exp-tmp] - [(ndarray/ones shape-vec)] - [data-deriv exp-deriv] 1e-5)))) - -(deftest test-pow-fn - (let [shape-vec [3 4] - exp (sym/variable "exp") - y (sym/** exp 2) - x (ndarray/* (ndarray/ones shape-vec) 3)] - (check-symbolic-forward y [x] [(ndarray/* (ndarray/ones shape-vec) 9)] 1e-5) - ;; deriv is 2x - (check-symbolic-backward y - [x] - [(ndarray/ones shape-vec)] - [(-> (ndarray/ones shape-vec) - (ndarray/* 6))] - 1e-5))) - -(defn check-scalar-operation - [operator data-vec num expected] - (let [data (sym/variable "datas") - shape-vec [2 2] - test (operator data num) - exec (sym/simple-bind test (context/default-context) {"datas" shape-vec}) - _ (executor/set-arg exec "datas" data-vec) - output (-> (executor/forward exec) (executor/outputs) first)] - (is (approx= 1e-5 expected output)) - (is (= [0 0 0 0] (-> (executor/backward exec (ndarray/ones shape-vec)) - (executor/get-grad "datas") - (ndarray/->int-vec)))))) - -(defn check-symbol-operation - [operator data-vec-1 data-vec-2 expected] - (let [data (sym/variable "datas") - data2 (sym/variable "datas2") - shape-vec [2 2] - test (operator data data2) - exec (sym/simple-bind test (context/default-context) {"datas" shape-vec "datas2" shape-vec}) - _ (executor/set-arg exec "datas" data-vec-1) - _ (executor/set-arg exec "datas2" data-vec-2) - output (-> (executor/forward exec) (executor/outputs) first)] - (is (approx= 1e-5 expected output)) - (executor/backward exec (ndarray/ones shape-vec)) - (is (= [0 0 0 0] (-> (executor/get-grad exec "datas") (ndarray/->int-vec)))) - (is (= [0 0 0 0] (-> (executor/get-grad exec "datas2") (ndarray/->int-vec)))))) - -(defn check-scalar-2-operation - [operator data-vec expected] - (let [data (sym/variable "datas") - shape-vec [2 2] - test (operator data 2) - exec (sym/simple-bind test (context/default-context) {"datas" shape-vec}) - _ (executor/set-arg exec "datas" data-vec) - output (-> (executor/forward exec) (executor/outputs) first)] - (is (approx= 1e-5 expected output)) - (is (= [0 0 0 0] (-> (executor/backward exec (ndarray/ones shape-vec)) - (executor/get-grad "datas") - (ndarray/->int-vec)))))) - -(deftest test-scalar-equal - (check-scalar-operation sym/equal [1 2 3 4] 2 [0 1 0 0])) - -(deftest test-symbol-equal - (check-symbol-operation sym/equal [1 2 3 4] [1 3 2 6] [1 0 0 0])) - -(deftest test-scalar-equal-2 - (check-scalar-2-operation sym/equal [1 2 3 4] [0 1 0 0])) - -(deftest test-scalar-not-equal - (check-scalar-operation sym/not-equal [1 2 3 4] 2 [1 0 1 1])) - -(deftest test-symbol-not-equal - (check-symbol-operation sym/not-equal [1 2 3 4] [1 3 2 6] [0 1 1 1])) - -(deftest test-scalar-not-equal-2 - (check-scalar-2-operation sym/not-equal [1 2 3 4] [1 0 1 1])) - -(deftest test-scalar-greater - (check-scalar-operation sym/> [1 2 3 4] 2 [0 0 1 1])) - -(deftest test-symbol-greater - (check-symbol-operation sym/> [1 2 3 4] [1 3 2 6] [0 0 1 0])) - -(deftest test-scalar-greater-equal - (check-scalar-operation sym/>= [1 2 3 4] 2 [0 1 1 1])) - -(deftest test-symbol-greater-equal - (check-symbol-operation sym/>= [1 2 3 4] [1 3 2 6] [1 0 1 0])) - -(deftest test-scalar-lesser - (check-scalar-operation sym/< [1 2 3 4] 2 [1 0 0 0])) - -(deftest test-symbol-lesser - (check-symbol-operation sym/< [1 2 3 4] [1 3 2 6] [0 1 0 1])) - -(deftest test-scalar-lesser-equal - (check-scalar-operation sym/<= [1 2 3 4] 2 [1 1 0 0])) - -(deftest test-symbol-lesser-equal - (check-symbol-operation sym/<= [1 2 3 4] [1 3 2 6]
[1 1 0 1])) - -(deftest test-embedding - (let [data (sym/variable "data") - embed (sym/embedding "embed" {:data data :input-dim 10 :output-dim 4})] - (println "Embedded symbol:" (sym/to-json embed)))) - -(deftest test-binary-duplicate-input - (let [data (sym/variable "data") - shape-vec [3 4] - data-tmp (ndarray/* (ndarray/ones shape-vec) 5) - arr-data (ndarray/copy data-tmp) - arr-grad (ndarray/* (ndarray/ones shape-vec) 3) - out-grad (ndarray/ones shape-vec) - square (sym/* data data) - exec-square (sym/bind square (context/default-context) arr-data arr-grad)] - (executor/forward exec-square) - (is (approx= 1e-6 (ndarray/* data-tmp data-tmp) (-> (executor/outputs exec-square) (first)))) - (executor/backward exec-square out-grad) - (is (approx= 1e-6 (ndarray/* data-tmp 2) arr-grad)))) - -(deftest test-sign - (let [data (sym/variable "data") - shape-vec [3 4] - data-tmp (ndarray/* (ndarray/ones shape-vec) 5) - arr-data (ndarray/copy data-tmp) - arr-grad (ndarray/* (ndarray/ones shape-vec) 3) - - test (sym/sign data) - exec-test (sym/bind test (context/default-context) [arr-data] [arr-grad])] - (is (test-util/approx= 1e-6 - (-> (ndarray/sign data-tmp) (ndarray/->vec)) - (-> exec-test (executor/forward) (executor/outputs) first (ndarray/->vec)))) - (executor/backward exec-test (ndarray/* (ndarray/ones shape-vec) 2)) - (is (approx= 1e-6 (ndarray/zeros shape-vec) arr-grad)))) - -(deftest test-round-ceil-floor - (let [data (sym/variable "data") - shape-vec [3 4] - data-tmp (ndarray/* (ndarray/ones shape-vec) 5.543) - arr-data (ndarray/copy data-tmp) - arr-grad (ndarray/* (ndarray/ones shape-vec) 2) - - test (-> (sym/round data) - (sym/+ (sym/ceil data)) - (sym/+ (sym/floor data))) - exec-test (sym/bind test (context/default-context) [arr-data])] - (is (approx= 1e-6 - (-> (ndarray/round data-tmp) - (ndarray/+ (ndarray/ceil data-tmp)) - (ndarray/+ (ndarray/floor data-tmp))) - (-> (executor/forward exec-test) (executor/outputs) (first)))))) - -(deftest test-rsqrt-cos-sin - (let [data (sym/variable "data") - shape-vec [3 4] - data-tmp (ndarray/* (ndarray/ones shape-vec) 5) - arr-data (ndarray/copy data-tmp) - arr-grad (ndarray/* (ndarray/ones shape-vec) 3) - - test (-> (sym/rsqrt data) - (sym/+ (sym/cos data)) - (sym/+ (sym/sin data))) - exec-test (sym/bind test (context/default-context) [arr-data])] - (is (approx= 1e-6 - (-> (ndarray/rsqrt data-tmp) - (ndarray/+ (ndarray/cos data-tmp)) - (ndarray/+ (ndarray/sin data-tmp))) - (-> (executor/forward exec-test) (executor/outputs) (first)))))) - -(deftest test-maximum - (let [data1 (sym/variable "data1") - data2 (sym/variable "data2") - shape-vec [3 4] - data-tmp1 (random/uniform 0 100 shape-vec) - data-tmp2 (random/uniform 0 100 shape-vec) - - arr-data1 (ndarray/copy data-tmp1) - arr-data2 (ndarray/copy data-tmp2) - - test (sym/max data1 data2) - exec-test (sym/bind test (context/default-context) [arr-data1 arr-data2]) - out (-> (executor/forward exec-test) (executor/outputs) (first))] - (is (approx= 1e-6 - (mapv max (ndarray/->vec data-tmp1) (ndarray/->vec data-tmp2)) - out)))) - -(deftest test-minimum - (let [data1 (sym/variable "data1") - data2 (sym/variable "data2") - shape-vec [3 4] - data-tmp1 (random/uniform 0 100 shape-vec) - data-tmp2 (random/uniform 0 100 shape-vec) - - arr-data1 (ndarray/copy data-tmp1) - arr-data2 (ndarray/copy data-tmp2) - - test (sym/min data1 data2) - exec-test (sym/bind test (context/default-context) [arr-data1 arr-data2]) - out (-> (executor/forward exec-test) (executor/outputs) (first))] - (is (approx= 1e-6 - (mapv min
(ndarray/->vec data-tmp1) (ndarray/->vec data-tmp2)) - out)))) - -(deftest test-transpose - (let [data (sym/variable "data") - test (sym/transpose data) - shape-vec [3 4] - ctx (context/default-context) - arr-data (random/uniform 0 100 shape-vec {:ctx ctx}) - trans (ndarray/transpose (ndarray/copy arr-data)) - exec-test (sym/bind test ctx {"data" arr-data}) - out (-> (executor/forward exec-test) - (executor/outputs) - (first))] - (is (approx= 1e-6 trans out)) - (is (= [4 3] (mx-shape/->vec (ndarray/shape out)))))) - -(deftest test-smooth-l1-and-make-loss - (let [data (sym/variable "data") - smooth-l1 (sym/smooth-l1 {:data data :scalar 1.0}) - loss (sym/make-loss {:data smooth-l1}) - shape-vec [2 6] - ctx (context/default-context) - input (ndarray/array [-3.5 -2.5 -1.5 -0.5 -0.3 -0.1 - 0.1 0.3 0.5 1.5 2.5 3.5] shape-vec) - grad (ndarray/empty shape-vec) - arr-tmp [3.0 2.0 1.0 0.125 0.045 0.005 - 0.005 0.045 0.125 1.0 2.0 3.0] - grad-tmp [-1.0 -1.0 -1.0 -0.5 -0.3 -0.1 - 0.1 0.3 0.5 1.0 1.0 1.0] - exec-test (sym/bind loss ctx {:data input} {:data grad}) - out (-> (executor/forward exec-test) (executor/outputs) first)] - (is (approx= 1e-6 arr-tmp out)) - (executor/backward exec-test) - (is (approx= 1e-6 grad-tmp grad)))) - -(deftest test-maximum-minimum-scalar - (let [data (sym/variable "data") - shape-vec [3 4] - data-tmp (ndarray/* (ndarray/ones shape-vec) 2) - arr-data (ndarray/copy data-tmp) - test (-> (sym/max data 3) - (sym/+ (sym/max data 9)) - (sym/+ (sym/min data 5)) - (sym/+ (sym/min data 4))) - exec-test (sym/bind test (context/default-context) [arr-data])] - ;; 3 + 9 + 2 + 2 - (is (approx= 1e-6 (ndarray/* (ndarray/ones shape-vec) 16) (-> (executor/forward exec-test) (executor/outputs) (first)))))) - -(deftest test-abs - (let [data (sym/variable "data") - shape-vec [3 4] - data-tmp (ndarray/* (ndarray/ones shape-vec) 5) - arr-data (ndarray/copy data-tmp) - arr-grad (ndarray/* (ndarray/ones shape-vec) 3) - test (sym/abs data) - exec-test (sym/bind test (context/default-context) arr-data arr-grad)] - (is (approx= 1e-6 (ndarray/abs data-tmp) (-> (executor/forward exec-test) (executor/outputs) (first)))) - - (let [out-grad (ndarray/* (ndarray/ones shape-vec) 2) - npout-grad (ndarray/* out-grad (ndarray/sign data-tmp))] - (executor/backward exec-test out-grad) - (is (approx= 1e-6 npout-grad arr-grad)))));; configure A: input --> conv --> deconv --> output. 
- ;; the convolution and deconvolution have similar parameters, which ensures - ;; the input shape is the same as the output shape, and that conv and deconv - ;; share the same weights; - ;; if the input values of forward() and backward() are the same, then -;; their output values should also be the same; - -(defn check-deconvolution-forward-backward [{:keys [input-shape-vec num-filter kernel stride pad]}] - (let [data (sym/variable "data") - conv (sym/convolution "conv" {:data data :kernel kernel :stride stride - :pad pad :num-filter num-filter :no-bias "true"}) - deconv (sym/deconvolution "deconv" {:data conv :kernel kernel :stride stride - :pad pad :num-filter num-filter :no-bias "true"}) - arg-names (sym/list-arguments deconv) - arg-shape-vecs (first (sym/infer-shape deconv {:data input-shape-vec})) - input-data (random/uniform -5 5 input-shape-vec) - out-grad input-data - conv-weight (random/normal 0 1 [num-filter (second input-shape-vec) (first kernel) (last kernel)]) - args {:data input-data :conv-weight conv-weight :deconv-weight conv-weight} - args-grad (mapv #(ndarray/empty %) arg-shape-vecs) - exec (sym/bind deconv (context/default-context) args args-grad) - out (-> (executor/forward exec) (executor/outputs) first)] - (executor/backward exec out-grad) - (is (approx= 1e-3 (ndarray/->vec out) (ndarray/->vec (first args-grad)))))) - -(deftest test-deconvolution-forward-and-backward - (check-deconvolution-forward-backward {:input-shape-vec [1 1 5 5] :num-filter 1 :kernel [3 3] :stride [1 1] :pad [1 1]}) - (check-deconvolution-forward-backward {:input-shape-vec [32 3 28 28] :num-filter 3 :kernel [3 3] :stride [1 1] :pad [1 1]}) - ;; commented out to make the tests fast - #_(check-deconvolution-forward-backward {:input-shape-vec [10 3 403 403] :num-filter 3 :kernel [7 7] :stride [5 5] :pad [2 2]}))
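The identity that check-deconvolution-forward-backward relies on is easiest to see in one dimension: a stride-1, zero-padded "same" convolution and a deconvolution sharing its weights are adjoint linear maps. Below is a pure-Clojure toy illustrating this; it does not touch the MXNet API, `conv-1d`, `deconv-1d`, and `dot` are ad-hoc helpers invented for this sketch, and the kernel is assumed to have odd length:

```clojure
;; 1-D "same" convolution (cross-correlation) with zero padding.
(defn conv-1d [kernel xs]
  (let [k (count kernel)
        p (quot k 2)
        padded (vec (concat (repeat p 0) xs (repeat p 0)))]
    (mapv (fn [i] (reduce + (map * kernel (subvec padded i (+ i k)))))
          (range (count xs)))))

;; The matching deconvolution (transposed convolution) is just
;; correlation with the flipped kernel.
(defn deconv-1d [kernel ys]
  (conv-1d (vec (reverse kernel)) ys))

(defn dot [a b] (reduce + (map * a b)))

;; Adjoint identity: <conv(x), y> = <x, deconv(y)> for any x and y.
(let [k [1 2 3] x [4 5 6 7] y [1 0 2 1]]
  (= (dot (conv-1d k x) y)
     (dot x (deconv-1d k y))))   ;; => true (both sides equal 119)
```

Because the test binds the same weight NDArray to both layers, the composite network computes x -> A'Ax (A the convolution matrix, A' its transpose), which is self-adjoint; feeding the input back in as out-grad therefore reproduces the forward output in the data gradient, to within the 1e-3 tolerance.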
-;; configure A: input --> conv --> output. -;; configure B: input --> deconv --> output -;; the convolution and deconvolution have similar parameters, which ensures -;; the input shape is the same as the output shape; -;; during backward(), if the input of A equals the output of B, and the output -;; of A equals the input of B, then the weight gradients should be the same; - -(defn check-deconvolution-gradient [{:keys [input-shape-vec num-filter pad]}] - (let [stride [1 1] - kernel [(inc (* 2 (first pad))) (inc (* 2 (second pad)))] - data-conv (sym/variable "data_conv") - conv (sym/convolution "conv" {:data data-conv :kernel kernel :stride stride - :pad pad :num-filter num-filter :no-bias "true"}) - data-deconv (sym/variable "data_deconv") - deconv (sym/deconvolution "deconv" {:data data-deconv :kernel kernel :stride stride - :pad pad :num-filter num-filter :no-bias true}) - conv-data (random/uniform -5 5 input-shape-vec) - conv-args {"data_conv" conv-data "conv_weight" (random/normal 0 1 [num-filter (second input-shape-vec) (first kernel) (second kernel)])} - conv-args-grad [(ndarray/zeros (-> conv-data (ndarray/shape) (mx-shape/->vec))) - (ndarray/zeros [num-filter (second input-shape-vec) (first kernel) (second kernel)])] - exec-conv (sym/bind conv (context/default-context) conv-args conv-args-grad) - conv-out-grad (random/normal 0 2 (-> (executor/outputs exec-conv) (first) (ndarray/shape) (mx-shape/->vec)))] - (executor/forward exec-conv) - (executor/backward exec-conv conv-out-grad) - - (let [deconv-data conv-out-grad - deconv-args {"data_deconv" deconv-data "deconv_weight" (get conv-args "conv_weight")} - deconv-args-grad [(ndarray/zeros (-> deconv-data (ndarray/shape) (mx-shape/->vec))) - (ndarray/zeros [num-filter (second input-shape-vec) (first kernel) (second kernel)])] - exec-deconv (sym/bind deconv (context/default-context) deconv-args deconv-args-grad) - deconv-out-grad conv-data] - (executor/forward exec-deconv) - (executor/backward exec-deconv deconv-out-grad) - - (is (approx= 1e-4 (ndarray/->vec (second conv-args-grad)) (ndarray/->vec (second deconv-args-grad))))))) - -(deftest test-deconvolution-gradient - (check-deconvolution-gradient {:input-shape-vec [1 3 5 5] :num-filter 3 :pad [1 1]})) - -(defn check-nearest-up-sampling-with-shape [{:keys [shape-vecs scale root-scale]}] - (let [arr (zipmap (map #(str "arg_" %) (range 0 (count shape-vecs))) - (map #(random/uniform -10 10 %) shape-vecs)) - arr-grad (zipmap (map #(str "arg_" %) (range 0 (count shape-vecs))) - (map #(ndarray/zeros %) shape-vecs)) - up-args (mapv #(sym/variable (str "arg_" %)) (range 0 (count shape-vecs))) - up (sym/up-sampling "up-sampling" nil up-args {:sample-type "nearest" :scale root-scale}) - exec (sym/bind up (context/default-context) arr arr-grad)] - (executor/forward exec) - (executor/backward exec (executor/outputs exec)) - (doseq [k (range 0 (count shape-vecs))] - (let [k-name (str "arg_" k) - expected (->> (get arr k-name) (ndarray/->vec) (mapv #(* % (Math/pow root-scale 2) (Math/pow scale (* 2 k))))) - real (-> (get arr-grad k-name) (ndarray/->vec))] - (is (approx= 0.1 expected real)))))) - -(deftest test-nearest-upsampling - (doall (for [root-scale (range 1 4) - scale (range 1 4) - num-shape (range 1 4) - base (range 1 4)] - (let [shape-vecs (mapv (fn [i] [1 3 (* base root-scale (int (Math/pow scale (- (dec num-shape) i)))) - (* base root-scale (int (Math/pow scale (- (dec num-shape) i))))]) - (range 0 num-shape))] - (check-nearest-up-sampling-with-shape {:shape-vecs shape-vecs :scale scale :root-scale root-scale})))))
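In check-nearest-up-sampling-with-shape, the expected gradient per input element is the input value times the square of its total upsampling factor: nearest upsampling copies each input pixel to f x f output positions, and the test feeds the forward output back in as out-grad, so the backward pass sums that value over all of its copies. A 1-D pure-Clojure sketch of the same bookkeeping (factor s per axis; `up-1d` and `up-1d-backward` are invented for this illustration):

```clojure
;; Nearest upsampling by factor s repeats each element s times; its
;; backward pass sums the s output gradients that each input produced.
(defn up-1d [s xs] (vec (mapcat #(repeat s %) xs)))
(defn up-1d-backward [s out-grad]
  (mapv #(reduce + %) (partition s out-grad)))

;; With out-grad = forward output, every input gradient is s * x_i
;; (and s^2 * x_i for 2-D feature maps, matching the
;; (Math/pow root-scale 2) factor in the test above).
(let [x [1.0 2.0 3.0] s 3]
  (= (up-1d-backward s (up-1d s x))   ;; [3.0 6.0 9.0]
     (mapv #(* s %) x)))              ;; => true
```

diff --git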
a/contrib/clojure-package/test/org/apache/clojure_mxnet/optimizer_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/optimizer_test.clj deleted file mode 100644 index f2413dc91101..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/optimizer_test.clj +++ /dev/null @@ -1,56 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.optimizer-test - (:require [org.apache.clojure-mxnet.module :as m] - [org.apache.clojure-mxnet.optimizer :as optimizer] - [org.apache.clojure-mxnet.symbol :as sym] - [clojure.test :refer :all])) - -(defn test-optimizer [[opt-name optimizer-fn]] - (println "Testing optimizer - " opt-name) - (let [s (sym/variable "data") - s (sym/fully-connected {:data s :num-hidden 100}) - ;; single device - mod (m/module s {:data-names ["data"] :label-names nil})] - (-> mod - (m/bind {:data-shapes [{:name "data" :shape [10 10] :layout "NT"}]}) - (m/init-params) - (m/init-optimizer {:optimizer (optimizer-fn)}) - (m/update)))) - -(deftest test-optimizer-update - (let [opts [["sgd" optimizer/sgd] - ["dcasgd" optimizer/dcasgd] - ["nag" optimizer/nag] - ["ada-delta" optimizer/ada-delta] - ["rms-prop" optimizer/rms-prop] - ["ada-grad" optimizer/ada-grad] - ["adam" optimizer/adam] - ["sgld" optimizer/sgld]]] - (doseq [opt opts] - (test-optimizer opt)))) - -(deftest test-optimizers-parameters-specs - (is (thrown? Exception (optimizer/sgd {:wd 'a}))) - (is (thrown? Exception (optimizer/dcasgd {:lambda 'a}))) - (is (thrown? Exception (optimizer/nag {:momentum 'a}))) - (is (thrown? Exception (optimizer/ada-delta {:epsilon 'a}))) - (is (thrown? Exception (optimizer/rms-prop {:rho 'a}))) - (is (thrown? Exception (optimizer/ada-grad {:rescale-gradient 'a}))) - (is (thrown? Exception (optimizer/adam {:beta1 'a}))) - (is (thrown? Exception (optimizer/sgld {:lr-scheduler 0.1})))) \ No newline at end of file diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/primitives_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/primitives_test.clj deleted file mode 100644 index 1a538e537b8b..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/primitives_test.clj +++ /dev/null @@ -1,45 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. 
You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.primitives-test - (:require [org.apache.clojure-mxnet.primitives :as primitives] - [clojure.test :refer :all]) - (:import (org.apache.mxnet MX_PRIMITIVES$MX_PRIMITIVE_TYPE - MX_PRIMITIVES$MX_FLOAT - MX_PRIMITIVES$MX_Double))) - -(deftest test-primitive-types - (is (not (primitives/primitive? 3))) - (is (primitives/primitive? (primitives/mx-float 3))) - (is (primitives/primitive? (primitives/mx-double 3)))) - -(deftest test-float-primitives - (is (instance? MX_PRIMITIVES$MX_PRIMITIVE_TYPE (primitives/mx-float 3))) - (is (instance? MX_PRIMITIVES$MX_FLOAT (primitives/mx-float 3))) - (is (instance? Float (-> (primitives/mx-float 3) - (primitives/->num)))) - (is (= 3.0 (-> (primitives/mx-float 3) - (primitives/->num))))) - -(deftest test-double-primitives - (is (instance? MX_PRIMITIVES$MX_PRIMITIVE_TYPE (primitives/mx-double 2))) - (is (instance? MX_PRIMITIVES$MX_Double (primitives/mx-double 2))) - (is (instance? Double (-> (primitives/mx-double 2) - (primitives/->num)))) - (is (= 2.0 (-> (primitives/mx-double 2) - (primitives/->num))))) - diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/profiler_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/profiler_test.clj deleted file mode 100644 index f4b74343fa1d..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/profiler_test.clj +++ /dev/null @@ -1,31 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.profiler-test - (:require [org.apache.clojure-mxnet.profiler :as profiler] - [clojure.test :refer :all])) - -;; Just excercising the interop - -(deftest test-profiler - (do - (profiler/profiler-set-config {:filename "test-profile.json" - :profile-symbolic 1}) - (profiler/profiler-set-state "run") - (profiler/profiler-set-state "stop") - (profiler/profiler-set-state) - (profiler/dump-profile 0))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/random_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/random_test.clj deleted file mode 100644 index ca1dcc9430dc..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/random_test.clj +++ /dev/null @@ -1,69 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. 
See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.random-test - (:require [org.apache.clojure-mxnet.context :as context] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.random :as random] - [clojure.test :refer :all])) - -(deftest test-uniform-on-cpu - (let [ctx (context/default-context)] - (let [[a b] [-10 10] - shape [100 100] - _ (random/seed 128) - un1 (random/uniform a b shape {:ctx ctx}) - _ (random/seed 128) - un2 (random/uniform a b shape {:ctx ctx})] - (is (= un1 un2)) - (is (< (Math/abs - (/ (/ (apply + (ndarray/->vec un1)) - (- (ndarray/size un1) (+ a b))) - 2.0)) - 0.1))))) - -(deftest test-normal-on-cpu - (let [[mu sigma] [10 2] - shape [100 100] - _ (random/seed 128) - ret1 (random/normal mu sigma shape) - _ (random/seed 128) - ret2 (random/normal mu sigma shape)] - (is (= ret1 ret2)) - - (let [array (ndarray/->vec ret1) - mean (/ (apply + array) (count array)) - devs (map #(* (- % mean) (- % mean)) array) - stddev (Math/sqrt (/ (apply + devs) (count array)))] - (is (< (Math/abs (- mean mu)) 0.1)) - (is (< (Math/abs (- stddev sigma)) 0.1))))) - -(defn random-or-normal [fn_] - (is (thrown? Exception (fn_ 'a 2 []))) - (is (thrown? Exception (fn_ 1 'b []))) - (is (thrown? Exception (fn_ 1 2 [-1]))) - (is (thrown? Exception (fn_ 1 0 [1 2]))) - (is (thrown? Exception (fn_ 1 -1 [1 2]))) - (is (thrown? Exception (fn_ 1 2 [2 3 0]))) - (is (thrown? Exception (fn_ 1 2 [10 10] {:ctx "a"}))) - (let [ctx (context/default-context)] - (is (not (nil? (fn_ 1 1 [100 100] {:ctx ctx})))))) - -(deftest test-random-parameters-specs - (random-or-normal random/normal) - (random-or-normal random/uniform) - (is (thrown? Exception (random/seed "a")))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/resource_scope_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/resource_scope_test.clj deleted file mode 100644 index 77df03402629..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/resource_scope_test.clj +++ /dev/null @@ -1,146 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
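Stepping back to the random-number tests above: the reproducibility check is exact equality after re-seeding, and the distribution checks are plain sample moments. A minimal pure-Clojure sketch of those moment checks (java.util.Random stands in for the MXNet sampler here; `mean` and `stddev` are ad-hoc helpers):

```clojure
(defn mean [xs] (/ (reduce + xs) (count xs)))

(defn stddev [xs]
  (let [m (mean xs)]
    (Math/sqrt (mean (mapv #(let [d (- % m)] (* d d)) xs)))))

;; 10,000 draws from N(10, 2): the sample mean lands near 10 and the
;; sample stddev near 2, comfortably inside the 0.1 tolerance that
;; test-normal-on-cpu uses.
(let [rng (java.util.Random. 128)
      xs (vec (repeatedly 10000 #(+ 10 (* 2 (.nextGaussian rng)))))]
  [(mean xs) (stddev xs)])
```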
-;; - -(ns org.apache.clojure-mxnet.resource-scope-test - (:require [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.symbol :as sym] - [org.apache.clojure-mxnet.resource-scope :as resource-scope] - [clojure.test :refer :all])) - - -(deftest test-resource-scope-with-ndarray - (let [native-resources (atom {}) - x (ndarray/ones [2 2]) - return-val (resource-scope/using - (let [temp-x (ndarray/ones [3 1]) - temp-y (ndarray/ones [3 1])] - (swap! native-resources assoc :temp-x temp-x) - (swap! native-resources assoc :temp-y temp-y) - (ndarray/+ temp-x 1)))] - (is (true? (ndarray/is-disposed (:temp-x @native-resources)))) - (is (true? (ndarray/is-disposed (:temp-y @native-resources)))) - (is (false? (ndarray/is-disposed return-val))) - (is (false? (ndarray/is-disposed x))) - (is (= [2.0 2.0 2.0] (ndarray/->vec return-val))))) - -(deftest test-nested-resource-scope-with-ndarray - (let [native-resources (atom {}) - x (ndarray/ones [2 2]) - return-val (resource-scope/using - (let [temp-x (ndarray/ones [3 1])] - (swap! native-resources assoc :temp-x temp-x) - (resource-scope/using - (let [temp-y (ndarray/ones [3 1])] - (swap! native-resources assoc :temp-y temp-y)))))] - (is (true? (ndarray/is-disposed (:temp-y @native-resources)))) - (is (true? (ndarray/is-disposed (:temp-x @native-resources)))) - (is (false? (ndarray/is-disposed x))))) - -(deftest test-resource-scope-with-sym - (let [native-resources (atom {}) - x (sym/ones [2 2]) - return-val (resource-scope/using - (let [temp-x (sym/ones [3 1]) - temp-y (sym/ones [3 1])] - (swap! native-resources assoc :temp-x temp-x) - (swap! native-resources assoc :temp-y temp-y) - (sym/+ temp-x 1)))] - (is (true? (sym/is-disposed (:temp-x @native-resources)))) - (is (true? (sym/is-disposed (:temp-y @native-resources)))) - (is (false? (sym/is-disposed return-val))) - (is (false? (sym/is-disposed x))))) - -(deftest test-nested-resource-scope-with-sym - (let [native-resources (atom {}) - x (sym/ones [2 2]) - return-val (resource-scope/using - (let [temp-x (sym/ones [3 1])] - (swap! native-resources assoc :temp-x temp-x) - (resource-scope/using - (let [temp-y (sym/ones [3 1])] - (swap! native-resources assoc :temp-y temp-y)))))] - (is (true? (sym/is-disposed (:temp-y @native-resources)))) - (is (true? (sym/is-disposed (:temp-x @native-resources)))) - (is (false? (sym/is-disposed x))))) - -(deftest test-list-creation-with-returning-first - (let [native-resources (atom []) - return-val (resource-scope/using - (let [temp-ndarrays (doall (repeatedly 3 #(ndarray/ones [3 1]))) - _ (reset! native-resources temp-ndarrays)] - (first temp-ndarrays)))] - (is (false? (ndarray/is-disposed return-val))) - (is (= [false true true] (mapv ndarray/is-disposed @native-resources))))) - -(deftest test-list-creation - (let [native-resources (atom []) - return-val (resource-scope/using - (let [temp-ndarrays (doall (repeatedly 3 #(ndarray/ones [3 1]))) - _ (reset! native-resources temp-ndarrays)] - (ndarray/ones [3 1])))] - (is (false?
(ndarray/is-disposed return-val))) - (is (= [true true true] (mapv ndarray/is-disposed @native-resources))))) - -(deftest test-list-creation-without-let - (let [native-resources (atom []) - return-val (resource-scope/using - (first (doall (repeatedly 3 #(do - (let [x (ndarray/ones [3 1])] - (swap! native-resources conj x) - x))))))] - (is (false? (ndarray/is-disposed return-val))) - (is (= [false true true] (mapv ndarray/is-disposed @native-resources))))) - -(deftest test-with-let - (let [native-resources (atom {}) - x (ndarray/ones [2 2]) - return-val (resource-scope/with-let [temp-x (ndarray/ones [3 1]) - temp-y (ndarray/ones [3 1])] - (swap! native-resources assoc :temp-x temp-x) - (swap! native-resources assoc :temp-y temp-y) - (ndarray/+ temp-x 1))] - (is (true? (ndarray/is-disposed (:temp-x @native-resources)))) - (is (true? (ndarray/is-disposed (:temp-y @native-resources)))) - (is (false? (ndarray/is-disposed return-val))) - (is (false? (ndarray/is-disposed x))) - (is (= [2.0 2.0 2.0] (ndarray/->vec return-val))))) - -(deftest test-with-do - (let [native-resources (atom {}) - x (ndarray/ones [2 2]) - return-val (resource-scope/with-do - (swap! native-resources assoc :temp-x (ndarray/ones [3 1])) - (swap! native-resources assoc :temp-y (ndarray/ones [3 1])) - (ndarray/ones [3 1]))] - (is (true? (ndarray/is-disposed (:temp-x @native-resources)))) - (is (true? (ndarray/is-disposed (:temp-y @native-resources)))) - (is (false? (ndarray/is-disposed return-val))) - (is (false? (ndarray/is-disposed x))) - (is (= [1.0 1.0 1.0] (ndarray/->vec return-val))))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/shape_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/shape_test.clj deleted file mode 100644 index 5828da96411d..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/shape_test.clj +++ /dev/null @@ -1,28 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.shape-test - (:require [org.apache.clojure-mxnet.shape :as mx-shape] - [clojure.test :refer :all])) - -(deftest test-to-string - (let [s (mx-shape/->shape [1 2 3])] - (is (= "(1,2,3)" (str s))))) - -(deftest test-equals - (is (= (mx-shape/->shape [1 2 3]) (mx-shape/->shape [1 2 3]))) - (is (not= (mx-shape/->shape [1 2]) (mx-shape/->shape [1 2 3])))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/symbol_api_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/symbol_api_test.clj deleted file mode 100644 index 03bf7c31b30c..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/symbol_api_test.clj +++ /dev/null @@ -1,51 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. 
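For readers skimming the resource-scope tests above: the behaviour they pin down is "dispose every native resource allocated inside the scope, except whatever the scope returns". Here is a deliberately simplified sketch of that idea; it is not the actual resource-scope implementation (which lives in the Scala layer and also handles nested values), and `Res`, `make-res`, `dispose!`, and `using*` are stand-ins invented for this sketch:

```clojure
(def ^:dynamic *scope* nil)

(defrecord Res [disposed])                 ;; toy stand-in for a native resource
(defn make-res []
  (let [r (->Res (atom false))]
    (when *scope* (swap! *scope* conj r))  ;; register with the enclosing scope
    r))
(defn dispose! [r] (reset! (:disposed r) true))

(defmacro using* [& body]
  `(binding [*scope* (atom [])]
     (let [ret# (do ~@body)]
       ;; free everything the scope allocated, except the return value
       (doseq [r# @*scope* :when (not (identical? r# ret#))]
         (dispose! r#))
       ret#)))

;; the temporary is disposed, the returned resource survives:
(let [leaked (atom nil)
      kept (using* (let [a (make-res) b (make-res)]
                     (reset! leaked a)
                     b))]
  [@(:disposed @leaked) @(:disposed kept)])  ;; => [true false]
```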
See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.symbol-api-test - (:require [org.apache.clojure-mxnet.dtype :as dtype] - [org.apache.clojure-mxnet.executor :as executor] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.symbol :as sym] - [org.apache.clojure-mxnet.symbol-api :as sym-api] - [org.apache.clojure-mxnet.util :as util] - [clojure.test :refer :all] - [org.apache.clojure-mxnet.context :as context])) - -(deftest test-compose - (let [data (sym/variable "data") - net1 (sym-api/fully-connected {:data data :num-hidden 10 :name "fc1"}) - net1 (sym-api/fully-connected {:data net1 :num-hidden 100 :name "fc2"} ) - - net2 (sym-api/fully-connected {:num-hidden 10 :name "fc3"}) - net2 (sym-api/activation {:data net2 :act-type "relu"}) - net2 (sym-api/fully-connected {:data net2 :num-hidden 20 :name "fc4"}) - - composed (sym/apply net2 "composed" {"fc3_data" net1}) - - multi-out (sym/group [composed net1])] - - (is (= ["data" "fc1_weight" "fc1_bias" "fc2_weight" "fc2_bias"] (sym/list-arguments net1))) - (is (= 2 (count (sym/list-outputs multi-out)))))) - -(deftest test-symbol-internal - (let [data (sym/variable "data") - oldfc (sym-api/fully-connected {:data data :num-hidden 10 :name"fc1"}) - net1 (sym-api/fully-connected {:data oldfc :num-hidden 100 :name"fc2"})] - (is (= ["data" "fc1_weight" "fc1_bias" "fc2_weight" "fc2_bias"] (sym/list-arguments net1))) - (= (sym/list-arguments oldfc) (-> (sym/get-internals net1) - (sym/get "fc1_output") - (sym/list-arguments))))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/symbol_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/symbol_test.clj deleted file mode 100644 index 5308b883aa3c..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/symbol_test.clj +++ /dev/null @@ -1,83 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - -(ns org.apache.clojure-mxnet.symbol-test - (:require [org.apache.clojure-mxnet.dtype :as dtype] - [org.apache.clojure-mxnet.executor :as executor] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.symbol :as sym] - [org.apache.clojure-mxnet.util :as util] - [clojure.test :refer :all] - [org.apache.clojure-mxnet.context :as context])) - -(deftest test-compose - (let [data (sym/variable "data") - net1 (sym/fully-connected "fc1" {:data data :num-hidden 10}) - net1 (sym/fully-connected "fc2" {:data net1 :num-hidden 100}) - - net2 (sym/fully-connected "fc3" {:num-hidden 10}) - net2 (sym/activation {:data net2 :act-type "relu"}) - net2 (sym/fully-connected "fc4" {:data net2 :num-hidden 20}) - - composed (sym/apply net2 "composed" {"fc3_data" net1}) - - multi-out (sym/group [composed net1])] - - (is (= ["data" "fc1_weight" "fc1_bias" "fc2_weight" "fc2_bias"] (sym/list-arguments net1))) - (println (sym/debug-str composed)) - (is (= 2 (count (sym/list-outputs multi-out)))))) - -(deftest test-symbol-internal - (let [data (sym/variable "data") - oldfc (sym/fully-connected "fc1" {:data data :num-hidden 10}) - net1 (sym/fully-connected "fc2" {:data oldfc :num-hidden 100})] - (is (= ["data" "fc1_weight" "fc1_bias" "fc2_weight" "fc2_bias"] (sym/list-arguments net1))) - (= (sym/list-arguments oldfc) (-> (sym/get-internals net1) - (sym/get "fc1_output") - (sym/list-arguments))))) - -(deftest test-copy - (let [data (sym/variable "data") - data2 (sym/clone data)] - (is (= (sym/to-json data) (sym/to-json data2))))) - -(deftest test-basic-bind - (let [a (sym/variable "a") - b (sym/variable "b") - c (sym/+ a b) - ex (sym/bind c {"a" (ndarray/ones [2 2]) "b" (ndarray/ones [2 2])})] - (is (= [2.0 2.0 2.0 2.0] (-> (executor/forward ex) - (executor/outputs) - (first) - (ndarray/->vec)))))) -(deftest test-simple-bind - (let [a (sym/ones [3]) - b (sym/ones [3]) - c (sym/+ a b) - ex (sym/simple-bind c (context/default-context))] - (is (= [2.0 2.0 2.0] (-> (executor/forward ex) - (executor/outputs) - (first) - (ndarray/->vec)))))) - -(deftest test-infer-shape - (let [a (sym/variable "a") - b (sym/variable "b") - c (sym/+ a b) - [arg-shapes out-shapes] (sym/infer-shape c {"a" [2 2] "b" [2 2]})] - (is (= [[2 2] [2 2]] arg-shapes)) - (is (= [[2 2]] out-shapes)))) diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/test_util.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/test_util.clj deleted file mode 100644 index d632c969eae9..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/test_util.clj +++ /dev/null @@ -1,29 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. 
-;; - -(ns org.apache.clojure-mxnet.test-util - (:require [clojure.test :as t])) - -(defn approx= [tolerance x y] - (if (and (number? x) (number? y)) - (let [diff (Math/abs (- x y))] - (< diff tolerance)) - (and - (= (count x) (count y)) - (reduce (fn [x y] (and x y)) - (map #(approx= tolerance %1 %2) x y))))) - diff --git a/contrib/clojure-package/test/org/apache/clojure_mxnet/util_test.clj b/contrib/clojure-package/test/org/apache/clojure_mxnet/util_test.clj deleted file mode 100644 index 6652b68a4830..000000000000 --- a/contrib/clojure-package/test/org/apache/clojure_mxnet/util_test.clj +++ /dev/null @@ -1,262 +0,0 @@ -;; -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - -(ns org.apache.clojure-mxnet.util-test - (:require [clojure.test :refer :all] - [org.apache.clojure-mxnet.shape :as mx-shape] - [org.apache.clojure-mxnet.util :as util] - [org.apache.clojure-mxnet.ndarray :as ndarray] - [org.apache.clojure-mxnet.primitives :as primitives] - [org.apache.clojure-mxnet.symbol :as sym] - [org.apache.clojure-mxnet.test-util :as test-util] - [clojure.spec.alpha :as s]) - (:import (org.apache.mxnet Shape NDArrayFuncReturn NDArray) - (scala.collection Map Set) - (scala.collection.mutable ArrayBuffer) - (scala.collection.immutable List IndexedSeq ListMap Vector) - (scala Option Tuple1 Tuple2 Tuple3))) - -(deftest test-empty-list - (let [x (util/empty-list)] - (is (instance? List x)) - (is (true? (.isEmpty x))))) - -(deftest test-empty-map - (let [x (util/empty-map)] - (is (instance? Map x)) - (is (true? (.isEmpty x))))) - -(deftest test-indexed-seq - (let [x (util/empty-indexed-seq)] - (is (instance? IndexedSeq x)) - (is (true? (.isEmpty x))))) - -(deftest test-empty-list-map - (let [x (util/empty-list-map)] - (is (instance? ListMap x)) - (is (true? (.isEmpty x))))) - -(deftest test->option - (let [x (util/->option 1)] - (is (instance? Option x)) - (is (= 1 (.get x))))) - -(deftest test->int-option - (let [x (util/->int-option 4.5)] - (is (instance? Option x)) - (is (= 4 (.get x))))) - -(deftest test-empty->int-option - (let [x (util/->int-option nil)] - (is (instance? Option x)) - (is (.isEmpty x)))) - -(deftest test-option->value - (is (= 2 (-> (util/->option 2) - (util/option->value))))) - -(deftest test-keyword->snake-case - (is (= ["foo_bar" "foo2" "bar_bar"] - (mapv util/keyword->snake-case [:foo_bar :foo2 :bar-bar])))) - -(deftest test-convert-tuple - (is (instance? Tuple1 (util/convert-tuple [1]))) - (is (instance? Tuple2 (util/convert-tuple [1 2]))) - (is (instance? Tuple3 (util/convert-tuple [1 2 3])))) - -(deftest test-convert-by-shape - (let [x (util/convert-by-shape {:a [100] :b "hi"})] - (is (instance? 
Shape (:a x))) - (is (= "hi" (:b x))))) - -(deftest tuple-convert-by-param-name - (let [x (util/tuple-convert-by-param-name {:foo [100] :kernel [3 3] :bar "hi"})] - (is (= "(3,3)" (:kernel x))) - (is (= [100] (:foo x))) - (is (= "hi" (:bar x))))) - -(deftest test-io-convert-by-param-name - (let [x (util/io-convert-by-param-name {:input-shape [10 10] :freeze? true :foo 1})] - (is (= "(10,10)" (:input-shape x))) - (is (= "True" (:freeze? x))) - (is (= "1" (:foo x))))) - -(deftest test-convert-map - (let [x (util/convert-map {:a [10] :b 1 :foo-bar 2})] - (is (instance? Map x)) - (is (= "Set(a, b, foo_bar)" (-> x (.keys) str))))) - -(deftest test-convert-vector - (let [x (util/convert-vector [1 2 3])] - (is (instance? List x)) - (is (= "List(1, 2, 3)" (str x))))) - -(deftest test-vec->set - (let [x (util/vec->set [1 2 3])] - (is (instance? Set x)) - (is (= "Set(1, 2, 3)" (str x))))) - -(deftest test-vec->indexed-seq - (let [x (util/vec->indexed-seq [1 2 3])] - (is (instance? Vector x)) - (is (= "Vector(1, 2, 3)" (str x))))) - -(deftest test-scala-function - (let [s-fn (util/scala-fn (fn [x] (+ x 2)))] - (is (= 4 (util/apply-scala-fn s-fn 2))))) - -(deftest test-coerce-param - (is (instance? Map (util/coerce-param {:x 1} #{"scala.collection.immutable.Map"}))) - (is (map? (util/coerce-param {:x 1} #{"float"}))) - - (is (float? (util/coerce-param 1 #{"float"}))) - - (is (instance? List (util/coerce-param (ndarray/ones [3]) #{"scala.collection.Seq"}))) - (is (instance? List (util/coerce-param (sym/variable "a") #{"scala.collection.Seq"}))) - (is (instance? List (util/coerce-param [1 2] #{"scala.collection.Seq"}))) - (is (instance? List (util/coerce-param [] #{"scala.collection.Seq"}))) - - (is (= "[I" (->> (util/coerce-param [1 2] #{"int<>"}) str (take 2) (apply str)))) - (is (= "[F" (->> (util/coerce-param [1 2] #{"float<>"}) str (take 2) (apply str)))) - (is (= "[L" (->> (util/coerce-param [1 2] #{"java.lang.String<>"}) str (take 2) (apply str)))) - - (is (primitives/primitive? (util/coerce-param 1.0 #{"org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE"}))) - (is (primitives/primitive? (util/coerce-param (float 1.0) #{"org.apache.mxnet.MX_PRIMITIVES$MX_PRIMITIVE_TYPE"}))) - - (is (= 1 (util/coerce-param 1 #{"unknown"})))) - -(deftest test-nil-or-coerce-param - (is (instance? Map (util/nil-or-coerce-param {:x 1} #{"scala.collection.immutable.Map"}))) - (is (nil? 
(util/coerce-param nil #{"scala.collection.immutable.Map"})))) - -(deftest test-scala-map->map - (is (= {"a" 1} (-> (util/convert-map {:a 1}) - (util/scala-map->map))))) - -(deftest test-buffer->vec - (is (= [] (util/buffer->vec (ArrayBuffer.))))) - -(deftest test-scala-vector->vec - (is (= [1 2 3] (util/scala-vector->vec - (util/vec->indexed-seq [1 2 3]))))) - -(deftest test-scala-iterator->seq - (is (= [1 2 3] (-> (util/vec->indexed-seq [1 2 3]) - (.iterator) - (util/scala-iterator->seq))))) - -(deftest test-tuple->vec - (is (= [1 2] (-> (util/convert-tuple [1 2]) - (util/tuple->vec))))) - -(deftest test-to-array-nd - (let [a1 (util/to-array-nd '(1)) - a2 (util/to-array-nd [1.0 2.0]) - a3 (util/to-array-nd [[3.0] [4.0]]) - a4 (util/to-array-nd [[[5 -5]]])] - (is (= 1 (alength a1))) - (is (= [1] (->> a1 vec))) - (is (= 2 (alength a2))) - (is (= 2.0 (aget a2 1))) - (is (= [1.0 2.0] (->> a2 vec))) - (is (= 2 (alength a3))) - (is (= 1 (alength (aget a3 0)))) - (is (= 4.0 (aget a3 1 0))) - (is (= [[3.0] [4.0]] (->> a3 vec (mapv vec)))) - (is (= 1 (alength a4))) - (is (= 1 (alength (aget a4 0)))) - (is (= 2 (alength (aget a4 0 0)))) - (is (= 5 (aget a4 0 0 0))) - (is (= [[[5 -5]]] (->> a4 vec (mapv vec) (mapv #(mapv vec %))))))) - -(deftest test-nd-seq-shape - (is (= [1] (util/nd-seq-shape '(5)))) - (is (= [2] (util/nd-seq-shape [1.0 2.0]))) - (is (= [3] (util/nd-seq-shape [1 1 1]))) - (is (= [2 1] (util/nd-seq-shape [[3.0] [4.0]]))) - (is (= [1 3 2] (util/nd-seq-shape [[[5 -5] [5 -5] [5 -5]]])))) - -(deftest test-coerce-return - (is (= [] (util/coerce-return (ArrayBuffer.)))) - (is (= [1 2 3] (util/coerce-return (util/vec->indexed-seq [1 2 3])))) - (is (instance? NDArray - (util/coerce-return - (new NDArrayFuncReturn (into-array [(ndarray/zeros [3])]))))) - (is (= {"x" 1} (util/coerce-return - (util/convert-map {:x 1})))) - (is (= [1 2] (util/coerce-return - (util/convert-tuple [1 2])))) - (is (= [1 2 3] (util/coerce-return - (util/convert-tuple [1 2 3])))) - - (is (instance? Double (util/coerce-return (primitives/mx-double 3)))) - (is (= 3.0 (util/coerce-return (primitives/mx-double 3)))) - (is (instance? Float (util/coerce-return (primitives/mx-float 2)))) - (is (= 2.0 (util/coerce-return (primitives/mx-float 2)))) - - (is (= "foo" (util/coerce-return "foo")))) - -(deftest test-translate-keyword-shape - (let [[name shape] (util/translate-keyword-shape [:foo-a [5]])] - (is (= name "foo_a")) - (is (instance? Shape shape)) - (is (= "(5)" (str shape))))) - -(deftest test-map->tuple - (let [x (util/map->tuple {:foo-a [5]})] - (is (instance? Tuple2 (first x))) - (is (= "(foo_a,(5))" (str (first x)))))) - -(deftest test-list-map - (let [x (util/list-map {:x 1 :y 2})] - (is (instance? ListMap x)) - (is (= "Map(x -> 1, y -> 2)" (str x))))) - -(s/def ::x string?) - -(deftest test-validate - (is (nil? (util/validate! string? "foo" "Not a string!"))) - (is (thrown-with-msg? Exception #"Not a string!" (util/validate! ::x 1 "Not a string!")))) - -(deftest test-approx= - (let [data1 [1 1 1 1] - data2 [1 1 1 1 9 9 9 9] - data3 [1 1 1 2]] - (is (not (test-util/approx= 1e-9 data1 data2))) - (is (test-util/approx= 2 data1 data3)))) - -(deftest test-map->scala-tuple-seq - ;; convert as much, and pass-through the rest - (is (nil? 
(util/map->scala-tuple-seq nil))) - (is (= "List()" - (str (util/map->scala-tuple-seq {})) - (str (util/map->scala-tuple-seq [])) - (str (util/map->scala-tuple-seq '())))) - (is (= "List(a, b)" (str (util/map->scala-tuple-seq ["a" "b"])))) - (is (= "List((a,b), (c,d), (e,f), (a_b,g), (c_d,h), (e_f,i))" - (str (util/map->scala-tuple-seq {:a "b", 'c "d", "e" "f" - :a-b "g", 'c-d "h", "e-f" "i"})))) - (let [nda (util/map->scala-tuple-seq {:a-b (ndarray/ones [1 2])})] - (is (= "a_b" (._1 (.head nda)))) - (is (= [1.0 1.0] (ndarray/->vec (._2 (.head nda))))))) - -(deftest test-forms->scala-fn - (let [scala-fn (util/forms->scala-fn - (def x 1) - (def y 2) - {:x x :y y})] - (is (= {:x 1 :y 2} (.apply scala-fn))))) diff --git a/contrib/clojure-package/test/test_helper.clj b/contrib/clojure-package/test/test_helper.clj deleted file mode 100644 index 1dda7871ad69..000000000000 --- a/contrib/clojure-package/test/test_helper.clj +++ /dev/null @@ -1,26 +0,0 @@ -;; Licensed to the Apache Software Foundation (ASF) under one or more -;; contributor license agreements. See the NOTICE file distributed with -;; this work for additional information regarding copyright ownership. -;; The ASF licenses this file to You under the Apache License, Version 2.0 -;; (the "License"); you may not use this file except in compliance with -;; the License. You may obtain a copy of the License at -;; -;; http://www.apache.org/licenses/LICENSE-2.0 -;; -;; Unless required by applicable law or agreed to in writing, software -;; distributed under the License is distributed on an "AS IS" BASIS, -;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -;; See the License for the specific language governing permissions and -;; limitations under the License. -;; - - -(ns test-helper - (:require [clojure.java.io :as io] - [clojure.java.shell :refer [sh]])) - -(def data-dir "test/test-images/") - -(defn load-test-images [] - (when-not (.exists (io/file (str data-dir "Pug-Cookie.jpg"))) - (sh "./scripts/get_test_images.sh"))) diff --git a/contrib/clojure-package/testing.md b/contrib/clojure-package/testing.md deleted file mode 100644 index 0e87790686f0..000000000000 --- a/contrib/clojure-package/testing.md +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - - - - - - - - - - - -## Help with Testing - -If you want to give the repo a spin and help make it stable and ready for prime time, that would be awesome. - -Here is what you can do. - -* Clone the project -* Edit the project.clj file and uncomment the line that is for your system (OSX, Linux CPU, or Linux GPU) -* Run `lein deps` (this might take a bit - the jars are big!) -* Run `lein test` - there should be no errors. The tests are all CPU -* Run `lein install` to install the clojure-package locally -* Go to the module examples `cd examples/module` -* Either run `lein run` or `lein run :gpu` - -If you find any problems, please log an issue. - -Thanks! - -## Want to explore more?
- -The examples/tutorial is a good REPL walkthrough -The examples/pre-trained-modules is nice too -The examples/gan is just plain fun :) diff --git a/cpp-package/.gitignore b/cpp-package/.gitignore deleted file mode 100644 index 51453c9b8423..000000000000 --- a/cpp-package/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Rebuildable file(s) -include/mxnet-cpp/op.h diff --git a/cpp-package/.travis.yml b/cpp-package/.travis.yml deleted file mode 100644 index e7a332d09125..000000000000 --- a/cpp-package/.travis.yml +++ /dev/null @@ -1,48 +0,0 @@ -sudo: false - -language: cpp - -os: - - linux -# disable for now since clang doesn't support openmp -# - osx - -env: - # code analysis - - TASK=lint - # TODO: build example - - TASK=build - -# dependent apt packages -addons: - apt: - sources: - - ubuntu-toolchain-r-test - packages: - - gcc-4.8 - - g++-4.8 -# - wget -# - git -# - libcurl4-openssl-dev -# - unzip -# - libatlas-dev -# - libopencv-dev - -before_install: - -install: - - source tests/travis/setup.sh - -script: - - tests/travis/run_test.sh - -cache: - directories: - - ${HOME}/.cache/usr - -notifications: -# Emails are sent to the committer's git-configured email address by default, - email: - on_success: change - on_failure: always - #slack: dmlc:NmroCzntCiWOuxUZpii40USd diff --git a/cpp-package/CMakeLists.txt b/cpp-package/CMakeLists.txt deleted file mode 100644 index db64fa99bddf..000000000000 --- a/cpp-package/CMakeLists.txt +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -cmake_minimum_required(VERSION 3.13) -project(mxnet_cpp C CXX) - -add_library(mxnet_cpp INTERFACE) - -set(CPP_PACKAGE_INCLUDE_DIR ${CMAKE_CURRENT_LIST_DIR}/include/) -target_include_directories(mxnet_cpp INTERFACE "${CPP_PACKAGE_INCLUDE_DIR}") -file(GLOB_RECURSE CPP_PACKAGE_HEADERS - "${CPP_PACKAGE_INCLUDE_DIR}/*.h" - "${CPP_PACKAGE_INCLUDE_DIR}/*.hpp") -set(CPP_PACKAGE_OP_H_HEADER ${CMAKE_CURRENT_LIST_DIR}/include/mxnet-cpp/op.h) -target_sources(mxnet_cpp INTERFACE ${CPP_PACKAGE_HEADERS} ${CPP_PACKAGE_OP_H_HEADER}) -target_link_libraries(mxnet_cpp INTERFACE mxnet ${mxnet_LINKER_LIBS}) - -add_custom_target( - cpp_package_op_h ALL - BYPRODUCTS ${CPP_PACKAGE_OP_H_HEADER} - MAIN_DEPENDENCY mxnet - DEPENDS mxnet ${CMAKE_CURRENT_SOURCE_DIR}/scripts/OpWrapperGenerator.py - COMMAND echo "Running: OpWrapperGenerator.py" - COMMAND python OpWrapperGenerator.py $ - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/scripts -) -add_dependencies(mxnet_cpp cpp_package_op_h) - -if(MSVC) - target_compile_options(mxnet_cpp INTERFACE "/utf-8") -endif(MSVC) - -if(BUILD_CPP_EXAMPLES) - add_subdirectory(example) - add_subdirectory(example/inference) -endif() - -install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) diff --git a/cpp-package/LICENSE b/cpp-package/LICENSE deleted file mode 100644 index 13e990c523fb..000000000000 --- a/cpp-package/LICENSE +++ /dev/null @@ -1,11 +0,0 @@ -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/cpp-package/README.md b/cpp-package/README.md deleted file mode 100644 index 77ff0ee36e80..000000000000 --- a/cpp-package/README.md +++ /dev/null @@ -1,62 +0,0 @@ - - - - - - - - - - - - - - - - - -# MXNet C++ Package - -The MXNet C++ Package provides C++ API bindings to the users of MXNet. Currently, these bindings are not available as standalone package. -The users of these bindings are required to build this package as mentioned below. - -## Building C++ Package - -The cpp-package directory contains the implementation of C++ API. As mentioned above, users are required to build this directory or package before using it. -**The cpp-package is built while building the MXNet shared library, *libmxnet.so*.** - -### Steps to build the C++ package: -1. Building the MXNet C++ package requires building MXNet from source. -2. Clone the MXNet GitHub repository **recursively** to ensure the code in submodules is available for building MXNet. - ``` - git clone --recursive https://github.com/apache/incubator-mxnet mxnet - ``` - -3. Install the [prerequisites](), desired [BLAS libraries]() and optional [OpenCV, CUDA, and cuDNN]() for building MXNet from source. -4. There is a configuration file for make, [make/config.mk]() that contains all the compilation options. You can edit this file and set the appropriate options prior to running the **make** command. -5. Please refer to [platform specific build instructions]() and available [build configurations](https://mxnet.apache.org/install/build_from_source#build-configurations) for more details. -5. 
To enable the build of the C++ package, set **USE\_CPP\_PACKAGE = 1** in [make/config.mk](). Optionally, the compilation flag can also be specified on the **make** command line as follows. - ``` - make -j USE_CPP_PACKAGE=1 - ``` - -## Usage - -In order to consume the C++ API, please follow the steps below. - -1. Ensure that the MXNet shared library is built from source with **USE\_CPP\_PACKAGE = 1**. -2. Include the [MxNetCpp.h]() header in the program that is going to consume the MXNet C++ API. - ``` - #include <mxnet-cpp/MxNetCpp.h> - ``` -3. While building the program, ensure that the correct paths to the directories containing the header files and the MXNet shared library are specified. -4. The program links the MXNet shared library dynamically. Hence the library needs to be accessible to the program during runtime. This can be achieved by including the path to the shared library in the environment variable **LD\_LIBRARY\_PATH** on Linux, Mac, and Ubuntu, and **PATH** on Windows. - - -## Tutorial - -A basic tutorial can be found at . - -## Examples - -The example directory contains examples for you to get started. Please build the MXNet C++ Package before building the examples. diff --git a/cpp-package/cpp-package.mk b/cpp-package/cpp-package.mk deleted file mode 100644 index b9e7c33311a1..000000000000 --- a/cpp-package/cpp-package.mk +++ /dev/null @@ -1,45 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -ifndef LINT_LANG - LINT_LANG="all" -endif - -ifdef CAFFE_PATH -export LD_LIBRARY_PATH=$(CAFFE_PATH)/lib -endif - -CPP_PACKAGE_OP_H_FILE = cpp-package/include/mxnet-cpp/op.h - -EXTRA_PACKAGES += cpp-package-all -EXTRA_PACKAGES_CLEAN += cpp-package-clean - -.PHONY: cpp-package-all cpp-package-lint cpp-package-clean - -cpp-package-all: $(CPP_PACKAGE_OP_H_FILE) - -cpp-package-clean: - rm -f $(CPP_PACKAGE_OP_H_FILE) - -$(CPP_PACKAGE_OP_H_FILE): lib/libmxnet.so cpp-package/scripts/OpWrapperGenerator.py - (cd cpp-package/scripts; python OpWrapperGenerator.py $(ROOTDIR)/lib/libmxnet.so) - -cpp-package-lint: - (cd cpp-package; python scripts/lint.py dmlc ${LINT_LANG} include example) - -include cpp-package/example/example.mk -include cpp-package/example/inference/inference.mk diff --git a/cpp-package/example/CMakeLists.txt b/cpp-package/example/CMakeLists.txt deleted file mode 100644 index d54843f319b4..000000000000 --- a/cpp-package/example/CMakeLists.txt +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License.
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Explicitly set GENERATED property https://gitlab.kitware.com/cmake/cmake/issues/18399 -set_property(SOURCE ${CMAKE_CURRENT_LIST_DIR}/../include/mxnet-cpp/op.h PROPERTY GENERATED 1) - -if(MSVC) - add_custom_target(cpp_package_deploy_library ALL - DEPENDS mxnet - COMMAND ${CMAKE_COMMAND} -E copy $ $) -endif() diff --git a/cpp-package/example/Makefile b/cpp-package/example/Makefile deleted file mode 100644 index d42cf455386c..000000000000 --- a/cpp-package/example/Makefile +++ /dev/null @@ -1,56 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -ifeq ($(OS),Windows_NT) - UNAME_S := Windows -else - UNAME_S := $(shell uname -s) -endif - -prebuild : - @mkdir -p build - $(shell ./get_data.sh) - $(shell cp -r ../../lib ./) -CPPEX_SRC = $(wildcard *.cpp) -CPPEX_EXE = $(patsubst %.cpp, %, $(CPPEX_SRC)) - -CFLAGS += -I../../include -I../../3rdparty/tvm/nnvm/include -I../../3rdparty/dmlc-core/include -I../include - -ifeq ($(MXNET_USE_CPU),1) - CFLAGS += -D MXNET_USE_CPU -endif - -# CPPEX_CFLAGS += -I../include -CPPEX_EXTRA_LDFLAGS := -L../../lib -lmxnet -MXNET_LIB_PATH := $(shell cd ../../lib; pwd) - -.PHONY: all clean - -all: prebuild $(CPPEX_EXE) - -debug: CPPEX_CFLAGS += -DDEBUG -g -debug: prebuild all - -$(CPPEX_EXE):% : %.cpp - $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -o build/$@ $(filter %.cpp %.a, $^) $(CPPEX_EXTRA_LDFLAGS) -ifeq ($(UNAME_S), Darwin) - install_name_tool -add_rpath @loader_path build/$@ - install_name_tool -add_rpath $(MXNET_LIB_PATH) build/$@ -endif - -clean: - @rm -rf build diff --git a/cpp-package/example/README.md b/cpp-package/example/README.md deleted file mode 100644 index 208532e0066e..000000000000 --- a/cpp-package/example/README.md +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - -# MXNet C++ Package Examples - -## Building C++ examples - -The examples in this folder demonstrate the **training** workflow. The **inference workflow** related examples can be found in [inference]() folder. -Please build the MXNet C++ Package as explained in the [README]() File before building these examples manually. -The examples in this folder are built while building the MXNet library and cpp-package from source. 
However, they can be built manually as follows - -From cpp-package/examples directory - -- Build all examples in release mode: **make all** -- Build all examples in debug mode: **make debug** - -By default, the examples are built to be run on GPU. To build examples to run on CPU: - -- Release: **make all MXNET\_USE\_CPU=1** -- Debug: **make debug MXNET\_USE\_CPU=1** - -The examples that are built to be run on GPU may not work on the non-GPU machines. -The makefile will also download the necessary data files and store in a data folder. (The download will take couple of minutes, but will be done only once on a fresh installation.) diff --git a/cpp-package/example/example.mk b/cpp-package/example/example.mk deleted file mode 100644 index cf92e4076d18..000000000000 --- a/cpp-package/example/example.mk +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -CPPEX_SRC = $(wildcard cpp-package/example/*.cpp) -CPPEX_EXE = $(patsubst cpp-package/example/%.cpp, build/cpp-package/example/%, $(CPPEX_SRC)) - -CPPEX_CFLAGS += -Icpp-package/include -CPPEX_EXTRA_LDFLAGS := -L$(ROOTDIR)/lib -lmxnet - -EXTRA_PACKAGES += cpp-package-example-all -EXTRA_PACKAGES_CLEAN += cpp-package-example-clean - -.PHONY: cpp-package-example-all cpp-package-example-clean - -cpp-package-example-all: cpp-package-all $(CPPEX_EXE) - -build/cpp-package/example/% : cpp-package/example/%.cpp lib/libmxnet.so $(CPP_PACKAGE_OP_H_FILE) - @mkdir -p $(@D) - $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -MM -MT cpp-package/example/$* $< >build/cpp-package/example//$*.d - $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -o $@ $(filter %.cpp %.a, $^) $(LDFLAGS) $(CPPEX_EXTRA_LDFLAGS) - -cpp-package-example-clean: - rm -rf build/cpp-package/example/* - --include build/cpp-package/example/*.d diff --git a/cpp-package/example/feature_extract/Makefile b/cpp-package/example/feature_extract/Makefile deleted file mode 100644 index 084b60632729..000000000000 --- a/cpp-package/example/feature_extract/Makefile +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. - -CXX=g++ -BLAS=-L /opt/openblas/lib -lopenblas -DMSHADOW_USE_CBLAS=1 -DMSHADOW_USE_MKL=0 -CUDA=-DMSHADOW_USE_CUDA=1 -OPENCV_CFLAGS=`pkg-config --cflags opencv` -OPENCV_LDFLAGS=`pkg-config --libs opencv` - -CFLAGS=$(COMMFLAGS) -I../../../3rdparty/nnvm/include -I../../../3rdparty/dmlc-core/include -I ../../include -I ../../../include -Wall -O3 -msse3 -funroll-loops -Wno-unused-parameter -Wno-unknown-pragmas -fopenmp -LDFLAGS=$(COMMFLAGS) -L ../../../lib -lmxnet $(BLAS) $(CUDA) -lgomp -pthread - -all: feature_extract prepare_data_with_opencv - -feature_extract: ./feature_extract.cpp - $(CXX) -c -std=c++17 $(CFLAGS) $^ - $(CXX) $(basename $@).o -o $@ $(LDFLAGS) - -rm -f $(basename $@).o - -prepare_data_with_opencv: ./prepare_data_with_opencv.cpp - $(CXX) -c -std=c++17 $(OPENCV_CFLAGS) $^ - $(CXX) $(basename $@).o -o $@ $(OPENCV_LDFLAGS) - -rm -f $(basename $@).o - -clean: - -rm -f feature_extract - -rm -f prepare_data_with_opencv diff --git a/cpp-package/example/feature_extract/README.md b/cpp-package/example/feature_extract/README.md deleted file mode 100644 index 0b94bef7705f..000000000000 --- a/cpp-package/example/feature_extract/README.md +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - -This example shows how to extract features with a pretrained model. - -Execute `run.sh` to: -- Download a pretrained model -- Download sample pictures (`dog.jpg` and `cat.jpg`) -- Compile the files -- Execute the featurization on `dog.jpg` and `cat.jpg` - - -Note: -1. The filename of network parameters may vary, line 67 in `feature_extract.cpp` should be updated accordingly. -2. You need to build MXNet from source to get access to the `lib/libmxnet.so` or point `LD_LIBRARY_PATH` to where it is installed in your system diff --git a/cpp-package/example/feature_extract/feature_extract.cpp b/cpp-package/example/feature_extract/feature_extract.cpp deleted file mode 100644 index d614fd576238..000000000000 --- a/cpp-package/example/feature_extract/feature_extract.cpp +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - */ -#include -#include -#include -#include -#include -#include "mxnet-cpp/MxNetCpp.h" -using namespace std; -using namespace mxnet::cpp; - -/* - * This example shows how to extract features with a pretrained model. 
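
/*
 * Aside (not part of the original file): the example Makefile above defines
 * MXNET_USE_CPU when building for CPU-only machines, so the global context
 * that this example hard-codes below could equally be selected at compile
 * time, roughly like this:
 *
 *   #ifdef MXNET_USE_CPU
 *   Context global_ctx(kCPU, 0);
 *   #else
 *   Context global_ctx(kGPU, 0);
 *   #endif
 */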
- * Get the model here: - * https://github.com/dmlc/mxnet-model-gallery - * */ - -/*The global context, change them if necessary*/ -Context global_ctx(kGPU, 0); -// Context global_ctx(kCPU,0); - -class FeatureExtractor { - private: - /*the mean image, get from the pretrained model*/ - NDArray mean_img; - /*the following two maps store all the paramters need by the model*/ - map args_map; - map aux_map; - Symbol net; - Executor *executor; - /*Get the feature layer we want to extract*/ - void GetFeatureSymbol() { - /* - * use the following to check all the layers' names: - * */ - /* - net=Symbol::Load("./model/Inception_BN-symbol.json").GetInternals(); - for(const auto & layer_name:net.ListOutputs()){ - LG< paramters; - NDArray::Load("./model/Inception-BN-0126.params", 0, ¶mters); - for (const auto &k : paramters) { - if (k.first.substr(0, 4) == "aux:") { - auto name = k.first.substr(4, k.first.size() - 4); - aux_map[name] = k.second.Copy(global_ctx); - } - if (k.first.substr(0, 4) == "arg:") { - auto name = k.first.substr(4, k.first.size() - 4); - args_map[name] = k.second.Copy(global_ctx); - } - } - /*WaitAll is need when we copy data between GPU and the main memory*/ - NDArray::WaitAll(); - } - void GetMeanImg() { - mean_img = NDArray(Shape(1, 3, 224, 224), global_ctx, false); - mean_img.SyncCopyFromCPU( - NDArray::LoadToMap("./model/mean_224.nd")["mean_img"].GetData(), - 1 * 3 * 224 * 224); - NDArray::WaitAll(); - } - - public: - FeatureExtractor() { - /*prepare the model, fill the pretrained parameters, get the mean image*/ - GetFeatureSymbol(); - LoadParameters(); - GetMeanImg(); - } - - void Extract(NDArray data) { - /*Normalize the pictures*/ - data.Slice(0, 1) -= mean_img; - data.Slice(1, 2) -= mean_img; - args_map["data"] = data; - /*bind the executor*/ - executor = net.SimpleBind(global_ctx, args_map, map(), - map(), aux_map); - executor->Forward(false); - /*print out the features*/ - auto array = executor->outputs[0].Copy(Context(kCPU, 0)); - NDArray::WaitAll(); - array = array.Reshape({2, 1024}); - for (int i = 0; i < 1024; ++i) { - cout << array.At(0, i) << ","; - } - cout << endl; - } -}; - -NDArray Data2NDArray() { - NDArray ret(Shape(2, 3, 224, 224), global_ctx, false); - ifstream inf("./img.dat", ios::binary); - vector data(2 * 3 * 224 * 224); - inf.read(reinterpret_cast(data.data()), 2 * 3 * 224 * 224 * sizeof(float)); - inf.close(); - ret.SyncCopyFromCPU(data.data(), 2 * 3 * 224 * 224); - NDArray::WaitAll(); - return ret; -} - -int main() { - /* - * get the data from a binary file ./img.data - * this file is generated by ./prepare_data_with_opencv - * it stores 2 pictures in NDArray format - * - */ - auto data = Data2NDArray(); - FeatureExtractor fe; - fe.Extract(data); - return 0; -} diff --git a/cpp-package/example/feature_extract/prepare_data_with_opencv.cpp b/cpp-package/example/feature_extract/prepare_data_with_opencv.cpp deleted file mode 100644 index fe32e896adb1..000000000000 --- a/cpp-package/example/feature_extract/prepare_data_with_opencv.cpp +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- */
-#include <fstream>
-#include <iostream>
-#include <string>
-#include <vector>
-#include <opencv2/opencv.hpp>
-
-using namespace std;
-
-/* Read images and store them in the NDArray format that MXNet.cpp can handle. */
-void Mat2Array() {
-  string file_name_list[] = {"./dog.jpg", "./cat.jpg"};
-
-  std::vector<float> array;
-  for (auto &t : file_name_list) {
-    cv::Mat mat = cv::imread(t);
-    /* Resize pictures to (224, 224) according to the pretrained model. */
-    cv::resize(mat, mat, cv::Size(224, 224));
-    for (int c = 0; c < 3; ++c) {
-      for (int i = 0; i < 224; ++i) {
-        for (int j = 0; j < 224; ++j) {
-          array.push_back(static_cast<float>(mat.data[(i * 224 + j) * 3 + c]));
-        }
-      }
-    }
-  }
-  ofstream outf("./img.dat", ios::binary);
-  outf.write(reinterpret_cast<char *>(array.data()), array.size() * sizeof(float));
-  outf.close();
-}
-
-int main(int argc, char *argv[]) {
-  Mat2Array();
-  return 0;
-}
diff --git a/cpp-package/example/feature_extract/run.sh b/cpp-package/example/feature_extract/run.sh
deleted file mode 100755
index b98ddb9eb81e..000000000000
--- a/cpp-package/example/feature_extract/run.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
- -# Downloading the data and model -mkdir -p model -wget -nc -O model/Inception-BN-symbol.json \ - http://data.mxnet.io/mxnet/models/imagenet/inception-bn/Inception-BN-symbol.json -wget -nc -O model/synset.txt \ - http://data.mxnet.io/mxnet/models/imagenet/synset.txt -wget -nc -O model/Inception-BN-0126.params \ - http://data.mxnet.io/mxnet/models/imagenet/inception-bn/Inception-BN-0126.params?raw=true -wget -nc -O cat.jpg https://github.com/dmlc/web-data/blob/master/mxnet/doc/tutorials/python/predict_image/cat.jpg?raw=true -wget -nc -O dog.jpg https://github.com/dmlc/web-data/blob/master/mxnet/doc/tutorials/python/predict_image/dog.jpg?raw=true -wget -nc -O model/mean_224.nd https://github.com/dmlc/web-data/raw/master/mxnet/example/feature_extract/mean_224.nd -tar -xvzf inception-bn.tar.gz -C model --skip-old-files - -# Building -make - -# Preparing the data -./prepare_data_with_opencv - -# Running the featurization -LD_LIBRARY_PATH=../../../lib ./feature_extract diff --git a/cpp-package/example/get_data.sh b/cpp-package/example/get_data.sh deleted file mode 100755 index e11077234ade..000000000000 --- a/cpp-package/example/get_data.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -e - -mkdir -p data/mnist_data -cd data/mnist_data - -download () { - local URL=$1 - local GZ_FILE_NAME="${URL##*/}" - - local FILE_NAME="${GZ_FILE_NAME%.*}" - if [[ -f "${FILE_NAME}" ]]; then - echo "File ${FILE_NAME} already downloaded." - return 0 - fi - - echo "Downloading ${URL} ..." - local CURL_OPTIONS="--connect-timeout 10 \ - --max-time 300 \ - --retry-delay 10 \ - --retry 3 \ - --retry-delay 0 \ - --location \ - --silent" - curl ${CURL_OPTIONS} ${URL} -o ${GZ_FILE_NAME} - - if [[ ! -f "${GZ_FILE_NAME}" ]]; then - echo "File ${URL} couldn't be downloaded!" - exit 1 - fi - - gzip -d ${GZ_FILE_NAME} - (($? != 0)) && exit 1 || return 0 -} - -# MNIST dataset from: http://yann.lecun.com/exdb/mnist/ -FILES=( - "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz" - "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz" - "http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz" - "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz" - "http://data.mxnet.io/data/mnist_train.csv.gz") - -for FILE in ${FILES[@]}; do - download ${FILE} -done diff --git a/cpp-package/example/inference/CMakeLists.txt b/cpp-package/example/inference/CMakeLists.txt deleted file mode 100644 index 0566d28a57df..000000000000 --- a/cpp-package/example/inference/CMakeLists.txt +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Explicitly set GENERATED property https://gitlab.kitware.com/cmake/cmake/issues/18399 -set_property(SOURCE ${CMAKE_CURRENT_LIST_DIR}/../../include/mxnet-cpp/op.h PROPERTY GENERATED 1) - -add_executable(imagenet_inference "imagenet_inference.cpp") -target_link_libraries(imagenet_inference mxnet_cpp) diff --git a/cpp-package/example/inference/Makefile b/cpp-package/example/inference/Makefile deleted file mode 100644 index a0ec819e3749..000000000000 --- a/cpp-package/example/inference/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -CPPEX_SRC = $(wildcard *.cpp) -CPPEX_EXE = $(patsubst %.cpp, %, $(CPPEX_SRC)) -OPENCV_CFLAGS=`pkg-config --cflags opencv` -OPENCV_LDFLAGS=`pkg-config --libs opencv` - -CXX=g++ - - -CFLAGS=$(COMMFLAGS) -I../../../3rdparty/tvm/nnvm/include -I../../../3rdparty/dmlc-core/include -I ../../include -I ../../../include -Wall -O3 -msse3 -funroll-loops -Wno-unused-parameter -Wno-unknown-pragmas -CPPEX_EXTRA_LDFLAGS := -L../../../lib -lmxnet $(OPENCV_LDFLAGS) - -all: $(CPPEX_EXE) - -debug: CPPEX_CFLAGS += -DDEBUG -g -debug: all - - -$(CPPEX_EXE):% : %.cpp - $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -o $@ $(filter %.cpp %.a, $^) $(CPPEX_EXTRA_LDFLAGS) - -clean: - rm -f $(CPPEX_EXE) diff --git a/cpp-package/example/inference/README.md b/cpp-package/example/inference/README.md deleted file mode 100644 index 90047e5fe14f..000000000000 --- a/cpp-package/example/inference/README.md +++ /dev/null @@ -1,213 +0,0 @@ - - - - - - - - - - - - - - - - - -# MXNet C++ Package Inference Workflow Examples - -## Building C++ Inference examples - -The examples in this folder demonstrate the **inference** workflow. Please build the MXNet C++ Package as explained in the [README]() File before building these examples. -To build examples use following commands: - -- Release: **make all** -- Debug: **make debug all** - - -## Examples demonstrating inference workflow - -This directory contains following examples. In order to run the examples, ensure that the path to the MXNet shared library is added to the OS specific environment variable viz. **LD\_LIBRARY\_PATH** for Linux, Mac and Ubuntu OS and **PATH** for Windows OS. 
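A quick way to confirm that the shared library can actually be located at runtime is a trivial program against the C API; a minimal sketch, assuming only `mxnet/c_api.h` and a discoverable `libmxnet.so` (failure to locate the library typically shows up as a loader error before `main` runs):

```
#include <cstdio>
#include "mxnet/c_api.h"

int main() {
  int version = 0;
  // MXGetVersion returns 0 on success and fills in the library version.
  if (MXGetVersion(&version) != 0) {
    std::fprintf(stderr, "MXGetVersion failed\n");
    return 1;
  }
  std::printf("libmxnet version: %d\n", version);
  return 0;
}
```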
-
-## [imagenet_inference.cpp]()
-
-This example demonstrates the image classification workflow with pre-trained models using the MXNet C++ API. The script now also supports inference with quantized CNN models generated by Intel® MKL-DNN (see this [quantization flow](https://github.com/apache/incubator-mxnet/blob/master/example/quantization/README.md)). Using the C++ API reduces the latency of most models to some extent compared with the current Python implementation.
-
-Most of the CNN models have been tested on Linux systems, and 50000 images are used to collect the accuracy numbers. Please refer to this [README](https://github.com/apache/incubator-mxnet/blob/master/example/quantization/README.md) for more details about accuracy.
-
-The following performance numbers are collected by using the C++ inference API on an AWS EC2 C5.12xlarge instance. The environment variables are set as below:
-
-```
-export KMP_AFFINITY=granularity=fine,noduplicates,compact,1,0
-export OMP_NUM_THREADS=$(vCPUs/2)
-export MXNET_ENGINE_TYPE=NaiveEngine
-```
-Users are also recommended to use ```numactl``` or ```taskset``` to bind a running process to the specified cores.
-
-| Model | Dataset |BS=1 (imgs/sec) |BS=64 (imgs/sec) |
-|:---|:---|:---:|:---:|
-| | |FP32 / INT8 | FP32 / INT8 |
-| ResNet18-V1 | [Validation Dataset](http://data.mxnet.io/data/val_256_q90.rec) |369.00 / 778.82|799.7 / 2598.04|
-| ResNet50-V1 | [Validation Dataset](http://data.mxnet.io/data/val_256_q90.rec) |160.72 / 405.84|349.73 / 1297.65 |
-| ResNet101-V1 | [Validation Dataset](http://data.mxnet.io/data/val_256_q90.rec) | 89.56 / 197.55| 193.25 / 740.47|
-|Squeezenet 1.0|[Validation Dataset](http://data.mxnet.io/data/val_256_q90.rec) | 294.46 / 899.28| 857.70 / 3065.13|
-|MobileNet 1.0|[Validation Dataset](http://data.mxnet.io/data/val_256_q90.rec) |554.94 / 676.59|1279.44 / 3393.43|
-|MobileNetV2 1.0|[Validation Dataset](http://data.mxnet.io/data/val_256_q90.rec) |303.40 / 776.40|994.25 / 4227.77|
-|Inception V3|[Validation Dataset](http://data.mxnet.io/data/val_256_q90.rec) |108.20 / 219.20 | 232.22 / 870.09 |
-|ResNet152-V2|[Validation Dataset](http://data.mxnet.io/data/val_256_q90.rec) |52.28 / 64.62|107.03 / 134.04 |
-|Inception-BN|[Validation Dataset](http://data.mxnet.io/data/val_256_q90.rec) | 211.86 / 306.37| 632.79 / 2115.28|
-
-The command-line options accepted by this script are shown below:
-```
-./imagenet_inference --help
-Usage:
-imagenet_inference --symbol_file <model symbol file in json format>
-                   --params_file <model params file>
-                   --dataset <dataset used to run inference>
-                   --data_nthreads <number of threads for data decoding>
-                   --input_shape <shape of the input image, e.g. "3 224 224">
-                   --rgb_mean <RGB mean to subtract per channel, e.g. "123.68 116.779 103.939">
-                   --rgb_std <RGB standard deviation per channel, e.g. "58.393 57.12 57.375">
-                   --batch_size <number of images per batch>
-                   --num_skipped_batches <number of batches to skip before inference>
-                   --num_inference_batches <number of batches used for inference>
-                   --data_layer_type <data type for the data layer: "float32", "int8" or "uint8">
-                   --gpu <run inference on GPU>
-                   --enableTRT <run inference with TensorRT>
-                   --benchmark <use dummy data for inference>
-```
-
-Follow the steps below to run inference with more models.
-
-- Download the pre-trained FP32 models into the ```./model``` directory.
-- Refer to this [README](https://github.com/apache/incubator-mxnet/blob/master/example/quantization/README.md) to generate the corresponding quantized models and also put them into the ```./model``` directory.
-- Prepare the [validation dataset](http://data.mxnet.io/data/val_256_q90.rec) and put it into the ```./data``` directory.
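For reference, the `--input_shape` and `--batch_size` flags are combined into the shape of the input NDArray; a minimal sketch of that mapping, mirroring what `main` in `imagenet_inference.cpp` does (the parsed dimensions here are hard-coded for illustration):

```
#include <vector>
#include "mxnet-cpp/MxNetCpp.h"

using namespace mxnet::cpp;

int main() {
  // '--input_shape "3 224 224"' parsed into dimensions, as in the example.
  std::vector<index_t> dims = {3, 224, 224};
  int batch_size = 64;                  // "--batch_size 64"
  dims.insert(dims.begin(), batch_size);
  Shape input_data_shape(dims);         // (64, 3, 224, 224)
  NDArray data(input_data_shape, Context::cpu(), false);
  return 0;
}
```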
-
-The command lines below show how to run inference with the FP32/INT8 resnet50_v1 model. The C++ inference script provides almost the same command line as this [Python script](https://github.com/apache/incubator-mxnet/blob/master/example/quantization/imagenet_inference.py), so users can easily go from Python to C++.
-```
-
-# FP32 inference
-./imagenet_inference --symbol_file "./model/resnet50_v1-symbol.json" --params_file "./model/resnet50_v1-0000.params" --dataset "./data/val_256_q90.rec" --rgb_mean "123.68 116.779 103.939" --rgb_std "58.393 57.12 57.375" --batch_size 64 --num_skipped_batches 50 --num_inference_batches 500
-
-# INT8 inference
-./imagenet_inference --symbol_file "./model/resnet50_v1-quantized-5batches-naive-symbol.json" --params_file "./model/resnet50_v1-quantized-0000.params" --dataset "./data/val_256_q90.rec" --rgb_mean "123.68 116.779 103.939" --rgb_std "58.393 57.12 57.375" --batch_size 64 --num_skipped_batches 50 --num_inference_batches 500
-
-# FP32 dummy data
-./imagenet_inference --symbol_file "./model/resnet50_v1-symbol.json" --batch_size 64 --num_inference_batches 500 --benchmark
-
-# INT8 dummy data
-./imagenet_inference --symbol_file "./model/resnet50_v1-quantized-5batches-naive-symbol.json" --batch_size 64 --num_inference_batches 500 --benchmark
-
-```
-For a quick inference test, users can directly run [unit_test_imagenet_inference.sh]() by using the command below. This script will automatically download the pre-trained **Inception-BN** and **resnet50_v1_int8** models and the **validation dataset** required for inference.
-
-```
-./unit_test_imagenet_inference.sh
-```
-You may get output similar to the following:
-```
->>> INFO: FP32 real data
-imagenet_inference.cpp:282: Loading the model from ./model/Inception-BN-symbol.json
-imagenet_inference.cpp:295: Loading the model parameters from ./model/Inception-BN-0126.params
-imagenet_inference.cpp:443: INFO:Dataset for inference: ./data/val_256_q90.rec
-imagenet_inference.cpp:444: INFO:label_name = softmax_label
-imagenet_inference.cpp:445: INFO:rgb_mean: (123.68, 116.779, 103.939)
-imagenet_inference.cpp:447: INFO:rgb_std: (1, 1, 1)
-imagenet_inference.cpp:449: INFO:Image shape: (3, 224, 224)
-imagenet_inference.cpp:451: INFO:Finished inference with: 500 images
-imagenet_inference.cpp:453: INFO:Batch size = 1 for inference
-imagenet_inference.cpp:454: INFO:Accuracy: 0.744
-imagenet_inference.cpp:455: INFO:Throughput: xxxx images per second
-
->>> INFO: FP32 dummy data
-imagenet_inference.cpp:282: Loading the model from ./model/Inception-BN-symbol.json
-imagenet_inference.cpp:372: Running the forward pass on model to evaluate the performance..
-imagenet_inference.cpp:387: benchmark completed!
-imagenet_inference.cpp:388: batch size: 1 num batch: 500 throughput: xxxx imgs/s latency:xxxx ms
-
->>> INFO: INT8 dummy data
-imagenet_inference.cpp:282: Loading the model from ./model/resnet50_v1_int8-symbol.json
-imagenet_inference.cpp:372: Running the forward pass on model to evaluate the performance..
-imagenet_inference.cpp:387: benchmark completed!
-imagenet_inference.cpp:388: batch size: 1 num batch: 500 throughput: xxxx imgs/s latency:xxxx ms
-```
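The accuracy figure in the log above is accumulated with the C++ `Accuracy` metric over the validation batches; schematically (a reduced sketch of `Predictor::Score` in `imagenet_inference.cpp`, with `val_iter`, `executor`, and `args_map` standing in for the corresponding class members):

```
Accuracy val_acc;
val_acc.Reset();
while (val_iter->Next()) {
  auto batch = val_iter->GetDataBatch();
  batch.data.CopyTo(&args_map["data"]);
  batch.label.CopyTo(&args_map["softmax_label"]);
  NDArray::WaitAll();
  executor->Forward(false);  // inference only, no gradients
  NDArray::WaitAll();
  val_acc.Update(batch.label, executor->outputs[0]);
}
LG << "Accuracy: " << val_acc.Get();
```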
-For running this example with TensorRT, you can quickly try the following command to run a benchmark test on Inception-BN:
-```
-./imagenet_inference --symbol_file "./model/Inception-BN-symbol.json" --params_file "./model/Inception-BN-0126.params" --batch_size 16 --num_inference_batches 500 --benchmark --enableTRT
-```
-Sample output will look like this (the example is running on an AWS P3.2xl machine):
-```
-imagenet_inference.cpp:302: Loading the model from ./model/Inception-BN-symbol.json
-build_subgraph.cc:686: start to execute partition graph.
-imagenet_inference.cpp:317: Loading the model parameters from ./model/Inception-BN-0126.params
-imagenet_inference.cpp:424: Running the forward pass on model to evaluate the performance..
-imagenet_inference.cpp:439: benchmark completed!
-imagenet_inference.cpp:440: batch size: 16 num batch: 500 throughput: 6284.78 imgs/s latency:0.159115 ms
-```
-
-## [sentiment_analysis_rnn.cpp]()
-This example demonstrates how you can load a pre-trained RNN model and use it to predict the sentiment expressed in a given movie review with the MXNet C++ API. The example is capable of processing variable length inputs. It performs the following tasks:
-- Loads the pre-trained RNN model.
-- Loads the dictionary file containing the word-to-index mapping.
-- Splits the review into multiple lines separated by ".".
-- Predicts the sentiment score for each line and outputs the average score.
-
-The example is capable of processing variable length input by implementing the following technique:
-- The example creates executors for pre-determined input lengths, such as 5, 10, 15, 20, 25, etc., called **buckets**.
-- Each bucket is identified by a **bucket-key** representing the length of input required by the corresponding executor.
-- For each line in the review, the example finds the number of words in the line and tries to find the closest bucket or executor.
-- If the bucket key does not match the number of words in the line, the example pads or trims the input line to match the required length.
-
-The example uses a pre-trained RNN model trained with the IMDB dataset. The RNN model was built by exercising the [GluonNLP Sentiment Analysis Tutorial](). The tutorial uses 'standard_lstm_lm_200' available in the Gluon Model Zoo and fine-tunes it for the IMDB dataset.
-The model consists of:
-- Embedding Layer
-- 2 LSTM Layers with a hidden dimension size of 200
-- Average pooling layer
-- Sigmoid output layer
-The model was trained for 10 epochs to achieve 85% test accuracy.
-The visual representation of the model is [here]().
-
-The model files can be found here:
-- [sentiment_analysis-symbol.json](https://s3.amazonaws.com/mxnet-cpp/RNN_model/sentiment_analysis-symbol.json)
-- [sentiment_analysis-0010.params](https://s3.amazonaws.com/mxnet-cpp/RNN_model/sentiment_analysis-0010.params)
-- [sentiment_token_to_idx.txt]() Each line of the dictionary file contains a word and a unique index for that word, separated by a space, with a total of 32787 words generated from the training dataset.
-The example downloads the above files while running.
-
-The example's command-line parameters are as shown below:
-
-```
-./sentiment_analysis_rnn --help
-Usage:
-sentiment_analysis_rnn
---input Input movie review. The review can be single line or multiline, e.g. "This movie is the best." OR "This movie is the best. The direction is awesome."
-[--gpu] Specify this option if the workflow needs to be run in gpu context
-If the review is multiline, the example predicts the sentiment score for each line and the final score is the average of the scores obtained for each line.
-```
-
-The following command line shows running the example with a movie review containing only one line.
- -``` -./sentiment_analysis_rnn --input "This movie has the great story" -``` - -The above command will output the sentiment score as follows: -``` -sentiment_analysis_rnn.cpp:346: Input Line : [This movie has the great story] Score : 0.999898 -sentiment_analysis_rnn.cpp:449: The sentiment score between 0 and 1, (1 being positive)=0.999898 -``` - -The following command line shows invoking the example with the multi-line review. - -``` -./sentiment_analysis_rnn --input "This movie is the best. The direction is awesome." -``` -The above command will output the sentiment score for each line in the review and average score as follows: -``` -Input Line : [This movie is the best] Score : 0.964498 -Input Line : [ The direction is awesome] Score : 0.968855 -The sentiment score between 0 and 1, (1 being positive)=0.966677 -``` - -Alternatively, you can run the [unit_test_sentiment_analysis_rnn.sh]() script. diff --git a/cpp-package/example/inference/imagenet_inference.cpp b/cpp-package/example/inference/imagenet_inference.cpp deleted file mode 100644 index 845a227fe93d..000000000000 --- a/cpp-package/example/inference/imagenet_inference.cpp +++ /dev/null @@ -1,662 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * This example demonstrates image classification workflow with pre-trained models using MXNet C++ API. - * The example performs following tasks. - * 1. Load the pre-trained model. - * 2. Load the parameters of pre-trained model. - * 3. Load the inference dataset and create a new ImageRecordIter. - * 4. Run the forward pass and obtain throughput & accuracy. - */ -#ifndef _WIN32 -#include -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "mxnet/c_api.h" -#include "mxnet/tuple.h" -#include "mxnet-cpp/MxNetCpp.h" -#include "mxnet-cpp/initializer.h" - -using namespace mxnet::cpp; - -double ms_now() { - double ret; -#ifdef _WIN32 - auto timePoint = std::chrono::high_resolution_clock::now().time_since_epoch(); - ret = std::chrono::duration(timePoint).count(); -#else - struct timeval time; - gettimeofday(&time, nullptr); - ret = 1e+3 * time.tv_sec + 1e-3 * time.tv_usec; -#endif - return ret; -} - - -// define the data type for NDArray, aliged with the definition in mshadow/base.h -enum TypeFlag { - kFloat32 = 0, - kFloat64 = 1, - kFloat16 = 2, - kUint8 = 3, - kInt32 = 4, - kInt8 = 5, - kInt64 = 6, -}; - -/* - * class Predictor - * - * This class encapsulates the functionality to load the model, prepare dataset and run the forward pass. 
- */ - -class Predictor { - public: - Predictor() {} - Predictor(const std::string& model_json_file, - const std::string& model_params_file, - const Shape& input_shape, - bool use_gpu, - bool enable_tensorrt, - const std::string& dataset, - const int data_nthreads, - const std::string& data_layer_type, - const std::vector& rgb_mean, - const std::vector& rgb_std, - int shuffle_chunk_seed, - int seed, bool benchmark); - void BenchmarkScore(int num_inference_batches); - void Score(int num_skipped_batches, int num_inference_batches); - ~Predictor(); - - private: - bool CreateImageRecordIter(); - bool AdvanceDataIter(int skipped_batches); - void LoadModel(const std::string& model_json_file); - void LoadParameters(const std::string& model_parameters_file); - void SplitParamMap(const std::map ¶mMap, - std::map *argParamInTargetContext, - std::map *auxParamInTargetContext, - Context targetContext); - void ConvertParamMapToTargetContext(const std::map ¶mMap, - std::map *paramMapInTargetContext, - Context targetContext); - void InitParameters(); - - inline bool FileExists(const std::string &name) { - std::ifstream fhandle(name.c_str()); - return fhandle.good(); - } - int GetDataLayerType(); - - std::map args_map_; - std::map aux_map_; - Symbol net_; - Executor *executor_; - Shape input_shape_; - Context global_ctx_ = Context::cpu(); - - MXDataIter *val_iter_; - bool use_gpu_; - bool enable_tensorrt_; - std::string dataset_; - int data_nthreads_; - std::string data_layer_type_; - std::vector rgb_mean_; - std::vector rgb_std_; - int shuffle_chunk_seed_; - int seed_; - bool benchmark_; -}; - - -/* - * The constructor takes following parameters as input: - * 1. model_json_file: The model in json formatted file. - * 2. model_params_file: File containing model parameters - * 3. input_shape: Shape of input data to the model. Since this class will be running one inference at a time, - * the input shape is required to be in format Shape(1, number_of_channels, height, width) - * The input image will be resized to (height x width) size before running the inference. - * 4. use_gpu: determine if run inference on GPU - * 5. enable_tensorrt: determine if enable TensorRT - * 6. dataset: data file (.rec) to be used for inference - * 7. data_nthreads: number of threads for data loading - * 8. data_layer_type: data type for data layer - * 9. rgb_mean: mean value to be subtracted on R/G/B channel - * 10. rgb_std: standard deviation on R/G/B channel - * 11. shuffle_chunk_seed: shuffling chunk seed - * 12. seed: shuffling seed - * 13. benchmark: use dummy data for inference - * - * The constructor will: - * 1. Create ImageRecordIter based on the given dataset file. - * 2. Load the model and parameter files. - * 3. Infer and construct NDArrays according to the input argument and create an executor. 
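
/*
 * A condensed sketch (not part of the original file) of the
 * bind-for-inference pattern the constructor below implements; "net",
 * "args_map" and "aux_map" play the same roles as the class members above.
 */
static Executor* BindForInference(Symbol net, Context ctx,
                                  std::map<std::string, NDArray> args_map,
                                  std::map<std::string, NDArray> aux_map) {
  std::vector<NDArray> args, grads, auxs;
  std::vector<OpReqType> reqs;
  // Infer all argument/auxiliary arrays from the shapes already in args_map.
  net.InferExecutorArrays(ctx, &args, &grads, &reqs, &auxs, args_map,
                          std::map<std::string, NDArray>(),
                          std::map<std::string, OpReqType>(), aux_map);
  // Inference only: request no gradient computation for any argument.
  for (auto& r : reqs) r = OpReqType::kNullOp;
  return new Executor(net, ctx, args, grads, reqs, auxs);
}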
- */ -Predictor::Predictor(const std::string& model_json_file, - const std::string& model_params_file, - const Shape& input_shape, - bool use_gpu, - bool enable_tensorrt, - const std::string& dataset, - const int data_nthreads, - const std::string& data_layer_type, - const std::vector& rgb_mean, - const std::vector& rgb_std, - int shuffle_chunk_seed, - int seed, bool benchmark) - : input_shape_(input_shape), - use_gpu_(use_gpu), - enable_tensorrt_(enable_tensorrt), - dataset_(dataset), - data_nthreads_(data_nthreads), - data_layer_type_(data_layer_type), - rgb_mean_(rgb_mean), - rgb_std_(rgb_std), - shuffle_chunk_seed_(shuffle_chunk_seed), - seed_(seed), - benchmark_(benchmark) { - if (use_gpu) { - global_ctx_ = Context::gpu(); - } - - // initilize data iterator - if (!benchmark_ && !CreateImageRecordIter()) { - LG << "Error: failed to create ImageRecordIter"; - throw std::runtime_error("ImageRecordIter cannot be created"); - } - - // Load the model - LoadModel(model_json_file); - // Initilize the parameters - // benchmark=true && model_params_file.empty(), randomly initialize parameters - // else, load parameters - if (benchmark_ && model_params_file.empty()) { - InitParameters(); - } else { - LoadParameters(model_params_file); - } - - int dtype = GetDataLayerType(); - if (dtype == -1) { - throw std::runtime_error("Unsupported data layer type..."); - } - args_map_["data"] = NDArray(input_shape_, global_ctx_, false, dtype); - Shape label_shape(input_shape_[0]); - args_map_["softmax_label"] = NDArray(label_shape, global_ctx_, false); - std::vector arg_arrays; - std::vector grad_arrays; - std::vector grad_reqs; - std::vector aux_arrays; - - // infer and create ndarrays according to the given input ndarrays. - net_.InferExecutorArrays(global_ctx_, &arg_arrays, &grad_arrays, &grad_reqs, - &aux_arrays, args_map_, std::map(), - std::map(), aux_map_); - for (auto& i : grad_reqs) i = OpReqType::kNullOp; - - // Create an executor after binding the model to input parameters. - executor_ = new Executor(net_, global_ctx_, arg_arrays, grad_arrays, grad_reqs, aux_arrays); -} - -/* - * The following function is used to get the data layer type for input data - */ -int Predictor::GetDataLayerType() { - int ret_type = -1; - if (data_layer_type_ == "float32") { - ret_type = kFloat32; - } else if (data_layer_type_ == "int8") { - ret_type = kInt8; - } else if (data_layer_type_ == "uint8") { - ret_type = kUint8; - } else { - LG << "Unsupported data layer type " << data_layer_type_ << "..." 
- << "Please use one of {float32, int8, uint8}"; - } - return ret_type; -} - -/* - * create a new ImageRecordIter according to the given parameters - */ -bool Predictor::CreateImageRecordIter() { - val_iter_ = new MXDataIter("ImageRecordIter"); - if (!FileExists(dataset_)) { - LG << "Error: " << dataset_ << " must be provided"; - return false; - } - - std::vector shape_vec; - for (index_t i = 1; i < input_shape_.ndim(); i++) - shape_vec.push_back(input_shape_[i]); - mxnet::TShape data_shape(shape_vec.begin(), shape_vec.end()); - - // set image record parser parameters - val_iter_->SetParam("path_imgrec", dataset_); - val_iter_->SetParam("label_width", 1); - val_iter_->SetParam("data_shape", data_shape); - val_iter_->SetParam("preprocess_threads", data_nthreads_); - val_iter_->SetParam("shuffle_chunk_seed", shuffle_chunk_seed_); - - // set Batch parameters - val_iter_->SetParam("batch_size", input_shape_[0]); - - // image record parameters - val_iter_->SetParam("shuffle", true); - val_iter_->SetParam("seed", seed_); - - // set normalize parameters - val_iter_->SetParam("mean_r", rgb_mean_[0]); - val_iter_->SetParam("mean_g", rgb_mean_[1]); - val_iter_->SetParam("mean_b", rgb_mean_[2]); - val_iter_->SetParam("std_r", rgb_std_[0]); - val_iter_->SetParam("std_g", rgb_std_[1]); - val_iter_->SetParam("std_b", rgb_std_[2]); - - // set prefetcher parameters - if (use_gpu_) { - val_iter_->SetParam("ctx", "gpu"); - } else { - val_iter_->SetParam("ctx", "cpu"); - } - val_iter_->SetParam("dtype", data_layer_type_); - - val_iter_->CreateDataIter(); - return true; -} - -/* - * The following function loads the model from json file. - */ -void Predictor::LoadModel(const std::string& model_json_file) { - if (!FileExists(model_json_file)) { - LG << "Model file " << model_json_file << " does not exist"; - throw std::runtime_error("Model file does not exist"); - } - LG << "Loading the model from " << model_json_file << std::endl; - net_ = Symbol::Load(model_json_file); - if (enable_tensorrt_) { - net_ = net_.GetBackendSymbol("TensorRT"); - } -} - -/* - * The following function loads the model parameters. 
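
/*
 * The essential ImageRecordIter wiring used by CreateImageRecordIter above,
 * reduced to a minimal sketch; only a subset of the parameters is shown, and
 * the record-file path and batch size are illustrative. Relies on the same
 * headers this file already includes ("mxnet/tuple.h", MxNetCpp.h).
 */
static void IterateRecordFile(const std::string& rec_file, int batch_size) {
  MXDataIter val_iter("ImageRecordIter");
  std::vector<mxnet::cpp::index_t> dims = {3, 224, 224};
  mxnet::TShape data_shape(dims.begin(), dims.end());
  val_iter.SetParam("path_imgrec", rec_file);
  val_iter.SetParam("data_shape", data_shape);
  val_iter.SetParam("batch_size", batch_size);
  val_iter.CreateDataIter();
  while (val_iter.Next()) {
    auto batch = val_iter.GetDataBatch();  // batch.data and batch.label
    NDArray::WaitAll();
  }
}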
- */ -void Predictor::LoadParameters(const std::string& model_parameters_file) { - if (!FileExists(model_parameters_file)) { - LG << "Parameter file " << model_parameters_file << " does not exist"; - throw std::runtime_error("Model parameters does not exist"); - } - LG << "Loading the model parameters from " << model_parameters_file << std::endl; - std::map parameters; - NDArray::Load(model_parameters_file, 0, ¶meters); - if (enable_tensorrt_) { - std::map intermediate_args_map; - std::map intermediate_aux_map; - SplitParamMap(parameters, &intermediate_args_map, &intermediate_aux_map, Context::cpu()); - contrib::InitTensorRTParams(net_, &intermediate_args_map, &intermediate_aux_map); - ConvertParamMapToTargetContext(intermediate_args_map, &args_map_, global_ctx_); - ConvertParamMapToTargetContext(intermediate_aux_map, &aux_map_, global_ctx_); - } else { - SplitParamMap(parameters, &args_map_, &aux_map_, global_ctx_); - } - /*WaitAll is need when we copy data between GPU and the main memory*/ - NDArray::WaitAll(); -} - -/* - * The following function split loaded param map into arg parm - * and aux param with target context - */ -void Predictor::SplitParamMap(const std::map ¶mMap, - std::map *argParamInTargetContext, - std::map *auxParamInTargetContext, - Context targetContext) { - for (const auto& pair : paramMap) { - std::string type = pair.first.substr(0, 4); - std::string name = pair.first.substr(4); - if (type == "arg:") { - (*argParamInTargetContext)[name] = pair.second.Copy(targetContext); - } else if (type == "aux:") { - (*auxParamInTargetContext)[name] = pair.second.Copy(targetContext); - } - } -} - -/* - * The following function copy the param map into the target context - */ -void Predictor::ConvertParamMapToTargetContext(const std::map ¶mMap, - std::map *paramMapInTargetContext, - Context targetContext) { - for (const auto& pair : paramMap) { - (*paramMapInTargetContext)[pair.first] = pair.second.Copy(targetContext); - } -} - -/* - * The following function randomly initializes the parameters when benchmark_ is true. - */ -void Predictor::InitParameters() { - std::vector data_shape; - for (index_t i = 0; i < input_shape_.ndim(); i++) { - data_shape.push_back(input_shape_[i]); - } - - std::map > arg_shapes; - std::vector > aux_shapes, in_shapes, out_shapes; - arg_shapes["data"] = data_shape; - net_.InferShape(arg_shapes, &in_shapes, &aux_shapes, &out_shapes); - - // initializer to call - Xavier xavier(Xavier::uniform, Xavier::avg, 2.0f); - - auto arg_name_list = net_.ListArguments(); - for (index_t i = 0; i < in_shapes.size(); i++) { - const auto &shape = in_shapes[i]; - const auto &arg_name = arg_name_list[i]; - int paramType = kFloat32; - if (Initializer::StringEndWith(arg_name, "weight_quantize") || - Initializer::StringEndWith(arg_name, "bias_quantize")) { - paramType = kInt8; - } - NDArray tmp_arr(shape, global_ctx_, false, paramType); - xavier(arg_name, &tmp_arr); - args_map_[arg_name] = tmp_arr.Copy(global_ctx_); - } - - auto aux_name_list = net_.ListAuxiliaryStates(); - for (index_t i = 0; i < aux_shapes.size(); i++) { - const auto &shape = aux_shapes[i]; - const auto &aux_name = aux_name_list[i]; - NDArray tmp_arr(shape, global_ctx_, false); - xavier(aux_name, &tmp_arr); - aux_map_[aux_name] = tmp_arr.Copy(global_ctx_); - } - /*WaitAll is need when we copy data between GPU and the main memory*/ - NDArray::WaitAll(); -} - -/* - * The following function runs the forward pass on the model - * and use dummy data for benchmark. 
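
/*
 * Aside: the "arg:"/"aux:" name prefixes handled by SplitParamMap above are
 * how MXNet .params files distinguish trainable arguments from auxiliary
 * states; the same convention appears in every loader in this package. The
 * split in isolation, as a self-contained sketch:
 */
static void SplitParams(const std::map<std::string, NDArray>& loaded,
                        std::map<std::string, NDArray>* args,
                        std::map<std::string, NDArray>* auxs) {
  for (const auto& kv : loaded) {
    const std::string type = kv.first.substr(0, 4);
    const std::string name = kv.first.substr(4);
    if (type == "arg:") (*args)[name] = kv.second;
    else if (type == "aux:") (*auxs)[name] = kv.second;
  }
}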
- */ -void Predictor::BenchmarkScore(int num_inference_batches) { - // Create dummy data - std::vector dummy_data(input_shape_.Size()); - std::default_random_engine generator; - std::uniform_real_distribution val(0.0f, 1.0f); - for (size_t i = 0; i < static_cast(input_shape_.Size()); ++i) { - dummy_data[i] = static_cast(val(generator)); - } - executor_->arg_dict()["data"].SyncCopyFromCPU( - dummy_data.data(), - input_shape_.Size()); - NDArray::WaitAll(); - - LG << "Running the forward pass on model to evaluate the performance.."; - - // warm up. - for (int i = 0; i < 5; i++) { - executor_->Forward(false); - NDArray::WaitAll(); - } - - // Run the forward pass. - double ms = ms_now(); - for (int i = 0; i < num_inference_batches; i++) { - executor_->Forward(false); - NDArray::WaitAll(); - } - ms = ms_now() - ms; - LG << " benchmark completed!"; - LG << " batch size: " << input_shape_[0] << " num batch: " << num_inference_batches - << " throughput: " << 1000.0 * input_shape_[0] * num_inference_batches / ms - << " imgs/s latency:" << ms / input_shape_[0] / num_inference_batches << " ms"; -} - -/* - * \param skipped_batches skip the first number of batches - * - */ -bool Predictor::AdvanceDataIter(int skipped_batches) { - assert(skipped_batches >= 0); - if (skipped_batches == 0) return true; - int skipped_count = 0; - while (val_iter_->Next()) { - if (++skipped_count >= skipped_batches) break; - } - if (skipped_count != skipped_batches) return false; - return true; -} - -/* - * The following function runs the forward pass on the model - * and use real data for testing accuracy and performance. - */ -void Predictor::Score(int num_skipped_batches, int num_inference_batches) { - // Create metrics - Accuracy val_acc; - - val_iter_->Reset(); - val_acc.Reset(); - int nBatch = 0; - - if (!AdvanceDataIter(num_skipped_batches)) { - LG << "skipped batches should less than total batches!"; - return; - } - - double ms = ms_now(); - while (val_iter_->Next()) { - auto data_batch = val_iter_->GetDataBatch(); - data_batch.data.CopyTo(&args_map_["data"]); - data_batch.label.CopyTo(&args_map_["softmax_label"]); - NDArray::WaitAll(); - - // running on forward pass - executor_->Forward(false); - NDArray::WaitAll(); - val_acc.Update(data_batch.label, executor_->outputs[0]); - - if (++nBatch >= num_inference_batches) { - break; - } - } - ms = ms_now() - ms; - auto args_name = net_.ListArguments(); - LG << "INFO:" << "Dataset for inference: " << dataset_; - LG << "INFO:" << "label_name = " << args_name[args_name.size()-1]; - LG << "INFO:" << "rgb_mean: " << "(" << rgb_mean_[0] << ", " << rgb_mean_[1] - << ", " << rgb_mean_[2] << ")"; - LG << "INFO:" << "rgb_std: " << "(" << rgb_std_[0] << ", " << rgb_std_[1] - << ", " << rgb_std_[2] << ")"; - LG << "INFO:" << "Image shape: " << "(" << input_shape_[1] << ", " - << input_shape_[2] << ", " << input_shape_[3] << ")"; - LG << "INFO:" << "Finished inference with: " << nBatch * input_shape_[0] - << " images "; - LG << "INFO:" << "Batch size = " << input_shape_[0] << " for inference"; - LG << "INFO:" << "Accuracy: " << val_acc.Get(); - LG << "INFO:" << "Throughput: " << (1000.0 * nBatch * input_shape_[0] / ms) - << " images per second"; -} - -Predictor::~Predictor() { - if (executor_) { - delete executor_; - } - if (!benchmark_ && val_iter_) { - delete val_iter_; - } - MXNotifyShutdown(); -} - -/* - * Convert the input string of number into the vector. 
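
/*
 * Note on the timing pattern used in BenchmarkScore and Score above: MXNet
 * execution is asynchronous, so every timed Forward() is followed by
 * NDArray::WaitAll(); without the barrier, ms_now() would only measure
 * operator enqueueing. The timing skeleton in isolation (a sketch, using the
 * ms_now() helper defined earlier in this file):
 */
static double TimeForwardMs(Executor* executor, int num_batches) {
  executor->Forward(false);  // warm up once before timing
  NDArray::WaitAll();
  double start = ms_now();
  for (int i = 0; i < num_batches; ++i) {
    executor->Forward(false);
    NDArray::WaitAll();      // block until the batch has actually finished
  }
  return ms_now() - start;
}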
- */ -template -std::vector createVectorFromString(const std::string& input_string) { - std::vector dst_vec; - char *p_next; - T elem; - bool bFloat = std::is_same::value; - if (!bFloat) { - elem = strtol(input_string.c_str(), &p_next, 10); - } else { - elem = strtof(input_string.c_str(), &p_next); - } - - dst_vec.push_back(elem); - while (*p_next) { - if (!bFloat) { - elem = strtol(p_next, &p_next, 10); - } else { - elem = strtof(p_next, &p_next); - } - dst_vec.push_back(elem); - } - return dst_vec; -} - -void printUsage() { - std::cout << "Usage:" << std::endl; - std::cout << "imagenet_inference --symbol_file " << std::endl - << "--params_file " << std::endl - << "--dataset " << std::endl - << "--data_nthreads " << std::endl - << "--input_shape ] " << std::endl - << "--rgb_mean " - << std::endl - << "--rgb_std " << std::endl - << "--batch_size " << std::endl - << "--num_skipped_batches " << std::endl - << "--num_inference_batches " << std::endl - << "--data_layer_type " << std::endl - << "--gpu " << std::endl - << "--enableTRT " << std::endl - << "--benchmark " - << std::endl; -} - -int main(int argc, char** argv) { - std::string model_file_json; - std::string model_file_params; - std::string dataset(""); - std::string input_rgb_mean("0 0 0"); - std::string input_rgb_std("1 1 1"); - bool use_gpu = false; - bool enable_tensorrt = false; - bool benchmark = false; - int batch_size = 64; - int num_skipped_batches = 0; - int num_inference_batches = 100; - std::string data_layer_type("float32"); - std::string input_shape("3 224 224"); - int seed = 48564309; - int shuffle_chunk_seed = 3982304; - int data_nthreads = 60; - - int index = 1; - while (index < argc) { - if (strcmp("--symbol_file", argv[index]) == 0) { - index++; - model_file_json = (index < argc ? argv[index]:""); - } else if (strcmp("--params_file", argv[index]) == 0) { - index++; - model_file_params = (index < argc ? argv[index]:""); - } else if (strcmp("--dataset", argv[index]) == 0) { - index++; - dataset = (index < argc ? argv[index]:dataset); - } else if (strcmp("--data_nthreads", argv[index]) == 0) { - index++; - data_nthreads = strtol(argv[index], nullptr, 10); - } else if (strcmp("--input_shape", argv[index]) == 0) { - index++; - input_shape = (index < argc ? argv[index]:input_shape); - } else if (strcmp("--rgb_mean", argv[index]) == 0) { - index++; - input_rgb_mean = (index < argc ? argv[index]:input_rgb_mean); - } else if (strcmp("--rgb_std", argv[index]) == 0) { - index++; - input_rgb_std = (index < argc ? argv[index]:input_rgb_std); - } else if (strcmp("--batch_size", argv[index]) == 0) { - index++; - batch_size = strtol(argv[index], nullptr, 10); - } else if (strcmp("--num_skipped_batches", argv[index]) == 0) { - index++; - num_skipped_batches = strtol(argv[index], nullptr, 10); - } else if (strcmp("--num_inference_batches", argv[index]) == 0) { - index++; - num_inference_batches = strtol(argv[index], nullptr, 10); - } else if (strcmp("--data_layer_type", argv[index]) == 0) { - index++; - data_layer_type = (index < argc ? 
argv[index]:data_layer_type); - } else if (strcmp("--gpu", argv[index]) == 0) { - use_gpu = true; - } else if (strcmp("--enableTRT", argv[index]) == 0) { - use_gpu = true; - enable_tensorrt = true; - } else if (strcmp("--benchmark", argv[index]) == 0) { - benchmark = true; - } else if (strcmp("--help", argv[index]) == 0) { - printUsage(); - return 0; - } - index++; - } - - if (model_file_json.empty() - || (!benchmark && model_file_params.empty()) - || (enable_tensorrt && model_file_params.empty())) { - LG << "ERROR: Model details such as symbol, param files are not specified"; - printUsage(); - return 1; - } - std::vector input_dimensions = createVectorFromString(input_shape); - input_dimensions.insert(input_dimensions.begin(), batch_size); - Shape input_data_shape(input_dimensions); - - std::vector rgb_mean = createVectorFromString(input_rgb_mean); - std::vector rgb_std = createVectorFromString(input_rgb_std); - - // Initialize the predictor object - Predictor predict(model_file_json, model_file_params, input_data_shape, use_gpu, enable_tensorrt, - dataset, data_nthreads, data_layer_type, rgb_mean, rgb_std, shuffle_chunk_seed, - seed, benchmark); - - if (benchmark) { - predict.BenchmarkScore(num_inference_batches); - } else { - predict.Score(num_skipped_batches, num_inference_batches); - } - return 0; -} diff --git a/cpp-package/example/inference/inference.mk b/cpp-package/example/inference/inference.mk deleted file mode 100644 index 7708db6e029a..000000000000 --- a/cpp-package/example/inference/inference.mk +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -CPPEX_SRC = $(wildcard cpp-package/example/inference/*.cpp) -CPPEX_EXE = $(patsubst cpp-package/example/inference/%.cpp, build/cpp-package/example/%, $(CPPEX_SRC)) - -CPPEX_CFLAGS += -Icpp-package/include -CPPEX_EXTRA_LDFLAGS := -L$(ROOTDIR)/lib -lmxnet - -EXTRA_PACKAGES += cpp-package-inference-example-all -EXTRA_PACKAGES_CLEAN += cpp-package-inference-example-clean - -.PHONY: cpp-package-inference-example-all cpp-package-inference-example-clean - -cpp-package-inference-example-all: cpp-package-all $(CPPEX_EXE) - -build/cpp-package/example/% : cpp-package/example/inference/%.cpp lib/libmxnet.so $(CPP_PACKAGE_OP_H_FILE) - @mkdir -p $(@D) - $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -MM -MT cpp-package/example/inference/$* $< >build/cpp-package/example/$*.d - $(CXX) -std=c++17 $(CFLAGS) $(CPPEX_CFLAGS) -o $@ $(filter %.cpp %.a, $^) $(LDFLAGS) $(CPPEX_EXTRA_LDFLAGS) - -cpp-package-inference-example-clean: - rm -rf build/cpp-package/example/inference* - --include build/cpp-package/example/inference/*.d diff --git a/cpp-package/example/inference/sentiment_analysis_rnn.cpp b/cpp-package/example/inference/sentiment_analysis_rnn.cpp deleted file mode 100755 index 53b618ff116c..000000000000 --- a/cpp-package/example/inference/sentiment_analysis_rnn.cpp +++ /dev/null @@ -1,488 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * This example demonstrates sentiment prediction workflow with pre-trained RNN model using MXNet C++ API. - * The example performs following tasks. - * 1. Load the pre-trained RNN model, - * 2. Load the dictionary file that contains word to index mapping. - * 3. Create executors for pre-determined input lengths. - * 4. Convert each line in the input to the vector of indices. - * 5. Predictor finds the right executor for each line. - * 4. Run the forward pass for each line and predicts the sentiment scores. - * The example uses a pre-trained RNN model that is trained with the IMDB dataset. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "mxnet-cpp/MxNetCpp.h" - -using namespace mxnet::cpp; - -static const int DEFAULT_BUCKET_KEYS[] = {30, 25, 20, 15, 10, 5}; -static const char DEFAULT_S3_URL[] = "https://s3.amazonaws.com/mxnet-cpp/RNN_model/"; - - -/* - * class Predictor - * - * This class encapsulates the functionality to load the model, process input image and run the forward pass. 
- */ - -class Predictor { - public: - Predictor() {} - Predictor(const std::string& model_json, - const std::string& model_params, - const std::string& input_dictionary, - const std::vector& bucket_keys, - bool use_gpu = false); - float PredictSentiment(const std::string &input_review); - ~Predictor(); - - private: - void LoadModel(const std::string& model_json_file); - void LoadParameters(const std::string& model_parameters_file); - void LoadDictionary(const std::string &input_dictionary); - inline bool FileExists(const std::string& name) { - struct stat buffer; - return (stat(name.c_str(), &buffer) == 0); - } - float PredictSentimentForOneLine(const std::string &input_line); - int ConvertToIndexVector(const std::string& input, - std::vector *input_vector); - int GetIndexForOutputSymbolName(const std::string& output_symbol_name); - float GetIndexForWord(const std::string& word); - int GetClosestBucketKey(int num_words); - - std::map args_map; - std::map aux_map; - std::map wordToIndex; - Symbol net; - std::map executor_buckets; - Context global_ctx = Context::cpu(); - int highest_bucket_key; -}; - - -/* - * The constructor takes the following parameters as input: - * 1. model_json: The RNN model in json formatted file. - * 2. model_params: File containing model parameters - * 3. input_dictionary: File containing the word and associated index. - * 4. bucket_keys: A vector of bucket keys for creating executors. - * - * The constructor: - * 1. Loads the model and parameter files. - * 2. Loads the dictionary file to create index to word and word to index maps. - * 3. For each bucket key in the input vector of bucket keys, it creates an executor. - * The executors share the memory. The bucket key determines the length of input data - * required for that executor. - * 4. Creates a map of bucket key to corresponding executor. - * 5. The model is loaded only once. The executors share the memory for the parameters. - */ -Predictor::Predictor(const std::string& model_json, - const std::string& model_params, - const std::string& input_dictionary, - const std::vector& bucket_keys, - bool use_gpu) { - if (use_gpu) { - global_ctx = Context::gpu(); - } - - /* - * Load the dictionary file that contains the word and its index. - * The function creates word to index and index to word map. The maps are used to create index - * vector for the input sentence. - */ - LoadDictionary(input_dictionary); - - // Load the model - LoadModel(model_json); - - // Load the model parameters. - LoadParameters(model_params); - - /* - * Create the executors for each bucket key. The bucket key represents the shape of input data. - * The executors will share the memory by using following technique: - * 1. Infer the executor arrays and bind the first executor with the first bucket key. - * 2. Then for creating the next bucket key, adjust the shape of input argument to match that key. - * 3. Create the executor for the next bucket key by passing the inferred executor arrays and - * pointer to the executor created for the first key. - */ - std::vector arg_arrays; - std::vector grad_arrays; - std::vector grad_reqs; - std::vector aux_arrays; - - /* - * Create master executor with highest bucket key for optimizing the shared memory between the - * executors for the remaining bucket keys. 
- */
-  highest_bucket_key = *(std::max_element(bucket_keys.begin(), bucket_keys.end()));
-  args_map["data0"] = NDArray(Shape(highest_bucket_key, 1), global_ctx, false);
-  args_map["data1"] = NDArray(Shape(1), global_ctx, false);
-
-  net.InferExecutorArrays(global_ctx, &arg_arrays, &grad_arrays, &grad_reqs,
-                          &aux_arrays, args_map, std::map<std::string, NDArray>(),
-                          std::map<std::string, OpReqType>(), aux_map);
-  Executor *master_executor = net.Bind(global_ctx, arg_arrays, grad_arrays, grad_reqs, aux_arrays,
-                                       std::map<std::string, Context>(), nullptr);
-  executor_buckets[highest_bucket_key] = master_executor;
-
-  for (int bucket : bucket_keys) {
-    if (executor_buckets.find(bucket) == executor_buckets.end()) {
-      arg_arrays[0] = NDArray(Shape(bucket, 1), global_ctx, false);
-      Executor *executor = net.Bind(global_ctx, arg_arrays, grad_arrays, grad_reqs, aux_arrays,
-                                    std::map<std::string, Context>(), master_executor);
-      executor_buckets[bucket] = executor;
-    }
-  }
-}
-
-
-/*
- * The following function loads the model from the json file.
- */
-void Predictor::LoadModel(const std::string& model_json_file) {
-  if (!FileExists(model_json_file)) {
-    LG << "Model file " << model_json_file << " does not exist";
-    throw std::runtime_error("Model file does not exist");
-  }
-  LG << "Loading the model from " << model_json_file << std::endl;
-  net = Symbol::Load(model_json_file);
-}
-
-
-/*
- * The following function loads the model parameters.
- */
-void Predictor::LoadParameters(const std::string& model_parameters_file) {
-  if (!FileExists(model_parameters_file)) {
-    LG << "Parameter file " << model_parameters_file << " does not exist";
-    throw std::runtime_error("Model parameters file does not exist");
-  }
-  LG << "Loading the model parameters from " << model_parameters_file << std::endl;
-  std::map<std::string, NDArray> parameters;
-  NDArray::Load(model_parameters_file, 0, &parameters);
-  for (const auto &k : parameters) {
-    if (k.first.substr(0, 4) == "aux:") {
-      auto name = k.first.substr(4, k.first.size() - 4);
-      aux_map[name] = k.second.Copy(global_ctx);
-    }
-    if (k.first.substr(0, 4) == "arg:") {
-      auto name = k.first.substr(4, k.first.size() - 4);
-      args_map[name] = k.second.Copy(global_ctx);
-    }
-  }
-  /* WaitAll is needed when we copy data between the GPU and main memory. */
-  NDArray::WaitAll();
-}
-
-
-/*
- * The following function loads the dictionary file.
- * The function constructs the word-to-index and index-to-word maps.
- * These maps are used to map the words in the input sentence to their indices.
- * Be sure to use the same dictionary file that was used to train the network.
- */
-void Predictor::LoadDictionary(const std::string& input_dictionary) {
-  if (!FileExists(input_dictionary)) {
-    LG << "Dictionary file " << input_dictionary << " does not exist";
-    throw std::runtime_error("Dictionary file does not exist");
-  }
-  LG << "Loading the dictionary file.";
-  std::ifstream fi(input_dictionary.c_str());
-  if (!fi.is_open()) {
-    std::cerr << "Error opening dictionary file " << input_dictionary << std::endl;
-    assert(false);
-  }
-
-  std::string line;
-  std::string word;
-  int index;
-  while (std::getline(fi, line)) {
-    std::istringstream stringline(line);
-    stringline >> word >> index;
-    wordToIndex[word] = index;
-  }
-  fi.close();
-}
-
-
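Worth pausing on: the constructor above is where this removed example earns its keep. Binding the largest bucket first, then passing that executor as the shared executor for every smaller bucket, is what lets all buckets reuse a single copy of the parameters. A minimal sketch of the same pattern outside the deleted class; `net`, `ctx`, `args_map`, `aux_map` and `buckets` are assumed to be set up as in the file above, and this is an illustration rather than the example's exact code:

// Sketch: memory-shared bucketed executors, following the deleted constructor.
std::map<int, Executor*> executors;
std::vector<NDArray> arg_arrays, grad_arrays, aux_arrays;
std::vector<OpReqType> grad_reqs;
int max_bucket = *std::max_element(buckets.begin(), buckets.end());

// Bind the master executor with the largest input shape first.
args_map["data0"] = NDArray(Shape(max_bucket, 1), ctx, false);
net.InferExecutorArrays(ctx, &arg_arrays, &grad_arrays, &grad_reqs, &aux_arrays,
                        args_map, std::map<std::string, NDArray>(),
                        std::map<std::string, OpReqType>(), aux_map);
Executor *master = net.Bind(ctx, arg_arrays, grad_arrays, grad_reqs, aux_arrays,
                            std::map<std::string, Context>(), nullptr);
executors[max_bucket] = master;

// Every smaller bucket re-binds against the master, so the weights are shared
// and only the data argument changes shape.
for (int bucket : buckets) {
  if (executors.count(bucket)) continue;
  arg_arrays[0] = NDArray(Shape(bucket, 1), ctx, false);
  executors[bucket] = net.Bind(ctx, arg_arrays, grad_arrays, grad_reqs, aux_arrays,
                               std::map<std::string, Context>(), master);
}

Only `data0` changes shape between bindings, which keeps the per-bucket re-bind cheap.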
-/*
- * The function returns the index associated with the word in the dictionary.
- * If the word is not present, the index representing "<unk>" is returned.
- * If "<unk>" is not present either, 0 is returned.
- */
-float Predictor::GetIndexForWord(const std::string& word) {
-  if (wordToIndex.find(word) == wordToIndex.end()) {
-    if (wordToIndex.find("<unk>") == wordToIndex.end())
-      return 0;
-    else
-      return static_cast<float>(wordToIndex["<unk>"]);
-  }
-  return static_cast<float>(wordToIndex[word]);
-}
-
-/*
- * The function populates the input vector with indices from the dictionary that
- * correspond to the words in the input string.
- * The function returns the number of words in the input line.
- */
-int Predictor::ConvertToIndexVector(const std::string& input, std::vector<float> *input_vector) {
-  std::istringstream input_string(input);
-  input_vector->clear();
-  const char delimiter = ' ';
-  std::string token;
-  size_t words = 0;
-  while (std::getline(input_string, token, delimiter) && (words <= input_vector->size())) {
-    input_vector->push_back(GetIndexForWord(token));
-    words++;
-  }
-  return words;
-}
-
-
-/*
- * The function returns the index at which the given symbol name appears
- * in the vector of output NDArrays obtained after running the forward pass on the executor.
- */
-int Predictor::GetIndexForOutputSymbolName(const std::string& output_symbol_name) {
-  int index = 0;
-  for (const std::string op : net.ListOutputs()) {
-    if (op == output_symbol_name) {
-      return index;
-    } else {
-      index++;
-    }
-  }
-  throw std::runtime_error("The output symbol name can not be found");
-}
-
-
-/*
- * The function finds the closest bucket for the given num_words in the input line.
- * If the exact bucket key exists, the function returns that bucket key.
- * If a matching bucket key does not exist, the function looks for the next bucket key
- * that is greater than the given num_words.
- * If no larger bucket exists, the function returns the largest bucket key.
- */
-int Predictor::GetClosestBucketKey(int num_words) {
-  int closest_bucket_key = highest_bucket_key;
-
-  if (executor_buckets.lower_bound(num_words) != executor_buckets.end()) {
-    closest_bucket_key = executor_buckets.lower_bound(num_words)->first;
-  }
-  return closest_bucket_key;
-}
-
-
-/*
- * The following function runs the forward pass on the model for the given line.
- */
-float Predictor::PredictSentimentForOneLine(const std::string& input_line) {
-  /*
-   * Initialize a vector with the index corresponding to "<eos>" and convert the
-   * input string to a vector of indices that represent the words in the input string.
-   */
-  std::vector<float> index_vector(GetIndexForWord("<eos>"));
-  int num_words = ConvertToIndexVector(input_line, &index_vector);
-  int bucket_key = GetClosestBucketKey(num_words);
-
-  /*
-   * The index_vector has a size equal to num_words. The vector is padded if
-   * the bucket_key is greater than num_words, and trimmed if
-   * the bucket_key is smaller than num_words.
-   */
-  index_vector.resize(bucket_key, GetIndexForWord("<eos>"));
-
-  Executor* executor = executor_buckets[bucket_key];
-  executor->arg_dict()["data0"].SyncCopyFromCPU(index_vector.data(), index_vector.size());
-  executor->arg_dict()["data1"] = num_words;
-
-  // Run the forward pass.
-  executor->Forward(false);
-
-  /*
-   * The output is available in executor->outputs. It is a vector of
-   * NDArrays. We need to find the index in that vector that
-   * corresponds to the output symbol "sentimentnet0_hybridsequential0_dense0_fwd_output".
- */ - const std::string output_symbol_name = "sentimentnet0_hybridsequential0_dense0_fwd_output"; - int output_index = GetIndexForOutputSymbolName(output_symbol_name); - std::vector outputs = executor->outputs; - auto arrayout = executor->outputs[output_index].Copy(global_ctx); - /* - * We will run sigmoid operator to find out the sentiment score between - * 0 and 1 where 1 represents positive. - */ - NDArray ret; - Operator("sigmoid")(arrayout).Invoke(ret); - ret.WaitToRead(); - - return ret.At(0, 0); -} - - -/* - * The function predicts the sentiment score for the input review. - * The function splits the input review in lines (separated by '.'). - * It finds sentiment score for each line and computes the average. - */ -float Predictor::PredictSentiment(const std::string& input_review) { - std::istringstream input_string(input_review); - int num_lines = 0; - float sentiment_score = 0.0f; - - // Split the iput review in separate lines separated by '.' - const char delimiter = '.'; - std::string line; - while (std::getline(input_string, line, delimiter)) { - // Predict the sentiment score for each line. - float score = PredictSentimentForOneLine(line); - LG << "Input Line : [" << line << "] Score : " << score; - sentiment_score += score; - num_lines++; - } - - // Find the average sentiment score. - sentiment_score = sentiment_score / num_lines; - return sentiment_score; -} - - -/* - * The destructor frees the executor and notifies MXNetEngine to shutdown. - */ -Predictor::~Predictor() { - for (auto bucket : this->executor_buckets) { - Executor* executor = bucket.second; - delete executor; - } - MXNotifyShutdown(); -} - - -/* - * The function prints the usage information. - */ -void printUsage() { - std::cout << "Usage:" << std::endl; - std::cout << "sentiment_analysis_rnn " << std::endl - << "--input Input movie review. The review can be single line or multiline." - << "e.g. \"This movie is the best.\" OR " - << "\"This movie is the best. The direction is awesome.\" " << std::endl - << "[--gpu] Specify this option if workflow needs to be run in gpu context " - << std::endl - << "If the review is multiline, the example predicts sentiment score for each line " - << "and the final score is the average of scores obtained for each line." - << std::endl; -} - - -/* - * The function downloads the model files from s3 bucket. - */ -void DownloadFiles(const std::vector model_files) { - std::string wget_command("wget -nc "); - std::string s3_url(DEFAULT_S3_URL); - for (auto &file : model_files) { - std::ostringstream oss; - oss << wget_command << s3_url << file << " -O " << file; - int status = system(oss.str().c_str()); - LG << "Downloading " << file << " with status " << status; - } - return; -} - - -int main(int argc, char** argv) { - std::string model_file_json = "./sentiment_analysis-symbol.json"; - std::string model_file_params ="./sentiment_analysis-0010.params"; - std::string input_dictionary = "./sentiment_token_to_idx.txt"; - std::string input_review = "This movie is the best"; - bool use_gpu = false; - - int index = 1; - while (index < argc) { - if (strcmp("--input", argv[index]) == 0) { - index++; - input_review = (index < argc ? argv[index]:input_review); - } else if (strcmp("--gpu", argv[index]) == 0) { - use_gpu = true; - } else if (strcmp("--help", argv[index]) == 0) { - printUsage(); - return 0; - } - index++; - } - - - /* - * Download the trained RNN model file, param file and dictionary file. - * The dictionary file contains word to index mapping. 
- * Each line of the dictionary file contains a word and the unique index for that word separated - * by a space. For example: - * snippets 11172 - * This dictionary file is created when the RNN model was trained with a particular dataset. - * Hence the dictionary file is specific to the dataset with which model was trained. - */ - std::vector files; - files.push_back(model_file_json); - files.push_back(model_file_params); - files.push_back(input_dictionary); - - DownloadFiles(files); - - std::vector buckets(DEFAULT_BUCKET_KEYS, - DEFAULT_BUCKET_KEYS + sizeof(DEFAULT_BUCKET_KEYS) / sizeof(int)); - - try { - // Initialize the predictor object - Predictor predict(model_file_json, model_file_params, input_dictionary, buckets, use_gpu); - - // Run the forward pass to predict the sentiment score for the given review. - float sentiment_score = predict.PredictSentiment(input_review); - LG << "The sentiment score between 0 and 1, (1 being positive)=" << sentiment_score; - } catch (std::runtime_error &error) { - LG << MXGetLastError(); - LG << "Execution failed with ERROR: " << error.what(); - return 1; - } catch (...) { - /* - * If underlying MXNet code has thrown an exception the error message is - * accessible through MXGetLastError() function. - */ - LG << "Execution failed with following MXNet error"; - LG << MXGetLastError(); - return 1; - } - return 0; -} diff --git a/cpp-package/example/inference/unit_test_imagenet_inference.sh b/cpp-package/example/inference/unit_test_imagenet_inference.sh deleted file mode 100755 index c645388cd419..000000000000 --- a/cpp-package/example/inference/unit_test_imagenet_inference.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -ex -# create ./model directory if not existed -if [ ! -d model ]; then - mkdir -p model -fi -# create ./data directory if not existed -if [ ! -d data ]; then - mkdir -p data -fi -# Downloading the data and model if not existed -model_file=./model/Inception-BN-symbol.json -params_file=./model/Inception-BN-0126.params -if [ ! -f ${model_file} ] || [ ! -f ${params_file} ]; then - wget -nc http://data.mxnet.io/models/imagenet/inception-bn.tar.gz - tar -xvzf inception-bn.tar.gz -C model -fi -cd model -wget -nc https://raw.githubusercontent.com/dmlc/gluon-cv/master/gluoncv/model_zoo/quantized/resnet50_v1_int8-symbol.json -cd ../data -wget -nc http://data.mxnet.io/data/val_256_q90.rec -cd .. - -# Running inference on imagenet. 
-if [ "$(uname)" == "Darwin" ]; then - echo ">>> INFO: FP32 real data" - DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}:../../../lib ./imagenet_inference --symbol_file "./model/Inception-BN-symbol.json" --params_file "./model/Inception-BN-0126.params" --dataset "./data/val_256_q90.rec" --rgb_mean "123.68 116.779 103.939" --batch_size 1 --num_skipped_batches 50 --num_inference_batches 500 - - echo ">>> INFO: FP32 dummy data" - DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}:../../../lib ./imagenet_inference --symbol_file "./model/Inception-BN-symbol.json" --batch_size 1 --num_inference_batches 500 --benchmark -else - echo ">>> INFO: FP32 real data" - LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:../../../lib ./imagenet_inference --symbol_file "./model/Inception-BN-symbol.json" --params_file "./model/Inception-BN-0126.params" --dataset "./data/val_256_q90.rec" --rgb_mean "123.68 116.779 103.939" --batch_size 1 --num_skipped_batches 50 --num_inference_batches 500 - - echo ">>> INFO: FP32 dummy data" - LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:../../../lib ./imagenet_inference --symbol_file "./model/Inception-BN-symbol.json" --batch_size 1 --num_inference_batches 500 --benchmark - - lib_name=$(ls -a ../../../lib | grep -oE 'mkldnn' | tail -1) - if [[ -n ${lib_name} ]] && [[ 'mkldnn' =~ ${lib_name} ]]; then - echo ">>> INFO: INT8 dummy data" - LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:../../../lib ./imagenet_inference --symbol_file "./model/resnet50_v1_int8-symbol.json" --batch_size 1 --num_inference_batches 500 --benchmark - else - echo "Skipped INT8 test because mkldnn was not found which is required for running inference with quantized models." - fi -fi diff --git a/cpp-package/example/inference/unit_test_sentiment_analysis_rnn.sh b/cpp-package/example/inference/unit_test_sentiment_analysis_rnn.sh deleted file mode 100755 index 6f42e449ce58..000000000000 --- a/cpp-package/example/inference/unit_test_sentiment_analysis_rnn.sh +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -function compare_range() { - perl -e "{if($1>$2 && $1<=$3){print 1} else {print 0}}" -} - -set -e # exit on the first error -export EXE_NAME="sentiment_analysis_rnn" - -# Running the example with a movie review. -if [ "$(uname)" == "Darwin" ]; then - DYLD_LIBRARY_PATH=${DYLD_LIBRARY_PATH}:../../../lib ./${EXE_NAME} --input "This movie is the best." 2&> ${EXE_NAME}.log -else - LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:../../../lib ./${EXE_NAME} --input "This movie is the best." 
2&> ${EXE_NAME}.log -fi -result=`grep "The sentiment score between 0 and 1.*\=" ${EXE_NAME}.log | cut -d '=' -f2` -lower_bound=0.8 -upper_bound=0.99 -if [ $(compare_range $result $lower_bound $upper_bound) == 1 ]; -then - echo "PASS: ${EXE_NAME} correctly predicted the sentiment with score = $result" - exit 0 -else - echo "FAIL: ${EXE_NAME} FAILED to predict the sentiment with score = $result" - exit 1 -fi \ No newline at end of file diff --git a/cpp-package/example/mnist_to_csv.py b/cpp-package/example/mnist_to_csv.py deleted file mode 100644 index dad9ed5f9c72..000000000000 --- a/cpp-package/example/mnist_to_csv.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Following file converts the mnist data to CSV format. -# Usage: -# mnist_to_csv.py train-images-idx3-ubyte train-labels-idx1-ubyte mnist_train.csv 60000 -# mnist_to_csv.py t10k-images-idx3-ubyte t10k-labels-idx1-ubyte mnist_test.csv 10000 -# - -import argparse - -def convert_to_csv(args): - imageFile = open(args.imageFile, "rb") - labelFile = open(args.labelFile, "rb") - outputFile = open(args.outputFile, "w") - - imageFile.read(16) - labelFile.read(8) - images = [] - - for i in range(args.num_records): - image = [ord(labelFile.read(1))] - for j in range(28 * 28): - image.append(ord(imageFile.read(1))) - images.append(image) - - for image in images: - outputFile.write(",".join(str(pix) for pix in image) + "\n") - - imageFile.close() - outputFile.close() - labelFile.close() - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("imageFile", type=str, help="image file in mnist format e.g. train-images-idx3-ubyte") - parser.add_argument("labelFile", type=str, help="label file in mnist format e.g train-labels-idx1-ubyte") - parser.add_argument("outputFile", type=str, help="Output file in CSV format e.g mnist_train_trial.csv") - parser.add_argument("num_records", type=int, help="Number of images in the input files.e.g 60000") - args = parser.parse_args() - - try: - convert_to_csv(args) - except Exception as e: - print("Error : Exception {}".format(str(e))) diff --git a/cpp-package/example/run_lenet_with_mxdataiter.sh b/cpp-package/example/run_lenet_with_mxdataiter.sh deleted file mode 100755 index cafad3201635..000000000000 --- a/cpp-package/example/run_lenet_with_mxdataiter.sh +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -if [ ! -f "./mnist.zip" ]; then - wget http://webdocs.cs.ualberta.ca/~bx3/data/mnist.zip - unzip -u mnist.zip -fi -make lenet_with_mxdataiter -LD_LIBRARY_PATH=../lib/linux ./lenet_with_mxdataiter diff --git a/cpp-package/example/test_kvstore.cpp b/cpp-package/example/test_kvstore.cpp deleted file mode 100644 index d9e0400a5ac8..000000000000 --- a/cpp-package/example/test_kvstore.cpp +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -#include "mxnet/c_api.h" // MXGetGPUCount() -#include "mxnet-cpp/MxNetCpp.h" - -using namespace mxnet::cpp; - -static bool test_single_key(const Context &context, const std::string &context_str) { - std::string key = "singlekeytest-" + context_str; - - NDArray result(Shape(4), context); - NDArray result_cpu; - - // initialize data - NDArray data_cpu({0.f, 233.f, -0.12f, 9.f}, Shape(4), Context::cpu()); - NDArray data = data_cpu.Copy(context); - NDArray::WaitAll(); - - KVStore::Init(key, data); - NDArray::WaitAll(); - - // retrieve result - KVStore::Pull(key, &result); - NDArray::WaitAll(); - - result_cpu = result.Copy(Context::cpu()); - NDArray::WaitAll(); - - // compare - for (size_t j=0; j < result_cpu.Size(); j++) { - if (result_cpu.GetData()[j] != data_cpu.GetData()[j]) { - LG << "Error: wrong initialized data in singlekeytest-" << context_str - << ", expect " << data_cpu.GetData()[j] - << " got " << result_cpu.GetData()[j]; - return false; - } - } - - // push gradient - NDArray grad_cpu({0.1f, -2.f, -4.4f, 0.f}, Shape(4), Context::cpu()); - NDArray grad = grad_cpu.Copy(context); - NDArray::WaitAll(); - - KVStore::Push(key, grad); - NDArray::WaitAll(); - - // retrieve result - KVStore::Pull(key, &result); - NDArray::WaitAll(); - - result_cpu = result.Copy(Context::cpu()); - NDArray::WaitAll(); - - // compare - for (size_t j=0; j < result_cpu.Size(); j++) { - if (result_cpu.GetData()[j] != grad_cpu.GetData()[j]) { - LG << "Error: wrong gradient data in singlekeytest-" << context_str - << ", expect " << grad_cpu.GetData()[j] - << " got " << result_cpu.GetData()[j]; - return false; - } - } - - return true; -} - -static bool test_multiple_key(const Context &context, const std::string &context_str) { - std::vector keys(2); - keys[0] = "multikeytest-0-" + context_str; - keys[1] = "multikeytest-1-" + context_str; - - std::vector results(2); - results[0] = 
NDArray(Shape(4), context); - results[1] = NDArray(Shape(4), context); - std::vector results_cpu(2); - - // initialize data - std::vector data_cpu(2); - data_cpu[0] = NDArray({0.f, 2.f, -3.12f, 4.f}, Shape(4), Context::cpu()); - data_cpu[1] = NDArray({0.8f, -2.f, 6.6f, 77.f}, Shape(4), Context::cpu()); - std::vector data(2); - data[0] = data_cpu[0].Copy(context); - data[1] = data_cpu[1].Copy(context); - NDArray::WaitAll(); - - KVStore::Init(keys, data); - NDArray::WaitAll(); - - // retrieve result - KVStore::Pull(keys, &results); - NDArray::WaitAll(); - - results_cpu[0] = results[0].Copy(Context::cpu()); - results_cpu[1] = results[1].Copy(Context::cpu()); - NDArray::WaitAll(); - - // compare - for (size_t i=0; i < results_cpu.size(); i++) { - for (size_t j=0; j < results_cpu[i].Size(); j++) { - if (results_cpu[i].GetData()[j] != data_cpu[i].GetData()[j]) { - LG << "Error: wrong initialized data in multikeytest-" << context_str - << ", expect " << data_cpu[i].GetData()[j] - << " got " << results_cpu[i].GetData()[j]; - return false; - } - } - } - - // push gradient, reduce for the second - std::vector push_keys(3); - push_keys[0] = "multikeytest-0-" + context_str; - push_keys[1] = "multikeytest-1-" + context_str; - push_keys[2] = "multikeytest-1-" + context_str; - - std::vector grads_cpu(3); - grads_cpu[0] = NDArray({0.2f, -0.3f, -1.1f, 0.0f}, Shape(4), Context::cpu()); - grads_cpu[1] = NDArray({2.f, 4.f, -4.f, -5.f}, Shape(4), Context::cpu()); - grads_cpu[2] = NDArray({-3.f, -0.2f, 12.f, -9.f}, Shape(4), Context::cpu()); - std::vector grads(3); - grads[0] = grads_cpu[0].Copy(context); - grads[1] = grads_cpu[1].Copy(context); - grads[2] = grads_cpu[2].Copy(context); - NDArray::WaitAll(); - - KVStore::Push(push_keys, grads); - NDArray::WaitAll(); - - // retrieve result - KVStore::Pull(keys, &results); - NDArray::WaitAll(); - - results_cpu[0] = results[0].Copy(Context::cpu()); - results_cpu[1] = results[1].Copy(Context::cpu()); - NDArray::WaitAll(); - - // compare the first - for (size_t j=0; j < results_cpu[0].Size(); j++) { - if (results_cpu[0].GetData()[j] != grads_cpu[0].GetData()[j]) { - LG << "Error: wrong gradient data in multikeytest-" << context_str - << ", expect " << grads_cpu[0].GetData()[j] - << " got " << results_cpu[0].GetData()[j]; - return false; - } - } - - // compare the second - for (size_t j=0; j < results_cpu[1].Size(); j++) { - if (results_cpu[1].GetData()[j] != (grads_cpu[1].GetData()[j] + grads_cpu[2].GetData()[j])) { - LG << "Error: wrong reduced gradient data in multikeytest-" << context_str - << ", expect " << (grads_cpu[1].GetData()[j] + grads_cpu[2].GetData()[j]) - << " got " << results_cpu[1].GetData()[j]; - return false; - } - } - - return true; -} - -int main(int argc, char** argv) { - KVStore::SetType("local"); - - bool success1 = test_single_key(Context::cpu(), "cpu"); - bool success2 = test_multiple_key(Context::cpu(), "cpu"); - - bool success3 = true; - bool success4 = true; - - int gpu_count = 0; - if (MXGetGPUCount(&gpu_count) != 0) { - LG << "Error: MXGetGPUCount"; - - MXNotifyShutdown(); - return 1; - } - - if (gpu_count > 0) { - success3 = test_single_key(Context::gpu(), "gpu"); - success4 = test_multiple_key(Context::gpu(), "gpu"); - } - - int ret = (success1 && success2 && success3 && success4) ? 
0 : 1;
-
-  MXNotifyShutdown();
-  return ret;
-}
diff --git a/cpp-package/example/test_ndarray_copy.cpp b/cpp-package/example/test_ndarray_copy.cpp
deleted file mode 100644
index a3b3011993fa..000000000000
--- a/cpp-package/example/test_ndarray_copy.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-#include
-#include "mxnet/c_api.h"
-#include "dmlc/logging.h"
-#include "mxnet-cpp/MxNetCpp.h"
-using namespace mxnet::cpp;
-
-enum TypeFlag {
-  kFloat32 = 0,
-  kFloat64 = 1,
-  kFloat16 = 2,
-  kUint8 = 3,
-  kInt32 = 4,
-  kInt8 = 5,
-  kInt64 = 6,
-};
-
-/*
- * This file tests whether a type inconsistency arises
- * when using the Copy API to create a new NDArray.
- * Run it with: build/test_ndarray.
- */
-int main(int argc, char** argv) {
-  std::vector<mx_uint> shape1{128, 2, 32};
-  Shape shape2(32, 8, 64);
-
-  int gpu_count = 0;
-  if (MXGetGPUCount(&gpu_count) != 0) {
-    LOG(ERROR) << "MXGetGPUCount failed";
-    return -1;
-  }
-
-  Context context = (gpu_count > 0) ? Context::gpu() : Context::cpu();
-
-  NDArray src1(shape1, context, true, kFloat16);
-  NDArray src2(shape2, context, false, kInt8);
-  NDArray dst1, dst2;
-  dst1 = src1.Copy(context);
-  dst2 = src2.Copy(context);
-  NDArray::WaitAll();
-  CHECK_EQ(src1.GetDType(), dst1.GetDType());
-  CHECK_EQ(src2.GetDType(), dst2.GetDType());
-  return 0;
-}
diff --git a/cpp-package/example/test_optimizer.cpp b/cpp-package/example/test_optimizer.cpp
deleted file mode 100644
index 70190eff5dc6..000000000000
--- a/cpp-package/example/test_optimizer.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- * This file tests whether more than one optimizer can be created.
- * Run it with: build/test_optimizer
- * It returns 0 on success (no error), otherwise 1 (error).
- */ -#include "mxnet-cpp/MxNetCpp.h" - -using namespace mxnet::cpp; - -int main(int argc, char** argv) { - // Confirm >1 optimizers can be created w/o error - Optimizer* opt = OptimizerRegistry::Find("sgd"); - opt = OptimizerRegistry::Find("adam"); - int ret = (opt == 0) ? 1 : 0; - - delete opt; - MXNotifyShutdown(); - return ret; -} diff --git a/cpp-package/example/utils.h b/cpp-package/example/utils.h deleted file mode 100644 index 87847701ce6e..000000000000 --- a/cpp-package/example/utils.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#ifndef CPP_PACKAGE_EXAMPLE_UTILS_H_ -#define CPP_PACKAGE_EXAMPLE_UTILS_H_ - -#include -#include -#include -#include "mxnet-cpp/MxNetCpp.h" - -using namespace mxnet::cpp; - -#define TRY \ - try { -#define CATCH \ - } catch(dmlc::Error &err) { \ - LG << "Status: FAIL";\ - LG << "With Error: " << MXGetLastError(); \ - return 1; \ - } - -bool isFileExists(const std::string &filename) { - std::ifstream fhandle(filename.c_str()); - return fhandle.good(); -} - -bool check_datafiles(const std::vector &data_files) { - for (size_t index=0; index < data_files.size(); index++) { - if (!(isFileExists(data_files[index]))) { - LG << "Error: File does not exist: "<< data_files[index]; - return false; - } - } - return true; -} - -bool setDataIter(MXDataIter *iter , const std::string &useType, - const std::vector &data_files, int batch_size) { - if (!check_datafiles(data_files)) { - return false; - } - - iter->SetParam("batch_size", batch_size); - iter->SetParam("shuffle", 1); - iter->SetParam("flat", 1); - - if (useType == "Train") { - iter->SetParam("image", data_files[0]); - iter->SetParam("label", data_files[1]); - } else if (useType == "Label") { - iter->SetParam("image", data_files[2]); - iter->SetParam("label", data_files[3]); - } - - iter->CreateDataIter(); - return true; -} - -#endif // CPP_PACKAGE_EXAMPLE_UTILS_H_ diff --git a/cpp-package/include/mxnet-cpp/CPPLINT.cfg b/cpp-package/include/mxnet-cpp/CPPLINT.cfg deleted file mode 100644 index ba8649383947..000000000000 --- a/cpp-package/include/mxnet-cpp/CPPLINT.cfg +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -filter=-runtime/references -exclude_files=op.h diff --git a/cpp-package/include/mxnet-cpp/MxNetCpp.h b/cpp-package/include/mxnet-cpp/MxNetCpp.h deleted file mode 100644 index a513565377fd..000000000000 --- a/cpp-package/include/mxnet-cpp/MxNetCpp.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2016 by Contributors - * \file MxNetCpp.h - * \brief meta include file for mxnet.cpp - * \author Chuntao Hong, Zhang Chen - */ - -#ifndef MXNET_CPP_MXNETCPP_H_ -#define MXNET_CPP_MXNETCPP_H_ - -#include "mxnet-cpp/executor.hpp" -#include "mxnet-cpp/symbol.hpp" -#include "mxnet-cpp/ndarray.hpp" -#include "mxnet-cpp/monitor.hpp" -#include "mxnet-cpp/operator.hpp" -#include "mxnet-cpp/optimizer.hpp" -#include "mxnet-cpp/kvstore.hpp" -#include "mxnet-cpp/op.h" -#include "mxnet-cpp/op_suppl.h" -#include "mxnet-cpp/io.hpp" -#include "mxnet-cpp/metric.h" -#include "mxnet-cpp/initializer.h" -#include "mxnet-cpp/contrib.h" - -#endif // MXNET_CPP_MXNETCPP_H_ diff --git a/cpp-package/include/mxnet-cpp/base.h b/cpp-package/include/mxnet-cpp/base.h deleted file mode 100644 index d0f1bea15f00..000000000000 --- a/cpp-package/include/mxnet-cpp/base.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! 
-* Copyright (c) 2016 by Contributors -* \file base.h -* \brief base definitions for mxnetcpp -* \author Chuntao Hong, Zhang Chen -*/ - -#ifndef MXNET_CPP_BASE_H_ -#define MXNET_CPP_BASE_H_ - -#include -#include "mxnet/c_api.h" -#include "nnvm/c_api.h" - -namespace mxnet { -namespace cpp { - -typedef unsigned index_t; - -enum OpReqType { - /*! \brief no operation, do not write anything */ - kNullOp, - /*! \brief write gradient to provided space */ - kWriteTo, - /*! - * \brief perform an inplace write, - * Target shares memory with one of input arguments. - * This option only happen when - */ - kWriteInplace, - /*! \brief add to the provided space */ - kAddTo -}; - -} // namespace cpp -} // namespace mxnet - -#endif // MXNET_CPP_BASE_H_ diff --git a/cpp-package/include/mxnet-cpp/contrib.h b/cpp-package/include/mxnet-cpp/contrib.h deleted file mode 100644 index 890ab2bf0062..000000000000 --- a/cpp-package/include/mxnet-cpp/contrib.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! -* Copyright (c) 2019 by Contributors -* \file contrib.h -* \brief utility function to enable some contrib features -* \author Haohuan Wang -*/ -#ifndef MXNET_CPP_CONTRIB_H_ -#define MXNET_CPP_CONTRIB_H_ - -#include -#include -#include -#include -#include "mxnet-cpp/symbol.h" - -namespace mxnet { -namespace cpp { -namespace details { - - /*! - * split a string with the given delimiter - * @param str string to be parsed - * @param delimiter delimiter - * @return delimited list of string - */ - inline std::vector split(const std::string& str, const std::string& delimiter) { - std::vector splitted; - size_t last = 0; - size_t next = 0; - while ((next = str.find(delimiter, last)) != std::string::npos) { - splitted.push_back(str.substr(last, next - last)); - last = next + 1; - } - splitted.push_back(str.substr(last)); - return splitted; - } - -} // namespace details - -namespace contrib { - - // needs to be same with - // https://github.com/apache/incubator-mxnet/blob/1c874cfc807cee755c38f6486e8e0f4d94416cd8/src/operator/subgraph/tensorrt/tensorrt-inl.h#L190 - static const std::string TENSORRT_SUBGRAPH_PARAM_IDENTIFIER = "subgraph_params_names"; - // needs to be same with - // https://github.com/apache/incubator-mxnet/blob/master/src/operator/subgraph/tensorrt/tensorrt.cc#L244 - static const std::string TENSORRT_SUBGRAPH_PARAM_PREFIX = "subgraph_param_"; - /*! 
- * This mimics https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/contrib/tensorrt.py#L37
- * @param symbol symbol on which the subgraph API has already been called
- * @param argParams original arg params; params needed by TensorRT are removed after calling this function
- * @param auxParams original aux params; params needed by TensorRT are removed after calling this function
- */
-  inline void InitTensorRTParams(const mxnet::cpp::Symbol& symbol,
-                                 std::map<std::string, mxnet::cpp::NDArray> *argParams,
-                                 std::map<std::string, mxnet::cpp::NDArray> *auxParams) {
-    mxnet::cpp::Symbol internals = symbol.GetInternals();
-    mx_uint numSymbol = internals.GetNumOutputs();
-    for (mx_uint i = 0; i < numSymbol; ++i) {
-      std::map<std::string, std::string> attrs = internals[i].ListAttributes();
-      if (attrs.find(TENSORRT_SUBGRAPH_PARAM_IDENTIFIER) != attrs.end()) {
-        std::string new_params_names;
-        std::map<std::string, mxnet::cpp::NDArray> tensorrtParams;
-        std::vector<std::string> keys = details::split(
-            attrs[TENSORRT_SUBGRAPH_PARAM_IDENTIFIER], ";");
-        for (const auto& key : keys) {
-          if (argParams->find(key) != argParams->end()) {
-            new_params_names += key + ";";
-            tensorrtParams[TENSORRT_SUBGRAPH_PARAM_PREFIX + key] = (*argParams)[key];
-            argParams->erase(key);
-          } else if (auxParams->find(key) != auxParams->end()) {
-            new_params_names += key + ";";
-            tensorrtParams[TENSORRT_SUBGRAPH_PARAM_PREFIX + key] = (*auxParams)[key];
-            auxParams->erase(key);
-          }
-        }
-        std::map<std::string, std::string> new_attrs = {};
-        for (const auto& kv : tensorrtParams) {
-          // passing the ndarray address into the TRT node attributes to get the weight
-          uint64_t address = reinterpret_cast<uint64_t>(kv.second.GetHandle());
-          new_attrs[kv.first] = std::to_string(address);
-        }
-        if (!new_attrs.empty()) {
-          internals[i].SetAttributes(new_attrs);
-          internals[i].SetAttribute(TENSORRT_SUBGRAPH_PARAM_IDENTIFIER,
-                                    new_params_names.substr(0, new_params_names.length() - 1));
-        }
-      }
-    }
-}
-
-}  // namespace contrib
-}  // namespace cpp
-}  // namespace mxnet
-
-#endif  // MXNET_CPP_CONTRIB_H_
diff --git a/cpp-package/include/mxnet-cpp/executor.h b/cpp-package/include/mxnet-cpp/executor.h
deleted file mode 100644
index 4cb28819de02..000000000000
--- a/cpp-package/include/mxnet-cpp/executor.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
-* Copyright (c) 2016 by Contributors
-* \file executor.h
-* \brief executor definition
-* \author Chuntao Hong, Zhang Chen
-*/
-
-#ifndef MXNET_CPP_EXECUTOR_H_
-#define MXNET_CPP_EXECUTOR_H_
-
-#include
-#include
-#include
-#include
-#include "mxnet-cpp/base.h"
-#include "mxnet-cpp/symbol.h"
-
-namespace mxnet {
-namespace cpp {
-
-class Optimizer;
-
-/*!
-* \brief Executor interface
-*/
-class Executor {
-  friend class Monitor;
- public:
-  Executor(const Symbol &symbol, Context context,
-           const std::vector<NDArray> &arg_arrays,
-           const std::vector<NDArray> &grad_arrays,
-           const std::vector<OpReqType> &grad_reqs,
-           const std::vector<NDArray> &aux_arrays,
-           const std::map<std::string, Context> &group_to_ctx =
-               std::map<std::string, Context>(),
-           Executor *shared_exec = nullptr);
-  explicit Executor(const ExecutorHandle &h) { handle_ = h; }
-  /*!
-   * \brief Perform a Forward operation of Operator
-   *  After this operation, the user can get the result by using function head.
-   */
-  void Forward(bool is_train) {
-    MXExecutorForward(handle_, is_train ? 1 : 0);
-    mx_uint out_size;
-    NDArrayHandle *out_array;
-    CHECK_EQ(MXExecutorOutputs(handle_, &out_size, &out_array), 0);
-    for (mx_uint i = 0; i < out_size; ++i) {
-      outputs[i] = NDArray(out_array[i]);
-    }
-  }
-  /*!
-   * \brief Perform a Backward operation of the Operator.
-   *  This must be called after Forward.
-   *  After this operation, NDArrays specified by grad_in_args_store will be
-   *  updated accordingly.
-   *  The user is allowed to pass in an empty array if the head node is
-   *  a loss function and the head gradient is not needed.
-   *
-   *  \param head_grads the gradients of the head nodes to be backpropagated.
-   */
-  void Backward(const std::vector<NDArray> &head_grads =
-                    std::vector<NDArray>()) {
-    std::vector<NDArrayHandle> head_grads_;
-    for (auto d : head_grads) {
-      head_grads_.push_back(d.GetHandle());
-    }
-    if (head_grads_.size() > 0) {
-      MXExecutorBackward(handle_, head_grads_.size(), head_grads_.data());
-    } else {
-      MXExecutorBackward(handle_, 0, nullptr);
-    }
-  }
-  // TODO(zhangchen-qinyinghua)
-  // To implement reshape function
-  void Reshape();
-  /*!
-   * \brief get a debug string of the executor
-   * \return the debug string
-   */
-  std::string DebugStr();
-  /*!
-   * \brief destructor, free the handle
-   */
-  ~Executor() { MXExecutorFree(handle_); }
-  std::vector<NDArray> arg_arrays;
-  std::vector<NDArray> grad_arrays;
-  std::vector<NDArray> aux_arrays;
-  /*!
-   * \brief arrays store the outputs of forward
-   */
-  std::vector<NDArray> outputs;
-  std::map<std::string, NDArray> arg_dict() {
-    return GetDict(symbol_.ListArguments(), arg_arrays);
-  }
-  std::map<std::string, NDArray> grad_dict() {
-    return GetDict(symbol_.ListArguments(), grad_arrays);
-  }
-  std::map<std::string, NDArray> aux_dict() {
-    return GetDict(symbol_.ListAuxiliaryStates(), aux_arrays);
-  }
-
- private:
-  Executor(const Executor &e);
-  Executor &operator=(const Executor &e);
-  ExecutorHandle handle_;
-  Symbol symbol_;
-  std::map<std::string, NDArray> GetDict(const std::vector<std::string> &names,
-                                         const std::vector<NDArray> &arrays) {
-    std::map<std::string, NDArray> ret;
-    std::set<std::string> name_set;
-    for (const auto &s : names) {
-      CHECK(name_set.find(s) == name_set.end()) << "Duplicate names detected, "
-                                                << s;
-      name_set.insert(s);
-    }
-    CHECK_EQ(name_set.size(), arrays.size())
-        << "names size not equal to arrays size";
-    for (size_t i = 0; i < names.size(); ++i) {
-      ret[names[i]] = arrays[i];
-    }
-    return ret;
-  }
-};
-}  // namespace cpp
-}  // namespace mxnet
-#endif  // MXNET_CPP_EXECUTOR_H_
diff --git a/cpp-package/include/mxnet-cpp/executor.hpp b/cpp-package/include/mxnet-cpp/executor.hpp
deleted file mode 100644
index acb6b461d7e5..000000000000
--- a/cpp-package/include/mxnet-cpp/executor.hpp
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * \file executor.hpp - * \brief implementation of the executor - * \author Zhang Chen, Chuntao Hong - */ - -#ifndef MXNET_CPP_EXECUTOR_HPP_ -#define MXNET_CPP_EXECUTOR_HPP_ - -#include -#include -#include -#include "mxnet-cpp/executor.h" -#include "mxnet-cpp/optimizer.h" - - -namespace mxnet { -namespace cpp { -inline Executor::Executor(const Symbol &symbol, Context context, - const std::vector &arg_arrays, - const std::vector &grad_arrays, - const std::vector &grad_reqs, - const std::vector &aux_arrays, - const std::map &group_to_ctx, - Executor *shared_exec) { - this->arg_arrays = arg_arrays; - this->grad_arrays = grad_arrays; - this->aux_arrays = aux_arrays; - this->symbol_ = symbol; - - std::vector arg_handles; - std::vector grad_handles; - std::vector aux_handles; - - for (const auto &array : arg_arrays) { - arg_handles.push_back(array.GetHandle()); - } - for (const auto &array : grad_arrays) { - grad_handles.push_back(array.GetHandle()); - } - for (const auto &array : aux_arrays) { - aux_handles.push_back(array.GetHandle()); - } - - std::vector grad_reqs_uint; - for (auto s : grad_reqs) grad_reqs_uint.push_back(s); - - std::vector map_keys; - std::vector dev_types, dev_ids; - for (const auto &s : group_to_ctx) { - map_keys.push_back(s.first.c_str()); - dev_types.push_back(s.second.GetDeviceType()); - dev_ids.push_back(s.second.GetDeviceId()); - } - - ExecutorHandle shared_exec_handle = - shared_exec == nullptr ? nullptr : shared_exec->handle_; - - CHECK_EQ(MXExecutorBindEX(symbol.GetHandle(), context.GetDeviceType(), - context.GetDeviceId(), group_to_ctx.size(), - map_keys.data(), dev_types.data(), dev_ids.data(), - arg_handles.size(), arg_handles.data(), - grad_handles.data(), grad_reqs_uint.data(), - aux_handles.size(), aux_handles.data(), - shared_exec_handle, &handle_), - 0); - - mx_uint out_size; - NDArrayHandle *out_array; - CHECK_EQ(MXExecutorOutputs(handle_, &out_size, &out_array), 0); - for (mx_uint i = 0; i < out_size; ++i) { - outputs.push_back(NDArray(out_array[i])); - } -} - -inline std::string Executor::DebugStr() { - const char *output; - MXExecutorPrint(handle_, &output); - return std::string(output); -} - -} // namespace cpp -} // namespace mxnet - -#endif // MXNET_CPP_EXECUTOR_HPP_ diff --git a/cpp-package/include/mxnet-cpp/initializer.h b/cpp-package/include/mxnet-cpp/initializer.h deleted file mode 100644 index 34725b9dfa81..000000000000 --- a/cpp-package/include/mxnet-cpp/initializer.h +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2016 by Contributors - * \file initializer.h - * \brief random initializer - * \author Zhang Chen - */ - -#ifndef MXNET_CPP_INITIALIZER_H_ -#define MXNET_CPP_INITIALIZER_H_ - -#include -#include -#include -#include -#include "mxnet-cpp/ndarray.h" - -namespace mxnet { -namespace cpp { - -class Initializer { - public: - static bool StringStartWith(const std::string& name, - const std::string& check_str) { - return (name.size() >= check_str.size() && - name.substr(0, check_str.size()) == check_str); - } - static bool StringEndWith(const std::string& name, - const std::string& check_str) { - return (name.size() >= check_str.size() && - name.substr(name.size() - check_str.size(), check_str.size()) == - check_str); - } - virtual void operator()(const std::string& name, NDArray* arr) { - if (StringStartWith(name, "upsampling")) { - InitBilinear(arr); - } else if (StringEndWith(name, "bias")) { - InitBias(arr); - } else if (StringEndWith(name, "gamma")) { - InitGamma(arr); - } else if (StringEndWith(name, "beta")) { - InitBeta(arr); - } else if (StringEndWith(name, "weight")) { - InitWeight(arr); - } else if (StringEndWith(name, "moving_mean")) { - InitZero(arr); - } else if (StringEndWith(name, "moving_var")) { - InitOne(arr); - } else if (StringEndWith(name, "moving_inv_var")) { - InitZero(arr); - } else if (StringEndWith(name, "moving_avg")) { - InitZero(arr); - } else if (StringEndWith(name, "min")) { - InitZero(arr); - } else if (StringEndWith(name, "max")) { - InitOne(arr); - } else if (StringEndWith(name, "weight_quantize")) { - InitQuantizedWeight(arr); - } else if (StringEndWith(name, "bias_quantize")) { - InitQuantizedBias(arr); - } else { - InitDefault(arr); - } - } - - protected: - virtual void InitBilinear(NDArray* arr) { - Shape shape(arr->GetShape()); - std::vector weight(shape.Size(), 0); - int f = std::ceil(shape[3] / 2.0); - float c = (2 * f - 1 - f % 2) / (2. 
* f); - for (size_t i = 0; i < shape.Size(); ++i) { - int x = i % shape[3]; - int y = (i / shape[3]) % shape[2]; - weight[i] = (1 - std::abs(x / f - c)) * (1 - std::abs(y / f - c)); - } - (*arr).SyncCopyFromCPU(weight); - } - virtual void InitZero(NDArray* arr) { (*arr) = 0.0f; } - virtual void InitOne(NDArray* arr) { (*arr) = 1.0f; } - virtual void InitBias(NDArray* arr) { (*arr) = 0.0f; } - virtual void InitGamma(NDArray* arr) { (*arr) = 1.0f; } - virtual void InitBeta(NDArray* arr) { (*arr) = 0.0f; } - virtual void InitWeight(NDArray* arr) {} - virtual void InitQuantizedWeight(NDArray* arr) { - std::default_random_engine generator; - std::uniform_int_distribution _val(-127, 127); - (*arr) = _val(generator); - } - virtual void InitQuantizedBias(NDArray* arr) { - (*arr) = 0; - } - virtual void InitDefault(NDArray* arr) {} -}; - -class Constant : public Initializer { - public: - explicit Constant(float value) - : value(value) {} - void operator()(const std::string &name, NDArray *arr) override { - (*arr) = value; - } - protected: - float value; -}; - -class Zero : public Constant { - public: - Zero(): Constant(0.0f) {} -}; - -class One : public Constant { - public: - One(): Constant(1.0f) {} -}; - -class Uniform : public Initializer { - public: - explicit Uniform(float scale) - : Uniform(-scale, scale) {} - Uniform(float begin, float end) - : begin(begin), end(end) {} - void operator()(const std::string &name, NDArray *arr) override { - if (StringEndWith(name, "weight_quantize")) { - InitQuantizedWeight(arr); - return; - } - if (StringEndWith(name, "bias_quantize")) { - InitQuantizedBias(arr); - return; - } - NDArray::SampleUniform(begin, end, arr); - } - protected: - float begin, end; -}; - -class Normal : public Initializer { - public: - Normal(float mu, float sigma) - : mu(mu), sigma(sigma) {} - void operator()(const std::string &name, NDArray *arr) override { - if (StringEndWith(name, "weight_quantize")) { - InitQuantizedWeight(arr); - return; - } - if (StringEndWith(name, "bias_quantize")) { - InitQuantizedBias(arr); - return; - } - NDArray::SampleGaussian(mu, sigma, arr); - } - protected: - float mu, sigma; -}; - -class Bilinear : public Initializer { - public: - Bilinear() {} - void operator()(const std::string &name, NDArray *arr) override { - if (StringEndWith(name, "weight_quantize")) { - InitQuantizedWeight(arr); - return; - } - if (StringEndWith(name, "bias_quantize")) { - InitQuantizedBias(arr); - return; - } - InitBilinear(arr); - } -}; - -class Xavier : public Initializer { - public: - enum RandType { - gaussian, - uniform - } rand_type; - enum FactorType { - avg, - in, - out - } factor_type; - float magnitude; - Xavier(RandType rand_type = gaussian, FactorType factor_type = avg, - float magnitude = 3) - : rand_type(rand_type), factor_type(factor_type), magnitude(magnitude) {} - - void operator()(const std::string &name, NDArray* arr) override { - if (StringEndWith(name, "weight_quantize")) { - InitQuantizedWeight(arr); - return; - } - if (StringEndWith(name, "bias_quantize")) { - InitQuantizedBias(arr); - return; - } - - Shape shape(arr->GetShape()); - float hw_scale = 1.0f; - if (shape.ndim() > 2) { - for (size_t i = 2; i < shape.ndim(); ++i) { - hw_scale *= shape[i]; - } - } - float fan_in = shape[1] * hw_scale, fan_out = shape[0] * hw_scale; - float factor = 1.0f; - switch (factor_type) { - case avg: - factor = (fan_in + fan_out) / 2.0; - break; - case in: - factor = fan_in; - break; - case out: - factor = fan_out; - } - float scale = std::sqrt(magnitude / factor); - 
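// Editorial annotation, not part of the deleted file: the scale computed above is
// the whole of the Xavier scheme. With fan_in = shape[1] * prod(shape[2..]) and
// fan_out = shape[0] * prod(shape[2..]), factor is fan_in, fan_out, or their
// average, and scale = sqrt(magnitude / factor). Picking rand_type = uniform with
// the default magnitude = 3 and factor_type = avg therefore reproduces Glorot's
// U(-sqrt(6 / (fan_in + fan_out)), +sqrt(6 / (fan_in + fan_out))), while the
// MSRAPrelu subclass further below swaps in magnitude = 2 / (1 + slope^2) with a
// gaussian draw, i.e. the He/MSRA initialization.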
switch (rand_type) {
-      case uniform:
-        NDArray::SampleUniform(-scale, scale, arr);
-        break;
-      case gaussian:
-        NDArray::SampleGaussian(0, scale, arr);
-        break;
-    }
-  }
-};
-
-class MSRAPrelu : public Xavier {
- public:
-  explicit MSRAPrelu(FactorType factor_type = avg, float slope = 0.25f)
-      : Xavier(gaussian, factor_type, 2. / (1 + slope * slope)) {}
-};
-
-} // namespace cpp
-} // namespace mxnet
-
-#endif // MXNET_CPP_INITIALIZER_H_
diff --git a/cpp-package/include/mxnet-cpp/io.h b/cpp-package/include/mxnet-cpp/io.h
deleted file mode 100644
index 7d2d620bd886..000000000000
--- a/cpp-package/include/mxnet-cpp/io.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
-* Copyright (c) 2016 by Contributors
-* \file operator.h
-* \brief definition of io, such as DataIter
-* \author Zhang Chen
-*/
-#ifndef MXNET_CPP_IO_H_
-#define MXNET_CPP_IO_H_
-
-#include <string>
-#include <vector>
-#include <map>
-#include <memory>
-#include "mxnet-cpp/base.h"
-#include "mxnet-cpp/ndarray.h"
-#include "dmlc/logging.h"
-
-namespace mxnet {
-namespace cpp {
-/*!
-* \brief Default object for holding a mini-batch of data and related
-* information.
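A short usage sketch for the Initializer hierarchy removed above, against the pre-removal mxnet-cpp API ("MyInit" and the shapes are illustrative). operator() dispatches on parameter-name suffixes, so a subclass overrides only the hooks it needs. Two numeric notes on the code above: InitBilinear fills a kernel with (1 - |x/f - c|)(1 - |y/f - c|), where f = ceil(W/2) and c = (2f - 1 - f mod 2)/(2f); for W = 4 the intended profile is the outer product of {0.25, 0.75, 0.75, 0.25}, though x, y and f are all ints, so x / f truncates as written. For Xavier on a dense 256x128 weight, fan_in = 128, fan_out = 256, factor_type avg gives factor = 192 and scale = sqrt(3/192) = 0.125.

#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

class MyInit : public Initializer {
 protected:
  // Custom rule for "*_weight" parameters only; biases, gamma/beta and the
  // moving_* statistics keep the defaults inherited from Initializer.
  void InitWeight(NDArray* arr) override {
    NDArray::SampleUniform(-0.07f, 0.07f, arr);
  }
};

int main() {
  NDArray w(Shape(256, 128), Context::cpu(), false);
  MyInit init;
  init("fc1_weight", &w);    // routed to InitWeight via the StringEndWith dispatch
  Xavier xavier(Xavier::gaussian, Xavier::avg, 3.0f);
  xavier("fc2_weight", &w);  // samples N(0, 0.125) for this shape, per the math above
  return 0;
}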
-*/ -class DataBatch { - public: - NDArray data; - NDArray label; - int pad_num; - std::vector index; -}; -class DataIter { - public: - virtual void BeforeFirst(void) = 0; - virtual bool Next(void) = 0; - virtual NDArray GetData(void) = 0; - virtual NDArray GetLabel(void) = 0; - virtual int GetPadNum(void) = 0; - virtual std::vector GetIndex(void) = 0; - - DataBatch GetDataBatch() { - return DataBatch{GetData(), GetLabel(), GetPadNum(), GetIndex()}; - } - void Reset() { BeforeFirst(); } - - virtual ~DataIter() = default; -}; - -class MXDataIterMap { - public: - inline MXDataIterMap() { - mx_uint num_data_iter_creators = 0; - DataIterCreator *data_iter_creators = nullptr; - int r = MXListDataIters(&num_data_iter_creators, &data_iter_creators); - CHECK_EQ(r, 0); - for (mx_uint i = 0; i < num_data_iter_creators; i++) { - const char *name; - const char *description; - mx_uint num_args; - const char **arg_names; - const char **arg_type_infos; - const char **arg_descriptions; - r = MXDataIterGetIterInfo(data_iter_creators[i], &name, &description, - &num_args, &arg_names, &arg_type_infos, - &arg_descriptions); - CHECK_EQ(r, 0); - mxdataiter_creators_[name] = data_iter_creators[i]; - } - } - inline DataIterCreator GetMXDataIterCreator(const std::string &name) { - return mxdataiter_creators_[name]; - } - - private: - std::map mxdataiter_creators_; -}; - -struct MXDataIterBlob { - public: - MXDataIterBlob() : handle_(nullptr) {} - explicit MXDataIterBlob(DataIterHandle handle) : handle_(handle) {} - ~MXDataIterBlob() { MXDataIterFree(handle_); } - DataIterHandle handle_; - - private: - MXDataIterBlob &operator=(const MXDataIterBlob &); -}; - -class MXDataIter : public DataIter { - public: - explicit MXDataIter(const std::string &mxdataiter_type); - MXDataIter(const MXDataIter &other) { - creator_ = other.creator_; - params_ = other.params_; - blob_ptr_ = other.blob_ptr_; - } - void BeforeFirst(); - bool Next(); - NDArray GetData(); - NDArray GetLabel(); - int GetPadNum(); - std::vector GetIndex(); - MXDataIter CreateDataIter(); - /*! - * \brief set config parameters - * \param name name of the config parameter - * \param value value of the config parameter - * \return reference of self - */ - template - MXDataIter &SetParam(const std::string &name, const T &value) { - std::string value_str; - std::stringstream ss; - ss << value; - ss >> value_str; - - params_[name] = value_str; - return *this; - } - - private: - DataIterCreator creator_; - std::map params_; - std::shared_ptr blob_ptr_; - static MXDataIterMap*& mxdataiter_map(); -}; -} // namespace cpp -} // namespace mxnet - -#endif // MXNET_CPP_IO_H_ - diff --git a/cpp-package/include/mxnet-cpp/io.hpp b/cpp-package/include/mxnet-cpp/io.hpp deleted file mode 100644 index 677c0f6ee1f0..000000000000 --- a/cpp-package/include/mxnet-cpp/io.hpp +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
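A sketch of the iteration pattern the DataIter/DataBatch interface above supports, assuming the pre-removal API; the MNIST file paths are hypothetical. One caveat visible in SetParam above: the value is pushed through a stringstream with operator>>, so a value containing whitespace is truncated at the first space.

#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

int main() {
  auto train_iter = MXDataIter("MNISTIter")
      .SetParam("image", "./data/train-images-idx3-ubyte")
      .SetParam("label", "./data/train-labels-idx1-ubyte")
      .SetParam("batch_size", 128)
      .CreateDataIter();
  train_iter.Reset();                      // forwards to BeforeFirst()
  while (train_iter.Next()) {
    DataBatch batch = train_iter.GetDataBatch();
    // batch.data / batch.label are NDArrays; batch.pad_num counts the
    // padding samples in the final, short batch.
    NDArray::WaitAll();
  }
  return 0;
}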
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! -* \file operator.hpp -* \brief implementation of data iter -* \author Zhang Chen -*/ -#ifndef MXNET_CPP_IO_HPP_ -#define MXNET_CPP_IO_HPP_ - -#include -#include -#include "mxnet-cpp/io.h" - -namespace mxnet { -namespace cpp { - -inline MXDataIterMap*& MXDataIter::mxdataiter_map() { - static MXDataIterMap* mxdataiter_map_ = new MXDataIterMap; - return mxdataiter_map_; -} - -inline MXDataIter::MXDataIter(const std::string &mxdataiter_type) { - creator_ = mxdataiter_map()->GetMXDataIterCreator(mxdataiter_type); - blob_ptr_ = std::make_shared(nullptr); -} - -inline void MXDataIter::BeforeFirst() { - int r = MXDataIterBeforeFirst(blob_ptr_->handle_); - CHECK_EQ(r, 0); -} - -inline bool MXDataIter::Next() { - int out; - int r = MXDataIterNext(blob_ptr_->handle_, &out); - CHECK_EQ(r, 0); - return out; -} - -inline NDArray MXDataIter::GetData() { - NDArrayHandle handle; - int r = MXDataIterGetData(blob_ptr_->handle_, &handle); - CHECK_EQ(r, 0); - return NDArray(handle); -} - -inline NDArray MXDataIter::GetLabel() { - NDArrayHandle handle; - int r = MXDataIterGetLabel(blob_ptr_->handle_, &handle); - CHECK_EQ(r, 0); - return NDArray(handle); -} - -inline int MXDataIter::GetPadNum() { - int out; - int r = MXDataIterGetPadNum(blob_ptr_->handle_, &out); - CHECK_EQ(r, 0); - return out; -} -inline std::vector MXDataIter::GetIndex() { - uint64_t *out_index, out_size; - int r = MXDataIterGetIndex(blob_ptr_->handle_, &out_index, &out_size); - CHECK_EQ(r, 0); - std::vector ret; - for (uint64_t i = 0; i < out_size; ++i) { - ret.push_back(out_index[i]); - } - return ret; -} - -inline MXDataIter MXDataIter::CreateDataIter() { - std::vector param_keys; - std::vector param_values; - - for (auto &data : params_) { - param_keys.push_back(data.first.c_str()); - param_values.push_back(data.second.c_str()); - } - - MXDataIterCreateIter(creator_, param_keys.size(), param_keys.data(), - param_values.data(), &blob_ptr_->handle_); - return *this; -} - -// MXDataIter MNIst - -} // namespace cpp -} // namespace mxnet - -#endif // MXNET_CPP_IO_HPP_ - diff --git a/cpp-package/include/mxnet-cpp/kvstore.h b/cpp-package/include/mxnet-cpp/kvstore.h deleted file mode 100644 index 67f984fce0ee..000000000000 --- a/cpp-package/include/mxnet-cpp/kvstore.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! 
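MXDataIterMap above enumerates every registered iterator once via MXListDataIters and caches the creators by name. A standalone sketch against the same C API calls (signatures as used in the deleted code; error handling reduced to early returns):

#include <cstdio>
#include <mxnet/c_api.h>

int main() {
  mx_uint num = 0;
  DataIterCreator* creators = nullptr;
  if (MXListDataIters(&num, &creators) != 0) return 1;
  for (mx_uint i = 0; i < num; ++i) {
    const char *name, *desc;
    mx_uint num_args;
    const char **arg_names, **arg_types, **arg_descs;
    if (MXDataIterGetIterInfo(creators[i], &name, &desc, &num_args,
                              &arg_names, &arg_types, &arg_descs) != 0)
      return 1;
    std::printf("%s\n", name);   // e.g. "MNISTIter", "ImageRecordIter"
  }
  return 0;
}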
-* Copyright (c) 2016 by Contributors
-* \file kvstore.h
-* \brief definition of kvstore
-* \author Chuntao Hong
-*/
-
-#ifndef MXNET_CPP_KVSTORE_H_
-#define MXNET_CPP_KVSTORE_H_
-
-#include <string>
-#include <vector>
-#include "mxnet-cpp/ndarray.h"
-
-namespace mxnet {
-namespace cpp {
-
-class KVStore {
- public:
-  static void SetType(const std::string& type);
-  static void RunServer();
-  static void Init(int key, const NDArray& val);
-  static void Init(const std::string& key, const NDArray& val);
-  static void Init(const std::vector<int>& keys, const std::vector<NDArray>& vals);
-  static void Init(const std::vector<std::string>& keys, const std::vector<NDArray>& vals);
-  static void Push(int key, const NDArray& val, int priority = 0);
-  static void Push(const std::string& key, const NDArray& val, int priority = 0);
-  static void Push(const std::vector<int>& keys,
-                   const std::vector<NDArray>& vals, int priority = 0);
-  static void Push(const std::vector<std::string>& keys,
-                   const std::vector<NDArray>& vals, int priority = 0);
-  static void Pull(int key, NDArray* out, int priority = 0);
-  static void Pull(const std::string& key, NDArray* out, int priority = 0);
-  static void Pull(const std::vector<int>& keys,
-                   std::vector<NDArray>* outs, int priority = 0);
-  static void Pull(const std::vector<std::string>& keys,
-                   std::vector<NDArray>* outs, int priority = 0);
-  // TODO(lx): put lr in optimizer or not?
-  static void SetOptimizer(std::unique_ptr<Optimizer> optimizer, bool local = false);
-  static std::string GetType();
-  static int GetRank();
-  static int GetNumWorkers();
-  static void Barrier();
-  static std::string GetRole();
-
- private:
-  KVStore();
-  static KVStoreHandle& get_handle();
-  static std::unique_ptr<Optimizer>& get_optimizer();
-  static KVStore*& get_kvstore();
-  static void Controller(int head, const char* body, void* controller_handle);
-  static void Updater(int key, NDArrayHandle recv, NDArrayHandle local, void* handle_);
-};
-
-} // namespace cpp
-} // namespace mxnet
-
-#endif // MXNET_CPP_KVSTORE_H_
diff --git a/cpp-package/include/mxnet-cpp/kvstore.hpp b/cpp-package/include/mxnet-cpp/kvstore.hpp
deleted file mode 100644
index 6cd405b91dd4..000000000000
--- a/cpp-package/include/mxnet-cpp/kvstore.hpp
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
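A single-process usage sketch for the KVStore facade declared above (pre-removal API; the shapes and values are hypothetical). With local = true, SetOptimizer installs the update rule in-process via MXKVStoreSetUpdater; with local = false it serializes the optimizer as newline-separated "key=value" pairs starting with "opt_type=..." and sends the command to the parameter servers, which is exactly the format the Controller in kvstore.hpp below parses.

#include <memory>
#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

int main() {
  KVStore::SetType("local");
  std::unique_ptr<Optimizer> opt(OptimizerRegistry::Find("sgd"));
  opt->SetParam("lr", 0.1);
  KVStore::SetOptimizer(std::move(opt), true);  // install the updater in-process

  NDArray weight(Shape(2, 2), Context::cpu(), false);
  weight = 1.0f;
  NDArray grad(Shape(2, 2), Context::cpu(), false);
  grad = 0.5f;

  KVStore::Init(0, weight);     // seed key 0 with the initial value
  KVStore::Push(0, grad);       // schedule the update grad -> weight
  KVStore::Pull(0, &weight);    // fetch the updated value
  NDArray::WaitAll();
  return 0;
}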
- * \file kvstore.hpp - * \brief implementation of kvstore - * \author Xin Li - */ - -#include -#include -#include -#include -#include - -#include "mxnet-cpp/kvstore.h" -#include "mxnet-cpp/optimizer.h" - -#ifndef MXNET_CPP_KVSTORE_HPP_ -#define MXNET_CPP_KVSTORE_HPP_ - -namespace mxnet { -namespace cpp { - -inline void KVStore::Controller(int head, const char* body, void* controller_handle) { - if (head == 0) { - std::map params; - std::istringstream sin(body); - std::string line; - while (getline(sin, line)) { - size_t n = line.find('='); - params.emplace(line.substr(0, n), line.substr(n+1)); - } - std::unique_ptr opt(OptimizerRegistry::Find(params.at("opt_type"))); - params.erase("opt_type"); - for (const auto& pair : params) { - opt->SetParam(pair.first, pair.second); - } - get_kvstore()->SetOptimizer(std::move(opt), true); - } -} - -inline KVStoreHandle& KVStore::get_handle() { - static KVStoreHandle handle_ = nullptr; - return handle_; -} - -inline std::unique_ptr& KVStore::get_optimizer() { - static std::unique_ptr optimizer_; - return optimizer_; -} - -inline KVStore*& KVStore::get_kvstore() { - static KVStore* kvstore_ = new KVStore; - return kvstore_; -} - -inline KVStore::KVStore() {} - -inline void KVStore::SetType(const std::string& type) { - CHECK_EQ(MXKVStoreCreate(type.c_str(), &(get_kvstore()->get_handle())), 0); -} - -inline void KVStore::RunServer() { - CHECK_NE(GetRole(), "worker"); - CHECK_EQ(MXKVStoreRunServer(get_kvstore()->get_handle(), &Controller, 0), 0); -} - -inline void KVStore::Init(int key, const NDArray& val) { - NDArrayHandle val_handle = val.GetHandle(); - CHECK_EQ(MXKVStoreInit(get_kvstore()->get_handle(), 1, &key, &val_handle), 0); -} - -inline void KVStore::Init(const std::string& key, const NDArray& val) { - const char* key_handle = key.c_str(); - NDArrayHandle val_handle = val.GetHandle(); - CHECK_EQ(MXKVStoreInitEx(get_kvstore()->get_handle(), 1, &key_handle, &val_handle), 0); -} - -inline void KVStore::Init(const std::vector& keys, const std::vector& vals) { - CHECK_EQ(keys.size(), vals.size()); - std::vector val_handles(vals.size()); - std::transform(vals.cbegin(), vals.cend(), val_handles.begin(), - [](const NDArray& val) { - return val.GetHandle(); - }); - - CHECK_EQ(MXKVStoreInit(get_kvstore()->get_handle(), keys.size(), keys.data(), - val_handles.data()), 0); -} - -inline void KVStore::Init(const std::vector& keys, const std::vector& vals) { - CHECK_EQ(keys.size(), vals.size()); - std::vector key_handles(keys.size()); - std::transform(keys.cbegin(), keys.cend(), key_handles.begin(), - [](const std::string& key) { - return key.c_str(); - }); - std::vector val_handles(vals.size()); - std::transform(vals.cbegin(), vals.cend(), val_handles.begin(), - [](const NDArray& val) { - return val.GetHandle(); - }); - - CHECK_EQ(MXKVStoreInitEx(get_kvstore()->get_handle(), key_handles.size(), key_handles.data(), - val_handles.data()), 0); -} - -inline void KVStore::Push(int key, const NDArray& val, int priority) { - NDArrayHandle val_handle = val.GetHandle(); - CHECK_EQ(MXKVStorePush(get_kvstore()->get_handle(), 1, &key, &val_handle, priority), 0); -} - -inline void KVStore::Push(const std::string& key, const NDArray& val, int priority) { - const char* key_handle = key.c_str(); - NDArrayHandle val_handle = val.GetHandle(); - CHECK_EQ(MXKVStorePushEx(get_kvstore()->get_handle(), 1, &key_handle, &val_handle, priority), 0); -} - -inline void KVStore::Push(const std::vector& keys, - const std::vector& vals, int priority) { - CHECK_EQ(keys.size(), vals.size()); - 
std::vector val_handles(vals.size()); - std::transform(vals.cbegin(), vals.cend(), val_handles.begin(), - [](const NDArray& val) { - return val.GetHandle(); - }); - - CHECK_EQ(MXKVStorePush(get_kvstore()->get_handle(), keys.size(), keys.data(), - val_handles.data(), priority), 0); -} - -inline void KVStore::Push(const std::vector& keys, - const std::vector& vals, int priority) { - CHECK_EQ(keys.size(), vals.size()); - std::vector key_handles(keys.size()); - std::transform(keys.cbegin(), keys.cend(), key_handles.begin(), - [](const std::string& key) { - return key.c_str(); - }); - std::vector val_handles(vals.size()); - std::transform(vals.cbegin(), vals.cend(), val_handles.begin(), - [](const NDArray& val) { - return val.GetHandle(); - }); - - CHECK_EQ(MXKVStorePushEx(get_kvstore()->get_handle(), key_handles.size(), key_handles.data(), - val_handles.data(), priority), 0); -} - -inline void KVStore::Pull(int key, NDArray* out, int priority) { - NDArrayHandle out_handle = out->GetHandle(); - CHECK_EQ(MXKVStorePull(get_kvstore()->get_handle(), 1, &key, &out_handle, priority), 0); -} - -inline void KVStore::Pull(const std::string& key, NDArray* out, int priority) { - const char* key_handle = key.c_str(); - NDArrayHandle out_handle = out->GetHandle(); - CHECK_EQ(MXKVStorePullEx(get_kvstore()->get_handle(), 1, &key_handle, &out_handle, priority), 0); -} - -inline void KVStore::Pull(const std::vector& keys, - std::vector* outs, int priority) { - CHECK_EQ(keys.size(), outs->size()); - - std::vector out_handles(keys.size()); - std::transform(outs->cbegin(), outs->cend(), out_handles.begin(), - [](const NDArray& val) { - return val.GetHandle(); - }); - - CHECK_EQ(MXKVStorePull(get_kvstore()->get_handle(), keys.size(), keys.data(), - out_handles.data(), priority), 0); -} - -inline void KVStore::Pull(const std::vector& keys, - std::vector* outs, int priority) { - CHECK_EQ(keys.size(), outs->size()); - - std::vector key_handles(keys.size()); - std::transform(keys.cbegin(), keys.cend(), key_handles.begin(), - [](const std::string& key) { - return key.c_str(); - }); - std::vector out_handles(keys.size()); - std::transform(outs->cbegin(), outs->cend(), out_handles.begin(), - [](const NDArray& val) { - return val.GetHandle(); - }); - - CHECK_EQ(MXKVStorePullEx(get_kvstore()->get_handle(), key_handles.size(), key_handles.data(), - out_handles.data(), priority), 0); -} - -inline void KVStore::Updater(int key, NDArrayHandle recv, NDArrayHandle local, - void* handle_) { - Optimizer *opt = static_cast(handle_); - opt->Update(key, NDArray(local), NDArray(recv)); -} - -inline void KVStore::SetOptimizer(std::unique_ptr optimizer, bool local) { - if (local) { - get_kvstore()->get_optimizer() = std::move(optimizer); - CHECK_EQ(MXKVStoreSetUpdater(get_kvstore()->get_handle(), - &Updater, get_kvstore()->get_optimizer().get()), 0); - } else { - CHECK_EQ(MXKVStoreSendCommmandToServers(get_kvstore()->get_handle(), 0, - (*optimizer).Serialize().c_str()), 0); - } -} - -inline std::string KVStore::GetType() { - const char *type; - CHECK_EQ(MXKVStoreGetType(get_kvstore()->get_handle(), &type), 0); - return type; -} - -inline int KVStore::GetRank() { - int rank; - CHECK_EQ(MXKVStoreGetRank(get_kvstore()->get_handle(), &rank), 0); - return rank; -} - -inline int KVStore::GetNumWorkers() { - int num_workers; - CHECK_EQ(MXKVStoreGetGroupSize(get_kvstore()->get_handle(), &num_workers), 0); - return num_workers; -} - -inline void KVStore::Barrier() { - CHECK_EQ(MXKVStoreBarrier(get_kvstore()->get_handle()), 0); -} - -inline 
std::string KVStore::GetRole() { - int ret; - CHECK_EQ(MXKVStoreIsSchedulerNode(&ret), 0); - if (ret) { - return "scheduler"; - } - CHECK_EQ(MXKVStoreIsServerNode(&ret), 0); - if (ret) { - return "server"; - } - CHECK_EQ(MXKVStoreIsWorkerNode(&ret), 0); - CHECK(ret); - return "worker"; -} - -} // namespace cpp -} // namespace mxnet - -#endif // MXNET_CPP_KVSTORE_HPP_ diff --git a/cpp-package/include/mxnet-cpp/lr_scheduler.h b/cpp-package/include/mxnet-cpp/lr_scheduler.h deleted file mode 100644 index cffd1c7576e5..000000000000 --- a/cpp-package/include/mxnet-cpp/lr_scheduler.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! -* Copyright (c) 2017 by Contributors -* \file lr_scheduler.h -* \brief Scheduling learning rate -*/ - -#ifndef MXNET_CPP_LR_SCHEDULER_H_ -#define MXNET_CPP_LR_SCHEDULER_H_ - -#include "dmlc/logging.h" - -namespace mxnet { -namespace cpp { - -/*! -* \brief lr scheduler interface -*/ -class LRScheduler { - public: - /*! - * \brief constructor - * \param base_lr the initial learning rate. - */ - explicit LRScheduler(float base_lr = 0.01) - : base_lr_(base_lr) {} - /*! - * \brief set base lr - * \param lr learning rate from optimizer - */ - void SetLR(const float lr) { base_lr_ = lr; } - /*! - * \brief get a new learning rate - */ - virtual float GetLR(unsigned num_update) = 0; - /*! - * \brief destructor - */ - virtual ~LRScheduler() {} - - protected: - float base_lr_; -}; - -class FactorScheduler : public LRScheduler { - public: - explicit FactorScheduler(int step, float factor = 1, float stop_factor_lr = 1e-8) - : LRScheduler() { - step_ = step; - factor_ = factor; - stop_factor_lr_ = stop_factor_lr; - } - - float GetLR(unsigned num_update) override { - while (num_update > unsigned(count_ + step_)) { - count_ += step_; - base_lr_ *= factor_; - if (base_lr_ < stop_factor_lr_) { - base_lr_ = stop_factor_lr_; - LG << "Update[" << num_update << "]: now learning rate arrived at " \ - << base_lr_ << ", will not change in the future"; - } else { - LG << "Update[" << num_update << "]: Change learning rate to " << base_lr_; - } - } - return base_lr_; - } - - private: - int count_ = 0; - int step_; - float factor_; - float stop_factor_lr_; -}; - -} // namespace cpp -} // namespace mxnet - -#endif // MXNET_CPP_LR_SCHEDULER_H_ diff --git a/cpp-package/include/mxnet-cpp/metric.h b/cpp-package/include/mxnet-cpp/metric.h deleted file mode 100644 index d015d8b4acc9..000000000000 --- a/cpp-package/include/mxnet-cpp/metric.h +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
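Worked numbers for the FactorScheduler above, with hypothetical settings step = 1000, factor = 0.9, base_lr = 0.01. GetLR multiplies the rate by the factor each time num_update crosses another multiple of step, clamping at stop_factor_lr (and logging each change via LG):

#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

int main() {
  FactorScheduler sched(1000, 0.9f);  // step = 1000, factor = 0.9
  sched.SetLR(0.01f);                 // seed base_lr from the optimizer's rate
  LG << sched.GetLR(500);    // 0.01   (no step boundary crossed yet)
  LG << sched.GetLR(1001);   // 0.009  (crossed 1000)
  LG << sched.GetLR(2001);   // 0.0081 (crossed 2000)
  return 0;
}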
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! -* Copyright (c) 2016 by Contributors -* \file base.h -* \brief metrics defined -* \author Zhang Chen -*/ - -#ifndef MXNET_CPP_METRIC_H_ -#define MXNET_CPP_METRIC_H_ - -#include -#include -#include -#include -#include "mxnet-cpp/ndarray.h" -#include "dmlc/logging.h" - -namespace mxnet { -namespace cpp { - -class EvalMetric { - public: - explicit EvalMetric(const std::string& name, int num = 0) - : name(name), num(num) {} - virtual void Update(NDArray labels, NDArray preds) = 0; - void Reset() { - num_inst = 0; - sum_metric = 0.0f; - } - float Get() { return sum_metric / num_inst; } - void GetNameValue(); - - protected: - std::string name; - int num; - float sum_metric = 0.0f; - int num_inst = 0; - - static void CheckLabelShapes(NDArray labels, NDArray preds, - bool strict = false) { - if (strict) { - CHECK_EQ(Shape(labels.GetShape()), Shape(preds.GetShape())); - } else { - CHECK_EQ(labels.Size(), preds.Size()); - } - } -}; - -class Accuracy : public EvalMetric { - public: - Accuracy() : EvalMetric("accuracy") {} - - void Update(NDArray labels, NDArray preds) override { - CHECK_EQ(labels.GetShape().size(), 1); - mx_uint len = labels.GetShape()[0]; - std::vector pred_data(len); - std::vector label_data(len); - preds.ArgmaxChannel().SyncCopyToCPU(&pred_data, len); - labels.SyncCopyToCPU(&label_data, len); - for (mx_uint i = 0; i < len; ++i) { - sum_metric += (pred_data[i] == label_data[i]) ? 
1 : 0; - num_inst += 1; - } - } -}; - -class LogLoss : public EvalMetric { - public: - LogLoss() : EvalMetric("logloss") {} - - void Update(NDArray labels, NDArray preds) override { - static const float epsilon = 1e-15; - mx_uint len = labels.GetShape()[0]; - mx_uint m = preds.GetShape()[1]; - std::vector pred_data(len * m); - std::vector label_data(len); - preds.SyncCopyToCPU(&pred_data, pred_data.size()); - labels.SyncCopyToCPU(&label_data, len); - for (mx_uint i = 0; i < len; ++i) { - sum_metric += - -std::log(std::max(pred_data[i * m + label_data[i]], epsilon)); - num_inst += 1; - } - } -}; - -class MAE : public EvalMetric { - public: - MAE() : EvalMetric("mae") {} - - void Update(NDArray labels, NDArray preds) override { - CheckLabelShapes(labels, preds); - - std::vector pred_data; - preds.SyncCopyToCPU(&pred_data); - std::vector label_data; - labels.SyncCopyToCPU(&label_data); - - size_t len = preds.Size(); - mx_float sum = 0; - for (size_t i = 0; i < len; ++i) { - sum += std::abs(pred_data[i] - label_data[i]); - } - sum_metric += sum / len; - ++num_inst; - } -}; - -class MSE : public EvalMetric { - public: - MSE() : EvalMetric("mse") {} - - void Update(NDArray labels, NDArray preds) override { - CheckLabelShapes(labels, preds); - - std::vector pred_data; - preds.SyncCopyToCPU(&pred_data); - std::vector label_data; - labels.SyncCopyToCPU(&label_data); - - size_t len = preds.Size(); - mx_float sum = 0; - for (size_t i = 0; i < len; ++i) { - mx_float diff = pred_data[i] - label_data[i]; - sum += diff * diff; - } - sum_metric += sum / len; - ++num_inst; - } -}; - -class RMSE : public EvalMetric { - public: - RMSE() : EvalMetric("rmse") {} - - void Update(NDArray labels, NDArray preds) override { - CheckLabelShapes(labels, preds); - - std::vector pred_data; - preds.SyncCopyToCPU(&pred_data); - std::vector label_data; - labels.SyncCopyToCPU(&label_data); - - size_t len = preds.Size(); - mx_float sum = 0; - for (size_t i = 0; i < len; ++i) { - mx_float diff = pred_data[i] - label_data[i]; - sum += diff * diff; - } - sum_metric += std::sqrt(sum / len); - ++num_inst; - } -}; - -class PSNR : public EvalMetric { - public: - PSNR() : EvalMetric("psnr") { - } - - void Update(NDArray labels, NDArray preds) override { - CheckLabelShapes(labels, preds); - - std::vector pred_data; - preds.SyncCopyToCPU(&pred_data); - std::vector label_data; - labels.SyncCopyToCPU(&label_data); - - size_t len = preds.Size(); - mx_float sum = 0; - for (size_t i = 0; i < len; ++i) { - mx_float diff = pred_data[i] - label_data[i]; - sum += diff * diff; - } - mx_float mse = sum / len; - if (mse > 0) { - sum_metric += 10 * std::log(255.0f / mse) / log10_; - } else { - sum_metric += 99.0f; - } - ++num_inst; - } - - private: - mx_float log10_ = std::log(10.0f); -}; - -} // namespace cpp -} // namespace mxnet - -#endif // MXNET_CPP_METRIC_H_ - diff --git a/cpp-package/include/mxnet-cpp/model.h b/cpp-package/include/mxnet-cpp/model.h deleted file mode 100644 index b3a0a9dbef6e..000000000000 --- a/cpp-package/include/mxnet-cpp/model.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
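A usage sketch for the EvalMetric classes above (pre-removal API; the iterator and forward function are hypothetical stand-ins). Accuracy::Update expects 1-D labels and takes the argmax over prediction channels itself. One aside on PSNR above: it computes 10*log10(255/mse), while the conventional definition uses the squared peak, 10*log10(255^2/mse) = 20*log10(255) - 10*log10(mse).

#include <functional>
#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

// Evaluate accuracy over an iterator, given a forward function that maps a
// data batch to per-class scores.
float Evaluate(DataIter* val_iter,
               std::function<NDArray(const NDArray&)> forward) {
  Accuracy acc;
  val_iter->Reset();
  while (val_iter->Next()) {
    NDArray preds = forward(val_iter->GetData());
    acc.Update(val_iter->GetLabel(), preds);  // argmax over channels inside
  }
  return acc.Get();                           // sum_metric / num_inst
}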
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! -* Copyright (c) 2016 by Contributors -* \file model.h -* \brief MXNET.cpp model module -* \author Zhang Chen -*/ - -#ifndef MXNET_CPP_MODEL_H_ -#define MXNET_CPP_MODEL_H_ - -#include -#include -#include "mxnet-cpp/base.h" -#include "mxnet-cpp/symbol.h" -#include "mxnet-cpp/ndarray.h" - -namespace mxnet { -namespace cpp { - -struct FeedForwardConfig { - Symbol symbol; - std::vector ctx = {Context::cpu()}; - int num_epoch = 0; - int epoch_size = 0; - std::string optimizer = "sgd"; - // TODO(zhangchen-qinyinghua) More implement - // initializer=Uniform(0.01), - // numpy_batch_size=128, - // arg_params=None, aux_params=None, - // allow_extra_params=False, - // begin_epoch=0, - // **kwargs): - FeedForwardConfig(const FeedForwardConfig &other) {} - FeedForwardConfig() {} -}; -class FeedForward { - public: - explicit FeedForward(const FeedForwardConfig &conf) : conf_(conf) {} - void Predict(); - void Score(); - void Fit(); - void Save(); - void Load(); - static FeedForward Create(); - - private: - void InitParams(); - void InitPredictor(); - void InitIter(); - void InitEvalIter(); - FeedForwardConfig conf_; -}; - -} // namespace cpp -} // namespace mxnet - -#endif // MXNET_CPP_MODEL_H_ - diff --git a/cpp-package/include/mxnet-cpp/monitor.h b/cpp-package/include/mxnet-cpp/monitor.h deleted file mode 100644 index 76e7ce836f18..000000000000 --- a/cpp-package/include/mxnet-cpp/monitor.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! -* Copyright (c) 2017 by Contributors -* \file monitor.h -* \brief monitor definition -* \author Xin Li -*/ - -#ifndef MXNET_CPP_MONITOR_H_ -#define MXNET_CPP_MONITOR_H_ - -#include -#include -#include -#include -#include -#include -#include -#include "mxnet-cpp/base.h" -#include "mxnet-cpp/ndarray.h" -#include "mxnet-cpp/executor.h" - -namespace mxnet { -namespace cpp { - -/*! -* \brief Default function for monitor that computes statistics of the input tensor, -* which is the mean absolute |x|/size(x) -* \param x The input tensor -* \return The statistics of the input tensor -*/ -NDArray _default_monitor_func(const NDArray &x); - -/*! -* \brief Monitor interface -*/ -class Monitor { - public: - typedef std::function StatFunc; - typedef std::tuple Stat; - - /*! - * \brief Monitor constructor - * \param interval Number of batches between printing. 
- * \param pattern A regular expression specifying which tensors to monitor. - * \param stat_func A function that computes statistics of tensors. Defaults to mean - * absolute value |x|/size(x). - */ - Monitor(int interval, std::regex pattern = std::regex(".*"), - StatFunc stat_func = _default_monitor_func); - - /*! - * \brief install callback to executor. Supports installing to multiple executors. - * \param exe The executor to install to. - * \param monitor_all If true, monitor both input and output, otherwise monitor output only. - */ - void install(Executor *exe, bool monitor_all = false); - - /*! - * \brief Start collecting stats for current batch. Call before calling forward. - */ - void tic(); - - /*! - * \brief End collecting for current batch and return results. Call after computation - * of current batch. - */ - std::vector toc(); - - /*! - * \brief End collecting and print results. - */ - void toc_print(); - - protected: - int interval; - std::regex pattern; - StatFunc stat_func; - std::vector exes; - - int step; - bool activated; - std::vector stats; - - static void executor_callback(const char *name, NDArrayHandle ndarray, void *monitor_ptr); -}; - -} // namespace cpp -} // namespace mxnet -#endif // MXNET_CPP_MONITOR_H_ diff --git a/cpp-package/include/mxnet-cpp/monitor.hpp b/cpp-package/include/mxnet-cpp/monitor.hpp deleted file mode 100644 index 4439e1bd3a7c..000000000000 --- a/cpp-package/include/mxnet-cpp/monitor.hpp +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! 
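A training-loop sketch for the Monitor above (pre-removal API; "exec" is a hypothetical, already-bound Executor). The regex selects which arg/aux arrays are reported, every `interval` batches:

#include <regex>
#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

void TrainWithMonitor(Executor* exec, int num_batches) {
  Monitor mon(100, std::regex(".*weight"));  // report matching arrays every 100 batches
  mon.install(exec);                         // hook the executor callback
  for (int i = 0; i < num_batches; ++i) {
    mon.tic();             // arm collection if this batch is due
    exec->Forward(true);
    exec->Backward();
    // ... optimizer step over exec->arg_arrays / grad_arrays ...
    mon.toc_print();       // prints "Batch: <step> <name> <stat>" lines
  }
}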
-* \file monitor.hpp -* \brief monitor implementation -* \author Xin Li -*/ - -#ifndef MXNET_CPP_MONITOR_HPP_ -#define MXNET_CPP_MONITOR_HPP_ - -#include -#include -#include -#include -#include -#include "mxnet-cpp/monitor.h" - -namespace mxnet { -namespace cpp { -inline NDArray _default_monitor_func(const NDArray &x) { - return Operator("norm").PushInput(x).Invoke()[0] / std::sqrt(x.Size()); -} - -inline Monitor::Monitor(int interval, std::regex pattern, StatFunc stat_func) - : interval(interval), pattern(pattern), stat_func(stat_func), step(0) { -} - -inline void Monitor::install(Executor *exe, bool monitor_all) { - MXExecutorSetMonitorCallbackEX(exe->handle_, - static_cast(&Monitor::executor_callback), - this, monitor_all); - exes.push_back(exe); -} - -inline void Monitor::tic() { - if (step % interval == 0) { - activated = true; - stats.clear(); - } -} - -inline std::vector Monitor::toc() { - std::vector results; - if (activated) { - activated = false; - - for (auto* exe : exes) { - for (auto& arg : exe->arg_arrays) { - arg.WaitToRead(); - } - for (auto& aux : exe->aux_arrays) { - aux.WaitToRead(); - } - - for (auto &pair : exe->arg_dict()) { - if (std::regex_match(pair.first, pattern)) { - stats.emplace_back(step, pair.first, stat_func(pair.second)); - } - } - for (auto &pair : exe->aux_dict()) { - if (std::regex_match(pair.first, pattern)) { - stats.emplace_back(step, pair.first, stat_func(pair.second)); - } - } - } - results.swap(stats); - } - ++step; - return results; -} - -inline void Monitor::toc_print() { - auto results = toc(); - std::vector data(1); - for (auto& stat : results) { - NDArray ndarray = std::get<2>(stat); - - std::string str; - if (ndarray.Size() == 1) { - if (ndarray.GetContext().GetDeviceType() != DeviceType::kGPU) { - data[0] = ndarray.GetData()[0]; - } else { - ndarray.SyncCopyToCPU(&data); - } - str = std::to_string(data[0]); - } else { - std::ostringstream out; - out << ndarray; - str = out.str(); - } - - LG << "Batch: " << std::get<0>(stat) << ' ' << std::get<1>(stat) << ' ' << str; - } -} - -inline void Monitor::executor_callback(const char *name, NDArrayHandle handle, - void *monitor_ptr) { - Monitor *monitor = static_cast(monitor_ptr); - if (monitor->activated && std::regex_match(name, monitor->pattern)) { - monitor->stats.emplace_back(monitor->step, name, monitor->stat_func(NDArray(handle))); - } -} - -} // namespace cpp -} // namespace mxnet -#endif // MXNET_CPP_MONITOR_HPP_ diff --git a/cpp-package/include/mxnet-cpp/ndarray.h b/cpp-package/include/mxnet-cpp/ndarray.h deleted file mode 100644 index 0a9a41234758..000000000000 --- a/cpp-package/include/mxnet-cpp/ndarray.h +++ /dev/null @@ -1,485 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! 
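Note what _default_monitor_func above actually reports: norm(x)/sqrt(n), i.e. the root-mean-square of the tensor, rather than the mean absolute value that the doc comment in monitor.h advertises. A plain-C++ equivalent for reference:

#include <cmath>
#include <cstdio>
#include <numeric>
#include <vector>

float default_stat(const std::vector<float>& x) {
  // L2 norm divided by sqrt(n) == sqrt(mean(x^2)), the RMS.
  float ss = std::inner_product(x.begin(), x.end(), x.begin(), 0.0f);
  return std::sqrt(ss) / std::sqrt(static_cast<float>(x.size()));
}

int main() {
  std::printf("%f\n", default_stat({3.0f, 4.0f}));  // 5/sqrt(2) ~= 3.5355
  return 0;
}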
-* Copyright (c) 2016 by Contributors -* \file ndarray.h -* \brief definition of ndarray -* \author Chuntao Hong, Zhang Chen -*/ - -#ifndef MXNET_CPP_NDARRAY_H_ -#define MXNET_CPP_NDARRAY_H_ - -#include -#include -#include -#include -#include -#include "mxnet-cpp/base.h" -#include "mxnet-cpp/shape.h" - -namespace mxnet { -namespace cpp { - -enum DeviceType { - kCPU = 1, - kGPU = 2, - kCPUPinned = 3 -}; - -/*! -* \brief Context interface -*/ -class Context { - public: - /*! - * \brief Context constructor - * \param type type of the device - * \param id id of the device - */ - Context(const DeviceType &type, int id) : type_(type), id_(id) {} - /*! - * \return the type of the device - */ - DeviceType GetDeviceType() const { return type_; } - /*! - * \return the id of the device - */ - int GetDeviceId() const { return id_; } - - /*! - * \brief Return a GPU context - * \param device_id id of the device - * \return the corresponding GPU context - */ - static Context gpu(int device_id = 0) { - return Context(DeviceType::kGPU, device_id); - } - - /*! - * \brief Return a CPU context - * \param device_id id of the device. this is not needed by CPU - * \return the corresponding CPU context - */ - static Context cpu(int device_id = 0) { - return Context(DeviceType::kCPU, device_id); - } - - private: - DeviceType type_; - int id_; -}; - -/*! -* \brief struct to store NDArrayHandle -*/ -struct NDBlob { - public: - /*! - * \brief default constructor - */ - NDBlob() : handle_(nullptr) {} - /*! - * \brief construct with a NDArrayHandle - * \param handle NDArrayHandle to store - */ - explicit NDBlob(NDArrayHandle handle) : handle_(handle) {} - /*! - * \brief destructor, free the NDArrayHandle - */ - ~NDBlob() { MXNDArrayFree(handle_); } - /*! - * \brief the NDArrayHandle - */ - NDArrayHandle handle_; - - private: - NDBlob(const NDBlob &); - NDBlob &operator=(const NDBlob &); -}; - -/*! -* \brief NDArray interface -*/ -class NDArray { - public: - /*! - * \brief construct with a none handle - */ - NDArray(); - /*! - * \brief construct with a NDArrayHandle - */ - explicit NDArray(const NDArrayHandle &handle); - /*! - * \brief construct a new dynamic NDArray - * \param shape the shape of array - * \param context context of NDArray - * \param delay_alloc whether delay the allocation - * \param dtype data type of NDArray - */ - NDArray(const std::vector &shape, const Context &context, - bool delay_alloc = true, int dtype = 0); - /*! - * \brief construct a new dynamic NDArray - * \param shape the shape of array - * \param constext context of NDArray - * \param delay_alloc whether delay the allocation - * \param dtype data type of NDArray - */ - NDArray(const Shape &shape, const Context &context, - bool delay_alloc = true, int dtype = 0); - NDArray(const mx_float *data, size_t size); - /*! - * \brief construct a new dynamic NDArray - * \param data the data to create NDArray from - * \param shape the shape of array - * \param constext context of NDArray - */ - NDArray(const mx_float *data, const Shape &shape, const Context &context); - /*! 
- * \brief construct a new dynamic NDArray - * \param data the data to create NDArray from - * \param shape the shape of array - * \param constext context of NDArray - */ - NDArray(const std::vector &data, const Shape &shape, - const Context &context); - explicit NDArray(const std::vector &data); - NDArray operator+(mx_float scalar); - NDArray operator-(mx_float scalar); - NDArray operator*(mx_float scalar); - NDArray operator/(mx_float scalar); - NDArray operator%(mx_float scalar); - NDArray operator+(const NDArray &); - NDArray operator-(const NDArray &); - NDArray operator*(const NDArray &); - NDArray operator/(const NDArray &); - NDArray operator%(const NDArray &); - /*! - * \brief set all the elements in ndarray to be scalar - * \param scalar the scalar to set - * \return reference of self - */ - NDArray &operator=(mx_float scalar); - /*! - * \brief elementwise add to current space - * this mutate the current NDArray - * \param scalar the data to add - * \return reference of self - */ - NDArray &operator+=(mx_float scalar); - /*! - * \brief elementwise subtract from current ndarray - * this mutate the current NDArray - * \param scalar the data to subtract - * \return reference of self - */ - NDArray &operator-=(mx_float scalar); - /*! - * \brief elementwise multiplication to current ndarray - * this mutate the current NDArray - * \param scalar the data to subtract - * \return reference of self - */ - NDArray &operator*=(mx_float scalar); - /*! - * \brief elementwise division from current ndarray - * this mutate the current NDArray - * \param scalar the data to subtract - * \return reference of self - */ - NDArray &operator/=(mx_float scalar); - /*! - * \brief elementwise modulo from current ndarray - * this mutate the current NDArray - * \param scalar the data to subtract - * \return reference of self - */ - NDArray &operator%=(mx_float scalar); - /*! - * \brief elementwise add to current space - * this mutate the current NDArray - * \param src the data to add - * \return reference of self - */ - NDArray &operator+=(const NDArray &src); - /*! - * \brief elementwise subtract from current ndarray - * this mutate the current NDArray - * \param src the data to subtract - * \return reference of self - */ - NDArray &operator-=(const NDArray &src); - /*! - * \brief elementwise multiplication to current ndarray - * this mutate the current NDArray - * \param src the data to subtract - * \return reference of self - */ - NDArray &operator*=(const NDArray &src); - /*! - * \brief elementwise division from current ndarray - * this mutate the current NDArray - * \param src the data to subtract - * \return reference of self - */ - NDArray &operator/=(const NDArray &src); - /*! - * \brief elementwise modulo from current ndarray - * this mutate the current NDArray - * \param src the data to subtract - * \return reference of self - */ - NDArray &operator%=(const NDArray &src); - NDArray ArgmaxChannel(); - /*! - * \brief Do a synchronize copy from a contiguous CPU memory region. - * - * This function will call WaitToWrite before the copy is performed. - * This is useful to copy data from existing memory region that are - * not wrapped by NDArray(thus dependency not being tracked). - * - * \param data the data source to copy from. - * \param size the memory size we want to copy from. - */ - void SyncCopyFromCPU(const mx_float *data, size_t size); - /*! - * \brief Do a synchronize copy from a contiguous CPU memory region. - * - * This function will call WaitToWrite before the copy is performed. 
- * This is useful to copy data from existing memory region that are - * not wrapped by NDArray(thus dependency not being tracked). - * - * \param data the data source to copy from, int the form of mx_float vector - */ - void SyncCopyFromCPU(const std::vector &data); - /*! - * \brief Do a synchronize copy to a contiguous CPU memory region. - * - * This function will call WaitToRead before the copy is performed. - * This is useful to copy data from existing memory region that are - * not wrapped by NDArray(thus dependency not being tracked). - * - * \param data the data source to copyinto. - * \param size the memory size we want to copy into. Defualt value is Size() - */ - void SyncCopyToCPU(mx_float *data, size_t size = 0); - /*! - * \brief Do a synchronize copy to a contiguous CPU memory region. - * - * This function will call WaitToRead before the copy is performed. - * This is useful to copy data from existing memory region that are - * not wrapped by NDArray(thus dependency not being tracked). - * - * \param data the data source to copyinto. - * \param size the memory size we want to copy into. Defualt value is Size() - */ - void SyncCopyToCPU(std::vector *data, size_t size = 0); - /*! - * \brief copy the content of current array to a target array. - * \param other the target NDArray - * \return the target NDarray - */ - NDArray CopyTo(NDArray * other) const; - /*! - * \brief return a new copy to this NDArray - * \param Context the new context of this NDArray - * \return the new copy - */ - NDArray Copy(const Context &) const; - /*! - * \brief return offset of the element at (h, w) - * \param h height position - * \param w width position - * \return offset of two dimensions array - */ - size_t Offset(size_t h = 0, size_t w = 0) const; - /*! - * \brief return offset of three dimensions array - * \param c channel position - * \param h height position - * \param w width position - * \return offset of three dimensions array - */ - size_t Offset(size_t c, size_t h, size_t w) const; - /*! - * \brief return value of the element at (index) - * \param index position - * \return value of one dimensions array - */ - mx_float At(size_t index) const; - /*! - * \brief return value of the element at (h, w) - * \param h height position - * \param w width position - * \return value of two dimensions array - */ - mx_float At(size_t h, size_t w) const; - /*! - * \brief return value of three dimensions array - * \param c channel position - * \param h height position - * \param w width position - * \return value of three dimensions array - */ - mx_float At(size_t c, size_t h, size_t w) const; - /*! - * \brief Slice a NDArray - * \param begin begin index in first dim - * \param end end index in first dim - * \return sliced NDArray - */ - NDArray Slice(mx_uint begin, mx_uint end) const; - /*! - * \brief Return a reshaped NDArray that shares memory with current one - * \param new_shape the new shape - * \return reshaped NDarray - */ - NDArray Reshape(const Shape &new_shape) const; - /*! - * \brief Block until all the pending write operations with respect - * to current NDArray are finished, and read can be performed. - */ - void WaitToRead() const; - /*! - * \brief Block until all the pending read/write operations with respect - * to current NDArray are finished, and write can be performed. - */ - void WaitToWrite(); - /*! - * \brief Block until all the pending read/write operations with respect - * to current NDArray are finished, and read/write can be performed. - */ - static void WaitAll(); - /*! 
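An end-to-end sketch of the NDArray surface declared above (pre-removal API): build an array from host data, run elementwise arithmetic, and copy the result back. Each operator call is an imperative op executed by the engine; SyncCopyToCPU blocks on the pending writes it needs.

#include <vector>
#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

int main() {
  std::vector<mx_float> src = {1, 2, 3, 4, 5, 6};
  NDArray a(src, Shape(2, 3), Context::cpu());
  NDArray b = a * 2.0f + 1.0f;   // {3, 5, 7, 9, 11, 13}
  b += a;                        // {4, 7, 10, 13, 16, 19}
  std::vector<mx_float> out;
  b.SyncCopyToCPU(&out);         // waits as needed, resizes out to b.Size()
  LG << b.At(1, 2);              // row 1, col 2 -> 19
  return 0;
}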
- * \brief Sample gaussian distribution for each elements of out. - * \param mu mean of gaussian distribution. - * \param sigma standard deviation of gaussian distribution. - * \param out output NDArray. - */ - static void SampleGaussian(mx_float mu, mx_float sigma, NDArray *out); - /*! - * \brief Sample uniform distribution for each elements of out. - * \param begin lower bound of distribution. - * \param end upper bound of distribution. - * \param out output NDArray. - */ - static void SampleUniform(mx_float begin, mx_float end, NDArray *out); - /*! - * \brief Load NDArrays from binary file. - * \param file_name name of the binary file. - * \param array_list a list of NDArrays returned, do not fill the list if - * nullptr is given. - * \param array_map a map from names to NDArrays returned, do not fill the map - * if nullptr is given or no names is stored in binary file. - */ - static void Load(const std::string &file_name, - std::vector *array_list = nullptr, - std::map *array_map = nullptr); - /*! - * \brief Load map of NDArrays from binary file. - * \param file_name name of the binary file. - * \return a list of NDArrays. - */ - static std::map LoadToMap(const std::string &file_name); - /*! - * \brief Load list of NDArrays from binary file. - * \param file_name name of the binary file. - * \return a map from names to NDArrays. - */ - static std::vector LoadToList(const std::string &file_name); - /*! - * \brief Load NDArrays from buffer. - * \param buffer Pointer to buffer. (ie contents of param file) - * \param size Size of buffer - * \param array_list a list of NDArrays returned, do not fill the list if - * nullptr is given. - * \param array_map a map from names to NDArrays returned, do not fill the map - * if nullptr is given or no names is stored in binary file. - */ - static void LoadFromBuffer(const void *buffer, size_t size, - std::vector *array_list = nullptr, - std::map *array_map = nullptr); - /*! - * \brief Load map of NDArrays from buffer. - * \param buffer Pointer to buffer. (ie contents of param file) - * \param size Size of buffer - * \return a list of NDArrays. - */ - static std::map LoadFromBufferToMap(const void *buffer, size_t size); - /*! - * \brief Load list of NDArrays from buffer. - * \param buffer Pointer to buffer. (ie contents of param file) - * \param size Size of buffer - * \return a map from names to NDArrays. - */ - static std::vector LoadFromBufferToList(const void *buffer, size_t size); - /*! - * \brief save a map of string->NDArray to binary file. - * \param file_name name of the binary file. - * \param array_map a map from names to NDArrays. - */ - static void Save(const std::string &file_name, - const std::map &array_map); - /*! - * \brief save a list of NDArrays to binary file. - * \param file_name name of the binary file. - * \param array_list a list of NDArrays. - */ - static void Save(const std::string &file_name, - const std::vector &array_list); - /*! - * \return the size of current NDArray, a.k.a. the production of all shape dims - */ - size_t Size() const; - /*! - * \return the shape of current NDArray, in the form of mx_uint vector - */ - std::vector GetShape() const; - /*! - * \return the data type of current NDArray - */ - int GetDType() const; - /*! - * \brief Get the pointer to data (IMPORTANT: The ndarray should not be in GPU) - * \return the data pointer to the current NDArray - */ - const mx_float *GetData() const; - - /*! - * \return the context of NDArray - */ - Context GetContext() const; - - /*! 
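A round-trip sketch for the Save/Load statics above (pre-removal API; the file name and parameter names are hypothetical):

#include <map>
#include <string>
#include "mxnet-cpp/MxNetCpp.h"
using namespace mxnet::cpp;

int main() {
  NDArray w(Shape(4, 4), Context::cpu(), false);
  w = 0.5f;
  std::map<std::string, NDArray> params = {{"fc1_weight", w}};
  NDArray::Save("model-0001.params", params);

  // Later, or in another process:
  std::map<std::string, NDArray> restored =
      NDArray::LoadToMap("model-0001.params");
  NDArray w2 = restored.at("fc1_weight");
  NDArray::WaitAll();   // drain pending engine work before exiting
  return 0;
}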
-  * \return the NDArrayHandle of the current NDArray
-  */
-  NDArrayHandle GetHandle() const { return blob_ptr_->handle_; }
-
- private:
-  std::shared_ptr<NDBlob> blob_ptr_;
-};
-
-std::ostream& operator<<(std::ostream& out, const NDArray &ndarray);
-}  // namespace cpp
-}  // namespace mxnet
-
-#endif  // MXNET_CPP_NDARRAY_H_
diff --git a/cpp-package/include/mxnet-cpp/ndarray.hpp b/cpp-package/include/mxnet-cpp/ndarray.hpp
deleted file mode 100644
index 38935defae74..000000000000
--- a/cpp-package/include/mxnet-cpp/ndarray.hpp
+++ /dev/null
@@ -1,466 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file ndarray.hpp
- * \brief implementation of the ndarray
- * \author Zhang Chen, Chuntao Hong
- */
-
-#ifndef MXNET_CPP_NDARRAY_HPP_
-#define MXNET_CPP_NDARRAY_HPP_
-
-#include <algorithm>
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-#include "dmlc/logging.h"
-#include "mxnet-cpp/ndarray.h"
-#include "mxnet-cpp/operator.h"
-
-namespace mxnet {
-namespace cpp {
-
-inline NDArray::NDArray() {
-  NDArrayHandle handle;
-  CHECK_EQ(MXNDArrayCreateNone(&handle), 0);
-  blob_ptr_ = std::make_shared<NDBlob>(handle);
-}
-inline NDArray::NDArray(const NDArrayHandle &handle) {
-  blob_ptr_ = std::make_shared<NDBlob>(handle);
-}
-inline NDArray::NDArray(const std::vector<mx_uint> &shape, const Context &context,
-                        bool delay_alloc, int dtype) {
-  NDArrayHandle handle;
-  CHECK_EQ(MXNDArrayCreateEx(shape.data(), shape.size(), context.GetDeviceType(),
-                             context.GetDeviceId(), delay_alloc, dtype, &handle),
-           0);
-  blob_ptr_ = std::make_shared<NDBlob>(handle);
-}
-inline NDArray::NDArray(const Shape &shape, const Context &context,
-                        bool delay_alloc, int dtype) {
-  NDArrayHandle handle;
-  CHECK_EQ(MXNDArrayCreateEx(shape.data(), shape.ndim(), context.GetDeviceType(),
-                             context.GetDeviceId(), delay_alloc, dtype, &handle),
-           0);
-  blob_ptr_ = std::make_shared<NDBlob>(handle);
-}
-inline NDArray::NDArray(const mx_float *data, size_t size) {
-  NDArrayHandle handle;
-  CHECK_EQ(MXNDArrayCreateNone(&handle), 0);
-  MXNDArraySyncCopyFromCPU(handle, data, size);
-  blob_ptr_ = std::make_shared<NDBlob>(handle);
-}
-inline NDArray::NDArray(const mx_float *data, const Shape &shape,
-                        const Context &context) {
-  NDArrayHandle handle;
-  CHECK_EQ(MXNDArrayCreate(shape.data(), shape.ndim(), context.GetDeviceType(),
-                           context.GetDeviceId(), false, &handle),
-           0);
-  CHECK_EQ(MXNDArraySyncCopyFromCPU(handle, data, shape.Size()), 0);
-  blob_ptr_ = std::make_shared<NDBlob>(handle);
-}
-inline NDArray::NDArray(const std::vector<mx_float> &data, const Shape &shape,
-                        const Context &context) {
-  NDArrayHandle handle;
-  CHECK_EQ(MXNDArrayCreate(shape.data(), shape.ndim(), context.GetDeviceType(),
-                           context.GetDeviceId(), false, &handle),
-           0);
-  MXNDArraySyncCopyFromCPU(handle, data.data(), shape.Size());
-  blob_ptr_ = std::make_shared<NDBlob>(handle);
-}
-inline NDArray::NDArray(const std::vector<mx_float> &data) {
-  NDArrayHandle handle;
-  CHECK_EQ(MXNDArrayCreateNone(&handle), 0);
-  MXNDArraySyncCopyFromCPU(handle, data.data(), data.size());
-  blob_ptr_ = std::make_shared<NDBlob>(handle);
-}
-
-inline NDArray NDArray::operator+(mx_float scalar) {
-  NDArray ret;
-  Operator("_plus_scalar")(*this, scalar).Invoke(ret);
-  return ret;
-}
-inline NDArray NDArray::operator-(mx_float scalar) {
-  NDArray ret;
-  Operator("_minus_scalar")(*this, scalar).Invoke(ret);
-  return ret;
-}
-inline NDArray NDArray::operator*(mx_float scalar) {
-  NDArray ret;
-  Operator("_mul_scalar")(*this, scalar).Invoke(ret);
-  return ret;
-}
-inline NDArray NDArray::operator/(mx_float scalar) {
-  NDArray ret;
-  Operator("_div_scalar")(*this, scalar).Invoke(ret);
-  return ret;
-}
-inline NDArray NDArray::operator%(mx_float scalar) {
-  NDArray ret;
-  Operator("_mod_scalar")(*this, scalar).Invoke(ret);
-  return ret;
-}
-inline NDArray NDArray::operator+(const NDArray &rhs) {
-  NDArray ret;
-  Operator("_plus")(*this, rhs).Invoke(ret);
-  return ret;
-}
-inline NDArray NDArray::operator-(const NDArray &rhs) {
-  NDArray ret;
-  Operator("_minus")(*this, rhs).Invoke(ret);
-  return ret;
-}
-inline NDArray NDArray::operator*(const NDArray &rhs) {
-  NDArray ret;
-  Operator("_mul")(*this, rhs).Invoke(ret);
-  return ret;
-}
-inline NDArray NDArray::operator/(const NDArray &rhs) {
-  NDArray ret;
-  Operator("_div")(*this, rhs).Invoke(ret);
-  return ret;
-}
-inline NDArray NDArray::operator%(const NDArray &rhs) {
-  NDArray ret;
-  Operator("_mod")(*this, rhs).Invoke(ret);
-  return ret;
-}
-inline NDArray &NDArray::operator=(mx_float scalar) {
-  Operator("_set_value")(scalar).Invoke(*this);
-  return *this;
-}
-inline NDArray &NDArray::operator+=(mx_float scalar) {
-  Operator("_plus_scalar")(*this, scalar).Invoke(*this);
-  return *this;
-}
-inline NDArray &NDArray::operator-=(mx_float scalar) {
-  Operator("_minus_scalar")(*this, scalar).Invoke(*this);
-  return *this;
-}
-inline NDArray &NDArray::operator*=(mx_float scalar) {
-  Operator("_mul_scalar")(*this, scalar).Invoke(*this);
-  return *this;
-}
-inline NDArray &NDArray::operator/=(mx_float scalar) {
-  Operator("_div_scalar")(*this, scalar).Invoke(*this);
-  return *this;
-}
-inline NDArray &NDArray::operator%=(mx_float scalar) {
-  Operator("_mod_scalar")(*this, scalar).Invoke(*this);
-  return *this;
-}
-inline NDArray &NDArray::operator+=(const NDArray &rhs) {
-  Operator("_plus")(*this, rhs).Invoke(*this);
-  return *this;
-}
-inline NDArray &NDArray::operator-=(const NDArray &rhs) {
-  Operator("_minus")(*this, rhs).Invoke(*this);
-  return *this;
-}
-inline NDArray &NDArray::operator*=(const NDArray &rhs) {
-  Operator("_mul")(*this, rhs).Invoke(*this);
-  return *this;
-}
-inline NDArray &NDArray::operator/=(const NDArray &rhs) {
-  Operator("_div")(*this, rhs).Invoke(*this);
-  return *this;
-}
-inline NDArray &NDArray::operator%=(const NDArray &rhs) {
-  Operator("_mod")(*this, rhs).Invoke(*this);
-  return *this;
-}
-
-inline NDArray NDArray::ArgmaxChannel() {
-  NDArray ret;
-  Operator("argmax_channel")(*this).Invoke(ret);
-  return ret;
-}
-
-inline void NDArray::SyncCopyFromCPU(const mx_float *data, size_t size) {
-  MXNDArraySyncCopyFromCPU(blob_ptr_->handle_, data, size);
-}
-inline void NDArray::SyncCopyFromCPU(const std::vector<mx_float> &data) {
-  MXNDArraySyncCopyFromCPU(blob_ptr_->handle_, data.data(), data.size());
-}
-inline void NDArray::SyncCopyToCPU(mx_float *data, size_t size) {
-  MXNDArraySyncCopyToCPU(blob_ptr_->handle_, data, size > 0 ? size : Size());
-}
-inline void NDArray::SyncCopyToCPU(std::vector<mx_float> *data, size_t size) {
-  size = size > 0 ? size : Size();
-  data->resize(size);
-  MXNDArraySyncCopyToCPU(blob_ptr_->handle_, data->data(), size);
-}
-inline NDArray NDArray::Copy(const Context &ctx) const {
-  NDArray ret(GetShape(), ctx, true, this->GetDType());
-  Operator("_copyto")(*this).Invoke(ret);
-  return ret;
-}
-inline NDArray NDArray::CopyTo(NDArray * other) const {
-  Operator("_copyto")(*this).Invoke(*other);
-  return *other;
-}
-inline NDArray NDArray::Slice(mx_uint begin, mx_uint end) const {
-  NDArrayHandle handle;
-  CHECK_EQ(MXNDArraySlice(GetHandle(), begin, end, &handle), 0);
-  return NDArray(handle);
-}
-inline NDArray NDArray::Reshape(const Shape &new_shape) const {
-  NDArrayHandle handle;
-  std::vector<int> dims(new_shape.ndim());
-  for (index_t i = 0; i < new_shape.ndim(); ++i) {
-    dims[i] = new_shape[i];
-  }
-  new_shape.data();
-  CHECK_EQ(
-      MXNDArrayReshape(GetHandle(), new_shape.ndim(), dims.data(), &handle), 0);
-  return NDArray(handle);
-}
-inline void NDArray::WaitToRead() const {
-  CHECK_EQ(MXNDArrayWaitToRead(blob_ptr_->handle_), 0) << MXGetLastError();
-}
-inline void NDArray::WaitToWrite() {
-  CHECK_EQ(MXNDArrayWaitToWrite(blob_ptr_->handle_), 0) << MXGetLastError();
-}
-inline void NDArray::WaitAll() { CHECK_EQ(MXNDArrayWaitAll(), 0) << MXGetLastError(); }
-inline void NDArray::SampleGaussian(mx_float mu, mx_float sigma, NDArray *out) {
-  Operator("_random_normal")(mu, sigma).Invoke(*out);
-}
-inline void NDArray::SampleUniform(mx_float begin, mx_float end, NDArray *out) {
-  Operator("_random_uniform")(begin, end).Invoke(*out);
-}
-inline void NDArray::Load(const std::string &file_name,
-                          std::vector<NDArray> *array_list,
-                          std::map<std::string, NDArray> *array_map) {
-  mx_uint out_size, out_name_size;
-  NDArrayHandle *out_arr;
-  const char **out_names;
-  CHECK_EQ(MXNDArrayLoad(file_name.c_str(), &out_size, &out_arr, &out_name_size,
-                         &out_names),
-           0);
-  if (array_list != nullptr) {
-    array_list->reserve(out_size);
-    for (mx_uint i = 0; i < out_size; ++i) {
-      array_list->push_back(NDArray(out_arr[i]));
-    }
-  }
-  if (array_map != nullptr && out_name_size > 0) {
-    CHECK_EQ(out_name_size, out_size);
-    for (mx_uint i = 0; i < out_size; ++i) {
-      (*array_map)[out_names[i]] = NDArray(out_arr[i]);
-    }
-  }
-}
-inline std::map<std::string, NDArray> NDArray::LoadToMap(
-    const std::string &file_name) {
-  std::map<std::string, NDArray> array_map;
-  mx_uint out_size, out_name_size;
-  NDArrayHandle *out_arr;
-  const char **out_names;
-  CHECK_EQ(MXNDArrayLoad(file_name.c_str(), &out_size, &out_arr, &out_name_size,
-                         &out_names),
-           0);
-  if (out_name_size > 0) {
-    CHECK_EQ(out_name_size, out_size);
-    for (mx_uint i = 0; i < out_size; ++i) {
-      array_map[out_names[i]] = NDArray(out_arr[i]);
-    }
-  }
-  return array_map;
-}
-inline std::vector<NDArray> NDArray::LoadToList(const std::string &file_name) {
-  std::vector<NDArray> array_list;
-  mx_uint out_size, out_name_size;
-  NDArrayHandle *out_arr;
-  const char **out_names;
-  CHECK_EQ(MXNDArrayLoad(file_name.c_str(), &out_size, &out_arr, &out_name_size,
-                         &out_names),
-           0);
-  array_list.reserve(out_size);
-  for (mx_uint i = 0; i < out_size; ++i) {
-    array_list.push_back(NDArray(out_arr[i]));
-  }
-  return array_list;
-}
-inline void NDArray::LoadFromBuffer(const void *buffer, size_t size,
-                                    std::vector<NDArray> *array_list,
-                                    std::map<std::string, NDArray> *array_map) {
-  mx_uint out_size, out_name_size;
-  NDArrayHandle *out_arr;
-  const char **out_names;
-  CHECK_EQ(MXNDArrayLoadFromBuffer(buffer, size, &out_size, &out_arr, &out_name_size,
-                                   &out_names),
-           0);
-  if (array_list != nullptr) {
-    array_list->reserve(out_size);
-    for (mx_uint i = 0; i < out_size; ++i) {
-      array_list->push_back(NDArray(out_arr[i]));
-    }
-  }
-  if (array_map != nullptr && out_name_size > 0) {
-    CHECK_EQ(out_name_size, out_size);
-    for (mx_uint i = 0; i < out_size; ++i) {
-      (*array_map)[out_names[i]] = NDArray(out_arr[i]);
-    }
-  }
-}
-inline std::map<std::string, NDArray> NDArray::LoadFromBufferToMap(
-    const void *buffer, size_t size) {
-  std::map<std::string, NDArray> array_map;
-  mx_uint out_size, out_name_size;
-  NDArrayHandle *out_arr;
-  const char **out_names;
-  CHECK_EQ(MXNDArrayLoadFromBuffer(buffer, size, &out_size, &out_arr, &out_name_size,
-                                   &out_names),
-           0);
-  if (out_name_size > 0) {
-    CHECK_EQ(out_name_size, out_size);
-    for (mx_uint i = 0; i < out_size; ++i) {
-      array_map[out_names[i]] = NDArray(out_arr[i]);
-    }
-  }
-  return array_map;
-}
-inline std::vector<NDArray> NDArray::LoadFromBufferToList(const void *buffer, size_t size) {
-  std::vector<NDArray> array_list;
-  mx_uint out_size, out_name_size;
-  NDArrayHandle *out_arr;
-  const char **out_names;
-  CHECK_EQ(MXNDArrayLoadFromBuffer(buffer, size, &out_size, &out_arr, &out_name_size,
-                                   &out_names),
-           0);
-  array_list.reserve(out_size);
-  for (mx_uint i = 0; i < out_size; ++i) {
-    array_list.push_back(NDArray(out_arr[i]));
-  }
-  return array_list;
-}
-inline void NDArray::Save(const std::string &file_name,
-                          const std::map<std::string, NDArray> &array_map) {
-  std::vector<NDArrayHandle> args;
-  std::vector<const char *> keys;
-  for (const auto &t : array_map) {
-    args.push_back(t.second.GetHandle());
-    keys.push_back(t.first.c_str());
-  }
-  CHECK_EQ(
-      MXNDArraySave(file_name.c_str(), args.size(), args.data(), keys.data()),
-      0);
-}
-inline void NDArray::Save(const std::string &file_name,
-                          const std::vector<NDArray> &array_list) {
-  std::vector<NDArrayHandle> args;
-  for (const auto &t : array_list) {
-    args.push_back(t.GetHandle());
-  }
-  CHECK_EQ(MXNDArraySave(file_name.c_str(), args.size(), args.data(), nullptr),
-           0);
-}
-
-inline size_t NDArray::Offset(size_t h, size_t w) const {
-  auto const shape = GetShape();
-  CHECK_EQ(shape.size(), 2) << "The NDArray needs to be 2 dimensional.";
-
-  return (h * shape[1]) + w;
-}
-
-inline size_t NDArray::Offset(size_t c, size_t h, size_t w) const {
-  auto const shape = GetShape();
-  CHECK_EQ(shape.size(), 3) << "The NDArray needs to be 3 dimensional.";
-  return h * shape[0] * shape[2] + w * shape[0] + c;
-}
-
-inline mx_float NDArray::At(size_t h, size_t w) const {
-  return GetData()[Offset(h, w)];
-}
-
-inline mx_float NDArray::At(size_t c, size_t h, size_t w) const {
-  return GetData()[Offset(c, h, w)];
-}
-
-inline mx_float NDArray::At(size_t index) const {
-  auto shape = GetShape();
-  CHECK_EQ(shape.size(), 1) << "The NDArray needs to be 1 dimensional.";
-  CHECK_LT(index, shape[0]) << "Specified index is out of range.";
-  return GetData()[index];
-}
-
-inline size_t NDArray::Size() const {
-  size_t ret = 1;
-  for (auto &i : GetShape()) ret *= i;
-  return ret;
-}
-
-inline std::vector<mx_uint> NDArray::GetShape() const {
-  const int *out_pdata;
-  int out_dim;
-  MXNDArrayGetShapeEx(blob_ptr_->handle_, &out_dim, &out_pdata);
-  std::vector<mx_uint> ret;
-  for (int i = 0; i < out_dim; ++i) {
-    ret.push_back(out_pdata[i]);
-  }
-  return ret;
-}
-
-inline int NDArray::GetDType() const {
-  int ret;
-  MXNDArrayGetDType(blob_ptr_->handle_, &ret);
-  return ret;
-}
-
-inline const mx_float *NDArray::GetData() const {
-  void *ret;
-  MXNDArrayGetData(blob_ptr_->handle_, &ret);
-  if (GetDType() != 0) {
-    return nullptr;
-  }
-  return static_cast<mx_float*>(ret);
-}
-
-inline Context NDArray::GetContext() const {
-  int out_dev_type;
-  int out_dev_id;
-  MXNDArrayGetContext(blob_ptr_->handle_, &out_dev_type, &out_dev_id);
-  return Context((DeviceType)out_dev_type, out_dev_id);
-}
-
-inline std::ostream & operator<<(std::ostream &out, const NDArray &ndarray) {
-  // TODO(lx75249): Consider DType / beautify like numpy
-  auto shape = ndarray.GetShape();
-  NDArray cpu_array(ndarray.GetShape(), Context::cpu());
-  if (ndarray.GetContext().GetDeviceType() != DeviceType::kGPU) {
-    cpu_array = ndarray;
-  } else {
-    ndarray.WaitToRead();
-    ndarray.CopyTo(&cpu_array);
-  }
-
-  out << '[';
-  cpu_array.WaitToRead();
-  std::copy(cpu_array.GetData(), cpu_array.GetData() + ndarray.Size(),
-            std::ostream_iterator<mx_float>(out, ", "));
-  out << ']';
-  return out;
-}
-
-}  // namespace cpp
-}  // namespace mxnet
-
-#endif  // MXNET_CPP_NDARRAY_HPP_
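For readers tracking what the two hunks above remove: ndarray.h/ndarray.hpp were the user-facing C++ NDArray front end. A minimal usage sketch under the deleted API (the shapes, values, and file name are illustrative only, not taken from this patch):

#include <iostream>
#include <vector>
#include "mxnet-cpp/MxNetCpp.h"  // umbrella header of the removed cpp-package

using namespace mxnet::cpp;

int main() {
  std::vector<mx_float> host = {1, 2, 3, 4, 5, 6};
  NDArray a(host, Shape(2, 3), Context::cpu());  // copy host data into a 2x3 array
  NDArray b = a * 2.0f + 1.0f;  // element-wise ops dispatch through Operator
  b.WaitToRead();               // the engine is asynchronous; sync before reading
  std::cout << b << std::endl;  // operator<< copies to CPU and prints
  NDArray::Save("arrays.nd", std::vector<NDArray>{a, b});
  return 0;
}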
diff --git a/cpp-package/include/mxnet-cpp/op_map.h b/cpp-package/include/mxnet-cpp/op_map.h
deleted file mode 100644
index 17746d1fa596..000000000000
--- a/cpp-package/include/mxnet-cpp/op_map.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
-* Copyright (c) 2016 by Contributors
-* \file op_map.h
-* \brief definition of OpMap
-* \author Chuntao Hong
-*/
-
-#ifndef MXNET_CPP_OP_MAP_H_
-#define MXNET_CPP_OP_MAP_H_
-
-#include <map>
-#include <string>
-#include "mxnet-cpp/base.h"
-#include "dmlc/logging.h"
-
-namespace mxnet {
-namespace cpp {
-
-/*!
-* \brief OpMap instance holds a map of all the symbol creators so we can
-*  get symbol creators by name.
-*  This is used internally by Symbol and Operator.
-*/
-class OpMap {
- public:
-  /*!
-  * \brief Create an OpMap instance
-  */
-  inline OpMap() {
-    mx_uint num_symbol_creators = 0;
-    AtomicSymbolCreator *symbol_creators = nullptr;
-    int r =
-        MXSymbolListAtomicSymbolCreators(&num_symbol_creators, &symbol_creators);
-    CHECK_EQ(r, 0);
-    for (mx_uint i = 0; i < num_symbol_creators; i++) {
-      const char *name;
-      const char *description;
-      mx_uint num_args;
-      const char **arg_names;
-      const char **arg_type_infos;
-      const char **arg_descriptions;
-      const char *key_var_num_args;
-      r = MXSymbolGetAtomicSymbolInfo(symbol_creators[i], &name, &description,
-                                      &num_args, &arg_names, &arg_type_infos,
-                                      &arg_descriptions, &key_var_num_args);
-      CHECK_EQ(r, 0);
-      symbol_creators_[name] = symbol_creators[i];
-    }
-
-    nn_uint num_ops;
-    const char **op_names;
-    r = NNListAllOpNames(&num_ops, &op_names);
-    CHECK_EQ(r, 0);
-    for (nn_uint i = 0; i < num_ops; i++) {
-      OpHandle handle;
-      r = NNGetOpHandle(op_names[i], &handle);
-      CHECK_EQ(r, 0);
-      op_handles_[op_names[i]] = handle;
-    }
-  }
-
-  /*!
-  * \brief Get a symbol creator with its name.
-  *
-  * \param name name of the symbol creator
-  * \return handle to the symbol creator
-  */
-  inline AtomicSymbolCreator GetSymbolCreator(const std::string &name) {
-    if (symbol_creators_.count(name) == 0)
-      return GetOpHandle(name);
-    return symbol_creators_[name];
-  }
-
-  /*!
-  * \brief Get an op handle with its name.
-  *
-  * \param name name of the op
-  * \return handle to the op
-  */
-  inline OpHandle GetOpHandle(const std::string &name) {
-    return op_handles_[name];
-  }
-
- private:
-  std::map<std::string, AtomicSymbolCreator> symbol_creators_;
-  std::map<std::string, OpHandle> op_handles_;
-};
-
-}  // namespace cpp
-}  // namespace mxnet
-
-#endif  // MXNET_CPP_OP_MAP_H_
diff --git a/cpp-package/include/mxnet-cpp/op_suppl.h b/cpp-package/include/mxnet-cpp/op_suppl.h
deleted file mode 100644
index 4f3011c17caa..000000000000
--- a/cpp-package/include/mxnet-cpp/op_suppl.h
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
-* Copyright (c) 2016 by Contributors
-* \file op_suppl.h
-* \brief A supplement and amendment of the operators from op.h
-* \author Zhang Chen, zhubuntu, Xin Li
-*/
-
-#ifndef MXNET_CPP_OP_SUPPL_H_
-#define MXNET_CPP_OP_SUPPL_H_
-
-#include <cassert>
-#include <string>
-#include <vector>
-#include "mxnet-cpp/base.h"
-#include "mxnet-cpp/shape.h"
-#include "mxnet-cpp/operator.h"
-#include "mxnet-cpp/MxNetCpp.h"
-
-namespace mxnet {
-namespace cpp {
-
-inline Symbol _Plus(Symbol lhs, Symbol rhs) {
-  return Operator("_Plus")(lhs, rhs)
-           .CreateSymbol();
-}
-inline Symbol _Mul(Symbol lhs, Symbol rhs) {
-  return Operator("_Mul")(lhs, rhs)
-           .CreateSymbol();
-}
-inline Symbol _Minus(Symbol lhs, Symbol rhs) {
-  return Operator("_Minus")(lhs, rhs)
-           .CreateSymbol();
-}
-inline Symbol _Div(Symbol lhs, Symbol rhs) {
-  return Operator("_Div")(lhs, rhs)
-           .CreateSymbol();
-}
-inline Symbol _Mod(Symbol lhs, Symbol rhs) {
-  return Operator("_Mod")(lhs, rhs)
-           .CreateSymbol();
-}
-inline Symbol _Power(Symbol lhs, Symbol rhs) {
-  return Operator("_Power")(lhs, rhs)
-           .CreateSymbol();
-}
-inline Symbol _Maximum(Symbol lhs, Symbol rhs) {
-  return Operator("_Maximum")(lhs, rhs)
-           .CreateSymbol();
-}
-inline Symbol _Minimum(Symbol lhs, Symbol rhs) {
-  return Operator("_Minimum")(lhs, rhs)
-           .CreateSymbol();
-}
-inline Symbol _PlusScalar(Symbol lhs, mx_float scalar) {
-  return Operator("_PlusScalar")(lhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-inline Symbol _MinusScalar(Symbol lhs, mx_float scalar) {
-  return Operator("_MinusScalar")(lhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-inline Symbol _RMinusScalar(mx_float scalar, Symbol rhs) {
-  return Operator("_RMinusScalar")(rhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-inline Symbol _MulScalar(Symbol lhs, mx_float scalar) {
-  return Operator("_MulScalar")(lhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-inline Symbol _DivScalar(Symbol lhs, mx_float scalar) {
-  return Operator("_DivScalar")(lhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-inline Symbol _RDivScalar(mx_float scalar, Symbol rhs) {
-  return Operator("_RDivScalar")(rhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-inline Symbol _ModScalar(Symbol lhs, mx_float scalar) {
-  return Operator("_ModScalar")(lhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-inline Symbol _RModScalar(mx_float scalar, Symbol rhs) {
-  return Operator("_RModScalar")(rhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-inline Symbol _PowerScalar(Symbol lhs, mx_float scalar) {
-  return Operator("_PowerScalar")(lhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-inline Symbol _RPowerScalar(mx_float scalar, Symbol rhs) {
-  return Operator("_RPowerScalar")(rhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-inline Symbol _MaximumScalar(Symbol lhs, mx_float scalar) {
-  return Operator("_MaximumScalar")(lhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-inline Symbol _MinimumScalar(Symbol lhs, mx_float scalar) {
-  return Operator("_MinimumScalar")(lhs)
-           .SetParam("scalar", scalar)
-           .CreateSymbol();
-}
-// TODO(zhangcheng-qinyinghua)
-//   make crop function run in op.h
-//   This function is due to [zhubuntu](https://github.com/zhubuntu)
-inline Symbol Crop(const std::string& symbol_name,
-                   int num_args,
-                   Symbol data,
-                   Symbol crop_like,
-                   Shape offset = Shape(0, 0),
-                   Shape h_w = Shape(0, 0),
-                   bool center_crop = false) {
-  return Operator("Crop")
-           .SetParam("num_args", num_args)
-           .SetParam("offset", offset)
-           .SetParam("h_w", h_w)
-           .SetParam("center_crop", center_crop)
-           .SetInput("arg0", data)
-           .SetInput("arg1", crop_like)
-           .CreateSymbol(symbol_name);
-}
-
-
-/*!
- * \brief Apply activation function to input.
- *        Softmax Activation is only available with CUDNN on GPU and will be
- *        computed at each location across channel if input is 4D.
- * \param symbol_name name of the resulting symbol.
- * \param data Input data to activation function.
- * \param act_type Activation function to be applied.
- * \return new symbol
- */
-inline Symbol Activation(const std::string& symbol_name,
-                         Symbol data,
-                         const std::string& act_type) {
-  assert(act_type == "relu" ||
-         act_type == "sigmoid" ||
-         act_type == "softrelu" ||
-         act_type == "tanh");
-  return Operator("Activation")
-           .SetParam("act_type", act_type.c_str())
-           .SetInput("data", data)
-           .CreateSymbol(symbol_name);
-}
-
-}  // namespace cpp
-}  // namespace mxnet
-
-#endif  // MXNET_CPP_OP_SUPPL_H_
-
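The helpers deleted above were thin sugar over Operator for symbolic graph building. A sketch of how they were typically composed (assuming the rest of the cpp-package headers are on the include path; the symbol names are illustrative):

#include "mxnet-cpp/MxNetCpp.h"

using namespace mxnet::cpp;

Symbol BuildHead() {
  Symbol data = Symbol::Variable("data");
  Symbol act = Activation("act1", data, "relu");  // asserts on unknown act_type
  return _MulScalar(act, 0.5f);                   // supplemental scalar helper
}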
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
-* Copyright (c) 2017 by Contributors
-* \file op_util.h
-* \brief operator helper functions
-* \author Chris Olivier
-*/
-
-#ifndef MXNET_CPP_OP_UTIL_H_
-#define MXNET_CPP_OP_UTIL_H_
-
-#include <string>
-
-#if defined(MXNET_USE_CAFFE) && MXNET_USE_CAFFE != 0
-#include <caffe/proto/caffe.pb.h>
-#include <google/protobuf/text_format.h>
-#endif
-
-namespace mxnet {
-namespace cpp {
-
-#if defined(MXNET_USE_CAFFE) && MXNET_USE_CAFFE != 0
-
-inline ::caffe::LayerParameter textToCaffeLayerParameter(const std::string& text) {
-  caffe::NetParameter np;
-  const bool success = google::protobuf::TextFormat::ParseFromString(text, &np);
-  CHECK_EQ(success, true) << "Invalid protobuf layer string: " << text;
-  return ::caffe::LayerParameter(np.layer(0));
-}
-
-template <typename StreamType>
-inline StreamType& operator << (StreamType& os, const ::caffe::LayerParameter& op) {
-  std::string s;
-  caffe::NetParameter np;
-  // Avoid wasting time making a copy -- just push in our default object's pointer
-  np.mutable_layer()->AddAllocated(const_cast<::caffe::LayerParameter *>(&op));
-  google::protobuf::TextFormat::PrintToString(np, &s);
-  np.mutable_layer()->ReleaseLast();
-  os << s;
-  return os;
-}
-#endif
-
-}  // namespace cpp
-}  // namespace mxnet
-
-#endif  // MXNET_CPP_OP_UTIL_H_
diff --git a/cpp-package/include/mxnet-cpp/operator.h b/cpp-package/include/mxnet-cpp/operator.h
deleted file mode 100644
index 9f289f0e248b..000000000000
--- a/cpp-package/include/mxnet-cpp/operator.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
-* Copyright (c) 2016 by Contributors
-* \file operator.h
-* \brief definition of operator
-* \author Chuntao Hong, Zhang Chen
-*/
-
-#ifndef MXNET_CPP_OPERATOR_H_
-#define MXNET_CPP_OPERATOR_H_
-
-#include <map>
-#include <string>
-#include <vector>
-#include "mxnet-cpp/base.h"
-#include "mxnet-cpp/op_map.h"
-#include "mxnet-cpp/symbol.h"
-
-namespace mxnet {
-namespace cpp {
-class Mxnet;
-/*!
-* \brief Operator interface
-*/
-class Operator {
- public:
-  /*!
-  * \brief Operator constructor
-  * \param operator_name type of the operator
-  */
-  explicit Operator(const std::string &operator_name);
-  Operator &operator=(const Operator &rhs);
-  /*!
-  * \brief set config parameters
-  * \param name name of the config parameter
-  * \param value value of the config parameter
-  * \return reference of self
-  */
-  template <typename T>
-  Operator &SetParam(const std::string &name, const T &value) {
-    std::string value_str;
-    std::stringstream ss;
-    ss << value;
-    ss >> value_str;
-
-    params_[name] = value_str;
-    return *this;
-  }
-  /*!
-  * \brief set config parameters from positional inputs
-  * \param pos the position of parameter
-  * \param value value of the config parameter
-  * \return reference of self
-  */
-  template <typename T>
-  Operator &SetParam(int pos, const T &value) {
-    std::string value_str;
-    std::stringstream ss;
-    ss << value;
-    ss >> value_str;
-
-    params_[arg_names_[pos]] = value_str;
-    return *this;
-  }
-  /*!
-  * \brief add an input symbol
-  * \param name name of the input symbol
-  * \param symbol the input symbol
-  * \return reference of self
-  */
-  Operator &SetInput(const std::string &name, const Symbol &symbol);
-  /*!
-  * \brief add an input symbol
-  * \param symbol the input symbol
-  */
-  template <int N>
-  void PushInput(const Symbol &symbol) {
-    input_symbols_.push_back(symbol.GetHandle());
-  }
-  /*!
-  * \brief add input symbols
-  * \return reference of self
-  */
-  Operator &operator()() { return *this; }
-  /*!
-  * \brief add input symbols
-  * \param symbol the input symbol
-  * \return reference of self
-  */
-  Operator &operator()(const Symbol &symbol) {
-    input_symbols_.push_back(symbol.GetHandle());
-    return *this;
-  }
-  /*!
-  * \brief add a list of input symbols
-  * \param symbols the vector of the input symbols
-  * \return reference of self
-  */
-  Operator &operator()(const std::vector<Symbol> &symbols) {
-    for (auto &s : symbols) {
-      input_symbols_.push_back(s.GetHandle());
-    }
-    return *this;
-  }
-  /*!
-  * \brief create a Symbol from the current operator
-  * \param name the name of the operator
-  * \return the operator Symbol
-  */
-  Symbol CreateSymbol(const std::string &name = "");
-
-  /*!
-  * \brief add an input ndarray
-  * \param name name of the input ndarray
-  * \param ndarray the input ndarray
-  * \return reference of self
-  */
-  Operator &SetInput(const std::string &name, const NDArray &ndarray);
-  /*!
-  * \brief add an input ndarray
-  * \param ndarray the input ndarray
-  */
-  template <int N>
-  Operator &PushInput(const NDArray &ndarray) {
-    input_ndarrays_.push_back(ndarray.GetHandle());
-    return *this;
-  }
-  /*!
-  * \brief add positional inputs
-  */
-  template <int N, typename T, typename... Args>
-  Operator &PushInput(const T &t, Args... args) {
-    SetParam(N, t);
-    PushInput<N+1>(args...);
-    return *this;
-  }
-  /*!
-  * \brief add the last positional input
-  */
-  template <int N, typename T>
-  Operator &PushInput(const T &t) {
-    SetParam(N, t);
-    return *this;
-  }
-  /*!
-  * \brief add input ndarrays
-  * \param ndarray the input ndarray
-  * \return reference of self
-  */
-  Operator &operator()(const NDArray &ndarray) {
-    input_ndarrays_.push_back(ndarray.GetHandle());
-    return *this;
-  }
-  /*!
-  * \brief add a list of input ndarrays
-  * \param ndarrays the vector of the input ndarrays
-  * \return reference of self
-  */
-  Operator &operator()(const std::vector<NDArray> &ndarrays) {
-    for (auto &s : ndarrays) {
-      input_ndarrays_.push_back(s.GetHandle());
-    }
-    return *this;
-  }
-  /*!
-  * \brief add input ndarrays
-  * \return reference of self
-  */
-  template <typename... Args>
-  Operator &operator()(Args... args) {
-    PushInput<0>(args...);
-    return *this;
-  }
-  std::vector<NDArray> Invoke();
-  void Invoke(NDArray &output);
-  void Invoke(std::vector<NDArray> &outputs);
-
- private:
-  std::map<std::string, std::string> params_desc_;
-  bool variable_params_ = false;
-  std::map<std::string, std::string> params_;
-  std::vector<SymbolHandle> input_symbols_;
-  std::vector<NDArrayHandle> input_ndarrays_;
-  std::vector<std::string> input_keys_;
-  std::vector<std::string> arg_names_;
-  AtomicSymbolCreator handle_;
-  static OpMap*& op_map();
-};
-}  // namespace cpp
-}  // namespace mxnet
-
-#endif  // MXNET_CPP_OPERATOR_H_
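operator.h above is the generic escape hatch to any registered op, usable both symbolically and imperatively. A sketch of both paths (FullyConnected and its num_hidden parameter are stock MXNet operator names, not defined in this patch):

#include "mxnet-cpp/MxNetCpp.h"

using namespace mxnet::cpp;

void Demo() {
  // Symbolic: compose a graph node and name it.
  Symbol data = Symbol::Variable("data");
  Symbol fc = Operator("FullyConnected")
                  .SetParam("num_hidden", 128)
                  .SetInput("data", data)
                  .CreateSymbol("fc1");

  // Imperative: same builder, but Invoke() runs eagerly on NDArrays.
  NDArray x(Shape(2, 2), Context::cpu(), false, 0);  // allocated, float32
  x = 1.0f;
  NDArray y = Operator("_mul_scalar")
                  .SetParam("scalar", 3.0f)
                  .SetInput("data", x)
                  .Invoke()[0];
}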
diff --git a/cpp-package/include/mxnet-cpp/operator.hpp b/cpp-package/include/mxnet-cpp/operator.hpp
deleted file mode 100644
index 8cdd78d2c0e9..000000000000
--- a/cpp-package/include/mxnet-cpp/operator.hpp
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
-* \file operator.hpp
-* \brief implementation of operator
-* \author Chuntao Hong, Zhang Chen
-*/
-
-#ifndef MXNET_CPP_OPERATOR_HPP_
-#define MXNET_CPP_OPERATOR_HPP_
-
-#include <algorithm>
-#include <iterator>
-#include <map>
-#include <string>
-#include <vector>
-#include "mxnet-cpp/base.h"
-#include "mxnet-cpp/op_map.h"
-#include "mxnet-cpp/operator.h"
-
-namespace mxnet {
-namespace cpp {
-
-/*
- * Pushing NDArray or Symbol as inputs here to avoid partial specialization
- * like PushInput, which is not allowed in C++
- */
-template <>
-inline Operator& Operator::SetParam(int pos, const NDArray &value) {
-  input_ndarrays_.push_back(value.GetHandle());
-  return *this;
-}
-template <>
-inline Operator& Operator::SetParam(int pos, const Symbol &value) {
-  input_symbols_.push_back(value.GetHandle());
-  return *this;
-}
-
-inline OpMap*& Operator::op_map() {
-  static OpMap *op_map_ = new OpMap();
-  return op_map_;
-}
-
-inline Operator::Operator(const std::string &operator_name) {
-  handle_ = op_map()->GetSymbolCreator(operator_name);
-  const char *name;
-  const char *description;
-  mx_uint num_args;
-  const char **arg_names;
-  const char **arg_type_infos;
-  const char **arg_descriptions;
-  const char *key_var_num_args;
-  MXSymbolGetAtomicSymbolInfo(handle_,
-                              &name,
-                              &description,
-                              &num_args,
-                              &arg_names,
-                              &arg_type_infos,
-                              &arg_descriptions,
-                              &key_var_num_args);
-  for (mx_uint i = 0; i < num_args; ++i) {
-    arg_names_.push_back(arg_names[i]);
-  }
-}
-
-inline Symbol Operator::CreateSymbol(const std::string &name) {
-  if (input_keys_.size() > 0) {
-    CHECK_EQ(input_keys_.size(), input_symbols_.size());
-  }
-  const char *pname = name == "" ? nullptr : name.c_str();
-
-  SymbolHandle symbol_handle;
-  std::vector<const char *> input_keys;
-  std::vector<const char *> param_keys;
-  std::vector<const char *> param_values;
-
-  for (auto &data : params_) {
-    param_keys.push_back(data.first.c_str());
-    param_values.push_back(data.second.c_str());
-  }
-  for (auto &data : this->input_keys_) {
-    input_keys.push_back(data.c_str());
-  }
-  const char **input_keys_p =
-      (input_keys.size() > 0) ? input_keys.data() : nullptr;
-
-  MXSymbolCreateAtomicSymbol(handle_, param_keys.size(), param_keys.data(),
-                             param_values.data(), &symbol_handle);
-  MXSymbolCompose(symbol_handle, pname, input_symbols_.size(), input_keys_p,
-                  input_symbols_.data());
-  return Symbol(symbol_handle);
-}
-
-inline void Operator::Invoke(std::vector<NDArray> &outputs) {
-  if (input_keys_.size() > 0) {
-    CHECK_EQ(input_keys_.size(), input_ndarrays_.size());
-  }
-
-  std::vector<const char *> input_keys;
-  std::vector<const char *> param_keys;
-  std::vector<const char *> param_values;
-
-  for (auto &data : params_) {
-    param_keys.push_back(data.first.c_str());
-    param_values.push_back(data.second.c_str());
-  }
-
-  int num_inputs = input_ndarrays_.size();
-  int num_outputs = outputs.size();
-  std::vector<NDArrayHandle> output_handles;
-  std::transform(outputs.begin(), outputs.end(),
-                 std::back_inserter(output_handles), [](NDArray& a) {
-                   return a.GetHandle();
-                 });
-
-  NDArrayHandle *outputs_receiver = nullptr;
-  if (num_outputs > 0) {
-    outputs_receiver = output_handles.data();
-  }
-
-  if (MXImperativeInvoke(handle_, num_inputs, input_ndarrays_.data(),
-                         &num_outputs, &outputs_receiver,
-                         param_keys.size(), param_keys.data(),
-                         param_values.data()))
-    LOG(FATAL) << MXGetLastError();
-
-  if (outputs.size() > 0)
-    return;
-
-  std::transform(outputs_receiver, outputs_receiver+num_outputs,
-                 std::back_inserter(outputs), [](const NDArrayHandle& handle) {
-                   return NDArray(handle);
-                 });
-}
-
-inline std::vector<NDArray> Operator::Invoke() {
-  std::vector<NDArray> outputs;
-  Invoke(outputs);
-  return outputs;
-}
-
-inline void Operator::Invoke(NDArray &output) {
-  std::vector<NDArray> outputs{output};
-  Invoke(outputs);
-}
-
-inline Operator &Operator::SetInput(const std::string &name, const Symbol &symbol) {
-  if (symbol.GetHandle()) {
-    input_keys_.push_back(name.c_str());
-    input_symbols_.push_back(symbol.GetHandle());
-  }
-  return *this;
-}
-
-inline Operator &Operator::SetInput(const std::string &name, const NDArray &ndarray) {
-  input_keys_.push_back(name.c_str());
-  input_ndarrays_.push_back(ndarray.GetHandle());
-  return *this;
-}
-
-}  // namespace cpp
-}  // namespace mxnet
-
-#endif  // MXNET_CPP_OPERATOR_HPP_
diff --git a/cpp-package/include/mxnet-cpp/optimizer.h b/cpp-package/include/mxnet-cpp/optimizer.h
deleted file mode 100644
index 320b13eebf2d..000000000000
--- a/cpp-package/include/mxnet-cpp/optimizer.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
-* Copyright (c) 2016 by Contributors
-* \file optimizer.h
-* \brief definition of optimizer
-* \author Chuntao Hong, Zhang Chen
-*/
-
-#ifndef MXNET_CPP_OPTIMIZER_H_
-#define MXNET_CPP_OPTIMIZER_H_
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-#include <functional>
-#include "mxnet-cpp/base.h"
-#include "dmlc/logging.h"
-#include "mxnet-cpp/ndarray.h"
-#include "mxnet-cpp/op_map.h"
-#include "mxnet-cpp/lr_scheduler.h"
-
-namespace mxnet {
-namespace cpp {
-
-/*!
-* \brief Optimizer interface
-*/
-class Optimizer {
- public:
-  /*!
-  * \brief constructor
-  * \param begin_num_update The initial number of updates
-  */
-  explicit Optimizer(unsigned begin_num_update);
-  /*!
-  * \brief get optimizer type
-  * \return string of optimizer type
-  */
-  virtual std::string GetType() const = 0;
-  /*!
-  * \brief destructor
-  */
-  virtual ~Optimizer();
-  /*!
-  * \brief set config parameters
-  * \param name name of the config parameter
-  * \param value value of the config parameter
-  * \return reference of self
-  */
-  template <typename T>
-  Optimizer *SetParam(const std::string &name, const T &value) {
-    std::string value_str;
-    std::stringstream ss;
-    ss << value;
-    ss >> value_str;
-
-    params_[name] = value_str;
-    return this;
-  }
-  /*!
-  * \brief set the lr scheduler
-  * \param lrScheduler lr scheduler used for this optimizer
-  * \return reference of self
-  */
-  Optimizer *SetLRScheduler(std::unique_ptr<LRScheduler> lrScheduler) {
-    CHECK(lrScheduler);
-    lrScheduler_ = std::move(lrScheduler);
-    lrScheduler_->SetLR(std::stof(params_["lr"]));
-    return this;
-  }
-  /*!
-  * \brief Update a weight with gradient.
-  * \param index the unique index for the weight.
-  * \param weight the weight to update.
-  * \param grad gradient for the weight.
-  */
-  virtual void Update(int index, NDArray weight, NDArray grad) = 0;
-  // TODO(zhangcheng-qinyinghua)
-  // implement Update a list of arrays, maybe in the form of map
-  // void Update(int index, std::vector<NDArray> weights, std::vector<NDArray>
-  // grad, mx_float lr);
-
-  /*!
-  * \brief Serialize the optimizer parameters to a string.
-  * \return serialization
-  */
-  std::string Serialize() const;
-
- protected:
-  std::map<std::string, std::string> params_;
-  static OpMap*& op_map();
-  const std::vector<const char*> GetParamKeys_() const;
-  const std::vector<const char*> GetParamValues_() const;
-  std::map<int, unsigned> count_;
-  unsigned begin_num_update_, num_update_;
-  unsigned UpdateCount_(int index);
-  float GetLR_(int index);
-  float GetWD_(int index);
-  virtual void CreateState_(int index, NDArray weight);
-  std::unique_ptr<LRScheduler> lrScheduler_ = nullptr;
-};
-
-typedef std::function<Optimizer*()> OptimizerCreator;
-
-class OptimizerRegistry {
- public:
-  static Optimizer* Find(const std::string& name);
-  static int __REGISTER__(const std::string& name, OptimizerCreator creator);
- private:
-  static std::map<std::string, OptimizerCreator>& cmap();
-  OptimizerRegistry() = delete;
-  ~OptimizerRegistry() = delete;
-};
-#define MXNETCPP_REGISTER_OPTIMIZER(Name, OptimizerType)\
-  OptimizerRegistry::__REGISTER__(#Name, [](){return new OptimizerType();})
-
-class SGDOptimizer : public Optimizer {
- public:
-  explicit SGDOptimizer(unsigned begin_num_update = 0);
-  std::string GetType() const override;
-  void Update(int index, NDArray weight, NDArray grad) override;
- private:
-  virtual ~SGDOptimizer();
-  void CreateState_(int index, NDArray weight) override;
-  std::map<int, NDArray*> states_;
-  AtomicSymbolCreator update_handle_;
-  AtomicSymbolCreator mom_update_handle_;
-};
-
-class SignumOptimizer : public Optimizer {
- public:
-  explicit SignumOptimizer(unsigned begin_num_update = 0);
-  std::string GetType() const override;
-  void Update(int index, NDArray weight, NDArray grad) override;
- private:
-  virtual ~SignumOptimizer();
-  void CreateState_(int index, NDArray weight) override;
-  std::map<int, NDArray*> states_;
-  AtomicSymbolCreator update_handle_;
-  AtomicSymbolCreator mom_update_handle_;
-};
-
-
-class RMSPropOptimizer : public Optimizer {
- public:
-  explicit RMSPropOptimizer(unsigned begin_num_update = 0);
-  std::string GetType() const override;
-  void Update(int index, NDArray weight, NDArray grad) override;
- private:
-  virtual ~RMSPropOptimizer();
-  void CreateState_(int index, NDArray weight) override;
-  std::map<int, NDArray*> n_, g_, delta_;
-  AtomicSymbolCreator update_handle_;
-  AtomicSymbolCreator alex_update_handle_;
-};
-
-class AdamOptimizer : public Optimizer {
- public:
-  explicit AdamOptimizer(unsigned begin_num_update = 0);
-  std::string GetType() const override;
-  void Update(int index, NDArray weight, NDArray grad) override;
- private:
-  virtual ~AdamOptimizer();
-  void CreateState_(int index, NDArray weight) override;
-  std::map<int, NDArray*> mean_;
-  std::map<int, NDArray*> var_;
-  AtomicSymbolCreator update_handle_;
-};
-
-class AdaGradOptimizer : public Optimizer {
- public:
-  explicit AdaGradOptimizer(unsigned begin_num_update = 0);
-  std::string GetType() const override;
-  void Update(int index, NDArray weight, NDArray grad) override;
- private:
-  virtual ~AdaGradOptimizer();
-  void CreateState_(int index, NDArray weight) override;
-  std::map<int, NDArray*> history_;
-};
-
-class AdaDeltaOptimizer : public Optimizer {
- public:
-  explicit AdaDeltaOptimizer(unsigned begin_num_update = 0);
-  std::string GetType() const override;
-  void Update(int index, NDArray weight, NDArray grad) override;
- private:
-  virtual ~AdaDeltaOptimizer();
-  void CreateState_(int index, NDArray weight) override;
-  std::map<int, NDArray*> acc_g_, acc_delta_;
-};
-
-}  // namespace cpp
-}  // namespace mxnet
-
-#endif  // MXNET_CPP_OPTIMIZER_H_
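A sketch of the update loop this registry supported (the per-weight index convention lets optimizers keep per-tensor state such as momentum buffers; treating the returned pointer as caller-owned is an assumption of this sketch):

#include "mxnet-cpp/MxNetCpp.h"

using namespace mxnet::cpp;

void Step(NDArray weight, NDArray grad) {
  Optimizer* opt = OptimizerRegistry::Find("sgd");  // nullptr if unknown name
  opt->SetParam("lr", 0.1)
     ->SetParam("wd", 1e-4)
     ->SetParam("momentum", 0.9);
  opt->Update(0, weight, grad);  // index 0: first (and only) parameter tensor
  delete opt;
}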
diff --git a/cpp-package/include/mxnet-cpp/optimizer.hpp b/cpp-package/include/mxnet-cpp/optimizer.hpp
deleted file mode 100644
index b259c7bba61d..000000000000
--- a/cpp-package/include/mxnet-cpp/optimizer.hpp
+++ /dev/null
@@ -1,493 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
-* \file optimizer.hpp
-* \brief implementation of optimizer
-* \author Chuntao Hong, Zhang Chen
-*/
-
-#ifndef MXNET_CPP_OPTIMIZER_HPP_
-#define MXNET_CPP_OPTIMIZER_HPP_
-
-#include <algorithm>
-#include <cmath>
-#include <map>
-#include <numeric>
-#include <string>
-#include <utility>
-#include <vector>
-#include "mxnet-cpp/optimizer.h"
-#include "mxnet-cpp/op.h"
-#include "mxnet-cpp/op_map.h"
-
-namespace {
-
-// TODO(lx75249): Add imperative operators to op.h under ndarray namespace
-inline void _clip(mxnet::cpp::NDArray &data, float limit) {
-  data = mxnet::cpp::Operator("clip")
-    .SetParam("a_min", -limit)
-    .SetParam("a_max", limit)
-    .SetInput("data", data)
-    .Invoke()[0];
-}
-inline mxnet::cpp::NDArray _sqrt(mxnet::cpp::NDArray data) {
-  return mxnet::cpp::Operator("sqrt")
-    .SetInput("data", data)
-    .Invoke()[0];
-}
-
-}  // namespace
-
-namespace mxnet {
-namespace cpp {
-inline Optimizer::Optimizer(unsigned begin_num_update)
-  : begin_num_update_(begin_num_update),
-    num_update_(begin_num_update_) {
-  params_["lr"] = "0.01f";
-  params_["wd"] = "0.f";
-}
-
-inline std::map<std::string, OptimizerCreator>& OptimizerRegistry::cmap() {
-  static std::map<std::string, OptimizerCreator> cmap_;
-  return cmap_;
-}
-
-inline OpMap*& Optimizer::op_map() {
-  static OpMap *op_map_ = new OpMap();
-  return op_map_;
-}
-
-inline Optimizer::~Optimizer() {}
-
-inline void Optimizer::CreateState_(int index, NDArray weight) {
-}
-
-inline std::string Optimizer::Serialize() const {
-  using ValueType = std::map<std::string, std::string>::value_type;
-  auto params = params_;
-  params.emplace("opt_type", GetType());
-  return std::accumulate(params.cbegin(), params.cend(), std::string(""),
-    [](const std::string& sum, const ValueType& i) {
-      return sum + '\n' + i.first + '=' + i.second;
-    }).substr(1);
-}
-
-inline const std::vector<const char*> Optimizer::GetParamKeys_() const {
-  std::vector<const char*> keys;
-  for (auto& iter : params_)
-    keys.push_back(iter.first.c_str());
-  return keys;
-}
-
-inline const std::vector<const char*> Optimizer::GetParamValues_() const {
-  std::vector<const char*> values;
-  for (auto& iter : params_)
-    values.push_back(iter.second.c_str());
-  return values;
-}
-
-inline unsigned Optimizer::UpdateCount_(int index) {
-  if (count_.count(index) == 0) {
-    count_.emplace(index, begin_num_update_);
-  }
-  unsigned new_count = ++count_[index];
-  num_update_ = std::max(num_update_, new_count);
-  return new_count;
-}
-
-inline float Optimizer::GetLR_(int index) {
-  if (nullptr != lrScheduler_) {
-    return lrScheduler_->GetLR(num_update_);
-  }
-  return std::stof(params_["lr"]);
-}
-
-inline float Optimizer::GetWD_(int index) {
-  float wd = std::stof(params_["wd"]);
-  return wd;
-}
-
-inline Optimizer* OptimizerRegistry::Find(const std::string& name) {
-  if (cmap().empty()) {
-    // Optimizers should only be registered once
-    MXNETCPP_REGISTER_OPTIMIZER(sgd, SGDOptimizer);
-    MXNETCPP_REGISTER_OPTIMIZER(rmsprop, RMSPropOptimizer);
-    MXNETCPP_REGISTER_OPTIMIZER(adam, AdamOptimizer);
-    MXNETCPP_REGISTER_OPTIMIZER(adagrad, AdaGradOptimizer);
-    MXNETCPP_REGISTER_OPTIMIZER(adadelta, AdaDeltaOptimizer);
-    MXNETCPP_REGISTER_OPTIMIZER(signum, SignumOptimizer);
-  }
-  auto it = cmap().find(name);
-  if (it == cmap().end())
-    return nullptr;
-  return it->second();
-}
-
-inline int OptimizerRegistry::__REGISTER__(const std::string& name, OptimizerCreator creator) {
-  CHECK_EQ(cmap().count(name), 0) << name << " already registered";
-  cmap().emplace(name, std::move(creator));
-  return 0;
-}
-
-inline SGDOptimizer::SGDOptimizer(unsigned begin_num_update)
-  : Optimizer(begin_num_update) {
-  update_handle_ = op_map()->GetSymbolCreator("sgd_update");
-  mom_update_handle_ = op_map()->GetSymbolCreator("sgd_mom_update");
-}
-
-inline std::string SGDOptimizer::GetType() const {
-  return "sgd";
-}
-
-inline SGDOptimizer::~SGDOptimizer() {
-  for (auto &it : states_) {
-    delete it.second;
-  }
-}
-
-inline void SGDOptimizer::Update(int index, NDArray weight, NDArray grad) {
-  if (states_.count(index) == 0) {
-    CreateState_(index, weight);
-  }
-
-  params_["lr"] = std::to_string(GetLR_(index));
-  params_["wd"] = std::to_string(GetWD_(index));
-  UpdateCount_(index);
-  auto keys = GetParamKeys_();
-  auto values = GetParamValues_();
-  CHECK_EQ(keys.size(), values.size());
-
-  NDArrayHandle inputs[3];
-  inputs[0] = weight.GetHandle();
-  inputs[1] = grad.GetHandle();
-
-  int num_outputs = 1;
-  NDArrayHandle output = weight.GetHandle();
-  NDArrayHandle *outputs = &output;
-
-  if (states_[index] == nullptr) {
-    MXImperativeInvoke(update_handle_, 2, inputs,
-                       &num_outputs, &outputs,
-                       keys.size(), keys.data(), values.data());
-  } else {
-    inputs[2] = states_[index]->GetHandle();
-    MXImperativeInvoke(mom_update_handle_, 3, inputs,
-                       &num_outputs, &outputs,
-                       keys.size(), keys.data(), values.data());
-  }
-}
-
-inline void SGDOptimizer::CreateState_(int index, NDArray weight) {
-  if (params_.count("momentum") == 0) {
-    states_[index] = nullptr;
-  } else {
-    states_[index] = new NDArray(weight.GetShape(), weight.GetContext());
-    *states_[index] = 0;
-  }
-}
-
-// implementing Signum optimizer
-
-inline SignumOptimizer::SignumOptimizer(unsigned begin_num_update)
-  : Optimizer(begin_num_update) {
-  update_handle_ = op_map()->GetSymbolCreator("signsgd_update");
-  mom_update_handle_ = op_map()->GetSymbolCreator("signum_update");
-}
-
-inline std::string SignumOptimizer::GetType() const {
-  return "signum";
-}
-
-inline SignumOptimizer::~SignumOptimizer() {
-  for (auto &it : states_) {
-    delete it.second;
-  }
-}
-
-inline void SignumOptimizer::Update(int index, NDArray weight, NDArray grad) {
-  if (states_.count(index) == 0) {
-    CreateState_(index, weight);
-  }
-
-  params_["lr"] = std::to_string(GetLR_(index));
-  params_["wd"] = std::to_string(GetWD_(index));
-  UpdateCount_(index);
-  auto keys = GetParamKeys_();
-  auto values = GetParamValues_();
-  CHECK_EQ(keys.size(), values.size());
-
-  NDArrayHandle inputs[3];
-  inputs[0] = weight.GetHandle();
-  inputs[1] = grad.GetHandle();
-
-  int num_outputs = 1;
-  NDArrayHandle output = weight.GetHandle();
-  NDArrayHandle *outputs = &output;
-
-  if (states_[index] == nullptr) {
-    MXImperativeInvoke(update_handle_, 2, inputs,
-                       &num_outputs, &outputs,
-                       keys.size(), keys.data(), values.data());
-  } else {
-    inputs[2] = states_[index]->GetHandle();
-    MXImperativeInvoke(mom_update_handle_, 3, inputs,
-                       &num_outputs, &outputs,
-                       keys.size(), keys.data(), values.data());
-  }
-}
-
-inline void SignumOptimizer::CreateState_(int index, NDArray weight) {
-  if (params_.count("momentum") == 0) {
-    states_[index] = nullptr;
-  } else {
-    states_[index] = new NDArray(weight.GetShape(), weight.GetContext());
-    *states_[index] = 0;
-  }
-}
-
-// finish implementing Signum
-
-
-
-inline RMSPropOptimizer::RMSPropOptimizer(unsigned begin_num_update)
-  : Optimizer(begin_num_update) {
-  update_handle_ = op_map()->GetSymbolCreator("rmsprop_update");
-  alex_update_handle_ = op_map()->GetSymbolCreator("rmspropalex_update");
-  SetParam("rho", 0.9f);
-  SetParam("momentum", 0.9f);
-  SetParam("epsilon", 1e-8);
-}
-
-inline std::string RMSPropOptimizer::GetType() const {
-  return "rmsprop";
-}
-
-inline RMSPropOptimizer::~RMSPropOptimizer() {
-  for (auto &it : n_) {
-    delete it.second;
-  }
-  for (auto &it : g_) {
-    delete it.second;
-  }
-  for (auto &it : delta_) {
-    delete it.second;
-  }
-}
-
-inline void RMSPropOptimizer::Update(int index, NDArray weight, NDArray grad) {
-  if (n_.count(index) == 0) {
-    CreateState_(index, weight);
-  }
-
-  params_["lr"] = std::to_string(GetLR_(index));
-  params_["wd"] = std::to_string(GetWD_(index));
-  UpdateCount_(index);
-  auto keys = GetParamKeys_();
-  auto values = GetParamValues_();
-  CHECK_EQ(keys.size(), values.size());
-
-  NDArrayHandle inputs[5];
-  inputs[0] = weight.GetHandle();
-  inputs[1] = grad.GetHandle();
-  inputs[2] = n_[index]->GetHandle();
-  inputs[3] = g_[index]->GetHandle();
-  inputs[4] = delta_[index]->GetHandle();
-
-  int num_outputs = 1;
-  NDArrayHandle output = weight.GetHandle();
-  NDArrayHandle *outputs = &output;
-
-  MXImperativeInvoke(alex_update_handle_, 5, inputs,
-                     &num_outputs, &outputs,
-                     keys.size(), keys.data(), values.data());
-}
-
-inline void RMSPropOptimizer::CreateState_(int index, NDArray weight) {
-  n_[index] = new NDArray(weight.GetShape(), weight.GetContext());
-  *n_[index] = 0;
-  g_[index] = new NDArray(weight.GetShape(), weight.GetContext());
-  *g_[index] = 0;
-  delta_[index] = new NDArray(weight.GetShape(), weight.GetContext());
-  *delta_[index] = 0;
-}
-
-inline AdamOptimizer::AdamOptimizer(unsigned begin_num_update)
-  : Optimizer(begin_num_update) {
-  update_handle_ = op_map()->GetSymbolCreator("adam_update");
-  SetParam("beta1", 0.9f);
-  SetParam("beta2", 0.999f);
-  SetParam("epsilon", 1e-8);
-}
-
-inline std::string AdamOptimizer::GetType() const {
-  return "adam";
-}
-
-inline AdamOptimizer::~AdamOptimizer() {
-  for (auto &it : mean_) {
-    delete it.second;
-  }
-  for (auto &it : var_) {
-    delete it.second;
-  }
-}
-
-inline void AdamOptimizer::Update(int index, NDArray weight, NDArray grad) {
-  if (mean_.count(index) == 0) {
-    CreateState_(index, weight);
-  }
-
-  params_["lr"] = std::to_string(GetLR_(index));
-  params_["wd"] = std::to_string(GetWD_(index));
-  UpdateCount_(index);
-  auto keys = GetParamKeys_();
-  auto values = GetParamValues_();
-  CHECK_EQ(keys.size(), values.size());
-
-  float lr = std::stof(params_["lr"]);
-  float b1 = std::stof(params_["beta1"]);
-  float b2 = std::stof(params_["beta2"]);
-  float t = count_[index];
-  float coef1 = 1.0f - std::pow(b1, t);
-  float coef2 = 1.0f - std::pow(b2, t);
-  lr *= std::sqrt(coef2) / coef1;
-
-  NDArrayHandle inputs[4];
-  inputs[0] = weight.GetHandle();
-  inputs[1] = grad.GetHandle();
-
-  int num_outputs = 1;
-  NDArrayHandle output = weight.GetHandle();
-  NDArrayHandle *outputs = &output;
-
-  inputs[2] = mean_[index]->GetHandle();
-  inputs[3] = var_[index]->GetHandle();
-
-  MXImperativeInvoke(update_handle_, 4, inputs,
-                     &num_outputs, &outputs,
-                     keys.size(), keys.data(), values.data());
-}
-
-inline void AdamOptimizer::CreateState_(int index, NDArray weight) {
-  mean_[index] = new NDArray(weight.GetShape(), weight.GetContext());
-  *mean_[index] = 0;
-  var_[index] = new NDArray(weight.GetShape(), weight.GetContext());
-  *var_[index] = 0;
-}
-
-inline AdaGradOptimizer::AdaGradOptimizer(unsigned begin_num_update)
-  : Optimizer(begin_num_update) {
-  SetParam("eps", 1e-7);
-}
-
-inline std::string AdaGradOptimizer::GetType() const {
-  return "adagrad";
-}
-
-inline void AdaGradOptimizer::Update(int index, NDArray weight, NDArray grad) {
-  if (history_.count(index) == 0) {
-    CreateState_(index, weight);
-  }
-
-  float eps = std::stof(params_["eps"]);
-  float lr = GetLR_(index);
-  float wd = GetWD_(index);
-  UpdateCount_(index);
-  if (params_.count("rescale_grad") > 0) {
-    grad *= std::stof(params_["rescale_grad"]);
-  }
-  if (params_.count("clip_gradient") > 0) {
-    _clip(grad, std::stof(params_["clip_gradient"]));
-  }
-  auto& history = *history_[index];
-  history += grad * grad;
-  weight -= (grad / _sqrt(history + eps) + weight * wd) * lr;
-}
-
-inline AdaGradOptimizer::~AdaGradOptimizer() {
-  for (auto& it : history_) {
-    delete it.second;
-  }
-}
-
-inline void AdaGradOptimizer::CreateState_(int index, NDArray weight) {
-  history_[index] = new NDArray(weight.GetShape(), weight.GetContext());
-  *history_[index] = 0;
-}
-
-inline AdaDeltaOptimizer::AdaDeltaOptimizer(unsigned begin_num_update)
-  : Optimizer(begin_num_update) {
-  SetParam("rho", 0.90f);
-  SetParam("epsilon", 1e-5);
-}
-
-inline std::string AdaDeltaOptimizer::GetType() const {
-  return "adadelta";
-}
-
-inline void AdaDeltaOptimizer::Update(int index, NDArray weight, NDArray grad) {
-  if (acc_g_.count(index) == 0) {
-    CreateState_(index, weight);
-  }
-
-  float rho = std::stof(params_["rho"]);
-  float epsilon = std::stof(params_["epsilon"]);
-  float wd = GetWD_(index);
-  UpdateCount_(index);
-
-  if (params_.count("rescale_grad") > 0) {
-    grad *= std::stof(params_["rescale_grad"]);
-  }
-  if (params_.count("clip_gradient") > 0) {
-    _clip(grad, std::stof(params_["clip_gradient"]));
-  }
-
-  auto& acc_g = *acc_g_[index];
-  auto& acc_delta = *acc_delta_[index];
-  acc_g *= rho;
-  acc_g += grad * grad * (1.0f - rho);
-
-  auto delta = _sqrt(acc_delta + epsilon) / _sqrt(acc_g + epsilon) * grad;
-  acc_delta *= rho;
-  acc_delta += delta * delta * (1.0f - rho);
-  weight *= 1.0f - wd;
-  weight -= delta;
-}
-
-inline AdaDeltaOptimizer::~AdaDeltaOptimizer() {
-  for (auto& it : acc_g_) {
-    delete it.second;
-  }
-  for (auto& it : acc_delta_) {
-    delete it.second;
-  }
-}
-
-inline void AdaDeltaOptimizer::CreateState_(int index, NDArray weight) {
-  acc_g_[index] = new NDArray(weight.GetShape(), weight.GetContext());
-  *acc_g_[index] = 0;
-  acc_delta_[index] = new NDArray(weight.GetShape(), weight.GetContext());
-  *acc_delta_[index] = 0;
-}
-
-}  // namespace cpp
-}  // namespace mxnet
-
-#endif  // MXNET_CPP_OPTIMIZER_HPP_
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! -* Copyright (c) 2016 by Contributors -* \file shape.h -* \brief definition of shape -* \author Chuntao Hong, Zhang Chen -*/ - -#ifndef MXNET_CPP_SHAPE_H_ -#define MXNET_CPP_SHAPE_H_ - -#include -#include -#include -#include -#include "mxnet-cpp/base.h" - -namespace mxnet { -namespace cpp { - -/*! -* \brief dynamic shape class that can hold shape -* of arbirary dimension -*/ -struct Shape { - public: - /*! \brief constructor */ - Shape() - : ndim_(0), - num_heap_allocated_(0), - data_heap_(nullptr) {} - /*! - * \brief constructor from a vector of index_t - * \param v the vector - */ - explicit Shape(const std::vector &v) - : ndim_(v.size()) { - if (ndim_ <= kStackCache) { - data_heap_ = nullptr; - num_heap_allocated_ = 0; - std::copy(v.begin(), v.end(), data_stack_); - } else { - data_heap_ = new index_t[ndim_]; - num_heap_allocated_ = ndim_; - std::copy(v.begin(), v.end(), data_heap_); - } - } - /*! - * \brief constructor one dimmension shape - * \param s1 size of the first dimmension - */ - explicit Shape(index_t s1) - : ndim_(1) { - if (ndim_ <= kStackCache) { - data_heap_ = nullptr; - num_heap_allocated_ = 0; - data_stack_[0] = s1; - } else { - data_heap_ = new index_t[ndim_]; - num_heap_allocated_ = ndim_; - data_heap_[0] = s1; - } - } - /*! - * \brief constructor two dimmension shape - * \param s1 size of the first dimmension - * \param s2 size of the second dimmension - */ - Shape(index_t s1, index_t s2) - : ndim_(2) { - if (ndim_ <= kStackCache) { - data_heap_ = nullptr; - num_heap_allocated_ = 0; - data_stack_[0] = s1; - data_stack_[1] = s2; - } else { - data_heap_ = new index_t[ndim_]; - num_heap_allocated_ = ndim_; - data_heap_[0] = s1; - data_heap_[1] = s2; - } - } - /*! - * \brief constructor three dimmension shape - * \param s1 size of the first dimmension - * \param s2 size of the second dimmension - * \param s3 size of the third dimmension - */ - Shape(index_t s1, index_t s2, index_t s3) - : ndim_(3) { - if (ndim_ <= kStackCache) { - data_heap_ = nullptr; - num_heap_allocated_ = 0; - data_stack_[0] = s1; - data_stack_[1] = s2; - data_stack_[2] = s3; - } else { - data_heap_ = new index_t[ndim_]; - num_heap_allocated_ = ndim_; - data_heap_[0] = s1; - data_heap_[1] = s2; - data_heap_[2] = s3; - } - } - /*! 
- * \brief constructor four dimmension shape - * \param s1 size of the first dimmension - * \param s2 size of the second dimmension - * \param s3 size of the third dimmension - * \param s4 size of the fourth dimmension - */ - Shape(index_t s1, index_t s2, index_t s3, index_t s4) - : ndim_(4) { - if (ndim_ <= kStackCache) { - data_heap_ = nullptr; - num_heap_allocated_ = 0; - data_stack_[0] = s1; - data_stack_[1] = s2; - data_stack_[2] = s3; - data_stack_[3] = s4; - } else { - data_heap_ = new index_t[ndim_]; - num_heap_allocated_ = ndim_; - data_heap_[0] = s1; - data_heap_[1] = s2; - data_heap_[2] = s3; - data_heap_[3] = s4; - } - } - /*! - * \brief constructor five dimmension shape - * \param s1 size of the first dimmension - * \param s2 size of the second dimmension - * \param s3 size of the third dimmension - * \param s4 size of the fourth dimmension - * \param s5 size of the fifth dimmension - */ - Shape(index_t s1, index_t s2, index_t s3, index_t s4, index_t s5) - : ndim_(5) { - if (ndim_ <= kStackCache) { - data_heap_ = nullptr; - num_heap_allocated_ = 0; - data_stack_[0] = s1; - data_stack_[1] = s2; - data_stack_[2] = s3; - data_stack_[3] = s4; - data_stack_[4] = s5; - } else { - data_heap_ = new index_t[ndim_]; - num_heap_allocated_ = ndim_; - data_heap_[0] = s1; - data_heap_[1] = s2; - data_heap_[2] = s3; - data_heap_[3] = s4; - data_heap_[4] = s5; - } - } - /*! - * \brief constructor from Shape - * \param s the source shape - */ - Shape(const Shape &s) - : ndim_(s.ndim_) { - if (ndim_ <= kStackCache) { - data_heap_ = nullptr; - num_heap_allocated_ = 0; - std::copy(s.data_stack_, s.data_stack_ + ndim_, data_stack_); - } else { - data_heap_ = new index_t[ndim_]; - num_heap_allocated_ = ndim_; - std::copy(s.data_heap_, s.data_heap_ + ndim_, data_heap_); - } - } -#if MSHADOW_IN_CXX11 - /*! - * \brief move constructor from Shape - * \param s the source shape - */ - Shape(Shape &&s) - : ndim_(s.ndim_), - num_heap_allocated_(s.num_heap_allocated_), - data_heap_(s.data_heap_) { - if (ndim_ <= kStackCache) { - std::copy(s.data_stack_, s.data_stack_ + ndim_, data_stack_); - } - // remove data heap space from s - s.data_heap_ = nullptr; - } -#endif - /*! \brief destructor */ - ~Shape() { - // data_heap_ can be nullptr - delete[] data_heap_; - } - /*! - * \brief copy shape from content betwen two iterators - * \param begin the beginning of iterator - * \param end the end of the iterator - * \tparam RandomAccessIterator iterator type - */ - template - inline void CopyFrom(RandomAccessIterator begin, - RandomAccessIterator end) { - this->SetDim(end - begin); - std::copy(begin, end, data()); - } - /*! - * \brief assignment from shape - * \param shape source shape - * \return reference of self - */ - inline Shape &operator=(const Shape &shape) { - this->SetDim(shape.ndim_); - const index_t *src = shape.data(); - std::copy(src, src + ndim_, data()); - return *this; - } - /*! - * \brief assignment from vector - * \param shape source shape - * \return reference of self - */ - inline Shape &operator=(const std::vector &shape) { - this->CopyFrom(shape.begin(), shape.end()); - return *this; - } - /*! \return the data content of the shape */ - inline const index_t *data() const { - return ndim_ <= kStackCache ? data_stack_ : data_heap_; - } - /*! \return the data content of the shape */ - inline index_t *data() { - return ndim_ <= kStackCache ? data_stack_ : data_heap_; - } - /*! \brief return number of dimension of the tensor inside */ - inline index_t ndim(void) const { - return ndim_; - } - /*! 
- * \brief get corresponding index - * \param i dimension index - * \return the corresponding dimension size - */ - inline index_t &operator[](index_t i) { - return data()[i]; - } - /*! - * \brief get corresponding index - * \param i dimension index - * \return the corresponding dimension size - */ - inline const index_t &operator[](index_t i) const { - return data()[i]; - } - /*! \brief total number of elements in the tensor */ - inline size_t Size(void) const { - size_t size = 1; - const index_t *d = this->data(); - for (index_t i = 0; i < ndim_; ++i) { - size *= d[i]; - } - return size; - } - /*! - * \return whether two shape equals - * \param s the shape to compare against - */ - inline bool operator==(const Shape &s) const { - if (ndim_ != s.ndim_) return false; - if (ndim_ <= kStackCache) { - for (index_t i = 0; i < ndim_; ++i) { - if (data_stack_[i] != s.data_stack_[i]) return false; - } - } else { - for (index_t i = 0; i < ndim_; ++i) { - if (data_heap_[i] != s.data_heap_[i]) return false; - } - } - return true; - } - /*! - * \return whether two shape not equals - * \param s the shape to compare against - */ - inline bool operator!=(const Shape &s) const { - return !(*this == s); - } - - friend std::ostream &operator<<(std::ostream &os, const Shape &shape); - friend std::istream &operator>>(std::istream &is, Shape &shape); - - private: - // the shape will be stored in data_stack_ - // when dimension is smaller than kStackCache - // when it is bigger, it will be stored in data_heap_; - /*! \brief size of in stack space */ - static const index_t kStackCache = 5; - /*! \brief number of dimnsion of the shape */ - index_t ndim_; - /*! \brief number of cells allocated in data_heap_ */ - index_t num_heap_allocated_; - /*! \brief in stack space used to store shape when it is small */ - index_t data_stack_[kStackCache]; - /*! \brief space to store shape when dimension is big*/ - index_t *data_heap_; - /*! - * \brief internal function to set the dimension - * \param dim the dimension of the shape - */ - inline void SetDim(index_t dim) { - if (dim > kStackCache && - dim > num_heap_allocated_) { - // data_heap_ can be nullptr - delete[] data_heap_; - data_heap_ = new index_t[dim]; - num_heap_allocated_ = dim; - } - ndim_ = dim; - } -}; - -/*! -* \brief allow string printing of the shape -* \param os the output stream -* \param shape the shape -* \return the ostream -*/ -inline std::ostream &operator<<(std::ostream &os, const Shape &shape) { - os << '('; - for (index_t i = 0; i < shape.ndim(); ++i) { - if (i != 0) os << ','; - os << static_cast(shape[i]); // Supports negative Shape 'special codes' for inferring - } - // python style tuple - if (shape.ndim() == 1) os << ','; - os << ')'; - return os; -} - -/*! 
-/*!
-* \brief read a shape from the istream
-* \param is the input stream
-* \param shape the shape
-* \return the istream
-*/
-inline std::istream &operator>>(std::istream &is, Shape &shape) {
-  // get (
-  while (true) {
-    char ch = is.get();
-    if (ch == '(') break;
-    if (!isspace(ch)) {
-      is.setstate(std::ios::failbit);
-      return is;
-    }
-  }
-  index_t idx;
-  std::vector<index_t> tmp;
-  while (is >> idx) {
-    tmp.push_back(idx);
-    char ch;
-    do {
-      ch = is.get();
-    } while (isspace(ch));
-    if (ch == ',') {
-      while (true) {
-        ch = is.peek();
-        if (isspace(ch)) {
-          is.get(); continue;
-        }
-        if (ch == ')') {
-          is.get(); break;
-        }
-        break;
-      }
-      if (ch == ')') break;
-    } else if (ch == ')') {
-      break;
-    } else {
-      is.setstate(std::ios::failbit);
-      return is;
-    }
-  }
-  shape.CopyFrom(tmp.begin(), tmp.end());
-  return is;
-}
-
-}  // namespace cpp
-}  // namespace mxnet
-
-#endif  // MXNET_CPP_SHAPE_H_
diff --git a/cpp-package/include/mxnet-cpp/symbol.h b/cpp-package/include/mxnet-cpp/symbol.h
deleted file mode 100644
index 336b7259f78f..000000000000
--- a/cpp-package/include/mxnet-cpp/symbol.h
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
-* Copyright (c) 2016 by Contributors
-* \file symbol.h
-* \brief definition of symbol
-* \author Chuntao Hong, Zhang Chen
-*/
-
-#ifndef MXNET_CPP_SYMBOL_H_
-#define MXNET_CPP_SYMBOL_H_
-
-#include <map>
-#include <string>
-#include <vector>
-#include "mxnet-cpp/base.h"
-#include "mxnet-cpp/ndarray.h"
-#include "mxnet-cpp/op_map.h"
-
-namespace mxnet {
-namespace cpp {
-
-class Executor;
-
-/*!
-* \brief struct to store a SymbolHandle
-*/
-struct SymBlob {
- public:
-  /*!
-  * \brief default constructor
-  */
-  SymBlob() : handle_(nullptr) {}
-  /*!
-  * \brief construct with a SymbolHandle to store
-  */
-  explicit SymBlob(SymbolHandle handle) : handle_(handle) {}
-  /*!
-  * \brief destructor, free the SymbolHandle
-  */
-  ~SymBlob() { MXSymbolFree(handle_); }
-  /*!
-  * \brief the SymbolHandle to store
-  */
-  SymbolHandle handle_;
-
- private:
-  SymBlob(const SymBlob &);
-  SymBlob &operator=(const SymBlob &);
-};
-
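The deleted `SymBlob` above is a small RAII owner for the raw `SymbolHandle`; `Symbol` (below) keeps one inside a `std::shared_ptr`, so copies of a `Symbol` share one handle that is freed exactly once. A stand-alone sketch of the same idiom, with hypothetical names standing in for the C API types:

```cpp
#include <cstdio>
#include <memory>

using Handle = void *;                                 // stand-in for SymbolHandle
void FreeHandle(Handle) { std::puts("freed once"); }   // stand-in for MXSymbolFree

struct Blob {                          // plays the role of SymBlob
  explicit Blob(Handle h) : handle_(h) {}
  ~Blob() { FreeHandle(handle_); }
  Blob(const Blob &) = delete;         // the owner itself must not be copied
  Blob &operator=(const Blob &) = delete;
  Handle handle_;
};

struct Sym {                           // plays the role of Symbol
  std::shared_ptr<Blob> blob_;
};

int main() {
  Sym a{std::make_shared<Blob>(nullptr)};
  Sym b = a;                           // cheap copy: both share one Blob
  return 0;                            // "freed once" is printed exactly once
}
```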
-/*!
-* \brief Symbol interface
-*/
-class Symbol {
- public:
-  Symbol() {}
-  /*!
-  * \brief construct a Symbol with a SymbolHandle
-  * \param handle the given SymbolHandle
-  */
-  explicit Symbol(SymbolHandle handle);
-  /*!
-  * \brief construct a variable Symbol
-  * \param name the name of the variable
-  */
-  explicit Symbol(const char *name);
-  /*!
-  * \brief construct a variable Symbol
-  * \param name the name of the variable
-  */
-  explicit Symbol(const std::string &name);
-  Symbol operator+(const Symbol &rhs) const;
-  Symbol operator-(const Symbol &rhs) const;
-  Symbol operator*(const Symbol &rhs) const;
-  Symbol operator/(const Symbol &rhs) const;
-  Symbol operator%(const Symbol &rhs) const;
-
-  Symbol operator+(mx_float scalar) const;
-  Symbol operator-(mx_float scalar) const;
-  Symbol operator*(mx_float scalar) const;
-  Symbol operator/(mx_float scalar) const;
-  Symbol operator%(mx_float scalar) const;
-  Symbol Copy() const;
-  /*!
-  * \brief construct a variable Symbol
-  * \param name the name of the variable
-  */
-  static Symbol Variable(const std::string &name = "");
-  Symbol operator[](int index);
-  Symbol operator[](const std::string &index);
-  /*!
-  * \brief Create a symbol that groups symbols together
-  * \param symbols List of symbols to be grouped
-  */
-  static Symbol Group(const std::vector<Symbol> &symbols);
-  /*!
-  * \brief load a Symbol from a JSON file
-  * \param file_name the name of the file
-  */
-  static Symbol Load(const std::string &file_name);
-  /*!
-  * \brief load a Symbol from a JSON string
-  * \param json_str the JSON string
-  */
-  static Symbol LoadJSON(const std::string &json_str);
-  /*!
-  * \brief save the Symbol to a file
-  * \param file_name the name of the file
-  */
-  void Save(const std::string &file_name) const;
-  /*!
-  * \brief save the Symbol into a JSON string
-  */
-  std::string ToJSON() const;
-  /*!
-  * \brief get a new symbol whose outputs are all the internals
-  * \return the symbol whose outputs are all the internals
-  */
-  Symbol GetInternals() const;
-  /*!
-  * \return the SymbolHandle
-  */
-  SymbolHandle GetHandle() const { return (blob_ptr_) ? blob_ptr_->handle_ : nullptr; }
-  /*!
-  * \brief construct an operator Symbol, with given input Symbols and config
-  * \param name the name of the Symbol
-  * \param input_keys the vector of keys of the inputs
-  * \param input_values the vector of the input Symbols
-  * \param config_keys the vector of keys of the config
-  * \param config_values the vector of values of the config
-  */
-  Symbol(const std::string &operator_name, const std::string &name,
-         std::vector<const char *> input_keys,
-         std::vector<SymbolHandle> input_values,
-         std::vector<const char *> config_keys,
-         std::vector<const char *> config_values);
-  /*!
-  * \brief infer the shapes by providing shapes of known arguments.
-  * \param arg_shapes map of argument name to shape of arguments with known
-  *   shapes.
-  * \param in_shape used to store the inferred shapes of input arguments.
-  * \param aux_shape used to store the inferred shapes of auxiliary states.
-  * \param out_shape used to store the inferred shapes of outputs.
-  */
-  void InferShape(
-      const std::map<std::string, std::vector<mx_uint> > &arg_shapes,
-      std::vector<std::vector<mx_uint> > *in_shape,
-      std::vector<std::vector<mx_uint> > *aux_shape,
-      std::vector<std::vector<mx_uint> > *out_shape) const;
-  /*!
-  * \brief List the argument names.
-  *
-  * The position in the returned list also corresponds to the calling
-  * position in operator().
-  * \return the argument list of this symbol; entries can be either named or
-  * unnamed (empty string).
-  */
-  std::vector<std::string> ListArguments() const;
-  /*! \return all argument names and aux states of the symbol */
-  std::vector<std::string> ListInputs() const;
-  /*! \return the descriptions of the outputs of this symbol */
-  std::vector<std::string> ListOutputs() const;
-  /*! \return the descriptions of the auxiliary data of this symbol */
-  std::vector<std::string> ListAuxiliaryStates() const;
-  /*! \return all attributes of this symbol */
-  std::map<std::string, std::string> ListAttributes() const;
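Taken together, the operator overloads and factory functions above made symbolic graphs composable as ordinary C++ expressions. A minimal sketch, again assuming a pre-removal build with the package headers available:

```cpp
#include <iostream>
#include "mxnet-cpp/MxNetCpp.h"  // umbrella header of the removed package

using namespace mxnet::cpp;

int main() {
  Symbol x = Symbol::Variable("x");
  Symbol y = Symbol::Variable("y");
  Symbol z = 2.0f * x + y / x;       // resolves to _MulScalar, _Plus, _Div, ...
  z.Save("expr.json");               // round-trips through Symbol::Load
  for (const auto &arg : z.ListArguments())
    std::cout << arg << '\n';        // expected: "x" then "y"
  return 0;
}
```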
-  /*!
-  * \brief set a key-value attribute on the symbol
-  * \param key string representing the key of the attribute
-  * \param value string representing the value of the attribute
-  */
-  void SetAttribute(const std::string &key, const std::string &value);
-  /*!
-  * \brief set a series of key-value attributes on the symbol
-  * \param attrs string:string map representing the key-value attributes
-  */
-  void SetAttributes(const std::map<std::string, std::string> &attrs);
-  /*! \return the number of outputs of this symbol */
-  mx_uint GetNumOutputs() const;
-  /*! \return the new symbol created through the subgraph API for this symbol */
-  mxnet::cpp::Symbol GetBackendSymbol(const std::string &backendName) const;
-  /*! \return the name of the symbol */
-  std::string GetName() const;
-  /*!
-  * \brief infer and construct all the arrays to bind to the executor by
-  *   providing some known arrays.
-  * \param context the context of all the inferred arrays
-  * \param arg_arrays inferred input argument arrays.
-  * \param grad_arrays inferred arrays to store the gradient output of the
-  *   input arguments.
-  * \param grad_reqs inferred gradient request types. Can only be in
-  *   {kNullOp, kAddTo, kWriteTo}.
-  * \param aux_arrays inferred arrays that are used as internal states in ops.
-  * \param args_map map of some given argument arrays.
-  * \param arg_grad_store map of some given gradient store arrays.
-  * \param grad_req_type map of some given gradient request types.
-  * \param aux_map map of NDArrays that store the internal states in ops.
-  */
-  void InferExecutorArrays(
-      const Context &context, std::vector<NDArray> *arg_arrays,
-      std::vector<NDArray> *grad_arrays, std::vector<OpReqType> *grad_reqs,
-      std::vector<NDArray> *aux_arrays,
-      const std::map<std::string, NDArray> &args_map,
-      const std::map<std::string, NDArray> &arg_grad_store =
-          std::map<std::string, NDArray>(),
-      const std::map<std::string, OpReqType> &grad_req_type =
-          std::map<std::string, OpReqType>(),
-      const std::map<std::string, NDArray> &aux_map =
-          std::map<std::string, NDArray>()) const;
-  /*!
-  * \brief infer and construct all the input argument arrays to bind to the
-  *   executor by providing some known argument arrays.
-  * \param context the context of all the inferred arrays.
-  * \param args_map map of all the inferred input argument arrays.
-  * \param known_args map of some given argument arrays.
-  */
-  void InferArgsMap(const Context &context,
-                    std::map<std::string, NDArray> *args_map,
-                    const std::map<std::string, NDArray> &known_args) const;
-  /*!
-  * \brief Create an executor by binding the symbol with a context and
-  *   arguments.
-  * If the user does not want to compute the gradient of the i-th argument,
-  *   grad_req_type[i] can be kNullOp.
-  * The input arrays in the given maps should have the same names as the
-  *   inputs of the symbol.
-  * Only some of the necessary arrays are needed; the others can be inferred
-  *   automatically.
-  *
-  * \param context the context of binding.
-  * \param args_map the NDArrays that store the input arguments of the symbol.
-  * \param arg_grad_store NDArrays that are used to store the gradient output
-  *   of the input arguments.
-  * \param grad_req_type requirement types of gradient saving. Can only be in
-  *   {kNullOp, kAddTo, kWriteTo}.
-  * \param aux_map NDArrays that store the internal states in ops.
-  * \return a new executor, which needs to be freed manually.
-  */
-  Executor *SimpleBind(const Context &context,
-                       const std::map<std::string, NDArray> &args_map,
-                       const std::map<std::string, NDArray> &arg_grad_store =
-                           std::map<std::string, NDArray>(),
-                       const std::map<std::string, OpReqType> &grad_req_type =
-                           std::map<std::string, OpReqType>(),
-                       const std::map<std::string, NDArray> &aux_map =
-                           std::map<std::string, NDArray>());
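To make the `InferArgsMap`/`SimpleBind` lifecycle above concrete, here is a hedged sketch in the pattern the cpp-package examples used; `net` stands for some already-built Symbol, and the input name "data" is an assumption:

```cpp
#include <map>
#include <string>
#include "mxnet-cpp/MxNetCpp.h"

using namespace mxnet::cpp;

void BindExample(Symbol net) {
  Context ctx = Context::cpu();
  std::map<std::string, NDArray> args;
  // Seed the shape of the data input; every other argument is inferred.
  args["data"] = NDArray(Shape(64, 10), ctx);
  net.InferArgsMap(ctx, &args, args);

  // SimpleBind allocates gradient and aux arrays behind the scenes.
  Executor *exec = net.SimpleBind(ctx, args);
  exec->Forward(true);   // is_train = true
  exec->Backward();
  delete exec;           // executors returned here must be freed manually
}
```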
-  /*!
-  * \brief Create an executor by binding the symbol with a context and
-  *   arguments.
-  * If the user does not want to compute the gradient of the i-th argument,
-  *   grad_reqs[i] can be kNullOp.
-  *
-  * \param context the context of binding.
-  * \param arg_arrays the NDArrays that store the input arguments of the symbol.
-  * \param grad_arrays NDArrays that are used to store the gradient output of
-  *   the input arguments.
-  * \param grad_reqs requirement types of gradient saving. Can only be in
-  *   {kNullOp, kAddTo, kWriteTo}.
-  * \param aux_arrays NDArrays that are used as internal states in ops.
-  * \param group_to_ctx dict of string to mx.Context.
-  * \param shared_exec Executor to share memory with. This is intended for
-  *   runtime reshaping, variable-length sequences, etc. The returned executor
-  *   shares state with shared_exec, and should not be used in parallel with it.
-  * \return a new executor, which needs to be freed manually.
-  */
-  Executor *Bind(const Context &context, const std::vector<NDArray> &arg_arrays,
-                 const std::vector<NDArray> &grad_arrays,
-                 const std::vector<OpReqType> &grad_reqs,
-                 const std::vector<NDArray> &aux_arrays,
-                 const std::map<std::string, Context> &group_to_ctx =
-                     std::map<std::string, Context>(),
-                 Executor *shared_exec = nullptr);
-
- private:
-  std::shared_ptr<SymBlob> blob_ptr_;
-  static OpMap *&op_map();
-};
-Symbol operator+(mx_float lhs, const Symbol &rhs);
-Symbol operator-(mx_float lhs, const Symbol &rhs);
-Symbol operator*(mx_float lhs, const Symbol &rhs);
-Symbol operator/(mx_float lhs, const Symbol &rhs);
-Symbol operator%(mx_float lhs, const Symbol &rhs);
-}  // namespace cpp
-}  // namespace mxnet
-#endif  // MXNET_CPP_SYMBOL_H_
diff --git a/cpp-package/include/mxnet-cpp/symbol.hpp b/cpp-package/include/mxnet-cpp/symbol.hpp
deleted file mode 100644
index 454d775ad23b..000000000000
--- a/cpp-package/include/mxnet-cpp/symbol.hpp
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file symbol.hpp - * \brief implementation of the symbol - * \author Zhang Chen, Chuntao Hong - */ - -#ifndef MXNET_CPP_SYMBOL_HPP_ -#define MXNET_CPP_SYMBOL_HPP_ - -#include -#include -#include -#include - -#include "dmlc/logging.h" -#include "mxnet-cpp/symbol.h" - -#include "mxnet-cpp/op_suppl.h" - -namespace mxnet { -namespace cpp { -inline OpMap*& Symbol::op_map() { - static OpMap* op_map_ = new OpMap(); - return op_map_; -} -inline Symbol::Symbol(SymbolHandle handle) { - blob_ptr_ = std::make_shared(handle); -} -inline Symbol::Symbol(const char *name) { - SymbolHandle handle; - CHECK_EQ(MXSymbolCreateVariable(name, &(handle)), 0); - blob_ptr_ = std::make_shared(handle); -} -inline Symbol::Symbol(const std::string &name) : Symbol(name.c_str()) {} -inline Symbol Symbol::Variable(const std::string &name) { return Symbol(name); } -inline Symbol Symbol::operator+(const Symbol &rhs) const { return _Plus(*this, rhs); } -inline Symbol Symbol::operator-(const Symbol &rhs) const { return _Minus(*this, rhs); } -inline Symbol Symbol::operator*(const Symbol &rhs) const { return _Mul(*this, rhs); } -inline Symbol Symbol::operator/(const Symbol &rhs) const { return _Div(*this, rhs); } -inline Symbol Symbol::operator%(const Symbol &rhs) const { return _Mod(*this, rhs); } -inline Symbol Symbol::operator+(mx_float scalar) const { - return _PlusScalar(*this, scalar); -} -inline Symbol Symbol::operator-(mx_float scalar) const { - return _MinusScalar(*this, scalar); -} -inline Symbol Symbol::operator*(mx_float scalar) const { - return _MulScalar(*this, scalar); -} -inline Symbol Symbol::operator/(mx_float scalar) const { - return _DivScalar(*this, scalar); -} -inline Symbol Symbol::operator%(mx_float scalar) const { - return _ModScalar(*this, scalar); -} -inline Symbol Symbol::operator[](int index) { - SymbolHandle out; - MXSymbolGetOutput(GetHandle(), index, &out); - return Symbol(out); -} -inline Symbol Symbol::operator[](const std::string &index) { - auto outputs = ListOutputs(); - for (mx_uint i = 0; i < outputs.size(); ++i) { - if (outputs[i] == index) { - return (*this)[i]; - } - } - LOG(FATAL) << "Cannot find output that matches name " << index; - return (*this)[0]; -} -inline Symbol Symbol::Group(const std::vector &symbols) { - SymbolHandle out; - std::vector handle_list; - for (const auto &t : symbols) { - handle_list.push_back(t.GetHandle()); - } - MXSymbolCreateGroup(handle_list.size(), handle_list.data(), &out); - return Symbol(out); -} -inline Symbol Symbol::Load(const std::string &file_name) { - op_map(); - SymbolHandle handle; - CHECK_EQ(MXSymbolCreateFromFile(file_name.c_str(), &(handle)), 0); - return Symbol(handle); -} -inline Symbol Symbol::LoadJSON(const std::string &json_str) { - op_map(); - SymbolHandle handle; - CHECK_EQ(MXSymbolCreateFromJSON(json_str.c_str(), &(handle)), 0); - return Symbol(handle); -} -inline void Symbol::Save(const std::string &file_name) const { - CHECK_EQ(MXSymbolSaveToFile(GetHandle(), file_name.c_str()), 0); -} -inline std::string Symbol::ToJSON() const { - const char *out_json; - CHECK_EQ(MXSymbolSaveToJSON(GetHandle(), &out_json), 0); - return std::string(out_json); -} -inline Symbol Symbol::GetInternals() const { - SymbolHandle handle; - CHECK_EQ(MXSymbolGetInternals(GetHandle(), &handle), 0); - return Symbol(handle); -} -inline Symbol::Symbol(const std::string &operator_name, const std::string &name, - std::vector input_keys, - std::vector input_values, - std::vector config_keys, - std::vector config_values) { - SymbolHandle handle; - 
AtomicSymbolCreator creator = op_map()->GetSymbolCreator(operator_name); - MXSymbolCreateAtomicSymbol(creator, config_keys.size(), config_keys.data(), - config_values.data(), &handle); - MXSymbolCompose(handle, operator_name.c_str(), input_keys.size(), - input_keys.data(), input_values.data()); - blob_ptr_ = std::make_shared(handle); -} - -inline Symbol Symbol::Copy() const { - SymbolHandle handle; - CHECK_EQ(MXSymbolCopy(GetHandle(), &handle), 0); - return Symbol(handle); -} - -inline std::vector Symbol::ListArguments() const { - std::vector ret; - mx_uint size; - const char **sarr; - MXSymbolListArguments(GetHandle(), &size, &sarr); - for (mx_uint i = 0; i < size; ++i) { - ret.push_back(std::string(sarr[i])); - } - return ret; -} - -inline std::vector Symbol::ListInputs() const { - std::vector ret; - mx_uint size; - const char **sarr; - NNSymbolListInputNames(GetHandle(), 0, &size, &sarr); - for (mx_uint i = 0; i < size; ++i) { - ret.push_back(std::string(sarr[i])); - } - return ret; -} - -inline std::vector Symbol::ListOutputs() const { - std::vector ret; - mx_uint size; - const char **sarr; - MXSymbolListOutputs(GetHandle(), &size, &sarr); - for (mx_uint i = 0; i < size; ++i) { - ret.push_back(std::string(sarr[i])); - } - return ret; -} -inline std::vector Symbol::ListAuxiliaryStates() const { - std::vector ret; - mx_uint size; - const char **sarr; - MXSymbolListAuxiliaryStates(GetHandle(), &size, &sarr); - for (mx_uint i = 0; i < size; ++i) { - ret.push_back(std::string(sarr[i])); - } - return ret; -} - -inline std::map Symbol::ListAttributes() const { - mx_uint size; - const char** pairs; - CHECK_EQ(MXSymbolListAttrShallow(GetHandle(), &size, &pairs), 0); - std::map attributes; - for (mx_uint i = 0; i < size; ++i) { - // pairs is 2 * size with key, value pairs according to - // https://github.com/apache/incubator-mxnet/blob/master/include/mxnet/c_api.h#L1428 - attributes[pairs[2 * i]] = pairs[2 * i + 1]; - } - return attributes; -} - -inline void Symbol::SetAttribute(const std::string &key, const std::string &value) { - CHECK_EQ(MXSymbolSetAttr(GetHandle(), key.c_str(), value.c_str()), 0); -} - -inline void Symbol::SetAttributes(const std::map &attrs) { - for (const auto& kv : attrs) { - SetAttribute(kv.first, kv.second); - } -} - -inline mx_uint Symbol::GetNumOutputs() const { - mx_uint numOutputs; - CHECK_EQ(MXSymbolGetNumOutputs(GetHandle(), &numOutputs), 0); - return numOutputs; -} - -inline mxnet::cpp::Symbol Symbol::GetBackendSymbol(const std::string &backendName) const { - SymbolHandle symbolHandle; - CHECK_EQ(MXGenBackendSubgraph(GetHandle(), backendName.c_str(), &symbolHandle), 0); - return mxnet::cpp::Symbol(symbolHandle); -} - -inline std::string Symbol::GetName() const { - int success; - const char* out_name; - CHECK_EQ(MXSymbolGetName(GetHandle(), &out_name, &success), 0); - CHECK_EQ(success, 1); - return std::string(out_name); -} - -inline void Symbol::InferShape( - const std::map > &arg_shapes, - std::vector > *in_shape, - std::vector > *aux_shape, - std::vector > *out_shape) const { - - std::vector keys; - std::vector arg_ind_ptr; - std::vector arg_shape_data; - - for (const auto &arg : arg_shapes) { - keys.push_back(arg.first.c_str()); - arg_ind_ptr.push_back(arg_shape_data.size()); - for (auto i : arg.second) { - arg_shape_data.push_back(i); - } - } - arg_ind_ptr.push_back(arg_shape_data.size()); - - mx_uint in_shape_size; - const int *in_shape_ndim; - const int **in_shape_data; - mx_uint out_shape_size; - const int *out_shape_ndim; - const int **out_shape_data; - 
mx_uint aux_shape_size; - const int *aux_shape_ndim; - const int **aux_shape_data; - int complete; - - CHECK_EQ(MXSymbolInferShapeEx(GetHandle(), keys.size(), keys.data(), - arg_ind_ptr.data(), arg_shape_data.data(), - &in_shape_size, &in_shape_ndim, &in_shape_data, - &out_shape_size, &out_shape_ndim, &out_shape_data, - &aux_shape_size, &aux_shape_ndim, &aux_shape_data, - &complete), - 0); - - if (complete) { - for (mx_uint i = 0; i < in_shape_size; ++i) { - in_shape->push_back(std::vector()); - for (int j = 0; j < in_shape_ndim[i]; ++j) { - (*in_shape)[i].push_back(in_shape_data[i][j]); - } - } - for (mx_uint i = 0; i < aux_shape_size; ++i) { - aux_shape->push_back(std::vector()); - for (int j = 0; j < aux_shape_ndim[i]; ++j) { - (*aux_shape)[i].push_back(aux_shape_data[i][j]); - } - } - for (mx_uint i = 0; i < out_shape_size; ++i) { - out_shape->push_back(std::vector()); - for (int j = 0; j < out_shape_ndim[i]; ++j) { - (*out_shape)[i].push_back(out_shape_data[i][j]); - } - } - } -} - -inline void Symbol::InferExecutorArrays( - const Context &context, std::vector *arg_arrays, - std::vector *grad_arrays, std::vector *grad_reqs, - std::vector *aux_arrays, - const std::map &args_map, - const std::map &arg_grad_store, - const std::map &grad_req_type, - const std::map &aux_map) const { - - const auto arg_name_list = ListArguments(); - std::vector > in_shapes, aux_shapes, out_shapes; - std::map > arg_shapes; - - for (const auto &arg_name : arg_name_list) { - auto iter = args_map.find(arg_name); - if (iter != args_map.end()) { - arg_shapes[arg_name] = iter->second.GetShape(); - } - } - - InferShape(arg_shapes, &in_shapes, &aux_shapes, &out_shapes); - - for (size_t i = 0; i < in_shapes.size(); ++i) { - const auto &shape = in_shapes[i]; - const auto &arg_name = arg_name_list[i]; - auto iter_arg = args_map.find(arg_name); - if (iter_arg != args_map.end()) { - arg_arrays->push_back(iter_arg->second); - } else { - arg_arrays->push_back(NDArray(shape, context, false)); - NDArray::SampleGaussian(0, 1, &arg_arrays->back()); - } - auto iter_grad = arg_grad_store.find(arg_name); - if (iter_grad != arg_grad_store.end()) { - grad_arrays->push_back(iter_grad->second); - } else { - grad_arrays->push_back(NDArray(shape, context, false)); - } - auto iter_req = grad_req_type.find(arg_name); - if (iter_req != grad_req_type.end()) { - grad_reqs->push_back(iter_req->second); - } else if (arg_name.rfind("data") != std::string::npos - || arg_name.rfind("label") != std::string::npos) { - grad_reqs->push_back(OpReqType::kNullOp); - } else { - grad_reqs->push_back(OpReqType::kWriteTo); - } - } - - const auto aux_name_list = ListAuxiliaryStates(); - for (size_t i = 0; i < aux_shapes.size(); ++i) { - const auto &shape = aux_shapes[i]; - const auto &aux_name = aux_name_list[i]; - auto iter_aux = aux_map.find(aux_name); - if (iter_aux != aux_map.end()) { - aux_arrays->push_back(iter_aux->second); - } else { - aux_arrays->push_back(NDArray(shape, context, false)); - NDArray::SampleGaussian(0, 1, &aux_arrays->back()); - } - } -} -inline void Symbol::InferArgsMap( - const Context &context, std::map *args_map, - const std::map &known_args) const { - - const auto arg_name_list = ListArguments(); - std::vector > in_shapes, aux_shapes, out_shapes; - std::map > arg_shapes; - - for (const auto &arg_name : arg_name_list) { - auto iter = known_args.find(arg_name); - if (iter != known_args.end()) { - arg_shapes[arg_name] = iter->second.GetShape(); - } - } - - InferShape(arg_shapes, &in_shapes, &aux_shapes, &out_shapes); - - for 
(size_t i = 0; i < in_shapes.size(); ++i) { - const auto &shape = in_shapes[i]; - const auto &arg_name = arg_name_list[i]; - auto iter_arg = known_args.find(arg_name); - if (iter_arg != known_args.end()) { - (*args_map)[arg_name] = iter_arg->second; - } else { - (*args_map)[arg_name] = NDArray(shape, context, false); - NDArray::SampleGaussian(0, 1, &(*args_map)[arg_name]); - } - } -} - -inline Executor *Symbol::SimpleBind( - const Context &context, const std::map &args_map, - const std::map &arg_grad_store, - const std::map &grad_req_type, - const std::map &aux_map) { - std::vector arg_arrays; - std::vector grad_arrays; - std::vector grad_reqs; - std::vector aux_arrays; - - InferExecutorArrays(context, &arg_arrays, &grad_arrays, &grad_reqs, - &aux_arrays, args_map, arg_grad_store, grad_req_type, - aux_map); - - return new Executor(*this, context, arg_arrays, grad_arrays, grad_reqs, - aux_arrays); -} - -inline Executor *Symbol::Bind(const Context &context, - const std::vector &arg_arrays, - const std::vector &grad_arrays, - const std::vector &grad_reqs, - const std::vector &aux_arrays, - const std::map &group_to_ctx, - Executor *shared_exec) { - return new Executor(*this, context, arg_arrays, grad_arrays, grad_reqs, - aux_arrays, group_to_ctx, shared_exec); -} -inline Symbol operator+(mx_float lhs, const Symbol &rhs) { return rhs + lhs; } -inline Symbol operator-(mx_float lhs, const Symbol &rhs) { - return mxnet::cpp::_RMinusScalar(lhs, rhs); -} -inline Symbol operator*(mx_float lhs, const Symbol &rhs) { return rhs * lhs; } -inline Symbol operator/(mx_float lhs, const Symbol &rhs) { - return mxnet::cpp::_RDivScalar(lhs, rhs); -} -inline Symbol operator%(mx_float lhs, const Symbol &rhs) { - return mxnet::cpp::_RModScalar(lhs, rhs); -} -} // namespace cpp -} // namespace mxnet - -#endif // MXNET_CPP_SYMBOL_HPP_ diff --git a/cpp-package/scripts/OpWrapperGenerator.py b/cpp-package/scripts/OpWrapperGenerator.py deleted file mode 100644 index 96e20baf342d..000000000000 --- a/cpp-package/scripts/OpWrapperGenerator.py +++ /dev/null @@ -1,441 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# -*- coding: utf-8 -*- -# This is a python script that generates operator wrappers such as FullyConnected, -# based on current libmxnet.dll. This script is written so that we don't need to -# write new operator wrappers when new ones are added to the library. 
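The script whose removal begins below used to emit the `op.h` wrapper header. To show what its output looked like, here is an illustrative, hand-written wrapper in the style the generator produced (the operator name and enum values follow MXNet's `Activation`, but the exact generated text may differ):

```cpp
#include <string>
#include "mxnet-cpp/MxNetCpp.h"  // provides Symbol and Operator (pre-removal)

namespace mxnet {
namespace cpp {

// Illustrative enum + wrapper in the shape the generator emitted into op.h.
enum class ActivationActType {
  kRelu = 0,
  kSigmoid = 1,
  kSoftrelu = 2,
  kTanh = 3
};

inline Symbol Activation(const std::string &symbol_name,
                         Symbol data,
                         ActivationActType act_type) {
  static const char *ActivationActTypeValues[] = {
    "relu", "sigmoid", "softrelu", "tanh"
  };
  return Operator("Activation")
           .SetParam("act_type", ActivationActTypeValues[int(act_type)])
           .SetInput("data", data)
           .CreateSymbol(symbol_name);
}

}  // namespace cpp
}  // namespace mxnet
```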
- -import codecs -import filecmp -import logging -import os -import platform -import re -import shutil -import sys -import tempfile -import traceback -from ctypes import * -from ctypes.util import find_library - - -def gen_enum_value(value): - return 'k' + value[0].upper() + value[1:] - -class EnumType: - name = '' - enumValues = [] - def __init__(self, typeName = 'ElementWiseOpType', \ - typeString = "{'avg', 'max', 'sum'}"): - self.name = typeName - if (typeString[0] == '{'): # is a enum type - isEnum = True - # parse enum - self.enumValues = typeString[typeString.find('{') + 1:typeString.find('}')].split(',') - for i in range(0, len(self.enumValues)): - self.enumValues[i] = self.enumValues[i].strip().strip("'") - else: - logging.warn("trying to parse none-enum type as enum: %s" % typeString) - def GetDefinitionString(self, indent = 0): - indentStr = ' ' * indent - ret = indentStr + 'enum class %s {\n' % self.name - for i in range(0, len(self.enumValues)): - ret = ret + indentStr + ' %s = %d' % (gen_enum_value(self.enumValues[i]), i) - if (i != len(self.enumValues) -1): - ret = ret + "," - ret = ret + "\n" - ret = ret + "};\n" - return ret - def GetDefaultValueString(self, value = ''): - return self.name + "::" + gen_enum_value(value) - def GetEnumStringArray(self, indent = 0): - indentStr = ' ' * indent - ret = indentStr + 'static const char *%sValues[] = {\n' % self.name - for i in range(0, len(self.enumValues)): - ret = ret + indentStr + ' "%s"' % self.enumValues[i] - if (i != len(self.enumValues) -1): - ret = ret + "," - ret = ret + "\n" - ret = ret + indentStr + "};\n" - return ret - def GetConvertEnumVariableToString(self, variable=''): - return "%sValues[int(%s)]" % (self.name, variable) - - -class Arg: - typeDict = {'boolean':'bool',\ - 'boolean or None':'dmlc::optional',\ - 'Shape(tuple)':'Shape',\ - 'Symbol':'Symbol',\ - 'NDArray':'Symbol',\ - 'NDArray-or-Symbol':'Symbol',\ - 'Symbol[]':'const std::vector&',\ - 'Symbol or Symbol[]':'const std::vector&',\ - 'NDArray[]':'const std::vector&',\ - 'caffe-layer-parameter':'::caffe::LayerParameter',\ - 'NDArray-or-Symbol[]':'const std::vector&',\ - 'float':'mx_float',\ - 'real_t':'mx_float',\ - 'int':'int',\ - 'int (non-negative)': 'uint32_t',\ - 'long (non-negative)': 'uint64_t',\ - 'int or None':'dmlc::optional',\ - 'float or None':'dmlc::optional',\ - 'long':'int64_t',\ - 'double':'double',\ - 'double or None':'dmlc::optional',\ - 'Shape or None':'dmlc::optional',\ - 'string':'const std::string&',\ - 'tuple of ':'nnvm::Tuple'} - name = '' - type = '' - description = '' - isEnum = False - enum = None - hasDefault = False - defaultString = '' - def __init__(self, opName = '', argName = '', typeString = '', descString = ''): - self.name = argName - self.description = descString - if (typeString[0] == '{'): # is enum type - self.isEnum = True - self.enum = EnumType(self.ConstructEnumTypeName(opName, argName), typeString) - self.type = self.enum.name - else: - try: - self.type = self.typeDict[typeString.split(',')[0]] - except: - print('argument "%s" of operator "%s" has unknown type "%s"' % (argName, opName, typeString)) - pass - if typeString.find('default=') != -1: - self.hasDefault = True - self.defaultString = typeString.split('default=')[1].strip().strip("'") - if typeString.startswith('string'): - self.defaultString = self.MakeCString(self.defaultString) - elif self.isEnum: - self.defaultString = self.enum.GetDefaultValueString(self.defaultString) - elif self.defaultString == 'None': - self.defaultString = self.type + '()' - elif 
self.type == "bool": - if self.defaultString == "1" or self.defaultString == "True": - self.defaultString = "true" - else: - self.defaultString = "false" - elif self.defaultString[0] == '(': - self.defaultString = 'Shape' + self.defaultString - elif self.defaultString[0] == '[': - self.defaultString = 'Shape(' + self.defaultString[1:-1] + ")" - elif self.type == 'dmlc::optional': - self.defaultString = self.type + '(' + self.defaultString + ')' - elif self.type == 'dmlc::optional': - self.defaultString = self.type + '(' + self.defaultString + ')' - elif typeString.startswith('caffe-layer-parameter'): - self.defaultString = 'textToCaffeLayerParameter(' + self.MakeCString(self.defaultString) + ')' - hasCaffe = True - - def MakeCString(self, str): - str = str.replace('\n', "\\n") - str = str.replace('\t', "\\t") - return '\"' + str + '\"' - - def ConstructEnumTypeName(self, opName = '', argName = ''): - a = opName[0].upper() - # format ArgName so instead of act_type it returns ActType - argNameWords = argName.split('_') - argName = '' - for an in argNameWords: - argName = argName + an[0].upper() + an[1:] - typeName = a + opName[1:] + argName - return typeName - -class Op: - name = '' - description = '' - args = [] - - def __init__(self, name = '', description = '', args = []): - self.name = name - self.description = description - # add a 'name' argument - nameArg = Arg(self.name, \ - 'symbol_name', \ - 'string', \ - 'name of the resulting symbol') - args.insert(0, nameArg) - # reorder arguments, put those with default value to the end - orderedArgs = [] - for arg in args: - if not arg.hasDefault: - orderedArgs.append(arg) - for arg in args: - if arg.hasDefault: - orderedArgs.append(arg) - self.args = orderedArgs - - def WrapDescription(self, desc = ''): - ret = [] - sentences = desc.split('.') - lines = desc.split('\n') - for line in lines: - line = line.strip() - if len(line) <= 80: - ret.append(line.strip()) - else: - while len(line) > 80: - pos = line.rfind(' ', 0, 80)+1 - if pos <= 0: - pos = line.find(' ') - if pos < 0: - pos = len(line) - ret.append(line[:pos].strip()) - line = line[pos:] - return ret - - def GenDescription(self, desc = '', \ - firstLineHead = ' * \\brief ', \ - otherLineHead = ' * '): - ret = '' - descs = self.WrapDescription(desc) - ret = ret + firstLineHead - if len(descs) == 0: - return ret.rstrip() - ret = (ret + descs[0]).rstrip() + '\n' - for i in range(1, len(descs)): - ret = ret + (otherLineHead + descs[i]).rstrip() + '\n' - return ret - - def GetOpDefinitionString(self, use_name, indent=0): - ret = '' - indentStr = ' ' * indent - # define enums if any - for arg in self.args: - if arg.isEnum and use_name: - # comments - ret = ret + self.GenDescription(arg.description, \ - '/*! 
\\brief ', \ - ' * ') - ret = ret + " */\n" - # definition - ret = ret + arg.enum.GetDefinitionString(indent) + '\n' - # create function comments - ret = ret + self.GenDescription(self.description, \ - '/*!\n * \\brief ', \ - ' * ') - for arg in self.args: - if arg.name != 'symbol_name' or use_name: - ret = ret + self.GenDescription(arg.name + ' ' + arg.description, \ - ' * \\param ', \ - ' * ') - ret = ret + " * \\return new symbol\n" - ret = ret + " */\n" - # create function header - declFirstLine = indentStr + 'inline Symbol %s(' % self.name - ret = ret + declFirstLine - argIndentStr = ' ' * len(declFirstLine) - arg_start = 0 if use_name else 1 - if len(self.args) > arg_start: - ret = ret + self.GetArgString(self.args[arg_start]) - for i in range(arg_start+1, len(self.args)): - ret = ret + ',\n' - ret = ret + argIndentStr + self.GetArgString(self.args[i]) - ret = ret + ') {\n' - # create function body - # if there is enum, generate static enum<->string mapping - for arg in self.args: - if arg.isEnum: - ret = ret + arg.enum.GetEnumStringArray(indent + 2) - # now generate code - ret = ret + indentStr + ' return Operator(\"%s\")\n' % self.name - for arg in self.args: # set params - if arg.type == 'Symbol' or \ - arg.type == 'const std::string&' or \ - arg.type == 'const std::vector&': - continue - v = arg.name - if arg.isEnum: - v = arg.enum.GetConvertEnumVariableToString(v) - ret = ret + indentStr + ' ' * 11 + \ - '.SetParam(\"%s\", %s)\n' % (arg.name, v) - #ret = ret[:-1] # get rid of the last \n - symbols = '' - inputAlreadySet = False - for arg in self.args: # set inputs - if arg.type != 'Symbol': - continue - inputAlreadySet = True - #if symbols != '': - # symbols = symbols + ', ' - #symbols = symbols + arg.name - ret = ret + indentStr + ' ' * 11 + \ - '.SetInput(\"%s\", %s)\n' % (arg.name, arg.name) - for arg in self.args: # set input arrays vector - if arg.type != 'const std::vector&': - continue - if (inputAlreadySet): - logging.error("op %s has both Symbol[] and Symbol inputs!" 
% self.name) - inputAlreadySet = True - symbols = arg.name - ret = ret + '(%s)\n' % symbols - ret = ret + indentStr + ' ' * 11 - if use_name: - ret = ret + '.CreateSymbol(symbol_name);\n' - else: - ret = ret + '.CreateSymbol();\n' - ret = ret + indentStr + '}\n' - return ret - - def GetArgString(self, arg): - ret = '%s %s' % (arg.type, arg.name) - if arg.hasDefault: - ret = ret + ' = ' + arg.defaultString - return ret - - -def ParseAllOps(): - """ - MXNET_DLL int MXSymbolListAtomicSymbolCreators(mx_uint *out_size, - AtomicSymbolCreator **out_array); - - MXNET_DLL int MXSymbolGetAtomicSymbolInfo(AtomicSymbolCreator creator, - const char **name, - const char **description, - mx_uint *num_args, - const char ***arg_names, - const char ***arg_type_infos, - const char ***arg_descriptions, - const char **key_var_num_args); - """ - cdll.libmxnet = cdll.LoadLibrary(sys.argv[1]) - ListOP = cdll.libmxnet.MXSymbolListAtomicSymbolCreators - GetOpInfo = cdll.libmxnet.MXSymbolGetAtomicSymbolInfo - ListOP.argtypes=[POINTER(c_int), POINTER(POINTER(c_void_p))] - GetOpInfo.argtypes=[c_void_p, \ - POINTER(c_char_p), \ - POINTER(c_char_p), \ - POINTER(c_int), \ - POINTER(POINTER(c_char_p)), \ - POINTER(POINTER(c_char_p)), \ - POINTER(POINTER(c_char_p)), \ - POINTER(c_char_p), \ - POINTER(c_char_p) - ] - - nOps = c_int() - opHandlers = POINTER(c_void_p)() - r = ListOP(byref(nOps), byref(opHandlers)) - ret = '' - ret2 = '' - for i in range(0, nOps.value): - handler = opHandlers[i] - name = c_char_p() - description = c_char_p() - nArgs = c_int() - argNames = POINTER(c_char_p)() - argTypes = POINTER(c_char_p)() - argDescs = POINTER(c_char_p)() - varArgName = c_char_p() - return_type = c_char_p() - - GetOpInfo(handler, byref(name), byref(description), \ - byref(nArgs), byref(argNames), byref(argTypes), \ - byref(argDescs), byref(varArgName), byref(return_type)) - - if name.value.decode('utf-8').startswith('_'): # get rid of functions like __init__ - continue - - args = [] - - for i in range(0, nArgs.value): - arg = Arg(name.value.decode('utf-8'), - argNames[i].decode('utf-8'), - argTypes[i].decode('utf-8'), - argDescs[i].decode('utf-8')) - args.append(arg) - - op = Op(name.value.decode('utf-8'), description.value.decode('utf-8'), args) - - ret = ret + op.GetOpDefinitionString(True) + "\n" - ret2 = ret2 + op.GetOpDefinitionString(False) + "\n" - return ret + ret2 - -if __name__ == "__main__": - #et = EnumType(typeName = 'MyET') - #print(et.GetDefinitionString()) - #print(et.GetEnumStringArray()) - #arg = Arg() - #print(arg.ConstructEnumTypeName('SoftmaxActivation', 'act_type')) - #arg = Arg(opName = 'FullConnected', argName='act_type', \ - # typeString="{'elu', 'leaky', 'prelu', 'rrelu'},optional, default='leaky'", \ - # descString='Activation function to be applied.') - #print(arg.isEnum) - #print(arg.defaultString) - #arg = Arg("fc", "alpha", "float, optional, default=0.0001", "alpha") - #decl = "%s %s" % (arg.type, arg.name) - #if arg.hasDefault: - # decl = decl + "=" + arg.defaultString - #print(decl) - - temp_file_name = "" - output_file = '../include/mxnet-cpp/op.h' - try: - # generate file header - patternStr = ("/*!\n" - "* Copyright (c) 2016 by Contributors\n" - "* \\file op.h\n" - "* \\brief definition of all the operators\n" - "* \\author Chuntao Hong, Xin Li\n" - "*/\n" - "\n" - "#ifndef MXNET_CPP_OP_H_\n" - "#define MXNET_CPP_OP_H_\n" - "\n" - "#include \n" - "#include \n" - "#include \"mxnet-cpp/base.h\"\n" - "#include \"mxnet-cpp/shape.h\"\n" - "#include \"mxnet-cpp/op_util.h\"\n" - "#include 
\"mxnet-cpp/operator.h\"\n" - "#include \"dmlc/optional.h\"\n" - "#include \"nnvm/tuple.h\"\n" - "\n" - "namespace mxnet {\n" - "namespace cpp {\n" - "\n" - "%s" - "} //namespace cpp\n" - "} //namespace mxnet\n" - "#endif // MXNET_CPP_OP_H_\n") - - # Generate a temporary file name - tf = tempfile.NamedTemporaryFile() - temp_file_name = tf.name - tf.close() - with codecs.open(temp_file_name, 'w', 'utf-8') as f: - f.write(patternStr % ParseAllOps()) - except Exception as e: - traceback.print_exc() - if (os.path.exists(output_file)): - os.remove(output_file) - if len(temp_file_name) > 0: - os.remove(temp_file_name) - raise(e) - if os.path.exists(output_file): - if not filecmp.cmp(temp_file_name, output_file): - os.remove(output_file) - if not os.path.exists(output_file): - shutil.move(temp_file_name, output_file) diff --git a/cpp-package/scripts/lint.py b/cpp-package/scripts/lint.py deleted file mode 100644 index f6e549878a42..000000000000 --- a/cpp-package/scripts/lint.py +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env python - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# pylint: disable=protected-access, unused-variable, locally-disabled, redefined-variable-type -"""Lint helper to generate lint summary of source. 
-Copyright by Contributors -""" -from __future__ import print_function -import codecs -import sys -import re -import os -import cpplint -from cpplint import _cpplint_state -from pylint import epylint - -CXX_SUFFIX = set(['cc', 'c', 'cpp', 'h', 'cu', 'hpp']) -PYTHON_SUFFIX = set(['py']) - -class LintHelper(object): - """Class to help runing the lint and records summary""" - - @staticmethod - def _print_summary_map(strm, result_map, ftype): - """Print summary of certain result map.""" - if len(result_map) == 0: - return 0 - npass = len([x for k, x in result_map.iteritems() if len(x) == 0]) - strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype)) - for fname, emap in result_map.iteritems(): - if len(emap) == 0: - continue - strm.write('%s: %d Errors of %d Categories map=%s\n' % ( - fname, sum(emap.values()), len(emap), str(emap))) - return len(result_map) - npass - - def __init__(self): - self.project_name = None - self.cpp_header_map = {} - self.cpp_src_map = {} - self.python_map = {} - pylint_disable = ['superfluous-parens', - 'too-many-instance-attributes', - 'too-few-public-methods'] - # setup pylint - self.pylint_opts = ['--extension-pkg-whitelist=numpy', - '--disable=' + ','.join(pylint_disable)] - - self.pylint_cats = set(['error', 'warning', 'convention', 'refactor']) - # setup cpp lint - cpplint_args = ['.', '--extensions=' + (','.join(CXX_SUFFIX))] - _ = cpplint.ParseArguments(cpplint_args) - cpplint._SetFilters(','.join(['-build/c++11', - '-build/namespaces', - '-build/include', - '-build/header_guard', - '+build/include_what_you_use', - '+build/include_order'])) - cpplint._SetCountingStyle('toplevel') - cpplint._line_length = 100 - - def process_cpp(self, path, suffix): - """Process a cpp file.""" - _cpplint_state.ResetErrorCounts() - cpplint.ProcessFile(str(path), _cpplint_state.verbose_level) - _cpplint_state.PrintErrorCounts() - errors = _cpplint_state.errors_by_category.copy() - - if suffix == 'h': - self.cpp_header_map[str(path)] = errors - else: - self.cpp_src_map[str(path)] = errors - - def process_python(self, path): - """Process a python file.""" - (pylint_stdout, pylint_stderr) = epylint.py_run( - ' '.join([str(path)] + self.pylint_opts), return_std=True) - emap = {} - print(pylint_stderr.read()) - for line in pylint_stdout: - sys.stderr.write(line) - key = line.split(':')[-1].split('(')[0].strip() - if key not in self.pylint_cats: - continue - if key not in emap: - emap[key] = 1 - else: - emap[key] += 1 - sys.stderr.write('\n') - self.python_map[str(path)] = emap - - def print_summary(self, strm): - """Print summary of lint.""" - nerr = 0 - nerr += LintHelper._print_summary_map(strm, self.cpp_header_map, 'cpp-header') - nerr += LintHelper._print_summary_map(strm, self.cpp_src_map, 'cpp-soruce') - nerr += LintHelper._print_summary_map(strm, self.python_map, 'python') - if nerr == 0: - strm.write('All passed!\n') - else: - strm.write('%d files failed lint\n' % nerr) - return nerr - -# singleton helper for lint check -_HELPER = LintHelper() - -def get_header_guard_dmlc(filename): - """Get Header Guard Convention for DMLC Projects. 
- For headers in include, directly use the path - For headers in src, use project name plus path - Examples: with project-name = dmlc - include/dmlc/timer.h -> DMLC_TIMTER_H_ - src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_ - """ - fileinfo = cpplint.FileInfo(filename) - file_path_from_root = fileinfo.RepositoryName() - inc_list = ['include', 'api', 'wrapper'] - - if file_path_from_root.find('src/') != -1 and _HELPER.project_name is not None: - idx = file_path_from_root.find('src/') - file_path_from_root = _HELPER.project_name + file_path_from_root[idx + 3:] - else: - for spath in inc_list: - prefix = spath + os.sep - if file_path_from_root.startswith(prefix): - file_path_from_root = re.sub('^' + prefix, '', file_path_from_root) - break - return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_' - -cpplint.GetHeaderGuardCPPVariable = get_header_guard_dmlc - -def process(fname, allow_type): - """Process a file.""" - fname = str(fname) - # HACK: ignore op.h which is automatically generated - if fname.endswith('op.h'): - return - arr = fname.rsplit('.', 1) - if fname.find('#') != -1 or arr[-1] not in allow_type: - return - if arr[-1] in CXX_SUFFIX: - _HELPER.process_cpp(fname, arr[-1]) - if arr[-1] in PYTHON_SUFFIX: - _HELPER.process_python(fname) - -def main(): - """Main entry function.""" - if len(sys.argv) < 3: - print('Usage: ') - print('\tfiletype can be python/cpp/all') - exit(-1) - _HELPER.project_name = sys.argv[1] - file_type = sys.argv[2] - allow_type = [] - if file_type == 'python' or file_type == 'all': - allow_type += [x for x in PYTHON_SUFFIX] - if file_type == 'cpp' or file_type == 'all': - allow_type += [x for x in CXX_SUFFIX] - allow_type = set(allow_type) - if os.name != 'nt': - sys.stderr = codecs.StreamReaderWriter(sys.stderr, - codecs.getreader('utf8'), - codecs.getwriter('utf8'), - 'replace') - for path in sys.argv[3:]: - if os.path.isfile(path): - process(path, allow_type) - else: - for root, dirs, files in os.walk(path): - for name in files: - process(os.path.join(root, name), allow_type) - - nerr = _HELPER.print_summary(sys.stderr) - sys.exit(nerr > 0) - -if __name__ == '__main__': - main() diff --git a/cpp-package/tests/ci_test.sh b/cpp-package/tests/ci_test.sh deleted file mode 100755 index d04522ded198..000000000000 --- a/cpp-package/tests/ci_test.sh +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -e # exit on the first error -cd $(dirname $(readlink -f $0))/../example -echo $PWD -export LD_LIBRARY_PATH=$(readlink -f ../../lib):$LD_LIBRARY_PATH -echo $LD_LIBRARY_PATH -ls -l ../../lib/ - -./get_data.sh - -cp ../../build/cpp-package/example/test_optimizer . -./test_optimizer - -cp ../../build/cpp-package/example/test_kvstore . 
-./test_kvstore - -cp ../../build/cpp-package/example/test_ndarray_copy . -./test_ndarray_copy - -cd inference - -cp ../../../build/cpp-package/example/sentiment_analysis_rnn . -./unit_test_sentiment_analysis_rnn.sh -cd .. diff --git a/cpp-package/tests/travis/run_test.sh b/cpp-package/tests/travis/run_test.sh deleted file mode 100755 index 4925b3526bf3..000000000000 --- a/cpp-package/tests/travis/run_test.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -if [ ${TASK} == "lint" ]; then - make lint || exit -1 - echo "Check documentations of c++ code..." - make doc 2>log.txt - (cat log.txt| grep -v ENABLE_PREPROCESSING |grep -v "unsupported tag") > logclean.txt - echo "---------Error Log----------" - cat logclean.txt - echo "----------------------------" - (cat logclean.txt|grep warning) && exit -1 - (cat logclean.txt|grep error) && exit -1 - exit 0 -fi - -if [ ${TRAVIS_OS_NAME} == "linux" ]; then - # use g++-4.8 in linux - export CXX=g++-4.8 -fi - -if [ ${TASK} == "build" ]; then - make - exit $? -fi diff --git a/cpp-package/tests/travis/setup.sh b/cpp-package/tests/travis/setup.sh deleted file mode 100755 index e0c850ed39a9..000000000000 --- a/cpp-package/tests/travis/setup.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -if [ ${TASK} == "lint" ]; then - pip3 install cpplint 'pylint==2.3.1' --user -fi diff --git a/docker/install/r.sh b/docker/install/r.sh index a0fa27359ba5..639509fc5af0 100755 --- a/docker/install/r.sh +++ b/docker/install/r.sh @@ -28,9 +28,5 @@ apt-get install -y r-base r-base-dev libxml2-dev libxt-dev libssl-dev cd "$(dirname "${BASH_SOURCE[0]}")" -if [ ! -f "./DESCRIPTION" ]; then - cp ../../R-package/DESCRIPTION . 
-fi
-
 Rscript -e "install.packages('devtools', repo = 'https://cran.rstudio.com')"
 Rscript -e "library(devtools); library(methods); options(repos=c(CRAN='https://cran.rstudio.com')); install_deps(dependencies = TRUE)"
diff --git a/docs/static_site/src/pages/api/faq/smart_device.md b/docs/static_site/src/pages/api/faq/smart_device.md
deleted file mode 100644
index 7fe8ddd6e477..000000000000
--- a/docs/static_site/src/pages/api/faq/smart_device.md
+++ /dev/null
@@ -1,120 +0,0 @@
----
-layout: page_category
-title: Deep Learning at the Edge
-category: faq
-faq_c: Deployment Environments
-question: How to run MXNet securely?
-permalink: /api/faq/smart_device
----
-
-<!--- Licensed to the Apache Software Foundation (ASF) under one -->
-<!--- or more contributor license agreements.  See the NOTICE file -->
-<!--- distributed with this work for additional information -->
-<!--- regarding copyright ownership.  The ASF licenses this file -->
-<!--- to you under the Apache License, Version 2.0 (the -->
-<!--- "License"); you may not use this file except in compliance -->
-<!--- with the License.  You may obtain a copy of the License at -->
-<!---   http://www.apache.org/licenses/LICENSE-2.0 -->
-<!--- Unless required by applicable law or agreed to in writing, -->
-<!--- software distributed under the License is distributed on an -->
-<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
-<!--- KIND, either express or implied.  See the License for the -->
-<!--- specific language governing permissions and limitations -->
-<!--- under the License. -->
-
-# Deep Learning in a Single File for Smart Devices
-
-Deep learning (DL) systems are complex and often depend on a number of libraries.
-Porting a DL library to different platforms can be painful, especially for smart devices.
-One simple solution to this problem is to provide a light interface to the library, complete with all required code in a single file with minimal dependencies.
-In this document, we explain how to amalgamate all necessary code into a single file,
-and demonstrate the approach with an example in which we run image recognition on a mobile device.
-
-## Amalgamation: Making the Whole System a Single File
-
-We came to the idea of amalgamation following the example of SQLite,
-which packs all the code needed to run a simple database into a single source file.
-All that's necessary to create the library is to compile that single file.
-This simplifies the problem of porting to various platforms.
-
-Thanks to [Jack Deng](https://github.com/jdeng),
-MXNet provides an [amalgamation](https://github.com/dmlc/mxnet/tree/master/amalgamation) script
-that compiles all the code needed for prediction based on trained DL models into a single `.cc` file,
-containing approximately 30K lines of code. This code depends only on the BLAS library.
-Moreover, we've also created an even more minimal version,
-with the BLAS dependency removed.
-You can compile the single file into JavaScript by using [emscripten](https://github.com/kripken/emscripten).
-
-The compiled library can be used by any other programming language.
-The `.h` file contains a light prediction API.
-Porting to another language with a C foreign function interface requires little effort.
-
-For examples, see the following projects on GitHub:
-
-- Go: [https://github.com/jdeng/gomxnet](https://github.com/jdeng/gomxnet)
-- Java: [https://github.com/dmlc/mxnet/tree/master/amalgamation/jni](https://github.com/dmlc/mxnet/tree/master/amalgamation/jni)
-- Python: [https://github.com/dmlc/mxnet/tree/master/amalgamation/python](https://github.com/dmlc/mxnet/tree/master/amalgamation/python)
-
-If you plan to amalgamate your system, there are a few guidelines you ought to observe when building the project:
-
-- Minimize dependence on other libraries.
-- Use namespaces to encapsulate types and operators.
-- Avoid running commands such as ```using namespace xyz``` at global scope.
-- Avoid cyclic include dependencies.
-
-## Image Recognition Demo on Mobile Devices
-
-With amalgamation, deploying the system on smart devices (such as Android or iOS) is simple. But there are two additional considerations:
-
-- The model should be small enough to fit into the device's memory.
-- The model shouldn't be too expensive to run given the relatively low computational power of these devices.
-
-Let's use image recognition as an example.
-We start with the state-of-the-art Inception model.
-We train it on an ImageNet dataset,
-using multiple servers with GTX 980 cards.
-The resulting model fits into memory,
-but it's too expensive to run.
-We remove some layers, but then the results are poor.
-
-Finally, we show an Android example, [WhatsThis](https://github.com/Leliana/WhatsThis)
-(thanks to Leliana), to demonstrate how to run on Android.
-
-By using amalgamation, we can easily port the prediction library to mobile devices, with nearly no dependencies.
-After compiling the library for smart platforms, the last thing we must do is to call the C API from the target language (Java/Swift).
-
-Besides this pre-trained Inception-BatchNorm network, we provide two additional pre-trained models.
-
-We tested our models on a Nexus 5:
-
-|                  | Top-1 Validation on ILSVRC2012       | Time  | App Size | Runtime Temp Memory Req |
-| ---------------- | ------------------------------------ | ----- | -------- | ----------------------- |
-| FastPoorNet      | around 52%, similar to 2011 winner   | 1s    | <10MB    | <5MB                    |
-| Sub InceptionBN  | around 64%, similar to 2013 winner   | 2.7s  | <40MB    | <10MB                   |
-| InceptionBN      | around 70%                           | 4s-5s | <60MB    | 10MB                    |
-
-These models are for demonstration only.
-They aren't fine-tuned for mobile devices,
-and there is definitely room for improvement.
-We believe that making a lightweight, portable,
-and fast deep learning library is fun and interesting,
-and hope you enjoy using the library.
-
-## Source Code
-[https://github.com/Leliana/WhatsThis](https://github.com/Leliana/WhatsThis)
-
-## Demo APK Download
-
-- [FastPoorNet](https://github.com/dmlc/web-data/blob/master/mxnet/apk/fastpoornet.apk?raw=true)
-
-- [SubInception](https://github.com/dmlc/web-data/blob/master/mxnet/apk/subinception.apk?raw=true)
diff --git a/docs/static_site/src/pages/api/r/index.md b/docs/static_site/src/pages/api/r/index.md
index 01aee98801aa..4538778f2603 100644
--- a/docs/static_site/src/pages/api/r/index.md
+++ b/docs/static_site/src/pages/api/r/index.md
@@ -47,6 +47,3 @@ You can perform tensor or matrix computation in R:
 [1,] 2 2 2
 [2,] 2 2 2
 ```
-## Resources
-
-* [MXNet R Reference Manual](/api/r/docs/api/R-package/build/mxnet-r-reference-manual.pdf)
diff --git a/docs/static_site/src/pages/get_started/build_from_source.md b/docs/static_site/src/pages/get_started/build_from_source.md
index 4ec9a7fa838a..a80a3d3ee40f 100644
--- a/docs/static_site/src/pages/get_started/build_from_source.md
+++ b/docs/static_site/src/pages/get_started/build_from_source.md
@@ -248,148 +248,6 @@ python3 -m pip install --user graphviz==0.8.4 jupyter
 Please also see the [MXNet Python API](/api/python) page.
-### Install the MXNet Package for C++
-
-To enable the C++ package, add `USE_CPP_PACKAGE=1` as a build option when
-building the MXNet shared library, following the instructions from the
-previous section.
-
-You can find C++ code examples in the `cpp-package/example` folder of the MXNet
-project. The folder contains a README explaining how to build the examples. The
-`predict-cpp` folder explains image classification using MXNet's C Predict API.
-(A minimal sketch of a program built against this package is shown after the
-Clojure section below.)
-
-Please also see the [MXNet C++ API](/api/cpp) page.
-
-### Install the MXNet Package for Clojure
-
-Refer to the [Clojure setup
-guide](https://github.com/apache/incubator-mxnet/tree/master/contrib/clojure-package).
-
-Please also see the [MXNet Clojure API](/api/clojure) page.
-
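As referenced in the C++ package section above, here is a minimal sketch of the kind of program `USE_CPP_PACKAGE=1` used to enable. It assumes a pre-removal checkout with the `mxnet-cpp` headers on the include path and a built `libmxnet` to link against; the build command is an assumption to adapt:

```cpp
// Assumed build: g++ -std=c++17 demo.cc -Icpp-package/include -Iinclude -Llib -lmxnet -o demo
#include <iostream>
#include "mxnet-cpp/MxNetCpp.h"

using namespace mxnet::cpp;

int main() {
  // Elementwise add of two 2x3 arrays on CPU through the C++ frontend.
  Context ctx = Context::cpu();
  NDArray a(Shape(2, 3), ctx);
  NDArray b(Shape(2, 3), ctx);
  a = 1.0f;                      // scalar assignment fills the array
  b = 2.0f;
  NDArray c = a + b;
  NDArray::WaitAll();            // the engine is asynchronous; sync before reading
  std::cout << c << std::endl;   // prints a 2x3 array of 3s
  return 0;
}
```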
- -To use the Julia binding you need to set the `MXNET_HOME` and `LD_LIBRARY_PATH` -environment variables. For example, - -```bash -export MXNET_HOME=$HOME/incubator-mxnet -export LD_LIBRARY_PATH=$HOME/incubator-mxnet/build:$LD_LIBRARY_PATH -``` - -Then install MXNet with Julia: - -```bash -julia --color=yes --project=./ -e \ - 'using Pkg; \ - Pkg.develop(PackageSpec(name="MXNet", path = joinpath(ENV["MXNET_HOME"], "julia")))' -``` - -Please also see the [MXNet Julia API](/api/julia) page. - - -### Install the MXNet Package for Perl - -#### Installing perl package dependencies on Debian Linux derivatives (Debian, Ubuntu, ...) - -``` -sudo apt-get install libmouse-perl pdl cpanminus swig libgraphviz-perl -cpanm -q -L "${HOME}/perl5" Function::Parameters Hash::Ordered PDL::CCS -``` - -#### Installing perl package dependencies on macOS -```bash -brew install swig -sudo sh -c 'curl -L https://cpanmin.us | perl - App::cpanminus' -sudo cpanm -q -n PDL Mouse Function::Parameters Hash::Ordered PDL::CCS -``` - -#### Install the MXNet Package for Perl -After you build the shared library, run the following command from the MXNet -source root directory to build the MXNet Perl package: - -```bash -MXNET_HOME=${PWD} -export LD_LIBRARY_PATH=${MXNET_HOME}/lib -export PERL5LIB=${HOME}/perl5/lib/perl5 - -cd ${MXNET_HOME}/perl-package/AI-MXNetCAPI/ -perl Makefile.PL INSTALL_BASE=${HOME}/perl5 -make install - -cd ${MXNET_HOME}/perl-package/AI-NNVMCAPI/ -perl Makefile.PL INSTALL_BASE=${HOME}/perl5 -make install - -cd ${MXNET_HOME}/perl-package/AI-MXNet/ -perl Makefile.PL INSTALL_BASE=${HOME}/perl5 -make install -``` - -Please also see the [MXNet Perl API](/api/perl) page. - -### Install the MXNet Package for R - -To install R and the devtools, run - -```bash -sudo apt-get update -sudo apt-get install -y r-base-core r-cran-devtools libcairo2-dev libxml2-dev -``` - -`libxml2-dev` is required for the `roxygen2` dependency and `libcairo2-dev` is -required for the suggested `imager` dependency. - -To generate documentation, it is also required to install `roxygen2`. - -```bash -R -> install.packages("roxygen2") -> Would you like to use a personal library instead? (y/n) y -> Would you like to create a personal library ... to install packages into? (y/n) y -``` - -Note: To successfully complete the next step, you need a personal R library. If -you were able to run `install.packages("roxygen2")` above, you either had -already, or you have successfully created a personal library just now. - -To build and install the MXNet-R bindings, run: - -```bash -make -f R-package/Makefile rpkg -``` - -Please also see the [MXNet R API](/api/r) page. - -### Install the MXNet Package for Scala - -After building the MXNet shared library, you may simply run the following from -the MXNet scala-package folder: - -```bash -mvn install -``` - -This will install both the Java Inference API and the required MXNet-Scala package.
- -Please also see the [MXNet Scala API](/api/scala) page. - -### Install the MXNet Package for Java - -After building the MXNet shared library, you may simply run the following from -the MXNet scala-package folder: - -```bash -mvn install -``` - -This will install both the Java Inference API and the required MXNet-Scala package.
- -Please also see the [MXNet Java API](/api/java) page. - ## Contributions You are more than welcome to contribute easy installation scripts for other operating systems and programming languages. diff --git a/example/README.md b/example/README.md index c21dbc4d3edc..d0eafa7dc54a 100644 --- a/example/README.md +++ b/example/README.md @@ -93,8 +93,6 @@ If your tutorial depends on specific packages, simply add them to this provision * [MXNet Julia API](https://mxnet.apache.org/api/julia/index.html) * [MXNet Perl API](https://mxnet.apache.org/api/perl/index.html) * [go-mxnet-predictor](https://github.com/songtianyi/go-mxnet-predictor) - Go binding for inference -* [MXNet JNI](https://github.com/dmlc/mxnet/tree/master/amalgamation/jni) - JNI(Android) library -* [MXNet Amalgamation](https://github.com/dmlc/mxnet/tree/master/amalgamation) - Amalgamation (entire library in a single file) * [MXNet Javascript](https://github.com/dmlc/mxnet.js/) - MXNetJS: Javascript Package for Deep Learning in Browser (without server) ### Deep Learning Examples in the MXNet Project Repository diff --git a/example/image-classification/predict-cpp/CMakeLists.txt b/example/image-classification/predict-cpp/CMakeLists.txt deleted file mode 100644 index bdc64b30c466..000000000000 --- a/example/image-classification/predict-cpp/CMakeLists.txt +++ /dev/null @@ -1,19 +0,0 @@ -# Check OpenCV -if(NOT USE_OPENCV OR NOT OpenCV_FOUND OR OpenCV_VERSION_MAJOR LESS 3) - message(WARNING "\ -OpenCV version >= 3 should be enabled and found to build image classification example, skipping...") - return() -endif() - -add_executable(image-classification-predict image-classification-predict.cc) -include_directories(SYSTEM ${OpenCV_INCLUDE_DIRS}) - -target_link_libraries(image-classification-predict - dmlc - ${nnvm_LINKER_LIBS} - ${mxnet_LINKER_LIBS} - mxnet - ) -add_dependencies(image-classification-predict mxnet) - - diff --git a/example/image-classification/predict-cpp/Makefile b/example/image-classification/predict-cpp/Makefile deleted file mode 100644 index 05f1afc53821..000000000000 --- a/example/image-classification/predict-cpp/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -# Special thanks to https://github.com/pertusa for the Makefile -CFLAGS=-std=c++17 -Wno-unknown-pragmas -Wall - -# Added for openblas -# export OPENBLAS_ROOT=/usr/local/opt/openblas - -# CFLAGS+= -I${OPENBLAS_ROOT}/include -# LDFLAGS=-L${OPENBLAS_ROOT}/lib -lopenblas - -# Added for opencv -CFLAGS+= `pkg-config --cflags opencv` -LDFLAGS+=`pkg-config --libs opencv` - -# Added for mxnet -export MXNET_ROOT=`pwd`/../../.. 
-
-CFLAGS+=-Wall -I$(MXNET_ROOT)/include
-LDFLAGS+=$(MXNET_ROOT)/lib/libmxnet.so -lpthread
-
-image-classification-predict: image-classification-predict.o
-	g++ -O3 -o image-classification-predict image-classification-predict.o $(LDFLAGS)
-
-image-classification-predict.o: image-classification-predict.cc
-	g++ -O3 -c image-classification-predict.cc ${CFLAGS}
-
-clean:
-	rm image-classification-predict
-	rm -f *.d *.o
-
-lint:
-	python ../../../3rdparty/dmlc-core/scripts/lint.py mxnet "cpp" ./
diff --git a/example/image-classification/predict-cpp/README.md b/example/image-classification/predict-cpp/README.md
deleted file mode 100644
index a59e4d70ee65..000000000000
--- a/example/image-classification/predict-cpp/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# Image Classification Example Using the C Predict API
-This is a simple predictor that shows how to use the MXNet C Predict API for image classification with a pre-trained ImageNet model, in a single thread and in multiple threads.
-
-## Prerequisites
-
-* OpenCV for image processing: `USE_OPENCV` is set to true by default when [building from source](https://mxnet.apache.org/install/build_from_source.html)
-
-## How to Use this Example
-
-### Download the Model Artifacts
-1. You will need the model artifacts for the Inception ImageNet model. You can download these from http://data.mxnet.io/mxnet/models/imagenet/inception-bn/
-2. Place them into a `model/Inception/` subfolder; otherwise, you will need to edit the source file and update the paths in the Build step.
-
-* [model/Inception/Inception-BN-symbol.json](http://data.mxnet.io/mxnet/models/imagenet/inception-bn/Inception-BN-symbol.json)
-* [model/Inception/Inception-BN-0126.params](http://data.mxnet.io/mxnet/models/imagenet/inception-bn/Inception-BN-0126.params)
-* [model/Inception/synset.txt](http://data.mxnet.io/mxnet/models/imagenet/synset.txt)
-
-### Build
-1. If using a different location for the model artifacts, edit the `image-classification-predict.cc` file and change the following lines to your artifacts' paths:
-    ```c
-    // Models path for your model, you have to modify it
-    std::string json_file = "model/Inception/Inception-BN-symbol.json";
-    std::string param_file = "model/Inception/Inception-BN-0126.params";
-    std::string synset_file = "model/Inception/synset.txt";
-    std::string nd_file = "model/Inception/mean_224.nd";
-    ```
-
-2. You may also want to change the image size and channels:
-    ```c
-    // Image size and channels
-    int width = 224;
-    int height = 224;
-    int channels = 3;
-    ```
-
-3. Use the provided Makefile to build:
-    ```bash
-    make
-    ```
-
-### Run
-Run the example by passing it an image that you want to classify. If you don't have one handy, run the following to get one:
-
-  ```bash
-  wget https://upload.wikimedia.org/wikipedia/commons/thumb/f/f4/Honeycrisp.jpg/1920px-Honeycrisp.jpg
-  ```
-
-Then run the `image-classification-predict` program, passing the image as the first argument and the number of threads as the second argument.
-
-  ```bash
-  ./image-classification-predict 1920px-Honeycrisp.jpg 1
-  ```
-
-## Tips
-
-* If you don't run it from the MXNet root path, you may need to copy the `lib` folder here.
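The README above boils down to one call sequence against the C Predict API. Here is a hedged sketch of that sequence (assumptions: the model JSON and parameter bytes are already read into memory, and error checking is omitted), using only functions declared in `include/mxnet/c_predict_api.h`:

```cpp
#include <cstddef>
#include <vector>
#include <mxnet/c_predict_api.h>

// One prediction pass: create -> set input -> forward -> read output -> free.
int predict_once(const char* json, const void* params, int param_len,
                 const std::vector<float>& image /* 1x3x224x224, mean-subtracted */) {
  const char* keys[] = {"data"};
  const mx_uint indptr[] = {0, 4};           // one input node with 4 dims
  const mx_uint shape[] = {1, 3, 224, 224};  // NCHW

  PredictorHandle pred = nullptr;
  MXPredCreate(json, params, param_len, 1 /* cpu */, 0, 1,
               keys, indptr, shape, &pred);

  MXPredSetInput(pred, "data", image.data(), static_cast<mx_uint>(image.size()));
  MXPredForward(pred);

  mx_uint* out_shape = nullptr;
  mx_uint out_dim = 0;
  MXPredGetOutputShape(pred, 0, &out_shape, &out_dim);

  std::size_t n = 1;
  for (mx_uint i = 0; i < out_dim; ++i) n *= out_shape[i];

  std::vector<float> probs(n);                  // class probabilities
  MXPredGetOutput(pred, 0, probs.data(), static_cast<mx_uint>(n));
  MXPredFree(pred);
  return 0;
}
```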
-
-## Author
-* **Xiao Liu**
-
-* E-mail: liuxiao@foxmail.com
-
-* Homepage: [www.liuxiao.org](http://www.liuxiao.org/)
-
-## Thanks
-* pertusa (for Makefile and image reading check)
-
-* caprice-j (for reading function)
-
-* sofiawu (for sample model)
-
-* piiswrong and tqchen (for useful coding suggestions)
diff --git a/example/image-classification/predict-cpp/image-classification-predict.cc b/example/image-classification/predict-cpp/image-classification-predict.cc
deleted file mode 100644
index 0971f14bed49..000000000000
--- a/example/image-classification/predict-cpp/image-classification-predict.cc
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*!
- * Copyright (c) 2015 by Xiao Liu, pertusa, caprice-j
- * \file image_classification-predict.cpp
- * \brief C++ predict example of mxnet
- *
- * This is a simple predictor which shows how to use the C API for image classification. It uses
- * OpenCV for image reading.
- *
- * Created by liuxiao on 12/9/15.
- * Thanks to: pertusa, caprice-j, sofiawu, tqchen, piiswrong
- * Home Page: www.liuxiao.org
- * E-mail: liuxiao@foxmail.com
-*/
-
-#include <cstdio>
-#include <cstdlib>
-#include <iostream>
-#include <fstream>
-#include <vector>
-#include <memory>
-#include <thread>
-#include <iomanip>
-#include <opencv2/opencv.hpp>
-// Path for c_predict_api
-#include <mxnet/c_predict_api.h>
-
-const mx_float DEFAULT_MEAN = 117.0;
-
-static std::string trim(const std::string& input) {
-  auto not_space = [](int ch) {
-    return !std::isspace(ch);
-  };
-  auto output = input;
-  output.erase(output.begin(), std::find_if(output.begin(), output.end(), not_space));
-  output.erase(std::find_if(output.rbegin(), output.rend(), not_space).base(), output.end());
-  return output;
-}
-
-// Read file to buffer
-class BufferFile {
- public :
-  std::string file_path_;
-  std::size_t length_ = 0;
-  std::unique_ptr<char[]> buffer_;
-
-  explicit BufferFile(const std::string& file_path)
-    : file_path_(file_path) {
-    std::ifstream ifs(file_path.c_str(), std::ios::in | std::ios::binary);
-    if (!ifs) {
-      std::cerr << "Can't open the file. Please check " << file_path << ". \n";
-      return;
-    }
-
-    ifs.seekg(0, std::ios::end);
-    length_ = static_cast<std::size_t>(ifs.tellg());
-    ifs.seekg(0, std::ios::beg);
-    std::cout << file_path.c_str() << " ... " << length_ << " bytes\n";
-
-    // Buffer as null terminated to be converted to string
-    buffer_.reset(new char[length_ + 1]);
-    buffer_[length_] = 0;
-    ifs.read(buffer_.get(), length_);
-    ifs.close();
-  }
-
-  std::size_t GetLength() {
-    return length_;
-  }
-
-  char* GetBuffer() {
-    return buffer_.get();
-  }
-};
-
-void GetImageFile(const std::string& image_file,
-                  mx_float* image_data, int channels,
-                  cv::Size resize_size, const mx_float* mean_data = nullptr) {
-  // Read all kinds of file into a BGR color 3 channels image
-  cv::Mat im_ori = cv::imread(image_file, cv::IMREAD_COLOR);
-
-  if (im_ori.empty()) {
-    std::cerr << "Can't open the image. Please check " << image_file << ". \n";
-    std::abort();
-  }
-
-  cv::Mat im;
-
-  resize(im_ori, im, resize_size);
-
-  int size = im.rows * im.cols * channels;
-
-  mx_float* ptr_image_r = image_data;
-  mx_float* ptr_image_g = image_data + size / 3;
-  mx_float* ptr_image_b = image_data + size / 3 * 2;
-
-  float mean_b, mean_g, mean_r;
-  mean_b = mean_g = mean_r = DEFAULT_MEAN;
-
-  for (int i = 0; i < im.rows; i++) {
-    auto data = im.ptr<uchar>(i);
-
-    for (int j = 0; j < im.cols; j++) {
-      if (mean_data) {
-        mean_r = *mean_data;
-        if (channels > 1) {
-          mean_g = *(mean_data + size / 3);
-          mean_b = *(mean_data + size / 3 * 2);
-        }
-        mean_data++;
-      }
-      if (channels > 1) {
-        *ptr_image_b++ = static_cast<mx_float>(*data++) - mean_b;
-        *ptr_image_g++ = static_cast<mx_float>(*data++) - mean_g;
-      }
-
-      *ptr_image_r++ = static_cast<mx_float>(*data++) - mean_r;
-    }
-  }
-}
-
-// LoadSynsets
-// Code from : https://github.com/pertusa/mxnet_predict_cc/blob/master/mxnet_predict.cc
-std::vector<std::string> LoadSynset(const std::string& synset_file) {
-  std::ifstream fi(synset_file.c_str());
-
-  if (!fi.is_open()) {
-    std::cerr << "Error opening synset file " << synset_file << std::endl;
-    std::abort();
-  }
-
-  std::vector<std::string> output;
-
-  std::string synset, lemma;
-  while (fi >> synset) {
-    getline(fi, lemma);
-    output.push_back(lemma);
-  }
-
-  fi.close();
-
-  return output;
-}
-
-void PrintOutputResult(const std::vector<float>& data, const std::vector<std::string>& synset) {
-  if (data.size() != synset.size()) {
-    std::cerr << "Result data and synset size do not match!" << std::endl;
-  }
-
-  float best_accuracy = 0.0;
-  std::size_t best_idx = 0;
-
-  for (std::size_t i = 0; i < data.size(); ++i) {
-    std::cout << "Accuracy[" << i << "] = " << std::setprecision(8) << data[i] << std::endl;
-
-    if (data[i] > best_accuracy) {
-      best_accuracy = data[i];
-      best_idx = i;
-    }
-  }
-
-  std::cout << "Best Result: " << trim(synset[best_idx]) << " (id=" << best_idx << ", " <<
-            "accuracy=" << std::setprecision(8) << best_accuracy << ")" << std::endl;
-}
-
-void predict(PredictorHandle pred_hnd, const std::vector<mx_float> &image_data,
-             NDListHandle nd_hnd, const std::string &synset_file, int i) {
-  auto image_size = image_data.size();
-  // Set Input Image
-  MXPredSetInput(pred_hnd, "data", image_data.data(), static_cast<mx_uint>(image_size));
-
-  // Do Predict Forward
-  MXPredForward(pred_hnd);
-
-  mx_uint output_index = 0;
-
-  mx_uint* shape = nullptr;
-  mx_uint shape_len;
-
-  // Get Output Result
-  MXPredGetOutputShape(pred_hnd, output_index, &shape, &shape_len);
-
-  std::size_t size = 1;
-  for (mx_uint i = 0; i < shape_len; ++i) { size *= shape[i]; }
-
-  std::vector<float> data(size);
-
-  MXPredGetOutput(pred_hnd, output_index, &(data[0]), static_cast<mx_uint>(size));
-
-  // Release NDList
-  if (nd_hnd) {
-    MXNDListFree(nd_hnd);
-  }
-
-  // Release Predictor
-  MXPredFree(pred_hnd);
-
-  // Synset path for your model, you have to modify it
-  auto synset = LoadSynset(synset_file);
-
-  // Print Output Data
-  PrintOutputResult(data, synset);
-}
-
-int main(int argc, char* argv[]) {
-  if (argc < 2) {
-    std::cout << "No test image here." << std::endl
-              << "Usage: ./image-classification-predict apple.jpg [num_threads]" << std::endl;
-    return EXIT_FAILURE;
-  }
-
-  std::string test_file(argv[1]);
-  int num_threads = 1;
-  if (argc == 3)
-    num_threads = std::atoi(argv[2]);
-
-  // Models path for your model, you have to modify it
-  std::string json_file = "model/Inception/Inception-BN-symbol.json";
-  std::string param_file = "model/Inception/Inception-BN-0126.params";
-  std::string synset_file = "model/Inception/synset.txt";
-  std::string nd_file = "model/Inception/mean_224.nd";
-
-  BufferFile json_data(json_file);
-  BufferFile param_data(param_file);
-
-  // Parameters
-  int dev_type = 1;  // 1: cpu, 2: gpu
-  int dev_id = 0;  // arbitrary.
-  mx_uint num_input_nodes = 1;  // 1 for feedforward
-  const char* input_key[1] = { "data" };
-  const char** input_keys = input_key;
-
-  // Image size and channels
-  int width = 224;
-  int height = 224;
-  int channels = 3;
-
-  const mx_uint input_shape_indptr[2] = { 0, 4 };
-  const mx_uint input_shape_data[4] = { 1,
-                                        static_cast<mx_uint>(channels),
-                                        static_cast<mx_uint>(height),
-                                        static_cast<mx_uint>(width) };
-
-  if (json_data.GetLength() == 0 || param_data.GetLength() == 0) {
-    return EXIT_FAILURE;
-  }
-
-  auto image_size = static_cast<std::size_t>(width * height * channels);
-
-  // Read Mean Data
-  const mx_float* nd_data = nullptr;
-  NDListHandle nd_hnd = nullptr;
-  BufferFile nd_buf(nd_file);
-
-  if (nd_buf.GetLength() > 0) {
-    mx_uint nd_index = 0;
-    mx_uint nd_len;
-    const mx_uint* nd_shape = nullptr;
-    const char* nd_key = nullptr;
-    mx_uint nd_ndim = 0;
-
-    MXNDListCreate(static_cast<const char*>(nd_buf.GetBuffer()),
-                   static_cast<int>(nd_buf.GetLength()),
-                   &nd_hnd, &nd_len);
-
-    MXNDListGet(nd_hnd, nd_index, &nd_key, &nd_data, &nd_shape, &nd_ndim);
-  }
-
-  // Read Image Data
-  std::vector<mx_float> image_data(image_size);
-
-  GetImageFile(test_file, image_data.data(), channels, cv::Size(width, height), nd_data);
-
-  if (num_threads == 1) {
-    // Create Predictor
-    PredictorHandle pred_hnd;
-    MXPredCreate(static_cast<const char*>(json_data.GetBuffer()),
-                 static_cast<const char*>(param_data.GetBuffer()),
-                 static_cast<int>(param_data.GetLength()),
-                 dev_type,
-                 dev_id,
-                 num_input_nodes,
-                 input_keys,
-                 input_shape_indptr,
-                 input_shape_data,
-                 &pred_hnd);
-    assert(pred_hnd);
-
-    predict(pred_hnd, image_data, nd_hnd, synset_file, 0);
-  } else {
-    // Create Predictor
-    std::vector<PredictorHandle> pred_hnds(num_threads, nullptr);
-    MXPredCreateMultiThread(static_cast<const char*>(json_data.GetBuffer()),
-                            static_cast<const char*>(param_data.GetBuffer()),
-                            static_cast<int>(param_data.GetLength()),
-                            dev_type,
-                            dev_id,
-                            num_input_nodes,
-                            input_keys,
-                            input_shape_indptr,
-                            input_shape_data,
-                            pred_hnds.size(),
-                            pred_hnds.data());
-#ifndef NDEBUG
-    for (auto hnd : pred_hnds)
-      assert(hnd);
-#endif
-
-    std::vector<std::thread> threads;
-    for (int i = 0; i < num_threads; i++)
-      threads.emplace_back(predict, pred_hnds[i], image_data, nd_hnd, synset_file, i);
-    for (int i = 0; i < num_threads; i++)
-      threads[i].join();
-  }
-  printf("run successfully\n");
-
-  return EXIT_SUCCESS;
-}
diff --git a/example/multi_threaded_inference/Makefile b/example/multi_threaded_inference/Makefile
index a58928b12759..6dba1178c14e 100644
--- a/example/multi_threaded_inference/Makefile
+++ b/example/multi_threaded_inference/Makefile
@@ -19,7 +19,6 @@ CFLAGS=-std=c++17 -g -Wno-unknown-pragmas -Wall -DMXNET_USE_CUDA=1 -DMXNET_USE_CUDNN=1 -DMXNET_USE_MKLDNN=1
 
 export MXNET_ROOT = `pwd`/../..
-export CPP_PACKAGE = $(MXNET_ROOT)/cpp-package
 
 CFLAGS += `pkg-config --cflags opencv`
 LDFLAGS += `pkg-config --libs opencv`
 
@@ -42,7 +41,7 @@ ifndef MKLDNN_INCLUDE_DIR
 #export MKLDNN_INCLUDE_DIR = $(MXNET_ROOT)/3rdparty/mkldnn/include
 endif
 
-CFLAGS += -I$(MXNET_ROOT)/include -I$(CPP_PACKAGE)/include -I$(USE_CUDA_PATH)/include -I$(MKLDNN_INCLUDE_DIR) -I$(MKLDNN_BUILD_DIR)/include
+CFLAGS += -I$(MXNET_ROOT)/include -I$(USE_CUDA_PATH)/include -I$(MKLDNN_INCLUDE_DIR) -I$(MKLDNN_BUILD_DIR)/include
 
 # If MXNET_LIB_DIR env variable set use that, otherwise defaults to MXNET_ROOT/build
 ifndef MXNET_LIB_DIR
diff --git a/include/mxnet/c_api.h b/include/mxnet/c_api.h
index d2e0d73234e2..153d8c2c8520 100644
--- a/include/mxnet/c_api.h
+++ b/include/mxnet/c_api.h
@@ -116,9 +116,6 @@ typedef void (*EngineAsyncFunc)(void*, void*, void*);
 typedef void (*EngineSyncFunc)(void*, void*);
 /*! \brief Callback to free the param for EngineAsyncFunc/EngineSyncFunc */
 typedef void (*EngineFuncParamDeleter)(void*);
-typedef void (*ExecutorMonitorCallback)(const char*,
-                                        NDArrayHandle,
-                                        void*);
 /*! \brief Monitor callback called at operator level for cached op */
 typedef void (*CachedOpMonitorCallback)(const char*,
                                         const char*,
@@ -2258,376 +2255,6 @@ MXNET_DLL int MXOptimizeForBackend(SymbolHandle sym_handle,
                                    char*** new_aux_names_handle);
 
-
-//--------------------------------------------
-// Part 4: Executor interface
-//--------------------------------------------
-/*!
- * \brief Delete the executor
- * \param handle the executor.
- * \return 0 when success, -1 when failure happens
- */
-MXNET_DLL int MXExecutorFree(ExecutorHandle handle);
-/*!
- * \brief Print the content of execution plan, used for debugging.
- * \param handle the executor.
- * \param out_str pointer to hold the output string of the printing.
- * \return 0 when success, -1 when failure happens
- */
-MXNET_DLL int MXExecutorPrint(ExecutorHandle handle, const char **out_str);
-/*!
- * \brief Executor forward method
- *
- * \param handle executor handle
- * \param is_train int value to indicate whether the forward pass is for training
- * \return 0 when success, -1 when failure happens
- */
-MXNET_DLL int MXExecutorForward(ExecutorHandle handle, int is_train);
-/*!
- * \brief Executor run backward
- *
- * \param handle executor handle
- * \param len length
- * \param head_grads NDArray handle for heads' gradient
- *
- * \return 0 when success, -1 when failure happens
- */
-MXNET_DLL int MXExecutorBackward(ExecutorHandle handle,
-                                 uint32_t len,
-                                 NDArrayHandle *head_grads);
-/*!
- * \brief Executor run backward
- *
- * \param handle executor handle
- * \param len length
- * \param head_grads NDArray handle for heads' gradient
- * \param is_train int value to indicate whether the backward pass is for training
- *
- * \return 0 when success, -1 when failure happens
- */
-MXNET_DLL int MXExecutorBackwardEx(ExecutorHandle handle,
-                                   uint32_t len,
-                                   NDArrayHandle *head_grads,
-                                   int is_train);
-/*!
- * \brief Get executor's head NDArray
- *
- * \param handle executor handle
- * \param out_size output NDArray vector size
- * \param out output NDArray handles
- * \return 0 when success, -1 when failure happens
- */
-MXNET_DLL int MXExecutorOutputs(ExecutorHandle handle,
-                                uint32_t *out_size,
-                                NDArrayHandle **out);
-
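Taken together, these removed entry points formed the basic training-step loop of the legacy executor. A hedged sketch of the call order they supported (assuming `exec` and `head_grad` handles were created elsewhere; error checking omitted):

```cpp
#include <cstdint>
#include <mxnet/c_api.h>

// One legacy training step: forward in train mode, backward, read outputs.
void train_step(ExecutorHandle exec, NDArrayHandle head_grad) {
  MXExecutorForward(exec, 1);              // is_train = 1
  MXExecutorBackward(exec, 1, &head_grad);

  uint32_t out_size = 0;
  NDArrayHandle* outs = nullptr;
  MXExecutorOutputs(exec, &out_size, &outs);  // head NDArrays of the graph
}
```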
-/*!
- * \brief Generate Executor from symbol
- *
- * \param symbol_handle symbol handle
- * \param dev_type device type
- * \param dev_id device id
- * \param len length
- * \param in_args in args array
- * \param arg_grad_store arg grads handle array
- * \param grad_req_type grad req array
- * \param aux_states_len length of auxiliary states
- * \param aux_states auxiliary states array
- * \param out output executor handle
- * \return 0 when success, -1 when failure happens
- */
-MXNET_DLL int MXExecutorBind(SymbolHandle symbol_handle,
-                             int dev_type,
-                             int dev_id,
-                             uint32_t len,
-                             NDArrayHandle *in_args,
-                             NDArrayHandle *arg_grad_store,
-                             uint32_t *grad_req_type,
-                             uint32_t aux_states_len,
-                             NDArrayHandle *aux_states,
-                             ExecutorHandle *out);
-/*!
- * \brief Generate Executor from symbol.
- *  This is an advanced function that allows specifying the group2ctx map.
- *  The user can annotate the "ctx_group" attribute to name each group.
- *
- * \param symbol_handle symbol handle
- * \param dev_type device type of default context
- * \param dev_id device id of default context
- * \param num_map_keys size of group2ctx map
- * \param map_keys keys of group2ctx map
- * \param map_dev_types device type of group2ctx map
- * \param map_dev_ids device id of group2ctx map
- * \param len length
- * \param in_args in args array
- * \param arg_grad_store arg grads handle array
- * \param grad_req_type grad req array
- * \param aux_states_len length of auxiliary states
- * \param aux_states auxiliary states array
- * \param out output executor handle
- * \return 0 when success, -1 when failure happens
- */
-MXNET_DLL int MXExecutorBindX(SymbolHandle symbol_handle,
-                              int dev_type,
-                              int dev_id,
-                              uint32_t num_map_keys,
-                              const char** map_keys,
-                              const int* map_dev_types,
-                              const int* map_dev_ids,
-                              uint32_t len,
-                              NDArrayHandle *in_args,
-                              NDArrayHandle *arg_grad_store,
-                              uint32_t *grad_req_type,
-                              uint32_t aux_states_len,
-                              NDArrayHandle *aux_states,
-                              ExecutorHandle *out);
-/*!
- * \brief Generate Executor from symbol.
- *  This is an advanced function that allows specifying the group2ctx map.
- *  The user can annotate the "ctx_group" attribute to name each group.
- *
- * \param symbol_handle symbol handle
- * \param dev_type device type of default context
- * \param dev_id device id of default context
- * \param num_map_keys size of group2ctx map
- * \param map_keys keys of group2ctx map
- * \param map_dev_types device type of group2ctx map
- * \param map_dev_ids device id of group2ctx map
- * \param len length
- * \param in_args in args array
- * \param arg_grad_store arg grads handle array
- * \param grad_req_type grad req array
- * \param aux_states_len length of auxiliary states
- * \param aux_states auxiliary states array
- * \param shared_exec input executor handle for memory sharing
- * \param out output executor handle
- * \return 0 when success, -1 when failure happens
- */
-MXNET_DLL int MXExecutorBindEX(SymbolHandle symbol_handle,
-                               int dev_type,
-                               int dev_id,
-                               uint32_t num_map_keys,
-                               const char** map_keys,
-                               const int* map_dev_types,
-                               const int* map_dev_ids,
-                               uint32_t len,
-                               NDArrayHandle *in_args,
-                               NDArrayHandle *arg_grad_store,
-                               uint32_t *grad_req_type,
-                               uint32_t aux_states_len,
-                               NDArrayHandle *aux_states,
-                               ExecutorHandle shared_exec,
-                               ExecutorHandle *out);
-/*! \brief DEPRECATED. Use MXExecutorSimpleBindEx instead.
- */ -MXNET_DLL int MXExecutorSimpleBind(SymbolHandle symbol_handle, - int dev_type, - int dev_id, - const uint32_t num_g2c_keys, - const char** g2c_keys, - const int* g2c_dev_types, - const int* g2c_dev_ids, - const uint32_t provided_grad_req_list_len, - const char** provided_grad_req_names, - const char** provided_grad_req_types, - const uint32_t num_provided_arg_shapes, - const char** provided_arg_shape_names, - const uint32_t* provided_arg_shape_data, - const uint32_t* provided_arg_shape_idx, - const uint32_t num_provided_arg_dtypes, - const char** provided_arg_dtype_names, - const int* provided_arg_dtypes, - const uint32_t num_provided_arg_stypes, - const char** provided_arg_stype_names, - const int* provided_arg_stypes, - const uint32_t num_shared_arg_names, - const char** shared_arg_name_list, - int* shared_buffer_len, - const char** shared_buffer_name_list, - NDArrayHandle* shared_buffer_handle_list, - const char*** updated_shared_buffer_name_list, - NDArrayHandle** updated_shared_buffer_handle_list, - uint32_t* num_in_args, - NDArrayHandle** in_args, - NDArrayHandle** arg_grads, - uint32_t* num_aux_states, - NDArrayHandle** aux_states, - ExecutorHandle shared_exec_handle, - ExecutorHandle* out); - - -MXNET_DLL int MXExecutorSimpleBindEx(SymbolHandle symbol_handle, - int dev_type, - int dev_id, - const uint32_t num_g2c_keys, - const char** g2c_keys, - const int* g2c_dev_types, - const int* g2c_dev_ids, - const uint32_t provided_grad_req_list_len, - const char** provided_grad_req_names, - const char** provided_grad_req_types, - const uint32_t num_provided_arg_shapes, - const char** provided_arg_shape_names, - const int* provided_arg_shape_data, - const uint32_t* provided_arg_shape_idx, - const uint32_t num_provided_arg_dtypes, - const char** provided_arg_dtype_names, - const int* provided_arg_dtypes, - const uint32_t num_provided_arg_stypes, - const char** provided_arg_stype_names, - const int* provided_arg_stypes, - const uint32_t num_shared_arg_names, - const char** shared_arg_name_list, - int* shared_buffer_len, - const char** shared_buffer_name_list, - NDArrayHandle* shared_buffer_handle_list, - const char*** updated_shared_buffer_name_list, - NDArrayHandle** updated_shared_buffer_handle_list, - uint32_t* num_in_args, - NDArrayHandle** in_args, - NDArrayHandle** arg_grads, - uint32_t* num_aux_states, - NDArrayHandle** aux_states, - ExecutorHandle shared_exec_handle, - ExecutorHandle* out); - - -MXNET_DLL int MXExecutorSimpleBindEx64(SymbolHandle symbol_handle, - int dev_type, - int dev_id, - const uint32_t num_g2c_keys, - const char** g2c_keys, - const int* g2c_dev_types, - const int* g2c_dev_ids, - const uint32_t provided_grad_req_list_len, - const char** provided_grad_req_names, - const char** provided_grad_req_types, - const uint32_t num_provided_arg_shapes, - const char** provided_arg_shape_names, - const int64_t* provided_arg_shape_data, - const uint32_t* provided_arg_shape_idx, - const uint32_t num_provided_arg_dtypes, - const char** provided_arg_dtype_names, - const int* provided_arg_dtypes, - const uint32_t num_provided_arg_stypes, - const char** provided_arg_stype_names, - const int* provided_arg_stypes, - const uint32_t num_shared_arg_names, - const char** shared_arg_name_list, - int* shared_buffer_len, - const char** shared_buffer_name_list, - NDArrayHandle* shared_buffer_handle_list, - const char*** updated_shared_buffer_name_list, - NDArrayHandle** updated_shared_buffer_handle_list, - uint32_t* num_in_args, - NDArrayHandle** in_args, - NDArrayHandle** arg_grads, - 
uint32_t* num_aux_states, - NDArrayHandle** aux_states, - ExecutorHandle shared_exec_handle, - ExecutorHandle* out); - - -/*! - * \brief DEPRECATED. Use MXExecutorReshapeEx instead. - * Return a new executor with the same symbol and shared memory, - * but different input/output shapes. - * - * \param partial_shaping Whether to allow changing the shape of unspecified arguments. - * \param allow_up_sizing Whether to allow allocating new ndarrays that's larger than the original. - * \param dev_type device type of default context - * \param dev_id device id of default context - * \param num_map_keys size of group2ctx map - * \param map_keys keys of group2ctx map - * \param map_dev_types device type of group2ctx map - * \param map_dev_ids device id of group2ctx map - * \param num_in_args length of in_args - * \param in_args in args array - * \param arg_grads arg grads handle array - * \param num_aux_states length of auxiliary states - * \param aux_states auxiliary states array - * \param shared_exec input executor handle for memory sharing - * \param out output executor handle - * \return a new executor - */ -MXNET_DLL int MXExecutorReshape(int partial_shaping, - int allow_up_sizing, - int dev_type, - int dev_id, - uint32_t num_map_keys, - const char** map_keys, - const int* map_dev_types, - const int* map_dev_ids, - const uint32_t num_provided_arg_shapes, - const char** provided_arg_shape_names, - const uint32_t* provided_arg_shape_data, - const uint32_t* provided_arg_shape_idx, - uint32_t* num_in_args, - NDArrayHandle** in_args, - NDArrayHandle** arg_grads, - uint32_t* num_aux_states, - NDArrayHandle** aux_states, - ExecutorHandle shared_exec, - ExecutorHandle *out); -/*! - * \brief Return a new executor with the same symbol and shared memory, - * but different input/output shapes. - * - * \param partial_shaping Whether to allow changing the shape of unspecified arguments. - * \param allow_up_sizing Whether to allow allocating new ndarrays that's larger than the original. - * \param dev_type device type of default context - * \param dev_id device id of default context - * \param num_map_keys size of group2ctx map - * \param map_keys keys of group2ctx map - * \param map_dev_types device type of group2ctx map - * \param map_dev_ids device id of group2ctx map - * \param num_in_args length of in_args - * \param in_args in args array - * \param arg_grads arg grads handle array - * \param num_aux_states length of auxiliary states - * \param aux_states auxiliary states array - * \param shared_exec input executor handle for memory sharing - * \param out output executor handle - * \return a new executor - */ -MXNET_DLL int MXExecutorReshapeEx(int partial_shaping, - int allow_up_sizing, - int dev_type, - int dev_id, - uint32_t num_map_keys, - const char** map_keys, - const int* map_dev_types, - const int* map_dev_ids, - const uint32_t num_provided_arg_shapes, - const char** provided_arg_shape_names, - const int* provided_arg_shape_data, - const uint32_t* provided_arg_shape_idx, - uint32_t* num_in_args, - NDArrayHandle** in_args, - NDArrayHandle** arg_grads, - uint32_t* num_aux_states, - NDArrayHandle** aux_states, - ExecutorHandle shared_exec, - ExecutorHandle *out); - -/*! - * \brief get optimized graph from graph executor - */ -MXNET_DLL int MXExecutorGetOptimizedSymbol(ExecutorHandle handle, - SymbolHandle *out); -/*! 
- * \brief set a callback to notify the completion of an operation
- */
-MXNET_DLL int MXExecutorSetMonitorCallback(ExecutorHandle handle,
-                                           ExecutorMonitorCallback callback,
-                                           void* callback_handle);
-
-/*!
- * \brief set a callback to notify the completion of an operation
- * \param monitor_all If true, monitor both input and output, otherwise monitor output only.
- */
-MXNET_DLL int MXExecutorSetMonitorCallbackEX(ExecutorHandle handle,
-                                             ExecutorMonitorCallback callback,
-                                             void *callback_handle, bool monitor_all);
 //--------------------------------------------
 // Part 5: IO Interface
 //--------------------------------------------
diff --git a/include/mxnet/c_predict_api.h b/include/mxnet/c_predict_api.h
deleted file mode 100644
index 7769664a9783..000000000000
--- a/include/mxnet/c_predict_api.h
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * Copyright (c) 2015 by Contributors
- * \file c_predict_api.h
- * \brief C predict API of mxnet, contains a minimum API to run prediction.
- *  This file is self-contained and does not depend on any other files.
- */
-#ifndef MXNET_C_PREDICT_API_H_
-#define MXNET_C_PREDICT_API_H_
-
-/*! \brief Inhibit C++ name-mangling for MXNet functions. */
-#ifdef __cplusplus
-extern "C" {
-#endif  // __cplusplus
-
-#ifdef _WIN32
-#ifdef MXNET_EXPORTS
-#define MXNET_DLL __declspec(dllexport)
-#else
-#define MXNET_DLL __declspec(dllimport)
-#endif
-#else
-#define MXNET_DLL
-#endif
-
-/*! \brief manually define unsigned int */
-typedef uint32_t mx_uint;
-/*! \brief manually define float */
-typedef float mx_float;
-/*! \brief handle to Predictor */
-typedef void *PredictorHandle;
-/*! \brief handle to NDArray list */
-typedef void *NDListHandle;
-/*! \brief handle to NDArray */
-typedef void *NDArrayHandle;
-/*! \brief callback used for adding monitoring to nodes in the graph */
-typedef void (*PredMonitorCallback)(const char*,
-                                    NDArrayHandle,
-                                    void*);
-
-/*!
- * \brief Get the last error that happened.
- * \return The last error happened at the predictor.
- */
-MXNET_DLL const char* MXGetLastError();
-
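Every function below follows the same convention: return 0 on success, -1 on failure, with details available from `MXGetLastError()`. A hedged sketch of the usual checking wrapper (the `CHECK_MX` macro is illustrative only, not part of this header):

```cpp
#include <cstdio>
#include <mxnet/c_predict_api.h>

// Illustrative error-check wrapper: print the predictor's last error and
// bail out whenever a C predict API call reports failure.
#define CHECK_MX(call)                                 \
  do {                                                 \
    if ((call) != 0) {                                 \
      std::fprintf(stderr, "%s\n", MXGetLastError());  \
      return -1;                                       \
    }                                                  \
  } while (0)
```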
-/*!
- * \brief create a predictor
- * \param symbol_json_str The JSON string of the symbol.
- * \param param_bytes The in-memory raw bytes of parameter ndarray file.
- * \param param_size The size of parameter ndarray file.
- * \param dev_type The device type, 1: cpu, 2: gpu
- * \param dev_id The device id of the predictor.
- * \param num_input_nodes Number of input nodes to the net.
- *    For feedforward net, this is 1.
- * \param input_keys The name of the input argument.
- *    For feedforward net, this is {"data"}
- * \param input_shape_indptr Index pointer of shapes of each input node.
- *    The length of this array = num_input_nodes + 1.
- *    For feedforward net that takes 4 dimensional input, this is {0, 4}.
- * \param input_shape_data A flattened data of shapes of each input node.
- *    For feedforward net that takes 4 dimensional input, this is the shape data.
- * \param out The created predictor handle.
- * \return 0 when success, -1 when failure.
- */
-MXNET_DLL int MXPredCreate(const char* symbol_json_str,
-                           const void* param_bytes,
-                           int param_size,
-                           int dev_type, int dev_id,
-                           uint32_t num_input_nodes,
-                           const char** input_keys,
-                           const uint32_t* input_shape_indptr,
-                           const uint32_t* input_shape_data,
-                           PredictorHandle* out);
-
-/*!
- * \brief create a predictor
- * \param symbol_json_str The JSON string of the symbol.
- * \param param_bytes The in-memory raw bytes of parameter ndarray file.
- * \param param_size The size of parameter ndarray file.
- * \param dev_type The device type, 1: cpu, 2: gpu
- * \param dev_id The device id of the predictor.
- * \param num_input_nodes Number of input nodes to the net.
- *    For feedforward net, this is 1.
- * \param input_keys The name of the input argument.
- *    For feedforward net, this is {"data"}
- * \param input_shape_indptr Index pointer of shapes of each input node.
- *    The length of this array = num_input_nodes + 1.
- *    For feedforward net that takes 4 dimensional input, this is {0, 4}.
- * \param input_shape_data A flattened data of shapes of each input node.
- *    For feedforward net that takes 4 dimensional input, this is the shape data.
- * \param num_provided_arg_dtypes
- *    The length of provided_arg_dtypes.
- * \param provided_arg_dtype_names
- *    The names of the args for which dtypes are provided.
- * \param provided_arg_dtypes
- *    The dtypes provided.
- * \param out The created predictor handle.
- * \return 0 when success, -1 when failure.
- */
-MXNET_DLL int MXPredCreateEx(const char* symbol_json_str,
-                             const void* param_bytes,
-                             int param_size,
-                             int dev_type, int dev_id,
-                             const uint32_t num_input_nodes,
-                             const char** input_keys,
-                             const uint32_t* input_shape_indptr,
-                             const uint32_t* input_shape_data,
-                             const uint32_t num_provided_arg_dtypes,
-                             const char** provided_arg_dtype_names,
-                             const int* provided_arg_dtypes,
-                             PredictorHandle* out);
-
-/*!
- * \brief create a predictor with customized outputs
- * \param symbol_json_str The JSON string of the symbol.
- * \param param_bytes The in-memory raw bytes of parameter ndarray file.
- * \param param_size The size of parameter ndarray file.
- * \param dev_type The device type, 1: cpu, 2: gpu
- * \param dev_id The device id of the predictor.
- * \param num_input_nodes Number of input nodes to the net.
- *    For feedforward net, this is 1.
- * \param input_keys The name of the input argument.
- *    For feedforward net, this is {"data"}
- * \param input_shape_indptr Index pointer of shapes of each input node.
- *    The length of this array = num_input_nodes + 1.
- *    For feedforward net that takes 4 dimensional input, this is {0, 4}.
- * \param input_shape_data A flattened data of shapes of each input node.
- *    For feedforward net that takes 4 dimensional input, this is the shape data.
- * \param num_output_nodes Number of output nodes of the net.
- * \param output_keys The name of the output argument.
- *    For example {"global_pool"}
- * \param out The created predictor handle.
- * \return 0 when success, -1 when failure.
- */ - -MXNET_DLL int MXPredCreatePartialOut(const char* symbol_json_str, - const void* param_bytes, - int param_size, - int dev_type, int dev_id, - uint32_t num_input_nodes, - const char** input_keys, - const uint32_t* input_shape_indptr, - const uint32_t* input_shape_data, - uint32_t num_output_nodes, - const char** output_keys, - PredictorHandle* out); - -/*! - * \brief create predictors for multiple threads. One predictor for a thread. - * \param symbol_json_str The JSON string of the symbol. - * \param param_bytes The in-memory raw bytes of parameter ndarray file. - * \param param_size The size of parameter ndarray file. - * \param dev_type The device type, 1: cpu, 2:gpu - * \param dev_id The device id of the predictor. - * \param num_input_nodes Number of input nodes to the net, - * For feedforward net, this is 1. - * \param input_keys The name of input argument. - * For feedforward net, this is {"data"} - * \param input_shape_indptr Index pointer of shapes of each input node. - * The length of this array = num_input_nodes + 1. - * For feedforward net that takes 4 dimensional input, this is {0, 4}. - * \param input_shape_data A flattened data of shapes of each input node. - * For feedforward net that takes 4 dimensional input, this is the shape data. - * \param num_threads The number of threads that we'll run the predictors. - * \param out An array of created predictor handles. The array has to be large - * enough to keep `num_threads` predictors. - * \return 0 when success, -1 when failure. - */ -MXNET_DLL int MXPredCreateMultiThread(const char* symbol_json_str, - const void* param_bytes, - int param_size, - int dev_type, int dev_id, - uint32_t num_input_nodes, - const char** input_keys, - const uint32_t* input_shape_indptr, - const uint32_t* input_shape_data, - int num_threads, - PredictorHandle* out); - -/*! - * \brief Change the input shape of an existing predictor. - * \param num_input_nodes Number of input nodes to the net, - * For feedforward net, this is 1. - * \param input_keys The name of input argument. - * For feedforward net, this is {"data"} - * \param input_shape_indptr Index pointer of shapes of each input node. - * The length of this array = num_input_nodes + 1. - * For feedforward net that takes 4 dimensional input, this is {0, 4}. - * \param input_shape_data A flattened data of shapes of each input node. - * For feedforward net that takes 4 dimensional input, this is the shape data. - * \param handle The original predictor handle. - * \param out The reshaped predictor handle. - * \return 0 when success, -1 when failure. - */ -MXNET_DLL int MXPredReshape(uint32_t num_input_nodes, - const char** input_keys, - const uint32_t* input_shape_indptr, - const uint32_t* input_shape_data, - PredictorHandle handle, - PredictorHandle* out); -/*! - * \brief Get the shape of output node. - * The returned shape_data and shape_ndim is only valid before next call to MXPred function. - * \param handle The handle of the predictor. - * \param index The index of output node, set to 0 if there is only one output. - * \param shape_data Used to hold pointer to the shape data - * \param shape_ndim Used to hold shape dimension. - * \return 0 when success, -1 when failure. - */ -MXNET_DLL int MXPredGetOutputShape(PredictorHandle handle, - uint32_t index, - uint32_t** shape_data, - uint32_t* shape_ndim); - -/*! - * \brief Get the dtype of output node. - * The returned data type is only valid before next call to MXPred function. - * \param handle The handle of the predictor. 
- * \param out_index The index of the output node, set to 0 if there is only one output.
- * \param out_dtype The dtype of the output node.
- */
-MXNET_DLL int MXPredGetOutputType(PredictorHandle handle,
-                                  uint32_t out_index,
-                                  int* out_dtype);
-
-/*!
- * \brief Set the input data of predictor.
- * \param handle The predictor handle.
- * \param key The name of input node to set.
- *    For feedforward net, this is "data".
- * \param data The pointer to the data to be set, with the shape specified in MXPredCreate.
- * \param size The size of data array, used for safety check.
- * \return 0 when success, -1 when failure.
- */
-MXNET_DLL int MXPredSetInput(PredictorHandle handle,
-                             const char* key,
-                             const float* data,
-                             uint32_t size);
-/*!
- * \brief Run a forward pass to get the output.
- * \param handle The handle of the predictor.
- * \return 0 when success, -1 when failure.
- */
-MXNET_DLL int MXPredForward(PredictorHandle handle);
-/*!
- * \brief Run an interactive forward pass to get the output.
- *  This is helpful for displaying the progress of prediction, which can be slow.
- *  User must call PartialForward from step=0, and keep increasing it until step_left=0.
- * \code
- * int step_left = 1;
- * for (int step = 0; step_left != 0; ++step) {
- *    MXPredPartialForward(handle, step, &step_left);
- *    printf("Current progress [%d/%d]\n", step, step + step_left + 1);
- * }
- * \endcode
- * \param handle The handle of the predictor.
- * \param step The current step to run forward on.
- * \param step_left The number of steps left.
- * \return 0 when success, -1 when failure.
- */
-MXNET_DLL int MXPredPartialForward(PredictorHandle handle, int step, int* step_left);
-/*!
- * \brief Get the output value of prediction.
- * \param handle The handle of the predictor.
- * \param index The index of output node, set to 0 if there is only one output.
- * \param data User allocated data to hold the output.
- * \param size The size of data array, used for safety check.
- * \return 0 when success, -1 when failure.
- */
-MXNET_DLL int MXPredGetOutput(PredictorHandle handle,
-                              uint32_t index,
-                              float* data,
-                              uint32_t size);
-/*!
- * \brief Free a predictor handle.
- * \param handle The handle of the predictor.
- * \return 0 when success, -1 when failure.
- */
-MXNET_DLL int MXPredFree(PredictorHandle handle);
-/*!
- * \brief Create an NDArray List by loading from an ndarray file.
- *  This can be used to load a mean image file.
- * \param nd_file_bytes The byte contents of nd file to be loaded.
- * \param nd_file_size The size of the nd file to be loaded.
- * \param out The output NDListHandle.
- * \param out_length Length of the list.
- * \return 0 when success, -1 when failure.
- */
-MXNET_DLL int MXNDListCreate(const char* nd_file_bytes,
-                             int nd_file_size,
-                             NDListHandle *out,
-                             uint32_t* out_length);
-/*!
- * \brief Get an element from the list.
- * \param handle The handle to the NDArray list.
- * \param index The index in the list.
- * \param out_key The output key of the item.
- * \param out_data The data region of the item.
- * \param out_shape The shape of the item.
- * \param out_ndim The number of dimensions in the shape.
- * \return 0 when success, -1 when failure.
- */
-MXNET_DLL int MXNDListGet(NDListHandle handle,
-                          uint32_t index,
-                          const char** out_key,
-                          const float** out_data,
-                          const uint32_t** out_shape,
-                          uint32_t* out_ndim);
-
-/*!
- * \brief set a call back to notify the completion of operation and allow for - * additional monitoring - */ -MXNET_DLL int MXPredSetMonitorCallback(PredictorHandle handle, - PredMonitorCallback callback, - void* callback_handle, - bool monitor_all); -/*! - * \brief Free a MXAPINDList - * \param handle The handle of the MXAPINDList. - * \return 0 when success, -1 when failure. - */ -MXNET_DLL int MXNDListFree(NDListHandle handle); - -#ifdef __cplusplus -} -#endif // __cplusplus - -#endif // MXNET_C_PREDICT_API_H_ diff --git a/julia/.gitattributes b/julia/.gitattributes deleted file mode 100644 index 4b76ca8606cb..000000000000 --- a/julia/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -NEWS.md merge=union diff --git a/julia/.gitignore b/julia/.gitignore deleted file mode 100644 index e7b35fa85d96..000000000000 --- a/julia/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -*.jl.cov -*.jl.*.cov -*.jl.mem -*.pyc -.ipynb_checkpoints -data -deps/src -deps/usr -deps/deps.jl -.vscode -/Manifest.toml diff --git a/julia/LICENSE.md b/julia/LICENSE.md deleted file mode 100644 index 3e2c5a2673b8..000000000000 --- a/julia/LICENSE.md +++ /dev/null @@ -1,196 +0,0 @@ - - - - - - - - - - - - - - - - - -The MXNet.jl package is licensed under version 2.0 of the Apache License: - -> Copyright (c) 2015-2018: -> * Chiyuan Zhang -> -> Apache License -> Version 2.0, January 2004 -> http://www.apache.org/licenses/ -> -> TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -> -> 1. Definitions. -> -> "License" shall mean the terms and conditions for use, reproduction, -> and distribution as defined by Sections 1 through 9 of this document. -> -> "Licensor" shall mean the copyright owner or entity authorized by -> the copyright owner that is granting the License. -> -> "Legal Entity" shall mean the union of the acting entity and all -> other entities that control, are controlled by, or are under common -> control with that entity. For the purposes of this definition, -> "control" means (i) the power, direct or indirect, to cause the -> direction or management of such entity, whether by contract or -> otherwise, or (ii) ownership of fifty percent (50%) or more of the -> outstanding shares, or (iii) beneficial ownership of such entity. -> -> "You" (or "Your") shall mean an individual or Legal Entity -> exercising permissions granted by this License. -> -> "Source" form shall mean the preferred form for making modifications, -> including but not limited to software source code, documentation -> source, and configuration files. -> -> "Object" form shall mean any form resulting from mechanical -> transformation or translation of a Source form, including but -> not limited to compiled object code, generated documentation, -> and conversions to other media types. -> -> "Work" shall mean the work of authorship, whether in Source or -> Object form, made available under the License, as indicated by a -> copyright notice that is included in or attached to the work -> (an example is provided in the Appendix below). -> -> "Derivative Works" shall mean any work, whether in Source or Object -> form, that is based on (or derived from) the Work and for which the -> editorial revisions, annotations, elaborations, or other modifications -> represent, as a whole, an original work of authorship. For the purposes -> of this License, Derivative Works shall not include works that remain -> separable from, or merely link (or bind by name) to the interfaces of, -> the Work and Derivative Works thereof. 
-> -> "Contribution" shall mean any work of authorship, including -> the original version of the Work and any modifications or additions -> to that Work or Derivative Works thereof, that is intentionally -> submitted to Licensor for inclusion in the Work by the copyright owner -> or by an individual or Legal Entity authorized to submit on behalf of -> the copyright owner. For the purposes of this definition, "submitted" -> means any form of electronic, verbal, or written communication sent -> to the Licensor or its representatives, including but not limited to -> communication on electronic mailing lists, source code control systems, -> and issue tracking systems that are managed by, or on behalf of, the -> Licensor for the purpose of discussing and improving the Work, but -> excluding communication that is conspicuously marked or otherwise -> designated in writing by the copyright owner as "Not a Contribution." -> -> "Contributor" shall mean Licensor and any individual or Legal Entity -> on behalf of whom a Contribution has been received by Licensor and -> subsequently incorporated within the Work. -> -> 2. Grant of Copyright License. Subject to the terms and conditions of -> this License, each Contributor hereby grants to You a perpetual, -> worldwide, non-exclusive, no-charge, royalty-free, irrevocable -> copyright license to reproduce, prepare Derivative Works of, -> publicly display, publicly perform, sublicense, and distribute the -> Work and such Derivative Works in Source or Object form. -> -> 3. Grant of Patent License. Subject to the terms and conditions of -> this License, each Contributor hereby grants to You a perpetual, -> worldwide, non-exclusive, no-charge, royalty-free, irrevocable -> (except as stated in this section) patent license to make, have made, -> use, offer to sell, sell, import, and otherwise transfer the Work, -> where such license applies only to those patent claims licensable -> by such Contributor that are necessarily infringed by their -> Contribution(s) alone or by combination of their Contribution(s) -> with the Work to which such Contribution(s) was submitted. If You -> institute patent litigation against any entity (including a -> cross-claim or counterclaim in a lawsuit) alleging that the Work -> or a Contribution incorporated within the Work constitutes direct -> or contributory patent infringement, then any patent licenses -> granted to You under this License for that Work shall terminate -> as of the date such litigation is filed. -> -> 4. Redistribution. 
You may reproduce and distribute copies of the -> Work or Derivative Works thereof in any medium, with or without -> modifications, and in Source or Object form, provided that You -> meet the following conditions: -> -> (a) You must give any other recipients of the Work or -> Derivative Works a copy of this License; and -> -> (b) You must cause any modified files to carry prominent notices -> stating that You changed the files; and -> -> (c) You must retain, in the Source form of any Derivative Works -> that You distribute, all copyright, patent, trademark, and -> attribution notices from the Source form of the Work, -> excluding those notices that do not pertain to any part of -> the Derivative Works; and -> -> (d) If the Work includes a "NOTICE" text file as part of its -> distribution, then any Derivative Works that You distribute must -> include a readable copy of the attribution notices contained -> within such NOTICE file, excluding those notices that do not -> pertain to any part of the Derivative Works, in at least one -> of the following places: within a NOTICE text file distributed -> as part of the Derivative Works; within the Source form or -> documentation, if provided along with the Derivative Works; or, -> within a display generated by the Derivative Works, if and -> wherever such third-party notices normally appear. The contents -> of the NOTICE file are for informational purposes only and -> do not modify the License. You may add Your own attribution -> notices within Derivative Works that You distribute, alongside -> or as an addendum to the NOTICE text from the Work, provided -> that such additional attribution notices cannot be construed -> as modifying the License. -> -> You may add Your own copyright statement to Your modifications and -> may provide additional or different license terms and conditions -> for use, reproduction, or distribution of Your modifications, or -> for any such Derivative Works as a whole, provided Your use, -> reproduction, and distribution of the Work otherwise complies with -> the conditions stated in this License. -> -> 5. Submission of Contributions. Unless You explicitly state otherwise, -> any Contribution intentionally submitted for inclusion in the Work -> by You to the Licensor shall be under the terms and conditions of -> this License, without any additional terms or conditions. -> Notwithstanding the above, nothing herein shall supersede or modify -> the terms of any separate license agreement you may have executed -> with Licensor regarding such Contributions. -> -> 6. Trademarks. This License does not grant permission to use the trade -> names, trademarks, service marks, or product names of the Licensor, -> except as required for reasonable and customary use in describing the -> origin of the Work and reproducing the content of the NOTICE file. -> -> 7. Disclaimer of Warranty. Unless required by applicable law or -> agreed to in writing, Licensor provides the Work (and each -> Contributor provides its Contributions) on an "AS IS" BASIS, -> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -> implied, including, without limitation, any warranties or conditions -> of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -> PARTICULAR PURPOSE. You are solely responsible for determining the -> appropriateness of using or redistributing the Work and assume any -> risks associated with Your exercise of permissions under this License. -> -> 8. Limitation of Liability. 
In no event and under no legal theory, -> whether in tort (including negligence), contract, or otherwise, -> unless required by applicable law (such as deliberate and grossly -> negligent acts) or agreed to in writing, shall any Contributor be -> liable to You for damages, including any direct, indirect, special, -> incidental, or consequential damages of any character arising as a -> result of this License or out of the use or inability to use the -> Work (including but not limited to damages for loss of goodwill, -> work stoppage, computer failure or malfunction, or any and all -> other commercial damages or losses), even if such Contributor -> has been advised of the possibility of such damages. -> -> 9. Accepting Warranty or Additional Liability. While redistributing -> the Work or Derivative Works thereof, You may choose to offer, -> and charge a fee for, acceptance of support, warranty, indemnity, -> or other liability obligations and/or rights consistent with this -> License. However, in accepting such obligations, You may act only -> on Your own behalf and on Your sole responsibility, not on behalf -> of any other Contributor, and only if You agree to indemnify, -> defend, and hold each Contributor harmless for any liability -> incurred by, or claims asserted against, such Contributor by reason -> of your accepting any such warranty or additional liability. diff --git a/julia/NEWS.md b/julia/NEWS.md deleted file mode 100644 index 8f00b599f551..000000000000 --- a/julia/NEWS.md +++ /dev/null @@ -1,731 +0,0 @@ - - - - - - - - - - - - - - - - - -# v1.7.0 -# v2.0.0 - - -# v1.6.0 - -* Add an abstract type `AbstractMXError` as the parent type for all MXNet-related - API errors. (#16235) - -* Porting more `context` functions from Python. - * `num_gpus()` (#16236) - * `gpu_memory_info()` (#16324) - - -# v1.5.0 - -* Following material from `mx` module got exported (#TBD): - * `NDArray` - * `context()` - * `expand_dims()` - * `@inplace` - * `σ()` - * `sigmoid()` - * `relu()` - * `softmax()` - * `log_softmax()` - * `broadcast_to()` - * `broadcast_axis()` - * `broadcast_axes()` - - * `SymbolicNode` - * `Variable` - * `@var` - - * `Context` - * `cpu()` - * `gpu()` - - * `AbstractModel` - * `FeedForward` - * `predict()` - - * `MLP` - - * `Executor` - * `bind()` - * `simple_bind()` - * `forward()` - * `backward()` - - * `AbstractEvalMetric` - * `ACE` - * `Accuracy` - * `MSE` - * `MultiACE` - * `MultiMetric` - * `NMSE` - * `SeqMetric` - - * `KVStore` - * `init!()` - * `push!()` - * `pull!()` - * `barrier()` - * `set_updater()` - * `set_optimizer()` - - * `AbstractInitializer` - * `UniformInitializer` - * `NormalInitializer` - * `XavierInitializer` - - * `AbstractOptimizer` - * `AdaDelta` - * `AdaGrad` - * `ADAM` - * `AdaMax` - * `Nadam` - * `RMSProp` - * `SGD` - * `getupdater()` - * `normgrad!()` - * `update!()` - - * `AbstractDataProvider` - * `AbstractDataBatch` - * `ArrayDataProvider` - * `ArrayDataBatch` - - * `to_graphviz()` - -## New APIs - -### `SymbolicNode` - -* `mx.get_children` for exploring the graph programmatically. (#TBD) - -* A handy macro `@mx.var` for creating `mx.Variable`. (#TBD) - - ```julia - julia> x = @mx.var x - MXNet.mx.SymbolicNode x - - julia> x, y, z = @mx.var x y z - (MXNet.mx.SymbolicNode x, MXNet.mx.SymbolicNode y, MXNet.mx.SymbolicNode z) - ``` - -### `NDArray` - -* A handy constructor: `NDArray(Type, AbstractArray)` is added. (#TBD) - - E.g. 
- ```julia - julia> NDArray([1, 2, 3]) - 3-element mx.NDArray{Int64,1} @ CPU0: - 1 - 2 - 3 - - julia> NDArray(Float32, [1, 2, 3]) - 3-element mx.NDArray{Float32,1} @ CPU0: - 1.0 - 2.0 - 3.0 - ``` - -* `mx.empty` is deprecated and replaced by the `UndefInitializer` constructor. (#TBD) - - E.g. - ```julia - julia> NDArray(undef, 2, 5) - 2×5 NDArray{Float32,2} @ CPU0: - -21260.344f0 1.674986f19 0.00016893122f0 1.8363f-41 0.0f0 - 3.0763f-41 1.14321726f27 4.24219f-8 0.0f0 0.0f0 - ``` - -* A port of Python's `autograd` for `NDArray` (#274) - -* `size(x, dims...)` is supported now. (#TBD) - - ```julia - julia> x = mx.NDArray([1 2; 3 4; 5 6]) - 3×2 mx.NDArray{Int64,2} @ CPU0: - 1 2 - 3 4 - 5 6 - - julia> size(x, 1, 2, 3, 4) - (3, 2, 1, 1) - - ``` - -* `copy(AbstractArray, context)` is implemented now. (#TBD) - - ```julia - julia> copy(1:4, mx.cpu()) - 4 mx.NDArray{Int64,1} @ CPU0: - 1 - 2 - 3 - 4 - - julia> copy(1.:4, mx.cpu()) - 4 mx.NDArray{Float64,1} @ CPU0: - 1.0 - 2.0 - 3.0 - 4.0 - ``` - -* `copy!(NDArray, AbstractArray)` is implemented now. (#TBD) - - ```julia - julia> x = mx.zeros(3) - 3-element mx.NDArray{Float32,1} @ CPU0: - 0.0 - 0.0 - 0.0 - - julia> copy!(x, 3:5) - 3-element mx.NDArray{Float32,1} @ CPU0: - 3.0 - 4.0 - 5.0 - ``` - -* `Base.ones(x::NDArray)` for creating an `NDArray` filled with ones. (#TBD) - -* `Base.zeros(x::NDArray)` for creating an `NDArray` filled with zeros. (#TBD) - -* Modulo operator. (#TBD) - - ```julia - x = NDArray(...) - y = NDArray(...) - - x .% y - x .% 2 - 2 .% x - ``` - -* Inplace modulo operators, `mod_from!` and `rmod_from!`. (#TBD) - - ```julia - mod_from!(x, y) - mod_from!(x, 2) - rmod_from!(2, x) - ``` - -* `cat`, `vcat`, `hcat` are implemented. (#TBD) - - E.g. `hcat` - ```julia - julia> x - 4 mx.NDArray{Float64,1} @ CPU0: - 1.0 - 2.0 - 3.0 - 4.0 - - julia> y - 4 mx.NDArray{Float64,1} @ CPU0: - 2.0 - 4.0 - 6.0 - 8.0 - - julia> [x y] - 4×2 mx.NDArray{Float64,2} @ CPU0: - 1.0 2.0 - 2.0 4.0 - 3.0 6.0 - 4.0 8.0 - ``` - -* Transposing a column `NDArray` to a row `NDArray` is supported now. (#TBD) - - ```julia - julia> x = NDArray(Float32[1, 2, 3, 4]) - 4 mx.NDArray{Float32,1} @ CPU0: - 1.0 - 2.0 - 3.0 - 4.0 - - julia> x' - 1×4 mx.NDArray{Float32,2} @ CPU0: - 1.0 2.0 3.0 4.0 - ``` - -* Matrix/tensor multiplication is supported now. (#TBD) - - ```julia - julia> x - 2×3 mx.NDArray{Float32,2} @ CPU0: - 1.0 2.0 3.0 - 4.0 5.0 6.0 - - julia> y - 3 mx.NDArray{Float32,1} @ CPU0: - -1.0 - -2.0 - -3.0 - - julia> x * y - 2 mx.NDArray{Float32,1} @ CPU0: - -14.0 - -32.0 - ``` - -## API Changes - -### `NDArray` - -* Broadcasting along dimensions is supported for the following operators, - and the original `mx.broadcast_*` APIs are deprecated - (#401) (#402) (#403): - - * `+` - * `-` - * `*` - * `/` - * `%` - * `^` - * `==` - * `!=` - * `>` - * `>=` - * `<` - * `<=` - * `max` - * `min` - - ```julia - julia> x = NDArray([1 2 3; - 4 5 6]) - 2×3 mx.NDArray{Int64,2} @ CPU0: - 1 2 3 - 4 5 6 - - julia> y = NDArray([1; - 10]) - 2-element mx.NDArray{Int64,1} @ CPU0: - 1 - 10 - - julia> x .+ y - 2×3 mx.NDArray{Int64,2} @ CPU0: - 2 3 4 - 14 15 16 - ``` - -* Please use the dot-call syntax for the following trigonometric functions; - see the combined sketch at the end of this section. - Also, the `arc*` functions have been renamed for consistency with `Base`. - (#TBD) - - * `sin.(x)` - * `cos.(x)` - * `tan.(x)` - * `arcsin(x)` -> `asin.(x)` - * `arccos(x)` -> `acos.(x)` - * `arctan(x)` -> `atan.(x)` - -* Please use the dot-call syntax for the following hyperbolic functions. - Also, the `arc*` functions have been renamed for consistency with `Base`. - (#TBD) - - * `sinh.(x)` - * `cosh.(x)` - * `tanh.(x)` - * `arcsinh(x)` -> `asinh.(x)` - * `arccosh(x)` -> `acosh.(x)` - * `arctanh(x)` -> `atanh.(x)` - -* Please use the dot-call syntax for the following activation functions. - Also, the `dim` of `softmax` and `log_softmax` has been fixed - to follow Julia's column-major convention. - (#TBD) - - * `σ.(x)` - * `relu.(x)` - * `softmax.(x, [dim = ndims(x)])` - * `log_softmax.(x, [dim = ndims(x)])` - -* `rand`, `rand!`, `randn`, `randn!` are more Base-like now (#TBD). - - ```julia - julia> mx.rand(2, 3) - 2×3 mx.NDArray{Float32,2} @ CPU0: - 0.631961 0.324175 0.0762663 - 0.285366 0.395292 0.074995 - - julia> mx.rand(2, 3; low = 1, high = 10) - 2×3 mx.NDArray{Float32,2} @ CPU0: - 7.83884 7.85793 7.64791 - 7.68646 8.56082 8.42189 - ``` - - ```julia - julia> mx.randn(2, 3) - 2×3 mx.NDArray{Float32,2} @ CPU0: - 0.962853 0.424535 -0.320123 - 0.478113 1.72886 1.72287 - - julia> mx.randn(2, 3, μ = 100) - 2×3 mx.NDArray{Float32,2} @ CPU0: - 99.5635 100.483 99.888 - 99.9889 100.533 100.072 - ``` - -* The signature of `clip` has changed, and it has been renamed to `clamp`; - it no longer requires keyword arguments. - (#TBD) - - Before: `clip(x, a_min = -4, a_max = 4)` - After: `clamp(x, -4, 4)`
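The dot-call renames above can be tied together in one minimal, hypothetical sketch (the variable `x` is illustrative and not part of the original changelog):

```julia
x = mx.rand(3, 4)   # any NDArray

asin.(x)        # was arcsin(x); likewise acos./atan. and the hyperbolic asinh./acosh./atanh.
relu.(x)        # activation functions use dot-call as well
softmax.(x)     # dim defaults to ndims(x)
softmax.(x, 1)  # or pass dim explicitly, in Julia's column-major style
```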
- -### Optimizer - -We overhauled the optimizer APIs, introducing breaking changes. -There is a lot of renaming, and we have tried to increase flexibility by -decoupling the optimizers from the high-level API, so they can be used -without understanding the implementation details of `fit!`. - -See #396. - -* All the keyword arguments of the optimizers have been renamed - (a combined sketch follows at the end of this section). - Now we have more elegant keyword arguments than Python's, - thanks to the good Unicode support in Julia's REPL and editor plugins. - *These are breaking changes, no deprecation warning.* - - | old | new | comment | - |---------------------------|-----------|--------------------------------| - | `opts.lr` | `η` | type `\eta` in REPL | - | `opts.momentum` | `μ` | type `\mu` in REPL | - | `opts.grad_clip` | `clip` | type `\nablac` in REPL | - | `opts.weight_decay` | `λ` | type `\lambda` in REPL | - | `opts.lr_schedular` | `η_sched` | type `\eta_sched` in REPL | - | `opts.momentum_schedular` | `μ_sched` | type `\mu_sched` in REPL | - - For instance, one used to access the learning rate via `SGD().opts.lr`; - now it is `SGD().η`. - -* New keyword argument `scale` for gradient rescaling. - - Docstring: - ``` - If != 0, multiply the gradient with `∇r` before updating. - Often chosen to be `1.0 / batch_size`. - If left at the default, a high-level API like `fit!` will set it to - `1.0 / batch_size`, since `fit!` knows the `batch_size`. - ``` - -* Keyword arguments of `NadamScheduler` have been renamed. - *This is a breaking change, no deprecation warning.* - - * Before - - ```julia - NadamScheduler(; mu0 = 0.99, delta = 0.004, gamma = 0.5, alpha = 0.96) - ``` - - * After - - ```julia - NadamScheduler(; μ = 0.99, δ = 0.004, γ = 0.5, α = 0.96) - ``` - -* The attribute `optimizer.state` is removed. - `OptimizationState` is only used by high-level abstractions, like `fit!`. - -* `LearningRate` scheduler API changes: - - * `get_learning_rate` is removed. - Please use `Base.get` to get the learning rate. - - ```julia - julia> sched = mx.LearningRate.Exp(.1) - MXNet.mx.LearningRate.Exp(0.1, 0.9, 0) - - julia> get(sched) - 0.1 - - julia> update!(sched); - - julia> get(sched) - 0.09000000000000001 - ``` - - * `update!` to bump the counter `Scheduler.t` - ```julia - julia> sched.t - 1 - - julia> update!(sched); - - julia> sched.t - 2 - - julia> update!(sched); - - julia> sched.t - 3 - ``` - -* `Momentum` module API changes: - - * `get_momentum_scheduler` is removed. Please use `Base.get` instead. - - ```julia - julia> get(mx.Momentum.Fixed(.9)) - 0.9 - ```
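Putting the renamings above together, here is a minimal, hypothetical sketch of constructing an optimizer with the new keywords; the exact `SGD` signature is assumed from the table and the `scale` docstring above, not quoted from the source:

```julia
# Hypothetical usage of the renamed keyword arguments;
# η, μ, and λ are typed as \eta, \mu, and \lambda in the REPL.
opt = SGD(η = 0.01,      # learning rate, formerly opts.lr
          μ = 0.9,       # momentum, formerly opts.momentum
          λ = 1e-4,      # weight decay, formerly opts.weight_decay
          scale = 1/32)  # gradient rescaling, typically 1.0 / batch_size

opt.η  # keyword fields are accessed directly now, e.g. SGD().η instead of SGD().opts.lr
```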
- ----- - -# v0.3.0 (2017.11.16) - -* Update `libmxnet` to - * On Windows: v0.12.0. - (See https://github.com/apache/incubator-mxnet/releases/tag/0.12.0) - - * On Linux/macOS: v0.12.1. - (See https://github.com/apache/incubator-mxnet/releases/tag/0.12.1) - -* Drop Julia 0.5 support. ([#300][300]) - -## New API - -### `SymbolicNode` - -* Debugging print support. ([#276][276]) - -### `NDArray` - -* `deepcopy` for `NDArray` ([#273][273]) - -* `scalar ./ NDArray` is available now. ([#292][292]) - -* `fill` and `fill!` for `NDArray`. ([#297][297], [#311][311]) - - An API corresponding to Python's `mx.nd.full()`: - - * `fill(x, dims, ctx=cpu())` - * `fill(x, dims...)` - * `fill!(arr::NDArray, x)`
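E.g. (a hypothetical sketch based on the signatures above, assuming the semantics of Python's `mx.nd.full()`; REPL output elided):

```julia
a = fill(42, (2, 3))   # fill(x, dims): a new 2×3 NDArray with every element set to 42
b = fill(42, 2, 3)     # fill(x, dims...): the same, with splatted dimensions
c = mx.zeros(2, 3)
fill!(c, 42)           # fill!(arr, x): fill an existing NDArray in place
```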
- -* Matrix (2D `NDArray`) multiplication is available now. ([#300][300]) - - ```julia - julia> x - 1x2 mx.NDArray{Float64} @ CPU0: - 1.0 2.0 - - julia> x' * x - 2x2 mx.NDArray{Float64} @ CPU0: - 1.0 2.0 - 2.0 4.0 - ``` - -* `NDArray` `getindex`/`setindex!` linear indexing support and `first` for - extracting the scalar value. ([#294][294]) - - ```julia - julia> x = mx.zeros(2, 5) - - julia> x[5] = 42 # do synchronization and set the value - ``` - - ```julia - julia> y = x[5] # getindex itself does not synchronize, but the REPL's display does it for you - 1 mx.NDArray{Float32} @ CPU0: - 42.0 - - julia> first(y) # do sync and get the value - 42.0f0 - - julia> y[] # this is available, also - 42.0f0 - ``` -* Elementwise power of `NDArray`. ([#293][293]) - - * `x.^2` - * `2.^x` - * `x.^y` - * where `x` and `y` are `NDArray`s. - -* Elementwise power of an irrational and an `NDArray`. ([#310][310]) - - * `e.^x` - * `x.^e` - * `π.^x` - -## API Changes - -### `SymbolicNode` - -* `reshape` of `SymbolicNode` shares the same interface with Base, - plus additional keyword arguments. ([#279][279]) - - * `reshape(SymbolicNode, dim; reverse=false, name)` - * `reshape(SymbolicNode, dim...; reverse=false, name)` - * `Reshape` is deprecated. - -* `mx.forward(x)` will return `x.outputs` now. ([#312][312]) - -### `NDArray` - -* `reshape` of `NDArray` shares the same interface with Base. ([#272][272]) - - * `reshape(NDArray, dim; reverse=false)` - * `reshape(NDArray, dim...; reverse=false)` - * `Reshape` is deprecated. - -* `srand!` is deprecated; please use `srand`. ([#282][282]) - -* `mean` and `sum` of `NDArray` share the same interface with Base - and fix the `axis` indexing. ([#303][303]) - - * This is a breaking change; no deprecation warning. - * Before: `mean(arr, axis=0)` - * After: `mean(arr, 1)` - -* `max` and `min` of `NDArray` renamed to `maximum` and `minimum` and share the - same interface with Base. The `axis` indexing is also fixed. ([#303][303]) - - * This is a breaking change; no deprecation warning. - * Before: `mx.max(arr, axis=0)` or `mx.max_axis(arr, axis=0)` - * After: `maximum(arr, 1)` - -* `mx.transpose` for high-dimensional `NDArray`s has been renamed to `permutedims` - and shares the same interface with Base. ([#303][303]) - - * This is a breaking change; no deprecation warning. - * Before: `mx.transpose(A, axis=[2, 1, 3])` - * After: `permutedims(A, [2, 1, 3])` - -* `prod` of `NDArray` shares the same interface with Base and fixes the `axis` - indexing. ([#303][303]) - - * This is a breaking change; no deprecation warning. - * Before: `prod(arr, axis=-1)` - * After: `prod(arr, 1)` - -## Bugfix - -* Broadcasting operations on the same variable are back. ([#300][300], [#314][314]) - ```julia - x = mx.NDArray(...) - x .* x - ``` - - ```julia - y = mx.Variable(:y) - y .* y - ``` - -[272]: https://github.com/dmlc/MXNet.jl/pull/272 -[273]: https://github.com/dmlc/MXNet.jl/pull/273 -[276]: https://github.com/dmlc/MXNet.jl/pull/276 -[279]: https://github.com/dmlc/MXNet.jl/pull/279 -[282]: https://github.com/dmlc/MXNet.jl/pull/282 -[292]: https://github.com/dmlc/MXNet.jl/pull/292 -[293]: https://github.com/dmlc/MXNet.jl/pull/293 -[294]: https://github.com/dmlc/MXNet.jl/pull/294 -[297]: https://github.com/dmlc/MXNet.jl/pull/297 -[300]: https://github.com/dmlc/MXNet.jl/pull/300 -[303]: https://github.com/dmlc/MXNet.jl/pull/303 -[310]: https://github.com/dmlc/MXNet.jl/pull/310 -[311]: https://github.com/dmlc/MXNet.jl/pull/311 -[312]: https://github.com/dmlc/MXNet.jl/pull/312 -[314]: https://github.com/dmlc/MXNet.jl/pull/314 - -# v0.2.2 (2017.05.14) -* Updated the supported version of MXNet to 0.9.4. -* Improved build system with auto-detection of GPU support. -* Several updates to Metrics. -* CI for Windows. -* Verbosity option for `predict` (@rdeits) - -# v0.2.1 (2017.01.29) -* Bugfix release for Windows - -# v0.2.0 (2017.01.26) -* Drop support for Julia v0.4. -* Added support for NVVM. -* Updated the supported version of MXNet to 0.9.2. -* New optimizers (@Arkoniak). - -# v0.1.0 (2016.09.08) - -* Track a specific libmxnet version for each release. -* Migrated documentation system to `Documenter.jl` (@vchuravy) -* Simplified building by using Julia's OpenBLAS (@staticfloat) -* Freezing parameters (@vchuravy) -* Support `DType` for `NDArray` (@vchuravy) - -# v0.0.8 (2016.02.08) - -* Fix compatibility with Julia v0.5. -* Fix seg-faults introduced by upstream API changes. - -# v0.0.7 (2015.12.14) - -* Fix compatibility with Julia v0.4.2 (@BigEpsilon) -* Metrics in epoch callbacks (@kasiabozek) - -# v0.0.6 (2015.12.02) - -* Variants of Xavier initializers (@vchuravy) -* More arithmetic operators on symbolic nodes -* Basic interface for symbolic node attributes (@vchuravy) - -# v0.0.5 (2015.11.14) - -* char-lstm example. -* Network visualization via GraphViz. -* NN-factory for common models. -* Convenient `@nd_as_jl` macro to work with `NDArray` as Julia Arrays. -* Refactoring: `Symbol` -> `SymbolicNode`. -* More evaluation metrics (@vchuravy, @Andy-P) - -# v0.0.4 (2015.11.09) - -* ADAM optimizer (@cbecker) -* Improved data provider API. -* More documentation. -* Fix a bug in the array data iterator (@vchuravy) - -# v0.0.3 (2015.10.27) - -* Model prediction API. -* Model checkpoint loading and saving. -* IJulia Notebook example of using a pre-trained ImageNet model as a classifier. -* Symbol saving and loading. -* NDArray saving and loading. -* Optimizer gradient clipping. -* Model training callback APIs, default checkpoint and speedometer callbacks. -* Julia Array / NDArray data iterator. -* Sphinx documentation system and documents for dynamically imported libmxnet APIs. - -# v0.0.2 (2015.10.23) - -* Fix a bug in the build script that caused the Julia REPL to exit. - -# v0.0.1 (2015.10.23) - -Initial release. - -* Basic libmxnet API.
-* Basic documentation, overview and MNIST tutorial. -* Working MNIST and CIFAR-10 examples, with multi-GPU training. -* Automatic building of libmxnet with BinDeps.jl. - diff --git a/julia/Project.toml b/julia/Project.toml deleted file mode 100644 index 994a696c1399..000000000000 --- a/julia/Project.toml +++ /dev/null @@ -1,26 +0,0 @@ -name = "MXNet" -uuid = "a7949054-b901-59c6-b8e3-7238c29bf7f0" -authors = ["Chiyuan Zhang ", "Valentin Churavy ", "Iblis Lin "] -version = "1.6.0" - -[deps] -BinDeps = "9e28174c-4ba2-5203-b857-d8d62c4213ee" -Formatting = "59287772-0a20-5a39-b81b-1366585eb4c0" -JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" -Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" -Markdown = "d6f4376e-aef5-505a-96c1-9c027394607a" -Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -Reexport = "189a3867-3050-52da-a836-e630ba90ab69" -Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" - -[compat] -julia = "≥0.7" - -[extras] -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - -[targets] -test = ["Test"] diff --git a/julia/README-DEV.md b/julia/README-DEV.md deleted file mode 100644 index 25a6c6d93ab9..000000000000 --- a/julia/README-DEV.md +++ /dev/null @@ -1,30 +0,0 @@ - - - - - - - - - - - - - - - - - -# Workflow for making a release - -1. Update `NEWS.md` to list important changes. -2. Check out the `stable` branch, merge with `master`. -3. Update `libmxnet_curr_ver` in `deps/build.jl` to the latest commit SHA (or any proper reference). Using `master` here is not good because future changes in libmxnet might break existing Julia packages. -4. Run tests. -5. Merge master into the stable branch. -6. Tag the stable branch: `git tag v1.2.3` -7. Push the tag to the remote: `git push origin ` -8. Edit the [releases page](https://github.com/dmlc/MXNet.jl/releases) - to copy the release notes from `NEWS.md` to the newly created release tag. -9. Go to https://github.com/JuliaLang/METADATA.jl/pulls - and check that `attobot` has already made a PR for the release. diff --git a/julia/README.md b/julia/README.md deleted file mode 100644 index 405f662d034c..000000000000 --- a/julia/README.md +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - -# MXNet - -[![MXNet](http://pkg.julialang.org/badges/MXNet_0.6.svg)](http://pkg.julialang.org/?pkg=MXNet) - - -MXNet.jl is the [Apache MXNet](https://github.com/apache/incubator-mxnet) [Julia](http://julialang.org/) package. MXNet.jl brings flexible and efficient GPU computing and state-of-the-art deep learning to Julia. Some highlights of its features include: - -* Efficient tensor/matrix computation across multiple devices, including multiple CPUs, GPUs and distributed server nodes. -* Flexible symbolic manipulation for composing and constructing state-of-the-art deep learning models. - -For more details, please refer to the -[documentation](https://dmlc.github.io/MXNet.jl/latest) and [examples](examples). diff --git a/julia/deps/build.jl b/julia/deps/build.jl deleted file mode 100644 index 8e6a66c9bb9f..000000000000 --- a/julia/deps/build.jl +++ /dev/null @@ -1,252 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -using JSON -using Libdl -using LinearAlgebra - -################################################################################ -# First try to detect and load existing libmxnet -################################################################################ -libmxnet_detected = false -libmxnet_curr_ver = get(ENV, "MXNET_COMMIT", "master") -curr_win = "20190608" # v1.5.0 - -if haskey(ENV, "MXNET_HOME") - MXNET_HOME = ENV["MXNET_HOME"] - @info("MXNET_HOME environment detected: $MXNET_HOME") - @info("Trying to load existing libmxnet...") - # In the case of macOS, if the user builds libmxnet from source and sets MXNET_HOME, - # the output is still named `libmxnet.so`. - lib = Libdl.find_library(["libmxnet.$(Libdl.dlext)", "libmxnet.so"], - [joinpath(MXNET_HOME, "lib"), joinpath(MXNET_HOME, "build"), MXNET_HOME]) - if !isempty(lib) - @info("Existing libmxnet detected at $lib, skip building...") - libmxnet_detected = true - else - @info("Failed to load existing libmxnet, trying to build from source...") - end -end - -# Try to find CUDA -CUDAPATHS = String[] -if haskey(ENV, "CUDA_HOME") - push!(CUDAPATHS, joinpath(ENV["CUDA_HOME"], "lib64")) -elseif Sys.islinux() - append!(CUDAPATHS, ["/opt/cuda/lib64", "/usr/local/cuda/lib64"]) -end - -if Sys.isunix() - nvcc_path = Sys.which("nvcc") - if nvcc_path ≢ nothing - @info "Found nvcc: $nvcc_path" - push!(CUDAPATHS, replace(nvcc_path, "bin/nvcc" => "lib64")) - end -end - -HAS_CUDA = false -HAS_CUDNN = false -let cudalib = Libdl.find_library(["libcuda", "nvcuda.dll"], CUDAPATHS) - global HAS_CUDA = !isempty(cudalib) && Libdl.dlopen_e(cudalib) != C_NULL -end - -if !HAS_CUDA && Sys.iswindows() - # TODO: this needs to be improved. - try - run(`nvcc --version`) - global HAS_CUDA = true - catch - end -end - -if HAS_CUDA # then check cudnn - let cudnnlib = Libdl.find_library("libcudnn", CUDAPATHS) - global HAS_CUDNN = !isempty(cudnnlib) && Libdl.dlopen_e(cudnnlib) != C_NULL - if HAS_CUDNN && !haskey(ENV, "CUDA_HOME") # infer `CUDA_HOME` - ENV["CUDA_HOME"] = dirname(dirname(cudnnlib)) - end - end -end - -if HAS_CUDA - @info("Found a CUDA installation.") - if HAS_CUDNN - @info("Found a CuDNN installation.") - end - @info("CUDA_HOME -> $(get(ENV, "CUDA_HOME", "nothing"))") -else - @info("Did not find a CUDA installation, using CPU-only version of MXNet.") -end - -# propagate more build flags from ENV -const CC = get(ENV, "CC", nothing) -const CXX = get(ENV, "CXX", nothing) -const ADD_CFLAGS = get(ENV, "ADD_CFLAGS", nothing) -const ADD_LDFLAGS = get(ENV, "ADD_LDFLAGS", nothing) -const USE_JEMALLOC = get(ENV, "USE_JEMALLOC", nothing) # "0" or "1" - -function get_cpucore() - if haskey(ENV, "TRAVIS") # on travis-ci - 2 - else - min(Sys.CPU_THREADS, 32) - end -end - -using BinDeps -@BinDeps.setup -if !libmxnet_detected - if Sys.iswindows() - @info("Prebuilt Windows binaries are not available currently.
You will have to build MXNet yourself.") - return - end - - ################################################################################ - # If not found, try to build automatically using BinDeps - ################################################################################ - - blas_path = Libdl.dlpath(Libdl.dlopen(Base.libblas_name)) - blas_vendor = LinearAlgebra.BLAS.vendor() - - ilp64 = "" - if blas_vendor == :openblas64 - ilp64 = "-DINTERFACE64" - end - - FORCE_LAPACK = false - if blas_vendor == :unknown - @info("Julia is built with an unknown BLAS library ($blas_path).") - @info("Attempting build without reusing the BLAS library.") - USE_JULIA_BLAS = false - elseif !(blas_vendor in (:openblas, :openblas64)) - @info("Unsure if we can build against $blas_vendor.") - @info("Attempting build anyway.") - USE_JULIA_BLAS = true - else - USE_JULIA_BLAS = true - FORCE_LAPACK = true - end - @info("USE_JULIA_BLAS -> $USE_JULIA_BLAS") - - blas_name = blas_vendor == :openblas64 ? "openblas" : string(blas_vendor) - MSHADOW_LDFLAGS = "MSHADOW_LDFLAGS=-lm $blas_path" - - #-------------------------------------------------------------------------------- - # Build libmxnet - mxnet = library_dependency("mxnet", aliases=["mxnet", "libmxnet", "libmxnet.so"]) - - _prefix = joinpath(BinDeps.depsdir(mxnet), "usr") - _srcdir = joinpath(BinDeps.depsdir(mxnet), "src") - _mxdir = joinpath(_srcdir, "mxnet") - _libdir = joinpath(_prefix, "lib") - # We have to eagerly delete the installed libmxnet.so; - # otherwise we won't rebuild on an update. - run(`rm -f $_libdir/libmxnet.$(Libdl.dlext)`) - provides(BuildProcess, - (@build_steps begin - CreateDirectory(_srcdir) - CreateDirectory(_libdir) - @build_steps begin - BinDeps.DirectoryRule(_mxdir, @build_steps begin - ChangeDirectory(_srcdir) - `git clone https://github.com/apache/incubator-mxnet mxnet` - end) - @build_steps begin - ChangeDirectory(_mxdir) - `git fetch` - if libmxnet_curr_ver != "master" - `git checkout $libmxnet_curr_ver` - else - `git checkout origin/$libmxnet_curr_ver` - end - `git submodule update --init --recursive` - `git -C 3rdparty/mshadow checkout -- make/mshadow.mk` - `cp -v ../../cblas.h include/cblas.h` - `sed -i -s "s/MSHADOW_CFLAGS = \(.*\)/MSHADOW_CFLAGS = \1 $ilp64/" 3rdparty/mshadow/make/mshadow.mk` - - # Copy config.mk, always override the file - if Sys.isapple() - `cp make/osx.mk config.mk` - else - `cp make/config.mk config.mk` - end - - # Configure OpenCV - `sed -i -s 's/USE_OPENCV = 1/USE_OPENCV = 0/' config.mk` - - # Configure CUDA - if HAS_CUDA - @build_steps begin - `sed -i -s 's/USE_CUDA = 0/USE_CUDA = 1/' config.mk` - # address https://github.com/apache/incubator-mxnet/pull/7856 - `sed -i -s "s/ADD_LDFLAGS =\(.*\)/ADD_LDFLAGS =\1 -lcublas -lcusolver -lcurand -lcudart/" config.mk` - if haskey(ENV, "CUDA_HOME") - `sed -i -s "s@USE_CUDA_PATH = NONE@USE_CUDA_PATH = $(ENV["CUDA_HOME"])@" config.mk` - end - if haskey(ENV, "CUDA_HOME") - # address https://github.com/apache/incubator-mxnet/pull/7838 - flag = "-L$(ENV["CUDA_HOME"])/lib64 -L$(ENV["CUDA_HOME"])/lib" - `sed -i -s "s@ADD_LDFLAGS =\(.*\)@ADD_LDFLAGS =\1 $flag@" config.mk` - end - if HAS_CUDNN - `sed -i -s 's/USE_CUDNN = 0/USE_CUDNN = 1/' config.mk` - end - end - end - - # Force enable LAPACK build; - # Julia's OpenBLAS has LAPACK functionality already - if FORCE_LAPACK - if Sys.isapple() - MSHADOW_LDFLAGS *= " -framework Accelerate" - end - `sed -i -s 's/ADD_CFLAGS =\(.*\)/ADD_CFLAGS =\1 -DMXNET_USE_LAPACK/' config.mk` - end - - # propagate more build flags from ENV -
if CC != nothing - `sed -i -s "s@^export CC =\(.*\)@export CC = $CC@" config.mk` - end - if CXX != nothing - `sed -i -s "s@^export CXX =\(.*\)@export CXX = $CXX@" config.mk` - end - if ADD_CFLAGS != nothing - `sed -i -s "s@ADD_CFLAGS =\(.*\)@ADD_CFLAGS =\1 $ADD_CFLAGS@" config.mk` - end - if ADD_LDFLAGS != nothing - `sed -i -s "s@ADD_LDFLAGS =\(.*\)@ADD_LDFLAGS =\1 $ADD_LDFLAGS@" config.mk` - end - if USE_JEMALLOC != nothing - `sed -i -s "s@USE_JEMALLOC =\(.*\)@USE_JEMALLOC = $USE_JEMALLOC@" config.mk` - end - - if USE_JULIA_BLAS - `make -j$(get_cpucore()) USE_BLAS=$blas_name $MSHADOW_LDFLAGS` - else - `make -j$(get_cpucore())` - end - end - FileRule(joinpath(_libdir, "libmxnet.$(Libdl.dlext)"), @build_steps begin - # the output file on macos is still in `.so` suffix - # so we rename it - `cp $_mxdir/lib/libmxnet.so $_libdir/libmxnet.$(Libdl.dlext)` - end) - end - end), mxnet, installed_libpath=_libdir) - - @BinDeps.install Dict(:mxnet => :mxnet) -end diff --git a/julia/deps/cblas.h b/julia/deps/cblas.h deleted file mode 100644 index d9449dc8e21d..000000000000 --- a/julia/deps/cblas.h +++ /dev/null @@ -1,563 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -#ifndef CBLAS_H -#define CBLAS_H - -/* - * This file modified from the OpenBLAS repository. - */ - -#include - -#ifdef __cplusplus -extern "C" { - /* Assume C declarations for C++ */ -#endif /* __cplusplus */ - - -/* - * Since all of GotoBlas was written without const, - * we disable it at build time. - */ -#ifndef OPENBLAS_CONST -# define OPENBLAS_CONST const -#endif - -/* - * Add definitions for BLASLONG and blasint - */ - -#if defined(OS_WINDOWS) && defined(__64BIT__) -typedef long long BLASLONG; -typedef unsigned long long BLASULONG; -#else -typedef long BLASLONG; -typedef unsigned long BLASULONG; -#endif - -#ifdef INTERFACE64 -typedef BLASLONG blasint; -#else -typedef int blasint; -#endif - -/* copy from openblas_config_template.h */ -/* C99 supports complex floating numbers natively, which GCC also offers as an - extension since version 3.0. If neither are available, use a compatible - structure as fallback (see Clause 6.2.5.13 of the C99 standard). 
*/ -#if ((defined(__STDC_IEC_559_COMPLEX__) || __STDC_VERSION__ >= 199901L || \ - (__GNUC__ >= 3 && !defined(__cplusplus))) && !(defined(FORCE_OPENBLAS_COMPLEX_STRUCT))) -#ifndef __cplusplus - #include -#endif - typedef float _Complex openblas_complex_float; - typedef double _Complex openblas_complex_double; -#else - typedef struct { float real, imag; } openblas_complex_float; - typedef struct { double real, imag; } openblas_complex_double; -#endif - -#ifdef INTERFACE64 -# define cblas_sdsdot cblas_sdsdot64_ -# define cblas_dsdot cblas_dsdot64_ -# define cblas_sdot cblas_sdot64_ -# define cblas_ddot cblas_ddot64_ -# define cblas_cdotu cblas_cdotu64_ -# define cblas_cdotc cblas_cdotc64_ -# define cblas_zdotu cblas_zdotu64_ -# define cblas_zdotc cblas_zdotc64_ -# define cblas_cdotu_sub cblas_cdotu_sub64_ -# define cblas_cdotc_sub cblas_cdotc_sub64_ -# define cblas_zdotu_sub cblas_zdotu_sub64_ -# define cblas_zdotc_sub cblas_zdotc_sub64_ -# define cblas_sasum cblas_sasum64_ -# define cblas_dasum cblas_dasum64_ -# define cblas_scasum cblas_scasum64_ -# define cblas_dzasum cblas_dzasum64_ -# define cblas_snrm2 cblas_snrm264_ -# define cblas_dnrm2 cblas_dnrm264_ -# define cblas_scnrm2 cblas_scnrm264_ -# define cblas_dznrm2 cblas_dznrm264_ -# define cblas_isamax cblas_isamax64_ -# define cblas_idamax cblas_idamax64_ -# define cblas_icamax cblas_icamax64_ -# define cblas_izamax cblas_izamax64_ -# define cblas_saxpy cblas_saxpy64_ -# define cblas_daxpy cblas_daxpy64_ -# define cblas_caxpy cblas_caxpy64_ -# define cblas_zaxpy cblas_zaxpy64_ -# define cblas_scopy cblas_scopy64_ -# define cblas_dcopy cblas_dcopy64_ -# define cblas_ccopy cblas_ccopy64_ -# define cblas_zcopy cblas_zcopy64_ -# define cblas_sswap cblas_sswap64_ -# define cblas_dswap cblas_dswap64_ -# define cblas_cswap cblas_cswap64_ -# define cblas_zswap cblas_zswap64_ -# define cblas_srot cblas_srot64_ -# define cblas_drot cblas_drot64_ -# define cblas_srotg cblas_srotg64_ -# define cblas_drotg cblas_drotg64_ -# define cblas_srotm cblas_srotm64_ -# define cblas_drotm cblas_drotm64_ -# define cblas_srotmg cblas_srotmg64_ -# define cblas_drotmg cblas_drotmg64_ -# define cblas_sscal cblas_sscal64_ -# define cblas_dscal cblas_dscal64_ -# define cblas_cscal cblas_cscal64_ -# define cblas_zscal cblas_zscal64_ -# define cblas_csscal cblas_csscal64_ -# define cblas_zdscal cblas_zdscal64_ -# define cblas_sgemv cblas_sgemv64_ -# define cblas_dgemv cblas_dgemv64_ -# define cblas_cgemv cblas_cgemv64_ -# define cblas_zgemv cblas_zgemv64_ -# define cblas_sger cblas_sger64_ -# define cblas_dger cblas_dger64_ -# define cblas_cgeru cblas_cgeru64_ -# define cblas_cgerc cblas_cgerc64_ -# define cblas_zgeru cblas_zgeru64_ -# define cblas_zgerc cblas_zgerc64_ -# define cblas_strsv cblas_strsv64_ -# define cblas_dtrsv cblas_dtrsv64_ -# define cblas_ctrsv cblas_ctrsv64_ -# define cblas_ztrsv cblas_ztrsv64_ -# define cblas_strmv cblas_strmv64_ -# define cblas_dtrmv cblas_dtrmv64_ -# define cblas_ctrmv cblas_ctrmv64_ -# define cblas_ztrmv cblas_ztrmv64_ -# define cblas_ssyr cblas_ssyr64_ -# define cblas_dsyr cblas_dsyr64_ -# define cblas_cher cblas_cher64_ -# define cblas_zher cblas_zher64_ -# define cblas_ssyr2 cblas_ssyr264_ -# define cblas_dsyr2 cblas_dsyr264_ -# define cblas_cher2 cblas_cher264_ -# define cblas_zher2 cblas_zher264_ -# define cblas_sgbmv cblas_sgbmv64_ -# define cblas_dgbmv cblas_dgbmv64_ -# define cblas_cgbmv cblas_cgbmv64_ -# define cblas_zgbmv cblas_zgbmv64_ -# define cblas_ssbmv cblas_ssbmv64_ -# define cblas_dsbmv cblas_dsbmv64_ -# 
define cblas_stbmv cblas_stbmv64_ -# define cblas_dtbmv cblas_dtbmv64_ -# define cblas_ctbmv cblas_ctbmv64_ -# define cblas_ztbmv cblas_ztbmv64_ -# define cblas_stbsv cblas_stbsv64_ -# define cblas_dtbsv cblas_dtbsv64_ -# define cblas_ctbsv cblas_ctbsv64_ -# define cblas_ztbsv cblas_ztbsv64_ -# define cblas_stpmv cblas_stpmv64_ -# define cblas_dtpmv cblas_dtpmv64_ -# define cblas_ctpmv cblas_ctpmv64_ -# define cblas_ztpmv cblas_ztpmv64_ -# define cblas_stpsv cblas_stpsv64_ -# define cblas_dtpsv cblas_dtpsv64_ -# define cblas_ctpsv cblas_ctpsv64_ -# define cblas_ztpsv cblas_ztpsv64_ -# define cblas_ssymv cblas_ssymv64_ -# define cblas_dsymv cblas_dsymv64_ -# define cblas_chemv cblas_chemv64_ -# define cblas_zhemv cblas_zhemv64_ -# define cblas_sspmv cblas_sspmv64_ -# define cblas_dspmv cblas_dspmv64_ -# define cblas_sspr cblas_sspr64_ -# define cblas_dspr cblas_dspr64_ -# define cblas_chpr cblas_chpr64_ -# define cblas_zhpr cblas_zhpr64_ -# define cblas_sspr2 cblas_sspr264_ -# define cblas_dspr2 cblas_dspr264_ -# define cblas_chpr2 cblas_chpr264_ -# define cblas_zhpr2 cblas_zhpr264_ -# define cblas_chbmv cblas_chbmv64_ -# define cblas_zhbmv cblas_zhbmv64_ -# define cblas_chpmv cblas_chpmv64_ -# define cblas_zhpmv cblas_zhpmv64_ -# define cblas_sgemm cblas_sgemm64_ -# define cblas_dgemm cblas_dgemm64_ -# define cblas_cgemm cblas_cgemm64_ -# define cblas_cgemm3m cblas_cgemm3m64_ -# define cblas_zgemm cblas_zgemm64_ -# define cblas_zgemm3m cblas_zgemm3m64_ -# define cblas_ssymm cblas_ssymm64_ -# define cblas_dsymm cblas_dsymm64_ -# define cblas_csymm cblas_csymm64_ -# define cblas_zsymm cblas_zsymm64_ -# define cblas_ssyrk cblas_ssyrk64_ -# define cblas_dsyrk cblas_dsyrk64_ -# define cblas_csyrk cblas_csyrk64_ -# define cblas_zsyrk cblas_zsyrk64_ -# define cblas_ssyr2k cblas_ssyr2k64_ -# define cblas_dsyr2k cblas_dsyr2k64_ -# define cblas_csyr2k cblas_csyr2k64_ -# define cblas_zsyr2k cblas_zsyr2k64_ -# define cblas_strmm cblas_strmm64_ -# define cblas_dtrmm cblas_dtrmm64_ -# define cblas_ctrmm cblas_ctrmm64_ -# define cblas_ztrmm cblas_ztrmm64_ -# define cblas_strsm cblas_strsm64_ -# define cblas_dtrsm cblas_dtrsm64_ -# define cblas_ctrsm cblas_ctrsm64_ -# define cblas_ztrsm cblas_ztrsm64_ -# define cblas_chemm cblas_chemm64_ -# define cblas_zhemm cblas_zhemm64_ -# define cblas_cherk cblas_cherk64_ -# define cblas_zherk cblas_zherk64_ -# define cblas_cher2k cblas_cher2k64_ -# define cblas_zher2k cblas_zher2k64_ -# define cblas_xerbla cblas_xerbla64_ -# define cblas_saxpby cblas_saxpby64_ -# define cblas_daxpby cblas_daxpby64_ -# define cblas_caxpby cblas_caxpby64_ -# define cblas_zaxpby cblas_zaxpby64_ -# define cblas_somatcopy cblas_somatcopy64_ -# define cblas_domatcopy cblas_domatcopy64_ -# define cblas_comatcopy cblas_comatcopy64_ -# define cblas_zomatcopy cblas_zomatcopy64_ -# define cblas_simatcopy cblas_simatcopy64_ -# define cblas_dimatcopy cblas_dimatcopy64_ -# define cblas_cimatcopy cblas_cimatcopy64_ -# define cblas_zimatcopy cblas_zimatcopy64_ -# define cblas_sgeadd cblas_sgeadd64_ -# define cblas_dgeadd cblas_dgeadd64_ -# define cblas_cgeadd cblas_cgeadd64_ -# define cblas_zgeadd cblas_zgeadd64_ -#endif - -#define CBLAS_INDEX size_t - - -typedef enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102} CBLAS_ORDER; -typedef enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113, CblasConjNoTrans=114} CBLAS_TRANSPOSE; -typedef enum CBLAS_UPLO {CblasUpper=121, CblasLower=122} CBLAS_UPLO; -typedef enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132} CBLAS_DIAG; 
-typedef enum CBLAS_SIDE {CblasLeft=141, CblasRight=142} CBLAS_SIDE; - -float cblas_sdsdot(OPENBLAS_CONST blasint n, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float *y, OPENBLAS_CONST blasint incy); -double cblas_dsdot (OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float *y, OPENBLAS_CONST blasint incy); -float cblas_sdot(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float *y, OPENBLAS_CONST blasint incy); -double cblas_ddot(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST double *y, OPENBLAS_CONST blasint incy); - -openblas_complex_float cblas_cdotu(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float *y, OPENBLAS_CONST blasint incy); -openblas_complex_float cblas_cdotc(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float *y, OPENBLAS_CONST blasint incy); -openblas_complex_double cblas_zdotu(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST double *y, OPENBLAS_CONST blasint incy); -openblas_complex_double cblas_zdotc(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST double *y, OPENBLAS_CONST blasint incy); - -void cblas_cdotu_sub(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float *y, OPENBLAS_CONST blasint incy, openblas_complex_float *ret); -void cblas_cdotc_sub(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float *y, OPENBLAS_CONST blasint incy, openblas_complex_float *ret); -void cblas_zdotu_sub(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST double *y, OPENBLAS_CONST blasint incy, openblas_complex_double *ret); -void cblas_zdotc_sub(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST double *y, OPENBLAS_CONST blasint incy, openblas_complex_double *ret); - -float cblas_sasum (OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx); -double cblas_dasum (OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx); -float cblas_scasum(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx); -double cblas_dzasum(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx); - -float cblas_snrm2 (OPENBLAS_CONST blasint N, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX); -double cblas_dnrm2 (OPENBLAS_CONST blasint N, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX); -float cblas_scnrm2(OPENBLAS_CONST blasint N, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX); -double cblas_dznrm2(OPENBLAS_CONST blasint N, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX); - -CBLAS_INDEX cblas_isamax(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx); -CBLAS_INDEX cblas_idamax(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx); -CBLAS_INDEX cblas_icamax(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx); -CBLAS_INDEX cblas_izamax(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx); - -void cblas_saxpy(OPENBLAS_CONST blasint n, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, float *y, 
OPENBLAS_CONST blasint incy); -void cblas_daxpy(OPENBLAS_CONST blasint n, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, double *y, OPENBLAS_CONST blasint incy); -void cblas_caxpy(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, float *y, OPENBLAS_CONST blasint incy); -void cblas_zaxpy(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, double *y, OPENBLAS_CONST blasint incy); - -void cblas_scopy(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, float *y, OPENBLAS_CONST blasint incy); -void cblas_dcopy(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, double *y, OPENBLAS_CONST blasint incy); -void cblas_ccopy(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, float *y, OPENBLAS_CONST blasint incy); -void cblas_zcopy(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, double *y, OPENBLAS_CONST blasint incy); - -void cblas_sswap(OPENBLAS_CONST blasint n, float *x, OPENBLAS_CONST blasint incx, float *y, OPENBLAS_CONST blasint incy); -void cblas_dswap(OPENBLAS_CONST blasint n, double *x, OPENBLAS_CONST blasint incx, double *y, OPENBLAS_CONST blasint incy); -void cblas_cswap(OPENBLAS_CONST blasint n, float *x, OPENBLAS_CONST blasint incx, float *y, OPENBLAS_CONST blasint incy); -void cblas_zswap(OPENBLAS_CONST blasint n, double *x, OPENBLAS_CONST blasint incx, double *y, OPENBLAS_CONST blasint incy); - -void cblas_srot(OPENBLAS_CONST blasint N, float *X, OPENBLAS_CONST blasint incX, float *Y, OPENBLAS_CONST blasint incY, OPENBLAS_CONST float c, OPENBLAS_CONST float s); -void cblas_drot(OPENBLAS_CONST blasint N, double *X, OPENBLAS_CONST blasint incX, double *Y, OPENBLAS_CONST blasint incY, OPENBLAS_CONST double c, OPENBLAS_CONST double s); - -void cblas_srotg(float *a, float *b, float *c, float *s); -void cblas_drotg(double *a, double *b, double *c, double *s); - -void cblas_srotm(OPENBLAS_CONST blasint N, float *X, OPENBLAS_CONST blasint incX, float *Y, OPENBLAS_CONST blasint incY, OPENBLAS_CONST float *P); -void cblas_drotm(OPENBLAS_CONST blasint N, double *X, OPENBLAS_CONST blasint incX, double *Y, OPENBLAS_CONST blasint incY, OPENBLAS_CONST double *P); - -void cblas_srotmg(float *d1, float *d2, float *b1, OPENBLAS_CONST float b2, float *P); -void cblas_drotmg(double *d1, double *d2, double *b1, OPENBLAS_CONST double b2, double *P); - -void cblas_sscal(OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, float *X, OPENBLAS_CONST blasint incX); -void cblas_dscal(OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, double *X, OPENBLAS_CONST blasint incX); -void cblas_cscal(OPENBLAS_CONST blasint N, OPENBLAS_CONST float *alpha, float *X, OPENBLAS_CONST blasint incX); -void cblas_zscal(OPENBLAS_CONST blasint N, OPENBLAS_CONST double *alpha, double *X, OPENBLAS_CONST blasint incX); -void cblas_csscal(OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, float *X, OPENBLAS_CONST blasint incX); -void cblas_zdscal(OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, double *X, OPENBLAS_CONST blasint incX); - -void cblas_sgemv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_TRANSPOSE trans, OPENBLAS_CONST blasint m, OPENBLAS_CONST blasint n, - OPENBLAS_CONST float alpha, OPENBLAS_CONST float *a, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float beta, float 
*y, OPENBLAS_CONST blasint incy); -void cblas_dgemv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_TRANSPOSE trans, OPENBLAS_CONST blasint m, OPENBLAS_CONST blasint n, - OPENBLAS_CONST double alpha, OPENBLAS_CONST double *a, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST double beta, double *y, OPENBLAS_CONST blasint incy); -void cblas_cgemv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_TRANSPOSE trans, OPENBLAS_CONST blasint m, OPENBLAS_CONST blasint n, - OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *a, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST float *beta, float *y, OPENBLAS_CONST blasint incy); -void cblas_zgemv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_TRANSPOSE trans, OPENBLAS_CONST blasint m, OPENBLAS_CONST blasint n, - OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *a, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx, OPENBLAS_CONST double *beta, double *y, OPENBLAS_CONST blasint incy); - -void cblas_sger (OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float *Y, OPENBLAS_CONST blasint incY, float *A, OPENBLAS_CONST blasint lda); -void cblas_dger (OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double *Y, OPENBLAS_CONST blasint incY, double *A, OPENBLAS_CONST blasint lda); -void cblas_cgeru(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float *Y, OPENBLAS_CONST blasint incY, float *A, OPENBLAS_CONST blasint lda); -void cblas_cgerc(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float *Y, OPENBLAS_CONST blasint incY, float *A, OPENBLAS_CONST blasint lda); -void cblas_zgeru(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double *Y, OPENBLAS_CONST blasint incY, double *A, OPENBLAS_CONST blasint lda); -void cblas_zgerc(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double *Y, OPENBLAS_CONST blasint incY, double *A, OPENBLAS_CONST blasint lda); - -void cblas_strsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint N, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *X, OPENBLAS_CONST blasint incX); -void cblas_dtrsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint N, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *X, OPENBLAS_CONST blasint incX); -void cblas_ctrsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum 
CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint N, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *X, OPENBLAS_CONST blasint incX); -void cblas_ztrsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint N, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *X, OPENBLAS_CONST blasint incX); - -void cblas_strmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint N, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *X, OPENBLAS_CONST blasint incX); -void cblas_dtrmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint N, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *X, OPENBLAS_CONST blasint incX); -void cblas_ctrmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint N, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *X, OPENBLAS_CONST blasint incX); -void cblas_ztrmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint N, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *X, OPENBLAS_CONST blasint incX); - -void cblas_ssyr(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, float *A, OPENBLAS_CONST blasint lda); -void cblas_dsyr(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, double *A, OPENBLAS_CONST blasint lda); -void cblas_cher(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, float *A, OPENBLAS_CONST blasint lda); -void cblas_zher(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, double *A, OPENBLAS_CONST blasint lda); - -void cblas_ssyr2(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo,OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *X, - OPENBLAS_CONST blasint incX, OPENBLAS_CONST float *Y, OPENBLAS_CONST blasint incY, float *A, OPENBLAS_CONST blasint lda); -void cblas_dsyr2(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *X, - OPENBLAS_CONST blasint incX, OPENBLAS_CONST double *Y, OPENBLAS_CONST blasint incY, double *A, OPENBLAS_CONST blasint lda); -void cblas_cher2(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, - OPENBLAS_CONST float *Y, OPENBLAS_CONST blasint incY, float *A, OPENBLAS_CONST blasint lda); -void 
cblas_zher2(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, - OPENBLAS_CONST double *Y, OPENBLAS_CONST blasint incY, double *A, OPENBLAS_CONST blasint lda); - -void cblas_sgbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, - OPENBLAS_CONST blasint KL, OPENBLAS_CONST blasint KU, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float beta, float *Y, OPENBLAS_CONST blasint incY); -void cblas_dgbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, - OPENBLAS_CONST blasint KL, OPENBLAS_CONST blasint KU, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double beta, double *Y, OPENBLAS_CONST blasint incY); -void cblas_cgbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, - OPENBLAS_CONST blasint KL, OPENBLAS_CONST blasint KU, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float *beta, float *Y, OPENBLAS_CONST blasint incY); -void cblas_zgbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, - OPENBLAS_CONST blasint KL, OPENBLAS_CONST blasint KU, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double *beta, double *Y, OPENBLAS_CONST blasint incY); - -void cblas_ssbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *A, - OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float beta, float *Y, OPENBLAS_CONST blasint incY); -void cblas_dsbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *A, - OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double beta, double *Y, OPENBLAS_CONST blasint incY); - - -void cblas_stbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *X, OPENBLAS_CONST blasint incX); -void cblas_dtbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *X, OPENBLAS_CONST blasint incX); -void cblas_ctbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, 
OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *X, OPENBLAS_CONST blasint incX); -void cblas_ztbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *X, OPENBLAS_CONST blasint incX); - -void cblas_stbsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *X, OPENBLAS_CONST blasint incX); -void cblas_dtbsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *X, OPENBLAS_CONST blasint incX); -void cblas_ctbsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *X, OPENBLAS_CONST blasint incX); -void cblas_ztbsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *X, OPENBLAS_CONST blasint incX); - -void cblas_stpmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST float *Ap, float *X, OPENBLAS_CONST blasint incX); -void cblas_dtpmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST double *Ap, double *X, OPENBLAS_CONST blasint incX); -void cblas_ctpmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST float *Ap, float *X, OPENBLAS_CONST blasint incX); -void cblas_ztpmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST double *Ap, double *X, OPENBLAS_CONST blasint incX); - -void cblas_stpsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST float *Ap, float *X, OPENBLAS_CONST blasint incX); -void cblas_dtpsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST double *Ap, double *X, OPENBLAS_CONST blasint incX); -void cblas_ctpsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST 
float *Ap, float *X, OPENBLAS_CONST blasint incX); -void cblas_ztpsv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_DIAG Diag, - OPENBLAS_CONST blasint N, OPENBLAS_CONST double *Ap, double *X, OPENBLAS_CONST blasint incX); - -void cblas_ssymv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *A, - OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float beta, float *Y, OPENBLAS_CONST blasint incY); -void cblas_dsymv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *A, - OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double beta, double *Y, OPENBLAS_CONST blasint incY); -void cblas_chemv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, - OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float *beta, float *Y, OPENBLAS_CONST blasint incY); -void cblas_zhemv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *A, - OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double *beta, double *Y, OPENBLAS_CONST blasint incY); - - -void cblas_sspmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *Ap, - OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float beta, float *Y, OPENBLAS_CONST blasint incY); -void cblas_dspmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *Ap, - OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double beta, double *Y, OPENBLAS_CONST blasint incY); - -void cblas_sspr(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, float *Ap); -void cblas_dspr(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, double *Ap); - -void cblas_chpr(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, float *A); -void cblas_zhpr(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *X,OPENBLAS_CONST blasint incX, double *A); - -void cblas_sspr2(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float *Y, OPENBLAS_CONST blasint incY, float *A); -void cblas_dspr2(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *X, OPENBLAS_CONST 
blasint incX, OPENBLAS_CONST double *Y, OPENBLAS_CONST blasint incY, double *A); -void cblas_chpr2(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float *Y, OPENBLAS_CONST blasint incY, float *Ap); -void cblas_zhpr2(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double *Y, OPENBLAS_CONST blasint incY, double *Ap); - -void cblas_chbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float *beta, float *Y, OPENBLAS_CONST blasint incY); -void cblas_zhbmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double *beta, double *Y, OPENBLAS_CONST blasint incY); - -void cblas_chpmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, - OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *Ap, OPENBLAS_CONST float *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST float *beta, float *Y, OPENBLAS_CONST blasint incY); -void cblas_zhpmv(OPENBLAS_CONST enum CBLAS_ORDER order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint N, - OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *Ap, OPENBLAS_CONST double *X, OPENBLAS_CONST blasint incX, OPENBLAS_CONST double *beta, double *Y, OPENBLAS_CONST blasint incY); - -void cblas_sgemm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST float alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST float beta, float *C, OPENBLAS_CONST blasint ldc); -void cblas_dgemm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST double alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST double beta, double *C, OPENBLAS_CONST blasint ldc); -void cblas_cgemm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST float *beta, float *C, OPENBLAS_CONST blasint ldc); -void cblas_cgemm3m(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float 
*B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST float *beta, float *C, OPENBLAS_CONST blasint ldc); -void cblas_zgemm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST double *beta, double *C, OPENBLAS_CONST blasint ldc); -void cblas_zgemm3m(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransB, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST double *beta, double *C, OPENBLAS_CONST blasint ldc); - - -void cblas_ssymm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, - OPENBLAS_CONST float alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST float beta, float *C, OPENBLAS_CONST blasint ldc); -void cblas_dsymm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, - OPENBLAS_CONST double alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST double beta, double *C, OPENBLAS_CONST blasint ldc); -void cblas_csymm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, - OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST float *beta, float *C, OPENBLAS_CONST blasint ldc); -void cblas_zsymm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, - OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST double *beta, double *C, OPENBLAS_CONST blasint ldc); - -void cblas_ssyrk(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float beta, float *C, OPENBLAS_CONST blasint ldc); -void cblas_dsyrk(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double beta, double *C, OPENBLAS_CONST blasint ldc); -void cblas_csyrk(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *beta, float *C, OPENBLAS_CONST blasint ldc); -void 
cblas_zsyrk(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *beta, double *C, OPENBLAS_CONST blasint ldc); - -void cblas_ssyr2k(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST float beta, float *C, OPENBLAS_CONST blasint ldc); -void cblas_dsyr2k(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST double beta, double *C, OPENBLAS_CONST blasint ldc); -void cblas_csyr2k(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST float *beta, float *C, OPENBLAS_CONST blasint ldc); -void cblas_zsyr2k(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, - OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST double *beta, double *C, OPENBLAS_CONST blasint ldc); - -void cblas_strmm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, - OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *B, OPENBLAS_CONST blasint ldb); -void cblas_dtrmm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, - OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *B, OPENBLAS_CONST blasint ldb); -void cblas_ctrmm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, - OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *B, OPENBLAS_CONST blasint ldb); -void cblas_ztrmm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, - OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *B, OPENBLAS_CONST blasint ldb); - -void cblas_strsm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum 
CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, - OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *B, OPENBLAS_CONST blasint ldb); -void cblas_dtrsm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, - OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *B, OPENBLAS_CONST blasint ldb); -void cblas_ctrsm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, - OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, float *B, OPENBLAS_CONST blasint ldb); -void cblas_ztrsm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE TransA, - OPENBLAS_CONST enum CBLAS_DIAG Diag, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, double *B, OPENBLAS_CONST blasint ldb); - -void cblas_chemm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, - OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST float *beta, float *C, OPENBLAS_CONST blasint ldc); -void cblas_zhemm(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_SIDE Side, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST blasint M, OPENBLAS_CONST blasint N, - OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST double *beta, double *C, OPENBLAS_CONST blasint ldc); - -void cblas_cherk(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST float alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float beta, float *C, OPENBLAS_CONST blasint ldc); -void cblas_zherk(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST double alpha, OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double beta, double *C, OPENBLAS_CONST blasint ldc); - -void cblas_cher2k(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST float *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST float beta, float *C, OPENBLAS_CONST blasint ldc); -void cblas_zher2k(OPENBLAS_CONST enum CBLAS_ORDER Order, OPENBLAS_CONST enum CBLAS_UPLO Uplo, OPENBLAS_CONST enum CBLAS_TRANSPOSE Trans, OPENBLAS_CONST blasint N, OPENBLAS_CONST blasint K, - OPENBLAS_CONST double *alpha, 
OPENBLAS_CONST double *A, OPENBLAS_CONST blasint lda, OPENBLAS_CONST double *B, OPENBLAS_CONST blasint ldb, OPENBLAS_CONST double beta, double *C, OPENBLAS_CONST blasint ldc); - -void cblas_xerbla(blasint p, char *rout, char *form, ...); - -/*** BLAS extensions ***/ - -void cblas_saxpby(OPENBLAS_CONST blasint n, OPENBLAS_CONST float alpha, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx,OPENBLAS_CONST float beta, float *y, OPENBLAS_CONST blasint incy); - -void cblas_daxpby(OPENBLAS_CONST blasint n, OPENBLAS_CONST double alpha, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx,OPENBLAS_CONST double beta, double *y, OPENBLAS_CONST blasint incy); - -void cblas_caxpby(OPENBLAS_CONST blasint n, OPENBLAS_CONST float *alpha, OPENBLAS_CONST float *x, OPENBLAS_CONST blasint incx,OPENBLAS_CONST float *beta, float *y, OPENBLAS_CONST blasint incy); - -void cblas_zaxpby(OPENBLAS_CONST blasint n, OPENBLAS_CONST double *alpha, OPENBLAS_CONST double *x, OPENBLAS_CONST blasint incx,OPENBLAS_CONST double *beta, double *y, OPENBLAS_CONST blasint incy); - -void cblas_somatcopy(OPENBLAS_CONST enum CBLAS_ORDER CORDER, OPENBLAS_CONST enum CBLAS_TRANSPOSE CTRANS, OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST float calpha, OPENBLAS_CONST float *a, - OPENBLAS_CONST blasint clda, float *b, OPENBLAS_CONST blasint cldb); -void cblas_domatcopy(OPENBLAS_CONST enum CBLAS_ORDER CORDER, OPENBLAS_CONST enum CBLAS_TRANSPOSE CTRANS, OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST double calpha, OPENBLAS_CONST double *a, - OPENBLAS_CONST blasint clda, double *b, OPENBLAS_CONST blasint cldb); -void cblas_comatcopy(OPENBLAS_CONST enum CBLAS_ORDER CORDER, OPENBLAS_CONST enum CBLAS_TRANSPOSE CTRANS, OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST float* calpha, OPENBLAS_CONST float* a, - OPENBLAS_CONST blasint clda, float*b, OPENBLAS_CONST blasint cldb); -void cblas_zomatcopy(OPENBLAS_CONST enum CBLAS_ORDER CORDER, OPENBLAS_CONST enum CBLAS_TRANSPOSE CTRANS, OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST double* calpha, OPENBLAS_CONST double* a, - OPENBLAS_CONST blasint clda, double *b, OPENBLAS_CONST blasint cldb); - -void cblas_simatcopy(OPENBLAS_CONST enum CBLAS_ORDER CORDER, OPENBLAS_CONST enum CBLAS_TRANSPOSE CTRANS, OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST float calpha, float *a, - OPENBLAS_CONST blasint clda, OPENBLAS_CONST blasint cldb); -void cblas_dimatcopy(OPENBLAS_CONST enum CBLAS_ORDER CORDER, OPENBLAS_CONST enum CBLAS_TRANSPOSE CTRANS, OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST double calpha, double *a, - OPENBLAS_CONST blasint clda, OPENBLAS_CONST blasint cldb); -void cblas_cimatcopy(OPENBLAS_CONST enum CBLAS_ORDER CORDER, OPENBLAS_CONST enum CBLAS_TRANSPOSE CTRANS, OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST float* calpha, float* a, - OPENBLAS_CONST blasint clda, OPENBLAS_CONST blasint cldb); -void cblas_zimatcopy(OPENBLAS_CONST enum CBLAS_ORDER CORDER, OPENBLAS_CONST enum CBLAS_TRANSPOSE CTRANS, OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST double* calpha, double* a, - OPENBLAS_CONST blasint clda, OPENBLAS_CONST blasint cldb); - -void cblas_sgeadd(OPENBLAS_CONST enum CBLAS_ORDER CORDER,OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST float calpha, float *a, OPENBLAS_CONST blasint clda, OPENBLAS_CONST float cbeta, - float *c, OPENBLAS_CONST blasint 
cldc); -void cblas_dgeadd(OPENBLAS_CONST enum CBLAS_ORDER CORDER,OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST double calpha, double *a, OPENBLAS_CONST blasint clda, OPENBLAS_CONST double cbeta, - double *c, OPENBLAS_CONST blasint cldc); -void cblas_cgeadd(OPENBLAS_CONST enum CBLAS_ORDER CORDER,OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST float *calpha, float *a, OPENBLAS_CONST blasint clda, OPENBLAS_CONST float *cbeta, - float *c, OPENBLAS_CONST blasint cldc); -void cblas_zgeadd(OPENBLAS_CONST enum CBLAS_ORDER CORDER,OPENBLAS_CONST blasint crows, OPENBLAS_CONST blasint ccols, OPENBLAS_CONST double *calpha, double *a, OPENBLAS_CONST blasint clda, OPENBLAS_CONST double *cbeta, - double *c, OPENBLAS_CONST blasint cldc); - - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/julia/docs/.gitignore b/julia/docs/.gitignore deleted file mode 100644 index 8a6e014fbbce..000000000000 --- a/julia/docs/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -Manifest.toml - -_build -build/ -site/ -venv/ diff --git a/julia/docs/Makefile b/julia/docs/Makefile deleted file mode 100644 index 337a28615e6e..000000000000 --- a/julia/docs/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -all: - julia --color=yes --project=./ -e \ - 'using Pkg; \ - Pkg.develop(PackageSpec(name="MXNet", path = joinpath(pwd(), "..")))' - julia --color=yes --project=./ ./make.jl - pip install --user Markdown==3.1 \ - mkdocs==1.0.4 \ - mkdocs-material==4.6.0 \ - pygments==2.5.2 \ - pymdown-extensions==6.2.1 \ - python-markdown-math==0.6 - export LC_ALL="C.UTF-8" - export LANG="C.UTF-8" - echo $(LC_ALL) - echo $(LANG) - LC_ALL="C.UTF-8" ~/.local/bin/mkdocs build - -clean: - rm -rvf venv - rm -rvf build - rm -rvf site diff --git a/julia/docs/Project.toml b/julia/docs/Project.toml deleted file mode 100644 index 023a222beba6..000000000000 --- a/julia/docs/Project.toml +++ /dev/null @@ -1,7 +0,0 @@ -[deps] -Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -DocumenterMarkdown = "997ab1e6-3595-5248-9280-8efb232c3433" -MXNet = "a7949054-b901-59c6-b8e3-7238c29bf7f0" - -[compat] -Documenter = "~0.23" diff --git a/julia/docs/make.jl b/julia/docs/make.jl deleted file mode 100644 index 3ea9b07d1056..000000000000 --- a/julia/docs/make.jl +++ /dev/null @@ -1,60 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -using Documenter -using DocumenterMarkdown -using MXNet - -""" -Return all files of a submodule - -julia> listpages("ndarray") -15-element Array{String,1}: - "ndarray.jl" - "ndarray/activation.jl" - "ndarray/arithmetic.jl" - "ndarray/array.jl" - ... - "ndarray/statistic.jl" - "ndarray/trig.jl" - "ndarray/type.jl" -""" -listpages(x) = - ["$x.jl"; joinpath.(x, readdir(joinpath(@__DIR__, "..", "src", x)))] - -const api_pages = [ - "api/context.md", - "api/ndarray.md", - "api/symbolic-node.md", - "api/model.md", - "api/initializers.md", - "api/optimizers.md", - "api/callbacks.md", - "api/metric.md", - "api/io.md", - "api/nn-factory.md", - "api/executor.md", - "api/kvstore.md", - "api/visualize.md", -] - -makedocs( - sitename = "MXNet.jl", - modules = MXNet, - doctest = false, - format = Markdown(), -) diff --git a/julia/docs/mkdocs.yml b/julia/docs/mkdocs.yml deleted file mode 100644 index 6ab34726e7ef..000000000000 --- a/julia/docs/mkdocs.yml +++ /dev/null @@ -1,63 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-
-site_name: MXNet.jl
-repo_url: https://github.com/apache/incubator-mxnet/tree/master/julia#mxnet
-
-theme: material
-
-extra:
-  palette:
-    primary: 'indigo'
-    accent: 'blue'
-
-extra_css:
-  - assets/Documenter.css
-
-extra_javascript:
-  - https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML
-  - assets/mathjaxhelper.js
-
-markdown_extensions:
-  - extra
-  - tables
-  - fenced_code
-  - mdx_math
-  - admonition
-
-docs_dir: 'build'
-
-nav:
-  - Home: index.md
-  - User Guide:
-    - Installation Guide: user-guide/install.md
-    - Overview: user-guide/overview.md
-    - FAQ: user-guide/faq.md
-  - API Documentation:
-    - Context: api/context.md
-    - Models: api/model.md
-    - Initializers: api/initializer.md
-    - Optimizers: api/optimizer.md
-    - Callbacks in training: api/callback.md
-    - Evaluation Metrics: api/metric.md
-    - Data Providers: api/io.md
-    - NDArray API: api/ndarray.md
-    - Symbolic API: api/symbolic-node.md
-    - Neural Networks Factory: api/nn-factory.md
-    - Executor: api/executor.md
-    - Key-Value Store: api/kvstore.md
-    - Network Visualization: api/visualize.md
diff --git a/julia/docs/src/api.md b/julia/docs/src/api.md
deleted file mode 100644
index 04cfadd6d698..000000000000
--- a/julia/docs/src/api.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# API Documentation
-
-```@contents
-Pages = api_pages
-```
diff --git a/julia/docs/src/api/callback.md b/julia/docs/src/api/callback.md
deleted file mode 100644
index 5a35e5047120..000000000000
--- a/julia/docs/src/api/callback.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# Callbacks in training
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["callback.jl"]
-```
diff --git a/julia/docs/src/api/context.md b/julia/docs/src/api/context.md
deleted file mode 100644
index 2daabe2db41b..000000000000
--- a/julia/docs/src/api/context.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# Context
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["context.jl"]
-```
diff --git a/julia/docs/src/api/executor.md b/julia/docs/src/api/executor.md
deleted file mode 100644
index c3037dfff60b..000000000000
--- a/julia/docs/src/api/executor.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# Executor
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["executor.jl"]
-```
diff --git a/julia/docs/src/api/initializer.md b/julia/docs/src/api/initializer.md
deleted file mode 100644
index b2515263f93a..000000000000
--- a/julia/docs/src/api/initializer.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# Initializer
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["initializer.jl"]
-```
diff --git a/julia/docs/src/api/io.md b/julia/docs/src/api/io.md
deleted file mode 100644
index 52d172010af4..000000000000
--- a/julia/docs/src/api/io.md
+++ /dev/null
@@ -1,137 +0,0 @@
-
-# Data Providers
-
-Data providers are wrappers that load external data, be it images, text, or general tensors,
-and split it into mini-batches so that the model can consume the data in a uniform way.
-
-## AbstractDataProvider interface
-
-```@docs
-mx.AbstractDataProvider
-```
-
-The difference between *data* and *label* is that during the training stage,
-both *data* and *label* are fed into the model, while during the
-prediction stage, only *data* is loaded. Otherwise, they could be anything, with any names, and
-of any shapes. The provided data and label names here should match the input names in a target
-`SymbolicNode`.
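As a sketch of this naming contract, in-memory Julia arrays can be wrapped so their names match the network's inputs (assuming the built-in `ArrayDataProvider`; the `:data`/`:softmax_label` names are placeholders that should match your own network):

```julia
using MXNet

# Wrap in-memory arrays as a provider; the last dimension indexes samples.
X = rand(Float32, 28, 28, 1, 1000)  # features
y = rand(Float32, 1000)             # labels

provider = mx.ArrayDataProvider(:data => X, :softmax_label => y, batch_size = 100)

mx.provide_data(provider)   # name/shape pairs, e.g. [(:data, (28, 28, 1, 100))]
mx.provide_label(provider)  # e.g. [(:softmax_label, (100,))]
```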
-
-A data provider should also implement the Julia iteration interface, in order to allow iterating
-through the data set. The provider will be called in the following way:
-
-```julia
-for batch in eachbatch(provider)
-  data = get_data(provider, batch)
-end
-```
-
-which the Julia compiler translates into
-
-```julia
-state = Base.start(eachbatch(provider))
-while !Base.done(provider, state)
-  (batch, state) = Base.next(provider, state)
-  data = get_data(provider, batch)
-end
-```
-
-By default, `eachbatch` simply returns the provider itself, so the iterator interface
-is implemented on the provider type itself. But the extra layer of abstraction allows us to
-implement a data provider easily via a Julia `Task` coroutine. See the
-data provider defined in [the char-lstm example](/api/julia/docs/api/tutorial/char-lstm/) for an example of using a coroutine to define data
-providers.
-
-The detailed interface functions for the iterator API are listed below:
-
-    Base.eltype(provider) -> AbstractDataBatch
-
-Returns the specific subtype representing a data batch. See `AbstractDataBatch`.
-* `provider::AbstractDataProvider`: the data provider.
-
-    Base.start(provider) -> AbstractDataProviderState
-
-This function is always called before iterating over the dataset. It should initialize
-the iterator, reset the index, and do data shuffling if needed.
-* `provider::AbstractDataProvider`: the data provider.
-
-    Base.done(provider, state) -> Bool
-
-Returns true if there is no more data to iterate over in this dataset.
-* `provider::AbstractDataProvider`: the data provider.
-* `state::AbstractDataProviderState`: the state returned by `Base.start` and `Base.next`.
-
-    Base.next(provider, state) -> (AbstractDataBatch, AbstractDataProviderState)
-
-Returns the current data batch, and the state for the next iteration.
-* `provider::AbstractDataProvider`: the data provider.
-
-Note that sometimes you will be wrapping an existing data iterator (e.g. the built-in libmxnet data iterator) that
-is built with a different convention, and it might be difficult to adapt it to the interfaces stated here. In this
-case, you can safely assume that
-
-* `Base.start` will always be called, and called only once before the iteration starts.
-* `Base.done` will always be called at the beginning of every iteration, and called exactly once per iteration.
-* If `Base.done` returns true, the iteration will stop; the next round starts again with
-  a call to `Base.start`.
-* `Base.next` will always be called only once in each iteration. It will always be called after
-  one and only one call to `Base.done`; but if `Base.done` returns true, `Base.next` will
-  not be called.
-
-With those assumptions, it will be relatively easy to adapt any existing iterator. See the implementation
-of the built-in `MXDataProvider` for an example.
-
-!!! note
-    Please do not use the same data provider simultaneously in two different places, either in parallel,
-    or in a nested loop. For example, the behavior of the following code is undefined:
-
-    ```julia
-    for batch in data
-      # updating the parameters
-
-      # now let's test the performance on the training set
-      for b2 in data
-        # ...
-      end
-    end
-    ```
-
-```@docs
-mx.get_batch_size
-mx.provide_data
-mx.provide_label
-```
-
-## AbstractDataBatch interface
-
-```@docs
-mx.AbstractDataProviderState
-mx.count_samples
-mx.get_data
-mx.get_label
-mx.get
-mx.load_data!
-mx.load_label!
-```
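A typical consumption loop combines `eachbatch` with the batch accessors documented above; a minimal sketch, where `provider` is any `AbstractDataProvider`:

```julia
for batch in mx.eachbatch(provider)
    n   = mx.count_samples(provider, batch)  # may be smaller than batch_size on the last batch
    xs  = mx.get_data(provider, batch)       # NDArrays, in mx.provide_data order
    ys  = mx.get_label(provider, batch)      # NDArrays, in mx.provide_label order
    # ... feed xs/ys into an executor ...
end
```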
-
-## Implemented providers and other methods
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["io.jl"]
-```
diff --git a/julia/docs/src/api/kvstore.md b/julia/docs/src/api/kvstore.md
deleted file mode 100644
index e6bf852b2f43..000000000000
--- a/julia/docs/src/api/kvstore.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# Key-Value Store
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["kvstore.jl"]
-```
diff --git a/julia/docs/src/api/metric.md b/julia/docs/src/api/metric.md
deleted file mode 100644
index 163f27a5b397..000000000000
--- a/julia/docs/src/api/metric.md
+++ /dev/null
@@ -1,27 +0,0 @@
-
-# Evaluation Metrics
-
-Evaluation metrics provide a way to evaluate the performance of a learned model.
-This is typically used during training to monitor performance on the validation
-set.
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["metric.jl"]
-```
diff --git a/julia/docs/src/api/model.md b/julia/docs/src/api/model.md
deleted file mode 100644
index 63137532de6a..000000000000
--- a/julia/docs/src/api/model.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# Model
-
-The model API provides a convenient high-level interface for training and prediction on
-a network described using the symbolic API.
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["model.jl"]
-```
diff --git a/julia/docs/src/api/ndarray.md b/julia/docs/src/api/ndarray.md
deleted file mode 100644
index 640e8b3ec372..000000000000
--- a/julia/docs/src/api/ndarray.md
+++ /dev/null
@@ -1,74 +0,0 @@
-
-# NDArray API
-
-## Arithmetic Operations
-
-In the following example `y` can be a `Real` value or another `NDArray`.
-
-| API | Example  |                            |
-|-----|----------|----------------------------|
-| `+` | `x .+ y` | Elementwise summation      |
-| `-` | `x .- y` | Elementwise subtraction    |
-| `*` | `x .* y` | Elementwise multiplication |
-| `/` | `x ./ y` | Elementwise division       |
-| `^` | `x .^ y` | Elementwise power          |
-| `%` | `x .% y` | Elementwise modulo         |
-
-
-## Trigonometric Functions
-
-| API            | Example    |                             |
-|----------------|------------|-----------------------------|
-| [`sin`](@ref)  | `sin.(x)`  | Elementwise sine            |
-| [`cos`](@ref)  | `cos.(x)`  | Elementwise cosine          |
-| [`tan`](@ref)  | `tan.(x)`  | Elementwise tangent         |
-| [`asin`](@ref) | `asin.(x)` | Elementwise inverse sine    |
-| [`acos`](@ref) | `acos.(x)` | Elementwise inverse cosine  |
-| [`atan`](@ref) | `atan.(x)` | Elementwise inverse tangent |
-
-
-## Hyperbolic Functions
-
-| API             | Example     |                                        |
-|-----------------|-------------|----------------------------------------|
-| [`sinh`](@ref)  | `sinh.(x)`  | Elementwise hyperbolic sine            |
-| [`cosh`](@ref)  | `cosh.(x)`  | Elementwise hyperbolic cosine          |
-| [`tanh`](@ref)  | `tanh.(x)`  | Elementwise hyperbolic tangent         |
-| [`asinh`](@ref) | `asinh.(x)` | Elementwise inverse hyperbolic sine    |
-| [`acosh`](@ref) | `acosh.(x)` | Elementwise inverse hyperbolic cosine  |
-| [`atanh`](@ref) | `atanh.(x)` | Elementwise inverse hyperbolic tangent |
-
-
-## Activation Functions
-
-| API                   | Example           |                         |
-|-----------------------|-------------------|-------------------------|
-| [`σ`](@ref)           | `σ.(x)`           | Sigmoid function        |
-| [`sigmoid`](@ref)     | `sigmoid.(x)`     | Sigmoid function        |
-| [`relu`](@ref)        | `relu.(x)`        | ReLU function           |
-| [`softmax`](@ref)     | `softmax.(x)`     | Softmax function        |
-| [`log_softmax`](@ref) | `log_softmax.(x)` | Softmax followed by log |
-
-
-## Reference
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = listpages("ndarray")
-```
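A quick sketch of how the operators from the tables above compose, using the broadcast forms as listed (`copy` converts the result into a Julia `Array` for inspection):

```julia
using MXNet

x = mx.ones(2, 3)      # 2×3 NDArray of Float32 ones
y = x .* 2             # elementwise multiplication; the scalar is broadcast
z = x .+ y             # elementwise summation of two NDArrays
s = mx.sigmoid.(z)     # sigmoid activation, applied elementwise
copy(s)                # copy into a Julia Array to inspect the values
```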
diff --git a/julia/docs/src/api/nn-factory.md b/julia/docs/src/api/nn-factory.md
deleted file mode 100644
index 70ecfd2f0157..000000000000
--- a/julia/docs/src/api/nn-factory.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# Neural Network Factory
-
-The neural network factory provides convenient helper functions for defining
-common neural networks.
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["nn-factory.jl"]
-```
diff --git a/julia/docs/src/api/optimizer.md b/julia/docs/src/api/optimizer.md
deleted file mode 100644
index 28d01cc9fd89..000000000000
--- a/julia/docs/src/api/optimizer.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# Optimizers
-
-Say you have the parameter `W` initialized for your model and
-its gradient stored as `∇` (perhaps from the AutoGrad APIs).
-Here is a minimal snippet that updates the parameter `W` with `SGD`.
-
-```@repl
-using MXNet
-
-opt = SGD(η = 10)
-descend! = getupdater(opt)
-
-W = NDArray(Float32[1, 2, 3, 4]);
-∇ = NDArray(Float32[.1, .2, .3, .4]);
-
-descend!(1, ∇, W)
-```
-
-```@autodocs
-Modules = [MXNet.mx, MXNet.mx.LearningRate, MXNet.mx.Momentum]
-Pages = ["optimizer.jl"]
-```
-
-## Built-in optimizers
-
-### Stochastic Gradient Descent
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["optimizers/sgd.jl"]
-```
-
-### ADAM
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["optimizers/adam.jl"]
-```
-
-### AdaGrad
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["optimizers/adagrad.jl"]
-```
-
-### AdaDelta
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["optimizers/adadelta.jl"]
-```
-
-### AdaMax
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["optimizers/adamax.jl"]
-```
-
-### RMSProp
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["optimizers/rmsprop.jl"]
-```
-
-### Nadam
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["optimizers/nadam.jl"]
-```
diff --git a/julia/docs/src/api/symbolic-node.md b/julia/docs/src/api/symbolic-node.md
deleted file mode 100644
index 785dda87fbde..000000000000
--- a/julia/docs/src/api/symbolic-node.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# Symbolic API
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = listpages("symbolic-node")
-```
diff --git a/julia/docs/src/api/visualize.md b/julia/docs/src/api/visualize.md
deleted file mode 100644
index e401a888cc81..000000000000
--- a/julia/docs/src/api/visualize.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# Network Visualization
-
-```@autodocs
-Modules = [MXNet.mx]
-Pages = ["visualize.jl"]
-```
diff --git a/julia/docs/src/index.md b/julia/docs/src/index.md
deleted file mode 100644
index e21bc165457a..000000000000
--- a/julia/docs/src/index.md
+++ /dev/null
@@ -1,50 +0,0 @@
-
-# MXNet Documentation
-
-[MXNet.jl](https://github.com/dmlc/MXNet.jl) is the
-[Julia](http://julialang.org/) package of
-[dmlc/mxnet](https://github.com/dmlc/mxnet). MXNet.jl brings flexible and efficient GPU
-computing and state-of-the-art deep learning to Julia. Some highlights include:
-
-* Efficient tensor/matrix computation across multiple devices,
-  including multiple CPUs, GPUs and distributed server nodes.
-* Flexible symbolic manipulation to compose and construct
-  state-of-the-art deep learning models.
-
-For more details, see the documentation below. Please also check out the
-[examples](https://github.com/apache/incubator-mxnet/tree/master/julia/examples) directory.
-
-## User's Guide
-
-```@contents
-Pages = [
-  "user-guide/install.md",
-  "user-guide/overview.md",
-  "user-guide/faq.md",
-]
-Depth = 2
-```
-
-## API Documentation
-
-```@contents
-Pages = api_pages
-Depth = 2
-```
diff --git a/julia/docs/src/user-guide/faq.md b/julia/docs/src/user-guide/faq.md
deleted file mode 100644
index 2799584f5472..000000000000
--- a/julia/docs/src/user-guide/faq.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-FAQ
-===
-
-Running MXNet on AWS GPU instances
-----------------------------------
-
-See the discussions and notes
-[here](https://github.com/dmlc/MXNet.jl/issues/43).
diff --git a/julia/docs/src/user-guide/install.md b/julia/docs/src/user-guide/install.md
deleted file mode 100644
index 129b6a190c5b..000000000000
--- a/julia/docs/src/user-guide/install.md
+++ /dev/null
@@ -1,109 +0,0 @@
-
-Installation Guide
-==================
-
-Automatic Installation
-----------------------
-
-To install MXNet.jl, simply type
-
-```julia
-Pkg.add("MXNet")
-```
-
-in the Julia REPL. Or, to use the latest git version of MXNet.jl, use the
-following command instead:
-
-```julia
-Pkg.checkout("MXNet")
-```
-
-MXNet.jl is built on top of [libmxnet](https://github.com/dmlc/mxnet).
-Upon installation, Julia will try to automatically download and build
-libmxnet.
-
-There are several environment variables that change this behaviour.
-
-- `MXNET_HOME`: If you already have a pre-installed version of mxnet,
-  you can use `MXNET_HOME` to point the build process in the right direction.
-- `CUDA_HOME`: If the automatic CUDA detection fails, you can also set `CUDA_HOME`
-  to override the process.
-- `MXNET_COMMIT`: To control which version of libmxnet will be compiled,
-  you can use the `MXNET_COMMIT` variable to point to either a version tag
-  (e.g. `v0.10.0`), a branch name (e.g. `master`) or a specific commit hash
-  (e.g. `a0b1c2d3`).
-- `CC`: The path of the C compiler.
-- `CXX`: The path of the C++ compiler.
-- `ADD_CFLAGS`: Additional C flags. For instance,
-  if you need to point to a non-standard include directory, set it as
-  `ENV["ADD_CFLAGS"] = "-I'/path/to/include/dir'"`.
-- `ADD_LDFLAGS`: Additional linker flags.
-- `USE_JEMALLOC`: Enabled by default if jemalloc is available.
-  If you run into segfaults caused by jemalloc,
-  please try to disable it.
-
-  ```julia
-  # first remove whole libmxnet source: Pkg.dir("MXNet", "deps", "src")
-  ENV["USE_JEMALLOC"] = "0"
-  Pkg.build("MXNet")
-  ```
-
-The libmxnet source is downloaded to `Pkg.dir("MXNet", "deps", "src", "mxnet")`.
-The automatic build uses the default configuration, with OpenCV disabled.
-If the compilation fails due to an unresolved dependency, or if
-you want to customize the build, you can compile and
-install libmxnet manually. Please see below for more details.
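For example, a customized automatic build might look like the following sketch, using the environment variables listed above (the values shown are placeholders):

```julia
# Rebuild libmxnet at a specific tag, with a chosen compiler and extra include path.
ENV["MXNET_COMMIT"] = "v0.10.0"                  # version tag, branch name, or commit hash
ENV["CXX"] = "g++"                               # C++ compiler to use
ENV["ADD_CFLAGS"] = "-I'/path/to/include/dir'"   # extra C flags
Pkg.build("MXNet")
```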
-
-Manual Compilation
-------------------
-
-It is possible to compile libmxnet separately and point MXNet.jl to an
-existing library, in case automatic compilation fails due to
-unresolved dependencies in a non-standard environment, or when you want
-to work with a separate, possibly customized, libmxnet.
-
-To build libmxnet, please refer to [the installation guide of
-libmxnet](https://mxnet.apache.org/install/index.html). After
-successfully installing libmxnet, set the `MXNET_HOME` *environment
-variable* to the location of libmxnet. In other words, the compiled
-`libmxnet.so` should be found in `$MXNET_HOME/lib`.
-
-> **note**
->
-> The constant `MXNET_HOME` is pre-compiled in the MXNet.jl package cache.
-> If you updated the environment variable after installing MXNet.jl,
-> make sure to update the pre-compilation cache with
-> `Base.compilecache("MXNet")`.
-
-When the `MXNET_HOME` environment variable is detected and the
-corresponding `libmxnet.so` can be loaded successfully, MXNet.jl will
-skip the automatic build during installation and use the specified
-libmxnet instead.
-
-Basically, MXNet.jl searches for `libmxnet.so` or `libmxnet.dll` in the
-following paths (and in that order):
-
-- `$MXNET_HOME/lib`: customized libmxnet builds
-- `Pkg.dir("MXNet", "deps", "usr", "lib")`: automatic builds
-- Any system-wide library search path
-
-Note that MXNet.jl cannot load `libmxnet.so`, even if it is on one of
-the paths above, if a library it depends upon is missing from
-`LD_LIBRARY_PATH`. Thus, if you are going to compile with CUDA support, the
-path to the CUDA libraries will have to be added to `LD_LIBRARY_PATH`.
diff --git a/julia/docs/src/user-guide/overview.md b/julia/docs/src/user-guide/overview.md
deleted file mode 100644
index 6b8d954ee4a1..000000000000
--- a/julia/docs/src/user-guide/overview.md
+++ /dev/null
@@ -1,416 +0,0 @@
-
-# Overview
-
-## MXNet.jl Namespace
-
-Most of the functions and types in MXNet.jl are organized in a flat
-namespace. Because many of these functions conflict with existing
-names in the Julia Base module, we wrap them all in an `mx` module. The
-convention for accessing the MXNet.jl interface is to use the `mx.`
-prefix explicitly:
-
-```julia
-julia> using MXNet
-
-julia> x = mx.zeros(2, 3) # MXNet NDArray
-2×3 mx.NDArray{Float32} @ CPU0:
- 0.0 0.0 0.0
- 0.0 0.0 0.0
-
-julia> y = zeros(eltype(x), size(x)) # Julia Array
-2×3 Array{Float32,2}:
- 0.0 0.0 0.0
- 0.0 0.0 0.0
-
-julia> copy!(y, x) # Overloaded function in Julia Base
-2×3 Array{Float32,2}:
- 0.0 0.0 0.0
- 0.0 0.0 0.0
-
-julia> z = mx.ones(size(x), mx.gpu()) # MXNet NDArray on GPU
-2×3 mx.NDArray{Float32} @ GPU0:
- 1.0 1.0 1.0
- 1.0 1.0 1.0
-
-julia> mx.copy!(z, y) # Same as copy!(z, y)
-2×3 mx.NDArray{Float32} @ GPU0:
- 0.0 0.0 0.0
- 0.0 0.0 0.0
-```
-
-Note that functions like `size` and `copy!`, which are extensively overloaded for
-various types, work out of the box. But functions like `zeros` and
-`ones` would be ambiguous, so we always use the `mx.` prefix. If you
-prefer, the `mx.` prefix can be used explicitly for all MXNet.jl
-functions, including `size` and `copy!`, as shown in the last line.
-
-## Low Level Interface
-
-### `NDArray`
-
-`NDArray` is the basic building block of the actual computations in
-MXNet. It is like a Julia `Array` object, with some important
-differences listed here:
-
-- The actual data could live on different `Context`s (e.g. GPUs). For
-  some contexts, iterating over the elements one by one is very slow,
-  so indexing into an NDArray is not recommended in general. The easiest
-  way to inspect the contents of an NDArray is to use the `copy`
-  function to copy the contents as a Julia `Array`.
-- Operations on `NDArray`s (including basic arithmetic and neural
-  network related operators) are executed in parallel with automatic
-  dependency tracking to ensure correctness.
-- There are no generics in `NDArray`; the `eltype` is always
-  `mx.MX_float`, because for applications in machine learning, single
-  precision floating point numbers are typically the best choice balancing
-  precision, speed and portability. Also, since libmxnet is
-  designed to support multiple languages as front-ends, it is much
-  simpler to implement with a fixed data type.
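A tiny sketch of the inspect-by-copy pattern from the first point (assuming a CPU context):

```julia
using MXNet

a = mx.zeros((2, 3), mx.cpu())  # lives on a device context; avoid element-wise reads
b = copy(a)                     # copy the whole contents into a Julia Array
typeof(b)                       # Array{Float32,2}
```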
-
-While most of the computation is hidden inside libmxnet, in operators
-corresponding to various neural network layers, getting familiar with
-the `NDArray` API is useful for implementing an `Optimizer` or customized
-operators in Julia directly.
-
-The following are common ways to create `NDArray` objects:
-
-- `NDArray(undef, shape...; ctx = context, writable = true)`:
-  create an uninitialized array of a given shape on a specific device.
-  For example,
-  `NDArray(undef, 2, 3)`, `NDArray(undef, 2, 3, ctx = mx.gpu(2))`.
-- `NDArray(undef, shape; ctx = context, writable = true)`
-- `NDArray{T}(undef, shape...; ctx = context, writable = true)`:
-  create an uninitialized array with the given element type `T`.
-- `mx.zeros(shape[, context])` and `mx.ones(shape[, context])`:
-  similar to Julia's built-in `zeros` and `ones`.
-- `mx.copy(jl_arr, context)`: copy the contents of a Julia `Array` to
-  a specific device.
-
-Most of the convenience functions like `size`, `length`, `ndims` and
-`eltype` on array objects should work out of the box. Although indexing
-is not supported, it is possible to take *slices*:
-
-```@repl
-using MXNet
-a = mx.ones(2, 3)
-b = mx.slice(a, 1:2)
-b[:] = 2
-a
-```
-
-A slice is a sub-region sharing the same memory as the original
-`NDArray` object. A slice is always a contiguous piece of memory, so only
-slicing on the *last* dimension is supported. The example above also
-shows a way to set the contents of an `NDArray`.
-
-```@repl
-using MXNet
-mx.srand(42)
-a = NDArray(undef, 2, 3)
-a[:] = 0.5 # set all elements to a scalar
-a[:] = rand(size(a)) # set contents with a Julia Array
-copy!(a, rand(size(a))) # set value by copying a Julia Array
-b = NDArray(undef, size(a))
-b[:] = a # copying and assignment between NDArrays
-```
-
-Note that due to the intrinsic design of the Julia language, a normal
-assignment
-
-```julia
-a = b
-```
-
-does **not** copy the contents of `b` to `a`. Instead, it just
-makes the variable `a` point to a new object, which is `b`.
-Similarly, in-place arithmetic does not work as expected:
-
-```@repl inplace-macro
-using MXNet
-a = mx.ones(2)
-r = a # keep a reference to a
-b = mx.ones(2)
-a += b # translates to a = a + b
-a
-r
-```
-
-As we can see, `a` has the expected value, but instead of updating in
-place, a new `NDArray` is created and `a` is set to point to this new object. If
-we look at `r`, which still references the old `a`, its contents have
-not changed. There is currently no way in Julia to overload
-operators like `+=` to get customized behavior.
-
-Instead, you will need to write `a[:] = a + b`, or if you want a *real*
-in-place `+=` operation, MXNet.jl provides a simple macro `@mx.inplace`:
-
-```@repl inplace-macro
-@mx.inplace a += b
-macroexpand(:(@mx.inplace a += b))
-```
-
-As we can see, it translates the `+=` operator into an explicit `add_to!`
-function call, which calls into libmxnet to add the contents of `b`
-into `a` directly. For example, the following is the update rule in the
-`SGD Optimizer` (both gradient `∇` and weight `W` are `NDArray` objects):
-
-```julia
-@inplace W .+= -η .* (∇ + λ .* W)
-```
-
-Note that there is not much magic in `mx.inplace`: it only does a shallow
-translation. In the SGD update rule example above, computations like
-scaling the gradient by `grad_scale` and adding the weight decay all
-create temporary `NDArray` objects. To mitigate this issue, libmxnet has a
-customized memory allocator designed specifically to handle this kind of
-situation. The following snippet does a simple benchmark on allocating
-temporary `NDArray`s vs. pre-allocating:
-
-```julia
-using Benchmark
-using MXNet
-
-N_REP = 1000
-SHAPE = (128, 64)
-CTX = mx.cpu()
-LR = 0.1
-
-function inplace_op()
-  weight = mx.zeros(SHAPE, CTX)
-  grad = mx.ones(SHAPE, CTX)
-
-  # pre-allocate temp objects
-  grad_lr = NDArray(undef, SHAPE, ctx = CTX)
-
-  for i = 1:N_REP
-    copy!(grad_lr, grad)
-    @mx.inplace grad_lr .*= LR
-    @mx.inplace weight -= grad_lr
-  end
-  return weight
-end
-
-function normal_op()
-  weight = mx.zeros(SHAPE, CTX)
-  grad = mx.ones(SHAPE, CTX)
-
-  for i = 1:N_REP
-    weight[:] -= LR * grad
-  end
-  return weight
-end
-
-# make sure the results are the same
-@assert(maximum(abs(copy(normal_op() - inplace_op()))) < 1e-6)
-
-println(compare([inplace_op, normal_op], 100))
-```
-
-The comparison on my laptop shows that `normal_op`, while allocating a
-lot of temporary NDArrays in the loop (the performance gets worse when
-`N_REP` is increased), is only about twice as slow as the pre-allocated
-one.
-
-| Row | Function      | Average   | Relative | Replications |
-| --- | ------------- | --------- | -------- | ------------ |
-| 1   | "inplace\_op" | 0.0074854 | 1.0      | 100          |
-| 2   | "normal\_op"  | 0.0174202 | 2.32723  | 100          |
-
-So this will usually not be a big problem unless such allocation is the
-bottleneck of the computation.
-
-### Distributed Key-value Store
-
-The type `KVStore` and related methods are used for data sharing across
-different devices or machines. It provides a simple and efficient
-integer-to-`NDArray` key-value storage system that each device can pull
-from or push to.
-
-The following example shows how to create a local `KVStore`, initialize
-a value and then pull it back.
-
-```@setup kv
-using MXNet
-```
-
-```@example kv
-kv = mx.KVStore(:local)
-shape = (2, 3)
-key = 3
-
-mx.init!(kv, key, mx.ones(shape) * 2)
-a = NDArray(undef, shape)
-mx.pull!(kv, key, a) # pull value into a
-a
-```
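Continuing that example, workers can also push updated values under the same key; a sketch, assuming a `push!` method analogous to the `pull!` shown above:

```julia
mx.push!(kv, key, mx.ones(shape) * 8)  # push an updated value for the key
mx.pull!(kv, key, a)                   # pull it back into a
a
```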
-
-## Intermediate Level Interface
-
-### Symbols and Composition
-
-The way we build deep learning models in MXNet.jl is to use the powerful
-symbolic composition system. It is like
-[Theano](http://deeplearning.net/software/theano/), except that we
-avoid long expression compilation times by providing *larger* neural
-network related building blocks to guarantee computation performance.
-
-The basic type is `mx.SymbolicNode`. The following is a trivial example of
-composing two symbols with the `+` operation.
-
-```@setup sym1
-using MXNet
-```
-
-```@example sym1
-A = mx.Variable(:A)
-B = mx.Variable(:B)
-C = A + B
-print(C) # debug printing
-```
-
-We get a new `SymbolicNode` by composing existing `SymbolicNode`s with some
-*operations*. A hierarchical architecture of a deep neural network could
-be realized by recursive composition. For example, the snippet below
-shows a simple 2-layer MLP construction, using a hidden layer of
-128 units and a `ReLU` activation function.
-
-```@setup fcnet
-using MXNet
-```
-
-Each time, we take the previous symbol and compose it with an operation.
-Unlike the simple `+` example above, the *operations* here are "bigger"
-ones that correspond to common computation layers in deep neural
-networks.
-
-Each of those operations takes one or more input symbols for composition,
-with optional hyper-parameters (e.g. `num_hidden`, `act_type`) to
-further customize the composition results.
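Here is a minimal sketch of that 2-layer MLP composition, assuming the standard `mx.FullyConnected`, `mx.Activation` and `mx.SoftmaxOutput` operators:

```julia
net = mx.Variable(:data)
net = mx.FullyConnected(net, name = :fc1, num_hidden = 128)
net = mx.Activation(net, name = :relu1, act_type = :relu)
net = mx.FullyConnected(net, name = :fc2, num_hidden = 64)
net = mx.SoftmaxOutput(net, name = :out)
```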
-
-When applying those operations, we can also specify a `name` for the
-result symbol. This is convenient if we want to refer to this symbol
-later on. If not supplied, a name will be automatically generated.
-
-Each symbol takes some arguments. For example, in the `+` case above, to
-compute the value of `C`, we will need to know the values of the two
-inputs `A` and `B`. For neural networks, the arguments fall primarily into two
-categories: *inputs* and *parameters*. *Inputs* are data and labels for
-the network, while *parameters* are typically trainable *weights*,
-*biases* and *filters*.
-
-When composing symbols, their arguments accumulate.
-We can list all the arguments with
-
-```@example fcnet
-mx.list_arguments(net)
-```
-
-Note that the names of the arguments are generated according to the provided
-name for each layer. We can also specify those names explicitly:
-
-```@repl
-using MXNet
-net = mx.Variable(:data)
-w = mx.Variable(:myweight)
-net = mx.FullyConnected(net, weight=w, name=:fc1, num_hidden=128)
-mx.list_arguments(net)
-```
-
-The simple fact is that a `Variable` is just a placeholder `mx.SymbolicNode`.
-In composition, we can use arbitrary symbols for arguments. For example:
-
-```@repl
-using MXNet
-net = mx.Variable(:data)
-net = mx.FullyConnected(net, name=:fc1, num_hidden=128)
-net2 = mx.Variable(:data2)
-net2 = mx.FullyConnected(net2, name=:net2, num_hidden=128)
-mx.list_arguments(net2)
-composed_net = net2(data2=net, name=:composed)
-mx.list_arguments(composed_net)
-```
-
-Note that we use the composed symbol `net` as the argument `data2` for `net2`
-to get a new symbol, which we named `:composed`. This also shows that a
-symbol itself is a callable object, which can be invoked to fill in
-missing arguments and get more complicated symbol compositions.
-
-### Shape Inference
-
-Given enough information, the shapes of all arguments in a composed
-symbol can be inferred automatically. For example, given the input
-shape, and some hyper-parameters like `num_hidden`, the shapes of the
-weights and biases in a neural network can be inferred.
-
-```@repl infer-shape
-using MXNet
-net = mx.Variable(:data)
-net = mx.FullyConnected(net, name=:fc1, num_hidden=10)
-arg_shapes, out_shapes, aux_shapes = mx.infer_shape(net, data=(10, 64))
-```
-
-The returned shapes correspond to the arguments, in the same order as
-returned by `mx.list_arguments`. The `out_shapes` are shapes of the
-outputs, and `aux_shapes` can be safely ignored for now.
-
-```@repl infer-shape
-for (n, s) in zip(mx.list_arguments(net), arg_shapes)
-  println("$n\t=> $s")
-end
-```
-```@repl infer-shape
-for (n, s) in zip(mx.list_outputs(net), out_shapes)
-  println("$n\t=> $s")
-end
-```
-
-### Binding and Executing
-
-In order to execute the computation graph specified by a composed symbol,
-we will *bind* the free variables to concrete values, specified as
-`mx.NDArray`s. This will create an `mx.Executor` on a given `mx.Context`.
-A context describes the computation devices (CPUs, GPUs, etc.), and an
-executor will carry out the computation (forward/backward) specified in
-the corresponding symbolic composition.
-
-```@repl
-using MXNet
-A = mx.Variable(:A)
-B = mx.Variable(:B)
-C = A .* B
-a = mx.ones(3) * 4
-b = mx.ones(3) * 2
-c_exec = mx.bind(C, context=mx.cpu(), args=Dict(:A => a, :B => b));
-
-mx.forward(c_exec)
-c_exec.outputs[1]
-copy(c_exec.outputs[1]) # copy turns NDArray into Julia Array
-```
-
-For neural networks, it is easier to use `simple_bind`.
-By providing the shapes of the input arguments, it will perform shape inference for the
-rest of the arguments and create the NDArrays automatically. In practice,
-the binding and executing steps are hidden under the `Model` interface.
-
-**TODO** Provide pointers to the model tutorial and further details about
-binding and the symbolic API.
-
-## High Level Interface
-
-The high level interface includes the model training and prediction APIs, etc.
diff --git a/julia/examples/imagenet/ijulia-pretrained-predict/Prediction with Pre-trained Model.ipynb b/julia/examples/imagenet/ijulia-pretrained-predict/Prediction with Pre-trained Model.ipynb
deleted file mode 100644
index 878a3851e77e..000000000000
--- a/julia/examples/imagenet/ijulia-pretrained-predict/Prediction with Pre-trained Model.ipynb
+++ /dev/null
@@ -1,249 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Using Pretrained Inception-BatchNorm Network\n",
-    "\n",
-    "In this example we will show how to use a pretrained *Inception-BatchNorm* network. This network is described in the paper\n",
-    "\n",
-    "> Ioffe, Sergey, and Christian Szegedy. \"Batch normalization: Accelerating deep network training by reducing internal covariate shift.\" arXiv preprint arXiv:1502.03167 (2015).\n",
-    "\n",
-    "The pre-trained Inception-BatchNorm network can be downloaded from [this link](http://webdocs.cs.ualberta.ca/~bx3/data/Inception.zip). Running the script `get.sh` in `models/Inception/` will download and unpack it automatically."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Let us first load and display the demo image (download it from [here](https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/doc/tutorials/python/predict_image/cat.jpg) or try other images you like). You will need to install `Images.jl` and `Colors.jl` to load the image."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [
-    {
-     "data": {
-      "image/png": "<base64-encoded PNG output (the rendered demo image), elided>"
5zuDg3FgHvVjn/vYyiFrNKsLIZFclQQLSpDRgD92I43S+9zaxM1rZFKIUdEInfwJyLBqsjwACcPJAylBkPBtxprlY/M8kigAMhehcNDoCbaRLpYF+2ija1Tv/Yzp4SrSrPWmomKiLRpmqfJmk29m5mIBpBFiDiRrMoiBRUjwlo7HHpb6nBYjgfTg+nUjnZYltNyuhVtLIwR27Y1ldvb4/G0LKe5d6VpmyxZAmpr6ZXhmVkoIl8IIQK2YlcBqZUYI3PEuKzn6/VyXcd1rCO3TIekyr5LVWPbe25Sda8gYEJUm0qqqQlE9ND6ocntcV6or+rm177zG3/nF3/rtR5uXn1889Ev/cLP/dbX7z79nX/2T3Bdg8u6YZIOIDJ0EiHMSlRJVqU2UpSYiqQKyWnq86GrESwKBUIClZXISO7YfxazdF/A/4QXAFTkXmWTqCY7KF0UiWJUwZGVTIUTkKAXSwVUiJUYqIQIBKKpqmbCpmJqaqwiLTy3NS/n7fp4ffzq8vjVkz9XRVFUskyERmslbV/isfdpmu7ubt/eHu7mabJuRGZlZl19VIV648Zy1CaSlZUFsJixM6Oo1AJFLDw8wLJwZGDfeHlqVLZ5nnoTkzb1rEikmoiJSDVT94iIzIoERUnLSEmBi1eMKvesgVw9ghlpkMlaiUApZuxqTfsySVdtqpO1bv3Q9JOfma213idVVVWKQFTEBGI29W5qYq3PU+8z2VAFYhAlTJLhqKo+83BaWpNlEZ05tfkwn9RmlTa3ZpyQUiWtt8PBlsPSp2Zdp6mpCYVIZFZGjHEN30QSQsjO+NoGPGJ1+Fbna11Xv25jva7n4dvmmeXbGCTNlEqqqU0UE1XdX2coRArKKisYtZSmRnDq0+nY527L4Xhox1/57Fd/7mu/sET//I//8PIXf8CxfuMX/tHf+63/TTxczt//N4wVdiNzt9aHX1sryn7LQATsVrAEqYShTzofJptV9sW1kkURyciIRAkqXwpCCgUqiEImMxOFgkRV7oTQQkBLzZFVWZkK4f7xp4AojWK2JrCSRqhQVIzGkiIIVqmayaSlW65ZzBFxqffn9fzhLI9Za25DCGksiqVszbI10IZriPbb0+uP3356e3Naej8cTjfLq9mOkcjKbQMG6G1cKy7ktpcFLyAdmqoGkX0pK9qaSHqGZ9AldzylOIYwxGNrJn1u7dBSUk2lo6tNsxLmkSVIVEKjKob7GJJVXiPcURmFKF93xNlURERKUE10mXqT1rR16820yzS3aZmtm7757L71Zi8rATNrapYFYRKVCCpMm5Im2UTmxt7AQgEqQGF4BKIZtYlMnKw3O7Xp2KyZqnDKgujUDkubmnVbltl6t9Z6pzZ41tgiPce2jm1grCMzMjO3a54v43xdHzZ/utbTsz9u2M7jfNku1/XZaxu1eaT11lqnYOoqpqoiQlOliKgIhWUVIKgsqinMfbATGZ/ev25JL/yNb//GT03f+eov//J3/8W/+PM//u//7H/4gz/7g9/5/l/+N9/57l/79d/8xz/zzW/94Z/+tz98fP/p20+m3j88fdFa3wl5qqCxRBwhmhC1yeZlmqZmvSm5L8hU2v6SR1QGQCECRUIKQVZUFTTTUelBeYG99lUyq3KvDdz3qYXITK/QYId1pVU1UkWYJtLEQCmUlKo2CkV6KQpVoTV43bb12eO81Vp0EfSIjRJijRpqUoKy1DbN0/H1zavXd/c3p9f3t2/nw83hcNt0AjUSsQZ8Y2KLsXllaG2oACAMMTNhAK7K6dR1CZ3mdqCa+tD0LGKNTFDNSgmiH0wnzJOZ0IxspiogMuGxFekjI0eOuoxt3VwCNQpZ6Ts3kdIUxmxkE7VsXczYDjbNRiuZqbPAaLP2Q9PbrzVTmomaalM1pagQKpVSkZVZWQ5m1l4PoUKKqDbwBeEdW2aKmYq1w3w69KO1Wa0rVLSbaVF6M+uAlJiKlXXIJEHZthGxpbtfY13X1YdfuW4edvXYPCN0czurbtJG5QgfHheKgEomTPYL3Vqpqs4KK0tQrUmyoNp3dlTtbWE1MFjq5ajxjdcff/2jj1+Pu9/81r/zoz/6w9/7N//dw/qejuVwfC57/v4Xv//P/uO7Cd/5W//LX/qV3/yd/+z/8OMvf3z38Scfnh9KCW3ampioSOlebITCqdvp5kanpkqRLKKKBc+gj+GelRQEUNiJfxk7w3CfgmonBr60Q9x7YVQRidipQ7KT1UOYAjahlbKhFVrthDQUiVBRbaJNhSSiCpFIZwXGOcfFt4vnOqpk5xgIILYkVhEWA4q+nF7fv31zejXdvDoebk/Hu9vjq6nNUWXaJWpLjNxip+lnRiQieirVVQiCLQGcjoflruuxHQ/9eHvgyfrNJJ1eEYQ1scVsatMJot5677OWBKFlzoK7J0tk3zxlJisKUVIWKVlSUYxkUVSgsI7eWjNr3djEZmuLahfpgqZUQmFdpnnS/pGWkFpiKiKonT8nUSAL5ZWeXhnIqMyI8AytF1o0hEJBFkduAjnO8zTPp5s71gtJgCJmXbUlQJVAUKkqfTr0PvuI9br6GGON63Ud2zo28TG0p3UCbN0OJ5xuTBeqlXZVM20iZqCJqomqgWK01nqqUmy2vl8bO6NVMnbWpzBFcowRsOkwTRlXq/Hzbz/7tW//bTxefvAnv18jTtNNpr66ub+ZGm2Ktf3gX//Xdze33/yFv/vzP/XX/tP/6v/yOCqaVJPWRbrYJGxVpJpRyxqWuS+LNSMkgJ2CK1FIxzZGOlRMwKwEUVUQRYGQRCWquK/kWdh/ft9v/wwKFeBOkIJYgckSlqmRXpqU/XoKSJGKgiiBqkQmPdI907ldwtfL9pR5VUahHFICiVEWZq2KLi3N6rAs96/f3n70tddvbl+/uZ+XuU+tJCGCApC+xnCItVltZktPXy+qEOmKrgghptPST9bmNi3z8dXCLm3ufZkOh2NbejtObZa+aOuTNUqHNTObqAiskcPDRwzPUSg1Q6qkVFR5sXZ9U0XUtjmoUjvcKtZVTMRUm0qT3vWv+oKCtN5aN1HR6UaQgqyqRAnBysr0BCoBB0t2UUlFREXFyLQdb/KxywxSrTISolNvvfVlXvq0IKGgiE66TDaJ7kKIzHRVsWUy1W29nM+P6xbrJS+Xy/V6HRsPi928apE+zXo89uWGy0G0SZtIlLLpzglFy9zValQznWjNWmvNmrZuKipCFYpWie6SqjSqChmsFvj47g3dfvb4jV/77i//8e/+988Pj6fjrZai2eWymWmbl8Px9n2s1z//V/fdvv43/tEvf+/nf/+f/59+bAjNpReasCElCIaQFdOsx+Pcu4mKqVQlIJAeEZer+xiVOzUILwPEjg/KvuOTAD1dwMJOJdv3GcliRILKyqogZf+CfGmKFFIhxWYkKM62LzuZOVTlJ1wuzagcFQO1jXFFbOBOLWOhFNCAW2NvLTJs0tvbu/tXr9+8/ejVq/uPPnp7Op1UhcqsAiojxrZtEVW1tHbTT01nkusY41rpkBo29cPtPJ3YDq0tvTT71I83J2uqDe3A5bZN
BxNNbRSjdCsplaACEpt7VCTTY3h4VmWkghEVm2PEtnlElCIDFSVNQIiBjdDSLjaZTa2ZkKRKiYiamKlJ66KHux5bRuzzVUUiYgBZERVZyUqke4RXZlVmVkZF5nB/Wa8iSkNV19yaNRWByLIcunUmVSeTpgpTBgpC97HPKRG4XJ4+PLw7n9dt5LZu4es0692rRTRb5+EopzvtB1gTGlSIAsUICS+UVgpFKM1aNaXqYjZbExNtNvfWd1puEyFSgG5mJnPXtK2b19g+++Szv/3Z3zj/8PMP7398c3yFKpswH7pvo2xqmqp2Oh6fPqzx8Pmbt8dv/uLfP8j1//m7/6SaHeZZVaCVggKc0ZrOy9Sa9t72q4QgRbNkXdftPCKCxUpkReyvL7DjP1UV6aKiprHXLJQIdhkGwAJRYBUrWWQBoFBAsJigWC+mULSxZIhQAO5XVKIclYgRlYVkrDWq9uVDelbsSwy4ZG/CQiJ00pv7+/v716/vXr356KPTze18OBShyoKI2LatT+uzh7NoEGHvtvQ+S+q2pV+uCrSlT4c2H2U+zjK1ZjYtc5ubTW06NZuEUhRYkzZTm4q11qG9RAAmdYfRXmhRmZUeUvBIonx4RBYlc9fGclclShNrKko1tW7WdIfmEkHVHX5WI1l6840pqjKZkZmentsY4Q7ntm0ea6bvgFdEhkc6KmN/B7JGwaFCbfuwHxEBlHBp07EfkBrYaQIjJYss1hZjHRvK13U9Pz8+Pj9sq2/XQfdlstu7uTURwe19P5x0PkInoYTDKbBm+2/rxc1dyBLQVKR3aWaiJqLWtFlr3SbT1pp23eF1agPN+8SmaFbzXG/6/a9++9eeP/+wXc9N2rx068bMyYSgUddtPTSbb99Ua88/+BeHN7c/9bf+ox/83n/xJz/68/bmtTB1l6gyiGiTzlPrRlP5iSgGlRxrbKtvq1dUvUgGWLs4DXxRzxW02+3NzduP3izLvDMUwz0TpOzdEvZdDKSAQlF2sExUFE13asauhaWESok0gWZ6JQGtQBUqmV7D3auaqLVOokaOSoHQKyOpsMnu39wcTof7t28++vqnN3cHnSjK3ptnhucYvl6vl3X19bpD2qqWkeVpIruWyCBSyY5+WrSbmS7HuSaVSZbTbF21N4jSmnTTbtbZFVNvJRJA2dq6mlK43xE/2SzsAjIC2tq0TK3vC0U2FRVRocK69MlaMxGSLztqElSKSEVVBJH6tZ++EbHMcg8fSM9y+jWuV79exzZ8G+4R23AflSGVEuHukRGZQRFtbV/QCuE+2PQ6VkTcnk4JeqbHdR1nr6DI5sPHdlnPvp193S6X8+W8hlesrqz7u5vDcVap080yHbks0idhQ+QVQlVT1czyRMaL3KeYqQIxU+t9r+ZCa9ZMRERFqU3LxFo3bdm6svF26lOzudtf+/rPnsZxOz8dZCqE9oNEVUZvTZAA29TG2DLW17dvS/CDP/5X3/7F3/zZX/qb/9l/8X9+aFs7tEowMOBmOpvNU2tmKFSBFCTdc9tivbhv8XIQE+B+DUntZ7nApje3N6/ub9+8fv36zf3t7W0zZda6bfkizAaq9rEYFN813FUsTtaiZZWrFrRKqAqgZGduF4XKkozKispyTwpPy3SaZjXT1syYmeVAsEjnWG7a/avTzf3N64/evnrzqk+m/QUm2bbr8+Xy9PhwvjzGtkqFaqcZhU2lq0QGtswUH464CgGzNvXDMsvc+qHPh2Wemgj7PLXeYPKT1kVEQGNSSkssiTRTch/TX4QSWbmzT3ufeltYbM0Ox2U5zLd3N8eb+XCYe29m2kx3agF20SbAKoFI7WLu0k+/d9eamJFgRoSXCEXkul4vV79e/fLs2+q+ZXi547qtPnzfxu0diAohWXCS1J3Uko+Xx+V0uLu5iXEZ4zp8eIQIfIx1u66Xp8t1jcz1EtfLlutG+GFph5s2tb4cpM/VZ/TG1ltgZK0glW2/8BKSIQrbr9MkIU6FaiOaKK2Z7tujKpEygZm0KVVi6jpP1lrNE24Ptz/95qdfr6fnz79soM1zmUqVj3WeF0dRtaT7iEoMXt8cPsnzdr385bd+7h+JXP/r//Y/wWES6Tk2UMz0ps9Te1naibRKZiFczpdxPY/wF7k1qvBCUqmdfqvNDofl9eu7m5tDbzZP0+1pvr05zIclkdfrNbN2AGQX6OU+Pe9qVbDAkgBZrBfaxa6ZTtlZAGBVxc7RQcHU2tTuTof7+5vbm9vD6TQvfWqWGU/bRq3jfbNj3bw+fPzJx29ff7QcDtZEm0A1w7dtPD89PT5+OF8e3C+QbNYhCE8pmfvcbWop28gYq7IkSyjTcemHZtO0HA/NtApipl2bSZvVTCBClTLdN3pqIoCYklKoXVWyE0DMzGxuMCmVLBFp3VqzZV5at9774XBYlvlwWI6n4zzNZkpJH7mv0irSrJnaSNdv/PwrsaJ6MLS1w818uJmmQ2uTZXpEbtdtW2NsGQ4fft3W2FiObVwQMJFCkiVKQFlkZbpfI2Se3ry+txojR72gloHy8/n56fxwHWPb3EfmqPChwvl0sK69SW/Z5rSuvZMiWQ54wgGByIixjULt4jSvrF1PXvtpEjWqUJA7zJhCmEhrNc3ZLKVBmfevT0vrb/s3fv7tr8UPr+a+rddpmaVhJ+pN3frp5vH5WXJAatb2fHl+ePb7jz85f/nHxz7/2r/z7//L3/m//fEPf6jz7RbPaq2pHDpNhSgVK7zsttY11mtUFrijPLsEb2cyVElJt+PN6dX97ek498mmrofeWnvBMYp1vlzGdXtxUsELdY4FvpjYSAoBL0rBKKCiCJHSnfikun8vERTKrE3dDofD65vTcry5ubl9/fr+7tXN7eGofaqe02SnV215fXz15vU3Pv309es3fenWSIGq+Iinh8f37z88PDw8Pbx/Hl+hRNiqskqQmsPhUJpy6qyxrRmRlQXhLH0yNopIFkM1idZMBOho1l609ajhnjHKsqJKS4QgVFVopn2xucvcRJXatE299WYqJqIk95ZHKK331ro1m6dpPlhrHbUjB4XdlkKp3/zrbxIxMppNd3enm5vDsszLcjje3t3dLofjrGqbj1gdG2obLzw/L8Su2d5pLxqVIklGZgqlzxM1b2+XeVlSCIEaKJEVz4/P56eHaziifA33KkltOM7LZKYTdKrWlOLSQWvXWD1DTEBW6rgWUhMIKVUVmHtpcO8HACq0oAI1ShOIuhlU2E21iWp2yZL89P7Nr73+u+1hGg+fm85mWlAG4E6T5+u6LLfjmtt2OUqy8nDzpp+mh4cfv3n9cX744enbvzgJ/+v/73/uhyUvQ0QPNs+dBEQ7iCobwAjfhoy4bhwmu/JWSgMIpFAmYC315djfvLq/nZaJ6L2rWWv9cFim3sYY18t62a7uCahJq8idJha7D4OwKcL2iRfWmhjYU3upQdv24megWcVWbdJpnm7e3r89vr69v3l1f7g/HOab4/E43x0P8+3dbFNC8s3b+zcfffzq7cfH2xl9mBhlQvl6Pj88Xj+8f3j37sdfPX5xva4o7rp
9yWTJ8IqAX5Hb5p6Pl22M4FrhVdpL4NhUW5WkDm2AElaFylpRycK6XkDHfrZq32TvpEMqtIsKKoqpQk5N59a6mZl1qa6tKcW6WbOqSo9MT1SjtiatSeua4PAXKEc/++XXQE3TdLw5LIfDvMzzMh8O7XQ63dzen46nZZmsMTI2v3puCqsQVL3A1rLTYUJRgDgpJssyLcdZDW3G21dHNUCiNAJji3G+Xp/Ol9gGKit2BaSrZV+MAp1VBEyKkGZb+GW7jMoSRNIHq4zQKkiYVQeBJJIZuY0VCcJEd2nqLuFX1Uk5QRy8sPHmcKOoy+XyvU9/tn9JPD6INqpCikgV6G69sY7T6dW2ObRFstLvbw+Uev/+cZnm+9tX3/zFX/vTP/jd3//T37+fjstk0jBLiU6U9mLwQdkix3YFUkqKSBZUMguAmkKyQr1KlHc3N6fTjU6td+2Ttd5EOE2tTW3d1ut6Hdv64h4ie62T2g1YSgqNL3pvh6T2HSj2QihEbEKxChRtqkvr8+lw8+r17enV/f396e3t/f2rpU1ouajpQee5H47LfHN8++r+sCzsOingmhHX89Pjhw/P7x4+vPvyfHnYtufLeo7VCaoaCz62zPQYVeXn8XS+XC5XdWW284j0DFTvs6qQbNNkZqoUCqlII3Yp2ItcNF+cN5BFT0clBVUCNmVlQNmEikKp5E46UlOh2ovXmJpily/uMINpaw1SntvwqyP1Z379ralO83w4LctxmabpcFgOx8MyL/M8t957a603m/YFMXJkZkGlNaESQlGoiBBR5pXzbDe30/H2YItoG/OiOqm0grr78HB3v1xW9x3bBrIqHQIx2txEo2pX5mdWPl/P63ap0lHIkOEFWhVYomUZOYYTWgEIPDLcASGkdnsJqqqaKukFz8xRNeks5RPn++17848l6zwiCDYzs0ZTLxfSr353+2Y6HVqfzfrw6/n5wzQdIvLHn/+44+H19/72dz799D/+z/6P7Lg5vEamTgcKyZ3CjaT4GIiN2CUsufeygCb2Mwkt8VKPrRtu7++Px5ummBdtbWpNs3zzVZTWNHIA0WezDmmUBhWaEUhIUJTI3WMuUaUlVtxVOwQQIkpQgN77YTkux9Pb+7dv7u4Pr47TfLg5zKqkGU3Z23Q7f/TRq2U2MT1MvauOjDG2D199/vju4cPD48Pzh+en9+fL03V7Xq/rdb2OkQAyR2SFR3r4iu2S1/OzX0Ow+MZ1O+/I03JzEIOKqgIKUY0RmVGsQnrmtrqHR8a6XTKyihGxT8FCI00EL8BOJZU0K+yyu13Bmy963r3jyRebNxGKJhXWDKqJ0p/9H30iqq233qd5XuZ5OR4Oy7Is82GarfdF1URKzUynXY1f+6KxqfzErowgRCIIsM+8Oc7LzXK8XabFwLJmVAdjXdf1uoXH2Mb1WvtucoxttwnZ+THkTkPfvLYRcblexhYozRSkEhpZmVCYigHwUXTxQO+TiPjwLKkEIvfxkghwKw5kEbMzmGVX+ah//Zdv/2Y9fDjHemhdjGPLXd2FciYiiiIj4vHDVzenRU2vm/tWonI8HR4fPv/49u7tz/zmD//wX//RX/7rw+tXrU2UTWli1RopmcDqHrn5vteKZClLC0wioypMKSkZOcbww83pozf3SzNrrfcGQ5Rvvl3WazLVjCZs1iadFrNZtO19XkqHWnM4IFUsprRsk5i1nRJGyZ0bTIrZPPfjab55/cnb00e3N69OzUqboRlEVPyK0Q/Tq8M8ebzR6ZTydLnYFtvj+Xo9X87X9PH09O7dw5cj13U9b9uIdK+CSBYyZFt9u/rYhm8R1xpXzy0yM0tX94jRD1OfJ9Yg4xpO3YWwYydkRNbm2xYjM1C1W++YiLAJlLtzw7542FERM1DDMz0SyErPoIC6m/u8vA+qUClRJFK0dZuqysQUuU+K2q0dj4fD4dhaE5qIZErvXTtVOv2pIkuGXsXdq7irXXajh0oUMrNiE5X5cDgebw/U1OaJ8Ijy2LZtXSs2KQ9hRWKMsW4XADO6avk2YqCwavOq9MDYMkvgzF3MApDarUkyR8ZIRvMRplrJSgo7ioBE1TqCXLOiRLoYSDIbX1+v+b23n3xt/mw8PAevZrZdvC82tz4i3QMFCm/uby7Xy+F0q6394Ec/7N3u7t6w5Hp+UObc7/7s//Nf/vTH3/kP//3/9e/8wf9dLWkL4SJMYbFQpDh1g4q2jFEtLIKBKBQjK1SojiSrwdZ1/Oirzz/+6P70+lVwXP0JAg+HpM3E5jLXQaYJyiwgo3zbrplIN5RFpG59bJWZtKoa7tBm4aDCoITuPiU+Bsg2ifbox7Y002Ybsjo91vO6vlmWG2+v/fC1ab6Pfj2v26y51uV6/ZHjDy/PD9fHL7768YoV7jEq4Cksf9ahpTcSa1a51C55HRJo6iO9gEGPuvD61Q/e397eDo7xnJgm1a1MRYssaBVDm/Tsa60NbSdBKRvRdi+wQkVGVSJ2f7ytqmUiYjg4qRBoU5vnWVjUbFooZkUBO8lWoM3qZpmsBLsIWK2sYZrazc2h9UVEBOruYswqX3M7rCOWLY/Ei6cNALx4hGTV8HH1AnhvNjf0ztaXJlq9FSXW7bz52Lb0VVaPGiODY91iJFWGr2JaYNaguOdWhTFkHZlCr6HVqwopKJaWJHNgbOruJlrFbd32Kx8imW42i2lRipaQyCatgTrLTT/qF+/O3/ruLa8tfWvVxYwZtJpmg3CMHNulqswO8zxPh9O2XWVc/XppNh0Oh8enr8a66aw//K/+r9/9h//47/yt3/p//+t/MX26AHNl7nAzioiLwPWFFAYa3GO3hJMSowSQFTtlCpJfPnzxx9+frXGeoioKkcyi9FkP6FyxjSHSTRQZXiNyBqQC2zrWdXjMvmVErON5kC/+CLpbldm+PvAYw70QTTsSObwtjYqoTdzvq1WcfgGvvqEnEemBoy05FYRrjYue7zY+5oc/ef9HQp6wfFifUIDIGGPEFVLoypGZGJFWbVQObGu6u0paVaZjexzv4qvT6XD36WlgmBcz+jKLABqAC8fUFRG+iSgydv+yoKhKAzQzlZJkeIytqjwqPSM9vPLZQ6HSbJrOh9maoZtGyRjr7kbT2qQmHpuoG/f5A4Bkm6Qv2uc2z936DKaPls9cR7Sp9blP2zTPs3uoKn5ioEuRQqxrdPbWrbUpXYmumLoetFNkZLpvWwUiar3GuIyxbT7ol9XDtWuojAjAsgLpVZFlmUVY5e6kIJmIUeXjmlvTXslxHbudtGdQCUh6SIc13UmwomI6dZtb782OrSZc5dXXXs2nKda5IlEyVp9n86rIrWezss4pxK+r13YOhx7vrs/Ps0X4dX18b8qPX70J1W199+7dD7724fm3fuN/8Tv/5r+xOVnqDpSjZMt1zS0otGRCiREjIaB6VCZIjeEisF5NOffD4Pjw/NUPP7SP7m5VNWvsWgbrdtSDmNgQlE7dVOeqUpl2n9YxxroOH7WuY4z1fK01W5lHXcjazXeBlEpRwHZiDZj9/HR5nJ8oSpePng+vPh+3l6k5Ro
0Pj+8vz89cZhYk89Xd3duPP1mO84/j8XdTJ8zXzT9AMpG78STkvF5R7+HctgxHPsu4VgQ8eN1WHSXQgitsffIf/cWX/TTLzKC7SuCiuheBjIGInfIXuycod0btLprWUrHy0VuXfsCE3fobW5Y2um8eVXE+Xx+fx7zY6bjcHA40e5EkvVjXbECamf70r3+klACs2en2Zl6O1vvUWp9Vm9B6Ro2xDs91g/u2bSMzzJQQUZ3miaYQS7Xe5uN8WKbjNB2X5STaRQyqplqhl/NlXZ98eFzL1+15rNdzhiN3xxZkVe4WCFERUWNoJVUMbkyJTZDMLfw8fEspI6oyhIaq/Yil5+7q27vObRZRUW3W5nmel6W3aeo2lW1pn96+fbPZPC7nS9U6dgwuSdsJWfkiCuzNhLoss5nSpB9n65K+Ssntq1chxGZ36V//7nf+5Me/9/n5Q5+P59qCW+XY4tmLHlo5alRsGKO2DdcNI8srKsMoU5fp0GWuZlzmhYrEamoQjUJmQWiqZkqRaZ447fQi09YPh6X3pk2Xm8P98Waej6qihj7bfDgUi4SZggTUKGQkC7Cm/e4wTTcHPcwQlQ/bJ9+vj//t4Pvr5d2HL5+ent69f356QrO6buP58nB5+vDuKxY/++wzbfL5ux89rI9PHA/xkBVQBYU0CEs8gteRdN82Oa/wrXAObppOVGpWgVGSTDvyeNNVubGSIyvSMTbdbcK2bYtwoShVQGEjVESsmZn03vvcp95b671rn1Rk75VKyNZEOrUXZada6XJcluVwmBYjwz29ICRFf+bXP9pda+Z5OR6Os7FJibU20cyYNbYxho8ttnG9btvYtoisgjXtTc20TY1ddm7Wcjgel2We5953GoaaECN8retlu1yetmvUZtvq63Vsl2vGgGgpdyMd7GyPEqKhGMF1q0zWoAfGNWJjOjMyPU1apoyxDY9KRURalNbUmmgTk9abcudCtWU6zXZsdtOmvvm2SPvW4d7er8USEsNH7DY9jowSLxmq2tuhJJd5Pp1OBSSaQJsoIsa22tQvl+fhT29/7hfeHN/+zr/451vPzX3lqC1Xr3S619iK14ix+cC4MDfIbnQlsC5tEjHR1tAJRrcWw8HIqoqsGoXISoq0qbW5mU3adqGJUqFN+zRbb9b7PE8UtnmSppPVpBpAwRdrQBUSyooi9GBtWe70/uZU/e49vva5fP0HUe8e3/kHBDQL1nWeu/SojKrZpvL44bsv4lrf/dovvI/HP3r/p1+Oy7kC6ASqPBBQ7Bp2MhIpQ5U9Pcbz8AsqtEYVLMpB2lztKPNhkh4piEpUZUX4KqIRta5rhJkuTCB37jelT33u8zIfl+lwPE3zZM2s67RM09yW49Q7wQgJtNRJWlfrDYouebpd+qEndhK9Ib2JWaSUwkx7M91HDEXKGtVqjEzJzEwvDMHublW9yeCujRRVlkjXiY6LX4haloOJwdU3TJPWagO1rpfLZbs+Y7uUb9f1um2r/2SAJksySMB3t8XdUCoZGzIKyAzsfPoazID77qrjGZGZO89jOenuNiwCAcIzNNGkChkC9KnfdJ29yXE+eeLp4bGezzm8qrQ4NpFwlS5oLHMfpaWMKF639XBzJ2vVOI9thXvF1ppw226Op+fzh6ff/aOf/pW/+7Pf/af/7A//+XR/g6t6RgrGyPXsNWKsOTZZrxHO8paJXZYpzUJ2znhSV6FkkpSHh+friKnrMps6hgnrrvdmNO0ibDnKUyKzKNpEKWPEiLSpT4f5iPl6fn4+Pw3BWJF1qQxP7wmrMGGzg5jx3Tr/6Pwdb5+09pjn1eqeN0PQD8fhyOvWhEXpE7lu6HVAfPnl5996eN58fPnhR96N1vyyiVaABCUqcfUW1cgobxU+0pydQamAoDJRAsmcelPGdr1qn8BKS2eZKAWZvvtkV8AzWMXcmaGqynme52kyXXq3TJeWu/fMSQ6ZOXy5Xk7vHz48Xy+bb6xcepumGVmbb0X0Q6upx2VNwRjDnGMnECgLFaCWWqav6yoSGVy36zYu7usW15Fr5RYRfDGD2vXmAKr3aQzftgFymmdVZcp2HQJS7Xz1x4fz5Rqxruu6Xq+embudfDpZTKYRpbXtcswMKS0yS30LCWQhRlXsPSWqPP7KbhHQriKiWSPHEFftTK7DFaQ5YrXtuU3HNs2zTtKsj4x3V+1t27b1Ovq8bPWU6zZ11T5BxdpMcmT6GB/ev/vqi68Oh8Plcp6mab1cjocufVJp07LczK9+8Kd/9DPf/u5v/83f/qM//ldn9Ofi5sNHrteoDXHVbWQM8ZE+IkdBVK0X6ekqQLmMfVZgVolGIS7XM1Ib5wgBzLdzblNWp2RKFWy9DqBUEGa0EpGit65m4tlynsBqk52f5HwRH+uGUTDFvMy32jW38bUP/qvt+Dan82V78tVEqWbCy/WqOi2HQ0YsMEFu6ad+aBd73J7Pfp6l9d4m4uGyebqRJppehSyVbbhCUVHCSCKV2ltnbOCg5yBpXbRZkVWVnioB3T2ZTamArGMrAkgWACnJpma9995UlYrpQDIrSmAqEFMz80RDm07WbnCzHq7XLYZPvROa7lHJim4dkowKiqfo9/7GRzvLYmq2TL3PE00LvnPZfYzn89Pz5fl8PT89Pzw9P43LcC+AKoKftKM07uTcEau0dry7FbAIrw0lVXK5rg9PHy7ns4/h7h4Yq1eC+xQVQBCFGFERUkQgI4GWgdgSXrFVDI+xE5R2R0FqMctb47S0QmBkJbPCzChIEC9WI+XhQl36sYs2wRzy6kO0hPt4vmygtkXcfaxXoUMyqyIK1m5Pp+Px6OvapikitYmK+4jVx3pxj6oxTq8OPS+ffOe7lw+X3/3zf/O+1hF5fvb1GnHe3HP3dYvIXedFAoqSrMydqEkUaZSJFI+ozBwDDpPetffWBFAxRUNKelwv23YdgSShJUxCBBAhutoWHo7wYcC0TNIOyLisZy15Nb+N28Mc+Zv5+u+8/bm3vHnWfL8+WdZhPqLoLFXr0ltrxZQIZrTZBOGJ8/Z0/+btyPzdH/wP18rzCC8HKyhMk2QJUhUQZsRgrJZX1lq1BkZg7Efaj7ft+Gqxg7QZras1YVOzJrp7IWAbI4YbOwFVsybWupmpCqS1uauBWm2yeTI1qpJGUWlzp5LK1uywHA7HpXURLaO2ZiroqpN1UfWq6xhmJEQSHFGeY/U1t5pNygo5xubX9fm6ni/X83l9uozn7ZomKjZn7hqFrJLYPTwkrNV5fbxc7qY251jZK4b33guBKt82hO/mmlXcLSKLRdWsQkqSFhiZEE3fXd+QUbFrMr0yMzxRJTvDX9EgS2siO9N9D76p9XKFIMaQLdR7IyrtistTPfbb3TF9ntDSB1jB8fwYco23H71aun7xwx/per053bW2xLZe0O5ev755+/HY4jjG5fnd8c3rD+/eny8P2/X8+vbGx1Yh58vjcrn8w9/6j37n3/y3l8cPn4/YRvnQWadrnN03ZFBEpAEshIAVVDF6ZVK7jnVDWQb3HyrDr7nJ0Ll1CQU0N3hdIVMiLufr6
ljH9XRziB7NtJ0mSosUYwmq0hEvROh76jjdLRg3yWlpy8a/PX3v3/v6L63FR+IaMfU+VVuzTGyxHrSuLT1MGmwXVtaIbJbT3GrDx3ef3s+3f7l94caezd0TJSa7kQvJ3TCrdjqK7UA0X1rpsp2p0TqWg2mvYjq004Da29+IyPSIYdLI/pKKArg7lTWGjzYsijJps247jJmoAkIHoTNvs21ZoxDuNZIYE3PkNgQ0a631RJ23s37v197ihTZfu/PULjSKcvdxvl6u1+u2bZfL5Xq5jHUd7jvuny/JMTtHrYAKjwp6ZDGtWdTIyM0vo7aRl/Pl6Xy+RGZERo7IjIjC7h/74huNQkDGyBgIRw7kYA2M1cNrtwzZadw7ERqJZqb7S7z7LEiq7UoB+jbSB1G7qXYiI/18vSimnzp9ejrHNi7n6ybCiM3X7fn5w/2ru88+++kvPv/88vS+CefllCWXy2Xz0N4//dY32nyCTL3PLG+my6zLzd3z2U8HG09Pp0+/e7T5X/7e7//5F19K4O50B/byIMT2IJrdcZ5lKkZTSnJXuhGpY9R6vvrlGmtsZ1yfnyO34+mwLAelya7eGrFdt8saj4/Xzz//4vJ88Rjrul6v5zEiM4RBxVjX2hvMrA2RETcyL/1okH/35jv/4Nu/6tneP39AuFzdZCpaE+t9FuouQgMpQt11VSImlkFkqk2ffP2zf/nF7/3eu78ogSp/ouiskmJjIBGBlEyWW21ZVxln8XPIMCHZMN325Y3ZgWoqJm3exw3drfLc04cDxVJAd2PwXRgnRjB39dVOlntJqlERoyiygNotkjJliEC6qjYBxMT6LK0BCYaKZJR++1c+hrB1qrH2wCJG5HCPMWJdV/cxfGzb5sMjImKU76zsLAap/Ctha8I9UBmxQjwR21gvvm3jcl2fz+t127Z13bZtDc/MwT1VIrl7luyy1+tIH4i1asAdPhBRdGzuAKr2XBZUFRUUqvClA5YsiZ0CSRqF7u7uw3P1UYWxjXU9P6zr4tMvvvpsqRiXyzoqU7rAoyjyxY++WJblZ37upz48fPnlu6/W8IiRcTWz4/F+3Twzrk9PmaHC0HZ+fJCSnNp2ftC6HO3u07/2a//0v/wnf/LDP/2Zr30v3R/OH2bI1CbV/cnskTI7ZT/J3Tci13UNr+265rjG8PQUojgCW5+X29P9ZA3ch4HNx0Dq0+P1+z/40edffu7DY8Tz09P1cs64ekQFxhoRY01fwy2q02LqN1f924fPfuun/ka7+sPjB81CVFsOoIhJmw/KllJzm0REu2WuXWVqlukkALtenp+u548+/tp/98N/9Qdf/PnSZgLarJjgQAWhYuLY4wcRTjiwEZtyQIdFrnrC6aObfiu64Hg4TK0lsqC66+yF2xjh1bWTfY/2MYqw2tSsNdMCAzkJe+QAvU9mrTWbREB2M4U6LKUZxZAQMRVSG0srk8isER5MWqYrdU+Ky+zunumwNkvfi05GZaaZTYclCZRuHAikRwRVLgWTsqAURslKMqnP57M0YcVu4pg1Nl/HtqVvGVEBoe7WMXv4ko8kNaqhrpXwUS8FFYjdQ4q6p57uoiDuNqWsERtKUdWoaJoSSIfIiNi2EVFqynWtKyZz9qk0jwe+bXNwc9Ara4SwQ7aKujse/uJP/3Csz5999gsZf1adp9tb7T0Df/pnf7Be/WuffBzjGVmHPmXl4ebN49OHj28n97xcMR6/P+Mb//N/7x+9e3yg1+Xp3LQFRM3nmkNAz/BMYSQiK2vNsecc5jwZUVFok1FE5TrxsMa2ruu6XrrM4urckOkZKh1Abmtero/5rs6DxpIvp8N8/+r1/f39bDaQK8EcXRqUh6G/8eqz//FHP7++H++fLwADukwzVKUZklkQ42yLJqiSLLgqhVSBJsuwAYjVFTj2ubXlON9dYhu+TkKarOFBoqBWYsyV0sAlePHVtBr34ITldmkHFc2pWTMBEmx78KJYj6gMUWm7lxhQTUTVao/RKGtNMzOHr5FrPNuUJO/kqNYp2hSQCrFSTWSObWBNr4LFuDIZEbGNiowx1hzWF7HGRFLUa6inqiLqrKWVexwNibaL3dE0FUlfnY2eOUY25aZOajF3t3wpZIxMzxwuexgL/8oelCVUqQCqSb3A3SxBhKCssAUiZEQZkpF7eUkja6hqoviThDlBD3NksFpZK0mUU1UqRqzukq7lQ8F1DTbk0DL/9Pbt3Xz4y/MPM9jZV1w8nbGbafJ0c/PFVw/r+P7Xvv7Z51+9GyFYyS5vXr0G5HpZD9Ph6el9TBMDHy6PrPH+/fs3r14DuT48zZ9/+Q/+wX/w+Z/92f/+//WfLPfLEvZue5rUUthFa01QVVhrwDnGGKutY520z0u3VpVjOvH8/L5ZqfU+zOtp+DUihoyKRFZUDtmK6NpLrzm2D+/fG60sn85bbNmlnUWa6pWp4Wvyl772zV9+9cnPnD46+/O784Mnux6mLpBWVTG8t0XRFKSD0oDKHFQpwN1VGxjX7TrPLSk+6vZ4e3e6a9NUq4io45nwxkqVMgorfUiJEpsXZBLJwRj0+WTTqXEa1ffhs0pVuyYwUCoY5yEhqg1AjiFmVRARbdPUtDXzKiZ9PJ59Bafc+rNkl6tJa9PchSXoXajIiiHGZOQILW99XFcAqi1jLVRT2v2rWxGMMTLTaJUyooAU2UoEgJmoakZVayJSuUbEqIx9m5cIRBQQ8ZOAVUBRVcnIjM0TKIFERZFsxsiEZiazarfOTyJLVEdWhQCSvmUiaZWoClVFBKClKS+RksUahYumRBU1UYNBCoKbi0qE6ECq+FQDA2DEtl7aLF1qrE+X54dMFKPgYiqV7l4lqtObNzeX8/rw9Hh7ezpvYzoeT4cjgMvl8vr1zc3drT3c/PgHf/rR6zcKXM/+8OF5sWm+u7tk8Ud/cvvtn/vN3/if/Kf/4v9hx9s/eTgvN8euMdKleDvPRflwXrM8ISyR9LfzAfNMrLc39zYF+EEXq2rWKM9RYUissfq6IVlpEa4NsZ4zr4FCkWUDWXtUxPrYcUjI/Uf37/zLefW/9a1f/3vf/JU71oenx6frtQdFyhpU2h7e1uYutP22UwUZiGjIopAc40rR1tty9wqP71tEbuv7y6VSFFyWggfScg/DxGCWlngaKjKZKTn2uERpB5tfWbtrs8lMBWzLkBaQMFi6+9gyKcUuarBoxkalmMjxuEjvI7wLPa6UG8ppdwAI5/PDtc+LzekwCE1U5AVelybuFKFRmD01cgcggcrUv/nb3zsc53ma90Cln0RBkihVba2ZmYiovrQkWSWiCt29/EQ1gdidUvfxNFF79m9tufOwXsK0SsA9o49VuhcLFHbzmxIKK2IX7lUWMiOKwB48KCBUxUqNQIo4SW2EaFjBSClrQdscgxIsUigUlVZhLCTyvK7bZf17P/Xr37l7/cMvf7ytgoiqQRqrosrEWAQ5LUeUfO3TT9+8eXtdN79u7z68i9zW7fLh+fHrn3z6/v0Xjw8Pb968bq1XxuV6PRxnsz7W58P93e3bjw9/8W8fzk9fWX00
HbqZAnd9/vh0s7mf1zi2KcoJReTtcfn06x+vfLKjf/T2xtqYl364ma64lIzGFFqJuK/bBuxiQR/X6/Xp+cFjCKVErPeTHbaxxbZpVveS2N59+YO//51f+ce//Btvw56fzmsOoiTR20RQpSWEZtPhSGpktKZGbmOTyorBPS9R9hQoZfBpe9Su/f71P/3B73zZL8fpuLuGUyVKIgOMykAplOnuI8pbbahV0qM3uXt7Wu6tzdIm4aTs1EkEqk0i3CNUtVszaKPNfTod+rLc3My3piK7x3SEsmRnDmXueprdTI9MUaPuS7QUpRjJiswKiEik7z4dTLAYMfRX//53em/LMs/TDKAQFIClYi+nX/fE1Z8oLSorA1moyErfnQqqaico7CSm/ctYYO0yfwV3bTOrtFIpBmgJC3uy10v1qGJJRrBQnpWJSmRVhkymzShFSVG3LiKWlDTahNYbCWtXsyBMCTVBsaAjEZUGVNaomKX/xrd/6bM3n74/Xx/eP1dQBGNzVfMYrJynlpWFUrUvvvjSIzz8/eMjhV++++LDVz9+fP/V5eHhZ3/+F/74j/7g8enDPE+UOp8fI2KZD7fTaT0/Hr/3c/N6/W/+4F++uX3zVuxoy5vD628f7046Pa5bbPnZR5/EGB/efXj7+v7q5zd3x4/fHs/PP/zOpx+9evVqUmlTbPUEwZ5pT6WvNa61ByWQuI7xdL4wyyhVWdC3x7vz8zU93t7dzdvl6cc//u1f/rv/27/12zeaW67Xy5qbT9OOuquIOYSi1htKq8oUhErknmkKloiCrMiuqoKiPJ7fz/Ny+Pqnv/Pwh/1mmjAPKAUmgsptbJ6VqJCqciYqVUtlIDaycjnY6eZgx7IDZKFNbLtrlDaPTMDUjDq3PvV2XJa743GZp7mfzDpqt+UUZGUxd6Z/JHeP4mooM7HerXUrRMSg7DmeqqoKicRLdkcmArnlGKv+2t//KRU1UTMz1WYaUT6CL9kL2qyp6R6XDWFEIiQLXhm159NFRXhmZsUewL77VzBVbJd/7wED1D25MiEqBdllj7tyi5WeVVovHoysksoKD6Ban9gT6qTQpHeIajXCoi8UHaKiJuSGgpTuFkUsIOYMSmWtmm4i8fVXH/2dv/Y3bzj76hRet2ts21/ZVBGcp+PNq9eHw02fljZPfZ7vXr+iye39q/tXr8Mr1vH+3YeB+ujNqz/8N7+X8MpQ4HhaQsRUZsTyrW/D/MNf/OFc8/0y3fXDT3/8ta9Px4eHc9n0ts8fn47ivD5dv/X1T6+xXnH59scf9V7a66Ob+9N9u8RDbdtuGOpp1w104cWfQGBjjW2M67ZZawTCq8KX+fZ8Xln16rjo5fLXvvO9/90/+A9Pjq8uD18+PabHPM3bFkSHCm1iazd3t0L45ktvSs2Ipo1NkkmUirBETaapi9rz9ujbdmjHulv+sj6Mgmoray/Z8CKZtUUM+ChvKUgLbzlQV4VLMz3MCxpkrn5oYlQRUaQhyO26aXHSNtMOvR/btKiqSNdpTyAsVmZt6S41aoQP960q1/VCBqUm03lS5aImYEbtFr80ZRPNRL6IED08cmRFDV/t5WS/pJLTrDeNMLBSoQqt2jNqAcCoXWbnNej14qVeEIZkxQAEyt3Scv9fUblzHKilBkFBBTSERCMcimqq8EgwHUitPdxgB6bCM7NNxknLoDrUjJpiLCVb7GHuAiOzpIKNkgQjogIqXYqWVS6eAlGTZia9HVtrlBSo0oIVoIK9L6fD8XQ6SZuTHMMPx5siYotpmh4/PNzf37/9+NPcBqS+/2d/8au/8ktf/8a3/uzP/uTV/WlqdjgcDkeuNZ6fzncfvn93+9FvfPrt3/vqq4TNN8dvL6dGRI7p4fHuk0+/+PDO3nx0b8ui7fbtcm2Ph4Hbt9/cND599c0P24/ubm98I+uxcSx62C6H94/venAwAb+Ma2SdJhFtj5crvcpxPV9m45brjeB/9u/99m/86l+f1usXDx8uY3UfYv0aQygAQgDTeZ63MVR4mHvbT1lWcrfnTRFhAshp6ptfK0q9eltsOfXeyya/uTl4+HPFaMAwzqZDdesVAqFnOmOEbywPEG1qaj0koXtMTA7ZGxFf60pRVhnQmyrYgcr0csJVSrrJrqiuct/Kr8PhnmRt2xYRIha5rdcuGGXoRxbKPZtpVZm102lSbdvzJSPCScuMQdL2Rcbe+qvqS8QkSq13bc2mrAgEiu7OLEbsXoCevhtTQjMi3Xfs2iAo391opZLJrH3RUyKSKJqRYi5SiMwR+RK5QVYhd+b67g5LSGtNGlJGM9Om2rw0XNlmSc0CSylawkwhTbJQ4Vr07GNVjDS2Lco55i7Hw6tPP/7MNh1tZIPpxHwQQQ70rtJMTG2e5tNh27bb29vwej4/Pj089G7v3n31xY9/+OknX/vkk4/+7ffPl6f1z/7iz7/7Mz/34fHheFimuW2Z16enm/mQotvzOn3yyUff+Obj+Tl0WW6XWy8x+6k3r2/mw7qO+ea1d70uR53mt+sIvnp7e/uhztX0m+3Te2/WkfO7V+3VV59/MbejtNs/G4Nz8uk8RrTWJzOTMp3rGk8tTI+3fZ4n+8bbr/3dn/3rv/3rf2e7Pj6vH9ZxrcLU+suMR5bV0k/ae2amh/ROQUSIVOsS20Dk1GSs2xZ1XA7rukYOZKmallfVdHNjT6eF1c9rX61rhyJqZT0BQO4eCXuitQK1C75pyk4zy+JwbxNKZctUQY4RuU0yU4pKVnkOgVKQtRKtIiClGqxNsUGgFttYGaGGdb1G+XVb5eHh9nQ5xLL4ga1aU2iBGrTjYVZtrs1kQj17rOvmvR2sqqpQCVBVLZC7FSNJ1fZCm1at8q7dywWQFM2dXY5wj0IBpTsTYVfiQhRVyMqEF7RS9rWvGcvAiFYFxUpElYhVZAiE4VUkMxIl5L6ETNGkDSqlhzbZVKoHE6CpRWm57iFQQWqVxRaTHBy5bePsWyZu7+bT4djl/tNXn/WmUUVp07KHIAhjD9uAr56JU8b9/X3EyKj7+5vV/bQsh+P0F3/+bx/evxezpnOb+5fv330jvv2tz757eX68vT2FX5/G5Xv3t11dCuRxPr46dcU0HW0a41wje+DTu9vrJdnl7FeZDjd3tz9697h6fPub3/r+0188X9dv4TZO09MPfxzLN8X73enG0m/vPn18fLys6ze//tH3f/T+9e2rU7dYt5vT6xb4gw8/umvz129u7k3+g7/z9/7mN392uz6u53dXbNE6/39E/emvbt2al4fd3Rhjzvl0q9v925+eAqooqiibzkAAxzEGCaVBchwl8sf0iSP/HcmXKCJKIsVKHAgmIGOBCZ2hCqgOquqcU6dOf97+3Xvt1T3NnHOMu8mHuQ/5vqS1pDWf+Yxx37/fdUmx6cjuJEWkW212kIqEqVsqBSJMNaG46SKfgbDmYeEOcBpnBGWhYFSdwzxJV6OVvqxEMaBvq6mauXALRGZO6s29LQV8MDdzwmDuAgFAJbETNCA0xKaGQODWmluoSKMmgV1ZgUeEgYM2c0DhjOANmnqtWmd
Q18mhmjYzm5s+jIAgJHR/vLmMJ1s8W3Vl6Tu5r6hPZlayQNWSupo0Fn24uUyjNsOu944A1AWg48yJMCigBbBVAAwEWY7x1c0RUKiYQDLTcKvh3ha3eRhRkpLBY0lsYpCaMytGWrqcyBQLsCyciCopELgwzkjqBBaBjGA0cw4jCIoQI5Igcw7O3iFjiFNVbpGYGeNNzKC51YCeqNdwksQi83Rcb8rbz5/uhp2eIuphkknpnGoKVnaojYwcpjlQjnV89emHBLHdbnePHm3PNlJ7rf7y5csPPnj/8Qv/+KNPQDW85pSmafrJx5/8gT/4c//yn/3j81331otnh4fT4bQ/3/Y47uH2+zO03fpsNmytMaVpmodu1a2GdEaqremqXA2qh20uRiWlfHX1VddaSrHo4PznPx/vPjt+/JWrF/uHca/xB65ewNxO1i6u8qPNdtWtjk1Tvz4dxtN0vDrv3x3kj371l96/eFbrwzjvpwZ1DCGweqJlDJJYun6aa/Ioq54qmFYRIVhSJACBzGQVWrMQJ4honooYGDsYykO9e8xJy4YItoRTn+RUclp7PZg0YkYVIknss9UwQg22AGBOIkYQRpQCGVwRKZwNNIysEQGOoeEVciGaRTRRZivIHH6a2iHC5lbnWo82tmjhMzh4DZ1tMWdPGgB5SnNQMEfCXYfZiSE5GExtjgjnoGSZ2aggTGYmt/uxlGaRI7tJEkIWQcRwNlsQKBBuRNKWLJqZ+8L2QARJiRTYY2IAMCPilNLiWoKIpgtT2gAXbC57YDQnAggGAk5QKDU1jEiJ69JYAtc33S5mCczGCVRcCuHCkGFYBFlESBxAysjohJFb03DMlLxinS3cH11dPHv86Ops26VBtsNlN/RMEjOhjq1hSTqNRdLxMNc4WXMCZsbj8TjNp+uXpVlsdtvVavX69etutWrNbJ7M2/XxZpVXt599evf8xR//E3/yB9/5zXVXLtfr7/3kBxmfln6N8Yqbg3Srdbm5uRnB53nadWW7GhAxcAVBGctpzpdDf3NzY3XablbI2cxzN2wuzy7u/KpdPn/05BXf/d7HHz6/uOiG/tvf/d47z9/yUc/PH83ot/vx/fPLZPX5s0dfffSVf+tLf+h4+nz/8IBQZhvDYJzHrusgQrqSV5t4o44GD6BUyJA9St+11kBnsKZoFuARTIwBi0FdXdvi4EMKd8lMq0w2QygIBmoko6CudbOr+aQRPcoEFgScJEsSI3c3FHNXgyCjcFBubuEK5izkwRDGeERImcHFECeYskK1aAa1eWutNWuxSDmrhgMimQY4smPVNjZAOiU5iHTMzGzRQseGpthFzpkyWU9ttpQZg/jya+eBCr6AeNQh3kBEA9zB3RdfERFHuJt7eGvqas3UzCPIws2NEJJIylmYSy591xVJb2zEjEiwqDNj6dFgRhRYAmHEyLi4Z3QZsjqqhaMtmjcUA0FMKMlJjJgdPdiAnAiRnNAZAUFc0YwIuwDWiefZhpxfPH787PLRZrVed8Ou7C4250/KWdKoZsdpGoaVZNY6GS2IyX7o1+uz8361Xu/O12eP3KxONTw+/MlH5+fnb7314vNPPy2pUMCk87Mnlw8PNy+eP7tar159/vFQOiJx1cz88OozbDOmxCl3fX9zc3d3f9t33W63a61N8zz0q8NxbO73D3f399eMMXRlbjWXUnKCOiXCQvz8+Vurspbml5tVO5426+Ht7VmPtF1vV5JWq7UkOWd5a/3s/WdfSeUwz7fjqVJO6mzz3K/XbuYWqaym2lprhUVSYiQkZslLwVyIVNVtnq2Z6TIX+WmWqi3pQrBglO36vD5dfR9ehauD1cmqNwBzUwBjAmYQIW/VakRjcMTgePOsglE0cCRXb3MdqzVtS+MNI5asILg5A5vb3CaD1rTOXitYC9WwgCAia+HmbhC2RLbDHTAQQ8ygqaaUh9UgIgQ/9dgukp3m0ziaNq3aZuPteyuRYBQGMKsRQYEU6GFuXutsphACCBHWWlv+Pl943rE4HqJ5I8Yky8WZc84d58LSpYyZ3qQdwgGA2IkWv+iyXECLQCYmcDSQRe4tS+7N3Q2VMiIT5mBySYSCCwUfDJFiwf754iR0YO4gRCeFyTPBbr15dnV1eb4b1kPXDXkom7KCMaaHaViv15vNeDiUUhyW9DwRJwvQ1jhJWQ0o6fzi7PLq6vziEbp/55vf7Nbd2dVlPYwXjx5vdv1Z3++6/PH1Z7lkPO1fH/ab9RnApLU+3D3knIZhUPWSystXXxz2h812uz07n+b6+uUX4zTeHx6Oh7v94dZN3ZyY1W29WR3H48vrmxa6PtuV3c4tusR97tj5ydU5IgX4OufNet13Q0FJDRPnFy9eYAmtGI0VvY61ELbWwKPvh3Gqc9PSdbkrROhzizpLSrlfmVZXBVwuYQSBADFO0ziOqhWX2TNhAJU0PH721su1fnf/Uc7irTWdEEyEELC22bQRspAQlzAHVdAAo4iEjIbL9pSQQEHfLJWW6zUgEYjwEh9AD3VvEbNOLZrhIoc2BweHcDcPRAlHr2S6bIzCGxDy4jEC8Fy6nLtwXOD+rTVtOh4nndp0PE3jOM/Gq7d7phAQgGhePWCpzpupalOt7oZBbq7WWpsw6E0exdzN3iA+GJgwiRRJqUvLHjuJ5JQkCRCbofvCs8c32hbi8EXevswLHAgzJ1yWYhgIGAGORgLExAVZIIQIcwSQEzpEEBiipwAMVPMAZFzI+NYccbVeP3tyfn429F1XhjWkyD2CwenV/mx7cXZ+Fk33+5PkMj3s728OD3ev5+lwvH99uH01nk4Pd6/ub2769S73w9l2e7bd3N8/vPvB+0aw3aw2m441NuseEg190TZ6tdN4Avbx+DBVr00vz3bjWG/vbh4e9mW1ktJbYG0zUty8vp6ORwBLpRyPU8lDq1USB8Krly+Pp8Plo6vVqtNWH27vLs7POGFOJQsdxiN6nO92q76fJ3398nbaj8+/9P5uu57up3k6eSg0dNM6uWTklDQgSelKl/uCCOxBsVhlDD2SsGRp2mozJonwcRy1qbkyLfkAYABzTGV19c5bP8DrHx0/FqGms4IRLs3WQAb1qja7ejM1DQgUFgJkFAABWtxuFGHLvhacWISIBGzJYC/cH7No6oowoxlZsC8M5sQkjAxmwW5hCosvzMxcw41Cl/OdTdNcqwpl+Kl1sDWbx1rH6m461+k01qny8GxgYWECcAdzgDfdNvPW6ptrQKC5am2wnImWNPTCm0P0BXoPmCUJMzPllBPnnLqUJAyXCPByWofAJdztTurmAUiAEe4GQAxEBEgGYBGLFDpQkAVAINjdEDSzMaACkrtEQzD28ADDCFdk5uQcCE4oiZ9eXT66uBhWq1yyMAXPNMNQ+0fnj4Nhs+r7fsVE06woYjadDveEkZmmw1F1AvUFY3q7v099v05DyfLO73///uXLq2F1bIeRlA4Hr9PmydNoBiGTWZ1H0GamJdHpdPz008+s6fnVo3BaItggsF1vbNLgfH27N+L1sPLQhYL/cP/QDd1bz18cD8f9w/7u9o
7CAaD03TxPNy+/uDi7MIim8/F4evX6/tnzZ++9/z6oPuxvPRScx2kOB+47FJrMUhn6PCTghRFGzZGQZS0sHM3U2lwB3C3m+dTq1NoSpBUk0lYzp5TEVLlfbV48++bhJx/Nn2G4qs6zhqqqOZhGMxgJ3aLOba6TWYMwWvbDi0UGKWAx2nmE4pJ0JvDEgkDgIJQRMci5IDIEARIuzAU3xGWW5Bq+oOAiHGEpmRgACJFEgKpZtVYtAgSQwuaq2rTWVue5TtVVp3Ga55mHpz26pCRMjhBhbuaIvtTSalW1CLBwr61GLKxJ9ABdXDBAvuDuIxaBRRIppXSpS1mW+zAvPisE8wBgJkHkZi3cEJwQF5b4cq0OVCCLIA8MWJbeEajIhp7BxJuhYcTSt4jF/UgYiBQO7iFvtBNMzH1HF2fD+flFWXfUEbH4DH1dfeXiPRHqV90babVCpHQ87fcPt8ICKalC7oZUhpQzOIyng6xKbQ3m2ubjxZPzs8dXL7/1ve2T9dgOp+vbh+Phxdvv3e+PYvAwHsPa4/OzcRprnQDi5u7B3YGw1nZ2fmbgEZ4ln06TAc9qpcsCwSSTzo+urvZ397vdWcnD8XT66OMPIQCR1kPPzJ98/Al4lFwOx+OpzgnlyeOn7331AzLY399WNDDSGqc69UPHqXO1Lg/uHOrgCMxIKUsWzlRKLt0bWrjqXKu5Qtg0TfM8qmpOKcAhsCslmIn4bPdo/d47//LT3/7B7Y9QYxqn6XQ0natOVWdzd53CnJgC0Kq1qvPYdAZXAAvX6gAYpBCqjZALsbkJCqEQACGlyAEBpEtJigGEkTkCnSAiwDzMMXRRoxIahfHCpViMLYhIIIziHmYtwpkFI1zNVOs0W2t1rnOdT9OeV89WdbZElgjA1c1C3XRWsKY2z2C+4OTMLQCX7EOYtQWwHsv7H0FNwyOlVEru0iLmoAVyj7IICx0gwsLUXSFCTZ08CGJx8wFF8JKVEw9WV4vqIYgJWBkENQuKqpoCukTz0AWCqEvVlICF0GxOJJkLZoJ02qzy06unwyrNug8UOnJ3TB9cPnGsU53ncbw/HupJp6k+XH+xv7n18JxXKF2/Gdbrs0NrOaXLR5fnm/XTi7NgI/KbDz998ZW3yvnq+sOf9AGBDCJpKFeXj1+//HhuD13ucuoawPE4e7O5WepkfzogR78ehqG/GLbH+/3D8eTM/bpbd6WkfP36evfoqraZEQjTfn+c6vjDH3zv3Xffefzs8TRPr26uD4cDkHTDalJd+jQXVxcNY26tzlMCqbON89yXgUhO44mpMCVCIuSSulwGlgSOyNwlDmLuViywTDgYgrnUVk+nU9dlEZ7mKUkqqXNJjvBk95SePfrH3/pn33/5fQbRycy0tXGuc23VWniry6sYDBFSBM/TFLXGbGFokJEIQtyDlqABBjKxoxsuXXAKZwYidg8BBDQEUgh3IETwCDNXAzVrjibknSuqerizMKIREgYhMEDEm2o4MeE8z21u4NbmWVVrG9VnXj8dzJsFopMrtKamas28YSguSrb4NxcQx+ZBSGbuSxgIYgnomztEpJS7lBdHOy/CpsQiQkxAqOZmoUstUsEUm4ZahBsyICLEkhJij0CK+qb/brQk0z2shQCCkTV0awzohui4aNTeqMAEnC0xdmQdc0p52Gy260StnG6nK7h4b/O21uk43V9/+noe23g83N/tx/lU5+N0PI2nk3u11lqN1dnZl778rmJ8cf3Fbii79apkRrRWjy8//vSdn/3K/rObdn+IIQfQ7RcvH18+vjvi1dBRspcPD+e7iyxye3c7T6N7AOTd2eMlDnJ5dXV7f+NmrcWwXW36fjod7k/71dBrtYuzJ9e3Nyihcx1Ph3fffw9d2qx3d3f7/UMWIcachZG1VrPJ6+zAYH7a76fakAmYZq3i1FASIEkGhEAzQgEWAARwB/QQBGJOXelKNouqNUJFCIjrXAVh1RUiMTImefb0vbbe/LVf/Ruf375c4cqiQqU6m6mqVg8N9AB3h6azWYAzSwKLmMkNjWfBYujuc7/4zPtOgIhIECMMGRbvkKMGOgUgZURzNABABUByjHAl6wv2TOyOaIqYgoCAAAtiWjDrKJRTlySDWasa0eaparUAN1X3MBt5+3xFSNpaaLiHNm/NwsgdTMHdECUiTG2pI4YuyGloqgHhvmSagRwglrUx5ZIXlJmISJLFH9PUF5KJq2lzV9W5aXMPDFx0igsaaVFVIRi15tFC4s0j7hGhgY7hmYKtqdYAF4CGCAKZlk4wCKAwRielH0A6Hta7nJOpyUhP8pP3r95DjZe31w/7/fHhOI5Ha7W22sZpOh1bM20GHsPQOTWw+b33P3j+/NnnX3y4P1wvgzJGfH33eXt1942f/bnf/fEPURuB337xUi0uHj8fD6+7JHPz0nVqfndze3vzKufEmLdnW6KUU6YU97cPzFkkW+i6L4f7QxmGUnLfr0/j/Or27uLi7NPPPn3y9Mnl46txPB4OD6fTOI6TgSNhM5Ocd9vtNJ4YKUPZ7/f7w55FRHIzZRFwWHKOiOABpmCGKecIj+bIISzqbG5CVHIBiqajVsiCVauHlS4RiXAB5H7VX7z9zvXh9q/+1//Z3bynCnWeW53m1g7TwawBmLam1qZa1UwrWENcOL1Opt60BjC6siCRZ5EskoVlObIQYgAgquviiAREoAZhizecGQGwNUtAhbehZBYLA6C5IyESAsgb5zimvuu6oeeFkB8B7mFe52nJ2c91Vqs8PC3gYLV5VTIwtyXPsEwhEWihT8ZPZQy+jH/cm9vSy7KF2LgEms0hIom8OfwTMS5hO1yWaBERjmGual7dZw8Nd3MwRIbAf3MRbOrhDE7oCIFuBO4YAC5hblbdmusCjkRZEEWIQsCSGDMiSgZOuCppu1pxyVXH4XhxIeeXu11muX99Z+oIdLo/PLx+XavqVLXNqi0szBoLvnjriQh+9MlPrh5fvvP+u599+tHdzct5HFPqEvhnH334cLp/74N3Pvnoo47F23yc91ePtvOkOa8gzE0P+/v96eawv+/yjjtmofOzc49otbV5Pp0OZ7tdncculcPxYbvbch5OUzVtEF5Kev367urxE49Ipbu9u71++UXXDXOdkpTEaXGSW0RgHO5ur+9uzs/PAek0nQIhKLF0gSkc5jqaVYAoXde8NWs0JMzJKLggeJz2x+NxT4Q5szCD4zSdkE2kL2VVvSYqVxcX3Vtv/c4Pfu+v/8P/8niadLTpOB9Ox6nVh9PDaTwsyJup1VnnVltTs8qhPp+m6aBmDYjA0bURESTKCQpCKalwEHlCcY8Ac2yLP8G9BSghuy15TXMDwcxRfOk+m6tBBKEhUBIoy7kcgIRTyiWnlJjJCeBNTmfJfUKAVaut8erpKmLpRXmYI4C9iXN6BCKRWbjF4po1WyLPoI4QvEDZFv2Ug/sCfY1gokVYnUSInJHDQc0QwZq7RmtqLbxFm1SrmmlYENKbYJ2DGyyHrlj2ZxYUiB7gYIoeFL5cPgqQpmXbxpTSG3IMEjMxZgLGVe6HQrmAa
3m3/5nLvKmnh/OzTavuZoBx9/r1/e2dV0Bg9WmapwhAdAAoXTo/v+w6/vCjH7nBk8fP6nSo8zTOIxi0aJ9+/OOLy4v1sH59fb17dLE/3iXudhePtE3aqlCu437S4zzWWXm9W7HkzXp9Op1MPbyN+0PuS05pOo3VWtd14XCa5r4UaxUNh9VmfbFRU0n93f1pmufEeBoPfbfCpbboYO5zq3d3r/vtOsz3xwMnLikzSdOYTlML9KbhQCCubdKTtpkZCTnA3BqS55Ka1qlWRg60w2mc57lI3m4vAmB/ut+dbzabVT5/9J0Pf/Sf//2/s9+f6mT3x4fjaZ7GqlaneZrmmYiAUV3B3S0gsgAnkPnQxuNsRsBk5iLYD5zYS8IksUjdChdw1JgM9E0kMypGmPtyf8QgJiHjCCBAi8UWSIJIPzVnBIGpMgsRI5EIY2CY24LM8cDlg1XNzLUqr66GsGWKgwsNg5wW52AERqBZvFEBOEAwOIUtuQMCCyYiQASAFmaxXDdEJOX85nFkDGBA0kUSoNGqa9VoTdXGaZqrxfJ5X8oEBKoKgAC5zQ0W0pUGLHpnC/MgFqRgJkmceyy9i7BIkYQkyszIQYSEzJmplK5IlwvMw9fPvv6lx2/X64dpmrCj2pQd7+5v1Zs3qq151JTSPFVhSl3Xr9aYCT0y+mefflTneTucmdvt7UuhRASS5NMvXj55+gw8SteVYYWxSiXf31wjxGoYxnEaD2OiDplW67PVsIYQInLTOjetEwl3pbhZbTXnTtUO48Fq7buuqvabdcpJiE+n8dXLawwbx33uU1e6aZyYGVEgUKe5255z4OHmzt04pyJJmzZr682mDH1YhDkAjodja1pSEfN6PMz7g05K4VrnnJKQMdHpVF/dfFHyqu92SHY67RH53befM0l68uK/+rVf/i/+yf+XqUBFa3Mdl6NFi5gtFAlyt9TNkCllWmVIEizRE9A0zc3MhVms77HrUuEAaoiQMgsKQDhbs8okQMEUgkyJkRzdQAEVwH0hX7FiYBEgAHRyDjA0QCRkIpYkTPzmCrysd5dJv0I4tKZzNQ+QACMIX9icgK4RuFThZfm2kkQR6F6JISUhFgglAgRHRGsCiAu5P9w8KIzcUSNaLM4nB6gL2oUJhSPhcmx3Yl+gKuHsCorhaCQIiGYaPnsYAQIEEYFBOBMgSiAbi6ckktGZUxJmRoSIQEwighKAQaIRMrdp1u08xwpwEGKHYbs+He9rbVmSsne5a8NmgjY9VDfzqpvVGsg55wBobVbzTLgZ0t3NF17nt99+zmKvr19tt9v5cBLpPv7443fefvvlJ5+9/cF7bKVNbWwaAN40LFqzOs3d0GeCeTy1Nvf9KsKO4wGxdr0wpc9vXzrOK+uF16ZxsMOwWW/WAyDPk7Y6Tqe70NNpf0iC2/Vam+ahbC/PTfF4eNiuhob5NJ4IDVgg6HCacumvLs9JVqeTgiShnCiZs9da0pYTgk7Y6jzttU1mXpi3m/5h3t/ta5d3w2pYrYZpOu73+w/eeZclYwxY0ne+9x2bYlivHBp4nxKxo9UZRLnjqZ1o4lKGQBKiwoTBc3gpadWvam23p70zHE/KOYY1AioGUwQRIBgnI/dEbB6oAQkNfFHvIieBwHCRQpEgQFJCSwhu6IQCFG7N3SUVVUUMFtJmvNDhkTCQUMJbBHoLtEBEQUQIxTBEjIW2SeIQcHJITBwYjIiuTgzgAWzC9IY6HeQWyxweFxwRoDZvs4ZGqJsZIgHqUlnInBrq8qWRcxeBfedew5TAwNlDQUdAljD1mGlBexNSCoAIRSRoCMjRlVRKwkzEvowQkJyZkqSUiMWDDAozL683Ok0j2vT6+vqS1scYMcOg6WFqTWtKiaRbbbjZNO5xnKsk3mzW41TRqxm3cTSKgBCOOh+++PzTy0eXZnZ/d7sZuu36/O7u/mXJj5+/ePnJZ1/+8pfnCZyRcnHI01gp0XRsVNP+8FrDk5RS3p6rRljOeZrGLz67Do0QnVs1r2belQIoktPh8JCoI/ZpfBiPD4icc/FGTNKv18ES7tuzzc3167HpbjVsL85Gg6axXm+kX6vGdDzU5oSdpJxL36/PE8LyWxJvFPcahyR51125tanp/f3RHM83T1JyAJtnBZBh3c/Tcf3o2e3dy9/4rV/lDlOHaMA5g1jzxoAUxTTAYppP6sgCm7QCrKpMRMi4IOfJo83VDQ/iZ1sbMgMSIVprLEAJYQ4mADcQRiYgd7XFFkwUhMEIGVYo6iAUGEiCCKHGGIrNNMwI0MyYnYnMLCG1cNOAJX9h1lpDDEbk9eUKdDlPBoQjcEAsnjpCREQ3C38Drgogxrx4+QA5nCPYFokBwJKZQ0QRGUonjAUpfBmnEjiEoTXSiilJJqFAcK3N5moeGG7RWrSkraEDBRMCI4pQRAC5MAcHCkqm3AEll8JJhHjRdHvuqeskF04ZuAALdrIKUUQpBHrirV4+v7joWI7H0TAE8TTOk2mtR21a6wyAm836NO9ZCMIyy2rbtdpqra1NhEGE43j67PPPt5urdbcyMEF66/lbx/FU51a6bn946ZVX26fuBn4yMA9KkllKgD3sbyyilJW7QSB6TON0ON6thiJpYC6Hw/1Yp/X6LCdq2sbjuFmtT8eHH//gu1q9Hzopq6FfoYgjqQUh3l6/aq3tNmsz6/r1VNt6NayGoWokLOvhbLvZ5DJkzlOt4zi3cR5Ph3F/mE+HaTrprKDqaqdxvrm5AYX17gzJofnxYX9/dyslXT2+UIv1ixd/75f/yf/p7/3N7dV6kESZSIzFSRCZkDMSMZuTq4bFRER96sEJjdnYnU5tOtW5TSHgQJZdVlIMLTBEEqEHGQjGAoSSCApOkgQRDRctIEVKuUAmBAhiEEJAISSHxQposWBEwmlBNcDSTas6naZpmuvcWtMIA0Bi4d2jTQR4NMRgWOSS9lMkkYUtsR8KBwISQkQkDMQMKD9F9FCARwQGMBGipJRKkS6J4+Jy+ik7zikMIxzcUhJGaurTPJmGVUZlVwMXACGWtPhCKByJIQECJ0aCtID/E3ICEYQUGETsuYPSldSl1EVOESkCIyVFTNGkk1RgeCRP15QBnMgP93ttRsJNdZqbh4KDqSXh8/PzXCgVqjZnQWEKb63NhJiQs3AhvL57terXV7vL+VQD4tnTZ7fXd+fnj4L0dKhXj97JQ7q9edmXToQlFVXf7s5vb27A0zTp5ePzruvqMQIaYDiqSLq/26cub9brVcnW7P5w64rrfvji849efvbJbrNZb7bIpZQ+d3m2hkg2T6HzejXUcSSksdZqrfQbNUwlMXdplQ3NXU/jMdDcbZwmbZYIicLfOIbgdJwOp72Fr/ozKaJVT8fDeDpAxPZsu9l0qRuGyyd/7e/9rV/56LfW6+3KhQo6mBAQL/2pQAZOYuggjTpw88K547UbszF6zCc/TdXqFMxgjKpSMGWiQMAqiQgJIILMMMwjQAlj2SsJQZYkIT2tEWAZFwUALFoVojDXCA9bPORCpLWFujcws7nZm4PJkrFkSkmYkfur5GHmhsGI
GZECGCG5xZISAEQCRiIAoKXMzgAEiIyA/wblgAzkwIwkxIlkeSsQAXAEuQliAoDA6mHqLiTMubk1C21u1cOCGAk9clDBVFgSAQAEMGIQASESMSdOiEtERJAAICIlTplTV6RPlBAEmHJQVJyEJPk6z/lJfvpMHmcsHu5Wx8P+4XCIiDaruhNynU8AHt5KH/3AQ58RKwIg6ul0yyIIETCbW+l7YpjGE5I/e/HiVOt4PLz/3pccdD2cE7vqhMQJUzvNw2aQbnWa6/pyo9o6FEYo2/NIXe71tD+Zq0iZx9PhcNysd6XvgfLd64fjwzUXJE0PN1+MdX9x/oTL2m1erdfdcH6apq5Lp9NECLVVN5h1nqztzs7DYZ7GeR7V23E8WCWRTBJRW5aCjDpPbj7Z6GbWFAPH8TTPp+1qq5BYEDhOpwO497vN7uJsyGl19ThdPfrb/+gffOuz3xtWkMmDgBc7dBJzo4SUAkUxGWVwEnVHp4QpKkZVbdWa17GNqmiURUZ0B8iUgCTQIgiFnJZUNLpbTjkFESAGIiCBMAtA0gAMRyBYDiOAulhf3uQ6iIExyNvC9dQIqE1hqZlZQwpO9CanM1ysIt6keiIs3BAwYCH2IDiGIzMREhOFh9CiB4bFDoAAHosCDoRQmEkwZeHETCJSALDOGgEiCQDdFrotIjIye2CtzeamVWPJvXVAGbmQpJClmokIGBBAuEQmAImYg3jZPVA4IQIQYArJIMmRlJPklBg8dCl35C2tz61nYQfb395PpxGT1Glq86ym1uacQMi6TABN20LfR2aBwNNpX/KqdMUhcl4hCXLenW3DfH9fHz95NtXDeDpAyo92T8Y6mXnVVueZQp3AAHeXVyJpHsdSinolgOePnh4Otzd3dynlJNzaNI51e/FIcrm5ufvJh99fiTl1q7Oz/d3NPM/dZpdltbs8W+8ubm9ORD7XUaRTnXSeqlV3Xm0uVWN/f68613YUpjJs+/68mbkqGt6/vj/e3Y/HY6uTNrPmoXWurdaYWwBhPwy73Vqt1rDS96WU7WqTS9p+5YPr24d//K9+9aW9ZmopCwBFOAtwYuQABhbAhMEBhIbgCGCGHuSsc1VX4giI6hoa4sREro0JKVviFA4OgUzECQk4EbHQ8j8GJkR3ROIIimBQC0cPCEMPaPbGK+wRYYRBYRHL4ugNnwFwEWaHL1+5JMzCAssAPhDCINrynoYwBEBgAEQAbW42BuScxQPDgSwSuHAYLQ8nhIMLARKhMAou4yCNpt6aikBKCYLdUnggNESEEIIqxClR12OzaOHIb9QDgogEHGTmtow/39xKEBCACTzeGKcdQwM5TBsG5pKRgMgIM9MGyE7z4X48fjLHOnd5ldqRvFnKA5JRpvBIDBmkK11t5KaAwzSfaq1EZN4Ipe9Xkmjo16rd0O9SSh7zMGw4/Ob1yx98/1vPn7/lga3ZXE/b7fnDwwOxPNxc77rkzVNHfc7j7JeP353uP3/56raUMo17n2O9Xs/jKXEAihF16xU6fvLj74ZPuftg2FysdltIHUnfr3brfpty/uTTV+FSUktMx/1DbYcsJVPnmKZpMp+YnKmszs4BRYLmaY/IOtfb6/3h4UGI3QLC1ZwDTFXRJPWZJfdDN/A4n47zaXOxW2237XjCMCk7SP0Xx7vuaniyP5/nwV3b/FDnQAbhCAsPfMO3dG7NCN0wKvjME3FCIWIUi5XTWOOkEeY53JFaQPUQtZyzNUuMmYUQkDEAmgZSICCRqLk1dSAKNyNGAEf3cAtHNPXZF2XoGxSnmmIEkRBgYCwKTSRbsnqLXog3jwZEClu2xwwhZhhgsbTbidwNIFgICSE4yH256UKEmQf+lNZPAcBJck4JBZGRYDEBm/niuUFcPoTmFghEga3WVmtEIKFjAC0ouAXOHZTIAcKc/f+vCnyToGYAYI/QJbDqgESYQgrlTliYMFiyAzVDDleNh2nusb8oF0POrY4elomZWHIuOW/Xa0DLWbq+Tyml0okkD2jVUpJuVRCg1pNkYHZAHfeH0nHpu0dXT5CYCS8uLkvaKkJJCKBmgQ77h3ti2G3OSuqCsC+Dt9Onn/zw4skzLuubmxvC0Ho8POx3u8eb7Vli/vTDD/c3Xzx/+90X73+tH1aDoM3T7nLDiVLqbm5vWTKzt/HgdartlHLpVtvpWFXHuR3dqjXry9Yj3T6cEKUvyU2Px0m4O7+4VHewCE6uDUybOqYEoMK4Wm9O88R9d355gQgm2A/p6ury/NnzH3/+8nc++93vfvaj69N9nzkJuDBlTlQiyBwMQjIBO6YIchJxMEB3h4x9JhQKAaIwiGYeFqliYJeRUsqJCyMHvnEFGzMiomsDZoiflmCamwUaRoA1t+bNMAwNfNK51eZvHhGP8DA3DW8eEYQYjhFY64Tkkog5ByKSyxuYYThy4JK/AIjghYqKuNDf3V0oKBZvSsUAaot+b7EEeLAEkjDz8mPuvgx2lgREM3NFL0H85gjkDm4a5ozMLJxD3D3AXCEANQLB0AEAjMEDwxHJAyPMEWwOJQNCd1JVFgwAymQ15tEBRFgg1B1VAzwpehNQr3Ozgx5WfTfP8zSPZ9udpAwRVSdBsRrMCbm2URkT+Nj1KcJLGjYXq5cvP6+jnm2G1WrVut7MoukX95/studDGp48fu7Exxo6XUcEmq/yMOHRImq0zarzadLWJHecUp+LO4gMzKf7w8PxUJ8+zY+vrn7y4Q9fvvp4vTu7uHzx+NHFaDrf71dl5SVU52k+bbdnEHV/+2ocZwAathsgvr+/BZubz25IOAz9RgPG+/vUD7mU2to01aHb7LaXL1/d3N0fXjx/Olp99ZN71Jb7Iffd6fgQzvu7oxTouCBCs7rBIQWUftiL/JMf/KtvvfzedbuGnnXWXkpIqtUNtc3zQq8DBBExUuQAx8wc0FqzSlPGJIICSdR1SM2iRaoNEQwB3cksJCEJL3dKCIJAQgDzJUxvvqR0IjAEoJlqdQ9llAibbAZzhuIU4UYA6LCUDhYaj/tP0WcLxxAAEQ1M3IIYGQPRkYAE0EMbhCIygUcENAVkl0SIaK6oVt3CCXnZfDvyYuRlQQpzwyACdSIKRDKrrVrDMPPF8QQAod5aVbdAWFj/uLQQkBZyKARAAwhy89Y8XJfXPhFEUGsaFICsgGqO2MKkdFRHYFZCpiQW5pC8mUARBp+NJRODq2vVi4tLDiR/U/Q3BZGU+26aTmqt1dENkKLknlmOx6NwfvvFB7c3d/PkKfl2+9i1blary8vd9fX19WEadpsvf+P33e3r3cs2ja9rm9jp6upqijmY1MfVsKktDg+f1dG0YS7w7OriBx9fT83OL6+aN2Ca2oRJnrz1/tnFpav3fRlvD/1qfapjneL88uL13e315z/Y9P3l5aOQ7uZ2b/XAUMf5GBFnm8cspZrOY12tt2dnFxEWmDarNQVf37z67NVn24v+ra+89eEPf4wBXDpKMo2neVJadTnJZpDjw81h8rOrR+cXT1lr3/ff29/+4P7zl/bgObbRj4ynNksQd9AUMQsaCCKiRyiiuLo
IgQdjyiTRamSgnIQUXFLomvLczFyFVBJFWKvBTCJAwuHRzDInohTWINDNVR0ihcESl9RqVnVuEU4BHugE6D4vCbQIxGUGCmG2UGuxNWVmSUTMHuHg4FWEOUIDnEhIkHhRWQkw1lnjp6944UQQQMYsZB7NNRBTZrGAxdnMKSgilsWwhZMDI1kQYRcxtVYXYihzEBGo1VqnVtUUAJaNLyL8NAuHC0g9zLVaxAK+ckAOIvfm4EG0mJfAXcMroNY0jWYITqYFmSA8xLA1M4JtObvcPO27TY+GWk3nlAatLVQ1NBFoVUBD8DAnFE54dnbZ5sndu6uz4173D9PubJ2kO46nNh5XZ4O6P758EZA//uTHH3/+o8357q23vzwdjoe7a0A+zuOOUPLOw7p+kzC3dqy1fvm9rz168tbDfP9wukmprFeXfc+PHl8wJ4cY1punT19cXV1U8CI50ai1glMZ+tM83d5+kcrq6dtfrqoff/KqcOo6cRPp+kRpHtvhdAfsuazneX51/QnKar0627cZDOaTXV2cvf3B2/M817H2w6pqvb+/m/YPqUsFi0eM0zxr67ddGEDXb7cXgfN4uNvrfbBnlmqVSIQ8UFtTYGLgwGV00yAgzIUJiSQXbC0FFKIkzBiZVTmkoEQwGaSgXhw90Ah4bkrCEogYEuFYkZKQWNg8V18AaxGgNmuzFlatNkMnBDZwgkALLgmBa60AgILo4GoAi53EF1xVmAeEZJau4/4iEQqQcHJiZRZEZEZAanpyg4ycO8kdclJiJJyFiio0nQkgUV6EqZiR0+K4YeEMwACQkiQsauCmphrNogJA8tqOdTxNbZ5mqDPAGxZKM9CGERwYZhoNY0JvWJtnUHIGz76U+x1CGS3CFAzRAJQhUoAhIgJjBBq4NrDelaY4iXbvdM8f7Z70wypLQotaa5gJAmeGwAg3tbmeVBsh5lI4yWq1Es4R+fzRo64r8/G4GoZHjx5Lv97tztfbHZBfPH4seXV9fXP76vU41/Uqq+vd7V1oPbY5qMsl9R0SpYfb22Ykq9L3NJ/G03EsnAHg5vbV2fl2KcJeXT7ZXl6y8Gq9mY+n+8PnNzc3gqtguLu/Pbs4u3r0VI0+f/nZdp2JoAIkWWnANM/H8b5f567fltK7YimZhc0ijCAs9Hj55JKG4faL14fXD69f37g38FnIcjc4lWgTWnDJYI0M3v7q7xveewuq//qPf+2bd58KSKta3awaajQzs2UBasiOC08GnZCInEvOQfJGfohdHrpEnTfV0R1caT9O1QMSkrASuQRxAgxIRGELoc3DgzD+DZokEjppddPwuUEQLmFJDPcApJREWJ/2jcMAAIAASURBVNwhkRABE4lIOOTcEyGqLZ2TCOeBuZNVPwiFc/FADHzDGCQkgxmFUurm2UEsZWLBnDkQwknYTsdqxljQrSIiQ5ihahRJboAdEdnC4kNyEYpcIsBqm+roVQUwrM7zrG7EkBJ7mFpFpWgKJoAJDdUswiKCgdQYyYEagnMJylybm6K2EmDMbObzNHKS2QlVQ9PMVUIgaVn1gz95zJu3Hz0/79dtPoHaNE2AsV53WWhqNQGBJ2PgJBHOBCklAra2GDHV2ry7vHr61ruvX10/zPHo7CKl3IIxmGD46pevzs+2H/7k029++3uU9Btf+kByvpn2WYY+pHSpKbH41KpI7rp1gM9z63J3c/OaWZ49fU9nRJyfPn1mioykrqpKROG5lF3qu2Mdd7vdlruXd/fYb54/e+vm+rP7h+MwrFvU6XTfWjvbXUmi4/GIMOVcxrERVxGzmHOXNhfbeTrxsWvz+OrVF83bfLI6PgxDl0sPEF6rh/S5G8dmduo2K0hPceekAgBTzK1pKDrj0m30aBAkQguWoQEgkmNtXnvtMAkGUDhHCCiGt5gBXSHe/FsDBQnAQ4CFAKt5gAUxKzkjegNTFpYEEcjaIhq4oxsGZPOGJAThqokppSScMJwCI5CZ3M3d+tIhpimC+9UyFcUU0smw6XJOIpmHYWDx1maIjOiUWSA3nXnKiKhROUnpIGVwCHA2bxbOWMKghaWUwMENTUO19QMTG7IAkFlQmKQkqc85W60Pdw8PD+PokNzNKkSYAAG6a3Vrjj4ahCxAaQ9abvyJGAO9NUrKCYcs0vNx1nnycNeKqkYMesIZ3KraFFaFhkjkrc4G8mTYvn/23uPhCq1N497bydWYubW225wJ5+nhgBhLYyGlVEppTYUzBh11zIxgen/YO+Wrp89zFvAmktt4fPXy0x999+HRbvXWu+984+t/sKwv/u4/+Fuff/bhz37jD5QyIBQu7JHn5lkcEDnharM+Hca3n7/9e9/5prb57OLifPu4ttPx8PD40bOH+5OZrfJQp1kgwPthvQ6rpXTrYTOe2ubiSbde333+yXSc+lLq6eF0OnGJbugDsWkM61WdZrVxnkxyx2Krza7vs7v1XT8djtP+mBAmtXracwDnzWyhh7uSUTmHc171z56+K9IHAuScsK/TeKiza63zOLqiO7TWqiNYFgAHZDQ3QLDw5tG1ikwcQJ5ESIgYzN0CCBmIASAMIAM6uSAxRIQZILovqmQPECNgCg9wIQ9XcI0wMHVGIhJXh8AspevycvSHJf8ZCB4AS25AwKzjCAoA4pxR0EhZQFil9DkLp+KlSGupRcVkbkDIUoAmjGBgzx0xozOAUpgAz17VLTORIzgguoIzYORcmJeCDC4woEE4C0dP1TvFmOv9cX86ztURJAEvn7Jm7ARq40wABqREGAhCFIFuQRERLoybde43BGwgvN508ylO99HmarOFSgNwra1R6DFOKJ1sV0NpleaHpvV0mhgV0FQniFjnM8F888XtelhJovu7o7u36n3fM+eUuqXn2ZXBVes8hR6KlMrQjnb7cH93/er+9au7/c1Umx2O69Xq/Ol7f+xP/zt/+S/+9//q/+3/XNtv/cLP/tKpSffo8ni8S5OtOUfEsCkBmohPh2NgnJ+fHw73wl0SWg+rMC8dE6d5spzBTJC7qR7CmqRhrrp9fLXaXpzuX+ciq8324eZ6qveUZbvadN1wPLRUOqsK4IyRCUWS9CtOyTXmOgmlOkedp+aWWCrN6/XVanv5+u41h20uLoCKElxeXDx+cnaaX/XwPAzv9ncPrx8eXKFWbbW25q1hGGggQ50cSYKawhQLIZNkMk3SennTFFwKvoGdgbJgMEQWd2/g7EGmgAQsyGhN1YFZABEDMUDNIUidKQwI7c14RhEJcTG1YzgTp4AAdHcngYgQplwSYyBBlwQ8jCkPnYNP5mbNkSUX5DLljjjJw2mOSRGDSAOIM7C4G0EgEbB4YFiqhCsR1OZhEOzuoUwdOUYk4QgD1AAA9K7riagkWfUZRGoYsZOF2mgM0RwSS3EOtWaSCDUog1dbdNLhEGbMjCQBFRnLWlbnTKVFxOXZtmlAdV6Tlzwep7mqTnOWrpce0aD60K13Zf3e7uzx7qrLm5PWLqrPjVG6fqCUCamZvXz5eb9aI0Cd55wzc3Ijlpw4ap3MTShT4dbmNh5eT8dXr2/raTzsxx
/+4NNPr1+3cKGyW9Hw8ndeXV//+T/37//lv/hX/trf/M83P/rixbtfuTuMHXGW3FpDpPE4XTy7OOr+7v6hX/f1MG93Z6uhS4m32y0RUWYS4Zwwxc1+bgFzm/b7/cVVvrh4vNvtxvmUxBDs7uZVL7EadtL1QIU5dUPKub+5edX1CR0xOQqENa3Zw8BhgtNhf7zb3589ubCppu7sfPf4eJxtPr54+0ULJYaLR4922/L6eN9/8cXwYsT+8jjh3c1rLVxHtUm1za01ImCBcCJMKBI0veGWBGLj6rUWdskA4GABQEQgQZoIghkpEwIKExEEvpGqZGBERgWPAIS2pNAAHNjM3asruBeEJIvcBw1goTEjEJq28JDF6IIBECKSJbXWaGGWFCHBeVYKUHcwln4jJDNlyIVzszoZAjAFsnsTSaEQAIKIEUrojgZcAxyhiwDwoABAJ8wsiGSAoRZAXsrQ91kCS8lp6CTnNeKu73rJxnW/f7h5vQeunBzRyIE0BafeWtTl5bIsIDTIFucTZNo8KWXbKEOfh0Rl/zBLCkZoHOLkEAK0Kt269IbiqIyRUKKl6VgTHI7JEHpQFU4AhIinw7FOo6ve3d0NuSOiUgosNU43Yu67DADaYJqhaTxcHygXyOcffvj627/7o4fR1Fa1Hcd20pvTxUB395/f3/3NP/fv/tk//kt/8re/8723v/S10/4IfX56vr25f1nn9uLRIzs2CMNM2eiorVTvzlM/ZIW4PL86Hvel61EwIvaH+9B2miqhdF0eNutq7XB/PR5ubz7//Gy3ksyJOyKaDKZqpe9as1RyKSlAZju2aSwcrkZECK4aieStt55O1Sc9Pn787n6v43TYna0piSo8urrIOe3ncZsud7IyvaGcuq4zszbadJqjuc82TxNmGUrmLoLAohEJBSvYMspGTtWjerDP5EAoJMkCESPQIdGS5CQBYTYGIARUd00gQKihECCQVJ0ZA4IIAWPJOgASQkJsy+S15MwC5hMSUhAAmjXiRTpkkvLyXhbBkgIxPEVzI4SgEBmIJSW23EOuBAeNYCdgMpbgBNaWeIUDB2BkThiJaFZoIoDI4BjqgYyZjEdKWd0QmDiInFNZDUPOWXJKKTFuSumd/YtXcKwPk8/eiYgImVTVit4rrQgAWgAgIxcDa2YEsTrv85kPO+q6Qg5sMSWWNBskwaCmCct6fd7lklIaKKmg4vHV9Y0ex0fbzcXpZMceL55upBcSDW/HfT2NdZrcVdsphs12vTuepoAGoUNeMngiIiyZMGFZn46n+7v6/Q+//5PPXr3/la89Xw2nh+mwv73evx5nPe2n3/zRFx/e3u3rf/2n/vQfe/Hes0nnZ0+e13a6q+1UbdcPZgZuFFCkONb1sEopqVvpVobgEdvdpYYG8HRqq+Hs5tVnD/vx8eOr3e5yrqZuOXcqw7NnXwqxKSqYoMca0R8e2lzbPK+7PjxYJDGrNq8n7KRZZEwpZ2QKMIr25OnjTz66PRxazrkbZIx2cXl2dX5uPKlKnyA65ry5//x7/+o7vzLV0AZ6MreGChhUhBnY5iZDODmDMCTFBoIBiGKK0CKFMZl5MDAVQBNd4H+JKTE6mqKyFCRjYfBAR3/D4kRYsGmGphMEmaE6EjmhI7lXRYYiaaH+uHtKmYAjAoBNDQBaa00hSyRmRLUGIpKIGWkZM0nKwcxA1Vmo58gQRhzq7lKcBYgTYlhYEvblu8dl2CAnh2ZhAZAIILiSFMkcFIt+rLW5SWbpUkqlF5FUUp9T1+duhrFZPdnd3bEyIZCUjVC1w8FKIx4wrwmR2+hEDMBzM0+Quxg2uFozUZACgQmHFCFOoJ4ASWNzuepKjwQDClNuUEa+GVbd+frJo67PhFODDgO5AbRax3mcIyKiAfrpdOi6rvn4sL9m15kkUfZQZCDMm4tnz9760uXT9d/5u//wk89f/+E/8m+V1cCEf+Iv/amyvfx///W//eu/8s/KZd1uH3/7W/96//Ctt99958k7zx4O+69+ddvR5UcvP2RC9JmoN4hoMwbc3x3WXXd+sctdZ74wCeY0rAFJrZeutYeb24fDo2dPn794gdzfP4zry13fb4G73A+n/QPVY+p6aHZ4uCMCrT50GRFrw1XH/VBQnK1FRoQkkSiX0qVVP5Dwq9fjrMd+GAjYrD46O39yuVYfKWKVEkhEIoD+o08+fXVzOB7382nEyq6QUlqvNv1qve1LUz3CQ0DjkE541NkjIxb345v6ObEpKRAAFKGanJVNTQAJRZMxGYKnn7a+AiEmwyBADg9EDA1oZh7NEyATOaIuAf5FM+puP7XFE0RzCwCMQHebJhNRIQlRETEPZTYLRHY3ceAX31hLMhYTRkZqszMxOhGSB7uDNy0FVucpD5QIEIiIiD314mEcOXDKneQhdQP3PQASWlQ91HkOyB33XZ+HYVNKVzruenKEAEHiU9sjz0whCMxNugUjRJsd785tvc5UiFJgZ8N5161LXyD3VnrGiOoNoYDkcHJsjhMw5q7r+7xer/u+6zerPDCkmPSEYJthWIv0sircJYRwdQ/VYACoOnoj6ihIa1Wbx3HvHtNxOp1Ojhpgm7Ord7/2i+fPvvLZ7enXf/d3L148PTs7+7Vf++2PP7/7T/7T/93jxxd/9f/6n/3uj370+ublk0fnX/rKB7/73e9zxJfeev/86slmuxnWHYTa8RB1XO96c5qPx+PhtutXfeklp4vdpRpzwvV2havd5H3u+3FsR5ve/dLbzx8/a6p3d/eqcbq/2+9vMKfjaVznnkKQCqeOCIauL1lASnXNvahqAEpyt4BmXemkrIf1MGy3JW9yXq82w/pip6ba5rPz7ZOnj1u06g0jUubd+VvD+x+gH//h3/+7//DX/sVeT0PqqCEEdauyLv2qrNerbjNc9mk7xRzUaujJWyAKSABlkIQ4iCRo6wSrLgmlCLQgMx5rnsIkYcrCREGBRGAqJgICYQARSOpg4RCEQUzIQkAYAOhMJEgmQhggLCUlouoRhEqh5CkCLKoI9p1IXkDMTmgRoOamYcFC3EQKCzMhZhyGoY6mAQFL1ACkQy4A4Ik6FJBFrQwWzfuBJgdSpmwpkxBLhEILFDc+HbVNp17GeZxg13LuKYkhcOa+H9I4DsO6RdfkABZhKEJpl2CjOUseOkrOtatTqjrnHsCHAE3dkcUcAJq7VHexYnMdVWrqC7NB0jLwqvQMaIzVpzX3hcGpGSW3INIMKcLbNNc6g3liGrCYYmC7vb9jZqIhE1ea1A5G66uLR8/f+32rxx8cVL797W+71idPn7bTw/Xd59/61g//wl/472r4t37v25n4s0+++PTjj/9H/8P/8Gs/80c+/PSj29uH7eUFJxaUvuR7tzaeCHAoopS227P1emD1tF7PbSylpFXPwznIrrAweqX917705en29ru/+3tffPbhw+GLNtf5YSbibt332+6DL33j6bN3jZwzswpmysOK9vceLQKaW2KmtLbTLQGHOQ3OOWXqQ5tGONBuWHdvp/3ZPmWe6wER+7w2r+6eh8Ilf/G973/3u998e
rH+yuWXPrnd//D2e0li062TkLBzcObgnHtfjebCyibq5EyZAABaM2ftc0ZK5uGkgO4QQJgydW82uMiSISzcF9Il4ptYP2AEKCASSSyldQZwQAdOCBSIyZfoGzROCEhCrr5YcsGnpq1No+uGJSoFI6CHYgB4qlOb55Mwp5SZyJeZY7dKrS6yVGYIyeYBVIwQMSBR13XNAs2FhCtPBuaWOHeClIVEBBCNBBDq6XCcbtf9er3ajOO+36YwCRCIRgApQ9+vTjVbQM4MligEkSHRaigeVIau6/KRTh2sWfrwpuZJOMnCYOynqgETZ6YETEyFc8mZJbEkJI8ghCzIltCNJQKahpuHIWvVphNaTHNTgs2wmvxU34DzsKTu1GK9Xpc8PH/23qO3PujWjwNgf3vz0SefX148effZs3/+T3/54bBPQ/dP/+k/NaTdbrOf7v/Mn/3z/+P/yX/4q//sX+y2xzre//Czj1+8eAYaKCGAJISp88qpb/2QwHk+nkrZUGROnjqPLpX1Y8au+Xw81reev3N//ek/+gf/1fVnR2vlVHkeeWpGjtu7k8jdpx+9fPvLb/2+b3xte/XWvB0EoR6mnLAxnY6TawvmHF3aXZ7miXNKyB0mV6ttBAug1HXdsOZ+uFD1aZpK7ptGVcurlNYl9vcxPXzta19ZX15+74uXx+MxF4mqhYckRAApc8oYiFmoQRaB2XSCEEJEI1cIsCgWC8AvA1bwpVGl5gpvzLmL5oHdEQkJCHyJkDkAECVekLdgQMpiqABKiwAYgFogUEhCycYIXmPBN89tVG3uboFzqyVhECAGU5jPBOjNrLoQkQghhQcSQc6csrXGjBDhRCHCKVFJQFCJexRAB2AohVmEOZhKphW4IYIRpMiub3Tyx7vT7c2h6/KwxrTqV0OnrTFjrUpsORNLztFnIsSEzoxCKfqUKWPpejcCr+aClDwaumVhITY2MHF2ZBWjlCMhY3ZJlgiQGvOAHpDMWAh5gLyVoda9UuYGYeR1qvOxYFqv18Bw2I8aTWsDgKYzsAzDmnN6/Ohi6C7XF5eb8/PTQdvpYZ7Hx289v351e3d7NHfEcvFoHRGhdnu8+9N//t/7j/7KX3l+9fj/8L//PxrK/WnsV0OHzAHeXPU4pBTQlUGPxz0aIbAn2pxforXT6frsaserjTazSqnvbNLf+OVfvbluH73U0WMCEj5Lq+1pf3j5cNrI5iLi09/7sd492M/Ho3fePt6Pp4e7gOZcUvGUsmo7qfV9GRKrTkBxmg8I3Pc9MnFOscQrASJitRlqrRAwrPrHz9+S3RnUdhhv7vcPv/693/3s+jZttk8v3rq/eV2nfT+cp5Q4UekYAHKl2SEWAgq0wgkJXDXcXZVEhFFQBDGS8+Sg4E2NCAkJPEUgs0YQpohYyljuhhji2R08nAUlIbEjG2ZxNUAkSgs0SNg4jBmBHZuC0zw3NUBOEOgarTGTs+BPtQQjU1AkATDmjJwFMUDZQjLjPAEgGoFTztJ1uRRTmKsCOiF1qQTxLCmI0pA7RjCXlAnAAHCZFaMlqLZ/PW63h/t9KasppQnczMIh1BTJk5BAl0AwMnIGoCScMG0Gwky1zZ3lqgRA8wRETuBmwJAUXDKbUoQxA/cByQiJIyCBC4Qb0CwJgKlDgXBJZIbu3rwxgIh48zrNWCAXoUbBjuE4JAQZhoHAEbHbDdPoz1+s6nTKWTjpfjrsZFO1jcdKMSBQ4CgiEPL/+Zt/4z/5X/zHv/DzP/fo8dPf/u1v/oEvvVtWgxCTYc4duj1+dMEid/cPfbedp5OkfrPbOWEikUjinVFuudnptC79N7/9zdfXpx/8ePz+3YOcd48uH+8uLnR+2Dw/h5Db6/HbP/rx164udvPmd7778VcN18NKcjIgNm+mREQuhiAiXZZx1KZR9bgeVpk4sET4IiZpOrvbdPLA4KHf7Xb9+jI4/eQH3/onv/kvvv3pj8b97aasuv6MwMHn+/kBMZiR0ATRGfrSmfvslTEQ2CoJQ6ItYgVTgoSISFZSLxQOVNDmOrVa1RsgRHWDhsJvdL2OpugGiAQpWBDDS8KcEQkIyV0bmPtiH4UkLGTChN48nDhQEYDcgDHcwYM8FNHB0B0RCSlYghgkyUACxA7BZghvlCsWAB4ZCPsVdQNzhvCsjuhIEcKYBwTAlBm9BYRgYWEOnGutWrGFNqRE4jHu2/39cb09gEffr5pNVScFcPfEIrhCCxQKEMQeAQidiIgZdJFlzOrhgQiCGKgKwYIwYHiAs9GQlutytCKYKYXGTOQUwICApBiRYAXbYjlrSDCAOfFCQPXmCz0+5czdDj1EpI73jx5dcaZC9vTpU6BUGC52q8uLJ7/7/Y/+zB/7E++8+/6PPvr44f6W+FxSHus4DKuPf/id/+n//H/9pUfP9w83t7ev5und3e6cF/QZ17Pd4wgc2yHlDB7H/f7q8VPBzgmFyBBovUFKYtGv+3rdPvnw81/5rd/7/OS/8Of+6Fe+9tXnu6/+tb/5t/+bf/73dDz9O3/0T37j9399/9aT7//Wt/pKP7d5cjpY1xkR2FxtnnJKgZ5SX8DcdZotycq1CRX0PNUmGToqEGhuWTBy8hbCmTLmoZf1xfT69ld+7b/5V9//nbzpX1w+PjWeIJDTdnNp2A7H1yjrNg2nlFBaYu9Sp1NlzAMxB5hXYCeSAiVDIiIiwrBhGKRPrSLl882k++k0zsdpquYqocBOAeFASJ0kU/BIpTCjZonEkFiYourMkOuErQZ5EkGEeeFUMUoRIkpnkU7HmThAWoAiObGaKYKEpYhCzCmLDKttLmNECyf35dgjKfVmcyBSIilUekAKceEgoQirENylFQp4hlbJLRgdSVF91mbq0WhqFROmHr2maR/393sMiggkG0+jgzccI1A4iaBVVYOUilmLKKE9JRMyxNHdwxxSJO4lEQrY5JRSGDLMKJaZcyokbCDQKEyDKhNITi7mFmABjjq3VWICZgkObpqCNSJ0rjZPAHG6r6XHrs/Mvt70HhpAn378WT7bIb2VpHt0dvELP/MH//k//60vPvv8/S+//+lnH55O33l9+9pKMtPLs+0f+P3vlZh/8OGPv/eDH4LqrtuNp9YuYNP7eJiZ8tR0s05CIT6t+r7O2l/ltNno3f328TNYd2bVZ1fnjz7++Nvf/+yHn++/+od/Vuf27/7p/+Dmlf7eDz6/fj1Op8Nf/xt/89+7/2M//0d+8eu/9As//q1vfmXevP/lLw+DHQ5fGIwAgCQEKkDTNAXGaj2YGaCXMhAlCxd3V8u5uJqHI1DOXFIqfZ+HC9isX/74u+O0f36+PYbL+bbdzovnwdLgsNnr3OJQQ1OIYENsWXKWJC1TRJe8BlfdZxx63vYiffKUEgoSwabr5hKY+o3JbPowne7u9uN4mOZDm1WIE4twzpLQJLjkgsKek0JMDDNTCKUZCBKHQiAjKAOHV8qQpGsK4CxdSkAeaggCkxAmwZRwrjMAIpaUEsHAP/PHH4sAcWHKANaq1hqtqptFMHFaDf2wEV7OzhjcIyEhrlNfmAE8EuSF
RkdspnUcqzZtzsc9uNF2u0uFgaN0fZLkWtGjjqO2yWM2VJacJQOQKQinrtslIaRgEghq7cFMl2R/Ji+ZMualNemRxjZV9K6kPq9S6ogEI4OBMJVBkCvIFDELpZVsV9Sxh86u4eBxmmszc/V5ns21X2/WqzN3BKSu64mlzZWIGrQPf/Q9bY79xVyn9x9fNo2//y9/+emTi0ePL1f9ENFyF++98+IXf/5n//if+GPC6V//9ne++Tu/++LZkz/7J/7I491mt94kYp9P4zx2JQ9dSeDH+1sWOb86x9U6DatxfyjrHeQNBpZudXx1951vffO/+Ee//qWv/uzji/L3/sE//r/83//WX/9bf/373/8tUxkPh/v761bbarfaXJzn8mSq9etf/7qftIPJ2r26grbQOp+OjNCv+tJ3p9OJBUVkaqPDBOGBDAgsRCgeABDIPjx5Kz3/wEtq93fXh9cPt188POxXq83Qd12ggEuPkV1xBom+y6UEoxF7otTcJ5gZvc8CZB36qgxM1HW07cuqT/1KUsopJymZ85ClpMS5z9thV/JKkoSjayTMXS5Dzqths+6H9VDW6261KixBCZIQAQLM4W4utc6JISUJbIuCS1AEBZ1K6ggTEUPU1FXJRhQLpKd0HZEgJCkdMRm6c8pE/XgkhEbomHmeFECYObFDkKkvhicWypnIQDJjSXUGQQ0P8zr5UZVMOXQRvPg47Vfbp9v1usfMFQx9cm+Is2mHsk1FzZxQUCY7qHcrzkjM1Ajbgv3NqUGAOxExO4IbcdFaEYOwE80pupxWJJQoAEtUT0IMFkJCjNJCp6pzjdSZA2NomqYx5qlWF8acGGkTFA18vV0TRCZh9BC+f319ebV9cn754Y8+jLRaDSUNm//eX/yTiu3Xf/03P/iZL7/1pffXl+dD5rOzs5SH7/3o+jd+7Vu/869+5cXz53/5P/hvX56txtNDqxevr291OpZMQ8fqTSMo5xTYpRVzH+ackkLqV1ubHMZ6eHX3L37zWz/+7OYbv7ht7WY+3f/mN3+jdKuz7W7/+tWf//f/0n/6v/2f/W/+V//L737r++fr59uri4eb16/uH56thttPvjjef9iOY/UxEheW0g1qp9bOLndX0zQdjreATiFAwEmIiBeOd9Nm86orabOGsiHwYRi6Bptu3XVddVfJ945r7maPhnFqanjvXgl6oHuKLOiM7i36Doece0DsMwa4B4uTzEJY+pJFmFJIz569SWuYvG8dStenvl+ttnU/RauALeeUBXOSrmdOE3AwnhEzo47H6l4rGoai8+nYIDR3WOtpWHOfh7A8Q4TDsDrLOTfbtfgi46RxEqlTgAP23XayEBER8UQsJNVyW3XjcQ/E7CpUmDkVZ3F3kQV7whoOBtEJM4BQZzyrhkVVr4ANCQD7VpGQSNo0Hi62m0ePLlWVSUgiGGbXMGBPDOJ4UqsRDEwes8OJIoMXxACeJDFGJgJDAvcIcguLIEpmEQ6ClFLpcy/i3pyoUwRiT50gBkY1AkMNnVqjBwNokTyLzW0+UTALh3lrc8bCaTYPlqQ2a/VQD7fTzcP2rfXTq/6T7/7ryWl19ej3/8wf+o/+0p9963L7G9/65t5cuk4VP/zeh9M0ff75XVL8j/8Hf+EXf/EX+rwZb1+/vH613pSr8y2Bi5CZJc6tnkrJQ9cpONgch7lfbWDoyZUI7z99+cNPX/6z3/idy8vLvksvP39ILo8uzlUKSJrnw9e//sG//W//4p/6M/+t/9f/8//xS3/E3Gyufrs//L63nj5cw9Sm8f56aqe8Xo8OLIdhfXmBZZSHeW7T6WDWdtvLROV0vCOMzfo8563kJI0w9xFuoKh6d3+TxvlqWF3buL95tepwtbs42nQYK7P0Xa4qhOpYE60X8HIS77Mn5JwLUhKcBDszI57cAVIYQVckpQFk06G4Q6u5ap7NEWcAKClrPqK1BdlAgLlLnD1lAWyEgZzRWyq1tmxuWqNVrzUinFKQELiVwhQpTAlWl+dPhGie140KpZP67Wm+A9fWqhTcbLdCBF3qSuqYGWdd9V3botlxnBpCYkYiYxImCq9EQCJN4c2RBwWRchZVrXpEIhJOHVozQOPEoXpxcbY726xWXWvm6jlJhQlBhZCREnJIttBwECGHaqZApKYSlQSSdK4tYE6Srbq7IHL40sQHRGbwzJKldGzLBFM4E1HpyGOGpbvJJMjjNB81BsidSNFAmBrA8TBqm9frLSsTB6ipKwuq+fFwEsbjWG9+cPz6V76y6bNotPn+137zlx9fvfsHv/Hog3f/+PX1w83d/n5/4ovtaiibVX+2G853qy+u9/vDuNusyNNcT3NFBjztD5uhV62qmjkCEUqZpmnoOgOkecbxMJ785ub21c3xZIUI1Pzs/Enq18CvOka2lmT3z3/1tz789AtxeP7isYpNp1ksCQXm03vvfTDf39VjW+H67n7PBNt+fTydjtNn2/v95eW5cIeQ2qzuhwqnruspSN1SKUNaSwDUEK02PdTbL1YX3eEByt4uu3KEada72zodvVHuV8xpylZAYyY9y72n1Oox1tYxUUkdyeRKmQUzeFASaDrR3M2rUlYlSY5ITpxMZA46zWHAkuosmgDNI9B0JAzmYEEWMZ+BlRGbOQMyVkL3AHdyg9ZaQHSFABugdXnd5w3BpkgPWK1Vp57ZNttH6Vhw/xpsnOpnZ9uVABqnIikLJXfFruA2E8zXd7RvjSRSZmJEhEJi6oiaZEXYZUk5ZzVE9NKhIZlra5UTS5dyzpVstVu9897b/TCkIgBgxCyeHDsujM6CyMgLRQIQgABMtVJm89qsJUZc6DBAjBYMYUaQCIFQuiJTI2+VBTAMgJEzODIVpqHL4B7hE0p3+3Cc6mmV1itOyUAE0SkPHWlExGq1kpI92CMBSmBrVUW4dGmapkxop+P3v/v9r3/jZ590fUn8k89ef/LD755uL59cXD3a5HcePRrWq3nGaZqQ/HS8f/X5nqDbFTkerkPH8dBgl6tCmysPa3e/u7ndPX+03p5NwKrzbCpV11nq/c39nR3n2qXy7PLJd774yf3D9VdePH/3vWff+/DH9aB9H4+fPvrtf/3rv/DzP/feW+/87B/8es6AEcfDfLla9086fH3x9lsf5KyH2/u5unBMMyCHYBBBhBGJKcx6IqTcFeKi7uJuzUrmwIiqYMGtXnTlOpehu5znGOP0cHiYYo/pvAMO0o5ljG4fTUpKNPdp2w0XQQenCaOloigOqWujMUZXCrO6TWQcOGDHWViwgzSMrQmcEiByoXHGmBHcGxIosYJRzpkFiBsDxCIEZorGBJJYE8uxeVgguKsDYspifqLkq3IZmtRORQSx1zolWSFMRCCE69VqHPF4fOA/9GeuSteV0iNJRM68JRB/I4jVnHG9zd3AyBaoQSq8hkiJu6GsA2DW2QIEI6Xi4a0FMRKBOULQ87euHj960nVdSh0CizALaAsIyoU4MTBpzOGKgGZeW4tQphLhJMpJAav5ZF4JBYmW1SHiYtZhD4vw1dB1uQRGYDAV4V5o1efImRTV1Gm
OHF3S1FVqVb0aB5r5rG3WlvuuW23yUKT0QiKckkjTJimzZKtaurxe94fjw3oznMZ56LrHF1tyRSRtOE9VEm4uzp++8+Vutbs/nObjaVVkmg+vXn5+PN09e/Z4Oo03r292211mWQ9J5ykcN9vz3PXj/c00TZv1tmzL/nD46AefBZfd7lyj/pN//C+H9eWXvnrZZzo9+GfXPxmn1mYEP15e5C9//WuPHr24uHz6+vXNz1xe/uGf+/27ty9immAaOQeXohpQx6HfpMIQARDa5jrVVCRllJxK2jBlTstiE0vp6HzD51dtmuonP5yODwYy+dxscggiFuHt5qIUZgoWrKyO1q140/dZzrpy3q97zgThKEokRBIOdW5M6jEmkdx13WqdVlvKQzd0XdeJYGA1aCktcE4gRAxnnhFaRDCuAA3lJMnCW3gQsKtYDVMIlfFkENQPqXQhuZXSS1JOKsIISaS4IwmmRMRMYhb3QEbEGFLrJIvAHZAQc0rMLIyJ4rIftqthnOa7xGOXABCnSTMn5i1oSpyZk7oHAkZCiG7QEGhNzAxKW211t7l4dPGkT4W5IWgpvcdoBuqNEubcKToyoc0RIwN61LneYWwtn8zCjobcOFWPGUAjxJURGIGBDCFaQ22OCAwoTOqNyDMLAxEEUE8RENxay5wzbwi6nvP5buiCpFrL02y1d7em7lTyAIA6qaoBzgt3z8LKegCjafSut/vDw/bskRsU5ot1Tuv07K13ADtOuV+fl25z98PvTYc7gJNZPT5cH/dffO3rX9+U9OMPPz3brnORgDbup2hVU5lrw3qbCKfj4eHuVeqC1KOCQwyP1/+dP/9n7h+u/87f/5Vf3+x+7pe+8Sf/XMI0/uSjTy3sS+//3Fe/9sHlo6vt2fPXL++2E/2hP/Sl9bMBujPfvp752Ev2kjkl2Q0WPh6rSD6NszmcXW6H1RYAAUJSVzJTpgqwLplIAgp4bdc37XAQ7nfrjarSdNwmmfrd0dpYTxPUU8KZvKbIqy4P3SCPSLCUlPNqvd7eldub/bXHjMF9x1B1GmsujRMndcLsmjTxZAZslFOh3tDn46kvwkglcc3QpjSHIjrGrYODH9iByQBHCyJmlJYLT+K5IGHHjMiV5f9H05/1WtNk+X3YmiIih733mZ7pnWquYnWzmzO7mxQpURzUlCEBvrEF2xIMG4Ig+8rfwveCLwwbsD+CYcCAIEG2IYuQLMqSmiJ7rO6uqnd+xjPsvTMzItbgi108n+Cck5kxrPVfvx8QNoLs1lt/PSQmeolERKyWglwSs0v2AomyjENxMe9m4U4pc8rFGxAk2t/mFmPSpnPwe5IViRISBBML5RvmpH4cigCPtdagrrax+DhRa+AeJdF+zDNTyoHUER3ZhOj8eGx+TsEAE5OwZNVN9dSsOrTAtrU1D2dErtVo7cnVzMwU3VRRBJKwhdMFUYeRiBDUrAIYBIE4IQpxZhKZClloHgAPfJfzzlfNyTCgtQWEGHjIg2dXdKAkQkmKqdx/OFmtGHZ9tZtGeXqswMO4O7x+c7/0+OzjT7ZzTUK0tHeff3777OX903Jc/sU8jL13jk2m4fx0Oq/Lzc2Vbuu7dRHsxKbbORD6uopI0346H/t2zjkDuLf13S+/enh/6gvu8tSW96TL/+bf/Td//MnNf/Zf/skf/HP+7Pt3/6N/+9/66vMvQPrt4eXdi89Ssj/6w5/Rh/W3f/KDmx+W3fe+4wD9fB441rotT49DpjTdPa1nbBjEverp/kmGcd5dMY2mS7fOHZNwSQSMnoeUD7GuaX0w1GgIfRm1suQK5ZHPvcVSTYQkibpN2K1gKfupTJI540SUxgRTPgDl9+8+BwCkXkrWbdtaB+J5l9QEjAkEgGtXCYegQZKJoHSAKDmXhGdVbSfADYEY0RUdILEBqqMRk2Rz9ZRpGNIGHNEvSB5kstAI7bqBHxOJZO61A3awttkjcs1DuArgWIT4r/zrLwFwyAWBKDilWXAMENSeedhN41QyQCA2STqO05T3g+w4ZRIUYWZr9qjR3AwxE+eIDZF3824slCXJSJS4dQ6MZtvD0zu1bT8/Ixo4cU7Jvao/WTQA783cI+cxJQL0gEokFhcdeQZAgAu1AZlYRBwhEafhVxk3AR7KVIYroqEIJeEe9by8W9v7qqfWV4vN174uZ4Jo64boOeWcSx4GckssHvrw+GgG0zAnwnVZHh9O5+VeUhCyaSzL1qsPw/T2zbfv3rz98P6b9+/evv72q/PDN8vpA0JoM2t9WVcFXL1/8/b14/E47qacWCLAjHJmyUMetu1MFEDQe2NksvSnP/tZ3R6f3e1yWns9Dow//M5H3301pK5v35zvTw/7/UGQlnNrT/H1V1/OZr/9w89+87d+8oPf+nXsev/mz/2bz5f7dw+nh3o6QUKRISKbewQKyzgOZq1bU62SgUEcwANK+JSn/PKjOBzs3bd2eupNH9ejrU8EUDgFwarYVLtwK1zZlCNlR6JOtJv3Q7qa8iyJCWUer6c8Lkt9enoH4YQc0B1WcBzSCOO+jBNPBRNBNIhwA7fN4mz2BO7hqF3dkIBUwV0DmmoPgIjNLzb3cIyLDTITZo9GqXLaSJSFkALJ3EK1d+vHp+PWHljUfFE7Cg25ZLrEJ1H4L/+rLxF9GGZm2dpS0m4cb3p/AsZduc6lDNPMnHv0SD7nfcofEZechTlHYLcHi+UiekR0ZiaGiC5USi6ptMziNrW+mp9Pp/OyHklwmg6SMgGzUFDVvrgpkJpSeBKRYcyOFQAAulm7uCsBiQiYOKdEyO4O0Iix0MDCnECEmTNBCQcnA1b182l7/9jebnZsG5omRCySEyOA1tXO69K1erdee9XWthoKJOKoqis4dusX91suOZc8DeP5tAzT+OKjT57O27DfYSrD7gAiDtwNlnP16qe6dO/eZOth1qY0XM030zjv9nsFaY58mTgOiLb0cz09HllS91rXh7mwJNKUbvbXu7s7s/XlIf/0Oy/HdZGtQYWbJDPZR/Pw1376g9/4m3/pO7/2PWimH748ff2nx29/8eH169O6eGKAYTfuKM2SACAICgut25mQ9ruZMVNO+2mmIjxO8+0LvNpXdd6eCDqYRIDaIhFAWAGVzACcU2RGdEGkNDkl7SdAfn77/ZxLkAmXeRxL3o95cm21Lu5G1DHUXEvBYd5T2achAziBI6oBgNVYzfqxra0rqbbokWjIPAiTo9a2uB2HQZl7RKDPiCAyMhYkcGgRgQweKtIRkyvZxXIabAGInGRgQQQgKjlzEkJUABRkCITu50TZvD2dv2QYhnyrViNwGgdKlPI1pn5cgWjHRAEJwHNOgLasyhiZseHizhBGrCyKVIFH4mJgtX/YWmXGquvW1pSviM855drcXRIfUrrvfu+GuTBS59TMgYIvbuy1q6qzB1OAsTDknAmlteYuIjKM7FG3tjoGkiEJ+YGhWHSPToRCxJBLnnLkWhuwL82SRdcFHJMMtvXWq3oQY+1L71ZNhfxqvrq+efkcnx0fjvcPp5RW6+urZx9/8cUXp9Ppo48++frrr8
c5ffzJK9P2cP+u1/N+Hk6Pp/BtW9fweT/tMu9LnrPsZdx3IO8bY5gFMKPa24djXc5hwAOnotM8t7bdUuyvxvV8f/Xxx9//rb/1xb/479vrb/76X/1EW+K8dwrK6eZ2f3197ULH95+/+/yXT6+/iPOC5kE+5ETlkDh1U/UGADlnGdKHD28tNNBTHnZz6aZb1NtyMx2u8eWnuDvwm2+juYBARrVagSKCGDPSLiWmCttZiAvjqZ+c8YbnJ5S35/a0Pr64/hiVJTkXZh9v5pfz9/cfnt598fXvnc6PiXgoXe2+tw/mz+s6ug1QsnonCMDsAraNXc9JUHhs1N0QIhW+TpxTzKfltfYnlkBKxAxBDp6HMIOdDWVIarxVreeaDqn1SkSE3aAiIUaoasm5Q1OrphlFCIGE+G/8w08DFakzMgBrP4fHbnpmGpJzKUysSAbkEA5wsUQzEQR24tbs0WwDCtNqcVEV/apsWXIWSuGOhK1tW/uA1CGCmeZ5n/OAxA4qnJBa93uHSmRAyJyFMgaZuzGYe9ss1BGJORNxSiKcUkoEgkDCDakG9sDK4swlghyUSAHPtR2tKXuRXkpMU55JPSubhWSG1nXb1NXdwPzx8X453bsqBo1lit6HkknILMKQKYhx62pNv/72Cxb74Q9/sJzO63IsgvMg8zAgAOXMlLvBaV1Titu76+evPrm5+0hZugYjZ5KUZsnlfH7o6xKhTmEEhKXwkAjN9O7lS8oC2vKL5zff/RGYOegG1tvbu9vD9fVsvmzL/Zd//ifvPv+zxzdf6oe3EF1HqRdvltrpdH/ShQnLwIJJtQN6GQphFoiUC4gOeTjs7/KLj9rdC0dOT0eqR6+tbjW0YgQERViADykLAAQ6IkCwQ+KMMmDIuR1Xf9xfXSWZLxrwnImiueYx7+bpFkKRn5iRpYMAiuScOGdHY+oE0NVCrW0BhtE00wiatKPZBQuCwiJUMLJbNF0ByI0CutqGKOE0lIEJhZhwuDB5zD2CESUcIiBcESTCzDfmlNPIlJiZf+fvf4IAjBHechLm1PoGGEO6JmJzD1QiN61NN4Om6oAIKbqdFBaL8wWsTpDCIsAAxQMRW07ZLrNq+VpYAJUpBURAzWkYyh4jIwQyEAdgmJ8CAjEJT0xFrQW5B/au2npKxMI5ZWZCREIWEjcxD6KVUrBkAAM0ohIu4QjQVWt0B0u7fH1TXk3plsKEU6LMgNZ7183aGt6Wuh6fllbrBeVbZOxd12VdHt/X09K2bV3Pj08Pp6fz+3f3VTdiHobx+9/70Yvb/fH4er+fX756DuHvjyfO6Xhq5+rTbpfSsL96fn33aab5cH3IJQNmSmMei7N5Eo6o2xNCSsMdAWyndyUDsu6GabjeP71/z8vy+OaXE/WrNGdC9HpaHj68+7Y+fvj6Fz/fzk9MsNsd8v62ajvdv9mOT8S0KxNCCZKUSkrD0/Hpw+NDV23dMIyt17VRwrv9i/H2JdxeYRapWzy8i3qioN57XY91XbRXAMu5dNettiBA5oiIgO6xMW0SgNKhEZxJAmk80GFkljylNJpp107MZSiAgEnBgj0NeYc5IQIHqyXX8BYUBUzbeqqnBT2D79yJuBHZpbfDgO4twLsqXtDPnpgmZgpQDCNIYQWR1Lw3J8pJBqZMQMzJzIki6HQRjSGM4Yn/xt9/gYBEYXDyqIgU4dtWJRVmsOgskDlaX9e+hPWuF0r75tHUFgPLaXJDQBMuCEyQmQnQAEl4Kkmy7ETGYZhTGtWbxpHES5qYExEjOxMBRLeViAICnYnIyA2CEOTiqifAC2OehCBBhBu7kSQhuTAwECECHELCS3hCtNC2rrW3mhDJsJ43Ao3wAHA3bWvdzsu2tNbWZdnW1lo7HVcPbmbH8wlLzsJrbU+nczDncVyrrkvL43C4fnVz+1wbjLtp3E0a0oxk3Odxv1TT8A/Hs2k6zPPbt1+//urz+9e/QGvjmHMZpAwkDJww7zbzrTbBPmUJsHV9AID94SZxmvbXfHdLeYr7x9d/9scfPny7H6Wv29Pjh0zU60ZIdzd3hHE6Prx++/X94wcmmuYr4eTuqkaoaZhbxZx5dxgCJYnkBAlTGmk37K5ffJKfPwcIWJ/g/Ih9QzPrTohuW90qYFDExdw8YgJCd+PAAgnMK3ZKpXBCYFvPW/0AyaYyj3zgPLgJBNR+6npiwTKVHoHREUI4B2cItubhGKbRzc0Aums9nU5djYgBAdQll4sKt61HdyVkdQf0CMFIiCSJszCRaI2uzcL+JQz90tbJTEKYEZEYkJp5NTeCIQj5t3/3FUmn1ADcIwAt3MO49T4kQXGgDta6rc2qqfVem1YkRXKzHhZMCYMCTGgsMiQsCQtDQZqmcS+YIhyiiBTOEGi1b246lCkPCdE9Asm7V9UzUvNwIgwIdXRDhnB1dEYphFQSljwiJL+QV6gEAFFH1rgQdkMjOGy0ixwcvWu35q6YYDeXGyzZW7PeKULXpS0bGHjr29oCAYFKmVKZgfFw2M/Xh6WrCef5wGUEZM4Fc8oyHG7vPvr04x/+5CeGjGkkmX/++de9w/Xdi+vbjxzjw9Ppi1+8/eKLPwd9f0gW/e27b79+99XXx3dfPb3/8uHta+sdTLtlwFwSjQmHgajINN7kQsSblKtyuHGyQf3h3fuz99b1uKy7+ZBkVIdAOh2ffvlnf7Rtx6vd9d319TjfCedxHIIuplCD8CIJo3VdGbCUJMKMhRMdDtfjzS1e3Zl6Wo7ezuBMAdu2uVcENwtzC9NcpKAQwOodwUtgIAKhAAzN2DgMG56a36s1KTPKCO5mUdvZLDwq8vliL/fwFpeVboxI2BSbhnqY18sksy/a160uTVdmxAggYFnDj9p720AhnLZQJEyAcQkYjGUOJ1NVg9ZVZGAamdM4zEyFIBMJoF/iLh7d3QGB0Pl3/vFLkibJIghjANKuFTx1RQ8thZHATavVrjXCuvVuG3DzaOEaGOAsyMyZQYrMwskMEMckU5biBqq1pCmlpL7Vfg5qxMw4QUCwawc1DWgePbwSJiIhEQwBJ74IkSAhk2AIRUqZqXTrCACREAKjE5qBhYeHAiTwITy5t96amqpuFGVML5lkiGHMEzVHdeEUCOdlpYD9za2kknLK0wgQ0ziot9PxFERZOMDX1k7bZs1ePPvo5Scfvfj4o08//v60m3b73dXN7e3dy+9//4coMB5uFGhtrchwWvub1+9O70+Pr5d5mqYxH+/fPb193c7vc3YEOz18/e7DNzwcDoeDhALC4Xr37O6WhzSl5GTDfJUOd4tDRuGAZV2n3d2rT18hBLg/PH6IaPM87cY74UtOPjVda12QM6eMgK21arWHIaRhGBHImhHC7dX17nAYd3u+vo40sBlsmy2raSfTtp7XbRGmJIyhROhq5F6ICKH1FcATyQjDnHaI4gJPvoQsUZ+6kvXStlOtdWs1oHXd3NeIaq5o/qv914m5IETT1cLDGxCeT0/aFvCmtjV92vQRsQMtCCugujez0K7mJwaKYCZm5lJG4nRBSWMgCSMREw6lTPNMxOEBgAAki
cxbwBbh4c6c+e/+41eAxgScCELcKLDH5cXCSClSEnNYtyNAA7TeoHs32OjCNwlMWBInRmESwEzEAa7q6CZEdJkGYkiZPHBrR8lngN47cCoAEKDqm2oldkJB4EASSeDAgAHg6AQoBEIm5ECIlHpvph0tuStGcDIPc/NfmWlsYGSzbt7UzmEqMGfcJ0k5ZTBAN2ZGYDQfE2qCp2WtW3N0xw4ArXVkHOZ5LPtAWratdZ33N/PVx5znT7736a//9Nd249W6rvePj93gtGzN/O7Vx9N8vb+5SWm4nq7TMH7+5TdPb8+/93t/sD0dX7w6mC15GvO8P2347ddPtS6nitP1bV9Oz/bz4bAzqxxGBIwy757T7oZePKMXr4bnn2YRW5at33/55S8fPrxJ0m+udnpS6NG8Gu9cQgCG6Sbvb+arq/nw/HD7Ypxv0jjP13fXNy8DUmvNrKmeOdHNdDPmwa2DCPCA22qn16316BXDh1wi3HVLyA4klGgo+zKo9arnAcM1EMQzBcGKa8fwUKfH1pqbb1tvXVvTbVtrVe09omtvGMRB2swxM47hXXELdVfQqL1aPS3WVoAmKcKt+SlAk4QwIaKjMQL0FmEA4AFEOSKllAMtwJhHgAuSl1PKQxmI0V0DVGSIAPdGrB4GEEyToASTICsCE5tqQBCAImaSpTYkEggB7EQBQIYerq49JLvncMHEUqj2hpCzHLquhFVYVHXZzvO4B3SLWpsF8TiOJLdrnFZ9WDuJ7ZgZjN2NsDogIbNEhIuk5s2thzkzMwv/SgfTzZ66mikpPxBlFwAFQGytA5pIiiCNCOeI0A6Oq8iHpQvDrfdtxikn1taXukRvKedEfJWvxqEIjwiyvx6n3fVcrnuvmyEK7w8HABkPU57G88NpzAFYOsF4c6uPp69f30/T7t3PvpBScs4/+tFP97e30xV/xvkv/7W/+vl0Z325HuHd43k3wLOrK5Kb/+o//73Iw1/4td/44Y9/M+3pruhHn73II58edqftJI7D7av06V9EHrVmV8yYx+c/LF++rcejNtbA9+/7vA+7ugv3rGs9b4/3xyzpxcuDjDsDhHBEKfsD66SqLSjtbmZCqAXhFWXaikw3e0wD9g30bNtaO2y1JrAs7IAeJCIIho3QA9wWcHQ88NgimGyzh165M1I0VsuSTfKC9yeXHPVhe02R2QftweLTNObky/rYnIJFe+3ba6ALaErQ0UJCm8YKURNnDCmjRqwYzaJ2W5kzS4XI4pPVioDmFgBgW86AZGArkJeBseWIztmcuzUwM2QMaBDiEImh5Nwrbe3Ef/N3b4hdmBEpgsxAtbsjkTAzov1q/EohoLtBtyByDxPOANIaYBAxmoIbI0HOORw9OmCvm6r27rXb6rgAakrDfvx0zLfmfVsfzXtc5K4BZo3ZEQGDL6AvCAIAJCeCxMBMiAbYI0DVPaK1djHcBziE974GUHhBGMLIbAmP5keHDaGBMToTEJMQDvvrm93V/urm43F+Pu93u3mc8762bfXz49Pp+PDheHp/2tbeWvemoKfl+M03X7358ltQu9pPuaRxmJKUrgHIz5+9lJS/84Pv3756+fS4fliXtVph/LW/+NPa+5/+wb+4HWger+ra1WN/9+Inf/nv/vbf+bdvnr8cZhn28NOffjZdX+N8fVo7ycHoCspOrl7E9BzlgIBuK+aQnOpywsBWq+R0Pj4e3351fP82DKI/9m3RcPXqrZ4ePjw9Pbb15Fqtt7o8RTu5LghxXk5KbXcYd8OdOWIhdY0eyUkQQGjMiQDUDNzAm5kacIRBmLo7mGrDCDA3V1TN3gydAZlayBIIahxRu3ltZ60NAJGMQJgHD6tb7RaE3b25aqhGN1c323o7WmscQGHhxFxEsgggbkDdvImEqoZTIG3diDMiIrGaRqzmzUIhSJgup2dB8ubWagCadSTnFIA95axmtT4KhJgqgjAzBDIrc1I3wN5auBPiqZCoqoe6g0e/IMdqreEXBXHDmoVmAFrW99mGy44BAMJD9zV0gW7EY8lDyTvBw5Chj1hXX+sWHCIumBBHiMYJwj0CARUJPLpZzSVdNN1ABEbWUa212k2lYwNKnEi9Qwh5sghOatEcKwtlhNN5Q5wOu0KQym7/6bMfFp3O52NE6Hk7Pnz7+P716fT+dGxlGG4/fn5ze+edHh8f/Xh8Oj4s520+XAHLIPPNzVXA06Kn73/8ats2ijTNh8++930Kurp7UXbD9fUL+BH/6S9+Vtfzbn/Vev8Lv/4X3/xrf//3/8n/Iy25n5bMw/nUvvfZ87IbH89/zoV+8NGnGYina7y9vdrfFIXltCENCBBhXAYAWe+f6sOb5eHtn379FfQj1vv3n7/JnKfDtZQhYZSrZ1e3ubmqNkLfT8NpOUePeg6lOEwzKGzrmtNw2O05yX7a764mxcKRrW29nsAszEcZ23Jat9W9QxgHMWWLBoEBoL1dWD3gARDknIUi8ZUEoKtRb4gl7QlO2iIYjBgZ8YJfMW2AkNW9aUtFwSHMzQxBiIv7ol7DEyKHeYBG4LQbmLtjp6LoSDSWSAnByLFTKLPIMO21L70/WWj46OFCyGxmPYwAovceYSTsGOgKkVTZUVlMupIwBw6AIpQCKyGKuGuHCEaptfb24MZA1cJ6uEeYATj0vjEgp8HAPJ0AyKN77SKC6B5+ERZFJEDCGKb8CQaZn5jHnGbmK7WOpAIpECRJgJutSI5I4Qk8MBQ8rCtnIEZiCcBAN3d1RZJLmLV3QzKiMGMhUFsMqrsjcUS8evbZYfzufrzNctXP6e0XD+LvT/UhPCWDhFGmm0bluy/k+e3dw3H7+hdv1uXhm2+/Tnn86W/85l/49e/vr1+WgRnw7bvXMuCPf/zruyH//h/886e3T88/+mT4apzSDJJU2q4c9lc3h+sD5vHdh5O1fn24/d3/yb9DGf/k//tPvNOOrnl4oYA9Hm+flWfPXoRIa/z0xVt4fy7jeDTN034/32EacL6KIATMaTo9rf3+A67Lu/fvrq7m+cWnx/t3uj6FQynz4/Jk3pkxp2nBhBSJRdK42x02V3efpskWfHh8O5Xhez/4i9cvPrOZY0xRaYgSLG1do8yn01OYa9+0nURyyVeIQbYFUjNFiiJJW1iClHMxqM1WtAYdQZOnwW82b5q6oPf+RDgiZmIx8w6W02RhJMXaB6sbM4ODdndfAx5AyYEDL+I6UG3gjXPkIohT+JmTISycc6LAMuQ0bcdoG/kG87Q7bab9aLYyiUVYXy54PFNs0bwHxwwUiZypAGhKGDHzX/67d4jENIoMgiMAh2MEmTojqru6LdtJwbsv1Rd3gMv3HOiGrmQGHtBiUTMMNKuALcLNe4S3rqZKGO42DKWUnXtHbA64rUvXSiiEmYgQMQLUnyK6u6mG28UFDwEhyYio5IGQVdUtulrXTShLskvcBdAIi9BU27b2N/M4vHjx2ctnn+3GGwi/f3dejmqrZ5N92edhTCJZhlYD8vjq4++V3fAnf/onP/vjP3x4/9U0yr/69/6Nv/Lb/9rV7avWjn/4L/7b3//n/93j
un36w79wfXvXq37+5VdLXX7tp79OXJpZq7b1Fmhd9enx2Gp/fHza1vX49FSrffv2/uruxdPj47evP7z40Q9/8Jd+4+7jZ4frstsVbJrHbN2++urb1+/evX3zzfnpbCetFbwjJwYG4iBJCVnWZb4ba22tWt5dyXQdMiNnZFYki+CQ6AyY1OJ4PLp5HvI0HUJ7zokEXT0P4/721XC4CUgQyk/v7Xg8nc+hDaxZNEqUy+SewAysNt245CJCSDmlxEIkgWTmrbfACOsIRgAOqVIGJHCv3mvdwAgiEWVgEkRCDoRetfezaQMwIlRf1Z/M1DsFJI+LVhoiolsHB2YSYSDP2VOycJfYcRoL7TEyU7ZuARwk3Ra1k6oChEV1hwDstjXfupMZM6FIMEVgY0bwgf/S33mFUBiLUEJkQkB00w4AW+2m3HytvVY9dm1+0QCGQqi6moU5Vq3qtXcFgG5q0YDNwdRMLVTRlRDdzdS3eb4Bl96bW9feEaTwjoIxzB3MIeBkdlEOoimYIYAjQk7AhImLO/Wm7nAhvAediYzFiC9NMz+e3uci3/3or3zy8ffC28Pju9fvvm7tacxjonHeHcpYuioEhvett2l3c/v8+f37t3/6sz/Lkj/79JPf+Kt/9ePPfno8t5/94e/9f/6T/9vP/vQPvvuj7/723/7buZT7h69/9rPf/+//6R/M+8Pf+Jt/nTmP81yGaZx2eRg++vhjkRIBb9+++fDhAxIvT6eHNx/evnmXUqpray0++t5n560JD0AulEeBp/MSrmXi3aFczTvKQ5qmy58zjyWset0iA4/TKCVI8+46zdM4DuM4XF0/nw+3424/39yVsusa3WpdTolpHAaG4IQIQChAmJjneff8+avd9RVQMAHU1Y5Pdl5tWawu2s5b3ay2koZxvoph7EwEKCAo7OZhFghIDKamGyEUgsTi6ApuQE7chFrT1jYHMq11a0S7LBMAIlAYoZMQWDMCl0uNMoIwmXmYa7RamzuycFe1jm7EQqlwTrnkjJC8s1rGyAgDEopI+AQktZ6bLrU1QGBGM2IqRNQ7qv4qxpOSEF38uMbM/Jt/+7tJJsYEECwM4A6rea/NtTnhEOREol7dFSEiCJmsq5rWrq1uFqpWL34o5AD0rrWrmtvFZox4gXFlj2rRmbm3pfcNwhnSkK4I2b2qh1mobu4OqAHmHhYG6ICepRCSX8zg7q5gHZjS9c11ySOLEjUzbX37+OOPXr78zBq/ef3F4/EL03Z9/XzMzy4LQK2ttXNEgCOhzPublPLnn//Cev3pT37zuz/8sSX8sz/92Tef/+z3//l/czyf/9bf/Xv/2t//t0oZ/8k/+c+/+vyrd+/v3797+Nt/5+/98Aff++WXX7Lww8OjcHo8HcdhXJft6fGx1m05na8O1/f3xzB4/vzatFdt33z55TyOZb9/OLfe7Onxvml/PD6pwXnZ1nUtwzzvn427w3zY3bz4NOfROSCLULa19naMZA14nPe78XBBsmrv63IUSaGGgcLk0bVtHlbyMM27cF/Ws0MAQKjN+3G+PlStsGwYCzWNNONQJHEqmRFdN1Dr27nVI4TN02F/9SwPV2aEVJBEDWozCBBhYFT3Y11PrV1GyCNoQ+rgXrm1trYPVU+9gdABgcM0LIQk5UlkTDJDsBsQXbqlsrXe2qa9uRshEYzWCWEIUAgU3pWcmcO8qrFpCM1uRDhSzBgpydhaILC7uTdGyjkDuTbAEAYeypBKQvQAw7AkyH/xtz8FF8RgJklERIDV3U0RfMAL6Fc40d5jQeyc5VdWU/cIAw8zxbi8+gHYzRe1LQC21WurFs0M3QHRAajpav4gTBFGmBGIQgiyB29Nu2q4BawBG5IjB4BFeIBjKFMGQEd3t8sNJedUhmEcx5LJY2MaP3v1V8N2X3z15x+e/rhkmFLJXLIcgDnCgpxTHuUw0K1QGcacaXj7+v3HH7189el3Wt9++cufvX3zDaCty/qDH/3k7/3r/zCo/Df/9X/1//tv/ot5zEMZj0/tf/6/+Pe/+6NP/p//yX/81/7aX33z7v08lvN67q7LaXl6PFetfavI5OHeNM3D/elxW9rp/uGf/df/tJ1Pzz7+7u75c8SutTKntZpkPp4eT8fltOqx+vFhHdK+pNw1elumDMGA5uKKuSAzBsgwKicNqMvZ+7asjxYA5LvdNJRRHVLOTIEIWrWudatPErAb9/PhbvfyVZqe+TDZplByKiMPgwy5nZ4+fPjQzqfWKlAPb9gU3Hpft75SygZACKWMJU0gbMweUqNphDEjgprXLmSDWTOjrfbT0rd67K2P+brI5I6URFIKz+adqYhMAbhpRQhHuAw/RWDApVqXEUZTAKyqFk4izgShohZmGWxIuFfN7sYhEcy0QyCEIDZEyakEAMKEnjMlSoQphiEBNABnFv61v/USLDML86Xw0pG8tUaYDZI6uy+JxsIj4lmhCQ0p4WXG1DTMwMyQUCQcDNADAJDX3k29bbqt3bxBEDgIFCaPaEBJ8Fp4KHnUQHfW7hC8LEvtJ8CNxQKBiCToV+JuV1cCSuESwY7EUkZhoqVIJ+RnNz9+fv39r17/4qs3f0ggiWb0IScHN0d3FOKx8CjM1hvGtrUNQwjo40/uzOznf/7n7799OByuhjwQjj/46W9+9J3v/P7v/Ys//Gf/NIN++uknzeLc+T/4D/93kvn//H/6P/5P/53/2R//0Z+7Yu99W5fz8emwv1LtLELCvbV337xe6vr6m2+++uXnT+/fvf72XUIkkd5drd/d3pacAWgcBxSap/H67plTmcZ95rF3b72uy0IZSx59aw5KJTOodxcIyQUScsrT7iDgvZ4OeQbTQN4fDlMqGNSsaTfDdSpjTlKmfJiv99M15QnmKe0nyxPwQGj9fKpP5/V4Xk8PBJaE2dC6GgZ6t7ZoPUN4WAVw9dVAgYmTKDR1SzQwpe6dUBKOKKVVPOqHZnrarG/BkBAySw6QkkfhjGS9b6odMcLN1SMaIpgugeHhiJhoAG9qbhYGOg7F1d0aEXRXVW0LmObehTlTcEpjBEXo5T5CDCCLeSAUvnSXgDEkJZTkgOFuAMF/6e986gBJikV1vCdyADbzAAMcTZEuXyNbQHMARGHO4eTu2rtqIIJIkoSEgoERoN3dQFXXpbVqYYxIzBhIhIEiAMA4SgATCu/MIcARvVntdrIwZCMOBEISIA7HgOyOSEAMWnsAD3kWcskGYM9uPy55+PKbPzot70u+CcsBCLAi10AJCIOGthvSDsMZU6vdFD/9+EdjGd69+erbL792TB89f9VqH/b7V59+dHw6/tnPfnZelrvb2/3d4dzIY/h3/71//3xc/6P/w//+P/jf/od/+Ps/W07Lbp4eHz8cj4/X11fzPCzLKXH66suv/tl//3tbbXXTra59q9rq4TA/++jjMh+O51Ndz9ZrTmk/j2br4+MjJy5lGubd/up6dziUaRymqUyDe2cmTgjWxem81uhba74tj7YcW20qfHjx6Xz9nfv3X9fH9x/uX58e7nVZgCPct20jb5nwcHf73e/95u1
nP4qbaxCJh3fL/c/LuvpyOp4ezw/HdjxqPwfYthy1NaJILITILEOZh2HPwsKcEiMABlrvdVusbRzAgO4dKcDBICr0Vbe1H5f2dF5PCkQ0sucAbFYROKUSzu54qaWsa22tBVmAdt/gUmGNIKK4nISthA9judpNV+CpdY/w3tQ0tgUiEtOQ0x4wAYBrRJiDdasWG6KJJMLEXNwRGVMGYuu6AToh8l//+z9wR4bU7YzpQSQQGbx5BNJMOKSEEYpI5hUCiNnNPfyidXWDgB7YEDMipVQSiZpHmKr21rUzhBCTMF56FkRsoOZbTkk4hRESRARgCJG7mjdHTZexB0AEAnQA9Eulx8PDqmp4jCVHyKtXnxD1Nx/+mfOS4Ht1jW4bsmdid1dszSuCiw9oqeRkmxW8+8kPf305n37xiz9C8v3+2c3t84e3j9dXd1d3t1998/XpeJqGcZqnPMyPp226evb3/vV/+8vPv/q//l/+o//Vv/+//PKr13/8B3/04x//cFlO7r4s57u7uy+++KLW9vDw4Y//8A9N7e765vmzu/28S5LmcSx5SOP+6u6ZpMwE5+Px8fHBPZACkRWI0tiaqnYZMoBLYhQch9x6G4aZQIjZQB/WLfry4f4dRpC2h/u3X3zxRR7GH//G3+RyhajL04enpyfrJ0aXPCELRJpTZia53o83L4QPGJ3WM9SWw/p5zZzA9fj4YX168tCcUiJyd0lClx4TEiJFgF8kvAGqXXsDDw+tfTmuT9U2Q93Cjl6f2qZ66qDnuoZF4qJGZpCY3TsCmMblbTCLbavrVg2bgxl0pCAUpAhojAUxCc9ZClOax0Muk/BAmLXbtp29W0DyYEC6fBWq5t4QtcfZooowUyl5FhYiMddcOKAiBSETMv/OP/gBgQC6+SLZWIBICU3dMYYiE0RtvXmQRQ1yBEIi91CtANy7mVWIgEgRwEzCbNrMzUIRJQwBmIVyYmZuql2NGCw2i0AQEoYQASQgsEAOIHBvRJqEL7dqRnSDRICBiAAQVauausPHrz5xjPf3Px+H0W3XtVknD3BSpM3sbEAQ7E3Fpix+Oj0VHj/7+LP7d/dff/WL26vDfvosHL76+uu725ci8vnP/6yUoQzFwK5u7tZz3c2H3/iNv/L2/Zv/+D/9v/+b/9Y/VLXf++/++a//2q8/Pj4y49u37z755NMvv/xKNbatdqvzNLx69gzdReT25vbZ82eXrPaQ5epq3u1KyjKOo5q9u/9wPm23z17wMDnykDOAJxaIMGtzHlkSE67rtjs829RSlnmarR7XZXn75sun89Pdfj/p8c/+4L/96svX+7sXu/1QpjLsD9Zb73U3TLvbV7cvXx2ef7K7ep6JsFcL9M1FcpU4L+eZcwA0a2HKmQkcAUUYL1c9QDM1cBZmuTyAS+hKmXEYc0KGgDwUESk8UC7AQ9u02hLATQGDEEU9hSN6Z0lde2tba5uqr8u69eO6HTucUTZiADTAxhKIGmCX13eeZ0Q0tVwA0JiyRW3tCRkDSbu6N0R3g6ZHB+u9YTobLADANAonZAIUCGJhZiZCohJO/Nv/8BMi5ARBhmIkjpepe0hmyd1a62vryAbsDk6CRKzqCAl8qisiAIALZQRABPCLOi8AEJzcIIIIsORhHEcPU6seGoDmm4Uxc6bMiMTWbVW3ALeoEZoSiwBiMBCjuCtjYsoe0Fqcjk8vX90iLa8//HPizZTDBzU0jUBk7mpL1y0AiZLAzOTn0+NeDj/94W+9/ubbZVlvrp4xlG2t796/efX844j48z/7o5LFAVnk5uZ2XSpz+cmPfv30dPwv/+l/8dt/67devfrsP/9//1c//Qs/OZ+3/X7385//+Xe+893Hx6O7MdM8TykXAiKI3TzO+zlAEb3p9vzu+sWL27qekuB+f7i+vhmmUVUvedXpao8IQ87TWERoSGWahmnaOVAuxbSZ9/lwUKPzu68ZAgOsHtvp/ZuvvyXA/W63Pn7z5pufA3QZh2G/313deKRxLM9evry7fnF7+2J68Um6+rirAi02X2kZYn2q9+/v3795fHp3Ph/dsACHtd7qZavWcARMSVLJIokIiQiJJHMuCYlE2AGZk1xkss3UCDCZQyATswZ4UCAJZ+Hc7YxIEWjWa11No/dW+7HWs+GKvEgKZoGwX7WFcGMacs4AmHMK7EyA5ABntdrVERmRwwLMwTnAa5yarj2a4xNxIPIF+otIhIUoESEzAhAiBzr/7d/9fgA4avdqWJGMwJNkRjGn7dRqbwbKCZghIFggvFBMQplx5JBEucihpMyEGIAQzOQeXVut6iaImVByKuM4SObEgBitrRGOhGZRchlyBu7NNzOtfjLTABNB5iyQiZAp/yp+ghUCn+77Jy++l3n89u0v5pkDajgHCMHQtREj4KbePCKnnPgKUNf29VX5yQ+/+6988dX/4K3f7l+15Xh8fGzr8uL2Y639m2++fPHiuSEAwfX1DQaa20effqTW/+TPfvbi5Ytf//Xf+M/+0//XT37yo9779fXhl5///Nmzu3VdT6fTfj+nlAFoW8/b+TRP0zCN3ToTEvjVbuemZZrGaUJVDM9jub07DIUIA8gp4TiU68Nuv58lldu7W0Y8bmcIkDIMQ16Oj0lQhhJhD2++GnZFys1EeTeItp5Ynz1/Nk55e3qigDDdD7tPv/djuXo25XF/feDnL2wqIEnKtaSpguKYOAmeTc716f6N+UbaODSlAQkBADGQMbGMZUglu3lr1VQBCQJUVdXMAQFbq9u6ACiZYQASs1AYb6Bn6xrdobEgJVHdTGuSjEiqql2JAUiJyehIrES/antRAHMggakSUYAxh0iuVYdcAKy2c2uKIgxDkaS9tt4UevPVo1lsDiujmF0y08I4EuzdBUEQhHEgzIDG/8q/+d0AtFC1DbEjOYEziTlBpLbBVjcLJUZmEE5MJLhHGLwjeWYcGG0sOQubmxsQATM3t9Zrb6BNKAbGQSRLQqaUkggXQOoNXRNCMrNcRsloCBZqvrXew5GFEybChAiIGIThFuFPj+tHtz+8mq7fvX19uBkzEYJFuMDOwdW6e/OoSB6gCCmnIdx35cWPvv8X/+SL/245P0y8Qw1dTrbqy5vvgsObt+8++fijnDKQSCnCUrf67PnL1tdt60+nh7/4a3/jX/wPf1gK7Pe3HriujwCGGI+PT/M0sZC7927LeqRwIjfvta1a28vnz0XScnycr+Z5Nxchs21/NQb0KadxHm/urqd52u/maZzHcdrtdimlXIowRQBGlDQlSa5LQst330XGxw8/vztcld0tsA8cEC0Q9oe7/bgHwJubu3DM43y4+6jcPKP9deCcJak3ZKzrVuaZpquMJXuAbg5mmw6JuBj4EOjMxEyAzEjE3Ky5O2IQ/ao3jwhEpBaIhB7uqh7gyJJdZG1+//h035cYM6WMocgISHU51daImClHRK1r1wXZWBTSxskuZq7EiuREDNQDVjMvZQAgJHZTBEuclu2x2VHYRZglzBfDBtRapaqPASe8yNtpQmrESFgYd0IZIhDIDAmTx8Z/6x9/jChNt6
YLQECPhCEogNkUwmPdNABRNHESSVkO5JM1ImAwCo+ciMgtFhIgZkCwwAjyAIzilhmHnKZ5HMY5lzIjIhohT4SiHREFIhF5Hkg4HAIsWl89IuHEEAhKUQgpoJnZ02m5uf7senr5xec/m698nke0GcEjQAQtVN2YLtCUysCCo0eHTj/+/u98/fWfrE9fPZu+X/yAsFlP0/Bsf331+u23z+6emeL5tGrT/eFmWU7TNKhhEv766y9evfx4OS/39x8++853Hx/vd4fd+XwiotPpnHNBQvfGTACgauC2GzOCeuhuHHNJqh0sANS83dzc1N6JoOSyGw7z1dXVs9vb27vrw+10uDrsDyUnREzznMs4lHHalc22vDugIPaNest3zx3i6asvOPH4/KWHAqya5gQpX92WUvIwzy8+CUPtLY9X44tP3AnUINy0cxI8L/3+vdBEwGiVAFRbD+2dWl85cxaJQJGcpaBwmaZShpRzGcaUS85Z1ZpqTqOhm65Bdrn/ba0eq64OD72dXSGNgEXSkNPsBuZPta0AgZDDyWPbtgUpSLpFc++5OJERXjD+gWABTsSIHEBdl4i+nM/gm5OZniC6ZIZwJGnW1lZbde0VEHO+jA0i8gYQEAwxhDlxmLkpunnrR/6d3/1OhF1CAbX2S0eMkdXJVHu07hGISTyLCE7JiwMgi+BgtiGypMv8QB+GVPIEaD0aBCGgG7QKETKUcR7H/TBe7w9JcoAz8yAlAFrzEZlgkySAkCGEfbO6ViUcCAnZETJglsTrep7H/fXu2ZdffMGJy1CEx5wHQAzSwAhAMCcJChIcxvRsGK7r0j/75Cfe2sP918/2Lz3YYDPQteLN3ScP7++F87atj6fHWuvN7bX1BsgAqW56Pt/33r/73e98+dUvP/30k/v7D4fDrre6rsu6bkSX2hZGYIQTAWFJDGOWbd2K5K3VaRxFsqtSipRKGcZt3Vqrz57f5rJLpUzzVZ7n3dVhnEZJiCIy5GmcOI1BzCOVLBCYxsk0fH3E1qcX343E/f5b5OnwyQ+9OkIM482w2+2vX+bDQcYx0zCnibaGuvHhQOOVbrEdn/h8wu3Jv/ni/bc/R2J2WO6/9djO50foOQ/s5s26Y3BOJImQE/+qzHKh6XdTNQtEi+h1C+8hQCyhfKzrB12dBVAggEsBcowAHlzM+qraulK31i/FcmyqGyKEb4AOYCJJDT0gABwQYQRMampe+7q12rd6WuojEYoY0EZoKY8KDh7bspl2dwJkpDCvHhtzQnB1RWQMEmSMar333npb+W/+o+8G4NbWblvvzUOHPFxKre7QVd2BGAINach5T5AghiwDErmh0MDiIh4YLIgM6tXBAdjdEZIqhWPJCTGYZZznaTgIFzWxpkQAUdxgyDnCJBWiDVEMUL27ucDEOJk7SEO3kuZ5d/ji9S+B+LB7ISjClEsiAkJqzQhKytMFvpJ4P+WXCa6f3373Zn/31c//bM6oGF0X26rA/PzZp7syH98/6La23pB5d31gSWvdbu7u3r//IJLevf/2xz/+8bfffptSGcex1rrb7d+9e2/eEImIcs6qegElEHFOOUm0ugxlrL3VWg/7Q5hv2zLv52EYVTuAm7V5nsZxRJJpng9XV3WrwpIk55QBIOUxJSE2sMycApXSnnmE4s0r6zBe7zJI0s0tdq9+GJIkglnGYS/THvMeHdLNjONYTwv4RtPA82HIWbeVXUqZ1vtffPPH/4wlV/SH+9dZLkV+6L0zcsoJkdwjAIIiSb4ogGqt27ohoVAiRETOqRDCtlUkSiVzGloQInhYB3O2ZluYETijI2XT3NpS29K8pRQIqTdDbuGAmMMREdwrEREMZtw7uIf10AZaL+HgFl6HYcjMDoDozG5u2mHbqroGBAIzpfBMREk4wNyAKZtW88W8dq0Owb/9D76jEFuvvfW6NVWVLEwIQeEQAF2bRQcAAM5lSFIYRowS7utaT+dzxMbsgebRHHqABZKaqXaIFP4rbMuQR8IRg8ZhLjypX9YREJJwNW855SyBABisXjVWiCxxQzghOTIg4tXV7v7hfe0t5zlRKTymlDgBUQrMtZ0Bfcw3CW+YZiIWzkKHFy8/ev/hW8JgnIrRjscDfDrxxy9uv/vtL7/t9YThAVByHsbpw/3Dq1evzsvSu14frolhnndfffX1d77znaenp3mej8ejuzMLXaZ2EMwMEUXEXYcykAATofCybleHA0SER+udU7q7u1uXc4RFRB7K/nBIw5xyIWYASCkxYCoZIpDlV+V4kEvFInCIEE8tsdhRSQSHgwD1vjmk/c0nQTkAmjYnT3mKklwJ5is+vMLTKZ6+jaTRKVSTLu38lOYpoZw/vNde3fogWQQtkBBzzlkyIDIJCwJGuPuvRk49pZQkJWEipAvkHmkcSkk5IYGjGEb0p7o+1WM3Q0oKttUTJsiyYxx6b23zZTUHA9faFo8Ix0CPMEQKMHcHyNrBDF2TKYaBh1/mgHtriEycicFju1z2XEs947qFXzR4NBEOl/AeUTIDN9PYIFTV3QMZ+Lf+4WcWbh6u0LZWW2WOaR6yJKTGHNpdmwJwGDFhykOigVwC2DzqdoLoRAbY1RqQEUMEtW5mHUEYEwQgyDTvE48SidlzTixea3UPDUPo2htioKzgnGVydPUONmQ+CBdCU++7/bTZg+maeRdKiJGHgsHBJkk80MMQKPEV+TzKPObJml3Pz7EPfTnOAyecB9qP6bZtueRkbXv/+ptMoSGENI7T8bSMw7Sfd1999eXt3W2r6zCWh4eHYRj2+/35fL4gWi7JbQBEhEuHKOUUEaVkJiEWFnYLYpIklzqGCElKRFxbZUkaMY5zSoVSGeZZRNRtKAMx11pTyaYhQh4BHOYunN06AEJTDKQioKsnNpgG5rrdQ4Dsb8lWZFxOJ44Ybl9EpNiOlEBuP4b71/r2TVCHtp7efbm8/UVr/fbVqwTt8cNrFiEEAAqCVLIkuQRmiVC7AkTORYT9X/4ggmo3M4r4FbcSgQ3V0SKSelfrII4QBGv0sz2JsNAotMucBIQhbbWdlpN7D7S1KpAICxG21lvrqu7G4bQtDjCAEyBwIotwZxEhSimRu7pbQISzdupb3tbonQEyogA4ElwGDM3C7eKiSESDuTET//Y/+sHF2gth7r1ry6WMQ8mJiBUpwsHU3YCRhRJiEZxKFklFJAE0tJBkw4BIFG4GvVtsW3UHdAlTc289IGI/7gk4vAZ44rJ533q99MnCcKurwZaEheac9iSFMYhIpDAFp2a4BnZm5LjQhzIxQXSklGQEJAIGLwIjhAxlf9i9ZMzovj0dRx4ERSh55G2JDvX5sxfQiD0VHkNwGGYHBI/rm5s3b96o2f6w//qbr6ZpcvfdbnepfvTea61mhhgifHFslVIAkIhLHiwMiQkTpzyMORCQkCUNQyplsHBAyKUM076MO8mFUt62KjmVUrQrIvbePZyFwa33noekreu2EXbvSCmDNyAiQgrHNECvqcBmRwTEstenczwdt6fF6rncXQMjPL6L5Hb3Ez+/L++/9ixOQwNYv/mz+
vBV3h+Wta3HJ2Y0YBYys63WICLhYZiGcUqp+MU3EtFVkQiJWERSiktpiED7VlvfuhkFE5ljJ6qwPG33j/Vk3vbDnvIuuoBpgpy4NO+nZenaHYkQ3ZEwh7N27I3cWNV7t3Bn8pQxMFQ1AACh8BQuzJlJ3ClMVAk8MYg5blWBMMLcnZAI2AzCHSKIEoREXK40mX/nH/3QwGpbPdTRkEiYcsYsxOyXloGZqYY7Co/MI8eUWKQQAjg0M2eOlAECWt88uoFvtZpBRNpq7d17h/PpNOScZXAHRFRTlBRoECGIgFKbq/dhnBPtJe/zkEsxMzdNqSAnW+qxFBEWt2CahIbMlBJ7SDhcVNVojFAy7zxiv79Jab8sZ7NzQJhht1M/v4UtbuePr9LL+/f3Dr2vm1OogjsAU87ldD7nUoAJ6eK/5JyLSEpJHh8fL0tgKQMiMctlKxDhYRhEBDlKGQOglMxMnHgYxqGMYxmkjCx5mveSp/lwLWUCRBGJgLrVknNcLl4AddtYgAi1GwExUj0fySI4eUYBhQpEaM05hznp+UwY9d3byNepJKFtOuyPH97I5mW+9ccv8f1D5CiHcfvwjW0nlwLz8+38+OaXf0xIn33n++fjk7oyYdduqoA4zXPJY0oJidbWa2sByJLKMLKkYZwkZSR2BEI0bapVLRjYzDtG7fbN+d1Xxy/vlwckJiAi5lzASFt3BVOqvW51UWN3FCaiBMAQYhrhbObmFsApE1IDMARwx5xmpkSWTLltkdOOI2lzj4AA62ER3Rwuaw+yO6gBuACgeyOGPAxmv6rk8u/87vc8LpuIeygjY/iYIJcQNoJAQDVRZe3gRomFhYmJGQEdcEMId0/ZIkLNAdFRa93CmaCYkyskEnAM8N1+RyCS2L1qWM4SvRPJPO3DyZwIr+Z5GOeSeJfz6HHuujGKee2xkLSSM/FgkZBERJh3bm6+sQThQDSWtE+yJ0reOaccsJk1COMwqCEIQmXCu1BUWyIgD5fp/h4QwzhdYFCUpDdDoOvra1UdxxEA7u8/+EU0TpTzoGq9t4hARGYaxtK1J0ngMA4jAgpnIkl5LOPIaeRU8jDmYQZMZSgA7BHgUEq5nKyGYfiVxRPZwMI859GdulYB1NohyVoDVEE30A2Y+7ISynpc/Wlzb+v6/vrVp7ZWPZ6vP/kuUqy6Yhm41dAjEAkOx8+/rsuHkQPy2KKtT2/LmGWaTuum2pIUFiZmU7tEfj1MKHHinLNIRqQhlZQHRGitUbhuzdSIWVIBREU4te39+nh2M+CU2QWPfW1VC48Yg3kN16Zt2U5Vt7DLqEokTu5w4eIENYsegSkHoDMjuiNEkSHhQFGsU3heFzUDRo4wjHC18KTu7mgO4EFMgAGggI5AzA7ozJxSBuiIC//tf/xDDEcIEujaKbAwzIWGTIjg7ojSN+pVAqK1ypzSBCklRo5wJHPwiAa4RUAEIrFC61oRMuMAIRQoAEXGrptkvDncUbRAVN8wdJBdREkpjePEUFjKVHb73ZDyIdG41futPqi22lcqR+RtkDHRGMgRjJiFCxIBmUXzAIJB8o54TDKFuelqvvZWQ7ekNiiHixnfzXemjYUZpEOEd+vh7k27RxiAG0DAfrdj4Tdv3rx48aLW+vBwPwyDuxMRgNW6MlNKOSJ2u/n+/gEAkyQz2+12iMychceLAA2ZOQknaVWXdZEshNTadsEYppQuy3/O2ezy5on3DSXlYdrqmSCW86lBL3nfzls/vfdwiXw+Pq3nUxpH1Q6xWa01dvPV1dvPf4kW4+FKMsR+H7tDd/DPf5mR0m54+PrPCsP1Yd5MEeD04UFSBkQiBkYWDPDMOSdhiFIKESIgE4G7sIBHb6u2RgDEMAwTj6ML17WqmXFsW21cSpkR+djOj9tT7X05b4lGxNztXPvJvFVtEJ6yloxCA+Il6FWIimMl6cQUaMKEhokL4+WNymRiuuudI3LrGoGuFhHhKYIiSI0IBEkAPBdhgZQREYg6kqlXlobY3Rv/zj/4EZInEQjUejaPJCUlZCJ1NxB3UvNaPcBDOrBkygMTXfqUCGpbt5qYk4iZI0vigsYBIVkI0MOSDIUHdGf3w04C1aEPAmrGZZiGK4yym6/KsGeSVPI43EoiIN96X+r9cXlE8cxVQ51gGmfCHGDk5AYgCsjeSXsluLxOY+KBAnvrrmfEJWzrrVkwWAxUxjRua+tu1UybtfW8tQUAEQmIkNjQEWmc53fv3+eUxnH88OFDShIRy7LM81xKbk2naSainAUATqfl+fNXiMychmFAjJQYEVQVkYWZkCGw1uruvel+v+9dtXcS1t5TSrVWImqtCaI7n44fgmMYOAI3w9PDBzyfaRwYCUxbU5CShPu2hathn4adLP3161/w/sVut+v3v8iSeBxtKPh0HqzBLp9Oj0kwmb/7cE+w7lO2poFs3QQCgNxUWFLKampu9KuLb5CgQlcDQgEkYMqp5CQsREkCMDwIiVM6914dCiUQOtn2/nj/0I7mFh5P/YSurdq6ndWrRtv8kcVTlpSQizLzIKOIIxFxIWxCVggzFsSCVCKy9qEbmrJpIGBoa7p2JHVBQgVwV0aQNDsAseWcipQxR2Zz1e6r+gOSQVDtlf/mP/iRQhMhBN16d4oIZQ8PdzWwMLO19mYGyACM4CUnZAFuiIqI5q3pKjkN4wCQASOXnGWgIII08RxODJQTlyQlYWYTpgikJGadYtjtnu131/v5bhrnQWaEHAg5UUSufevxoHZeN3PcSmZ2IHBJhWKnG/TmddMwj94cAEncQ60L5ySjasVUUbTbEtDdacAdqIMTS2ERvAz26KJaXTWAUp5609rqkIf9/up4OkpK7t5ay7nUuhHRfr9HJBEhIjM7HPYfPnyYpvnq6hoASimIMAyFiC5WD3edpt3lsNRaG8cxIsxsv9+vy8rEl06Cd3UIZl5OZ2E6nY8s0tV103Eoy/Gp1QqCZRxZiAFVa+ZgZGTazJ/efH17OPjDl8f7D8Pzu+LNLDznHDlG7B8+ZE88j29/+efQz7shn46t5IRg6iuXACKP8EsdFFHkgjlnulRbUiKSIpkJkiTGIA5khAA3Iw9BRiaFcIQOttX6fn16WD6sdtKuusWq9rAct74p9q37povh4mEIncUNUNIwlpk5mIlYHVYmSJQIOfOesWBINLKWtELT7m7m2ltr3QOJ5fLvlkScOTNBzll4KGVADGIADo/VdPWoQbXpUtvKv/27P8oCAC0wulvrHRVAqYeCOwZpj7W1ap0olZwDGogTk4VHmDC7Qbgie0rZ3EhkLDkRWVfBkoUuAWaikJwHEaGeWZCpA3iAbrq/ut3tbwoNu3EqMhho7cdhGIR2VbfaHxPNblJdhaxA6g4YkWDoXeumrZr3retZQznNzKluhsZI7rg4tN6dwXaShthrjykN14cXW4ckLIim1qu2doLAcdpDkJmzlKvdrgz58elJRJKIqhIRsyDSfr8joog4Hs9XV4cIf/fu3SeffDqOk2pLSZh5GMqlO5ZzvpQRe+9mJsSSZLfbmZmIJEki4u7oceHq5ySm
flmJc5lSHr22kmkYB6u9ZJGyezzeD0zWq9YTuW+95v01Ht99ePNt2k345ptl6LFlbtvIaA/vdC487tvPP883+1x2H95+ftjN3WNZjtc3NwF4eZRIKJyGofxLgTwjIjMjsuSBIRG7e7PeERAgHAhQiNPlGSMRIarqw+OHt08fvj5++14/bLqa0uasxuh5tbXqsesJXcG7UOBF6sY85KssO0kSEQ4a3glmpnALMEEvZBNEAgPgZuEB5t7Dw4MgQCQYgzmNhRgICKVcZAWMiO5EAizozWqt3Wv3Tbvz7/zDHwomhB5oFmCthyo6CDEReXjvpuZh4U7EJIndgBgRs7VISIIcoECasjADURJKhNANHIzYNBQQSTgUzC1lzzkZhYYBodmZU3p2911GFlLmBMxrPSHpkEZwbluPLomzQpidL52yrhaW3ei89Nas96qxOgULM4t2XdfVvVqv6F4kZQ7T6j3v8t1cdixz75EIwKP1BtBYeOC9RQKAYR5yHnpXIF+3mlOapqn3flm/d7udCPduqpZS2u3mDx/uI+Dly5cASITTtCNCZgEIZimlpCQ5l2VZtmXd7XZJUi55nmdVLbkMwwAAjMSCl12lDDnlZB1KljxOicCtYy67obgjUAlb0BoRL6eHpw/v6uMHaHW6/SRsTR5NH+/fvc7jmERH8Loey+sHubleJI5ffXn13Vd63J4eHsapfHj/zfHxEQHq1olS72HWzP5lmh1Ata/rwpeKh3tvBkwimUmAspSJxhE5QS6Yc/TWW23akkiRGTidI87VHur2FCsQFRyZ2BTMGkYkolL4UlAdhyKIAoiILKn1jgLAwBdPkCJBUWUECSA1VauqrtrMlZCZA7ELQSLJiS4uk8CQBIxIQIgmpAC9d2/NmqmbIyT+V/7B95MUhW7RrUFXYKLAgMAIUutdO4SBsakQSUkjYwkXZMcAbxhUWywQDojECdwhhPCASN22qjXAARFJENCsB6uhKvRASlm4YN3as5tPyzC5BTIT8+l8Oi8PJbFAasumtlJgD1ULM0BAU1iru+JS+7JufnFziHMihCCM8HANCp6HkcApaOBxkBm7gVM4123VttXaFMJtcwuMjJLzkCTLpq03HccdC43jmBK3Vs0853xzc2OmZn45/avqtm3DMJRSUsrTNJYy9K4RTsS73e5ScxiG4Xw8EdE4jtNuHoYh53zZWzgJiwxZEkvKKaUEALv5ICzoiilR2FjkXDvUhom72cAgGF07Bq6PH/rTGzu+jd3LQ4aRqXKG+6d4fr3bX8XDIxSw9uhPj/l7L8+vv22vvxKOx/s3hfmjFx8/PjycjkdCIioi1HoFABFhFne/HNvcI8KAgtOMMhNLAORSUJKDmZp2beeln1fCQCYSHsqwm+a97AfaEyZbbauGEV0BILLMiRMYZM45E3IwIQUAhAcCQgCodYuVeROEUHC/bKjq0c1bV9UOgB5hlzkpoShFABAikMHAgBwpEg+JGQLAMRw6WNVaa4NAROS/9Y9+nAubm4c3dd3CQTSag2l473bpZNUOZjTkMVEpskuUELq7Ne0aW4OtawdElEAXwcmdAzElbq7dNnNFIhGMsO4QAZicnAKRU67rypwOh49aw+6Lu671eDo9ONgg8/l8qv0IYV1rdwdM7MVDLMIsmnptysyXVSkJZE4USYIQkKMkSmNJIxdR5kbW1TZKNDIEhUlOCIGuHoyElAhFgNmDpmGaD1fC0lpV7cMwlDLs9/vLDKS75ZyGYbhcakspwzAdDrvLuXnbNoBIKQ3DUMookoahHJ+ORHQ4HFLOpZTLNxMe5lZKudrvmYgQc8kpJWZhiKGkIHLt4D0AtvMTWQXGAGrLKYEyYHR7On/blnfnp7eA0zQU4sIUHGbTTV/bmFrVDksf2jbtdl6DqKVBHt68H+fh1ac/bBoMxpyATVhyLsxsZoickkQAkaQkKac0jJIGJAAwQFDv0Lr31tZFuwWTmiMhEJgiEoHj2rs5OOWl1WM9btaJ05B3QxmHRJIwgLr1ZhvCGI50GUkn3GpFNDdIiATg0QFR1SAIg+3iQEJzMiZixgAnhjyIR+26mAKgCiliiBAhEVymc8Ad0Ar41BryX/9HP0FAIq6q5obOtVrz6qGMGB5u3rp3pfAiNHAw4jAOQ8K8bbrZ2REutCz1xmyZZvLsph7QPQCgtzWoBbqrAiDSYKHMAkiIHdxVodaYhxui1HXdWm3tuCyvz8s7yUVdj8u3hisJWHRmFJoJxwiESK5MlEsp865IRgC/+LETiwQPPA4pZ0HVc+srsKAhhOzGnRDkPIIjYVhv5kQMIJDSUJsjpaGUlFJvbd2Wq6sDIpo5AGzbduHS7/f73vvpdBqGaZ53t7e30zSWUtxDVUvJ0zSIpGEYc87upr2LyDBcOmPDUIacc5LkZh66n2cmJMKSyziMAFh7C/c0DOBmrQ7D4NZsW4PYu4tuy9Ob9elhvxt5TNZOgvkU49tv/vT27hpwtad7GdnaxsfFiPJUsNn9h8frVwdEgeApyePpzXh1s58O1hYAAqRhGB28toYoklIEAFISGccppWzee9tMGyK6O4aJcAByTsN+x2W4kJ6X9XzS5am2p7r1dq5eGztLskAPoMhSOGci4ZzmnGYh2nqs2xbhBGzOapgywwX/pysLAMHWcFvVLNQUCBDRw4Mh5UA0j96jpoE92rK1CEQyZu29mfWUMhI4WngwjAyTdeyN+C//vc8wiMUjFGEEyqqtLu6uKWF4mPauEZYJi/BENFIumQ6ZJiI496VbTUAXlB0SFRqEBg1em9bmrTW11uzsEZc5N3VijkyFEzNiYmeSeokSDkM4qS1rfbtsXy3t9dP61mnb6octFmIgFOEJQC6QCQYsaWSZCHEYZRhGQgfvRJwgCdLAKQOzIxglZHRzgFF2o5TWtoeHh/P5EaxLylIyoUzTlRoAYxlGbTrvDogUYUjQmhJRTsN+vzPTUtK2rcfj0zCM87zb7/fTNF3OQdu2EXEpebeb53mnavM89962dZvnOeeMhLv9TliGYRAQScJMEI7u67KM08ycUmIA7O6pDENO2hYRyUXujyfziGVd3n8rYu/ffft4/+bV7fem4aZ5uXr+6jDA1tab3f7+7bvFlut5evv1F0o85MFKNqtPb78eMB/ffygDItnT+w/iPO4m5CC41DSj9W4el0mqcLhEvtethXehECkBlJNkZr/8WmVCZHFIGNqaMJGwUUTUp3r6UOv7p6fH06OCEQliF7YswyDXTIVF8vB8Hp4D+nq206OaJUqZmRhTkFftGu7EYalr1KYGARoOUWsAQR7RY4MwZHK/8AgTSAD3i87RFIgAIBCCPEGdVFPrzTX4L/zWzNJKnjBKhJJkpNKb9nYOxAi15hiFhb13hGmcXzLtBDonwJy003JeAS8jyUhBtS/EjJj61qxHbdu2roiVpBFlAGFxJAFsGefEBdxzHrPsEo9TGV09wNft/v3jL1s8LfW12tlxNezMIjRkmVmc2RCTQ5XsKQsFJJFhSiKOCIguhoJEjEOapnxIktU00LFbrxrGy3lJA0+
7mfM4lIKUmPm0nQEpp11KZZqGcbc/L+v5fAbwaZpvb2/3h2ldN7MWwWY2TfNud5inPRIg4jhOqn48nqZpOhyucr7EJXi/359O5659t98P43DpYIvIbrcLUCaZpx2S5JK3rTbtwzA+PNxzOJHrVplimMv5vIRaInAg257WD1+dTvdJRI+LLieLk5Kv7bwbJgjfuj2/vf328QOW3WHY1bpp7+F2uL0Rzh8+fIjQ++M9ggy5dOvNXCiNOefE3exSN0Ezd1dXYTJyBxvzMMxXwkOSBJwoJ6YMLKpqtTFYuHZVYZqmcZ8n83iq7dzW07Y9tO20vlv9LFkST0nK9eGm5DnnYTfeTOOrZ4dPrufPslxttbuvKcNcriMAwps264FcyHN4AkaA1Fa80BeGMXEOAwMwuygDMFpvSA3IgC66504AFhie3YsphuPWjf/R//hvrNsjEhAWjSd1QhgI8bSttQYgdGOAYWBKkfpSp+GqlMngTIhCYq0tW9OoSMjMgNyttthQRE2X87FrN1egyikAIqfEJKlIpgyBhAxAETTNhyIThgRVD+xen+ovt/aBmQM6ogP2JFlSYuLMBcFE3APMex6QJTA4cREyht6tkjFgJmYIdtC1PRKBWsfAkoYp71IeUx73N3eHq2eOhBHLsgDzYXc1lHGa5mkcP9zfn5d1GIZpmuf5sNvtlmU5nU7unnO5u7sdhmG/P5Q8HJ9OImma5giKiHEabm5uELHWmnMexxExVFVEcs4550uKTkTGYUJkkTSOQxIuQ2FKiHh8Wk7Hh/P5AUDPx4dWK7ltpw/Wtvnq8PjuK9RTVwv1/VzOy2Mpu3Ger1+8fP/mGwlwgKf7+xeHG9jNhDKTnNbT6eGRAOerQ183dA2wbdsQaBhnYoYIYprG4cITZ8KUxN2RUASFRVIiogDgVESSX7LQNGIghFFYoAZFyilTOS/LYh0I1GmtlyBmXdu5a2XMIgVQUpnH4Woqh914dXd4tp9eHua7Vy8/en73ijiFgVsgCwPWCy8RnVyYMKwhJgIJU0B3i5TpcqYmAuEiPElQdPNuSSS8rbY5kHruiuHglWxTEON/73/97wCmHgtGd7AL9CQa9h7mK4KE58RpSEOhtK1bbTVPsyMDWaARmQauG1hUEWHirqt6M7WutepJu0eEqgEIkucCIgNEHqd9SQXchf3Cqcs5S04UjBgkQ7d1bQ+AwCSAW1xi6BnNNvJEII4r5U1tQYGSBV0ZVchV116bBQGQBIM7sQiKNZ0lH3YfT2U/l4mBx3Eqw6hq6t5byyntrq4R0zCMkuV0Oq/rsr+6FpF5nsZx/v/T9GdNk21Jeh7m0xr2EBHfkJlnqKru6gYagEAMhJoADQNJWNMISqIZdYWfLZmJkCBA3eipTp06JzO/IYY9rMHddRGFv7Atduy13N/3efZ9LWXdtp2Zvv32u2kaRSSGYd+rSDgeT4jk7syUcsw5ulsIUUTcPcZwj1XfS1XMDAC930d4bGZEyIRENIw5hsTsRM7kIuJdy7pA6307f/npx9KtLS/nl98c5zkQASJFUZK6bTHFw+nw429/ezo9aG/t/Tp9+wmI4LbnOW/b/v7yCu7DEIcpsbCgnI6PzpRDZKTWaqnNXN0dARGAmWMIbqCmpsrIQASAzCwxIqLDbl7cOiMCumoHIHRet23TWnqtDbZi13q7tuvuTaGTZyIoe221Hw6neXh6OD0/nj48P/5qnmdVQ+AQgkGv/SIchJJDUHXwFhhIAAkQmAXdgVmI0FpH10ghpDilIVE+TQ+Rs5CQASqqYVd0jgzBGnhHIOAA/L//+z+TkK2DhHsYtYIXcN72brAFziMNgSiwBHSwuq5rsZbT2HVDRIlBKEY5QC9qHdCIm1nT3mqvQPBfCSmInjhAjB7lCJ4BKPFMBA4lRLFu5pjyKEwOFmQ+TB9a68vyZt6RCSG6EwCYWms9hhQitP5u2NxKIkBtap0YALS1VpXcjJEZQpQxSB4oJoj7an1X7wiuHLiUsu3bvYudUkAOiOIEtXXt+vTx0zBmRAghrOtK7Nt2a01//es/yjmr9pyzdmemjx8/qN5Hn0gMx+PxjtWJMfXeQwjMtG0bEd0/Aojo7uM4rstNtSMCmhOjdSVEkWC1lr04eG+l1xvovpy/Wr1aa7UD1EuwXvcWmY055GxkTU0bELqwbMv27Tefat+/vH55PD7pXta6nk6Pw5C77jHIly+fswgRO7irIlJM6f16bqWaOoDXUsyciO5TUXBAQJFIEuBekHHt2gA6gpu5dUVAUwXXrpUDVTNDC0Oubu+3y/tyvu3XvW9mLeXgxstaUsofHr89TqfD6fHh8fHD84eHw3Erl62e97p0uzbd3JRjFhlMDVwphiApYUAAoVBb4aAiId9slwAAgABJREFU6NBDkHGYgnGCIJxSGLIMoNDUDcSdwSn4gJYdPGeex4H/t3//Z3WvtW0d1NEZFaAYlFZu++5TimNOBJxFoKl3VW3X2wZkOQiQiIRDenoYD0JQajEyTm6oxE4A5DFIZoAYAiA5KhiijzEGQXRlQFZ0iSMBtboR0TQ/W8OUwnF8SOFxWc5L/1tkAku9mzsiBqYYYxinEQnMdu0VujKKGzkqMbTetk4OCAwkqfXe+g2d9rWrbjlNQgmIem/v75cgQqR130SYSAB43XcJ/Pj4nPJwvwDGGM18L7fb7fqL7//wdHwqeyEiRIoxPT4+5pzVmrm2th8Ox8Ph0Kq5YWtdhEXI7Pep6TtXOEoo+x5DSCnd89Xgjo5m3rsSM3u9vH9dry9vn3+oyyvUvZe9rzcSnubDcvkSSFKcSy0VHAhGSdP8KDysy3lMfBiPe+vT6Wi9X94vrtVNS6nglgIiYM5pvd2G05GH4LXWvaR55hjbvph3bX1dl33fWmuqnYgkxiAxpimliUV6V3MLUVhGkYxOpVXtnYljzByEZGCPW+ubY3G5aXnfb9fr+rrttdwIacgzgO7lMqZxyk9jfjydxo9Pp+f5cUy5+H7d3i+Xt27VvXRrCIBE2tycIqDwnVxBQZwcJSBy66aDTBln70wKA+bk2bzfbGlQxIZgA2JAvD/4EKPzv/v3/9zcu16uy0V9RW8OW7UOagx5iFMIIgECduhbbXuzvpS+tjaP4zQdiGKkIUlOaUQgVTWjEEYHRPckg4RM9/AU5t7NzFQB/L6uiojkzGDEoGpLqZcwhnl4DhyHHAMnh7Du6215B3R3VO0hhDEfSIQJogRB6t3cOcqEzoCKBB0IQEqDtezmHb0hGDjJkA4h9opCyb1v+zWGTECff/7d8XhsrZTSatMhj8+fPmozJ3cnkZBSPJ8vpezDMDycPoQQAbDsJYQ4z0dmYSZTN3WzHmMa8qTqIqGUql3neW6t9d6naSbCVur9GrCuq7vHGO8dAGFW1W3bRKj3kpigl7effqbetBezuu1nIfr47XfWmhoM0yGP2cDBoe0rK44xA3RhEQi9b4D7h6dvnXG7nuuy7vs2D+FyPmsHYh7GcWv1NB+tNQcszRhD10buzKKqpRQRSSmbubkRSUgDS0QOEmJOWVLGIO
ik2s3VEB2DGtaqpXZwr+Rvpf10ff+yvb7dzufl/dJuqrs2SDwJUSlv6/6epzDGYwgyxDGFSUL0YMXq+XrbzhfD3nFT3ZmJgb1bQCIiRCCmKFE4BWb33bRCS2M6oTMaUhdwAgJwt0bWGTyypJxyShEIWDr/d//u+94rArn51r42fTfbTTuFPE9TkANjiFHY1fqm3dbSb62ZI1r98PwhpSetLcUxpti81LohkKkxIYpRIARC81ZVVbqi+33chxIyubgTuBMqQmt9WfsZID49fj/ECVEQrRUWeKh63vcLeFCzGCXFkYGZLFBEH/dWCIR9RIjIDbmTiOAwDYfIA5sFSJkOh3RgEF2McZjHUy1rDBzTeL2uf/THf4wS3l7PccgPD8+S8743ZlT31g3ALpeLqqnqw8PD8fgQY7her5fLeRxHAByGQfWOEbBxGkWk1hZjQsTWCqAdj0dVvd1up9MpxrBv+z0zl3O+XC6llHEchXjf93uGdFlu6/KuvaYkOY99L+6KiEKyti2GLJymwyMIc8zWe9tWpEi93a6vQMVBrXchf395u+3bPE1JJARa3l7AdZgP5mZmXe00HPdlp8AoASjspSD4EAdCBIRpmuKd0ZLi6XSMId0P4BwC0P2OQCQECIggEiXEIEJBILCISAwwpA7x1nRtbVtW091oI2xmDszAxoLrfltul5glpqH0HREdDJRa6bf9cr1+vWxvQDWAo8MQslBiFu5CLr17q87MiCZRgHpf0aylGFS1NwDCLGH02Wos4EZIFNlxGEJMjA78T/+nD70tvRfE5t5r20pZhnx8mD/k9CwUAwhjdPDrdt21GLKqcHRsHYEOx2dvkFLOYVDbq3aFTtiYiCi6sVlza4gMgObgxqaGQMQYOQEwoHJwclBrRd8QUpTTaf4oPCDavldtNqS5lrbsV/NOCFN8iJgQu3DsaMva0XKSkYUUNhFjAYnHIY6Z8il+GughuEx0TDocH4/TdDJDVR2nyYB+9etfk9Cf/8XfDMM4H45qaCZ5GM+X166ecl7W6/l8UfXT6fDhw3POo3n94YcfWFCEp3G6X3HuRZnj8QAAX1++HA5TrRXQ3N2sE8v1ckH0lNK6ba3W+yColIKISB6E3ay1GiKHSHXf93V7O19YsNWr6g5OkdL0+GFbVzfLOQJzqe7dWtnQ+vH5cS2lLKu1qlZTCOPhuF7O7rDdbtM4HIa0tQ4A1/0WOByengLHulZ1ZwmRowFoL73U3uq6LXvdzZqDiQQCDByZ2RyIAAiZxQjJENwMgEMiEASgECilINDAi4JhoHFSycu6LOWt6Vr2pauFKDEkJGEO1+VtWa/j+JiGvO83gQYFtaXr8vnn60/n5TNBO4aZOQDCHT9oJqokRGDOzoxOiCzRUFuvte5EQiAJwyA5UEyc7jEk5EghhkBxCDHM/C/+7QO67vXWdHeHtvOYHz59/JOH+QOSMAm4uffd9mq9maq3NNA9uVT6mlMIw9QVjWvtCzMAVNMaMBKRm3lvzIJAxDsYWwcAQwBvHUDTEFgcvCEioG3tZgLEs1g4pBNHvJZl2Xd3yFPet9teXyl0wjjiya0bKJhpY4GILgiBY+YAjBbClCDnNKIF7Y4EA+bT+Nhqvdze67L1bmuvp9Op1/p//H//42Hgj59+OQ4TCwDRtuzIkMekpp8//9x7/fD84dtvfpXTZNbfz1/3vT4+PuU89N7v0MzfK8uJzHzdlhjF/PeRodfXlyEP7r6uSyu1t3b/IEzTpOrmjRzQybVVvS2XV2g2D2Nv6+12JicGL9u7EBLIcJqsW9mWvS+MJJAaELm3voPzMIwhiLauit1szoM5MFKvvdc6HaaUs6TY3ZZtPYikcRw/PNVq2ot5C0RqXlrR5tpt226ILUape1+3fa+3fpehEKQ0IBNhAxDnQMPkxE7ghK5WLpdSFjI3kI56vS1vSzm39/P2ee1vN9+hO7rcUxPiLMnPl/O6bM8ffjHmk9aOoCFCgnHr+8vttWyXcZwPh6eQxRHckXTf2qK/P5gJQFMzZIqRkOy6rg5yGg6RSd2ZQgox5SFwIIopHyNHIY6c+V/82fdE1LS32upev/3wq1//6h8c5g/z8BDjUGtb972Ucl2vwI3EqxdEjdHiMAAPFVqKubfS+tV6c1NANd8DgwC7VoOm955bc4WNuAkmJEM0EpfkAA0cGBwJVGnf/Xh6aqYkPI7PrfXl8uK+h0ApHvay7bUgAGhzs657LRujppjRYu8sHIOEEGIQxWhEvG19aztoe0rTuly/vL8zmSAxx2k+iMhvfvPXTDYfTqp2vrzebhfrlsYx55FCKqVeLudf/eoPv/vu+2EYhjy+vr1cb5echtPpJCLbtuWcEfF4fAghmHlr9Xq9PD09Et07Q1jKjsQp597q+XI21ZTSHYEfc6x7RcdIoZbdrNayvr+9ZLR1Obu3KJHAXXcCzykrkjYjkd7qerlEkTxmJy7abS9tW6Z5QBYHjDF2L4CEhL3X1gsT5pzVIechBIE79ElCDEFrdTAHFkEmWq9Xd88x3E1AvRsjEyOLUAgO3I3uUAIFBAIE876DKaGDdlDvddfenOJe6q3sq9X3/a1CNSitdq3YHIX87gULlETC+fa5m3/7/Ms5PqtayJzzccYHdVrrrdRynD8+nX7xMH1giM2bArj3ZkuHW8jEYuDgiB2qB1i2mmQc8xyYhV1EwpQDp0BDiGNOMTC3WvlP/6dfpTgihd41xekX3//y0/M3x8PjN88fH45Pt7V8eX+53i5lu3RtFgRZiJUEjYOkubbdVQFpaytoBW0GCq7iiKbdugNgTGNKCKywU6RACVFCwhAdoBOhqYEDIrKk1nYReXr8tncNIQZJ1+Va+iqIhOOQTqa83TazRaIiiurVVKE5SyAkIknpFOUpRjLSrWzM0hsc0kO0bB2Px2lI0zQ8pzwSwrKc9/VGRAiwLMvXrz8z8fPztzkNW92WZXl7f//22+8+ffx2GAZE6tre3t5CCNM03bNApZSc8zQd7jm5lNL7+/l3v/vdhw9P4zi6g7vX2og5pdR6bbUSMxOJyL7v45hrbYwYic3vXWplNCjV3az3GEJvFdR7tZyTAW7XGwaZUtZaL7d3sHZ8mEOe1+s5MCDA4XASSV1biDzkIaW47VspxRGfnz611rd1AYeiRkRk0EohATPT1mvdt+Xc6s5IRBhiEAmIICGkNM7zY0gBSZGRRRDZrC/XS1tW2Hcsm9WC6JKCmK/Lcr5er7elaOtMHaw3QPFe277V2pUBHMjczAGR3cvL6+840tPjN2M6lbZSwpQGwhQ4akP0aZ6ejqePx9O3QziSCbi7r6WsXV1CyJmdkjoYgJrV3mNIQ0gJo3DGFByQAIYx5BhNO5rxf/tvf+nOMRzGOT89fDPmecjDPDw+PX1LELzjXvafXn4L0NDFG6mZmhOPjujegLqDE+WqveuGBrXsjM4EHQsw39kVh3kec0ZBswTgLCABhRHvbVkhwogoRN5pNaMPp2+DpF49Cnf16+1q5hwdIY7hWThtZe3Wc5QYhB1aOxu2GB+FnnI85GkOO
at69bWWGi1FDtApUYqIMQ6AwQH25Xa7vDn0+XR4fvzW3GLkT58+xTj89NPPr6+vps4xPj1+yDkTkbuVsrtbSsPpdOy9L8siEmNM8zzfuXGqqqqXyzmlcDqdav1919Hdh5yZ8Hq9EmBK6R79t95M+74tIkbkdzaNMCECU3DVXtZWVkJijqU3xK51cWjklnJW93K7bteLpDCN47aXnAYCHIZ85zuQYJQYJOU89tJba9M8CEIej3kYt20H8A79Ll0jwrZvdd8BwFyXdeldgUCCBImECODWOxgysZp2M0aJLHQnH6WEIWKI4EBhDDFurZ5v58t6fd+WatrcOm7kfSt72SopUUiErG5uGimWtn99/Xkej8fjE1EstTJwDIKRhvEBIArLMEz5MGd8GFJqdlO1FKdutbp2UODIzqQoJN1q63WQYZABOhRoft+KgXpwdet153/zf/lHIgNTRu4hhDGcTvM3D6cnc399ebtcl252vX5l4kFyQCbKIpkAQxhjCA5eq7l762vpq4MTKklXbJuuEifhEbQ4Uog53mN7VAIDYVQzUwLnzHHOxxynlCLjYPWaMuf4TWvYekXU5fpStaWcmCn4kIcjS1rX6r7GQMIRiUt19OOQnplzylOICSiykzeFpq7NwQl6q/u+lm1rtWznty+t78Nhfv7w7XJd12WfpgMh//T5589fPo/TPEyD8PD49JxSBsCU4h0pllJura7rmtKAiPM831+AWquqqfZlucUkOWezOzOC/ytRC0IIX798CSEMw5BS0tqh91q3UhcE1961921bQDClWLel1mLa9n1JKYUk1gHQGKy1jv+1fMzo18vrPB85TkV1msfWyuFwbLW10sFRm+aUDoeDqu5lC4mFhjTmaZrdvXXdarFu2o0lMouIsFBMkUW0N3dPISOSAzAlQHLoTgqAIUQOgQJjCJACCDPLfcokwimnIUZAPm9lKU1JK7SuDV0DJnRG9ERM5OBKEvLA5fb+8+evHKdhGAJz7RoDAkDK03E+1VrRgIWGmM2rmtZWGXvIDoTdcGs7A6OiKSJT7armEkJ3Ne/OhIGbtr01Qyh15//5f//nh/HbIGOru1kPgd1xzMf3r2+v18tW/Ho777evOVIIkZhjIIkokYc0TjmkPCLYtl3I3YCa7SGCozbX7qbWCYVczAmQg0TEBqwpRCI2gG7OGBPkWXKUxJRjCK7m5vPwaLo4aWt7qV8JnGkgCOZsyjFFoqK9IVCKh5ifrY+lQB6mGCaRUTipO8FAQA5uJowUEIu6gx+GKTBFGT9++O50fHh/v1Urp8OjVliuy8vrlzTG08Mjc5im08Ppobae8+AO1+uFma/X27ou8zzf8aCn0+k+y9+2rbV2L1IN4+8rkffMDwAQkWqPMZrb5XwZxzHlYOr3zO2yXIYcrVUW6Fq2fQlEKUvZKrhq38w0hAjKLDHGSZ2WfXOrriUwRuFt7w/PH0lAXTEECpIkEEop1VRrLSFIzLFbr00HCZf1hgCn42mcRkNels3Vr+t6vV3N7c44IaAc0r0bGLKMh2MaD44MQIhJQiK5D7IR3a1UaB29qxarBg6ltWVZt65LL2tbO2wQkoGliDFEAhIKAyZCQCFiDpRj5Pfzl69vP0vEIT0KsxbLMTpBHNKYx/W6QtkNzYmL1tKvpb47eAqI1pzE3UHB3RScYyrFFu09EiE5uCIoem2tlaKu/K//1384xuM0jN1g3W7q+9vbT5frzTy9vV0IrdSX6/qZkhsoYUSshCB8Ly/fy5xOHAUzc1JTJDcHRSPW0m/qznAAYKAeJAI4OiJAswZACAFUBFgQcooOaIAOfW9nFhAPvWwOa12uXXsQjnJEk96aeSeuMU7CA9NxyB9jnNd1a17zMFkFJrhDjtU8xCCctTdomvIwDkfq4kqHh8dhmrblpk4fP368nM/vb197v1XbWrU8HJmjxICAxBxCeHt7CZFvy/Wnn36UwCHEEORwOA7DQEREdLlcRGSapvf3t2ka7pm5e3vYzFRbCKGVejeduRohUGRCtN5qK4geyBEd0bHbtty01RhiV0dAVyBkxKYOzDHE7IzaWttLqz2nQWJS05xjktDB3ZzV7+1eNVXTtaxIEDj22jHwmIbL+aKqIcYogk4dtJfSu27rptpDEEJMKY55TNMwHz+MD99KjE6dI+U8hzRgCNZar13NSAQcdK/3Qf+tbi/X959fXn/7+vXrcl1LWVstuIJ4YhDBGCKhuDJSVA+mFZ2Ec0x5vZ3fXl+c0hAfY5zUGzP0fU8SDf318vV92zEaRzOvVZujdS3NKrv03pHcsXdoIbCIlFIMCQCAEaD5vcGy171X/rP/679A9BgjMrzfvp6v75/f/ubLyw+1rZfL19fzb7pdqlZH4wCOYI6BiV2JDSl0a2Yagzize59SRgYi6r0ZFKag5oQODqpujoImKN1s74vVFnEGT2qVArCwCGJbq+2b1q4bge/l1mqNHkrdumoQZOTesPembjHNh8NTiE/EUUIE5Je3171c3FrRhVBUfd13RnKqTffYYZLxTtI7HE+U4vvb6+38Mh9PZd9//vEH082pd/WPH3855vn15adpnubj/Pj08PnzZ2Iw67/73Q/btjCHp+fHeT4cDsfee865tXa7XaZpBoD397cYBRHvZ4xt2wAMEcu2D8PAgOuygGurDRkDsQiVtr++fmZ0cqh7jUICuC8XRw1BwB2cABxFEciaqtYYJaU45wFJWrd7E1+Yg7AAgrp1J4GYYspZ0d8v7633eZxN/bKt4ziHEMuylHXbtg0RCEH3LcdwmOdtX9Z9kSD35loaZ8kjcwAC8GZGxAkZACJ0N21uiu6g2lrpqmpk4AbQOxTXrW/XZVuWssmVgyRMLMDixGTsW3etVdEIXTuBMVO63m4/nX+LDvP0hMi1d0Ta9gUiuOr78tb7mgNEjsN0ZKSyF7PG2glUSCmaU++9poBRDJu5EEdOARGpF8XaDYn/l//7PyVEYu9I1+X69va72ta1nd+vv7vW162/VKs557rZtl7dnYh73zuqAqGptuKAYCBeQyDmFigH4SEiaDWLQTDGLkLYxEHMUMgR3LtrB2tgYF219ptD7b4qdu9dqbR+035rrVvrFChEKXW7j357sd5VUgBHIhnGMXMix0wTOb9evuz1uq/rXhVAxEyrumCWNNpoFGMYUore7P3t/f36hdymYbq+X1sr8zy20o+HD998892yLfPD4zefvmtWl8v5+v7+8dM3v/3px9oqAR6Ox9PpIYR8p1m11s7nc0o5RjHT/yoSh/tB/3K5NG05DG9v7yQyzVMrtbVuDlXXOYe6b0kCdXh7e2H0ul62dYlswojuvW4izMJuXtueh0yECArQ0SCl8TAfXVLTIoTgXlujGJ3QXD0yB2EK4zgDeNlube/ffPMNgf785QuRnOZZRPbe97Jp11abOUzHYxiSILCTd6dAIY0BRXVxU8RkzRA6jgOmDCzh955pJBEAdMDebS+rMU55rqXf6t7qulvtWN0xxwi0FDIDBGtt37ujMWnvpni3jBLS7f39d28/A+PD/ETGdwLV
XtZdW/BWt8Wqo3vkIBKBOjESAzgwIZO7kzcBVAIMEilIjiFLQvW2b2AeEPhf/d/+BIHMwE1aXc+3L3u5llp6NcTOvOQgH46/+PbDn6yrf33/21t/681q9bWs67ZWrWrVoSDgPIwhC4CHEEOKIBgYY4AQNDEmyYiBgHvfHbv74Aa1+l6LAmzlupRLtdKhOSEgEJKZV6sGAI6M0dHW7bwt27puwJ0DGXirNTIKRVcH8BAzM19u59fttu8F3VBIHUzllE9jmomiqtay7OutbI1RDtPctN2ub6eHOQ8DUfzuF38IANu25ihvL2/X263s7ePHj137+/u7m6eQjqejdjDze8vx9fV139fHx4c73PN+iWTm//pxKMttE6YQZFlvKSZCXJarBHb1UrdeayAGNK271g7MWnvZbkSACNaVGO89AVe33gE6IQ95NHc1l5DjNBKCIwQeEFFNk4T7IqL3JhLieHj6xfdEvF2u3fo8zinl3uq2rRJDDJJZmE17B2t13yLh8TjP8zGGMUoackQmJ2II9zg0iGDIJBEYMCaI2ZAdPcYMzda+ddRlL7elLrW8X8+17xgJgZ3AxEBAW7fuRTc1rkW1KnTovZupNQGQana93S7XN+SeI63rtbayl91KIQJT751AIhJ1rd53Ao4uaKJGTdWVHLgrFCVAHoYxxSGIeK91rYDetfM/+tefANi0k3PVer6+b3VprVqnyGEapueHb56Pv/7m6e//wS//vsj4tz/+dlvVAdZlXday77X31axMw3Ec8jTNxEQEKc05MeAlZ8pCQpwju6tabWV1R4XQO3XV++xIode2GRiImJv1FmN0tGq3pqqV1F1ha7WW1bd2a31VYwO1jr00h2LWyZ2AMx8UwqVsbW9au5oNwzFBJgjT45BCWLZL2RdrLUpCDNo6hfDx44fj46dS2+HhUTj95of/crl86dUI0d2n+UhB3r6+CBIATIcjswDgPB/MrPcK4ETEcld5o1o3s3EcS9n2feu9A7ianh5O27btpYzThOiXyyXHqNb3dW1lzZmu54t1Y4HTMGzredsWZGbmcRz1vlNhUa1gigwIlPJMJLXVqi3FZN2b1pSzSFjWjQnRXVvXDta7apmGh8PhQ9W97C0Fmae5aFv3rSzrvq1p4CmPwFRa7a062OFwPDw+K4C25oQhT8hBzUkCs3gQdHVUFcHh4NPA4wgUQDhkMYd1Wb+eX7+u7+/rtfTeuxclJeu0mxijeSuAwB5KpX3feuuGUDvUUrV3RYwSSlk/v30uViTIUq4AOhAByzAdJWW1zd1due8GBlpqLe4au9LetKnvrTUl4cBR7q+4qoEzGDRQ/vWf0hBnBAAK7rzt+7betBuqhNC+/fiL7z7+n+fhOaUUZDgNv6jVrssbEYBL175uRc1OUwiDRJlyyuMYHaM2FypIK8uas4TgQi0EUqdW1cG6YTeqvZiBu7EUAPeGaswkKQiQAZkQuKkbF7feFR1rhW2p+1q8gyuo3lrfl/1LEJrjHIm9OxNAw/f3q6MSq1o9zo/CGWkcs3jX9bIRyLbut9t1GKd5OpHkz19+XrdFJLy+fH0/v03T4dM334P77Xo7HI/LvpStpBifHh/VVdXGcRrHqda99/7w8FBrbX0XCczs7veYZ+9tXdfW2t1/CIAhhF61lGpdwQ1MxyG03i5vb2OKh4fjy+uL7gu0LQm13hEDIA3DWGptVYdpkBgMTM2I4zjOIhERet17sxCk9c1dx/k05GlZlyHnGKetbkHU1tK7TqfTMB3NoZdVcjw8PkHX1tpWys8//bjt2zjN4zha17qXptXBHRqGKY0TRsHAEAK4IxGlZB3AgJFUO4cASL0XxCqIiBSJOtq5rud1PW/NfVBmEajNt34LmQNH8OBoiNDatm513dq2bdZabbU1MDNADEFK2XuvMTEH1K4dXCQe5gOg7evqquhe16ZudW+9Wzcs1Xv31sydooQQQwzeWqsKbKTNqjf+o3/eA2WmAZQRqZuu663W4mpqu7D/wTd/+uHhV7313rzVXrflWq6t92mcAXDdW4rjlA9EIi5THCRmQGJU03dOilTVFiYmZAohp5PwtJVWStduQNR7b22V5Mi9Vo0sUUaBZAACjaGIBBVxj73W3tyctnXXBr1Bb9tW3679vWPzXhPEo8wctNeyrXuxpmgpCgWOY/j0/Inq5N1BWSi+X25d23fffneYHxDw7f3ldz//5TQl4cGBHh8/DcNJ1d8vbzkNdd/2bXf0HEckWMrycDox853s0LuJBNWeUrrrM+4IrbvgFhFKKcuyxpjuEFxCcLd9v7l3tBruUo0ol8sNwQk69Gratm0nCikOQx616zCk3nrvjYRCSIQxxdx7uwewc8r7vjv0+TBve12W2zCOKaZaIaQA1pbLNQ9DJHt/+ULEpVe3VkohksM0xTQQSy/1tm/LtnpX79paRQIEE6Dp+SPFaNCJgCgAIQVxEiSu627rldpGrUAtDEruvTfsJgY55xAHMtkVlSKnEMYjYbrtK7MchiMhO3UG7Qqtw+XyUvYdNJIHrb2VbRzGwzxI9qWtrbbH41Mafq9VZqQgEJnXZa2tIIFqVG1Fb62rAbsGcGTBmHhK6X7tdnPr/Q7s4T/5l6EUiyFbd/BurqU2haIGWrnua87y3adfmxKAKbT315/flp9QLBARmrMSMuOYOU4BwMBAwBWhOHrvlVCRqyq5RUTI+WHKn0Smy+1ctLXeiToLtGqIXRBBmSU4GFp3q2hGQTqDoGIPptLNvRqqhWCC0Mw2uxERIVDrYmjurWpR3LR2aHGQnGnfvgaSx/kX1jQPGdEV2nEYEw5drbT1888/ENLj48fn50+IsK63rs3Nh0N2s23dhjGv6y4cQ5RhGmJMAPj49LAum0iotR6PhxBkWa4hRGautd6Rb6odAEppiDCOo7u7ubadUBl1vd28ahYJkZzg9ecvCVCx5mlyQ+EAgAQowq3veYj0e99JAEQEIAbz36sriNkMJMR5PrjD++vLhw/PhvR2/t3heBSZl+2Wj4fhcLhdv/Zal2XrpS/nt679eHjqTbf9dno8AfK6rWoNAYeUCIkpcYgiotq9dQQEYSAGjoDACKAFW7WygzdHIJl4PgiGttzW9epqCXjMydCrgfIAAXov6DDkwzRPgZMDA/XubV1tWwAxMrv1Ns6n54dPQ4oiHhNrbQjy8OH7ORwRhAF6W5mRUbZ9rbq5WbW9WUcSxkCGQBwCDOPIAYTykMcUHIHMXdH5H/+bb2t3BCInJkOCqsW0Oba97L15LQuJMvPb5a3Ubd0vS/spBmRHxc2hkAEhJ84i5IhNm3ZruqsVQgWoCEYcjvNTSiewMcfjfHhgHr++f9nLdchZkGtTAmVkYDdrwkGQiJ0luhMJCHVwri1t247uQ5DTGIaBhzHHELU7Wxhj3rez1lb7vumytaVaG4YcEUj7XmtIKecEoO5gpsv7pVx2ILrdLkzhD3716198/3e+fHn52x/+c+/bYT4+PpyI6Yff/PDdd99fb9eX97df/+Gv3ZEEm+rjw0PvWsqeU+bfu31Kt55TBMD
ee4zRzEorEkTVtbd79jVIKLdr2a4piYCcz6+380sv+8NxLtva9r3UbduXw3gaxhyitNZ774AmIikl4uh2RxX2+5KhaUHAex4bwGPMUaJ530uNUVB1ud0Ox8PT4enr+bK3/SGP1rT3nkMIQQx9r9vzw2Ngvl6vzBEIUQIhuXcHkDimEChGksDI5g4EiAxhcARk4pARmYTMtLcOABYycQbzy37d9r1rkUAxJnUpW9v6tZNO6TDwIU/HYTgEiW7afKGg7uCdYjx8fH7+xfe/mqfHECLHOA5DTulyeSvbNuYJzFrRXnTbrs1uLHttu+reejcHwoB+r8kbUouBOPGYD4MIsoE7Q5CU+J/+D98BcYpjDDOzhoRuVtoGSKBTLbqX/nb5adlvL+8/vLz+5aZfwHugFsSZUEKX0NHZkXdrxKitLst+P76BrWBmOiJE4eE4/eEQPxCZBBrkgwNt9WJK1hXIVF2C5wEoYpAh84CEmDgFEVQmagplt66NUQ4yTYI5hxCGIRxjmAljcAHv23Ir3XZfSm0sPATOAhLy1vrebnl67N0EhlKs7wVVS605D3/wB384jvMPv/mbv/mr/9/pMH3z8RNRCnH+L3/xn2IYH56e//Jv/urv/b0/QeCvL69DyoeH03yYf/rd747HE5PcSfGt7eYeQzBTdxjH0dxVe601xqC1uzsSDWGAbtu6btuWJRwO09vb15eff5gCzPOpmtfthr0ScoghjNGZDJyQAYA4IiIw3JNFrezMTsSgGoQcWq21V2XBlHLZTdVSFgdflm2YhsBYr4vt4ADaGiIic23NrO3bcpxP5rDf1hhSTgNxADBhCndgyjhCmpyIBAgQnFwiSjcHxwCSlCjkg0gCq32/kSSZj+r+fr2e1+vSq5IYBWttraVIPw6fnvK3jNN8nAK7ILe+o/BxGhjaGPP33//y9Pgw5jFPMUqOPKaIHNvl7efz5WsIXsve2q3r2ut+VzwAORMAmJupAyGaq4RK0lmYxJkpBUFEAEY1/mf/+o8SDZFhCDlyTBHvTxuMUhiwn+q+rWu9refb8mWr54575E4CzQpRmQaexznlANq3fSsdiAfv1HsD2l21OeV8BOO9bCzhOH9jls2s9x4k9MaX24UQBAI6SMA09nkcAFibAyd3YvEooXpTU+2m3QxkEhqhRGIJE2GMfHyYfxEpg1lpftsWA2CnIBhHEhFGZPbrctt1Oc4nbMMQSHtRbYcUjsfnvdXbZb+efx6Oh48fv/vpdz9spV0ut7Vcv/vu43Ldpjx9+vDNf/6Lvzg9zPN8Oj0+ff7pcwrhu2+/e3t7vyfkCPl6W+fDwTq01o+nQ9kqKBLBHZ8/xKHt+zwls91V9+s1IA45RJay3y7Xt0D4cDoZ9o48TKN5F2ACigKE97OOBg58b2QhAcI9jddbL6XmNLvjsp5DkKenZ+3aWnOAIQ+B8fb+RubjNKKAoqOE0ntOgxAul6XUvdUSxzQdDnZfJiCGHCWEQVg40DAgCpGokFojA5fsYbLegJ3jZAbYC2AlTwxgpRACQ9gNd8aX9fq+ree6fqlLB5x4jDGN80MMAwGxUBDpdTO75cnSMOfxFIaQh5xTZnGhDI7b/k5oLNBaKbV13S+3l1L37lbaSggOoOZqWNuuauauWBp1RwCsLBJjgN97nV3V+L/9l98/PXyXU1KrEpkZw8BBInQa8/BweHRtt21pzWtlQ2FAMyMGiTykGITMAQCjBKG4llq6InFX3fbSAMCMkXM61GJ72URClIdt1WZFu+5lVe+AHHk8jBOSjSkQIRNFieYEICwMQF2LQzcNa+l73QIxE1GkNJ6CnHI8jnkaUgoYaqtbWXv1KMLEMQcWUi8o2qGJ6pRPHGP3xkJBAqott01VDSoTjePxr/7mL17fXp6evs1xHHPSrrfr7fvvf/n5y5dhTPPhMEynUvd1Xf/JP/4ntfTL9XY8nlIKtVZTnca5tSYi43Tcty0EykO2rtZ6TNysIbqrai+kfVlfwNV7SYG9t95bSBJl4iDkEqMQ3OvgfifP1b2W2kIId9oKk7gjYUgplVaut/M8jyGEZVnQ8XQ6bevaWyt1jSG4w21dzREluAoRifC6bYiQUt62ZSvrbV176zEOrRXEPuRDHmZhdcrhcEBhJHJkdEZkYIDMIvleE0Oo3hT3zbbFTKG3fnkz7U06kRPm3Vop+Hq5IsGUx240juOUp2s9e3cWQvBtfTe8xgmBwNFSTIRRKxJi13WvL01LSmlMc21Q9lpbXdZraQUwAKhp36u2pq1U1YoEgOTdOlYKioTgbA26OqC0zvwP//vvnp+//+7T32Ecm14BIGAcc0qJU5pP0+NxOvZme2m9Y6mVICEIIuUhzVMSQVWo1RHZQUgA3XpTA29OTokRu3ZAYcl72dZ1Fw61luV2U3W3Rl5CYGLmSGngyEPOIswIEuJo0M2K9tbca1mYFaCVvSKgiKtpiqNQTjEHZuwWxNXrtlT1LkxBJESiyGYVEKbhgTo23VKaycfAgtZKLQRGALXvIcTPn39a1ssf/OpPvvn0i9vl68vXr8zh06dv1q0EkdPpuJV9PAza7O/88d8Bpx9/+p1I+PTp075vZV8R6XA4xZgAnFjADcCH4YBO2ru7phBa61HEVRH99fWn3sttvQzDEFIuvQGQGR7mIUjqvZhqCJGQmcM0zEyh1WZu94AdIpnZtq1Ny2Geay2t6eFwtG7n81sIHGNclgWJ1r0cTw8xpt6qtZKGbKbLtrZeLu/nWkoMSWvby62tW287MaSUnCzlkIYDxyRhhnE07bhXSBkPB/MOgJRnbH5fvxo5CiAjWF+Wde/lfb9ebq/b7QocYxqqw3LrxRViIiPmMAxj29Z934OICHXTps15c145KgD01oW1tovBBXkPkXIWd2UkR2Hmve57NeJQa2v3BmrXbd/dBCAQkAM5AiOjM/Pv/997VSDkf/5v//6cnp8fvxvTg4Pv+2K+Cevp9Bz5lOM0DhlwvK2L4bVDI4eUJAQhInQHYJHEnrypO3dsRCKMHIwkRg8oU++4t6Xaba/7dVlu29l8K+2ybmfTxurMEVkUNAlJYAN2S109SCIMbkZs1rt7BTQUUsPeEcCwt9pbCAkBhsTuvZat176um0LNOaU8kIhECSGZA8cZvKr1OKaY5oGnMSdE2ta6rmcE/Pr1xbT+4hd/+OnjL76+/vib3/7l6fHw8PhhWZfe/HA4AuAwHvI0PB6OxPTl5evlcv7lL39Zyr4sy5AGIpCQRAQAt30nhBjDneQE6K1UYc4puakQ7vt+u1xykt46Y5hPx23rSDEE79qmaSaEUndwiOH3shY3jDkiQu2tqxEiMyF566XsdZ5O7qjdUhqEw+VyuSNvm5qEhMA5JjMz7QomgWttrZQgsi7L7XoLMZt1LesdRt261roS0eHwHIYRRJCR1L1Xgo5Z4PGjtXAHN5kZRSR3VO8kkmYM46a+up7Xfeu0IijHW63v26pOQgLObpCyNK1b2bU7C8hATSt4Z7Km4K051G6r2d61AzozOythRfYYxiC5m7k7gDtQ0Q1V0bxV3H
frZiQIhEwI3khEQmaIfa+q3RT5f/x3fzLnj+hExL336/7F4KrexzgN44TKKU4hjute1vaGoaMZmcYg6NIL9YagHFFcBQ061Y4QKTIRAQkiGFsPvffWb6XXve9bv5a296Zlv2nfkQVAmCRxIAOUEVwAyaB222MM5g2AhymmMDKmJFkwLGXftQFJa4tZVbNmXrWo9a64lc1aHcdDSqecMhEadJOGQUWCG3dHThIo9NL6vgH01vv57S1y/Oab72IYz+f3l5cfP3z8dp4e397Pl9v1OD+cjk8GPabpcHj67W//Wl1fX78Ow3A6Hd7f30/HB0DqratXBnKwbV0ZKYXYeydxb92tpyCmpdWt7HuK8e3r15zoYX7Y1xYjg4GrT2NsTcHZUYXZze/CbQeVFFgYhM3d7iJR7yKUU+rN3UFE9n3btj3nIaWhNYsxWO/buiIAknQFRdlrV/PT8WBVa6kxZiTc2y4xCYuZmyET5DgmyQIUUobH5x6jgyqzoFgzm0+cJ/SOAdrbhdqGblqb3a61lPT0YTycRkUzK5F35ZROzsPaLk13gmjKtS9OhYOVsnYHI1C91fZmoNrYK/Ve3FvvaiaAsavV1gBsHFKMYwiht/sIuCEC3V3Oe4VqURk7ABAKEYJJkQghYIgRzFqtXdt1u/F/8y8Px/nZVYLkrZ1f3n8A2IccDXoek+mIkEkcHLsWAGASVwMjBHJHcyKIosKQgwQIqlZRzQGBoys4ILowSUzJAYgIzM3AuiJUBhVmBwA0CQFMWlcAdVMzV+tESqwGLhiCDCnmlNKYhiABgUBVva7boobd09771lZz32sx6k8fPohEcMsRiLz7RtxjDAh+u65gkHnAJpnjvm/b2jLzmAcwLLXvpUzjLHG4Xc/LuhLSN998L0HWbR2GfD6/E+Pp8eHl5eXh4bQst2+++SaG/PXrSwyB5K64bgg+5XFIedmvDAZmoIbk2lurO5o9TMe3t6+1Ls9Pj6W2urdhnACs9eruDr7v650AVcoWUyZiCoJMKSVCBkRwW9frul2HYch5KqXe1w4Avm07AEiUFOPxeGitvb6+Xm/XPORtXeu6aO+APh0PXXXblpiiEJdamQIRAWMMIhJiDMwQY7Zh4ocHYvFqDTsFoS6/t3UCooG9vXnrHEGw7ev2/vK57WfQ5uSNmHFea40SxyHc1tfee7Nlh7cGi8ECvCpsDq21um+tN7cK662sa6lFe6XWqfVmZq2b0JDCyDIigHkh0tqLWQNDBkdA7GpamHDIiQIobRRsGFKOLIiotK71fF325vztP5Dj/MgcWq/d19fzZ8BLiqDutbFwvp4vtRWiFrgKUBAMLGZdrahWRJ/inChHBE6hWDPviK1qJRQoYdM9pzHnKcdjknGIM/W70J0JiEECMkA1qkjd2N2w1lb21nTvtXmnMR845N4bEYkEQYoMMcTEQQK4S+APKT2Yc9dQum5tVwALUVLMeWR3ogbUzDa3zcndQatqU6aUOYHWUppAADN3kBgdcZwO0zBdrq+Xy1cR+uNf/0mM6Xc/fc7jaRhHM/vjX//6/HZhljwM83z4+Pzh5y+fhSUPg7buvW/LMqQ0jxOg3g+s+7oQuhC69bsOSHttWpil9q613sF3IswxCBOzANjtuozD6GpddZ4Prtp6124pJQIA8CHHVmutNcXxHlMjohhTjKHU7Xo9l1K76jRN7v7+9lL3JQki2bYu19tSyjaOiQCW6xUMcs4IIFGIHTFyEkcDAw5IHAnI1xtrhXsHEioqdmY9LxwDJQR3K404h8dHInp7e3lfrte+N+ghHXTXaleVfbm+gC81vBhfEFvRc+lvAJu1tZTetlBvvK5627BsUDaoBddbqXu/3WqS4zg8WwdCQ1Lmilxbr6VuagCEYiCIAOrYIXQIFaQLGVOLRK6+1X7b1rXYPHzPv/qH2W2PnJf1fanvTfdWX4VTTuOyrdt63bey1ws6uLF7D2EgJiC/b30BMQUZAgPLrovZZqoOJAhaSzV3yDmNYx5D4BRzwJyHgcAQegByt04dmMjM1IkQWLR7a7psl77vgUOQMORhiHkrqtgpKQUwVQREiMfD8yl/CwbdXD0Urc1WkVygI7RBEiiYmXprvqurW1VoyLHZXrUEOblKZMGq4H46PUVJKad8SOtyfv38ZRiffvmLPzgen19e31Hk46dPaUp5mLdlqbUfT08p5I8fPvzudz+q6dPz0205awNh1t6GManWXss4HZftFpgIupZdiIi8tWVdLmYlxel2XtwrgWrbAT3FcDw8tdZDYHQEw3GMe9l/327p3bQb3BcOtq0lp0lirqW5G7GN09i7AWDOsbV2PZ9vt4WIjsfTOEzLuoDjOMxEpHW32pfzdbku67Yq9Noa3AscQCych4OEPMQU8kGiOAJ37V35eDQWW6/etxCOOAxwvXEzEPYhGmeSKR7mIaf3y+uyb7XV635tDEi92OvW3jd9W/yl+KJta+Vca9cOvWBbWyvtei2v7/u6t7JrV+3VrJo5l821Y4qJKQAW85tDq2Up/QJAhKwtkylZh8pAySOgdOgFDa1Lb1y7VrOOHOXh+0//gP/eP3u2vpPUYm2vizASBwWTmJj9dlmX29p0V23oPaYpygCMTuTIHEBSFBkEySV2atve72tjdOulmpJCTDHMQ84hMkYiijikMNa+VVtAqmkMcZAwmLlaQ+jAaF5bJYacZDSlO5UJWLTvrV+VOiMIEIVT5mN3dLhvKBeFKwVF1kgVqQGjw77Xt+q1mjqZ0252A9mL73UvI41RMphHl9M8T6cnRGKi3vjLzz9KkO++/T7E4XK5NW3PH5/GaQIIAFa2NY7TfHwY5/z25Yt1/e4X3++Xa9nqaZ5f317n+ZBTeD+/IPE8H1W3yIjm1/ev6C2GQZj2968GcJwftuWmvZBQszpNQytWyz5N47osDw8P3fpt3XLKhE5AXbuqau+MnFOuta63JUq82+TMVEjyMPTet213N0DU3rZtJYJhzDlkdHxbLm2vcxpqq4AeQ9jLzsTu2mtliQElpcxEIeec5xwDjTOKuDDyiDSRsEMkpq4dchDtsCyASIFZxExpLzKOx8OpNV2aOfcXfd/bbd9fipaX7fPtcvZWtlLeL8u2mLUDaNbuvWvr/bzv+2rgdMfoI0VmCpFNu7uhooMirF1r7Zvb1YmrUwKraqVJ54AhNrKlGvRWCUwGg+DKASamUXgcwyP/o3/2S1BF0jyF1joAjPnR3XZbQ3zozdfl0rqhEzGFcGQ+9g4kSATGRBLHNImFZiWkrJj2ukEAdDTFrtEpklCWiADE0QyYAvhoTqW/qoLIgBBTeEhpROQGbq6MrooOnOOYRKKm1rx39gbey152N4gclAJYZxJERsSOV6ebmgLuFB3dyOJSz7f91a113dR7CAy0OhTgTpa4jwIYIRzG45w/dUO32ns7XxYWzynelr13jTE8Pj4lmW636zxFwdBaH3JOY9jXFR0eP33Q1i/v7w/zuNd1XW5Pzw9IsO/b08PjNB+W9V1bBevL5SzsKWU0ur7/gJHTkLfblVRDGs287DtGK
WUNgZmp1sLErVdwDxLNDIlSSuiwrkspZZrGYRy6OjOKBHBat33br8OQGMnUsiQm9N72/ebWp8MMTExyWdaltUjUe/PAmaMbdmvunmPOQ+Ygd4JLSiOHiDkjshlRDk6dJKII5InC5O83Lxe/o+LKBl2hN3Iry9LAG0BptbEv29vn9x8dKnNsFdet3q79cm2vr+deOcZHkeSq6KhQb/XCKAIsgvdpz12pACDm5ozdSrd9rbdWqzkWBzMD1w66gzdGpBagB+E4DnNOmVJiEUlugTEMkZmd/7t/9QcpsHAOSSTGIGMMLAHNmqsC+V57bUg0CiUEiVFEotVG1CUEcxBEQjSlbp5zcrR1PROouzok5AAu2q3U8506D+4GYLD2rog0jiPjg3ZiCkRJeGAiVe/WSn8H0ofpFGBC462Vva5dda+91R3Q3IEZXMNWXtby1g0Ox+nx8SQS9l7ExQ1L8+aO1AC7mhk4UycyIBBO3obMHx6nD1OetPfz+afr8rovN7cK0LdtT3l+OD0dT4co+Xo5jzk+Hj/u21tvdZxnA87MIlK1bpcruqaceitBJMbYe0FvHz98WJfry5efj4exlW1d3g6HKJK3tWzrl9u+Mcm2LndVLiC9n18P0+xutZRpmlSViYc8xJTMlJiPx4Pq3WYXr9fr9XqNMcYofv8FAIiIKexlzylLDHdJ831wva+7de1FyeHx4WBt124I4Xa+dW+UWDrGnB2USUKMd8FrGDLliYXNzFEgToys1EwYgTAFar3XG0RgCQaAyCjsqtf19ecvvzuv7xi4mP98ffv59W8YcEgH11g2//zj8vq2dQ05PQ/8kGIQwsip9bJuZ1UNAZGIBWJmEWGkwBBIAqNBbeWirZhj62KIjlBKJUTANoXwcc7fPz1+/3T67mF+nE5TjoKMxGrc0RS2Us78p//TL4fhUSg4IIkhakwSQyDvpd1UoTZuWoko8EAwppiQqPfVbRdhhlRb67gjRnQ2pZRGQa+lGGg1IwfhwVyX7e22LSwBlEq5qu4ORkTT8HgYPuidt23CADmNiGOtvteLt2bmDkyARFjB1ma91e5r1RVaQ4zbuuzbAgRjPk3D06en7x5PnzKe3LC2JUgmj9frrbYeOLh3AiAMgCZBMx8Tfcox1tLO71/3/cwcCLC2DZw+ffr+8fmDGrr55XI+noaPHz9+/fn98+ff5GGI82HMg+3b2/ur9pZDiJEAgADd++12US3H+XC5vC/LNYV4PKTPP/8EVgA8pbnUzawTMBqZK5CLCCIMQxJARFI1+L28kkiYmCTGfV1rrSGEbd8khOPpVGp9+fp1bYUBg2CrxRyGYRSJiCQcQhREUHdTHMa57j2FgL1vt/fjYYrj3BW96bZdt+Ua3BGN3Ls2U43pXrKPPE4s7BAo3gGdAFPi8ZNxaOsbIYiM6IDCLpl5RNaqbbutL5e3L7evm5bOoTuetxu6xBAIad3aT68v13IpBofpwykdIzIJITE4tFqaqUsgTshCjEw4hDykaYiHO6SXQFvve28da3dV7dHTINMvnk//6Ntf/oOP3/zhN0/ff5yP0yASgFid99bX0q7r7XJ525ad/9m/+nUOcRgZCNUdMTBDiIzs+9721bQFV+u9pRyGdDQdCQjctSsRO4ZSaq2ViGMY3RiMcjowTbVp8xokOZM5tNrWpSEAUyi11taIPVASpjGNkU97W4yaQDYLzIfj/CFL7rXXfqXYHaF521rprboV7V2VGFLZilltzdxknh+n9Aw6aOspTqfphEraNvC2lq3WIk5qzhQYg/kegqV4UEUGrL20CsfjKcRxW/fA6cPHb2IaX1/fe28pj6fj8cPHD+/vL3/11/9Je/j2V3+c53x9/XJ5/ZqHeJynGNhUJVIQup7PrdXDfAgs18s1Rnl6fti329evX4JEV09xaLUGRlNjCSFKYCbB6/X8eHogROIghMys2vdtM7daNwBjhNvtaqaBAyDGPB5PD0B8ud5u13eEfphPUdK27+7uiExs6mYQ01BaX7bt+ePTMEYl37dy2RcCR1cOGJkRqLWtls21IiGjhJxTTAOlOAZDonjQmEDcGIwD0YgYuFZkUgciNEQKSdEMMUqMks+3y4+XL1fXDoKQx/TL4/AQjFCDg3x5e72Va1VHpG9PzwjQrKurUem9IqbIc+SQIkX2QCHRmDAPlEgoyRzDwQDW/ebehHn09BAffjE+/tHp+MvHh4eHw3QISdy7X+u2btuylrfl9na77Ldr39bSnP/7/+E7JiBKDaw7pxSQgFkcuDUvtZs5ALoCow05BQpjyJFj2ataATNyol57vUaRlAbiCKgxpphGVKxWJAAAaTdTQpd5PCny5fZlb7dpOsYQGTnKw9rflvqaQ4hyDJKYaBpP8/xk3BVa861502bbWm7rufWdMTpBK7s238t1vW1g6TA9gQsDpzCcxoenw1MMtK5ft/1MAOjQq+yLIpaUmGmIYQAiayh8mPMhYOjdjvP04fFD6/b15XOK8fH0+PD4cDg+qPoPP/zlXtY/+ZN/FObxtz/+ELQfxowBh5x7rdYqEZa9rNtyt2Yw8Xa9TkOYhvibv/7L7bY+PnwwMwcve2l9WcoSQ5imad82Znp7f+vWRcK2rUG4tcKEiLCvV0bw1nIIzMHccx4ohLVVdZumaRzmVvfr9eKAKecUUtd+3ZbuykympmrDMH79+vV2vRALx5jG47bst5cvpVzdIQ/DMI4h5ZCjs1CMUx6DRAnCQjGzoSF0SCPPHyHPXr2vV/IGIk4s7N6679X3K0fEOJt2IXo+HNMwdyA3vu1UK378MM95aJsW7V/f367bTpK0tUABQZdyM7BitWgVDkNAIQtSY9RAjK7ujZljHEKYI08xBWGWhgeWE8ePFB9zGLMQE1O3fb+8rS+X6+u1/PT6+npdbktZ161vJSLMY+I//TefiCOAGmJt4NCFA6ALC7mXUtZ9CeIpZXDLIYQYokmIVvray4bYEYl1CdS6vXMKIkMKkRNNeR5Ydl3VvWt3MMZEPqR0EMnrfn6//MQSDuM3BCFGZ8FSbm59yg9RsjsBcBrmcRrVWutn1JaBvcCyXl19ig8SUozkTqq91r6sm5kJp3mcT8NzrRvQFgR7uem27qtfa0ULrbnEPg3DEOc05CAHwhT9lOh++ovotrzfLpfbw8PDhw8fmIQZUh7/5m/+i1n7g1/9XWH+6etPD6eHbz588+Xrz4BAjgRQtW3bdjufp2F0MHez3pbL++PDVMt2fj0z8zDk3krMUnv78vKzg8/j3A3cvZey7ZshWq3q1sp9Xa/DOA7DqGoI1NViTikOyBxidKJa9/V2S4KfPnzK41S1uXmOGQkVrPd6T/C32pblNk9TL21d1nZbiCCgr/vSzUHd3ADVMAzDOD88xukgMQ05pyGnw0TxxJysLiSDD0dglCAAytet9xWtowTPAxGAVlgMvZnV25evDeDDd7+Sji/vZ+BxrxvYLUkA9b3vr+eX99uCxO593zezvuzvVVvrXlthVpZdqRmQIxtvHWtzNuJ5OpEkr0joYxoPIU9QHxJGh+7QGmz7
dlvO19v6u9fyV1/Ov/n68rvl/bwt+7qw+8MQn2Z6PgT+J//jJwdClK6goNbdYZUooJWMBSeySMDCwcmISThrgaVeFd16V9uMLAgFEqXSnSXkEJhjIBw/HB6m0+m22m29qnXhw5QPMQ5I2Frdii3rMk+TQGLyIBQT1r7XfjO3yA8s3H0PRGOeUFO/XEO9Jtgj0hieh/RxGKaUc+u91BUAW8HLednrxbQAgekSya2X8/nrttu2Y9nNPQCASApBUsZxPHA8MQZUhh60921f12Uh89PTcRgm7a6qh8N8vV7B++Fw3Ld+uVxyzH/yd//4z//yPyHR88NT2/fb5brUcr68JAnTmK7n92FI1/N1nkYRvG2rdX94eFRYupaUhpTp/L4ETixB1efDQa3X1pLEthdmRsQQooRoiCEOKY85DxBE3U3VEQGAmbRVUrte37Z9m44P43QspfRazL1U9WL7urVWgQCJVDUwai+9ln29FC9I0vcKZCEKUuCcBZGR5vlRjqccx2E8pvmIRGbG89Hi7CmZdacsMrgMTgZ1pbpRmPvxieYjNW9vXy/vb+fLzz+cX3+6XN5vb6+X7X3Hbd1TDDlMt9v72tdLebuuF0MFMMLe2n7bt3W9oGnX5l6dSu/dzen3mR5256baYZ+HmTg4igCK7UwlEwLO696v1/Xrevntefndef3hdX9Z9lttG7SOHbwPSY6Z52TTgfmf/MtPtdXmoL17Y6POdBGyphEZj+lw4Ec23Pve4WJQGFIDqR1dVSL3uAO2CQnRAb17RVJwDzCN6XmYnj89fBckfz1/WcuWcBYaOVII0ZRrvS37167hOE91WxiRCQDOdX8rZdEOxE6o2inw/Dh9nyLfzj+XyxJgmA5P4/x8mB7TkJf+ftk3LZ2qtKplf7vcLqMEDqnp2nQv/bq1i7aoxVx3EgpIg5ziOKC0QCzUte1dQ9MGrhmG4+mIUfatuLcPH79Bx7fXV1N4ef/aepnG09/9+/+n3/zmN9u2/vrXf+TqX19+eru8EMjjw/M4ZLUaorhbDPzwcKx76a3GmHJO21YIhBDNTIKcTh+3bTEvzCBpWPcGyNNhIufulqaBhFNOpVZiliBxHIlnBUhZtGsvpd6rwyED07rfwH0axt765fze1jOpMiAilb0exymm2LyrOQfspr1pihGZmwLFFIIMEkOKTVutS2bO8zGMQ8wTMNZ9VxZ5+pYxkFfsHWjUFCUJhUmJ8PaOAJ4e4ZCY8/L1d//vz3/ZHP7ib/7qz3/716/L21/9+Nd/+/oXKBpBz7ev13q7XM7n7SsFZzG30ro5MBMBsbm0rm7sqODGEAyidhT2kPauG6cgMZSylnYTKOKbk+3At2LvdXtZy9vuG7KRABBFHwKTdnAg53nCeYQhMv83//xbU8VO4qh+T5VVC5VRxzQO6TjmmMf5srxv9Yyubd96NQPaew2RUuhgHchqp2JguBlQs1ChPZ2eRzliG0Kc1/6+rD+jUsBBREKUIAOAubVtvRCLQyzt5rSXfgYCbf769kNv13F6DDy5o3h4HJ9zPl5v+9uXzxLz/OHT4XAMKRbdlvXSqt4uTZSQgyORYZPrz9e/ei9/C7wBiZsBAAUA3gNhTk8cCHExqG5dMWN3dkuSx/xgqutyCxIAadn31/P7vm2tlzxO4zR9/PBcavvy5cuvfvWH6Nha+fL5s5l9+PBpmkcOzATuEEI4no5RQqu1lDqO47YviO6gx9OhVAtxSjnt+x5DMndDckcJnEJUI4qxW2fh1nt3W5faDaBDtyaB0KE37aW0XhARYzpMxxwHNwXiFAe7m+7au2uFXgnU3CXGGAckZqAYsoQQQkg5xTh00xAI0FPOOQ+glVynYYhpRLJumKZjVyBkGh68me8XFAIJpAqBKH3bsMnrjwRNc+DhMH76fnl9+e3rb0Mefvrp5998+e1r/W3d32p7b/t2Wc5rub29f73c3kGcTFk6kg1hGNLASOQsgRMnwCgysiRVtI4SO+Y1hB2gmLdl20urGN3YKvFt86XjTfuuCiQxhZCkgxo0EXcopntKMk7iYh4D/+N/8YduLapnlpBBWTfdPTaC18D5MHxzPDyNw4OBXK8vte297rWqORoZoBLsUdyid2/doHl1ogZY2sLuh/y8lV513cq72i1KJ0IHtk7mIIRCZrCu+6KGe7uZNrW27XvrtdVyuX4ptqfxFPmBANFTnj/k6fl6296W6/j0MByGIGHZlsv5a9l9X0y7pjTEey9fnERab7V0EmExSZApm1fmFIeHICMjGtduzU3ERChFnrXq7fI+5gHA13VrvYWYkOneN398fF62xQxExMzmabhe3n766beHw+njxw8AkFLc9v308OiAEiIgvLy9DimYOQLV1pB8mo+qqN3NKhEJyzgdendViyG0XiUPMWWkYAbmmOKQ8mBG75dbt45ErcG2NjBnohDEQ9rXmiTkgbvVUpsgxxjM97JuwjyOsyMUbcgMDqrIRHdLOXOkwIjgShQCUhjzlPMhpIkopRCpd2tKw4mPs3mDOFJ6Aqb99iUQY5x63x1ziJ/awLSf+esZasEkz9NweV0/v5x/u/z44+vfWq/TIIrtvN7Ot+22l+uyNzAkNPQ4DnkcU47ESCDCFFIQdCYchjFLtAZW3LlBXKkbgbnXrqKGCtUZO4S941ZrV+u9a1NzMMDWratJQCYlghAFAyhhA+R/9Wd/ShAzg9sC4pJduRgoWjez58dvDsM3gfOQvlnWy/n8BTEChiBjYEEw1cVRMTCG2m1X6BZAQoCqddubwqVd9/altjfvhg7V1ByhgzsgV6DCqFX3ZSutNu3mTtqg9WaOzeH1+vNW9uPx05BH5KE3VROZnov15ls+TUnCvtzOb1+u5259aBWixxyHGMZEhzEdcpQGa/MVEDmwhd7J8vwhpw+P8u0kc6VVcQdXLwI4gXJdyjQMwrSui1l/eHqUkNyRkB8eTj/++OMvf/HrZb28vL18++03puU//+f/lzB/8+n7cZ5TSrdlzcOU0nC53ESkq7pDENr2nUVUjTmqqgjv+8KBwDHEGGMsZY8x1FIcIMfMHIRjkJTSIHlAiSjRTEhoXfcYBgNs6o6kbs7NzPZtN1dC1tprK+otxjANxxiGNJ5kGIjE1AJ609JbBfDr9WZdY0zo7AoKFoUIgTiGPKaUgohpR2SQSMQcR8uT0cwyRrF++9kghvTB/IpmXSaQYD9/1vef/fZWCRzSbVk/v/30+v7iyCLoDNV8r9CaEM/Hw8d5+HCcnw/D83F+fjh+OA0fxvBhGr57PH47TkdBygCBwLu1rntrtbv15tar7yA4pDGERDyaQ6t1WW51a702VayGACgcRJjIJXJKEdEBPOUA7vxn/+u/BpcxZiQ1aiiMiATaHQDKmI7Pxz9AEubBoJxvn1VVJJFHMC9l630lQgQ0JCBrvjJjDpM32kp9W5Z1vSy3rwhNOPXGW3GSNA4TsQPt3crp9OHh+KFs2qqiG7MQRG1UezcXM9n2t729UeQUDqyx7Nvelig9Mk2nRw6xb+16ud7ed2tkzcraneV0OOQwubU
[... remainder of the base64-encoded PNG data omitted: inline notebook preview of the 256x256 cat.jpg test image ...]
f/dv/8P/TDUq02R1/h//G//uTEmpKgyxaWIKVlrHx4evAdMSMIFgKA7t3aeEYYRxGmc1qstoTzGVFU1QjDGRAidph5gzLhMiAopGKVKZjDCrMxTQkqoVy9fSCUgQogxlatZaxs8odTM+nw+M8YSTARTjBElrG06a4IUEiAoM0kJjSFmWV4U+RKws92s67qSUhBGBOec87IsKaWE4PVqvTj0M0rKqsqKzDmfl2q9rut6hRDpuj6EYOxorQWJ+mAhCgn4YeiVUplSxujgbT+c61X1T3/9ay4zIRWBCULMVQYRDMkB65Y8MZVnUklGGaWUcZliooQjgBnlPljnLOc0xnhum9N+DzEq65U1ev/8nABo22Gchvv7+2X5HUIw1iKMtTFK5lfXV1VVzfPsnNluN5yzx6fH/f4RRDB2PYiWML6ur4ps++L+i1/9+i8vLq7KqqrXdVWuOJOciZRSij7jHBHSns6ntjHB7vcHmZe//OKrgACXNKagTZtxhDCnnAvJMcLOh5SAFBJCZJ2PGMm8QpjGFLph4IpTSjDCIYCmOZ/Pp9nMGEEp2Ha7QZjEGDGCGDNn435/fP/uHSK0XFWAsFzKjw8PlDDGaN+2IYQIwLt375XI/vpv/snN9Yuy2hAq6npVlavd5ur+xRuEMSEYE+KDk1zFEMtK1fUFwTQFYK1/8+bLerUpsvLu5h4itN1uXr9+rZQoiurm/kWe5d9//5MZ/dd/8avLm+vf/+GP//Ef/qEoCwQBF+r/AwHU3/3CQv3GAAAAAElFTkSuQmCC", - "text/plain": [ - "RGB4 Images.Image with:\n", - " data: 256x256 Array{ColorTypes.RGB4{FixedPointNumbers.UFixed{UInt8,8}},2}\n", - " properties:\n", - " imagedescription: \n", - " spatialorder: x y\n", - " pixelspacing: 1 1" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "using Images, Colors, ImageMagick\n", - "img = load(\"cat.jpg\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let us do some preprocessing. The most important thing is to resize image to 224x224 that the pre-trained neural network model expect. However, since `Images.jl` does not have a `imresize` function yet, we will call Python to do the preprocessing. The helper function is defined in `imagehelper.py` under the same directory." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Image resized to (224,224,3)\n", - "('Original Image Shape: ', (256, 256, 3))\n" - ] - } - ], - "source": [ - "img = convert(Array, separate(convert(Image{RGB}, img)))\n", - "using PyCall\n", - "unshift!(PyVector(pyimport(\"sys\")[\"path\"]), \"\")\n", - "@pyimport imagehelper as helper\n", - "\n", - "img = helper.PreprocessImage(img)\n", - "# transform from Python row-major to Julia column-major\n", - "img = permutedims(img, [3,2,1])\n", - "println(\"Image resized to $(size(img))\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The last thing we need to do to prepare the image is to subtract it from the mean. The mean image is computed on the training set, and it comes with the pre-trained model archive." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Image prepared!\n" - ] - } - ], - "source": [ - "using MXNet\n", - "\n", - "model_dir = joinpath(Pkg.dir(\"MXNet\"), \"models/Inception/Inception/\")\n", - "mean_file = joinpath(model_dir, \"mean_224.nd\")\n", - "mean_arr = mx.load(mean_file, mx.NDArray)[:mean_img]\n", - "\n", - "img = img - copy(mean_arr)\n", - "img = reshape(img, 224, 224, 3, 1) # add a mini-batch dim\n", - "println(\"Image prepared!\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can load the pre-trained model, via the `load_checkpoint` function." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[09:48:53] src/operator/./softmax_output-inl.h:187: Softmax symbol is renamed to SoftmaxOutput. 
This API will be deprecated in Dec, 2015\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Model loaded\n" - ] - } - ], - "source": [ - "model_prefix = joinpath(model_dir, \"Inception_BN\")\n", - "model_epoch = 39\n", - "model = mx.load_checkpoint(model_prefix, model_epoch, mx.FeedForward)\n", - "println(\"Model loaded\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With the loaded model, we can do prediction by wrapping the image with an `ArrayDataProvider`. The output is a 1000-way vector giving the predicted probability of each class. The class names are read from `synset.txt`, and we show the class name with the maximum probability." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "tiger cat\n" - ] - } - ], - "source": [ - "pred = mx.predict(model, mx.ArrayDataProvider(img))\n", - "classes = open(joinpath(model_dir, \"synset.txt\")) do s \n", - " map(x -> replace(strip(x), r\"^n[0-9]+ \", \"\"), readlines(s))\n", - "end\n", - "println(classes[argmax(pred)])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also easily show the top-5 classes and the associated probabilities." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "collapsed": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " tiger cat w.p. 0.415807\n", - " tabby, tabby cat w.p. 0.235859\n", - " Egyptian cat w.p. 0.161553\n", - " lynx, catamount w.p. 0.136078\n", - " Persian cat w.p. 0.007109\n" - ] - } - ], - "source": [ - "K = 5\n", - "n_best = sortperm(vec(pred), rev=true)[1:K]\n", - "best_probs = pred[n_best]\n", - "best_labels = classes[n_best]\n", - "\n", - "for (l,p) in zip(best_labels, best_probs)\n", - " println(mx.format(\"{1:>18} w.p. {2:4f}\", l, p))\n", - "end" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Julia 0.4.0", - "language": "julia", - "name": "julia-0.4" - }, - "language_info": { - "file_extension": ".jl", - "mimetype": "application/julia", - "name": "julia", - "version": "0.4.0" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/julia/examples/imagenet/ijulia-pretrained-predict/imagehelper.py b/julia/examples/imagenet/ijulia-pretrained-predict/imagehelper.py deleted file mode 100644 index dddef7415f45..000000000000 --- a/julia/examples/imagenet/ijulia-pretrained-predict/imagehelper.py +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
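# A minimal usage sketch for the PreprocessImage helper defined below. The
# skimage import and the local "cat.jpg" file here are illustrative
# assumptions, not part of the original example:
#
#   from skimage import io
#   import imagehelper
#
#   img = io.imread("cat.jpg")                  # HxWx3 RGB array
#   sample = imagehelper.PreprocessImage(img)   # -> (3, 224, 224) float array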
- -import numpy as np -from skimage import io, transform - -def PreprocessImage(img): - img = np.array(img) - print("Original Image Shape: ", img.shape) - # we crop the image from the center - short_edge = min(img.shape[:2]) - yy = int((img.shape[0] - short_edge) / 2) - xx = int((img.shape[1] - short_edge) / 2) - crop_img = img[yy : yy + short_edge, xx : xx + short_edge] - # resize to 224, 224 - resized_img = transform.resize(crop_img, (224, 224)) - # convert to numpy.ndarray - sample = np.asarray(resized_img) * 256 - - #------------------------------------------------------------------- - # Note: The decoded image should be in BGR channel (opencv output) - # For RGB output such as from skimage, we need to convert it to BGR - # WRONG channel will lead to WRONG result - #------------------------------------------------------------------- - # swap channel from RGB to BGR - # sample = sample[:, :, [2,1,0]] - sample = sample[:, :, [0,1,2]] # actually, in this pre-trained model RGB is used - - # swap axes to take the image from (224, 224, 3) to (3, 224, 224) - sample = np.swapaxes(sample, 0, 2) - sample = np.swapaxes(sample, 1, 2) - - sample.resize(3,224,224) - return sample diff --git a/julia/models/Inception/.gitignore b/julia/models/Inception/.gitignore deleted file mode 100644 index 3eabb6e80247..000000000000 --- a/julia/models/Inception/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -Inception -Inception.zip diff --git a/julia/models/Inception/get.sh b/julia/models/Inception/get.sh deleted file mode 100755 index 7b7895d65539..000000000000 --- a/julia/models/Inception/get.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -wget -c http://data.mxnet.io/mxnet/data/Inception.zip -unzip Inception.zip diff --git a/julia/plugins/README.md b/julia/plugins/README.md deleted file mode 100644 index f9925cbdfbde..000000000000 --- a/julia/plugins/README.md +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - - - - - - - - - - - - -# Plugins of MXNet.jl - -This directory contains *plugins* of MXNet.jl. A plugin is typically a component that could be part of MXNet.jl, but is excluded from the `mx` namespace. The plugins are included here primarily for two reasons: - -* To minimize the dependency of MXNet.jl on other optional packages. -* To serve as examples of how to extend some components of MXNet.jl. - -The most straightforward way to use a plugin is to `include` the code. 
For example - -```julia -include(joinpath(Pkg.dir("MXNet"), "plugins", "io", "svmlight.jl")) - -provider = SVMLightProvider("/path/to/dataset", 100) -``` diff --git a/julia/plugins/io/svmlight.jl b/julia/plugins/io/svmlight.jl deleted file mode 100644 index f9d9b2ec83db..000000000000 --- a/julia/plugins/io/svmlight.jl +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -#=doc -SVMLight / LibSVM is a popular data format for sparse features. Some preprocessed -datasets in this format could be found at http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/ -=# -using MXNet -using SVMLightLoader - -mutable struct SVMLightProvider <: mx.AbstractDataProvider - filename :: AbstractString - batch_size :: Int - fea_dim :: Int - data_name :: Symbol - label_name :: Symbol -end - -function SVMLightProvider(filename::AbstractString, batch_size::Int; fea_dim::Int=-1, - data_name::Symbol=:data, label_name::Symbol=:label) - if fea_dim == -1 - info("SVMLightProvider: going over file to get feature dimension of $filename") - f = SVMLightFile(filename) - for (data, label) in f - fea_dim = max(fea_dim, length(data)) - end - end - - return SVMLightProvider(filename, batch_size, fea_dim, data_name, label_name) -end - -mx.get_batch_size(provider :: SVMLightProvider) = provider.batch_size -function mx.provide_data(provider :: SVMLightProvider) - [(provider.data_name, (provider.fea_dim, provider.batch_size))] -end -function mx.provide_label(provider :: SVMLightProvider) - [(provider.label_name, (provider.batch_size,))] -end - -function mx.eachbatch(provider :: SVMLightProvider) - data_jl = zeros(mx.MX_float, (provider.fea_dim, provider.batch_size)) - data_nd = mx.empty(size(data_jl)) - label_jl = zeros(mx.MX_float, (provider.batch_size,)) - label_nd = mx.empty(size(label_jl)) - - batch = mx.DataBatch([data_nd], [label_nd], provider.batch_size) - function _svmlight_iter() - f = SVMLightFile(provider.filename) - while true - error("This is actually buggy and needs fixing") - raw = collect(take(f, provider.batch_size)) - cnt = length(raw) - if cnt == 0 - # end of file, no more data to see - return - end - - data_jl[:] = 0 - for i = 1:provider.batch_size - vec, gnd = raw[min(i,cnt)] - data_jl[1:length(vec),i] = vec - label_jl[i] = gnd - end - mx.copy!(data_nd, data_jl) - mx.copy!(label_nd, label_jl) - batch.count = cnt - produce(batch) - end - end - - return Task(_svmlight_iter) -end diff --git a/julia/src/MXNet.jl b/julia/src/MXNet.jl deleted file mode 100644 index a322da2fc024..000000000000 --- a/julia/src/MXNet.jl +++ /dev/null @@ -1,187 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -module MXNet - -using Reexport - -# we put everything in the namespace mx, because there are a lot of -# functions with the same names as built-in utilities like "zeros", etc. -export mx -module mx - -using Base.Broadcast: Broadcasted, DefaultArrayStyle -using Libdl -using LinearAlgebra -using Markdown -using Printf -using Statistics -using Random - -using Formatting -using MacroTools - -# Functions from base that we can safely extend and that are defined by libmxnet. -import Base.Broadcast: broadcasted -import Base.Iterators: filter - -############################################################################### -# exports -############################################################################### - -# exceptions.jl -export AbstractMXError, - MXError - -# symbolic-node.jl -export SymbolicNode, - Variable, - @var - -# ndarray.jl -export NDArray, - context, - expand_dims, - @inplace, - # activation funcs - σ, - sigmoid, - relu, - softmax, - log_softmax, - # broadcast utils - broadcast_to, - broadcast_axis, - broadcast_axes - -# executor.jl -export Executor, - bind, - simple_bind, - forward, - backward - -# context.jl -export Context, - cpu, - gpu, - num_gpus, - gpu_memory_info, - current_context, - @context, - @cpu, - @gpu, - empty_cache - -# model.jl -export AbstractModel, - FeedForward, - predict - -# nn-factory.jl -export MLP - -# metric.jl -export AbstractEvalMetric, - ACE, - Accuracy, - MSE, - MultiACE, - MultiMetric, - NMSE, - SeqMetric - -# kvstore.jl -export KVStore, - init!, - pull!, - barrier, - setoptimizer!, - setupdater! - -# initializer.jl -export AbstractInitializer, - UniformInitializer, - NormalInitializer, - XavierInitializer - -# optimizer.jl -export AbstractOptimizer, - AdaDelta, - AdaGrad, - ADAM, - AdaMax, - Nadam, - RMSProp, - SGD, - getupdater, - normgrad!, - update! 
- -# io.jl -export AbstractDataProvider, - AbstractDataBatch, - DataBatch, - ArrayDataProvider, - ArrayDataBatch - -# visualize.jl -export to_graphviz - -############################################################################### -# includes -############################################################################### - -include("exceptions.jl") -include("base.jl") - -include("runtime.jl") -include("context.jl") -include("util.jl") - -include("ndarray.jl") - -include("random.jl") -include("autograd.jl") - -include("name.jl") -include("symbolic-node.jl") -include("executor.jl") - -include("broadcast.jl") - -include("metric.jl") -include("optimizer.jl") -include("initializer.jl") - -include("io.jl") -include("kvstore.jl") - -include("callback.jl") -include("model.jl") - -include("visualize.jl") - -include("nn-factory.jl") - -include("deprecated.jl") - -end # mx - -@reexport using .mx - -end # module MXNet diff --git a/julia/src/autograd.jl b/julia/src/autograd.jl deleted file mode 100644 index 8b5edae5770a..000000000000 --- a/julia/src/autograd.jl +++ /dev/null @@ -1,404 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Autograd for NDArray -# this is a port of Python's autograd module -# https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/autograd.py - -############################################################################### -# Private util functions -############################################################################### - -""" - _set_recording(state::Bool)::Bool - -Set status to recording/not recording. When recording, graph will be constructed -for gradient computation. - -## Parameters - -* `state::Bool` - -## Returns - -Previous state before this set -""" -function _set_recording(state::Bool)::Bool - prev = Ref{Cint}(C_NULL) - @mxcall(:MXAutogradSetIsRecording, (Cint, Ref{Cint}), state, prev) - prev[] -end - -_set_recording(::Cvoid) = nothing - -""" -Set status to training/predicting. -For example, Dropout will drop inputs randomly when -`train_mode = true` while simply passing through if `train_mode = false`. - -## Parameters -* `train_mode::Bool` - -## Returns - -Previous state before this set. -""" -function _set_training(train_mode::Bool)::Bool - prev = Ref{Cint}(C_NULL) - @mxcall(:MXAutogradSetIsTraining, (Cint, Ref{Cint}), train_mode, prev) - prev[] -end - -_set_training(::Cvoid) = nothing - -############################################################################### -# Public API -############################################################################### - -""" - is_recording()::Bool - -Get status on recording/not recording. 
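-
-For illustration, a minimal sketch:
-
-```julia
-mx.record() do
-  @assert mx.is_recording()
-end
-```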
-""" -function is_recording()::Bool - state = Ref{Cint}(C_NULL) - @mxcall(:MXAutogradIsRecording, (Ref{Cint},), state) - state[] -end - -""" - is_training()::Bool - -Get status on recording/not recording. -""" -function is_training()::Bool - state = Ref{Cint}(C_NULL) - @mxcall(:MXAutogradIsTraining, (Ref{Cint},), state) - state[] -end - -@inline function _record(f, is_record::Union{Cvoid,Bool}, train_mode::Union{Cvoid,Bool}) - # Port from Python's `_RecordingStateScope` context manager - # __enter__ - prev_is_record = _set_recording(is_record) - prev_train_mode = _set_training(train_mode) - - try - f() - finally - # __exit__ - if is_record != nothing && prev_is_record != is_record - _set_recording(prev_is_record) - end - if train_mode != nothing && prev_train_mode != train_mode - _set_recording(prev_train_mode) - end - end -end - -""" - record(f, train_mode = true) - record(translates = true) do - ... - end - -Returns an autograd recording scope context to be used in `do` block -and captures code that needs gradients to be calculated. - -Parameter `train_mode::Bool` controls whether the forward pass is in training -or predicting mode. -This controls the behavior of some layers such as `Dropout`, `BatchNorm`. - -!!! note - When forwarding with `train_mode = false`, the corresponding backward - should also use `train_mode = false`, otherwise gradient is undefined. - -```julia -x = mx.NDArray([1 2; 3 4]) -∇ = mx.attach_grad!(x) -y = mx.record() do - 2x -end -mx.backward!(y) - -julia> ∇ -2×2 mx.NDArray{Int64,2} @ CPU0: - 2 2 - 2 2 -``` -""" -record(f, train_mode::Bool = true) = _record(f, true, train_mode) - -""" - pause(f, train_mode = false) - pause(train_mode = false) do - ... - end - -Create a scope context for codes that do not need gradients to be calculated. - -```julia -record() do - ... - pause() do - # testing, IO, gradient updates... - end -end -``` -""" -pause(f, train_mode::Bool = false) = _record(f, false, train_mode) - -""" - train_mode(f) - train_mode() do - ... - end - -Create a scope context in which forward pass behavior is set to training mode, -without changing the recording states. - -```julia -y = model(x) -train_mode() do - z = mx.Dropout(y) - ... -end -``` -""" -train_mode(f) = _record(f, nothing, true) - -""" - predict_mode(f) - predict_mode() do - ... - end - -Create a scope context in which forward pass behavior is set to inference mode, -without changing the recording states. - -```julia -record() do - y = model(x) - predict_mode() do - y = sampling(y) - end -end -``` -""" -predict_mode(f) = _record(f, nothing, false) - -""" - backward!(head, head_grad; retain_graph = false, train_mode = true) - backward!(heads, head_grads; retain_graph = false, train_mode = true) - -Compute the gradients of heads w.r.t previously marked variables. - -## Parameters - -- `head::NDArray`: output NDArray - -- `head_grad::NDArray` or `Cvoid`: gradient coefficient with respect to head. - -- `heads::Vector{NDArray}`: a list of output NDArray - -- `head_grads::Vector`: a list of gradient coefficient with respect ot heads. - the element should be `NDArray` or `Cvoid` - -- `retain_graph::Bool`: whether to keep the graph after backward. e.g: - If you want to differentiate the same graph twice, - you need to pass `retain_graph=true`. - -- `train_mode::Bool`: whether to do backward for training or predicting. -""" -backward!(head::NDArray, head_grad::NDArray; kws...) = - backward!([head], [head_grad]; kws...) - -backward!(head::NDArray, head_grad::Cvoid = nothing; kws...) 
= - backward!([head], head_grad; kws...) - -function backward!(heads::VecOfNDArray, head_grad::Cvoid; - retain_graph::Bool = false, train_mode::Bool = true) - @mxcall( - :MXAutogradBackwardEx, - (MX_uint, - Ptr{MX_handle}, - Ptr{MX_handle}, - MX_uint, - Ptr{MX_handle}, - Cint, - Cint, - Cint, - Ptr{MX_handle}, - Ptr{MX_handle}), - length(heads), - map(x -> x.handle, heads), - C_NULL, - 0, - C_NULL, - retain_graph, - false, # create_graph - train_mode, - C_NULL, - C_NULL) -end - -function backward!(heads::VecOfNDArray, head_grads::Vector; - retain_graph::Bool = false, train_mode::Bool = true) - output_handles = map(x -> x.handle, heads) - ograd_handles = map(head_grads) do x - if x isa NDArray - x.handle - elseif x ≡ nothing # faster than `x isa Cvoid` in Julia 0.7 - MX_handle(C_NULL) - else - throw(ArgumentError("element of head_grads should be NDArray or Cvoid")) - end - end - @assert length(output_handles) == length(ograd_handles) - @mxcall( - :MXAutogradBackwardEx, - (MX_uint, - Ptr{MX_handle}, - Ptr{MX_handle}, - MX_uint, - Ptr{MX_handle}, - Cint, - Cint, - Cint, - Ptr{MX_handle}, - Ptr{MX_handle}), - length(output_handles), - output_handles, - ograd_handles, - 0, - C_NULL, - retain_graph, - false, # create_graph - train_mode, - C_NULL, - C_NULL) -end - -""" - getgrad(arr::NDArray) - -Returns the gradient buffer attached to this `NDArray`. -If the gradient buffer isn't attached yet, return `nothing`. -""" -function getgrad(arr::NDArray) - out = Ref{MX_handle}(C_NULL) - @mxcall(:MXNDArrayGetGrad, (MX_handle, Ref{MX_handle}), arr.handle, out) - (out[] == C_NULL) ? nothing : NDArray(MX_NDArrayHandle(out[])) -end - -""" - attach_grad!(x::NDArray, grad_req::Symbol = :write) - -Attach a gradient buffer to this `NDArray`, -so that [`backward!`](@ref) can compute gradient with respect to it. - -## Parameters - -- `x::NDArray` -- `grad_req::Symbol` (default is `:write`) - -## Return - -The attached gradient buffer - -## See also - -- [`getgrad`](@ref) -""" -function attach_grad!(x::NDArray, grad_req::Symbol = :write) - # TODO: support storage type (stype in Python) - # TODO: make sure it works with gpu array - grad = zeros_like(x) - _mark_variables!([x], [grad], grad_req) - grad -end - -""" - mark_variables!(var, grad, grad_req) - mark_variables!(vars, grads, grad_reqs) - -Mark `NDArrays` as variables to compute gradient for autograd. 
- -## Parameters - -- `var::NDArray` -- `grad::NDArray` -- `grad_req::Symbol`: `:nop`, `:write`, `:inplace` or `:add` -- `vars::Vector{NDArray}` -- `grads::Vector{NDArray}` -- `grad_req::Vector{Symbol}` -""" -mark_variables!(var::NDArray, grad::NDArray, grad_reqs::Symbol = :write) = - _mark_variables!([var], [grad], grad_reqs) - -mark_variables!(var::VecOfNDArray, grads::VecOfNDArray, grad_reqs = :write) = - _mark_variables!(var, grads, grad_reqs) - -@inline function _getgrad_req(x::Symbol)::GRAD_REQ - val = get(grad_req_map, x, false) - if val == false - throw(ArgumentError("invalid grad_reqs $x")) - end - val -end - -@inline _getgrad_reqs(x::Symbol, n::Int) = - map((_) -> MX_uint(_getgrad_req(x)), Base.OneTo(n)) - -@inline function _getgrad_reqs(xs::Vector{Symbol}, n::Int) - if length(xs) != n - throw(ArgumentError("number of variables and grad_reqs not matched")) - end - map(MX_uint ∘ _getgrad_req, xs) -end - -@inline function _mark_variables!(vars::VecOfNDArray, grads::VecOfNDArray, - grad_reqs = :write) - n = length(vars) - if n != length(grads) - throw(ArgumentError("number of variables and gradients not matched")) - end - - var_hdls = map(x -> x.handle, vars) - grad_hdls = map(x -> x.handle, grads) - grad_reqs = _getgrad_reqs(grad_reqs, n) - - @mxcall(:MXAutogradMarkVariables, - (MX_uint, Ref{MX_handle}, Ptr{MX_uint}, Ref{MX_handle}), - length(vars), var_hdls, grad_reqs, grad_hdls) -end - -""" - symbol(x::NDArray) - -Retrieve recorded computation history as `SymbolicNode`, - where `x` is a `NDArray` representing the head of computation graph. - """ -function symbol(x::NDArray) - ref = Ref{MX_handle}(C_NULL) - @mxcall(:MXAutogradGetSymbol, (MX_handle, Ref{MX_handle}), x, ref) - SymbolicNode(MX_SymbolHandle(ref[])) -end - -############################################################################### -# TODO: User-defined differentiable function -############################################################################### diff --git a/julia/src/base.jl b/julia/src/base.jl deleted file mode 100644 index e94b1bbbe37c..000000000000 --- a/julia/src/base.jl +++ /dev/null @@ -1,312 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
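-
-# For illustration, a minimal sketch of the low-level marking API from
-# autograd.jl above (the shapes and values here are arbitrary):
-#
-#   x  = mx.zeros(2, 3)
-#   ∇x = mx.zeros(2, 3)
-#   mx.mark_variables!(x, ∇x)          # same effect as attach_grad!, but with
-#                                      # an explicitly provided gradient buffer
-#   @assert mx.getgrad(x) !== nothing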
- -################################################################################ -# Common types used in MXNet API -################################################################################ -const MX_uint = Cuint -const MX_float = Cfloat -const MX_handle = Ptr{Cvoid} - -const char_p = Ptr{UInt8} -const char_pp = Ptr{char_p} - -################################################################################ -# Enumeration from MXNet headers -################################################################################ -# OpReqType in include/mxnet/op_attr_types.h -@enum GRAD_REQ GRAD_NOP=0 GRAD_WRITE=1 GRAD_INPLACE=2 GRAD_ADD=3 -const grad_req_map = Dict{Symbol,GRAD_REQ}( - :nop => GRAD_NOP, # no operation, do not write anything - :write => GRAD_WRITE, # write gradient to provided space - :inplace => GRAD_INPLACE, # perform an inplace write - :add => GRAD_ADD, # add to the provided space -) - -################################################################################ -# Initialization and library API entrance -################################################################################ -const MXNET_LIB = Libdl.find_library(["libmxnet.$(Libdl.dlext)", "libmxnet.so"], # see build.jl - [joinpath(get(ENV, "MXNET_HOME", ""), "lib"), - joinpath(get(ENV, "MXNET_HOME", ""), "build"), - get(ENV, "MXNET_HOME", ""), - joinpath(@__DIR__, "..", - "deps", "usr", "lib")]) -const LIB_VERSION = Ref{Cint}(0) - -if isempty(MXNET_LIB) - # touch this file, so that after the user properly build libmxnet, the precompiled - # MXNet.ji will be re-compiled to get MXNET_LIB properly. - touch(@__FILE__) - error("Cannot find or load libmxnet.$(Libdl.dlext). " * - "Please see the document on how to build it.") -else - include_dependency(MXNET_LIB) -end - -function __init__() - # TODO: bug in nnvm, if do not call this, call get handle "_copyto" will fail - _get_libmx_op_names() - _populate_iter_creator_cache!() - _get_lib_version!() - - atexit() do - # notify libmxnet we are shutting down - ccall( ("MXNotifyShutdown", MXNET_LIB), Cint, () ) - end -end - -function mx_get_last_error() - msg = ccall( ("MXGetLastError", MXNET_LIB), char_p, () ) - if msg == C_NULL - throw(MXError("Failed to get last error message")) - end - return unsafe_string(msg) -end - -"Utility macro to call MXNet API functions" -macro mxcall(f, argtypes, args...) - args = map(esc, args) - quote - _mxret = ccall(($f, $MXNET_LIB), - Cint, $(esc(argtypes)), $(args...)) - if _mxret != 0 - err_msg = mx_get_last_error() - throw(MXError(err_msg)) - end - end -end - -""" -Get libmxnet version - -This function will changes the global variable `LIB_VERSION`. 
-""" -function _get_lib_version!() - @mxcall :MXGetVersion (Ref{Cint},) LIB_VERSION - LIB_VERSION[] -end - -################################################################################ -# Handle types -################################################################################ -function mx_define_handle_t(name, destructor) - @eval begin - mutable struct $name - value::MX_handle - - function $name(value = C_NULL) - hdr = new(value) - - $(if destructor != nothing - :(finalizer(delete!, hdr)) - end) - - return hdr - end - end - - $(if finalizer != nothing - quote - function delete!(h :: $name) - if h.value != C_NULL - @mxcall($(QuoteNode(destructor)), (MX_handle,), h.value) - h.value = C_NULL - end - end - end - end) - - function Base.unsafe_convert(::Type{MX_handle}, obj::$name) - obj.value - end - Base.convert(t::Type{MX_handle}, obj::$name) = Base.unsafe_convert(t, obj) - Base.cconvert(t::Type{MX_handle}, obj::$name) = Base.unsafe_convert(t, obj) - - MX_handle(x::$name) = Base.convert(MX_handle, x) - end -end - -mx_define_handle_t(:MX_NDArrayHandle, :MXNDArrayFree) -mx_define_handle_t(:MX_OpHandle, nothing) -mx_define_handle_t(:MX_SymbolHandle, :MXSymbolFree) -mx_define_handle_t(:MX_ExecutorHandle, :MXExecutorFree) -mx_define_handle_t(:MX_DataIterHandle, :MXDataIterFree) -mx_define_handle_t(:MX_KVStoreHandle, :MXKVStoreFree) - -################################################################################ -# MXNet Params -# -# MXNet API use string to pass some common parameters like the configurations -# when defining layers. Typically, it is enough to use string(obj) to get a -# recognizable representation for libmxnet. However, there is currently a -# caveat: -# -# Because Julia use column-major ordering for tensors. In order to properly -# interact with Julia Arrays, the shape will look "reversed" from the Julia -# side. For example, a typical MNIST mini-batch tensor is of shape (28,28,1,100) -# from Julia side, while the shape information for the same piece of memory -# should be interpreted as (100,1,28,28) from C/C++/Python side. -# -# Therefore, when passing parameters to libmxnet, we should reverse the shape -# parameter. For example, when the user specify a non-square kernel size for -# a convolution or pooling layer. Unfortunately, those operators are automatically -# imported, and information about the type of each parameter is somehow limited. -# One hacky way is to match the type description for the string "Shape(tuple)" -# when importing operators. But currently we simply decided to reverse **all** -# NTuple{N, Int} passed to libmxnet. -# -# TODO: find a better solution in case this cause issues in the future. -# I made `@_remap` in `ndarray.jl`. (Iblis Lin) -################################################################################ -dump_mx_param(val::Any) = string(val) -dump_mx_param(val::Float64) = @sprintf("%.16e", val) -dump_mx_param(val::Float32) = @sprintf("%.8e", val) -dump_mx_param(val::Float16) = @sprintf("%.4e", val) -dump_mx_param(val::Irrational) = @sprintf("%.16e", val) -dump_mx_param(shape::NTuple{N,<:Integer}) where N = - string(reverse(shape)) - - -""" -A convenient macro copied from Mocha.jl that could be used to define structs -with default values and type checks. 
For example -```julia -@defstruct MyStruct Any ( - field1 :: Int = 0, - (field2 :: AbstractString = "", !isempty(field2)) -) -``` -where each field could be either -```julia -field_name :: field_type = default_value -``` -or put within a tuple, with the second element -specifying a validation check on the field value. -In the example above, the default value for -field2 does not satisfy the assertion, this -could be used to force user to provide a -valid value when no meaningful default value -is available. - -The macro will define a constructor that could accept -the keyword arguments. -""" -macro defstruct(name, fields) - _defstruct_impl(false, name, fields) -end - -"""A convenient macro to define immutable structs. The same as -`@defstruct` except that the defined type is immutable. -""" -macro defimmutable(name, fields) - _defstruct_impl(true, name, fields) -end - -"""Internal use only, this value is used to indicate a required value -is not specified. -""" -struct __Undefined -end - -function _defstruct_impl(is_immutable, name, fields) - if isa(fields, Expr) && fields.head == :tuple - fields = fields.args - else - fields = [fields] - end - @assert length(fields) > 0 - - if isa(name, Symbol) - name = esc(name) - super_name = :Any - else - @assert(isa(name, Expr) && name.head == :(<:) && length(name.args) == 2 && - isa(name.args[1], Symbol) && isa(name.args[2], Symbol), - "name must be of form 'Name <: SuperType'") - - super_name = esc(name.args[2]) - name = esc(name.args[1]) - end - - field_defs = Vector{Expr}(undef, length(fields)) # :(field2 :: Int) - field_names = Vector{Expr}(undef, length(fields)) # :field2 - field_defaults = Vector{Expr}(undef, length(fields)) # :(field2 = 0) - field_types = Vector{Expr}(undef, length(fields)) # Int - field_asserts = Vector{Expr}(undef, length(fields)) # :(field2 >= 0) - required_field = Symbol[] - - for i = 1:length(fields) - field = fields[i] - if field.head == :tuple - field_asserts[i] = esc(field.args[2]) - field = field.args[1] - end - if field.head == :(=) - fname = field.args[1].args[1] - field_defs[i] = esc(field.args[1]) - field_names[i] = esc(fname) - field_types[i] = esc(field.args[1].args[2]) - field_defaults[i] = Expr(:kw, fname, esc(field.args[2])) - else - # no default value provided, required field - fname = field.args[1] - field_defs[i] = esc(field) - field_names[i] = esc(fname) - field_types[i] = esc(field.args[2]) - field_defaults[i] = Expr(:kw, fname, __Undefined()) - push!(required_field, fname) - end - end - - # body of layer type, defining fields - type_body = Expr(:block, field_defs...) - - # constructor - requires = map(required_field) do fname - :(@assert(!isa($fname, __Undefined), "value for " * string($fname) * " is required")) - end - converts = map(zip(field_names, field_types)) do param - f_name, f_type = param - :($f_name = convert($f_type, $f_name)) - end - asserts = map(filter(i -> isassigned(field_asserts,i), 1:length(fields))) do i - :(@assert($(field_asserts[i]))) - end - construct = Expr(:call, name, field_names...) 
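-  # assemble the keyword constructor: assert that every required field was
-  # supplied, convert each value to its declared field type, run the
-  # per-field validation asserts, then invoke the inner constructor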
- ctor_body = Expr(:block, requires..., converts..., asserts..., construct) - ctor_def = Expr(:call, name, Expr(:parameters, field_defaults...)) - ctor = Expr(:(=), ctor_def, ctor_body) - - if is_immutable - quote - struct $(name) <: $(super_name) - $type_body - end - - $ctor - end - else - quote - mutable struct $(name) <: $(super_name) - $type_body - end - - $ctor - end - end -end diff --git a/julia/src/broadcast.jl b/julia/src/broadcast.jl deleted file mode 100644 index 7c68fab5007c..000000000000 --- a/julia/src/broadcast.jl +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -struct NDArrayStyle{N} <: Broadcast.AbstractArrayStyle{N} end -NDArrayStyle(::Val{N}) where N = NDArrayStyle{N}() -NDArrayStyle{M}(::Val{N}) where {N,M} = NDArrayStyle{N}() - -# Determin the output type -Base.BroadcastStyle(::Type{<:NDArray{T,N}}) where {T,N} = NDArrayStyle{N}() - -Base.broadcastable(x::NDArray) = x - -# Make it non-lazy -broadcasted(f, x::NDArray, args...) = f(x, args...) -broadcasted(f, y, x::NDArray, args...) = f(y, x, args...) -broadcasted(f, x::NDArray{T,N}, y::NDArray{T,N}, args...) where {T,N} = - f(x, y, args...) diff --git a/julia/src/callback.jl b/julia/src/callback.jl deleted file mode 100644 index 39d8f2552035..000000000000 --- a/julia/src/callback.jl +++ /dev/null @@ -1,160 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -""" - AbstractCallback - -Abstract type of callback functions used in training. -""" -abstract type AbstractCallback end - -""" - AbstractBatchCallback - -Abstract type of callbacks to be called every mini-batch. -""" -abstract type AbstractBatchCallback <: AbstractCallback end - -""" - AbstractEpochCallback - -Abstract type of callbacks to be called every epoch. 
-""" -abstract type AbstractEpochCallback <: AbstractCallback end - -mutable struct BatchCallback <: AbstractBatchCallback - frequency :: Int - call_on_0 :: Bool - callback :: Function -end - -""" - every_n_batch(callback :: Function, n :: Int; call_on_0 = false) - -A convenient function to construct a callback that runs every `n` mini-batches. - -# Arguments -* `call_on_0::Bool`: keyword argument, default false. Unless set, the callback - will *not* be run on batch 0. - -For example, the [`speedometer`](@ref) callback is defined as - -```julia -every_n_batch(frequency, call_on_0=true) do state :: OptimizationState - if state.curr_batch == 0 - # reset timer - else - # compute and print speed - end -end -``` - -See also [`every_n_epoch`](@ref) and [`speedometer`](@ref). -""" -function every_n_batch(callback::Function, n::Int; call_on_0::Bool = false) - BatchCallback(n, call_on_0, callback) -end -function (cb :: BatchCallback)(state :: OptimizationState) - if state.curr_batch == 0 - if cb.call_on_0 - cb.callback(state) - end - elseif state.curr_batch % cb.frequency == 0 - cb.callback(state) - end -end - -""" - speedometer(;frequency=50) - -Create an `AbstractBatchCallback` that measure the training speed - (number of samples processed per second) every k mini-batches. - -# Arguments -* `frequency::Int`: keyword argument, default 50. The frequency (number of - min-batches) to measure and report the speed. -""" -function speedometer(;frequency::Int = 50) - cl_tic = 0 - every_n_batch(frequency, call_on_0 = true) do state::OptimizationState - if state.curr_batch == 0 - # reset timer - cl_tic = time() - else - speed = frequency * state.batch_size / (time() - cl_tic) - @info(format("Speed: {1:>6.2f} samples/sec", speed)) - cl_tic = time() - end - end -end - - -mutable struct EpochCallback <: AbstractEpochCallback - frequency :: Int - call_on_0 :: Bool - callback :: Function -end - -""" - every_n_epoch(callback :: Function, n :: Int; call_on_0 = false) - -A convenient function to construct a callback that runs every `n` full data-passes. - -* `call_on_0::Bool`: keyword argument, default false. Unless set, the callback - will *not* be run on epoch 0. Epoch 0 means no training has been performed - yet. This is useful if you want to inspect the randomly initialized model - that has not seen any data yet. - -See also [`every_n_batch`](@ref). -""" -every_n_epoch(callback::Function, n::Int; call_on_0::Bool = false) = - EpochCallback(n, call_on_0, callback) - -function (cb::EpochCallback)(model::Any, state::OptimizationState, - metric::Vector{Tuple{Symbol, T}}) where T<:Real - if state.curr_epoch == 0 - if cb.call_on_0 - cb.callback(model, state, metric) - end - elseif state.curr_epoch % cb.frequency == 0 - cb.callback(model, state, metric) - end -end - -""" - do_checkpoint(prefix; frequency=1, save_epoch_0=false) - -Create an `AbstractEpochCallback` that save checkpoints of the model to disk. -The checkpoints can be loaded back later on. - -# Arguments -* `prefix::AbstractString`: the prefix of the filenames to save the model. - The model architecture will be saved to prefix-symbol.json, - while the weights will be saved to prefix-0012.params, - for example, for the 12-th epoch. -* `frequency::Int`: keyword argument, default is 1. - The frequency (measured in epochs) to save checkpoints. -* `save_epoch_0::Bool`: keyword argument, default false. Whether we should save a - checkpoint for epoch 0 (model initialized but not seen any data yet). 
-""" -function do_checkpoint(prefix::AbstractString; - frequency::Int = 1, save_epoch_0::Bool = false) - mkpath(dirname(prefix)) - every_n_epoch(frequency, call_on_0=save_epoch_0) do model, state, metric - save_checkpoint(model, prefix, state) - end -end diff --git a/julia/src/context.jl b/julia/src/context.jl deleted file mode 100644 index aac2ab2ce233..000000000000 --- a/julia/src/context.jl +++ /dev/null @@ -1,203 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -@enum CONTEXT_TYPE CPU=1 GPU=2 CPU_PINNED=3 - -Base.convert(::Type{CONTEXT_TYPE}, x::Integer) = CONTEXT_TYPE(x) - -""" - Context(dev_type, dev_id) - -A context describes the device type and id on which computation should be carried on. -""" -struct Context - device_type::CONTEXT_TYPE - device_id::Int - - Context(dev_type::CONTEXT_TYPE, dev_id::Integer = 0) = new(dev_type, dev_id) -end - -const _default_ctx = Ref{Context}(Context(CPU, 0)) - -Context(dev_type::Integer, dev_id::Integer = 0) = - Context(convert(CONTEXT_TYPE, dev_type), dev_id) - -Base.show(io::IO, ctx::Context) = - print(io, lowercase("$(ctx.device_type)$(ctx.device_id)")) - -function _with_context(dev_type::Union{Symbol,Expr}, dev_id, e::Expr) - global _default_ctx - quote - ctx = current_context() - ctx′ = Context($(esc(dev_type)), $(esc(dev_id))) - $_default_ctx[] = ctx′ - try - return $(esc(e)) - finally - $_default_ctx[] = ctx - end - end -end - -""" - @context device_type [device_id] expr - -Change the default context in the following expression. - -# Examples -```jl-repl -julia> mx.@context mx.GPU begin - mx.zeros(2, 3) - end -2×3 NDArray{Float32,2} @ gpu0: - 0.0f0 0.0f0 0.0f0 - 0.0f0 0.0f0 0.0f0 - -julia> @context mx.GPU mx.zeros(3, 2) -3×2 NDArray{Float32,2} @ gpu0: - 0.0f0 0.0f0 - 0.0f0 0.0f0 - 0.0f0 0.0f0 -``` -""" -macro context(dev_type, e::Expr) - _with_context(dev_type, 0, e) -end - -macro context(dev_type, dev_id, e::Expr) - _with_context(dev_type, dev_id, e) -end - -for dev ∈ [:cpu, :gpu] - ctx = QuoteNode(Symbol(uppercase(string(dev)))) - docstring = """ - @$dev [device_id] expr - - A shorthand for `@context mx.GPU`. - - # Examples - ```jl-repl - julia> mx.@with_gpu mx.zeros(2, 3) - 2×3 NDArray{Float32,2} @ gpu0: - 0.0f0 0.0f0 0.0f0 - 0.0f0 0.0f0 0.0f0 - ``` - """ - @eval begin - @doc $docstring -> - macro $dev(e::Expr) - ctx = $ctx - quote - @context $ctx $(esc(e)) - end - end - - macro $dev(dev_id, e::Expr) - ctx = $ctx - quote - @context $ctx $(esc(dev_id)) $(esc(e)) - end - end - end -end # for dev ∈ [:cpu, :gpu] - -""" - cpu(dev_id) - -Get a CPU context with a specific id. `cpu()` is usually the default context for many -operations when no context is specified. - -# Arguments -* `dev_id::Integer = 0`: the CPU id. 
-""" -cpu(dev_id::Integer = 0) = Context(CPU, dev_id) - -""" - gpu(dev_id) - -Get a GPU context with a specific id. The K GPUs on a node is typically numbered as 0,...,K-1. - -# Arguments -* `dev_id::Integer = 0` the GPU device id. -""" -gpu(dev_id::Integer = 0) = Context(GPU, dev_id) - -""" - num_gpus() - -Query CUDA for the number of GPUs present. -""" -function num_gpus() - n = Ref{Cint}() - @mxcall :MXGetGPUCount (Ref{Cint},) n - n[] -end - -""" - empty_cache(ctx::Context = current_context()) - -Empties the memory cache for the current contexts device. -MXNet utilizes a memory pool to avoid excessive allocations. -Calling empty_cache will empty the memory pool of the contexts -device. This will only free the memory of the unreferenced data. -""" -function empty_cache(ctx::Context = current_context()) - @mxcall :MXStorageEmptyCache (Cint, Cint) ctx.device_type ctx.device_id - ctx -end - -""" - gpu_memory_info(dev_id = 0)::Tuple{UInt64,UInt64} - -Query CUDA for the free and total bytes of GPU global memory. -It returns a tuple of `(free memory, total memory)`. - -```julia-repl -julia> mx.gpu_memory_info() -(0x00000003af240000, 0x00000003f9440000) -``` -""" -function gpu_memory_info(dev_id = 0) - free = Ref{UInt64}() - n = Ref{UInt64}() - @mxcall :MXGetGPUMemoryInformation64 (Cint, Ref{UInt64}, Ref{UInt64}) dev_id free n - free[], n[] -end - -""" - current_context() - -Return the current context. - -By default, `mx.cpu()` is used for all the computations -and it can be overridden by using the `@context` macro. - -# Examples -```jl-repl -julia> mx.current_context() -cpu0 - -julia> mx.@context mx.GPU 1 begin # Context changed in the following code block - mx.current_context() - end -gpu1 - -julia> mx.current_context() -cpu0 -``` -""" -current_context() = _default_ctx[] diff --git a/julia/src/deprecated.jl b/julia/src/deprecated.jl deleted file mode 100644 index 7c49b66b14b1..000000000000 --- a/julia/src/deprecated.jl +++ /dev/null @@ -1,200 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-
-# NDArray reshape (#272)
-@deprecate reshape(arr::NDArray; shape=()) reshape(arr, shape)
-@deprecate Reshape(arr::NDArray; shape=()) reshape(arr, shape)
-
-# SymbolicNode reshape (#279)
-@deprecate reshape(sym::SymbolicNode; shape=()) reshape(sym, shape)
-@deprecate Reshape(sym::SymbolicNode; shape=()) reshape(sym, shape)
-
-# srand (#282)
-@deprecate srand!(seed_state::Int) srand(seed_state)
-
-# v0.4
-@deprecate sin(x::NDArray) sin.(x)
-@deprecate cos(x::NDArray) cos.(x)
-@deprecate tan(x::NDArray) tan.(x)
-@deprecate arcsin(x::NDArray) asin.(x)
-@deprecate arccos(x::NDArray) acos.(x)
-@deprecate arctan(x::NDArray) atan.(x)
-
-@deprecate sinh(x::NDArray) sinh.(x)
-@deprecate cosh(x::NDArray) cosh.(x)
-@deprecate tanh(x::NDArray) tanh.(x)
-@deprecate arcsinh(x::NDArray) asinh.(x)
-@deprecate arccosh(x::NDArray) acosh.(x)
-@deprecate arctanh(x::NDArray) atanh.(x)
-
-# `@deprecate` would have exported `randn` accidentally,
-# so we emit the deprecation warning manually
-function randn(μ, σ, dims::NTuple{N,Int}, ctx::Context = cpu()) where N
-  @warn("mx.randn(μ, σ, dims, ctx = cpu()) is deprecated, use " *
-        "mx.randn(dims...; μ = μ, σ = σ, context = ctx) instead.")
-  mx.randn(dims...; μ = μ, σ = σ, context = ctx)
-end
-
-function randn!(μ, σ, x::NDArray)
-  @warn("mx.randn!(μ, σ, x::NDArray) is deprecated, use " *
-        "mx.randn!(x; μ = μ, σ = σ) instead.")
-  randn!(x; μ = μ, σ = σ)
-end
-
-function rand!(low::Real, high::Real, x::NDArray)
-  @warn("rand!(low, high, x::NDArray) is deprecated, use " *
-        "rand!(x, low = low, high = high) instead.")
-  rand!(x, low = low, high = high)
-end
-
-function rand(low::Real, high::Real, dims::NTuple{N,Int}, context::Context = cpu()) where N
-  @warn("rand(low, high, dims, context = cpu()) is deprecated, use " *
-        "rand(dims...; low = low, high = high, context = context) instead.")
-  rand(dims...; low = low, high = high, context = context)
-end
-
-@deprecate sigmoid(x::NDArray) sigmoid.(x)
-@deprecate relu(x::NDArray) relu.(x)
-@deprecate softmax(x::NDArray; axis = ndims(x)) softmax.(x, axis)
-@deprecate log_softmax(x::NDArray; axis = ndims(x)) log_softmax.(x, axis)
-
-function broadcast_plus(x::NDArray, y::NDArray)
-  @warn("broadcast_plus(x, y) is deprecated, use x .+ y instead.")
-  x .+ y
-end
-
-function broadcast_add(x::NDArray, y::NDArray)
-  @warn("broadcast_add(x, y) is deprecated, use x .+ y instead.")
-  x .+ y
-end
-
-function broadcast_sub(x::NDArray, y::NDArray)
-  @warn("broadcast_sub(x, y) is deprecated, use x .- y instead.")
-  x .- y
-end
-
-function broadcast_minus(x::NDArray, y::NDArray)
-  @warn("broadcast_minus(x, y) is deprecated, use x .- y instead.")
-  x .- y
-end
-
-function broadcast_mul(x::NDArray, y::NDArray)
-  @warn("broadcast_mul(x, y) is deprecated, use x .* y instead.")
-  x .* y
-end
-
-function broadcast_div(x::NDArray, y::NDArray)
-  @warn("broadcast_div(x, y) is deprecated, use x ./ y instead.")
-  x ./ y
-end
-
-function broadcast_mod(x::NDArray, y::NDArray)
-  @warn("broadcast_mod(x, y) is deprecated, use x .% y instead.")
-  x .% y
-end
-
-function broadcast_power(x::NDArray, y::NDArray)
-  @warn("broadcast_power(x, y) is deprecated, use x.^y instead.")
-  x.^y
-end
-
-function broadcast_equal(x::NDArray, y::NDArray)
-  @warn("broadcast_equal(x, y) is deprecated, use x .== y instead.")
-  x .== y
-end
-
-function broadcast_not_equal(x::NDArray, y::NDArray)
-  @warn("broadcast_not_equal(x, y) is deprecated, use x .!= y instead.")
-  x .!= y
-end
-
-function broadcast_greater(x::NDArray, y::NDArray)
-  @warn("broadcast_greater(x, y) is deprecated, use x .> y instead.")
-  x .> y
-end
-
-function broadcast_greater_equal(x::NDArray, y::NDArray)
-  @warn("broadcast_greater_equal(x, y) is deprecated, use x .>= y instead.")
-  x .>= y
-end
-
-function broadcast_lesser(x::NDArray, y::NDArray)
-  @warn("broadcast_lesser(x, y) is deprecated, use x .< y instead.")
-  x .< y
-end
-
-function broadcast_lesser_equal(x::NDArray, y::NDArray)
-  @warn("broadcast_lesser_equal(x, y) is deprecated, use x .<= y instead.")
-  x .<= y
-end
-
-function broadcast_maximum(x::NDArray, y::NDArray)
-  @warn("broadcast_maximum(x, y) is deprecated, use max.(x, y) instead.")
-  max.(x, y)
-end
-
-function broadcast_minimum(x::NDArray, y::NDArray)
-  @warn("broadcast_minimum(x, y) is deprecated, use min.(x, y) instead.")
-  min.(x, y)
-end
-
-function broadcast_hypot(x::NDArray, y::NDArray)
-  @warn("broadcast_hypot(x, y) is deprecated, use hypot.(x, y) instead.")
-  hypot.(x, y)
-end
-
-# Introduced by https://github.com/apache/incubator-mxnet/pull/12845
-import Base: sum, maximum, minimum, prod, cat
-@deprecate sum(x::NDArray, dims) sum(x, dims = dims)
-@deprecate maximum(x::NDArray, dims) maximum(x, dims = dims)
-@deprecate minimum(x::NDArray, dims) minimum(x, dims = dims)
-@deprecate prod(x::NDArray, dims) prod(x, dims = dims)
-@deprecate cat(dims, As::NDArray{T}...) where T cat(As..., dims = dims)
-
-import Statistics: mean
-@deprecate mean(x::NDArray, dims) mean(x, dims = dims)
-
-# replaced by UndefInitializer
-function empty(::Type{T}, dims::NTuple{N,Int}, ctx::Context = cpu()) where {N,T<:DType}
-  @warn("`mx.empty(T, dims, ctx)` is deprecated, " *
-        "use `NDArray{T,N}(undef, dims; ctx = ctx)` instead.")
-  NDArray{T,N}(undef, dims; ctx = ctx)
-end
-
-function empty(::Type{T}, dims::Int...) where {T<:DType}
-  @warn("`mx.empty(T, dims...)` is deprecated, " *
-        "use `NDArray{T,N}(undef, dims...)` instead.")
-  NDArray{T,length(dims)}(undef, dims...)
-end
-
-function empty(dims::NTuple{N,Int}, ctx::Context = cpu()) where N
-  @warn("`mx.empty(dims, ctx)` is deprecated, " *
-        "use `NDArray(undef, dims; ctx = ctx)` instead.")
-  NDArray(undef, dims; ctx = ctx)
-end
-
-function empty(dims::Int...)
-  @warn("`mx.empty(dims...)` is deprecated, " *
-        "use `NDArray(undef, dims...)` instead.")
-  NDArray(undef, dims...)
-end
-
-# replaced by Base.clamp
-@deprecate clip(x::NDArray, lo::Real, hi::Real) clamp(x, lo, hi)
-@deprecate clip!(x::NDArray, lo::Real, hi::Real) clamp!(x, lo, hi)
-@deprecate clip(x; a_min = 0, a_max = 0) clamp(x, a_min, a_max)
-
diff --git a/julia/src/exceptions.jl b/julia/src/exceptions.jl
deleted file mode 100644
index 9faa2f8c189e..000000000000
--- a/julia/src/exceptions.jl
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-"""
-Exception thrown when an error occurs while calling the MXNet API.
-""" -abstract type AbstractMXError <: Exception end - -"General MXNet API error" -struct MXError <: AbstractMXError - msg::String - - MXError(s::AbstractString) = new(string(s)) -end - -Base.show(io::IO, e::AbstractMXError) = print(io, e.msg) diff --git a/julia/src/executor.jl b/julia/src/executor.jl deleted file mode 100644 index 7f6c2bb5aa58..000000000000 --- a/julia/src/executor.jl +++ /dev/null @@ -1,254 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import Base: bind - -""" - Executor - -An executor is a realization of a symbolic architecture defined by a `SymbolicNode`. -The actual forward and backward computation specified by the network architecture can -be carried out with an executor. -""" -mutable struct Executor - handle :: MX_ExecutorHandle - symbol :: SymbolicNode - arg_arrays :: VecOfNDArray - grad_arrays :: Vector{Union{Cvoid,<:NDArray}} - aux_arrays :: VecOfNDArray - outputs :: VecOfNDArray - arg_dict :: Dict{Symbol} - aux_dict :: Dict{Symbol} -end - -function Executor(hdl::MX_ExecutorHandle, sym::SymbolicNode, - arg_arrays::VecOfNDArray, grad_arrays::AbstractVector, - aux_arrays::VecOfNDArray) - # get output arrays - ref_size = Ref{MX_uint}(0) - ref_hdls = Ref{Ptr{MX_handle}}(C_NULL) - @mxcall(:MXExecutorOutputs, (MX_handle, Ref{MX_uint}, Ref{Ptr{MX_handle}}), - hdl, ref_size, ref_hdls) - out_hdrs = unsafe_wrap(Array, ref_hdls[], ref_size[]) - out_arrays = [NDArray(MX_NDArrayHandle(x)) for x in out_hdrs] - - arg_names = list_arguments(sym) - @assert(length(arg_names) == length(unique(arg_names)), "Duplicated names in arguments: $arg_names") - arg_dict = Dict(zip(arg_names, arg_arrays)) - - aux_names = list_auxiliary_states(sym) - @assert(length(aux_names) == length(unique(aux_names)), "Duplicated names in auxiliary states: $aux_names") - aux_dict = Dict(zip(aux_names, aux_arrays)) - - Executor(hdl, sym, arg_arrays, grad_arrays, aux_arrays, out_arrays, arg_dict, aux_dict) -end - -Base.unsafe_convert(::Type{MX_handle}, obj::Executor) = - Base.unsafe_convert(MX_handle, obj.handle) -Base.convert(t::Type{MX_handle}, obj::Executor) = Base.unsafe_convert(t, obj) -Base.cconvert(t::Type{MX_handle}, obj::Executor) = Base.unsafe_convert(t, obj) - -function _get_ndarray_inputs(arg_key::AbstractString, args::VecOfNDArray, - arg_names::Vector{Symbol}, allow_missing::Bool) - @assert(length(args) == length(arg_names), "Length of $arg_key does not match number of arguments") - return (MX_handle[args...], args) -end - -function _get_ndarray_inputs(arg_key::AbstractString, args::Dict{Symbol}, - arg_names::Vector{Symbol}, allow_missing::Bool) - args_vec = map(arg_names) do name - arr = get(args, name, nothing) - if !allow_missing - @assert(!isa(arr, Cvoid), "Must specify all arguments in $arg_key ($name is missing)") - end - arr - end - # help 
the type inference - if allow_missing - args_vec = Union{NDArray,Cvoid}[args_vec...] - else - args_vec = NDArray[args_vec...] - end - args_hdr = MX_handle[(isa(x,Cvoid) ? MX_handle(0) : x) for x in args_vec] - return (args_hdr, args_vec) -end - -""" - bind(sym, ctx, args; args_grad=Dict(), aux_states=Dict(), grad_req=GRAD_WRITE) - -Create an `Executor` by binding a `SymbolicNode` to concrete `NDArray`. - -# Arguments -* `sym::SymbolicNode`: the network architecture describing the computation graph. -* `ctx::Context`: the context on which the computation should run. -* `args`: either a list of `NDArray` or a dictionary of name-array pairs. Concrete - arrays for all the inputs in the network architecture. The inputs typically include - network parameters (weights, bias, filters, etc.), data and labels. - See [`list_arguments`](@ref) and [`infer_shape`](@ref). -* `args_grad`: a `Vector` of `NDArray` or a `Dict` contains `NDArray` -* `aux_states`: a `Vector` of `NDArray` or a `Dict` contains `NDArray` -* `grad_req`: single value, a `Vector` of `GRAD_REQ` or a `Dict{Symbol,GRAD_REQ}` -""" -function bind(self::SymbolicNode, ctx::Context, args; - args_grad = Dict{Symbol,NDArray}(), - aux_states = Dict{Symbol,NDArray}(), - grad_req = GRAD_WRITE) - - arg_names = list_arguments(self) - - args_hdr, args = _get_ndarray_inputs("args", args, arg_names, false) - args_grad_hdr, args_grad = _get_ndarray_inputs("args_grad", args_grad, arg_names, true) - aux_args_hdr, aux_states = _get_ndarray_inputs("aux_states", aux_states, list_auxiliary_states(self), false) - - if isa(grad_req, GRAD_REQ) - reqs = MX_uint[MX_uint(grad_req) for i=1:length(args)] - elseif isa(grad_req, Vector{GRAD_REQ}) - @assert(length(grad_req) == length(args)) - reqs = MX_uint[MX_uint.(grad_req)...] - elseif isa(grad_req, Dict{Symbol, GRAD_REQ}) - reqs = MX_uint[MX_uint(get(grad_req, name, GRAD_NOP)) for name in arg_names] - end - - ref_hdr = Ref{MX_handle}(0) - @mxcall(:MXExecutorBind, - (MX_handle, Cint, Cint, MX_uint, Ptr{MX_handle}, Ptr{MX_handle}, Ptr{MX_uint}, - MX_uint, Ptr{MX_handle}, Ref{MX_handle}), - self, ctx.device_type, ctx.device_id, length(args), args_hdr, - args_grad_hdr, reqs, length(aux_states), aux_args_hdr, ref_hdr) - args_grad = convert(Vector{Union{Cvoid,NDArray}}, args_grad) - executor = Executor(MX_ExecutorHandle(ref_hdr[]), self, - args, args_grad, aux_states) -end - -function bind(x::SymbolicNode; context::Context = cpu(), kwargs...) - kwargs = Dict(kwargs) - @assert(haskey(kwargs, :args), "Must specify args") - args = pop!(kwargs, :args) - bind(x, context, args; kwargs...) -end - -function simple_bind(self::SymbolicNode, ctx::Context; - grad_req::Union{GRAD_REQ,Dict{Symbol,GRAD_REQ}} = GRAD_WRITE, - kwargs...) - arg_shapes, out_shapes, aux_shapes = infer_shape(self; kwargs...) 
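-  # `infer_shape` propagates the shapes passed as keyword arguments (for
-  # example `simple_bind(net, ctx, data = (28, 28, 1, 100))`) through the
-  # graph; it returns `nothing` entries when the given information is not
-  # sufficient to determine every shape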
- @assert(!isa(arg_shapes, Cvoid), "Information not enough to perform complete shape inference") - - arg_arrays = NDArray[zeros(shape, ctx) for shape in arg_shapes] - arg_names = list_arguments(self) - - grad_arrays = Dict{Symbol,NDArray}() - - if grad_req != GRAD_NOP - shapes = zip(arg_names, arg_shapes) - - # if not in provided data, should be parameters - provided_data_names = [x[1] for x in kwargs] - shapes = filter(x -> !in(x[1], provided_data_names), shapes) - - # Remove all gradients for nop params - # if isa(grad_req, Dict{Symbol, GRAD_REQ}) - # shapes = filter(x -> grad_req[x[1]] != GRAD_NOP,shapes) - # end - - for (name, shape) in shapes - grad_arrays[name] = zeros(shape, ctx) - end - end - - aux_arrays = NDArray[zeros(shape, ctx) for shape in aux_shapes] - return bind(self, ctx, arg_arrays, args_grad=grad_arrays, grad_req=grad_req, aux_states=aux_arrays) -end - - -function forward(self::Executor; is_train::Bool = false, kwargs...) - for (k,v) in kwargs - @assert(k ∈ keys(self.arg_dict), "Unknown argument $k") - @assert(isa(v, NDArray), "Keyword argument $k must be an NDArray") - copy!(self.arg_dict[k], v) - end - - @mxcall(:MXExecutorForward, (MX_handle, Cint), self, is_train) - - self.outputs -end - -backward(x::Executor) = backward(x, NDArray[]) -backward(x::Executor, out_grad::NDArray) = backward(x, [out_grad]) -backward(x::Executor, out_grads::VecOfNDArray) = - @mxcall(:MXExecutorBackward, (MX_handle, MX_uint, Ptr{MX_handle}), - x, length(out_grads), MX_handle[out_grads...]) - -function copy_params_from(self::Executor, arg_params::Dict{Symbol}, - aux_params::Dict{Symbol} = Dict{Symbol,Any}(); - allow_extra_params::Bool = false) - for (name, array) in arg_params - if haskey(self.arg_dict, name) - copy!(self.arg_dict[name], array) - else - @assert(allow_extra_params, "Extra params $name not in the arguments") - end - end - - for (name, array) in aux_params - if haskey(self.aux_dict, name) - copy!(self.aux_dict[name], array) - else - @assert(allow_extra_params, "Extra auxiliary state $name not recognized") - end - end -end - - -Base.show(io::IO, x::Executor) = - print(io, "mx.", split(string(typeof(x)), '.')[end], " ", x.handle.value) - -""" - print([io::IO], x::Executor) - -Get a debug string about internal execution plan. - -Can be used to get an estimated about the memory cost. - -```julia -julia> x = mx.Variable(:x) -MXNet.mx.SymbolicNode x - -julia> exec = mx.bind(x + 1, mx.cpu(), Dict(:x => mx.ones(2,3))) -mx.Executor Ptr{Nothing} @0x000055c3dee9eb30 - -julia> print(exec) -Symbol Outputs: - output[0]=_plus_scalar0(0) -Variable:x --------------------- -Op:_plus_scalar, Name=_plus_scalar0 -Inputs: - arg[0]=x(0) version=0 -Attrs: - scalar=1.00000000e+00 -Total 0 MB allocated -Total 11 TempSpace resource requested -``` -""" -Base.print(io::IO, x::Executor) = print(io, debug_str(x)) -Base.print(x::Executor) = print(stdout, x) - -function debug_str(x::Executor) - s_ref = Ref{Cstring}(C_NULL) - @mxcall(:MXExecutorPrint, (MX_handle, Ptr{Cstring}), x.handle, s_ref) - unsafe_string(s_ref[]) -end diff --git a/julia/src/initializer.jl b/julia/src/initializer.jl deleted file mode 100644 index d4b35c0ce8bc..000000000000 --- a/julia/src/initializer.jl +++ /dev/null @@ -1,197 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""
-    AbstractInitializer
-
-The abstract base type for all initializers.
-
-To define a new initializer, it is enough to derive a new type, and implement
-one or more of the following methods:
-
-    _init_weight(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-    _init_bias(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-    _init_gamma(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-    _init_beta(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-
-Or, if full behavior customization is needed, override the following function
-
-    init(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-"""
-abstract type AbstractInitializer end
-
-function init(self :: T, name :: Base.Symbol, array :: NDArray) where T<:AbstractInitializer
-  strname = string(name)
-  if startswith(strname, "upsampling")
-    _init_bilinear(self, name, array)
-  elseif startswith(strname, "stn_loc") && endswith(strname, "weight")
-    _init_zero(self, name, array)
-  elseif startswith(strname, "stn_loc") && endswith(strname, "bias")
-    _init_loc_bias(self, name, array)
-  elseif endswith(strname, "bias")
-    _init_bias(self, name, array)
-  elseif endswith(strname, "gamma")
-    _init_gamma(self, name, array)
-  elseif endswith(strname, "beta")
-    _init_beta(self, name, array)
-  elseif endswith(strname, "weight")
-    _init_weight(self, name, array)
-  elseif endswith(strname, "moving_mean")
-    _init_zero(self, name, array)
-  elseif endswith(strname, "moving_var")
-    _init_zero(self, name, array)
-  else
-    _init_default(self, name, array)
-  end
-end
-
-function _init_loc_bias(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-  @assert size(array) == (6,)
-  array[:] = [1.0, 0, 0, 0, 1.0, 0]
-end
-
-function _init_bilinear(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-  @assert ndims(array) == 4
-
-  W, H, C, N = size(array) # Inverse of NCHW layout
-  filter = Base.zeros(eltype(array), W, H)
-
-  @assert H == W
-
-  f = ceil(Int, W / 2) # factor
-  c = (2 * f - 1 - f % 2) / (2 * f) # center
-  for x in 0:(W-1)
-    for y in 0:(H-1)
-      filter[x+1, y+1] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
-    end
-  end
-
-  @nd_as_jl rw=array begin
-    for i in 1:N
-      for j in 1:C
-        array[:, :, j, i] = filter
-      end
-    end
-  end
-end
-
-function _init_bias(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-  array[:] = 0
-end
-function _init_gamma(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-  array[:] = 1
-end
-function _init_beta(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-  array[:] = 0
-end
-function _init_zero(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-  array[:] = 0
-end
-
-function _init_default(self :: AbstractInitializer, name :: Base.Symbol, array :: NDArray)
-  error("Do not know how to init $name")
-end
-
-"""
-    UniformInitializer
-
-Initialize weights according to a uniform distribution within the provided scale.
within the provided scale.
-"""
-struct UniformInitializer <: AbstractInitializer
-  scale :: AbstractFloat
-end
-"""
-    UniformInitializer(scale=0.07)
-
-Construct a `UniformInitializer` with the specified scale.
-"""
-UniformInitializer() = UniformInitializer(0.07)
-
-_init_weight(i::UniformInitializer, name::Symbol, x::NDArray) =
-  rand!(x, low = -i.scale, high = i.scale)
-
-"""
-    NormalInitializer
-
-Initialize weights according to a univariate Gaussian distribution.
-"""
-struct NormalInitializer <: AbstractInitializer
-  μ :: AbstractFloat
-  σ :: AbstractFloat
-end
-"""
-    NormalInitializer(; mu=0, sigma=0.01)
-
-Construct a `NormalInitializer` with mean `mu` and standard deviation `sigma`.
-"""
-NormalInitializer(; mu=0, sigma=0.01) = NormalInitializer(mu, sigma)
-
-_init_weight(i::NormalInitializer, name::Symbol, x::NDArray) =
-  randn!(x, μ = i.μ, σ = i.σ)
-
-@enum XavierDistribution xv_uniform xv_normal
-@enum XavierRegularization xv_avg xv_in xv_out
-
-
-"""
-    XavierInitializer
-
-The initializer documented in the paper [Bengio and Glorot 2010]: *Understanding
-the difficulty of training deep feedforward neural networks*.
-
-There are several different versions of the XavierInitializer used in the wild.
-The general idea is that the variance of the initialization distribution is controlled
-by the dimensionality of the input and output. As a distribution one can either choose
-a normal distribution with mean μ = 0 and variance σ², or a uniform distribution on [-σ, σ].
-
-Several different ways of calculating the variance are given in the literature or are
-used by various libraries.
-
-* [Bengio and Glorot 2010]: `mx.XavierInitializer(distribution = mx.xv_uniform, regularization = mx.xv_avg, magnitude = 1)`
-* [K. He, X. Zhang, S. Ren, and J. Sun 2015]: `mx.XavierInitializer(distribution = mx.xv_normal, regularization = mx.xv_in, magnitude = 2)`
-* caffe_avg: `mx.XavierInitializer(distribution = mx.xv_uniform, regularization = mx.xv_avg, magnitude = 3)`
-"""
-struct XavierInitializer <: AbstractInitializer
-  distribution :: XavierDistribution
-  regularization :: XavierRegularization
-  magnitude :: Float64
-end
-
-XavierInitializer(; distribution = xv_uniform, regularization = xv_avg, magnitude = 3.0) =
-  XavierInitializer(distribution, regularization, magnitude)
-
-function _init_weight(self :: XavierInitializer, name :: Base.Symbol, array :: NDArray)
-  dims = size(array)
-  fan_in = prod(dims[2:end])
-  fan_out = dims[1]
-
-  if self.regularization == xv_avg
-    factor = (fan_in + fan_out) / 2
-  elseif self.regularization == xv_in
-    factor = fan_in
-  elseif self.regularization == xv_out
-    factor = fan_out
-  end
-
-  σ = √(self.magnitude / factor)
-
-  if self.distribution == xv_uniform
-    rand!(array, low = -σ, high = σ)
-  elseif self.distribution == xv_normal
-    randn!(array; μ = 0.0, σ = σ)
-  end
-end
diff --git a/julia/src/io.jl b/julia/src/io.jl
deleted file mode 100644
index 6309f7ecd3f9..000000000000
--- a/julia/src/io.jl
+++ /dev/null
@@ -1,644 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.
You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""
-    AbstractDataProvider
-
-The root type for all data providers. A data provider should implement the following interfaces:
-
-* [`get_batch_size`](@ref)
-* [`provide_data`](@ref)
-* [`provide_label`](@ref)
-
-As well as the Julia iterator interface (see
-[the Julia manual](https://docs.julialang.org/en/v1/manual/interfaces/#man-interface-iteration-1)).
-Normally this involves defining:
-
-* `Base.eltype(provider) -> AbstractDataBatch`
-* `Base.iterate(provider[, state]) -> (AbstractDataBatch, AbstractDataProvider)`
-"""
-abstract type AbstractDataProvider end
-
-"""
-    get_batch_size(provider) -> Int
-
-# Arguments:
-* `provider::AbstractDataProvider`: the data provider.
-
-Returns the mini-batch size of the provided data. All the provided data should have the same mini-batch size (i.e. the last dimension).
-"""
-get_batch_size
-
-"""
-    provide_data(provider) -> Vector{Tuple{Base.Symbol, Tuple}}
-
-# Arguments:
-* `provider::AbstractDataProvider`: the data provider.
-
-Returns a vector of (name, shape) pairs describing the names of the data it provides, and the corresponding shapes.
-"""
-provide_data
-
-"""
-    provide_label(provider) -> Vector{Tuple{Base.Symbol, Tuple}}
-
-# Arguments:
-* `provider::AbstractDataProvider`: the data provider.
-
-Returns a vector of (name, shape) pairs describing the names of the labels it provides, and the corresponding shapes.
-"""
-provide_label
-
-"""
-    AbstractDataProviderState
-
-Base type for data provider states.
-"""
-abstract type AbstractDataProviderState end
-
-"""
-    AbstractDataBatch
-
-Base type for a data mini-batch. It should implement the following interfaces:
-
-* [`count_samples`](@ref)
-* [`get_data`](@ref)
-* [`get_label`](@ref)
-
-The following utility functions will be automatically defined:
-
-* [`get`](@ref)
-* [`load_data!`](@ref)
-* [`load_label!`](@ref)
-"""
-abstract type AbstractDataBatch end
-
-"""
-    count_samples(provider, batch) -> Int
-
-# Arguments:
-* `batch::AbstractDataBatch`: the data batch object.
-
-Returns the number of samples in this batch. This number should be greater than 0, but less than or equal to the batch size. It is used to indicate that, at the end of the data set, there might not be enough samples left for a whole mini-batch.
-"""
-count_samples
-
-"""
-    get_data(provider, batch) -> Vector{NDArray}
-
-# Arguments:
-* `provider::AbstractDataProvider`: the data provider.
-* `batch::AbstractDataBatch`: the data batch object.
-
-Returns a vector of data in this batch, in the same order as declared in `provide_data()`.
-
-The last dimension of each `NDArray` should always match the batch size, even when `count_samples` returns a value less than the batch size. In this case, the data provider is free to pad the remaining contents with any value.
-"""
-get_data
-
-"""
-    get_label(provider, batch) -> Vector{NDArray}
-
-# Arguments:
-* `provider::AbstractDataProvider`: the data provider.
-* `batch::AbstractDataBatch`: the data batch object.
-
-Returns a vector of labels in this batch. Similar to [`get_data`](@ref).
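
Taken together, the provider and batch interfaces above are small. The following is an illustrative sketch only (the `OneBatchProvider` type is invented here, not part of the removed source; it relies on the `DataBatch` helper defined just below): a toy provider that yields a single pre-built mini-batch per epoch.

```julia
# Hypothetical sketch of the AbstractDataProvider interface described above.
struct OneBatchProvider <: mx.AbstractDataProvider
  batch :: mx.DataBatch  # the single mini-batch this provider yields
end

mx.get_batch_size(p::OneBatchProvider) = p.batch.count
mx.provide_data(p::OneBatchProvider)   = [(:data, size(p.batch.data[1]))]
mx.provide_label(p::OneBatchProvider)  = [(:softmax_label, size(p.batch.label[1]))]

Base.eltype(::OneBatchProvider) = mx.DataBatch

# Yield the batch exactly once per epoch; `state` counts emitted batches.
Base.iterate(p::OneBatchProvider, state = 0) =
  state == 0 ? (p.batch, 1) : nothing
```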
-""" -get_label - -""" - DataBatch - -A basic subclass of `AbstractDataBatch`, that implement the interface by -accessing member fields. -""" -mutable struct DataBatch{T,S,N,M} <: AbstractDataBatch - data :: Vector{NDArray{T,N}} - label :: Vector{NDArray{S,M}} - count :: Int -end - -count_samples(batch::DataBatch) = batch.count - -get_data(::Provider, batch::DataBatch) where {Provider<:AbstractDataProvider} = - batch.data - -get_label(::Provider, batch::DataBatch) where {Provider<:AbstractDataProvider} = - batch.label - -""" - SlicedNDArray - -A alias type of `Tuple{UnitRange{Int},NDArray}`. -""" -const SlicedNDArray = Tuple{UnitRange{Int},<:NDArray} - -function _load_general!(provider :: AbstractDataProvider, batch :: AbstractDataBatch, - targets :: Vector{<:Vector{<:SlicedNDArray}}, loader::Function) - data = loader(provider, batch) - for (d_src, d_targets) in zip(data, targets) - for (slice_idx, d_dst) in d_targets - copy!(d_dst, slice(d_src, slice_idx)) - end - end -end - -""" - load_data!(provider, batch, targets) - -# Arguments: -* `provider::AbstractDataProvider`: the data provider. -* `batch::AbstractDataBatch`: the data batch object. -* `targets::Vector{Vector{SlicedNDArray}}`: the targets to load data into. - -The targets is a list of the same length as number of data provided by this provider. -Each element in the list is a list of `SlicedNDArray`. This list described a -spliting scheme of this data batch into different slices, each slice is specified by -a slice-ndarray pair, where *slice* specify the range of samples in the mini-batch -that should be loaded into the corresponding *ndarray*. - -This utility function is used in data parallelization, where a mini-batch is splited -and computed on several different devices. -""" -function load_data!(provider :: AbstractDataProvider, batch :: AbstractDataBatch, - targets :: Vector{<:Vector{<:SlicedNDArray}}) - _load_general!(provider, batch, targets, get_data) -end - -""" - load_label!(provider, batch, targets) - -* `provider::AbstractDataProvider provider`: the data provider. -* `batch::AbstractDataBatch batch`: the data batch object. -* `targets::Vector{Vector{SlicedNDArray}}`: the targets to load label into. - -The same as [`load_data!`](@ref), except that this is for loading labels. -""" -function load_label!(provider :: AbstractDataProvider, batch :: AbstractDataBatch, - targets :: Vector{<:Vector{<:SlicedNDArray}}) - _load_general!(provider, batch, targets, get_label) -end - -function load_data!(provider :: AbstractDataProvider, batch :: AbstractDataBatch, - targets :: Vector{<:NDArray}) - for (src, dst) in zip(get_data(provider, batch), targets) - copy!(dst, src) - end -end -function load_label!(provider :: AbstractDataProvider, batch :: AbstractDataBatch, - targets :: Vector{<:NDArray}) - for (src, dst) in zip(get_label(provider, batch), targets) - copy!(dst, src) - end -end - -import Base.get -""" - get(provider, batch, name) -> NDArray - -* `provider::AbstractDataProvider`: the data provider. -* `batch::AbstractDataBatch`: the data batch object. -* `name::Symbol`: the name of the data to get, should be one of the names - provided in either `provide_data() ` - or `provide_label() `. - -Returns the corresponding data array corresponding to that name. 
-""" -function get(provider::AbstractDataProvider, batch::AbstractDataBatch, name::Symbol) - for (idx, (k, s)) in enumerate(provide_data(provider)) - if name == k - return get_data(provider, batch)[idx] - end - end - for (idx, (k, s)) in enumerate(provide_label(provider)) - if name == k - return get_label(provider, batch)[idx] - end - end - error("$name is not provided by this data provider") -end - -""" - eachbatch(provider::AbstractDataProvider) - -Allows you to perform operations on data every epoch. This is especially useful -when you need to perform real-time augmentation of the data. - -# Arguments: -* `provider`: an instance of the custom DataProvider type. You must return this -instance after modifying its fields. - -""" -eachbatch(provider::AbstractDataProvider) = provider - -""" - ArrayDataProvider - -A convenient tool to iterate `NDArray` or Julia `Array`. - - ArrayDataProvider(data[, label]; batch_size, shuffle, data_padding, label_padding) - -Construct a data provider from `NDArray` or Julia Arrays. - -# Arguments: -* `data`: the data, could be - * a `NDArray`, or a Julia Array. This is equivalent to `:data => data`. - * a name-data pair, like `:mydata => array`, where `:mydata` is the name of the data - * and `array` is an `NDArray` or a Julia Array. - * a list of name-data pairs. - -* `label`: the same as the `data` parameter. When this argument is omitted, the constructed provider will provide no labels. -* `batch_size::Int`: the batch size, default is 0, which means treating the whole array as a single mini-batch. -* `shuffle::Bool`: turn on if the data should be shuffled at every epoch. -* `data_padding::Real`: when the mini-batch goes beyond the dataset boundary, there might - be less samples to include than a mini-batch. This value specify a scalar to pad the - contents of all the missing data points. -* `label_padding::Real`: the same as `data_padding`, except for the labels. - -TODO: remove `data_padding` and `label_padding`, and implement rollover that copies -the last or first several training samples to feed the padding. -""" -mutable struct ArrayDataProvider{T,N} <: AbstractDataProvider - data_arrays :: Vector{Array{T,N}} - data_names :: Vector{Symbol} - label_arrays - label_names :: Vector{Symbol} - batch_size :: Int - sample_count :: Int - shuffle :: Bool - data_padding :: MX_float - label_padding :: MX_float - - data_batch - label_batch -end - -# Julia's type system is sometimes very frustrating. You cannot specify a function -# with argument Vector{Pair} to expect to be matched when calling with the parameter -# [:foo => zeros(2,3), :bar => zeros(3)] because the type inference gives very specific -# results, about the parametric type in the Pair{T1,T2} type, thus does not match the -# generic Pair type. In general, Int <: Number but Vector{Int} <: Vector{Number} is not -# true. So let us just use Any here... 
-function ArrayDataProvider(data; batch_size::Int = 0, shuffle::Bool = false, - data_padding::Real = 0, label_padding::Real = 0) - ArrayDataProvider(data, [], batch_size = batch_size, shuffle = shuffle, - data_padding = data_padding, label_padding = label_padding) -end - -function ArrayDataProvider(data, label; batch_size::Int = 0, shuffle::Bool = false, - data_padding::Real = 0, label_padding::Real = 0) - asarr(arr :: Array{T}) where {T} = convert(Array{MX_float}, arr) - asarr(arr :: NDArray) = copy(arr) - - if isa(data, Union{NDArray, Array}) && eltype(data) <: Real - data_names = [:data] - data_arrays = Array{MX_float}[asarr(data)] - elseif isa(data, Pair) - @assert isa(data.first, Base.Symbol) && isa(data.second, Union{NDArray, Array}) - data_names = [data.first] - data_arrays = Array{MX_float}[asarr(data.second)] - elseif isa(data, Vector) || isa(data, Tuple) - map(data) do d - @assert isa(d, Pair) && isa(d.first, Base.Symbol) && isa(d.second, Union{NDArray, Array}) - end - data_names = Base.Symbol[d.first for d in data] - data_arrays = Array{MX_float}[asarr(d.second) for d in data] - else - error("Invalid data argument type") - end - - if isa(label, Union{NDArray, Array}) && eltype(label) <: Real - label_names = [:softmax_label] - label_arrays = Array{MX_float}[asarr(label)] - elseif isa(label, Pair) - @assert isa(label.first, Base.Symbol) && isa(label.second, Union{NDArray, Array}) - label_names = [label.first] - label_arrays = Array{MX_float}[asarr(label.second)] - elseif isa(label, Vector) || isa(label, Tuple) - map(label) do d - @assert isa(d, Pair) && isa(d.first, Base.Symbol) && isa(d.second, Union{NDArray, Array}) - end - label_names = Base.Symbol[d.first for d in label] - label_arrays = Array{MX_float}[asarr(d.second) for d in label] - else - error("Invalid label argument type") - end - - @assert length(data_arrays) > 0 - sample_count = size(data_arrays[1])[end] - for i = 1:length(data_names) - @assert(size(data_arrays[i])[end] == sample_count, - "Number of samples in $(data_names[i]) is mismatch with $(data_names[1])") - end - for i = 1:length(label_names) - @assert(size(label_arrays[i])[end] == sample_count, - "Number of samples in $(label_names[i]) is mismatch with $(data_names[1])") - end - - if batch_size == 0 - batch_size = sample_count - end - @assert 0 < batch_size <= sample_count - - function gen_batch_nds(arrs :: Vector{Array{MX_float}}, bsize :: Int) - map(arrs) do arr - shape = size(arr) - NDArray(undef, shape[1:end-1]..., bsize) - end - end - - data_batch = gen_batch_nds(data_arrays, batch_size) - label_batch = gen_batch_nds(label_arrays, batch_size) - - # reshape data and labels into 2D tensors, so that it is easier to work with them - data_arrays = map(data_arrays) do arr - reshape(arr, prod(size(arr)[1:end-1]), size(arr)[end]) - end - label_arrays = map(label_arrays) do arr - reshape(arr, prod(size(arr)[1:end-1]), size(arr)[end]) - end - - ArrayDataProvider(data_arrays, data_names, label_arrays, label_names, batch_size, - sample_count, shuffle, MX_float(data_padding), MX_float(label_padding), - data_batch, label_batch) -end - -provide_data(provider::ArrayDataProvider) = - collect(zip(provider.data_names, map(size, provider.data_batch))) - -provide_label(provider::ArrayDataProvider) = - collect(zip(provider.label_names, map(size, provider.label_batch))) - -get_batch_size(provider::ArrayDataProvider) = provider.batch_size - -struct ArrayDataProviderState <: AbstractDataProviderState - curr_idx :: Int -end - -Base.eltype(provider :: ArrayDataProvider) = 
ArrayDataProviderState - -struct ArrayDataBatch <: AbstractDataBatch - idx :: UnitRange{Int} -end - -function _start(provider::ArrayDataProvider) - if provider.shuffle - # re-shuffle all data - idx_perm = randperm(provider.sample_count) - provider.data_arrays = map(x->x[:,idx_perm], provider.data_arrays) - provider.label_arrays = map(x->x[:,idx_perm], provider.label_arrays) - end - - return ArrayDataProviderState(1) -end - -function Base.iterate(provider::ArrayDataProvider, - state::ArrayDataProviderState = _start(provider)) - (state.curr_idx > provider.sample_count) && return nothing - idx = state.curr_idx:Base.min(state.curr_idx+provider.batch_size-1, provider.sample_count) - return (ArrayDataBatch(idx), ArrayDataProviderState(idx.stop+1)) -end - -function count_samples(provider :: ArrayDataProvider, batch :: ArrayDataBatch) - return length(batch.idx) -end - -function get_data(provider :: ArrayDataProvider, batch :: ArrayDataBatch) - for (src, dst) in zip(provider.data_arrays, provider.data_batch) - copy_ignore_shape!(dst[1:length(batch.idx)], src[:, batch.idx]) - if length(batch.idx) < provider.batch_size - dst[length(batch.idx)+1:provider.batch_size] = provider.data_padding - end - end - return provider.data_batch -end -function get_label(provider :: ArrayDataProvider, batch :: ArrayDataBatch) - for (src, dst) in zip(provider.label_arrays, provider.label_batch) - copy_ignore_shape!(dst[1:length(batch.idx)], src[:, batch.idx]) - if length(batch.idx) < provider.batch_size - dst[length(batch.idx)+1:provider.batch_size] = provider.label_padding - end - end - return provider.label_batch -end - - -""" - MXDataProvider - -A data provider that wrap built-in data iterators from libmxnet. See below for -a list of built-in data iterators. -""" -mutable struct MXDataProvider <: AbstractDataProvider - handle :: MX_DataIterHandle - data_shape :: Vector{Tuple{Symbol,Tuple}} - label_shape:: Vector{Tuple{Symbol,Tuple}} - batch_size :: Int - - # those two a auxiliary variables to help avoid calling reset - # but still pre-fetch first batch to get shape information - first_epoch:: Bool - first_batch:: Bool -end - -function _reset_data_iter(handle :: MX_DataIterHandle) - @mxcall(:MXDataIterBeforeFirst, (MX_handle,), handle) -end -function _iter_next(handle :: MX_DataIterHandle) - ref_ret = Ref{Cint}(0) - @mxcall(:MXDataIterNext, (MX_handle, Ref{Cint}), handle, ref_ret) - return Bool(ref_ret[]) -end -function _get_data(handle :: MX_DataIterHandle) - ref_hdr = Ref{MX_handle}(0) - @mxcall(:MXDataIterGetData, (MX_handle, Ref{MX_handle}), handle, ref_hdr) - return NDArray(MX_NDArrayHandle(ref_hdr[]), false) -end -function _get_label(handle :: MX_DataIterHandle) - ref_hdr = Ref{MX_handle}(0) - @mxcall(:MXDataIterGetLabel, (MX_handle, Ref{MX_handle}), handle, ref_hdr) - return NDArray(MX_NDArrayHandle(ref_hdr[]), false) -end - -function MXDataProvider(handle :: MX_DataIterHandle; - data_name :: Symbol = :data, - label_name :: Union{Symbol,Nothing} = :softmax_label, - kwargs...) 
# for convenience, we ignore the rest keyword arguments - # init iterator, load the first batch and get shapes - @assert(_iter_next(handle), "Failed to load the first batch in MXDataProvider") - data_shape = Tuple{Base.Symbol, Tuple}[(data_name, size(_get_data(handle)))] - if !isa(label_name, Nothing) - label_shape = Tuple{Base.Symbol, Tuple}[(label_name::Base.Symbol, size(_get_label(handle)))] - else - label_shape = Tuple{Base.Symbol, Tuple}[] - end - - MXDataProvider(handle, data_shape, label_shape, data_shape[1][2][end], true, true) -end - -provide_data(provider::MXDataProvider) = provider.data_shape -provide_label(provider::MXDataProvider) = provider.label_shape -get_batch_size(provider::MXDataProvider) = provider.batch_size - -mutable struct MXDataProviderState <: AbstractDataProviderState - has_next :: Bool -end -struct MXDataBatch <: AbstractDataBatch -end - -Base.eltype(::MXDataProvider) = MXDataBatch - -function _start(provider::MXDataProvider) - if !provider.first_epoch - _reset_data_iter(provider.handle) - else - provider.first_epoch = false - end - - return MXDataProviderState(true) -end - -function _done(provider::MXDataProvider, state::MXDataProviderState) - if provider.first_batch - state.has_next = true - provider.first_batch = false - else - state.has_next = _iter_next(provider.handle) - end - return !state.has_next -end - -function Base.iterate(provider::MXDataProvider, state::MXDataProviderState = _start(provider)) - _done(provider, state) && return nothing - MXDataBatch(), state -end - -function get_data(provider :: MXDataProvider, batch :: MXDataBatch) - return NDArray[_get_data(provider.handle)] -end -function get_label(provider :: MXDataProvider, batch :: MXDataBatch) - return NDArray[_get_label(provider.handle)] -end -function count_samples(provider :: MXDataProvider, batch :: MXDataBatch) - ref_pad = Ref{Cint}(0) - @mxcall(:MXDataIterGetPadNum, (MX_handle, Ref{Cint}), provider.handle, ref_pad) - return provider.batch_size - Int(ref_pad[]) -end - -function _get_iter_creators() - n_ref = Ref{MX_uint}(0) - h_ref = Ref{Ptr{MX_handle}}(0) - @mxcall(:MXListDataIters, (Ref{MX_uint}, Ref{Ptr{MX_handle}}), n_ref, h_ref) - - return unsafe_wrap(Array, h_ref[], n_ref[]) -end - -function _get_iter_name(hdr :: MX_handle) - ref_name = Ref{char_p}(0) - ref_desc = Ref{char_p}(0) - ref_narg = Ref{MX_uint}(0) - ref_arg_names = Ref{char_pp}(0) - ref_arg_types = Ref{char_pp}(0) - ref_arg_descs = Ref{char_pp}(0) - - @mxcall(:MXDataIterGetIterInfo, - (MX_handle, Ref{char_p}, Ref{char_p}, Ref{MX_uint}, Ref{char_pp}, Ref{char_pp}, Ref{char_pp}), - hdr, ref_name, ref_desc, ref_narg, ref_arg_names, ref_arg_types, ref_arg_descs) - - return Symbol(unsafe_string(ref_name[])) -end - -const _iter_creator_cache = Dict{Symbol,MX_handle}() -function _populate_iter_creator_cache!() - empty!(_iter_creator_cache) - h_creators = _get_iter_creators() - for handle in h_creators - name = _get_iter_name(handle) - _iter_creator_cache[name] = handle - end -end - -_get_iter_creator(name::Symbol) = _iter_creator_cache[name] - -function _define_data_iter_creator(hdr :: MX_handle) - ref_name = Ref{char_p}(0) - ref_desc = Ref{char_p}(0) - ref_narg = Ref{MX_uint}(0) - ref_arg_names = Ref{char_pp}(0) - ref_arg_types = Ref{char_pp}(0) - ref_arg_descs = Ref{char_pp}(0) - - @mxcall(:MXDataIterGetIterInfo, - (MX_handle, Ref{char_p}, Ref{char_p}, Ref{MX_uint}, Ref{char_pp}, Ref{char_pp}, Ref{char_pp}), - hdr, ref_name, ref_desc, ref_narg, ref_arg_names, ref_arg_types, ref_arg_descs) - - iter_name = 
Symbol(unsafe_string(ref_name[])) - - isprovider = endswith(string(iter_name), "Iter") - signature = _format_signature(Int(ref_narg[]), ref_arg_names) - f_desc = " " * string(iter_name) * "(" *signature * ")\n\n" - if isprovider - f_desc *= "Can also be called with the alias `$(string(iter_name)[1:end-4] * "Provider")`.\n" - end - f_desc *= unsafe_string(ref_desc[]) * "\n\n" - f_desc *= "# Arguments:\n" - f_desc *= "* `data_name::Symbol`: keyword argument, default `:data`. The name of the data.\n" - f_desc *= "* `label_name::Symbol`: keyword argument, default `:softmax_label`. " * - "The name of the label. Could be `nothing` if no label is presented in this dataset.\n\n" - f_desc *= _format_docstring(Int(ref_narg[]), ref_arg_names, ref_arg_types, ref_arg_descs) * "\n" - f_desc *= "Returns the constructed `MXDataProvider`." - - if isprovider - alias_name = Symbol(string(iter_name)[1:end-4] * "Provider") - else - alias_name = nothing - end - - defun = quote - @doc $f_desc - function $iter_name(; kwargs...) - arg_keys = String[string(k) for (k,v) in kwargs] - arg_vals = String[dump_mx_param(v) for (k,v) in kwargs] - ref_hdr = Ref{MX_handle}(0) - - local hdr = _get_iter_creator($(QuoteNode(iter_name))) - @mxcall(:MXDataIterCreateIter, (MX_handle, MX_uint, char_pp, char_pp, Ref{MX_handle}), - hdr, length(arg_keys), arg_keys, arg_vals, ref_hdr) - - return MXDataProvider(MX_DataIterHandle(ref_hdr[]); kwargs...) - end - $(isprovider ? :(const $alias_name = $iter_name) : :()) - - end - defun -end - -macro _import_io_iterators() - creators = _get_iter_creators() - defs = Expr[] - for handle in creators - push!(defs, _define_data_iter_creator(handle)) - end - esc(quote - $(defs...) - end) -end - -@_import_io_iterators() diff --git a/julia/src/kvstore.jl b/julia/src/kvstore.jl deleted file mode 100644 index 1fb6df20d27d..000000000000 --- a/julia/src/kvstore.jl +++ /dev/null @@ -1,353 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import Base.push! - -""" - KVStore(kv_type = :local) - -For single machine training, there are two commonly used types: - -- `local`: Copies all gradients to CPU memory and updates weights there. - -- `device`: Aggregates gradients and updates weights on GPU(s). - With this setting, the `KVStore` also attempts to use GPU peer-to-peer - communication, potentially accelerating the communication. - -For distributed training, `KVStore` also supports a number of types: - -- `dist_sync`: Behaves similarly to `local` but with one major difference. - With `dist_sync`, batch-size now means the batch size used on each machine. - So if there are `n` machines and we use batch size ``b``, - then `dist_sync` behaves like `local` with batch size `n * b`. 
- -- `dist_device_sync`: Identical to `dist_sync` with the difference similar - to `device` vs `local`. - -- `dist_async`: Performs asynchronous updates. - The weights are updated whenever gradients are received from any machine. - No two updates happen on the same weight at the same time. - However, the order is not guaranteed. -""" -mutable struct KVStore - handle :: MX_KVStoreHandle - updater_c :: Ptr{Cvoid} - updater :: Function - - KVStore(hdr::MX_KVStoreHandle) = new(hdr, Ptr{Cvoid}(0)) -end - -function KVStore(kv_type::Symbol = :local) - @assert kv_type ∈ (:local, :device, :dist_sync, :dist_device_sync, :dist_async) - ref_hdr = Ref{MX_handle}(0) - @mxcall(:MXKVStoreCreate, (char_p, Ref{MX_handle}), dump_mx_param(kv_type), ref_hdr) - KVStore(MX_KVStoreHandle(ref_hdr[])) -end - -Base.unsafe_convert(::Type{MX_handle}, obj::KVStore) = - Base.unsafe_convert(MX_handle, obj.handle) -Base.convert(t::Type{MX_handle}, obj::KVStore) = Base.unsafe_convert(t, obj) -Base.cconvert(t::Type{MX_handle}, obj::KVStore) = Base.unsafe_convert(t, obj) - -Base.show(io::IO, kv::KVStore) = - print(io, "mx.KVStore @ $(get_type(kv))") - -function _flatten_kvlist(keys::Vector{Int}, vals::Vector{<:Vector{<:NDArray}}) - @assert length(keys) == length(vals) - keys_flt = Int[] - vals_flt = NDArray[] - for (k,v) in zip(keys, vals) - append!(keys_flt, Base.ones(Int, length(v))*k) - append!(vals_flt, v) - end - return (keys_flt, vals_flt) -end - -""" - init!(kv::KVStore, key::Int, val::NDArray) - init!(kv::KVStore, keys, vals) - -Initializes a single or a sequence of key-value pairs into the store. - -For each key, one must `init!` it before calling `push!` or `pull!`. -When multiple workers invoke `init!` for the same key, only -the value supplied by worker with rank `0` is used. This function returns -after data has been initialized successfully. - -```jldoctest -julia> kv = KVStore(:local) -mx.KVStore @ local - -julia> init!(kv, 42, mx.rand(2, 3)) -``` -""" -init!(kv::KVStore, key::Int, val::NDArray) = init!(kv, [key], [val]) -init!(kv::KVStore, key::Int, vals::Vector{<:NDArray}) = - init!(kv, Base.ones(Int, length(vals)) * key, vals) -init!(kv::KVStore, keys::Vector{Int}, vals::Vector{<:Vector{<:NDArray}}) = - init!(kv, _flatten_kvlist(keys, vals)...) - -function init!(kv::KVStore, keys::Vector{Int}, vals::VecOfNDArray) - @assert length(keys) == length(vals) - keys = Cint[keys...] - vals = MX_handle[vals...] - @mxcall(:MXKVStoreInit, (MX_handle, MX_uint, Ptr{Cint}, Ptr{MX_handle}), - kv, length(keys), keys, vals) -end - -""" - push!(kv::KVStore, key, val; priority = 0) - push!(kv::KVStore, key, vals; priority = 0) - push!(kv::KVStore, keys, vals; priority = 0) - -Pushes a single or a sequence of key-value pairs into the store. - -This function returns immediately after adding an operator to the engine. -The actual operation is executed asynchronously. If there are consecutive -pushes to the same key, there is no guarantee on the serialization of pushes. -The execution of a push does not guarantee that all previous pushes are -finished. There is no synchronization between workers by default. -One can use ``barrier()`` to sync all workers. 
- -`push!` and `pull!` single `NDArray`: -```jldoctest -julia> kv = KVStore(:local) -mx.KVStore @ local - -julia> x = NDArray(undef, 2, 3); - -julia> init!(kv, 3, x) - -julia> push!(kv, 3, mx.ones(2, 3) * 8) - -julia> pull!(kv, 3, x) - -julia> x -2×3 mx.NDArray{Float32,2} @ CPU0: - 8.0 8.0 8.0 - 8.0 8.0 8.0 -``` - -Aggregate values and `push!`: -```jldoctest -julia> vals = [mx.ones((2, 3), gpu(0)) * 3, mx.ones((2, 3), gpu(1)) * 4]; - -julia> push!(kv, 3, vals) - -julia> pull!(kv, 3, x) - -julia> x -2×3 mx.NDArray{Float32,2} @ CPU0: - 7.0 7.0 7.0 - 7.0 7.0 7.0 -``` - -`push!` a list of key to single device: - -```jldoctest -julia> keys = [4, 5]; - -julia> init!(kv, keys, [NDArray(undef, 2, 3), NDArray(undef, 2, 3)]) - -julia> push!(kv, keys, [x, x]) - -julia> y, z = NDArray(undef, 2, 3), NDArray(undef, 2, 3); - -julia> pull!(kv, keys, [y, z]) -``` -""" -push!(kv::KVStore, key::Int, val::NDArray; priority::Int = 0) = - push!(kv, [key], [val]; priority = priority) -push!(kv::KVStore, key::Int, vals::Vector{<:NDArray}; priority::Int = 0) = - push!(kv, Base.ones(Int, length(vals)) * key, vals; priority = priority) -push!(kv:: KVStore, keys::Vector{Int}, vals::Vector{<:Vector{<:NDArray}}; - priority::Int = 0) = - push!(kv, _flatten_kvlist(keys, vals)...; priority = priority) - -function push!(kv::KVStore, keys::Vector{Int}, vals::Vector{<:NDArray}; priority::Int = 0) - @assert length(keys) == length(vals) - keys = Cint[keys...] - vals = MX_handle[vals...] - @mxcall(:MXKVStorePush, (MX_handle, MX_uint, Ptr{Cint}, Ptr{MX_handle}, Cint), - kv, length(keys), keys, vals, priority) -end - -""" Pulls a single value or a sequence of values from the store. - -This function returns immediately after adding an operator to the engine. -Subsequent attempts to read from the `out` variable will be blocked until the -pull operation completes. - -`pull` is executed asynchronously after all previous `pull` calls and only -the last `push` call for the same input key(s) are finished. - -The returned values are guaranteed to be the latest values in the store. - -See [`pull!`](@ref) for more examples. -""" -pull!(kv::KVStore, key::Int, out::NDArray; priority::Int = 0) = - pull!(kv, [key], [out], priority = priority) -pull!(kv::KVStore, key::Int, outs::Vector{<:NDArray}; priority::Int = 0) = - pull!(kv, Base.ones(Int, length(outs))*key, outs; priority = priority) -pull!(kv::KVStore, keys::Vector{Int}, outs::Vector{<:Vector{<:NDArray}}; - priority::Int = 0) = - pull!(kv, _flatten_kvlist(keys, outs)...; priority = priority) - -function pull!(kv::KVStore, keys::Vector{Int}, outs::Vector{<:NDArray}; priority::Int = 0) - @assert length(keys) == length(outs) - keys = Cint[keys...] - outs = MX_handle[outs...] - @mxcall(:MXKVStorePull, (MX_handle, MX_uint, Ptr{Cint}, Ptr{MX_handle}, Cint), - kv, length(keys), keys, outs, priority) -end - - -function get_type(kv::KVStore) - type_ref = Ref{char_p}(0) - @mxcall(:MXKVStoreGetType, (MX_handle, Ref{char_p}), kv, type_ref) - return Symbol(unsafe_string(type_ref[])) -end - -function get_num_workers(kv::KVStore) - ref_size = Ref{Cint}(0) - @mxcall(:MXKVStoreGetGroupSize, (MX_handle, Ref{Cint}), kv, ref_size) - return Int(ref_size[]) -end - -function get_rank(kv::KVStore) - ref_rank = Ref{Cint}(0) - @mxcall(:MXKVStoreGetRank, (MX_handle, Ref{Cint}), kv, ref_rank) - return Int(ref_rank[]) -end - -""" - barrier(kv::KVStore) - -Invokes global barrier among all worker nodes. - -For example, assume there are `n` machines. 
We would like machine `0` to first
-`init` the values and then have all the workers `pull` the initialized value.
-Before pulling, we can invoke `barrier(kv)` to guarantee that the
-initialization is finished.
-"""
-barrier(kv::KVStore) = @mxcall(:MXKVStoreBarrier, (MX_handle,), kv)
-
-
-# TODO: Currently Julia does not support closures in c-callbacks, so we are making use of the
-# extra handle parameter of the API to pass the updater object around. Fix this when someday
-# full closure cfunction is supported in Julia.
-function _kvstore_update_wrapper(key::Cint, nd_recv::MX_handle, nd_local::MX_handle,
-                                 updater::Ptr{Cvoid})
-  updater_func = unsafe_pointer_to_objref(updater)
-  updater_func(Int(key), NDArray(MX_NDArrayHandle(nd_recv)),
-               NDArray(MX_NDArrayHandle(nd_local)))
-  nothing
-end
-
-"""
-    setupdater!(kv, updater)
-
-Sets a `push!` updater into the store.
-
-This function only changes the local store.
-When running on multiple machines one must use `set_optimizer`.
-
-```jldoctest
-julia> update(key, val, orig) = mx.@inplace orig += val .* .2
-update (generic function with 1 method)
-
-julia> kv = KVStore(:local)
-mx.KVStore @ local
-
-julia> mx.setupdater!(kv, update)
-
-julia> init!(kv, 42, mx.ones(2, 3))
-
-julia> push!(kv, 42, mx.ones(2, 3))
-
-julia> x = NDArray(undef, 2, 3);
-
-julia> pull!(kv, 42, x)
-
-julia> x
-2×3 mx.NDArray{Float32,2} @ CPU0:
- 1.2  1.2  1.2
- 1.2  1.2  1.2
-```
-"""
-function setupdater!(kv::KVStore, updater)
-  kv.updater = updater  # keep a reference to the julia object so that updater_c is kept valid
-  kv.updater_c = @cfunction(_kvstore_update_wrapper, Cvoid,
-                            (Cint,MX_handle,MX_handle,Ptr{Cvoid}))
-  @mxcall(:MXKVStoreSetUpdater, (MX_handle, Ptr{Cvoid}, Any),
-          kv, kv.updater_c, updater)
-end
-
-"""
-    setoptimizer!(kv::KVStore, opt)
-
-Registers an optimizer with the kvstore.
-
-When using a single machine, this function updates the local optimizer.
-If using multiple machines and this operation is invoked from a worker node,
-it will serialize the optimizer with pickle and send it to all servers.
-The function returns after all servers have been updated.
-
-```jldoctest
-julia> kv = KVStore()
-mx.KVStore @ local
-
-julia> W = mx.zeros(2, 3)  # 2×3 weight matrix
-2×3 mx.NDArray{Float32,2} @ CPU0:
- 0.0  0.0  0.0
- 0.0  0.0  0.0
-
-julia> init!(kv, 42, W)
-
-julia> setoptimizer!(kv, SGD(η = .2))  # SGD with .2 as learning rate
-
-julia> ∇W = mx.ones(2, 3)  # assume it's the gradient
-2×3 mx.NDArray{Float32,2} @ CPU0:
- 1.0  1.0  1.0
- 1.0  1.0  1.0
-
-julia> push!(kv, 42, ∇W)
-
-julia> pull!(kv, 42, W)  # fetch weight and write back to `W`
-
-julia> W
-2×3 mx.NDArray{Float32,2} @ CPU0:
- -0.2  -0.2  -0.2
- -0.2  -0.2  -0.2
-```
-"""
-function setoptimizer!(kv::KVStore, opt::AbstractOptimizer)
-  if occursin(r"dist", string(get_type(kv))) && _isworker()
-    # TODO
-    error("not implemented")
-  else
-    setupdater!(kv, getupdater(opt))
-  end
-end
-
-function _isworker()::Bool
-  ref = Ref{Cint}(0)
-  @mxcall(:MXKVStoreIsWorkerNode, (Ref{Cint},), ref)
-  Bool(ref[])
-end
-
-# TODO: sparse support?
diff --git a/julia/src/metric.jl b/julia/src/metric.jl
deleted file mode 100644
index 2ae7fc85144b..000000000000
--- a/julia/src/metric.jl
+++ /dev/null
@@ -1,478 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.
The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""
-    AbstractEvalMetric
-
-The base class for all evaluation metrics. The sub-types should implement the following
-interfaces:
-
-* [`update!`](@ref)
-* [`reset!`](@ref)
-* [`get`](@ref)
-"""
-abstract type AbstractEvalMetric end
-
-"""
-    hasNDArraySupport(metric) -> Val{true/false}
-
-Trait for `_update_single_output`: it should return `Val{true}()` if the metric can handle
-`NDArray` directly and `Val{false}()` if it requires `Array`. Metrics that work with
-NDArrays can be async, while native Julia arrays require copying the output of the
-network, which is a blocking operation.
-"""
-hasNDArraySupport(::AbstractEvalMetric) = Val{true}()
-
-"""
-    update!(metric, labels, preds)
-
-Update and accumulate metrics.
-
-# Arguments:
-* `metric::AbstractEvalMetric`: the metric object.
-* `labels::Vector{NDArray}`: the labels from the data provider.
-* `preds::Vector{NDArray}`: the outputs (predictions) of the network.
-"""
-update!(metric::T, labels::VecOfNDArray, preds::VecOfNDArray) where T<:AbstractEvalMetric =
-  _update!(metric, labels, preds, hasNDArraySupport(metric))
-
-function _update!(metric::T, labels::VecOfNDArray, preds::VecOfNDArray,
-                  ::Val{true}) where T<:AbstractEvalMetric
-  if length(labels) != length(preds)
-    @warn(
-      "The number of labels ($(length(labels))) does not correspond to the " *
-      "number of outputs ($(length(preds))). The calculated metric might not be accurate.",
-      maxlog = 1)
-  end
-  for (label, pred) in zip(labels, preds)
-    _update_single_output(metric, label, pred)
-  end
-end
-
-function _update!(metric::T, labels::VecOfNDArray, preds::VecOfNDArray,
-                  ::Val{false}) where {T<:AbstractEvalMetric}
-  if length(labels) != length(preds)
-    @warn(
-      "The number of labels ($(length(labels))) does not correspond to the " *
-      "number of outputs ($(length(preds))). The calculated metric might not be accurate.",
-      maxlog = 1)
-  end
-  for (label, pred) in zip(labels, preds)
-    @nd_as_jl ro=(label, pred) begin
-      # This is a dynamic dispatch since the conversion from NDArray to
-      # Array is not type-stable.
-      _update_single_output(metric, label, pred)
-    end
-  end
-end
-
-"""
-    reset!(metric)
-
-Reset the accumulation counter.
-"""
-reset!(metric::AbstractEvalMetric) = throw(MethodError(reset!, (typeof(metric),)))
-
-
-import Base: get
-"""
-    get(metric)
-
-Get the accumulated metrics.
-
-Returns `Vector{Tuple{Base.Symbol, Real}}`, a list of name-value pairs.
-For example, `[(:accuracy, 0.9)]`.
-"""
-get(metric::AbstractEvalMetric) = throw(MethodError(get, (typeof(metric),)))
-
-"""
-    NullMetric()
-
-A metric that calculates nothing. Can be used to ignore an output during training.
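
To illustrate the interface documented above, a minimal custom metric might look like the following sketch (the `CountMetric` type is invented here; it merely counts the predictions it sees):

```julia
# Hypothetical metric implementing the AbstractEvalMetric interface above.
mutable struct CountMetric <: mx.AbstractEvalMetric
  n :: Int
  CountMetric() = new(0)
end

# ask the framework to hand us plain Julia Arrays
mx.hasNDArraySupport(::CountMetric) = Val{false}()

function mx._update_single_output(m::CountMetric, label::Array, pred::Array)
  m.n += size(pred, ndims(pred))  # samples are stored in the last dimension
end

mx.reset!(m::CountMetric) = (m.n = 0)
mx.get(m::CountMetric) = [(:count, m.n)]
```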
-""" -mutable struct NullMetric <: mx.AbstractEvalMetric -end - -update!(metric::NullMetric, labels::VecOfNDArray, preds::VecOfNDArray) = nothing - -reset!(metric::NullMetric) = nothing - -get(metric::NullMetric) = Tuple{Symbol, Float64}[] - -""" - MultiMetric(metrics::Vector{AbstractEvalMetric}) - -Combine multiple metrics in one and get a result for all of them. - -# Usage -To calculate both mean-squared error [`Accuracy`](@ref) and log-loss [`ACE`](@ref): -```julia - mx.fit(..., eval_metric = mx.MultiMetric([mx.Accuracy(), mx.ACE()])) -``` -""" -mutable struct MultiMetric <: AbstractEvalMetric - metrics :: Vector{mx.AbstractEvalMetric} -end - -function update!(metric :: MultiMetric, labels :: Vector{<:NDArray}, preds :: Vector{<:NDArray}) - for m in metric.metrics - update!(m, labels, preds) - end - nothing -end - -function reset!(metric :: MultiMetric) - map(reset!, metric.metrics) - nothing -end - -get(metric::MultiMetric) = mapreduce(get, append!, metric.metrics) - -""" - SeqMetric(metrics::Vector{AbstractEvalMetric}) - -Apply a different metric to each output. This is especially useful for `mx.Group`. - -# Usage -Calculate accuracy [`Accuracy`](@ref) for the first output -and log-loss [`ACE`](@ref) for the second output: -```julia - mx.fit(..., eval_metric = mx.SeqMetric([mx.Accuracy(), mx.ACE()])) -``` -""" -mutable struct SeqMetric <: AbstractEvalMetric - metrics :: Vector{AbstractEvalMetric} -end - -function update!(metric::SeqMetric, labels::VecOfNDArray, preds::VecOfNDArray) - @assert length(metric.metrics) == length(labels) - @assert length(metric.metrics) == length(preds) - for (m, l, p) in zip(metric.metrics, labels, preds) - update!(m, [l], [p]) - end - nothing -end - -function reset!(metric::SeqMetric) - map(reset!, metric.metrics) - nothing -end - -get(metric::SeqMetric) = mapreduce(get, append!, metric.metrics) - -""" - Accuracy - -Multiclass classification accuracy. - -Calculates the mean accuracy per sample for softmax in one dimension. -For a multi-dimensional softmax the mean accuracy over all dimensions is calculated. -""" -mutable struct Accuracy <: AbstractEvalMetric - acc_sum :: Float64 - n_sample :: Int - - Accuracy() = new(0.0, 0) -end - -hasNDArraySupport(::Accuracy) = Val{false}() - -function _update_single_output(metric::Accuracy, label::Array, pred::Array) - # Samples are stored in the last dimension - @assert size(label, ndims(label)) == size(pred, ndims(pred)) - - if ndims(pred) == 4 # Multidimensional case - # Reshape label to be of the same shape as pred. - # Except for the third dimension where the predictions are stored. - labels = reshape(label, size(pred, 1, 2)..., 1, size(pred, 4)) - - for sample in 1:size(labels, 4) - for j in 1:size(labels, 2) - for i in 1:size(labels, 1) - label = labels[i, j, 1, sample] - klasses = view(pred, i, j, :, sample) - klass = argmax(klasses) - 1 # Classes start at 0...k-1 - - metric.acc_sum += klass == label - metric.n_sample += 1 - end - end - end - elseif ndims(pred) == 2 # 1-dimensional case - for sample in 1:size(label, 1) - klass = argmax(view(pred, :, sample)) - 1 - metric.acc_sum += klass == label[sample] - metric.n_sample += 1 - end - else - error("Can't handle prediction with dimensions $(ndims(pred)).") - end -end - -get(metric::Accuracy) = [(:accuracy, metric.acc_sum / metric.n_sample)] - -function reset!(metric :: Accuracy) - metric.acc_sum = 0.0 - metric.n_sample = 0 -end - -""" - MSE - -Mean Squared Error. - -Calculates the mean squared error regression loss. 
-Requires that label and prediction have the same shape.
-"""
-mutable struct MSE{N} <: AbstractEvalMetric
-  mse_sum  :: Vector{NDArray{MX_float,N}}
-  n_sample :: Int
-
-  MSE{N}() where {N} = new(Vector{NDArray{MX_float,N}}(), 0)
-end
-
-MSE() = MSE{1}()  # backward compat?
-
-hasNDArraySupport(::MSE) = Val{true}()
-
-function _update_single_output(metric::MSE, label::NDArray{T,N},
-                               pred::NDArray{T,N}) where {T,N}
-  @assert size(label) == size(pred)
-  metric.n_sample += length(label)
-  mse_sum = mx.sum((label .- pred).^2)
-  push!(metric.mse_sum, mse_sum)
-  nothing
-end
-
-function get(metric::MSE)
-  # Delay copy until last possible moment
-  mse_sum = mapreduce(nda->copy(nda)[1], +, metric.mse_sum; init = zero(MX_float))
-  [(:MSE, mse_sum / metric.n_sample)]
-end
-
-function reset!(metric::MSE{N}) where N
-  metric.mse_sum = Vector{NDArray{Float32,N}}()
-  metric.n_sample = 0
-end
-
-@doc doc"""
-    NMSE
-
-Normalized Mean Squared Error
-
-```math
-\sum_i (\frac{label_i - pred_i}{label_i})^2
-```
-
-Note that there are various ways to do the *normalization*.
-It depends on your own context. Please judge the problem setting you have
-first. If the current implementation does not suit your needs,
-feel free to file an issue on GitHub.
-
-Here is a use case for this kind of normalization:
-
-Bob is training a network for option pricing. Option pricing is
-a regression problem (price prediction). There are lots of option contracts
-on the same underlying stock, but with different strike prices.
-For example, there is a stock `S`; its market price is 1000.
-And there are two call option contracts with different strike prices.
-Assume Bob obtains the outcomes in the following table:
-
-```
-+--------+----------------+----------------+--------------+
-|        |  Strike Price  |  Market Price  |  Pred Price  |
-+--------+----------------+----------------+--------------+
-|  Op 1  |  1500          |  100           |  80          |
-+--------+----------------+----------------+--------------+
-|  Op 2  |  500           |  10            |  8           |
-+--------+----------------+----------------+--------------+
-```
-
-Now, obviously, Bob will calculate the normalized MSE as:
-
-```math
-    \left(\frac{100 - 80}{100}\right)^2
-    \text{ vs }
-    \left(\frac{10 - 8}{10}\right)^2
-```
-
-Both predicted prices have the same relative error (both terms equal 0.04).
-
-For more discussion about normalized MSE, please see
-[#211](https://github.com/dmlc/MXNet.jl/pull/211) also.
-
-"""
-mutable struct NMSE <: AbstractEvalMetric
-  nmse_sum :: Float64
-  n_sample :: Int
-
-  NMSE() = new(0.0, 0)
-end
-
-hasNDArraySupport(::NMSE) = Val{false}()
-
-function _update_single_output(metric::NMSE, label::Array, pred::Array)
-  n_sample = size(pred)[end]
-  metric.n_sample += n_sample
-
-  for i = 1:n_sample
-    if label[i] == 0.0f0  # in case of batch padding
-      continue
-    end
-
-    metric.nmse_sum += ((label[i] - pred[i]) / label[i])^2
-  end
-end
-
-get(metric::NMSE) = [(:NMSE, metric.nmse_sum / metric.n_sample)]
-
-function reset!(metric::NMSE)
-  metric.nmse_sum = 0.0
-  metric.n_sample = 0
-end
-
-"""
-    ACE
-
-Calculates the averaged cross-entropy (logloss) for classification.
-
-# Arguments:
-* `eps::Float64`: Prevents returning `Inf` if `p = 0`.
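
For intuition about the quantity being accumulated, a toy check (values invented; `eps` plays the role of the `eps` argument above):

```julia
# Illustrative only: one sample, two classes, true class 1 (0-based).
pred   = [0.2, 0.8]                # predicted class probabilities
label  = 1.0                       # 0-based target class from the provider
target = Int(label) + 1            # 1-based Julia index
eps    = 1.0e-8
ace    = -log(pred[target] + eps)  # ≈ 0.2231; ACE averages this over samples
```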
-""" -mutable struct ACE <: AbstractEvalMetric - ace_sum :: Float64 - n_sample :: Int - eps :: Float64 - - ACE(eps=1.0e-8) = new(0.0, 0, eps) -end - -get(metric::ACE) = [(:ACE, - metric.ace_sum / metric.n_sample)] - -function reset!(metric::ACE) - metric.ace_sum = 0.0 - metric.n_sample = 0 -end - -hasNDArraySupport(::ACE) = Val{false}() - -function _update_single_output(metric :: ACE, label :: Array{T}, pred :: Array{T}) where T - eps = convert(T, metric.eps) - # Samples are stored in the last dimension - @assert size(label, ndims(label)) == size(pred, ndims(pred)) - if size(label) == size(pred) # simply calculate the cross entropy of the probabilities - for (q, p) in zip(pred, label) - # p == true probability - # q == "unnatural" probability - metric.ace_sum += p * log(q + eps) - metric.n_sample += 1 - end - elseif ndims(pred) == 4 - labels = reshape(label, size(pred, 1, 2)..., 1, size(pred, 4)) - for sample in 1:size(labels, 4) - for j in 1:size(labels, 2) - for i in 1:size(labels, 1) - # Cross-entropy reduces to -(ln(p_1)*0 + ln(p_2)*1) for classification - # Since we can only target labels right now this is the only thing we can do. - target = Int(labels[i, j, 1, sample]) + 1 # klasses are 0...k-1 => julia indexing - p_k = pred[i, j, target, sample] - metric.ace_sum += log(p_k + eps) - metric.n_sample += 1 - end - end - end - elseif ndims(pred) == 2 # 1-dimensional case - for sample in 1:size(label, 1) - target = Int(label[sample]) + 1 # 0-based indexing => 1-based indexing - p_k = pred[target, sample] - metric.ace_sum += log(p_k +eps) - metric.n_sample += 1 - end - else - error("Can't handle prediction with dimensions $(ndims(pred)).") - end -end - -""" - MultiACE - -Calculates the averaged cross-entropy per class and overall (see [`ACE`](@ref)). -This can be used to quantify the influence of different classes on the overall loss. -""" -mutable struct MultiACE <: AbstractEvalMetric - aces :: Vector{Float64} - counts :: Vector{Int} - eps :: Float64 - - MultiACE(nclasses, eps=1.0e-8) = new(Base.zeros(nclasses), Base.zeros(Int, nclasses), eps) -end - -function get(metric :: MultiACE) - aces = [(Symbol("ACE_$(i-0)"), - metric.aces[i] / metric.counts[i]) for i in 1:length(metric.aces)] - push!(aces, (:ACE, - Base.sum(metric.aces) / Base.sum(metric.counts))) - return aces -end - -function reset!(metric :: MultiACE) - metric.aces = Base.zero(metric.aces) - metric.counts = Base.zero(metric.counts) -end - -hasNDArraySupport(::MultiACE) = Val{false}() - -function _update_single_output(metric :: MultiACE, label :: Array{T}, pred :: Array{T}) where T - eps = convert(T, metric.eps) - # Samples are stored in the last dimension - @assert size(label, ndims(label)) == size(pred, ndims(pred)) - @assert size(metric.aces) == size(metric.counts) - if size(label) == size(pred) # simply calculate the cross entropy of the probabilities - for k in 1:length(metric.aces) - kpred = view(pred, ntuple(d->:, ndims(pred) - 2)..., k, :) - klabel = view(label, ntuple(d->:, ndims(label) - 2)..., k, :) - for (q, p) in zip(kpred, klabel) - # p == true probability - # q == "unnatural" probability - metric.aces[k] += p * log(q + eps) - metric.counts[k] += 1 - end - end - elseif ndims(pred) == 4 - labels = reshape(label, size(pred, 1, 2)..., 1, size(pred, 4)) - for sample in 1:size(labels, 4) - for j in 1:size(labels, 2) - for i in 1:size(labels, 1) - # Cross-entropy reduces to -(ln(p_1)*0 + ln(p_2)*1) for classification - # Since we can only target labels right now this is the only thing we can do. 
-          target = Int(labels[i, j, 1, sample]) + 1  # klasses are 0...k-1 => julia indexing
-          p_k = pred[i, j, target, sample]
-
-          metric.aces[target] += log(p_k + eps)
-          metric.counts[target] += 1
-        end
-      end
-    end
-  elseif ndims(pred) == 2
-    for sample in 1:size(label, 1)
-      target = Int(label[sample]) + 1
-      p_k = pred[target, sample]
-      metric.aces[target] += log(p_k + eps)
-      metric.counts[target] += 1
-    end
-  else
-    error("Can't handle prediction with dimensions $(ndims(pred)).")
-  end
-end
diff --git a/julia/src/model.jl b/julia/src/model.jl
deleted file mode 100644
index 0324edd1cdc6..000000000000
--- a/julia/src/model.jl
+++ /dev/null
@@ -1,673 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""
-    AbstractModel
-
-The abstract super type of all models in MXNet.jl.
-"""
-abstract type AbstractModel end
-
-"""
-    FeedForward
-
-The feedforward model provides a convenient interface to train and predict on
-feedforward architectures like multi-layer perceptrons (MLPs), ConvNets, etc. There is no
-explicit handling of a *time index*, but it is relatively easy to implement
-unrolled RNN / LSTM under this framework (*TODO*: add example). For models
-that handle sequential data explicitly, please use *TODO*...
-"""
-mutable struct FeedForward <: AbstractModel
-  arch :: SymbolicNode
-  ctx  :: Vector{Context}
-
-  arg_params :: Dict{Symbol}
-  aux_params :: Dict{Symbol}
-
-  pred_exec :: Union{Executor,Cvoid}
-
-  # leave the rest fields undefined
-  FeedForward(arch::SymbolicNode, ctx::Vector{Context}) = new(arch, ctx)
-  FeedForward(arch::SymbolicNode, ctx::Context) = new(arch, [ctx])
-end
-
-"""
-Get a split of `batch_size` into `n_split` pieces for data parallelization. Returns a vector
-of length `n_split`, with each entry a `UnitRange{Int}` indicating the slice index for that
-piece.
-"""
-function _split_inputs(batch_size::Int, n_split::Int)
-  @assert(batch_size >= n_split)
-  per_split = floor(Int, batch_size / n_split)
-  counts    = Base.zeros(Int, n_split) .+ per_split
-  extra     = batch_size - Base.sum(counts)
-  counts[1:extra] .+= 1
-
-  cum = [0, cumsum(counts)...]
-  idx = [cum[i-1]+1:cum[i] for i = 2:length(cum)]
-  return idx
-end
-
-"""
-    FeedForward(arch :: SymbolicNode, ctx)
-
-# Arguments:
-* `arch`: the architecture of the network constructed using the symbolic API.
-* `ctx`: the devices on which this model should do computation. It could be a single `Context`
-  or a list of `Context` objects. In the latter case, data parallelization will be used
-  for training. If no context is provided, the default context `cpu()` will be used.
-"""
-FeedForward(arch::SymbolicNode; context::Union{Context,Vector{Context}} = [cpu()]) =
-  FeedForward(arch, context)
-
-"""
-    init_model(self, initializer; overwrite=false, input_shapes...)
-
-Initialize the weights in the model.
-
-This method will be called automatically when training a model. So there is usually no
-need to call this method unless one needs to inspect a model with only randomly initialized
-weights.
-
-# Arguments:
-* `self::FeedForward`: the model to be initialized.
-* `initializer::AbstractInitializer`: an initializer describing how the weights should be initialized.
-* `overwrite::Bool`: keyword argument, force initialization even when weights already exist.
-* `input_shapes`: the shape of all data and label inputs to this model, given as keyword arguments.
-  For example, `data=(28,28,1,100), label=(100,)`.
-"""
-function init_model(self::FeedForward, initializer::AbstractInitializer; overwrite::Bool=false, input_shapes...)
-  # all arg names, including data, label, and parameters
-  arg_names = list_arguments(self.arch)
-
-  input_names = [x[1] for x in input_shapes]
-
-  param_names = setdiff(arg_names, input_names)
-  aux_names   = list_auxiliary_states(self.arch)
-
-  arg_shapes, out_shapes, aux_shapes = infer_shape(self.arch; input_shapes...)
-
-  # If target dict is not yet defined set a temporary one
-  if !isdefined(self, :arg_params)
-    self.arg_params = Dict{Symbol, NDArray}()
-  end
-  if !isdefined(self, :aux_params)
-    self.aux_params = Dict{Symbol, NDArray}()
-  end
-
-  arg_params = Dict{Symbol,NDArray}()
-  aux_params = Dict{Symbol,NDArray}()
-
-  for (name, shape) in filter(x -> in(x[1], param_names), zip(arg_names, arg_shapes))
-    if haskey(self.arg_params, name)
-      if shape == size(self.arg_params[name])
-        arg_params[name] = self.arg_params[name]
-        continue
-      else
-        @warn("Shape mismatch for $name. Overwriting with new one.")
-        delete!(self.arg_params, name)
-      end
-    end
-    arg_params[name] = NDArray(undef, shape)
-  end
-
-  for (name, shape) in zip(aux_names, aux_shapes)
-    if haskey(self.aux_params, name)
-      if shape == size(self.aux_params[name])
-        aux_params[name] = self.aux_params[name]
-        continue
-      else
-        @warn("Shape mismatch for $name. Overwriting with new one.")
-        delete!(self.aux_params, name)
-      end
-    end
-    aux_params[name] = NDArray(undef, shape)
-  end
-
-  for (k,v) in arg_params
-    if overwrite || !haskey(self.arg_params, k)
-      init(initializer, k, v)
-    end
-  end
-  for (k,v) in aux_params
-    if overwrite || !haskey(self.aux_params, k)
-      init(initializer, k, v)
-    end
-  end
-
-  self.arg_params = arg_params
-  self.aux_params = aux_params
-
-  return (arg_names, param_names, aux_names)
-end
-
-function _setup_predictor(self::FeedForward, overwrite::Bool=false; verbosity::Integer = 1, data_shapes...)
-  if !isdefined(self, :pred_exec) || isa(self.pred_exec, Cvoid) || overwrite
-    if !isdefined(self, :arg_params) || !isdefined(self, :aux_params)
-      @assert(false, "Model weights not defined, please init or train the model, or load from file")
-    end
-
-    # the predictor uses only the first device
-    self.pred_exec = simple_bind(self.arch, self.ctx[1]; grad_req=GRAD_NOP, data_shapes...)
-    dbg_str = mx.debug_str(self.pred_exec)
-    verbosity >= 1 && @info(string("TempSpace: ", split(dbg_str, ['\n'])[end-2]..., " on ", self.ctx[1]))
-    copy_params_from(self.pred_exec, self.arg_params, self.aux_params)
-  else
-    # make sure the new setup is compatible with the existing one
-    for (d_name, d_shape) in data_shapes
-      @assert(d_shape == size(self.pred_exec.arg_dict[d_name]),
-              "Shape of $d_name mismatches the existing predictor; use overwrite=true to overwrite the existing predictor")
-    end
-  end
-end
-
-"""
-    predict(self, data; overwrite=false, callback=nothing)
-
-Predict using an existing model.
The model should already be initialized, trained, or loaded from
-a checkpoint. There is an overloaded function that allows passing the callback as the first argument,
-so it is possible to do
-
-```julia
-predict(model, data) do batch_output
-  # consume or write batch_output to file
-end
-```
-
-# Arguments:
-* `self::FeedForward`: the model.
-* `data::AbstractDataProvider`: the data to perform prediction on.
-* `overwrite::Bool`: an `Executor` is initialized the first time predict is called. The memory
-  allocation of the `Executor` depends on the mini-batch size of the test
-  data provider. If you call predict twice with data providers of the same batch size,
-  then the executor can potentially be re-used. So, if `overwrite` is false,
-  we will try to re-use it, and raise an error if the batch size changed. If `overwrite`
-  is true (the default), a new `Executor` will be created to replace the old one.
-* `verbosity::Integer`: Determines the verbosity of the print messages. Higher numbers
-  lead to more verbose printing. Acceptable values are
-  - `0`: Do not print anything during prediction
-  - `1`: Print allocation information during prediction
-
-!!! note
-    Prediction is computationally much less costly than training, so the bottleneck sometimes becomes the IO
-    for copying mini-batches of data. Since there is no concern about convergence in prediction, it is better
-    to set the mini-batch size as large as possible (limited by your device memory) if prediction speed is a
-    concern.
-
-    For the same reason, currently prediction will only use the first device even if multiple devices are
-    provided to construct the model.
-
-!!! note
-    If you perform further training after prediction, note that the weights are not automatically
-    synchronized if `overwrite` is set to false and the old predictor is re-used. In this case,
-    setting `overwrite` to true (the default) will re-initialize the predictor the next time you call
-    predict and synchronize the weights again.
-
-See also [`train`](@ref), [`fit`](@ref), [`init_model`](@ref), and [`load_checkpoint`](@ref)
-"""
-function predict(callback::Function, self::FeedForward, data::AbstractDataProvider;
-                 overwrite::Bool = true, verbosity::Integer = 1)
-  predict(self, data; overwrite = overwrite, callback=callback, verbosity = verbosity)
-end
-function predict(self::FeedForward, data::AbstractDataProvider;
-                 overwrite::Bool = true, callback::Union{Function,Cvoid}=nothing, verbosity::Integer = 1)
-  data_shapes = provide_data(data)
-  data_names  = [x[1] for x in data_shapes]
-  _setup_predictor(self, overwrite; verbosity = verbosity, data_shapes...)
-
-  batch_size  = get_batch_size(data)
-  data_arrays = [self.pred_exec.arg_dict[name] for name in data_names]
-  output_list = [Array{MX_float}[] for i=1:length(self.pred_exec.outputs)]
-  for batch in eachbatch(data)
-    load_data!(data, batch, data_arrays)
-    forward(self.pred_exec, is_train=false)
-    if isa(callback, Cvoid)
-      # no callback, accumulate the data and return at the end
-      for (o_list, o_nd) in zip(output_list, self.pred_exec.outputs)
-        push!(o_list, copy(slice(o_nd, 1:count_samples(data, batch))))
-      end
-    else
-      outputs = self.pred_exec.outputs
-      if length(outputs) == 1
-        outputs = outputs[1]
-      end
-      callback(outputs)
-    end
-  end
-
-  if !isa(callback, Cvoid)
-    # a callback exists, do not accumulate data
-    return nothing
-  end
-
-  if isempty(output_list)
-    # maybe the model does not have outputs
-    return nothing
-  end
-  if isempty(output_list[1])
-    # maybe there is no output because the data is empty
-    return length(output_list) == 1 ?
output_list[1] : output_list - end - - # concatenate along mini-batches - output_arrays = [cat(x..., dims = ndims(x[1])) for x in output_list] - if length(output_arrays) == 1 - # only 1 output, return it directly, instead of a list - output_arrays = output_arrays[1] - end - return output_arrays -end - -function _init_model(self::FeedForward, data::AbstractDataProvider, - initializer::AbstractInitializer, overwrite::Bool) - init_model(self, initializer; overwrite=overwrite, - [provide_data(data)..., provide_label(data)...]...) -end - -function _create_kvstore(kv_type::Symbol, num_device::Int, arg_params::Dict{Symbol}, verbosity::Int) - if num_device == 1 && !occursin(r"dist", string(kv_type)) - return nothing - else - if kv_type == :local - max_size = maximum([prod(size(param)) for (k,param) in arg_params]) - if max_size < 1024 * 1024 * 16 - kv_type = :local_update_cpu - else - kv_type = :local_allreduce_cpu - end - verbosity >= 2 && @info("Auto-select kvstore type = $kv_type") - end - return KVStore(kv_type) - end -end - -@defstruct TrainingOptions ( - initializer :: AbstractInitializer = UniformInitializer(0.01), - n_epoch :: Int = 10, - eval_data :: Union{Cvoid,AbstractDataProvider} = nothing, - eval_metric :: AbstractEvalMetric = Accuracy(), - kvstore :: Union{Symbol,KVStore} = :local, - force_init :: Bool = false, - callbacks :: Vector{AbstractCallback} = AbstractCallback[], - verbosity :: Int = 3, - η_decay :: Symbol = :epoch, -) - -function _invoke_callbacks(m::FeedForward, callbacks::Vector{AbstractCallback}, - state::OptimizationState, type_filter::Type; - metric = Vector{Tuple{Symbol,Real}}()) - map(callbacks) do cb - !isa(cb, type_filter) && return - - # epoch callback have extra access to the model object - type_filter == AbstractEpochCallback && return cb(m, state, metric) - - cb(state) - end -end - -""" - train(model :: FeedForward, ...) - -Alias to [`fit`](@ref). -""" -train(m::FeedForward, opt::AbstractOptimizer, data::AbstractDataProvider; kw...) = - fit(m, opt, data; kw...) - -""" - fit(model::FeedForward, optimizer, data; kwargs...) - -Train the `model` on `data` with the `optimizer`. - -* `model::FeedForward`: the model to be trained. -* `optimizer::AbstractOptimizer`: the optimization algorithm to use. -* `data::AbstractDataProvider`: the training data provider. -* `n_epoch::Int`: default 10, the number of full data-passes to run. -* `eval_data::AbstractDataProvider`: keyword argument, default `nothing`. The data provider for - the validation set. -* `eval_metric::AbstractEvalMetric`: keyword argument, default [`Accuracy()`](@ref). The metric used - to evaluate the training performance. If `eval_data` is provided, the same metric is also - calculated on the validation set. -* `kvstore`: keyword argument, default `:local`. The key-value store used to synchronize gradients - and parameters when multiple devices are used for training. - :type kvstore: `KVStore` or `Symbol` -* `initializer::AbstractInitializer`: keyword argument, default `UniformInitializer(0.01)`. -* `force_init::Bool`: keyword argument, default false. By default, the random initialization using the - provided `initializer` will be skipped if the model weights already exists, maybe from a previous - call to [`train`](@ref) or an explicit call to [`init_model`](@ref) or [`load_checkpoint`](@ref). When - this option is set, it will always do random initialization at the begining of training. -* `callbacks::Vector{AbstractCallback}`: keyword argument, default `[]`. 
Callbacks to be invoked at each epoch or mini-batch,
-  see `AbstractCallback`.
-* `verbosity::Int`: Determines the verbosity of the print messages. Higher numbers
-  lead to more verbose printing. Acceptable values are
-  - `0`: Do not print anything during training
-  - `1`: Print starting and final messages
-  - `2`: Print one-time messages and a message at the start of each epoch
-  - `3`: Print a summary of the training and validation accuracy for each epoch
-* `η_decay::Symbol`: `:epoch` or `:batch`, decay the learning rate per epoch or per batch.
-"""
-function fit(self::FeedForward, optimizer::AbstractOptimizer, data::AbstractDataProvider;
-             kwargs...)
-  opts = TrainingOptions(; kwargs...)
-
-  opts.verbosity >= 1 && @info("Start training on $(self.ctx)")
-
-  batch_size = get_batch_size(data)
-  num_dev    = length(self.ctx)
-  slices     = _split_inputs(batch_size, num_dev)
-
-  # initialize parameters
-  opts.verbosity >= 2 && @info("Initializing parameters...")
-  arg_names, param_names, aux_names = _init_model(self, data, opts.initializer, opts.force_init)
-
-  # setup kvstore
-  kvstore = opts.kvstore
-  if isa(kvstore, Symbol)
-    opts.verbosity >= 2 && @info("Creating KVStore...")
-    kvstore = _create_kvstore(kvstore, length(self.ctx), self.arg_params, opts.verbosity)
-  end
-
-  update_on_kvstore = true
-  if isa(kvstore, Cvoid) || occursin(r"local_allreduce", string(get_type(kvstore)))
-    update_on_kvstore = false
-  end
-
-  # get the grad attribute to allow for freezing
-  freeze_names = Symbol[]
-  for (attr, value) in list_all_attr(self.arch)
-    sattr = string(attr)
-    if endswith(sattr, "grad") && value == "freeze"
-      push!(freeze_names, Symbol(sattr[1:end-5]))
-    end
-  end
-  # These indices must match the parameter ids used in the update loop below (idx = 1:length(param_names)).
-  freeze_idx = filter(i -> in(param_names[i], freeze_names), 1:length(param_names))
-
-  # Setup grad_req as a dictionary
-  grad_req = Dict{Symbol,GRAD_REQ}()
-  for param in param_names
-    if in(param, freeze_names)
-      grad_req[param] = GRAD_NOP
-    else
-      grad_req[param] = GRAD_WRITE
-    end
-  end
-
-  train_execs = Array{Executor}(undef, num_dev)
-  for i = 1:num_dev
-    data_shapes  = Dict(map((x) -> x[1] => tuple(x[2][1:end-1]...,length(slices[i])), provide_data(data)))
-    label_shapes = Dict(map((x) -> x[1] => tuple(x[2][1:end-1]...,length(slices[i])), provide_label(data)))
-    train_execs[i] = simple_bind(self.arch, self.ctx[i]; grad_req=grad_req, data_shapes..., label_shapes...)
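-    # the executor bound above sees only this device's share of the mini-batch:
-    # its data/label shapes end with length(slices[i]) instead of batch_size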
- dbg_str = mx.debug_str(train_execs[i]) - opts.verbosity >= 2 && @info(string("TempSpace: ", split(dbg_str, ['\n'])[end-2]..., " on ", self.ctx[i])) - - copy_params_from(train_execs[i], self.arg_params, self.aux_params) - end - - # set up input data structures - data_names = [x[1] for x in provide_data(data)] - label_names = [x[1] for x in provide_label(data)] - - data_arrays = [SlicedNDArray[(slices[i], exec.arg_dict[name]) for (i,exec) in enumerate(train_execs)] - for name in data_names] - label_arrays = [SlicedNDArray[(slices[i], exec.arg_dict[name]) for (i,exec) in enumerate(train_execs)] - for name in label_names] - - param_idx = filter(i -> in(arg_names[i], param_names), 1:length(arg_names)) - - param_arrays = [NDArray[exec.arg_arrays[i] for exec in train_execs] for i in param_idx] - grad_arrays = [NDArray[exec.grad_arrays[i] for exec in train_execs] for i in param_idx] - aux_arrays = [NDArray[exec.aux_arrays[i] for exec in train_execs] for i = 1:length(aux_names)] - - op_state = OptimizationState(batch_size) - # set up the gradient rescaling if user not set - iszero(optimizer.scale) && (optimizer.scale = 1 / batch_size) - - if !update_on_kvstore - updater = getupdater(optimizer) - end - - if !isa(kvstore, Cvoid) - if update_on_kvstore - set_optimizer(kvstore, optimizer) - end - - opts.verbosity >= 2 && @info("Initializing KVStore...") - # init kv with gradients - for idx = 1:length(param_arrays) - param_on_devs = param_arrays[idx] - - init!(kvstore, idx, self.arg_params[param_names[idx]]) - - if update_on_kvstore - # pull weights back - pull!(kvstore, idx, param_on_devs, priority=-idx) - end - end - end - - # set up output and labels in CPU for evaluation metric - output_shapes = [tuple(size(x)[1:end-1]...,batch_size) for x in train_execs[1].outputs] - cpu_dev = Context(CPU) - cpu_output_arrays = [NDArray(undef, shape, ctx = cpu_dev) for shape in output_shapes] - cpu_label_arrays = [NDArray(undef, shape, ctx = cpu_dev) for (name,shape) in provide_label(data)] - - # invoke callbacks on epoch 0 - _invoke_callbacks(self, opts.callbacks, op_state, AbstractEpochCallback) - - opts.verbosity >= 2 && @info("Start training...") - for i_epoch = 1:opts.n_epoch - time_start = time() - reset!(opts.eval_metric) - - op_state.curr_epoch = i_epoch - op_state.curr_batch = 0 - - # invoke callbacks on iteration 0 - _invoke_callbacks(self, opts.callbacks, op_state, AbstractBatchCallback) - - for batch in eachbatch(data) - load_data!(data, batch, data_arrays) - load_label!(data, batch, label_arrays) - - # forward and backward - for (texec, islice) in zip(train_execs, slices) - forward(texec, is_train=true) - - # copy outputs into cpu ndarray, for evaluation metric - for (cpu_out, dev_out) in zip(cpu_output_arrays, texec.outputs) - copy!(slice(cpu_out, islice), dev_out) - end - - backward(texec) - end - - op_state.curr_iter += 1 - op_state.curr_batch += 1 - - # update parameters - for idx = 1:length(param_names) - if in(idx, freeze_idx) - continue # Skip parameter update entirely - end - - # gradient synchronization - if !isa(kvstore, Cvoid) - # push gradient, priority is negative index - push!(kvstore, idx, grad_arrays[idx], priority=-idx) - if update_on_kvstore - # pull back the weights - pull!(kvstore, idx, param_arrays[idx], priority=-idx) - else - # pull back the sum-ed gradients, to the same locations - pull!(kvstore, idx, grad_arrays[idx], priority=-idx) - end - end - - if !update_on_kvstore - # manual updating - for i_dev = 1:num_dev - # create a fake index, so that the updater create states - # 
for different param AND different devices, TODO(mli) - # use a better solution later - fake_idx = idx * num_dev + i_dev - updater(fake_idx, grad_arrays[idx][i_dev], param_arrays[idx][i_dev]) - end - end - end - - # trigger learning rate decay - opts.η_decay == :batch && update!(optimizer.η_sched) - - # invoke callbacks after finishing each iteration - _invoke_callbacks(self, opts.callbacks, op_state, AbstractBatchCallback) - - # update evaluation metric on training set - load_label!(data, batch, cpu_label_arrays) - update!(opts.eval_metric, cpu_label_arrays, cpu_output_arrays) - end # end of one epoch - - time_stop = time() - metric = get(opts.eval_metric) - opts.verbosity >= 2 && @info(format("== Epoch {1:0>3d}/{2:0>3d} ==========", i_epoch, opts.n_epoch)) - if opts.verbosity >= 3 - @info("## Training summary") - for (name, value) in metric - @info(format("{1:>18s} = {2:.4f}", string(name), value)) - end - @info(format("{1:>18s} = {2:.4f} seconds", "time", time_stop-time_start)) - end - - # evaluation on validation set - if !isa(opts.eval_data, Cvoid) - # because we are re-using the memory allocated for the training network, - # the batch_size of the validation dataset must be the same as the training - # batch_size - @assert(get_batch_size(opts.eval_data) == batch_size) - - reset!(opts.eval_metric) - for batch in eachbatch(opts.eval_data) - load_data!(opts.eval_data, batch, data_arrays) - - # forward and backward - for (texec, islice) in zip(train_execs, slices) - forward(texec, is_train=true) - - # copy outputs into cpu ndarray, for evaluation metric - for (cpu_out, dev_out) in zip(cpu_output_arrays, texec.outputs) - copy!(slice(cpu_out, islice), dev_out) - end - end - load_label!(opts.eval_data, batch, cpu_label_arrays) - update!(opts.eval_metric, cpu_label_arrays, cpu_output_arrays) - end - - if opts.verbosity >= 3 - @info("## Validation summary") - for (name, value) in get(opts.eval_metric) - @info(format("{1:>18s} = {2:.4f}", string(name), value)) - end - end - end - - if i_epoch == opts.n_epoch || any(x->isa(x, AbstractEpochCallback), opts.callbacks) - # copy data back to cpu - for (name, weights) in zip(param_names, param_arrays) - # average parameters across devices - weight = +([copy(w, cpu()) for w in weights]...) / length(weights) - copy!(self.arg_params[name], weight) - end - for (name, aux_devs) in zip(aux_names, aux_arrays) - aux_avg = +([copy(aux, cpu()) for aux in aux_devs]...) 
/ length(aux_devs) - copy!(self.aux_params[name], aux_avg) - end - end - - # trigger learning rate decay - opts.η_decay == :epoch && update!(optimizer.η_sched) - - _invoke_callbacks(self, opts.callbacks, op_state, AbstractEpochCallback; metric=metric) - end # end of all epochs - - opts.verbosity >= 1 && @info("Finish training on $(self.ctx)") - nothing -end - -save_checkpoint(self::FeedForward, prefix::AbstractString, state::OptimizationState) = - save_checkpoint(self.arch, self.arg_params, self.aux_params, prefix, state.curr_epoch) - -function save_checkpoint(sym::SymbolicNode, arg_params::Dict{Symbol}, - aux_params::Dict{Symbol}, prefix::AbstractString, epoch::Int) - save("$prefix-symbol.json", sym) - save_dict = Dict{Symbol,NDArray}( - Symbol("arg:$(x[1])") => x[2] for x in arg_params - ) - if !isempty(aux_params) - merge!(save_dict, Dict(map((x) -> Symbol("aux:$(x[1])") => x[2], aux_params))) - end - save_filename = format("{1}-{2:04d}.params", prefix, epoch) - save(save_filename, save_dict) - @info("Saved checkpoint to '$save_filename'") -end - -function load_checkpoint(prefix::AbstractString, epoch::Int) - arch = load("$prefix-symbol.json", SymbolicNode) - saved_dict = load(format("{1}-{2:04d}.params", prefix, epoch), NDArray) - arg_params = Dict{Symbol,Any}() - aux_params = Dict{Symbol,Any}() - for (k,v) in saved_dict - tp, name = split(string(k), ':') - name = Symbol(name) - if tp == "arg" - arg_params[name] = v - else - aux_params[name] = v - end - end - - return (arch, arg_params, aux_params) -end - -""" - load_checkpoint(prefix, epoch, ::mx.FeedForward; context) - -Load a mx.FeedForward model from the checkpoint *prefix*, *epoch* and optionally provide a context. -""" -function load_checkpoint(prefix::AbstractString, epoch::Int, ::Type{FeedForward}; context = nothing) - arch, arg_params, aux_params = load_checkpoint(prefix, epoch) - model = FeedForward(arch, context = context) - model.arg_params = arg_params - model.aux_params = aux_params - return model -end - -function load_checkpoint(self::FeedForward, prefix::AbstractString, epoch::Int; - overwrite::Bool = true, allow_different_arch::Bool = false) - if isdefined(self, :arg_params) && isdefined(self, :aux_params) && !overwrite - @info("model weights already exists, skip loading... (call with overwrite=true if needed)") - return self - end - - arch, arg_params, aux_params = load_checkpoint(prefix, epoch) - if !allow_different_arch - # TODO: is there better way to compare two symbols - @assert(to_json(self.arch) == to_json(arch), "Cannot load from a checkpoint with different network architecture") - end - self.arg_params = arg_params - self.aux_params = aux_params - return self -end diff --git a/julia/src/name.jl b/julia/src/name.jl deleted file mode 100644 index 8180886c869c..000000000000 --- a/julia/src/name.jl +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. - -abstract type AbstractNameManager end -const NameType = Union{Base.Symbol, AbstractString} -const NameCounter = Dict{Base.Symbol, Int} - -import Base: get! - -# Default implementation for generating a name for a symbol. -# When a name is specified by the user, it will be used. Otherwise, a name -# is automatically generated based on the hint string. -function _default_get_name!(counter :: NameCounter, name :: NameType, hint :: NameType) - if isa(name, Base.Symbol) || !isempty(name) - return Symbol(name) - end - - hint = Symbol(hint) - if !haskey(counter, hint) - counter[hint] = 0 - end - name = Symbol("$hint$(counter[hint])") - counter[hint] += 1 - return name -end - -mutable struct BasicNameManager <: AbstractNameManager - counter :: NameCounter -end -BasicNameManager() = BasicNameManager(NameCounter()) - -function get!(manager :: BasicNameManager, name :: NameType, hint :: NameType) - _default_get_name!(manager.counter, name, hint) -end - -mutable struct PrefixNameManager <: AbstractNameManager - prefix :: Base.Symbol - counter :: NameCounter -end -PrefixNameManager(prefix :: NameType) = PrefixNameManager(Symbol(prefix), NameCounter()) - -function get!(manager :: PrefixNameManager, name :: NameType, hint :: NameType) - name = _default_get_name!(manager.counter, name, hint) - return Symbol("$(manager.prefix)$name") -end - -DEFAULT_NAME_MANAGER = BasicNameManager() diff --git a/julia/src/ndarray.jl b/julia/src/ndarray.jl deleted file mode 100644 index 256fdb4f5d65..000000000000 --- a/julia/src/ndarray.jl +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -include("ndarray/type.jl") # type def and constructors -include("ndarray/context.jl") -include("ndarray/show.jl") -include("ndarray/remap.jl") # provide @_remap util -include("ndarray/array.jl") -include("ndarray/arithmetic.jl") -include("ndarray/comparison.jl") -include("ndarray/io.jl") # save/load and synchronization utils -include("ndarray/reduction.jl") -include("ndarray/statistic.jl") -include("ndarray/linalg.jl") -include("ndarray/trig.jl") -include("ndarray/activation.jl") -include("ndarray/autoimport.jl") # auto import operators from libmxnet diff --git a/julia/src/ndarray/activation.jl b/julia/src/ndarray/activation.jl deleted file mode 100644 index 8dd31aac89f4..000000000000 --- a/julia/src/ndarray/activation.jl +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# activation functions - -@doc doc""" - σ.(x::NDArray) - sigmoid.(x::NDArray) - -Computes sigmoid of x element-wise. - -```math -σ(x) = \frac{1}{(1 + exp(-x))} -``` - -The storage type of `sigmoid` output is always dense. -""" -function σ end -const sigmoid = σ -_nddoc[:σ] = false -@_remap broadcasted(::typeof(σ), x::NDArray) sigmoid(x) - -@doc doc""" - relu.(x::NDArray) - -Computes rectified linear. - -```math -\max(x, 0) -``` -""" -function relu end -_nddoc[:relu] = false -@_remap broadcasted(::typeof(relu), x::NDArray) relu(x) - -@doc doc""" - softmax.(x::NDArray, [dim = ndims(x)]) - -Applies the softmax function. - -The resulting array contains elements in the range `(0, 1)` -and the elements along the given axis sum up to 1. - -```math -softmax(\mathbf{z})_j = \frac{e^{z_j}}{\sum_{k=1}^K e^{z_k}} -``` -""" -function softmax end -_nddoc[:softmax] = false -@_remap broadcasted(::typeof(softmax), x::NDArray) softmax(x; axis = -ndims(x)) -@_remap broadcasted(::typeof(softmax), x::NDArray, dim::Int) softmax(x; axis = -dim) - -""" - log_softmax.(x::NDArray, [dim = ndims(x)]) - -Computes the log softmax of the input. -This is equivalent to computing softmax followed by log. - -julia> x -2×3 mx.NDArray{Float64,2} @ CPU0: - 1.0 2.0 0.1 - 0.1 2.0 1.0 - -julia> mx.log_softmax.(x) -2×3 mx.NDArray{Float64,2} @ CPU0: - -1.41703 -0.41703 -2.31703 - -2.31703 -0.41703 -1.41703 -""" -function log_softmax end -_nddoc[:log_softmax] = false -@_remap broadcasted(::typeof(log_softmax), x::NDArray) log_softmax(x; axis = -ndims(x)) -@_remap broadcasted(::typeof(log_softmax), x::NDArray, dim::Int) log_softmax(x; axis = -dim) - diff --git a/julia/src/ndarray/arithmetic.jl b/julia/src/ndarray/arithmetic.jl deleted file mode 100644 index 4c467a2d96dd..000000000000 --- a/julia/src/ndarray/arithmetic.jl +++ /dev/null @@ -1,303 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import Base: + - -""" - +(args...) - .+(args...) - -Summation. Multiple arguments of either scalar or `NDArray` could be -added together. Note at least the first or second argument needs to be an -`NDArray` to avoid ambiguity of built-in summation. 
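-
-For example (a minimal sketch, using only the methods defined below):
-
-```julia
-x = mx.ones(2, 3)
-y = x + 1 + x   # scalar and NDArray summands can be mixed
-```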
-""" -+(x::NDArray) = x -+(x::NDArray, y::NDArray) = _plus(x, y) -+(x::NDArray, y::Real) = _plus_scalar(x, scalar = y) -+(y::Real, x::NDArray) = _plus_scalar(x, scalar = y) - -broadcasted(::typeof(+), x::NDArray{T,N}, y::NDArray{T,M}) where {T,N,M} = - _broadcast_add(x, y) - -""" - sub_from!(dst::NDArray, args::NDArrayOrReal...) - -Subtract a bunch of arguments from `dst`. Inplace updating. -""" -function sub_from!(dst::NDArray, arg::NDArrayOrReal) - @assert dst.writable - if isa(arg, Real) - _minus_scalar(dst, scalar = arg, out = dst) - else - _minus!(dst, arg) - end - dst -end - -import Base: - - -""" - -(x::NDArray) - -(x, y) - .-(x, y) - -Subtraction `x - y`, of scalar types or `NDArray`. -Or create the negative of `x`. -""" --(x::NDArray) = _mul_scalar(x, scalar = -one(eltype(x))) --(x::NDArray, y::NDArray) = _minus(x, y) --(x::NDArray, y::Real) = _minus_scalar(x, scalar = y) --(y::Real, x::NDArray) = _rminus_scalar(x, scalar = y) - -broadcasted(::typeof(-), x::NDArray{T,N}, y::NDArray{T,M}) where {T,N,M} = - _broadcast_minus(x, y) - -""" - mul_to!(dst::NDArray, arg::NDArrayOrReal) - -Elementwise multiplication into `dst` of either a scalar or an `NDArray` of the same shape. -Inplace updating. -""" -function mul_to!(dst::NDArray, arg::NDArrayOrReal) - @assert dst.writable - if isa(arg, Real) - _mul_scalar(dst, scalar = arg, out = dst) - else - _mul(dst, arg, out = dst) - end - dst -end - -import Base: * - -""" - .*(x, y) - -Elementwise multiplication for `NDArray`. -""" -*(x::NDArray, y::Real) = _mul_scalar(x, scalar = y) -*(y::Real, x::NDArray) = _mul_scalar(x, scalar = y) - -broadcasted(::typeof(*), x::NDArray{T,N}, y::NDArray{T,N}) where {T,N} = - _mul(x, y) -broadcasted(::typeof(*), x::NDArray{T,N}, y::NDArray{T,M}) where {T,N,M} = - _broadcast_mul(x, y) - -""" - *(A::NDArray, B::NDArray) - -Matrix/tensor multiplication. -""" -*(x::NDArray{T}, y::NDArray{T}) where T = x ⋅ y - -LinearAlgebra.adjoint(x::NDArray{T,1}) where T = transpose(x) -LinearAlgebra.adjoint(x::NDArray{T,2}) where T = transpose(x) - -""" - div_from!(dst::NDArray, arg::NDArrayOrReal) - -Elementwise divide a scalar or an `NDArray` of the same shape from `dst`. Inplace updating. -""" -function div_from!(dst::NDArray, arg::NDArrayOrReal) - @assert dst.writable - if isa(arg, Real) - _div_scalar(dst, scalar = arg, out = dst) - else - _div(dst, arg, out = dst) - end - dst -end - -function div_from!(dst::NDArray{T}, arg::Real) where {T<:Integer} - @assert dst.writable - @assert(round(T, arg) != zero(T), "Integer divided by zero") - _div_scalar(dst, scalar = arg, out = dst) - dst -end - -""" - rdiv_from!(x:: Real, y::NDArray) - -Elementwise divide a scalar by an `NDArray`. Inplace updating. -""" -function rdiv_from!(x::Real, y::NDArray) - @assert y.writable - _rdiv_scalar(y, scalar = x, out = y) - y -end - -import Base: / - -""" - ./(x::NDArray, y::NDArray) - ./(x::NDArray, y::Real) - ./(x::Real, y::NDArray) - -* Elementwise dividing an `NDArray` by a scalar or another `NDArray` -of the same shape. - -* Elementwise divide a scalar by an `NDArray`. - -* Matrix division (solving linear systems) is not implemented yet. 
-""" -/(x::NDArray, y::Real) = _div_scalar(x, scalar = y) - -broadcasted(::typeof(/), y::Real, x::NDArray) = _rdiv_scalar(x, scalar = y) -broadcasted(::typeof(/), x::NDArray{T,N}, y::NDArray{T,N}) where {T,N} = - _div(x, y) -broadcasted(::typeof(/), x::NDArray{T,N}, y::NDArray{T,M}) where {T,N,M} = - _broadcast_div(x, y) - -function broadcasted(::typeof(/), x::NDArray{T}, y::Real) where {T<:Integer} - @assert(round(T, y) != zero(T), "Integer divided by zero") - _div_scalar(x, scalar = y) -end - -""" - mod_from!(x::NDArray, y::NDArray) - mod_from!(x::NDArray, y::Real) - -Elementwise modulo for `NDArray`. -Inplace updating. -""" -mod_from!(x::NDArray, y::NDArray) = _mod!(x, y) -mod_from!(x::NDArray, y::Real) = _mod_scalar!(x, y) - -""" - rmod_from!(y::Real, x::NDArray) - -Elementwise modulo for `NDArray`. -Inplace updating. -""" -rmod_from!(y::Real, x::NDArray) = _rmod_scalar!(x, y) - -import Base: % - -""" - .%(x::NDArray, y::NDArray) - .%(x::NDArray, y::Real) - .%(x::Real, y::NDArray) - -Elementwise modulo for `NDArray`. -""" -%(x::NDArray, y::Real) = _mod_scalar(x, y) - -broadcasted(::typeof(%), y::Real, x::NDArray) = _rmod_scalar(x, y) -broadcasted(::typeof(%), x::NDArray{T,N}, y::NDArray{T,N}) where {T,N} = - _mod(x, y) -broadcasted(::typeof(%), x::NDArray{T,N}, y::NDArray{T,M}) where {T,N,M} = - _broadcast_mod(x, y) - -# document of `.^` is merged into SymbolicNode's - -broadcasted(::typeof(Base.literal_pow), ::typeof(^), x::NDArray, ::Val{s}) where {s} = - _power_scalar(x, scalar = s) -broadcasted(::typeof(^), x::NDArray, s::Real) = _power_scalar(x, scalar = s) -broadcasted(::typeof(^), s::Real, x::NDArray) = _rpower_scalar(x, scalar = s) - -broadcasted(::typeof(^), ::Irrational{:ℯ}, x::NDArray) = exp(x) -broadcasted(::typeof(^), x::NDArray, s::Irrational) = _power_scalar(x, scalar = s) -broadcasted(::typeof(^), s::Irrational, x::NDArray) = _rpower_scalar(x, scalar = s) - -broadcasted(::typeof(^), x::NDArray{T,N}, y::NDArray{T,N}) where {T,N} = - _power(x, y) -broadcasted(::typeof(^), x::NDArray{T,N}, y::NDArray{T,M}) where {T,N,M} = - _broadcast_power(x, y) - -""" - clamp(x::NDArray, lo, hi) - -Clamps (limits) the values in `NDArray`. -Given an interval, values outside the interval are clipped to the interval edges. -Clamping `x` between low `lo` and high `hi` would be: - -```julia -clamp(x, lo, hi) = max(min(x, lo), hi)) -``` - -The storage type of clip output depends on storage types of inputs and the -`lo`, `hi` parameter values: - -- clamp(default) -> default -- clamp(row_sparse, lo <= 0, hi >= 0) -> row_sparse -- clamp(csr, lo <= 0, hi >= 0) -> csr -- clamp(row_sparse, lo < 0, hi < 0) -> default -- clamp(row_sparse, lo > 0, hi > 0) -> default -- clamp(csr, lo < 0, hi < 0) -> csr -- clamp(csr, lo > 0, hi > 0) -> csr - -## Examples - -```jldoctest -julia> x = NDArray(1:9); - -julia> clamp(x, 2, 8)' -1×9 mx.NDArray{Int64,2} @ CPU0: - 2 2 3 4 5 6 7 8 8 - -julia> clamp(x, 8, 2)' -1×9 NDArray{Int64,2} @ CPU0: - 8 8 2 2 2 2 2 2 2 - ``` -""" -Base.clamp(x::NDArray, lo::Real, hi::Real) = _clamp(x, lo, hi) -@_remap _clamp(x::NDArray, lo::Real, hi::Real) clip(x; a_min = lo, a_max = hi) - -""" - clamp!(x::NDArray, lo, hi) - -See also [`clamp`](@ref). 
-""" -Base.clamp!(x::NDArray, lo::Real, hi::Real) = _clamp!(x, lo, hi) -@_remap _clamp!(x::NDArray, lo::Real, hi::Real) clip(x; a_min = lo, a_max = hi) - -################################################################################ -# remapping to solving type unstablility -################################################################################ - -@_remap _plus(x::NDArray, y::NDArray) _plus(x, y) -@_remap _plus!(x::NDArray, y::NDArray) _plus(x, y) - -@_remap _minus(x::NDArray, y::NDArray) _minus(x, y) -@_remap _minus!(x::NDArray, y::NDArray) _minus(x, y) - -@_remap _mod(x::NDArray, y::NDArray) _mod(x, y) -@_remap _mod!(x::NDArray, y::NDArray) _mod(x, y) - -@_remap _mod_scalar(x::NDArray, y::Real) _mod_scalar(x; scalar = y) -@_remap _mod_scalar!(x::NDArray, y::Real) _mod_scalar(x; scalar = y) - -@_remap _rmod_scalar(x::NDArray, y::Real) _rmod_scalar(x; scalar = y) -@_remap _rmod_scalar!(x::NDArray, y::Real) _rmod_scalar(x; scalar = y) - -@_remap _broadcast_add(x::NDArray, y::NDArray) broadcast_add(x, y) -@_remap _broadcast_add!(x::NDArray, y::NDArray) broadcast_add(x, y) - -@_remap _broadcast_minus(x::NDArray, y::NDArray) broadcast_minus(x, y) -@_remap _broadcast_minus!(x::NDArray, y::NDArray) broadcast_minus(x, y) - -@_remap _broadcast_mul(x::NDArray, y::NDArray) broadcast_mul(x, y) -@_remap _broadcast_mul!(x::NDArray, y::NDArray) broadcast_mul(x, y) - -@_remap _broadcast_div(x::NDArray, y::NDArray) broadcast_div(x, y) -@_remap _broadcast_div!(x::NDArray, y::NDArray) broadcast_div(x, y) - -@_remap _broadcast_mod(x::NDArray, y::NDArray) broadcast_mod(x, y) -@_remap _broadcast_mod!(x::NDArray, y::NDArray) broadcast_mod(x, y) - -@_remap _broadcast_power(x::NDArray, y::NDArray) broadcast_power(x, y) -@_remap _broadcast_power!(x::NDArray, y::NDArray) broadcast_power(x, y) diff --git a/julia/src/ndarray/array.jl b/julia/src/ndarray/array.jl deleted file mode 100644 index 2cd9c2e24f9c..000000000000 --- a/julia/src/ndarray/array.jl +++ /dev/null @@ -1,714 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Julia Array related interface - -""" - similar(x::NDArray; writable, ctx) - -Create an `NDArray` with similar shape, data type, -and context with the given one. -Note that the returned `NDArray` is uninitialized. -""" -Base.similar(x::NDArray{T,N}; writable = x.writable, ctx = context(x)) where {T,N} = - NDArray{T,N}(undef, size(x)...; writable = writable, ctx = ctx) - -""" - zeros([DType], dims, ctx::Context = current_context()) - zeros([DType], dims...) - zeros(x::NDArray) - -Create zero-ed `NDArray` with specific shape and type. -""" -function zeros(::Type{T}, dims::NTuple{N,Int}, - ctx::Context = current_context()) where {N,T<:DType} - x = NDArray{T}(undef, dims..., ctx = ctx) - x[:] = zero(T) - x -end - -zeros(::Type{T}, dims::Int...) 
where {T<:DType} = zeros(T, dims) - -zeros(dims::NTuple{N,Int}, ctx::Context = current_context()) where N = - zeros(MX_float, dims, ctx) -zeros(dims::Int...) = zeros(dims) - -zeros(x::NDArray)::typeof(x) = zeros_like(x) -Base.zeros(x::NDArray)::typeof(x) = zeros_like(x) - -""" - ones([DType], dims, ctx::Context = current_context()) - ones([DType], dims...) - ones(x::NDArray) - -Create an `NDArray` with specific shape & type, and initialize with 1. -""" -function ones(::Type{T}, dims::NTuple{N,Int}, - ctx::Context = current_context()) where {N,T<:DType} - arr = NDArray{T}(undef, dims..., ctx = ctx) - arr[:] = one(T) - arr -end - -ones(::Type{T}, dims::Int...) where T<:DType = ones(T, dims) - -ones(dims::NTuple{N,Int}, ctx::Context = current_context()) where N = - ones(MX_float, dims, ctx) -ones(dims::Int...) = ones(dims) - -ones(x::NDArray)::typeof(x) = ones_like(x) -Base.ones(x::NDArray)::typeof(x) = ones_like(x) - -import Base: length, ndims - -""" - size(x::NDArray) - size(x::NDArray, dims) - -Get the shape of an `NDArray`. The shape is in Julia's column-major convention. -See also the notes on NDArray shapes [`NDArray`](@ref). -""" -function Base.size(x::NDArray) - ref_ndim = Ref{MX_uint}(0) - ref_shape = Ref{Ptr{MX_uint}}(0) - @mxcall(:MXNDArrayGetShape, (MX_handle, Ref{MX_uint}, Ref{Ptr{MX_uint}}), - x, ref_ndim, ref_shape) - tuple(map(Int, reverse(unsafe_wrap(Array, ref_shape[], ref_ndim[])))...) -end - -Base.size(x::NDArray{T,N}, dims::Integer) where {T,N} = (dims > N) ? 1 : size(x)[dims] - -""" - length(x::NDArray) - -Get the number of elements in an `NDArray`. -""" -length(x::NDArray) = prod(size(x)) - -""" - ndims(x::NDArray) - -Get the number of dimensions of an `NDArray`. -Is equivalent to `length(size(arr))`. -""" -ndims(x::NDArray) = ndims(x.handle) - -function ndims(x::MX_NDArrayHandle)::Int - ref_ndim = Ref{MX_uint}(0) - ref_shape = Ref{Ptr{MX_uint}}(0) - @mxcall(:MXNDArrayGetShape, (MX_handle, Ref{MX_uint}, Ref{Ptr{MX_uint}}), - x, ref_ndim, ref_shape) - ref_ndim[] -end - -""" - eltype(x::NDArray) - -Get the element type of an `NDArray`. -""" -function Base.eltype(x::Union{NDArray,MX_NDArrayHandle}) - dtype_ref = Ref{Cint}(0) - @mxcall(:MXNDArrayGetDType, (MX_handle, Ptr{Cint}), x, dtype_ref) - - if dtype_ref[] == -1 # x->is_none() - # TODO: unit test for this branch - throw(MXError("Eltype of $x is not defined")) - end - - fromTypeFlag(TypeFlag(dtype_ref[])) -end - -@inline _first(x::NDArray) = try_get_shared(x, sync = :read) |> first - -Base.first(x::NDArray) = _first(x) - -Base.lastindex(x::NDArray) = length(x) - -""" - slice(arr :: NDArray, start:stop) - -Create a view into a sub-slice of an `NDArray`. Note only slicing at the slowest -changing dimension is supported. In Julia's column-major perspective, this is the last -dimension. For example, given an `NDArray` of shape (2,3,4), `slice(array, 2:3)` will create -a `NDArray` of shape (2,3,2), sharing the data with the original array. This operation is -used in data parallelization to split mini-batch into sub-batches for different devices. -""" -function slice(arr::NDArray, ::Colon) - arr -end -function slice(arr::NDArray, slice::UnitRange{Int}) - dim1 = size(arr)[end] - @assert(1 <= slice.start <= slice.stop <= dim1) - if slice.start == 1 && slice.stop == dim1 - return arr - end - - hdr_ref = Ref{MX_handle}(0) - # note Julia is 1-based, inclusive-inclusive indexing, while C++ is - # 0-based, inclusive-exclusive indexing. So 1:3 in Julia should - # translates into 0:3 in C++. 
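-  # e.g. slice(arr, 2:3) passes start = 1, stop = 3 to MXNDArraySlice below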
- @mxcall(:MXNDArraySlice, (MX_handle, MX_uint, MX_uint, Ref{MX_handle}), - arr, slice.start-1, slice.stop, hdr_ref) - return NDArray(MX_NDArrayHandle(hdr_ref[]), arr.writable) -end - -function _at(handle::Union{MX_NDArrayHandle, MX_handle}, idx::Integer) - h_ref = Ref{MX_handle}(C_NULL) - @mxcall(:MXNDArrayAt, (MX_handle, MX_uint, Ref{MX_handle}), - handle, idx, h_ref) - h_ref[] -end - -import Base: setindex! - -""" - setindex!(arr::NDArray, val, idx) - -Assign values to an `NDArray`. -The following scenarios are supported - -* single value assignment via linear indexing: `arr[42] = 24` - -* `arr[:] = val`: whole array assignment, `val` could be a scalar or an array (Julia `Array` - or `NDArray`) of the same shape. -* `arr[start:stop] = val`: assignment to a *slice*, `val` could be a scalar or an array of - the same shape to the slice. See also [`slice`](@ref). -""" -function setindex!(arr::NDArray, val::Real, idx::Integer) - # linear indexing - @assert arr.writable - _set_value(out=arr[idx], src=val) -end - -function setindex!(arr::NDArray, val::Real, ::Colon) - @assert arr.writable - _set_value(out = arr, src = dump_mx_param(val)) -end - -function setindex!(arr::NDArray, val::Array{T}, ::Colon) where T<:Real - @assert arr.writable - copy!(arr, val) -end - -function setindex!(arr::NDArray, val::NDArray, ::Colon) - @assert arr.writable - copy!(arr, val) -end - -function setindex!(arr::NDArray, val::Union{T,Array{T},NDArray}, - idx::UnitRange{Int}) where T<:Real - @assert arr.writable - setindex!(slice(arr, idx), val, Colon()) -end - -import Base: getindex -""" - getindex(arr::NDArray, idx) - -Shortcut for [`slice`](@ref). A typical use is to write - -```julia - arr[:] += 5 -``` - -which translates into - -```julia - arr[:] = arr[:] + 5 -``` - -which furthur translates into - -```julia - setindex!(getindex(arr, Colon()), 5, Colon()) -``` - -!!! note - The behavior is quite different from indexing into Julia's `Array`. For example, `arr[2:5]` - create a **copy** of the sub-array for Julia `Array`, while for `NDArray`, this is - a *slice* that shares the memory. -""" -getindex(arr::NDArray, ::Colon) = arr - -""" -Shortcut for [`slice`](@ref). -**NOTE** the behavior for Julia's built-in index slicing is to create a -copy of the sub-array, while here we simply call `slice`, -which shares the underlying memory. -""" -getindex(arr::NDArray, idx::UnitRange{Int}) = slice(arr, idx) - -getindex(arr::NDArray) = _first(arr) - -function getindex(arr::NDArray, idx::Integer) - # linear indexing - len = length(arr) - size_ = size(arr) - - if idx <= 0 || idx > len - throw(BoundsError( - "attempt to access $(join(size_, 'x')) NDArray at index $(idx)")) - end - - idx -= 1 - offsets = size_[1:end-1] |> reverse ∘ cumprod ∘ collect - handle = arr.handle - for offset ∈ offsets - handle = _at(handle, idx ÷ offset) - idx %= offset - end - - _at(handle, idx) |> MX_NDArrayHandle |> x -> NDArray(x, arr.writable) -end - -import Base: copy!, copy, convert, deepcopy - -""" - copy!(dst::Union{NDArray, Array}, src::Union{NDArray, Array}) - -Copy contents of `src` into `dst`. 
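-
-For example (a minimal sketch; both directions are supported):
-
-```julia
-src = mx.ones(2, 3)                             # Float32 NDArray
-dst = mx.zeros(2, 3)
-copy!(dst, src)                                 # NDArray -> NDArray
-jl  = copy!(Array{Float32}(undef, 2, 3), src)   # NDArray -> Julia Array
-```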
-""" -function copy!(dst::NDArray, src::NDArray) - @assert(dst.writable) - if dst.handle == src.handle - @warn("Copying an NDArray to itself") - return - end - - _copyto(src, out=dst) - return dst -end - -function copy!(dst::Array{T}, src::NDArray{T}) where T<:DType - @assert size(dst) == size(src) - @mxcall(:MXNDArraySyncCopyToCPU, (MX_handle, Ptr{Cvoid}, Csize_t), - src, pointer(dst), length(dst)) - dst -end - -copy!(dst::Array{<:Real}, src::NDArray) = copy!(dst, copy(src)) -copy!(dst::NDArray, src::AbstractArray) = copy!(dst, collect(src)) - -function copy!(dst::NDArray{T}, src::Array{<:Real}) where {T} - @assert dst.writable - @assert size(dst) == size(src) - src = convert(Array{T}, src) # this might involve copying - @mxcall(:MXNDArraySyncCopyFromCPU, (MX_handle, Ptr{Cvoid}, Csize_t), - dst.handle, pointer(src), length(src)) - dst -end - -function copy_ignore_shape!(dst::NDArray{T}, src::Array{<:Real}) where {T} - @assert dst.writable - @assert length(dst) == length(src) - src = convert(Array{T}, src) # this might involve copying - @mxcall(:MXNDArraySyncCopyFromCPU, (MX_handle, Ptr{Cvoid}, Csize_t), - dst.handle, pointer(src), length(src)) - dst -end - - -""" - copy(arr :: NDArray) - copy(arr :: NDArray, ctx :: Context) - copy(arr :: Array, ctx :: Context) - -Create a copy of an array. When no `Context` is given, create a Julia `Array`. -Otherwise, create an `NDArray` on the specified context. -""" -copy - -# Create copy: NDArray -> Julia Array -copy(x::NDArray{T,D}) where{T,D} = copy!(Array{T,D}(undef, size(x)), x) - -# Create copy: NDArray -> NDArray in a given context -copy(x::NDArray{T,D}, ctx::Context) where {T,D} = - copy!(NDArray{T,D}(_ndarray_alloc(T, size(x), ctx, true)), x) - -# Create copy: Julia Array -> NDArray in a given context -copy(x::Array{T}, ctx::Context) where {T<:DType} = - copy!(NDArray{T}(undef, size(x); ctx = ctx), x) - -copy(x::AbstractArray, ctx::Context) = - copy!(NDArray{eltype(x)}(undef, size(x); ctx = ctx), collect(x)) - -""" - convert(::Type{Array{<:Real}}, x::NDArray) - -Convert an `NDArray` into a Julia `Array` of specific type. -Data will be copied. -""" -convert(T::Type{Array{<:Real}}, x::NDArray) = convert(T, copy(x)) - -""" - deepcopy(arr::NDArray) - -Get a deep copy of the data blob in the form of an NDArray of default storage -type. This function blocks. Do not use it in performance critical code. -""" -function deepcopy(arr::NDArray) - out_ref = Ref{MX_handle}(C_NULL) - @mxcall(:MXNDArrayGetDataNDArray, (MX_handle, Ref{MX_handle}), arr, out_ref) - NDArray(MX_NDArrayHandle(out_ref[])) -end - -""" - hcat(x::NDArray...) -""" -Base.hcat(xs::NDArray{T}...) where T = cat(xs..., dims = 2) - -""" - vcat(x::NDArray...) -""" -Base.vcat(xs::NDArray{T}...) where T = cat(xs..., dims = 1) - -""" - cat(xs::NDArray...; dims) - -Concate the `NDArray`s which have the same element type along the `dims`. -Building a diagonal matrix is not supported yet. -""" -function Base.cat(xs::NDArray{T}...; dims) where T - ns = ndims.(xs) - d = Base.max(dims, maximum(ns)) - xs′ = map(zip(ns, xs)) do i - n, x = i - (d > n) ? reshape(x, -2, Base.ones(Int, d - n)...) : x - end - concat(xs′..., dim = d - dims) -end - -""" - @inplace - -Julia does not support re-definiton of `+=` operator (like `__iadd__` in python), -When one write `a += b`, it gets translated to `a = a+b`. `a+b` will allocate new -memory for the results, and the newly allocated `NDArray` object is then assigned -back to a, while the original contents in a is discarded. 
This is very inefficient
-when we want to do inplace updates.
-
-This macro is a simple utility to implement this behavior. Writing
-
-```julia
-  @mx.inplace a += b
-```
-
-translates into
-
-```julia
-  mx.add_to!(a, b)
-```
-
-which adds the contents of `b` into `a` in place.
-"""
-macro inplace(ex)
-  f = if ex.head == :+= || ex.head == :.+=
-    :add_to!
-  elseif ex.head == :-= || ex.head == :.-=
-    :sub_from!
-  elseif ex.head == :.*=
-    :mul_to!
-  elseif ex.head == :./=
-    :div_from!
-  elseif ex.head == :.%=
-    :mod_from!
-  else
-    error("unsupported inplace translation for $ex")
-  end
-  Expr(:call, f, esc(ex.args[1]), esc(ex.args[2]))
-end
-
-"""
-    add_to!(dst::NDArray, args::NDArrayOrReal...)
-
-Add a bunch of arguments into `dst`. Inplace updating.
-"""
-function add_to!(dst::NDArray, args::NDArrayOrReal...)
-  @assert dst.writable
-  for arg in args
-    if isa(arg, Real)
-      _plus_scalar(dst, scalar = arg, out = dst)
-    else
-      _plus!(dst, arg)
-    end
-  end
-  dst
-end
-
-"""
-    fill!(arr::NDArray, x)
-
-Create an `NDArray` filled with the value `x`, like `Base.fill!`.
-"""
-function Base.fill!(arr::NDArray, x)
-  arr[:] = x
-  arr
-end
-
-"""
-    fill(x, dims, ctx = current_context())
-    fill(x, dims...)
-
-Create an `NDArray` filled with the value `x`, like `Base.fill`.
-"""
-function fill(x::T, dims::NTuple{N,Integer}, ctx::Context = current_context()) where {T,N}
-  arr = NDArray{T}(undef, dims, ctx = ctx)
-  arr[:] = x
-  arr
-end
-
-fill(x, dims::Integer...) = fill(x, dims)
-
-import Base: hypot
-
-broadcasted(::typeof(hypot), x::NDArray{T}, y::NDArray{T}) where {T} =
-  _broadcast_hypot(x, y)
-
-"""
-Manipulating as Julia Arrays
-----------------------------
-
-    @nd_as_jl(captures..., statement)
-
-A convenient macro that allows operating on `NDArray`s as Julia Arrays. For example,
-
-```julia
-  x = mx.zeros(3,4)
-  y = mx.ones(3,4)
-  z = mx.zeros((3,4), mx.gpu())
-
-  @mx.nd_as_jl ro=(x,y) rw=z begin
-    # now x, y, z are just ordinary Julia Arrays
-    z[:,1] = y[:,2]
-    z[:,2] = 5
-  end
-```
-
-Under the hood, the macro converts all the declared captures from `NDArray` into Julia
-Arrays, by using `try_get_shared`, and automatically commits the modifications back into
-the `NDArray`s that are declared as `rw`. This is useful for fast prototyping and when
-implementing non-critical computations, such as `AbstractEvalMetric`.
-
-!!! note
-* Multiple `rw` and / or `ro` capture declarations can be made.
-* The macro does **not** check to make sure that `ro` captures are not modified. If the
-  original `NDArray` lives in CPU memory, then it is very likely the corresponding
-  Julia Array shares data with the `NDArray`, so modifying the Julia Array will also
-  modify the underlying `NDArray`.
-* More importantly, since `NDArray` operations are
-  asynchronous, we will wait for *writing* for `rw` variables but wait only for *reading*
-  in `ro` variables. If we write into those `ro` variables, **and** if the memory is
-  shared, a race condition might occur, and the behavior is undefined.
-* When an `NDArray` is declared to be captured as `rw`, its contents are always synced
-  back at the end.
-* The execution result of the expanded macro is always `nothing`.
-* The statements are wrapped in a `let`, thus locally introduced new variables will not be
-  available after the statements. So you will need to declare the variables before calling the
-  macro if needed.
-"""
-macro nd_as_jl(m_args...)
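-  # expected invocation: @nd_as_jl ro=(...) rw=(...) begin ... end
-  # (any number of capture declarations, followed by the statement block)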
- @assert(length(m_args) > 0) - stmts = m_args[end] - @assert(isa(stmts, Expr) && stmts.head == :block, - "The last argument should be a statement block (begin-end); but get $stmts") - stmts = esc(stmts) - - dclrs = m_args[1:end-1] - nd_ro = [] - nd_rw = [] - nd_all = [] - for declr in dclrs - @assert(isa(declr, Expr) && declr.head == :(=) && length(declr.args)==2 && declr.args[1] ∈ (:ro,:rw), - "Invalid declaration, should be rw=(x,y) or ro=z; but get $declr") - - declr_vars = declr.args[2] - if isa(declr_vars, Symbol) - declr_vars = (declr_vars,) - elseif isa(declr_vars, Expr) - @assert(declr_vars.head ∈ (:tuple, :vect), - "Capture declaration should be a variable or a tuple of variables; but got $declr_vars") - declr_vars = declr_vars.args - else - @assert(false, "Capture declaration should be a variable or a tuple of variables; but got $declr_vars") - end - for declr_var in declr_vars - @assert(isa(declr_var, Symbol), - "Captured ndarrays in ro/rw declaration should be variables, but get $(declr_var)") - end - append!(nd_all, [declr_vars...]) - if declr.args[1] == :ro - append!(nd_ro, [declr_vars...]) - else - append!(nd_rw, [declr_vars...]) - end - end - - nd_ro = map(esc, nd_ro) - nd_rw = map(esc, nd_rw) - nd_all = map(esc, nd_all) - rw_origs = [gensym() for _ in nd_rw] - - save_statements = Expr(:block, [:($v_orig = $v) for (v_orig, v) in zip(rw_origs, nd_rw)]...) - wait_statements = Expr(:block, [:(_wait_to_read($v)) for v in nd_ro]..., - [:(_wait_to_write($v)) for v in nd_rw]...) - clear_statements = Expr(:block, [:($v_orig = nothing) for v_orig in rw_origs]...) - let_assignments = Expr(:block, [:($v = try_get_shared($v)) for v in nd_all]...) - sync_statements = map(rw_origs, nd_rw) do v_orig, v - quote - if !is_shared($v, $v_orig) - # copy data back if not or no longer sharing data - copy!($v_orig, $v) - end - end - end - sync_statements = Expr(:block, sync_statements...) - - let_statement = Expr(:let, let_assignments, quote - $stmts - $sync_statements - end) - m_body = quote - $wait_statements - $save_statements - $let_statement - $clear_statements - nothing # the final results is always nothing - end - - m_body -end - -# NOTE: internal use only. Accessing pointers on a different device (e.g. accessing GPU -# pointers from CPU) leads to undefined behavior. -import Base.pointer -function pointer(arr :: NDArray) - pdata = Ref{Ptr{Cvoid}}(0) - @mxcall(:MXNDArrayGetData, (MX_handle, Ref{Ptr{Cvoid}}), arr, pdata) - return convert(Ptr{eltype(arr)}, pdata[]) -end - -_ndsig[:reshape] = :(reshape(x; shape = dim, reverse = !reverse)) -@_remap Base.reshape(x::NDArray, dim...; reverse = false) reshape -@_remap Base.reshape(x::NDArray, dim ; reverse = false) reshape - -_nddoc[:expand_dims] = -""" - expand_dims(x::NDArray, dim) - -Insert a new axis into `dim`. - -```julia -julia> x -4 mx.NDArray{Float64,1} @ CPU0: - 1.0 - 2.0 - 3.0 - 4.0 - -julia> mx.expand_dims(x, 1) -1×4 mx.NDArray{Float64,2} @ CPU0: - 1.0 2.0 3.0 4.0 - -julia> mx.expand_dims(x, 2) -4×1 mx.NDArray{Float64,2} @ CPU0: - 1.0 - 2.0 - 3.0 - 4.0 -``` -""" -@_remap expand_dims(x::NDArray, dim) expand_dims(x; axis = -dim) - -@_remap Base.permutedims(x::NDArray, axes) transpose(x; axes = length(axes) .- tuple(axes...)) - -_nddoc[:broadcast_to] = """ - broadcast_to(x::NDArray, dims) - broadcast_to(x::NDArray, dims...) - -Broadcasts the input array to a new shape. - -In the case of broacasting doesn't work out of box, -you can expand the NDArray first. 
- -```jldoctest -julia> x = mx.ones(2, 3, 4); - -julia> y = mx.ones(1, 1, 4); - -julia> x .+ mx.broadcast_to(y, 2, 3, 4) -2×3×4 mx.NDArray{Float32,3} @ CPU0: -[:, :, 1] = - 2.0 2.0 2.0 - 2.0 2.0 2.0 - -[:, :, 2] = - 2.0 2.0 2.0 - 2.0 2.0 2.0 - -[:, :, 3] = - 2.0 2.0 2.0 - 2.0 2.0 2.0 - -[:, :, 4] = - 2.0 2.0 2.0 - 2.0 2.0 2.0 -``` -""" -@_remap broadcast_to(x::NDArray, dims) broadcast_to(x; shape = dims) -@_remap broadcast_to(x::NDArray, dims...) broadcast_to(x; shape = dims) - -_nddoc[:broadcast_axis] = _nddoc[:broadcast_axes] = """ - broadcast_axis(x::NDArray, dim, size) - broadcast_axes(x::NDArray, dim, size) - -Broadcasts the input array over particular axis(axes). -Parameter `dim` and `size` could be a scalar, a Tuple or an Array. - -`broadcast_axes` is just an alias. - -```jldoctest -julia> x -1×2×1 mx.NDArray{Int64,3} @ CPU0: -[:, :, 1] = - 1 2 - -julia> mx.broadcast_axis(x, 1, 2) -2×2×1 mx.NDArray{Int64,3} @ CPU0: -[:, :, 1] = - 1 2 - 1 2 - -julia> mx.broadcast_axis(x, 3, 2) -1×2×2 mx.NDArray{Int64,3} @ CPU0: -[:, :, 1] = - 1 2 - -[:, :, 2] = - 1 2 -``` -""" -@_remap(broadcast_axis(x::NDArray, dim, size), - broadcast_axis(x; axis = ndims(x) .- dim, size = size)) -@_remap(Base.broadcast_axes(x::NDArray, dim, size), - broadcast_axes(x; axis = ndims(x) .- dim, size = size)) - -################################################################################ -# remapping to solving type unstablility -################################################################################ - -@_remap _broadcast_hypot(x::NDArray, y::NDArray) broadcast_hypot(x, y) -@_remap _broadcast_hypot!(x::NDArray, y::NDArray) broadcast_hypot(x, y) diff --git a/julia/src/ndarray/autoimport.jl b/julia/src/ndarray/autoimport.jl deleted file mode 100644 index c86e8fffd231..000000000000 --- a/julia/src/ndarray/autoimport.jl +++ /dev/null @@ -1,227 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# NDArray functions dynamically imported from libmxnet - -function _invoke_mxfunction(func_handle::MX_handle, use_vars, scalars, mut_vars; kwargs...) - names = String[string(entry[1]) for entry in kwargs] - args = String[string(entry[2]) for entry in kwargs] - @mxcall(:MXFuncInvokeEx, - (MX_handle, Ptr{MX_handle}, Ptr{MX_float}, Ptr{MX_handle}, Cint, char_pp, char_pp), - func_handle, use_vars, scalars, mut_vars, length(names), names, args) -end - -@enum(LIBMX_FUNC_TYPE_MASK, - NDARRAY_ARG_BEFORE_SCALAR = 1, - ACCEPT_EMPTY_MUTATE_TARGET = (1 << 2) -) - -# Import corresponding math functions from base so the automatically defined libmxnet -# functions can overload them -import Base: sqrt - -""" -The libxmnet APIs are automatically imported from `libmxnet.so`. The functions listed -here operate on `NDArray` objects. 
The arguments to the functions are typically ordered -as - -```julia - func_name(arg_in1, arg_in2, ..., scalar1, scalar2, ..., arg_out1, arg_out2, ...) -``` - -unless `NDARRAY_ARG_BEFORE_SCALAR` is not set. In this case, the scalars are put before the input arguments: - -```julia - func_name(scalar1, scalar2, ..., arg_in1, arg_in2, ..., arg_out1, arg_out2, ...) -``` - -If `ACCEPT_EMPTY_MUTATE_TARGET` is set. An overloaded function without the output arguments will also be defined: - -```julia - func_name(arg_in1, arg_in2, ..., scalar1, scalar2, ...) -``` - -Upon calling, the output arguments will be automatically initialized with empty NDArrays. - -Those functions always return the output arguments. If there is only one output (the typical situation), that -object (`NDArray`) is returned. Otherwise, a tuple containing all the outputs will be returned. -""" -function _get_ndarray_function_def(name::String) - func_name = Symbol(name) - - func_def = quote - function $func_name(::Type{<:NDArray}, args::NDArray...; out=nothing, kwargs...) - if out != nothing - output_vars = out - if isa(output_vars, NDArray) - output_vars = NDArray[output_vars] - end - num_outputs = length(output_vars) - else - output_vars = NDArray[] - num_outputs = 0 - end - - args = collect(args) # tuple to list - if length(args) == 0 - args = MX_handle[] - end - - output_handles_pp = if length(output_vars) > 0 - [map(x -> x.handle, output_vars)] - else - [Ptr{MX_handle}(C_NULL)] - end - num_outputs_p = [convert(Cint, num_outputs)] - - kw_keys_str = String[string(x[1]) for x in kwargs] - kw_vals_str = String[dump_mx_param(x[2]) for x in kwargs] - - op_handle = _get_cached_libmx_op_handle($(name)) - @mxcall(:MXImperativeInvoke, - (MX_handle, Cint, Ptr{MX_handle}, - Ptr{Cint}, Ptr{Ptr{MX_handle}}, - Cint, char_pp, char_pp), - op_handle, length(args), args, - num_outputs_p, output_handles_pp, - length(kwargs), kw_keys_str, kw_vals_str) - - if out == nothing - n = num_outputs_p[] - hdls = unsafe_wrap(Array{MX_handle}, output_handles_pp[], n) - xs = NDArray[NDArray(MX_NDArrayHandle(x)) for x in hdls] - if n == 1 - return xs[] - else - return xs - end - else - return out - end - end - end - - func_def2 = quote - function $func_name(args::NDArray...; out=nothing, kwargs...) - $func_name(NDArray, args...; out=out, kwargs...) 
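-        # delegates to the typed method above, supplying NDArray as the
-        # default first argument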
- end - end - - return func_def, func_def2 -end - -const _op_import_bl = [ # import black list; do not import these funcs - "_full", # we already have `mx.fill` - "_ones", # we already have `mx.ones` - "_zeros", # we already have `mx.zeros` - "clip", - "expand_dims", - - # arithmetic - "_plus", - "_minus", - "_mod", - "_mod_scalar", - "_rmod_scalar", - - "dot", - "max", - "max_axis", - "mean", - "min", - "min_axis", - "prod", - "reshape", - "sum", - "transpose", - - # trigonometric - "sin", - "cos", - "tan", - "arcsin", - "arccos", - "arctan", - - # hyperbolic - "sinh", - "cosh", - "tanh", - "arcsinh", - "arccosh", - "arctanh", - - # activation - "sigmoid", - "relu", - "softmax", - "log_softmax", - - # broadcast - "broadcast_add", - "broadcast_plus", - "broadcast_minus", - "broadcast_sub", - "broadcast_mul", - "broadcast_div", - "broadcast_mod", - "broadcast_power", - "broadcast_equal", - "broadcast_not_equal", - "broadcast_greater", - "broadcast_greater_equal", - "broadcast_lesser", - "broadcast_lesser_equal", - "broadcast_maximum", - "broadcast_minimum", - "broadcast_to", - "broadcast_axis", - "broadcast_axes", - "broadcast_hypot", - - # reduction - "argmax", - "argmin", -] - -macro _import_ndarray_functions() - names = filter(n -> ∉(lowercase(n), _op_import_bl), _get_libmx_op_names()) - - func_exprs = map(names) do name - op_handle = _get_libmx_op_handle(name) - - desc, key_narg = _get_libmx_op_description(name, op_handle) - func_def, func_def2 = _get_ndarray_function_def(name) - - func_name = Symbol(name) - - import_expr = _import_expr(func_name) - - quote - $import_expr - $func_def - @doc $desc - $func_def2 - end - end - - esc(quote - $(func_exprs...) - end) -end - -@_import_ndarray_functions diff --git a/julia/src/ndarray/comparison.jl b/julia/src/ndarray/comparison.jl deleted file mode 100644 index 19be6fa51384..000000000000 --- a/julia/src/ndarray/comparison.jl +++ /dev/null @@ -1,56 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
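The `broadcasted` overloads below hook Julia's dot-call syntax up to the libmxnet broadcast comparison operators. A minimal usage sketch (the input values and the `1.0`/`0.0` mask encoding are illustrative assumptions, not taken from this diff):

```julia
using MXNet  # assumed to be available as `mx`

x = mx.NDArray(Float32[1 2; 3 4])
y = mx.NDArray(Float32[1 0; 3 5])

x .== y  # dispatches to _broadcast_equal(x, y)
x .>  y  # dispatches to _broadcast_greater(x, y)
```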
- -broadcasted(::typeof(==), x::NDArray{T}, y::NDArray{T}) where {T} = - _broadcast_equal(x, y) - -broadcasted(::typeof(!=), x::NDArray{T}, y::NDArray{T}) where {T} = - _broadcast_not_equal(x, y) - -broadcasted(::typeof(>), x::NDArray{T}, y::NDArray{T}) where {T} = - _broadcast_greater(x, y) - -broadcasted(::typeof(>=), x::NDArray{T}, y::NDArray{T}) where {T} = - _broadcast_greater_equal(x, y) - -broadcasted(::typeof(<), x::NDArray{T}, y::NDArray{T}) where {T} = - _broadcast_lesser(x, y) - -broadcasted(::typeof(<=), x::NDArray{T}, y::NDArray{T}) where {T} = - _broadcast_lesser_equal(x, y) - -################################################################################ -# remapping to solving type unstablility -################################################################################ - -@_remap _broadcast_equal(x::NDArray, y::NDArray) broadcast_equal(x, y) -@_remap _broadcast_equal!(x::NDArray, y::NDArray) broadcast_equal(x, y) - -@_remap _broadcast_not_equal(x::NDArray, y::NDArray) broadcast_not_equal(x, y) -@_remap _broadcast_not_equal!(x::NDArray, y::NDArray) broadcast_not_equal(x, y) - -@_remap _broadcast_greater(x::NDArray, y::NDArray) broadcast_greater(x, y) -@_remap _broadcast_greater!(x::NDArray, y::NDArray) broadcast_greater(x, y) - -@_remap _broadcast_greater_equal(x::NDArray, y::NDArray) broadcast_greater_equal(x, y) -@_remap _broadcast_greater_equal!(x::NDArray, y::NDArray) broadcast_greater_equal(x, y) - -@_remap _broadcast_lesser(x::NDArray, y::NDArray) broadcast_lesser(x, y) -@_remap _broadcast_lesser!(x::NDArray, y::NDArray) broadcast_lesser(x, y) - -@_remap _broadcast_lesser_equal(x::NDArray, y::NDArray) broadcast_lesser_equal(x, y) -@_remap _broadcast_lesser_equal!(x::NDArray, y::NDArray) broadcast_lesser_equal(x, y) diff --git a/julia/src/ndarray/context.jl b/julia/src/ndarray/context.jl deleted file mode 100644 index c89c17b2c42c..000000000000 --- a/julia/src/ndarray/context.jl +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -""" - context(x::NDArray) - -Get the context that this `NDArray` lives on. -""" -function context(x::NDArray) - ref_typeid = Ref{Cint}(0) - ref_devid = Ref{Cint}(0) - @mxcall(:MXNDArrayGetContext, (MX_handle, Ref{Cint}, Ref{Cint}), - x, ref_typeid, ref_devid) - Context(ref_typeid[], ref_devid[]) -end diff --git a/julia/src/ndarray/io.jl b/julia/src/ndarray/io.jl deleted file mode 100644 index 99c11cd90026..000000000000 --- a/julia/src/ndarray/io.jl +++ /dev/null @@ -1,135 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -@inline _wait_to_read(arr :: NDArray) = - @mxcall(:MXNDArrayWaitToRead, (MX_handle,), arr) -@inline _wait_to_write(arr :: NDArray) = - @mxcall(:MXNDArrayWaitToWrite, (MX_handle,), arr) - -""" - try_get_shared(arr; sync=:nop) - -Try to create a Julia array by sharing the data with the underlying `NDArray`. - -# Arguments: - -* `arr::NDArray`: the array to be shared. - -!!! note - The returned array does not guarantee to share data with the underlying `NDArray`. - In particular, data sharing is possible only when the `NDArray` lives on CPU. - -* `sync::Symbol`: `:nop`,`:write`, `:read` - On CPU, invoke `_wait_to_read` if `:read`; - invoke `_wait_to_write` if `:write`. -""" -function try_get_shared(x::NDArray; sync::Symbol=:nop) - if context(x).device_type == CPU - # try to do data sharing - if sync == :read - _wait_to_read(x) - elseif sync == :write - _wait_to_write(x) - end - - unsafe_wrap(Array, pointer(x), size(x)) - else - # impossible to share, just copying - copy(x) - end -end - -""" - is_shared(j_arr, arr) - -Test whether `j_arr` is sharing data with `arr`. - -# Arguments: - -* `j_arr::Array`: the Julia Array. -* `arr::NDArray`: the `NDArray`. -""" -is_shared(::Array, ::NDArray) = false - -function is_shared(j_arr::Array{T}, arr::NDArray{T}) where {T<:DType} - if length(j_arr) != length(arr) - return false - end - if context(arr).device_type != CPU - return false - end - pointer(j_arr) == pointer(arr) -end - -""" - load(filename, ::Type{NDArray}) - -Load NDArrays from binary file. - -# Arguments: -* `filename::String`: the path of the file to load. It could be S3 or HDFS address. - -Returns either `Dict{Symbol, NDArray}` or `Vector{NDArray}`. - -`filename` can point to `s3` or `hdfs` resources if the `libmxnet` is built with the -corresponding components enabled. Examples: -* `s3://my-bucket/path/my-s3-ndarray` -* `hdfs://my-bucket/path/my-hdfs-ndarray` -* `/path-to/my-local-ndarray` -""" -function load(filename::AbstractString, ::Type{<:NDArray}) - out_size = Ref{MX_uint}(0) - out_hdrs = Ref{Ptr{MX_handle}}(0) - out_name_size = Ref{MX_uint}(0) - out_names = Ref{char_pp}(0) - @mxcall(:MXNDArrayLoad, (char_p, Ref{MX_uint}, Ref{Ptr{MX_handle}}, Ref{MX_uint}, Ref{char_pp}), - filename, out_size, out_hdrs, out_name_size, out_names) - out_name_size = out_name_size[] - out_size = out_size[] - if out_name_size == 0 - return [NDArray(MX_NDArrayHandle(hdr)) for hdr in unsafe_wrap(Array, out_hdrs[], out_size)] - else - @assert out_size == out_name_size - return Dict([(Symbol(unsafe_string(k)), NDArray(MX_NDArrayHandle(hdr))) for (k,hdr) in - zip(unsafe_wrap(Array, out_names[], out_size), unsafe_wrap(Array, out_hdrs[], out_size))]) - end -end - -""" - save(filename::AbstractString, data) - -Save NDarrays to binary file. Filename could be S3 or HDFS address, if `libmxnet` is built -with corresponding support (see `load`). - -* `filename::String`: path to the binary file to write to. -* `data`: data to save to file. 
Data can be an `NDArray`, a `Vector` of `NDArray`s,
-  or a `Dict{Symbol}` containing `NDArray`s.
-"""
-save(filename::String, data::NDArray) = save(filename, [data])
-
-save(filename::String, data::VecOfNDArray) =
-  @mxcall(:MXNDArraySave, (char_p, MX_uint, Ptr{MX_handle}, char_pp),
-          filename, length(data), MX_handle[data...], char_pp(0))
-
-function save(filename::String, data::Dict{Symbol})
-  names = keys(data)
-  arrays = MX_handle.(collect(values(data)))
-  names = String.(collect(names))
-
-  @mxcall(:MXNDArraySave, (char_p, MX_uint, Ptr{MX_handle}, char_pp),
-          filename, length(names), arrays, names)
-end
diff --git a/julia/src/ndarray/linalg.jl b/julia/src/ndarray/linalg.jl
deleted file mode 100644
index 4e91cfac631e..000000000000
--- a/julia/src/ndarray/linalg.jl
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# See https://github.com/dmlc/MXNet.jl/issues/55
-@_remap LinearAlgebra.dot(x::NDArray, y::NDArray) dot(y, x)
-
-# See https://github.com/dmlc/MXNet.jl/pull/123
-@_remap LinearAlgebra.transpose(x::NDArray{T,1}) where T reshape(x; shape = (1, length(x)), reverse = true)
-@_remap LinearAlgebra.transpose(x::NDArray{T,2}) where T transpose(x)
diff --git a/julia/src/ndarray/reduction.jl b/julia/src/ndarray/reduction.jl
deleted file mode 100644
index 2045ce231674..000000000000
--- a/julia/src/ndarray/reduction.jl
+++ /dev/null
@@ -1,113 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
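The reductions that follow translate Julia's `dims` keyword into libmxnet's `axis` parameter via `axis = 0 .- dims`: Julia numbers dimensions from the fastest-varying axis while libmxnet numbers them from the slowest, so a negative axis index selects the same dimension from the other end. A rough behavioural sketch (the values are illustrative, assuming a 2×3 input on CPU):

```julia
x = mx.NDArray(Float32[1 2 3; 4 5 6])  # 2×3

prod(x)               # _prod(x, :)       -> NDArray holding 720.0
maximum(x, dims = 1)  # _nd_maximum(x, 1) -> 1×3 NDArray (keepdims = true)
minimum(x, dims = 2)  # _nd_minimum(x, 2) -> 2×1 NDArray
```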
-
-Base.prod(x::NDArray; dims = :) = _prod(x, dims)
-@_remap _prod(x::NDArray, ::Colon) prod(x)
-@_remap _prod(x::NDArray, dims) prod(x; axis = 0 .- dims, keepdims = true)
-
-Base.maximum(x::NDArray; dims = :) = _nd_maximum(x, dims)
-@_remap _nd_maximum(x::NDArray, ::Colon) max(x)
-@_remap _nd_maximum(x::NDArray, dims) max(x; axis = 0 .- dims, keepdims = true)
-
-Base.minimum(x::NDArray; dims = :) = _nd_minimum(x, dims)
-@_remap _nd_minimum(x::NDArray, ::Colon) min(x)
-@_remap _nd_minimum(x::NDArray, dims) min(x; axis = 0 .- dims, keepdims = true)
-
-###############################################################################
-# min/max
-###############################################################################
-
-import Base: min, max
-
-broadcasted(::typeof(max), x::NDArray{T}, y::NDArray{T}) where {T} =
-  _broadcast_maximum(x, y)
-
-broadcasted(::typeof(min), x::NDArray{T}, y::NDArray{T}) where {T} =
-  _broadcast_minimum(x, y)
-
-###############################################################################
-# argmin/argmax
-###############################################################################
-
-# TODO: support CartesianIndex ?
-"""
-    argmax(x::NDArray; dims) -> indices
-
-Note that `NaN` is treated as greater than all other values in `argmax`.
-
-## Examples
-
-```julia-repl
-julia> x = NDArray([0. 1 2; 3 4 5])
-2×3 NDArray{Float64,2} @ CPU0:
- 0.0  1.0  2.0
- 3.0  4.0  5.0
-
-julia> argmax(x, dims = 1)
-1×3 NDArray{Float64,2} @ CPU0:
- 2.0  2.0  2.0
-
-julia> argmax(x, dims = 2)
-2×1 NDArray{Float64,2} @ CPU0:
- 3.0
- 3.0
-```
-
-See also [`argmin`](@ref mx.argmin).
-"""
-Base.argmax(x::NDArray; dims = :) = _argmax(x, dims) .+ 1
-@_remap _argmax(x::NDArray, ::Colon) argmax(x)
-@_remap _argmax(x::NDArray, dims) argmax(x; axis = 0 .- dims, keepdims = true)
-
-"""
-    argmin(x::NDArray; dims) -> indices
-
-Note that `NaN` is treated as less than all other values in `argmin`.
-
-## Examples
-
-```julia-repl
-julia> x = NDArray([0. 1 2; 3 4 5])
-2×3 NDArray{Float64,2} @ CPU0:
- 0.0  1.0  2.0
- 3.0  4.0  5.0
-
-julia> argmin(x, dims = 1)
-1×3 NDArray{Float64,2} @ CPU0:
- 1.0  1.0  1.0
-
-julia> argmin(x, dims = 2)
-2×1 NDArray{Float64,2} @ CPU0:
- 1.0
- 1.0
-```
-
-See also [`argmax`](@ref mx.argmax).
-"""
-Base.argmin(x::NDArray; dims = :) = _argmin(x, dims) .+ 1
-@_remap _argmin(x::NDArray, ::Colon) argmin(x)
-@_remap _argmin(x::NDArray, dims) argmin(x; axis = 0 .- dims, keepdims = true)
-
-################################################################################
-# remapping to solve type instability
-################################################################################
-
-@_remap _broadcast_maximum(x::NDArray, y::NDArray) broadcast_maximum(x, y)
-@_remap _broadcast_maximum!(x::NDArray, y::NDArray) broadcast_maximum(x, y)
-
-@_remap _broadcast_minimum(x::NDArray, y::NDArray) broadcast_minimum(x, y)
-@_remap _broadcast_minimum!(x::NDArray, y::NDArray) broadcast_minimum(x, y)
diff --git a/julia/src/ndarray/remap.jl b/julia/src/ndarray/remap.jl
deleted file mode 100644
index 86cb0373164e..000000000000
--- a/julia/src/ndarray/remap.jl
+++ /dev/null
@@ -1,145 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Mapping NDArray functions to Base-like API - -const _ndsig = Dict{Symbol,Expr}() -const _nddoc = Dict{Symbol,Any}() - -_isinplace(name::Symbol) = endswith(string(name), "!") - -_writable(name::Symbol, x) = - _isinplace(name) ? :(@assert $x.writable "this NDArray isn't writable") : :() - -function _outexpr(name::Symbol, x #= the first arg of `sig` =#) - if _isinplace(name) # `func!` - Ptr, 1, :([[MX_handle(x.handle)]]), :($x) - else - retexpr = :(NDArray(MX_NDArrayHandle(unsafe_load(hdls_ref[], 1)))) - Ref, 0, :(Ref{Ptr{MX_handle}}(C_NULL)), retexpr - end -end - -_broadcast_target(sig::Expr) = sig.args[2].args[].args[end] - -""" -Generate docstring from function signature -""" -function _docsig(fname::Symbol, sig::Expr, opname::String) - if fname !== :broadcasted - get(_nddoc, fname, " $sig") * "\n" * _getdocdefine(opname) - else - name = _broadcast_target(sig) - str = get(_nddoc, name, "") - _nddoc[name] = false # change to false, denote docstring has been set up - if isempty(str) - sig_ = Expr(:call, Symbol(name, "."), sig.args[3:end]...) - str = " $sig_" - end - if str ≠ false - # append "Defined in ..." - def = _getdocdefine(opname) - str = if str isa Markdown.MD - str = Markdown.MD(copy(str.content), copy(str.meta)) - push!(str, Markdown.Paragraph(def)) - str - else - str * def - end - - @eval @doc $str $name - end - "" - end -end - -""" - @_remap(sig::Expr, imp::Expr) - -Creating a function in signature `sig` with the function implementation `imp`. - -## Arguments -- `sig` is the function signature. - If the function name ends with `!`, it will invoke the corresponding inplace - call. -- `imp` is the underlying libmxnet API call - -""" -macro _remap(sig::Expr, imp::Expr) - d = splitdef(:($sig = $imp)) - @capture d[:name] (M_.fname_|fname_) - - opname = string(imp.args[1]) - - if isa(imp.args[2], Expr) && imp.args[2].head == :parameters - ndin = imp.args[3:end] - mxargs = imp.args[2].args - else # no keyword arguments - ndin = imp.args[2:end] - mxargs = [] - end - - mxkeys = map(x -> string(x.args[1]), mxargs) - mxvals = Expr(:vect, map(x -> :(dump_mx_param($(x.args[2]))), mxargs)...) - ndhlds = Expr(:vect, map(x -> :($(x).handle), ndin)...) - - # handler for `func!` which has side effect on first argument. 
- T, n_output, hdls_ref, retexpr = _outexpr(fname, _firstarg(sig)) - - assert_expr = _writable(fname, _firstarg(sig)) - - func_body = quote - $assert_expr - op_handle = _get_cached_libmx_op_handle($opname) - n_output = Ref(Cint($n_output)) - hdls_ref = $hdls_ref - @mxcall(:MXImperativeInvoke, - (MX_handle, - Cint, - Ptr{MX_handle}, - Ref{Cint}, - $T{Ptr{MX_handle}}, - Cint, - char_pp, - char_pp), - op_handle, - $(length(ndin)), - $(ndhlds), - n_output, - hdls_ref, - $(length(mxargs)), - $mxkeys, - $mxvals) - $retexpr - end - - docstr = _docsig(fname, sig, opname) - func_def = Expr(:function, sig, func_body) - - esc(quote - @doc $docstr - $func_def - end) -end - -macro _remap(sig::Expr, imp::Symbol) - imp = _ndsig[imp] - - esc(quote - @_remap($sig, $imp) - end) -end diff --git a/julia/src/ndarray/show.jl b/julia/src/ndarray/show.jl deleted file mode 100644 index 4a6bfa3f5948..000000000000 --- a/julia/src/ndarray/show.jl +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -function Base.show(io::IO, x::NDArray) - print(io, "NDArray(") - Base.show(io, try_get_shared(x, sync = :read)) - print(io, ")") -end - -# for REPL -function Base.show(io::IO, ::MIME{Symbol("text/plain")}, x::NDArray{T,N}) where {T,N} - type_ = split(string(typeof(x)), '.', limit=2)[end] - n = length(x) - size_ = N == 1 ? "$n-element" : join(size(x), "×") - print(io, "$size_ $type_ @ $(context(x))", (n == 0) ? "" : ":\n") - Base.print_array(io, try_get_shared(x, sync = :read)) -end diff --git a/julia/src/ndarray/statistic.jl b/julia/src/ndarray/statistic.jl deleted file mode 100644 index b4f7b90b8aa0..000000000000 --- a/julia/src/ndarray/statistic.jl +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
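To make the remapping concrete, here is roughly what a non-inplace `@_remap` invocation generates, written out by hand (a sketch derived from the macro body above, not the literal macro expansion; `_sum` is one of the remaps defined just below):

```julia
# Hand-expanded sketch of:
#   @_remap _sum(x::NDArray, dims) sum(x; axis = 0 .- dims, keepdims = true)
function _sum(x::NDArray, dims)
  op_handle = _get_cached_libmx_op_handle("sum")
  n_output  = Ref(Cint(0))                 # 0 => let libmxnet allocate the output
  hdls_ref  = Ref{Ptr{MX_handle}}(C_NULL)  # receives the output handle
  @mxcall(:MXImperativeInvoke,
          (MX_handle, Cint, Ptr{MX_handle}, Ref{Cint}, Ref{Ptr{MX_handle}},
           Cint, char_pp, char_pp),
          op_handle, 1, [x.handle], n_output, hdls_ref,
          2, ["axis", "keepdims"],
          [dump_mx_param(0 .- dims), dump_mx_param(true)])
  NDArray(MX_NDArrayHandle(unsafe_load(hdls_ref[], 1)))
end
```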
- -Statistics.mean(x::NDArray; dims = :) = _mean(x, dims) -@_remap _mean(x::NDArray, ::Colon) mean(x) -@_remap _mean(x::NDArray, dims) mean(x; axis = 0 .- dims, keepdims = true) - -Base.sum(x::NDArray; dims = :) = _sum(x, dims) -@_remap _sum(x::NDArray, ::Colon) sum(x) -@_remap _sum(x::NDArray, dims) sum(x; axis = 0 .- dims, keepdims = true) diff --git a/julia/src/ndarray/trig.jl b/julia/src/ndarray/trig.jl deleted file mode 100644 index 5251b3a34797..000000000000 --- a/julia/src/ndarray/trig.jl +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# trigonometric functions, remap to keep consistent API with Base -@_remap broadcasted(::typeof(sin), x::NDArray) sin(x) -@_remap broadcasted(::typeof(cos), x::NDArray) cos(x) -@_remap broadcasted(::typeof(tan), x::NDArray) tan(x) -@_remap broadcasted(::typeof(asin), x::NDArray) arcsin(x) -@_remap broadcasted(::typeof(acos), x::NDArray) arccos(x) -@_remap broadcasted(::typeof(atan), x::NDArray) arctan(x) - -# hyperbolic functions, remap to keep consistent API with Base -@_remap broadcasted(::typeof(sinh), x::NDArray) sinh(x) -@_remap broadcasted(::typeof(cosh), x::NDArray) cosh(x) -@_remap broadcasted(::typeof(tanh), x::NDArray) tanh(x) -@_remap broadcasted(::typeof(asinh), x::NDArray) arcsinh(x) -@_remap broadcasted(::typeof(acosh), x::NDArray) arccosh(x) -@_remap broadcasted(::typeof(atanh), x::NDArray) arctanh(x) diff --git a/julia/src/ndarray/type.jl b/julia/src/ndarray/type.jl deleted file mode 100644 index e24c89291dcb..000000000000 --- a/julia/src/ndarray/type.jl +++ /dev/null @@ -1,152 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# All the types supported by mshadow. 
See `mshadow/base.h` -const DType = Union{Float32, Float64, Float16, UInt8, Int32, Int8, Int64} -@enum TypeFlag kFloat32 kFloat64 kFloat16 kUint8 kInt32 kInt8 kInt64 -const DEFAULT_DTYPE = Float32 # MSHADOW_DEFAULT_DTYPE - -function toTypeFlag(T::Type{<:DType}) - if T == Float32 - return kFloat32 - elseif T == Float64 - return kFloat64 - elseif T == Float16 - return kFloat16 - elseif T == UInt8 - return kUint8 - elseif T == Int32 - return kInt32 - elseif T == Int8 - return kInt8 - elseif T == Int64 - return kInt64 - else - throw(ArgumentError("Can't convert $T to DType.")) - end -end - -function fromTypeFlag(T::TypeFlag) - if T == kFloat32 - return Float32 - elseif T == kFloat64 - return Float64 - elseif T == kFloat16 - return Float16 - elseif T == kUint8 - return UInt8 - elseif T == kInt32 - return Int32 - elseif T == kInt8 - return Int8 - elseif T == kInt64 - return Int64 - else - throw(ArgumentError("Can't convert DType $T.")) - end -end - -# create a NDArray handle of specific shape -function _ndarray_alloc(shape::NTuple{N,Int}, ctx::Context, delay_alloc::Bool) where N - h_ref = Ref{MX_handle}(0) - shape = collect(reverse(MX_uint.(shape))) - @mxcall(:MXNDArrayCreate, (Ptr{MX_uint}, MX_uint, Cint, Cint, Cint, Ref{MX_handle}), - shape, N, ctx.device_type, ctx.device_id, delay_alloc, h_ref) - handle = MX_NDArrayHandle(h_ref[]) - return handle -end - -# create a NDArray handle of specific shape type -function _ndarray_alloc(::Type{T}, shape::NTuple{N,Int}, ctx::Context, delay_alloc::Bool) where {T<:DType,N} - h_ref = Ref{MX_handle}(0) - shape = collect(reverse(MX_uint.(shape))) - dtype = toTypeFlag(T) - @mxcall(:MXNDArrayCreateEx, (Ptr{MX_uint}, MX_uint, Cint, Cint, Cint, Cint, Ref{MX_handle}), - shape, N, ctx.device_type, ctx.device_id, delay_alloc, dtype, h_ref) - handle = MX_NDArrayHandle(h_ref[]) - return handle -end - -# create a handle to an empty NDArray, this handle can be used to hold -# results returned by libmx API calls -function _ndarray_alloc() - h_ref = Ref{MX_handle}(0) - @mxcall(:MXNDArrayCreateNone, (Ref{MX_handle},), h_ref) - return MX_NDArrayHandle(h_ref[]) -end - -################################################################################ -# NDArray Type -################################################################################ -""" - NDArray{T,N} - -Wrapper of the `NDArray` type in `libmxnet`. This is the basic building block -of tensor-based computation. - -!!! note - since C/C++ use row-major ordering for arrays while Julia follows a - column-major ordering. To keep things consistent, we keep the underlying data - in their original layout, but use *language-native* convention when we talk - about shapes. For example, a mini-batch of 100 MNIST images is a tensor of - C/C++/Python shape (100,1,28,28), while in Julia, the same piece of memory - have shape (28,28,1,100). -""" -mutable struct NDArray{T,N} - handle :: MX_NDArrayHandle - writable :: Bool - - NDArray{T,N}(handle::MX_NDArrayHandle, writable::Bool = true) where {T,N} = - new(handle, writable) -end - -# UndefInitializer constructors -NDArray{T,N}(::UndefInitializer, dims::NTuple{N,Integer}; - writable = true, ctx::Context = current_context()) where {T,N} = - NDArray{T,N}(_ndarray_alloc(T, dims, ctx, false), writable) -NDArray{T,N}(::UndefInitializer, dims::Vararg{Integer,N}; kw...) where {T,N} = - NDArray{T,N}(undef, dims; kw...) - -NDArray{T}(::UndefInitializer, dims::NTuple{N,Integer}; kw...) where {T,N} = - NDArray{T,N}(undef, dims; kw...) 
-NDArray{T}(::UndefInitializer, dims::Vararg{Integer,N}; kw...) where {T,N} = - NDArray{T,N}(undef, dims; kw...) - -NDArray(::UndefInitializer, dims::NTuple{N,Integer}; kw...) where {N} = - NDArray{DEFAULT_DTYPE,N}(undef, dims; kw...) -NDArray(::UndefInitializer, dims::Vararg{Integer,N}; kw...) where {N} = - NDArray{DEFAULT_DTYPE,N}(undef, dims; kw...) - -NDArray(x::AbstractArray{<:DType}) = copy(collect(x), cpu()) -NDArray(x::Array{<:DType}) = copy(x, cpu()) - -NDArray(::Type{T}, x::AbstractArray) where {T<:DType} = - copy(convert(AbstractArray{T}, x), cpu()) - -NDArray(handle, writable = true) = - NDArray{eltype(handle), ndims(handle)}(handle, writable) - -# type aliases -const NDArrayOrReal = Union{NDArray,Real} -const VecOfNDArray = AbstractVector{<:NDArray} - -Base.unsafe_convert(::Type{MX_handle}, x::NDArray) = - Base.unsafe_convert(MX_handle, x.handle) -Base.convert(T::Type{MX_handle}, x::NDArray) = Base.unsafe_convert(T, x) -Base.cconvert(T::Type{MX_handle}, x::NDArray) = Base.unsafe_convert(T, x) - -MX_handle(x::NDArray) = Base.convert(MX_handle, x) diff --git a/julia/src/nn-factory.jl b/julia/src/nn-factory.jl deleted file mode 100644 index cb5df3722470..000000000000 --- a/julia/src/nn-factory.jl +++ /dev/null @@ -1,60 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -""" - MLP(input, spec; hidden_activation = :relu, prefix) - -Construct a multi-layer perceptron. A MLP is a multi-layer neural network with -fully connected layers. - -# Arguments: -* `input::SymbolicNode`: the input to the mlp. -* `spec`: the mlp specification, a list of hidden dimensions. For example, - `[128, (512, :sigmoid), 10]`. The number in the list indicate the - number of hidden units in each layer. A tuple could be used to specify - the activation of each layer. Otherwise, the default activation will - be used (except for the last layer). -* `hidden_activation::Symbol`: keyword argument, default `:relu`, indicating - the default activation for hidden layers. The specification here could be overwritten - by layer-wise specification in the `spec` argument. Also activation is not - applied to the last, i.e. the prediction layer. See [`Activation`](@ref) for a - list of supported activation types. -* `prefix`: keyword argument, default `gensym()`, used as the prefix to - name the constructed layers. - -Returns the constructed MLP. 
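# Example

An illustrative sketch (the `:data` symbol and the layer sizes are arbitrary):

```julia
data = mx.Variable(:data)
# a 128-unit layer with the default :relu, a 512-unit :sigmoid layer,
# and a 10-unit linear prediction layer
net  = mx.MLP(data, [128, (512, :sigmoid), 10])
```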
-""" -function MLP(input, spec; hidden_activation::Symbol = :relu, prefix = gensym()) - spec = convert(Vector{Union{Int,Tuple}}, spec) - - n_layer = length(spec) - for (i, s) in enumerate(spec) - if isa(s, Tuple) - n_unit, act_type = s - else - n_unit = s - act_type = hidden_activation - end - input = FullyConnected(input, name=Symbol(prefix, "fc$i"), num_hidden=n_unit) - if i < n_layer || isa(s, Tuple) - # will not add activation unless the user explicitly specified - input = Activation(input, name=Symbol(prefix, "$act_type$i"), act_type=act_type) - end - end - - return input -end diff --git a/julia/src/optimizer.jl b/julia/src/optimizer.jl deleted file mode 100644 index 6eda53e6d5b3..000000000000 --- a/julia/src/optimizer.jl +++ /dev/null @@ -1,312 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -############################################################################### -# Types -############################################################################### - -""" - AbstractOptimizer - -Base type for all optimizers. -""" -abstract type AbstractOptimizer end - -""" - AbstractLearningRateScheduler - -Base type for all learning rate scheduler. -""" -abstract type AbstractLearningRateScheduler end - -""" - AbstractMomentumScheduler - -Base type for all momentum scheduler. -""" -abstract type AbstractMomentumScheduler end - -""" - OptimizationState - -### Attributes -* `batch_size`: The size of the mini-batch used in stochastic training. -* `curr_epoch`: - The current epoch count. Epoch 0 means no training yet, during the first - pass through the data, the epoch will be 1; during the second pass, the - epoch count will be 1, and so on. -* `curr_batch`: - The current mini-batch count. The batch count is reset during every epoch. - The batch count 0 means the beginning of each epoch, with no mini-batch - seen yet. During the first mini-batch, the mini-batch count will be 1. -* `curr_iter`: - The current iteration count. One iteration corresponds to one mini-batch, - but unlike the mini-batch count, the iteration count does **not** reset - in each epoch. So it track the *total* number of mini-batches seen so far. -""" -mutable struct OptimizationState - batch_size :: Int - curr_epoch :: Int - curr_batch :: Int - curr_iter :: Int -end - -OptimizationState(batch_size::Int) = OptimizationState(batch_size, 0, 0, 0) - -############################################################################### -# LearningRate module -############################################################################### - -module LearningRate - -using Markdown - -import Base: get -import ..mx: AbstractLearningRateScheduler, OptimizationState, update! 
- -export initlrsched - -initlrsched(η::Real) = LearningRate.Fixed(η) - -update!(a::AbstractLearningRateScheduler) = (isdefined(a, :t) && (a.t += 1)) - -""" - get(sched::AbstractLearningRateScheduler) - -Returns the current learning rate. -""" -get(::AbstractLearningRateScheduler) = nothing - -""" - LearningRate.Fixed(η) - -Fixed learning rate scheduler always return the same learning rate. -""" -struct Fixed <: AbstractLearningRateScheduler - η::Float64 -end - -get(f::Fixed) = f.η - -@doc doc""" - LearningRate.Exp(η₀; γ = 0.9) - -```math -\eta_t = \eta_0\gamma^t -``` - -Where `t` is the epoch count, or the iteration count. -""" -mutable struct Exp <: AbstractLearningRateScheduler - η₀::Float64 - γ ::Float64 - t ::Int -end - -function Exp(η₀; γ = 0.9, t = 0) - @assert 0 < γ < 1 - Exp(η₀, γ, t) -end - -get(a::Exp) = a.η₀ * a.γ^a.t - -@doc doc""" - LearningRate.Inv(η₀; γ = 0.9, p = 0.5) - -```math -\eta_t = \eta_0 (1 + \gamma t)^{-p} -``` - -Where `t` is the epoch count, or the iteration count. -""" -mutable struct Inv <: AbstractLearningRateScheduler - η₀::Float64 - γ ::Float64 - p ::Float64 - t ::Int -end - -function Inv(η₀; γ = 0.9, p = 0.5, t = 0) - @assert 0 < γ < 1 - @assert 0 <= p - Inv(η₀, γ, p, t) -end - -get(i::Inv) = i.η₀ * (1 + i.γ*i.t)^(-i.p) - -end # module LearningRate - -using .LearningRate - -############################################################################### -# Momentum module -############################################################################### - -module Momentum - -using Markdown - -import Base: get -import ..mx: AbstractMomentumScheduler, OptimizationState - -export initmomsched - -""" - get(sched) - -* `sched::AbstractMomentumScheduler`: the momentum scheduler. - -Returns the current momentum. -""" -get - -initmomsched(μ::Real) = iszero(μ) ? Momentum.Null() : Momentum.Fixed(μ) - -""" - Momentum.Null - -The null momentum scheduler always returns 0 for momentum. It is also used to -explicitly indicate momentum should not be used. -""" -struct Null <: AbstractMomentumScheduler -end - -get(::Null) = 0.0 - -""" - Momentum.Fixed - -Fixed momentum scheduler always returns the same value. -""" -mutable struct Fixed <: AbstractMomentumScheduler - μ::Float64 -end - -get(f::Fixed) = f.μ - -@doc doc""" - NadamScheduler(; μ = 0.99, δ = 0.004, γ = 0.5, α = 0.96) - -Nesterov-accelerated adaptive momentum scheduler. - -Description in [Incorporating Nesterov Momentum into Adam] -(http://cs229.stanford.edu/proj2015/054_report.pdf). - -```math -\mu_t = \mu_0 * (1 - \gamma * \alpha^{t * \delta}) -``` - -Where -* `t`: iteration count -* `μ`: default `0.99`, μ₀ -* `δ`: default `0.004` is scheduler decay. -* `γ`: default `0.5` -* `α`: default `0.96` -""" -struct NadamScheduler <: AbstractMomentumScheduler - μ::Float64 - δ::Float64 - γ::Float64 - α::Float64 -end - -function NadamScheduler(; μ = 0.99, δ = 0.004, γ = 0.5, α = 0.96) - @assert 0.0 <= μ < 1.0 - @assert 0.0 <= δ - @assert 0.0 <= γ <= 1.0 - @assert 0.0 <= α <= 1.0 - NadamScheduler(μ, δ, γ, α) -end - -""" - get(n::NadamScheduler, t) - -Where `t` is the iteration count. 
-""" -get(n::NadamScheduler, t) = - n.μ * (1.0 - n.γ * n.α^( t * n.δ)), - n.μ * (1.0 - n.γ * n.α^((t + 1) * n.δ)) - -end # module Momentum - -using .Momentum - -############################################################################### -# Public APIs -############################################################################### - -""" - getupdater(optimizer) - -A utility function to create an updater function of `KVStore`, -that uses its closure to store all the states needed for each weights. - -Ther returned function has following signature: - -```julia -decend!(index::Int, ∇::NDArray, x::NDArray) -``` - -If the optimizer is stateful and need access/store states during updating, -`index` will be the key to access/store states. -""" -function getupdater(optimizer::AbstractOptimizer) - states = Dict{Int,Any}() - function updater(index::Int, ∇::NDArray, x::NDArray) - if !haskey(states, index) - states[index] = create_state(optimizer, index, x) - end - update!(optimizer, index, x, ∇, states[index]) - end - updater -end - -""" - normgrad(optimizer, W, ∇) - -Get the properly normalized gradient (re-scaled and clipped if necessary). - -* `optimizer`: the optimizer, - should contain the field `scale`, `clip` and `λ`. -* `W::NDArray`: the trainable weights. -* `∇::NDArray`: the original gradient of the weights. -""" -function normgrad!(opt::AbstractOptimizer, W::NDArray, ∇::NDArray) - # rescaling - s = opt.scale - !iszero(s) && @inplace ∇ .*= s - # gradient clipping - c = opt.clip - c > 0 && clamp!(∇, -c, c) - # weight decay - λ = opt.λ - λ > 0 && @inplace ∇ += λ .* W - - ∇ -end - -############################################################################### -# Builtin Optimizers -############################################################################### - -include("optimizers/sgd.jl") -include("optimizers/adam.jl") -include("optimizers/adagrad.jl") -include("optimizers/adadelta.jl") -include("optimizers/adamax.jl") -include("optimizers/rmsprop.jl") -include("optimizers/nadam.jl") diff --git a/julia/src/optimizers/adadelta.jl b/julia/src/optimizers/adadelta.jl deleted file mode 100644 index 7a45dd0459db..000000000000 --- a/julia/src/optimizers/adadelta.jl +++ /dev/null @@ -1,104 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -@doc doc""" - AdaDelta(; kwargs...) - -Scale learning rates by the ratio of accumulated gradients to accumulated -updates, see [1] and notes for further description. - -### Attributes -* `η`: default `1.0`, learning rate. -* `ρ`: default `0.95`, squared gradient moving average decay factor. -* `ϵ`: default `1e-6`, small value added for numerical stability. -* `clip`: default `0`, gradient clipping. - If positive, will clip the gradient into the range `[-clip, clip]`. -* `scale`: default `0`, gradient rescaling. 
- If != 0, multiply the gradient with `scale` before updating. - Often choose to be `1.0 / batch_size`. - If leave it default, high-level API like `fit!` will set it to - `1.0 / batch_size`, since `fit!` knows the `batch_size`. -* `λ`: default `0.00001`, weight decay is equivalent - to adding a global l2 regularizer for all the parameters. - -### Notes -`ρ` should be between 0 and 1. A value of `ρ` close to 1 will decay the -moving average slowly and a value close to 0 will decay the moving average -fast. - -`ρ = 0.95` and `ϵ = 1e-6` are suggested in the paper and reported to -work for multiple datasets (MNIST, speech). In the paper, no learning rate is -considered (so `η = 1.0`). Probably best to keep it at this value. - -`ϵ` is important for the very first update (so the numerator does not become 0). - -Using the step size `η` and a decay factor `ρ` the learning rate is -calculated as: - -```math -\begin{align*} - r_t &= ρ r_{t-1} + (1 - ρ) g^2 \\ - η_t &= η \frac{\sqrt{s_{t-1} + ϵ}} {\sqrt{r_t + ϵ}} \\ - s_t &= ρ s_{t-1} + (1 - ρ) (η_t \times g)^2 -\end{align*} -``` - -### References -1. Zeiler, M. D. (2012): - ADADELTA: An Adaptive Learning Rate Method. arXiv Preprint arXiv:1212.5701. -""" -AdaDelta - -@defstruct AdaDelta <: AbstractOptimizer ( - (η :: Real = 1.0, η > 0), - (ρ :: Real = 0.95, 0 < ρ < 1 ), - (ϵ :: Real = 1e-6, ϵ > 0), - (clip :: Real = 0, clip >= 0), - scale :: Real = 0, - (λ :: Real = 1e-5, λ >= 0), - η_sched :: Any = initlrsched(η) -) - -mutable struct AdaDeltaState - x :: NDArray - Δx :: NDArray -end - -create_state(::AdaDelta, ::Int, W::NDArray) = - AdaDeltaState(zeros(size(W), context(W)), zeros(size(W), context(W))) - -function update!(ada::AdaDelta, ::Int, W::NDArray, ∇::NDArray, s::AdaDeltaState) - η = get(ada.η_sched) - x = s.x - Δx = s.Δx - ρ = ada.ρ - ϵ = ada.ϵ - - normgrad!(ada, W, ∇) - - # Update s.acc as in RMSProp - @inplace x .*= ρ - @inplace x .+= (1 - ρ) .* ∇.^2 - - # Compute update using the "old" Δx - Δxₜ = ∇ .* sqrt(Δx .+ ϵ) ./ sqrt(x .+ ϵ) # FIXME: sqrt dot-call - @inplace W .+= -η .* Δxₜ - - # update Δx using update - @inplace Δx .*= ρ - @inplace Δx .+= (1 - ρ) .* Δxₜ.^2 -end diff --git a/julia/src/optimizers/adagrad.jl b/julia/src/optimizers/adagrad.jl deleted file mode 100644 index 6ddcb36280bc..000000000000 --- a/julia/src/optimizers/adagrad.jl +++ /dev/null @@ -1,77 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -@doc doc""" - AdaGrad(; kwargs...) - -Scale learning rates by dividing with the square root of accumulated -squared gradients. See [1] for further description. - -### Arguments -* `η`: default `0.1`, learning rate. -* `ϵ`: default `1e-6`, small value added for numerical stability. -* `clip`: default `0`, gradient clipping. - If positive, will clip the gradient into the range `[-clip, clip]`. 
-* `scale`: default `0`, gradient rescaling. - If != 0, multiply the gradient with `scale` before updating. - Often choose to be `1.0 / batch_size`. - If leave it default, high-level API like `fit!` will set it to - `1.0 / batch_size`, since `fit!` knows the `batch_size`. -* `λ`: default `0.00001`, weight decay is equivalent - to adding a global l2 regularizer for all the parameters. - -### Notes -Using step size `η` AdaGrad calculates the learning rate for feature `i` at -time step t as: - -```math -η_{t,i} = \frac{lr}{\sqrt{\sum^t_{t^\prime} g^2_{t^\prime,i} + ϵ}} g_{t,i} -``` - -as such the learning rate is monotonically decreasing. -Epsilon is not included in the typical formula, see [2]. - -### References -1. Duchi, J., Hazan, E., & Singer, Y. (2011): - Adaptive subgradient methods for online learning and - stochastic optimization. JMLR, 12:2121-2159. -2. Chris Dyer: Notes on AdaGrad. - [http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf] - (http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf) -""" -AdaGrad - -@defstruct AdaGrad <: AbstractOptimizer ( - (η :: Real = 0.1, η > 0), - (ϵ :: Real = 1e-6, ϵ > 0), - (clip :: Real = 0, clip >= 0), - scale :: Real = 0, - (λ :: Real = 1e-5, λ >= 0), - η_sched :: Any = initlrsched(η) -) - -create_state(::AdaGrad, ::Int, W::NDArray) = zeros(size(W), context(W)) - -function update!(ada::AdaGrad, ::Int, W::NDArray, ∇::NDArray, x::NDArray) - η = get(ada.η_sched) - ϵ = ada.ϵ - - normgrad!(ada, W, ∇) - - @inplace x .+= ∇.^2 # update state - @inplace W .+= -η .* ∇ ./ sqrt(x .+ ϵ) # FIXME: sqrt dot-call -end diff --git a/julia/src/optimizers/adam.jl b/julia/src/optimizers/adam.jl deleted file mode 100644 index c6aa99ba71fb..000000000000 --- a/julia/src/optimizers/adam.jl +++ /dev/null @@ -1,88 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -""" - ADAM - -The solver described in Diederik Kingma, Jimmy Ba: *Adam: A Method for -Stochastic Optimization*. arXiv:1412.6980 [cs.LG]. - - ADAM(; kwargs...) - -### Arguments -* `η`: default `0.001`, learning rate. -* `β1`: default `0.9`. -* `β2`: default `0.999`. -* `ϵ`: default `1e-8`. -* `clip`: default `0`, gradient clipping. - If positive, will clip the gradient into the range `[-clip, clip]`. -* `scale`: default `0`, gradient rescaling. - If != 0, multiply the gradient with `scale` before updating. - Often choose to be `1.0 / batch_size`. - If leave it default, high-level API like `fit!` will set it to - `1.0 / batch_size`, since `fit!` knows the `batch_size`. -* `λ`: default `0.00001`, weight decay is equivalent - to adding a global l2 regularizer for all the parameters. -* `η_sched::AbstractLearningRateScheduler`: default `LearningRate.Fixed(η)`, a - dynamic learning rate scheduler. If set, will overwrite the `η` parameter. 
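A minimal construction sketch (these values merely restate the defaults and are
purely illustrative):

```julia
opt     = ADAM(η = 0.001, β1 = 0.9, β2 = 0.999, ϵ = 1e-8)
updater = getupdater(opt)  # see `getupdater` above
```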
-""" -ADAM - -@defstruct ADAM <: AbstractOptimizer ( - (η :: Real = 0.001, η > 0), - (β1 :: Real = 0.9, 0 <= β1 < 1), - (β2 :: Real = 0.999, 0 <= β2 < 1), - (ϵ :: Real = 1e-8, ϵ > 0), - (clip :: Real = 0, clip >= 0), - scale :: Real = 0, - (λ :: Real = 1e-5, λ >= 0), - η_sched :: Any = initlrsched(η) -) - -mutable struct ADAMState - η :: Float64 # current learning rate - mₜ :: NDArray - vₜ :: NDArray - β1ᵗ :: Float64 - β2ᵗ :: Float64 -end - -create_state(adam::ADAM, ::Int, W::NDArray) = - ADAMState(get(adam.η_sched), - zeros(size(W), context(W)), - zeros(size(W), context(W)), - adam.β1, adam.β2) - -function update!(adam::ADAM, ::Int, W::NDArray, ∇:: NDArray, s::ADAMState) - η = s.η - β1 = adam.β1 - β2 = adam.β2 - ϵ = adam.ϵ - - normgrad!(adam, W, ∇) - - s.mₜ = β1 * s.mₜ + (1 - β1) .* ∇ - s.vₜ = β2 * s.vₜ + (1 - β2) .* ∇.^2 - - aₜ= sqrt(1.0 - s.β2ᵗ)/(1.0 - s.β1ᵗ) - - # update βᵗ to βᵗ⁺¹ - s.β1ᵗ *= β1 - s.β2ᵗ *= β2 - - @inplace W .+= -η * aₜ * s.mₜ ./ (sqrt(s.vₜ) .+ ϵ) -end diff --git a/julia/src/optimizers/adamax.jl b/julia/src/optimizers/adamax.jl deleted file mode 100644 index de6a1ab759b3..000000000000 --- a/julia/src/optimizers/adamax.jl +++ /dev/null @@ -1,84 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -""" - AdaMax(; kwargs...) - -This is a variant of of the Adam algorithm based on the infinity norm. -See [1] for further description. - -### Arguments -* `η`: default `0.002`, learning rate. -* `β1`: default `0.9`, exponential decay rate for the first moment estimates. -* `β2`: default `0.999`, exponential decay rate for the weighted - infinity norm estimates. -* `ϵ`: default `1e-8`, small value added for numerical stability. -* `clip`: default `0`, gradient clipping. - If positive, will clip the gradient into the range `[-clip, clip]`. -* `scale`: default `0`, gradient rescaling. - If != 0, multiply the gradient with `scale` before updating. - Often choose to be `1.0 / batch_size`. - If leave it default, high-level API like `fit!` will set it to - `1.0 / batch_size`, since `fit!` knows the `batch_size`. -* `λ`: default `0.00001`, weight decay is equivalent - to adding a global l2 regularizer for all the parameters. - -### References -1. Kingma, Diederik, and Jimmy Ba (2014): - Adam: A Method for Stochastic Optimization. Section 7. - [http://arxiv.org/abs/1412.6980] - (http://arxiv.org/abs/1412.6980). 
-""" -AdaMax - -@defstruct AdaMax <: AbstractOptimizer ( - (η :: Real = 0.002, η > 0), - (β1 :: Real = 0.9, 0 <= β1 < 1), - (β2 :: Real = 0.999, 0 <= β2 < 1), - (ϵ :: Real = 1e-8, ϵ > 0), - (clip :: Real = 0, clip >= 0), - scale :: Real = 0, - (λ :: Real = 1e-5, λ >= 0), - η_sched :: Any = initlrsched(η) -) - -mutable struct AdaMaxState - mₜ :: NDArray - uₜ :: NDArray - β1ᵗ :: Float64 -end - -create_state(ada::AdaMax, ::Int, W::NDArray) = - AdaMaxState(zeros(size(W), context(W)), - zeros(size(W), context(W)), - ada.β1) - -function update!(ada::AdaMax, ::Int, W::NDArray, ∇::NDArray, s::AdaMaxState) - η = get(ada.η_sched) - β1 = ada.β1 - β2 = ada.β2 - ϵ = ada.ϵ - - normgrad!(ada, W, ∇) - - s.mₜ = β1 * s.mₜ .+ (1 - β1) .* ∇ - s.uₜ = _maximum(β2 * s.uₜ, abs(∇)) # FIXME abs dot-call - - @inplace W .+= -η / (1 - s.β1ᵗ) * s.mₜ ./ (s.uₜ + ϵ) - - s.β1ᵗ *= ada.β1 -end diff --git a/julia/src/optimizers/nadam.jl b/julia/src/optimizers/nadam.jl deleted file mode 100644 index 522e9194caa8..000000000000 --- a/julia/src/optimizers/nadam.jl +++ /dev/null @@ -1,111 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -@doc doc""" - Nadam(; kwargs...) - -Nesterov Adam optimizer: Adam RMSprop with Nesterov momentum, -see [1] and notes for further description. - - -### Arguments -* `η`: default `0.001`, learning rate. -* `β1`: default `0.99`. -* `β2`: default `0.999`. -* `ϵ`: default `1e-8`, small value added for numerical stability. -* `clip`: default `0`, gradient clipping. - If positive, will clip the gradient into the range `[-clip, clip]`. -* `scale`: default `0`, gradient rescaling. - If != 0, multiply the gradient with `scale` before updating. - Often choose to be `1.0 / batch_size`. - If leave it default, high-level API like `fit!` will set it to - `1.0 / batch_size`, since `fit!` knows the `batch_size`. -* `λ`: default `0.00001`, weight decay is equivalent - to adding a global l2 regularizer for all the parameters. -* `η_sched::AbstractLearningRateScheduler`: default `nothing`, a - dynamic learning rate scheduler. If set, will overwrite the `η` - parameter. -* `μ_sched::NadamScheduler` default `NadamScheduler()` of the form. - - ```math - \mu_t = β_1 (1 - 0.5 \times 0.96^{t \times 0.004}) - ``` - -### Notes -Default parameters follow those provided in the paper. -It is recommended to leave the parameters of this optimizer -at their default values. - -### References -1. [Incorporating Nesterov Momentum into Adam] - (http://cs229.stanford.edu/proj2015/054_report.pdf). - -2. [On the importance of initialization and momentum in deep learning] - (http://www.cs.toronto.edu/~fritz/absps/momentum.pdf). 
-""" -Nadam - -@defstruct Nadam <: AbstractOptimizer ( - (η :: Real = 0.001, η > 0), - (β1 :: Real = 0.99, 0 <= β1 < 1), - (β2 :: Real = 0.999, 0 <= β2 < 1), - (ϵ :: Real = 1e-8, ϵ > 0), - (clip :: Real = 0, clip >= 0), - scale :: Real = 0, - (λ :: Real = 1e-5, λ >= 0), - η_sched :: Any = initlrsched(η), - μ_sched :: Momentum.NadamScheduler = Momentum.NadamScheduler(μ = β1) -) - -mutable struct NadamState - m :: NDArray - n :: NDArray - Πμ :: Float64 - β2ᵗ :: Float64 - t :: Int # use in NadamScheduler. - # we store `t` in state because state is created for each `index` -end - -create_state(n::Nadam, ::Int, W::NDArray) = - NadamState(zeros(size(W), context(W)), zeros(size(W), context(W)), - 1.0, n.β2, 1) - -function update!(na::Nadam, ::Int, W::NDArray, ∇::NDArray, s::NadamState) - η = get(na.η_sched) - μₜ, μₜ₁= get(na.μ_sched, s.t) - β1, β2 = na.β1, na.β2 - ϵ = na.ϵ - - normgrad!(na, W, ∇) - s.t += 1 - - s.Πμ *= μₜ - Πμ′ = s.Πμ * μₜ₁ - - ∇′ = ∇ / (1.0 - s.Πμ) - @inplace s.m .*= β1 - @inplace s.m .+= (1.0 - β1) * ∇ - m̂ = s.m / (1.0 - Πμ′) - - @inplace s.n .*= β2 - @inplace s.n .+= (1.0 - β2) .* ∇.^2 - n̂ = s.n / (1.0 - s.β2ᵗ) - s.β2ᵗ *= β2 - - m̄ = (1.0 - μₜ) * ∇′+ μₜ₁ * m̂ - @inplace W .+= -η * m̄ ./ (sqrt(n̂) + ϵ) -end diff --git a/julia/src/optimizers/rmsprop.jl b/julia/src/optimizers/rmsprop.jl deleted file mode 100644 index 18445752588a..000000000000 --- a/julia/src/optimizers/rmsprop.jl +++ /dev/null @@ -1,84 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -@doc doc""" - RMSProp(; kwargs...) - -Scale learning rates by dividing with the moving average of the root mean -squared (RMS) gradients. See [1] for further description. - -### Arguments - -* `η`: default `0.1`, learning rate. -* `ρ`: default `0.9`, gradient moving average decay factor. -* `ϵ`: default `1e-8`, small value added for numerical stability. -* `clip`: default `0`, gradient clipping. - If positive, will clip the gradient into the range `[-clip, clip]`. -* `scale`: default `0`, gradient rescaling. - If != 0, multiply the gradient with `scale` before updating. - Often choose to be `1.0 / batch_size`. - If leave it default, high-level API like `fit!` will set it to - `1.0 / batch_size`, since `fit!` knows the `batch_size`. -* `λ`: default `0.00001`, weight decay is equivalent - to adding a global l2 regularizer for all the parameters. - -### Notes -`ρ` should be between 0 and 1. A value of `ρ` close to 1 will decay the -moving average slowly and a value close to 0 will decay the moving average -fast. - -Using the step size `η` and a decay factor `ρ the -learning rate `ηₜ` is calculated as: - -```math -\begin{align*} - r_t &= ρ r_{t-1} + (1 - ρ)g^2 \\ - η_t &= \frac{η}{\sqrt{r_t + ϵ}} -\end{align*} -``` - -### References -1. Tieleman, T. and Hinton, G. 
-""" -RMSProp - -@defstruct RMSProp <: AbstractOptimizer ( - (η :: Real = 0.001, η > 0), - (ρ :: Real = 0.9, 0 < ρ < 1), - (ϵ :: Real = 1e-8, ϵ > 0), - (clip :: Real = 0, clip >= 0), - scale :: Real = 0, - (λ :: Real = 1e-5, λ >= 0), - η_sched :: Any = initlrsched(η) -) - -create_state(::RMSProp, ::Int, W::NDArray) = zeros(size(W), context(W)) - -function update!(rms::RMSProp, ::Int, W::NDArray, ∇::NDArray, s::NDArray) - η = get(rms.η_sched) - ρ = rms.ρ - ϵ = rms.ϵ - - normgrad!(rms, W, ∇) - - @inplace s .*= ρ - @inplace s .+= (1 - ρ) .* (∇.^2) - @inplace W .+= -η .* ∇ ./ sqrt(s .+ ϵ) # FIXME: sqrt should be dot-call -end diff --git a/julia/src/optimizers/sgd.jl b/julia/src/optimizers/sgd.jl deleted file mode 100644 index 6af8094829f6..000000000000 --- a/julia/src/optimizers/sgd.jl +++ /dev/null @@ -1,88 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -@doc doc""" - SGD(; kwargs...) - -Stochastic gradient descent optimizer. - -Vanilla SGD: - -```math -\theta \leftarrow \theta - \eta \nabla -``` - -SGD with momentum: - -```math -\begin{align*} - \nu & \leftarrow \mu \nu_{t-1} - \eta \nabla \\ - \theta & \leftarrow \theta + \nu_t -\end{align*} -``` - -### Arguments - -* `η`: default `0.01`, learning rate. -* `μ`: default `0`, the momentum, usually set to `0.9` in this implementation. -* `λ`: default `0.0001`, weight decay, which is equivalent to - adding a global L2 regularizer to the parameters. -* `clip`: default `0`, gradient clipping. - If positive, will clip the gradient into the bounded range `[-clip, clip]`. -* `scale`: default `0`, gradient rescaling. - If != 0, multiply the gradient with `scale` before updating. - Often chosen to be `1.0 / batch_size`. - If left at the default, high-level APIs like `fit!` will set it to - `1.0 / batch_size`, since `fit!` knows the `batch_size`. -* `μ_sched::AbstractMomentumScheduler`: default `Momentum.Null()`, - a dynamic momentum scheduler. If set, will overwrite the `μ` - parameter. -* `η_sched::AbstractLearningRateScheduler`: default `LearningRate.Fixed(η)`, a - dynamic learning rate scheduler. If set, will overwrite the `η` parameter. -""" -SGD - -@defstruct SGD <: AbstractOptimizer ( - (η :: Real = 0.01, η > 0), - (μ :: Real = 0.0, μ >= 0), - (clip :: Real = 0, clip >= 0), - scale :: Real = 0, - (λ :: Real = 0.0001, λ >= 0), - η_sched :: Any = initlrsched(η), - μ_sched :: Any = initmomsched(μ) -) - -create_state(sgd::SGD, ::Int, W::NDArray) = - isa(sgd.μ_sched, Momentum.Null) ? nothing : zeros(size(W), context(W))
- -function update!(sgd::SGD, ::Int, W::NDArray, ∇::NDArray, ::Nothing) - η = get(sgd.η_sched) - normgrad!(sgd, W, ∇) - @inplace W += -η * ∇ -end - -# update with momentum -function update!(sgd::SGD, ::Int, W::NDArray, ∇::NDArray, ν::NDArray) - η = get(sgd.η_sched) - μ = get(sgd.μ_sched) - - normgrad!(sgd, W, ∇) - - @inplace ν .*= μ - @inplace ν .+= -η .* ∇ - @inplace W .+= ν -end diff --git a/julia/src/random.jl b/julia/src/random.jl deleted file mode 100644 index 3f3b80bbab4a..000000000000 --- a/julia/src/random.jl +++ /dev/null @@ -1,88 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -""" - rand!(x::NDArray; low = 0, high = 1) - -Draw random samples from a uniform distribution. -Samples are uniformly distributed over the half-open interval [low, high) -(includes low, but excludes high). - -```julia -julia> mx.rand!(NDArray(undef, 2, 3)) -2×3 mx.NDArray{Float32,2} @ CPU0: - 0.385748 0.839275 0.444536 - 0.0879585 0.215928 0.104636 - -julia> mx.rand!(NDArray(undef, 2, 3), low = 1, high = 10) -2×3 mx.NDArray{Float32,2} @ CPU0: - 6.6385 4.18888 2.07505 - 8.97283 2.5636 1.95586 -``` -""" -rand!(x::NDArray; low = 0, high = 1) = - _random_uniform(NDArray, low = low, high = high, shape = size(x), out = x) - -""" - rand(dims...; low = 0, high = 1, context = cpu()) - -Draw random samples from a uniform distribution. -Samples are uniformly distributed over the half-open interval [low, high) -(includes low, but excludes high). - -```julia -julia> mx.rand(2, 2) -2×2 mx.NDArray{Float32,2} @ CPU0: - 0.487866 0.825691 - 0.0234245 0.794797 - -julia> mx.rand(2, 2; low = 1, high = 10) -2×2 mx.NDArray{Float32,2} @ CPU0: - 5.5944 5.74281 - 9.81258 3.58068 -``` -""" -rand(dims::Integer...; low = 0, high = 1, context = cpu()) = - rand!(NDArray(undef, dims, ctx = context), low = low, high = high) - -""" - randn!(x::NDArray; μ = 0, σ = 1) - -Draw random samples from a normal (Gaussian) distribution. -""" -randn!(x::NDArray; μ = 0, σ = 1) = - _random_normal(NDArray, loc = μ, scale = σ, shape = size(x), out = x) - -""" - randn(dims...; μ = 0, σ = 1, context = cpu()) - -Draw random samples from a normal (Gaussian) distribution. -""" -randn(dims::Int...; μ = 0, σ = 1, context = cpu()) = - randn!(NDArray(undef, dims, ctx = context), μ = μ, σ = σ) - -""" - seed!(seed::Int) - -Set the random seed of libmxnet. -""" -seed!(s::Int) = @mxcall :MXRandomSeed (Cint,) s - -function srand(s::Int) - @warn "`mx.srand` is deprecated, use `mx.seed!` instead." - seed!(s) -end
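The normal-sampling helpers above mirror `rand`/`rand!`; a short hedged usage sketch (illustrative values only):

```julia
mx.seed!(42)                        # make libmxnet draws reproducible
x = mx.randn(2, 3; μ = 1, σ = 2)    # 2×3 NDArray ~ Normal(1, 2)
mx.randn!(x)                        # refill `x` in place with standard normals
```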
diff --git a/julia/src/runtime.jl b/julia/src/runtime.jl deleted file mode 100644 index cedcced9d29a..000000000000 --- a/julia/src/runtime.jl +++ /dev/null @@ -1,76 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# runtime detection of compile-time features in the native library - -module MXRuntime - -using ..mx - -export LibFeature -export feature_list, isenabled - -# defined in include/mxnet/c_api.h -struct LibFeature - _name::Ptr{Cchar} - enabled::Bool -end - -function Base.getproperty(x::LibFeature, p::Symbol) - (p == :name) && return unsafe_string(getfield(x, :_name)) - getfield(x, p) -end - -Base.show(io::IO, x::LibFeature) = - print(io, ifelse(x.enabled, "✔", "✖"), " ", x.name) - -""" - feature_list() - -Check the library for compile-time features. -The list of features is maintained in libinfo.h and libinfo.cc. -""" -function feature_list() - ref = Ref{Ptr{LibFeature}}(C_NULL) - s = Ref{Csize_t}(0) - @mx.mxcall(:MXLibInfoFeatures, (Ref{Ptr{LibFeature}}, Ref{Csize_t}), ref, s) - unsafe_wrap(Array, ref[], s[]) -end - -""" - isenabled(x::Symbol)::Bool - -Return whether the given runtime feature is enabled or not. - -```julia-repl -julia> mx.isenabled(:CUDA) -false - -julia> mx.isenabled(:CPU_SSE) -true -``` - -See also `mx.feature_list()`. -""" -isenabled(x::Symbol) = - any(feature_list()) do i - Symbol(i.name) == x && i.enabled - end - -end # module MXRuntime - -using .MXRuntime diff --git a/julia/src/symbolic-node.jl b/julia/src/symbolic-node.jl deleted file mode 100644 index ba1c595c97e4..000000000000 --- a/julia/src/symbolic-node.jl +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -include("symbolic-node/type.jl") -include("symbolic-node/show.jl") -include("symbolic-node/arithmetic.jl") -include("symbolic-node/io.jl") # save/load and json utils -include("symbolic-node/array.jl") -include("symbolic-node/op.jl") -include("symbolic-node/autodiff.jl") # AD and shape inference stuff diff --git a/julia/src/symbolic-node/arithmetic.jl b/julia/src/symbolic-node/arithmetic.jl deleted file mode 100644 index 75b87c7dcab4..000000000000 --- a/julia/src/symbolic-node/arithmetic.jl +++ /dev/null @@ -1,127 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements.
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import Base: + - -""" - +(args...) - .+(args...) - -Elementwise summation of `SymbolicNode`. -""" -function +(x::SymbolicNode, ys::SymbolicNodeOrReal...) - ret = x - for y ∈ ys - if y isa SymbolicNode - ret = _plus(ret, y) - else - ret = _plus_scalar(ret, scalar=MX_float(y)) - end - end - ret -end - -+(s::Real, x::SymbolicNode, ys::SymbolicNodeOrReal...) = +(x + s, ys...) - -broadcasted(::typeof(+), x::SymbolicNode, ys::SymbolicNodeOrReal...) = +(x, ys...) -broadcasted(::typeof(+), s::Real, x::SymbolicNode, ys::SymbolicNodeOrReal...) = +(x + s, ys...) - -import Base: - - -""" - -(x, y) - .-(x, y) - -Elementwise subtraction of `SymbolicNode`. -Operating with `Real` is also supported. -""" -x::SymbolicNode - y::SymbolicNode = _minus(x, y) -x::SymbolicNode - s::Real = _minus_scalar(x, scalar=MX_float(s)) -s::Real - x::SymbolicNode = _rminus_scalar(x, scalar=MX_float(s)) - --(x::SymbolicNode) = 0 - x - -broadcasted(::typeof(-), x::SymbolicNode, y::SymbolicNodeOrReal) = x - y -broadcasted(::typeof(-), s::Real, x::SymbolicNode) = s - x - -import Base: * - -""" - .*(x, y) - -Elementwise multiplication of `SymbolicNode`. -""" -x::SymbolicNode * s::Real = _mul_scalar(x, scalar=MX_float(s)) -s::Real * x::SymbolicNode = _mul_scalar(x, scalar=MX_float(s)) - -function broadcasted(::typeof(*), x::SymbolicNode, ys::SymbolicNodeOrReal...) - ret = x - for y in ys - if y isa SymbolicNode - ret = _mul(ret, y) - else - ret = _mul_scalar(ret, scalar=MX_float(y)) - end - end - ret -end - -broadcasted(::typeof(*), s::Real, x::SymbolicNode, ys::SymbolicNodeOrReal...) = - broadcasted(*, x * s, ys...) - -import Base: / - -""" - ./(x, y) - -* Elementwise division of a `SymbolicNode` by a scalar or by another `SymbolicNode` -of the same shape. - -* Elementwise division of a scalar by a `SymbolicNode`. - -* Matrix division (solving linear systems) is not implemented yet. -""" -x::SymbolicNode / s::Real = _DivScalar(x, scalar=MX_float(s)) - -broadcasted(::typeof(/), x::SymbolicNode, y::SymbolicNode) = _div(x, y) -broadcasted(::typeof(/), x::SymbolicNode, s::Real) = _div_scalar(x, scalar=MX_float(s)) -broadcasted(::typeof(/), s::Real, x::SymbolicNode) = _rdiv_scalar(x, scalar=MX_float(s)) - - -import Base: ^ - -""" - .^(x, y) - -Elementwise power of `SymbolicNode`. -Operating with `Real` is also supported.
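A hedged sketch of how the overloads in this file compose (hypothetical variable names; each call lowers to the `_plus`/`_mul_scalar`-style ops shown above):

```julia
x = mx.Variable(:x)
y = mx.Variable(:y)

z = 2x .+ y          # _mul_scalar, then _plus
w = (x .- 1) ./ y    # _minus_scalar, then _div
p = x .^ 2           # Base.literal_pow lowers to _power_scalar
```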
-""" -^ - -broadcasted(::typeof(^), x::SymbolicNode, y::SymbolicNode) = _power(x, y) -broadcasted(::typeof(^), x::SymbolicNode, s::Real) = _power_scalar(x, scalar = s) -broadcasted(::typeof(^), s::Real, x::SymbolicNode) = _rpower_scalar(x, scalar = s) -broadcasted(::typeof(Base.literal_pow), ::typeof(^), x::SymbolicNode, ::Val{s}) where {s} = - _power_scalar(x, scalar = s) - -broadcasted(::typeof(^), ::Irrational{:ℯ}, x::SymbolicNode) = exp(x) -broadcasted(::typeof(^), x::SymbolicNode, s::Irrational) = - _power_scalar(x, scalar=MX_float(s)) -broadcasted(::typeof(^), s::Irrational, x::SymbolicNode) = - _rpower_scalar(x, scalar=MX_float(s)) - - diff --git a/julia/src/symbolic-node/array.jl b/julia/src/symbolic-node/array.jl deleted file mode 100644 index 95446a03ab89..000000000000 --- a/julia/src/symbolic-node/array.jl +++ /dev/null @@ -1,122 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Base.Array related interface - -import Base: reshape - -""" - reshape(sym::SymbolicNode, dim; reverse=false, name) - reshape(sym::SymbolicNode, dim...; reverse=false, name) - -Reshape SymbolicNode operator - -Some dimensions of the shape can take special values from the set -{0, -1, -2, -3, -4}. -The significance of each is explained below: - -- `0` copy this dimension from the input to the output shape. - - Example: - - - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) - - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) - -- `-1` infers the dimension of the output shape by using the remainder of the - input dimensions keeping the size of the new array same as that of the input - array. At most one dimension of shape can be -1. - - Example: - - - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) - - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) - - input shape = (2,3,4), shape=(-1,), output shape = (24,) - -- `-2` copy all/remainder of the input dimensions to the output shape. - - Example: - - - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) - - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) - - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) - -- `-3` use the product of two consecutive dimensions of the input shape as the - output dimension. - - Example: - - - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) - - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) - - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) - - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) - -- `-4` split one dimension of the input into two dimensions passed subsequent - to -4 in shape (can contain -1). 
- - Example: - - - input shape = (2,3,4), shape = (-4,1,2,-2), output shape = (1,2,3,4) - - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) - -If the argument `reverse` is set to `true`, then the special values are inferred -from right to left. - - Example: - - - with `reverse=false`, for input shape = (10,5,4), shape = (-1,0), - output shape would be (40,5) - - with `reverse=true`, output shape will be (50,4). -""" -reshape(sym::SymbolicNode, dim::NTuple{N, Integer}; kwargs...) where {N} = - _reshape(sym, dim; kwargs...) -reshape(sym::SymbolicNode, dim::Integer...; kwargs...) = - _reshape(sym, dim; kwargs...) - -@inline function _reshape(sym::SymbolicNode, dim::NTuple{N,Integer}; - reverse::Bool=false, name::String="") where N - op = _get_cached_libmx_op_handle("reshape") - node = _create_atomic_symbol(op.value, ["shape", "reverse"], - [dump_mx_param(dim), dump_mx_param(!reverse)]) - name = get!(DEFAULT_NAME_MANAGER, name, "reshape") - _compose!(node, name=name, data=sym) -end - -################################################################################ -# Base.getindex -################################################################################ - -""" - getindex(self :: SymbolicNode, idx :: Union{Int, Base.Symbol, AbstractString}) - -Get a node representing the specified output of this node. The index could be -a symbol or string indicating the name of the output, or a 1-based integer -indicating the index, as in the list of [`list_outputs`](@ref). -""" -function Base.getindex(self :: SymbolicNode, idx :: Union{Base.Symbol, AbstractString}) - idx = Symbol(idx) - i_idx = findall(idx .== list_outputs(self)) - @assert(length(i_idx) > 0, "Cannot find output with name '$idx'") - @assert(length(i_idx) < 2, "Found duplicated output with name '$idx'") - Base.getindex(self, i_idx[1]) -end -function Base.getindex(self :: SymbolicNode, idx :: Int) - ref_hdr = Ref{MX_handle}(0) - # note Julia is 1-based, while MXNet is 0-based - @mxcall(:MXSymbolGetOutput, (MX_handle, MX_uint, Ref{MX_handle}), self, idx-1, ref_hdr) - return SymbolicNode(MX_SymbolHandle(ref_hdr[])) -end - diff --git a/julia/src/symbolic-node/autodiff.jl b/julia/src/symbolic-node/autodiff.jl deleted file mode 100644 index ea4af87626ec..000000000000 --- a/julia/src/symbolic-node/autodiff.jl +++ /dev/null @@ -1,178 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -""" - grad(s::SymbolicNode, wrt::Vector{Symbol}) - -Get the autodiff gradient of the current `SymbolicNode`. This function can -only be used if the current symbol is a loss function. - -# Arguments: -* `s::SymbolicNode`: current node. -* `wrt::Vector{Symbol}`: the names of the arguments to the gradient. - -Returns a `SymbolicNode` representing the corresponding gradient.
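A hedged one-liner for the `grad` entry point documented above (assuming `loss` is a loss-function node with arguments `:w` and `:b`):

```julia
g = grad(loss, [:w, :b])   # SymbolicNode computing the gradients w.r.t. :w and :b
```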
-""" -function grad(s::SymbolicNode, wrt::Vector{Symbol}) - hdr_ref = Ref{MX_handle}(C_NULL) - keys = string.(key) - - @mxcall(:MXSymbolGrad, (MX_handle, MX_uint, char_pp, Ptr{MX_handle}), - self, length(keys), keys, hdr_ref) - return SymbolicNode(MX_SymbolHandle(hdr_ref[])) -end - -function _build_shapes(shape_size::MX_uint, shape_ndim::Ptr{MX_uint}, shape_data::Ptr{Ptr{MX_uint}}) - shape_ndim = unsafe_wrap(Array, shape_ndim, shape_size) - shape_data = unsafe_wrap(Array, shape_data, shape_size) - shapes = map(1:shape_size) do i - my_shape = unsafe_wrap(Array, shape_data[i], shape_ndim[i]) - tuple(reverse(Int[my_shape...], dims = 1)...) - end - convert(Vector{Tuple}, shapes) -end - -function _infer_shape(self, keys, indptr, sdata) - ref_arg_shape_size = Ref{MX_uint}(0) - ref_arg_shape_ndim = Ref{Ptr{MX_uint}}(0) - ref_arg_shape_data = Ref{Ptr{Ptr{MX_uint}}}(0) - ref_out_shape_size = Ref{MX_uint}(0) - ref_out_shape_ndim = Ref{Ptr{MX_uint}}(0) - ref_out_shape_data = Ref{Ptr{Ptr{MX_uint}}}(0) - ref_aux_shape_size = Ref{MX_uint}(0) - ref_aux_shape_ndim = Ref{Ptr{MX_uint}}(0) - ref_aux_shape_data = Ref{Ptr{Ptr{MX_uint}}}(0) - ref_complete = Ref{Cint}(0) - @mxcall(:MXSymbolInferShape, - (MX_handle, MX_uint, char_pp, Ptr{MX_uint}, Ptr{MX_uint}, - Ref{MX_uint}, Ref{Ptr{MX_uint}}, Ref{Ptr{Ptr{MX_uint}}}, - Ref{MX_uint}, Ref{Ptr{MX_uint}}, Ref{Ptr{Ptr{MX_uint}}}, - Ref{MX_uint}, Ref{Ptr{MX_uint}}, Ref{Ptr{Ptr{MX_uint}}}, - Ref{Cint}), - self, length(indptr)-1, keys, indptr, sdata, - ref_arg_shape_size, ref_arg_shape_ndim, ref_arg_shape_data, - ref_out_shape_size, ref_out_shape_ndim, ref_out_shape_data, - ref_aux_shape_size, ref_aux_shape_ndim, ref_aux_shape_data, - ref_complete) - if ref_complete[] == 0 - return (nothing, nothing, nothing) - else - return ( - _build_shapes(ref_arg_shape_size[], ref_arg_shape_ndim[], ref_arg_shape_data[]), - _build_shapes(ref_out_shape_size[], ref_out_shape_ndim[], ref_out_shape_data[]), - _build_shapes(ref_aux_shape_size[], ref_aux_shape_ndim[], ref_aux_shape_data[]) - ) - end -end - -""" - infer_shape(self :: SymbolicNode, args...) - infer_shape(self :: SymbolicNode; kwargs...) - -Do shape inference according to the input shapes. The input shapes could be provided -as a list of shapes, which should specify the shapes of inputs in the same order as -the arguments returned by [`list_arguments`](@ref). Alternatively, the shape information -could be specified via keyword arguments. - -Returns a 3-tuple containing shapes of all the arguments, shapes of all the outputs and -shapes of all the auxiliary variables. If shape inference failed due to incomplete -or incompatible inputs, the return value will be `(nothing, nothing, nothing)`. -""" -function infer_shape(self :: SymbolicNode; kwargs...) - sdata = MX_uint[] - indptr = MX_uint[0] - for (k,v) in kwargs - append!(sdata, reverse([v...], dims = 1)) - push!(indptr, length(sdata)) - end - keys = AbstractString[string(x[1]) for x in kwargs] - _infer_shape(self, keys, indptr, sdata) -end -function infer_shape(self :: SymbolicNode, args::Union{Tuple, Cvoid}...) 
- sdata = MX_uint[] - indptr = MX_uint[0] - for arg in args - if isa(arg, Cvoid); continue; end - append!(sdata, reverse([arg...], dims = 1)) - push!(indptr, length(sdata)) - end - keys = Ptr{char_p}(0) - _infer_shape(self, keys, indptr, sdata) -end - -function _infer_type(self, keys, arg_type_data) - ref_in_type_size = Ref{MX_uint}() - ref_in_type_data = Ref{Ptr{Cint}}() - ref_out_type_size = Ref{MX_uint}() - ref_out_type_data = Ref{Ptr{Cint}}() - ref_aux_type_size = Ref{MX_uint}() - ref_aux_type_data = Ref{Ptr{Cint}}() - ref_complete = Ref{Cint}() - - @mxcall(:MXSymbolInferType, - (MX_handle, MX_uint, char_pp, Ptr{Cint}, - Ref{MX_uint}, Ref{Ptr{Cint}}, - Ref{MX_uint}, Ref{Ptr{Cint}}, - Ref{MX_uint}, Ref{Ptr{Cint}}, - Ref{Cint}), - self, length(arg_type_data)-1, keys, arg_type_data, - ref_in_type_size, ref_in_type_data, - ref_out_type_size, ref_out_type_data, - ref_aux_type_size, ref_aux_type_data, - ref_complete) - - if ref_complete[] == 0 - return (nothing, nothing, nothing) - else - in_type = unsafe_wrap(Array, ref_in_type_data[], ref_in_type_size[]) - out_type = unsafe_wrap(Array, ref_out_type_data[], ref_out_type_size[]) - aux_type = unsafe_wrap(Array, ref_aux_type_data[], ref_aux_type_size[]) - return ([fromTypeFlag(TypeFlag(t)) for t in in_type], - [fromTypeFlag(TypeFlag(t)) for t in out_type], - [fromTypeFlag(TypeFlag(t)) for t in aux_type]) - end -end - -""" - infer_type(self :: SymbolicNode; kwargs...) - infer_type(self :: SymbolicNode, args...) - -Do type inference according to the input types. The input types could be provided -as a list of types, which should specify the types of inputs in the same order as -the arguments returned by [`list_arguments`](@ref). Alternatively, the type information -could be specified via keyword arguments. - -Returns a 3-tuple containing types of all the arguments, types of all the outputs and -types of all the auxiliary variables. If type inference failed due to incomplete -or incompatible inputs, the return value will be `(nothing, nothing, nothing)`. -""" -function infer_type(self :: SymbolicNode; kwargs...) - types = Cint[toTypeFlag(x[2]) for x in kwargs] - keys = AbstractString[string(x[1]) for x in kwargs] - _infer_type(self, keys, types) -end - -function infer_type(self :: SymbolicNode, args :: Union{Tuple,Cvoid}...) - types = Cint[] - keys = Ptr{char_p}(0) - - for arg in args - if isa(arg, Cvoid); continue; end - push!(types, toTypeFlag(arg)) - end - _infer_type(self, keys, types) -end diff --git a/julia/src/symbolic-node/io.jl b/julia/src/symbolic-node/io.jl deleted file mode 100644 index ed461eb07c42..000000000000 --- a/julia/src/symbolic-node/io.jl +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
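The io.jl helpers that follow support a full JSON round-trip for symbols; a hedged sketch (hypothetical file path):

```julia
x = mx.Variable(:x)
json = mx.to_json(x)                        # SymbolicNode -> JSON string
x′ = mx.from_json(json, mx.SymbolicNode)    # JSON string -> SymbolicNode

mx.save("/tmp/sym.json", x′)                # hypothetical path
y = mx.load("/tmp/sym.json", mx.SymbolicNode)
```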
- -""" - to_json(s::SymbolicNode) - -Convert a `SymbolicNode` into a JSON string. -""" -function to_json(s::SymbolicNode) - ref_json = Ref{char_p}(0) - @mxcall(:MXSymbolSaveToJSON, (MX_handle, Ref{char_p}), s, ref_json) - return unsafe_string(ref_json[]) -end - -""" - from_json(repr :: AbstractString, ::Type{SymbolicNode}) - -Load a `SymbolicNode` from a JSON string representation. -""" -function from_json(repr :: AbstractString, ::Type{SymbolicNode}) - ref_hdr = Ref{MX_handle}(0) - @mxcall(:MXSymbolCreateFromJSON, (char_p, Ref{MX_handle}), repr, ref_hdr) - return SymbolicNode(MX_SymbolHandle(ref_hdr[])) -end - -""" - load(filename :: AbstractString, ::Type{SymbolicNode}) - -Load a `SymbolicNode` from a JSON file. -""" -function load(filename :: AbstractString, ::Type{SymbolicNode}) - ref_hdr = Ref{MX_handle}(0) - @mxcall(:MXSymbolCreateFromFile, (char_p, Ref{MX_handle}), filename, ref_hdr) - return SymbolicNode(MX_SymbolHandle(ref_hdr[])) -end - -""" - save(filename :: AbstractString, node :: SymbolicNode) - -Save a `SymbolicNode` to a JSON file. -""" -function save(filename :: AbstractString, node :: SymbolicNode) - @mxcall(:MXSymbolSaveToFile, (MX_handle, char_p), node, filename) -end diff --git a/julia/src/symbolic-node/op.jl b/julia/src/symbolic-node/op.jl deleted file mode 100644 index dfdf93df4a92..000000000000 --- a/julia/src/symbolic-node/op.jl +++ /dev/null @@ -1,444 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# compute graph related operators - -################################################################################ -# SymbolicNode attribute getter and setter -################################################################################ - -macro _list_symbol_info(self, func_name) - quote - ref_sz = Ref{MX_uint}(0) - ref_names = Ref{char_pp}(0) - @mxcall($func_name, (MX_handle, Ref{MX_uint}, Ref{char_pp}), - $(esc(self)), ref_sz, ref_names) - narg = ref_sz[] - names = unsafe_wrap(Array, ref_names[], narg) - names = [Symbol(unsafe_string(x)) for x in names] - return names - end -end - -""" - list_arguments(s::SymbolicNode) - -List all the arguments of this node. The argument for a node contains both -the inputs and parameters. For example, a `FullyConnected` node will -have both data and weights in its arguments. A composed node (e.g. a MLP) will -list all the arguments for intermediate nodes. - -Returns a list of symbols indicating the names of the arguments. -""" -list_arguments(s::SymbolicNode) = @_list_symbol_info(s, :MXSymbolListArguments) - -""" - list_outputs(s::SymbolicNode) - -List all the outputs of this node. - -Returns a list of symbols indicating the names of the outputs. 
-""" -list_outputs(s::SymbolicNode) = @_list_symbol_info(s, :MXSymbolListOutputs) - - -""" - list_auxiliary_states(s::SymbolicNode) - - -List all auxiliary states in the symbool. - -Auxiliary states are special states of symbols that do not corresponds to an argument, -and do not have gradient. But still be useful for the specific operations. -A common example of auxiliary state is the moving_mean and moving_variance in BatchNorm. -Most operators do not have Auxiliary states. - -Returns a list of symbols indicating the names of the auxiliary states. -""" -list_auxiliary_states(s::SymbolicNode) = - @_list_symbol_info(s, :MXSymbolListAuxiliaryStates) - -""" - get_internals(s::SymbolicNode) - -Get a new grouped `SymbolicNode` whose output contains all the internal outputs of -this `SymbolicNode`. -""" -function get_internals(s::SymbolicNode) - ref_hdr = Ref{MX_handle}(0) - @mxcall(:MXSymbolGetInternals, (MX_handle, Ref{MX_handle}), s, ref_hdr) - return SymbolicNode(MX_SymbolHandle(ref_hdr[])) -end - -""" - get_children(x::SymbolicNode) - -Gets a new grouped `SymbolicNode` whose output contains inputs to output -nodes of the original symbol. - -```julia -julia> x, y = @mx.var x y -(SymbolicNode x, SymbolicNode y) - -julia> z = x + y -SymbolicNode _plus0 - -julia> z |> mx.get_children |> mx.list_outputs -2-element Array{Symbol,1}: - :x - :y -``` -""" -function get_children(x::SymbolicNode) - hdl = Ref{MX_handle}(C_NULL) - @mxcall(:MXSymbolGetChildren, (MX_handle, Ref{MX_handle}), x, hdl) - sym = hdl[] |> MX_SymbolHandle |> SymbolicNode - isempty(list_outputs(sym)) ? nothing : sym -end - -""" - get_attr(s::SymbolicNode, key::Symbol) - -Get attribute attached to this `SymbolicNode` belonging to key. - -Returns the value belonging to key as a `String`. -If not available, returns `missing`. -""" -function get_attr(s::SymbolicNode, key::Symbol) - key_s = string(key) - ref_out = Ref{Cstring}() - ref_success = Ref{Cint}(-1) - @mxcall(:MXSymbolGetAttr, (MX_handle, Cstring, Ref{Cstring}, Ref{Cint}), - s, key_s, ref_out, ref_success) - if ref_success[] == 1 - unsafe_string(ref_out[]) - else - missing - end -end - -""" - list_attr(s::SymbolicNode) - -Get all attributes from a symbol. - -Returns a dictionary of attributes. -""" -function list_attr(s::SymbolicNode) - ref_sz = Ref{MX_uint}(0) - ref_strings = Ref{char_pp}(0) - @mxcall(:MXSymbolListAttrShallow, (MX_handle, Ref{MX_uint}, Ref{char_pp}), - s, ref_sz, ref_strings) - narg = 2*ref_sz[] - strings = unsafe_wrap(Array, ref_strings[], narg) - out = Dict{Symbol, String}() - for i in 1:2:narg - key = Symbol(unsafe_string(strings[i])) - value = unsafe_string(strings[i+1]) # Creates a copy of string - out[key] = value - end - return out -end - -""" - list_all_attr(s::SymbolicNode) - -Get all attributes from the symbol graph. - -Returns a dictionary of attributes. -""" -function list_all_attr(s::SymbolicNode) - ref_sz = Ref{MX_uint}(0) - ref_strings = Ref{char_pp}(0) - @mxcall(:MXSymbolListAttr, (MX_handle, Ref{MX_uint}, Ref{char_pp}), - s, ref_sz, ref_strings) - narg = 2*ref_sz[] - strings = unsafe_wrap(Array, ref_strings[], narg) - out = Dict{Symbol, String}() - for i in 1:2:narg - key = Symbol(unsafe_string(strings[i])) - value = unsafe_string(strings[i+1]) - out[key] = value - end - return out -end - -""" - set_attr(s::SymbolicNode, key::Symbol, value::AbstractString) - -Set the attribute key to value for this `SymbolicNode`. - -!!! note - It is encouraged not to call this function directly, unless you know exactly what you are doing. 
The - recommended way of setting attributes is to provide them when creating - the `SymbolicNode`. Changing the attributes of a `SymbolicNode` that has - already been used somewhere else might cause unexpected behavior and inconsistency. -""" -function set_attr(s::SymbolicNode, key::Symbol, value::AbstractString) - key_s = string(key) - value_s = String(value) - - @mxcall(:MXSymbolSetAttr, (MX_handle, Cstring, Cstring), s, key_s, value_s) -end - -""" - get_name(s::SymbolicNode) - -Get the name of the symbol. - - julia> x = mx.Variable(:data) - julia> mx.get_name(x) - :data - - julia> y = mx.FullyConnected(x, num_hidden = 128) - julia> mx.get_name(y) - :fullyconnected0 -""" -function get_name(s::mx.SymbolicNode) - name = Ref{mx.char_p}(C_NULL) - success = Ref(0) - @mxcall(:MXSymbolGetName, (MX_handle, Ref{char_p}, Ref{Int}), s.handle.value, name, success) - @assert success[] != -1 - - str = name[] - if str == C_NULL # e.g. the symbol returned via get_internals - string(s.handle.value) - else - Symbol(unsafe_string(str)) - end -end - -################################################################################ -# Atomic SymbolicNode functions dynamically imported from libmxnet -################################################################################ - -@inline function _create_atomic_symbol(creator::MX_handle, keys::Vector{String}, - vals::Vector{String}) - ref_sym_hdr = Ref{MX_handle}(C_NULL) - @mxcall(:MXSymbolCreateAtomicSymbol, - (MX_handle, MX_uint, Ptr{char_p}, Ptr{char_p}, Ref{MX_handle}), - creator, length(keys), keys, vals, ref_sym_hdr) - SymbolicNode(MX_SymbolHandle(ref_sym_hdr[])) -end - -@inline function _create_atomic_symbol(creator::MX_handle, keys::Vector{String}, - vals::Vector{String}, - attrs::Dict{Symbol, String}) - node = _create_atomic_symbol(creator, keys, vals) - # set attrs - for (k, v) in attrs - set_attr(node, k, v) - end - node -end - -function _define_atomic_symbol_creator(name::String) - handle = _get_libmx_op_handle(name) - f_desc, key_narg = _get_libmx_op_description(name, handle) - - f_desc *= "* `name::Symbol`: The name of the `SymbolicNode`. (e.g. `:my_symbol`), optional.\n" - f_desc *= "* `attrs::Dict{Symbol,String}`: The attributes associated with this `SymbolicNode`.\n\n" - - func_name = Symbol(name) - import_expr = _import_expr(func_name) - - func_def = quote - function $func_name(::Type{SymbolicNode}, args::SymbolicNode...; name = "", kwargs...) - - # NOTE: hacky way of solving the problem that the arguments of `dot` should be swapped - # See https://github.com/dmlc/MXNet.jl/issues/55 - if $name == "dot" - args = reverse(args) - end - - # NOTE: hacky way of solving the semantic difference of the axes parameter in Julia - # and in libmxnet. - # See https://github.com/dmlc/MXNet.jl/pull/123 - if $name == "transpose" - kwargs = Any[key != :axes ?
(key, arg) : (key, reverse(map(i->length(arg)-i, arg))) for (key, arg) in kwargs] - end - - param_keys = String[] - param_vals = String[] - symbol_kws = Dict{Symbol,SymbolicNode}() - attrs = Dict{Symbol,String}() - - $(if key_narg != "" - quote - if !in($key_narg, param_keys) - push!(param_keys, $key_narg) - push!(param_vals, string(length(args))) - end - end - end) - - for (k,v) in kwargs - if k == :name; continue; end - if isa(v, SymbolicNode) - symbol_kws[k] = v - elseif k == :attrs - if isa(v, Dict) - attrs = convert(Dict{Symbol, String}, v) - else - throw(ArgumentError("attrs needs to be a Dictionary")) - end - else - push!(param_keys, string(k)) - push!(param_vals, dump_mx_param(v)) - end - end - - if length(args) > 1 && length(symbol_kws) != 0 - @assert(false, $name * " accepts SymbolicNode arguments either positionally or as keyword arguments (with an optional positional `data` argument), not both.") - end - $(if key_narg != "" - quote - if length(symbol_kws) > 0 - @assert(false, $name * " takes a variable number of SymbolicNode arguments, " * - "please pass input Symbols via positional arguments, instead of keyword arguments.") - end - end - end) - - local op = _get_cached_libmx_op_handle($name) - node = _create_atomic_symbol(op.value, param_keys, param_vals, attrs) - - # generate a new name for the new symbol if the user did not provide one in kwargs - hint = lowercase($name) - name = get!(DEFAULT_NAME_MANAGER, name, hint) - - if length(symbol_kws) == 0 - _compose!(node, name, args...) - elseif length(args) == 1 - _compose!(node; name=name, data=args[1], symbol_kws...) - else - _compose!(node; name=name, symbol_kws...) - end - - return node - end # function - end # quote - - func_def2 = quote - @doc $f_desc - function $func_name(args::SymbolicNode...; kwargs...) - $func_name(SymbolicNode, args...; kwargs...) - end # function - end # quote - - return quote - $import_expr - $func_def - $func_def2 - end -end - -macro _import_atomic_symbol_creators() - # NOTE: those are operators defined for NDArray; we exclude them here - # because the calling convention for the type signature is not strong - # enough to disambiguate the method for NDArray and SymbolicNode - ignored_ops = ("_set_value", "reshape") # in lowercase - - op_names = _get_libmx_op_names() - func_exprs = map(op_names) do name - if lowercase(name) ∉ ignored_ops - expr = _define_atomic_symbol_creator(name) - end - end - - esc(quote - $(func_exprs...) - end) -end - -@_import_atomic_symbol_creators - -################################################################################ -# Utility macros to chain up symbols -################################################################################ - -macro chain(layers) - exprs = [] - last_layer = nothing - - function _chain_layer(layer, last_layer) - if last_layer ≡ nothing - return esc(layer) - else - if @capture(layer, f_(x__)) - x′ = esc.(x) - return :($f($last_layer, $(x′...))) - else - throw(AssertionError("$layer is not a valid function call and cannot be chained.")) - end - end - end - - while true - if @capture(layers, l1_=>l2_) - new_layer = gensym() - push!(exprs, :($new_layer = $(_chain_layer(l1, last_layer)))) - last_layer = new_layer - layers = l2 - else - push!(exprs, _chain_layer(layers, last_layer)) - break - end - end - Expr(:block, exprs...) -end - -################################################################################ -# compose -################################################################################ - -function _compose!(node::SymbolicNode; kwargs...)
- name = char_p(C_NULL) - arg_keys = AbstractString[] # FIXME: can it be String[] ? - arg_vals = MX_handle[] - - for (k, v) in kwargs - if k == :name - name = string(v) - else - @assert(isa(v, SymbolicNode), "Compose expects `SymbolicNode` as arguments") - push!(arg_keys, string(k)) - push!(arg_vals, v) - end - end - - @mxcall(:MXSymbolCompose, - (MX_handle, char_p, MX_uint, Ptr{char_p}, Ptr{MX_handle}), - node, name, length(arg_keys), arg_keys, arg_vals) - node -end - -_compose!(node::SymbolicNode, args::SymbolicNode...) = - _compose!(node, char_p(0), args...) - -function _compose!(node::SymbolicNode, name::Union{Symbol, char_p}, args::SymbolicNode...) - if name isa Symbol - name = string(name) - end - arg_keys = Ptr{char_p}(C_NULL) - arg_vals = MX_handle[args...] - - @mxcall(:MXSymbolCompose, - (MX_handle, char_p, MX_uint, Ptr{char_p}, Ptr{MX_handle}), - node, name, length(arg_vals), arg_keys, arg_vals) - node -end diff --git a/julia/src/symbolic-node/show.jl b/julia/src/symbolic-node/show.jl deleted file mode 100644 index 9d40ea124505..000000000000 --- a/julia/src/symbolic-node/show.jl +++ /dev/null @@ -1,62 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -Base.show(io::IO, sym::SymbolicNode) = - print(io, "$(typeof(sym)) $(get_name(sym))") - -""" - print([io::IO], sym::SymbolicNode) - -Print the content of the symbol; used for debugging. - -```julia -julia> layer = @mx.chain mx.Variable(:data) => - mx.FullyConnected(name=:fc1, num_hidden=128) => - mx.Activation(name=:relu1, act_type=:relu) -MXNet.mx.SymbolicNode(MXNet.mx.MX_SymbolHandle(Ptr{Nothing} @0x000055b29b9c3520)) - -julia> print(layer) -Symbol Outputs: - output[0]=relu1(0) -Variable:data -Variable:fc1_weight -Variable:fc1_bias --------------------- -Op:FullyConnected, Name=fc1 -Inputs: - arg[0]=data(0) version=0 - arg[1]=fc1_weight(0) version=0 - arg[2]=fc1_bias(0) version=0 -Attrs: - num_hidden=128 --------------------- -Op:Activation, Name=relu1 -Inputs: - arg[0]=fc1(0) -Attrs: - act_type=relu -``` -""" -function Base.print(io::IO, sym::SymbolicNode) - out = Ref{mx.char_p}(C_NULL) - @mx.mxcall(:MXSymbolPrint, (mx.MX_SymbolHandle, Ref{mx.char_p}), sym.handle, out) - print(io, unsafe_string(out[])) -end - -Base.print(sym::SymbolicNode) = print(stdout, sym) - - diff --git a/julia/src/symbolic-node/type.jl b/julia/src/symbolic-node/type.jl deleted file mode 100644 index 60f2b5030246..000000000000 --- a/julia/src/symbolic-node/type.jl +++ /dev/null @@ -1,123 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -""" - SymbolicNode - -SymbolicNode is the basic building block of the symbolic graph in MXNet.jl. -It is a callable object and supports the following calls: - - (s::SymbolicNode)(args::SymbolicNode...) - (s::SymbolicNode)(; kwargs...) - -Make a new node by composing `s` with `args`, or with the arguments -specified as keyword arguments. -""" -mutable struct SymbolicNode - handle::MX_SymbolHandle -end - -const SymbolicNodeOrReal = Union{SymbolicNode,Real} - -Base.unsafe_convert(::Type{MX_handle}, s::SymbolicNode) = - Base.unsafe_convert(MX_handle, s.handle) -Base.convert(T::Type{MX_handle}, s::SymbolicNode) = Base.unsafe_convert(T, s) -Base.cconvert(T::Type{MX_handle}, s::SymbolicNode) = Base.unsafe_convert(T, s) - -""" - deepcopy(s::SymbolicNode) - -Make a deep copy of a SymbolicNode. -""" -function Base.deepcopy(s::SymbolicNode) - ref_hdr = Ref{MX_handle}(C_NULL) - @mxcall(:MXSymbolCopy, (MX_handle, Ref{MX_handle}), s, ref_hdr) - SymbolicNode(MX_SymbolHandle(ref_hdr[])) -end - -""" - copy(s::SymbolicNode) - -Make a copy of a SymbolicNode. The same as making a deep copy. -""" -Base.copy(s::SymbolicNode) = Base.deepcopy(s) - - -function (s::SymbolicNode)(args::SymbolicNode...) - s = deepcopy(s) - _compose!(s, args...) -end - -function (s::SymbolicNode)(; kwargs...) - s = deepcopy(s) - _compose!(s; kwargs...) -end - -""" - Variable(name::Union{Symbol,AbstractString}; attrs) - -Create a symbolic variable with the given name. This is typically used as a placeholder. -For example, the data node, which acts as the starting point of a network architecture. - -## Arguments - -* `attrs::Dict{Symbol,<:AbstractString}`: The attributes associated with this `Variable`. -""" -function Variable(name::Union{Symbol,AbstractString}; attrs = Dict()) - attrs = convert(Dict{Symbol, AbstractString}, attrs) - hdr_ref = Ref{MX_handle}(C_NULL) - @mxcall(:MXSymbolCreateVariable, (char_p, Ref{MX_handle}), name, hdr_ref) - node = SymbolicNode(MX_SymbolHandle(hdr_ref[])) - for (k, v) in attrs - set_attr(node, k, v) - end - node -end - -""" - @var ... - -A handy macro for creating `mx.Variable`. - -```julia -julia> x = @mx.var x -MXNet.mx.SymbolicNode x - -julia> x, y, z = @mx.var x y z -(MXNet.mx.SymbolicNode x, MXNet.mx.SymbolicNode y, MXNet.mx.SymbolicNode z) -``` -""" -macro var(n::Symbol) - Expr(:call, :Variable, QuoteNode(n)) -end - -macro var(names::Symbol...) - Expr(:tuple, map(n -> Expr(:call, :Variable, QuoteNode(n)), names)...) -end - -""" - Group(nodes::SymbolicNode...) - -Create a `SymbolicNode` by grouping nodes together. -""" -function Group(nodes::SymbolicNode...) - handles = MX_handle[nodes...]
- ref_hdr = Ref{MX_handle}(0) - @mxcall(:MXSymbolCreateGroup, (MX_uint, Ptr{MX_handle}, Ref{MX_handle}), - length(handles), handles, ref_hdr) - SymbolicNode(MX_SymbolHandle(ref_hdr[])) -end diff --git a/julia/src/util.jl b/julia/src/util.jl deleted file mode 100644 index ac7f4fc71653..000000000000 --- a/julia/src/util.jl +++ /dev/null @@ -1,270 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -################################################################################ -# Dataset related utilities -################################################################################ -function get_data_dir() - data_dir = joinpath(@__DIR__, "..", "data") - mkpath(data_dir) - data_dir -end - -function get_mnist_ubyte() - data_dir = get_data_dir() - mnist_dir = joinpath(data_dir, "mnist") - mkpath(mnist_dir) - filenames = Dict(:train_data => "train-images-idx3-ubyte", - :train_label => "train-labels-idx1-ubyte", - :test_data => "t10k-images-idx3-ubyte", - :test_label => "t10k-labels-idx1-ubyte") - filenames = Dict((x[1] => joinpath(mnist_dir, x[2]) for x ∈ pairs(filenames))) - if !all(isfile, values(filenames)) - cd(mnist_dir) do - data = download("http://data.mxnet.io/mxnet/data/mnist.zip", "mnist.zip") - try - run(`unzip -u $data`) - catch - try - run(pipeline(`7z x $data`, stdout = devnull)) - catch - error("Extraction failed: no extraction program found in path") - end - end - end - end - return filenames -end - -function get_cifar10() - data_dir = get_data_dir() - cifar10_dir = joinpath(data_dir, "cifar10") - mkpath(cifar10_dir) - filenames = Dict(:train => "cifar/train.rec", :test => "cifar/test.rec") - filenames = Dict(map((x) -> x[1] => joinpath(cifar10_dir, x[2]), filenames)) - if !all(isfile, values(filenames)) - cd(cifar10_dir) do - download("http://data.mxnet.io/mxnet/data/cifar10.zip", "cifar10.zip") - try - run(`unzip -u cifar10.zip`) - catch - try - run(pipeline(`7z x cifar10.zip`, stdout = devnull)) - catch - error("Extraction failed: no extraction program found in path") - end - end - end - end - - filenames[:mean] = joinpath(cifar10_dir, "cifar/cifar_mean.bin") - return filenames -end - - -################################################################################ -# Internal Utilities -################################################################################ -function _get_libmx_op_names() - n = Ref{MX_uint}(0) - names = Ref{char_pp}(0) - - @mxcall(:MXListAllOpNames, (Ref{MX_uint}, Ref{char_pp}), n, names) - - names = unsafe_wrap(Array, names[], n[]) - return [unsafe_string(x) for x in names] -end -function _get_libmx_op_handle(name :: String) - handle = Ref{MX_handle}(0) - @mxcall(:NNGetOpHandle, (char_p, Ref{MX_handle}), name, handle) - return MX_OpHandle(handle[]) -end - -# We keep a cache and retrieve the address
every time -# we run Julia, instead of pre-compiling with a macro, -# because the actual handle might change in different -# runs -const _libmx_op_cache = Dict{String, MX_OpHandle}() -function _get_cached_libmx_op_handle(name :: String) - if !haskey(_libmx_op_cache, name) - handle = _get_libmx_op_handle(name) - _libmx_op_cache[name] = handle - return handle - else - return _libmx_op_cache[name] - end -end - -function _get_libmx_op_description(name::String, handle::MX_OpHandle) - # get operator information (human readable) - ref_real_name = Ref{char_p}(0) - ref_desc = Ref{char_p}(0) - ref_narg = Ref{MX_uint}(0) - - ref_arg_names = Ref{char_pp}(0) - ref_arg_types = Ref{char_pp}(0) - ref_arg_descs = Ref{char_pp}(0) - - ref_key_narg = Ref{char_p}(0) - ref_ret_type = Ref{char_p}(0) - - @mxcall(:MXSymbolGetAtomicSymbolInfo, - (MX_handle, Ref{char_p}, Ref{char_p}, Ref{MX_uint}, Ref{char_pp}, - Ref{char_pp}, Ref{char_pp}, Ref{char_p}, Ref{char_p}), - handle, ref_real_name, ref_desc, ref_narg, ref_arg_names, - ref_arg_types, ref_arg_descs, ref_key_narg, ref_ret_type) - - real_name = unsafe_string(ref_real_name[]) - signature = _format_signature(Int(ref_narg[]), ref_arg_names) - desc = " " * name * "(" * signature * ")\n\n" - if real_name != name - desc *= name * " is an alias of " * real_name * ".\n\n" - end - - key_narg = unsafe_string(ref_key_narg[]) - if key_narg != "" - desc *= "**Note**: " * name * " takes a variable number of positional inputs. " - desc *= "So instead of calling as $name([x, y, z], $key_narg=3), " - desc *= "one should call via $name(x, y, z), and $key_narg will be " - desc *= "determined automatically.\n\n" - end - - desc *= unsafe_string(ref_desc[]) * "\n\n" - desc *= "# Arguments\n" - desc *= _format_docstring(Int(ref_narg[]), ref_arg_names, ref_arg_types, ref_arg_descs) - return desc, key_narg -end - -_format_typestring(s::String) = replace(s, r"\bSymbol\b" => "SymbolicNode") - -function _format_docstring(narg::Int, arg_names::Ref{char_pp}, arg_types::Ref{char_pp}, arg_descs::Ref{char_pp}, remove_dup::Bool=true) - param_keys = Set{String}() - - arg_names = unsafe_wrap(Array, arg_names[], narg) - arg_types = unsafe_wrap(Array, arg_types[], narg) - arg_descs = unsafe_wrap(Array, arg_descs[], narg) - docstrings = String[] - - for i = 1:narg - arg_name = unsafe_string(arg_names[i]) - if arg_name ∈ param_keys && remove_dup - continue - end - push!(param_keys, arg_name) - - arg_type = _format_typestring(unsafe_string(arg_types[i])) - arg_desc = unsafe_string(arg_descs[i]) - push!(docstrings, "* `$arg_name::$arg_type`: $arg_desc\n") - end - return join(docstrings, "\n") -end - -function _format_signature(narg::Int, arg_names::Ref{char_pp}) - arg_names = unsafe_wrap(Array, arg_names[], narg) - - return join([unsafe_string(name) for name in arg_names], ", ") -end - -""" -Extract the `Defined in ...` line. - -julia> mx._getdocdefine("sgd_update") -"Defined in `src/operator/optimizer_op.cc:L53`" -""" -function _getdocdefine(name::String) - op = _get_libmx_op_handle(name) - str = _get_libmx_op_description(name, op)[1] - lines = split(str, '\n') - for m ∈ match.(Ref(r"^Defined in ([\S]+)$"), lines) - m !== nothing && return "Defined in `$(m.captures[1])`" - end - "" -end - -""" -libmxnet operator signature checker. - -C/Python have a different convention for accessing arrays. Those languages -handle arrays with row-major layout and zero-based indexing, which differs from -Julia's column-major layout and 1-based indexing.
- -This function scans the docstrings of the NDArray APIs and -filters out the signatures that contain `axis`, `axes`, `keepdims`, or `shape` -as function arguments. - -We invoke this checker in the Travis CI build and emit a warning message -if a function has not been manually mapped -(implying that its dimension handling may look odd). - -If you find any warnings in the Travis CI build, please open an issue on -GitHub. -""" -function _sig_checker() - names = filter(n -> ∉(lowercase(n), _op_import_bl), _get_libmx_op_names()) - foreach(names) do name - op_handle = _get_libmx_op_handle(name) - - desc, key_narg = _get_libmx_op_description(name, op_handle) - _sig = desc |> s -> split(s, '\n') |> first |> strip - _m = match(r"(axis|axes|keepdims|shape)", _sig) - - if _m === nothing - return - end - - @warn(_sig) - - end -end - -""" -Get the first positional argument from a function signature. -""" -function _firstarg(sig::Expr) - if sig.head ∈ (:where, :(::)) - _firstarg(sig.args[1]) - elseif sig.head == :call - i = if sig.args[2] isa Expr && sig.args[2].head == :parameters - # some keyword arguments are located at args[2] - 3 - elseif sig.args[1] === :broadcast_ - # case of broadcasting, skip the first arg `::typeof(...)` - 3 - else - 2 - end - _firstarg(sig.args[i]) - end -end - -_firstarg(s::Symbol) = s - -const _import_map = Dict{Symbol,Union{Missing,Module}}( - :diag => LinearAlgebra, - :dot => LinearAlgebra, - :norm => LinearAlgebra, - - :shuffle => Random, - - :mean => Statistics, - - :gamma => missing, -) - -function _import_expr(func_name::Symbol) - mod = get(_import_map, func_name, Base) - isdefined(mod, func_name) ? :(import $(Symbol(mod)): $func_name) : :() -end diff --git a/julia/src/visualize.jl b/julia/src/visualize.jl deleted file mode 100644 index 849b784779ea..000000000000 --- a/julia/src/visualize.jl +++ /dev/null @@ -1,213 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import JSON - -""" - to_graphviz(network) - -* `network::SymbolicNode`: the network to visualize. -* `title::AbstractString`: keyword argument, default `"Network Visualization"`, - the title of the GraphViz graph. -* `input_shapes`: keyword argument, default `nothing`. If provided, - will run shape inference and plot with the shape information. Should - be either a dictionary of name-shape mapping or an array of shapes. - -Returns the graph description in GraphViz `dot` language. -""" -function to_graphviz(network :: SymbolicNode; title="Network Visualization", input_shapes=nothing) - if !isa(input_shapes, Cvoid) - internals = get_internals(network) - if isa(input_shapes, Dict) - _, out_shapes, _ = infer_shape(internals; input_shapes...) - else - _, out_shapes, _ = infer_shape(internals, input_shapes...)
- end - @assert(!isa(out_shapes, Cvoid), "Failed to do shape inference, input shapes are incomplete") - shape_dict = Dict(zip(list_outputs(internals), out_shapes)) - draw_shape = true - else - draw_shape = false - end - - conf = JSON.parse(to_json(network)) - nodes = conf["nodes"] - heads = unique([x[1]+1 for x in conf["heads"]]) - node_attr = Dict(:shape => :box, :fixedsize => true, :width => 1.3, - :height => 0.8034, :style => (:rounded, :filled), :penwidth => 2) - io = IOBuffer() - println(io, "digraph $(_simple_escape(title)) {") - println(io, "node [fontsize=10];") - println(io, "edge [fontsize=10];") - - # color map - fillcolors = ("#8dd3c7", "#fb8072", "#ffffb3", "#bebada", "#80b1d3", - "#fdb462", "#b3de69", "#fccde5") - edgecolors = ("#245b51", "#941305", "#999900", "#3b3564", "#275372", - "#975102", "#597d1c", "#90094e") - - # make nodes - for i = 1:length(nodes) - node = nodes[i] - op = node["op"] - name = node["name"] - attr = deepcopy(node_attr) - label = op - - # Up to version 0.11.0 of mxnet, additional info was stored in - # node["attr"]. Starting from 0.12, `attr` was changed to `attrs`. - # See: https://github.com/dmlc/nnvm/pull/152 - if haskey(node, "attrs") - node_info = node["attrs"] - elseif haskey(node, "attr") - node_info = node["attr"] - end - - if op == "null" - if i ∈ heads - # heads are output nodes - label = node["name"] - colorkey = 1 - else - # otherwise they are input nodes, which might be data, label or parameters - continue - end - elseif op == "Convolution" - if haskey(node_info,"stride") - stride_info=_extract_shape(node_info["stride"]) - else - stride_info="1" - end - - label = format("Convolution\nkernel={1}\nstride={2}\nn-filter={3}", - _extract_shape(node_info["kernel"]), - stride_info, - node_info["num_filter"]) - colorkey = 2 - elseif op == "FullyConnected" - label = format("FullyConnected\nnum-hidden={1}", node_info["num_hidden"]) - colorkey = 2 - elseif op == "Activation" - label = format("Activation\nact-type={1}", node_info["act_type"]) - colorkey = 3 - elseif op == "BatchNorm" - colorkey = 4 - elseif op == "Pooling" - if haskey(node_info,"stride") - stride_info=_extract_shape(node_info["stride"]) - else - stride_info="1" - end - label = format("Pooling\ntype={1}\nkernel={2}\nstride={3}", - node_info["pool_type"], - _extract_shape(node_info["kernel"]), - stride_info) - colorkey = 5 - elseif op ∈ ("Concat", "Flatten", "Reshape") - colorkey = 6 - elseif endswith(op, "Output") || op == "BlockGrad" - colorkey = 7 - else - colorkey = 8 - end - - if op != "null" - label = "$name\n$label" - end - attr[:fillcolor] = fillcolors[colorkey] - attr[:color] = edgecolors[colorkey] - attr[:label] = label - _format_graphviz_node(io, name, attr) - end - - # add edges - for i = 1:length(nodes) - node = nodes[i] - op = node["op"] - name = node["name"] - if op == "null" - continue - end - inputs = node["inputs"] - for item in inputs - input_node = nodes[item[1]+1] - input_name = input_node["name"] - if input_node["op"] != "null" || (item[1]+1) ∈ heads - attr = Dict(:dir => :back, :arrowtail => :open, :color => "#737373") - if draw_shape - if input_node["op"] != "null" - key = Symbol(input_name, "_output") - shape = shape_dict[key][1:end-1] - else - key = Symbol(input_name) - shape = shape_dict[key][1:end-1] - end - label = "(" * join([string(x) for x in shape], ",") * ")" - attr[:label] = label - end - _format_graphviz_edge(io, name, input_name, attr) - end - end - end - println(io, "}") - - return String(take!(io)) -end - -function _format_graphviz_attr(io::IOBuffer, attrs) - label
= get(attrs, :label, nothing) - if isa(label, Cvoid) - print(io, " [") - else - print(io, " [label=$(_simple_escape(label)),") - end - first_attr = true - for (k,v) in attrs - if k != :label - if !first_attr - print(io, ",") - end - first_attr = false - - if isa(v, AbstractString) && v[1] == '#' - # color - v = _simple_escape(v) - elseif isa(v, Tuple) - v = _simple_escape(join([string(x) for x in v], ",")) - end - print(io, "$k=$v") - end - end - println(io, "];") -end -function _simple_escape(str) - str = replace(string(str), r"\n" => "\\n") - return "\"$str\"" -end -function _format_graphviz_node(io::IOBuffer, name::AbstractString, attrs) - print(io, "$(_simple_escape(name)) ") - _format_graphviz_attr(io, attrs) -end -function _format_graphviz_edge(io::IOBuffer, head, tail, attrs) - print(io, """$(_simple_escape(head)) -> $(_simple_escape(tail)) """) - _format_graphviz_attr(io, attrs) -end -function _extract_shape(str :: AbstractString) - shape = collect(m.match for m ∈ eachmatch(r"\d+", str)) - shape = reverse(shape) # JSON in libmxnet has the shape reversed (column-major vs row-major) - return "(" * join(shape, ",") * ")" -end diff --git a/julia/test/common.jl b/julia/test/common.jl deleted file mode 100644 index 5ac5f905143a..000000000000 --- a/julia/test/common.jl +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -################################################################################ -# Common models used in testing -################################################################################ -function rand_dims(max_ndim=6) - tuple(rand(1:10, rand(1:max_ndim))...) -end - -function mlp2() - data = mx.Variable(:data) - out = mx.FullyConnected(data, name=:fc1, num_hidden=1000) - out = mx.Activation(out, act_type=:relu) - out = mx.FullyConnected(out, name=:fc2, num_hidden=10) - return out -end - -function mlpchain() - mx.@chain mx.Variable(:data) => - mx.FullyConnected(name=:fc1, num_hidden=1000) => - mx.Activation(act_type=:relu) => - mx.FullyConnected(name=:fc2, num_hidden=10) -end - -""" -Execution helper for SymbolicNode. -""" -function exec(x::mx.SymbolicNode; feed...) - ks, vs = zip(feed...) - vs′ = mx.NDArray.(vs) - - e = mx.bind(x, context = mx.cpu(), args = Dict(zip(ks, vs′))) - mx.forward(e) - e.outputs -end diff --git a/julia/test/runtests.jl b/julia/test/runtests.jl deleted file mode 100644 index e30b68ac3e6f..000000000000 --- a/julia/test/runtests.jl +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership.
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -using Test -using MXNet - -# Run the tests in the whole directory; the most recently modified files -# are run first, which shortens the waiting time when writing -# or modifying unit tests. -function test_dir(dir) - jl_files = sort( - filter(x -> occursin(r".*\.jl$", x), readdir(dir)), - by = fn -> stat(joinpath(dir, fn)).mtime) - foreach(reverse(jl_files)) do file - include("$dir/$file") - end -end - -@info "libmxnet version => $(mx.LIB_VERSION[])" - -const BASEDIR = joinpath(@__DIR__, "..") - -include(joinpath(@__DIR__, "common.jl")) -@testset "MXNet Test" begin - test_dir(joinpath(@__DIR__, "unittest")) -end diff --git a/julia/test/unittest/autograd.jl b/julia/test/unittest/autograd.jl deleted file mode 100644 index 8209fe7e0327..000000000000 --- a/julia/test/unittest/autograd.jl +++ /dev/null @@ -1,402 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License.
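Before the autograd tests, here is a minimal sketch of the record-and-backward workflow they exercise; it mirrors the `attach_grad!`/`record`/`backward!`/`getgrad` calls used in `test_record` below, with illustrative values:

```julia
using MXNet

x = mx.NDArray([1 2; 3 4])
mx.attach_grad!(x)            # allocate a gradient buffer for x
y = mx.record() do            # record the computation for differentiation
    mx.square(x)
end
mx.backward!(y)               # d(x^2)/dx = 2x
@assert copy(mx.getgrad(x)) == [2 4; 6 8]
```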
- -module TestAutoGrad - -using MXNet -using Test - - -function checkgradient(f, x, y, ∇) - ∇x = mx.attach_grad!(x) - y′ = mx.record(f) - @test copy(y′) ≈ y - @test copy(∇x) |> sum == 0 - mx.backward!(y′) - @test copy(mx.getgrad(x)) ≈ ∇ -end # function checkgradient - - -function test_getgrad() - @info("AutoGrad::getgrad") - - @info("AutoGrad::getgrad::unattached") - @test nothing == mx.getgrad(mx.zeros(10)) - - @info("AutoGrad::getgrad::attached") - x = mx.NDArray([1 2; 3 4]) - grad = mx.attach_grad!(x) - @test eltype(grad) ≡ Int - @test copy(grad) == [0 0; 0 0] - - grad[:] = 42 - @test copy(mx.getgrad(x)) == [42 42; 42 42] -end - - -function test_mark_variables!() - @info("AutoGrad::mark_variables!") - x = mx.zeros(4) - ẋ = mx.zeros(4) - y = mx.zeros(4) - ẏ = mx.zeros(4) - mx.mark_variables!([x, y], [ẋ, ẏ], [:nop, :nop]) - ẋ[:] = 42 - ẏ[:] = 24 - - @test copy(mx.getgrad(x)) == [42, 42, 42, 42] - @test copy(mx.getgrad(y)) == [24, 24, 24, 24] - - @info("AutoGrad::mark_variables!::invalid grad_reqs") - x = mx.zeros(4) - y = mx.zeros(4) - @test_throws ArgumentError mx.mark_variables!(x, y, :magic) - @test_throws ArgumentError mx.mark_variables!([x], [y], [:magic]) - - @info("AutoGrad::mark_variables!::args length mismatch") - x = mx.zeros(4) - y = mx.zeros(4) - z = mx.zeros(4) - @test_throws ArgumentError mx.mark_variables!([x], [y, z]) - @test_throws ArgumentError mx.mark_variables!([x], [y], [:write, :nop]) -end - - -function test_record() - let x = mx.NDArray([1 2; 3 4]) - @info("AutoGrad::record::backward!") - - y = [1 4; 9 16] - ∇ = [2 4; 6 8] # gradient is 2x - checkgradient(x, y, ∇) do - mx.square(x) - end - end - - let x = mx.NDArray([1 2; 3 4]) - @info("AutoGrad::record::symbol") - - mx.attach_grad!(x) - y = mx.record() do - mx.square(x) - end - - @test copy(y) == [1 4; 9 16] - - @test isa(mx.symbol(y), mx.SymbolicNode) - end - - let x = mx.NDArray([1 2; 3 4]) - @info("AutoGrad::record::backward!(retain_graph=true)") - - mx.attach_grad!(x) - y = mx.record() do - mx.square(x) - end - - @test copy(y) == [1 4; 9 16] - - mx.backward!(y, retain_graph=true) - # gradient is 2x - @test copy(mx.getgrad(x)) == [2 4; 6 8] - - @test isa(mx.symbol(y), mx.SymbolicNode) - end - - mx._record(nothing, nothing) do # no error with this edge case - @test true - end -end # function test_record - - -function test_is_recording() - @info("AutoGrad::is_recording") - mx.record() do - @test mx.is_recording() - end -end # function test_is_recording - - -function test_is_training() - @info("AutoGrad::is_training") - mx.record() do - @test mx.is_training() - end - - mx.record(false) do - @test !mx.is_training() - end -end # function test_is_training - - -function test_pause() - @info("AutoGrad::pause") - let x = mx.NDArray([1 2; 3 4]) - ∇ = mx.attach_grad!(x) - y = mx.record() do - y = mx.square(x) - mx.pause() do - z = mx.square(y) - @test copy(z) == [1 16; 81 256] - end - y - end - - @test copy(y) == [1 4; 9 16] - - mx.backward!(y) - @test copy(∇) == [2 4; 6 8] - end -end # function test_pause - - -function test_train_mode() - @info("AutoGrad::train_mode") - let x = mx.NDArray(Float32[1 2; 3 4]) - y = mx.train_mode() do - mx.Dropout(x, p = 1) - end - - @test all(isnan.(copy(y))) - end -end # function test_train_mode - - -function test_predict_mode() - @info("AutoGrad::predict_mode") - let x = mx.NDArray(Float32[1 2; 3 4]) - y = mx.predict_mode() do - mx.Dropout(x, p = 1) - end - - @test copy(y) ≈ Float32[1 2; 3 4] - end -end # function test_predict_mode - - -function test_backward!() - 
@info("AutoGrad::backward!::with head_grad") - let x = mx.NDArray(Float32[1 2; 3 4]), A = Float32[.2 .4; 0 .1] - ∇ = mx.attach_grad!(x) - y = mx.record() do - mx.square(x) - end - mx.backward!(y, mx.NDArray(A)) - @test copy(∇) ≈ [2 4; 6 8] .* A - end - - @info("AutoGrad::backward!::with head_grads") - let x = mx.NDArray(Float32[1 2; 3 4]) - ∇ = mx.attach_grad!(x) - mx.record() do - x′ = mx.square(x) - y = mx.square(x) - z = mx.square(x) .+ 42 - mx.backward!([x′, y, z], [nothing, - mx.NDArray(Float32[.01 .01; 1 1]), - mx.NDArray(Float32[1 1; .1 .1])]) - end - ans = [4.02 8.04 - 12.6 16.8] - @test copy(∇) ≈ ans - end - - @info("AutoGrad::backward!::ArgumentError") - let x = mx.NDArray([42]) - @test_throws ArgumentError mx.backward!([x], [24]) - end -end # function test_backward! - - -function test_symbol() - @info("AutoGrad::symbol") - - let x = mx.zeros(4) - mx.attach_grad!(x) - @test isa(mx.symbol(x), mx.SymbolicNode) - end -end - - -function test_add() - @info("AutoGrad::add") - - @info("AutoGrad::add::x") - let x = mx.NDArray([1 2; 3 4]) - y = [1 2; 3 4] - ∇ = [1 1; 1 1] # gradient is 1 - checkgradient(x, y, ∇) do - x - end - end - - @info("AutoGrad::add::+x") - let x = mx.NDArray([1 2; 3 4]) - y = [1 2; 3 4] - ∇ = [1 1; 1 1] # gradient is 1 - checkgradient(x, y, ∇) do - +x - end - end - - @info("AutoGrad::add::x .+ 42") - let x = mx.NDArray([1 2; 3 4]) - y = [43 44; 45 46] - ∇ = [1 1; 1 1] # gradient is 1 - checkgradient(x, y, ∇) do - x .+ 42 - end - end - - @info("AutoGrad::add::42 .+ x") - let x = mx.NDArray([1 2; 3 4]) - y = [43 44; 45 46] - ∇ = [1 1; 1 1] - checkgradient(x, y, ∇) do - 42 .+ x - end - end - - # TODO: @info("AutoGrad::add::x .+ y") -end # function test_add - - -function test_sub() - @info("AutoGrad::sub") - - @info("AutoGrad::sub::-x") - let x = mx.NDArray([1 2; 3 4]) - y = [-1 -2; -3 -4] - ∇ = [-1 -1; -1 -1] # gradient is -1 - checkgradient(x, y, ∇) do - -x - end - end - - @info("AutoGrad::sub::x .- 42") - let x = mx.NDArray([1 2; 3 4]) - y = [-41 -40; -39 -38] - ∇ = [1 1; 1 1] - checkgradient(x, y, ∇) do - x .- 42 - end - end - - @info("AutoGrad::sub::42 .- x") - let x = mx.NDArray([1 2; 3 4]) - y = [41 40; 39 38] - ∇ = -[1 1; 1 1] - checkgradient(x, y, ∇) do - 42 .- x - end - end - - # TODO: @info("AutoGrad::sub::x .- y") -end # function test_sub - - -function test_mul() - @info("AutoGrad::mul") - - @info("AutoGrad::mul::2x .* x") - let x = mx.NDArray([1 2; 3 4]) - y = [2 8; 18 32] - ∇ = [4 8; 12 16] # 4x - checkgradient(x, y, ∇) do - 2x .* x - end - end - - @info("AutoGrad::mul::x * 2 .* x") - let x = mx.NDArray([1 2; 3 4]) - y = [2 8; 18 32] - ∇ = [4 8; 12 16] # 4x - checkgradient(x, y, ∇) do - x * 2 .* x - end - end -end - - -function test_div() - @info("AutoGrad::div") - - @info("AutoGrad::div::x ./ 2") - let x = mx.NDArray(Float32[1 2; 3 4]) - y = Float32[.5 1; 1.5 2] - ∇ = [.5 .5; .5 .5] - checkgradient(x, y, ∇) do - x ./ 2 - end - end - - @info("AutoGrad::rdiv::2 ./ x") - let A = Float32[1 2; 3 4], x = mx.NDArray(A) - y = 2 ./ A - ∇ = @. 
-2 / A^2 # -2 / x² - checkgradient(x, y, ∇) do - 2 ./ x - end - end -end # function test_div - - -function test_power() - @info("AutoGrad::power") - - @info("AutoGrad::power::x.^3") - let A = Float32[1 2; 3 4] - x = mx.NDArray(A) - y = A.^3 - ∇ = 3(A.^2) - checkgradient(x, y, ∇) do - x.^3 - end - end - - @info("AutoGrad::power::x.^.5") - let A = Float32[1 2; 3 4] - x = mx.NDArray(A) - y = A.^.5 - ∇ = .5(A.^-.5) - checkgradient(x, y, ∇) do - x.^.5 - end - end -end - - -@testset "AutoGrad Test" begin - test_getgrad() - test_mark_variables!() - test_record() - test_is_recording() - test_is_training() - test_pause() - test_train_mode() - test_predict_mode() - test_backward!() - test_symbol() - test_add() - test_sub() - test_mul() - test_div() - test_power() -end - - -end # module TestAutoGrad diff --git a/julia/test/unittest/bind.jl b/julia/test/unittest/bind.jl deleted file mode 100644 index a221733cded1..000000000000 --- a/julia/test/unittest/bind.jl +++ /dev/null @@ -1,110 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License.
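The bind tests below revolve around the executor API. Here is a small sketch of that workflow, assuming the `mx.bind`/`mx.forward`/`mx.backward` calls as they appear in `test_arithmetic` below (shapes and values are illustrative):

```julia
using MXNet

lhs = mx.Variable(:lhs)
rhs = mx.Variable(:rhs)
net = lhs .+ rhs                               # symbolic elementwise add

a  = NDArray(Float32[1 2; 3 4])
b  = NDArray(Float32[5 6; 7 8])
ga = mx.zeros(Float32, (2, 2))                 # gradient buffers
gb = mx.zeros(Float32, (2, 2))

exec = mx.bind(net, mx.Context(mx.CPU), [a, b], args_grad = [ga, gb])
mx.forward(exec)
@assert copy(exec.outputs[1]) == Float32[6 8; 10 12]

mx.backward(exec, mx.NDArray(ones(Float32, 2, 2)))  # d(a .+ b)/da = 1
@assert copy(ga) == ones(Float32, 2, 2)
```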
- -module TestBind -using MXNet -using Test - -using ..Main: rand_dims - -################################################################################ -# Test Implementations -################################################################################ -function test_arithmetic(::Type{T}, uf, gf) where T <: mx.DType - shape = rand_dims() - @info "Bind::arithmetic::$T::$uf::dims = $shape" - - lhs = mx.Variable(:lhs) - rhs = mx.Variable(:rhs) - ret = uf(lhs, rhs) - @test mx.list_arguments(ret) == [:lhs, :rhs] - - lhs_arr = NDArray(rand(T, shape)) - rhs_arr = NDArray(rand(T, shape)) - lhs_grad = NDArray{T}(undef, shape) - rhs_grad = NDArray{T}(undef, shape) - - exec2 = mx.bind(ret, mx.Context(mx.CPU), [lhs_arr, rhs_arr], args_grad=[lhs_grad, rhs_grad]) - exec3 = mx.bind(ret, mx.Context(mx.CPU), [lhs_arr, rhs_arr]) - exec4 = mx.bind(ret, mx.Context(mx.CPU), Dict(:lhs=>lhs_arr, :rhs=>rhs_arr), - args_grad=Dict(:rhs=>rhs_grad, :lhs=>lhs_grad)) - - mx.forward(exec2) - mx.forward(exec3) - mx.forward(exec4) - - out1 = uf(copy(lhs_arr), copy(rhs_arr)) - out2 = copy(exec2.outputs[1]) - out3 = copy(exec3.outputs[1]) - out4 = copy(exec4.outputs[1]) - @test isapprox(out1, out2) - @test isapprox(out1, out3) - @test isapprox(out1, out4) - - # test gradients - out_grad = mx.NDArray(ones(T, shape)) - lhs_grad2, rhs_grad2 = gf(copy(out_grad), copy(lhs_arr), copy(rhs_arr)) - mx.backward(exec2, out_grad) - @test isapprox(copy(lhs_grad), lhs_grad2) - @test isapprox(copy(rhs_grad), rhs_grad2) - - # reset grads - lhs_grad[:] = 0 - rhs_grad[:] = 0 - # compute using another binding - mx.backward(exec4, out_grad) - @test isapprox(copy(lhs_grad), lhs_grad2) - @test isapprox(copy(rhs_grad), rhs_grad2) -end - -function test_arithmetic() - for T in [mx.fromTypeFlag(TF) for TF in instances(mx.TypeFlag)] - test_arithmetic(T, (x,y) -> x .+ y, (g,x,y) -> (g,g)) - test_arithmetic(T, (x,y) -> x .- y, (g,x,y) -> (g,-g)) - test_arithmetic(T, (x,y) -> x .* y, (g,x,y) -> (y.*g, x.*g)) - if T <: Integer || T == Float16 - @warn "Not running division test for $T" - else - test_arithmetic(T, (x,y) -> x ./ y, (g,x,y) -> (g ./ y, -x .* g ./ (y.^2))) - end - end -end - -function test_forward() - # forward with data keyword argument - x = @var x - y = x .+ 42 - - A = 1:5 - B = A .+ 42 - - e = bind(y, args = Dict(:x => NDArray(24:28))) - z = forward(e, x = NDArray(A))[1] - - @test copy(z) == collect(B) -end - -################################################################################ -# Run tests -################################################################################ -@testset "Bind Test" begin - test_arithmetic() - test_forward() -end - -end - diff --git a/julia/test/unittest/context.jl b/julia/test/unittest/context.jl deleted file mode 100644 index e903f9212930..000000000000 --- a/julia/test/unittest/context.jl +++ /dev/null @@ -1,111 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -module TestContext - -using MXNet -using Test - -function test_num_gpus() - @info "Context::num_gpus" - - @test num_gpus() >= 0 -end - -function test_context_macro() - @info "Context::@context" - - @context mx.CPU 42 begin - ctx = mx.current_context() - @test ctx.device_type == mx.CPU - @test ctx.device_id == 42 - - @context mx.GPU 24 begin - ctx = mx.current_context() - @test ctx.device_type == mx.GPU - @test ctx.device_id == 24 - end - - ctx = mx.current_context() - @test ctx.device_type == mx.CPU - @test ctx.device_id == 42 - end - - function f() - ctx = mx.current_context() - @test ctx.device_type == mx.GPU - @test ctx.device_id == 123 - end - - @context mx.GPU 123 begin - f() - end - - @context mx.GPU begin - ctx = mx.current_context() - @test ctx.device_type == mx.GPU - @test ctx.device_id == 0 - end - - @context mx.CPU begin - ctx = mx.current_context() - @test ctx.device_type == mx.CPU - @test ctx.device_id == 0 - end - - @info "Context::@gpu" - @gpu 123 f() - @gpu begin - ctx = mx.current_context() - @test ctx.device_type == mx.GPU - @test ctx.device_id == 0 - end - let n = 321 - @gpu n begin - ctx = mx.current_context() - @test ctx.device_type == mx.GPU - @test ctx.device_id == 321 - end - end - - @info "Context::@cpu" - @cpu 123 begin - ctx = mx.current_context() - @test ctx.device_type == mx.CPU - @test ctx.device_id == 123 - end - @cpu begin - ctx = mx.current_context() - @test ctx.device_type == mx.CPU - @test ctx.device_id == 0 - end - let n = 321 - @cpu n begin - ctx = mx.current_context() - @test ctx.device_type == mx.CPU - @test ctx.device_id == 321 - end - end -end - -@testset "Context Test" begin - test_num_gpus() - test_context_macro() -end - - -end # module TestContext diff --git a/julia/test/unittest/exceptions.jl b/julia/test/unittest/exceptions.jl deleted file mode 100644 index ad952c6483f5..000000000000 --- a/julia/test/unittest/exceptions.jl +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -module TestExceptions - -using MXNet -using Test - -struct MXError′ <: AbstractMXError - msg::String -end - -function test_show() - @info "AbstractMXError::Base.show" - - io = IOBuffer() - e = MXError′("magic") - print(io, e) - str = String(take!(io)) - @test str == "magic" -end - -@testset "Exception Test" begin - test_show() -end - - -end # module TestExceptions diff --git a/julia/test/unittest/initializer.jl b/julia/test/unittest/initializer.jl deleted file mode 100644 index fa528c9f8e13..000000000000 --- a/julia/test/unittest/initializer.jl +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -@testset "Initializers" begin - @testset "Bilinear initializer" begin - # Setup a filter with scale = 2 - expectedFilter = Float32[ - 0.0625 0.1875 0.1875 0.0625; - 0.1875 0.5625 0.5625 0.1875; - 0.1875 0.5625 0.5625 0.1875; - 0.0625 0.1875 0.1875 0.0625] - filter = mx.zeros(Float32, 4, 4, 1, 4) - mx.init(mx.XavierInitializer(), :upsampling0_weight, filter) - - mx.@nd_as_jl ro=filter begin - for s in 1:size(filter, 4) - @test all(filter[:, :, 1, s] .== expectedFilter) - end - end - end -end diff --git a/julia/test/unittest/io.jl b/julia/test/unittest/io.jl deleted file mode 100644 index 7d98d28fc541..000000000000 --- a/julia/test/unittest/io.jl +++ /dev/null @@ -1,145 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -module TestIO - -using MXNet -using Test - -using ..Main: rand_dims - -function test_mnist() - @info "IO::MNIST" - filenames = mx.get_mnist_ubyte() - - batch_size = 10 - mnist_provider = mx.MNISTProvider(image=filenames[:train_data], - label=filenames[:train_label], - batch_size=batch_size, silent=true, shuffle=false) - data_spec = mx.provide_data(mnist_provider) - label_spec = mx.provide_label(mnist_provider) - @test data_spec == [(:data, (28,28,1,batch_size))] - @test label_spec == [(:softmax_label, (batch_size,))] - - n_batch = 0 - for batch in mnist_provider - if n_batch == 0 - data_array = NDArray(undef, 28, 28, 1, batch_size) - label_array = NDArray(undef, batch_size) - # have to use "for i=1:1" to get over the legacy "feature" of using - # [ ] to do concatenation in Julia - data_targets = [[(1:batch_size, data_array)] for i = 1:1] - label_targets = [[(1:batch_size, label_array)] for i = 1:1] - - mx.load_data!(mnist_provider, batch, data_targets) - mx.load_label!(mnist_provider, batch, label_targets) - - true_labels = [5,0,4,1,9,2,1,3,1,4] # the first 10 labels in MNIST train - got_labels = Int[copy(label_array)...] - @test true_labels == got_labels - end - - n_batch += 1 - end - - @test n_batch == 60000 / batch_size -end - -function test_arrays_impl(data::Vector, label::Vector, provider::mx.ArrayDataProvider) - data = convert(Vector{Array{Float64}}, data) - label = convert(Vector{Array{Float64}}, label) - - sample_count = size(data[1])[end] - batch_size = mx.get_batch_size(provider) - idx_all = 1:batch_size:sample_count - - for (d1, (_, d2)) in zip(data, mx.provide_data(provider)) - @test size(d1)[1:end-1] == d2[1:end-1] - @test batch_size == d2[end] - end - for (d1, (_, d2)) in zip(label, mx.provide_label(provider)) - @test size(d1)[1:end-1] == d2[1:end-1] - @test batch_size == d2[end] - end - - @info "IO::Array::#data=$(length(data)),#label=$(length(label)),batch_size=$batch_size" - for (idx, batch) in zip(idx_all, provider) - data_batch = [x[[Colon() for i=1:ndims(x)-1]..., idx:min(idx+batch_size-1,sample_count)] for x in data] - data_get = mx.get_data(provider, batch) - - for (d_real, d_get) in zip(data_batch, data_get) - @test d_real ≈ copy(d_get)[[1:n for n in size(d_real)]...] - @test mx.count_samples(provider, batch) == size(d_real)[end] - end - end -end - -function test_arrays() - sample_count = 15 - batch_size = 4 - dims_data = [rand_dims()..., sample_count] - data = rand(dims_data...) - provider = mx.ArrayDataProvider(data, batch_size=batch_size) - test_arrays_impl(Array[data], [], provider) - - dims_label = [rand_dims()..., sample_count] - label = rand(dims_label...) - provider = mx.ArrayDataProvider(data, label, batch_size=batch_size) - test_arrays_impl(Array[data], Array[label], provider) - - provider = mx.ArrayDataProvider(:data=>data, :my_label=>label, batch_size=batch_size) - test_arrays_impl(Array[data], Array[label], provider) - - dims_data2 = [rand_dims()..., sample_count] - data2 = rand(dims_data2...) 
- provider = mx.ArrayDataProvider((:data=>data, :data2=>data2), label, batch_size=batch_size) - test_arrays_impl(Array[data,data2], Array[label], provider) -end - -function test_arrays_shuffle() - @info "IO::Array::shuffle" - - sample_count = 15 - batch_size = 4 - data = rand(mx.MX_float, 1, sample_count) - label = collect(1:sample_count) - provider = mx.ArrayDataProvider(data, :index => label, batch_size=batch_size, shuffle=true) - - idx_all = 1:batch_size:sample_count - data_got = similar(data) - label_got = similar(label) - for (idx, batch) in zip(idx_all, provider) - data_batch = mx.get(provider, batch, :data) - label_batch = mx.get(provider, batch, :index) - ns_batch = mx.count_samples(provider, batch) - data_got[idx:idx+ns_batch-1] = copy(data_batch)[1:ns_batch] - label_got[idx:idx+ns_batch-1] = copy(label_batch)[1:ns_batch] - end - - @test label_got != label - @test sort(label_got) == label - @test size(data_got) == size(data[:, Int[label_got...]]) - @test data_got ≈ data[:, Int[label_got...]] -end - -@testset "IO Test" begin - test_arrays_shuffle() - test_arrays() - test_mnist() -end - -end diff --git a/julia/test/unittest/kvstore.jl b/julia/test/unittest/kvstore.jl deleted file mode 100644 index db6885717edc..000000000000 --- a/julia/test/unittest/kvstore.jl +++ /dev/null @@ -1,114 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
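The KVStore tests below follow an init/push/pull round trip. A short sketch of that cycle, assuming the `init!`/`push!`/`pull!` API used in `test_single_kv_pair` (the key and shape are illustrative):

```julia
using MXNet

kv = mx.KVStore(:local)
mx.init!(kv, 3, mx.zeros((2, 3)))      # register key 3 with an initial value

mx.push!(kv, 3, mx.ones((2, 3)))       # push an update for key 3
out = NDArray(undef, (2, 3))
mx.pull!(kv, 3, out)                   # pull the aggregated value back
@assert all(copy(out) .== 1)
```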
- -module TestKVStore -using MXNet -using Test - -using ..Main: rand_dims - -SHAPE = rand_dims() -KEYS = [5,7,11] - -function init_kv() - kv = mx.KVStore() - mx.init!(kv, 3, mx.zeros(SHAPE)) - - vals = [mx.zeros(SHAPE) for k in KEYS] - mx.init!(kv, KEYS, vals) - kv -end - -function test_kv_basic() - @info("KVStore::basic") - - kv = init_kv() - @test mx.get_type(kv) == :local - @test mx.get_rank(kv) == 0 - @test mx.get_num_workers(kv) == 1 -end - -function test_single_kv_pair() - @info("KVStore::single") - - kv = init_kv() - mx.push!(kv, 3, mx.ones(SHAPE)) - val = NDArray(undef, SHAPE) - mx.pull!(kv, 3, val) - @test maximum(abs.(copy(val) .- 1)) == 0 -end - -function test_aggregator() - @info("KVStore::aggregator") - - kv = init_kv() - - num_devs = 4 - devs = [mx.Context(mx.CPU, i) for i=0:num_devs-1] - vals = [mx.ones(SHAPE, dev) for dev in devs] - - mx.push!(kv, 3, vals) - mx.pull!(kv, 3, vals) - for v in vals - @test maximum(abs.(copy(v)) .- num_devs) == 0 - end - - # list - vals = [mx.NDArray[mx.ones(SHAPE, dev)*2 for dev in devs] for k in KEYS] - mx.push!(kv, KEYS, vals) - mx.pull!(kv, KEYS, vals) - - for vv in vals - for v in vv - @test maximum(abs.(copy(v)) .- 2 * num_devs) == 0 - end - end -end - -function check_setupdater!(f) - kv = KVStore(:local) - setupdater!(kv, f) - - A = Float32[1, 2, 3, 4] - B = Float32[.5, .6, .7, .8] - x = NDArray(A) - Δ = NDArray(B) - init!(kv, 42, x) - push!(kv, 42, Δ) - pull!(kv, 42, x) - - @test copy(x) ≈ A + 2B -end # function check_setupdater! - -function test_setupdater!() - @info("KVStore::setupdater!") - - f(key, Δ, x) = @mx.inplace x += 2Δ - g(key, Δ, x) = (x[:] += 2Δ) - - check_setupdater!(f) - check_setupdater!(g) -end # test_setupdater! - -@testset "KVStore Test" begin - test_kv_basic() - test_single_kv_pair() - test_aggregator() - test_setupdater!() -end - -end diff --git a/julia/test/unittest/metric.jl b/julia/test/unittest/metric.jl deleted file mode 100644 index 05e4dbda47f4..000000000000 --- a/julia/test/unittest/metric.jl +++ /dev/null @@ -1,116 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -module TestMetric - -using MXNet -using Test - -################################################################################ -# Supporting functions -################################################################################ - -""" -Returns a random n x m array in which each column defines a discrete probability distribution. -Each column contains numbers between 0 and 1, and each column sums to 1. 
-""" -function generate_probs(n, m) - # Init - result = rand(n, m) - - # Normalize: ensure each column sums to 1 - for j = 1:m - colsum = sum(result[:, j]) - for i = 1:n - result[i, j] /= colsum - end - end - result -end - - -function loglikelihood(labels::Vector{T}, probs::Array{T, 2}) where T <: AbstractFloat - LL = 0.0 - eps = convert(T, 1.0e-8) - for i = 1:size(labels, 1) - LL += log(probs[Int(labels[i]) + 1, i] + eps) # labels are zero-based - end - LL / size(labels, 1) -end - - -################################################################################ -# Test Implementations -################################################################################ - -function test_ace() - @info "EvalMetric::ACE" - n_categories = 4 - n_observations = 100 - labels = convert(Vector{Float32}, rand(0:(n_categories - 1), n_observations)) # MXNet uses Float32 - probs = convert(Array{Float32}, generate_probs(n_categories, n_observations)) - LL = loglikelihood(labels, probs) - metric = mx.ACE() # For categorical variables, ACE == -LL - mx._update_single_output(metric, labels, probs) - LL_v2 = metric.ace_sum / metric.n_sample - @test LL ≈ LL_v2 atol=1e-12 -end - - -function test_nmse() - @info "EvalMetric::NMSE" - - @testset "EvalMetric::NMSE::update!" begin - metric = mx.NMSE() - labels = Array{mx.NDArray}( - [mx.NDArray([100.0, 0.0]), mx.NDArray([10.0, 0.0])]) - preds = Array{mx.NDArray}( - [mx.NDArray([20.0, 0.0]), mx.NDArray([2.0, 0.0])]) - - mx.update!(metric, labels, preds) - @test metric.nmse_sum ≈ 0.64 * 2 - end - - @testset "EvalMetric::NMSE::reset!" begin - metric = mx.NMSE() - metric.nmse_sum = sum(rand(10)) - metric.n_sample = 42 - - mx.reset!(metric) - - @test metric.nmse_sum == 0.0 - @test metric.n_sample == 0 - end - - @testset "EvalMetric::NMSE::get" begin - metric = mx.NMSE() - metric.nmse_sum = 100.0 - metric.n_sample = 20 - - @test mx.get(metric) == [(:NMSE, 5.0)] - end -end - - -################################################################################ -# Run tests -################################################################################ -test_ace() -test_nmse() - - -end diff --git a/julia/test/unittest/model.jl b/julia/test/unittest/model.jl deleted file mode 100644 index 387a0cd555ab..000000000000 --- a/julia/test/unittest/model.jl +++ /dev/null @@ -1,51 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -module TestModel - -using Test -using MXNet - - -function test_feedforward() - @info("Model::FeedForward::constructor") - let x = @mx.var x - m = mx.FeedForward(x) - @test m.arch === x - @test length(m.ctx) == 1 - end - - @info("Model::FeedForward::constructor::keyword context") - let x = @mx.var x - m = mx.FeedForward(x, context = mx.cpu()) - @test m.arch === x - @test length(m.ctx) == 1 - end - - let x = @mx.var x - m = mx.FeedForward(x, context = [mx.cpu(), mx.cpu(1)]) - @test m.arch === x - @test length(m.ctx) == 2 - end -end - - -@testset "Model Test" begin - test_feedforward() -end - -end # module TestModel diff --git a/julia/test/unittest/name.jl b/julia/test/unittest/name.jl deleted file mode 100644 index 1099ec4a7df5..000000000000 --- a/julia/test/unittest/name.jl +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -module TestNameManager -using MXNet -using Test - -function test_default() - @info("NameManager::default") - - name = :_____aaaaa_____ - @test get!(mx.DEFAULT_NAME_MANAGER, name, "") == name - @test get!(mx.DEFAULT_NAME_MANAGER, string(name), "") == name - - hint = name - @test get!(mx.DEFAULT_NAME_MANAGER, "", hint) == Symbol("$(hint)0") - @test get!(mx.DEFAULT_NAME_MANAGER, "", string(hint)) == Symbol("$(hint)1") -end - -function test_prefix() - @info("NameManager::prefix") - - name = :_____bbbbb_____ - prefix = :_____foobar_____ - - prefix_manager = mx.PrefixNameManager(prefix) - @test get!(prefix_manager, name, "") == Symbol("$prefix$name") - @test get!(prefix_manager, "", name) == Symbol("$prefix$(name)0") -end - -@testset "Name Test" begin - test_default() - test_prefix() -end - -end diff --git a/julia/test/unittest/ndarray.jl b/julia/test/unittest/ndarray.jl deleted file mode 100644 index fb59b71edd60..000000000000 --- a/julia/test/unittest/ndarray.jl +++ /dev/null @@ -1,1604 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
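The NDArray tests below start from a handful of constructor forms. A compact sketch of the ones exercised in `test_constructor` (sizes are illustrative):

```julia
using MXNet

x = NDArray{Float32}(undef, 5, 5)     # uninitialized, eltype Float32
y = mx.NDArray([1 2; 3 4])            # from a Julia array, eltype Int
z = mx.zeros(Float32, (2, 3))         # zero-filled
@assert size(x) == (5, 5)
@assert eltype(y) == Int
@assert sum(copy(z)) == 0
```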
- -module TestNDArray - -using MXNet -using Statistics -using LinearAlgebra -using Test - -using ..Main: rand_dims - -################################################################################ -# Test Implementations -################################################################################ -rand_tensors(dims::NTuple{N,Int}) where {N} = rand_tensors(mx.MX_float, dims) -function rand_tensors(::Type{T}, dims::NTuple{N,Int}) where {N,T} - tensor = rand(T, dims) - array = copy(tensor, mx.cpu()) - return (tensor, array) -end - -function test_constructor() - @info("NDArray::NDArray(x::AbstractArray)") - function check_absarray(x) - y = mx.NDArray(x) - @test ndims(x) == ndims(y) - @test eltype(x) == eltype(y) - @test x[3] == y[3][] - end - - check_absarray(1:10) - check_absarray(1.0:10) - - @info("NDArray::NDArray(Type, AbstractArray)") - let - x = mx.NDArray(Float32, [1, 2, 3]) - @test eltype(x) == Float32 - @test copy(x) == [1, 2, 3] - end - let - x = mx.NDArray(Float32, [1.1, 2, 3]) - @test eltype(x) == Float32 - @test copy(x) ≈ [1.1, 2, 3] - end - - @info "NDArray::NDArray{T,N}(undef, dims...)" - let - x = NDArray{Int,2}(undef, 5, 5) - @test eltype(x) == Int - @test size(x) == (5, 5) - @test x.writable - - y = NDArray{Int,2}(undef, 5, 5, writable = false) - @test !y.writable - - # dimension mismatch - @test_throws MethodError NDArray{Int,1}(undef, 5, 5) - end - - @info "NDArray::NDArray{T,N}(undef, dims)" - let - x = NDArray{Int,2}(undef, (5, 5)) - @test eltype(x) == Int - @test size(x) == (5, 5) - @test x.writable - - y = NDArray{Int,2}(undef, (5, 5), writable = false) - @test !y.writable - - # dimension mismatch - @test_throws MethodError NDArray{Int,1}(undef, (5, 5)) - end - - @info "NDArray::NDArray{T}(undef, dims...)" - let - x = NDArray{Int}(undef, 5, 5) - @test eltype(x) == Int - @test size(x) == (5, 5) - @test x.writable - - y = NDArray{Int}(undef, 5, 5, writable = false) - @test !y.writable - end - - @info "NDArray::NDArray{T}(undef, dims)" - let - x = NDArray{Int}(undef, (5, 5)) - @test eltype(x) == Int - @test size(x) == (5, 5) - @test x.writable - - y = NDArray{Int}(undef, (5, 5), writable = false) - @test !y.writable - end - - @info "NDArray::NDArray(undef, dims...)" - let - x = NDArray(undef, 5, 5) - @test eltype(x) == mx.MX_float - @test size(x) == (5, 5) - @test x.writable - - y = NDArray(undef, 5, 5, writable = false) - @test !y.writable - end - - @info "NDArray::NDArray(undef, dims)" - let - x = NDArray(undef, (5, 5)) - @test eltype(x) == mx.MX_float - @test size(x) == (5, 5) - @test x.writable - - y = NDArray(undef, (5, 5), writable = false) - @test !y.writable - end -end # function test_constructor - - -function test_ones_zeros_like() - @info("NDArray::Base.zeros") - let x = mx.rand(1, 3, 2, 4, low = 1, high = 10) - y = zeros(x) - @test sum(copy(y)) == 0 - - y = mx.zeros(x) - @test sum(copy(y)) == 0 - end - - @info("NDArray::Base.ones") - let x = mx.rand(1, 3, 2, 4, low = 1, high = 10) - y = ones(x) - @test sum(copy(y)) == 1 * 3 * 2 * 4 - - y = mx.ones(x) - @test sum(copy(y)) == 1 * 3 * 2 * 4 - end -end # function test_ones_zeros_like - - -function test_copy() - dims = rand_dims() - tensor = rand(mx.MX_float, dims) - - @info("NDArray::copy::dims = $dims") - - # copy to NDArray and back - array = copy(tensor, mx.cpu()) - tensor2 = copy(array) - @test tensor ≈ tensor2 - - # copy between NDArray - array2 = copy(array, mx.cpu()) - tensor2 = copy(array2) - @test tensor ≈ tensor2 - - @info("NDArray::copy::AbstractArray") - let x = copy(1:4, mx.cpu()) - @test 
eltype(x) == Int - @test copy(x) == [1, 2, 3, 4] - end - - let x = copy(1.:4, mx.cpu()) - @test eltype(x) == Float64 - @test copy(x) ≈ [1., 2, 3, 4] - end - - @info("NDArray::copy!::AbstractArray") - let - x = mx.zeros(4) - copy!(x, 1:4) - - @test eltype(x) == Float32 - @test copy(x) == [1, 2, 3, 4] - end -end - -function test_deepcopy() - @info("NDArray::deepcopy") - - x = mx.zeros(2, 5) - y = deepcopy(x) - x[:] = 42 - @test copy(x) != copy(y) -end - -function test_assign() - dims = rand_dims() - tensor = rand(mx.MX_float, dims) - - @info("NDArray::assign::dims = $dims") - - # Julia Array -> NDArray assignment - array = NDArray(undef, size(tensor)...) - array[:] = tensor - @test tensor ≈ copy(array) - - array2 = mx.zeros(size(tensor)) - @test zeros(size(tensor)) ≈ copy(array2) - - array3 = mx.zeros(Float16, size(tensor)) - @test zeros(Float16, size(tensor)) ≈ copy(array2) - - # scalar -> NDArray assignment - scalar = rand() - array2[:] = scalar - @test zeros(size(tensor)) .+ scalar ≈ copy(array2) - - scalar = rand(Float16) - array2[:] = scalar - @test zeros(size(tensor)) .+ scalar ≈ copy(array2) - - scalar = rand(Float64) - array2[:] = scalar - array3[:] = scalar - @test zeros(size(tensor)) .+ scalar ≈ copy(array2) - @test zeros(Float16, size(tensor)) .+ scalar ≈ copy(array3) - - # NDArray -> NDArray assignment - array[:] = array2 - @test zeros(size(tensor)) .+ scalar ≈ copy(array) -end - -function test_slice() - array = mx.zeros((2, 4)) - array[2:3] = ones(2, 2) - @test copy(array) == [0 1 1 0; 0 1 1 0] - @test copy(mx.slice(array, 2:3)) == [1 1; 1 1] -end - -function test_linear_idx() - @info("NDArray::getindex::linear indexing") - let A = reshape(1:30, 3, 10) - x = mx.NDArray(A) - - @test copy(x) == A - @test copy(x[1]) == [1] - @test copy(x[2]) == [2] - @test copy(x[3]) == [3] - @test copy(x[12]) == [12] - @test copy(x[13]) == [13] - @test copy(x[14]) == [14] - - @test_throws BoundsError x[-1] - @test_throws BoundsError x[0] - @test_throws BoundsError x[31] - @test_throws BoundsError x[42] - end - - let A = reshape(1:24, 3, 2, 4) - x = mx.NDArray(A) - - @test copy(x) == A - @test copy(x[1]) == [1] - @test copy(x[2]) == [2] - @test copy(x[3]) == [3] - @test copy(x[11]) == [11] - @test copy(x[12]) == [12] - @test copy(x[13]) == [13] - @test copy(x[14]) == [14] - end - - @info("NDArray::setindex!::linear indexing") - let A = reshape(1:24, 3, 2, 4) - x = mx.NDArray(A) - - @test copy(x) == A - - x[4] = -4 - @test copy(x[4]) == [-4] - - x[11] = -11 - @test copy(x[11]) == [-11] - - x[24] = 42 - @test copy(x[24]) == [42] - end - - @info("NDArray::setindex!::type convert") - let - x = NDArray([1, 2, 3]) - @test eltype(x) == Int - x[:] = π - @test copy(x) == [3, 3, 3] - end -end # function test_linear_idx - -function test_first() - @info("NDArray::first") - let A = reshape(1:30, 3, 10) - x = mx.NDArray(A) - - @test x[] == 1 - @test x[5][] == 5 - - @test first(x) == 1 - @test first(x[5]) == 5 - end -end # function test_first - -function test_lastindex() - @info("NDArray::lastindex") - let A = [1 2; 3 4; 5 6], x = mx.NDArray(A) - @test lastindex(A) == lastindex(x) - end -end # function test_lastindex - -function test_cat() - function check_cat(f, A, B = 2A) - C = [A B] - D = [A; B] - x = NDArray(A) - y = NDArray(B) - z = NDArray(C) - d = NDArray(D) - - if f == :hcat - @test copy([x y]) == [A B] - @test copy([x y 3y x]) == [A B 3B A] - @test copy([z y x]) == [C B A] - elseif f == :vcat - @test copy([x; y]) == [A; B] - @test copy([x; y; 3y; x]) == [A; B; 3B; A] - @test copy([x; d]) == [A; D] - 
@test copy([d; x]) == [D; A] - else - @assert false - end - end - - let A = [1, 2, 3, 4] - @info("NDArray::hcat::1D") - check_cat(:hcat, A) - - @info("NDArray::vcat::1D") - check_cat(:vcat, A) - end - - let A = [1 2; 3 4] - @info("NDArray::hcat::2D") - check_cat(:hcat, A) - - @info("NDArray::vcat::2D") - check_cat(:vcat, A) - end - - let A = rand(4, 3, 2) - @info("NDArray::hcat::3D") - check_cat(:hcat, A) - - @info("NDArray::vcat::3D") - check_cat(:vcat, A) - end - - let A = rand(4, 3, 2, 2) - @info("NDArray::hcat::4D") - check_cat(:hcat, A) - - @info("NDArray::vcat::4D") - check_cat(:vcat, A) - end - - let A = [1, 2, 3, 4] - @info("NDArray::cat::3D/1D") - check_cat(:vcat, reshape(A, 4, 1, 1), 2A) - end -end # function test_cat - -function test_plus() - dims = rand_dims() - t1, a1 = rand_tensors(dims) - t2, a2 = rand_tensors(dims) - t3, a3 = rand_tensors(dims) - - @info("NDArray::plus::dims = $dims") - - @test t1 .+ t2 ≈ copy(a1 .+ a2) - - @test t1 .+ t2 .+ t3 ≈ copy(a1 .+ a2 .+ a3) - - # test inplace += operation - a0 = a1 # keep a reference to a1 - @mx.inplace a1 += a2 # perform inplace += - @test a0 == a1 # make sure they are still the same object - @test copy(a0) ≈ copy(a1) - @test copy(a1) ≈ t1 .+ t2 - - # test scalar - scalar = rand() - @test t3 .+ scalar ≈ copy(a3 .+ scalar) - @test t2 .+ scalar .+ t3 ≈ copy(a2 .+ scalar .+ a3) - - # test small and large scalar - t4 = zeros(Float32, dims) - a4 = copy(t4, mx.cpu()) - scalar_small = 1e-8 - scalar_large = 1e8 - @test t4 .+ scalar_small ≈ copy(a4 .+ scalar_small) - @test t4 .+ scalar_large ≈ copy(a4 .+ scalar_large) - - t5 = zeros(Float64, dims) - a5 = copy(t5, mx.cpu()) - scalar_small = 1e-8 - scalar_large = 1e8 - @test t5 .+ scalar_small ≈ copy(a5 .+ scalar_small) - @test t5 .+ scalar_large ≈ copy(a5 .+ scalar_large) - - t6 = zeros(Float16, dims) - a6 = copy(t6, mx.cpu()) - scalar_small = Float16(1e-5) - scalar_large = Float16(1e4) - @test t6 .+ scalar_small ≈ copy(a6 .+ scalar_small) - @test t6 .+ scalar_large ≈ copy(a6 .+ scalar_large) - - let x = mx.NDArray([1 2; 3 4]), y = mx.NDArray([1 1; 1 1]) - @test copy(42 .+ x) == [43 44; 45 46] - @test copy(x .+ 42) == [43 44; 45 46] - @test copy(0 .+ x .+ y .+ 41) == [43 44; 45 46] - end - - @info("NDArray::plus::scalar::type convert") - let x = mx.NDArray([1, 2, 3]) - y = x .+ 0.5 - @test copy(y) == copy(x) - - y = x .+ 2.9 - @test copy(y) == [3, 4, 5] - end - - @info("NDArray::broadcast_add") - let - A = [1 2 3; - 4 5 6] - B = [1, - 2] - x = NDArray(A) - y = NDArray(B) - - z = x .+ y - @test copy(z) == A .+ B - - # TODO - # @inplace x .+= y - # @test copy(x) == A .+ B - end -end - -function test_minus() - dims = rand_dims() - t1, a1 = rand_tensors(dims) - t2, a2 = rand_tensors(dims) - - @info("NDArray::minus::dims = $dims") - - @test t1 .- t2 ≈ copy(a1 .- a2) - - @test -t1 ≈ copy(-a1) - - # make sure the negation is not in-place, so a1 is not changed after previous - # statement is executed - @test t1 ≈ copy(a1) - - # test inplace -= operation - a0 = a1 # keep a reference to a1 - @mx.inplace a1 -= a2 # perform inplace -= - @test a0 == a1 # make sure they are still the same object - @test a0.handle == a1.handle - @test copy(a0) ≈ copy(a1) - @test copy(a1) ≈ t1 .- t2 - - # test scalar - scalar = rand() - @test t2 .- scalar ≈ copy(a2 .- scalar) - - # test small and large scalar - t4 = zeros(Float32, dims) - a4 = copy(t4, mx.cpu()) - scalar_small = 1e-8 - scalar_large = 1e8 - @test t4 .- scalar_small ≈ copy(a4 .- scalar_small) - @test t4 .- scalar_large ≈ copy(a4 .- scalar_large) - - t5 = 
zeros(Float64, dims) - a5 = copy(t5, mx.cpu()) - scalar_small = 1e-8 - scalar_large = 1e8 - @test t5 .- scalar_small ≈ copy(a5 .- scalar_small) - @test t5 .- scalar_large ≈ copy(a5 .- scalar_large) - - t6 = zeros(Float16, dims) - a6 = copy(t6, mx.cpu()) - scalar_small = Float16(1e-5) - scalar_large = Float16(1e4) - @test t6 .- scalar_small ≈ copy(a6 .- scalar_small) - @test t6 .- scalar_large ≈ copy(a6 .- scalar_large) - - @info("NDArray::minus::scalar::type convert") - let x = mx.NDArray([1, 2, 3]) - @test copy(x .- π) ≈ [-2, -1, 0] - end - - @info("NDArray::broadcast_minus") - let - A = [1 2 3; - 4 5 6] - B = [1, - 2] - x = NDArray(A) - y = NDArray(B) - - z = x .- y - @test copy(z) == A .- B - - # TODO - # @inplace x .-= y - # @test copy(x) == A .- B - end - - @info("NDArray::scalar::rminus") - let - A = [1 2 3; - 4 5 6] - B = 10 .- A - - x = NDArray(A) - y = 10 .- x - - @test copy(y) == B - end -end - -function test_mul() - dims = rand_dims() - t1, a1 = rand_tensors(dims) - t2, a2 = rand_tensors(dims) - t3, a3 = rand_tensors(dims) - - @info("NDArray::mul::dims = $dims") - - @test t1 .* t2 ≈ copy(a1 .* a2) - - # test inplace .*= operation - a0 = a1 # keep a reference to a1 - @mx.inplace a1 .*= a2 # perform inplace .*= - @test a0 == a1 # make sure they are still the same object - @test a0.handle == a1.handle - @test copy(a0) ≈ copy(a1) - @test copy(a1) ≈ t1 .* t2 - - # test scalar - scalar = mx.MX_float(rand()) - @test t3 .* scalar ≈ copy(a3 .* scalar) - - # test small and large scalar - t4, a4 = rand_tensors(Float32, dims) - scalar_small = 1e-8 - scalar_large = 1e8 - @test t4 * scalar_small ≈ copy(a4 .* scalar_small) - @test t4 * scalar_large ≈ copy(a4 .* scalar_large) - - t5, a5 = rand_tensors(Float64, dims) - scalar_small = 1e-8 - scalar_large = 1e8 - @test t5 * scalar_small ≈ copy(a5 .* scalar_small) - @test t5 * scalar_large ≈ copy(a5 .* scalar_large) - - t6, a6 = rand_tensors(Float16, dims) - scalar_small = Float16(1e-5) - @test t6 * scalar_small ≈ copy(a6 .* scalar_small) - - @info("NDArray::mul::matrix multiplication") - let x = mx.NDArray([1. 2]) - y = x' * x - @test copy(y) == [1. 2; 2 4] - end - - @info("NDArray::mul::elementwise::issue 253") - let x = mx.NDArray([1. 2]) - y = x .* x - @test copy(y) == [1. 4.] 
- end - - @info("NDArray::mul::scalar::type convert") - let x = mx.NDArray([1, 2, 3]) - y = x .* π - @test eltype(x) == Int - @test copy(y) == [3, 6, 9] - end - - @info("NDArray::broadcast_mul") - let - A = [1 2 3; - 4 5 6] - B = [1, - 2] - x = NDArray(A) - y = NDArray(B) - - z = x .* y - @test copy(z) == A .* B - - # TODO - # @inplace x .*= y - # @test copy(x) == A .* B - end -end - -function test_div() - dims = rand_dims() - t1, a1 = rand_tensors(dims) - t2, a2 = rand_tensors(dims) - - @info("NDArray::div::dims = $dims") - t2 .+= 2 # avoid numerical instability - @mx.inplace a2 .+= 2 - - @test t1 ./ t2 ≈ copy(a1 ./ a2) - - # test inplace -= operation - a0 = a1 # keep a reference to a2 - @mx.inplace a1 ./= a2 # perform inplace ./= - @test a0 == a1 # make sure they are still the same object - @test a0.handle == a1.handle - @test copy(a0) ≈ copy(a1) - @test copy(a1) ≈ t1 ./ t2 - - # test scalar - scalar = rand() + 2 - @test t2 ./ scalar ≈ copy(a2 ./ scalar) - - # test small and large scalar - t4, a4 = rand_tensors(Float32, dims) - scalar_small = 1e-8 - scalar_large = 1e8 - @test t4 ./ scalar_small ≈ copy(a4 ./ scalar_small) - @test t4 ./ scalar_large ≈ copy(a4 ./ scalar_large) - - t5, a5 = rand_tensors(Float64, dims) - scalar_small = 1e-8 - scalar_large = 1e8 - @test t5 ./ scalar_small ≈ copy(a5 ./ scalar_small) - @test t5 ./ scalar_large ≈ copy(a5 ./ scalar_large) - - t6, a6 = rand_tensors(Float16, dims) - scalar_large = 1e4 - @test t6 ./ scalar_large ≈ copy(a6 ./ scalar_large) - - @info("NDArray::div::scalar::type convert") - let x = mx.NDArray([1, 2, 3]) - y = x ./ 1.1 - @test eltype(y) == Int - @test copy(y) == [1, 2, 3] - - y = x ./ 2 - @test eltype(y) == Int # this differs from julia - @test copy(y) == [0, 1, 1] - - @test_throws AssertionError x ./ 0.5 - end - - @info("NDArray::broadcast_div") - let - A = Float32[1 2 3; - 4 5 6] - B = Float32[1, - 2] - x = NDArray(A) - y = NDArray(B) - - z = x ./ y - @test copy(z) == A ./ B - - # TODO - # @inplace x ./= y - # @test copy(x) == A ./ B - end -end - -function test_rdiv() - @info("NDArray::rdiv") - - @info("NDArray::rdiv::Inf16") - let x = 1 ./ mx.zeros(Float16, 4) - @test copy(x) == [Inf16, Inf16, Inf16, Inf16] - end - - @info("NDArray::rdiv::Inf32") - let x = 1 ./ mx.zeros(Float32, 4) - @test copy(x) == [Inf32, Inf32, Inf32, Inf32] - end - - @info("NDArray::rdiv::Inf64") - let x = 1 ./ mx.zeros(Float64, 4) - @test copy(x) == [Inf64, Inf64, Inf64, Inf64] - end - - @info("NDArray::rdiv::Int") - let x = 1 ./ mx.NDArray([1 2; 3 4]) - @test copy(x) == [1 0; 0 0] - end - - @info("NDArray::rdiv::Float32") - let x = 1 ./ mx.NDArray(Float32[1 2; 3 4]) - y = 1 ./ Float32[1 2; 3 4] - @test copy(x) ≈ y - end - - @info("NDArray::rdiv::type convert") - let x = mx.NDArray([1, 2, 3]) - y = 5.5 ./ x - @test eltype(y) == Int # this differs from julia - @test copy(y) == [5, 2, 1] - end -end # function test_rdiv - -function test_mod() - @info("NDArray::mod") - A = [1 2; 3 4] - B = [1 1; 3 3] - - let x = NDArray(A), y = NDArray(B) - C = A .% B - D = B .% A - - w = x .% y - z = y .% x - - @test copy(w) ≈ C - @test copy(z) ≈ D - end - - @info("NDArray::mod::scalar") - let x = NDArray(A) - C = A .% 2 - y = x .% 2 - @test copy(y) ≈ C - end - - @info("NDArray::rmod") - let x = NDArray(A) - C = 11 .% A - y = 11 .% x - @test copy(y) ≈ C - end - - @info("NDArray::mod_from!") - let - x = NDArray(A) - y = NDArray(B) - C = A .% B - mx.mod_from!(x, y) - @test copy(x) ≈ C - end - - let - x = NDArray(A) - y = NDArray(B) - C = B .% A - mx.mod_from!(y, x) - - @test copy(y) 
≈ C - end - - @info("NDArray::mod_from!::scalar") - let - x = NDArray(A) - C = A .% 2 - mx.mod_from!(x, 2) - @test copy(x) ≈ C - end - - @info("NDArray::rmod_from!") - let - x = NDArray(A) - C = 11 .% A - mx.rmod_from!(11, x) - @test copy(x) ≈ C - end - - @info("NDArray::mod_from!::writable") - let - x = NDArray(A) - y = NDArray(B) - x.writable = false - y.writable = false - @test_throws AssertionError mx.mod_from!(x, y) - @test_throws AssertionError mx.mod_from!(y, x) - @test_throws AssertionError mx.mod_from!(x, 2) - @test_throws AssertionError mx.rmod_from!(2, x) - end - - @info("NDArray::mod::inplace") - let - x = NDArray(A) - y = NDArray(B) - C = A .% B - @inplace x .%= y - @test copy(x) ≈ C - end - - @info("NDArray::broadcast_mod") - let - A = [1 2 3; - 4 5 6] - B = [1, - 2] - x = NDArray(A) - y = NDArray(B) - - z = x .% y - @test copy(z) == A .% B - - # TODO - # @inplace x .%= y - # @test copy(x) == A .% B - end -end # function test_mod - -function test_gd() - dims = rand_dims() - tw, aw = rand_tensors(dims) - tg, ag = rand_tensors(dims) - - @info("NDArray::gd::dims = $dims") - - lr = rand() - wd = rand() - - @mx.inplace aw += -lr * (ag + wd * aw) - tw += -lr * (tg + wd * tw) - @test copy(aw) ≈ tw -end - -function test_saveload() - n_arrays = 5 - @info("NDArray::saveload::n_arrays = $n_arrays") - fname = tempname() - - # save and load a single array - dims = rand_dims() - j_array, nd_array = rand_tensors(dims) - mx.save(fname, nd_array) - data = mx.load(fname, mx.NDArray) - @test data isa Vector{<:mx.NDArray} - @test length(data) == 1 - @test copy(data[1]) ≈ j_array - - # save and load N arrays of different shape - arrays = [rand_tensors(rand_dims()) for i = 1:n_arrays] - nd_arrays = mx.NDArray[x[2] for x in arrays] - mx.save(fname, nd_arrays) - data = mx.load(fname, mx.NDArray) - @test data isa Vector{<:mx.NDArray} - @test length(data) == n_arrays - for i = 1:n_arrays - @test copy(data[i]) ≈ arrays[i][1] - end - - # save and load dictionary of ndarrays - names = [Symbol("array$i") for i = 1:n_arrays] - dict = Dict([(n, v) for (n,v) in zip(names, nd_arrays)]) - mx.save(fname, dict) - data = mx.load(fname, mx.NDArray) - @test data isa Dict{Symbol,<:mx.NDArray} - @test length(data) == n_arrays - for i = 1:n_arrays - @test copy(data[names[i]]) ≈ arrays[i][1] - end - - rm(fname) -end - -function test_clamp() - @info("NDArray::clamp::dims") - - A = [1 2 3; - 4 5 6; - 7 8 9.] - B = [3 3 3; - 4 5 6; - 7 8 8.] - x = NDArray(A) - y = clamp(x, 3., 8.) 
- - # make sure the original array is not modified - @test copy(x) ≈ A - @test copy(y) ≈ B - - @info("NDArray::clamp!") - let - x = NDArray(1.0:20) - clamp!(x, 5, 15) - @test all(5 .<= copy(x) .<= 15) - end -end - -function test_power() - @info("NDArray::power") - - @info("NDArray::power::Int::x .^ n") - let x = mx.NDArray([1 2; 3 4]) - @test eltype(x) == Int - @test copy(x .^ -1) == [1 0; 0 0] - @test copy(x .^ 0) == [1 1; 1 1] - @test copy(x .^ 1) == [1 2; 3 4] - @test copy(x .^ 1.1) == [1 2; 3 4] - @test copy(x .^ 2) == [1 4; 9 16] - @test copy(x .^ 2.9) == [1 4; 9 16] - @test copy(x .^ 3) == [1 8; 27 64] - end - - @info("NDArray::power::Int::n .^ x") - let x = mx.NDArray([1 2; 3 4]) - @test eltype(x) == Int - @test copy(0 .^ x) == [0 0; 0 0] - @test copy(1 .^ x) == [1 1; 1 1] - @test copy(1.1 .^ x) == [1 1; 1 1] - @test copy(2 .^ x) == [2 4; 8 16] - @test copy(2.9 .^ x) == [2 4; 8 16] - @test copy(3 .^ x) == [3 9; 27 81] - end - - @info("NDArray::power::Int::x .^ y") - let x = mx.NDArray([1 2; 3 4]), y = mx.NDArray([2 2; 2 2]) - @test eltype(x) == Int - @test eltype(y) == Int - @test copy(x .^ y) == [1 4; 9 16] - @test copy(y .^ x) == [2 4; 8 16] - end - - @info("NDArray::power::Float32::x .^ n") - let x = mx.NDArray(Float32[1 2; 3 4]), A = Float32[1 2; 3 4] - @test eltype(x) == Float32 - @test copy(x .^ 0) == Float32[1 1; 1 1] - @test copy(x .^ 1) == Float32[1 2; 3 4] - @test copy(x .^ 2) == Float32[1 4; 9 16] - @test copy(x .^ 3) == Float32[1 8; 27 64] - - @test copy(x .^ -1) ≈ A .^ -1 - @test copy(x .^ 1.1) ≈ A .^ 1.1 - @test copy(x .^ 2.9) ≈ A .^ 2.9 - end - - @info("NDArray::power::Float32::n .^ x") - let x = mx.NDArray(Float32[1 2; 3 4]), A = Float32[1 2; 3 4] - @test eltype(x) == Float32 - @test copy(0 .^ x) == Float32[0 0; 0 0] - @test copy(1 .^ x) == Float32[1 1; 1 1] - @test copy(2 .^ x) == Float32[2 4; 8 16] - @test copy(3 .^ x) == Float32[3 9; 27 81] - - @test copy(1.1 .^ x) ≈ 1.1 .^ A - @test copy(2.9 .^ x) ≈ 2.9 .^ A - end - - @info("NDArray::power::Float32::x .^ y") - let x = mx.NDArray(Float32[1 2; 3 4]), y = mx.NDArray(Float32[2 2; 2 2]) - @test eltype(x) == Float32 - @test eltype(y) == Float32 - @test copy(x .^ y) == Float32[1 4; 9 16] - @test copy(y .^ x) == Float32[2 4; 8 16] - end - - @info("NDArray::power::ℯ .^ x::x .^ ℯ") - let x = mx.zeros(2, 3), A = [1 1 1; 1 1 1] - @test copy(ℯ .^ x) ≈ A - end - - let A = Float32[1 2; 3 4], x = mx.NDArray(A) - @test copy(ℯ .^ x) ≈ ℯ .^ A - @test copy(x .^ ℯ) ≈ A .^ ℯ - end - - @info("NDArray::power::π .^ x::x .^ π") - let A = Float32[1 2; 3 4], x = mx.NDArray(A) - @test copy(π .^ x) ≈ π .^ A - @test copy(x .^ π) ≈ A .^ π - end - - # TODO: Float64: wait for https://github.com/apache/incubator-mxnet/pull/8012 - - @info("NDArray::broadcast_power") - let - A = [1 2 3; - 4 5 6] - B = [1, - 2] - x = NDArray(A) - y = NDArray(B) - - z = x.^y - @test copy(z) == A.^B - - # TODO - # @inplace x .^= y - # @test copy(x) == A.^B - end -end # function test_power - -function test_sqrt() - dims = rand_dims() - @info("NDArray::sqrt::dims = $dims") - - j_array, nd_array = rand_tensors(dims) - sqrt_ed = sqrt(nd_array) - @test copy(sqrt_ed) ≈ sqrt.(j_array) -end - -function test_nd_as_jl() - dims = (2, 3) - @info("NDArray::nd_as_jl::dims = $dims") - - x = mx.zeros(dims) + 5 - y = mx.ones(dims) - z = mx.zeros(dims) - @mx.nd_as_jl ro=x rw=(y, z) begin - for i = 1:length(z) - z[i] = x[i] - end - - z[:, 1] = y[:, 1] - y .= 0 - end - - @test sum(copy(y)) == 0 - @test sum(copy(z)[:, 1]) == 2 - @test copy(z)[:, 2:end] ≈ copy(x)[:, 2:end] -end - -function 
test_dot() - dims1 = (2, 3) - dims2 = (3, 8) - @info("NDArray::dot") - - x = mx.zeros(dims1) - y = mx.zeros(dims2) - z = mx.dot(x, y) - @test size(z) == (2, 8) - - x = mx.zeros(1, 2) - y = mx.zeros(1, 2, 3) - @test_throws mx.MXError dot(x, y) # dimension mismatch - - @info("NDArray::matrix mul") - let - A = [1. 2 3; 4 5 6] - B = [-1., -2, -3] - x = NDArray(A) - y = NDArray(B) - z = x * y - @test copy(z) == A * B - @test size(z) == (2,) - end - - let - A = [1. 2 3; 4 5 6] - B = [-1. -2; -3 -4; -5 -6] - x = NDArray(A) - y = NDArray(B) - z = x * y - @test copy(z) == A * B - @test size(z) == (2, 2) - end -end - -function test_eltype() - @info("NDArray::eltype") - dims = (3,3) - - x = NDArray(undef, dims) - @test eltype(x) == mx.DEFAULT_DTYPE - - for TF in instances(mx.TypeFlag) - T = mx.fromTypeFlag(TF) - x = NDArray{T}(undef, dims) - @test eltype(x) == T - end -end - -function test_reshape() - @info("NDArray::reshape") - A = rand(2, 3, 4) - - B = reshape(NDArray(A), 4, 3, 2) - @test size(B) == (4, 3, 2) - @test copy(B)[3, 1, 1] == A[1, 2, 1] - - C = reshape(NDArray(A), (4, 3, 2)) - @test size(C) == (4, 3, 2) - @test copy(C)[3, 1, 1] == A[1, 2, 1] - - @info("NDArray::reshape::reverse") - A = mx.zeros(10, 5, 4) - - B = reshape(A, -1, 0) - @test size(B) == (40, 5) - - C = reshape(A, -1, 0, reverse=true) - @test size(C) == (50, 4) -end - -function test_expand_dims() - @info("NDArray::expand_dims") - let A = [1, 2, 3, 4], x = NDArray(A) - @test size(x) == (4,) - - y = expand_dims(x, 1) - @test size(y) == (1, 4) - - y = expand_dims(x, 2) - @test size(y) == (4, 1) - end - - let A = [1 2; 3 4; 5 6], x = NDArray(A) - @test size(x) == (3, 2) - - y = expand_dims(x, 1) - @test size(y) == (1, 3, 2) - - y = expand_dims(x, 2) - @test size(y) == (3, 1, 2) - - y = expand_dims(x, 3) - @test size(y) == (3, 2, 1) - end -end # test_expand_dims - -function test_sum() - @info("NDArray::sum") - - let A = reshape(1.0:8, 2, 2, 2), X = mx.NDArray(A) - @test copy(sum(X))[] == sum(A) - @test copy(sum(X, dims = 1)) == sum(A, dims = 1) - @test copy(sum(X, dims = 2)) == sum(A, dims = 2) - @test copy(sum(X, dims = 3)) == sum(A, dims = 3) - @test copy(sum(X, dims = [1, 2])) == sum(A, dims = [1, 2]) - @test copy(sum(X, dims = (1, 2))) == sum(A, dims = (1, 2)) - end -end - -function test_mean() - @info("NDArray::mean") - - let A = reshape(1.0:8, 2, 2, 2), X = mx.NDArray(A) - @test copy(mean(X))[] == mean(A) - @test copy(mean(X, dims = 1)) == mean(A, dims = 1) - @test copy(mean(X, dims = 2)) == mean(A, dims = 2) - @test copy(mean(X, dims = 3)) == mean(A, dims = 3) - @test copy(mean(X, dims = [1, 2])) == mean(A, dims = [1, 2]) - @test copy(mean(X, dims = (1, 2))) == mean(A, dims = (1, 2)) - end -end - -function test_maximum() - @info("NDArray::maximum") - - let A = reshape(1.0:8, 2, 2, 2), X = mx.NDArray(A) - @test copy(maximum(X))[] == maximum(A) - @test copy(maximum(X, dims = 1)) == maximum(A, dims = 1) - @test copy(maximum(X, dims = 2)) == maximum(A, dims = 2) - @test copy(maximum(X, dims = 3)) == maximum(A, dims = 3) - @test copy(maximum(X, dims = [1, 2])) == maximum(A, dims = [1, 2]) - @test copy(maximum(X, dims = (1, 2))) == maximum(A, dims = (1, 2)) - end - - @info("NDArray::broadcast_maximum") - let - A = [1 2 3; - 4 5 6] - B = [1, - 2] - x = NDArray(A) - y = NDArray(B) - - z = max.(x, y) - @test copy(z) == max.(A, B) - end -end - -function test_minimum() - @info("NDArray::minimum") - - let A = reshape(1.0:8, 2, 2, 2), X = mx.NDArray(A) - @test copy(minimum(X))[] == minimum(A) - @test copy(minimum(X, dims = 1)) == 
minimum(A, dims = 1) - @test copy(minimum(X, dims = 2)) == minimum(A, dims = 2) - @test copy(minimum(X, dims = 3)) == minimum(A, dims = 3) - @test copy(minimum(X, dims = [1, 2])) == minimum(A, dims = [1, 2]) - @test copy(minimum(X, dims = (1, 2))) == minimum(A, dims = (1, 2)) - end - - @info("NDArray::broadcast_minimum") - let - A = [1 2 3; - 4 5 6] - B = [1, - 2] - x = NDArray(A) - y = NDArray(B) - - z = min.(x, y) - @test copy(z) == min.(A, B) - end -end - -function test_prod() - @info("NDArray::prod") - - let A = reshape(1.0:8, 2, 2, 2), X = mx.NDArray(A) - @test copy(prod(X))[] == prod(A) - @test copy(prod(X, dims = 1)) == prod(A, dims = 1) - @test copy(prod(X, dims = 2)) == prod(A, dims = 2) - @test copy(prod(X, dims = 3)) == prod(A, dims = 3) - @test copy(prod(X, dims = [1, 2])) == prod(A, dims = [1, 2]) - @test copy(prod(X, dims = (1, 2))) == prod(A, dims = (1, 2)) - end -end - -function test_fill() - @info("NDArray::fill") - - let x = mx.fill(42, 2, 3, 4) - @test eltype(x) == Int - @test size(x) == (2, 3, 4) - @test copy(x) == fill(42, 2, 3, 4) - end - - let x = mx.fill(Float32(42), 2, 3, 4) - @test eltype(x) == Float32 - @test size(x) == (2, 3, 4) - @test copy(x) ≈ fill(Float32(42), 2, 3, 4) - end - - let x = mx.fill(42, (2, 3, 4)) - @test eltype(x) == Int - @test size(x) == (2, 3, 4) - @test copy(x) == fill(42, 2, 3, 4) - end - - let x = mx.fill(Float32(42), (2, 3, 4)) - @test eltype(x) == Float32 - @test size(x) == (2, 3, 4) - @test copy(x) ≈ fill(Float32(42), 2, 3, 4) - end - - @info("NDArray::fill!::arr") - let x = fill!(mx.zeros(2, 3, 4), 42) - @test eltype(x) == Float32 - @test size(x) == (2, 3, 4) - @test copy(x) ≈ fill(Float32(42), 2, 3, 4) - end -end # function test_fill - -function test_transpose() - @info("NDArray::transpose::1D") - let A = rand(Float32, 4), x = NDArray(A) - @test size(x) == (4,) - @test size(x') == (1, 4) - end - - @info("NDArray::transpose::2D") - let A = rand(Float32, 2, 3), x = mx.NDArray(A) - @test size(x) == (2, 3) - @test size(x') == (3, 2) - end - - @info("NDArray::permutedims") - let A = collect(Float32, reshape(1.0:24, 2, 3, 4)), x = mx.NDArray(A) - A′ = permutedims(A, [2, 1, 3]) - x′ = permutedims(x, [2, 1, 3]) - @test size(A′) == size(x′) - @test A′ == copy(x′) - end -end - -function test_show() - @info("NDArray::show::REPL") - let str = sprint(show, MIME"text/plain"(), mx.NDArray([1 2 3 4])) - @test occursin("1×4", str) - @test occursin("NDArray", str) - @test occursin("Int64", str) - @test occursin("cpu", str) - @test match(r"1\s+2\s+3\s+4", str) != nothing - end - - @info("NDArray::show") - let str = sprint(show, mx.NDArray([1 2 3 4])) - @test str == "NDArray([1 2 3 4])" - end - - let str = sprint(show, mx.zeros(4)) - @test str == "NDArray(Float32[0.0, 0.0, 0.0, 0.0])" - end -end - -function test_size() - @info("NDArray::size") - let A = [1 2; 3 4; 5 6], x = mx.NDArray(A) - @test size(A) == size(x) - dims = (1, 2, 3, 4, 5) - @test map(d -> size(A, d), dims) == map(d -> size(x, d), dims) - @inferred map(d -> size(x, d), dims) - end -end # function test_size() - -function check_trigonometric(f) - @info("NDArray::$f") - let A = [.1 .2; .3 .4], x = mx.NDArray(A) - B = f.(A) - y = f.(x) - @test copy(y) ≈ B - end - - let A = Float32[.1 .2; .3 .4], x = mx.NDArray(A) - B = f.(A) - y = f.(x) - @test copy(y) ≈ B - end -end # function check_trigonometric - -function test_trigonometric() - for f ∈ [sin, cos, tan, asin, acos, atan] - check_trigonometric(f) - end -end # function test_trigonometric - -function check_hyperbolic(f, A) - 
@info("NDArray::$f") - let x = NDArray(A) - B = f.(A) - y = f.(x) - @test copy(y) ≈ B - end - - let A = Float32.(A), x = NDArray(A) - B = f.(A) - y = f.(x) - @test copy(y) ≈ B - end -end # function check_hyperbolic - -function test_hyperbolic() - for f ∈ [sinh, cosh, tanh, asinh, acosh, atanh] - A = if f == acosh - [1.1, 1.2, 1.3, 1.4] - else - [.1, .2, .3, .4] - end - check_hyperbolic(f, A) - end -end # function test_hyperbolic - -function test_act_funcs() - @info("NDArray::σ/sigmoid") - let - A = Float32[.1, .2, -.3, -.4] - B = @. 1 / (1 + ℯ ^ (-A)) - x = NDArray(A) - y = σ.(x) - @test copy(y) ≈ B - - z = sigmoid.(x) - @test copy(z) ≈ B - end - - @info("NDArray::relu") - let - A = [1, 2, -3, -4] - B = max.(A, 0) - x = NDArray(A) - y = relu.(x) - @test copy(y) ≈ B - end - - @info("NDArray::softmax::1D") - let - A = Float32[1, 2, 3, 4] - B = exp.(A) ./ sum(exp.(A)) - x = NDArray(A) - y = softmax.(x) - @test copy(y) ≈ B - end - - @info("NDArray::softmax::2D") - let - A = Float32[1 2; 3 4] - B = exp.(A) ./ sum(exp.(A), dims = 1) - x = NDArray(A) - y = softmax.(x, 1) - @test copy(y) ≈ B - - C = exp.(A) ./ sum(exp.(A), dims = 2) - z = softmax.(x, 2) - @test copy(z) ≈ C - end - - @info("NDArray::log_softmax::1D") - let - A = Float32[1, 2, 3, 4] - B = log.(exp.(A) ./ sum(exp.(A))) - x = NDArray(A) - y = log_softmax.(x) - @test copy(y) ≈ B - end - - @info("NDArray::log_softmax::2D") - let - A = Float32[1 2; 3 4] - B = log.(exp.(A) ./ sum(exp.(A), dims = 1)) - x = NDArray(A) - y = log_softmax.(x, 1) - @test copy(y) ≈ B - - C = log.(exp.(A) ./ sum(exp.(A), dims = 2)) - z = log_softmax.(x, 2) - @test copy(z) ≈ C - end -end # function test_act_funcs - -macro check_equal(op) - quote - A = [1 2 3 - 4 5 6] - B = [1, - 6] - x = NDArray(A) - y = NDArray(B) - a = broadcast($op, x, y) - @test copy(a) == broadcast($op, A, B) - - C = [3 2 1 - 6 5 4] - z = NDArray(C) - b = broadcast($op, x, z) - @test copy(b) == broadcast($op, A, C) - end -end - -function test_equal() - @info("NDArray::broadcast_equal") - @check_equal == - - @info("NDArray::broadcast_not_equal") - @check_equal != - - @info("NDArray::broadcast_greater") - @check_equal > - - @info("NDArray::broadcast_greater_equal") - @check_equal >= - - @info("NDArray::broadcast_lesser") - @check_equal < - - @info("NDArray::broadcast_lesser_equal") - @check_equal <= -end # function test_equal - -function test_broadcast_to() - @info("NDArray::broadcast_to") - A = [1 2 3] - x = NDArray(A) - @test mx.broadcast_to(x, (1, 3)) |> copy == A - @test mx.broadcast_to(x, (5, 3)) |> copy == repeat(A, outer = (5, 1)) - - @test mx.broadcast_to(x, 1, 3) |> copy == A - @test mx.broadcast_to(x, 5, 3) |> copy == repeat(A, outer = (5, 1)) -end # function test_broadcast_to - -function test_broadcast_axis() - @info("NDArray::broadcast_axis") - A = reshape([1, 2, 3], 1, 3, 1) - x = NDArray(A) - - @test mx.broadcast_axis(x, 1, 4) |> copy == [A; A; A; A] - @test mx.broadcast_axis(x, 3, 2) |> copy == cat(A, A, dims = 3) - - @info("NDArray::broadcast_axes") - @test mx.broadcast_axes(x, 1, 4) |> copy == [A; A; A; A] - @test mx.broadcast_axes(x, 3, 2) |> copy == cat(A, A, dims = 3) -end # function test_broadcast_axis - -function test_hypot() - @info("NDArray::hypot") - A = [3 3 3] - B = [4, 4] - C = hypot.(A, B) - - x = NDArray(A) - y = NDArray(B) - z = hypot.(x, y) - - @test copy(z) == C -end # function test_hypot - -function test_argmax() - @info "NDArray::argmax" - let - A = [1. 
5 3; - 4 2 6] - x = NDArray(A) - - @test copy(argmax(x, dims = 1)) == [x[1] for x ∈ argmax(A, dims = 1)] - @test copy(argmax(x, dims = 2)) == [x[2] for x ∈ argmax(A, dims = 2)] - end - - @info "NDArray::argmax::NaN" - let - A = [1. 5 3; - NaN 2 6] - x = NDArray(A) - - @test copy(argmax(x, dims = 1)) == [x[1] for x ∈ argmax(A, dims = 1)] - @test copy(argmax(x, dims = 2)) == [x[2] for x ∈ argmax(A, dims = 2)] - end -end - -function test_argmin() - @info "NDArray::argmin" - let - A = [1. 5 3; - 4 2 6] - x = NDArray(A) - - @test copy(argmin(x, dims = 1)) == [x[1] for x ∈ argmin(A, dims = 1)] - @test copy(argmin(x, dims = 2)) == [x[2] for x ∈ argmin(A, dims = 2)] - end - - @info "NDArray::argmin::NaN" - let - A = [1. 5 3; - NaN 2 6] - x = NDArray(A) - - @test copy(argmin(x, dims = 1)) == [x[1] for x ∈ argmin(A, dims = 1)] - @test copy(argmin(x, dims = 2)) == [x[2] for x ∈ argmin(A, dims = 2)] - end -end - -################################################################################ -# Run tests -################################################################################ -@testset "NDArray Test" begin - test_constructor() - test_ones_zeros_like() - test_assign() - test_copy() - test_slice() - test_linear_idx() - test_first() - test_lastindex() - test_cat() - test_plus() - test_minus() - test_mul() - test_div() - test_rdiv() - test_mod() - test_gd() - test_saveload() - test_clamp() - test_power() - test_sqrt() - test_eltype() - test_nd_as_jl() - test_dot() - test_reshape() - test_expand_dims() - test_sum() - test_mean() - test_maximum() - test_minimum() - test_prod() - test_fill() - test_transpose() - test_show() - test_size() - test_trigonometric() - test_hyperbolic() - test_act_funcs() - test_equal() - test_broadcast_to() - test_broadcast_axis() - test_hypot() - test_argmax() - test_argmin() -end - -end diff --git a/julia/test/unittest/operator.jl b/julia/test/unittest/operator.jl deleted file mode 100644 index 345dd0f88daf..000000000000 --- a/julia/test/unittest/operator.jl +++ /dev/null @@ -1,57 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -module TestOperator - -using MXNet -using Test - -using ..Main: rand_dims - -function test_scalar_op() - data = mx.Variable(:data) - shape = rand_dims() - @info "Operator::scalar_op::dims = $shape" - - data_jl = 5ones(Float32, shape) - arr_data = mx.copy(data_jl, mx.cpu()) - arr_grad = mx.zeros(shape) - - test = 2 ./ (4 - ((1+data+1)*2/5) - 0.2) - exec_test = mx.bind(test, mx.cpu(), [arr_data], args_grad=[arr_grad]) - mx.forward(exec_test) - out = copy(exec_test.outputs[1]) - jl_out1 = @. 
4 - ((1+data_jl+1)*2/5) - 0.2 - jl_out = 2 ./ jl_out1 - @test copy(out) ≈ jl_out - - out_grad = 2mx.ones(shape) - jl_grad = 2copy(out_grad) / 5 - jl_grad = 2jl_grad ./ (jl_out1 .^ 2) - mx.backward(exec_test, out_grad) - @test copy(arr_grad) ≈ jl_grad -end - -################################################################################ -# Run tests -################################################################################ - -@testset "Operator Test" begin - test_scalar_op() -end - -end diff --git a/julia/test/unittest/optimizer.jl b/julia/test/unittest/optimizer.jl deleted file mode 100644 index cd1e7ebb4b77..000000000000 --- a/julia/test/unittest/optimizer.jl +++ /dev/null @@ -1,85 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -module TestOptimizer - -using Test - -using MXNet -using MXNet.mx.LearningRate -using MXNet.mx.Momentum - - -function test_fixed_η() - @info "Optimizer::LearningRate::Fixed" - x = LearningRate.Fixed(.42) - @test get(x) == .42 - update!(x) - @test get(x) == .42 -end # function test_fixed_η - - -function check_η_decay(x) - @info "Optimizer::LearningRate::$x" - - η = get(x) - @test η == 1 - - for i ∈ 1:5 - update!(x) - η′ = get(x) - @test η′ < η - η = η′ - end -end # function check_η_decay - - -test_exp_η() = LearningRate.Exp(1) |> check_η_decay - - -test_inv_η() = LearningRate.Inv(1) |> check_η_decay - - -function test_μ_null() - @info "Optimizer::Momentum::Null" - x = Momentum.Null() - @test iszero(get(x)) -end - - -function test_μ_fixed() - @info "Optimizer::Momentum::Fixed" - x = Momentum.Fixed(42) - @test get(x) == 42 -end - - -@testset "Optimizer Test" begin - @testset "LearningRate Test" begin - test_fixed_η() - test_exp_η() - test_inv_η() - end - - @testset "Momentum Test" begin - test_μ_null() - test_μ_fixed() - end -end - - -end # module TestOptimizer diff --git a/julia/test/unittest/random.jl b/julia/test/unittest/random.jl deleted file mode 100644 index 38da9601a01a..000000000000 --- a/julia/test/unittest/random.jl +++ /dev/null @@ -1,63 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. - -module TestRandom -using MXNet -using Test -using Statistics - -function test_uniform() - dims = (100, 100, 2) - @info "random::uniform::dims = $dims" - - low = -10; high = 10 - seed = 123 - mx.seed!(seed) - ret1 = mx.rand(dims..., low = low, high = high) - - mx.seed!(seed) - ret2 = NDArray(undef, dims) - mx.rand!(ret2, low = low, high = high) - - @test copy(ret1) == copy(ret2) - @test abs(mean(copy(ret1)) - (high+low)/2) < 0.1 -end - -function test_gaussian() - dims = (80, 80, 4) - @info "random::gaussian::dims = $dims" - - μ = 10; σ = 2 - seed = 456 - mx.seed!(seed) - ret1 = mx.randn(dims..., μ = μ, σ = σ) - - mx.seed!(seed) - ret2 = NDArray(undef, dims) - mx.randn!(ret2, μ = μ, σ = σ) - - @test copy(ret1) == copy(ret2) - @test abs(mean(copy(ret1)) - μ) < 0.1 - @test abs(std(copy(ret1)) - σ) < 0.1 -end - -@testset "Random Test" begin - test_uniform() - test_gaussian() -end - -end diff --git a/julia/test/unittest/symbolic-node.jl b/julia/test/unittest/symbolic-node.jl deleted file mode 100644 index 69c852f6f843..000000000000 --- a/julia/test/unittest/symbolic-node.jl +++ /dev/null @@ -1,542 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
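The SymbolicNode tests removed below all funnel symbol evaluation through an `exec` helper imported from the Julia test harness (`..Main`), which this diff does not show. A minimal sketch of that bind-and-forward pattern follows; the helper's name, signature, and body here are assumptions for illustration, while the `mx.bind`/`mx.forward` usage is taken from the tests themselves:

using MXNet

# Assumed stand-in for the harness's `exec` helper: bind a SymbolicNode
# to concrete NDArray arguments on CPU, run one forward pass, and return
# the executor's output NDArrays.
function exec(sym::mx.SymbolicNode; feed...)
  args = Dict(k => mx.NDArray(v) for (k, v) in pairs(feed))
  e = mx.bind(sym, mx.cpu(), args)
  mx.forward(e)
  e.outputs
end

x = mx.Variable(:x)
y = exec(x .+ 42; x = Float32[1 2; 3 4])[1]   # first (only) output
@assert copy(y) == Float32[43 44; 45 46]

With a helper like this, each removed test reduces to comparing a `copy`-ed executor output against the equivalent plain-Julia broadcast.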
- -module TestSymbolicNode - -using MXNet -using Test - -using ..Main: mlp2, mlpchain, exec - -################################################################################ -# Test Implementations -################################################################################ -function test_basic() - @info("SymbolicNode::basic") - - model = mlp2() - @test mx.list_arguments(model) == [:data,:fc1_weight,:fc1_bias,:fc2_weight,:fc2_bias] - @test mx.list_outputs(model) == [:fc2_output] - @test mx.list_auxiliary_states(model) == Symbol[] -end - -function test_internal() - @info("SymbolicNode::internal") - - data = mx.Variable(:data) - oldfc = mx.FullyConnected(data, name=:fc1, num_hidden=10) - net1 = mx.FullyConnected(oldfc, name=:fc2, num_hidden=100) - - @test mx.list_arguments(net1) == [:data,:fc1_weight,:fc1_bias,:fc2_weight,:fc2_bias] - - internal = mx.get_internals(net1) - fc1 = internal[:fc1_output] - @test mx.list_arguments(fc1) == mx.list_arguments(oldfc) -end - -function test_get_children() - @info("SymbolicNode::get_children") - - let x = mx.Variable(:x), y = mx.Variable(:y) - z = x + y - @test length(mx.list_outputs(z)) == 1 - @test length(mx.list_outputs(mx.get_children(z))) == 2 - @test mx.list_outputs(mx.get_children(z)) == [:x, :y] - end - - @info("SymbolicNode::get_children::on leaf") - let x = mx.Variable(:x) - @test mx.get_children(x) == nothing - end -end # test_get_children - - -function test_compose() - @info("SymbolicNode::compose") - - data = mx.Variable(:data) - net1 = mx.FullyConnected(data, name=:fc1, num_hidden=10) - net1 = mx.FullyConnected(net1, name=:fc2, num_hidden=100) - - net2 = mx.FullyConnected(mx.SymbolicNode, name=:fc3, num_hidden=10) - net2 = mx.Activation(net2, act_type=:relu) - net2 = mx.FullyConnected(net2, name=:fc4, num_hidden=20) - - composed = net2(fc3_data=net1, name=:composed) - multi_out = mx.Group(composed, net1) - @test mx.list_outputs(multi_out) == [:composed_output, :fc2_output] -end - -function test_infer_shape() - @info("SymbolicNode::infer_shape::mlp2") - - model = mlp2() - data_shape = (100, 100) - arg_shapes, out_shapes, aux_shapes = mx.infer_shape(model, data=data_shape) - arg_shape_dict = Dict{Symbol,Tuple}(zip(mx.list_arguments(model), arg_shapes)) - @test arg_shape_dict == Dict{Symbol,Tuple}(:fc2_bias => (10,),:fc2_weight => (1000,10), - :fc1_bias => (1000,), :fc1_weight => (100, 1000), - :data => data_shape) - @test length(out_shapes) == 1 - @test out_shapes[1] == (10, 100) -end - -function test_infer_shape_error() - @info("SymbolicNode::infer_shape::throws") - - model = mlp2() - weight_shape = (100, 1) - data_shape = (100, 100) - @test_throws mx.MXError mx.infer_shape(model, data=data_shape, fc1_weight=weight_shape) -end - -function test_saveload() - @info("SymbolicNode::saveload::mlp2") - - model = mlp2() - fname = tempname() - mx.save(fname, model) - model2 = mx.load(fname, mx.SymbolicNode) - @test mx.to_json(model) == mx.to_json(model2) - - rm(fname) -end - -function test_attrs() - @info("SymbolicNode::Attributes") - - data = mx.Variable(:data) - - @test mx.get_name(data) == :data - result = mx.get_attr(data, :test) - @test ismissing(result) - mx.set_attr(data, :test, "1.0") - result = mx.get_attr(data, :test) - @test !ismissing(result) - @test result == "1.0" - - data2 = mx.Variable(:data2, attrs = Dict(:test => "hallo!")) - @test mx.get_attr(data2, :test) == "hallo!" 
- - conv = mx.Convolution(data2, kernel = (1,1), num_filter = 1) - @test ismissing(mx.get_attr(conv, :b)) - @test mx.get_name(conv) isa Symbol - - @test_throws MethodError mx.Variable(:data3, attrs = Dict(:test => "1.0", :test2 => 1.0)) - @test_throws MethodError mx.Convolution(data2, kernel = (1,1), num_filter = 1, attrs = Dict(:test => "1.0", :test2 => 1.0)) -end - -function test_functions() - @info("SymbolicNode::Functions") - data = mx.Variable(:data) - @test typeof(mx.sum(data)) == mx.SymbolicNode -end - -function test_reshape() - @info("SymbolicNode::reshape(sym, dim...)") - - A = mx.NDArray(collect(1:24)) - x = mx.Variable(:x) - y = mx.reshape(x, 2, 3, 4) - e = mx.bind(y, mx.cpu(), Dict(:x => A)) - mx.forward(e) - out = e.outputs[1] - - @test size(out) == (2, 3, 4) - @test copy(out) == reshape(1:24, 2, 3, 4) - - @info("SymbolicNode::reshape(sym, dim)") - - A = mx.NDArray(collect(1:24)) - x = mx.Variable(:x) - y = mx.reshape(x, (2, 3, 4)) - e = mx.bind(y, mx.cpu(), Dict(:x => A)) - mx.forward(e) - out = e.outputs[1] - - @test size(out) == (2, 3, 4) - @test copy(out) == reshape(1:24, 2, 3, 4) - - @info("SymbolicNode::reshape::reverse") - - A = mx.zeros(10, 5, 4) - x = mx.Variable(:x) - y = mx.reshape(x, -1, 0, reverse = true) - e = mx.bind(y, mx.cpu(), Dict(:x => A)) - mx.forward(e) - out = e.outputs[1] - - @test size(out) == (50, 4) - - @info("SymbolicNode::reshape::0") - - A = mx.zeros(2, 3, 4) - x = mx.Variable(:x) - y = mx.reshape(x, 4, 0, 2) - e = mx.bind(y, mx.cpu(), Dict(:x => A)) - mx.forward(e) - out = e.outputs[1] - - @test size(out) == (4, 3, 2) - - @info("SymbolicNode::reshape::-1") - - A = mx.zeros(2, 3, 4) - x = mx.Variable(:x) - y = mx.reshape(x, 6, 1, -1) - e = mx.bind(y, mx.cpu(), Dict(:x => A)) - mx.forward(e) - out = e.outputs[1] - - @test size(out) == (6, 1, 4) - - @info("SymbolicNode::reshape::-2") - - A = mx.zeros(2, 3, 4, 2) - x = mx.Variable(:x) - y = mx.reshape(x, 3, 2, -2) - e = mx.bind(y, mx.cpu(), Dict(:x => A)) - mx.forward(e) - out = e.outputs[1] - - @test size(out) == (3, 2, 4, 2) - - @info("SymbolicNode::reshape::-3") - - A = mx.zeros(2, 3, 4, 5) - x = mx.Variable(:x) - y = mx.reshape(x, -3, -3) - e = mx.bind(y, mx.cpu(), Dict(:x => A)) - mx.forward(e) - out = e.outputs[1] - - @test size(out) == (6, 20) - - @info("SymbolicNode::reshape::-4") - - A = mx.zeros(2, 3, 4) - x = mx.Variable(:x) - y = mx.reshape(x, 0, 0, -4, 2, 2) - e = mx.bind(y, mx.cpu(), Dict(:x => A)) - mx.forward(e) - out = e.outputs[1] - - @test size(out) == (2, 3, 2, 2) -end - -function test_dot() - @info("SymbolicNode::dot") - x = mx.Variable(:x) - y = mx.Variable(:y) - z = mx.dot(x, y) - z_exec = mx.bind(z, context = mx.cpu(), - args = Dict(:x => mx.ones((100, 2)), :y => mx.ones((2, 200)))) - mx.forward(z_exec) - - ret = copy(z_exec.outputs[1]) - @test size(ret) == (100, 200) - @test ret ≈ 2*ones(100, 200) -end - -function test_print() - @info("SymbolicNode::print") - io = IOBuffer() - print(io, mx.Variable(:x)) - @test !isempty(String(take!(io))) -end - -function test_misc() - @info("SymbolicNode::Miscellaneous") - # Test for #189 - a = mx.Variable("a") - b = mx.Variable("b") - symb = mx.ElementWiseSum(a, b) -end - -function test_add() - @info("SymbolicNode::elementwise add") - let x = mx.Variable(:x), A = Float32[1 2; 3 4] - let y = exec(x .+ 42; :x => A)[] - @test size(y) == size(A) - @test copy(y) == A .+ 42 - end - - let y = exec(42 .+ x; :x => A)[] - @test size(y) == size(A) - @test copy(y) == 42 .+ A - end - - let y = exec(-1 .+ x .+ 42; :x => A)[] - @test size(y) == size(A) - @test 
copy(y) == -1 .+ A .+ 42 - end - end - - let A = Float32[1 2; 3 4], B = Float32[2 4; 6 8] - x = mx.Variable(:x) - y = mx.Variable(:y) - - let z = x .+ y - z = exec(z; :x => A, :y => B)[] - - @test size(z) == size(A) - @test copy(z) == A .+ B - end - - let z = y .+ x - z = exec(z; :x => A, :y => B)[] - - @test size(z) == size(A) - @test copy(z) == B .+ A - end - end -end # function test_add - -function test_minus() - @info("SymbolicNode::elementwise minus") - let x = mx.Variable(:x), A = Float32[1 2; 3 4] - let y = exec(x .- 42; :x => A)[] - @test size(y) == size(A) - @test copy(y) == A .- 42 - end - - let y = exec(42 .- x; :x => A)[] - @test size(y) == size(A) - @test copy(y) == 42 .- A - end - - let y = exec(-1 .- x .- 42; :x => A)[] - @test size(y) == size(A) - @test copy(y) == -1 .- A .- 42 - end - - let y = exec(-x; :x => A)[] - @test size(y) == size(A) - @test copy(y) == -A - end - end - - let A = Float32[1 2; 3 4], B = Float32[2 4; 6 8] - x = mx.Variable(:x) - y = mx.Variable(:y) - - let z = x .- y - z = exec(z; :x => A, :y => B)[] - - @test size(z) == size(A) - @test copy(z) == A .- B - end - - let z = y .- x - z = exec(z; :x => A, :y => B)[] - - @test size(z) == size(A) - @test copy(z) == B .- A - end - end -end # function test_minus - -function test_mul() - @info("SymbolicNode::elementwise mul") - let x = mx.Variable(:x), A = Float32[1 2; 3 4] - let y = exec(x .* 42; :x => A)[] - @test size(y) == size(A) - @test copy(y) == A .* 42 - end - - let y = exec(42 .* x; :x => A)[] - @test size(y) == size(A) - @test copy(y) == 42 .* A - end - - let y = exec(-1 .* x .* 42; :x => A)[] - @test size(y) == size(A) - @test copy(y) == -1 .* A .* 42 - end - end - - let A = Float32[1 2; 3 4], B = Float32[2 4; 6 8] - x = mx.Variable(:x) - y = mx.Variable(:y) - - let z = x .* y - z = exec(z; :x => A, :y => B)[] - - @test size(z) == size(A) - @test copy(z) == A .* B - end - - let z = y .* x - z = exec(z; :x => A, :y => B)[] - - @test size(z) == size(A) - @test copy(z) == B .* A - end - end -end # function test_mul - -function test_div() - @info("SymbolicNode::elementwise div") - let x = mx.Variable(:x), A = Float32[1 2; 3 4] - let y = exec(x ./ 42; :x => A)[] - @test size(y) == size(A) - @test copy(y) ≈ A ./ 42 - end - - let y = exec(42 ./ x; :x => A)[] - @test size(y) == size(A) - @test copy(y) ≈ 42 ./ A - end - - let y = exec(-1 ./ x ./ 42; :x => A)[] - @test size(y) == size(A) - @test copy(y) ≈ -1 ./ A ./ 42 - end - end - - let A = Float32[1 2; 3 4], B = Float32[2 4; 6 8] - x = mx.Variable(:x) - y = mx.Variable(:y) - - let z = x ./ y - z = exec(z; :x => A, :y => B)[] - - @test size(z) == size(A) - @test copy(z) ≈ A ./ B - end - - let z = y ./ x - z = exec(z; :x => A, :y => B)[] - - @test size(z) == size(A) - @test copy(z) ≈ B ./ A - end - end -end # function test_div - -function test_power() - @info("SymbolicNode::elementwise power") - let x = mx.Variable(:x), A = Float32[1 2; 3 4] - let y = exec(x .^ 42; :x => A)[] - @test size(y) == size(A) - @test copy(y) ≈ A .^ 42 - end - - let y = exec(42 .^ x; :x => A)[] - @test size(y) == size(A) - @test copy(y) ≈ 42 .^ A - end - end - - let A = Float32[1 2; 3 4], B = Float32[2 4; 6 8] - x = mx.Variable(:x) - y = mx.Variable(:y) - - let z = x .^ y - z = exec(z; :x => A, :y => B)[] - - @test size(z) == size(A) - @test copy(z) ≈ A .^ B - end - - let z = y .^ x - z = exec(z; :x => A, :y => B)[] - - @test size(z) == size(A) - @test copy(z) ≈ B .^ A - end - end - - @info("SymbolicNode::power::e .^ x::x .^ e") - let x = mx.Variable(:x), A = [0 0 0; 0 0 0] - y = 
exec(ℯ .^ x; :x => A)[] - @test copy(y) ≈ fill(1, size(A)) - end - - let x = mx.Variable(:x), A = Float32[1 2; 3 4] - let y = ℯ .^ x - z = exec(y; :x => A)[] - @test copy(z) ≈ ℯ .^ A - end - - let y = x .^ ℯ - z = exec(y; :x => A)[] - @test copy(z) ≈ A .^ ℯ - end - end - - @info("SymbolicNode::power::π .^ x::x .^ π") - let x = mx.Variable(:x), A = Float32[1 2; 3 4] - let y = π .^ x - z = exec(y; :x => A)[] - @test copy(z) ≈ π .^ A - end - - let y = x .^ π - z = exec(y; :x => A)[] - @test copy(z) ≈ A .^ π - end - end -end # function test_power - -function test_get_name() - @info("SymbolicNode::get_name::with get_internals") - name = mx.get_name(mx.get_internals(mlp2())) # no error - @test occursin("Ptr", name) -end # function test_get_name - -function test_var() - @info("SymbolicNode::var") - x = @mx.var x - @test x isa mx.SymbolicNode - - x′ = @mx.var x - @test x.handle != x′.handle - - x, y, z = @mx.var x y z - @test x isa mx.SymbolicNode - @test y isa mx.SymbolicNode - @test z isa mx.SymbolicNode -end # test_var - - -################################################################################ -# Run tests -################################################################################ -@testset "SymbolicNode Test" begin - test_basic() - test_internal() - test_compose() - test_infer_shape() - test_infer_shape_error() - test_saveload() - test_attrs() - test_functions() - test_reshape() - test_dot() - test_print() - test_misc() - test_add() - test_minus() - test_mul() - test_div() - test_power() - test_get_name() - test_var() -end - -end diff --git a/julia/test/unittest/util.jl b/julia/test/unittest/util.jl deleted file mode 100644 index d7f65a3e8012..000000000000 --- a/julia/test/unittest/util.jl +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -module TestUtil - -using MXNet -using Test - - -function test_getdocdefine() - @info("Util::_getdocdefine") - @test occursin("Defined in", mx._getdocdefine("sgd_update")) -end # function test_getdocdefine - - -function test_firstarg() - @info("Util::_firstarg") - @test mx._firstarg(:(f(x, y))) == :x - @test mx._firstarg(:(f(x::mx.NDArray, y))) == :x - @test mx._firstarg(:(f(x::mx.NDArray, y::mx.NDArray))) == :x - @test mx._firstarg(:(f(x::Int, y::mx.NDArray))) == :x - @test mx._firstarg(:(f(x::Int, y::mx.NDArray; other = 42))) == :x - @test mx._firstarg(:(f(x::mx.NDArray{T}, y) where {T})) == :x - @test mx._firstarg(:(f(x::mx.NDArray{T,N}, y) where {T,N})) == :x - @test mx._firstarg(:(f(x::mx.NDArray{T,N} where {T,N}, y))) == :x - @test mx._firstarg(:(broadcast_(::typeof(asin), x::mx.NDArray))) == :x - @test mx._firstarg(:(broadcast_(::typeof(asin), x::mx.NDArray, y::mx.NDArray))) == :x -end # function test_firstarg - - -@testset "Util Test" begin - test_firstarg() - test_getdocdefine() -end # @testset "Util" - -end # module TestUtil diff --git a/julia/test/unittest/visualize.jl b/julia/test/unittest/visualize.jl deleted file mode 100644 index a5a4f722e6e2..000000000000 --- a/julia/test/unittest/visualize.jl +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
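The visualization test removed below only length-checks the GraphViz source returned by `mx.to_graphviz`. A short sketch of actually rendering that output for inspection, assuming the harness's `mlp2` helper and an installed GraphViz `dot` binary (both outside this diff):

using MXNet

net = mlp2()                          # two-layer MLP from the test harness
dot_src = mx.to_graphviz(net)         # GraphViz source the test length-checks
write("mlp2.dot", dot_src)
run(`dot -Tpng mlp2.dot -o mlp2.png`) # render to a PNG for visual inspection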
- -module TestVisualize -using MXNet -using Test - -using ..Main: mlp2 - -################################################################################ -# Test Implementations -################################################################################ - -function test_basic() - @info("Visualize::basic") - - mlp = mlp2() - - # Order of elements or default color values can change, but length of the output should be more or less stable - @test length(mx.to_graphviz(mlp)) == length( -""" -digraph "Network Visualization" { -node [fontsize=10]; -edge [fontsize=10]; -"fc1" [label="fc1\\nFullyConnected\\nnum-hidden=1000",style="rounded,filled",fixedsize=true,width=1.3,fillcolor="#fb8072",shape=box,penwidth=2,height=0.8034,color="#941305"]; -"activation0" [label="activation0\\nActivation\\nact-type=relu",style="rounded,filled",fixedsize=true,width=1.3,fillcolor="#ffffb3",shape=box,penwidth=2,height=0.8034,color="#999900"]; -"fc2" [label="fc2\\nFullyConnected\\nnum-hidden=10",style="rounded,filled",fixedsize=true,width=1.3,fillcolor="#fb8072",shape=box,penwidth=2,height=0.8034,color="#941305"]; -"activation0" -> "fc1" [arrowtail=open,color="#737373",dir=back]; -"fc2" -> "activation0" [arrowtail=open,color="#737373",dir=back]; -} -""") -end -################################################################################ -# Run tests -################################################################################ - -@testset "Visualize Test" begin - test_basic() -end -end diff --git a/matlab/+mxnet/model.m b/matlab/+mxnet/model.m deleted file mode 100644 index c45cb7714826..000000000000 --- a/matlab/+mxnet/model.m +++ /dev/null @@ -1,242 +0,0 @@ -% Licensed to the Apache Software Foundation (ASF) under one -% or more contributor license agreements. See the NOTICE file -% distributed with this work for additional information -% regarding copyright ownership. The ASF licenses this file -% to you under the Apache License, Version 2.0 (the -% "License"); you may not use this file except in compliance -% with the License. You may obtain a copy of the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, -% software distributed under the License is distributed on an -% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -% KIND, either express or implied. See the License for the -% specific language governing permissions and limitations -% under the License. - -classdef model < handle -%MODEL MXNet model, supports load and forward - -properties -% The symbol definition, in json format - symbol -% parameter weights - params -% whether or not to print info - verbose -end - -properties (Access = private) -% mxnet predictor - predictor -% the previous input size - prev_input_size -% the previous device id - prev_dev_id -% the previous device type (cpu or gpu) - prev_dev_type -% the previous output layers - prev_out_layers -end - -methods - function obj = model() - %CONSTRUCTOR - obj.predictor = libpointer('voidPtr', 0); - obj.prev_input_size = zeros(1,4); - obj.verbose = 1; - obj.prev_dev_id = -1; - obj.prev_dev_type = -1; - end - - function delete(obj) - %DESTRUCTOR - obj.free_predictor(); - end - - function load(obj, model_prefix, num_epoch) - %LOAD load model from files - % - % An MXNet model is stored in two files. The first one contains the symbol - % definition in json format, while the second one stores all weights in binary - % format. 
For example, if we save a model using the prefix 'model/vgg19' at - % epoch 8, then we will get two files: 'model/vgg19-symbol.json' and - % 'model/vgg19-0008.params' - % - % model_prefix : the string model prefix - % num_epoch : the epoch to load - % - % Example: - % model = mxnet.model - % model.load('output/vgg19', 8) - - % read symbol - obj.symbol = fileread([model_prefix, '-symbol.json']); - - % read params - fid = fopen(sprintf('%s-%04d.params', model_prefix, num_epoch), 'rb'); - assert(fid ~= 0); - obj.params = fread(fid, inf, '*ubit8'); - fclose(fid); - end - - function json = parse_symbol(obj) - json = parse_json(obj.symbol); - end - - - function outputs = forward(obj, input, varargin) - %FORWARD perform forward - % - % OUT = MODEL.FORWARD(input) returns the forward (prediction) outputs of a list - % of input examples - % - % Examples - % - % % load and resize an image - % img = imread('test.jpg') - % img = imresize(img, [224 224]) - % % get the softmax output - % out = model.forward(img) - % % get the output of two internal layers - % out = model.forward(img, {'conv4', 'conv5'}) - % % use gpu 0 - % out = model.forward(img, 'gpu', 0) - % % use two gpus for an image list - % imgs(:,:,:,1) = img1 - % imgs(:,:,:,2) = img2 - % out = model.forward(imgs, 'gpu', [0,1]) - - % parse arguments - dev_type = 1; % cpu by default - dev_id = 0; - out_layers = {}; - while length(varargin) > 0 - if ischar(varargin{1}) && strcmp(varargin{1}, 'gpu') - assert(length(varargin) > 1, 'arg error: no gpu id') - assert(isnumeric(varargin{2})) - dev_type = 2; - dev_id = varargin{2}; - varargin = varargin(3:end); - continue - end - - if ischar(varargin{1}) - out_layers{end+1} = varargin{1}; - varargin = varargin(2:end); - continue - end - - if iscell(varargin{1}) - out_layers = varargin{1}; - varargin = varargin(2:end); - continue - end - end - - siz = size(input); - assert(length(siz) >= 2); - - % convert from matlab order (col-major) into c order (row major): - input = obj.convert_ndarray(input); - - if obj.changed(siz, dev_type, dev_id, out_layers) - obj.free_predictor() - end - - if obj.predictor.Value == 0 - fprintf('create predictor with input size '); - fprintf('%d ', siz); - fprintf('\n'); - csize = [ones(1, 4-length(siz)), siz(end:-1:1)]; - callmxnet('MXPredCreatePartialOut', obj.symbol, ... - libpointer('voidPtr', obj.params), ... - length(obj.params), ... - int32(dev_type), int32(dev_id), ... - 1, {'data'}, ... - uint32([0, 4]), ... - uint32(csize), ... - uint32(length(out_layers)), out_layers, ... 
- obj.predictor); - end - - % feed input - callmxnet('MXPredSetInput', obj.predictor, 'data', single(input(:)), uint32(numel(input))); - % forward - callmxnet('MXPredForward', obj.predictor); - - % get output - num_out = 1; - if ~isempty(out_layers), num_out = length(out_layers); end - - if num_out == 1 - outputs = obj.get_output(0); - else - outputs = cell(num_out,1); - for i = 1 : num_out - outputs{i} = obj.get_output(i-1); - end - end - - end -end - -methods (Access = private) - function free_predictor(obj) - % free the predictor - if obj.predictor.Value ~= 0 - callmxnet('MXPredFree', obj.predictor); - obj.predictor = libpointer('voidPtr', 0); - end - end - - function Y = convert_ndarray(obj, X) - % convert between matlab's col major and c's row major - siz = size(X); - Y = permute(X, [2 1 3:length(siz)]); - end - - function ret = changed(obj, input_size, dev_type, dev_id, out_layers) - % check if arguments changed since last call - ret = 0; - if length(input_size) ~= length(obj.prev_input_size) || ... - any(input_size ~= obj.prev_input_size) || ... - dev_type ~= obj.prev_dev_type || ... - length(dev_id) ~= length(obj.prev_dev_id) || ... - any(dev_id ~= obj.prev_dev_id) || ... - length(out_layers) ~= length(obj.prev_out_layers) || ... - ~all(cellfun(@strcmp, out_layers, obj.prev_out_layers)) - ret = 1; - end - obj.prev_input_size = input_size; - obj.prev_dev_type = dev_type; - obj.prev_dev_id = dev_id; - obj.prev_out_layers = out_layers; - end - - function out = get_output(obj, index) - % get the i-th output - out_dim = libpointer('uint32Ptr', 0); - out_shape = libpointer('uint32PtrPtr', ones(4,1)); - callmxnet('MXPredGetOutputShape', obj.predictor, index, out_shape, out_dim); - assert(out_dim.Value <= 4); - out_siz = out_shape.Value(1:out_dim.Value); - out_siz = double(out_siz(end:-1:1))'; - - % get output - out = libpointer('singlePtr', single(zeros(out_siz))); - - callmxnet('MXPredGetOutput', obj.predictor, index, ... - out, uint32(prod(out_siz))); - - % TODO convert from c order to matlab order... - out = reshape(out.Value, out_siz); - if length(out_siz) > 2 - out = obj.convert_ndarray(out); - end - end - -end - -end diff --git a/matlab/+mxnet/private/callmxnet.m b/matlab/+mxnet/private/callmxnet.m deleted file mode 100644 index 6ce601829ba1..000000000000 --- a/matlab/+mxnet/private/callmxnet.m +++ /dev/null @@ -1,45 +0,0 @@ -% Licensed to the Apache Software Foundation (ASF) under one -% or more contributor license agreements. See the NOTICE file -% distributed with this work for additional information -% regarding copyright ownership. The ASF licenses this file -% to you under the Apache License, Version 2.0 (the -% "License"); you may not use this file except in compliance -% with the License. You may obtain a copy of the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, -% software distributed under the License is distributed on an -% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -% KIND, either express or implied. See the License for the -% specific language governing permissions and limitations -% under the License. - -function callmxnet(func, varargin) -%CALLMXNET call mxnet functions - -if ~libisloaded('libmxnet') - cur_pwd = pwd; - mxnet_root = [fileparts(mfilename('fullpath')), '/../../../']; - cd(mxnet_root); - mxnet_root = pwd; - cd(cur_pwd); - - assert(exist([mxnet_root, '/lib/libmxnet.so' ], 'file') == 2 || ... - exist([mxnet_root, '/lib/libmxnet.dylib'], 'file') == 2 || ... 
- exist([mxnet_root, '/lib/libmxnet.dll' ], 'file') == 2, ... - 'you need to build mxnet first'); - assert(exist([mxnet_root, '/include/mxnet/c_predict_api.h']) == 2, ... - 'failed to find c_predict_api.h') - addpath([mxnet_root, '/lib']) - addpath([mxnet_root, '/include/mxnet']) - - [err, warn] = loadlibrary('libmxnet', 'c_predict_api.h'); - assert(isempty(err)); - if warn, warn, end -end - -assert(ischar(func)) -ret = calllib('libmxnet', func, varargin{:}); -assert(ret == 0) -end diff --git a/matlab/+mxnet/private/parse_json.m b/matlab/+mxnet/private/parse_json.m deleted file mode 100644 index 66a8b234ec37..000000000000 --- a/matlab/+mxnet/private/parse_json.m +++ /dev/null @@ -1,634 +0,0 @@ -% Licensed to the Apache Software Foundation (ASF) under one -% or more contributor license agreements. See the NOTICE file -% distributed with this work for additional information -% regarding copyright ownership. The ASF licenses this file -% to you under the Apache License, Version 2.0 (the -% "License"); you may not use this file except in compliance -% with the License. You may obtain a copy of the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, -% software distributed under the License is distributed on an -% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -% KIND, either express or implied. See the License for the -% specific language governing permissions and limitations -% under the License. - -function data = parse_json(fname,varargin) -%PARSE_JSON parse a JSON (JavaScript Object Notation) file or string -% -% Based on jsonlab (https://github.com/fangq/jsonlab) created by Qianqian Fang. Jsonlab is licensed under BSD or GPL v3. - -global pos inStr len esc index_esc len_esc isoct arraytoken - -if(regexp(fname,'^\s*(?:\[.+\])|(?:\{.+\})\s*$','once')) - string=fname; -elseif(exist(fname,'file')) - try - string = fileread(fname); - catch - try - string = urlread(['file://',fname]); - catch - string = urlread(['file://',fullfile(pwd,fname)]); - end - end -else - error('input file does not exist'); -end - -pos = 1; len = length(string); inStr = string; -isoct=exist('OCTAVE_VERSION','builtin'); -arraytoken=find(inStr=='[' | inStr==']' | inStr=='"'); -jstr=regexprep(inStr,'\\\\',' '); -escquote=regexp(jstr,'\\"'); -arraytoken=sort([arraytoken escquote]); - -% String delimiters and escape chars identified to improve speed: -esc = find(inStr=='"' | inStr=='\' ); % comparable to: regexp(inStr, '["\\]'); -index_esc = 1; len_esc = length(esc); - -opt=varargin2struct(varargin{:}); - -if(jsonopt('ShowProgress',0,opt)==1) - opt.progressbar_=waitbar(0,'loading ...'); -end -jsoncount=1; -while pos <= len - switch(next_char) - case '{' - data{jsoncount} = parse_object(opt); - case '[' - data{jsoncount} = parse_array(opt); - otherwise - error_pos('Outer level structure must be an object or an array'); - end - jsoncount=jsoncount+1; -end % while - -jsoncount=length(data); -if(jsoncount==1 && iscell(data)) - data=data{1}; -end - -if(isfield(opt,'progressbar_')) - close(opt.progressbar_); -end - -%%------------------------------------------------------------------------- -function object = parse_object(varargin) - parse_char('{'); - object = []; - if next_char ~= '}' - while 1 - str = parseStr(varargin{:}); - if isempty(str) - error_pos('Name of value at position %d cannot be empty'); - end - parse_char(':'); - val = parse_value(varargin{:}); - object.(valid_field(str))=val; - if next_char == '}' - break; - end - parse_char(',');
- end - end - parse_char('}'); - if(isstruct(object)) - object=struct2jdata(object); - end - -%%------------------------------------------------------------------------- - -function object = parse_array(varargin) % JSON array is written in row-major order -global pos inStr isoct - parse_char('['); - object = cell(0, 1); - dim2=[]; - arraydepth=jsonopt('JSONLAB_ArrayDepth_',1,varargin{:}); - pbar=-1; - if(isfield(varargin{1},'progressbar_')) - pbar=varargin{1}.progressbar_; - end - - if next_char ~= ']' - if(jsonopt('FastArrayParser',1,varargin{:})>=1 && arraydepth>=jsonopt('FastArrayParser',1,varargin{:})) - [endpos, e1l, e1r]=matching_bracket(inStr,pos); - arraystr=['[' inStr(pos:endpos)]; - arraystr=regexprep(arraystr,'"_NaN_"','NaN'); - arraystr=regexprep(arraystr,'"([-+]*)_Inf_"','$1Inf'); - arraystr(arraystr==sprintf('\n'))=[]; - arraystr(arraystr==sprintf('\r'))=[]; - %arraystr=regexprep(arraystr,'\s*,',','); % this is slow,sometimes needed - if(~isempty(e1l) && ~isempty(e1r)) % the array is in 2D or higher D - astr=inStr((e1l+1):(e1r-1)); - astr=regexprep(astr,'"_NaN_"','NaN'); - astr=regexprep(astr,'"([-+]*)_Inf_"','$1Inf'); - astr(astr==sprintf('\n'))=[]; - astr(astr==sprintf('\r'))=[]; - astr(astr==' ')=''; - if(isempty(find(astr=='[', 1))) % array is 2D - dim2=length(sscanf(astr,'%f,',[1 inf])); - end - else % array is 1D - astr=arraystr(2:end-1); - astr(astr==' ')=''; - [obj, count, errmsg, nextidx]=sscanf(astr,'%f,',[1,inf]); - if(nextidx>=length(astr)-1) - object=obj; - pos=endpos; - parse_char(']'); - return; - end - end - if(~isempty(dim2)) - astr=arraystr; - astr(astr=='[')=''; - astr(astr==']')=''; - astr(astr==' ')=''; - [obj, count, errmsg, nextidx]=sscanf(astr,'%f,',inf); - if(nextidx>=length(astr)-1) - object=reshape(obj,dim2,numel(obj)/dim2)'; - pos=endpos; - parse_char(']'); - if(pbar>0) - waitbar(pos/length(inStr),pbar,'loading ...'); - end - return; - end - end - arraystr=regexprep(arraystr,'\]\s*,','];'); - else - arraystr='['; - end - try - if(isoct && regexp(arraystr,'"','once')) - error('Octave eval can produce empty cells for JSON-like input'); - end - object=eval(arraystr); - pos=endpos; - catch - while 1 - newopt=varargin2struct(varargin{:},'JSONLAB_ArrayDepth_',arraydepth+1); - val = parse_value(newopt); - object{end+1} = val; - if next_char == ']' - break; - end - parse_char(','); - end - end - end - if(jsonopt('SimplifyCell',0,varargin{:})==1) - try - oldobj=object; - object=cell2mat(object')'; - if(iscell(oldobj) && isstruct(object) && numel(object)>1 && jsonopt('SimplifyCellArray',1,varargin{:})==0) - object=oldobj; - elseif(size(object,1)>1 && ismatrix(object)) - object=object'; - end - catch - end - end - parse_char(']'); - - if(pbar>0) - waitbar(pos/length(inStr),pbar,'loading ...'); - end -%%------------------------------------------------------------------------- - -function parse_char(c) - global pos inStr len - pos=skip_whitespace(pos,inStr,len); - if pos > len || inStr(pos) ~= c - error_pos(sprintf('Expected %c at position %%d', c)); - else - pos = pos + 1; - pos=skip_whitespace(pos,inStr,len); - end - -%%------------------------------------------------------------------------- - -function c = next_char - global pos inStr len - pos=skip_whitespace(pos,inStr,len); - if pos > len - c = []; - else - c = inStr(pos); - end - -%%------------------------------------------------------------------------- - -function newpos=skip_whitespace(pos,inStr,len) - newpos=pos; - while newpos <= len && isspace(inStr(newpos)) - newpos = newpos + 1; - end - 
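With the character-level helpers above in place, the entry point can be exercised directly; a usage sketch in MATLAB comment form (the JSON literal and field values are illustrative only, and the behavior described assumes the helper functions defined later in this file):

% Usage sketch: parse_json accepts either a filename or a raw JSON string,
% per the regexp branch at the top of the file, e.g.:
%
%   s = parse_json('{"name": "fc1", "shape": [2, 3, 4]}');
%   s.name    % 'fc1'   -- object members become struct fields via valid_field
%   s.shape   % [2 3 4] -- numeric arrays take parse_array's sscanf fast path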
-%%-------------------------------------------------------------------------
-function str = parseStr(varargin)
-    global pos inStr len esc index_esc len_esc
-    % len, ns = length(inStr), keyboard
-    if inStr(pos) ~= '"'
-        error_pos('String starting with " expected at position %d');
-    else
-        pos = pos + 1;
-    end
-    str = '';
-    while pos <= len
-        while index_esc <= len_esc && esc(index_esc) < pos
-            index_esc = index_esc + 1;
-        end
-        if index_esc > len_esc
-            str = [str inStr(pos:len)];
-            pos = len + 1;
-            break;
-        else
-            str = [str inStr(pos:esc(index_esc)-1)];
-            pos = esc(index_esc);
-        end
-        nstr = length(str);
-        switch inStr(pos)
-            case '"'
-                pos = pos + 1;
-                if(~isempty(str))
-                    if(strcmp(str,'_Inf_'))
-                        str=Inf;
-                    elseif(strcmp(str,'-_Inf_'))
-                        str=-Inf;
-                    elseif(strcmp(str,'_NaN_'))
-                        str=NaN;
-                    end
-                end
-                return;
-            case '\'
-                if pos+1 > len
-                    error_pos('End of file reached right after escape character');
-                end
-                pos = pos + 1;
-                switch inStr(pos)
-                    case {'"' '\' '/'}
-                        str(nstr+1) = inStr(pos);
-                        pos = pos + 1;
-                    case {'b' 'f' 'n' 'r' 't'}
-                        str(nstr+1) = sprintf(['\' inStr(pos)]);
-                        pos = pos + 1;
-                    case 'u'
-                        if pos+4 > len
-                            error_pos('End of file reached in escaped unicode character');
-                        end
-                        str(nstr+(1:6)) = inStr(pos-1:pos+4);
-                        pos = pos + 5;
-                end
-            otherwise % should never happen
-                str(nstr+1) = inStr(pos);
-                keyboard;
-                pos = pos + 1;
-        end
-    end
-    error_pos('End of file while expecting end of inStr');
-
-%%-------------------------------------------------------------------------
-
-function num = parse_number(varargin)
-    global pos inStr isoct
-    currstr=inStr(pos:min(pos+30,end));
-    if(isoct~=0)
-        numstr=regexp(currstr,'^\s*-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+\-]?\d+)?','end');
-        [num] = sscanf(currstr, '%f', 1);
-        delta=numstr+1;
-    else
-        [num, one, err, delta] = sscanf(currstr, '%f', 1);
-        if ~isempty(err)
-            error_pos('Error reading number at position %d');
-        end
-    end
-    pos = pos + delta-1;
-
-%%-------------------------------------------------------------------------
-
-function val = parse_value(varargin)
-    global pos inStr len
-
-    if(isfield(varargin{1},'progressbar_'))
-        waitbar(pos/len,varargin{1}.progressbar_,'loading ...');
-    end
-
-    switch(inStr(pos))
-        case '"'
-            val = parseStr(varargin{:});
-            return;
-        case '['
-            val = parse_array(varargin{:});
-            return;
-        case '{'
-            val = parse_object(varargin{:});
-            return;
-        case {'-','0','1','2','3','4','5','6','7','8','9'}
-            val = parse_number(varargin{:});
-            return;
-        case 't'
-            if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'true')
-                val = true;
-                pos = pos + 4;
-                return;
-            end
-        case 'f'
-            if pos+4 <= len && strcmpi(inStr(pos:pos+4), 'false')
-                val = false;
-                pos = pos + 5;
-                return;
-            end
-        case 'n'
-            if pos+3 <= len && strcmpi(inStr(pos:pos+3), 'null')
-                val = [];
-                pos = pos + 4;
-                return;
-            end
-    end
-    error_pos('Value expected at position %d');
-%%-------------------------------------------------------------------------
-
-function error_pos(msg)
-    global pos inStr len
-    poShow = max(min([pos-15 pos-1 pos pos+20],len),1);
-    if poShow(3) == poShow(2)
-        poShow(3:4) = poShow(2)+[0 -1]; % display nothing after
-    end
-    msg = [sprintf(msg, pos) ': ' ...
-        inStr(poShow(1):poShow(2)) '<error>' inStr(poShow(3):poShow(4)) ];
-    error( ['JSONparser:invalidFormat: ' msg] );
-
-%%-------------------------------------------------------------------------
-
-function str = valid_field(str)
-global isoct
-% From MATLAB doc: field names must begin with a letter, which may be
-% followed by any combination of letters, digits, and underscores.
-% Invalid characters will be converted to underscores, and the prefix
-% "x0x[Hex code]_" will be added if the first character is not a letter.
-    pos=regexp(str,'^[^A-Za-z]','once');
-    if(~isempty(pos))
-        if(~isoct)
-            str=regexprep(str,'^([^A-Za-z])','x0x${sprintf(''%X'',unicode2native($1))}_','once');
-        else
-            str=sprintf('x0x%X_%s',char(str(1)),str(2:end));
-        end
-    end
-    if(isempty(regexp(str,'[^0-9A-Za-z_]', 'once' )))
-        return;
-    end
-    if(~isoct)
-        str=regexprep(str,'([^0-9A-Za-z_])','_0x${sprintf(''%X'',unicode2native($1))}_');
-    else
-        pos=regexp(str,'[^0-9A-Za-z_]');
-        if(isempty(pos))
-            return;
-        end
-        str0=str;
-        pos0=[0 pos(:)' length(str)];
-        str='';
-        for i=1:length(pos)
-            str=[str str0(pos0(i)+1:pos(i)-1) sprintf('_0x%X_',str0(pos(i)))];
-        end
-        if(pos(end)~=length(str))
-            str=[str str0(pos0(end-1)+1:pos0(end))];
-        end
-    end
-    %str(~isletter(str) & ~('0' <= str & str <= '9')) = '_';
-
-%%-------------------------------------------------------------------------
-function endpos = matching_quote(str,pos)
-len=length(str);
-while(pos<len)
-    if(str(pos)=='"')
-        if(~(pos>1 && str(pos-1)=='\'))
-            endpos=pos;
-            return;
-        end
-    end
-    pos=pos+1;
-end
-error('unmatched quotation mark');
-%%-------------------------------------------------------------------------
-function [endpos, e1l, e1r, maxlevel] = matching_bracket(str,pos)
-global arraytoken
-level=1;
-maxlevel=level;
-endpos=0;
-bpos=arraytoken(arraytoken>=pos);
-tokens=str(bpos);
-len=length(tokens);
-pos=1;
-e1l=[];
-e1r=[];
-while(pos<=len)
-    c=tokens(pos);
-    if(c==']')
-        level=level-1;
-        if(isempty(e1r))
-            e1r=bpos(pos);
-        end
-        if(level==0)
-            endpos=bpos(pos);
-            return
-        end
-    end
-    if(c=='[')
-        if(isempty(e1l))
-            e1l=bpos(pos);
-        end
-        level=level+1;
-        maxlevel=max(maxlevel,level);
-    end
-    if(c=='"')
-        pos=matching_quote(tokens,pos+1);
-    end
-    pos=pos+1;
-end
-if(endpos==0)
-    error('unmatched "]"');
-end
-
-function opt=varargin2struct(varargin)
-%
-% opt=varargin2struct('param1',value1,'param2',value2,...)
-% or
-% opt=varargin2struct(...,optstruct,...)
-%
-% convert a series of input parameters into a structure
-%
-% input:
-%      'param', value: the input parameters should be pairs of a string and a value
-%      optstruct: if a parameter is a struct, the fields will be merged to the output struct
-%
-% output:
-%      opt: a struct where opt.param1=value1, opt.param2=value2 ...
-%
-
-len=length(varargin);
-opt=struct;
-if(len==0) return; end
-i=1;
-while(i<=len)
-    if(isstruct(varargin{i}))
-        opt=mergestruct(opt,varargin{i});
-    elseif(ischar(varargin{i}) && i<len)
-        opt=setfield(opt,lower(varargin{i}),varargin{i+1});
-        i=i+1;
-    else
-        error('input must be in the form of ''param'',value pairs or structs');
-    end
-    i=i+1;
-end
-
-function val=jsonopt(key,default,varargin)
-%
-% val=jsonopt(key,default,optstruct)
-%
-% look up an option value from a struct produced by varargin2struct
-%
-% authors:Qianqian Fang (fangq <at> nmr.mgh.harvard.edu)
-%
-% $Id: loadjson.m 371 2012-06-20 12:43:06Z fangq $
-%
-% input:
-%      key: a string with which one look up a value from a struct
-%      default: if the key does not exist, return default
-%      optstruct: a struct where each sub-field is a key
-%
-% output:
-%      val: if key exists, val=optstruct.key; otherwise val=default
-%
-
-val=default;
-if(nargin<=2) return; end
-opt=varargin{1};
-if(isstruct(opt))
-    if(isfield(opt,key))
-        val=getfield(opt,key);
-    elseif(isfield(opt,lower(key)))
-        val=getfield(opt,lower(key));
-    end
-end
-
-function s=mergestruct(s1,s2)
-%
-% s=mergestruct(s1,s2)
-%
-% merge two struct objects into one
-%
-% authors:Qianqian Fang (fangq <at> nmr.mgh.harvard.edu)
-% date: 2012/12/22
-%
-% input:
-%      s1,s2: a struct object, s1 and s2 can not be arrays
-%
-% output:
-%      s: the merged struct object. fields in s1 and s2 will be combined in s.
-
-if(~isstruct(s1) || ~isstruct(s2))
-    error('input parameters contain non-struct');
-end
-if(length(s1)>1 || length(s2)>1)
-    error('can not merge struct arrays');
-end
-fn=fieldnames(s2);
-s=s1;
-for i=1:length(fn)
-    s=setfield(s,fn{i},getfield(s2,fn{i}));
-end
-function newdata=struct2jdata(data,varargin)
-%
-% newdata=struct2jdata(data,opt,...)
-%
-% convert a JData object (in the form of a struct array) into an array
-%
-% authors:Qianqian Fang (fangq <at> nmr.mgh.harvard.edu)
-%
-% input:
-%      data: a struct array. If data contains JData keywords in the first
-%            level children, these fields are parsed and regrouped into a
-%            data object (arrays, trees, graphs etc) based on JData
-%            specification. The JData keywords are
-%               "_ArrayType_", "_ArraySize_", "_ArrayData_"
-%               "_ArrayIsSparse_", "_ArrayIsComplex_"
-%      opt: (optional) a list of 'Param',value pairs for additional options
-%           The supported options include
-%               'Recursive', if set to 1, will apply the conversion to
-%                            every child; 0 to disable
-%
-% output:
-%      newdata: the converted data if the input data does contain a JData
-%               structure; otherwise, the same as the input.
-%
-% examples:
-%      obj=struct('_ArrayType_','double','_ArraySize_',[2 3],
-%                 '_ArrayIsSparse_',1 ,'_ArrayData_',null);
-%      ubjdata=struct2jdata(obj);
-
-fn=fieldnames(data);
-newdata=data;
-len=length(data);
-if(jsonopt('Recursive',0,varargin{:})==1)
-    for i=1:length(fn) % depth-first
-        for j=1:len
-            if(isstruct(getfield(data(j),fn{i})))
-                newdata(j)=setfield(newdata(j),fn{i},jstruct2array(getfield(data(j),fn{i})));
-            end
-        end
-    end
-end
-if(~isempty(strmatch('x0x5F_ArrayType_',fn)) && ~isempty(strmatch('x0x5F_ArrayData_',fn)))
-    newdata=cell(len,1);
-    for j=1:len
-        ndata=cast(data(j).x0x5F_ArrayData_,data(j).x0x5F_ArrayType_);
-        iscpx=0;
-        if(~isempty(strmatch('x0x5F_ArrayIsComplex_',fn)))
-            if(data(j).x0x5F_ArrayIsComplex_)
-                iscpx=1;
-            end
-        end
-        if(~isempty(strmatch('x0x5F_ArrayIsSparse_',fn)))
-            if(data(j).x0x5F_ArrayIsSparse_)
-                if(~isempty(strmatch('x0x5F_ArraySize_',fn)))
-                    dim=double(data(j).x0x5F_ArraySize_);
-                    if(iscpx && size(ndata,2)==4-any(dim==1))
-                        ndata(:,end-1)=complex(ndata(:,end-1),ndata(:,end));
-                    end
-                    if isempty(ndata)
-                        % All-zeros sparse
-                        ndata=sparse(dim(1),prod(dim(2:end)));
-                    elseif dim(1)==1
-                        % Sparse row vector
-                        ndata=sparse(1,ndata(:,1),ndata(:,2),dim(1),prod(dim(2:end)));
-                    elseif dim(2)==1
-                        % Sparse column vector
-                        ndata=sparse(ndata(:,1),1,ndata(:,2),dim(1),prod(dim(2:end)));
-                    else
-                        % Generic sparse array.
-                        ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3),dim(1),prod(dim(2:end)));
-                    end
-                else
-                    if(iscpx && size(ndata,2)==4)
-                        ndata(:,3)=complex(ndata(:,3),ndata(:,4));
-                    end
-                    ndata=sparse(ndata(:,1),ndata(:,2),ndata(:,3));
-                end
-            end
-        elseif(~isempty(strmatch('x0x5F_ArraySize_',fn)))
-            if(iscpx && size(ndata,2)==2)
-                ndata=complex(ndata(:,1),ndata(:,2));
-            end
-            ndata=reshape(ndata(:),data(j).x0x5F_ArraySize_);
-        end
-        newdata{j}=ndata;
-    end
-    if(len==1)
-        newdata=newdata{1};
-    end
-end
diff --git a/matlab/README.md b/matlab/README.md
deleted file mode 100644
index d5ef5d09fc8d..000000000000
--- a/matlab/README.md
+++ /dev/null
@@ -1,140 +0,0 @@
-<!--- Licensed to the Apache Software Foundation (ASF) under one -->
-<!--- or more contributor license agreements.  See the NOTICE file -->
-<!--- distributed with this work for additional information -->
-<!--- regarding copyright ownership.  The ASF licenses this file -->
-<!--- to you under the Apache License, Version 2.0 (the -->
-<!--- "License"); you may not use this file except in compliance -->
-<!--- with the License.  You may obtain a copy of the License at -->
-
-<!---   http://www.apache.org/licenses/LICENSE-2.0 -->
-
-<!--- Unless required by applicable law or agreed to in writing, -->
-<!--- software distributed under the License is distributed on an -->
-<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
-<!--- KIND, either express or implied.  See the License for the -->
-<!--- specific language governing permissions and limitations -->
-<!--- under the License. -->
-
-# MATLAB binding for MXNet
-
-### How to use
-
-MXNet needs to be built so that the `lib/libmxnet.so` is available, which can be done by:
-
-```bash
-cd ..
-make
-```
-The pre-trained `Inception-BN` model should be downloaded to obtain the symbol and network parameters.
-
-```bash
-./get_inception_model.sh
-```
-
-This data will be saved in the `./data` folder:
-
-```bash
-./data/
-├── cat.png
-├── Inception-BN-0126.params
-├── Inception-BN-symbol.json
-└── synset.txt
-```
-
-#### Sample usage
-
-Run the demo script from the command-line without invoking the Matlab GUI:
-
-```bash
-matlab -nodisplay -nojvm -nosplash -nodesktop -r "run('./demo.m'), exit(0);"
-```
-or the script may be run from the Matlab GUI as usual.
-
-The script has the following components:
-
-- Load model
-
-  ```matlab
-  model = mxnet.model;
-  model.load('data/Inception-BN', 126);
-  ```
-
-- Load data and normalise. Here we assume a fixed value of 120 as the 'mean image':
-
-  ```matlab
-  img = single(imresize(imread('./data/cat.png'), [224 224])) - 120;
-  ```
-
-- Get prediction:
-
-  ```matlab
-  pred = model.forward(img);
-  ```
-
-- Do feature extraction on CPU or GPU 0:
-
-  ```matlab
-  feas = model.forward(img, {'max_pool_5b_pool', 'global_pool', 'fc1'}); % CPU mode
-  feas = model.forward(img, 'gpu', 0, {'max_pool_5b_pool', 'global_pool', 'fc1'}); % GPU mode
-  ```
-
-- See [demo.m](demo.m) for more details
-
-### Note on Implementation
-
-We use `loadlibrary` to load the mxnet library directly into Matlab and `calllib` to
-call MXNet functions. Note that Matlab uses column-major order to store N-dim
-arrays while MXNet uses row-major order. So assume we create an array in
-Matlab with
-
-```matlab
-X = zeros([2,3,4,5]);
-```
-
-If we pass the memory of `X` into MXNet, then the correct shape will be
-`[5,4,3,2]` in MXNet. When processing images, MXNet assumes the data layout is
-
-```
-batchSize x channel x width x height
-```
-
-while in Matlab we often store images in
-
-```
-width x height x channel x batchSize
-```
-
-So we should permute the dimensions by `X = permute(X, [2, 1, 3, 4])` before
-passing `X` into MXNet.
-
-### FAQ
-
-1. You may get an error that `GLIBCXX_x.x.xx` is not found, for example on Ubuntu 14.04:
-
-   ```
-   > In loadlibrary (line 359)
-   Error using loadlibrary (line 447)
-   There was an error loading the library "/home/muli/work/mxnet/lib/libmxnet.so"
-   /usr/local/MATLAB/R2015a/bin/glnxa64/../../sys/os/glnxa64/libstdc++.so.6:
-   version `GLIBCXX_3.4.18' not found (required by
-   /home/muli/work/mxnet/lib/libmxnet.so)
-
-   Caused by:
-       Error using loaddefinedlibrary
-       /usr/local/MATLAB/R2015a/bin/glnxa64/../../sys/os/glnxa64/libstdc++.so.6:
-       version `GLIBCXX_3.4.18' not found (required by
-       /home/muli/work/mxnet/lib/libmxnet.so)
-   ```
-
-   One way to fix it is to link `MATLAB_ROOT/sys/os/glnxa64/libstdc++.so.6` to
-   your system's `libstdc++`. For example
-
-   ```bash
-   muli@ghc:/usr/local/MATLAB/R2015a/sys/os/glnxa64$ sudo rm -r libstdc++.so.6
-   muli@ghc:/usr/local/MATLAB/R2015a/sys/os/glnxa64$ sudo ln -s /usr/lib/x86_64-linux-gnu/libstdc++.so.6.0.19 libstdc++.so.6
-   ```
-
-2. The Matlab binding has been tested with the following version:
-
-   `R2016b (9.1.0.441655) 64-bit (glnxa64)`
diff --git a/matlab/demo.m b/matlab/demo.m
deleted file mode 100644
index 74eb2161b372..000000000000
--- a/matlab/demo.m
+++ /dev/null
@@ -1,70 +0,0 @@
-% Licensed to the Apache Software Foundation (ASF) under one
-% or more contributor license agreements. See the NOTICE file
-% distributed with this work for additional information
-% regarding copyright ownership. The ASF licenses this file
-% to you under the Apache License, Version 2.0 (the
-% "License"); you may not use this file except in compliance
-% with the License.
You may obtain a copy of the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, -% software distributed under the License is distributed on an -% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -% KIND, either express or implied. See the License for the -% specific language governing permissions and limitations -% under the License. - -%% Assumes model symbol and parameters already downloaded using .sh script - -%% Load the model -clear model -format compact -model = mxnet.model; -model.load('data/Inception-BN', 126); - -%% Load and resize the image -img = imresize(imread('data/cat.png'), [224 224]); -img = single(img) - 120; -%% Run prediction -pred = model.forward(img); - -%% load the labels -labels = {}; -fid = fopen('data/synset.txt', 'r'); -assert(fid >= 0); -tline = fgetl(fid); -while ischar(tline) - labels{end+1} = tline; - tline = fgetl(fid); -end -fclose(fid); - -%% Print top 5 predictions -fprintf('Top 5 predictions: \n'); -[p, i] = sort(pred, 'descend'); -for x = 1:5 - fprintf(' %2.2f%% - %s\n', p(x)*100, labels{i(x)} ); -end - -%% Print the last 10 layers in the symbol -fprintf('\nLast 10 layers in the symbol: \n'); -sym = model.parse_symbol(); -layers = {}; -for i = 1 : length(sym.nodes) - if ~strcmp(sym.nodes{i}.op, 'null') - layers{end+1} = sym.nodes{i}.name; - end -end -fprintf(' layer name: %s\n', layers{end-10:end}) - - -%% Extract feature from internal layers -fprintf('\nExtract feature from internal layers using CPU forwarding: \n'); -feas = model.forward(img, {'max_pool_5b_pool', 'global_pool', 'fc1'}); -feas(:) - -%% If GPU is available -fprintf('\nExtract feature from internal layers using GPU forwarding: \n'); -feas = model.forward(img, 'gpu', 0, {'max_pool_5b_pool', 'global_pool', 'fc1'}); -feas(:) diff --git a/matlab/get_inception_model.sh b/matlab/get_inception_model.sh deleted file mode 100755 index 3c0cb5c4b052..000000000000 --- a/matlab/get_inception_model.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -MATLAB_DIR=$(cd `dirname $0`; pwd) -DATA_DIR="${MATLAB_DIR}/data/" - -if [[ ! 
-d "${DATA_DIR}" ]]; then - echo "${DATA_DIR} doesn't exist, will create one"; - mkdir -p ${DATA_DIR} -fi -cd ${DATA_DIR} - -# Get cat image -wget --no-check-certificate https://raw.githubusercontent.com/dmlc/mxnet.js/master/data/cat.png; - -# Get inception model -wget --no-check-certificate http://data.mxnet.io/models/imagenet/inception-bn.tar.gz -tar -zxvf inception-bn.tar.gz diff --git a/matlab/tests/prepare_data.m b/matlab/tests/prepare_data.m deleted file mode 100644 index 9802d522f023..000000000000 --- a/matlab/tests/prepare_data.m +++ /dev/null @@ -1,53 +0,0 @@ -% Licensed to the Apache Software Foundation (ASF) under one -% or more contributor license agreements. See the NOTICE file -% distributed with this work for additional information -% regarding copyright ownership. The ASF licenses this file -% to you under the Apache License, Version 2.0 (the -% "License"); you may not use this file except in compliance -% with the License. You may obtain a copy of the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, -% software distributed under the License is distributed on an -% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -% KIND, either express or implied. See the License for the -% specific language governing permissions and limitations -% under the License. - -%% download cifar10 dataset -system('wget https://www.cs.toronto.edu/~kriz/cifar-10-matlab.tar.gz') -system('tar -xzvf cifar-10-matlab.tar.gz') -load cifar-10-batches-mat/test_batch.mat - -%% convert test dataset of cifar10, and save -X = reshape(data', [32, 32, 3, 10000]); -X = permute(X, [2 1 3 4]); -Y = labels + 1; - - -save cifar10-test X Y -%% preview one picture -imshow(imresize(X(:,:,:,2), [128, 128])) - -%% - -!wget http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz -!wget http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz -!gunzip t10k-images-idx3-ubyte.gz -!gunzip t10k-labels-idx1-ubyte.gz - -%% - -fid = fopen('t10k-images-idx3-ubyte', 'r'); -d = fread(fid, inf, '*uint8'); -fclose(fid); -X = reshape(d(17:end), [28 28 1 10000]); -X = permute(X, [2 1 3 4]); - -fid = fopen('t10k-labels-idx1-ubyte', 'r'); -d = fread(fid, inf, '*uint8'); -fclose(fid); -Y = d(9:end) + 1; - -save mnist-test X Y diff --git a/matlab/tests/test_prediction.m b/matlab/tests/test_prediction.m deleted file mode 100644 index c6aff1cd0472..000000000000 --- a/matlab/tests/test_prediction.m +++ /dev/null @@ -1,122 +0,0 @@ -% Licensed to the Apache Software Foundation (ASF) under one -% or more contributor license agreements. See the NOTICE file -% distributed with this work for additional information -% regarding copyright ownership. The ASF licenses this file -% to you under the Apache License, Version 2.0 (the -% "License"); you may not use this file except in compliance -% with the License. You may obtain a copy of the License at -% -% http://www.apache.org/licenses/LICENSE-2.0 -% -% Unless required by applicable law or agreed to in writing, -% software distributed under the License is distributed on an -% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -% KIND, either express or implied. See the License for the -% specific language governing permissions and limitations -% under the License. 
- -%% prepare - -addpath('..') - -if ~exist('mnist-test.mat', 'file') - system('wget --no-check-certificate https://github.com/dmlc/web-data/raw/master/mxnet/matlab/mnist-test.mat'); -end - -if ~exist('model/mnist-lenet-0-0010.params', 'file') - system('wget --no-check-certificate https://github.com/dmlc/web-data/raw/master/mxnet/matlab/mnist-lenet.tar.gz'); - system('tar -zxf mnist-lenet.tar.gz'); -end - -%% load data and model - -load mnist-test -clear model -model = mxnet.model; -model.load('model/mnist-lenet-0', 10); - -%% predict - -err = 0; -batch = 1000; -for i = 1 : length(Y) / batch - ix = (i-1)*batch+1 : i*batch; - x = X(:,:,:,ix); - pred = model.forward(x, 'gpu', 0); - [~, k] = max(pred); - err = err + nnz(k ~= Y(ix)'); -end - -err = err / length(Y); -fprintf('prediction error: %f\n', err) - -%% -% ix = 1:2; -% x = X(:,:,:,ix); -% pred = model.forward(x, {'pooling1', 'fullyconnected1', 'softmax'}); - -%% -% batch = 1000; -% e = 0; -% for i = 1 : batch -% x = single(X(:,:,:,i)); -% pred = model.forward(x); -% [~, k] = max(pred); -% e = e + (k == Y(i)); -% end - -% e / batch - -% %% load data -% load cifar10-test.mat -% img_mean = [123.68, 116.779, 103.939]; - -% %% -% clear model -% model = mxnet.model; -% model.load('model/cifar10-incept-bn-0', 20); - -% %% -% batch = 100; -% x = zeros(28,28,3,batch); -% for i = 1 : batch -% x(:,:,:,i) = single(imresize(X(:,:,:,i), [28, 28])); -% x = x(:,:,[3 2 1],:); -% end -% % x = permute(x, [2 1 3 4]); - -% x = x - 120; -% % for i = 1 : 3 -% % x(:,:,i,:) = x(:,:,i,:) - img_mean(i); -% % end - - -% pred = model.forward(x, 'gpu', 0); - -% [~,i] = max(reshape(pred(:), 10, batch)); -% nnz(i' == Y(1:batch)) / length(i) - -% %% - -% batch = 100; -% e = 0; -% for i = 1 : batch -% x = single(imresize(X(:,:,:,i), [28, 28])) - 120; -% for j = 1 : 3 -% x(:,:,j) = x(:,:,j); -% end -% pred = model.forward(x); -% [~, k] = max(pred); -% e = e + (k == Y(i)); -% end - -% e / batch - - -% %% load bin - -% a = fopen('mean.bin', 'r'); -% yy = fread(a, 14, '*int32'); -% mm = fread(a, inf, '*single'); -% fclose(a) -% % nn = mm(14:end-6); diff --git a/python/mxnet/base.py b/python/mxnet/base.py index 8e9700fa6c74..9a25d9dc43aa 100644 --- a/python/mxnet/base.py +++ b/python/mxnet/base.py @@ -362,7 +362,6 @@ def _load_lib(): OpHandle = ctypes.c_void_p CachedOpHandle = ctypes.c_void_p SymbolHandle = ctypes.c_void_p -ExecutorHandle = ctypes.c_void_p DataIterCreatorHandle = ctypes.c_void_p DataIterHandle = ctypes.c_void_p DatasetHandle = ctypes.c_void_p diff --git a/scala-package/.gitignore b/scala-package/.gitignore deleted file mode 100644 index dadc000c612e..000000000000 --- a/scala-package/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -target/ -.flattened-pom.xml -core/src/main/scala/org/apache/mxnet/NDArrayAPIBase.scala -core/src/main/scala/org/apache/mxnet/NDArrayBase.scala -core/src/main/scala/org/apache/mxnet/NDArrayRandomAPIBase.scala -core/src/main/scala/org/apache/mxnet/javaapi/NDArrayBase.scala -core/src/main/scala/org/apache/mxnet/SymbolAPIBase.scala -core/src/main/scala/org/apache/mxnet/SymbolBase.scala -core/src/main/scala/org/apache/mxnet/SymbolRandomAPIBase.scala -examples/scripts/infer/images/ -examples/scripts/infer/models/ -examples/scripts/infer/objectdetector/boundingImage.png diff --git a/scala-package/.mvn/wrapper/.gitignore b/scala-package/.mvn/wrapper/.gitignore deleted file mode 100644 index 576738f6a2be..000000000000 --- a/scala-package/.mvn/wrapper/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -maven-wrapper.jar - diff --git 
a/scala-package/.mvn/wrapper/MavenWrapperDownloader.java b/scala-package/.mvn/wrapper/MavenWrapperDownloader.java deleted file mode 100755 index fa4f7b499fdd..000000000000 --- a/scala-package/.mvn/wrapper/MavenWrapperDownloader.java +++ /dev/null @@ -1,110 +0,0 @@ -/* -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. -*/ - -import java.net.*; -import java.io.*; -import java.nio.channels.*; -import java.util.Properties; - -public class MavenWrapperDownloader { - - /** - * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. - */ - private static final String DEFAULT_DOWNLOAD_URL = - "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar"; - - /** - * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to - * use instead of the default one. - */ - private static final String MAVEN_WRAPPER_PROPERTIES_PATH = - ".mvn/wrapper/maven-wrapper.properties"; - - /** - * Path where the maven-wrapper.jar will be saved to. - */ - private static final String MAVEN_WRAPPER_JAR_PATH = - ".mvn/wrapper/maven-wrapper.jar"; - - /** - * Name of the property which should be used to override the default download url for the wrapper. - */ - private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; - - public static void main(String args[]) { - System.out.println("- Downloader started"); - File baseDirectory = new File(args[0]); - System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); - - // If the maven-wrapper.properties exists, read it and check if it contains a custom - // wrapperUrl parameter. - File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); - String url = DEFAULT_DOWNLOAD_URL; - if(mavenWrapperPropertyFile.exists()) { - FileInputStream mavenWrapperPropertyFileInputStream = null; - try { - mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); - Properties mavenWrapperProperties = new Properties(); - mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); - url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); - } catch (IOException e) { - System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); - } finally { - try { - if(mavenWrapperPropertyFileInputStream != null) { - mavenWrapperPropertyFileInputStream.close(); - } - } catch (IOException e) { - // Ignore ... 
-                }
-            }
-        }
-        System.out.println("- Downloading from: " + url);
-
-        File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH);
-        if(!outputFile.getParentFile().exists()) {
-            if(!outputFile.getParentFile().mkdirs()) {
-                System.out.println(
-                        "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'");
-            }
-        }
-        System.out.println("- Downloading to: " + outputFile.getAbsolutePath());
-        try {
-            downloadFileFromURL(url, outputFile);
-            System.out.println("Done");
-            System.exit(0);
-        } catch (Throwable e) {
-            System.out.println("- Error downloading");
-            e.printStackTrace();
-            System.exit(1);
-        }
-    }
-
-    private static void downloadFileFromURL(String urlString, File destination) throws Exception {
-        URL website = new URL(urlString);
-        ReadableByteChannel rbc;
-        rbc = Channels.newChannel(website.openStream());
-        FileOutputStream fos = new FileOutputStream(destination);
-        fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
-        fos.close();
-        rbc.close();
-    }
-
-}
diff --git a/scala-package/.mvn/wrapper/maven-wrapper.properties b/scala-package/.mvn/wrapper/maven-wrapper.properties
deleted file mode 100755
index b6e6781222b5..000000000000
--- a/scala-package/.mvn/wrapper/maven-wrapper.properties
+++ /dev/null
@@ -1 +0,0 @@
-distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.0/apache-maven-3.6.0-bin.zip
\ No newline at end of file
diff --git a/scala-package/LICENSE b/scala-package/LICENSE
deleted file mode 100644
index 8f71f43fee3f..000000000000
--- a/scala-package/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
diff --git a/scala-package/README.md b/scala-package/README.md
deleted file mode 100644
index 2af27dfb8b0c..000000000000
--- a/scala-package/README.md
+++ /dev/null
@@ -1,250 +0,0 @@
-<!--- Licensed to the Apache Software Foundation (ASF) under one -->
-<!--- or more contributor license agreements.  See the NOTICE file -->
-<!--- distributed with this work for additional information -->
-<!--- regarding copyright ownership.  The ASF licenses this file -->
-<!--- to you under the Apache License, Version 2.0 (the -->
-<!--- "License"); you may not use this file except in compliance -->
-<!--- with the License.  You may obtain a copy of the License at -->
-
-<!---   http://www.apache.org/licenses/LICENSE-2.0 -->
-
-<!--- Unless required by applicable law or agreed to in writing, -->
-<!--- software distributed under the License is distributed on an -->
-<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
-<!--- KIND, either express or implied.  See the License for the -->
-<!--- specific language governing permissions and limitations -->
-<!--- under the License. -->
-
-MXNet Package for Scala/Java
-=====
-
-The MXNet Scala/Java Package brings flexible and efficient GPU/CPU computing and state-of-the-art deep learning to the JVM.
-
-- It enables you to write seamless tensor/matrix computation with multiple GPUs
-  in Scala, Java and other JVM languages.
-- It also enables you to construct and customize state-of-the-art deep learning models in JVM languages,
-  and apply them to tasks such as image classification and data science challenges.
-- The Scala/Java _Inference API_ provides an easy out-of-the-box solution for performing inference tasks using pre-trained MXNet models.
-
-Pre-Built Maven Packages
-------------------------
-
-### Stable ###
-
-The MXNet Scala/Java packages can be easily included in your Maven-managed project.
-The stable jar files for the packages are available on the [MXNet Maven Package Repository](https://search.maven.org/search?q=g:org.apache.mxnet).
-Currently we provide packages for Linux (Ubuntu 16.04) (CPU and GPU) and macOS (CPU only). Stable packages for Windows and CentOS will come soon. For now, if you have a CentOS machine, follow the ```Build From Source``` section below.
-
-To add the MXNet Scala/Java packages to your project, add the dependency shown below corresponding to your platform, under the ```dependencies``` tag in your project's ```pom.xml```:
-
-**Linux GPU**
-
-maven badge
-
-```HTML
-<dependency>
-    <groupId>org.apache.mxnet</groupId>
-    <artifactId>mxnet-full_2.11-linux-x86_64-gpu</artifactId>
-    <version>[1.4.0,)</version>
-</dependency>
-```
-
-**Linux CPU**
-
-maven badge
-
-```HTML
-<dependency>
-    <groupId>org.apache.mxnet</groupId>
-    <artifactId>mxnet-full_2.11-linux-x86_64-cpu</artifactId>
-    <version>[1.4.0,)</version>
-</dependency>
-```
-
-**macOS CPU**
-
-maven badge
-
-```HTML
-<dependency>
-    <groupId>org.apache.mxnet</groupId>
-    <artifactId>mxnet-full_2.11-osx-x86_64-cpu</artifactId>
-    <version>[1.4.0,)</version>
-</dependency>
-```
-
-**Note:** ```<version>[1.4.0,)</version>``` indicates that we will fetch packages with version 1.4.0 or higher. This ensures that the pom.xml is always able to fetch the latest jar files from Maven.
-
-### Nightly ###
-
-Apart from these, the nightly builds representing the bleeding-edge development on the Scala/Java packages are also available on the [MXNet Maven Nexus Package Repository](https://repository.apache.org/#nexus-search;gav~org.apache.mxnet~~~~).
-Currently we provide nightly packages for Linux (CPU and GPU) and macOS (CPU only). The Linux nightly jar files also work on CentOS. Nightly packages for Windows will come soon.
-
-Add the following ```repository``` to your project's ```pom.xml``` file:
-
-````html
-<repositories>
-    <repository>
-        <id>Apache Snapshot</id>
-        <url>https://repository.apache.org/content/groups/snapshots</url>
-    </repository>
-</repositories>
-````
-
-Also, add the dependency which corresponds to your platform to the ```dependencies``` tag:
-
-**Linux GPU**
-
-maven badge
-
-```HTML
-<dependency>
-    <groupId>org.apache.mxnet</groupId>
-    <artifactId>mxnet-full_2.11-linux-x86_64-gpu</artifactId>
-    <version>[2.0.0-SNAPSHOT,)</version>
-</dependency>
-```
-
-**Linux CPU**
-
-maven badge
-
-```HTML
-<dependency>
-    <groupId>org.apache.mxnet</groupId>
-    <artifactId>mxnet-full_2.11-linux-x86_64-cpu</artifactId>
-    <version>[2.0.0-SNAPSHOT,)</version>
-</dependency>
-```
-
-**macOS CPU**
-
-maven badge
-```HTML
-<dependency>
-    <groupId>org.apache.mxnet</groupId>
-    <artifactId>mxnet-full_2.11-osx-x86_64-cpu</artifactId>
-    <version>[2.0.0-SNAPSHOT,)</version>
-</dependency>
-```
-
-**Note:** ```<version>[2.0.0-SNAPSHOT,)</version>``` indicates that we will fetch packages with version 2.0.0 or higher. This ensures that the pom.xml is always able to fetch the latest jar files from the Maven Snapshot repository.
-
-Build From Source
------------------
-
-The [Installation Guide](https://mxnet.apache.org/get_started) contains instructions to install mxnet or build it from source. The Scala/Java package is built from source using Maven. The Maven build assumes you already have a ``lib/libmxnet.so`` file.
-If you have built MXNet from source and are looking to set up Scala/Java from that point, you may simply run the following from the MXNet source root; the build will detect your platform (OSX/Linux) and libmxnet.so flavor (CPU/GPU):
-
-```bash
-cd scala-package
-mvn install
-```
-
-You can also run the unit tests and integration tests on the Scala Package by:
-
-```bash
-cd scala-package
-mvn integration-test -DskipTests=false
-```
-
-Or run a subset of unit tests, for example:
-
-```bash
-cd scala-package
-mvn -Dsuites=org.apache.mxnet.NDArraySuite integration-test
-```
-
-If everything goes well, you will find jars for `assembly`, `core` and `example` modules.
-Also it produces the native library in `native/target`, which you can use in conjunction with the `core` module.
-
-Deploy to repository
---------------------
-
-By default, `maven deploy` will deploy artifacts to the local file system; you can find them in the ``scala-package/deploy/target/repo`` folder.
-
-For nightly builds (typically done by CI), a snapshot build will be uploaded to an Apache snapshot repository with the following command:
-
-```bash
-cd scala-package
-mvn deploy -Pnightly
-```
-
-Use the following command when performing a release (pushes artifacts to an Apache staging repository):
-
-```bash
-cd scala-package
-mvn deploy -Pstaging
-```
-
-Examples & Usage
--------
-Assuming you use `mvn install`, you can find the `mxnet-full_scala_version-INTERNAL.jar`, e.g. `mxnet-full_2.11-INTERNAL.jar`, under the path `incubator-mxnet/scala-package/assembly/target`.
-
-Add the following configuration to `pom.xml`
-```HTML
-<dependency>
-    <groupId>org.apache.mxnet</groupId>
-    <artifactId>mxnet-full_2.11-INTERNAL</artifactId>
-    <version>2.0.0</version>
-    <scope>system</scope>
-    <systemPath>path_to_jar/mxnet-full_2.11-INTERNAL.jar</systemPath>
-</dependency>
-```
-If you see the following error message:
-```
-Error: A JNI error has occurred, please check your installation and try again
-Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/mxnet/NDArray
-        at java.lang.Class.getDeclaredMethods0(Native Method)
-        at java.lang.Class.privateGetDeclaredMethods(Class.java:2701)
-        at java.lang.Class.privateGetMethodRecursive(Class.java:3048)
-        at java.lang.Class.getMethod0(Class.java:3018)
-        at java.lang.Class.getMethod(Class.java:1784)
-        at sun.launcher.LauncherHelper.validateMainClass(LauncherHelper.java:544)
-        at sun.launcher.LauncherHelper.checkAndLoadMain(LauncherHelper.java:526)
-Caused by: java.lang.ClassNotFoundException: org.apache.mxnet.NDArray
-        at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
-        at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
-        at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:331)
-        at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
-```
-please make sure your $CLASSPATH contains `mxnet-full_scala_version-INTERNAL.jar`.
-
-- To set up the Scala Project using the IntelliJ IDE on macOS, follow the instructions [here](https://mxnet.apache.org/tutorials/scala/mxnet_scala_on_intellij.html).
-- Several examples of using the Scala APIs are provided in the [Scala Examples Folder](https://github.com/apache/incubator-mxnet/tree/master/scala-package/examples/)
-
-Scala Training APIs
--------
-- Module API :
-[The Module API](https://mxnet.apache.org/api/scala/module.html) provides an intermediate and high-level interface for performing computation with neural networks in MXNet. Modules provide high-level APIs for training, predicting, and evaluating.
-
-- KVStore API :
-To run training over multiple GPUs and multiple hosts, one can use the [KVStore API](https://mxnet.apache.org/api/scala/kvstore.html).
-
-- IO/Data Loading :
-MXNet Scala provides APIs for preparing data to feed as an input to models. Check out [Data Loading API](https://mxnet.apache.org/api/scala/io.html) for more info.
-
-Other available Scala APIs for training can be found [here](https://mxnet.apache.org/api/scala/index.html).
-
-
-Scala Inference APIs
--------
-The [Scala Inference APIs](https://mxnet.apache.org/api/scala/infer.html) provide an easy, out-of-the-box solution to load a pre-trained MXNet model and run inference on it.
The Inference APIs are present in the [Infer Package](https://github.com/apache/incubator-mxnet/tree/master/scala-package/infer) under the MXNet Scala Package repository, while the documentation for the Infer API is available [here](https://mxnet.apache.org/api/scala/docs/index.html#org.apache.mxnet.infer.package). - -Java Inference APIs -------- -The [Java Inference APIs](https://mxnet.apache.org/api/java/index.html) also provide an easy, out of the box solution to load a pre-trained MXNet model and run inference on it. The Inference APIs are present in the [Infer Package](https://github.com/apache/incubator-mxnet/tree/master/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi) under the MXNet Scala Package repository, while the documentation for the Infer API is available [here](https://mxnet.apache.org/api/java/docs/index.html#org.apache.mxnet.infer.package). -More APIs will be added to the Java Inference APIs soon. - -JVM Memory Management -------- -The Scala/Java APIs also provide an automated resource management system, thus making it easy to manage the native memory footprint without any degradation in performance. -More details about JVM Memory Management are available [here](https://github.com/apache/incubator-mxnet/blob/master/scala-package/memory-management.md). - -License -------- -MXNet Scala Package is licensed under [Apache-2](https://github.com/apache/incubator-mxnet/blob/master/scala-package/LICENSE) license. - -MXNet uses some 3rd party softwares. Following 3rd party license files are bundled inside Scala jar file: -* cub/LICENSE.TXT -* mkldnn/external/mklml_mac_2019.0.1.20180928/license.txt diff --git a/scala-package/assembly/pom.xml b/scala-package/assembly/pom.xml deleted file mode 100644 index b93c181bc1d4..000000000000 --- a/scala-package/assembly/pom.xml +++ /dev/null @@ -1,129 +0,0 @@ - - - - 4.0.0 - - org.apache.mxnet - mxnet-parent - INTERNAL - ../pom.xml - - - mxnet-full_2.11 - Assembly Scala Package - pom - - - ${project.parent.basedir}/.. - - - - - org.apache.mxnet - mxnet-core - INTERNAL - - - org.apache.mxnet - libmxnet-scala - INTERNAL - ${libtype} - - - org.apache.mxnet - mxnet-infer - INTERNAL - - - - - - staging - - - - org.apache.maven.plugins - maven-gpg-plugin - 1.6 - - - sign-artifacts - deploy - - sign - - - - - - - - - - - - - org.apache.maven.plugins - maven-assembly-plugin - - - binary-jar - package - - single - - - false - - src/main/assembly/assembly.xml - - - - - sources-jar - package - - single - - - true - - src/main/assembly/source.xml - - - - - javadoc-jar - package - - single - - - true - - src/main/assembly/javadoc.xml - - - - - - - - diff --git a/scala-package/assembly/src/main/assembly/assembly.xml b/scala-package/assembly/src/main/assembly/assembly.xml deleted file mode 100644 index 655c4fdb6ef2..000000000000 --- a/scala-package/assembly/src/main/assembly/assembly.xml +++ /dev/null @@ -1,73 +0,0 @@ - - - full - - jar - - false - - - - *:*:jar - - - org.scala-lang:* - org.scala-lang.modules:* - commons-io:commons-io - commons-codec:commons-codec - org.slf4j:slf4j-api - args4j:args4j - - . - false - true - runtime - - - lib/native - libmxnet-scala.${libtype} - false - false - false - - org.apache.mxnet:libmxnet-scala:${libtype} - - - - - - ${MXNET_DIR}/lib - - libmxnet.so - libtvm_runtime.so - libgfortran.so.4 - libquadmath.so.0 - - lib/native - - - ${MXNET_DIR}/licenses - - LICENSE.binary.dependencies - NOTICE - LICENSE - - . 
- - - diff --git a/scala-package/assembly/src/main/assembly/javadoc.xml b/scala-package/assembly/src/main/assembly/javadoc.xml deleted file mode 100644 index c6df96a3f5a5..000000000000 --- a/scala-package/assembly/src/main/assembly/javadoc.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - bundle - - jar - - false - - - ${rootdir}/core/target/site/scaladocs - . - - - diff --git a/scala-package/assembly/src/main/assembly/source.xml b/scala-package/assembly/src/main/assembly/source.xml deleted file mode 100644 index 1f004e811cfc..000000000000 --- a/scala-package/assembly/src/main/assembly/source.xml +++ /dev/null @@ -1,35 +0,0 @@ - - - src - - - jar - - false - - - ${rootdir}/core/src/main/scala - - **\/*.scala - - . - - - diff --git a/scala-package/core/pom.xml b/scala-package/core/pom.xml deleted file mode 100644 index d7b9f907df55..000000000000 --- a/scala-package/core/pom.xml +++ /dev/null @@ -1,163 +0,0 @@ - - - - 4.0.0 - - org.apache.mxnet - mxnet-parent - INTERNAL - ../pom.xml - - - mxnet-core - MXNet Scala Package - Core - - - false - - - - - - org.codehaus.mojo - native-maven-plugin - true - - - javah - verify - - default - ${project.build.directory}/custom-javah - ${basedir} - org_apache_mxnet_native_c_api.h - - org.apache.mxnet.LibInfo - - - - javah - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.6.0 - - - verify-javah - verify - - exec - - - diff - ${project.build.directory}/custom-javah/org_apache_mxnet_native_c_api.h ${project.parent.basedir}/native/src/main/native/org_apache_mxnet_native_c_api.h - - - - apidoc-generation - generate-sources - - exec - - - java - ${project.parent.basedir} - -classpath %classpath:${rootdir}/init/target/classes:${rootdir}/macros/target/classes -Djava.library.path=${rootdir}/native/target org.apache.mxnet.APIDocGenerator ${rootdir}/core/src/main/scala/org/apache/mxnet/ - - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - META-INF/*.SF - META-INF/*.DSA - META-INF/*.RSA - - - - - org.apache.maven.plugins - maven-clean-plugin - 3.1.0 - - - - src/main/scala/org/apache/mxnet - - NDArrayAPIBase.scala - NDArrayBase.scala - NDArrayRandomAPIBase.scala - javaapi/NDArrayBase.scala - SymbolAPIBase.scala - SymbolBase.scala - SymbolRandomAPIBase.scala - - false - - - - - - org.scalatest - scalatest-maven-plugin - - - -Djava.library.path=${project.parent.basedir}/native/target \ - -Dlog4j.configuration=file://${project.basedir}/src/test/resources/log4j.properties - - - - - org.scalastyle - scalastyle-maven-plugin - - - - - - - org.apache.mxnet - mxnet-macros - INTERNAL - provided - - - org.apache.mxnet - mxnet-scala-init - INTERNAL - provided - - - - org.mockito - mockito-all - 1.10.19 - test - - - diff --git a/scala-package/core/scripts/get_cifar_data.sh b/scala-package/core/scripts/get_cifar_data.sh deleted file mode 100755 index a5cdcda419b7..000000000000 --- a/scala-package/core/scripts/get_cifar_data.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -set -e - -if [ ! -z "$MXNET_HOME" ]; then - data_path="$MXNET_HOME" -else - data_path="./data" -fi - -if [ ! -d "$data_path" ]; then - mkdir -p "$data_path" -fi - -cifar_data_path="$data_path/cifar10.zip" -if [ ! -f "$cifar_data_path" ]; then - curl -L -o $cifar_data_path http://data.mxnet.io/mxnet/data/cifar10.zip - cd $data_path - unzip -u cifar10.zip -fi diff --git a/scala-package/core/scripts/get_mnist_data.sh b/scala-package/core/scripts/get_mnist_data.sh deleted file mode 100755 index a7be96a9f401..000000000000 --- a/scala-package/core/scripts/get_mnist_data.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -set -e - -if [ ! -z "$MXNET_HOME" ]; then - data_path="$MXNET_HOME" -else - data_path="./data" -fi - -if [ ! -d "$data_path" ]; then - mkdir -p "$data_path" -fi - -mnist_data_path="$data_path/mnist.zip" -if [ ! -f "$mnist_data_path" ]; then - curl -L -o $mnist_data_path http://data.mxnet.io/mxnet/data/mnist.zip - cd $data_path - unzip -u mnist.zip -fi diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/AttrScope.scala b/scala-package/core/src/main/scala/org/apache/mxnet/AttrScope.scala deleted file mode 100644 index 6dac750fb446..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/AttrScope.scala +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -/** - * Attribute manager for scoping. - * User can also inherit this object to change naming behavior. 
- */ -private[mxnet] class AttrScope(attr: Map[String, String] = Map.empty) { - private var _attr = attr - /** - * Get the attribute dict given the attribute set by the symbol. - * @param userDefinedAttr The attribute passed in by user during symbol creation. - * @return Updated attributes to add other scope related attributes. - */ - def get(userDefinedAttr: Option[Map[String, String]]): Map[String, String] = { - _attr ++ userDefinedAttr.getOrElse(Map.empty[String, String]) - } - - def withScope[T](body: => T): T = { - val oldAttrScope = AttrScope.current - this._attr = AttrScope.current._attr ++ this._attr - AttrScope.setCurrentAttr(this) - try { - body - } finally { - AttrScope.setCurrentAttr(oldAttrScope) - } - } -} - -private[mxnet] object AttrScope { - private var _current = new AttrScope() - def current: AttrScope = _current - private def setCurrentAttr(attr: AttrScope): Unit = { - _current = attr - } - - def apply(attr: Map[String, String] = Map.empty): AttrScope = new AttrScope(attr) -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Base.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Base.scala deleted file mode 100644 index c3378ec4074c..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Base.scala +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet - -import org.apache.mxnet.util.NativeLibraryLoader -import org.slf4j.{Logger, LoggerFactory} - -import scala.Specializable.Group - -private[mxnet] object Base { - private val logger: Logger = LoggerFactory.getLogger("MXNetJVM") - - // type definitions - class RefInt(val value: Int = 0) - class RefLong(val value: Long = 0) - class RefFloat(val value: Float = 0) - class RefString(val value: String = null) - - type MXUint = Int - type MXFloat = Float - type CPtrAddress = Long - - type NDArrayHandle = CPtrAddress - type FunctionHandle = CPtrAddress - type DataIterHandle = CPtrAddress - type DataIterCreator = CPtrAddress - type KVStoreHandle = CPtrAddress - type ExecutorHandle = CPtrAddress - type SymbolHandle = CPtrAddress - type RecordIOHandle = CPtrAddress - type RtcHandle = CPtrAddress - - type MXUintRef = RefInt - type MXFloatRef = RefFloat - type NDArrayHandleRef = RefLong - type FunctionHandleRef = RefLong - type DataIterHandleRef = RefLong - type DataIterCreatorRef = RefLong - type KVStoreHandleRef = RefLong - type ExecutorHandleRef = RefLong - type SymbolHandleRef = RefLong - type RecordIOHandleRef = RefLong - type RtcHandleRef = RefLong - - val MX_REAL_TYPE = DType.Float32 - - // The primitives currently supported for NDArray operations - val MX_PRIMITIVES = new Group ((Double, Float)) - - - /* Find the native libray either on the path or copy it from - * the jar in the dependency - * jar into a temp directory and load it - */ - try { - try { - tryLoadLibraryOS("mxnet-scala") - } catch { - case e: UnsatisfiedLinkError => - logger.info("Copying and loading native library from the jar archive") - NativeLibraryLoader.loadLibrary("mxnet-scala") - } - } catch { - case e: UnsatisfiedLinkError => - logger.error("Couldn't find native library mxnet-scala") - throw e - } - - val _LIB = new LibInfo - checkCall(_LIB.nativeLibInit()) - - // TODO: shutdown hook won't work on Windows - Runtime.getRuntime.addShutdownHook(new Thread() { - override def run(): Unit = { - notifyShutdown() - } - }) - - @throws(classOf[UnsatisfiedLinkError]) - private def tryLoadLibraryOS(libname: String): Unit = { - logger.info(s"Try loading $libname from native path.") - System.loadLibrary(libname) - } - - // helper function definitions - /** - * Check the return value of C API call - * - * This function will raise exception when error occurs. - * Wrap every API call with this function - * @param ret return value from API calls - */ - def checkCall(ret: Int): Unit = { - if (ret != 0) { - throw new MXNetError(_LIB.mxGetLastError()) - } - } - - // Notify MXNet about a shutdown - private def notifyShutdown(): Unit = { - checkCall(_LIB.mxNotifyShutdown()) - } - - // Convert ctypes returned doc string information into parameters docstring. 
- def ctypes2docstring( - argNames: Seq[String], - argTypes: Seq[String], - argDescs: Seq[String]): String = { - - val params = - (argNames zip argTypes zip argDescs) map { case ((argName, argType), argDesc) => - val desc = if (argDesc.isEmpty) "" else s"\n$argDesc" - s"$argName : $argType$desc" - } - s"Parameters\n----------\n${params.mkString("\n")}\n" - } -} - -class MXNetError(val err: String) extends Exception(err) - -// Some type-classes to ease the work in Symbol.random and NDArray.random modules - -class SymbolOrScalar[T](val isScalar: Boolean) -object SymbolOrScalar { - def apply[T](implicit ev: SymbolOrScalar[T]): SymbolOrScalar[T] = ev - implicit object FloatWitness extends SymbolOrScalar[Float](true) - implicit object IntWitness extends SymbolOrScalar[Int](true) - implicit object SymbolWitness extends SymbolOrScalar[Symbol](false) -} - -class NDArrayOrScalar[T](val isScalar: Boolean) -object NDArrayOrScalar { - def apply[T](implicit ev: NDArrayOrScalar[T]): NDArrayOrScalar[T] = ev - implicit object FloatWitness extends NDArrayOrScalar[Float](true) - implicit object IntWitness extends NDArrayOrScalar[Int](true) - implicit object NDArrayWitness extends NDArrayOrScalar[NDArray](false) -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Callback.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Callback.scala deleted file mode 100644 index e98f2c7ce9ee..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Callback.scala +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.slf4j.{Logger, LoggerFactory} - -/** - * Callback functions that can be used to track various status during epoch. 
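The SymbolOrScalar and NDArrayOrScalar witnesses above are plain implicit type classes; a sketch of how a context bound would consume them (describeBounds is a hypothetical helper, not part of the removed API):

    // Accepts Float, Int, or NDArray arguments and branches on the witness.
    def describeBounds[T: NDArrayOrScalar](low: T, high: T): String =
      if (NDArrayOrScalar[T].isScalar) s"scalar range [$low, $high]"
      else "range supplied element-wise by two NDArrays"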
- */ -object Callback { - - class Speedometer(batchSize: Int, frequent: Int = 50) extends BatchEndCallback { - private val logger: Logger = LoggerFactory.getLogger(classOf[Speedometer]) - private var init = false - private var tic: Long = 0L - private var lastCount: Int = 0 - - override def invoke(epoch: Int, count: Int, evalMetric: EvalMetric): Unit = { - if (lastCount > count) { - init = false - } - lastCount = count - - if (init) { - if (count % frequent == 0) { - val speed = frequent.toDouble * batchSize / (System.currentTimeMillis - tic) * 1000 - if (evalMetric != null) { - val (name, value) = evalMetric.get - name.zip(value).foreach { case (n, v) => - logger.info("Epoch[%d] Batch [%d]\tSpeed: %.2f samples/sec\tTrain-%s=%f".format( - epoch, count, speed, n, v)) - } - } else { - logger.info("Iter[%d] Batch [%d]\tSpeed: %.2f samples/sec".format(epoch, count, speed)) - } - tic = System.currentTimeMillis - } - } else { - init = true - tic = System.currentTimeMillis - } - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Context.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Context.scala deleted file mode 100644 index b04cd31ff67f..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Context.scala +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import scala.language.implicitConversions - -object Context { - val devtype2str = Map(1 -> "cpu", 2 -> "gpu", 3 -> "cpu_pinned") - val devstr2type = Map("cpu" -> 1, "gpu" -> 2, "cpu_pinned" -> 3) - private var _defaultCtx = new Context("cpu", 0) - - def defaultCtx: Context = _defaultCtx - - def cpu(deviceId: Int = 0): Context = { - new Context("cpu", deviceId) - } - - def gpu(deviceId: Int = 0): Context = { - new Context("gpu", deviceId) - } - - implicit def ctx2Array(ctx: Context): Array[Context] = Array(ctx) -} - -/** - * Constructing a context which is used to specify the device and device type that will - * be utilized by the engine. - * - * @param deviceTypeName {'cpu', 'gpu'} String representing the device type - * @param deviceId (default=0) The device id of the device, needed for GPU - */ -class Context(deviceTypeName: String, val deviceId: Int = 0) extends Serializable { - val deviceTypeid: Int = Context.devstr2type(deviceTypeName) - - def this(context: Context) = { - this(context.deviceType, context.deviceId) - } - - def withScope[T](body: => T): T = { - val oldDefaultCtx = Context.defaultCtx - Context._defaultCtx = this - try { - body - } finally { - Context._defaultCtx = oldDefaultCtx - } - } - - /** - * Return device type of current context. 
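For reference, the Speedometer above reports frequent * batchSize / elapsedMillis * 1000 samples/sec, so 50 batches of 128 samples logged after 2000 ms prints 3200.00 samples/sec. Context.withScope, also above, is the usual Scala loan pattern: it swaps the process-wide default context and restores the previous one in a finally block. A short usage sketch, assuming only the API shown in this file:

    // NDArrays and executors created in the body default to gpu(0);
    // the previous default is restored on exit, even on exception.
    Context.gpu(0).withScope {
      // ... allocate and bind here ...
    }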
- * @return device_type - */ - def deviceType: String = Context.devtype2str(deviceTypeid) - - override def toString: String = { - s"$deviceType($deviceId)" - } - - override def equals(other: Any): Boolean = { - if (other != null && other.isInstanceOf[Context]) { - val otherInst = other.asInstanceOf[Context] - otherInst.deviceId == deviceId && otherInst.deviceTypeid == deviceTypeid - } else { - false - } - } - - override def hashCode: Int = { - toString.hashCode - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/DType.scala b/scala-package/core/src/main/scala/org/apache/mxnet/DType.scala deleted file mode 100644 index 1d5cc2847ac0..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/DType.scala +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -object DType extends Enumeration { - type DType = Value - val Float32 = Value(0, "float32") - val Float64 = Value(1, "float64") - val Float16 = Value(2, "float16") - val UInt8 = Value(3, "uint8") - val Int32 = Value(4, "int32") - val Int8 = Value(5, "int8") - val Int64 = Value(6, "int64") - val Unknown = Value(-1, "unknown") - private[mxnet] def numOfBytes(dtype: DType): Int = { - dtype match { - case DType.UInt8 | DType.Int8 => 1 - case DType.Int32 => 4 - case DType.Float16 => 2 - case DType.Float32 => 4 - case DType.Float64 | DType.Int64 => 8 - case DType.Unknown => 0 - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/EvalMetric.scala b/scala-package/core/src/main/scala/org/apache/mxnet/EvalMetric.scala deleted file mode 100644 index aedf7c8327c1..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/EvalMetric.scala +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
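numOfBytes above is what turns a logical shape into a raw buffer size, e.g. a (64, 3, 32, 32) Float32 tensor occupies 64*3*32*32*4 = 786432 bytes. A sketch (it has to live inside the org.apache.mxnet package, since numOfBytes is private[mxnet]):

    // Raw byte size of a tensor, given its shape and element dtype.
    def byteSize(shape: Seq[Int], dtype: DType.DType): Long =
      shape.product.toLong * DType.numOfBytes(dtype)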
- */ - -package org.apache.mxnet - -import scala.collection.mutable.ArrayBuffer - -/** - * Base class of all evaluation metrics - * @param name Metric name - */ -abstract class EvalMetric(protected val name: String) { - - protected var numInst: Int = 0 - protected var sumMetric: Double = 0.0d - - /** - * Update the internal evaluation. - * - * @param labels The labels of the data - * @param preds Predicted values. - */ - def update(labels: IndexedSeq[NDArray], preds: IndexedSeq[NDArray]): Unit - - /** - * Clear the internal statistics to initial state. - */ - def reset(): Unit = { - this.numInst = 0 - this.sumMetric = 0.0d - } - - /** - * Get the current evaluation result. - * @return name, Name of the metric - * value, Value of the evaluation - */ - def get: (Array[String], Array[Float]) = { - (Array(this.name), Array((this.sumMetric / this.numInst).toFloat)) - } -} - -/** - * Manage multiple evaluation metrics. - */ -class CompositeEvalMetric extends EvalMetric("composite") { - private val metrics = ArrayBuffer[EvalMetric]() - - // Add a child metric. - def add(metric: EvalMetric): Unit = { - this.metrics += metric - } - - // Get a child metric. - def getMetric(index: Int): EvalMetric = { - require(index < this.metrics.length, - s"Metric index $index is out of range 0 and ${this.metrics.length}") - this.metrics(index) - } - - override def update(labels: IndexedSeq[NDArray], preds: IndexedSeq[NDArray]): Unit = { - for (metric <- this.metrics) { - metric.update(labels, preds) - } - } - - override def reset(): Unit = { - for (metric <- this.metrics) { - metric.reset() - } - } - - override def get(): (Array[String], Array[Float]) = { - val names = ArrayBuffer[String]() - val results = ArrayBuffer[Float]() - for (metric <- this.metrics) { - val (name, result) = metric.get - names += name(0) - results += result(0) - } - (names.toArray, results.toArray) - } -} - -// Classification metrics - -/** - * Calculate accuracy - */ -class Accuracy extends EvalMetric("accuracy") { - override def update(labels: IndexedSeq[NDArray], preds: IndexedSeq[NDArray]): Unit = { - require(labels.length == preds.length, - "labels and predictions should have the same length.") - - for ((pred, label) <- preds zip labels) { - val predLabel = if (pred.shape == label.shape) { - NDArray.argmax(Map("axis" -> 1, "keepdims" -> true))(pred) - } else { - NDArray.argmax_channel(pred) - } - require(label.shape == predLabel.shape, - s"label ${label.shape} and prediction ${predLabel.shape}" + - s"should have the same length.") - - this.sumMetric += label.toArray.zip(predLabel.toArray) - .filter{ case (labelElem: Float, predElem: Float) => labelElem == predElem } - .size - this.numInst += predLabel.shape(0) - predLabel.dispose() - } - } -} - -/** - * Calculate top k predictions accuracy - */ -class TopKAccuracy(topK: Int) extends EvalMetric("top_k_accuracy") { - require(topK > 1, "Please use Accuracy if topK is no more than 1") - - override def update(labels: IndexedSeq[NDArray], preds: IndexedSeq[NDArray]): Unit = { - require(labels.length == preds.length, - s"labels and predictions should have the same length " + - s"(got ${labels.length} and ${preds.length}).") - - for ((pred, label) <- preds zip labels) { - val predShape = pred.shape - val dims = predShape.length - require(dims <= 2, s"Predictions should be no more than 2 dims (got $predShape).") - val labelArray = label.toArray - val numSamples = predShape(0) - if (dims == 1) { - val predArray = pred.toArray.zipWithIndex.sortBy(_._1).reverse.map(_._2) - 
require(predArray.length == labelArray.length, - s"Each label and prediction array should have the same length " + - s"(got ${labelArray.length} and ${predArray.length}).") - this.sumMetric += - labelArray.zip(predArray).map { case (l, p) => if (l == p) 1 else 0 }.sum - } else if (dims == 2) { - val numclasses = predShape(1) - val predArray = pred.toArray.grouped(numclasses).map { a => - a.zipWithIndex.sortBy(_._1).reverse.map(_._2) - }.toArray - require(predArray.length == labelArray.length, - s"Each label and prediction array should have the same length " + - s"(got ${labelArray.length} and ${predArray.length}).") - val topK = Math.max(this.topK, numclasses) - for (j <- 0 until topK) { - this.sumMetric += - labelArray.zip(predArray.map(_(j))).map { case (l, p) => if (l == p) 1 else 0 }.sum - } - } - this.numInst += numSamples - } - } -} - -/** - * Calculate the F1 score of a binary classification problem. - */ -class F1 extends EvalMetric("f1") { - override def update(labels: IndexedSeq[NDArray], preds: IndexedSeq[NDArray]): Unit = { - require(labels.length == preds.length, - s"labels and predictions should have the same length " + - s"(got ${labels.length} and ${preds.length}).") - - for ((pred, label) <- preds zip labels) { - val predLabel = NDArray.argmax_channel(pred) - require(label.shape == predLabel.shape, - s"label ${label.shape} and prediction ${predLabel.shape}" + - s"should have the same length.") - val labelArray = label.toArray - var unique = Array[Float]() - labelArray.foreach(l => if (!unique.contains(l)) unique = unique :+ l) - require(unique.length <= 2, "F1 currently only supports binary classification.") - - var truePositives, falsePositives, falseNegatives = 0f - for ((labelElem, predElem) <- labelArray zip predLabel.toArray) { - if (predElem == 1 && labelElem == 1) truePositives += 1 - else if (predElem == 1 && labelElem == 0) falsePositives += 1 - else if (predElem == 0 && labelElem == 1) falseNegatives += 1 - } - - val precision = { - if (truePositives + falsePositives > 0) truePositives / (truePositives + falsePositives) - else 0f - } - - val recall = { - if (truePositives + falseNegatives > 0) truePositives / (truePositives + falseNegatives) - else 0f - } - - val f1Score = { - if (precision + recall > 0) (2 * precision * recall) / (precision + recall) - else 0f - } - - this.sumMetric += f1Score - this.numInst += 1 - } - } -} - -/** - * Calculate perplexity. - * - * @param ignoreLabel - * Index of invalid label to ignore when - * counting. Usually should be -1. Include - * all entries if None. - * @param axis - * The axis from prediction that was used to - * compute softmax. Default is -1 which means use the last axis. - */ -class Perplexity(ignoreLabel: Option[Int] = None, axis: Int = -1) extends EvalMetric("Perplexity") { - override def update(labels: IndexedSeq[NDArray], preds: IndexedSeq[NDArray]): Unit = { - require(labels.length == preds.length, - s"labels and predictions should have the same length " + - s"(got ${labels.length} and ${preds.length}).") - var loss = 0d - var num = 0 - val probs = ArrayBuffer[NDArray]() - - for ((label, pred) <- labels.zip(preds)) { - require(label.size == pred.size / pred.shape.toArray.reverse.head, - s"shape mismatch: ${label.shape} vs. 
${pred.shape}") - val l = label.asInContext(pred.context).asType(DType.Int32).reshape(Shape(label.size)) - val p = NDArray.pick(Map("axis" -> this.axis))(pred, label) - probs += p.head - } - - for ((label, prob) <- labels.zip(probs)) { - val probArr = prob.toArray - if (this.ignoreLabel != None) { - val ignore = label.toArray.map(l => if (l == this.ignoreLabel.get) 1 else 0) - val p = prob.toArray.zip(ignore).map { case (p, i) => p * (1 - i) + i } - prob.set(p) - num += p.length - ignore.sum - } else { - num += prob.size - } - loss += prob.toArray.map(p => -Math.log(Math.max(1e-10f, p))).sum - } - - this.sumMetric += Math.exp(loss / num).toFloat - this.numInst += 1 - } -} - -// Regression metrics - -/** - * Calculate Mean Absolute Error loss - */ -class MAE extends EvalMetric("mae") { - override def update(labels: IndexedSeq[NDArray], preds: IndexedSeq[NDArray]): Unit = { - require(labels.size == preds.size, - s"labels and predictions should have the same length " + - s"(got ${labels.length} and ${preds.length}).") - - for ((label, pred) <- labels zip preds) { - val labelArr = label.toArray - val predArr = pred.toArray - require(labelArr.length == predArr.length, - s"Each label and prediction array should have the same length " + - s"(got ${labelArr.length} and ${predArr.length}).") - this.sumMetric += - (labelArr zip predArr).map { case (l, p) => Math.abs(l - p) }.sum / labelArr.length - this.numInst += 1 - } - } -} - -// Calculate Mean Squared Error loss -class MSE extends EvalMetric("mse") { - override def update(labels: IndexedSeq[NDArray], preds: IndexedSeq[NDArray]): Unit = { - require(labels.size == preds.size, - s"labels and predictions should have the same length " + - s"(got ${labels.length} and ${preds.length}).") - - for ((label, pred) <- labels zip preds) { - val labelArr = label.toArray - val predArr = pred.toArray - require(labelArr.length == predArr.length, - s"Each label and prediction array should have the same length " + - s"(got ${labelArr.length} and ${predArr.length}).") - this.sumMetric += - (labelArr zip predArr).map { case (l, p) => (l - p) * (l - p) }.sum / labelArr.length - this.numInst += 1 - } - } -} - -/** - * Calculate Root Mean Squred Error loss - */ -class RMSE extends EvalMetric("rmse") { - override def update(labels: IndexedSeq[NDArray], preds: IndexedSeq[NDArray]): Unit = { - require(labels.size == preds.size, - s"labels and predictions should have the same length " + - s"(got ${labels.length} and ${preds.length}).") - - for ((label, pred) <- labels zip preds) { - val labelArr = label.toArray - val predArr = pred.toArray - require(labelArr.length == predArr.length, - s"Each label and prediction array should have the same length " + - s"(got ${labelArr.length} and ${predArr.length}).") - val metric: Double = Math.sqrt( - (labelArr zip predArr).map { case (l, p) => (l - p) * (l - p) }.sum / labelArr.length) - this.sumMetric += metric.toFloat - } - this.numInst += 1 - } -} - - -/** - * Custom evaluation metric that takes a NDArray function. - * @param fEval Customized evaluation function. 
- * @param name The name of the metric - */ -class CustomMetric(fEval: (NDArray, NDArray) => Float, - name: String) extends EvalMetric(name) { - override def update(labels: IndexedSeq[NDArray], preds: IndexedSeq[NDArray]): Unit = { - require(labels.size == preds.size, - s"labels and predictions should have the same length " + - s"(got ${labels.length} and ${preds.length}).") - - for ((label, pred) <- labels zip preds) { - this.sumMetric += fEval(label, pred) - this.numInst += 1 - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Executor.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Executor.scala deleted file mode 100644 index 6365f9cb4645..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Executor.scala +++ /dev/null @@ -1,300 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.Base._ -import org.slf4j.{Logger, LoggerFactory} - -import scala.collection.mutable.ArrayBuffer - -object Executor { - // Get the dictionary given name and ndarray pairs. - private[mxnet] def getDict(names: Seq[String], - ndarrays: Seq[NDArray]): Map[String, NDArray] = { - require(names.toSet.size == names.length, s"Duplicate names detected in ($names)") - (names zip ndarrays).toMap - } -} - -/** - * Symbolic Executor component of MXNet
- * - * WARNING: it is your responsibility to clear this object through dispose(). - * - * - * @author Yizhi Liu - * - * Constructor: please use Symbol.bind and Symbol.simpleBind instead. - * @param handle ExecutorHandle generated by calling Bind - * @param symbol - * @see Symbol.bind : to create executor - */ -class Executor private[mxnet](private[mxnet] val handle: ExecutorHandle, - private[mxnet] val symbol: Symbol, - private[mxnet] var argArrays: Array[NDArray] = null, - private[mxnet] var gradArrays: Array[NDArray] = null, - private[mxnet] var auxArrays: Array[NDArray] = null, - private var _ctx: Context = null, - private var _gradsReq: Iterable[_] = null, - private var _group2ctx: Map[String, Context] = null - ) extends NativeResource { - - val outputs: Array[NDArray] = getOutputs - protected var _argDict: Map[String, NDArray] = null - protected var _gradDict: Map[String, NDArray] = null - protected var _auxDict: Map[String, NDArray] = null - protected var monitorCallback: MXMonitorCallback = null - private val logger: Logger = LoggerFactory.getLogger(classOf[Executor]) - - private var reshaped = false - - override def nativeAddress: CPtrAddress = handle - override def nativeDeAllocator: (CPtrAddress => Int) = _LIB.mxExecutorFree - // cannot determine the off-heap size of this object - override val bytesAllocated: Long = 0 - override val ref: NativeResourceRef = super.register() - - override def dispose(): Unit = { - if (!super.isDisposed) { - super.dispose() - outputs.foreach(o => o.dispose()) - if (reshaped && argArrays != null) {argArrays.foreach(a => a.dispose())} - if (reshaped && gradArrays != null) {gradArrays.foreach( - // Symbol will sometimes fill this with nulls so we've got to check the elements too - a => if (a != null) {a.dispose()}) - } - if (reshaped && auxArrays != null) {auxArrays.foreach(a => a.dispose())} - } - } - - /** - * Return a new executor with the same symbol and shared memory, - * but different input/output shapes. - * For runtime reshaping, variable length sequences, etc. - * The returned executor shares state with the current one, - * and cannot be used in parallel with it. - * @param partialShaping Whether to allow changing the shape of unspecified arguments. - * @param allowUpSizing Whether to allow allocating new ndarrays that's larger than the original. - * @param kwargs Map of string to Shape. - * - new shape for arguments. - * @return - * executor A new executor that shares memory with this. 
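A sketch of the reshape call specified above, e.g. growing the batch dimension for variable-length inference (the shape values are illustrative):

    // The returned executor shares weights with `exec`; the two must not
    // be run in parallel.
    val bigger = exec.reshape(allowUpSizing = true,
      kwargs = Map("data" -> Shape(64, 3, 224, 224)))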
- */ - def reshape(partialShaping: Boolean = false, allowUpSizing: Boolean = false, - kwargs: Map[String, Shape]): Executor = { - - val providedArgShapeNames = kwargs.keys - val providedArgShapeData = kwargs.values.flatMap(_.toVector) - val providedArgShapeIdx = kwargs.values.scanLeft(0)((sum, shape) => sum + shape.size) - - val ctxMapKeys = if (_group2ctx != null) _group2ctx.keys.toArray else Array.empty[String] - val ctxMapDevTypes = if (_group2ctx != null) { - _group2ctx.values.map(_.deviceTypeid).toArray - } else { - Array.empty[Int] - } - val ctxMapDevIds = if (_group2ctx != null) { - _group2ctx.values.map(_.deviceId).toArray - } else { - Array.empty[Int] - } - - val inArgs = ArrayBuffer.empty[NDArrayHandle] - val argGrads = ArrayBuffer.empty[NDArrayHandle] - val auxStates = ArrayBuffer.empty[NDArrayHandle] - val outHandle = new ExecutorHandleRef() - - checkCall(_LIB.mxExecutorReshape( - if (partialShaping) 1 else 0, - if (allowUpSizing) 1 else 0, - _ctx.deviceTypeid, - _ctx.deviceId, - ctxMapKeys.toArray, - ctxMapDevTypes.toArray, - ctxMapDevIds.toArray, - providedArgShapeNames.toArray, - providedArgShapeData.toArray, - providedArgShapeIdx.toArray, - inArgs, - argGrads, - auxStates, - this.handle, - outHandle)) - - val argArrays = inArgs.map(new NDArray(_)).toArray - val gradArrays = argGrads.map(handle => - if (handle == 0) null else new NDArray(handle)).toArray - val auxArrays = auxStates.map(new NDArray(_)).toArray - - val executor = new Executor(outHandle.value, this.symbol) - executor._ctx = this._ctx - executor._gradsReq = this._gradsReq - executor._group2ctx = this._group2ctx - executor.argArrays = argArrays - executor.gradArrays = gradArrays - executor.auxArrays = auxArrays - executor.reshaped = true - executor - } - - /** - * list all the output ndarray - * @return A list of ndarray binded to the heads of executor. - */ - private def getOutputs: Array[NDArray] = { - val ndHandles = ArrayBuffer[NDArrayHandle]() - checkCall(_LIB.mxExecutorOutputs(handle, ndHandles)) - ndHandles.toArray.map(ele => { - val nd = new NDArray(ele, addToCollector = false) - if (nd.isSparse) { - nd.asInstanceOf[SparseNDArray] - } - nd - } - ) - } - - /** - * Calculate the outputs specified by the binded symbol. - * @param isTrain whether this forward is for evaluation purpose. - * @param kwargs Additional specification of input arguments. - */ - def forward(isTrain: Boolean, kwargs: (String, NDArray)*): Unit = { - kwargs.foreach { case (name, array) => - require(argDict.contains(name), s"Unknown argument $name") - array.copyTo(argDict(name)) - } - checkCall(_LIB.mxExecutorForward(handle, if (isTrain) 1 else 0)) - } - - def forward(): Unit = { - forward(isTrain = false) - } - - /** - * Do backward pass to get the gradient of arguments. - * @param outGrads Gradient on the outputs to be propagated back. - * This parameter is only needed when bind is called - * on outputs that are not a loss function. - */ - def backward(outGrads: Array[NDArray]): Unit = { - require(outGrads != null, "outGrads must not be null") - val ndArrayPtrs = outGrads.map(_.handle) - checkCall(_LIB.mxExecutorBackward(handle, ndArrayPtrs)) - } - - def backward(outGrad: NDArray): Unit = { - require(outGrad != null, "outGrads must not be null") - backward(Array(outGrad)) - } - - def backward(): Unit = { - backward(Array.empty[NDArray]) - } - - /** - * Install callback. - * @param callback Takes a string and an NDArrayHandle. 
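Taken together, forward and backward above give the usual training step; a minimal sketch (executor binding and batch NDArrays are elided, and "softmax_label" is an illustrative input name):

    // One step: copy inputs in, run forward, then backprop.
    exec.forward(isTrain = true, "data" -> dataBatch, "softmax_label" -> labelBatch)
    exec.backward()
    // gradients now sit in exec.gradDict (defined just below) for every
    // argument bound with the "write" grad-req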
- */ - def setMonitorCallback(callback: MXMonitorCallback): Unit = { - monitorCallback = callback - checkCall(_LIB.mxExecutorSetMonitorCallback(handle, monitorCallback)) - } - - /** - * Get dictionary representation of argument arrrays. - * @return The dictionary that maps name of arguments to NDArrays. - */ - def argDict: Map[String, NDArray] = { - if (_argDict == null) { - _argDict = Executor.getDict(symbol.listArguments(), argArrays) - } - _argDict - } - - /** - * Get dictionary representation of gradient arrays. - * @return The dictionary that maps name of arguments to gradient arrays. - */ - def gradDict: Map[String, NDArray] = { - if (_gradDict == null) { - _gradDict = Executor.getDict(symbol.listArguments(), gradArrays) - } - _gradDict - } - - /** - * Get dictionary representation of auxiliary states arrays. - * @return The dictionary that maps name of auxiliary states to NDArrays. - */ - def auxDict: Map[String, NDArray] = { - if (_auxDict == null) { - _auxDict = Executor.getDict(symbol.listAuxiliaryStates(), auxArrays) - } - _auxDict - } - - /** - * Copy parameters from arg_params, aux_params into executor's internal array. - * @param argParams : dict of name to NDArray of arguments - * @param auxParams : dict of name to NDArray of auxiliary states. - * @param allowExtraParams - * Whether allow extra parameters that are not needed by symbol - * If this is True, no error will be thrown when arg_params or aux_params - * contain extra parameters that is not needed by the executor. - */ - def copyParamsFrom(argParams: Map[String, NDArray], - auxParams: Map[String, NDArray], - allowExtraParams: Boolean = false): Unit = { - argParams.foreach { case (name, array) => - if (argDict.contains(name)) { - array.copyTo(argDict(name)) - } else { - require(allowExtraParams, s"Provided name $name is not in the arguments") - } - } - if (auxParams != null) { - auxParams.foreach { case (name, array) => - if (auxDict.contains(name)) { - array.copyTo(auxDict(name)) - } else { - require(allowExtraParams, s"Provided name $name is not in the auxiliary states") - } - } - } - } - - def copyParamsFrom(argParams: Map[String, NDArray], allowExtraParams: Boolean): Unit = { - copyParamsFrom(argParams, null, allowExtraParams) - } - - def copyParamsFrom(argParams: Map[String, NDArray]): Unit = { - copyParamsFrom(argParams, allowExtraParams = false) - } - - /** - * Get a debug string about internal execution plan. - * @return Debug string of the executor. - */ - def debugStr: String = { - val str = new RefString - checkCall(_LIB.mxExecutorPrint(handle, str)) - str.value - } - -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/ExecutorManager.scala b/scala-package/core/src/main/scala/org/apache/mxnet/ExecutorManager.scala deleted file mode 100644 index d94b8fb01ed6..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/ExecutorManager.scala +++ /dev/null @@ -1,557 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
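The copyParamsFrom overloads above are how checkpointed weights get back into a bound executor; a sketch (the argParams/auxParams maps come from wherever the model was serialized):

    // Unknown keys in either map are tolerated only with allowExtraParams = true.
    exec.copyParamsFrom(argParams, auxParams, allowExtraParams = true)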
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.DType.DType -import org.slf4j.{LoggerFactory, Logger} - -import scala.collection.immutable.ListMap -import scala.collection.mutable -import scala.collection.mutable.ArrayBuffer - -/** - * Helper class to manage multiple executors for data parallelism. - * @author Yizhi Liu - * @param symbol output symbol - * @param ctx devices to run on - * @param paramNames Name of all trainable parameters of the network. - * @param argNames Name of all arguments of the network. - * @param auxNames Name of all auxiliary states of the network. - * @param trainData Training data iterator. - * @param workLoadList The list of work load for different devices, in the same order as ctx - * @param symGen symbol generator for bucketing - */ -private[mxnet] class DataParallelExecutorManager(private val symbol: Symbol, - private val ctx: Array[Context], - private[mxnet] val paramNames: IndexedSeq[String], - private[mxnet] val argNames: IndexedSeq[String], - private[mxnet] val auxNames: IndexedSeq[String], - trainData: DataIter, - private var workLoadList: Seq[Float] = null, - private val symGen: SymbolGenerator = null) { - // preparation - private val numDevice = ctx.length - DataParallelExecutorManager.logger.info(s"Start training with [${ctx.mkString(",")}]") - - // make sure the architecture is valid - ExecutorManager.checkArguments(symbol) - - if (workLoadList == null) { - workLoadList = Seq.fill(numDevice)(1f) - } - require(workLoadList.size == numDevice, "Invalid settings for work load. " + - s"Size (${workLoadList.size}) should match num devices ($numDevice)") - - private val slices = ExecutorManager.splitInputSlice(trainData.batchSize, workLoadList) - - private val paramNameSet = paramNames.toSet - - private val execGrp = new DataParallelExecutorGroup( - symbol, argNames, paramNameSet, ctx, slices, trainData) - private var currExecGrp: DataParallelExecutorGroup = null // this is set when data is loaded - - private val execGrpBucket: mutable.Map[AnyRef, DataParallelExecutorGroup] - = mutable.HashMap.empty[AnyRef, DataParallelExecutorGroup] - if (symGen != null) { - execGrpBucket.put(trainData.defaultBucketKey, execGrp) - } - - // shared parameter arrays - def paramArrays: IndexedSeq[Array[NDArray]] = { - // param arrays should be shared by all executor groups - execGrp.paramArrays - } - - // shared gradient arrays - def gradArrays: IndexedSeq[Array[NDArray]] = { - // grad arrays should be shared by all executor groups - execGrp.gradArrays - } - - // shared aux states - def auxArrays: IndexedSeq[Array[NDArray]] = { - // aux arrays are also shared by all executor groups - execGrp.auxArrays - } - - /** - * Release all the executor groups. - * The object shall never be used after it is disposed. 
- */ - def dispose(): Unit = { - execGrp.dispose() - execGrpBucket.values.foreach(_.dispose()) - } - - // Install monitor on all executors - def installMonitor(monitor: Monitor): Unit = { - require(symGen == null, "Monitoring is not implemented for bucketing") - execGrp.trainExecs.foreach(monitor.install) - } - - /** - * Set parameter and aux values - * @param argParams source parameter arrays - * @param auxParams source aux arrays - */ - def setParams(argParams: Map[String, NDArray], auxParams: Map[String, NDArray]): Unit = { - execGrp.trainExecs.foreach(_.copyParamsFrom(argParams, auxParams)) - } - - /** - * Copy data from each executor to `arg_params` and `aux_params` - * @param argParams target parameter arrays - * @param auxParams target aux arrays - * @note This function will inplace update the NDArrays in arg_params and aux_params. - */ - def copyTo(argParams: Map[String, NDArray], auxParams: Map[String, NDArray]): Unit = { - for ((name, block) <- paramNames zip paramArrays) { - val weight = block.map(_.copyTo(Context.cpu())).reduce(_ + _) / block.length - val typedWeight = weight.asType(argParams(name).dtype) - typedWeight.copyTo(argParams(name)) - typedWeight.dispose() - } - for ((name, block) <- auxNames zip auxArrays) { - val weight = block.map(_.copyTo(Context.cpu())).reduce(_ + _) / block.length - val typedWeight = weight.asType(auxParams(name).dtype) - typedWeight.copyTo(auxParams(name)) - typedWeight.dispose() - } - } - - // load data and labels into arrays - def loadDataBatch(dataBatch: DataBatch): Unit = { - currExecGrp = - if (symGen != null) { - val key = dataBatch.bucketKey - require(key != null, "bucketKey must not be null for bucketing io") - if (!execGrpBucket.contains(key)) { - // create new bucket entry - val sym = symGen.generate(key) - val grp = new DataParallelExecutorGroup(sym, argNames, paramNameSet, - ctx, slices, dataBatch, sharedGroup = execGrp) - execGrpBucket.put(key, grp) - } - execGrpBucket(key) - } else { - execGrp - } - currExecGrp.loadDataBatch(dataBatch) - } - - // run forward on the current executor - def forward(isTrain: Boolean = false): Unit = { - currExecGrp.forward(isTrain = isTrain) - } - - // run backward on the current executor - def backward(): Unit = { - currExecGrp.backward() - } - - // update metric with the current executor - def updateMetric(metric: EvalMetric, labels: IndexedSeq[NDArray]): Unit = { - currExecGrp.updateMetric(metric, labels) - } -} - -private object DataParallelExecutorManager { - val logger: Logger = LoggerFactory.getLogger(classOf[DataParallelExecutorManager]) -} - -private[mxnet] object ExecutorManager { - /** - * Get input slice from the input shape. - * @param batchSize The number of samples in a mini-batch. - * @param workLoadList The list of work load for different devices, in the same order as ctx - * @return The split slices to get a specific slice. - * @throws IllegalArgumentException - * If there are two many splits such that some slice can be empty. 
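A worked example of the slicing described above: batchSize = 100 with workLoadList = Seq(2f, 3f) yields batchNumList = (40, 60) and slices (0, 40) and (40, 100), so the heavier device receives 60 of the 100 samples; any rounding shortfall is added to the last slice before slicing.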
- */ - private[mxnet] def splitInputSlice(batchSize: Int, - workLoadList: Seq[Float]): Array[(Int, Int)] = { - val totalWorkLoad = workLoadList.sum - val batchNumList = workLoadList.map(workLoad => - math.round(workLoad * batchSize / totalWorkLoad)).toArray - val batchNumSum = batchNumList.sum - if (batchNumSum < batchSize) { - batchNumList(batchNumList.length-1) += batchSize - batchNumSum - } - - val slices = ArrayBuffer.empty[(Int, Int)] - var end = 0 - batchNumList.foreach(batchNum => { - val begin = math.min(end, batchSize) - end = math.min(begin + batchNum, batchSize) - require(begin < end, "Too many slices such that some splits are empty") - slices.append((begin, end)) - }) - slices.toArray - } - - /** - * Check the argument names of symbol. - * This function checks the duplication of arguments in Symbol. - * The check is done for feedforward net for now. - * @param symbol The network configuration - */ - private[mxnet] def checkArguments(symbol: Symbol): Unit = { - val argNames = symbol.listArguments() - require(argNames.toSet.size == argNames.length, - "Found duplicated argument name," + - "please make the weight name non-duplicated(using name arguments)," + - s"arguments are $argNames") - - val auxNames = symbol.listAuxiliaryStates() - require(auxNames.toSet.size == auxNames.length, - "Found duplicated auxiliary param name," + - "please make the weight name non-duplicated(using name arguments)," + - s"arguments are $auxNames") - } - - // Load a list of arrays into a list of arrays - private[mxnet] def loadGeneral(data: Seq[NDArray], targets: Seq[NDArray]): Unit = { - (data zip targets).foreach { case (dSrc, dTarget) => - require(dSrc.shape == dTarget.shape, - s"src shape ${dSrc.shape} mismatch dst shape ${dTarget.shape}") - dSrc.copyTo(dTarget) - } - } - - // Load a list of arrays into a list of arrays specified by slices - private[mxnet] def loadGeneralMulti(data: Seq[NDArray], - targets: Seq[Array[(Int, Int, NDArray)]]): Unit = { - for ((src, dTargets) <- data zip targets) { - for ((start, end, dst) <- dTargets) { - val sliced = src.slice(start, end) - require(sliced.shape == dst.shape, - s"src shape ${sliced.shape} mismatch dst shape ${dst.shape}") - sliced.copyTo(dst) - } - } - } - - // Load data into sliced arrays - private[mxnet] def loadDataMulti(batch: DataBatch, - targets: Seq[Array[(Int, Int, NDArray)]]): Unit = { - loadGeneralMulti(batch.data, targets) - } - - private[mxnet] def loadData(batch: DataBatch, targets: Seq[NDArray]): Unit = { - loadGeneral(batch.data, targets) - } - - // Load label into sliced arrays - private[mxnet] def loadLabelMulti(batch: DataBatch, - targets: Seq[Array[(Int, Int, NDArray)]]): Unit = { - loadGeneralMulti(batch.label, targets) - } - - private[mxnet] def loadLabel(batch: DataBatch, targets: Seq[NDArray]): Unit = { - loadGeneral(batch.label, targets) - } - - // bind executor for bucketing, potentially sharing data with an existing executor. - private[mxnet] def bindExec(sym: Symbol, ctx: Context, inputShapes: Map[String, Shape], - paramNames: Set[String], needGrad: Boolean = false, - grads: Set[String] = null, baseExec: Executor = null, - sharedDataArrays: mutable.Map[String, NDArray] = null, - inputTypes: ListMap[String, DType] = null) = { - val (argShape, _, auxShape) = sym.inferShape(inputShapes) - // TODO: more precise error message should be provided by backend - require(argShape != null, "Shape inference failed." 
+ - s"Known shapes are $inputShapes for symbol arguments ${sym.listArguments()} " + - s"and aux states ${sym.listAuxiliaryStates()}") - - val inputTypesUpdate = - if (inputTypes == null) { - inputShapes.map { case (key, _) => (key, Base.MX_REAL_TYPE) } - } else { - inputTypes - } - val (argTypes, _, auxTypes) = sym.inferType(inputTypesUpdate) - require(argTypes != null, "Type inference failed." + - s"Known types as $inputTypes for symbol arguments ${sym.listArguments()} " + - s"and aux states ${sym.listAuxiliaryStates()}") - - val argArrays = ArrayBuffer.empty[NDArray] - val gradArrays: mutable.Map[String, NDArray] = - if (needGrad) mutable.HashMap.empty[String, NDArray] else null - - val argNames = sym.listArguments() - - val gradSet: Set[String] = - if (!needGrad) { - Set.empty[String] - } else if (grads == null) { - argNames.toSet -- inputShapes.keySet - } else { - grads - } - - val gradReq = argNames.map { name => - if (gradSet.contains(name)) name -> "write" - else name -> "null" - }(collection.breakOut): Map[String, String] - - // create or borrow arguments and gradients - argNames.zipWithIndex.foreach { case (name, i) => - if (!paramNames.contains(name)) { - // data or label - val argArr = - if (sharedDataArrays != null && sharedDataArrays.contains(name)) { - val arr = sharedDataArrays(name) - if (arr.shape.product >= argShape(i).product) { - // good, we can share this memory - require(argTypes(i) == arr.dtype, - s"Type ${arr.dtype} of argument $name does not match inferred type ${argTypes(i)}") - arr.reshape(argShape(i)) - } else { - DataParallelExecutorManager.logger.warn( - s"bucketing: data $name has a shape ${argShape(i)}," + - s"which is larger than already allocated shape ${arr.shape}." + - "Need to re-allocate.Consider putting default_bucket_key" + - "to be the bucket taking the largest input for better memory sharing.") - val zeros = NDArray.zeros(argShape(i), ctx, dtype = argTypes(i)) - // replace existing shared array because the new one is bigger - sharedDataArrays.put(name, zeros) - // TODO: shall we dispose the replaced array here? 
- // arr.dispose() - zeros - } - } else { - val zeros = NDArray.zeros(argShape(i), ctx, dtype = argTypes(i)) - if (sharedDataArrays != null) { - sharedDataArrays.put(name, zeros) - } - zeros - } - argArrays.append(argArr) - } else { - // model parameter - val argArr = - if (baseExec == null) { - if (gradSet.contains(name)) { - val gradArr = NDArray.zeros(argShape(i), ctx, dtype = argTypes(i)) - gradArrays.put(name, gradArr) - } - NDArray.zeros(argShape(i), ctx, dtype = argTypes(i)) - } else { - val arr = baseExec.argDict(name) - require(arr.shape == argShape(i), - s"Shape ${arr.shape} of argument $name does not match inferred shape ${argShape(i)}") - require(arr.dtype == argTypes(i), - s"Type ${arr.dtype} of argument $name does not match inferred type ${argTypes(i)}") - if (gradSet.contains(name)) { - gradArrays.put(name, baseExec.gradDict(name)) - } - arr - } - argArrays.append(argArr) - } - } - // create or borrow aux variables - val auxNames = sym.listAuxiliaryStates() - val auxArrays = - if (baseExec == null) { - (auxShape zip auxTypes) map { case (s, t) => - NDArray.zeros(s, ctx, dtype = t) - } - } else { - baseExec.auxArrays.zipWithIndex.map { case (a, i) => - require(auxShape(i) == a.shape, - s"Shape ${a.shape} of aux variable ${auxNames(i)} does not match " + - s"inferred shape ${auxShape(i)}") - require(auxTypes(i) == a.dtype, - s"Type ${a.dtype} of aux variable ${auxNames(i)} does not match " + - s"inferred type ${auxTypes(i)}") - a - }.toSeq - } - sym.bind(ctx = ctx, args = argArrays.toSeq, argsGrad = gradArrays.toMap, gradsReq = gradReq, - auxStates = auxArrays, group2ctx = null, sharedExec = baseExec) - } -} - -/** - * A group of executors living on different devices, for data parallel. - * @param sym The network configuration. - * @param argNames Equals `sym.list_arguments()` - * @param paramNames Names of all trainable parameters. - * @param ctx List of devices for training (data parallel) - * @param slices Describes how the data parallel splits data into different devices. - * @param providedDataDesc training data descriptions - * @param providedLabelDesc training label descriptions - * @param sharedGroup: DataParallelExecutorGroup - * An existing executor group, if to share parameters with it. 
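A worked example of the gradReq map built in bindExec above: for argNames = (data, fc1_weight, fc1_bias) with inputShapes keyed by data, needGrad = true, and grads = null, the gradient set is {fc1_weight, fc1_bias}, so gradReq maps data to "null" and the two parameters to "write", and only the parameters get gradient buffers allocated.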
- * - */ -private class DataParallelExecutorGroup private(sym: Symbol, - argNames: IndexedSeq[String], paramNames: Set[String], - ctx: Array[Context], private val slices: Array[(Int, Int)], - providedDataDesc: IndexedSeq[DataDesc], - providedLabelDesc: IndexedSeq[DataDesc], - sharedGroup: DataParallelExecutorGroup) { - // make sure the architecture is valid - ExecutorManager.checkArguments(sym) - - private[mxnet] val sharedDataArrays: Array[mutable.Map[String, NDArray]] = - if (sharedGroup == null) { - ctx.map(_ => mutable.HashMap.empty[String, NDArray]) - } else { - sharedGroup.sharedDataArrays - } - - private[mxnet] val dataNames = providedDataDesc.map(_.name).toList - private[mxnet] val labelNames = providedLabelDesc.map(_.name).toList - private[mxnet] val auxNames = sym.listAuxiliaryStates() - private[mxnet] val paramIdx = argNames.zipWithIndex - .filter { case (name, i) => paramNames.contains(name) } - .map { case (name, i) => i } - private[mxnet] val paramNamesComb = paramIdx.map(i => argNames(i)).toSet - - private[mxnet] val trainExecs: Array[Executor] = - ctx.zipWithIndex.map { case (ctxi, i) => - val dataShapes = - (providedDataDesc ++ providedLabelDesc).map( desc => { - desc.name -> - (Shape(slices(i)._2 - slices(i)._1) ++ desc.shape.slice(1, desc.shape.length)) - }).toMap - val sharedExec: Executor = if (sharedGroup == null) null else sharedGroup.trainExecs(i) - ExecutorManager.bindExec(sym, ctxi, dataShapes, paramNamesComb, - needGrad = true, baseExec = sharedExec, - sharedDataArrays = sharedDataArrays(i)) - } - - // data structure - private[mxnet] val dataArrays = - dataNames.map(name => - trainExecs.zipWithIndex.map { case (e, i) => - (slices(i)._1, slices(i)._2, e.argDict(name)) - } - ).toIndexedSeq - private[mxnet] val labelArrays = - labelNames.map(name => - trainExecs.zipWithIndex.map { case (e, i) => - (slices(i)._1, slices(i)._2, e.argDict(name)) - } - ).toIndexedSeq - private[mxnet] val paramArrays = paramIdx.map(i => - trainExecs.map(e => e.argArrays(i)) - ).toIndexedSeq - private[mxnet] val gradArrays = paramIdx.map(i => - trainExecs.map(e => e.gradArrays(i)) - ).toIndexedSeq - private[mxnet] val auxArrays = (0 until auxNames.length).map(i => - trainExecs.map(e => e.auxArrays(i)) - ) - - /** - * A group of executors living on different devices, for data parallel - * @param sym The network configuration. - * @param argNames Equals `sym.list_arguments()` - * @param paramNames Names of all trainable parameters. - * @param ctx List of devices for training (data parallel) - * @param slices Describes how the data parallel splits data into different devices. - * @param trainData The dataset for training. - * Loading of actual data is not necessarily needed at this stage. - * @param sharedGroup: DataParallelExecutorGroup - * An existing executor group, if to share parameters with it. 
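A worked example of the per-device shapes computed in trainExecs above: with slices (0, 64) and (64, 128) and a global data description of shape (128, 3, 32, 32), each of the two executors is bound with data shape (64, 3, 32, 32); only the leading batch dimension is re-sliced.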
- * - */ - def this(sym: Symbol, - argNames: IndexedSeq[String], paramNames: Set[String], - ctx: Array[Context], slices: Array[(Int, Int)], - trainData: DataIter, - sharedGroup: DataParallelExecutorGroup) { - this(sym, argNames, paramNames, ctx, slices, - trainData.provideDataDesc, trainData.provideLabelDesc, sharedGroup) - } - - def this(sym: Symbol, - argNames: IndexedSeq[String], paramNames: Set[String], - ctx: Array[Context], slices: Array[(Int, Int)], - trainData: DataIter) { - this(sym, argNames, paramNames, ctx, slices, - trainData.provideDataDesc, trainData.provideLabelDesc, null) - } - - /** - * A group of executors living on different devices, for data parallel - * @param sym The network configuration. - * @param argNames Equals `sym.list_arguments()` - * @param paramNames Names of all trainable parameters. - * @param ctx List of devices for training (data parallel) - * @param slices Describes how the data parallel splits data into different devices. - * @param trainData The dataset for training. - * Loading of actual data is not necessarily needed at this stage. - * @param sharedGroup: DataParallelExecutorGroup - * An existing executor group, if to share parameters with it. - * - */ - def this(sym: Symbol, - argNames: IndexedSeq[String], paramNames: Set[String], - ctx: Array[Context], slices: Array[(Int, Int)], - trainData: DataBatch, - sharedGroup: DataParallelExecutorGroup) { - this(sym, argNames, paramNames, ctx, slices, - trainData.provideDataDesc, trainData.provideLabelDesc, sharedGroup) - } - - def this(sym: Symbol, - argNames: IndexedSeq[String], paramNames: Set[String], - ctx: Array[Context], slices: Array[(Int, Int)], - trainData: DataBatch) { - this(sym, argNames, paramNames, ctx, slices, - trainData.provideDataDesc, trainData.provideLabelDesc, null) - } - - // load data and labels into arrays - def loadDataBatch(dataBatch: DataBatch): Unit = { - ExecutorManager.loadDataMulti(dataBatch, dataArrays) - ExecutorManager.loadLabelMulti(dataBatch, labelArrays) - } - - // Perform a forward pass on each executor - def forward(isTrain: Boolean = false): Unit = { - trainExecs.foreach(_.forward(isTrain = isTrain)) - } - - // Perform a backward pass on each executor - def backward(): Unit = { - trainExecs.foreach(_.backward()) - } - - // Update evaluation metric with label and current outputs - def updateMetric(metric: EvalMetric, labels: IndexedSeq[NDArray]): Unit = { - (trainExecs zip slices).foreach { case (texec, islice) => - val labelsSlice = labels.map(_.slice(islice)) - metric.update(labelsSlice, texec.outputs) - } - } - - /** - * Release the related executors. - * The object shall never be used after it is disposed. - */ - def dispose(): Unit = { - trainExecs.foreach(_.dispose()) - } -} - - diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/FeedForward.scala b/scala-package/core/src/main/scala/org/apache/mxnet/FeedForward.scala deleted file mode 100644 index b8e2ba0b39c8..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/FeedForward.scala +++ /dev/null @@ -1,744 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.Base.CPtrAddress -import org.apache.mxnet.io.NDArrayIter -import org.apache.mxnet.optimizer.SGD -import org.slf4j.{Logger, LoggerFactory} - -import scala.collection.mutable.ListBuffer - -/** - * Model class of MXNet for training and predicting feedforward nets. - * This class is designed for a single-data single output supervised network. - * @param symbol The symbol configuration of computation network. - * @param symGen Symbol generator for bucketing. - * @param ctx The device context of training and prediction. - * To use multi GPU training, pass in a list of gpu contexts. - * @param numEpoch Training parameter, number of training epochs(epochs). - * @param epochSize Number of batches in a epoch. In default, it is set to - * ceil(num_train_examples / batch_size) - * @param optimizer Training parameter, name or optimizer object for training. - * @param initializer Training parameter, the initialization scheme used. - * @param batchSize The batch size of training data. - * @param argParams Model parameter, dict of name to NDArray of net's weights. - * @param auxParams Model parameter, dict of name to NDArray of net's auxiliary states. - * @param allowExtraParams Whether allow extra parameters that are not needed by symbol - * to be passed by aux_params and arg_params. - * If this is True, no error will be thrown when aux_params and arg_params - * contain extra parameters than needed. - * @param beginEpoch The beginning training epoch. 
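A hedged construction sketch for the FeedForward class declared just below (net stands for a previously built Symbol; the optimizer and initializer values simply restate this constructor's own defaults):

    // Single-process CPU training setup; omitted arguments keep their defaults.
    val model = new FeedForward(symbol = net, ctx = Array(Context.cpu()),
      numEpoch = 10, batchSize = 128,
      optimizer = new SGD(), initializer = new Uniform(0.01f))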
- */ -class FeedForward private( - private var symbol: Symbol, - symGen: SymbolGenerator, - ctx: Array[Context], - numEpoch: Int, val epochSize: Int, - optimizer: Optimizer, - initializer: Initializer, - batchSize: Int, - argParams: Map[String, NDArray], - auxParams: Map[String, NDArray], - private val allowExtraParams: Boolean, - val beginEpoch: Int) extends NativeResource { - - val logger: Logger = LoggerFactory.getLogger(classOf[FeedForward]) - private var argumentChecked = false - private var _argParams = argParams - private var _auxParams = auxParams - if (symGen == null) { - checkArguments() - } - - def getArgParams: Map[String, NDArray] = _argParams - def getAuxParams: Map[String, NDArray] = _auxParams - - // internal helper state - var predExec: Executor = null - - private var monitor: Option[Monitor] = None - - // scalastyle:off parameterNum - def this(symbol: Symbol, ctx: Array[Context] = Array(Context.cpu()), - numEpoch: Int = -1, epochSize: Int = -1, - optimizer: Optimizer = new SGD(), - initializer: Initializer = new Uniform(0.01f), - batchSize: Int = 128, - argParams: Map[String, NDArray] = null, - auxParams: Map[String, NDArray] = null, - allowExtraParams: Boolean = false, - beginEpoch: Int = 0) { - this(symbol, null, ctx, numEpoch, epochSize, optimizer, initializer, batchSize, - argParams, auxParams, allowExtraParams, beginEpoch) - } - - def this(symbol: SymbolGenerator, ctx: Array[Context], numEpoch: Int, epochSize: Int, - optimizer: Optimizer, initializer: Initializer, batchSize: Int, - argParams: Map[String, NDArray], auxParams: Map[String, NDArray], - allowExtraParams: Boolean, beginEpoch: Int) { - this(null, symbol, ctx, numEpoch, epochSize, optimizer, initializer, batchSize, - argParams, auxParams, allowExtraParams, beginEpoch) - } - // scalastyle:on parameterNum - - // verify the arguments of the default symbol and user-provided parameters - def checkArguments(): Unit = { - if (!argumentChecked) { - require(symbol != null, "Symbol must not be null") - // check if the symbol contains duplicated names. - ExecutorManager.checkArguments(symbol) - // rematch parameters to delete useless ones - if (allowExtraParams) { - if (_argParams != null) { - val argNames = symbol.listArguments().toSet - _argParams = _argParams.filter { case (k, v) => argNames.contains(k) } - } - if (auxParams != null) { - val auxNames = symbol.listAuxiliaryStates().toSet - _auxParams = _auxParams.filter { case (k, v) => auxNames.contains(k) } - } - } - argumentChecked = true - } - } - - def setMonitor(m: Monitor): Unit = { - monitor = Option(m) - } - - def unsetMonitor(): Unit = { - setMonitor(null) - } - - // Initialize weight parameters and auxiliary states. - // The NDArrays associated with _argParams and _auxParams are not disposed; instead, - // they are passed to an outer scope if available.
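The public auxiliary constructor above was typically invoked with named defaults; a minimal sketch, assuming a `net: Symbol` defined elsewhere:

  val model = new FeedForward(net,
    ctx = Array(Context.cpu()),
    numEpoch = 10,
    optimizer = new SGD(),
    initializer = new Uniform(0.01f),
    batchSize = 128)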
- private def initParams(inputShapes: IndexedSeq[DataDesc], overwrite: Boolean = false) - : (IndexedSeq[String], IndexedSeq[String], IndexedSeq[String]) = { - val (argShapes, _, auxShapes) = symbol.inferShape(inputShapes) - val argNames = symbol.listArguments() - val inputNames = inputShapes.map(_.name).toSet - val paramNames = argNames.filter(!inputNames.contains(_)) - val auxNames = symbol.listAuxiliaryStates() - - val paramNameShapes = (argNames zip argShapes).filter { case (name, _) => - paramNames.contains(name) - } - val argParams = paramNameShapes.map { case (name, shape) => { - val param = NDArray.zeros(shape) - val curScope = ResourceScope.getCurrentScope() - if (curScope.isDefined) curScope.get.moveToOuterScope(param) - (name, param) - } - }.toMap - - val auxParams = (auxNames zip auxShapes).map { case (name, shape) => { - val param = NDArray.zeros(shape) - val curScope = ResourceScope.getCurrentScope() - if (curScope.isDefined) curScope.get.moveToOuterScope(param) - (name, param) - } - }.toMap - - for ((k, v) <- argParams) { - if (_argParams != null && _argParams.contains(k) && (!overwrite)) { - argParams(k).set(_argParams(k)) - - } else { - initializer(k, v) - } - } - - for ((k, v) <- auxParams) { - if (_auxParams != null && _auxParams.contains(k) && (!overwrite)) { - auxParams(k).set(_auxParams(k)) - } else { - initializer(k, v) - } - } - - _argParams = argParams - _auxParams = auxParams - (argNames, paramNames, auxNames) - } - - // Initialize the predictor module for running prediction. - private def initPredictor(inputShapes: IndexedSeq[DataDesc]): Unit = { - var shouldInit = true - if (this.predExec != null) { - val (argShapes, _, _) = symbol.inferShape(inputShapes) - require(argShapes != null, "Shape inference failed." + - s"Known shapes are $inputShapes for symbol arguments ${symbol.listArguments()} " + - s"and aux states ${symbol.listAuxiliaryStates()}") - val predShapes = this.predExec.argArrays.map(_.shape) - if (argShapes.sameElements(predShapes)) { - shouldInit = false - } - } - if(shouldInit) { - // for now only use the first device - val predExec = symbol.simpleBind(ctx(0), gradReq = "null", inputShapes) - predExec.copyParamsFrom(_argParams, _auxParams) - ExecutorManager.checkArguments(symbol) - this.predExec = predExec - } - } - - // Initialize the iterator given input. - private def initIter(X: NDArray, y: NDArray, isTrain: Boolean): DataIter = { - require(y != null || !isTrain, "y must be specified") - val label = if (y == null) NDArray.zeros(X.shape(0)) else y - require(label.shape.length == 1, "Label must be 1D") - require(X.shape(0) == label.shape(0), - s"The numbers of data points (${X.shape(0)}) and labels (${label.shape(0)}) are not equal") - if (isTrain) { - new NDArrayIter(IndexedSeq(X), IndexedSeq(label), batchSize, - shuffle = isTrain, lastBatchHandle = "roll_over") - } else { - new NDArrayIter(IndexedSeq(X), IndexedSeq(label), batchSize, shuffle = false) - } - } - - // Initialize the iterator given eval_data. - private def initEvalIter(evalData: (NDArray, NDArray)): DataIter = { - if (evalData == null) { - null - } else { - initIter(evalData._1, evalData._2, isTrain = true) - } - } - - /** - * Run the prediction, always only use one device. - * @param data eval data - * @param numBatch the number of batch to run. Go though all batches if set -1 - * @return The predicted value of the output. 
- * Note the network may have multiple outputs, thus it return an array of [[NDArray]] - */ - def predict(data: DataIter, numBatch: Int = -1): Array[NDArray] = { - data.reset() - val dataShapes = data.provideDataDesc - val dataNames = dataShapes.map(_.name).toArray - initPredictor(dataShapes) - val batchSize = data.batchSize - val dataArrays = dataNames.map(predExec.argDict(_)) - val outputs = Array.fill(predExec.outputs.length)(ListBuffer.empty[NDArray]) - - var i = 0 - while (data.hasNext && i != numBatch) { - val batch = data.next() - try { - i += 1 - ExecutorManager.loadData(batch, dataArrays) - predExec.forward(isTrain = false) - val padded = batch.pad - val realSize = batchSize - padded - for ((list, nd) <- outputs zip predExec.outputs) { - // The slice is being written to a value so that dispose can be called after the copy. - // The one liner nd.slice().copy() leads to leaking the memory of the slice. - val ndSliced = nd.slice(0, realSize) - try { - list += ndSliced.copy() - } finally { - ndSliced.dispose() - } - } - } finally { - batch.dispose() - } - } - // TODO(Yizhi): we can use Symbol.concat to do the same thing. Can it be more efficient? - val results = outputs.map(NDArray.concatenate(_)) - for (output <- outputs) { - output.foreach(_.dispose()) - } - results - } - - /** - * Fit the model. - * @param trainData Training data - * @param evalData Evaluation data - * @param evalMetric The evaluation metric, cannot be null - * @param epochEndCallback A callback that is invoked at end of each epoch. - * This can be used to checkpoint model each epoch. - * @param batchEndCallback A callback that is invoked at end of each batch - * For print purpose - * @param kvStoreType A string kvstore type: - * 'local' : multi-devices on a single machine, will automatically - * choose one from 'local_update_cpu', 'local_allreduce_cpu', and - * 'local_allreduce_device' - * 'dist_sync' : multi-machines with BSP - * 'dist_async' : multi-machines with partical asynchronous - * In default uses 'local', often no need to change for single machine. - * @param logger When not specified, default logger will be used. 
- * @param workLoadList The list of work load for different devices, in the same order as ctx - */ - def fit(trainData: DataIter, evalData: DataIter, evalMetric: EvalMetric, kvStoreType: String, - epochEndCallback: EpochEndCallback, batchEndCallback: BatchEndCallback, - logger: Logger, workLoadList: Seq[Float]): Unit = { - ResourceScope.using() { - // init params first to allow kv store use _argParams to decide its type - initSymbolParams(trainData) - // create kvstore - val (kvStore, updateOnKVStore) = Model.createKVStore(kvStoreType, ctx.length, _argParams) - fit(trainData, evalData, evalMetric, kvStore, updateOnKVStore, - epochEndCallback, batchEndCallback, logger, workLoadList) -// kvStore.foreach(_.dispose()) - } - } - - def fit(trainData: DataIter, evalData: DataIter, evalMetric: EvalMetric, - kvStoreType: String, epochEndCallback: EpochEndCallback, - batchEndCallback: BatchEndCallback): Unit = { - fit(trainData, evalData, evalMetric, kvStoreType, - epochEndCallback, batchEndCallback, FeedForward.logger, null) - } - - def fit(trainData: DataIter, evalData: DataIter, - evalMetric: EvalMetric, kvStoreType: String): Unit = { - fit(trainData, evalData, evalMetric, kvStoreType, - epochEndCallback = null, batchEndCallback = null) - } - - def fit(trainData: DataIter, evalData: DataIter, evalMetric: EvalMetric): Unit = { - fit(trainData, evalData, evalMetric, kvStoreType = "local") - } - - def fit(trainData: DataIter, evalData: DataIter): Unit = { - fit(trainData, evalData, new Accuracy()) - } - - def fit(trainData: DataIter, evalData: DataIter, evalMetric: EvalMetric, - kv: KVStore, - epochEndCallback: EpochEndCallback, - batchEndCallback: BatchEndCallback, logger: Logger, - workLoadList: Seq[Float]): Unit = { - // init params first to allow kv store use _argParams to decide its type - ResourceScope.using() { - initSymbolParams(trainData) - // create kvstore - val (kvStore, updateOnKVStore) = Model.createKVStore(kv) - fit(trainData, evalData, evalMetric, kvStore, updateOnKVStore, - epochEndCallback, batchEndCallback, logger, workLoadList) - } - } - - def fit(trainData: DataIter, evalData: DataIter, evalMetric: EvalMetric, - kvStore: KVStore, - epochEndCallback: EpochEndCallback, - batchEndCallback: BatchEndCallback): Unit = { - fit(trainData, evalData, evalMetric, kvStore, epochEndCallback, - batchEndCallback, FeedForward.logger, null) - } - - def fit(trainData: DataIter, evalData: DataIter, - evalMetric: EvalMetric, kvStore: KVStore): Unit = { - fit(trainData, evalData, evalMetric, kvStore, epochEndCallback = null, batchEndCallback = null) - } - - def fit(trainData: DataIter, evalData: DataIter, kvStore: KVStore): Unit = { - fit(trainData, evalData, new Accuracy(), kvStore) - } - - private def initSymbolParams(trainData: DataIter) - : (IndexedSeq[String], IndexedSeq[String], IndexedSeq[String]) = { - if (symGen != null) { - this.symbol = symGen.generate(trainData.defaultBucketKey) - checkArguments() - } - initParams(trainData.provideDataDesc ++ trainData.provideLabelDesc) - } - - private def fit(trainData: DataIter, evalData: DataIter, evalMetric: EvalMetric = new Accuracy(), - kvStore: Option[KVStore], updateOnKVStore: Boolean, - epochEndCallback: EpochEndCallback = null, - batchEndCallback: BatchEndCallback = null, logger: Logger = FeedForward.logger, - workLoadList: Seq[Float] = null): Unit = { - require(evalMetric != null, "evalMetric cannot be null") - // TODO: https://issues.apache.org/jira/browse/MXNET-1171 - // this leaks memory, initSymbolParams->initParams is already called which 
allocates - // NDArray in argParams, auxParams and here we are overwriting it by calling again. - // PhantomRef should take care of releasing this when GC is called, however we have to - // wait for the GC call to happen. - val (argNames, paramNames, auxNames) = initSymbolParams(trainData) - - // init optimizer - val batchSizeMultiplier = kvStore.map { kv => - if (kv.`type` == "dist_sync") { - kv.numWorkers - } else { - 1 - } - } - val batchSize = trainData.batchSize * batchSizeMultiplier.getOrElse(1) - this.optimizer.setArgNames(argNames) - this.optimizer.setRescaleGrad(1f / batchSize) - this.optimizer.setSymbol(this.symbol) - val paramIdx2Name = - if (updateOnKVStore) { - paramNames.zipWithIndex.map { case (name, idx) => idx -> name }.toMap - } else { - paramNames.zipWithIndex.flatMap { case (name, idx) => - (0 until ctx.length).map(k => (idx * ctx.length + k) -> name).toMap - }.toMap - } - this.optimizer.setIdx2Name(paramIdx2Name) - - logger.debug("Start training on multi-device") - Model.trainMultiDevice( - symbol, ctx, argNames, paramNames, auxNames, - _argParams, _auxParams, - this.beginEpoch, this.numEpoch, - this.epochSize, this.optimizer, - kvStore, updateOnKVStore, - trainData = trainData, evalData = Option(evalData), - evalMetric = evalMetric, - epochEndCallback = Option(epochEndCallback), - batchEndCallback = Option(batchEndCallback), - workLoadList = workLoadList, - monitor = monitor, - symGen = symGen) - } - - /** - * Checkpoint the model checkpoint into file. - * You can also use pickle to do the job if you only work on python. - * The advantage of load/save is the file is language agnostic. - * This means the file saved using save can be loaded by other language binding of mxnet. - * You also get the benefit being able to directly load/save from cloud storage(S3, HDFS) - * @param prefix Prefix of model name. - * @see FeedForward.load : the method to load the model back. - * @note - * - ``prefix-symbol.json`` will be saved for symbol. - * - ``prefix-epoch.params`` will be saved for parameters. - */ - def save(prefix: String, epoch: Int = this.numEpoch): Unit = { - require(epoch >= 0, s"epoch must be >=0 (got $epoch)") - Model.saveCheckpoint(prefix, epoch, this.symbol, getArgParams, getAuxParams) - } - - /** - * Serialize the model to Java byte array - * @return serialized model bytes - */ - def serialize(): Array[Byte] = { - Model.serialize(this.symbol, getArgParams, getAuxParams) - } - - // hack to make the FeedForward.scala work with ResourceScope and - // automatically release _argParms and _auxParms - override def nativeAddress: CPtrAddress = hashCode() - - override def nativeDeAllocator: CPtrAddress => Int = FeedForward.doNothingDeAllocator - - override val ref: NativeResourceRef = super.register() - - override val bytesAllocated: Long = 0L - - override def dispose(): Unit = { - if (!super.isDisposed) { - _argParams.foreach { case (_, param) => param.dispose() } - _auxParams.foreach { case (_, param) => param.dispose() } - } - } -} - -object FeedForward { - - private def doNothingDeAllocator(dummy: CPtrAddress): Int = 0 - - private val logger: Logger = LoggerFactory.getLogger(classOf[FeedForward]) - // Check if name is a data argument. - private def isDataArg(name: String): Boolean = { - name.endsWith("data") || name.endsWith("label") - } - - /** - * Load model checkpoint from file. - * @param prefix Prefix of model name. - * @param epoch epoch number of model we would like to load. - * @return The loaded model that can be used for prediction. 
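The checkpoint format documented above is language agnostic, so a save/load round trip plus prediction looked like the following sketch (the prefix and `testIter` are placeholders):

  model.save("myModel", epoch = 10)       // writes myModel-symbol.json and myModel-0010.params
  val restored = FeedForward.load("myModel", epoch = 10)
  val outputs: Array[NDArray] = restored.predict(testIter)
  outputs.foreach(_.dispose())            // outputs are caller-owned native memory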
- * @note - * - ``prefix-symbol.json`` will be saved for symbol. - * - ``prefix-epoch.params`` will be saved for parameters. - */ - def load(prefix: String, epoch: Int, - ctx: Array[Context] = Array(Context.cpu()), - numEpoch: Int = -1, - epochSize: Int = -1, - optimizer: Optimizer = new SGD(), - initializer: Initializer = new Uniform(0.01f), - batchSize: Int = 128, - allowExtraParams: Boolean = false): FeedForward = { - val (symbol, argParams, auxParams) = Model.loadCheckpoint(prefix, epoch) - new FeedForward(symbol, ctx = ctx, - argParams = argParams, auxParams = auxParams, - beginEpoch = epoch, numEpoch = numEpoch, - epochSize = epochSize, optimizer = optimizer, - initializer = initializer, batchSize = batchSize, - allowExtraParams = allowExtraParams) - } - - /** - * Deserialize bytes to model. - * @param bytes serialized model bytes. - * @return The loaded model that can be used for prediction. - */ - def deserialize(bytes: Array[Byte], epoch: Int = 0, - ctx: Array[Context] = Array(Context.cpu()), - numEpoch: Int = -1, - epochSize: Int = -1, - optimizer: Optimizer = new SGD(), - initializer: Initializer = new Uniform(0.01f), - batchSize: Int = 128, - allowExtraParams: Boolean = false): FeedForward = { - val (symbol, argParams, auxParams) = Model.deserialize(bytes) - new FeedForward(symbol, ctx = ctx, - argParams = argParams, auxParams = auxParams, - beginEpoch = epoch, numEpoch = numEpoch, - epochSize = epochSize, optimizer = optimizer, - initializer = initializer, batchSize = batchSize, - allowExtraParams = allowExtraParams) - } - - def newBuilder(modelDef: Symbol): Builder = new Builder(modelDef, null) - def newBuilder(symGen: SymbolGenerator): Builder = new Builder(null, symGen) - - class Builder private[FeedForward](private val modelDef: Symbol, - private val symGen: SymbolGenerator) { - private var ctx: Array[Context] = Array(Context.cpu()) - private var numEpoch: Int = -1 - private var epochSize: Int = -1 - private var optimizer: Optimizer = new SGD() - private var initializer: Initializer = new Uniform(0.01f) - private var batchSize: Int = 128 - private var argParams: Map[String, NDArray] = null - private var auxParams: Map[String, NDArray] = null - private var allowExtraParams: Boolean = false - private var beginEpoch: Int = 0 - private var trainData: DataIter = null - private var evalData: DataIter = null - private var evalMetric: EvalMetric = new Accuracy() - - private var kvStoreInst: KVStore = null - private var kvStoreType: String = "local" - - private var epochEndCallback: EpochEndCallback = null - private var batchEndCallback: BatchEndCallback = null - private var logger: Logger = FeedForward.logger - private var workLoadList: Seq[Float] = null - - /** - * Set ctx The device context of training and prediction. - * To use multi GPU training, pass in a list of gpu contexts. - */ - def setContext(ctx: Array[Context]): Builder = { - this.ctx = ctx - this - } - - /** - * Set number of training epochs - */ - def setNumEpoch(numEpoch: Int): Builder = { - this.numEpoch = numEpoch - this - } - - /** - * Set number of batches in a epoch. In default, it is set to - * ceil(num_train_examples / batch_size) - */ - def setEpochSize(epochSize: Int): Builder = { - this.epochSize = epochSize - this - } - - /** - * Set optimizer for training. Default SGD. - */ - def setOptimizer(opt: Optimizer): Builder = { - this.optimizer = opt - this - } - - /** - * Set the initialization scheme used. Default Uniform(0.01f). 
- */ - def setInitializer(initializer: Initializer): Builder = { - this.initializer = initializer - this - } - - /** - * Set the batch size of training data. - */ - def setBatchSize(batchSize: Int): Builder = { - this.batchSize = batchSize - this - } - - /** - * Set the model parameter, dict of name to NDArray of net's weights. - */ - def setArgParams(argParams: Map[String, NDArray]): Builder = { - this.argParams = argParams - this - } - - /** - * Set the model parameter, dict of name to NDArray of net's auxiliary states. - */ - def setAuxParams(auxParams: Map[String, NDArray]): Builder = { - this.auxParams = auxParams - this - } - - /** - * Whether to allow extra parameters that are not needed by the symbol - * to be passed via aux_params and arg_params. - * If true, no error is thrown when aux_params and arg_params - * contain more parameters than the symbol needs. - */ - def setAllowExtraParams(allowExtraParams: Boolean): Builder = { - this.allowExtraParams = allowExtraParams - this - } - - /** - * Set the beginning training epoch. - */ - def setBeginEpoch(beginEpoch: Int): Builder = { - this.beginEpoch = beginEpoch - this - } - - /** - * Set the training data - */ - def setTrainData(trainData: DataIter): Builder = { - this.trainData = trainData - this - } - - /** - * Set the evaluation data - */ - def setEvalData(evalData: DataIter): Builder = { - this.evalData = evalData - this - } - - /** - * Set the evaluation metric. Default: Accuracy() - */ - def setEvalMetric(metric: EvalMetric): Builder = { - this.evalMetric = metric - this - } - - /** - * This will take precedence over the setKVStore(String) version. - */ - def setKVStore(kv: KVStore): Builder = { - this.kvStoreInst = kv - this - } - - /** - * A string kvstore type: - * 'local' : multi-devices on a single machine, will automatically - * choose one from 'local_update_cpu', 'local_allreduce_cpu', and - * 'local_allreduce_device' - * 'dist_sync' : multi-machines with BSP - * 'dist_async' : multi-machines with partial asynchrony - * Defaults to 'local'; usually there is no need to change it on a single machine. - */ - def setKVStore(kv: String): Builder = { - this.kvStoreType = kv - this - } - - /** - * A callback that is invoked at the end of each epoch. - * This can be used to checkpoint the model each epoch. - */ - def setEpochEndCallback(epochEndCallback: EpochEndCallback): Builder = { - this.epochEndCallback = epochEndCallback - this - } - - /** - * A callback that is invoked at the end of each batch, - * e.g. for printing progress. - */ - def setBatchEndCallback(batchEndCallback: BatchEndCallback): Builder = { - this.batchEndCallback = batchEndCallback - this - } - - /** - * When not specified, default logger will be used.
- */ - def setLogger(logger: Logger): Builder = { - this.logger = logger - this - } - - /** - * Set the list of work load for different devices, in the same order as ctx - */ - def setWorkLoadList(workLoadList: Seq[Float]): Builder = { - this.workLoadList = workLoadList - this - } - - /** - * Construct the FeedForward model and fit on the input training data - * @return the trained model - */ - def build(): FeedForward = { - require(trainData != null, "Training data missing") - val model = new FeedForward( - modelDef, symGen, ctx, numEpoch, epochSize, - optimizer, initializer, batchSize, - argParams, auxParams, allowExtraParams, beginEpoch) - if (kvStoreInst == null) { - model.fit(trainData, evalData, evalMetric, kvStoreType, - epochEndCallback, batchEndCallback, logger, workLoadList) - } else { - model.fit(trainData, evalData, evalMetric, kvStoreInst, - epochEndCallback, batchEndCallback, logger, workLoadList) - } - model - } - - /** - * Construct the FeedForward model but do NOT train - * @return the un-trained model - */ - def setup(): FeedForward = { - new FeedForward( - modelDef, symGen, ctx, numEpoch, epochSize, - optimizer, initializer, batchSize, - argParams, auxParams, allowExtraParams, beginEpoch) - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/IO.scala b/scala-package/core/src/main/scala/org/apache/mxnet/IO.scala deleted file mode 100644 index 1db6d2a6e953..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/IO.scala +++ /dev/null @@ -1,435 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
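Before deletion, the fluent Builder gave Java-friendly access to fit(); a sketch, assuming `net`, `trainIter`, and `valIter` exist:

  val trained = FeedForward.newBuilder(net)
    .setContext(Array(Context.cpu()))
    .setNumEpoch(10)
    .setTrainData(trainIter)
    .setEvalData(valIter)
    .setEvalMetric(new Accuracy())
    .build()            // trains immediately; use setup() to construct without training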
- */ - -package org.apache.mxnet - -import org.apache.mxnet.Base._ -import org.apache.mxnet.DType.DType -import org.apache.mxnet.io.{MXDataIter, MXDataPack} -import org.slf4j.LoggerFactory - -import scala.annotation.varargs -import scala.collection.immutable.ListMap -import scala.collection.mutable.ListBuffer -import scala.language.implicitConversions -/** - * IO iterators for loading training & validation data - */ -object IO { - type IterCreateFunc = (Map[String, String]) => DataIter - type PackCreateFunc = (Map[String, String]) => DataPack - - private val logger = LoggerFactory.getLogger(classOf[DataIter]) - private val iterCreateFuncs: Map[String, IterCreateFunc] = initIOModule() - - def MNISTIter: IterCreateFunc = iterCreateFuncs("MNISTIter") - - def ImageRecordIter: IterCreateFunc = iterCreateFuncs("ImageRecordIter") - - def CSVIter: IterCreateFunc = iterCreateFuncs("CSVIter") - - def MNISTPack: PackCreateFunc = createMXDataPack("MNISTIter") - - def ImageRecodePack: PackCreateFunc = createMXDataPack("ImageRecordIter") - - def CSVPack: PackCreateFunc = createMXDataPack("CSVIter") - - - /** - * create iterator via iterName and params - * @param iterName name of iterator; "MNISTIter" or "ImageRecordIter" - * @param params parameters for create iterator - * @return created data iterator - */ - def createIterator(iterName: String, params: Map[String, String]): DataIter = { - iterCreateFuncs(iterName)(params) - } - - /** - * create dataPack for iterator via itername and params - * @param iterName name of iterator: "MNISTIter" or "ImageRecordIter" - * @param params parameters for create iterator - * @return created dataPack - */ - def createMXDataPack(iterName: String)(params: Map[String, String]): DataPack = { - new MXDataPack(iterName, params) - } - - /** - * initialize all IO creator Functions - * @return Map from name to iter creator function - */ - private def initIOModule(): Map[String, IterCreateFunc] = { - val IterCreators = new ListBuffer[DataIterCreator] - checkCall(_LIB.mxListDataIters(IterCreators)) - IterCreators.map(makeIOIterator).toMap - } - - private def makeIOIterator(handle: DataIterCreator): (String, IterCreateFunc) = { - val name = new RefString - val desc = new RefString - val argNames = new ListBuffer[String] - val argTypes = new ListBuffer[String] - val argDescs = new ListBuffer[String] - checkCall(_LIB.mxDataIterGetIterInfo(handle, name, desc, argNames, argTypes, argDescs)) - val paramStr = Base.ctypes2docstring(argNames, argTypes, argDescs) - val docStr = s"${name.value}\n${desc.value}\n\n$paramStr\n" - logger.debug(docStr) - (name.value, creator(handle)) - } - - /** - * DataIter creator - * @param handle native memory ptr for the iterator - * @param params parameter passed to the iterator - * @return created DataIter - */ - private def creator(handle: DataIterCreator)( - params: Map[String, String]): DataIter = { - val out = new DataIterHandleRef - val keys = params.keys.toArray - val vals = params.values.toArray - checkCall(_LIB.mxDataIterCreateIter(handle, keys, vals, out)) - val dataName = params.getOrElse("data_name", "data") - val labelName = params.getOrElse("label_name", "label") - new MXDataIter(out.value, dataName, labelName) - } - - // Convert data into canonical form. 
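The factory functions above turned native iterator registrations into plain Scala functions; a sketch of the MNIST iterator (the file paths and parameter values are placeholders):

  val mnistIter: DataIter = IO.MNISTIter(Map(
    "image" -> "data/train-images-idx3-ubyte",    // placeholder path
    "label" -> "data/train-labels-idx1-ubyte",    // placeholder path
    "batch_size" -> "128",
    "shuffle" -> "1"))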
- private[mxnet] def initDataDesc(data: IndexedSeq[NDArray], - allowEmpty: Boolean, - defaultName: String, - defaultDType: DType, - defaultLayout: String): IndexedSeq[(DataDesc, NDArray)] = { - require(data != null, "data is required.") - require(data != IndexedSeq.empty || allowEmpty, - s"data should not be empty when allowEmpty is false") - if (data == IndexedSeq.empty) { - IndexedSeq() - } else if (data.length == 1) { - IndexedSeq((new DataDesc(defaultName, data(0).shape, - defaultDType, defaultLayout), data(0))) - } else { - data.zipWithIndex.map(item => { - (new DataDesc(defaultName + "_" + item._2, item._1.shape, - defaultDType, defaultLayout), item._1) - }).toIndexedSeq - } - } -} - -/** - * class batch of data - */ -class DataBatch(val data: IndexedSeq[NDArray], - val label: IndexedSeq[NDArray], - val index: IndexedSeq[Long], - val pad: Int, - // the key for the bucket that should be used for this batch, - // for bucketing io only - val bucketKey: AnyRef = null, - // use DataDesc to indicate the order of data/label loading - // (must match the order of input data/label) - private val providedDataDesc: IndexedSeq[DataDesc] = null, - private val providedLabelDesc: IndexedSeq[DataDesc] = null) { - // TODO: change the data/label type into IndexedSeq[(NDArray, DataDesc)] - // However, since the data and label can be accessed publicly (no getter and setter) - // the change on this will break BC - - @deprecated("Use provideDataDesc and provideDataLabel instead", "1.3.0") - def this(data: IndexedSeq[NDArray], - label: IndexedSeq[NDArray], - index: IndexedSeq[Long], - pad: Int, - // the key for the bucket that should be used for this batch, - // for bucketing io only - bucketKey: AnyRef, - // use ListMap to indicate the order of data/label loading - // (must match the order of input data/label) - providedData: ListMap[String, Shape]) { - this(data, label, index, pad, bucketKey, - DataDesc.ListMap2Descs(providedData)) - } - - @deprecated("Use provideDataDesc and provideDataLabel instead", "1.3.0") - def this(data: IndexedSeq[NDArray], - label: IndexedSeq[NDArray], - index: IndexedSeq[Long], - pad: Int, - // the key for the bucket that should be used for this batch, - // for bucketing io only - bucketKey: AnyRef, - // use ListMap to indicate the order of data/label loading - // (must match the order of input data/label) - providedData: ListMap[String, Shape], - providedLabel: ListMap[String, Shape]) { - this(data, label, index, pad, bucketKey, - DataDesc.ListMap2Descs(providedData), DataDesc.ListMap2Descs(providedLabel)) - } - - /** - * Dispose its data and labels - * The object shall never be used after it is disposed. 
- */ - def dispose(): Unit = { - if (data != null) { - data.foreach(arr => if (arr != null) arr.dispose()) - } - if (label != null) { - label.foreach(arr => if (arr != null) arr.dispose()) - } - } - - // The name and shape of data - @deprecated("Use provideDataDesc instead", "1.3.0") - def provideData: ListMap[String, Shape] = { - var temp = ListMap[String, Shape]() - if (providedDataDesc == null) null - else { - providedDataDesc.foreach(ele => temp = temp + (ele.name -> ele.shape)) - temp - } - } - - // The name and shape of label - @deprecated("Use provideLabelDesc instead", "1.3.0") - def provideLabel: ListMap[String, Shape] = { - var temp = ListMap[String, Shape]() - if (providedLabelDesc == null) null - else { - providedLabelDesc.foreach(ele => temp = temp + (ele.name -> ele.shape)) - temp - } - } - - def provideDataDesc: IndexedSeq[DataDesc] = providedDataDesc - - def provideLabelDesc: IndexedSeq[DataDesc] = providedLabelDesc - -} - -object DataBatch { - /** - * Builder class for DataBatch. - */ - class Builder() { - private var data: IndexedSeq[NDArray] = null - private var label: IndexedSeq[NDArray] = null - private var index: IndexedSeq[Long] = null - private var pad: Int = 0 - private var bucketKey: AnyRef = null - private var dataDesc: IndexedSeq[DataDesc] = null - private var labelDesc: IndexedSeq[DataDesc] = null - - /** - * Set the input data. - * @param data a list of data. - * @return this. - */ - @varargs def setData(data: NDArray*): Builder = { - this.data = data.toIndexedSeq - this - } - - /** - * Set the labels in the same order of data. - * @param label a list of labels. - * @return this. - */ - @varargs def setLabel(label: NDArray*): Builder = { - this.label = label.toIndexedSeq - this - } - - /** - * Set the example indices in this batch. - * @param index indices in the same order of data. - * @return this. - */ - @varargs def setIndex(index: Long*): Builder = { - this.index = index.toIndexedSeq - this - } - - /** - * Set the pad. - * @param pad The number of examples padded at the end of a batch. It is used when the - * total number of examples read is not divisible by the `batch_size`. - * These extra padded examples are ignored in prediction. - * @return this - */ - def setPad(pad: Int): Builder = { - this.pad = pad - this - } - - /** - * Set the bucket key, used for bucketing module. - * @param bucketKey the bucket key related to this batch. - * @return this. - */ - def setBucketKey(bucketKey: AnyRef): Builder = { - this.bucketKey = bucketKey - this - } - - /** - * Provide the shape of a data. - * @param dataDesc DataDescriptor - * @return this. - */ - def provideDataDesc(dataDesc: IndexedSeq[DataDesc]): Builder = { - this.dataDesc = dataDesc - this - } - - /** - * Provide the shape of a label. - * @param labelDesc LabelDescriptor - * @return this. - */ - def provideLabelDesc(labelDesc: IndexedSeq[DataDesc]): Builder = { - this.labelDesc = labelDesc - this - } - - def build(): DataBatch = { - require(data != null, "data is required.") - new DataBatch(data, label, index, pad, bucketKey, dataDesc, labelDesc) - } - } -} - -/** - * DataIter object in mxnet. 
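A sketch of the DataBatch.Builder deleted above (the NDArrays `d` and `l` are placeholders):

  val batch = new DataBatch.Builder()
    .setData(d)
    .setLabel(l)
    .setPad(0)
    .build()
  // the consumer is responsible for calling batch.dispose() afterwards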
- */ -abstract class DataIter extends Iterator[DataBatch] { - /** - * reset the iterator - */ - def reset(): Unit - - def batchSize: Int - - /** - * get next data batch from iterator - * @return - */ - @throws(classOf[NoSuchElementException]) - def next(): DataBatch = { - new DataBatch(getData(), getLabel(), getIndex(), getPad()) - } - - /** - * get data of current batch - * @return the data of current batch - */ - def getData(): IndexedSeq[NDArray] - - /** - * Get label of current batch - * @return the label of current batch - */ - def getLabel(): IndexedSeq[NDArray] - - /** - * Get the number of padding examples - * in current batch - * @return number of padding examples in current batch - */ - def getPad(): Int - - /** - * Get the index of current batch - * @return the index of current batch - */ - def getIndex(): IndexedSeq[Long] - - // The name and shape of data provided by this iterator - @deprecated("Use provideDataDesc instead", "1.3.0") - def provideData: ListMap[String, Shape] - - // The name and shape of label provided by this iterator - @deprecated("Use provideLabelDesc instead", "1.3.0") - def provideLabel: ListMap[String, Shape] - - // Provide type:DataDesc of the data - def provideDataDesc: IndexedSeq[DataDesc] - - // Provide type:DataDesc of the label - def provideLabelDesc: IndexedSeq[DataDesc] - - // For bucketing io only - // The bucket key for the default symbol. - def defaultBucketKey: AnyRef = null -} - -/** - * pack of DataIter, use as Iterable class - */ -abstract class DataPack() extends Iterable[DataBatch] { - /** - * get data iterator - * @return DataIter - */ - def iterator: DataIter -} - -// Named data desc description contains name, shape, type and other extended attributes. -case class DataDesc(name: String, shape: Shape, - dtype: DType = DType.Float32, layout: String = Layout.UNDEFINED) { - require(layout == Layout.UNDEFINED || shape.length == layout.length, - s"number of dimensions in $shape should match the layout $layout") - - override def toString(): String = { - s"DataDesc[$name,$shape,$dtype,$layout]" - } -} - -object DataDesc { - - private val logger = LoggerFactory.getLogger(classOf[DataDesc]) - /** - * Get the dimension that corresponds to the batch size. - * @param layout layout string. For example, "NCHW". - * @return An axis indicating the batch_size dimension. When data-parallelism is used, - * the data will be automatically split and concatenate along the batch_size dimension. - * Axis can be -1, which means the whole array will be copied - * for each data-parallelism device. 
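Since DataIter extends Iterator[DataBatch], as defined above, consumption followed the standard iterator pattern; a sketch with a placeholder `iter`:

  iter.reset()
  while (iter.hasNext) {
    val batch = iter.next()
    try {
      val data = batch.data(0)    // first data NDArray of the batch
      // ... feed the batch to an executor ...
    } finally {
      batch.dispose()             // batches own native memory
    }
  }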
- */ - def getBatchAxis(layout: Option[String]): Int = { - if (layout.isEmpty|| layout.get == Layout.UNDEFINED) { - logger.warn("Found Undefined Layout, will use default index 0 for batch axis") - 0 - } else { - if (layout.get.contains('N')) { - layout.get.indexOf("N") - } else { - throw new IllegalArgumentException("no Batch Axis('N') found in Layout!") - } - } - } - - @deprecated("Please use DataDesc methods instead", "1.3.0") - implicit def ListMap2Descs(shapes: ListMap[String, Shape]): IndexedSeq[DataDesc] = { - if (shapes != null) { - shapes.map { case (k, s) => new DataDesc(k, s) }.toIndexedSeq - } else { - null - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Image.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Image.scala deleted file mode 100644 index a6665f82e29b..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Image.scala +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet -// scalastyle:off -import java.awt.{BasicStroke, Color, Graphics2D} -import java.awt.image.BufferedImage -// scalastyle:on -import java.io.InputStream - -import scala.collection.mutable -import scala.collection.mutable.{ArrayBuffer, ListBuffer} - -/** - * Image API of Scala package - * enable OpenCV feature - */ -object Image { - - /** - * Decode image with OpenCV. - * Note: return image in RGB by default, instead of OpenCV's default BGR. - * @param buf Buffer containing binary encoded image - * @param flag Convert decoded image to grayscale (0) or color (1). - * @param to_rgb Whether to convert decoded image - * to mxnet's default RGB format (instead of opencv's default BGR). - * @param out NDArray to store the output - * @return NDArray in HWC format with DType [[DType.UInt8]] - */ - def imDecode(buf: Array[Byte], flag: Int, - to_rgb: Boolean, - out: Option[NDArray]): NDArray = { - val nd = NDArray.array(buf.map( x => (x & 0xFF).toFloat), Shape(buf.length)) - val byteND = NDArray.api.cast(nd, "uint8") - val args : ListBuffer[Any] = ListBuffer() - val map : mutable.Map[String, Any] = mutable.Map() - args += byteND - map("flag") = flag - map("to_rgb") = to_rgb - if (out.isDefined) map("out") = out.get - NDArray.genericNDArrayFunctionInvoke("_cvimdecode", args, map.toMap) - } - - /** - * Same imageDecode with InputStream - * @param inputStream the inputStream of the image - * @param flag Convert decoded image to grayscale (0) or color (1). - * @param to_rgb Whether to convert decoded image - * to mxnet's default RGB format (instead of opencv's default BGR). 
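A short sketch of the getBatchAxis helper deleted just above:

  DataDesc.getBatchAxis(Some("NCHW"))   // 0: 'N' is the first dimension
  DataDesc.getBatchAxis(Some("TNC"))    // 1: 'N' is the second dimension
  DataDesc.getBatchAxis(None)           // 0, with a warning about the undefined layout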
- * @param out NDArray to store the output - * @return NDArray in HWC format with DType [[DType.UInt8]] - */ - def imDecode(inputStream: InputStream, flag: Int = 1, - to_rgb: Boolean = true, - out: Option[NDArray] = None): NDArray = { - val buffer = new Array[Byte](2048) - val arrBuffer = ArrayBuffer[Byte]() - var length = 0 - while (length != -1) { - length = inputStream.read(buffer) - if (length != -1) arrBuffer ++= buffer.slice(0, length) - } - imDecode(arrBuffer.toArray, flag, to_rgb, out) - } - - /** - * Read and decode image with OpenCV. - * Note: return image in RGB by default, instead of OpenCV's default BGR. - * @param filename Name of the image file to be loaded. - * @param flag Convert decoded image to grayscale (0) or color (1). - * @param to_rgb Whether to convert decoded image to mxnet's default RGB format - * (instead of opencv's default BGR). - * @param out NDArray to store the output - * @return org.apache.mxnet.NDArray in HWC format with DType [[DType.UInt8]] - */ - def imRead(filename: String, flag: Option[Int] = None, - to_rgb: Option[Boolean] = None, - out: Option[NDArray] = None): NDArray = { - val args : ListBuffer[Any] = ListBuffer() - val map : mutable.Map[String, Any] = mutable.Map() - map("filename") = filename - if (flag.isDefined) map("flag") = flag.get - if (to_rgb.isDefined) map("to_rgb") = to_rgb.get - if (out.isDefined) map("out") = out.get - NDArray.genericNDArrayFunctionInvoke("_cvimread", args, map.toMap) - } - - /** - * Resize image with OpenCV. - * @param src source image in NDArray - * @param w Width of resized image. - * @param h Height of resized image. - * @param interp Interpolation method (default=cv2.INTER_LINEAR). - * @param out NDArray to store the output - * @return org.apache.mxnet.NDArray - */ - def imResize(src: org.apache.mxnet.NDArray, w: Int, h: Int, - interp: Option[Int] = None, - out: Option[NDArray] = None): NDArray = { - val args : ListBuffer[Any] = ListBuffer() - val map : mutable.Map[String, Any] = mutable.Map() - args += src - map("w") = w - map("h") = h - if (interp.isDefined) map("interp") = interp.get - if (out.isDefined) map("out") = out.get - NDArray.genericNDArrayFunctionInvoke("_cvimresize", args, map.toMap) - } - - /** - * Pad image border with OpenCV. - * @param src source image - * @param top Top margin. - * @param bot Bottom margin. - * @param left Left margin. - * @param right Right margin. - * @param typeOf Filling type (default=cv2.BORDER_CONSTANT). - * @param value (Deprecated! Use ``values`` instead.) Fill with single value. - * @param values Fill with value(RGB[A] or gray), up to 4 channels. 
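Taken together, the removed helpers formed a small OpenCV-backed image pipeline; a sketch (the file name and crop box are placeholders):

  val img = Image.imRead("input.jpg")                      // uint8 NDArray in HWC, RGB
  val resized = Image.imResize(img, 224, 224)
  val cropped = Image.fixedCrop(resized, 16, 16, 192, 192)
  val rendered = Image.toImage(cropped)                    // java.awt.image.BufferedImage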
- * @param out NDArray to store the output - * @return org.apache.mxnet.NDArray - */ - def copyMakeBorder(src: org.apache.mxnet.NDArray, top: Int, bot: Int, - left: Int, right: Int, typeOf: Option[Int] = None, - value: Option[Double] = None, values: Option[Any] = None, - out: Option[NDArray] = None): NDArray = { - val args : ListBuffer[Any] = ListBuffer() - val map : mutable.Map[String, Any] = mutable.Map() - args += src - map("top") = top - map("bot") = bot - map("left") = left - map("right") = right - if (typeOf.isDefined) map("type") = typeOf.get - if (value.isDefined) map("value") = value.get - if (values.isDefined) map("values") = values.get - if (out.isDefined) map("out") = out.get - NDArray.genericNDArrayFunctionInvoke("_cvcopyMakeBorder", args, map.toMap) - } - - /** - * Do a fixed crop on the image - * @param src Src image in NDArray - * @param x0 starting x point - * @param y0 starting y point - * @param w width of the image - * @param h height of the image - * @return cropped NDArray - */ - def fixedCrop(src: NDArray, x0: Int, y0: Int, w: Int, h: Int): NDArray = { - NDArray.api.crop(src, Shape(y0, x0, 0), Shape(y0 + h, x0 + w, src.shape.get(2))) - } - - /** - * Convert a NDArray image to a real image - * The time cost will increase if the image resolution is big - * @param src Source image file in RGB - * @return Buffered Image - */ - def toImage(src: NDArray): BufferedImage = { - require(src.dtype == DType.UInt8, "The input NDArray must be bytes") - require(src.shape.length == 3, "The input should contains height, width and channel") - require(src.shape(2) == 3, "There should be three channels: RGB") - val height = src.shape.get(0) - val width = src.shape.get(1) - val img = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB) - val arr = src.toArray - (0 until height).par.foreach(r => { - (0 until width).par.foreach(c => { - // NDArray in RGB - val cellIndex = r * width * 3 + c * 3 - val red = arr(cellIndex).toByte & 0xFF - val green = arr(cellIndex + 1).toByte & 0xFF - val blue = arr(cellIndex + 2).toByte & 0xFF - val rgb = (red << 16) | (green << 8) | blue - img.setRGB(c, r, rgb) - }) - }) - img - } - - /** - * Helper function to generate ramdom colors - * @param transparency The transparency level - * @return Color - */ - private def randomColor(transparency: Option[Float] = Some(1.0f)) : Color = { - new Color( - Math.random().toFloat, Math.random().toFloat, Math.random().toFloat, - transparency.get - ) - } - - /** - * Method to draw bounding boxes for an image - * @param src Source of the buffered image - * @param coordinate Contains Map of xmin, xmax, ymin, ymax - * corresponding to top-left and down-right points - * @param names The name set of the bounding box - * @param stroke Thickness of the bounding box - * @param fontSizeMult Font size multiplier - * @param transparency Transparency of the bounding box - */ - def drawBoundingBox(src: BufferedImage, coordinate: Array[Map[String, Int]], - names: Option[Array[String]] = None, - stroke : Option[Int] = Some(3), - fontSizeMult : Option[Float] = Some(1.0f), - transparency: Option[Float] = Some(1.0f)): Unit = { - val g2d : Graphics2D = src.createGraphics() - g2d.setStroke(new BasicStroke(stroke.get)) - // Increase the size of font - val currentFont = g2d.getFont - val newFont = currentFont.deriveFont(currentFont.getSize * fontSizeMult.get) - g2d.setFont(newFont) - // Get font metrics to draw the font box - val fm = g2d.getFontMetrics(newFont) - for (idx <- coordinate.indices) { - val map = coordinate(idx) - 
g2d.setColor(randomColor(transparency).darker()) - g2d.drawRect(map("xmin"), map("ymin"), map("xmax") - map("xmin"), map("ymax") - map("ymin")) - // Write the name of the bounding box - if (names.isDefined) { - val x = map("xmin") - stroke.get - val y = map("ymin") - val h = fm.getHeight - val w = fm.charsWidth(names.get(idx).toCharArray, 0, names.get(idx).length()) - g2d.fillRect(x, y - h, w, h) - g2d.setColor(Color.WHITE) - g2d.drawString(names.get(idx), x, y) - } - } - g2d.dispose() - } - -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Initializer.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Initializer.scala deleted file mode 100644 index 8531285ba60a..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Initializer.scala +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -/** - * - * Base class for Initializer. - */ -abstract class Initializer { - - /** - * Initialize an Initializer - * - * @param name name of corrosponding ndarray - * @param arr ndarray to be Initialized - */ - def apply(name: String, arr: NDArray): Unit = { - - if (name.startsWith("upsampling")) { - initBilinear(name, arr) - } else if (name.endsWith("bias")) { - initBias(name, arr) - } else if (name.endsWith("gamma")) { - initGamma(name, arr) - } else if (name.endsWith("beta")) { - initBeta(name, arr) - } else if (name.endsWith("weight")) { - initWeight(name, arr) - } else if (name.endsWith("moving_mean")) { - initZero(name, arr) - } else if (name.endsWith("moving_var")) { - initZero(name, arr) - } else if (name.endsWith("moving_avg")) { - initZero(name, arr) - } else { - initDefault(name, arr) - } - } - - protected def initBilinear(name: String, arr: NDArray): Unit = { - val weight = Array.fill[Float](arr.size)(0.0f) - val shape = arr.shape - val f = shape(3) / 2.0f - val c = (2 * f - 1 - f % 2) / (2.0f * f) - - (0 until arr.size).foreach { i => - val x = i % shape(3) - val y = (i / shape(3)) % shape(2) - weight(i) = (1 - math.abs(x / f - c)) * (1 - math.abs(y / f - c)) - } - - arr.set(NDArray.array(weight, shape)) - } - - protected def initZero(name: String, arr: NDArray): Unit = { - arr.set(0f) - } - - protected def initBias(name: String, arr: NDArray): Unit = { - arr.set(0f) - } - - protected def initGamma(name: String, arr: NDArray): Unit = { - arr.set(1f) - } - - protected def initBeta(name: String, arr: NDArray): Unit = { - arr.set(0f) - } - - protected def initWeight(name: String, arr: NDArray): Unit - - protected def initDefault(name: String, arr: NDArray): Unit = { - throw new IllegalArgumentException(s"Unknown initialization pattern for $name.") - } -} - -/** - * Initialize the weight with mixed Initializer - * - * @param patterns List of regular 
expression patterns to match parameter names. - * @param initializers List of Initializer corresponding to the patterns - */ -class Mixed(protected val patterns: List[String], - protected val initializers: List[Initializer]) extends Initializer { - require(patterns.length == initializers.length, - "Should provide a pattern for each initializer") - private val map = patterns.map(_.r).zip(initializers) - - override def apply(name: String, arr: NDArray): Unit = { - val matchR = map.filter { case (prog, init) => prog.findFirstIn(name) != None } - if (matchR.length == 0) { - throw new IllegalArgumentException( - s"Parameter $name did not match any pattern. Consider " + - "adding a \".*\" pattern at the end with a default Initializer.") - } else matchR(0)._2(name, arr) - } - - override def initWeight(name: String, arr: NDArray): Unit = {} -} - -/** - * Initialize the weight with uniform [-scale, scale] - * - * @param scale The scale of the uniform distribution - */ -class Uniform(protected val scale: Float = 0.07f) extends Initializer { - override def initWeight(name: String, arr: NDArray): Unit = { - Random.uniform(-scale, scale, out = arr) - } -} - - -/** - * Initialize the weight with normal(0, sigma) - * - * @param sigma Standard deviation for the Gaussian distribution. - */ -class Normal(protected val sigma: Float = 0.01f) extends Initializer { - override def initWeight(name: String, arr: NDArray): Unit = { - Random.normal(0, sigma, out = arr) - } -} - - -/** - * Initialize the weight with the Xavier or a similar initialization scheme. - * - * @param rndType Options are: "gaussian" or "uniform" - * @param factorType Options are: "avg", "in", "out" - * @param magnitude Scale of the random number range - */ -class Xavier(protected val rndType: String = "uniform", - protected val factorType: String = "avg", - protected val magnitude: Float = 3) extends Initializer { - - override def initWeight(name: String, arr: NDArray): Unit = { - val shape = arr.shape - val fanIn = shape.slice(1, shape.length).product - val fanOut = shape(0) - var factor = 1f - - factor = factorType match { - case "avg" => (fanIn + fanOut) / 2f - case "in" => fanIn - case "out" => fanOut - case _ => throw new IllegalArgumentException("Incorrect factor type") - } - val scale = math.sqrt(magnitude / factor).toFloat - - rndType match { - case "uniform" => Random.uniform(-scale, scale, out = arr) - case "gaussian" => Random.normal(0, scale, out = arr) - case _ => throw new IllegalArgumentException("Unknown random type") - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/KVStore.scala b/scala-package/core/src/main/scala/org/apache/mxnet/KVStore.scala deleted file mode 100644 index b2d4349b4f64..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/KVStore.scala +++ /dev/null @@ -1,298 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
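For context, the initializers deleted above composed as follows; a sketch using a placeholder weight array:

  val w = NDArray.zeros(Shape(256, 128))
  val xavier = new Xavier(rndType = "gaussian", factorType = "in", magnitude = 2f)
  xavier("fc1_weight", w)     // name ends with "weight", so initWeight is dispatched
  // or dispatch by name pattern, with Uniform as the catch-all default:
  val mixed = new Mixed(List("fc.*_weight", ".*"), List(new Normal(0.01f), new Uniform(0.07f)))
  mixed("fc1_weight", w)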
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import java.io._ - -import org.apache.mxnet.Base._ -import org.slf4j.{Logger, LoggerFactory} - -/** - * Key value store interface of MXNet for parameter synchronization. - * @author Yizhi Liu - */ -object KVStore { - - // group id of scheduler/server/worker - val GROUP_NODE_SCHEDULER = 1 - val GROUP_NODE_SERVER = 2 - val GROUP_NODE_WORKER = 4 - - /** - * Create a new KVStore.
- * - * WARNING: it is your responsibility to clear this object through dispose(). - * - * - * @param name : {'local', 'dist'} - * The type of KVStore - * - local works for multiple devices on a single machine (single process) - * - dist works for multiple machines (multiple processes) - * @return The created KVStore - */ - def create(name: String = "local"): KVStore = { - val handle = new KVStoreHandleRef - checkCall(_LIB.mxKVStoreCreate(name, handle)) - new KVStore(handle.value) - } -} - -class KVStore(private[mxnet] val handle: KVStoreHandle) extends NativeResource { - private val logger: Logger = LoggerFactory.getLogger(classOf[KVStore]) - private var updaterFunc: MXKVStoreUpdater = null - - override def nativeAddress: CPtrAddress = handle - - override def nativeDeAllocator: CPtrAddress => MXUint = _LIB.mxKVStoreFree - - override val ref: NativeResourceRef = super.register() - - override val bytesAllocated: Long = 0L - - /** - * Initialize a single or a sequence of key-value pairs into the store. - * For each key, one must init it before push and pull. - * Only worker 0's (rank == 0) data are used. - * This function returns after data have been initialized successfully. - * - * @param keys The keys. - * @param values The values. - */ - def init(keys: Array[String], values: Array[NDArray]): Unit = { - require(keys.length == values.length, "len(keys) != len(values)") - val valuePtrs = values.map(_.handle) - checkCall(_LIB.mxKVStoreInitEx(handle, keys.length, keys, valuePtrs)) - } - - def init(key: String, value: NDArray): Unit = { - init(Array(key), Array(value)) - } - - /** - * Push a single or a sequence of key-value pairs into the store. - * Data consistency: - * 1. this function returns after adding an operator to the engine. - * 2. push is always called after all previous push and pull on the same key are finished - * 3. there is no synchronization between workers. One can use _barrier() to sync all workers - * - * @param keys Keys - * @param values The corresponding values - * @param priority - * The priority of the push operation. - * The higher the priority, the faster this action is likely - * to be executed before other push actions. - */ - def push(keys: Array[String], values: Array[NDArray], priority: Int): Unit = { - require(keys.length == values.length, "len(keys) != len(values)") - val valuePtrs = values.map(_.handle) - checkCall(_LIB.mxKVStorePushEx(handle, keys.length, keys, valuePtrs, priority)) - } - - def push(keys: Array[String], values: Array[NDArray]): Unit = push(keys, values, 0) - - def push(key: String, value: NDArray, priority: Int = 0): Unit = { - push(Array(key), Array(value), priority) - } - - def push(key: String, values: Array[NDArray], priority: Int): Unit = { - val keys = Array.fill(values.length)(key) - push(keys, values, priority) - } - - def push(key: String, values: Array[NDArray]): Unit = { - push(key, values, 0) - } - - /** - * Pull a single value or a sequence of values from the store. - * - * Data consistency: - * 1. this function returns after adding an operator to the engine. But any - * further read on out will be blocked until it is finished. - * 2. pull is always called after all previous push and pull on the same key are finished - * 3. It pulls the newest value from the store. - * @param keys Keys - * @param outs The output arrays receiving the corresponding values - * @param priority - * The priority of the pull operation. - * The higher the priority, the faster this action is likely - * to be executed before other pull actions.
- */ - def pull(keys: Array[String], outs: Array[NDArray], priority: Int): Unit = { - require(keys.length == outs.length, "len(keys) != len(outs)") - val outPtrs = outs.map(_.handle) - checkCall(_LIB.mxKVStorePullEx(handle, keys.length, keys, outPtrs, priority)) - } - - def pull(keys: Array[String], outs: Array[NDArray]): Unit = pull(keys, outs, 0) - - def pull(key: String, out: NDArray, priority: Int = 0): Unit = { - pull(Array(key), Array(out), priority) - } - - def pull(key: String, outs: Array[NDArray], priority: Int): Unit = { - val keys = Array.fill(outs.length)(key) - pull(keys, outs, priority) - } - - def pull(key: String, outs: Array[NDArray]): Unit = { - pull(key, outs, 0) - } - - // Get the type of this kvstore - def `type`: String = { - val kvType = new RefString - checkCall(_LIB.mxKVStoreGetType(handle, kvType)) - kvType.value - } - - /** - * Get the number of worker nodes - * @return The number of worker nodes - */ - def numWorkers: Int = { - val size = new RefInt - checkCall(_LIB.mxKVStoreGetGroupSize(handle, size)) - size.value - } - - /** - * Get the rank of this worker node - * @return The rank of this node, which is in [0, get_num_workers()) - */ - def rank: Int = { - val rank = new RefInt - checkCall(_LIB.mxKVStoreGetRank(handle, rank)) - rank.value - } - - /** - * Register an optimizer to the store - * If there are multiple machines, this process (should be a worker node) - * will pack this optimizer and send it to all servers. It returns after - * this action is done. - * - * @param optimizer the optimizer - */ - def setOptimizer(optimizer: Optimizer): Unit = { - val isWorker = new RefInt - checkCall(_LIB.mxKVStoreIsWorkerNode(isWorker)) - if (`type`.contains("dist") && isWorker.value != 0) { - val optSerialized = Serializer.getSerializer.serialize(optimizer) - val cmd = Serializer.encodeBase64String(optSerialized) - logger.debug("Send optimizer to server: {}", cmd) - sendCommandToServers(0, cmd) - } else { - setUpdater(Optimizer.getUpdater(optimizer)) - } - } - - /** - * Set a push updater into the store. - * - * This function only changes the local store. Use setOptimizer for - * multi-machines. - * - * @param updater the updater function - */ - def setUpdater(updater: MXKVStoreUpdater): Unit = { - this.updaterFunc = updater - checkCall(_LIB.mxKVStoreSetUpdater(handle, updaterFunc)) - } - - /** - * Global barrier among all worker nodes - * - * For example, assume there are n machines, we want to let machine 0 first - * init the values, and then pull the inited value to all machines. Before - * pulling, we can place a barrier to guarantee that the initialization is - * finished. 
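A single-process sketch of the init/push/pull contract documented above:

  val kv = KVStore.create("local")
  val shape = Shape(2, 3)
  kv.init("w", NDArray.ones(shape))
  kv.push("w", NDArray.ones(shape) * 2f)
  val out = NDArray.zeros(shape)
  kv.pull("w", out)    // out is written asynchronously; reads block until done
  kv.dispose()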
- */ - def barrier(): Unit = { - checkCall(_LIB.mxKVStoreBarrier(handle)) - } - - def numDeadNode(nodeId: Int): Int = { - val number = new RefInt - checkCall(_LIB.mxKVStoreGetNumDeadNode(handle, nodeId, number)) - number.value - } - - /** - * Whether to do barrier when the kvstore finalizes - * @param barrierBeforeExit - */ - def setBarrierBeforeExit(barrierBeforeExit: Boolean): Unit = { - val flag: Int = if (barrierBeforeExit) 1 else 0 - checkCall(_LIB.mxKVStoreSetBarrierBeforeExit(handle, flag)) - } - - /** - * Send a command to all server nodes - * - * Send a command to all server nodes, which will make each server node run - * KVStoreServer.controller - * - * This function returns after the command has been executed in all server nodes - * - * @param head the head of the command - * @param body the body of the command - */ - private def sendCommandToServers(head: Int, body: String): Unit = { - checkCall(_LIB.mxKVStoreSendCommmandToServers(handle, head, body)) - } - - /** - * Save optimizer (updater) state to file - * @param fname Path to output states file. - */ - def saveOptimizerStates(fname: String): Unit = { - require(updaterFunc != null, "Cannot save states for distributed training") - updaterFunc match { - case cachedStates: MXKVStoreCachedStates => - val target = new BufferedOutputStream(new FileOutputStream(fname)) - try { - target.write(cachedStates.serializeState()) - } finally { - target.close() - } - case _ => - logger.warn("Updater does not have states, skip saving to {}", fname) - } - } - - /** - * Load optimizer (updater) state from file - * @param fname Path to input states file. - */ - def loadOptimizerStates(fname: String): Unit = { - assert(updaterFunc != null, "Cannot load states for distributed training") - updaterFunc match { - case cachedStates: MXKVStoreCachedStates => - val bis = new BufferedInputStream (new FileInputStream (fname) ) - try { - val bArray = Stream.continually (bis.read).takeWhile (_ != -1).map (_.toByte).toArray - cachedStates.deserializeState(bArray) - } finally { - bis.close () - } - case _ => - logger.warn("Updater does not have states, skip loading from {}", fname) - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/KVStoreServer.scala b/scala-package/core/src/main/scala/org/apache/mxnet/KVStoreServer.scala deleted file mode 100644 index 4c5ca66ce836..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/KVStoreServer.scala +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
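For reference, a minimal sketch of how the KVStore class deleted above was typically driven from user code. The key name, shape, and the single-process "local" store type are illustrative assumptions, not taken from the removed sources:

  // Illustrative sketch: single-process "local" store.
  val kv = KVStore.create("local")
  val shape = Shape(2, 3)                  // example shape
  kv.init("w", NDArray.zeros(shape))       // every key must be initialized before push/pull
  kv.push("w", NDArray.ones(shape))        // asynchronous; engine-ordered per key
  val out = NDArray.empty(shape)
  kv.pull("w", out)                        // reads of `out` block until the pull completes
  kv.dispose()                             // per the WARNING above, dispose explicitly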
- */
-
-package org.apache.mxnet
-
-import org.apache.mxnet.Base._
-import org.slf4j.{Logger, LoggerFactory}
-
-private[mxnet] class KVStoreServer(private val kvStore: KVStore) {
-  private val logger: Logger = LoggerFactory.getLogger(classOf[KVStoreServer])
-  private val handle: KVStoreHandle = kvStore.handle
-  private val controller = new KVServerControllerCallback {
-    override def invoke(cmdId: Int, cmdBody: String): Unit = {
-      logger.debug("Receive cmdId {}, cmdBody: {}", cmdId, cmdBody)
-      if (cmdId == 0) {
-        val optimizer = Serializer.getSerializer.deserialize[Optimizer](
-          Serializer.decodeBase64String(cmdBody))
-        kvStore.setOptimizer(optimizer)
-      } else {
-        logger.warn(s"Server ${kvStore.rank}, unknown command ($cmdId, $cmdBody)")
-      }
-    }
-  }
-
-  // run the server, whose behavior is like
-  //   while receive(x):
-  //     if is_command x: controller(x)
-  //     else if is_key_value x: updater(x)
-  def run(): Unit = {
-    checkCall(_LIB.mxKVStoreRunServer(handle, controller))
-  }
-}
-
-object KVStoreServer {
-  private val logger: Logger = LoggerFactory.getLogger(classOf[KVStoreServer])
-  /**
-   * Start server/scheduler according to env variables
-   * @param dieIfOthersGoOutTimeout When this argument is set to an integer greater than 0
-   *                                (in seconds), a daemon thread will periodically check
-   *                                whether the scheduler (on the server side) or the servers
-   *                                (on the scheduler side) are dead, and if so, exit this
-   *                                process. This can be useful when running MXNet on a
-   *                                distributed data platform where you do not know which
-   *                                node your application runs on, and you want every node
-   *                                to shut down automatically once some of the others go away.
-   */
-  def start(dieIfOthersGoOutTimeout: Int = 0): Unit = {
-    val isWorker = new RefInt
-    checkCall(_LIB.mxKVStoreIsWorkerNode(isWorker))
-    require(isWorker.value == 0, "cannot start kv-store server on worker node")
-    val kvStore = KVStore.create("dist")
-    val daemonThread: Option[Thread] =
-      if (dieIfOthersGoOutTimeout > 0) {
-        val daemon = new Runnable {
-          override def run(): Unit = {
-            var running = true
-            while (running) {
-              try {
-                Thread.sleep(dieIfOthersGoOutTimeout.toLong * 1000)
-                val numDead = kvStore.numDeadNode(KVStore.GROUP_NODE_SCHEDULER
-                  + KVStore.GROUP_NODE_SERVER + KVStore.GROUP_NODE_WORKER)
-                if (numDead > 0) {
-                  logger.error(s"Detected $numDead dead node(s). Shutting down now.")
-                  System.exit(1)
-                }
-              } catch {
-                case e: InterruptedException => running = false
-              }
-            }
-          }
-        }
-        val t = new Thread(daemon)
-        t.setDaemon(true)
-        t.start()
-        Option(t)
-      } else {
-        None
-      }
-    val server = new KVStoreServer(kvStore)
-    server.run()
-    daemonThread.foreach(t => {
-      t.interrupt()
-      t.join()
-    })
-    kvStore.dispose()
-  }
-
-  def init(env: Map[String, String]): Unit = {
-    val keys = env.keys.toArray
-    val vals = env.values.toArray
-    checkCall(_LIB.mxInitPSEnv(keys, vals))
-  }
-}
-
-private[mxnet] trait KVServerControllerCallback {
-  def invoke(cmdId: Int, cmdBody: String): Unit
-}
diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/LRScheduler.scala b/scala-package/core/src/main/scala/org/apache/mxnet/LRScheduler.scala
deleted file mode 100644
index 0cab368cf51a..000000000000
--- a/scala-package/core/src/main/scala/org/apache/mxnet/LRScheduler.scala
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
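For reference, the KVStoreServer entry point removed above was bootstrapped from environment variables supplied by the ps-lite launcher. A hedged sketch of a non-worker process, where the host, port, and node counts are placeholders (the DMLC_* variable names come from the underlying ps-lite runtime):

  // Illustrative bootstrap for a server or scheduler process.
  object ServerMain {
    def main(args: Array[String]): Unit = {
      val env = Map(
        "DMLC_ROLE"         -> "server",       // or "scheduler"
        "DMLC_PS_ROOT_URI"  -> "127.0.0.1",    // placeholder scheduler address
        "DMLC_PS_ROOT_PORT" -> "9091",
        "DMLC_NUM_SERVER"   -> "1",
        "DMLC_NUM_WORKER"   -> "2")
      KVStoreServer.init(env)                            // seeds the ps-lite environment
      KVStoreServer.start(dieIfOthersGoOutTimeout = 60)  // exit if peers disappear
    }
  }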
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.mxnet
-
-import org.slf4j.LoggerFactory
-
-/**
- * Learning rate scheduler, which adaptively changes the learning rate
- * based on the training progress.
- * @author Yuan Tang
- */
-
-abstract class LRScheduler(var baseLR: Float = 0.01f) {
-  /**
-   * Base class of a learning rate scheduler
-   *
-   * The training progress is represented by `num_update`, which can be roughly
-   * viewed as the number of minibatches executed so far. Its value is
-   * non-decreasing, and increases by at most one.
-   *
-   * The exact value is the upper bound of the number of updates applied to
-   * a weight/index.
-   *
-   * @param numUpdate Int, the maximal number of updates applied to a weight.
-   */
-  def apply(numUpdate: Int): Float
-}
-
-/**
- * Class for reducing the learning rate by a factor.
- *
- * Assume the weight has been updated n times; the learning rate will then
- * be base_lr * factor^(floor(n/step))
- *
- * @param step Int, reduce the learning rate after every `step` updates
- * @param factor Float, the factor by which the learning rate is reduced
- *
- */
-class FactorScheduler(protected var step: Int, protected var factor: Float) extends LRScheduler {
-
-  protected var count: Int = 0
-  private val logger = LoggerFactory.getLogger(classOf[FactorScheduler])
-
-  require(step >= 1, "Schedule step must be greater than or equal to 1")
-  require(factor < 1.0, "Factor must be less than 1 for the learning rate to decrease")
-
-  def apply(numUpdate: Int): Float = {
-    if (numUpdate > this.count + this.step) {
-      this.count += this.step
-      this.baseLR *= this.factor
-      this.logger.info(s"Update[$numUpdate]: Change learning rate to ${this.baseLR}")
-    }
-    this.baseLR
-  }
-}
diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Layout.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Layout.scala
deleted file mode 100644
index cb75dbc40803..000000000000
--- a/scala-package/core/src/main/scala/org/apache/mxnet/Layout.scala
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
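For reference, the FactorScheduler removed above implements lr = base_lr * factor^floor(n/step), stepping the rate down at most once per call. A short worked example with illustrative values:

  val sched = new FactorScheduler(step = 1000, factor = 0.9f)
  sched.baseLR = 0.1f
  sched(500)    // returns 0.1f   -- fewer than `step` updates seen, rate unchanged
  sched(1001)   // returns 0.09f  -- one boundary crossed, lr *= factor
  sched(2001)   // returns 0.081f -- a second boundary crossed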
- */
-
-package org.apache.mxnet
-
-/**
- * Layout definition of DataDesc
- * N Batch size
- * C channels
- * H Height
- * W Width
- * T sequence length
- * __undefined__ default value of Layout
- */
-object Layout {
-  val UNDEFINED = "__undefined__"
-  val NCHW = "NCHW"
-  val NTC = "NTC"
-  val NT = "NT"
-  val N = "N"
-}
diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/LibInfo.scala b/scala-package/core/src/main/scala/org/apache/mxnet/LibInfo.scala
deleted file mode 100644
index 0ee6476be365..000000000000
--- a/scala-package/core/src/main/scala/org/apache/mxnet/LibInfo.scala
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import org.apache.mxnet.Base._
-
-import scala.collection.mutable.{ArrayBuffer, ListBuffer}
-
-/**
- * JNI functions
- */
-private[mxnet] class LibInfo {
-  @native def nativeLibInit(): Int
-  @native def mxGetLastError(): String
-  // Operators
-  @native def mxListAllOpNames(names: ListBuffer[String]): Int
-  @native def nnGetOpHandle(opName: String, opHandle: RefLong): Int
-  // NDArray
-  @native def mxImperativeInvokeEx(creator: FunctionHandle,
-                                   inputs: Array[NDArrayHandle],
-                                   outputsGiven: Array[NDArrayHandle],
-                                   outputs: ArrayBuffer[NDArrayHandle],
-                                   numParams: Int,
-                                   paramKeys: Array[String],
-                                   paramVals: Array[String],
-                                   outStype: ArrayBuffer[Int]): Int
-  @native def mxNDArrayFree(handle: NDArrayHandle): Int
-  @native def mxNDArrayCreateNone(out: NDArrayHandleRef): Int
-  @native def mxNDArrayCreateEx(shape: Array[Int],
-                                ndim: Int,
-                                devType: Int,
-                                devId: Int,
-                                delayAlloc: Int,
-                                dtype: Int,
-                                out: NDArrayHandleRef): Int
-  // scalastyle:off parameterNum
-  @native def mxNDArrayCreateSparseEx(storageType: Int,
-                                      shape: Array[Int],
-                                      ndim: Int,
-                                      devType: Int,
-                                      devId: Int,
-                                      delayAlloc: Int,
-                                      dtype: Int,
-                                      numAux: Int,
-                                      auxTypes: Array[Int],
-                                      auxNdims: Array[Int],
-                                      auxShapes: Array[Int],
-                                      out: NDArrayHandleRef): Int
-  // scalastyle:on parameterNum
-  @native def mxNDArrayWaitAll(): Int
-  @native def mxNDArrayWaitToRead(handle: NDArrayHandle): Int
-  @native def mxListFunctions(functions: ListBuffer[FunctionHandle]): Int
-  @native def mxFuncDescribe(handle: FunctionHandle,
-                             nUsedVars: MXUintRef,
-                             nScalars: MXUintRef,
-                             nMutateVars: MXUintRef,
-                             typeMask: RefInt): Int
-  @native def mxFuncGetInfo(handle: FunctionHandle,
-                            name: RefString,
-                            desc: RefString,
-                            numArgs: MXUintRef,
-                            argNames: ListBuffer[String],
-                            argTypes: ListBuffer[String],
-                            argDescs: ListBuffer[String]): Int
-  @native def mxFuncInvoke(function: FunctionHandle,
-                           useVars: Array[NDArrayHandle],
-                           scalarArgs: Array[MXFloat],
-                           mutateVars: Array[NDArrayHandle]): Int
-  @native def mxFuncInvokeEx(function: FunctionHandle,
-                             useVars: Array[NDArrayHandle],
-                             scalarArgs:
Array[MXFloat], - mutateVars: Array[NDArrayHandle], - numParams: Int, - paramKeys: Array[Array[Byte]], - paramVals: Array[Array[Byte]]): Int - @native def mxNDArrayGetShape(handle: NDArrayHandle, - ndim: MXUintRef, - data: ArrayBuffer[Int]): Int - @native def mxNDArraySyncCopyFromNDArray(handleDst: NDArrayHandle, - handleSrc: NDArrayHandle, - locator: Int): Int - @native def mxNDArraySyncCopyToCPU(handle: NDArrayHandle, - data: Array[Byte], - size: Int): Int - @native def mxNDArraySlice(handle: NDArrayHandle, - start: MXUint, - end: MXUint, - sliceHandle: NDArrayHandleRef): Int - @native def mxNDArrayAt(handle: NDArrayHandle, - idx: MXUint, - out: NDArrayHandleRef): Int - @native def mxNDArrayReshape64(handle: NDArrayHandle, - nDim: Int, - dims: Array[Long], - reverse: Boolean, - reshapeHandle: NDArrayHandleRef): Int - @native def mxNDArraySyncCopyFromCPU(handle: NDArrayHandle, - source: Array[MXFloat], - size: Int): Int - @native def mxFloat64NDArraySyncCopyFromCPU(handle: NDArrayHandle, - source: Array[Double], - size: Int): Int - @native def mxNDArrayLoad(fname: String, - outSize: MXUintRef, - handles: ArrayBuffer[NDArrayHandle], - outNameSize: MXUintRef, - names: ArrayBuffer[String]): Int - @native def mxNDArraySave(fname: String, - handles: Array[NDArrayHandle], - keys: Array[String]): Int - @native def mxNDArrayGetDataNDArray(handle: NDArrayHandle, out: NDArrayHandleRef): Int - @native def mxNDArrayGetAuxNDArray(handle: NDArrayHandle, - location: Int, - out: NDArrayHandleRef): Int - @native def mxNDArrayGetContext(handle: NDArrayHandle, devTypeId: RefInt, devId: RefInt): Int - @native def mxNDArraySaveRawBytes(handle: NDArrayHandle, buf: ArrayBuffer[Byte]): Int - @native def mxNDArrayLoadFromRawBytes(bytes: Array[Byte], handle: NDArrayHandleRef): Int - @native def mxNDArrayGetDType(handle: NDArrayHandle, dtype: RefInt): Int - @native def mxNDArrayGetStorageType(handle: NDArrayHandle, stype: RefInt): Int - - // KVStore Server - @native def mxInitPSEnv(keys: Array[String], values: Array[String]): Int - @native def mxKVStoreRunServer(handle: KVStoreHandle, controller: KVServerControllerCallback): Int - @native def mxKVStoreGetNumDeadNode(handle: KVStoreHandle, nodeId: Int, number: RefInt): Int - - // KVStore - @native def mxKVStoreCreate(name: String, handle: KVStoreHandleRef): Int - @native def mxKVStoreInit(handle: KVStoreHandle, - len: MXUint, - keys: Array[Int], - values: Array[NDArrayHandle]): Int - @native def mxKVStoreInitEx(handle: KVStoreHandle, - len: MXUint, - keys: Array[String], - values: Array[NDArrayHandle]): Int - @native def mxKVStorePush(handle: KVStoreHandle, - len: MXUint, - keys: Array[Int], - values: Array[NDArrayHandle], - priority: Int): Int - @native def mxKVStorePushEx(handle: KVStoreHandle, - len: MXUint, - keys: Array[String], - values: Array[NDArrayHandle], - priority: Int): Int - @native def mxKVStorePull(handle: KVStoreHandle, - len: MXUint, - keys: Array[Int], - outs: Array[NDArrayHandle], - priority: Int): Int - @native def mxKVStorePullEx(handle: KVStoreHandle, - len: MXUint, - keys: Array[String], - outs: Array[NDArrayHandle], - priority: Int): Int - @native def mxKVStoreSetUpdater(handle: KVStoreHandle, updaterFunc: MXKVStoreUpdater): Int - @native def mxKVStoreIsWorkerNode(isWorker: RefInt): Int - @native def mxKVStoreGetType(handle: KVStoreHandle, kvType: RefString): Int - @native def mxKVStoreSendCommmandToServers(handle: KVStoreHandle, - head: Int, body: String): Int - @native def mxKVStoreBarrier(handle: KVStoreHandle): Int - @native def 
mxKVStoreGetGroupSize(handle: KVStoreHandle, size: RefInt): Int - @native def mxKVStoreGetRank(handle: KVStoreHandle, size: RefInt): Int - @native def mxKVStoreSetBarrierBeforeExit(handle: KVStoreHandle, doBarrier: Int): Int - @native def mxKVStoreFree(handle: KVStoreHandle): Int - - // DataIter Funcs - @native def mxListDataIters(handles: ListBuffer[DataIterCreator]): Int - @native def mxDataIterCreateIter(handle: DataIterCreator, - keys: Array[String], - vals: Array[String], - out: DataIterHandleRef): Int - @native def mxDataIterGetIterInfo(creator: DataIterCreator, - name: RefString, - description: RefString, - argNames: ListBuffer[String], - argTypeInfos: ListBuffer[String], - argDescriptions: ListBuffer[String]): Int - @native def mxDataIterFree(handle: DataIterHandle): Int - @native def mxDataIterBeforeFirst(handle: DataIterHandle): Int - @native def mxDataIterNext(handle: DataIterHandle, out: RefInt): Int - @native def mxDataIterGetLabel(handle: DataIterHandle, - out: NDArrayHandleRef): Int - @native def mxDataIterGetData(handle: DataIterHandle, - out: NDArrayHandleRef): Int - @native def mxDataIterGetIndex(handle: DataIterHandle, - outIndex: ListBuffer[Long], - outSize: RefLong): Int - @native def mxDataIterGetPadNum(handle: DataIterHandle, - out: MXUintRef): Int - // Executors - @native def mxExecutorOutputs(handle: ExecutorHandle, outputs: ArrayBuffer[NDArrayHandle]): Int - @native def mxExecutorFree(handle: ExecutorHandle): Int - @native def mxExecutorForward(handle: ExecutorHandle, isTrain: Int): Int - @native def mxExecutorBackward(handle: ExecutorHandle, - grads: Array[NDArrayHandle]): Int - @native def mxExecutorPrint(handle: ExecutorHandle, debugStr: RefString): Int - @native def mxExecutorSetMonitorCallback(handle: ExecutorHandle, callback: MXMonitorCallback): Int - // scalastyle:off parameterNum - @native def mxExecutorReshape(partialShaping: Int, - allowUpSizing: Int, - devType: Int, - devId: Int, - mapKeys: Array[String], - mapDevTypes: Array[Int], - mapDevIds: Array[Int], - providedArgShapeNames: Array[String], - providedArgShapeData: Array[Int], - providedArgShapeIdx: Array[Int], - inArgs: ArrayBuffer[NDArrayHandle], - argGrads: ArrayBuffer[NDArrayHandle], - auxStates: ArrayBuffer[NDArrayHandle], - sharedExec: ExecutorHandle, - out: ExecutorHandleRef): Int - // scalastyle:on parameterNum - - // Symbols - @native def mxSymbolListAtomicSymbolCreators(symbolList: ListBuffer[SymbolHandle]): Int - @native def mxSymbolGetAtomicSymbolInfo(handle: SymbolHandle, - name: RefString, - desc: RefString, - numArgs: MXUintRef, - argNames: ListBuffer[String], - argTypes: ListBuffer[String], - argDescs: ListBuffer[String], - keyVarNumArgs: RefString): Int - @native def mxSymbolCreateAtomicSymbol(handle: SymbolHandle, - paramKeys: Array[String], - paramVals: Array[String], - symHandleRef: SymbolHandleRef): Int - @native def mxSymbolSetAttr(handle: SymbolHandle, key: String, value: String): Int - @native def mxSymbolListAttrShallow(handle: SymbolHandle, - outSize: MXUintRef, - out: ArrayBuffer[String]): Int - @native def mxSymbolListAttr(handle: SymbolHandle, - outSize: MXUintRef, - out: ArrayBuffer[String]): Int - @native def mxSymbolCompose(handle: SymbolHandle, - name: String, - keys: Array[String], - args: Array[SymbolHandle]): Int - @native def mxSymbolCreateVariable(name: String, out: SymbolHandleRef): Int - @native def mxSymbolGetAttr(handle: SymbolHandle, - key: String, - ret: RefString, - success: RefInt): Int - @native def mxSymbolListArguments(handle: SymbolHandle, - 
arguments: ArrayBuffer[String]): Int - @native def mxSymbolCopy(handle: SymbolHandle, clonedHandle: SymbolHandleRef): Int - @native def mxSymbolListAuxiliaryStates(handle: SymbolHandle, - arguments: ArrayBuffer[String]): Int - @native def mxSymbolListOutputs(handle: SymbolHandle, - outputs: ArrayBuffer[String]): Int - @native def mxSymbolCreateGroup(handles: Array[SymbolHandle], out: SymbolHandleRef): Int - @native def mxSymbolPrint(handle: SymbolHandle, str: RefString): Int - @native def mxSymbolGetInternals(handle: SymbolHandle, out: SymbolHandleRef): Int - @native def mxSymbolInferType(handle: SymbolHandle, - keys: Array[String], - sdata: Array[Int], - argTypeData: ListBuffer[Int], - outTypeData: ListBuffer[Int], - auxTypeData: ListBuffer[Int], - complete: RefInt): Int - @native def mxSymbolInferShape(handle: SymbolHandle, - numArgs: MXUint, - keys: Array[String], - argIndPtr: Array[MXUint], - argShapeData: Array[Int], - inShapeData: ListBuffer[Array[Int]], - outShapeData: ListBuffer[Array[Int]], - auxShapeData: ListBuffer[Array[Int]], - complete: RefInt): Int - @native def mxSymbolInferShapePartial(handle: SymbolHandle, - numArgs: MXUint, - keys: Array[String], - argIndPtr: Array[MXUint], - argShapeData: Array[Int], - inShapeData: ListBuffer[Array[Int]], - outShapeData: ListBuffer[Array[Int]], - auxShapeData: ListBuffer[Array[Int]], - complete: RefInt): Int - @native def mxSymbolGetOutput(handle: SymbolHandle, index: Int, out: SymbolHandleRef): Int - @native def mxSymbolSaveToJSON(handle: SymbolHandle, out: RefString): Int - @native def mxSymbolCreateFromJSON(json: String, handle: SymbolHandleRef): Int - // scalastyle:off parameterNum - @native def mxExecutorBindX(handle: SymbolHandle, - deviceTypeId: Int, - deviceID: Int, - numCtx: Int, - ctxMapKeys: Array[String], - ctxMapDevTypes: Array[Int], - ctxMapDevIDs: Array[Int], - numArgs: Int, - argsHandle: Array[NDArrayHandle], - argsGradHandle: Array[NDArrayHandle], - reqsArray: Array[Int], - auxArgsHandle: Array[NDArrayHandle], - out: ExecutorHandleRef): Int - @native def mxExecutorBindEX(handle: SymbolHandle, - deviceTypeId: Int, - deviceID: Int, - numCtx: Int, - ctxMapKeys: Array[String], - ctxMapDevTypes: Array[Int], - ctxMapDevIDs: Array[Int], - numArgs: Int, - argsHandle: Array[NDArrayHandle], - argsGradHandle: Array[NDArrayHandle], - reqsArray: Array[Int], - auxArgsHandle: Array[NDArrayHandle], - sharedExec: ExecutorHandle, - out: ExecutorHandleRef): Int - // scalastyle:on parameterNum - @native def mxSymbolSaveToFile(handle: SymbolHandle, fname: String): Int - @native def mxSymbolCreateFromFile(fname: String, handle: SymbolHandleRef): Int - @native def mxSymbolFree(handle: SymbolHandle): Int - - // Random - @native def mxRandomSeed(seed: Int): Int - - @native def mxNotifyShutdown(): Int - - // RecordIO - @native def mxRecordIOWriterCreate(uri: String, out: RecordIOHandleRef): Int - @native def mxRecordIOReaderCreate(uri: String, out: RecordIOHandleRef): Int - @native def mxRecordIOWriterFree(handle: RecordIOHandle): Int - @native def mxRecordIOReaderFree(handle: RecordIOHandle): Int - @native def mxRecordIOWriterWriteRecord(handle: RecordIOHandle, buf: String, size: Int): Int - @native def mxRecordIOReaderReadRecord(handle: RecordIOHandle, buf: RefString): Int - @native def mxRecordIOWriterTell(handle: RecordIOHandle, pos: RefInt): Int - @native def mxRecordIOReaderSeek(handle: RecordIOHandle, pos: Int): Int - - // Rtc - @native def mxRtcCreate(name: String, - inputNames: Array[String], - outputNames: Array[String], - inputs: 
Array[NDArrayHandle], - outputs: Array[NDArrayHandle], - kernel: String, - out: RtcHandleRef): Int - @native def mxRtcPush(handle: RtcHandle, - inputs: Array[NDArrayHandle], - outputs: Array[NDArrayHandle], - gridDimX: Int, - gridDimY: Int, - gridDimZ: Int, - blockDimX: Int, - blockDimY: Int, - blockDimZ: Int): Int - @native def mxRtcFree(handle: RtcHandle): Int - - // CustomOp - @native def mxCustomOpRegister(regName: String, opProp: CustomOpProp): Int - - // Profiler - @native def mxSetProfilerConfig(keys: Array[String], vals: Array[String]): Int - @native def mxSetProfilerState(state: Int): Int - @native def mxDumpProfile(finished: Int): Int - - // Numpy - @native def mxIsNumpyShape(compatible: RefInt): Int - @native def mxSetIsNumpyShape(isNpComp: Int, prev: RefInt): Int -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/MX_PRIMITIVES.scala b/scala-package/core/src/main/scala/org/apache/mxnet/MX_PRIMITIVES.scala deleted file mode 100644 index de7792850dc1..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/MX_PRIMITIVES.scala +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import scala.language.implicitConversions - -object MX_PRIMITIVES { - - /** - * This defines the basic primitives we can use in Scala for mathematical - * computations in NDArrays.This gives us a flexibility to expand to - * more supported primitives in the future. Currently Float and Double - * are supported. The functions which accept MX_PRIMITIVE_TYPE as input can also accept - * plain old Float and Double data as inputs because of the underlying - * implicit conversion between primitives to MX_PRIMITIVE_TYPE. - */ - trait MX_PRIMITIVE_TYPE extends Ordered[MX_PRIMITIVE_TYPE]{ - - def toString: String - - def unary_- : MX_PRIMITIVE_TYPE - } - - trait MXPrimitiveOrdering extends Ordering[MX_PRIMITIVE_TYPE] { - - def compare(x: MX_PRIMITIVE_TYPE, y: MX_PRIMITIVE_TYPE): Int = x.compare(y) - - } - - implicit object MX_PRIMITIVE_TYPE extends MXPrimitiveOrdering - - /** - * Wrapper over Float in Scala. - * @param data - */ - class MX_FLOAT(val data: Float) extends MX_PRIMITIVE_TYPE { - - override def toString: String = data.toString - - override def unary_- : MX_PRIMITIVE_TYPE = new MX_FLOAT(data.unary_-) - - override def compare(that: MX_PRIMITIVE_TYPE): Int = { - this.data.compareTo(that.asInstanceOf[MX_FLOAT].data) - } - } - - implicit def FloatToMX_Float(d : Float): MX_FLOAT = new MX_FLOAT(d) - - implicit def MX_FloatToFloat(d: MX_FLOAT) : Float = d.data - - implicit def IntToMX_Float(d: Int): MX_FLOAT = new MX_FLOAT(d.toFloat) - - /** - * Wrapper over Double in Scala. 
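Every @native method in the LibInfo class deleted above follows the same convention: it returns an int status code and writes its results through Ref*/buffer out-parameters, and callers wrap it in Base.checkCall, which raises MXNet's last error on a nonzero status. A sketch of that pattern with a hypothetical helper (it only compiles inside the org.apache.mxnet package, where _LIB, RefInt, and the handle types are visible):

  // Assumes package org.apache.mxnet and import org.apache.mxnet.Base._
  def rankOf(handle: KVStoreHandle): Int = {
    val rank = new RefInt                            // out-parameter holder
    checkCall(_LIB.mxKVStoreGetRank(handle, rank))   // nonzero status => exception via mxGetLastError
    rank.value
  }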
- * @param data - */ - class MX_Double(val data: Double) extends MX_PRIMITIVE_TYPE { - - override def toString: String = data.toString - - override def unary_- : MX_PRIMITIVE_TYPE = new MX_Double(data.unary_-) - - override def compare(that: MX_PRIMITIVE_TYPE): Int = { - this.data.compareTo(that.asInstanceOf[MX_Double].data) - } - } - - implicit def DoubleToMX_Double(d : Double): MX_Double = new MX_Double(d) - - implicit def MX_DoubleToDouble(d: MX_Double) : Double = d.data - - def isValidMxPrimitiveType(num : Any) : Boolean = { - num match { - case valid @ (_: Float | _: Double) => true - case _ => false - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Model.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Model.scala deleted file mode 100644 index b835c4964dd0..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Model.scala +++ /dev/null @@ -1,380 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import java.nio.ByteBuffer - -import org.slf4j.LoggerFactory - -import scala.collection.mutable - -/** - * Describe the model flow - */ -class Model -object Model { - private val logger = LoggerFactory.getLogger(classOf[Model]) - - /** - * Checkpoint the model data into file. - * @param prefix Prefix of model name. - * @param epoch The epoch number of the model. - * @param symbol The input symbol - * @param argParams Model parameter, dict of name to NDArray of net's weights. - * @param auxParams Model parameter, dict of name to NDArray of net's auxiliary states. - * @note - * - ``prefix-symbol.json`` will be saved for symbol. - * - ``prefix-epoch.params`` will be saved for parameters. - */ - def saveCheckpoint(prefix: String, epoch: Int, symbol: Symbol, - argParams: Map[String, NDArray], auxParams: Map[String, NDArray]): Unit = { - symbol.save(s"$prefix-symbol.json") - val saveDict = argParams.map { case (k, v) => s"arg:$k" -> v } ++ - auxParams.map { case (k, v) => s"aux:$k" -> v } - val paramName = "%s-%04d.params".format(prefix, epoch) - NDArray.save(paramName, saveDict) - logger.info(s"Saved checkpoint to $paramName") - } - - /** - * Load model checkpoint from file. - * - * @param prefix Prefix of model name. - * @param epoch Epoch number of model we would like to load. - * - * @return - * symbol : The symbol configuration of computation network. - * argParams : Model parameter, dict of name to NDArray of net's weights. - * auxParams : Model parameter, dict of name to NDArray of net's auxiliary states. - * @note - * - symbol will be loaded from ``prefix-symbol.json``. - * - parameters will be loaded from ``prefix-epoch.params``. 
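For reference, the checkpoint pair documented here round-trips as follows; the prefix "myNet", epoch 10, and the symbol/parameter values are placeholders:

  // Writes myNet-symbol.json and myNet-0010.params.
  Model.saveCheckpoint("myNet", 10, net, argParams, auxParams)
  // Reads them back; returns (symbol, argParams, auxParams).
  val (sym, loadedArgs, loadedAux) = Model.loadCheckpoint("myNet", 10)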
- */ - def loadCheckpoint(prefix: String, epoch: Int): - (Symbol, Map[String, NDArray], Map[String, NDArray]) = { - val symbol = Symbol.load(s"$prefix-symbol.json") - val saveDict = NDArray.load("%s-%04d.params".format(prefix, epoch)) - val argParams = mutable.HashMap[String, NDArray]() - val auxParams = mutable.HashMap[String, NDArray]() - for ((k, v) <- saveDict._1 zip saveDict._2) { - val splitted = k.split(":", 2) - val tp = splitted(0) - val name = splitted(1) - if (tp == "arg") { - argParams(name) = v - } else if (tp == "aux") { - auxParams(name) = v - } - } - (symbol, argParams.toMap, auxParams.toMap) - } - - // a helper class for serializing model - class SerializedModel private[mxnet] ( - val symbol: String, - val argParams: Map[String, Array[Byte]], - val auxParams: Map[String, Array[Byte]]) extends Serializable - - private[mxnet] def serialize(symbol: Symbol, - argParams: Map[String, NDArray], - auxParams: Map[String, NDArray]): Array[Byte] = { - val serializedModel = new SerializedModel( - symbol.toJson, - argParams.map { case (k, v) => (k, v.serialize()) }, - auxParams.map { case (k, v) => (k, v.serialize()) } - ) - Serializer.getSerializer.serialize(serializedModel).array() - } - - private[mxnet] def deserialize(bytes: Array[Byte]): - (Symbol, Map[String, NDArray], Map[String, NDArray]) = { - val model = Serializer.getSerializer.deserialize[SerializedModel](ByteBuffer.wrap(bytes)) - val symbol = Symbol.loadJson(model.symbol) - val argParams = model.argParams.map { case (k, v) => - (k, NDArray.deserialize(v)) - } - val auxParams = model.auxParams.map { case (k, v) => - (k, NDArray.deserialize(v)) - } - (symbol, argParams, auxParams) - } - - /** - * Create kvstore - * This function select and create a proper kvstore given the kvstore type - * @param kvStore KVStore type - * @param numDevice The number of devices - * @param argParams Model parameter, dict of name to NDArray of net's weights. 
- * @return Option of created [[KVStore]] and whether or not update weight on it - */ - private[mxnet] def createKVStore(kvStore: String, - numDevice: Int, - argParams: Map[String, NDArray]): (Option[KVStore], Boolean) = { - if (numDevice == 1 && !kvStore.contains("dist")) { - // no need to use kv for single device and single machine - (None, false) - } else { - var kvType = kvStore - if (kvType == "local") { - // automatically select a proper local - val maxSize = argParams.values.map(_.shape.product).max - kvType = - if (maxSize < 1024 * 1024 * 16) { - "local_update_cpu" - } else { - "local_allreduce_cpu" - } - logger.info(s"Auto - select kvstore type = $kvType") - } - (Option(KVStore.create(kvType)), !kvType.contains("local_allreduce")) - } - } - - /** - * Create a kvStore (wrap it with Option, None if given kvStore == null) - * @param kvStore KVStore - * @return Option of created [[KVStore]] and whether or not update weight on it - */ - private[mxnet] def createKVStore(kvStore: KVStore): (Option[KVStore], Boolean) = { - (Option(kvStore), kvStore != null && !kvStore.`type`.contains("local_allreduce")) - } - - // Initialize kvstore - private[mxnet] def initializeKVStore(kvStore: KVStore, - paramArrays: IndexedSeq[Array[NDArray]], - argParams: Map[String, NDArray], - paramNames: IndexedSeq[String], - updateOnKVStore: Boolean): Unit = { - require(paramArrays.length == paramNames.length, - s"Provided parameter arrays does not match parameter names") - for (idx <- 0 until paramArrays.length) { - val paramOnDevs = paramArrays(idx) - val name = paramNames(idx) - kvStore.init(name, argParams(paramNames(idx))) - if (updateOnKVStore) { - kvStore.pull(name, paramOnDevs, -idx) - } - } - } - - // Perform update of param_arrays from grad_arrays on kvstore - private[mxnet] def updateParamsOnKVStore(paramArrays: IndexedSeq[Array[NDArray]], - gradArrays: IndexedSeq[Array[NDArray]], - kvStore: Option[KVStore], - paramNames: IndexedSeq[String]): Unit = { - (paramArrays zip gradArrays).zipWithIndex.foreach { case ((argList, gradList), index) => - if (gradList != null) { - val name = paramNames(index) - // push gradient, priority is negative index - kvStore.foreach(_.push(name, gradList, -index)) - // pull back the weights - kvStore.foreach(_.pull(name, argList, -index)) - } - } - } - - // Perform update of param_arrays from grad_arrays not on kvstore - private[mxnet] def updateParams(paramArrays: IndexedSeq[Array[NDArray]], - gradArrays: IndexedSeq[Array[NDArray]], - updater: MXKVStoreUpdater, - numDevice: Int, - paramNames: IndexedSeq[String], - kvStore: Option[KVStore] = None) { - (paramArrays zip gradArrays).zipWithIndex.foreach { case ((argList, gradList), index) => - if (gradList != null) { - kvStore.foreach(kv => { - val name = paramNames(index) - // push gradient, priority is negative index - kv.push(name, gradList, -index) - // pull back the sum gradients, to the same locations. - kv.pull(name, gradList, -index) - }) - (argList zip gradList).zipWithIndex.foreach { case ((w: NDArray, g: NDArray), k: Int) => - // faked an index here, to make optimizer create diff - // state for the same index but on diff devs, - // (copy from python package) TODO(mli) use a better solution latter - updater.update(index * numDevice + k, g, w) - } - } - } - } - - /** - * Internal training function on multiple devices. - * This function will also work for single device as well. - * @param symbol The network configuration - * @param ctx The training devices. - * @param argNames Name of all arguments of the network. 
- * @param paramNames Name of all trainable parameters of the network.
- * @param auxNames Name of all auxiliary states of the network.
- * @param argParams Model parameter, dict of name to NDArray of net's weights.
- * @param auxParams Model parameter, dict of name to NDArray of net's auxiliary states.
- * @param beginEpoch The beginning training epoch.
- * @param endEpoch The end training epoch.
- * @param epochSize Number of batches in an epoch.
- *                  By default, it is set to ceil(num_train_examples / batch_size).
- * @param optimizer The optimization algorithm
- * @param kvStore The KVStore
- * @param updateOnKVStore whether or not to perform weight updating on the kvstore
- * @param trainData Training data iterator.
- * @param evalData Validation data iterator.
- * @param evalMetric An evaluation function.
- * @param epochEndCallback A callback that is invoked at the end of each epoch.
- *                         This can be used to checkpoint the model each epoch.
- * @param batchEndCallback A callback that is invoked at the end of each batch.
- *                         This can be used to measure speed and to get results
- *                         from the evaluation metric, etc.
- * @param workLoadList The list of work loads for the different devices, in the same order as ctx
- * @param monitor Monitor outputs, weights, and gradients for debugging
- * @note This function will update the NDArrays in argParams and auxStates in place.
- */
-  // scalastyle:off parameterNum
-  private[mxnet] def trainMultiDevice(symbol: Symbol, ctx: Array[Context],
-                                      argNames: IndexedSeq[String], paramNames: IndexedSeq[String],
-                                      auxNames: IndexedSeq[String], argParams: Map[String, NDArray],
-                                      auxParams: Map[String, NDArray],
-                                      beginEpoch: Int, endEpoch: Int, epochSize: Int,
-                                      optimizer: Optimizer,
-                                      kvStore: Option[KVStore], updateOnKVStore: Boolean,
-                                      trainData: DataIter,
-                                      evalData: Option[DataIter] = None,
-                                      evalMetric: EvalMetric,
-                                      epochEndCallback: Option[EpochEndCallback] = None,
-                                      batchEndCallback: Option[BatchEndCallback] = None,
-                                      workLoadList: Seq[Float] = Nil,
-                                      monitor: Option[Monitor] = None,
-                                      symGen: SymbolGenerator = null): Unit = {
-    ResourceScope.using() {
-
-      val executorManager = new DataParallelExecutorManager(
-          symbol = symbol,
-          symGen = symGen,
-          ctx = ctx,
-          trainData = trainData,
-          paramNames = paramNames,
-          argNames = argNames,
-          auxNames = auxNames,
-          workLoadList = workLoadList)
-
-      monitor.foreach(executorManager.installMonitor)
-      executorManager.setParams(argParams, auxParams)
-
-      // updater for updateOnKVStore = false
-      val updaterLocal = Optimizer.getUpdater(optimizer)
-
-      kvStore.foreach(initializeKVStore(_, executorManager.paramArrays,
-        argParams, executorManager.paramNames, updateOnKVStore))
-      if (updateOnKVStore) {
-        kvStore.foreach(_.setOptimizer(optimizer))
-      }
-
-      // Now start training
-      for (epoch <- beginEpoch until endEpoch) {
-        // Training phase
-        val tic = System.currentTimeMillis
-        evalMetric.reset()
-        var nBatch = 0
-        var epochDone = false
-        // Iterate over training data.
- trainData.reset() - ResourceScope.using() { - while (!epochDone) { - var doReset = true - while (doReset && trainData.hasNext) { - val dataBatch = trainData.next() - executorManager.loadDataBatch(dataBatch) - monitor.foreach(_.tic()) - executorManager.forward(isTrain = true) - executorManager.backward() - if (updateOnKVStore) { - updateParamsOnKVStore(executorManager.paramArrays, - executorManager.gradArrays, - kvStore, executorManager.paramNames) - } else { - updateParams(executorManager.paramArrays, - executorManager.gradArrays, - updaterLocal, ctx.length, - executorManager.paramNames, - kvStore) - } - monitor.foreach(_.tocPrint()) - // evaluate at end, so out_cpu_array can lazy copy - executorManager.updateMetric(evalMetric, dataBatch.label) - - nBatch += 1 - batchEndCallback.foreach(_.invoke(epoch, nBatch, evalMetric)) - - // this epoch is done possibly earlier - if (epochSize != -1 && nBatch >= epochSize) { - doReset = false - } - } - if (doReset) { - trainData.reset() - } - - // this epoch is done - epochDone = (epochSize == -1 || nBatch >= epochSize) - } - } - val (name, value) = evalMetric.get - name.zip(value).foreach { case (n, v) => - logger.info(s"Epoch[$epoch] Train-$n=$v") - } - val toc = System.currentTimeMillis - logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") - - ResourceScope.using() { - evalData.foreach { evalDataIter => - evalMetric.reset() - evalDataIter.reset() - // TODO: make DataIter implement Iterator - while (evalDataIter.hasNext) { - val evalBatch = evalDataIter.next() - executorManager.loadDataBatch(evalBatch) - executorManager.forward(isTrain = false) - executorManager.updateMetric(evalMetric, evalBatch.label) - } - - val (name, value) = evalMetric.get - name.zip(value).foreach { case (n, v) => - logger.info(s"Epoch[$epoch] Validation-$n=$v") - } - } - } - - if (epochEndCallback.isDefined || epoch + 1 == endEpoch) { - executorManager.copyTo(argParams, auxParams) - } - epochEndCallback.foreach(_.invoke(epoch, symbol, argParams, auxParams)) - } - - } - } - // scalastyle:on parameterNum -} - -trait EpochEndCallback { - def invoke(epoch: Int, symbol: Symbol, - argParams: Map[String, NDArray], - auxStates: Map[String, NDArray]): Unit -} - -trait BatchEndCallback { - def invoke(epoch: Int, nBatch: Int, evalMetric: EvalMetric) -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Monitor.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Monitor.scala deleted file mode 100644 index c8a251d03a6c..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Monitor.scala +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
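The two callback traits at the end of Model.scala above were the main extension points of this training loop. A hedged sketch of typical implementations, where the checkpoint prefix and the logging cadence are illustrative:

  val checkpointCb = new EpochEndCallback {
    override def invoke(epoch: Int, symbol: Symbol,
                        argParams: Map[String, NDArray],
                        auxStates: Map[String, NDArray]): Unit = {
      // Reuses the saveCheckpoint helper defined earlier in Model.scala.
      Model.saveCheckpoint("myNet", epoch + 1, symbol, argParams, auxStates)
    }
  }
  val progressCb = new BatchEndCallback {
    override def invoke(epoch: Int, nBatch: Int, evalMetric: EvalMetric): Unit = {
      if (nBatch % 50 == 0) println(s"epoch=$epoch batch=$nBatch")
    }
  }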
- */ - -package org.apache.mxnet - -import org.apache.mxnet.Base.NDArrayHandle -import org.slf4j.LoggerFactory - -import scala.collection.mutable - -/** - * Monitor outputs, weights, and gradients for debugging. - * - * @param interval Number of batches between printing. - * @param statFunc A function that computes statistics of tensors. - * Takes a NDArray and returns a NDArray. defaults - * to mean absolute value |x|/size(x). - */ -class Monitor( - protected val interval: Int, - protected var statFunc: (NDArray) => NDArray = null) { - - private val logger = LoggerFactory.getLogger(classOf[Monitor]) - - if (statFunc == null) { - statFunc = (x: NDArray) => { - NDArray.norm(x) / math.sqrt(x.size.toDouble).toFloat - } - } - - private var activated: Boolean = false - private var queue = new mutable.Queue[(Int, String, NDArray)] - private var step: Int = 0 - private var exes = new mutable.Queue[Executor] - - val statHelper: MXMonitorCallback = new MXMonitorCallback { - override def invoke(name: String, arr: NDArrayHandle): Unit = { - // wrapper for executor callback - if (activated) { - val array = new NDArray(arr, writable = false, addToCollector = false) - val elem = (step, name, statFunc(array)) - queue += elem - } - } - } - - /** - * Install callback to executor. - * Supports installing to multiple exes - * @param exe the Executor (returned by symbol.bind) to install to. - */ - def install(exe: Executor): Unit = { - exe.setMonitorCallback(statHelper) - exes += exe - } - - /** - * Start collecting stats for current batch. - * Call before forward - */ - def tic(): Unit = { - if (step % interval == 0) { - exes.foreach { exe => - exe.argArrays.foreach(_.waitToRead()) - } - queue = new mutable.Queue[(Int, String, NDArray)] - activated = true - } - step += 1 - } - - /** - * End collecting for current batch and return results. - * Call after computation of current batch. - */ - def toc(): mutable.Queue[(Int, String, String)] = { - if (activated) { - exes.foreach { exe => - exe.argArrays.foreach(_.waitToRead()) - } - exes.foreach { exe => - (exe.symbol.listArguments() zip exe.argArrays).foreach { case (name, array) => - val elem = (step, name, statFunc(array)) - queue += elem - } - } - activated = false - val res = new mutable.Queue[(Int, String, String)] - queue.foreach { q => - val (n, k, v) = q - if (v.shape == Shape(1)) { - res += ((n, k, v.toScalar.toString)) - } else { - res += ((n, k, s"[${v.toArray.mkString(",")}]")) - } - } - queue = new mutable.Queue[(Int, String, NDArray)] - res - } else { - new mutable.Queue[(Int, String, String)] - } - } - - /** - * End collecting and print results - */ - def tocPrint(): Unit = { - val res = toc() - res.foreach { case (n, k, v) => - logger.info(s"Batch: $n $k $v") - } - } - -} - -private[mxnet] trait MXMonitorCallback { - def invoke(name: String, arr: NDArrayHandle): Unit -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/NDArray.scala b/scala-package/core/src/main/scala/org/apache/mxnet/NDArray.scala deleted file mode 100644 index 717120bcf984..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/NDArray.scala +++ /dev/null @@ -1,1577 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
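For reference, the Monitor class removed above was installed on executors and sampled statistics around each forward/backward pass. A minimal usage sketch; the interval is illustrative and `exec` is assumed to be an Executor previously obtained from symbol.bind(...):

  val monitor = new Monitor(interval = 100)  // default statFunc: norm(x) / sqrt(size(x))
  monitor.install(exec)
  monitor.tic()                              // call before the forward pass
  exec.forward(isTrain = true)
  exec.backward()
  monitor.tocPrint()                         // logs "Batch: <step> <name> <stat>" lines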
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import java.nio.{ByteBuffer, ByteOrder} - -import org.apache.mxnet.Base._ -import org.apache.mxnet.DType.DType -import org.apache.mxnet.MX_PRIMITIVES.MX_PRIMITIVE_TYPE -import org.apache.mxnet.SparseFormat.SparseFormat -import org.slf4j.LoggerFactory - -import scala.collection.mutable -import scala.collection.mutable.{ArrayBuffer, ListBuffer} -import scala.language.implicitConversions -import scala.ref.WeakReference -import scala.util.Try - -/** - * NDArray Object extends from NDArrayBase for abstract function signatures - * Main code will be generated during compile time through Macros - */ -@AddNDArrayFunctions(false) -object NDArray extends NDArrayBase { - /** - * method to convert NDArrayFunctionReturn to NDArray - * @param ret the returned NDArray list - * @return NDArray result - */ - implicit def getFirstResult(ret: NDArrayFuncReturn): NDArray = ret(0) - private val logger = LoggerFactory.getLogger(classOf[NDArray]) - - private val functions: Map[String, NDArrayFunction] = initNDArrayModule() - - val api = NDArrayAPI - val random = NDArrayRandomAPI - - private def addDependency(froms: Array[NDArray], tos: Array[NDArray]): Unit = { - froms.foreach { from => - val weakRef = new WeakReference(from) - tos.foreach { to => - to.dependencies.put(from.handle, weakRef) - // we add all dep's dep to prevent (recursively) recomputing at runtime. - to.dependencies ++= from.dependencies - } - } - } - - /** - * Used by NDArrayMacro. - * Invoke this function by passing in parameters. - * Parameters - * ---------- - * @param args Positional arguments of input scalars and NDArray - * @param kwargs Key-value arguments of input scalars - * @return The result NDArrays of result of computation. 
- */ - private[mxnet] def genericNDArrayFunctionInvoke( - funcName: String, args: Seq[Any], kwargs: Map[String, Any] = null): NDArrayFuncReturn = { - val function = functions(funcName) - val ndArgs = ArrayBuffer.empty[NDArray] - val posArgs = ArrayBuffer.empty[String] - args.foreach { - case arr: NDArray => - ndArgs.append(arr) - case arrFunRet: NDArrayFuncReturn => - arrFunRet.arr.foreach(ndArgs.append(_)) - case arg => - posArgs.append(arg.toString) - } - - require(posArgs.length <= function.arguments.length, - s"len(posArgs) = ${posArgs.length}, should be less or equal to len(arguments) " + - s"= ${function.arguments.length}") - val updatedKwargs: Map[String, String] = - (Option(kwargs).getOrElse(Map.empty[String, String]) - ++ function.arguments.slice(0, posArgs.length).zip(posArgs) - "out" - ).map { case (k, v) => k -> v.toString } - - - val (oriOutputs, outputVars) = - if (kwargs != null && kwargs.contains("out")) { - val output = kwargs("out") - output match { - case nd: NDArray => (Array(nd), Array(nd.handle)) - case ndFuncRet: NDArrayFuncReturn => (ndFuncRet.arr, ndFuncRet.arr.map(_.handle)) - // Seq[NDArray] erasure problem explained here https://stackoverflow.com/questions/1094173/ - case ndArr: Seq[NDArray @unchecked] => - if (ndArr.head.isInstanceOf[NDArray]) (ndArr.toArray, ndArr.toArray.map(_.handle)) - else throw new IllegalArgumentException( - s"""Unsupported out ${output.getClass} type, - | should be NDArray or subclass of Seq[NDArray]""".stripMargin) - case _ => throw new IllegalArgumentException( - s"""Unsupported out ${output.getClass} type, - | should be NDArray or subclass of Seq[NDArray]""".stripMargin) - } - } else { - (null, null) - } - - val outputs = ArrayBuffer.empty[NDArrayHandle] - val outStypes = ArrayBuffer.empty[Int] - checkCall(_LIB.mxImperativeInvokeEx(function.handle, - ndArgs.map(_.handle).toArray, - outputVars, - outputs, - updatedKwargs.size, - updatedKwargs.keys.toArray, - updatedKwargs.values.toArray, - outStypes)) - new NDArrayFuncReturn(Option(oriOutputs).getOrElse { - val outputArrs = (outputs zip outStypes).map( - ele => ele._2 match { - case 0 => new NDArray(ele._1) - case _ => new SparseNDArray(ele._1) - } - ).toArray - addDependency(ndArgs.toArray, outputArrs) - outputArrs - }) - } - - /** - * Return a new empty handle. - * Empty handle can be used to hold result - * - * @return a new empty ndarray handle - */ - private def newEmptyHandle(): NDArrayHandle = { - val hdl = new NDArrayHandleRef - checkCall(_LIB.mxNDArrayCreateNone(hdl)) - hdl.value - } - - /** - * Return a new handle with specified shape and context. - * Empty handle is only used to hold results - * - * @return a new empty ndarray handle - */ - private def newAllocHandle(shape: Shape, - ctx: Context, - delayAlloc: Boolean, - dtype: DType = DType.Float32): NDArrayHandle = { - val hdl = new NDArrayHandleRef - checkCall(_LIB.mxNDArrayCreateEx( - shape.toArray, - shape.length, - ctx.deviceTypeid, - ctx.deviceId, - if (delayAlloc) 1 else 0, - dtype.id, - hdl)) - hdl.value - } - - /** - * Wait all async operation to finish in MXNet - * This function is used for benchmark only - */ - def waitall(): Unit = { - checkCall(_LIB.mxNDArrayWaitAll()) - } - - // List and add all the atomic symbol functions to current module. 
- private def initNDArrayModule(): Map[String, NDArrayFunction] = { - val opNames = ListBuffer.empty[String] - checkCall(_LIB.mxListAllOpNames(opNames)) - opNames.map(opName => { - val opHandle = new RefLong - checkCall(_LIB.nnGetOpHandle(opName, opHandle)) - makeNDArrayFunction(opHandle.value, opName) - }).toMap - } - - // Create an atomic symbol function by handle and function name. - private def makeNDArrayFunction(handle: NDArrayHandle, aliasName: String) - : (String, NDArrayFunction) = { - val name = new RefString - val desc = new RefString - val keyVarNumArgs = new RefString - val numArgs = new RefInt - val argNames = ListBuffer.empty[String] - val argTypes = ListBuffer.empty[String] - val argDescs = ListBuffer.empty[String] - - checkCall(_LIB.mxSymbolGetAtomicSymbolInfo( - handle, name, desc, numArgs, argNames, argTypes, argDescs, keyVarNumArgs)) - val arguments = (argTypes zip argNames).filter { case (dtype, _) => - !(dtype.startsWith("NDArray") || dtype.startsWith("Symbol") - || dtype.startsWith("NDArray-or-Symbol")) - }.map { case (_, argName) => - argName - } - (aliasName, new NDArrayFunction(handle, arguments.toList)) - } - - /** - * One hot encoding indices into matrix out. - * @param indices An NDArray containing indices of the categorical features. - * @param out The result holder of the encoding. - * @return Same as out. - */ - def onehotEncode(indices: NDArray, out: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke( - "_onehot_encode", Seq(indices, out), Map("out" -> out))(0) - } - - /** - * Create an empty uninitialized new NDArray, with specified shape. - * - * @param shape shape of the NDArray. - * @param ctx The context of the NDArray, default to current default context. - * - * @return The created NDArray. - */ - def empty(shape: Shape, ctx: Context = null, dtype: DType = Base.MX_REAL_TYPE): NDArray = { - val context = if (ctx == null) Context.defaultCtx else ctx - new NDArray(handle = NDArray.newAllocHandle(shape, context, delayAlloc = false, dtype)) - } - - def empty(shape: Int *): NDArray = empty(Shape(shape: _*)) - - def empty(ctx: Context, shape: Int *): NDArray = empty(Shape(shape: _*), ctx) - - /** - * Create a new NDArray filled with 0, with specified shape. - * - * @param shape shape of the NDArray. - * @param ctx The context of the NDArray, default to current default context. - * - * @return The created NDArray. - */ - def zeros(shape: Shape, ctx: Context = null, dtype: DType = Base.MX_REAL_TYPE): NDArray = { - val arr = empty(shape, ctx, dtype) - arr.set(0f) - arr - } - - def zeros(shape: Int *): NDArray = zeros(Shape(shape: _*)) - - def zeros(ctx: Context, shape: Int *): NDArray = zeros(Shape(shape: _*), ctx) - - /** - * Create a new NDArray filled with 1, with specified shape. - * @param shape shape of the NDArray. - * @param ctx The context of the NDArray, default to current default context. - * @return The created NDArray. - */ - def ones(shape: Shape, ctx: Context = null, dtype: DType = Base.MX_REAL_TYPE): NDArray = { - val arr = empty(shape, ctx, dtype) - arr.set(1f) - arr - } - - def ones(shape: Int *): NDArray = ones(Shape(shape: _*)) - - def ones(ctx: Context, shape: Int *): NDArray = ones(Shape(shape: _*), ctx) - - /** - * Create a new NDArray filled with given value, with specified shape. - * @param shape shape of the NDArray. 
- * @param value value to be filled with - * @param ctx The context of the NDArray, default to current default context - */ - def full(shape: Shape, value: Float, ctx: Context = null): NDArray = { - val arr = empty(shape, ctx) - arr.set(value) - arr - } - - def full(shape: Shape, value: Double, ctx: Context): NDArray = { - val arr = empty(shape, ctx, DType.Float64) - arr.set(value) - arr - } - - /** - * Create a new NDArray filled with given value, with specified shape. - * @param shape shape of the NDArray. - * @param value value to be filled with - */ - def full(shape: Shape, value: Double): NDArray = { - full(shape, value, null) - } - - - /** - * Perform power operation on NDArray. Returns result as NDArray - * @param lhs - * @param rhs - */ - def power(lhs: NDArray, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_power", Seq(lhs, rhs)) - } - - /** - * Perform scalar power operation on NDArray. Returns result as NDArray - * @param lhs NDArray on which to perform the operation on. - * @param rhs The scalar input. Can be of type Float/Double - */ - def power(lhs: NDArray, rhs: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_power_scalar", Seq(lhs, rhs)) - } - - /** - * Perform scalar power operation on NDArray. Returns result as NDArray - * @param lhs The scalar input. Can be of type Float/Double - * @param rhs NDArray on which to perform the operation on. - */ - def power(lhs: MX_PRIMITIVE_TYPE, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_rpower_scalar", Seq(lhs, rhs)) - } - - // Perform maximum operator - def maximum(lhs: NDArray, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_maximum", Seq(lhs, rhs)) - } - - /** - * Perform the max operation on NDArray. Returns the result as NDArray. - * @param lhs NDArray on which to perform the operation on. - * @param rhs The scalar input. Can be of type Float/Double - */ - def maximum(lhs: NDArray, rhs: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_maximum_scalar", Seq(lhs, rhs)) - } - - /** - * Perform the max operation on NDArray. Returns the result as NDArray. - * @param lhs The scalar input. Can be of type Float/Double - * @param rhs NDArray on which to perform the operation on. - */ - def maximum(lhs: MX_PRIMITIVE_TYPE, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_maximum_scalar", Seq(lhs, rhs)) - } - - // Perform minimum operator - def minimum(lhs: NDArray, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_minimum", Seq(lhs, rhs)) - } - - /** - * Perform the min operation on NDArray. Returns the result as NDArray. - * @param lhs NDArray on which to perform the operation on. - * @param rhs The scalar input. Can be of type Float/Double - */ - def minimum(lhs: NDArray, rhs: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_minimum_scalar", Seq(lhs, rhs)) - } - - /** - * Perform the min operation on NDArray. Returns the result as NDArray. - * @param lhs The scalar input. Can be of type Float/Double - * @param rhs NDArray on which to perform the operation on. - */ - def minimum(lhs: MX_PRIMITIVE_TYPE, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_minimum_scalar", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **equal to** (==) comparison operation with broadcasting. - * For each element in input arrays, return 1(true) if corresponding elements are same, - * otherwise return 0(false). 
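The scalar overloads above (power, maximum, minimum) rely on the implicit Float/Double-to-MX_PRIMITIVE_TYPE conversions from MX_PRIMITIVES, so plain literals work directly. A small illustrative example:

  val x = NDArray.ones(Shape(2, 2))
  val squared = NDArray.power(x * 2f, 2f)    // elementwise (2x)^2 => 4 everywhere
  val floored = NDArray.maximum(squared, 5f) // elementwise max against a scalar => 5 everywhere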
- */ - def equal(lhs: NDArray, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("broadcast_equal", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **equal to** (==) comparison operation with broadcasting. - * For each element in input arrays, return 1(true) if corresponding elements are same, - * otherwise return 0(false). - * - * @param lhs NDArray - * @param rhs The scalar input. Can be of type Float/Double - */ - def equal(lhs: NDArray, rhs: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_equal_scalar", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **not equal to** (!=) comparison operation - * with broadcasting. - * For each element in input arrays, return 1(true) if corresponding elements are different, - * otherwise return 0(false). - */ - def notEqual(lhs: NDArray, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("broadcast_not_equal", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **not equal to** (!=) comparison operation - * with broadcasting. - * For each element in input arrays, return 1(true) if corresponding elements are different, - * otherwise return 0(false). - * @param lhs NDArray - * @param rhs The scalar input. Can be of type Float/Double - */ - def notEqual(lhs: NDArray, rhs: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_not_equal_scalar", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **greater than** (>) comparison operation - * with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are greater than rhs, - * otherwise return 0(false). - */ - def greater(lhs: NDArray, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("broadcast_greater", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **greater than** (>) comparison operation - * with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are greater than rhs, - * otherwise return 0(false). - * - * @param lhs NDArray - * @param rhs The scalar input. Can be of type Float/Double - */ - def greater(lhs: NDArray, rhs: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_greater_scalar", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **greater than or equal to** (>=) comparison - * operation with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are greater than equal to rhs, - * otherwise return 0(false). - */ - def greaterEqual(lhs: NDArray, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("broadcast_greater_equal", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **greater than or equal to** (>=) comparison - * operation with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are greater than equal to - * rhs, otherwise return 0(false). - * - * @param lhs NDArray - * @param rhs The scalar input. Can be of type Float/Double - */ - def greaterEqual(lhs: NDArray, rhs: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_greater_equal_scalar", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **lesser than** (<) comparison operation - * with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are less than rhs, - * otherwise return 0(false). 
- */ - def lesser(lhs: NDArray, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("broadcast_lesser", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **lesser than** (<) comparison operation - * with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are less than rhs, - * otherwise return 0(false). - * @param lhs NDArray - * @param rhs The scalar input. Can be of type Float/Double - */ - def lesser(lhs: NDArray, rhs: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_lesser_scalar", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **lesser than or equal to** (<=) comparison - * operation with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are - * lesser than equal to rhs, otherwise return 0(false). - */ - def lesserEqual(lhs: NDArray, rhs: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("broadcast_lesser_equal", Seq(lhs, rhs)) - } - - /** - * Returns the result of element-wise **lesser than or equal to** (<=) comparison - * operation with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are - * lesser than equal to rhs, otherwise return 0(false). - * - * @param lhs NDArray - * @param rhs The scalar input. Can be of type Float/Double - */ - def lesserEqual(lhs: NDArray, rhs: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_lesser_equal_scalar", Seq(lhs, rhs)) - } - - /** - * Create a new NDArray that copies content from source_array. - * @param sourceArr Source data to create NDArray from. - * @param shape shape of the NDArray - * @param ctx The context of the NDArray, default to current default context. - * @return The created NDArray. - */ - def array(sourceArr: Array[Float], shape: Shape, ctx: Context = null): NDArray = { - val arr = empty(shape, ctx) - arr.set(sourceArr) - arr - } - - def array(sourceArr: Array[Double], shape: Shape, ctx: Context): NDArray = { - val arr = empty(shape, ctx, dtype = DType.Float64) - arr.set(sourceArr) - arr - } - - def array(sourceArr: Array[Double], shape: Shape): NDArray = { - array(sourceArr, shape, null) - } - - /** - * Create a new NDArray based on the structure of source Array - * @param sourceArr Array[Array...Array[MX_PRIMITIVE_TYPE]...] 
- * @param ctx context like to pass in - * @return an NDArray with the same shape of the input - * @throws IllegalArgumentException if the data type is not valid - */ - def toNDArray(sourceArr: Array[_], ctx : Context = null) : NDArray = { - val shape = shapeGetter(sourceArr) - val container = new Array[Any](shape.product) - flattenArray(sourceArr, container, 0, container.length - 1) - val finalArr = container(0) match { - case f: Float => array(container.map(_.asInstanceOf[Float]), Shape(shape), ctx) - case d: Double => array(container.map(_.asInstanceOf[Double]), Shape(shape), ctx) - case _ => throw new IllegalArgumentException( - s"Unsupported type ${container(0).getClass}, please check MX_PRIMITIVES for valid types") - } - finalArr - } - - private def shapeGetter(sourceArr : Any) : ArrayBuffer[Int] = { - sourceArr match { - // e.g : Array[Double] the inner layer - case arr: Array[_] if MX_PRIMITIVES.isValidMxPrimitiveType(arr(0)) => { - ArrayBuffer[Int](arr.length) - } - // e.g : Array[Array...[]] - case arr: Array[_] => { - var arrBuffer = new ArrayBuffer[Int]() - if (!arr.isEmpty) arrBuffer = shapeGetter(arr(0)) - for (idx <- arr.indices) { - require(arrBuffer == shapeGetter(arr(idx))) - } - arrBuffer.insert(0, arr.length) - arrBuffer - } - case _ => throw new IllegalArgumentException(s"Wrong type passed: ${sourceArr.getClass}") - } - } - - private def flattenArray(sourceArr : Any, arr : Array[Any], - start : Int, end : Int) : Unit = { - sourceArr match { - case arrValid: Array[_] if MX_PRIMITIVES.isValidMxPrimitiveType(arrValid(0)) => { - for (i <- arrValid.indices) arr(start + i) = arrValid(i) - } - case arrAny: Array[_] => { - val fragment = (end - start + 1) / arrAny.length - for (i <- arrAny.indices) - flattenArray(arrAny(i), arr, start + i * fragment, start + (i + 1) * fragment) - } - case _ => throw new IllegalArgumentException(s"Wrong type passed: ${sourceArr.getClass}") - } - } - - /** - * Returns evenly spaced values within a given interval. - * Values are generated within the half-open interval [`start`, `stop`). In other - * words, the interval includes `start` but excludes `stop`. - * @param start Start of interval. The default start value is 0. - * @param stop End of interval. - * @param step Spacing between values. The default step size is 1. - * @param repeat Number of times to repeat each element. The default repeat count is 1. - * @param ctx Device context. Default context is the current default context. - * @param dType The data type of the `NDArray`. The default datatype is `DType.Float32`. - * @return NDArray of evenly spaced values in the specified range. - */ - def arange(start: Float, stop: Option[Float] = None, step: Float = 1.0f, - repeat: Int = 1, ctx: Context = Context.defaultCtx, - dType: DType = Base.MX_REAL_TYPE): NDArray = { - val params = Map("start" -> start, "step" -> step, "repeat" -> repeat, - "infer_range" -> false, "ctx" -> ctx.toString, "dtype" -> dType.toString()) - val fParams = if (stop == None) params else params ++ Map("stop" -> stop.get) - NDArray.genericNDArrayFunctionInvoke("_arange", Seq(), fParams)(0) - } - - /** - * Concatenate a list of NDArrays along the specified dimension. - * @param arrays Arrays to be concatenate. - * They must have identical shape except the first dimension. - * They also must have the same data type. - * @param axis The axis along which to concatenate. - * @param alwaysCopy Default `True`. When not `True`, - * if the arrays only contain one `NDArray`, - * that element will be returned directly, avoid copying. 
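As a sketch of how arange and concatenate fit together (shapes must agree on every axis except the concatenation axis; axis 0 is the default):

    val x = NDArray.arange(0f, Some(3f))     // [0f, 1f, 2f]
    val y = NDArray.arange(3f, Some(6f))     // [3f, 4f, 5f]
    val c = NDArray.concatenate(Seq(x, y))   // [0f, 1f, 2f, 3f, 4f, 5f]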
- * @return An `NDArray` that lives on the same context as `arrays[0].context`. - */ - def concatenate(arrays: Seq[NDArray], axis: Int = 0, alwaysCopy: Boolean = true): NDArray = { - require(arrays.size > 0, "Provide at least one array") - - val array0 = arrays(0) - if (!alwaysCopy && arrays.size == 1) { - array0 - } else { - val shapeRest1 = array0.shape.slice(0, axis) - val shapeRest2 = array0.shape.slice(axis + 1, array0.shape.length) - val dtype = array0.dtype - - val shapeAxis = - arrays.map(arr => { - require(shapeRest1 == arr.shape.slice(0, axis), - s"Mismatch between shape $shapeRest1 and ${arr.shape}") - require(shapeRest2 == arr.shape.slice(axis + 1, arr.shape.length), - s"Mismatch between shape $shapeRest2 and ${arr.shape}") - require(dtype == arr.dtype, - s"All arrays must have the same type (got ${dtype} and ${arr.dtype})") - arr.shape(axis) - }).sum - val retShape = shapeRest1 ++ Shape(shapeAxis) ++ shapeRest2 - val ret = NDArray.empty(retShape, ctx = array0.context, dtype = dtype) - - var idx = 0 - val begin = Array.fill(retShape.length)(0) - val end = retShape.toArray - for (arr <- arrays) { - if (axis == 0) { - ret.slice(idx, idx + arr.shape(0)).set(arr).dispose() - } else { - begin(axis) = idx - end(axis) = idx + arr.shape(axis) - NDArray._crop_assign(Map("out" -> ret, - "begin" -> Shape(begin), - "end" -> Shape(end)))(ret, arr) - } - idx += arr.shape(axis) - } - ret - } - } - - def concatenate(arrays: NDArray *): NDArray = { - concatenate(arrays.toSeq) - } - - /** - * Load ndarray from binary file. - * - * You can also use pickle to do the job if you only work on python. - * The advantage of load/save is the file is language agnostic. - * This means the file saved using save can be loaded by other language binding of mxnet. - * You also get the benefit being able to directly load/save from cloud storage(S3, HDFS) - * - * @param fname - * The name of the file.Can be S3 or HDFS address (remember built with S3 support). - * Example of fname: - * - `s3://my-bucket/path/my-s3-ndarray` - * - `hdfs://my-bucket/path/my-hdfs-ndarray` - * - `/path-to/my-local-ndarray` - * @return dict of str->NDArray - */ - def load(fname: String): (Array[String], Array[NDArray]) = { - val outSize = new MXUintRef - val outNameSize = new MXUintRef - val handles = ArrayBuffer.empty[NDArrayHandle] - val names = ArrayBuffer.empty[String] - checkCall(_LIB.mxNDArrayLoad(fname, outSize, handles, outNameSize, names)) - require(outNameSize.value == 0 || outNameSize.value == outSize.value, - s"Mismatch between names and arrays in file $fname") - (names.toArray, handles.map(new NDArray(_)).toArray) - } - - def load2Map(fname: String): Map[String, NDArray] = { - val (keys, vals) = load(fname) - require(keys.length == vals.length, "Loaded NDArrays have no name") - (keys zip vals).toMap - } - - def load2Array(fname: String): Array[NDArray] = { - load(fname)._2 - } - - /** - * Save list of NDArray or dict of str->NDArray to binary file. - * - * You can also use pickle to do the job if you only work on python. - * The advantage of load/save is the file is language agnostic. - * This means the file saved using save can be loaded by other language binding of mxnet. - * You also get the benefit being able to directly load/save from cloud storage(S3, HDFS) - * - * @param fname - * The name of the file.Can be S3 or HDFS address (remember built with S3 support). 
- * Example of fname: - * - `s3://my-bucket/path/my-s3-ndarray` - * - `hdfs://my-bucket/path/my-hdfs-ndarray` - * - `/path-to/my-local-ndarray` - * @param data dict of str->NDArray - */ - def save(fname: String, data: Map[String, NDArray]): Unit = { - val keys = data.keys.toArray - val handles = data.values.map(_.handle).toArray - save(fname, keys, handles) - } - - def save(fname: String, data: Traversable[NDArray]): Unit = { - save(fname, null, data.map(_.handle).toArray) - } - - private def save(fname: String, keys: Array[String], handles: Array[NDArrayHandle]): Unit = { - checkCall(_LIB.mxNDArraySave(fname, handles, keys)) - } - - def deserialize(bytes: Array[Byte]): NDArray = { - val handleRef = new NDArrayHandleRef - checkCall(_LIB.mxNDArrayLoadFromRawBytes(bytes, handleRef)) - new NDArray(handleRef.value) - } - - private def _crop_assign(kwargs: Map[String, Any] = null)(args: Any*) : NDArrayFuncReturn = { - genericNDArrayFunctionInvoke("_crop_assign", args, kwargs) - } - -} - -/** - * NDArray object in mxnet. - * NDArray is the basic ndarray/Tensor-like data structure in mxnet.
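A round-trip through save and load2Map looks like the following sketch (the /tmp path is illustrative; S3/HDFS addresses work the same way when the library is built with the corresponding support):

    val w = NDArray.ones(Shape(2, 2))
    NDArray.save("/tmp/params.nd", Map("w" -> w))   // illustrative local path
    val restored: Map[String, NDArray] = NDArray.load2Map("/tmp/params.nd")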
- * - * NOTE: NDArray is stored in native memory. Use NDArray in a try-with-resources() construct - * or a [[org.apache.mxnet.ResourceScope]] in a try-with-resource to have them - * automatically disposed. You can explicitly control the lifetime of NDArray - * by calling dispose manually. Failure to do this will result in leaking native memory. - * - */ -class NDArray private[mxnet](private[mxnet] val handle: NDArrayHandle, - val writable: Boolean) extends NativeResource { - - @deprecated("Please use ResourceScope instead", "1.5.0") - def this(handle: NDArrayHandle, - writable: Boolean = true, - addToCollector: Boolean = true) { - this(handle, writable) - if (addToCollector) { - NDArrayCollector.collect(this) - } - } - - override def nativeAddress: CPtrAddress = handle - override def nativeDeAllocator: (CPtrAddress => Int) = _LIB.mxNDArrayFree - override val bytesAllocated: Long = DType.numOfBytes(this.dtype) * this.shape.product - - override val ref: NativeResourceRef = super.register() - - // record arrays who construct this array instance - // we use weak reference to prevent gc blocking - private[mxnet] val dependencies = mutable.HashMap.empty[Long, WeakReference[NDArray]] - - private val lengthProperty = "mxnet.setNDArrayPrintLength" - private val layerProperty = "mxnet.setNDArrayPrintLayerLength" - private lazy val printLength = Try(System.getProperty(lengthProperty).toInt).getOrElse(1000) - private lazy val layerLength = Try(System.getProperty(layerProperty).toInt).getOrElse(10) - - def serialize(): Array[Byte] = { - val buf = ArrayBuffer.empty[Byte] - checkCall(_LIB.mxNDArraySaveRawBytes(handle, buf)) - buf.toArray - } - - /** - * Release the native memory.
- * The NDArrays it depends on will NOT be disposed.
- * The object shall never be used after it is disposed. - */ - override def dispose(): Unit = { - if (!super.isDisposed) { - super.dispose() - dependencies.clear() - } - } - - /** - * Dispose all NDArrays that were used to construct this array.
- * e.g. (a * b + c).disposeDeps() will dispose a, b, c (including their deps) and a * b - * @return this NDArray - */ - def disposeDeps(): NDArray = { - disposeDepsExcept() - } - - /** - * Dispose all NDArrays that were used to construct this array, except those passed in the arguments.
- * e.g. (a * b + c).disposeDepsExcept(a, b) - * will dispose c and a * b. - * Note that a, b's dependencies will not be disposed either. - * @param arrs array of NDArrays - * @return this array - */ - def disposeDepsExcept(arrs: NDArray*): NDArray = { - if (dependencies != null) { - val excepts = mutable.HashSet.empty[Long] - arrs.foreach { arr => - excepts += arr.handle - excepts ++= arr.dependencies.keys - } - dependencies.retain { case (addr, weak) => - if (excepts.contains(addr)) { - true - } else { - weak.get.foreach(_.dispose()) - false - } - } - } - this - } - - /** - * Peform an synchronize copy from the array. - * @param source The data source we should like to copy from. - */ - private def syncCopyfrom(source: Array[Float]): Unit = { - require(source.length == size, - s"array size (${source.length}) do not match the size of NDArray ($size)") - checkCall(_LIB.mxNDArraySyncCopyFromCPU(handle, source, source.length)) - } - - private def syncCopyfrom(source: Array[Double]): Unit = { - require(source.length == size, - s"array size (${source.length}) do not match the size of NDArray ($size)") - checkCall(_LIB.mxFloat64NDArraySyncCopyFromCPU(handle, source, source.length)) - } - - /** - * Visualize the internal structure of NDArray - * @return String that show the structure - */ - override def toString: String = { - val abstractND = buildStringHelper(this, this.shape.length) - val otherInfo = s"" - s"$abstractND\n$otherInfo" - } - - /** - * Helper function to create formatted NDArray output - * The NDArray will be represented in a reduced version if too large - * @param nd NDArray as the input - * @param totalSpace totalSpace of the lowest dimension - * @return String format of NDArray - */ - private def buildStringHelper(nd : NDArray, totalSpace : Int) : String = { - var result = "" - val THRESHOLD = layerLength // longest NDArray[NDArray[...]] to show in full - val ARRAYTHRESHOLD = printLength // longest array to show in full - val shape = nd.shape - val space = totalSpace - shape.length - if (shape.length != 1) { - val (length, postfix) = - if (shape(0) > THRESHOLD) { - // reduced NDArray - (10, s"\n${" " * (space + 1)}... with length ${shape(0)}\n") - } else { - (shape(0), "") - } - for (num <- 0 until length) { - val output = buildStringHelper(nd.at(num), totalSpace) - result += s"$output\n" - } - result = s"${" " * space}[\n$result${" " * space}$postfix${" " * space}]" - } else { - if (shape(0) > ARRAYTHRESHOLD) { - // reduced Array - val front = nd.slice(0, 10) - val back = nd.slice(shape(0) - 10, shape(0) - 1) - result = s"""${" " * space}[${front.toArray.mkString(",")} - | ... ${back.toArray.mkString(",")}]""".stripMargin - } else { - result = s"${" " * space}[${nd.toArray.mkString(",")}]" - } - } - result - } - - /** - * Return a sliced NDArray that shares memory with current one. - * NDArray only support continuous slicing on axis 0 - * - * @param start Starting index of slice. - * @param stop Finishing index of slice. - * - * @return a sliced NDArray that shares memory with current one. - */ - def slice(start: Int, stop: Int): NDArray = { - val sliceHandle = new NDArrayHandleRef - checkCall(_LIB.mxNDArraySlice(handle, start, stop, sliceHandle)) - new NDArray(handle = sliceHandle.value, writable = this.writable) - } - - def slice(range: (Int, Int)): NDArray = { - slice(range._1, range._2) - } - - /** - * Return a sliced NDArray at the ith position of axis0 - * @param i - * @return a sliced NDArray that shares memory with current one. 
- */ - def slice(i: Int): NDArray = { - slice(i, i + 1) - } - - /** - * Return a sub NDArray that shares memory with current one. - * the first axis will be rolled up, which causes its shape different from slice(i, i+1) - * @param idx index of sub array. - */ - def at(idx: Int): NDArray = { - val handleRef = new NDArrayHandleRef() - checkCall(_LIB.mxNDArrayAt(this.handle, idx, handleRef)) - new NDArray(handle = handleRef.value, writable = this.writable) - } - - // Get transpose of current NDArray - def T: NDArray = { - require(this.shape.size == 2, "Only 2D matrix is allowed to be transposed") - NDArray.genericNDArrayFunctionInvoke("transpose", Seq(this)) - } - - /** - * Get data type of current NDArray. - * @return class representing type of current ndarray - */ - def dtype: DType = { - val mxDtype = new RefInt - checkCall(_LIB.mxNDArrayGetDType(handle, mxDtype)) - DType(mxDtype.value) - } - - // This is a optimization on the SparseFormat checking - // TODO: In some cases, the checking on Sparse is invalid (-1) - lazy val sparseFormat: SparseFormat = { - val mxSF = new RefInt - checkCall(_LIB.mxNDArrayGetStorageType(handle, mxSF)) - SparseFormat(mxSF.value) - } - - /** - * Return a copied numpy array of current array with specified type. - * @param dtype Desired type of result array. - * @return A copy of array content. - */ - def asType(dtype: DType): NDArray = { - val res = NDArray.empty(this.shape, ctx = this.context, dtype = dtype) - this.copyTo(res) - res - } - - /** - * Return a reshaped NDArray that shares memory with current one. - * @param dims New shape. - * - * @return a reshaped NDArray that shares memory with current one. - */ - def reshape(dims: Array[Int]): NDArray = { - reshape(dims.map(_.toLong)) - } - - /** - * Return a reshaped NDArray that shares memory with current one. - * @param dims New shape. - * @param reverse whether to inplace reshape - * @return a reshaped NDArray that shares memory with current one. - */ - def reshape(dims: Array[Long], reverse: Option[Boolean] = None): NDArray = { - val reshapeHandle = new NDArrayHandleRef - checkCall(_LIB.mxNDArrayReshape64(handle, - dims.length, dims, reverse.getOrElse(false), reshapeHandle)) - new NDArray(handle = reshapeHandle.value, writable = this.writable) - } - - /** - * Return a reshaped NDArray that shares memory with current one. - * @param dims New shape. - * - * @return a reshaped NDArray that shares memory with current one. - */ - def reshape(dims: Shape): NDArray = { - reshape(dims.toArray) - } - - /** - * Block until all pending writes operations on current NDArray are finished. - * This function will return when all the pending writes to the current - * NDArray finishes. There can still be pending read going on when the - * function returns. - */ - def waitToRead(): Unit = { - checkCall(_LIB.mxNDArrayWaitToRead(handle)) - } - - /** - * Get context of current NDArray. - * @return The context of current NDArray. 
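For reference, slicing and reshaping compose as follows; both at and slice return views that share memory with the parent (a sketch, values illustrative):

    val nd = NDArray.arange(0f, Some(6f)).reshape(Array(2, 3))
    val row = nd.at(0)          // shape (3,): the first axis is rolled up
    val sub = nd.slice(0, 1)    // shape (1, 3): a slice along axis 0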
- */ - def context: Context = { - val devTypeId = new RefInt - val devId = new RefInt - checkCall(_LIB.mxNDArrayGetContext(handle, devTypeId, devId)) - new Context(Context.devtype2str(devTypeId.value), devId.value) - } - - /** - * Set the values of the NDArray - * @param value Value to set - * @return Current NDArray - */ - def set(value: MX_PRIMITIVE_TYPE): NDArray = { - require(writable, "trying to assign to a readonly NDArray") - NDArray.genericNDArrayFunctionInvoke("_set_value", Seq(value), Map("out" -> this)) - this - } - - def set(other: NDArray): NDArray = { - require(writable, "trying to assign to a readonly NDArray") - other.copyTo(this) - } - - def set(other: Array[Float]): NDArray = { - require(writable, "trying to assign to a readonly NDArray") - syncCopyfrom(other) - this - } - - def set(other: Array[Double]): NDArray = { - require(writable, "trying to assign to a readonly NDArray") - syncCopyfrom(other) - this - } - - def +(other: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_plus", Seq(this, other)) - } - - def +(other: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_plus_scalar", Seq(this, other)) - } - - def +=(other: NDArray): NDArray = { - if (!writable) { - throw new IllegalArgumentException("trying to add to a readonly NDArray") - } - NDArray.genericNDArrayFunctionInvoke("_plus", Seq(this, other), Map("out" -> this)) - this - } - - def +=(other: MX_PRIMITIVE_TYPE): NDArray = { - if (!writable) { - throw new IllegalArgumentException("trying to add to a readonly NDArray") - } - NDArray.genericNDArrayFunctionInvoke("_plus_scalar", Seq(this, other), Map("out" -> this)) - this - } - - def -(other: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_minus", Seq(this, other)) - } - - def -(other: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_minus_scalar", Seq(this, other)) - } - - def -=(other: NDArray): NDArray = { - if (!writable) { - throw new IllegalArgumentException("trying to subtract from a readonly NDArray") - } - NDArray.genericNDArrayFunctionInvoke("_minus", Seq(this, other), Map("out" -> this)) - this - } - - def -=(other: MX_PRIMITIVE_TYPE): NDArray = { - if (!writable) { - throw new IllegalArgumentException("trying to subtract from a readonly NDArray") - } - NDArray.genericNDArrayFunctionInvoke("_minus_scalar", Seq(this, other), Map("out" -> this)) - this - } - - def *(other: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_mul", Seq(this, other)) - } - - def *(other: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_mul_scalar", Seq(this, other)) - } - - def unary_-(): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_mul_scalar", Seq(this, -1f)) - } - - def *=(other: NDArray): NDArray = { - if (!writable) { - throw new IllegalArgumentException("trying to multiply to a readonly NDArray") - } - NDArray.genericNDArrayFunctionInvoke("_mul", Seq(this, other), Map("out" -> this)) - this - } - - def *=(other: MX_PRIMITIVE_TYPE): NDArray = { - if (!writable) { - throw new IllegalArgumentException("trying to multiply to a readonly NDArray") - } - NDArray.genericNDArrayFunctionInvoke("_mul_scalar", Seq(this, other), Map("out" -> this)) - this - } - - def /(other: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_div", Seq(this, other)) - } - - def /(other: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_div_scalar", Seq(this, other)) - } - - def /=(other: NDArray): NDArray = { - if (!writable) { - throw new 
IllegalArgumentException("trying to divide from a readonly NDArray") - } - NDArray.genericNDArrayFunctionInvoke("_div", Seq(this, other), Map("out" -> this)) - this - } - - def /=(other: MX_PRIMITIVE_TYPE): NDArray = { - if (!writable) { - throw new IllegalArgumentException("trying to divide from a readonly NDArray") - } - NDArray.genericNDArrayFunctionInvoke("_div_scalar", Seq(this, other), Map("out" -> this)) - this - } - - def **(other: NDArray): NDArray = { - NDArray.power(this, other) - } - - def **(other: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.power(this, other) - } - - def **=(other: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_power", Seq(this, other), Map("out" -> this)) - } - - def **=(other: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_power_scalar", Seq(this, other), Map("out" -> this)) - } - - def >(other: NDArray): NDArray = { - NDArray.greater(this, other) - } - - def >(other: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.greater(this, other) - } - - def >=(other: NDArray): NDArray = { - NDArray.greaterEqual(this, other) - } - - def >=(other: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.greaterEqual(this, other) - } - - def <(other: NDArray): NDArray = { - NDArray.lesser(this, other) - } - - def <(other: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.lesser(this, other) - } - - def <=(other: NDArray): NDArray = { - NDArray.lesserEqual(this, other) - } - - def <=(other: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.lesserEqual(this, other) - } - - def %(other: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_mod", Seq(this, other)) - } - - def %(other: MX_PRIMITIVE_TYPE): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_mod_scalar", Seq(this, other)) - } - - def %=(other: NDArray): NDArray = { - if (!writable) { - throw new IllegalArgumentException("trying to take modulo from a readonly NDArray") - } - NDArray.genericNDArrayFunctionInvoke("_mod", Seq(this, other), Map("out" -> this)) - this - } - - def %=(other: MX_PRIMITIVE_TYPE): NDArray = { - if (!writable) { - throw new IllegalArgumentException("trying to take modulo from a readonly NDArray") - } - NDArray.genericNDArrayFunctionInvoke("_mod_scalar", Seq(this, other), Map("out" -> this)) - this - } - - /** - * Return a copied flat java array of current array (row-major). - * @return A copy of array content. - */ - def toArray: Array[Float] = { - internal.toFloatArray - } - - /** - * Return a copied flat java array of current array (row-major) with datatype as Float64/Double. - * @return A copy of array content. - */ - def toFloat64Array: Array[Double] = { - internal.toDoubleArray - } - - def internal: NDArrayInternal = { - val myType = dtype - val arrLength = DType.numOfBytes(myType) * size - val arr = Array.ofDim[Byte](arrLength) - checkCall(_LIB.mxNDArraySyncCopyToCPU(handle, arr, size)) - new NDArrayInternal(arr, myType) - } - - /** - * Return a CPU scalar(float) of current ndarray. - * This ndarray must have shape (1,) - * - * @return The scalar representation of the ndarray. - */ - def toScalar: Float = { - require(shape == Shape(1), "The current array is not a scalar") - this.toArray(0) - } - - def toFloat64Scalar: Double = { - require(shape == Shape(1), "The current array is not a scalar") - this.toFloat64Array(0) - } - - /** - * Copy the content of current array to other. - * - * @param other Target NDArray or context we want to copy data to. 
- * @return The copy target NDArray - */ - def copyTo(other: NDArray): NDArray = { - if (other.handle == this.handle) { - NDArray.logger.warn("copy an array to itself, is it intended ?") - } else { - NDArray.genericNDArrayFunctionInvoke("_copyto", Seq(this), Map("out" -> other)) - } - other - } - - /** - * Copy the content of current array to a new NDArray in the context. - * - * @param ctx Target context we want to copy data to. - * @return The copy target NDArray - */ - def copyTo(ctx: Context): NDArray = { - val ret = new NDArray(NDArray.newAllocHandle(shape, ctx, delayAlloc = true, dtype = dtype)) - copyTo(ret) - } - - /** - * Clone the current array - * @return the copied NDArray in the same context - */ - def copy(): NDArray = copyTo(this.context) - - /** - * Get shape of current NDArray. - * @return an array representing shape of current ndarray - */ - def shape: Shape = { - val ndim = new RefInt - val data = ArrayBuffer[Int]() - checkCall(_LIB.mxNDArrayGetShape(handle, ndim, data)) - if (ndim.value == -1) { - null - } else { - require(ndim.value == data.length, s"ndim=$ndim, while len(data)=${data.length}") - Shape(data) - } - } - - // Get size of current NDArray. - def size: Int = shape.product - - /** - * Return an `NDArray` that lives in the target context. If the array - * is already in that context, `self` is returned. Otherwise, a copy is made. - * @param context The target context we want the return value to live in. - * @return A copy or `self` as an `NDArray` that lives in the target context. - */ - def asInContext(context: Context): NDArray = { - if (this.context == context) this else this.copyTo(context) - } - - /** - * check if NDArray is SparseNDArray - * @return Boolean - */ - def isSparse: Boolean = { - this.sparseFormat.id != 0 - } - - /** - * Convert a NDArray to SparseNDArray - * - * @param sfOption the target sparse type - * @return SparseNDArray - */ - def toSparse(sfOption : Option[SparseFormat] = None): SparseNDArray = { - val sf = sfOption.getOrElse(SparseFormat.ROW_SPARSE) - if (sf.id == 0) throw new IllegalArgumentException("Require Sparse") - if (isSparse && sfOption.isEmpty) { - this.asInstanceOf[SparseNDArray] - } else { - NDArray.api.cast_storage(this, sf.toString).head.asInstanceOf[SparseNDArray] - } - } - - override def equals(o: Any): Boolean = o match { - case that: NDArray => - that != null && that.shape == this.shape && that.toArray.sameElements(this.toArray) - case _ => false - } - - override def hashCode: Int = { - // TODO: naive implementation - shape.hashCode + toArray.hashCode - } - -} - -private[mxnet] object NDArrayConversions { - implicit def int2Scalar(x: Int): NDArrayConversions = new NDArrayConversions(x.toFloat) - implicit def double2Scalar(x: Double): NDArrayConversions = new NDArrayConversions(x) - implicit def float2Scalar(x: Float): NDArrayConversions = new NDArrayConversions(x) -} - -private[mxnet] class NDArrayConversions(val value: MX_PRIMITIVE_TYPE) { - def +(other: NDArray): NDArray = { - other + value - } - def +(other: NDArrayFuncReturn): NDArray = { - other.head + value - } - - def -(other: NDArray): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_rminus_scalar", Seq(other, value)) - } - def -(other: NDArrayFuncReturn): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_rminus_scalar", Seq(other.head, value)) - } - - def *(other: NDArray): NDArray = { - other * value - } - def *(other: NDArrayFuncReturn): NDArray = { - other.head * value - } - - def /(other: NDArray): NDArray = { - 
NDArray.genericNDArrayFunctionInvoke("_rdiv_scalar", Seq(other, value)) - } - def /(other: NDArrayFuncReturn): NDArray = { - NDArray.genericNDArrayFunctionInvoke("_rdiv_scalar", Seq(other.head, value)) - } - - def **(other: NDArray): NDArray = { - NDArray.power(value, other) - } - def **(other: NDArrayFuncReturn): NDArray = { - NDArray.power(value, other.head) - } - - def >(other: NDArray): NDArray = { - NDArray.lesser(other, value) - } - def >(other: NDArrayFuncReturn): NDArray = { - NDArray.lesser(other.head, value) - } - - def >=(other: NDArray): NDArray = { - NDArray.lesserEqual(other, value) - } - def >=(other: NDArrayFuncReturn): NDArray = { - NDArray.lesserEqual(other.head, value) - } - - def <(other: NDArray): NDArray = { - NDArray.greater(other, value) - } - def <(other: NDArrayFuncReturn): NDArray = { - NDArray.greater(other.head, value) - } - - def <=(other: NDArray): NDArray = { - NDArray.greaterEqual(other, value) - } - def <=(other: NDArrayFuncReturn): NDArray = { - NDArray.greaterEqual(other.head, value) - } -} - -private case class NDArrayFunction(handle: NDArrayHandle, arguments: List[String]) - -private[mxnet] class NDArrayFuncReturn(private[mxnet] val arr: Array[NDArray]) { - def head: NDArray = apply(0) - def get: NDArray = { - require(arr.length == 1, s"return array length = ${arr.length}") - head - } - def apply(i: Int): NDArray = { - if (arr == null || arr.length <= i) { - null - } else { - arr(i) - } - } - - // copy methods from NDArray - def isDisposed: Boolean = head.isDisposed - def serialize(): Array[Byte] = head.serialize() - def dispose(): Unit = head.dispose() - def disposeDeps(): NDArray = head.disposeDeps() - def disposeDepsExcept(arrs: NDArray*): NDArray = head.disposeDepsExcept(arrs: _*) - def slice(start: Int, stop: Int): NDArray = head.slice(start, stop) - def slice(range: (Int, Int)): NDArray = head.slice(range) - def slice(i: Int): NDArray = head.slice(i) - def reshape(dims: Array[Int]): NDArray = head.reshape(dims) - def waitToRead(): Unit = head.waitToRead() - def context: Context = head.context - def set(value: Float): NDArray = head.set(value) - def set(value: Double): NDArray = head.set(value) - def set(other: NDArray): NDArray = head.set(other) - def set(other: Array[Float]): NDArray = head.set(other) - def set(other: Array[Double]): NDArray = head.set(other) - def +(other: NDArray): NDArray = head + other - def +(other: MX_PRIMITIVE_TYPE): NDArray = head + other - def +=(other: NDArray): NDArray = head += other - def +=(other: MX_PRIMITIVE_TYPE): NDArray = head += other - def -(other: NDArray): NDArray = head - other - def -(other: MX_PRIMITIVE_TYPE): NDArray = head - other - def -=(other: NDArray): NDArray = head -= other - def -=(other: MX_PRIMITIVE_TYPE): NDArray = head -= other - def *(other: NDArray): NDArray = head * other - def *(other: MX_PRIMITIVE_TYPE): NDArray = head * other - def unary_-(): NDArray = -head - def *=(other: NDArray): NDArray = head *= other - def *=(other: MX_PRIMITIVE_TYPE): NDArray = head *= other - def /(other: NDArray): NDArray = head / other - def /(other: MX_PRIMITIVE_TYPE): NDArray = head / other - def **(other: NDArray): NDArray = head ** other - def **(other: MX_PRIMITIVE_TYPE): NDArray = head ** other - def >(other: NDArray): NDArray = head > other - def >(other: MX_PRIMITIVE_TYPE): NDArray = head > other - def >=(other: NDArray): NDArray = head >= other - def >=(other: MX_PRIMITIVE_TYPE): NDArray = head >= other - def <(other: NDArray): NDArray = head < other - def <(other: MX_PRIMITIVE_TYPE): NDArray = 
head < other - def <=(other: NDArray): NDArray = head <= other - def <=(other: MX_PRIMITIVE_TYPE): NDArray = head <= other - def toArray: Array[Float] = head.toArray - def toFloat64Array: Array[Double] = head.toFloat64Array - def toScalar: Float = head.toScalar - def toFloat64Scalar: Double = head.toFloat64Scalar - def copyTo(other: NDArray): NDArray = head.copyTo(other) - def copyTo(ctx: Context): NDArray = head.copyTo(ctx) - def copy(): NDArray = head.copy() - def shape: Shape = head.shape - def size: Int = head.size - def asInContext(context: Context): NDArray = head.asInContext(context) -} - -private[mxnet] class NDArrayInternal (private val internal: Array[Byte], private val dtype: DType) { - private val unitSize = DType.numOfBytes(dtype) - require(internal.length > 0 && internal.length % unitSize == 0, - s"$dtype size $unitSize cannot divide byte array size ${internal.length}") - private val units: Array[Array[Byte]] = ( - for (i <- 0 until internal.length / unitSize) - yield internal.slice(i * unitSize, (i + 1) * unitSize) - ).toArray - - def getRaw: Array[Byte] = internal - def toDoubleArray: Array[Double] = { - require(dtype != DType.Float16, "Currently cannot convert float16 to native numerical types") - dtype match { - case DType.Float32 => units.map(wrapBytes(_).getFloat.toDouble) - case DType.Float64 => units.map(wrapBytes(_).getDouble) - case DType.Int32 => units.map(wrapBytes(_).getInt.toDouble) - case DType.Int64 => units.map(wrapBytes(_).getLong.toDouble) - case DType.UInt8 => internal.map(_.toDouble) - } - } - def toFloatArray: Array[Float] = { - require(dtype != DType.Float16, "Currently cannot convert float16 to native numerical types") - dtype match { - case DType.Float32 => units.map(wrapBytes(_).getFloat) - case DType.Float64 => units.map(wrapBytes(_).getDouble.toFloat) - case DType.Int32 => units.map(wrapBytes(_).getInt.toFloat) - case DType.Int64 => units.map(wrapBytes(_).getLong.toFloat) - case DType.UInt8 => internal.map(_.toFloat) - } - } - def toIntArray: Array[Int] = { - require(dtype != DType.Float16, "Currently cannot convert float16 to native numerical types") - dtype match { - case DType.Float32 => units.map(wrapBytes(_).getFloat.toInt) - case DType.Float64 => units.map(wrapBytes(_).getDouble.toInt) - case DType.Int32 => units.map(wrapBytes(_).getInt) - case DType.Int64 => units.map(wrapBytes(_).getLong.toInt) - case DType.UInt8 => internal.map(_.toInt) - } - } - def toLongArray: Array[Long] = { - require(dtype != DType.Float16, "Currently cannot convert float16 to native numerical types") - dtype match { - case DType.Float32 => units.map(wrapBytes(_).getFloat.toLong) - case DType.Float64 => units.map(wrapBytes(_).getDouble.toLong) - case DType.Int32 => units.map(wrapBytes(_).getInt.toLong) - case DType.Int64 => units.map(wrapBytes(_).getLong) - case DType.UInt8 => internal.map(_.toLong) - } - } - def toByteArray: Array[Byte] = { - require(dtype != DType.Float16, "Currently cannot convert float16 to native numerical types") - dtype match { - case DType.Float16 | DType.Float32 => units.map(wrapBytes(_).getFloat.toByte) - case DType.Float64 => units.map(wrapBytes(_).getDouble.toByte) - case DType.Int32 => units.map(wrapBytes(_).getInt.toByte) - case DType.Int64 => units.map(wrapBytes(_).getLong.toByte) - case DType.UInt8 => internal.clone() - } - } - - private def wrapBytes(bytes: Array[Byte]): ByteBuffer = { - val bb = ByteBuffer.wrap(bytes) - bb.order(ByteOrder.LITTLE_ENDIAN) - bb - } -} diff --git 
a/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayAPI.scala b/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayAPI.scala deleted file mode 100644 index 024fed1c4ba6..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayAPI.scala +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.mxnet - -/** - * typesafe NDArray API: NDArray.api._ - * Main code will be generated during compile time through Macros - */ -@AddNDArrayAPIs(false) -object NDArrayAPI extends NDArrayAPIBase { - // TODO: Implement CustomOp for NDArray -} - -/** - * typesafe NDArray random module: NDArray.random._ - * Main code will be generated during compile time through Macros - */ -@AddNDArrayRandomAPIs(false) -object NDArrayRandomAPI extends NDArrayRandomAPIBase { - -} - diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayCollector.scala b/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayCollector.scala deleted file mode 100644 index 0761481cdfe8..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayCollector.scala +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.Base.CPtrAddress -import org.apache.mxnet.annotation.Experimental -import org.slf4j.LoggerFactory - -import scala.annotation.varargs -import scala.collection.mutable - -/** - * A collector to store NDArrays. - * It provides a scope, NDArrays allocated in the scope can either
- * - be disposed automatically when the code block finishes, or
- * - simply be collected for future usage. - *
- * If the return type of the scope is NDArray or NDArrayFuncReturn, - * the collector does not collect or dispose the returned NDArray.
- * However, in other cases it is the user's responsibility not to leak allocated NDArrays outside the scope - * (e.g., by storing one in a global variable for later use, or passing it to another thread).
- * Usage Example: - *
- *  val a = NDArray.array(Array(-1f, 0f, 1f, 2f, 3f, 4f), shape = Shape(2, 3))
- *  val res = NDArrayCollector.auto().withScope {
- *    (NDArray.relu(a) + a).toArray
- *  }
- *  
- * In the case above, the intermediate NDArrays - * (created by NDArray.relu and +) will be disposed automatically.
- * Users can also choose to dispose the collected NDArrays later:
- *
- *  val collector = NDArrayCollector.manual()
- *  val res = collector.withScope {
- *    (NDArray.relu(a) + a).toArray
- *  }
- *  collector.foreach(_.dispose())
- *  
- * For Java users:
- *
- *  NDArray a = NDArray.array(new float[]{-1f, 0f, 1f, 2f, 3f, 4f},
- *                            Shape.create(2, 3), Context.cpu(0));
- *  float[] sliced = NDArrayCollector.auto().withScope(
- *    new scala.runtime.AbstractFunction0&lt;float[]&gt;() {
- *    @Override
- *    public float[] apply() {
- *      a.slice(0, 1).toArray();
- *    }
- *  });
- *  
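Since this class is deprecated in favor of ResourceScope, the auto() example above translates to roughly the following sketch (assuming ResourceScope.using from the same package):

    val res: Array[Float] = ResourceScope.using() {
      (NDArray.relu(a) + a).toArray   // intermediates disposed when the scope exits
    }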
- */ -@deprecated("Please use ResourceScope instead", "1.5.0") -object NDArrayCollector { - private val logger = LoggerFactory.getLogger(classOf[NDArrayCollector]) - - private val currCollector = new ThreadLocal[NDArrayCollector] { - override def initialValue = new NDArrayCollector(false, false) - } - - /** - * Create a collector which will dispose the collected NDArrays automatically. - * @return an auto-disposable collector. - */ - @deprecated("Please use ResourceScope instead", "1.5.0") - def auto(): NDArrayCollector = new NDArrayCollector(true) - - /** - * Create a collector allows users to later dispose the collected NDArray manually. - * @return a manually-disposable collector. - */ - @deprecated("Please use ResourceScope instead", "1.5.0") - @Experimental - def manual(): NDArrayCollector = new NDArrayCollector(false) - - /** - * Collect the NDArrays into the collector of the current thread. - * @param ndArray NDArrays need to be collected. - */ - @deprecated("Please use ResourceScope instead", "1.5.0") - @varargs def collect(ndArray: NDArray*): Unit = { - currCollector.get().add(ndArray: _*) - } -} - -@deprecated("Please use ResourceScope instead", "1.5.0") -class NDArrayCollector private(private val autoDispose: Boolean = true, - private val doCollect: Boolean = true) { - // native ptr (handle) of the NDArray -> NDArray - // in some rare situation, multiple NDArrays have same native ptr, - // the Map here is to prevent from disposing more than once. - private val arrays = mutable.HashMap.empty[CPtrAddress, NDArray] - - private def add(nd: NDArray*): Unit = { - if (doCollect) nd.foreach(arr => arrays.put(arr.handle, arr)) - } - - /** - * Clear the collector. - */ - def clear(): Unit = { - arrays.clear() - } - - /** - * Iterate over the collected NDArrays and apply the user-defined function to each NDArray. - * @param f the function that is applied for its side-effect to every NDArray. - * The result of function f is discarded. - */ - def foreach(f: NDArray => Unit): Unit = { - arrays.values.foreach(f(_)) - } - - /** - * @return how many unique NDArrays are collected. - */ - def size: Int = arrays.size - - /** - * Create a code scope, NDArrays allocated within this scope will be collected. - * The collected NDArrays will be either
- * - disposed automatically when the code block finishes (when using auto) or
- * - stored for later access (when using manual)
- * If the return type of the scope is NDArray or NDArrayFuncReturn, - * the returned NDArray is neither collected nor disposed.
- * However, in other cases it is the user's responsibility not to leak allocated NDArrays outside the scope. - *
- * We might switch to try -with-resources statement (by AutoCloseable in Java 1.7+) - * and deprecate this method later, thus it is marked as Experimental. - * - * @param codeBlock code block to be executed within the scope. - * @tparam T return type of the function codeBlock. - * @return The result of function codeBlock. - */ - @Experimental - @deprecated("Please use ResourceScope instead", "1.5.0") - def withScope[T](codeBlock: => T): T = { - val old = NDArrayCollector.currCollector.get() - NDArrayCollector.currCollector.set(this) - try { - val ret = codeBlock - ret match { - case ndRet: NDArray => - arrays.remove(ndRet.handle) - case ndarrays: NDArrayFuncReturn => - ndarrays.arr.foreach(nd => arrays.remove(nd.handle)) - case _ => // do nothing - } - ret - } finally { - if (autoDispose) { - foreach(_.dispose()) - clear() - } - NDArrayCollector.currCollector.set(old) - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/NameManager.scala b/scala-package/core/src/main/scala/org/apache/mxnet/NameManager.scala deleted file mode 100644 index dbc961235508..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/NameManager.scala +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import scala.collection.mutable - -/** - * NameManager to do automatic naming. - * User can also inherit this object to change naming behavior. - */ -class NameManager { - val counter: mutable.Map[String, Int] = mutable.HashMap.empty[String, Int] - /** - * Get the canonical name for a symbol. - * This is default implementation. - * When user specified a name, - * the user specified name will be used. - * When user did not, we will automatically generate a name based on hint string. - * - * @param name : The name user specified. - * @param hint : A hint string, which can be used to generate name. - * @return A canonical name for the user. 
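A short sketch of the naming behavior implemented below (outputs follow from the counter logic in get):

    val nm = new NameManager
    nm.get(None, hint = "conv")      // "conv0"
    nm.get(None, hint = "conv")      // "conv1"
    nm.get(Some("myconv"), "conv")   // "myconv": an explicit name always wins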
- */ - def get(name: Option[String], hint: String): String = { - name.getOrElse { - if (!counter.contains(hint)) { - counter(hint) = 0 - } - val generatedName = s"$hint${counter(hint)}" - counter(hint) += 1 - generatedName - } - } - - def withScope[T](body: => T): T = { - val oldManager = NameManager.current - NameManager.setCurrentManager(this) - try { - body - } finally { - NameManager.setCurrentManager(oldManager) - } - } -} - -object NameManager { - private var _current = new NameManager() - def current: NameManager = _current - private def setCurrentManager(manager: NameManager): Unit = { - _current = manager - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/NativeResource.scala b/scala-package/core/src/main/scala/org/apache/mxnet/NativeResource.scala deleted file mode 100644 index 1806b8653376..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/NativeResource.scala +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.Base.CPtrAddress -import java.lang.ref.{PhantomReference, ReferenceQueue, WeakReference} -import java.util.concurrent._ - -import org.apache.mxnet.Base.checkCall -import java.util.concurrent.atomic.AtomicLong - - -/** - * NativeResource trait is used to manage MXNet Objects - * such as NDArray, Symbol, Executor, etc., - * The MXNet Object calls NativeResource.register - * and assign the returned NativeResourceRef to PhantomReference - * NativeResource also implements AutoCloseable so MXNetObjects - * can be used like Resources in try-with-resources paradigm - */ -private[mxnet] trait NativeResource - extends AutoCloseable with WarnIfNotDisposed { - - /** - * native Address associated with this object - */ - def nativeAddress: CPtrAddress - - /** - * Function Pointer to the NativeDeAllocator of nativeAddress - */ - def nativeDeAllocator: (CPtrAddress => Int) - - /** - * Call NativeResource.register to get the reference - */ - val ref: NativeResourceRef - - /** - * Off-Heap Bytes Allocated for this object - */ - // intentionally making it a val, so it gets evaluated when defined - val bytesAllocated: Long - - // this is set and unset by [[ResourceScope.add]] and [[ResourceScope.remove]] - private[mxnet] var scope: Option[ResourceScope] = None - - @volatile private var disposed = false - - override def isDisposed: Boolean = disposed || isDeAllocated - - /** - * Register this object for PhantomReference tracking and in - * ResourceScope if used inside ResourceScope. 
- * @return NativeResourceRef that tracks reachability of this object - * using PhantomReference - */ - def register(): NativeResourceRef = { - val scope = ResourceScope.getCurrentScope() - if (scope.isDefined) scope.get.add(this) - - NativeResource.totalBytesAllocated.getAndAdd(bytesAllocated) - // register with PhantomRef tracking to release in case the objects go - // out of reference within scope but are held for long time - NativeResourceRef.register(this, nativeDeAllocator) - } - - // Implements [[@link AutoCloseable.close]] - override def close(): Unit = { - dispose() - } - - // Implements [[@link WarnIfNotDisposed.dispose]] - def dispose(): Unit = dispose(true) - - /** - * This method deAllocates nativeResource and deRegisters - * from PhantomRef and removes from Scope if - * removeFromScope is set to true. - * @param removeFromScope remove from the currentScope if true - */ - // the parameter here controls whether to remove from current scope. - // [[ResourceScope.close]] calls NativeResource.dispose - // if we remove from the ResourceScope ie., from the container in ResourceScope. - // while iterating on the container, calling iterator.next is undefined and not safe. - // Note that ResourceScope automatically disposes all the resources within. - private[mxnet] def dispose(removeFromScope: Boolean = true): Unit = { - if (!disposed) { - checkCall(nativeDeAllocator(this.nativeAddress)) - NativeResourceRef.deRegister(ref) // removes from PhantomRef tracking - if (removeFromScope && scope.isDefined) scope.get.remove(this) - NativeResource.totalBytesAllocated.getAndAdd(-1*bytesAllocated) - disposed = true - } - } - - /* - this is used by the WarnIfNotDisposed finalizer, - the object could be disposed by the GC without the need for explicit disposal - but the finalizer might not have run, then the WarnIfNotDisposed throws a warning - */ - private[mxnet] def isDeAllocated(): Boolean = NativeResourceRef.isDeAllocated(ref) - -} - -private[mxnet] object NativeResource { - var totalBytesAllocated : AtomicLong = new AtomicLong(0) -} - -// Do not make [[NativeResource.resource]] a member of the class, -// this will hold reference and GC will not clear the object. -private[mxnet] class NativeResourceRef(resource: NativeResource, - val resourceDeAllocator: CPtrAddress => Int) - extends PhantomReference[NativeResource](resource, NativeResourceRef.refQ) {} - -private[mxnet] object NativeResourceRef { - - private[mxnet] val refQ: ReferenceQueue[NativeResource] - = new ReferenceQueue[NativeResource] - - private[mxnet] val refMap = new ConcurrentHashMap[NativeResourceRef, CPtrAddress]() - - private[mxnet] val cleaner = new ResourceCleanupThread() - - cleaner.start() - - def register(resource: NativeResource, nativeDeAllocator: (CPtrAddress => Int)): - NativeResourceRef = { - val ref = new NativeResourceRef(resource, nativeDeAllocator) - refMap.put(ref, resource.nativeAddress) - ref - } - - // remove from PhantomRef tracking - def deRegister(ref: NativeResourceRef): Unit = refMap.remove(ref) - - /** - * This method will check if the cleaner ran and deAllocated the object - * As a part of GC, when the object is unreachable GC inserts a phantomRef - * to the ReferenceQueue which the cleaner thread will deallocate, however - * the finalizer runs much later depending on the GC. 
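Because NativeResource implements AutoCloseable, a resource can also be released deterministically with a standard resource-management helper; a sketch, assuming scala.util.Using (Scala 2.13+; older code would use try/finally):

    import scala.util.Using
    Using.resource(NDArray.ones(Shape(2, 2))) { nd =>
      nd.toArray   // nd.close(), i.e. dispose(), runs when this block exits
    }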
- * @param resource resource to verify if it has been deAllocated - * @return true if already deAllocated - */ - def isDeAllocated(ref: NativeResourceRef): Boolean = { - !refMap.containsKey(ref) - } - - def cleanup: Unit = { - // remove is a blocking call - val ref: NativeResourceRef = refQ.remove().asInstanceOf[NativeResourceRef] - // phantomRef will be removed from the map when NativeResource.close is called. - val resource = refMap.get(ref) - if (resource != 0L) { // since CPtrAddress is Scala a Long, it cannot be null - ref.resourceDeAllocator(resource) - refMap.remove(ref) - } - } - - protected class ResourceCleanupThread extends Thread { - setPriority(Thread.MAX_PRIORITY) - setName("NativeResourceDeAllocatorThread") - setDaemon(true) - - override def run(): Unit = { - while (true) { - try { - NativeResourceRef.cleanup - } - catch { - case _: InterruptedException => Thread.currentThread().interrupt() - } - } - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/NumpyScope.scala b/scala-package/core/src/main/scala/org/apache/mxnet/NumpyScope.scala deleted file mode 100644 index b63095a10cc1..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/NumpyScope.scala +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.Base._ - -/** - * NumpyScope object provides util functions for turning on/off NumPy compatibility - * and checking whether NumPy compatibility has been turned on/off. NumPy compatibility - * is introduced first to support zero-dim and zero-size tensors as in NumPy. 
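Usage is scoped; a minimal sketch against the API below:

    NumpyScope.enableNumpyShape.withScope {
      assert(NumpyScope.isNumpyShape)   // zero-dim / zero-size shapes allowed here
    }
    // outside the scope, the previous compatibility setting is restored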
- */ -object NumpyScope { - def setNumpyShape(isNpComp: Boolean): Boolean = { - val prev = new RefInt() - checkCall(_LIB.mxSetIsNumpyShape(if (isNpComp) 1 else 0, prev)) - if (prev.value != 0) true else false - } - - def isNumpyShape: Boolean = { - val curr = new RefInt - checkCall(_LIB.mxIsNumpyShape(curr)) - if (curr.value != 0) true else false - } - - def enableNumpyShape: NumpyScope = { - new NumpyScope(true) - } - - - def disableNumpyShape: NumpyScope = { - new NumpyScope(false) - } -} - -class NumpyScope(var isCompatible: Boolean) { - private var prev: Boolean = false - - def withScope[T](body: => T): T = { - prev = NumpyScope.setNumpyShape(isCompatible) - try { - body - } finally { - if (prev != isCompatible) { - NumpyScope.setNumpyShape(prev) - } - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Operator.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Operator.scala deleted file mode 100644 index a521702967e7..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Operator.scala +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.Base._ -import org.apache.mxnet.DType.DType -import scala.collection.mutable.ArrayBuffer - -/** - * Base class for operators implemented in Scala - */ -abstract class CustomOp { - - /** - * forward interface. override to create new operators. - * @param isTrain : Boolean - * whether this is for training - * @param req : array of String - * how to assign to outData. can be 'null', 'write', or 'add'. - * You can optionally use this.assign(dst, req, src) to handle this. - * @param inData, outData, aux : array of NDArrays - * input, output, and auxiliary states for forward. See document for - * corresponding arguments of Operator::Forward - */ - def forward(isTrain: Boolean, req: Array[String], - inData: Array[NDArray], outData: Array[NDArray], aux: Array[NDArray]): Unit - - /** - * backward interface. override to create new operators - * @param req : array of String - * how to assign to inGrad. can be 'null', 'write', or 'add'. - * You can optionally use this.assign(dst, req, src) to handle this. - * @param outGrad, inData, outData, inGrad, aux : array of NDArrays - * input, output, and auxiliary states for backward. See document for - * corresponding arguments of Operator::Backward - */ - def backward(req: Array[String], outGrad: Array[NDArray], - inData: Array[NDArray], outData: Array[NDArray], - inGrad: Array[NDArray], aux: Array[NDArray]): Unit - - /** - * Helper function for assigning into dst depending on requirements. 
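A sketch of the contract above: a hypothetical pass-through operator that copies its input to its output and its output gradient to its input gradient, honoring the write request via assign:

  class IdentityOp extends CustomOp {
    override def forward(isTrain: Boolean, req: Array[String],
                         inData: Array[NDArray], outData: Array[NDArray],
                         aux: Array[NDArray]): Unit = {
      this.assign(outData(0), req(0), inData(0))
    }

    override def backward(req: Array[String], outGrad: Array[NDArray],
                          inData: Array[NDArray], outData: Array[NDArray],
                          inGrad: Array[NDArray], aux: Array[NDArray]): Unit = {
      this.assign(inGrad(0), req(0), outGrad(0))
    }
  }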
- */ - def assign(dst: NDArray, req: String, src: NDArray): Unit = req match { - case "write" | "inplace" => dst.set(src) - case "add" => dst += src - case "null" => {} - } - - /** - * Scala Callback for CustomOp::Forward - */ - private[mxnet] def forwardEntry(numNdarray: Int, ndarrays: Array[NDArrayHandle], - tags: Array[Int], reqs: Array[Int], isTrain: Boolean): Boolean = { - var success = true - try { - val tensors = (0 until 5).toArray.map( x => ArrayBuffer[NDArray]() ) - for (i <- 0 until numNdarray) { - if (tags(i) == 1 || tags(i) == 4) { - tensors(tags(i)) += new NDArray(ndarrays(i), writable = true, addToCollector = false) - } else { - tensors(tags(i)) += new NDArray(ndarrays(i), writable = false, addToCollector = false) - } - } - val reqEnum = Array("null", "write", "inplace", "add") - val reqsArr = tensors(1).indices.map(i => reqEnum(reqs(i))).toArray - this.forward(isTrain = isTrain, req = reqsArr, - inData = tensors(0).toArray, outData = tensors(1).toArray, - aux = tensors(4).toArray) - } catch { - case ex: Throwable => { - success = false - ex.printStackTrace() - } - } - success - } - - /** - * Scala Callback for CustomOp::Backward - */ - private[mxnet] def backwardEntry(numNdarray: Int, ndarrays: Array[NDArrayHandle], - tags: Array[Int], reqs: Array[Int], isTrain: Boolean): Boolean = { - var success = true - try { - val tensors = (0 until 5).toArray.map( x => ArrayBuffer[NDArray]() ) - for (i <- 0 until numNdarray) { - if (tags(i) == 2 || tags(i) == 4) { - tensors(tags(i)) += new NDArray(ndarrays(i), writable = true) - } else { - tensors(tags(i)) += new NDArray(ndarrays(i), writable = false) - } - } - val reqEnum = Array("null", "write", "inplace", "add") - val reqsArr = tensors(2).indices.map(i => reqEnum(reqs(i))).toArray - this.backward(req = reqsArr, - inData = tensors(0).toArray, outData = tensors(1).toArray, - inGrad = tensors(2).toArray, outGrad = tensors(3).toArray, - aux = tensors(4).toArray) - } catch { - case ex: Throwable => { - success = false - ex.printStackTrace() - } - } - success - } -} - -/** - * Base class for operator property class implemented in Scala. - * MXNET_CPU_WORKER_NTHREADS must be greater than 1 for custom op to work on CPU - * @param needTopGrad : Boolean - * The default declareBackwardDependency function uses this value - * to determine whether this operator needs gradient input from above. - */ -abstract class CustomOpProp(needTopGrad: Boolean = false) { - - protected var kwargs: Map[String, String] = Map[String, String]() - - private[mxnet] def init(keys: Array[String], vals: Array[String]): Unit = { - require(keys.length == vals.length, - s"Number of keys (${keys.length}) does not match number of values (${vals.length})") - kwargs = keys.zip(vals).toMap - } - - /** - * inferShape interface. override to create new operators - * @param inShape : array of Shape - * list of argument shapes in the same order as declared in listArguments(). - * @return - * inShapes : array of Shape - * array of argument shapes. Can be modified from inShape. - * outShapes : array of Shape - * array of output shapes calculated from inShape, - * in the same order as declared in listOutputs(). - * auxShapes : Optional, array of Shape - * array of aux shapes calculated from inShape, - * in the same order as declared in listAuxiliaryStates().
- */ - def inferShape(inShape: Array[Shape]): - (Array[Shape], Array[Shape], Array[Shape]) = (inShape, inShape.take(1), null) - - /** - * Scala Callback for CustomOp::InferShape - */ - private[mxnet] def inferShapeEntry( - numTensor: Int, inputShapes: Array[Array[Int]]): Array[Array[Int]] = { - val nIn = this.listArguments().length - val nOut = this.listOutputs().length - val nAux = { - val tmp = this.listAuxiliaryStates() - if (tmp == null) 0 else tmp.length - } - require(numTensor == (nIn + nOut + nAux), - s"Shape inference failed. $numTensor tensors expected, but got " + - s"$nIn args, $nOut outputs and $nAux aux states") - val (inShapes, outShapes, auxShapes) = - inferShape(inputShapes.map(Shape(_))) - require(inShapes != null && inShapes.length != 0, "InputShape is undefined or empty") - require(outShapes != null && outShapes.length != 0, "OutputShape is undefined or empty") - if (auxShapes != null && auxShapes.length != 0) { - inShapes.map(_.toArray) ++ outShapes.map(_.toArray) ++ auxShapes.map(_.toArray) - } else inShapes.map(_.toArray) ++ outShapes.map(_.toArray) - } - - /** - * inferType interface. override to create new operators - * @param inType : array of DType - * list of argument types in the same order as declared in listArguments(). - * @return - * inTypes : array of DType - * array of argument types. Can be modified from inType. - * outTypes : array of DType - * array of output types calculated from inType, - * in the same order as declared in listOutputs(). - * auxTypes : Optional, array of DType - * array of aux types calculated from inType, - * in the same order as declared in listAuxiliaryStates(). - */ - def inferType(inType: Array[DType]): - (Array[DType], Array[DType], Array[DType]) = - (inType, Array.fill[DType](this.listOutputs.length)(inType(0)), - Array.fill[DType](this.listAuxiliaryStates.length)(inType(0))) - - /** - * Scala Callback for CustomOp::InferType - */ - private[mxnet] def inferTypeEntry( - numTensor: Int, inputTypes: Array[Int]): Array[Int] = { - val nIn = this.listArguments().length - val nOut = this.listOutputs().length - val nAux = { - val tmp = this.listAuxiliaryStates() - if (tmp == null) 0 else tmp.length - } - require(numTensor == (nIn + nOut + nAux), - s"Type inference failed. $numTensor tensors expected, but got " + - s"$nIn args, $nOut outputs and $nAux aux states") - val (inTypes, outTypes, auxTypes) = - inferType(inputTypes.map(DType(_))) - require(inTypes != null && inTypes.length != 0, "InputType is undefined or empty") - require(outTypes != null && outTypes.length != 0, "OutputType is undefined or empty") - if (auxTypes != null && auxTypes.length != 0) { - inTypes.map(_.id) ++ outTypes.map(_.id) ++ auxTypes.map(_.id) - } else inTypes.map(_.id) ++ outTypes.map(_.id) - } - - /** - * listOutputs interface. override to create new operators - * @return - * outputs : array of String - * list of output blob names. - */ - def listOutputs(): Array[String] = Array("output") - - /** - * listArguments interface. override to create new operators - * @return - * arguments : array of String - * list of argument blob names. - */ - def listArguments(): Array[String] = Array("data") - - /** - * listAuxiliaryStates interface. override to create new operators - * @return - * auxs : array of String - * list of auxiliary state blob names. - */ - def listAuxiliaryStates(): Array[String] = null - - /** - * Declare dependencies of this operator for backward pass. - * @param outGrad : array of Int - * ids of outGrad blobs.
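Continuing the sketch, a matching property class for the IdentityOp above, registered via Operator.register (shown further down this hunk); all names here are illustrative:

  class IdentityOpProp extends CustomOpProp(needTopGrad = true) {
    override def listArguments(): Array[String] = Array("data")
    override def listOutputs(): Array[String] = Array("output")

    // One output with the same shape as the single input; no aux states.
    override def inferShape(inShape: Array[Shape]):
        (Array[Shape], Array[Shape], Array[Shape]) = (inShape, inShape.take(1), null)

    override def createOperator(ctx: String, inShapes: Array[Array[Int]],
                                inDtypes: Array[Int]): CustomOp = new IdentityOp()
  }

  Operator.register("identityop", new IdentityOpProp)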
- * @param inData : array of Int - * ids of inData blobs. - * @param outData : array of Int - * ids of outData blobs. - * @return - * deps : array of Int - * ids of the needed blobs. - */ - def declareBackwardDependency(outGrad: Array[Int], - inData: Array[Int], outData: Array[Int]): Array[Int] = { - val deps = ArrayBuffer[Array[Int]]() - if (this.needTopGrad) deps += outGrad - deps += inData - deps += outData - deps.toArray.flatten - } - - /** - * Create an operator that carries out the real computation - * given the context, input shapes, and input data types. - */ - def createOperator(ctx: String, inShapes: Array[Array[Int]], inDtypes: Array[Int]): CustomOp - -} - -object Operator { - def register(regName: String, opProp: CustomOpProp): Unit = { - checkCall(_LIB.mxCustomOpRegister(regName, opProp)) - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Optimizer.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Optimizer.scala deleted file mode 100644 index 123eae986cd7..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Optimizer.scala +++ /dev/null @@ -1,326 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet - -import java.io._ - -import org.apache.mxnet.Base.CPtrAddress - -import scala.collection.mutable -import scala.util.Either - -object Optimizer { - def getUpdater(optimizer: Optimizer): MXKVStoreUpdater = { - new MXKVStoreUpdater with MXKVStoreCachedStates { - override def update(index: Int, grad: NDArray, weight: NDArray): Unit = { - ResourceScope.usingIfScopeExists(this.scope) { - val state = - if (states.contains(index)) { - states.get(index).get - } else { - val newState = optimizer.createState(index, weight) - states.put(index, newState) - newState - } - optimizer.update(index, weight, grad, state) - } - } - - override def dispose(): Unit = { - if (!super.isDisposed) { - states.values.foreach(optimizer.disposeState) - states.clear() - } - } - - override def serializeState(): Array[Byte] = { - val bos = new ByteArrayOutputStream() - try { - val out = new ObjectOutputStream(bos) - out.writeInt(states.size) - states.foreach { case (k, v) => - out.writeInt(k) - val stateBytes = optimizer.serializeState(v) - if (stateBytes == null) { - out.writeInt(0) - } else { - out.writeInt(stateBytes.length) - out.write(stateBytes) - } - } - out.flush() - bos.toByteArray - } finally { - try { - bos.close() - } catch { - case _: Throwable => - } - } - } - - override def deserializeState(bytes: Array[Byte]): Unit = { - val bis = new ByteArrayInputStream(bytes) - var in: ObjectInputStream = null - try { - in = new ObjectInputStream(bis) - val size = in.readInt() - (0 until size).foreach(_ => { - val key = in.readInt() - val bytesLength = in.readInt() - val value = - if (bytesLength > 0) { - val bytes = Array.fill[Byte](bytesLength)(0) - in.readFully(bytes) - optimizer.deserializeState(bytes) - } else { - null - } - states.update(key, value) - }) - } finally { - try { - if (in != null) { - in.close() - } - } catch { - case _: Throwable => - } - } - } - } - } -} - -abstract class Optimizer extends Serializable { - protected val lrMult: mutable.Map[Either[Int, String], Float] = - mutable.HashMap.empty[Either[Int, String], Float] - protected val wdMult: mutable.Map[Either[Int, String], Float] = - mutable.HashMap.empty[Either[Int, String], Float] - protected var numUpdate: Int = 0 - protected val indexUpdateCount: mutable.Map[Int, Int] = mutable.HashMap.empty[Int, Int] - - protected var specialized: Boolean = false - protected val weightSet: mutable.Set[Int] = mutable.HashSet.empty[Int] - protected var rescaleGrad: Float = 1 - @transient protected var symbol: Symbol = null - protected var idx2name: Map[Int, String] = null - - /** - * Update the parameters. - * @param index An unique integer key used to index the parameters - * @param weight weight ndarray - * @param grad grad ndarray - * @param state NDArray or other objects returned by initState - * The auxiliary state used in optimization. - */ - // TODO: make state a ClassTag - def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit - - // Create additional optimizer state such as momentum. 
- // TODO: make returned state a ClassTag - def createState(index: Int, weight: NDArray): AnyRef - - // Dispose the state it created - def disposeState(state: AnyRef): Unit - - def serializeState(state: AnyRef): Array[Byte] - - def deserializeState(bytes: Array[Byte]): AnyRef - - // Set individual learning rate scale for parameters - @deprecated("Use setLrMult instead.", "0.10.0") - def setLrScale(lrScale: Map[Int, Float]): Unit = { - val argsLrScale: Map[Either[Int, String], Float] = lrScale.map { case (k, v) => Left(k) -> v } - setLrMult(argsLrScale) - } - - /** - * Sets an individual learning rate multiplier for each parameter. - * If you specify a learning rate multiplier for a parameter, then - * the learning rate for the parameter will be set as the product of - * the global learning rate and its multiplier. - * note:: The default learning rate multiplier of a `Variable` - * can be set with `lr_mult` argument in the constructor. - * @param argsLrMult: Map[Either[Int, String], Float] - * For each of its key-value entries, the learning rate multiplier for the - * parameter specified in the key will be set as the given value. - * - * You can specify the parameter with either its name or its index. - * If you use the name, you should also call the `setSymbol` method first, - * and the name you specified in the key of `argsLrMult` should match - * the name of the parameter in the `sym` you pass to `setSymbol` method. - * If you use the index, it should correspond to the index of the parameter - * used in the `update` method. - * - * Specifying a parameter by its index is only supported for backward - * compatibility, and we recommend using the name instead. - */ - def setLrMult(argsLrMult: Map[Either[Int, String], Float]): Unit = { - argsLrMult.foreach { case (k, v) => this.lrMult(k) = v } - } - - /** - * Sets an individual weight decay multiplier for each parameter. - * - * By default, the weight decay multiplier is set as 0 for all - * parameters whose names don't end with ``_weight`` or ``_gamma``, if - * you call the `setIdx2Name` method to set idx2name. - * - * note:: The default weight decay multiplier for a `Variable` - * can be set with its `wd_mult` argument in the constructor. - * @param argsWdMult: Map[Either[Int, String], Float] - * For each of its key-value entries, the weight decay multiplier for the - * parameter specified in the key will be set as the given value. - * - * You can specify the parameter with either its name or its index. - * If you use the name, you should also call the `setSymbol` method first, - * and the name you specified in the key of `argsWdMult` should match - * the name of the parameter in the `sym` you pass to `setSymbol` method. - * If you use the index, it should correspond to the index of the parameter - * used in the `update` method. - * - * Specifying a parameter by its index is only supported for backward - * compatibility, and we recommend using the name instead. - */ - def setWdMult(argsWdMult: Map[Either[Int, String], Float]): Unit = { - argsWdMult.foreach { case (k, v) => this.wdMult(k) = v } - } - - def setArgNames(argNames: Seq[String]): Unit = { - if (argNames != null) { - specialized = true - var index = 0 - argNames foreach { name => - if (!name.endsWith("data") && !name.endsWith("label")) { - if (name.endsWith("weight")) { - weightSet.add(index) - } - index += 1 - } - } - } - } - - // Set rescaling factor of gradient.
- def setRescaleGrad(rescaleGrad: Float): Unit = { - this.rescaleGrad = rescaleGrad - } - - def setSymbol(sym: Symbol): Unit = { - this.symbol = sym - if (this.symbol != null) { - val attr = this.symbol.attrMap - for (name <- this.symbol.listArguments()) { - if (attr.contains(name) && attr(name).contains("__lr_mult__")) { - this.lrMult(Right(name)) = attr(name)("__lr_mult__").toFloat - } - if (attr.contains(name) && attr(name).contains("__wd_mult__")) { - this.wdMult(Right(name)) = attr(name)("__wd_mult__").toFloat - } - } - } - } - - def setIdx2Name(paramIdx2Name: Map[Int, String]): Unit = { - this.idx2name = paramIdx2Name - if (this.idx2name != null) { - for (n <- this.idx2name.values) { - if (!(n.endsWith("_weight") || n.endsWith("_gamma"))) { - this.wdMult(Right(n)) = 0f - } - } - } - } - - /** - * update num_update - * @param index The index will be updated - */ - protected def updateCount(index: Int): Unit = { - val count = indexUpdateCount.getOrElseUpdate(index, 0) + 1 - indexUpdateCount.update(index, count) - numUpdate = Math.max(count, numUpdate) - } - - // Gets the learning rate given the index of the weight. - protected def getLr(index: Int, lr: Float): Float = { - var llr = lr - if (this.lrMult.contains(Left(index))) { - llr *= this.lrMult(Left(index)) - } else if (this.idx2name != null && this.idx2name.contains(index)) { - llr *= this.lrMult.getOrElse(Right(this.idx2name(index)), 1.0f) - } - llr - } - - // Gets weight decay for index. - protected def getWd(index: Int, wd: Float): Float = { - var lwd = if (specialized) { - if (this.weightSet.contains(index)) { - wd - } else { - 0f - } - } else { - wd - } - if (this.wdMult.contains(Left(index))) { - lwd *= this.wdMult(Left(index)) - } else if (this.idx2name != null && this.idx2name.contains(index)) { - lwd *= this.wdMult.getOrElse(Right(this.idx2name(index)), 1.0f) - } - lwd - } -} - -trait MXKVStoreUpdater extends - NativeResource { - /** - * user-defined updater for the kvstore - * It's this updater's responsibility to delete recv and local - * @param key the key - * @param recv the pushed value on this key - * @param local the value stored on local on this key - */ - def update(key: Int, recv: NDArray, local: NDArray): Unit - - // This is a hack to make Optimizers work with ResourceScope - // otherwise the user has to manage calling dispose on this object. - override def nativeAddress: CPtrAddress = hashCode() - override def nativeDeAllocator: CPtrAddress => Int = doNothingDeAllocator - private def doNothingDeAllocator(dummy: CPtrAddress): Int = 0 - override val ref: NativeResourceRef = super.register() - override val bytesAllocated: Long = 0L -} - -trait MXKVStoreCachedStates { - protected val states = new scala.collection.mutable.HashMap[Int, AnyRef] - - /** - * Serialize states to byte array - * @return serialized states - */ - def serializeState(): Array[Byte] - - /** - * Update states with serialized results - * @param bytes Generated by serializeState() - */ - def deserializeState(bytes: Array[Byte]): Unit -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Profiler.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Profiler.scala deleted file mode 100644 index a917377f096a..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Profiler.scala +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
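A minimal sketch of a concrete optimizer against the contract above: plain SGD with no per-weight state, using the getLr/getWd/updateCount helpers and assuming the usual NDArray arithmetic operators from NDArray.scala (not part of this hunk):

  class PlainSGD(lr: Float = 0.01f, wd: Float = 0.0001f) extends Optimizer {
    override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = {
      updateCount(index)
      val llr = getLr(index, lr)
      val lwd = getWd(index, wd)
      // w := w - llr * (rescaleGrad * grad + lwd * w)
      weight -= (grad * rescaleGrad + weight * lwd) * llr
    }
    // Plain SGD keeps no state, so the state hooks are all no-ops.
    override def createState(index: Int, weight: NDArray): AnyRef = null
    override def disposeState(state: AnyRef): Unit = {}
    override def serializeState(state: AnyRef): Array[Byte] = null
    override def deserializeState(bytes: Array[Byte]): AnyRef = null
  }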
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.Base._ - -object Profiler { - - val state2Int = Map("stop" -> 0, "run" -> 1) - - /** - * Set up the configuration of the profiler. - * @param kwargs profiler configuration options. Supported options include - * mode (indicating what to profile, "symbolic" or "all"; - * default is "symbolic") and fileName (the name of the - * output trace file; default is "profile.json"). - */ - def profilerSetConfig(kwargs: Map[String, String]): Unit = { - val keys = kwargs.keys.toArray - val vals = kwargs.values.toArray - checkCall(_LIB.mxSetProfilerConfig(keys, vals)) - } - - /** - * Set up the profiler state to record operators. - * @param state, optional - * Indicating whether to run the profiler, can - * be "stop" or "run". Default is "stop". - */ - def profilerSetState(state: String = "stop"): Unit = { - require(state2Int.contains(state), s"Invalid state $state") - checkCall(_LIB.mxSetProfilerState(state2Int(state))) - } - - /** - * Dump the profile and stop the profiler. Use this to save the profile - * in advance in case your program cannot exit normally. - */ - def dumpProfile(finished: Int = 1): Unit = { - checkCall(_LIB.mxDumpProfile(finished)) - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Random.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Random.scala deleted file mode 100644 index 34490837a052..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Random.scala +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.Base._ - -/** - * Random Number interface of mxnet. - */ -object Random { - /** - * Generate uniform distribution in [low, high) with shape. - * - * @param low The lower bound of distribution. - * @param high The upper bound of distribution. - * @param shape Output shape of the NDArray generated. - * @param ctx Context of output NDArray, will use default context if not specified. - * @param out Output placeholder - * @return The result NDArray with generated result.
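A typical profiling session with the API above might look like the following sketch (option names follow the doc comment; treat the exact key strings as an assumption):

  Profiler.profilerSetConfig(Map("mode" -> "all", "filename" -> "profile.json"))
  Profiler.profilerSetState("run")
  // ... run the workload to be profiled ...
  Profiler.profilerSetState("stop")
  Profiler.dumpProfile()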
- */ - def uniform(low: Float, - high: Float, - shape: Shape = null, - ctx: Context = null, - out: NDArray = null): NDArray = { - var outCopy = out - if (outCopy != null) { - require(shape == null && ctx == null, "shape and ctx are not needed when out is specified.") - } else { - require(shape != null, "shape is required when out is not specified") - outCopy = NDArray.empty(shape, ctx) - } - NDArray.genericNDArrayFunctionInvoke("_random_uniform", Seq(low, high), - Map("shape" -> outCopy.shape, "out" -> outCopy)) - } - - - /** - * Generate normal (Gaussian) distribution N(mean, stdvar^2) with shape. - * - * @param loc The mean of the normal distribution. - * @param scale The standard deviation of the normal distribution. - * @param shape Output shape of the NDArray generated. - * @param ctx Context of output NDArray, will use default context if not specified. - * @param out Output placeholder - * @return The result NDArray with generated result. - */ - def normal(loc: Float, - scale: Float, - shape: Shape = null, - ctx: Context = null, - out: NDArray = null): NDArray = { - var outCopy = out - if (outCopy != null) { - require(shape == null && ctx == null, "shape and ctx are not needed when out is specified.") - } else { - require(shape != null, "shape is required when out is not specified") - outCopy = NDArray.empty(shape, ctx) - } - NDArray.genericNDArrayFunctionInvoke("_random_normal", Seq.empty[NDArray], - Map("loc" -> loc, "scale" -> scale, "shape" -> outCopy.shape, "out" -> outCopy)) - } - - - /** - * Seed the random number generators in mxnet. - * - * This seed will affect the behavior of functions in this module, - * as well as results from executors that contain random numbers, - * such as Dropout operators. - * - * @param seedState The random number seed to set to all devices. - * @note The random number generator of mxnet is by default device specific. - * This means if you set the same seed, the random number sequence - * generated from GPU0 can be different from that on CPU. - */ - def seed(seedState: Int): Unit = { - checkCall(_LIB.mxRandomSeed(seedState)) - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/RecordIO.scala b/scala-package/core/src/main/scala/org/apache/mxnet/RecordIO.scala deleted file mode 100644 index 578f00a76f9a..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/RecordIO.scala +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.mxnet - -import org.apache.mxnet.Base._ -import java.io.File -import scala.io.Source -import java.io.PrintWriter -import java.io.ByteArrayOutputStream -import java.io.DataOutputStream -import java.io.DataInputStream -import java.io.ByteArrayInputStream - -/** - * Scala interface for read/write RecordIO data format - * @param uri, path to recordIO file. - * @param flag, MXRecordIO.IORead for reading or MXRecordIO.IOWrite for writing. - */ -class MXRecordIO(uri: String, flag: MXRecordIO.IOFlag) { - protected val recordIOHandle: RecordIOHandleRef = new RecordIOHandleRef - protected var isOpen: Boolean = false - - open() - - // Open record file - protected def open(): Unit = { - flag match { - case MXRecordIO.IOWrite => { - checkCall(_LIB.mxRecordIOWriterCreate(uri, recordIOHandle)) - } - case MXRecordIO.IORead => { - checkCall(_LIB.mxRecordIOReaderCreate(uri, recordIOHandle)) - } - } - this.isOpen = true - } - - // Close record file - def close(): Unit = { - if (this.isOpen) { - flag match { - case MXRecordIO.IOWrite => { - checkCall(_LIB.mxRecordIOWriterFree(recordIOHandle.value)) - } - case MXRecordIO.IORead => { - checkCall(_LIB.mxRecordIOReaderFree(recordIOHandle.value)) - } - } - } - } - - // Reset pointer to first item. - // If record is opened with MXRecordIO.IOWrite, this will truncate the file to empty. - def reset(): Unit = { - this.close() - this.open() - } - - // Write a string buffer as a record - def write(buf: String): Unit = { - assert(this.flag == MXRecordIO.IOWrite) - checkCall(_LIB.mxRecordIOWriterWriteRecord(this.recordIOHandle.value, buf, buf.size)) - } - - // Read a record as string - def read(): String = { - assert(this.flag == MXRecordIO.IORead) - val result = new RefString - checkCall(_LIB.mxRecordIOReaderReadRecord(this.recordIOHandle.value, result)) - result.value - } -} - -object MXRecordIO { - sealed trait IOFlag - case object IOWrite extends IOFlag - case object IORead extends IOFlag - - case class IRHeader(flag: Int, label: Array[Float], id: Int, id2: Int) - - /** - * pack a string into MXImageRecord. - * @param header - * header of the image record. - * header.label an array. - * @param s string to pack - * @return the resulting packed string - */ - def pack(header: IRHeader, s: String): String = { - val data = new ByteArrayOutputStream() - val stream = new DataOutputStream(data) - stream.writeInt(header.label.length) - header.label.foreach(stream.writeFloat) - stream.writeInt(header.id) - stream.writeInt(header.id2) - stream.writeUTF(s) - stream.flush() - stream.close() - data.toByteArray().map(_.toChar).mkString - } - - /** - * unpack a MXImageRecord to string. - * @param s string buffer from MXRecordIO.read - * @return - * header : IRHeader, header of the image record - * str : String, unpacked string - */ - def unpack(s: String): (IRHeader, String) = { - val data = s.toCharArray().map(_.toByte) - val stream = new DataInputStream(new ByteArrayInputStream(data)) - val flag = stream.readInt() - val label = (0 until flag).map( idx => stream.readFloat()).toArray - val id = stream.readInt() - val id2 = stream.readInt() - val str = stream.readUTF() - stream.close() - (IRHeader(flag, label, id, id2), str) - } - -} - -/** - * Scala interface for read/write RecordIO data format with index. - * Supports random access. - * - * @author Depeng Liang - * - * @param idxPath, path to index file - * @param uri, path to recordIO file. - * @param flag, MXRecordIO.IORead for reading or MXRecordIO.IOWrite for writing. - * @param keyType, data type for keys.
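A round-trip sketch with the reader/writer and pack/unpack helpers above (file name is illustrative):

  val writer = new MXRecordIO("data.rec", MXRecordIO.IOWrite)
  // pack() derives the stored flag from label.length; here a single label 1.0f
  val header = MXRecordIO.IRHeader(flag = 1, label = Array(1.0f), id = 0, id2 = 0)
  writer.write(MXRecordIO.pack(header, "payload"))
  writer.close()

  val reader = new MXRecordIO("data.rec", MXRecordIO.IORead)
  val (hdr, payload) = MXRecordIO.unpack(reader.read())
  reader.close()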
- */ -class MXIndexedRecordIO(idxPath: String, uri: String, flag: MXRecordIO.IOFlag, - keyType: MXIndexedRecordIO.KeyType = MXIndexedRecordIO.TypeInt) extends MXRecordIO(uri, flag) { - private var idx = this.keyType match { - case MXIndexedRecordIO.TypeInt => Map[Int, Int]() - case _ => Map[Any, Int]() - } - - if (flag == MXRecordIO.IORead && new File(idxPath).isFile()) { - Source.fromFile(idxPath).getLines().foreach { line => - val (k, v) = { - val tmp = line.trim().split("\t") - val key = this.keyType match { - case MXIndexedRecordIO.TypeInt => tmp(0).toInt - } - (key, tmp(1).toInt) - } - this.idx = this.idx + (k -> v) - } - } - - override def close(): Unit = { - if (this.flag == MXRecordIO.IOWrite) { - val fOut = new PrintWriter(idxPath) - this.idx.foreach { case (k, v) => - fOut.write(s"$k\t$v\n") - } - fOut.flush() - fOut.close() - } - super.close() - } - - override def reset(): Unit = { - this.idx = Map[Any, Int]() - super.close() - super.open() - } - - // Set the current read head position to the record with the given key - def seek(idx: Any): Unit = { - assert(this.flag == MXRecordIO.IORead) - val idxx = this.keyType match { - case MXIndexedRecordIO.TypeInt => idx.asInstanceOf[Int] - } - val pos = this.idx(idxx) - checkCall(_LIB.mxRecordIOReaderSeek(this.recordIOHandle.value, pos)) - } - - // Query current write head position - def tell(): Int = { - assert(this.flag == MXRecordIO.IOWrite) - val pos = new RefInt - checkCall(_LIB.mxRecordIOWriterTell(this.recordIOHandle.value, pos)) - pos.value - } - - // Read record with index - def readIdx(idx: Any): String = { - this.seek(idx) - this.read() - } - - // Write record with index - def writeIdx(idx: Any, buf: String): Unit = { - val pos = this.tell() - val idxx = this.keyType match { - case MXIndexedRecordIO.TypeInt => idx.asInstanceOf[Int] - } - this.idx = this.idx + (idxx -> pos) - this.write(buf) - } - - // List all keys from index - def keys(): Iterable[Any] = this.idx.keys -} - -private object MXIndexedRecordIO { - sealed trait KeyType - case object TypeInt extends KeyType -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/ResourceScope.scala b/scala-package/core/src/main/scala/org/apache/mxnet/ResourceScope.scala deleted file mode 100644 index 9a822870749e..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/ResourceScope.scala +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
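And random access with the indexed variant above, keyed by Int (the default key type):

  val w = new MXIndexedRecordIO("data.idx", "data.rec", MXRecordIO.IOWrite)
  w.writeIdx(0, "first record")
  w.writeIdx(1, "second record")
  w.close()   // also flushes the key -> position index to data.idx

  val r = new MXIndexedRecordIO("data.idx", "data.rec", MXRecordIO.IORead)
  val second = r.readIdx(1)   // seeks via the index, then reads
  r.close()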
- */ - -package org.apache.mxnet - -import java.util.HashSet - -import org.slf4j.LoggerFactory - -import scala.collection.mutable -import scala.collection.mutable.ArrayBuffer -import scala.util.Try -import scala.util.control.{ControlThrowable, NonFatal} - -/** - * This class manages automatically releasing of `org.apache.mxnet.NativeResource`s - */ -class ResourceScope extends AutoCloseable { - - // HashSet does not take a custom comparator - private[mxnet] val resourceQ = new mutable.TreeSet[NativeResource]()(nativeAddressOrdering) - - private object nativeAddressOrdering extends Ordering[NativeResource] { - def compare(a: NativeResource, b: NativeResource): Int = { - a.nativeAddress compare b.nativeAddress - } - } - - ResourceScope.addToThreadLocal(this) - - /** - * Releases all the `org.apache.mxnet.NativeResource` by calling - * the associated`'org.apache.mxnet.NativeResource.close()` method - */ - override def close(): Unit = { - ResourceScope.removeFromThreadLocal(this) - if (!ResourceScope.threadLocalScopes.get().contains(this)) { - resourceQ.foreach(resource => if (resource != null) resource.dispose(false)) - resourceQ.clear() - } - } - - /** - * Add a NativeResource to the scope - * @param resource - */ - def add(resource: NativeResource): Unit = { - resourceQ.+=(resource) - resource.scope = Some(this) - } - - /** - * Check if a NativeResource is in the scope - * @param resource - */ - def contains(resource: NativeResource): Boolean = { - resourceQ.contains(resource) - } - - /** - * Remove NativeResource from the Scope, this uses - * object equality to find the resource in the stack. - * @param resource - */ - def remove(resource: NativeResource): Unit = { - resourceQ.-=(resource) - resource.scope = None - } - - /** - * Removes from current Scope and moves to outer scope if it exists - * @param resource Resource to be moved to an outer scope - */ - def moveToOuterScope(resource: NativeResource): Unit = { - val prevScope: Option[ResourceScope] = ResourceScope.getPrevScope() - if (prevScope.isDefined) { - if (contains(resource)) { - this.remove(resource) - prevScope.get.add(resource) - } - } else this.remove(resource) - } - -} - -object ResourceScope { - - private val logger = LoggerFactory.getLogger(classOf[ResourceScope]) - - /** - * Captures all Native Resources created using the ResourceScope and - * at the end of the body, de allocates all the Native resources by calling close on them. - * This method will not deAllocate NativeResources returned from the block. - * @param scope (Optional). Scope in which to capture the native resources - * @param body block of code to execute in this scope - * @tparam A return type - * @return result of the operation, if the result is of type NativeResource, it is not - * de allocated so the user can use it and then de allocate manually by calling - * close or enclose in another resourceScope. 
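A usage sketch for the scope machinery above (NDArray factory methods assumed from NDArray.scala, not part of this hunk):

  val result = ResourceScope.using() {
    val a = NDArray.ones(Shape(2, 3))
    val b = a * 2f
    // a and b are disposed when the scope closes; only the returned
    // NDArray is moved to the enclosing scope and survives.
    b + 1f
  }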
- */ - // inspired from slide 21 of https://www.slideshare.net/Odersky/fosdem-2009-1013261 - // and https://github.com/scala/scala/blob/2.13.x/src/library/scala/util/Using.scala - // TODO: we should move to the Scala util's Using method when we move to Scala 2.13 - def using[A](scope: ResourceScope = null)(body: => A): A = { - - val curScope = if (scope != null) scope else new ResourceScope() - - def recursiveMoveToOuterScope(resource: Any): Unit = { - resource match { - case nRes: NativeResource => curScope.moveToOuterScope(nRes) - case ndRet: NDArrayFuncReturn => ndRet.arr.foreach( nd => curScope.moveToOuterScope(nd) ) - case resInGeneric: scala.collection.Traversable[_] => - resInGeneric.foreach(recursiveMoveToOuterScope) - case resProduct: scala.Product => - resProduct.productIterator.foreach(recursiveMoveToOuterScope) - case _ => // do nothing - } - } - - @inline def safeAddSuppressed(t: Throwable, suppressed: Throwable): Unit = { - if (!t.isInstanceOf[ControlThrowable]) t.addSuppressed(suppressed) - } - - var retThrowable: Throwable = null - - try { - val ret = body - recursiveMoveToOuterScope(ret) - ret - } catch { - case t: Throwable => - retThrowable = t - null.asInstanceOf[A] // we'll throw in finally - } finally { - var toThrow: Throwable = retThrowable - if (retThrowable eq null) curScope.close - else { - try { - curScope.close - } catch { - case closeThrowable: Throwable => - if (NonFatal(retThrowable) && !NonFatal(closeThrowable)) toThrow = closeThrowable - else safeAddSuppressed(retThrowable, closeThrowable) - } finally { - throw toThrow - } - } - } - } - - private[mxnet] def usingIfScopeExists[A](scope: Option[ResourceScope])(body: => A): A = { - if (scope == None) { - body - } else { - ResourceScope.addToThreadLocal(scope.get) - ResourceScope.using(scope.get){ - body - } - } - } - - // thread local Scopes - private[mxnet] val threadLocalScopes = new ThreadLocal[ArrayBuffer[ResourceScope]] { - override def initialValue(): ArrayBuffer[ResourceScope] = - new ArrayBuffer[ResourceScope]() - } - - /** - * Add resource to current ThreadLocal DataStructure - * @param r ResourceScope to add. - */ - private[mxnet] def addToThreadLocal(r: ResourceScope): Unit = { - threadLocalScopes.get() += r - } - - /** - * Remove resource from current ThreadLocal DataStructure - * @param r ResourceScope to remove - */ - private[mxnet] def removeFromThreadLocal(r: ResourceScope): Unit = { - threadLocalScopes.get().remove(threadLocalScopes.get().lastIndexOf(r)) - } - - /** - * Get the latest Scope in the stack - * @return - */ - private[mxnet] def getCurrentScope(): Option[ResourceScope] = { - Try(Some(threadLocalScopes.get().last)).getOrElse(None) - } - - /** - * Get the Last but one Scope from threadLocal Scopes. - * @return n-1th scope or None when not found - */ - private[mxnet] def getPrevScope(): Option[ResourceScope] = { - val scopes = threadLocalScopes.get() - Try(Some(scopes(scopes.size - 2))).getOrElse(None) - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Rtc.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Rtc.scala deleted file mode 100644 index e0a5d941adbf..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Rtc.scala +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.Base._ - - /** - * This class allows you to write CUDA kernels in Scala - * and call them with NDArray. - * - * @param name String, name of the kernel. - * @param inputs Array[(String, NDArray)], array of input names and ndarray. - * @param outputs Array[(String, NDArray)], array of output names and ndarray. - * @param kernel String, the actual kernel code. - * Note that this is only the body of the kernel, i.e. - * after { and before }. Rtc will decorate the kernel. - * For example, if name = "mykernel" and - * inputs = Array(("x", NDArray.zeros(10))) - * outputs = Array(("y", NDArray.zeros(10))) - * kernel = "y[threadIdx.x] = x[threadIdx.x];", - * the kernel that is compiled will be: - * extern "C" __global__ void mykernel(float *x, float *y) { - * const int x_ndim = 1; - * const int x_dims[] = { 10 }; - * const int y_ndim = 1; - * const int y_dims[] = { 10 }; - * - * y[threadIdx.x] = x[threadIdx.x]; - * } - */ -class Rtc(name: String, inputs: Array[(String, NDArray)], - outputs: Array[(String, NDArray)], kernel: String) { - - private val rtcHandle = new RtcHandleRef - private val inputNames = inputs.map(_._1) - private val outputNames = outputs.map(_._1) - private val inputNDs = inputs.map(_._2.handle) - private val outputNDs = outputs.map(_._2.handle) - checkCall(_LIB.mxRtcCreate(name, - inputNames, - outputNames, - inputNDs, - outputNDs, - kernel, - rtcHandle)) - - /** - * run the kernel. - * @param ins, array of NDArray - * array of inputs. Can be different NDArrays than those used for the constructor, - * but they must have the same shapes and be in the same order. - * @param outs, array of NDArray - * array of outputs. Can be different NDArrays than those used for the constructor, - * but they must have the same shapes and be in the same order. - * @param gridDims, tuple of 3 Int - * grid dimension for kernel launch. - * @param blockDims, tuple of 3 Int - * block dimension for kernel launch - */ - def push(ins: Array[NDArray], outs: Array[NDArray], - gridDims: (Int, Int, Int), blockDims: (Int, Int, Int)): Unit = { - checkCall(_LIB.mxRtcPush(rtcHandle.value, - ins.map(_.handle), - outs.map(_.handle), - gridDims._1, - gridDims._2, - gridDims._3, - blockDims._1, - blockDims._2, - blockDims._3)) - } - - /** - * Free the rtc handle. - * The object shall never be used after it is disposed. - */ - def dispose(): Unit = { - checkCall(_LIB.mxRtcFree(rtcHandle.value)) - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Serializer.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Serializer.scala deleted file mode 100644 index 556f7e8334e6..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Serializer.scala +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership.
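A launch sketch for the Rtc wrapper above, mirroring the kernel from its doc comment (requires a CUDA-enabled build and a GPU context):

  val x = NDArray.zeros(Shape(10), Context.gpu(0))
  val y = NDArray.zeros(Shape(10), Context.gpu(0))
  val rtc = new Rtc("mykernel", Array(("x", x)), Array(("y", y)),
    "y[threadIdx.x] = x[threadIdx.x];")
  // one 1x1x1 grid of a 10x1x1 thread block copies the 10 elements
  rtc.push(Array(x), Array(y), (1, 1, 1), (10, 1, 1))
  rtc.dispose()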
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import java.io._ -import java.nio.ByteBuffer -import java.nio.charset.Charset - -import org.apache.commons.codec.binary.Base64 - -import scala.reflect.ClassTag - -/** - * Serialize & deserialize Java/Scala [[Serializable]] objects - */ -private[mxnet] abstract class Serializer { - def serialize[T: ClassTag](t: T): ByteBuffer - def deserialize[T: ClassTag](bytes: ByteBuffer): T -} - -private[mxnet] object Serializer { - val UTF8 = Charset.forName("UTF-8") - - def getSerializer: Serializer = getSerializer(None) - - def getSerializer(serializer: Serializer): Serializer = { - // TODO: dynamically get from mxnet env to support other serializers like Kyro - if (serializer == null) new JavaSerializer else serializer - } - - def getSerializer(serializer: Option[Serializer]): Serializer = { - // TODO: dynamically get from mxnet env to support other serializers like Kyro - serializer.getOrElse(new JavaSerializer) - } - - def encodeBase64String(bytes: ByteBuffer): String = { - new String(Base64.encodeBase64(bytes.array), UTF8) - } - - def decodeBase64String(str: String): ByteBuffer = { - ByteBuffer.wrap(Base64.decodeBase64(str.getBytes(UTF8))) - } -} - -private[mxnet] class JavaSerializer extends Serializer { - override def serialize[T: ClassTag](t: T): ByteBuffer = { - val bos = new ByteArrayOutputStream() - val out = new ObjectOutputStream(bos) - out.writeObject(t) - out.close() - ByteBuffer.wrap(bos.toByteArray) - } - - override def deserialize[T: ClassTag](bytes: ByteBuffer): T = { - val byteArray = bytes.array() - val bis = new ByteArrayInputStream(byteArray) - val in = new ObjectInputStream(bis) - in.readObject().asInstanceOf[T] - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Shape.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Shape.scala deleted file mode 100644 index 689176217722..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Shape.scala +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
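A round-trip sketch for the serialization helpers above (they are package-private, so this is callable from org.apache.mxnet code):

  val ser = Serializer.getSerializer              // defaults to JavaSerializer
  val buf = ser.serialize(Map("lr" -> 0.01f))
  val restored = ser.deserialize[Map[String, Float]](buf)
  val base64 = Serializer.encodeBase64String(buf) // e.g. for embedding in text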
- */ - -package org.apache.mxnet - -import scala.annotation.varargs - -/** - * Shape of [[NDArray]] or other data - */ -class Shape(dims: Traversable[Int]) extends Serializable { - private val shape = dims.toVector - - def this(dims: Int*) = { - this(dims.toVector) - } - - def apply(dim: Int): Int = shape(dim) - def get(dim: Int): Int = apply(dim) - def size: Int = shape.size - def length: Int = shape.length - def drop(dim: Int): Shape = new Shape(shape.drop(dim)) - def slice(from: Int, end: Int): Shape = new Shape(shape.slice(from, end)) - def product: Int = shape.product - def head: Int = shape.head - - def ++(other: Shape): Shape = new Shape(shape ++ other.shape) - - def toArray: Array[Int] = shape.toArray - def toVector: Vector[Int] = shape - - override def toString(): String = s"(${shape.mkString(",")})" - - override def equals(o: Any): Boolean = o match { - case that: Shape => - that != null && that.shape.sameElements(shape) - case _ => false - } - - override def hashCode(): Int = { - shape.hashCode() - } -} - -object Shape { - def apply(dims: Int *): Shape = new Shape(dims: _*) - def apply(dims: Traversable[Int]): Shape = new Shape(dims) - @varargs def create(dims: Int*): Shape = new Shape(dims) -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/SparseFormat.scala b/scala-package/core/src/main/scala/org/apache/mxnet/SparseFormat.scala deleted file mode 100644 index acb0c0f24070..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/SparseFormat.scala +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -object SparseFormat extends Enumeration { - type SparseFormat = Value - val DEFAULT = Value(0, "default") - val ROW_SPARSE = Value(1, "row_sparse") - val CSR = Value(2, "csr") -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/SparseNDArray.scala b/scala-package/core/src/main/scala/org/apache/mxnet/SparseNDArray.scala deleted file mode 100644 index f3fe638e41ed..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/SparseNDArray.scala +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.Base.{NDArrayHandle, NDArrayHandleRef, checkCall, _LIB} -import org.apache.mxnet.DType.DType -import org.apache.mxnet.SparseFormat.SparseFormat - -object SparseNDArray { - /** - * Create a Compressed Sparse Row Storage (CSR) Format Matrix - * @param data the data to feed - * @param indices The indices array stores the column index for each non-zero element in data - * @param indptr The indptr array is what will help identify the rows where the data appears - * @param shape the shape of CSR NDArray to be created - * @param ctx the context of this NDArray - * @return SparseNDArray - */ - def csrMatrix(data: Array[Float], indices: Array[Float], - indptr: Array[Float], shape: Shape, ctx: Context): SparseNDArray = { - val fmt = SparseFormat.CSR - val dataND = NDArray.array(data, Shape(data.length), ctx) - val indicesND = NDArray.array(indices, Shape(indices.length), ctx).asType(DType.Int64) - val indptrND = NDArray.array(indptr, Shape(indptr.length), ctx).asType(DType.Int64) - val dTypes = Array(indptrND.dtype, indicesND.dtype) - val shapes = Array(indptrND.shape, indicesND.shape) - val handle = - newAllocHandle(fmt, shape, ctx, false, DType.Float32, dTypes, shapes) - checkCall(_LIB.mxNDArraySyncCopyFromNDArray(handle, dataND.handle, -1)) - checkCall(_LIB.mxNDArraySyncCopyFromNDArray(handle, indptrND.handle, 0)) - checkCall(_LIB.mxNDArraySyncCopyFromNDArray(handle, indicesND.handle, 1)) - new SparseNDArray(handle) - } - - /** - * RowSparseNDArray stores the matrix in row sparse format, - * which is designed for arrays of which most row slices are all zeros - * @param data Any Array(Array(... Array(Float))) - * @param indices the indices to store the data - * @param shape shape of the NDArray - * @param ctx Context - * @return SparseNDArray - */ - def rowSparseArray(data: Array[_], indices: Array[Float], - shape: Shape, ctx: Context): SparseNDArray = { - val dataND = NDArray.toNDArray(data) - val indicesND = NDArray.array(indices, Shape(indices.length), ctx).asType(DType.Int64) - rowSparseArray(dataND, indicesND, shape, ctx) - } - - /** - * RowSparseNDArray stores the matrix in row sparse format, - * which is designed for arrays of which most row slices are all zeros - * @param data NDArray input - * @param indices in NDArray. 
Only DType.Int64 supported - * @param shape shape of the NDArray - * @param ctx Context - * @return - */ - def rowSparseArray(data: NDArray, indices: NDArray, - shape: Shape, ctx: Context): SparseNDArray = { - val fmt = SparseFormat.ROW_SPARSE - val handle = newAllocHandle(fmt, shape, ctx, false, - DType.Float32, Array(indices.dtype), Array(indices.shape)) - checkCall(_LIB.mxNDArraySyncCopyFromNDArray(handle, data.handle, -1)) - checkCall(_LIB.mxNDArraySyncCopyFromNDArray(handle, indices.handle, 0)) - new SparseNDArray(handle) - } - - def retain(sparseNDArray: SparseNDArray, indices: Array[Float]): SparseNDArray = { - if (sparseNDArray.sparseFormat == SparseFormat.CSR) { - throw new IllegalArgumentException("CSR not supported") - } - NDArray.genericNDArrayFunctionInvoke("_sparse_retain", - Seq(sparseNDArray, NDArray.toNDArray(indices))).head.toSparse() - } - - private def newAllocHandle(stype : SparseFormat, - shape: Shape, - ctx: Context, - delayAlloc: Boolean, - dtype: DType = DType.Float32, - auxDTypes: Array[DType], - auxShapes: Array[Shape]) : NDArrayHandle = { - val hdl = new NDArrayHandleRef - checkCall(_LIB.mxNDArrayCreateSparseEx( - stype.id, - shape.toArray, - shape.length, - ctx.deviceTypeid, - ctx.deviceId, - if (delayAlloc) 1 else 0, - dtype.id, - auxDTypes.length, - auxDTypes.map(_.id), - auxShapes.map(_.length), - auxShapes.map(_.get(0)), - hdl) - ) - hdl.value - } -} - -/** - * Sparse NDArray is the child class of NDArray designed to hold the Sparse format - * - *
- * Currently, Rowsparse and CSR typed NDArrays are supported. Most of the operators - * will convert a Sparse NDArray to dense. Basic operators like add will - * have optimizations for sparse operations.
- * @param handle The pointer that SparseNDArray holds - * @param writable whether the NDArray is writable - */ -class SparseNDArray private[mxnet] (override private[mxnet] val handle: NDArrayHandle, - override val writable: Boolean = true) - extends NDArray(handle, writable) { - - private lazy val dense: NDArray = toDense - - override def toString: String = { - dense.toString - } - - /** - * Convert a SparseNDArray to dense NDArray - * @return NDArray - */ - def toDense: NDArray = { - NDArray.api.cast_storage(this, SparseFormat.DEFAULT.toString).head - } - - override def toArray: Array[Float] = { - dense.toArray - } - - override def at(idx: Int): NDArray = { - dense.at(idx) - } - - override def slice(start: Int, end: Int): NDArray = { - NDArray.api.slice(this, Shape(start), Shape(end)) - } - - /** - * Get the Data portion from a Row Sparse NDArray - * @return NDArray - */ - def getData: NDArray = { - require(this.sparseFormat == SparseFormat.ROW_SPARSE, "Not Supported for CSR") - val handle = new NDArrayHandleRef - _LIB.mxNDArrayGetDataNDArray(this.handle, handle) - new NDArray(handle.value, false) - } - - /** - * Get the indptr Array from a CSR NDArray - * @return NDArray - */ - def getIndptr: NDArray = { - require(this.sparseFormat == SparseFormat.CSR, "Not Supported for row sparse") - getAuxNDArray(0) - } - - /** - * Get the indice Array - * @return NDArray - */ - def getIndices: NDArray = { - if (this.sparseFormat == SparseFormat.ROW_SPARSE) { - getAuxNDArray(0) - } else { - getAuxNDArray(1) - } - } - - private def getAuxNDArray(idx: Int): NDArray = { - val handle = new NDArrayHandleRef - checkCall(_LIB.mxNDArrayGetAuxNDArray(this.handle, idx, handle)) - new NDArray(handle.value, false) - } - -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Symbol.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Symbol.scala deleted file mode 100644 index 80f4dc935282..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Symbol.scala +++ /dev/null @@ -1,1429 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.Base._ -import org.apache.mxnet.DType.DType -import org.slf4j.{Logger, LoggerFactory} - -import scala.collection.mutable -import scala.collection.mutable.{ArrayBuffer, ListBuffer} -import scala.language.implicitConversions - -/** - * Symbolic configuration API of mxnet.
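A construction sketch for the CSR factory above: the 2x2 matrix [[7, 0], [0, 8]] stored in compressed sparse row form:

  val csr = SparseNDArray.csrMatrix(
    data = Array(7f, 8f),
    indices = Array(0f, 1f),     // column of each non-zero value
    indptr = Array(0f, 1f, 2f),  // row i spans data(indptr(i)) until data(indptr(i + 1))
    shape = Shape(2, 2),
    ctx = Context.cpu())
  val dense = csr.toDense        // materializes [[7, 0], [0, 8]]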
- * - * WARNING: it is your responsibility to clear this object through dispose(). - * - */ -class Symbol private(private[mxnet] val handle: SymbolHandle) extends NativeResource { - private val logger: Logger = LoggerFactory.getLogger(classOf[Symbol]) - - // unable to get the byteAllocated for Symbol - override val bytesAllocated: Long = 0L - override def nativeAddress: CPtrAddress = handle - override def nativeDeAllocator: (CPtrAddress => Int) = _LIB.mxSymbolFree - override val ref: NativeResourceRef = super.register() - - - def +(other: Symbol): Symbol = Symbol.createFromListedSymbols("_Plus")(Array(this, other)) - def +[@specialized(Int, Float, Double) V](other: V): Symbol = { - Symbol.createFromListedSymbols("_PlusScalar")(Array(this), Map("scalar" -> other.toString)) - } - - def -(other: Symbol): Symbol = Symbol.createFromListedSymbols("_Minus")(Array(this, other)) - def -[@specialized(Int, Float, Double) V](other: V): Symbol = { - Symbol.createFromListedSymbols("_MinusScalar")(Array(this), Map("scalar" -> other.toString)) - } - - def *(other: Symbol): Symbol = Symbol.createFromListedSymbols("_Mul")(Array(this, other)) - def *[@specialized(Int, Float, Double) V](other: V): Symbol = { - Symbol.createFromListedSymbols("_MulScalar")(Array(this), Map("scalar" -> other.toString)) - } - - def /(other: Symbol): Symbol = Symbol.createFromListedSymbols("_Div")(Array(this, other)) - def /[@specialized(Int, Float, Double) V](other: V): Symbol = { - Symbol.createFromListedSymbols("_DivScalar")(Array(this), Map("scalar" -> other.toString)) - } - - def **(other: Symbol): Symbol = Symbol.pow(this, other) - def **[@specialized(Int, Float, Double) V](other: V): Symbol = Symbol.pow(this, other) - - def >(other: Symbol): Symbol = Symbol.greater(this, other) - def >[@specialized(Int, Float, Double) V](other: V): Symbol = Symbol.greater(this, other) - - def >=(other: Symbol): Symbol = Symbol.greaterEqual(this, other) - def >=[@specialized(Int, Float, Double) V](other: V): Symbol = Symbol.greaterEqual(this, other) - - def <(other: Symbol): Symbol = Symbol.lesser(this, other) - def <[@specialized(Int, Float, Double) V](other: V): Symbol = Symbol.lesser(this, other) - - def <=(other: Symbol): Symbol = Symbol.lesserEqual(this, other) - def <=[@specialized(Int, Float, Double) V](other: V): Symbol = Symbol.lesserEqual(this, other) - - def %(other: Symbol): Symbol = Symbol.createFromListedSymbols("_Mod")(Array(this, other)) - def %[@specialized(Int, Float, Double) V](other: V): Symbol = { - Symbol.createFromListedSymbols("_ModScalar")(Array(this), Map("scalar" -> other.toString)) - } - - override def clone(): Symbol = { - val clonedHandle = new SymbolHandleRef - checkCall(_LIB.mxSymbolCopy(handle, clonedHandle)) - new Symbol(clonedHandle.value) - } - - def get(index: Int): Symbol = { - val newHandle = new SymbolHandleRef - checkCall(_LIB.mxSymbolGetOutput(handle, index, newHandle)) - new Symbol(handle = newHandle.value) - } - - def get(name: String): Symbol = { - var index: Int = -1 - for ((output, i) <- listOutputs().view.zipWithIndex) { - if (output == name) { - index = i - } - } - require(index >= 0, s"Cannot find output that matches name $name") - get(index) - } - - /** - * Get a new grouped symbol whose output contains all the internal outputs of this symbol. - * @return The internal of the symbol. 
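For reference, the operator sugar above composes symbols like this (an illustrative sketch; the variable names are assumptions):

    val a = Symbol.Variable("a")
    val b = Symbol.Variable("b")
    val c = (a + b) * 2f    // lowers to _Plus, then _MulScalar
    val d = c ** 2f         // _PowerScalar via Symbol.pow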
- */ - def getInternals(): Symbol = { - val newHandle = new SymbolHandleRef - checkCall(_LIB.mxSymbolGetInternals(handle, newHandle)) - new Symbol(handle = newHandle.value) - } - - /** - * List all the arguments in the symbol. - * @return Array of all the arguments. - */ - def listArguments(): IndexedSeq[String] = { - val arr = ArrayBuffer.empty[String] - checkCall(_LIB.mxSymbolListArguments(handle, arr)) - arr - } - - /** - * List all outputs in the symbol. - * @return List of all the outputs. - */ - def listOutputs(): IndexedSeq[String] = { - val arr = ArrayBuffer.empty[String] - checkCall(_LIB.mxSymbolListOutputs(handle, arr)) - arr - } - - /** - * List all auxiliary states in the symbol. - * @return The names of the auxiliary states. - * @note - * Auxiliary states are special states of symbols that do not correspond to an argument - * and do not have a gradient, but are still useful for specific operations. - * A common example of auxiliary state is the moving_mean and moving_variance in BatchNorm. - * Most operators do not have auxiliary states. - */ - def listAuxiliaryStates(): IndexedSeq[String] = { - val sarr = ArrayBuffer.empty[String] - checkCall(_LIB.mxSymbolListAuxiliaryStates(handle, sarr)) - sarr - } - - /** - * Infer the type of outputs and arguments given known types of some arguments. - * A tuple of nulls is returned if there is not enough information passed in. - * An error will be raised if there is inconsistency found in the known types passed in. - * @param args Provide type of arguments in a positional way. Unknown type can be marked as null - * @return - * argTypes : list of numpy.dtype or None - * List of types of arguments. - * The order is in the same order as list_arguments() - * outTypes : list of numpy.dtype or None - * List of types of outputs. - * The order is in the same order as list_outputs() - * auxTypes : list of numpy.dtype or None - * List of types of auxiliary states. - * The order is in the same order as list_auxiliary() - */ - def inferType(args: DType*) : (Seq[DType], Seq[DType], Seq[DType]) = { - val sdata: Array[Int] = args.map { dtype => - if (dtype == null) -1 - else dtype.id - }.toArray - inferType(null, sdata) - } - - /** - * Infer the type of outputs and arguments given known types of some arguments. - * A tuple of nulls is returned if there is not enough information passed in. - * An error will be raised if there is inconsistency found in the known types passed in. - * @param kwargs Provide keyword arguments of known types. - * @return - * argTypes : list of numpy.dtype or None - * List of types of arguments. - * The order is in the same order as list_arguments() - * outTypes : list of numpy.dtype or None - * List of types of outputs. - * The order is in the same order as list_outputs() - * auxTypes : list of numpy.dtype or None - * List of types of auxiliary states.
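A minimal sketch of the positional inferType overload above (the symbol and dtype are assumed for illustration):

    val x = Symbol.Variable("x")
    val y = x * 2f
    // Positional: one DType per entry of listArguments()
    val (argTypes, outTypes, auxTypes) = y.inferType(DType.Float32)
    // argTypes and outTypes are Seq(Float32); auxTypes is empty here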
- * The order is in the same order as list_auxiliary() - */ - def inferType(kwargs: Map[String, DType]) : (Seq[DType], Seq[DType], Seq[DType]) = { - val keys = kwargs.keys.toArray - val sdata = kwargs.values.map(_.id).toArray - inferType(keys, sdata) - } - - private def inferType(keys: Array[String], values: Array[Int]) - : (Seq[DType], Seq[DType], Seq[DType]) = { - val argTypeData = ListBuffer.empty[Int] - val outTypeData = ListBuffer.empty[Int] - val auxTypeData = ListBuffer.empty[Int] - val complete = new RefInt - checkCall(_LIB.mxSymbolInferType( - handle, keys, values, argTypeData, outTypeData, auxTypeData, complete)) - if (complete.value != 0) { - (argTypeData.map(DType(_)), outTypeData.map(DType(_)), auxTypeData.map(DType(_))) - } else { - (null, null, null) - } - } - - /** - * Infer the shape of outputs and arguments given known shapes of some arguments. - * Users can pass in the known shapes either positionally or as keyword arguments. - * A tuple of nulls is returned if there is not enough information passed in. - * An error will be raised if there is inconsistency found in the known shapes passed in. - * @param args Provide a list of DataDesc containing the shapes to resolve - * @return - * argShapes List of shapes of arguments. The order is in the same order as list_arguments() - * outShapes List of shapes of outputs. The order is in the same order as list_outputs() - * auxShapes List of shapes of auxiliary states. The order is in the same order as list_auxiliary() - */ - def inferShape(args: IndexedSeq[DataDesc]): - (IndexedSeq[Shape], IndexedSeq[Shape], IndexedSeq[Shape]) = { - val keys = ArrayBuffer.empty[String] - val indPtr = ArrayBuffer(0) - val sdata = ArrayBuffer.empty[Int] - args.foreach { arg => - val shape = arg.shape - if (shape != null) { - keys += arg.name - sdata ++= shape.toVector - indPtr += sdata.size - } - } - inferShape(keys.toArray, indPtr.toArray, sdata.toArray) - } - - /** - * Infer the shape of outputs and arguments given known shapes of some arguments. - * Users can pass in the known shapes either positionally or as keyword arguments. - * A tuple of nulls is returned if there is not enough information passed in. - * An error will be raised if there is inconsistency found in the known shapes passed in. - * @param args Provide shape of arguments in a positional way. - * Unknown shape can be marked as null - * @return - * argShapes List of shapes of arguments. The order is in the same order as list_arguments() - * outShapes List of shapes of outputs. The order is in the same order as list_outputs() - * auxShapes List of shapes of auxiliary states. The order is in the same order as list_auxiliary() - */ - def inferShape(args: Shape*): (IndexedSeq[Shape], IndexedSeq[Shape], IndexedSeq[Shape]) = { - val keys: Array[String] = null - val indPtr = ArrayBuffer(0) - val sdata = ArrayBuffer.empty[Int] - args.foreach { shape => - if (shape != null) { - sdata ++= shape.toVector - indPtr += sdata.size - } - } - inferShape(keys, indPtr.toArray, sdata.toArray) - } - - /** - * Infer the shape of outputs and arguments given known shapes of some arguments. - * Users can pass in the known shapes either positionally or as keyword arguments. - * A tuple of nulls is returned if there is not enough information passed in. - * An error will be raised if there is inconsistency found in the known shapes passed in. - * @param kwargs Provide keyword arguments of known shapes. - * @return - * argShapes List of shapes of arguments.
The order is in the same order as list_arguments() - * outShapes List of shapes of outputs. The order is in the same order as list_outputs() - * auxShapes List of shapes of auxiliary states. The order is in the same order as list_auxiliary() - */ - def inferShape(kwargs: Map[String, Shape]) - : (IndexedSeq[Shape], IndexedSeq[Shape], IndexedSeq[Shape]) = { - val keys = ArrayBuffer.empty[String] - val indPtr = ArrayBuffer(0) - val sdata = ArrayBuffer.empty[Int] - kwargs.foreach { case (key, shape) => - keys += key - sdata ++= shape.toVector - indPtr += sdata.size - } - inferShape(keys.toArray, indPtr.toArray, sdata.toArray) - } - - def inferShape(keys: Array[String], indPtr: Array[Int], values: Array[Int]) - : (IndexedSeq[Shape], IndexedSeq[Shape], IndexedSeq[Shape]) = { - val res = inferShapeImpl(partial = false, keys, indPtr, values) - if (res._2 == null) { - val (argShapes, _, _) = inferShapeImpl(partial = true, keys, indPtr, values) - val argNames = listArguments() - val unknown = (argNames zip argShapes).map { case (name, shape) => - val shapeIsNone = if (NumpyScope.isNumpyShape) { - shape == null || shape.toVector.contains(-1) - } else { - shape == null || shape.toVector.contains(0) - } - if (shapeIsNone) s"$name: $shape" else "" - } - logger.warn("Cannot decide shape for the following arguments. " + - "Consider providing them as input: \n\t{}", - unknown.filter(_ != "").mkString("\n\t")) - } - res - } - - private def inferShapeImpl(partial: Boolean, - keys: Array[String], - indPtr: Array[Int], - values: Array[Int]) - : (IndexedSeq[Shape], IndexedSeq[Shape], IndexedSeq[Shape]) = { - val argShapeData = ListBuffer.empty[Array[Int]] - val outShapeData = ListBuffer.empty[Array[Int]] - val auxShapeData = ListBuffer.empty[Array[Int]] - val complete = new RefInt - if (partial) { - checkCall(_LIB.mxSymbolInferShapePartial(handle, indPtr.length - 1, keys, indPtr, values, - argShapeData, outShapeData, auxShapeData, complete)) - } else { - checkCall(_LIB.mxSymbolInferShape(handle, indPtr.length - 1, keys, indPtr, values, - argShapeData, outShapeData, auxShapeData, complete)) - } - if (complete.value != 0) { - (argShapeData.map(s => Shape(s)).toIndexedSeq, - outShapeData.map(s => Shape(s)).toIndexedSeq, - auxShapeData.map(s => Shape(s)).toIndexedSeq) - } else { - (null, null, null) - } - } - - /** - * Get attribute string from the symbol; this function only works for non-grouped symbols. - * @param key The key to get attribute from. - * @return value The attribute value of the key, returns None if the attribute does not exist. - */ - def attr(key: String): Option[String] = { - val ret = new RefString - val success = new RefInt - checkCall(_LIB.mxSymbolGetAttr(handle, key, ret, success)) - if (success.value != 0) { - Option(ret.value) - } else { - None - } - } - - /** - * Invoke symbol as function on inputs. - * @param name resulting symbol name - * @param symbols provide named symbols - * @return the resulting symbol - */ - def apply(name: String, symbols: Map[String, Symbol]): Symbol = { - val s = clone() - s.compose(name, symbols) - s - } - - /** - * Get a debug string. - * @return Debug string of the symbol. - */ - def debugStr: String = { - val str = new RefString - checkCall(_LIB.mxSymbolPrint(handle, str)) - str.value - } - - // Set the attribute of the symbol. - private def setAttr(attr: Map[String, String]): Unit = { - attr.foreach { case (key, value) => - checkCall(_LIB.mxSymbolSetAttr(handle, key, value)) - } - } - - /** - * Gets all attributes from the symbol.
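Putting the shape-inference overloads together, a small sketch (the shapes are illustrative assumptions):

    val x = Symbol.Variable("x")
    val y = x + 1f
    val (argShapes, outShapes, auxShapes) = y.inferShape(Map("x" -> Shape(2, 3)))
    // outShapes.head == Shape(2, 3); an undecidable argument would trigger the warning above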
- * @return Map[String, String], mapping attribute keys to values. - */ - def listAttr(): Map[String, String] = { - val outSize = new MXUintRef - val out = ArrayBuffer[String]() - checkCall(_LIB.mxSymbolListAttrShallow(handle, outSize, out)) - (0 until outSize.value).map(i => out(i * 2) -> out(i * 2 + 1)).toMap - } - - /** - * Recursively gets all attributes from the symbol and its children. - * @return Map[String, Map[String, String]]. There is a key in the returned - * dict for every child with a non-empty attribute set. For each symbol, - * the name of the symbol is its key in the dict and the corresponding value - * is that symbol's attribute list (itself a dictionary). - */ - def attrMap(): Map[String, Map[String, String]] = { - val outSize = new MXUintRef - val out = ArrayBuffer[String]() - checkCall(_LIB.mxSymbolListAttr(handle, outSize, out)) - val result = { - val tmp = out.toArray.grouped(2).map{ strs => - val nk = strs(0).split('$') - (nk(0), nk(1), strs(1)) - }.toArray - val grouped = tmp.groupBy(_._1) - grouped.map { case (name, kvs) => name -> kvs.map(x => (x._2, x._3)).toMap } - } - result - } - - /** - * Save symbol into file. - * You can also use pickle to do the job if you only work on python. - * The advantage of load/save is that the file is language agnostic. - * This means the file saved using save can be loaded by other language bindings of mxnet. - * You also get the benefit of being able to load/save directly from cloud storage (S3, HDFS) - * - * @param fname The name of the file - * - s3://my-bucket/path/my-s3-symbol - * - hdfs://my-bucket/path/my-hdfs-symbol - * - /path-to/my-local-symbol - * @see Symbol.load : Used to load symbol from file. - */ - def save(fname: String): Unit = { - checkCall(_LIB.mxSymbolSaveToFile(this.handle, fname)) - } - - /** - * Compose symbol on inputs. - * This call mutates the current symbol. - * @param name resulting symbol name - * @param symbols provide positional arguments - */ - private def compose(name: String, symbols: Array[Symbol]): Unit = { - val args = symbols.map(_.handle) - checkCall(_LIB.mxSymbolCompose(handle, name, null, args)) - } - - private def compose(name: String, symbols: Map[String, Symbol]): Unit = { - val keys = symbols.keys.toArray - val args = symbols.values.map(_.handle).toArray - checkCall(_LIB.mxSymbolCompose(handle, name, keys, args)) - } - - /** - * Bind current symbol to get an executor, allocating all the NDArrays needed. - * Allows specifying data types. - * This function lets users pass in the NDArrays they would like to bind, - * and automatically allocates NDArrays for any arguments - * and auxiliary states the user did not specify explicitly. - * - * @param ctx The device context the generated executor runs on. - * @param gradReq {'write', 'add', 'null'}, or list of str or dict of str to str, optional - * Specifies how we should update the gradient to the args_grad. - * - 'write' means the gradient is written to the specified args_grad NDArray each time. - * - 'add' means the gradient is added to the specified NDArray each time. - * - 'null' means no action is taken, so the gradient may not be calculated.
- * @param descs List of DataDesc describing the inputs - * @return The generated Executor - */ - def simpleBind(ctx: Context, gradReq: String, - descs: IndexedSeq[DataDesc]) : Executor = { - val (shapes, types) = descs.map(desc => - ( desc.name -> desc.shape, desc.name -> desc.dtype )).unzip - simpleBind(ctx, gradReq, shapes.toMap, types.toMap) - } - - /** - * Bind current symbol to get an executor, allocating all the NDArrays needed. - * Allows specifying data types. - * This function lets users pass in the NDArrays they would like to bind, - * and automatically allocates NDArrays for any arguments - * and auxiliary states the user did not specify explicitly. - * - * @param ctx The device context the generated executor runs on. - * @param gradReq {'write', 'add', 'null'}, or list of str or dict of str to str, optional - * Specifies how we should update the gradient to the args_grad. - * - 'write' means the gradient is written to the specified args_grad NDArray each time. - * - 'add' means the gradient is added to the specified NDArray each time. - * - 'null' means no action is taken, so the gradient may not be calculated. - * @param shapeDict Input shape dictionary, name->shape - * @param typeDict Input type dictionary, name->dtype - * @return The generated Executor - */ - def simpleBind(ctx: Context, gradReq: String = "write", - shapeDict: Map[String, Shape], - typeDict: Map[String, DType] = null) - : Executor = { - val types = - if (typeDict == null) { - listArguments().map((_, MX_REAL_TYPE)).toMap - } else { - typeDict - } - val (argShapes, _, auxShapes) = inferShape(shapeDict) - val (argTypes, _, auxTypes) = inferType(types) - require(argShapes != null, "Shape inference failed. " + - s"Known shapes are $shapeDict for symbol arguments ${listArguments()} " + - s"and aux states ${listAuxiliaryStates()}") - require(argTypes != null, "Type inference failed. " + - s"Known types are $typeDict for symbol arguments ${listArguments()} " + - s"and aux states ${listAuxiliaryStates()}") - - // alloc space - val argNDArrays = (argShapes zip argTypes) map { case (shape, t) => - NDArray.zeros(shape, ctx, dtype = t) - } - val gradNDArrays = - if (gradReq != "null") { - (((listArguments() zip argShapes) zip argTypes) flatMap { case ((name, shape), t) => - if (!(name.endsWith("data") || name.endsWith("label"))) { - Map(name -> NDArray.zeros(shape, ctx, dtype = t)) - } else { - Map.empty[String, NDArray] - } - }).toMap - } else { - null - } - val auxNDArrays = (auxShapes zip auxTypes) map { case (shape, t) => - NDArray.zeros(shape, ctx, dtype = t) - } - bind(ctx, argNDArrays, gradNDArrays, gradReq, auxNDArrays, null, null) - } - - /** - * Bind current symbol to get an executor. - * - * @param ctx The device context the generated executor runs on. - * @param args Input arguments to the symbol. - * - If type is list of NDArray, the position is in the same order of list_arguments. - * - If type is dict of str to NDArray, then it maps the name of arguments - * to the corresponding NDArray. - * - In either case, all the arguments must be provided. - * @param argsGrad When specified, args_grad provides NDArrays to hold - * the gradient results computed in backward. - * - If type is list of NDArray, - * the position is in the same order of list_arguments. - * - If type is dict of str to NDArray, then it maps the name of arguments - * to the corresponding NDArray. - * - When the type is dict of str to NDArray, users only need to provide the dict - * for the needed argument gradients.
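A sketch of simpleBind as defined above (the context, shape, and forward call are illustrative assumptions):

    val data = Symbol.Variable("data")
    val net = data * 2f
    val exec = net.simpleBind(Context.cpu(), "write", Map("data" -> Shape(4, 4)))
    // per the allocation logic above, names ending in "data"/"label" get no gradient buffer
    exec.forward()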
Only the specified argument gradients will be calculated. - * @param gradReq {'write', 'add', 'null'}, or list of str or dict of str to str, optional - * Specifies how we should update the gradient to the args_grad. - * - 'write' means the gradient is written to the specified args_grad NDArray each time. - * - 'add' means the gradient is added to the specified NDArray each time. - * - 'null' means no action is taken, so the gradient may not be calculated. - * @param auxStates Input auxiliary states to the symbol, only needed when - * list_auxiliary_states is not empty. - * - If type is list of NDArray, - * the position is in the same order of listAuxiliaryStates - * - If type is dict of str to NDArray, then it maps the name of auxiliary_states - * to the corresponding NDArray, - * - In either case, all the auxiliary_states need to be provided. - * @param group2ctx The dict mapping the ``ctx_group`` attribute to the context assignment. - * @param sharedExec Executor to share memory with. - * - This is intended for runtime reshaping, variable length sequences, etc. - * - The returned executor shares state with shared_exec, - * and should not be used in parallel with it. - * @return The generated Executor - * @note - * Auxiliary states are special states of symbols that do not correspond to an argument - * and do not have a gradient, but are still useful for specific operations. - * A common example of auxiliary state is the moving_mean and moving_variance in BatchNorm. - * Most operators do not have auxiliary states and this parameter can be safely ignored. - * - * Users can skip gradients by passing a dict as args_grad and specifying only - * the gradients they are interested in. - */ - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray], - gradReq: String, auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, - Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray], - gradReq: String, auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, - Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray], - gradReq: String, auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, - Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray], - gradReq: String, auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, - Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray], - gradReq: String, auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, - Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec) - } - - def bind(ctx: Context,
args: Map[String, NDArray], argsGrad: Seq[NDArray], - gradReq: String, auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, - Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray], - gradReq: String, auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, - Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray], - gradReq: String, auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, - Seq.fill(symbolArguments.size)(gradReq), auxStates, group2ctx, sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray], - gradsReq: Seq[String], auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray], - gradsReq: Seq[String], auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray], - gradsReq: Seq[String], auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray], - gradsReq: Seq[String], auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray], - gradsReq: Seq[String], auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray], - gradsReq: Seq[String], auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray], - gradsReq: Seq[String], auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Map[String, 
NDArray], argsGrad: Map[String, NDArray], - gradsReq: Seq[String], auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray], - gradsReq: Map[String, String], auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray], - gradsReq: Map[String, String], auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray], - gradsReq: Map[String, String], auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray], - gradsReq: Map[String, String], auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray], - gradsReq: Map[String, String], auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray], - gradsReq: Map[String, String], auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray], - gradsReq: Map[String, String], auxStates: Seq[NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray], - gradsReq: Map[String, String], auxStates: Map[String, NDArray], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, argsGrad, gradsReq, auxStates, group2ctx, - sharedExec) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Seq[NDArray]): Executor = { - bind(ctx, args, argsGrad, "write", Nil, null, null) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Map[String, NDArray]): Executor = { - bind(ctx, args, argsGrad, "write", Nil, null, null) - } - - def bind(ctx: Context, args: Map[String, NDArray], argsGrad: Seq[NDArray]): Executor = { - bind(ctx, args, 
argsGrad, "write", Nil, null, null) - } - - def bind(ctx: Context, args: Seq[NDArray], argsGrad: Map[String, NDArray]): Executor = { - bind(ctx, args, argsGrad, "write", Nil, null, null) - } - - def bind(ctx: Context, args: Seq[NDArray]): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, null, - Seq.fill(symbolArguments.size)("write"), Nil, null, null) - } - - def bind(ctx: Context, args: Map[String, NDArray]): Executor = { - val symbolArguments = listArguments() - bindHelper(ctx, symbolArguments, args, null, - Seq.fill(symbolArguments.size)("write"), Nil, null, null) - } - - private def bindHelper(ctx: Context, symbolArguments: Seq[String], - args: Iterable[_], argsGrad: Iterable[_], - gradsReq: Iterable[_], auxStates: Iterable[_], - group2ctx: Map[String, Context], sharedExec: Executor): Executor = { - require(args != null && !args.isInstanceOf[Set[_]], - s"args must be provided (Set is not supported)") - require(argsGrad == null || !argsGrad.isInstanceOf[Set[_]], - s"argsGrad must be provided (Set is not supported)") - require(auxStates == null || !auxStates.isInstanceOf[Set[_]], - s"auxStates must be provided (Set is not supported)") - require(gradsReq != null && !gradsReq.isInstanceOf[Set[_]], - s"gradsReq must be provided (Set is not supported)") - - val (argsHandle, argsNDArray) = - if (args.isInstanceOf[Seq[_]]) { - Symbol.getNDArrayInputs("args", args.asInstanceOf[Seq[NDArray]], - symbolArguments, allowMissing = false) - } else { - Symbol.getNDArrayInputs("args", args.asInstanceOf[Map[String, NDArray]], - symbolArguments, allowMissing = false) - } - - // setup args gradient - val (argsGradHandle, argsGradNDArray) = - if (argsGrad == null) { - (Array.fill[NDArrayHandle](args.size)(0L), null) - } else if (argsGrad.isInstanceOf[Seq[_]]) { - Symbol.getNDArrayInputs("args_grad", argsGrad.asInstanceOf[Seq[NDArray]], - symbolArguments, allowMissing = true) - } else { - Symbol.getNDArrayInputs("args_grad", argsGrad.asInstanceOf[Map[String, NDArray]], - symbolArguments, allowMissing = true) - } - - val (auxArgsHandle, auxStatesNDArray) = - if (auxStates == null) { - Symbol.getNDArrayInputs("aux_states", Nil, listAuxiliaryStates(), allowMissing = false) - } else if (auxStates.isInstanceOf[Seq[_]]) { - Symbol.getNDArrayInputs("aux_states", auxStates.asInstanceOf[Seq[NDArray]], - listAuxiliaryStates(), allowMissing = false) - } else { - Symbol.getNDArrayInputs("aux_states", auxStates.asInstanceOf[Map[String, NDArray]], - listAuxiliaryStates(), allowMissing = false) - } - - // setup requirements - val reqsArray = - if (gradsReq.isInstanceOf[Seq[_]]) { - gradsReq.asInstanceOf[Seq[String]].map { req => - require(Symbol.bindReqMap.contains(req), - s"grad_req $req must be in ${Symbol.bindReqMap}") - Symbol.bindReqMap(req) - }.toArray - } else { - val gradsReqMap = gradsReq.asInstanceOf[Map[String, String]] - symbolArguments.map { req => - val value = gradsReqMap.getOrElse(req, "null") - require(Symbol.bindReqMap.contains(value), - s"grad_req $req must be in ${Symbol.bindReqMap}") - Symbol.bindReqMap(value) - }.toArray - } - - val ctxMapKeys = ArrayBuffer.empty[String] - val ctxMapDevTypes = ArrayBuffer.empty[Int] - val ctxMapDevIDs = ArrayBuffer.empty[Int] - - if (group2ctx != null) { - group2ctx.foreach { case (key, value) => - ctxMapKeys += key - ctxMapDevTypes += value.deviceTypeid - ctxMapDevIDs += value.deviceId - } - } - - val execHandle = new ExecutorHandleRef - val sharedHandle = if (sharedExec != null) sharedExec.handle else 0L - 
checkCall(_LIB.mxExecutorBindEX(handle, - ctx.deviceTypeid, - ctx.deviceId, - ctxMapKeys.size, - ctxMapKeys.toArray, - ctxMapDevTypes.toArray, - ctxMapDevIDs.toArray, - args.size, - argsHandle, - argsGradHandle, - reqsArray, - auxArgsHandle, - sharedHandle, - execHandle)) - - val executorGroup2ctx = - if (group2ctx == null) null - else group2ctx.map { case (key, value) => - key -> new Context(value.deviceType, value.deviceId) - } - - // If this is in a scope then we want to create the clone in the same scope - var newSymbol: Symbol = null - ResourceScope.usingIfScopeExists(this.scope) { - newSymbol = this.clone() - } - - new Executor(execHandle.value, newSymbol, argsNDArray, argsGradNDArray, - auxStatesNDArray, new Context(ctx.deviceType, ctx.deviceId), - gradsReq, executorGroup2ctx) - - } - - /** - * Save symbol into a JSON string. - * See Also - * symbol.loadJson : Used to load symbol from JSON string. - */ - def toJson: String = { - val jsonStr = new RefString - checkCall(_LIB.mxSymbolSaveToJSON(handle, jsonStr)) - jsonStr.value - } - -} - -/** - * Symbol Object extends from SymbolBase for abstract function signatures - * Main code will be generated during compile time through Macros - */ -@AddSymbolFunctions(false) -object Symbol extends SymbolBase { - private type SymbolCreateNamedFunc = Map[String, Any] => Symbol - private val logger = LoggerFactory.getLogger(classOf[Symbol]) - private val functions: Map[String, SymbolFunction] = initSymbolModule() - private val bindReqMap = Map("null" -> 0, "write" -> 1, "add" -> 3) - - val api = SymbolAPI - val random = SymbolRandomAPI - - def pow(sym1: Symbol, sym2: Symbol): Symbol = { - Symbol.createFromListedSymbols("_Power")(Array(sym1, sym2)) - } - - def pow[@specialized(Int, Float, Double) V](sym: Symbol, number: V): Symbol = { - Symbol.createFromListedSymbols("_PowerScalar")(Array(sym), Map("scalar" -> number.toString)) - } - - def pow[@specialized(Int, Float, Double) V](number: V, sym: Symbol): Symbol = { - Symbol.createFromListedSymbols("_RPowerScalar")(Array(sym), Map("scalar" -> number.toString)) - } - - def max(left: Symbol, right: Symbol): Symbol = { - createFromListedSymbols("_Maximum")(Array(left, right)) - } - - def max[@specialized(Int, Float, Double) V](left: Symbol, right: V): Symbol = { - createFromListedSymbols("_MaximumScalar")(Array(left), Map("scalar" -> right.toString)) - } - - def max[@specialized(Int, Float, Double) V](left: V, right: Symbol): Symbol = { - createFromListedSymbols("_MaximumScalar")(Array(right), Map("scalar" -> left.toString)) - } - - def min(left: Symbol, right: Symbol): Symbol = { - createFromListedSymbols("_Minimum")(Array(left, right)) - } - - def min[@specialized(Int, Float, Double) V](left: Symbol, right: V): Symbol = { - createFromListedSymbols("_MinimumScalar")(Array(left), Map("scalar" -> right.toString)) - } - - def min[@specialized(Int, Float, Double) V](left: V, right: Symbol): Symbol = { - createFromListedSymbols("_MinimumScalar")(Array(right), Map("scalar" -> left.toString)) - } - - def equal(left: Symbol, right: Symbol): Symbol = { - createFromListedSymbols("_equal")(Array(left, right)) - } - - def equal[@specialized(Int, Float, Double) V](left: Symbol, right: V): Symbol = { - createFromListedSymbols("_equal_scalar")(Array(left), Map("scalar" -> right.toString)) - } - - def equal[@specialized(Int, Float, Double) V](left: V, right: Symbol): Symbol = { - createFromListedSymbols("_equal_scalar")(Array(right), Map("scalar" -> left.toString)) - } - - def notEqual(left: Symbol, right: Symbol): 
Symbol = { - createFromListedSymbols("_not_equal")(Array(left, right)) - } - - def notEqual[@specialized(Int, Float, Double) V](left: Symbol, right: V): Symbol = { - createFromListedSymbols("_not_equal_scalar")(Array(left), Map("scalar" -> right.toString)) - } - - def notEqual[@specialized(Int, Float, Double) V](left: V, right: Symbol): Symbol = { - createFromListedSymbols("_not_equal_scalar")(Array(right), Map("scalar" -> left.toString)) - } - - def greater(left: Symbol, right: Symbol): Symbol = { - createFromListedSymbols("_greater")(Array(left, right)) - } - - def greater[@specialized(Int, Float, Double) V](left: Symbol, right: V): Symbol = { - createFromListedSymbols("_greater_scalar")(Array(left), Map("scalar" -> right.toString)) - } - - def greaterEqual(left: Symbol, right: Symbol): Symbol = { - createFromListedSymbols("_greater_equal")(Array(left, right)) - } - - def greaterEqual[@specialized(Int, Float, Double) V](left: Symbol, right: V): Symbol = { - createFromListedSymbols("_greater_equal_scalar")(Array(left), Map("scalar" -> right.toString)) - } - - def lesser(left: Symbol, right: Symbol): Symbol = { - createFromListedSymbols("_lesser")(Array(left, right)) - } - - def lesser[@specialized(Int, Float, Double) V](left: Symbol, right: V): Symbol = { - createFromListedSymbols("_lesser_scalar")(Array(left), Map("scalar" -> right.toString)) - } - - def lesserEqual(left: Symbol, right: Symbol): Symbol = { - createFromListedSymbols("_lesser_equal")(Array(left, right)) - } - - def lesserEqual[@specialized(Int, Float, Double) V](left: Symbol, right: V): Symbol = { - createFromListedSymbols("_lesser_equal_scalar")(Array(left), Map("scalar" -> right.toString)) - } - - /** - * Returns a new symbol of given shape and type, filled with zeros. - */ - def zeros(shape: Shape, dType: DType = Base.MX_REAL_TYPE, ctx: Context = null): Symbol = { - val params = Map("shape" -> shape.toString, "dtype" -> dType.toString()) - val fParams = if (ctx == null) params else params ++ Map("ctx" -> ctx.toString) - createSymbolGeneral("_zeros", null, null, Array.empty[Symbol], fParams) - } - - /** - * Returns a new symbol of given shape and type, filled with ones. - */ - def ones(shape: Shape, dType: DType = Base.MX_REAL_TYPE, ctx: Context = null): Symbol = { - val params = Map("shape" -> shape.toString, "dtype" -> dType.toString()) - val fParams = if (ctx == null) params else params ++ Map("ctx" -> ctx.toString) - createSymbolGeneral("_ones", null, null, Array.empty[Symbol], fParams) - } - - /** - * Returns evenly spaced values within a given interval. - * @param start Start of interval. The default start value is 0. - * @param stop End of interval. - * @param step Spacing between values. The default step size is 1. - * @param repeat Number of times to repeat each element. The default repeat count is 1. - * @param dType The data type of the `NDArray`. The default datatype is `DType.Float32`. - * @return Symbol The created Symbol. - */ - def arange(start: Float, stop: Option[Float] = None, step: Float = 1.0f, - repeat: Int = 1, name: String = null, dType: DType = Base.MX_REAL_TYPE): Symbol = { - arange(start, stop, step, repeat, infer_range = false, name, dType) - } - - /** - * Returns evenly spaced values within a given interval. - * The stop value can be inferred from the output shape, - * which must be known from the rest of the net. - * @param start Start of interval. The default start value is 0. - * @param stop End of interval. - * @param step Spacing between values. The default step size is 1.
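The factory helpers above compose the corresponding operators; a brief sketch (values assumed):

    val z = Symbol.zeros(Shape(2, 2))                  // _zeros
    val o = Symbol.ones(Shape(2, 2), DType.Float32)    // _ones
    val r = Symbol.arange(0f, Some(6f), step = 2f)     // values 0, 2, 4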
- * @param repeat Number of times to repeat each element. The default repeat count is 1. - * @param infer_range - * When set to true, infer the stop position from the start, step, - * repeat, and output tensor size. - * @param name Name of the resulting symbol. - * @param dType The data type of the `NDArray`. The default datatype is `DType.Float32`. - * @return Symbol of evenly spaced values in the specified range. - */ - def arange(start: Float, stop: Option[Float], step: Float, - repeat: Int, infer_range: Boolean, name: String, - dType: DType): Symbol = { - val params = Map("start" -> start, "step" -> step, "repeat" -> repeat, - "infer_range" -> infer_range, "dtype" -> dType.toString()) - val fParams = if (stop == None) params else params ++ Map("stop" -> stop.get) - createSymbolGeneral("_arange", name, null, Array.empty[Symbol], fParams) - } - - // TODO(depeng) support setting initialization pattern - /** - * Create a symbolic variable with specified name. - * @param name Name of the variable. - * @param attr Additional attributes to set on the variable. - * @param shape - * The shape of a variable. If specified, this will be used during the shape inference. - * If one has specified a different shape for this variable using a keyword argument - * when calling shape inference, this shape information will be ignored. - * @param lrMult The learning rate multiplier for input variable. - * @param wdMult Weight decay multiplier for input variable. - * @param dType The dtype for input variable. If not specified, this value will be inferred. - * @param init Initializer for this variable to (optionally) override the default initializer. - * @param kwargs Additional attributes which must start and end with double underscores. - * @return A symbol corresponding to an input to the computation graph. - */ - def Variable(name: String, attr: Map[String, String] = null, shape: Shape = null, - lrMult: Option[Float] = None, wdMult: Option[Float] = None, dType: DType = null, - kwargs: Map[String, String] = Map.empty[String, String]): Symbol = { - val handle = new SymbolHandleRef - checkCall(_LIB.mxSymbolCreateVariable(name, handle)) - val sym = new Symbol(handle.value) - val tmpAttr = scala.collection.mutable.Map[String, String]() - if (shape != null) tmpAttr += "__shape__" -> shape.toString - if (lrMult != None) tmpAttr += "__lr_mult__" -> lrMult.get.toString - if (wdMult != None) tmpAttr += "__wd_mult__" -> wdMult.get.toString - if (dType != null) tmpAttr += "__dtype__" -> dType.id.toString - for ((k, v) <- kwargs) { - require(k.startsWith("__") && k.endsWith("__"), - s"Attribute name=$k is not supported. " + - "Additional attributes must start and end with double underscores, e.g, __yourattr__") - tmpAttr += k -> v - } - if (attr != null) { - attr.foreach { case (k, v) => tmpAttr += k -> v } - } - sym.setAttr(AttrScope.current.get(Option(tmpAttr.toMap))) - sym - } - - /** - * Create a symbol that groups symbols together. - * @param symbols List of symbols to be grouped. - * @return The created group symbol. - */ - def Group(symbols: Symbol*): Symbol = { - val ihandles = symbols.map(_.handle).toArray - val handle = new SymbolHandleRef - checkCall(_LIB.mxSymbolCreateGroup(ihandles, handle)) - new Symbol(handle.value) - } - - // List and add all the atomic symbol functions to current module.
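A short sketch of Variable and Group from above (the names and multipliers are illustrative):

    val w = Symbol.Variable("w", shape = Shape(3, 3), lrMult = Some(0.1f))
    val x = Symbol.Variable("x")
    val grouped = Symbol.Group(w * 2f, x + 1f)
    grouped.listOutputs()   // outputs of both branches, in order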
- private def initSymbolModule(): Map[String, SymbolFunction] = { - val opNames = ListBuffer.empty[String] - checkCall(_LIB.mxListAllOpNames(opNames)) - opNames.map(opName => { - val opHandle = new RefLong - checkCall(_LIB.nnGetOpHandle(opName, opHandle)) - makeAtomicSymbolFunction(opHandle.value, opName) - }).toMap - } - - // Create an atomic symbol function by handle and function name. - private def makeAtomicSymbolFunction(handle: SymbolHandle, aliasName: String) - : (String, SymbolFunction) = { - val name = new RefString - val desc = new RefString - val keyVarNumArgs = new RefString - val numArgs = new RefInt - val argNames = ListBuffer.empty[String] - val argTypes = ListBuffer.empty[String] - val argDescs = ListBuffer.empty[String] - - checkCall(_LIB.mxSymbolGetAtomicSymbolInfo( - handle, name, desc, numArgs, argNames, argTypes, argDescs, keyVarNumArgs)) - (aliasName, new SymbolFunction(handle, keyVarNumArgs.value)) - } - - // Used by SymbolMacro - private[mxnet] def createSymbolGeneral(operator: String, name: String, attr: Map[String, String], - symbols: Seq[Symbol], kwargs: Map[String, Any]): Symbol = { - val symbolKwargs: Map[String, Symbol] = - if (kwargs == null || kwargs.isEmpty) { - Map.empty[String, Symbol] - } else { - kwargs.filter { case (key, value) => - value.isInstanceOf[Symbol] - }.map { case (key, value) => - (key, value.asInstanceOf[Symbol]) - } - } - val strKwargs: Map[String, String] = - if (kwargs == null || kwargs.isEmpty) { - Map.empty[String, String] - } else { - kwargs.filter { case (key, value) => - !value.isInstanceOf[Symbol] - }.map { case (key, value) => - (key, value.toString) - } - } - require(symbols.isEmpty || symbolKwargs.isEmpty, - s"$operator can only accept input Symbols either as positional or keyword arguments, " + - s"not both") - if (symbols.isEmpty) { - createFromNamedSymbols(operator, name, attr)(symbolKwargs, strKwargs) - } else { - createFromListedSymbols(operator, name, attr)(symbols.toArray, strKwargs) - } - } - - /** - * Create a symbol by invoking the given operator with positional Symbol inputs. - * The parameters listed below can be passed in as keyword arguments.
- * @param symbols Symbol parameters passed to create the resulting symbol - * @param paramKwargs Key-value parameters passed to create the resulting symbol - * @param attr Attributes set to the resulting symbol - * @return the resulting symbol - */ - def createFromListedSymbols( - operator: String, name: String = null, attr: Map[String, String] = null)( - symbols: Array[Symbol], paramKwargs: Map[String, String] = null): Symbol = { - val function = functions(operator) - require(function != null, s"invalid operator name $operator") - - val params = if (paramKwargs == null) Map.empty[String, String] else paramKwargs - val addkeyVarNumArgs = (function.keyVarNumArgs != null - && !function.keyVarNumArgs.isEmpty - && !params.contains(function.keyVarNumArgs)) - - val paramKeys: Array[String] = ( - if (addkeyVarNumArgs) Array[String](function.keyVarNumArgs) - else Array.empty[String] - ) ++ params.keys - val paramVals: Array[String] = ( - if (addkeyVarNumArgs) Array[String](symbols.length.toString) - else Array.empty[String] - ) ++ params.values - - // create atomic symbol - val symHandle = new SymbolHandleRef - checkCall(_LIB.mxSymbolCreateAtomicSymbol( - function.handle, paramKeys, paramVals, symHandle)) - - val s = new Symbol(symHandle.value) - val attrAll = AttrScope.current.get(Option(attr)) - s.setAttr(attrAll) - val hint = operator.toLowerCase - val managedName = NameManager.current.get(Option(name), hint) - s.compose(managedName, symbols) - s - } - - /** - * Create a symbol by invoking the given operator with named Symbol inputs. - * The parameters listed below can be passed in as keyword arguments. - * @param symbols Named symbol parameters passed to create the resulting symbol - * @param paramKwargs Key-value parameters passed to create the resulting symbol - * @param attr Attributes set to the resulting symbol - * @return the resulting symbol - */ - def createFromNamedSymbols( - operator: String, name: String = null, attr: Map[String, String] = null)( - symbols: Map[String, Symbol], paramKwargs: Map[String, String] = null): Symbol = { - val function = functions(operator) - require(function != null, s"invalid operator name $operator") - require(function.keyVarNumArgs == null || function.keyVarNumArgs.isEmpty, - s"[$operator] supports variable-length Symbol arguments.\n" + - "Please pass all the input Symbols via positional arguments instead of keyword arguments.") - - val paramKeys = - if (paramKwargs == null) Array.empty[String] - else paramKwargs.keys.toArray - val paramVals = - if (paramKwargs == null) Array.empty[String] - else paramKwargs.values.toArray - val symHandle = new SymbolHandleRef - checkCall(_LIB.mxSymbolCreateAtomicSymbol( - function.handle, paramKeys, paramVals, symHandle)) - - val s = new Symbol(symHandle.value) - val attrAll = AttrScope.current.get(Option(attr)) - s.setAttr(attrAll) - val hint = operator.toLowerCase - val managedName = NameManager.current.get(Option(name), hint) - s.compose(managedName, symbols) - s - } - - // a more friendly interface for creating symbols - // all values except symbols in kwargs will be cast to String using its toString() method - @deprecated("Use Checked version", "0.1.2") - def createFromNamedSymbolsNoCheck( - operator: String, name: String = null, attr: Map[String, String] = null)( - kwargs: Map[String, Any]): Symbol = { - val symbolArgs = kwargs.filter { case (key, value) => - value.isInstanceOf[Symbol] - }.map { case (key, value) => - (key, value.asInstanceOf[Symbol]) - } - val strArgs = kwargs.filter { case (key, value) => - !value.isInstanceOf[Symbol] - }.map {
case (key, value) => - (key, value.toString) - } - createFromNamedSymbols(operator, name, attr)(symbolArgs, strArgs) - } - - // a more friendly interface for creating symbols - // all values except symbols in kwargs will be cast to String using its toString() method - @deprecated("Use Checked version", "0.1.2") - def createFromListedSymbolsNoCheck( - operator: String, name: String = null, attr: Map[String, String] = null)( - symbols: Array[Symbol], kwargs: Map[String, Any] = null): Symbol = { - val args = - if (kwargs == null) null - else kwargs.map { case (key, value) => (key, value.toString) } - createFromListedSymbols(operator, name, attr)(symbols, args) - } - - /** - * Helper function to get NDArray handle lists from various inputs. - * @param argKey The name of argument, used for error message. - * @param args list of NDArray or dict of str to NDArray - * Input arguments to the symbols. - * If type is list of NDArray, the position is in the same order of arg_names. - * If type is dict of str to NDArray, then it maps the name of arguments - * to the corresponding NDArray - * @param argNames List of argument names. - * @param allowMissing Whether missing arguments are allowed. - * When allowed, the missing handle will be set to null - * @return The positional list of NDArrayHandles generated from input. - */ - private def getNDArrayInputs(argKey: String, args: Seq[NDArray], argNames: Seq[String], - allowMissing: Boolean): (Array[NDArrayHandle], Array[NDArray]) = { - require(args.length == argNames.length, - s"Length of $argKey does not match the number of arguments") - val argHandles = args.map(_.handle) - (argHandles.toArray, args.toArray) - } - - private def getNDArrayInputs(argKey: String, args: Map[String, NDArray], argNames: Seq[String], - allowMissing: Boolean): (Array[NDArrayHandle], Array[NDArray]) = { - val argArrays = ArrayBuffer.empty[NDArray] - val argHandles = ArrayBuffer.empty[NDArrayHandle] - argNames.foreach { name => - args.get(name) match { - case narr: Some[NDArray] => - argArrays += narr.get - argHandles += narr.get.handle - case None => - require(allowMissing, - s"Must specify all the arguments in $argKey. $name is unknown") - argArrays += null - argHandles += 0L - } - } - (argHandles.toArray, argArrays.toArray) - } - - /** - * Load symbol from a JSON file. - * - * You can also use pickle to do the job if you only work on python. - * The advantage of load/save is that the file is language agnostic. - * This means the file saved using save can be loaded by other language bindings of mxnet. - * You also get the benefit of being able to load/save directly from cloud storage (S3, HDFS) - * - * @param fname The name of the file, examples: - * - `s3://my-bucket/path/my-s3-symbol` - * - `hdfs://my-bucket/path/my-hdfs-symbol` - * - `/path-to/my-local-symbol` - * @return The loaded symbol. - * @see Symbol.save : Used to save symbol into file. - */ - def load(fname: String): Symbol = { - val handle = new SymbolHandleRef - checkCall(_LIB.mxSymbolCreateFromFile(fname, handle)) - new Symbol(handle.value) - } - - /** - * Load symbol from a JSON string. - * @param json A JSON string. - * @return The loaded symbol. - * @see Symbol.toJson : Used to save symbol into a JSON string.
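A round-trip sketch for the JSON serialization described above (the symbol construction is assumed):

    val net = Symbol.Variable("data") * 2f
    val json = net.toJson
    val restored = Symbol.loadJson(json)
    restored.listArguments()   // IndexedSeq("data")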
- */ - def loadJson(json: String): Symbol = { - val handle = new SymbolHandleRef - checkCall(_LIB.mxSymbolCreateFromJSON(json, handle)) - new Symbol(handle.value) - } -} - -private case class SymbolFunction(handle: SymbolHandle, keyVarNumArgs: String) - -object SymbolConversions { - implicit def int2Scalar(x: Int): SymbolConversions[Int] = new SymbolConversions(x) - implicit def double2Scalar(x: Double): SymbolConversions[Double] = new SymbolConversions(x) - implicit def float2Scalar(x: Float): SymbolConversions[Float] = new SymbolConversions(x) -} - -class SymbolConversions[@specialized(Int, Float, Double) V](val value: V) { - def +(other: Symbol): Symbol = { - other + value - } - - def -(other: Symbol): Symbol = { - Symbol.createFromListedSymbols("_RMinusScalar")( - Array(other), Map("scalar" -> value.toString)) - } - - def *(other: Symbol): Symbol = { - other * value - } - - def /(other: Symbol): Symbol = { - Symbol.createFromListedSymbols("_RDivScalar")( - Array(other), Map("scalar" -> value.toString)) - } - - def **(other: Symbol): Symbol = { - Symbol.pow(value, other) - } - - def >(other: Symbol): Symbol = { - other < value - } - - def >=(other: Symbol): Symbol = { - other <= value - } - - def <(other: Symbol): Symbol = { - other > value - } - - def <=(other: Symbol): Symbol = { - other >= value - } - - def %(other: Symbol): Symbol = { - Symbol.createFromListedSymbols("_RModScalar")( - Array(other), Map("scalar" -> value.toString)) - } -} - -trait SymbolGenerator { - def generate(key: AnyRef): Symbol -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/SymbolAPI.scala b/scala-package/core/src/main/scala/org/apache/mxnet/SymbolAPI.scala deleted file mode 100644 index f166de11ea52..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/SymbolAPI.scala +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.mxnet - -import scala.collection.mutable - - -/** - * typesafe Symbol API: Symbol.api._ - * Main code will be generated during compile time through Macros - */ -@AddSymbolAPIs(false) -object SymbolAPI extends SymbolAPIBase { - def Custom (op_type : String, kwargs : mutable.Map[String, Any], - name : String = null, attr : Map[String, String] = null) : Symbol = { - val map = kwargs - map.put("op_type", op_type) - Symbol.createSymbolGeneral("Custom", name, attr, Seq(), map.toMap) - } -} - -/** - * typesafe Symbol random module: Symbol.random._ - * Main code will be generated during compile time through Macros - */ -@AddSymbolRandomAPIs(false) -object SymbolRandomAPI extends SymbolRandomAPIBase { - -} - diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/Visualization.scala b/scala-package/core/src/main/scala/org/apache/mxnet/Visualization.scala deleted file mode 100644 index b990137b5a45..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/Visualization.scala +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import scala.util.parsing.json._ -import java.io.File -import java.io.PrintWriter -import scala.collection.mutable.ArrayBuffer -import scala.language.postfixOps - -object Visualization { - - /** - * A simplified implementation of the Python Graphviz library's functionality - * based on: https://github.com/xflr6/graphviz/tree/master/graphviz - */ - class Dot(name: String) { - // http://www.graphviz.org/cgi-bin/man?dot - private val ENGINES = Set( - "dot", "neato", "twopi", "circo", "fdp", "sfdp", "patchwork", "osage" - ) - - // http://www.graphviz.org/doc/info/output.html - private val FORMATS = Set( - "bmp", - "canon", "dot", "gv", "xdot", "xdot1.2", "xdot1.4", - "cgimage", - "cmap", - "eps", - "exr", - "fig", - "gd", "gd2", - "gif", - "gtk", - "ico", - "imap", "cmapx", - "imap_np", "cmapx_np", - "ismap", - "jp2", - "jpg", "jpeg", "jpe", - "pct", "pict", - "pdf", - "pic", - "plain", "plain-ext", - "png", - "pov", - "ps", - "ps2", - "psd", - "sgi", - "svg", "svgz", - "tga", - "tif", "tiff", - "tk", - "vml", "vmlz", - "vrml", - "wbmp", - "webp", - "xlib", - "x11" - ) - - private val _head = "digraph %s{".format(name) - private val _node = "\t%s %s" - private val _edge = "\t\t%s -> %s %s" - private val _tail = "}" - private val _body = ArrayBuffer[String]() - - private def attribute(label: String = null, attrs: Map[String, String]): String = { - if (label != null) { - s"[label=$label ${("" /: attrs){ (acc, elem) => s"$acc ${elem._1}=${elem._2}"}}]" - } - else { - s"[${("" /: attrs){ (acc, elem) => s"$acc ${elem._1}=${elem._2}"}}]" - } - } - - /** - * Create a node. - * @param name Unique identifier for the node inside the source.
- * @param label Caption to be displayed (defaults to the node name). - * @param attrs Any additional node attributes (must be strings). - */ - def node(name: String, label: String = null, attrs: Map[String, String]): Unit = { - _body += _node.format(name, attribute(label, attrs)) - } - - /** - * Create an edge between two nodes. - * @param tailName Start node identifier. - * @param headName End node identifier. - * @param label Caption to be displayed near the edge. - * @param attrs Any additional edge attributes (must be strings). - */ - def edge(tailName: String, headName: String, - label: String = null, attrs: Map[String, String]): Unit = { - _body += _edge.format(tailName, headName, attribute(label, attrs)) - } - - private def save(filename: String, directory: String): String = { - val path = s"$directory${File.separator}$filename" - val writer = new PrintWriter(path) - try { - // scalastyle:off println - writer.println(s"${this._head}") - this._body.toArray.foreach { line => writer.println(s"$line") } - writer.println(s"${this._tail}") - writer.flush() - // scalastyle:on println - } finally { - writer.close() - } - path - } - - private def command(engine: String, format: String, filepath: String): String = { - require(ENGINES.contains(engine) == true, s"unknown engine: $engine") - require(FORMATS.contains(format) == true, s"unknown format: $format") - s"$engine -T${format} -O $filepath" - } - - /** - * Render file with Graphviz engine into format. - * @param engine The layout command used for rendering ('dot', 'neato', ...). - * @param format The output format used for rendering ('pdf', 'png', ...). - * @param fileName Name of the DOT source file to render. - * @param path Path to save the Dot source file. - */ - def render(engine: String = "dot", format: String = "pdf", - fileName: String, path: String): Unit = { - val filePath = this.save(fileName, path) - val args = command(engine, format, filePath) - import sys.process._ - try { - args !
- } catch { case _ : Throwable => - val errorMsg = s"""failed to execute "$args", """ + - """"make sure the Graphviz executables are on your system's path""" - throw new RuntimeException(errorMsg) - } - } - } - - /** - * convert shape string to list, internal use only - * @param str shape string - * @return list of string to represent shape - */ - def str2Tuple(str: String): List[String] = { - val re = """\d+""".r - re.findAllIn(str).toList - } - - /** - * convert symbol to Dot object for visualization - * @param symbol symbol to be visualized - * @param title title of the dot graph - * @param shape Map of shapes, str -> shape, given input shapes - * @param nodeAttrs Map of node's attributes - * for example: - * nodeAttrs = Map("shape" -> "oval", "fixedsize" -> "false") - * means to plot the network in "oval" - * @param hideWeights - * if true (default) then inputs with names like `*_weight` - * or `*_bias` will be hidden - * @return Dot object of symbol - */ - def plotNetwork(symbol: Symbol, - title: String = "plot", shape: Map[String, Shape] = null, - nodeAttrs: Map[String, String] = Map[String, String](), - hideWeights: Boolean = true): Dot = { - - val (drawShape, shapeDict) = { - if (shape == null) (false, null) - else { - val internals = symbol.getInternals() - val (_, outShapes, _) = internals.inferShape(shape) - require(outShapes != null, "Input shape is incomplete") - val shapeDict = internals.listOutputs().zip(outShapes).toMap - (true, shapeDict) - } - } - val conf = JSON.parseFull(symbol.toJson) match { - case None => null - case Some(map) => map.asInstanceOf[Map[String, Any]] - } - require(conf != null, "Invalid json") - - require(conf.contains("nodes"), "Invalid json") - val nodes = conf("nodes").asInstanceOf[List[Any]] - - // default attributes of node - val nodeAttr = scala.collection.mutable.Map("shape" -> "box", "fixedsize" -> "true", - "width" -> "1.3", "height" -> "0.8034", "style" -> "filled") - // merge the dict provided by user and the default one - nodeAttrs.foreach { case (k, v) => nodeAttr(k) = v } - val dot = new Dot(name = title) - // color map - val cm = List(""""#8dd3c7"""", """"#fb8072"""", """"#ffffb3"""", - """"#bebada"""", """"#80b1d3"""", """"#fdb462"""", - """"#b3de69"""", """"#fccde5"""") - - // Internal helper to figure out if node should be hidden with hide_weights - def looksLikeWeight(name: String): Boolean = { - if (name.endsWith("_weight") || name.endsWith("_bias") - || name.endsWith("_beta") || name.endsWith("_gamma") - || name.endsWith("_moving_var") || name.endsWith("_moving_mean")) { true } else { false } - } - - // make nodes - val hiddenNodes = scala.collection.mutable.Set[String]() - nodes.foreach { node => - val params = node.asInstanceOf[Map[String, Any]] - val op = params("op").asInstanceOf[String] - val name = params("name").asInstanceOf[String] - val attrs = { - if (params.contains("attrs")) params("attrs").asInstanceOf[Map[String, String]] - else Map[String, String]() - } - // input data - val attr = nodeAttr.clone() - var label = name - var continue = false - op match { - case "null" => { - if (looksLikeWeight(name)) { - if (hideWeights) hiddenNodes.add(name) - continue = true - } - attr("shape") = "oval" // inputs get their own shape - label = name - attr("fillcolor") = cm(0) - } - case "Convolution" => { - val kernel = str2Tuple(attrs("kernel")) - val stride = if (attrs.contains("stride")) str2Tuple(attrs("stride")) else List(1) - label = - """"Convolution\n%s/%s, %s"""".format( - kernel.mkString("x"), stride.mkString("x"),
attrs("num_filter")) - attr("fillcolor") = cm(1) - } - case "FullyConnected" => { - label = s""""FullyConnected\n${attrs("num_hidden")}"""" - attr("fillcolor") = cm(1) - } - case "BatchNorm" => attr("fillcolor") = cm(3) - case "Activation" | "LeakyReLU" => { - label = s""""${op}\n${attrs("act_type")}"""" - attr("fillcolor") = cm(2) - } - case "Pooling" => { - val kernel = str2Tuple(attrs("kernel")) - val stride = if (attrs.contains("stride")) str2Tuple(attrs("stride")) else List(1) - label = - s""""Pooling\n%s, %s/%s"""".format( - attrs("pool_type"), kernel.mkString("x"), stride.mkString("x")) - attr("fillcolor") = cm(4) - } - case "Concat" | "Flatten" | "Reshape" => attr("fillcolor") = cm(5) - case "Softmax" => attr("fillcolor") = cm(6) - case _ => { - attr("fillcolor") = cm(7) - if (op == "Custom") label = attrs("op_type") - } - } - if (!continue) dot.node(name = name , label, attr.toMap) - } - - val outIdx = scala.collection.mutable.Map[String, Int]() - // add edges - nodes.foreach { node => - val params = node.asInstanceOf[Map[String, Any]] - val op = params("op").asInstanceOf[String] - val name = params("name").asInstanceOf[String] - if (op != "null") { - val inputs = params("inputs").asInstanceOf[List[List[Double]]] - for (item <- inputs) { - val inputNode = nodes(item(0).toInt).asInstanceOf[Map[String, Any]] - val inputName = inputNode("name").asInstanceOf[String] - if (!hiddenNodes.contains(inputName)) { - val attrs = scala.collection.mutable.Map("dir" -> "back", "arrowtail" -> "open") - // add shapes - if (drawShape) { - val key = { - if (inputNode("op").asInstanceOf[String] != "null") { - var key = s"${inputName}_output" - if (inputNode.contains("attr")) { - val params = inputNode("attr").asInstanceOf[Map[String, String]] - if (params.contains("num_outputs")) { - if (!outIdx.contains(name)) outIdx(name) = params("num_outputs").toInt - 1 - key += outIdx(name) - outIdx(name) = outIdx(name) - 1 - } - } - key - } else inputName - } - val shape = shapeDict(key).toArray.drop(1) - val label = s""""${shape.mkString("x")}"""" - attrs("label") = label - } - dot.edge(tailName = name, headName = inputName, attrs = attrs.toMap) - } - } - } - } - dot - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala b/scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala deleted file mode 100644 index d63194d48bc5..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet.annotation - -import java.lang.annotation.{ElementType, Retention, Target, _} - -/** - * Experimental: there is a comparably high chance that - * the API will be changed or removed. - */ -@Retention(RetentionPolicy.RUNTIME) -@Target(Array(ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER, - ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE)) -class Experimental {} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/contrib/NDArray.scala b/scala-package/core/src/main/scala/org/apache/mxnet/contrib/NDArray.scala deleted file mode 100644 index e0f7b94fde72..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/contrib/NDArray.scala +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.contrib - -import org.apache.mxnet.NDArray._ -import org.apache.mxnet.AddNDArrayFunctions - -@AddNDArrayFunctions(true) -object NDArray { -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/contrib/Symbol.scala b/scala-package/core/src/main/scala/org/apache/mxnet/contrib/Symbol.scala deleted file mode 100644 index 941276233a29..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/contrib/Symbol.scala +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.contrib - -import org.apache.mxnet.Symbol._ -import org.apache.mxnet.AddSymbolFunctions - -@AddSymbolFunctions(true) -object Symbol { -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/io/MXDataIter.scala b/scala-package/core/src/main/scala/org/apache/mxnet/io/MXDataIter.scala deleted file mode 100644 index 66b7d83cedc8..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/io/MXDataIter.scala +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.io - -import org.apache.mxnet.Base._ -import org.apache.mxnet.DType.DType -import org.apache.mxnet._ -import org.apache.mxnet.IO._ -import org.slf4j.LoggerFactory - -import scala.collection.immutable.ListMap -import scala.collection.mutable.ListBuffer - -/** - * DataIter built in MXNet. - * @param handle the handle to the underlying C++ Data Iterator - */ -private[mxnet] class MXDataIter(private[mxnet] val handle: DataIterHandle, - dataName: String = "data", - labelName: String = "label") - extends DataIter with NativeResource { - - private val logger = LoggerFactory.getLogger(classOf[MXDataIter]) - - // use currentBatch to implement hasNext - // (may be this is not the best way to do this work, - // fix me if any better way found) - private var currentBatch: DataBatch = null - - private val (_provideDataDesc: IndexedSeq[DataDesc], - _provideLabelDesc: IndexedSeq[DataDesc], - _provideData: ListMap[String, Shape], - _provideLabel: ListMap[String, Shape], - _batchSize: Int) = { - if (hasNext) { - iterNext() - val data = currentBatch.data(0) - val label = currentBatch.label(0) - // properties - val res = ( - // TODO: need to allow user to specify Layout - IndexedSeq(new DataDesc(dataName, data.shape, data.dtype, Layout.UNDEFINED)), - IndexedSeq(new DataDesc(labelName, label.shape, label.dtype, Layout.UNDEFINED)), - ListMap(dataName -> data.shape), - ListMap(labelName -> label.shape), - data.shape(0)) - currentBatch.dispose() - reset() - res - } else { - (null, null, null, null, 0) - } - } - - override def nativeAddress: CPtrAddress = handle - - override def nativeDeAllocator: CPtrAddress => MXUint = _LIB.mxDataIterFree - - override val ref: NativeResourceRef = super.register() - - override val bytesAllocated: Long = 0L - - /** - * reset the iterator - */ - override def reset(): Unit = { - currentBatch = null - checkCall(_LIB.mxDataIterBeforeFirst(handle)) - } - - @throws(classOf[NoSuchElementException]) - override def next(): DataBatch = { - if (currentBatch == null) { - iterNext() - } - - if (currentBatch != null) { - val batch = currentBatch - currentBatch = null - batch - } else { - throw new NoSuchElementException - } - } - - /** - * Iterate to next batch - * @return whether the move is successful - */ - private def iterNext(): Boolean = { - val next = new RefInt - checkCall(_LIB.mxDataIterNext(handle, next)) - if (next.value > 0) { - currentBatch = new DataBatch(data = getData(), label = getLabel(), - index = getIndex(), pad = getPad()) - } else { - currentBatch = null - } - next.value > 0 - } - - /** - * get data of current batch - * @return the data of current batch - */ - override def getData(): IndexedSeq[NDArray] = { - val out = new NDArrayHandleRef - checkCall(_LIB.mxDataIterGetData(handle, out)) - IndexedSeq(new NDArray(out.value, writable = false)) - } - - /** - * 
Get label of current batch - * @return the label of current batch - */ - override def getLabel(): IndexedSeq[NDArray] = { - val out = new NDArrayHandleRef - checkCall(_LIB.mxDataIterGetLabel(handle, out)) - IndexedSeq(new NDArray(out.value, writable = false)) - } - - /** - * Get the index of current batch - * @return the index of current batch - */ - override def getIndex(): IndexedSeq[Long] = { - val outIndex = new ListBuffer[Long] - val outSize = new RefLong - checkCall(_LIB.mxDataIterGetIndex(handle, outIndex, outSize)) - outIndex.toIndexedSeq - } - - /** - * get the number of padding examples - * in current batch - * @return number of padding examples in current batch - */ - override def getPad(): MXUint = { - val out = new MXUintRef - checkCall(_LIB.mxDataIterGetPadNum(handle, out)) - out.value - } - - // The name and shape of data provided by this iterator - @deprecated("Please use provideDataDesc instead", "1.3.0") - override def provideData: ListMap[String, Shape] = _provideData - - // The name and shape of label provided by this iterator - @deprecated("Please use provideLabelDesc instead", "1.3.0") - override def provideLabel: ListMap[String, Shape] = _provideLabel - - // Provide type:DataDesc of the data - override def provideDataDesc: IndexedSeq[DataDesc] = _provideDataDesc - - // Provide type:DataDesc of the label - override def provideLabelDesc: IndexedSeq[DataDesc] = _provideLabelDesc - - override def hasNext: Boolean = { - if (currentBatch != null) { - true - } else { - iterNext() - } - } - - override def batchSize: Int = _batchSize -} - -private[mxnet] class MXDataPack(iterName: String, params: Map[String, String]) extends DataPack { - /** - * get data iterator - * @return DataIter - */ - override def iterator: DataIter = { - createIterator(iterName, params) - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/io/NDArrayIter.scala b/scala-package/core/src/main/scala/org/apache/mxnet/io/NDArrayIter.scala deleted file mode 100644 index cda181823205..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/io/NDArrayIter.scala +++ /dev/null @@ -1,337 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.io - -import java.util.NoSuchElementException - -import org.apache.mxnet.Base._ -import org.apache.mxnet.DType.DType -import org.apache.mxnet._ -import org.slf4j.LoggerFactory - -import scala.collection.immutable.ListMap - -/** - * NDArrayIter object in mxnet. Taking NDArray to get dataiter. - * - * @param data Specify the data as well as the name. - * NDArrayIter supports single or multiple data and label. - * @param label Same as data, but is not fed to the model during testing. 
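Whichever concrete DataIter is in play (the MXDataIter above or the NDArrayIter defined next), consumption follows the same hasNext/next/reset protocol. A minimal sketch, with iter standing in for any DataIter:

while (iter.hasNext) {
  val batch = iter.next()
  val data  = batch.data(0)    // first data NDArray of the batch
  val label = batch.label(0)
  // ... run forward/backward on the batch here ...
  batch.dispose()              // release native memory eagerly
}
iter.reset()                   // rewind before the next epoch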
- * @param dataBatchSize Batch Size - * @param shuffle Whether to shuffle the data - * @param lastBatchHandle "pad", "discard" or "roll_over". How to handle the last batch - * - * This iterator will pad, discard or roll over the last batch if - * the size of data does not match batch_size. Roll over is intended - * for training and can cause problems if used for prediction. - */ -class NDArrayIter(data: IndexedSeq[(DataDesc, NDArray)], - label: IndexedSeq[(DataDesc, NDArray)], - private val dataBatchSize: Int, shuffle: Boolean, - lastBatchHandle: String) extends DataIter { - - /** - * @param data Specify the data. Data names will be data_0, data_1, ..., etc. - * @param label Same as data, but is not fed to the model during testing. - * Label names will be label_0, label_1, ..., etc. - * @param dataBatchSize Batch Size - * @param shuffle Whether to shuffle the data - * @param lastBatchHandle "pad", "discard" or "roll_over". How to handle the last batch - * - * This iterator will pad, discard or roll over the last batch if - * the size of data does not match batch_size. Roll over is intended - * for training and can cause problems if used for prediction. - */ - def this(data: IndexedSeq[NDArray], label: IndexedSeq[NDArray] = IndexedSeq.empty, - dataBatchSize: Int = 1, shuffle: Boolean = false, - lastBatchHandle: String = "pad", - dataName: String = "data", labelName: String = "label") { - this(IO.initDataDesc(data, allowEmpty = false, dataName, - if (data == null || data.isEmpty) MX_REAL_TYPE else data(0).dtype, Layout.UNDEFINED), - IO.initDataDesc(label, allowEmpty = true, labelName, - if (label == null || label.isEmpty) MX_REAL_TYPE else label(0).dtype, Layout.UNDEFINED), - dataBatchSize, shuffle, lastBatchHandle) - } - - private val logger = LoggerFactory.getLogger(classOf[NDArrayIter]) - - val (initData: IndexedSeq[(DataDesc, NDArray)], initLabel: IndexedSeq[(DataDesc, NDArray)]) = { - // data should not be null and size > 0 - require(data != null && data.size > 0, - "data should not be null and data.size should not be zero") - - require(label != null, - "label should not be null. 
Use IndexedSeq.empty if there are no labels") - - // shuffle is not supported currently - require(!shuffle, "shuffle is not supported currently") - - // discard final part if lastBatchHandle equals discard - if (lastBatchHandle.equals("discard")) { - val dataSize = data(0)._2.shape(0) - require(dataBatchSize <= dataSize, - "batch_size need to be smaller than data size when not padding.") - val keepSize = dataSize - dataSize % dataBatchSize - val dataList = data.map { case (name, ndArray) => (name, ndArray.slice(0, keepSize)) } - if (!label.isEmpty) { - val labelList = label.map { case (name, ndArray) => (name, ndArray.slice(0, keepSize)) } - (dataList, labelList) - } else { - (dataList, label) - } - } else { - (data, label) - } - } - - val numData = initData(0)._2.shape(0) - val numSource: MXUint = initData.size - private var cursor = -dataBatchSize - - private val (_provideData: ListMap[String, Shape], - _provideLabel: ListMap[String, Shape], - _provideDataDesc: IndexedSeq[DataDesc], - _provideLabelDesc: IndexedSeq[DataDesc]) = { - val pData = ListMap.empty[String, Shape] ++ initData.map(getShape) - val pLabel = ListMap.empty[String, Shape] ++ initLabel.map(getShape) - val pDData = IndexedSeq.empty[DataDesc] ++ initData.map(ele => { - val temp = getShape(ele) - new DataDesc(temp._1, temp._2, ele._1.dtype, ele._1.layout) - }) - val pDLabel = IndexedSeq.empty[DataDesc] ++ initLabel.map(ele => { - val temp = getShape(ele) - new DataDesc(temp._1, temp._2, ele._1.dtype, ele._1.layout) - }) - (pData, pLabel, pDData, pDLabel) - } - - /** - * get shape via dataBatchSize - * @param dataItem - */ - private def getShape(dataItem: (DataDesc, NDArray)): (String, Shape) = { - val len = dataItem._2.shape.size - val newShape = dataItem._2.shape.slice(1, len) - (dataItem._1.name, Shape(Array[Int](dataBatchSize)) ++ newShape) - } - - - /** - * Ignore roll over data and set to start - */ - def hardReset(): Unit = { - cursor = -dataBatchSize - } - - /** - * reset the iterator - */ - override def reset(): Unit = { - if (lastBatchHandle.equals("roll_over") && cursor > numData) { - cursor = -dataBatchSize + (cursor%numData) % dataBatchSize - } else { - cursor = -dataBatchSize - } - } - - override def hasNext: Boolean = { - if (cursor + dataBatchSize < numData) { - true - } else { - false - } - } - - @throws(classOf[NoSuchElementException]) - override def next(): DataBatch = { - if (hasNext) { - cursor += dataBatchSize - new DataBatch(getData(), getLabel(), getIndex(), getPad()) - } else { - throw new NoSuchElementException - } - } - - /** - * handle the last batch - * @param ndArray - * @return - */ - private def _padData(ndArray: NDArray): NDArray = { - val padNum = cursor + dataBatchSize - numData - val shape = Shape(dataBatchSize) ++ ndArray.shape.slice(1, ndArray.shape.size) - // The new NDArray has to be created such that it inherits dtype from the passed in array - val newArray = NDArray.zeros(shape, dtype = ndArray.dtype) - ResourceScope.using() { - val batch = ndArray.slice(cursor, numData) - val padding = ndArray.slice(0, padNum) - newArray.slice(0, dataBatchSize - padNum).set(batch) - newArray.slice(dataBatchSize - padNum, dataBatchSize).set(padding) - newArray - } - } - - private def _getData(data: IndexedSeq[(DataDesc, NDArray)]): IndexedSeq[NDArray] = { - require(cursor < numData, "DataIter needs reset.") - if (data == null) { - null - } else { - if (cursor + dataBatchSize <= numData) { - data.map { case (_, ndArray) => ndArray.slice(cursor, cursor + dataBatchSize) } - } else { - // padding -
data.map { case (_, ndArray) => _padData(ndArray) } - } - } - - /** - * get data of current batch - * @return the data of current batch - */ - override def getData(): IndexedSeq[NDArray] = { - _getData(initData) - } - - /** - * Get label of current batch - * @return the label of current batch - */ - override def getLabel(): IndexedSeq[NDArray] = { - _getData(initLabel) - } - - /** - * the index of current batch - * @return - */ - override def getIndex(): IndexedSeq[Long] = { - cursor.toLong to (cursor + dataBatchSize).toLong - } - - /** - * get the number of padding examples - * in current batch - * @return number of padding examples in current batch - */ - override def getPad(): MXUint = { - if (lastBatchHandle.equals("pad") && cursor + batchSize > numData) { - cursor + batchSize - numData - } else { - 0 - } - } - - - // The name and shape of data provided by this iterator - @deprecated("Please use provideDataDesc instead", "1.3.0") - override def provideData: ListMap[String, Shape] = _provideData - - // The name and shape of label provided by this iterator - @deprecated("Please use provideLabelDesc instead", "1.3.0") - override def provideLabel: ListMap[String, Shape] = _provideLabel - - // Provide type:DataDesc of the data - override def provideDataDesc: IndexedSeq[DataDesc] = _provideDataDesc - - // Provide type:DataDesc of the label - override def provideLabelDesc: IndexedSeq[DataDesc] = _provideLabelDesc - - override def batchSize: Int = dataBatchSize -} - -object NDArrayIter { - - /** - * Builder class for NDArrayIter. - */ - class Builder() { - private var data: IndexedSeq[(DataDesc, NDArray)] = IndexedSeq.empty - private var label: IndexedSeq[(DataDesc, NDArray)] = IndexedSeq.empty - private var dataBatchSize: Int = 1 - private var lastBatchHandle: String = "pad" - - /** - * Add one data input with its name. - * @param name Data name. - * @param data Data nd-array. - * @return The builder object itself. - */ - def addData(name: String, data: NDArray): Builder = { - this.data = this.data ++ IndexedSeq((new DataDesc(name, - data.shape, data.dtype, Layout.UNDEFINED), data)) - this - } - - /** - * Add one label input with its name. - * @param name Label name. - * @param label Label nd-array. - * @return The builder object itself. - */ - def addLabel(name: String, label: NDArray): Builder = { - this.label = this.label ++ IndexedSeq((new DataDesc(name, - label.shape, label.dtype, Layout.UNDEFINED), label)) - this - } - - /** - * Add one data input with its DataDesc - */ - def addDataWithDesc(dataDesc: DataDesc, data: NDArray): Builder = { - this.data = this.data ++ IndexedSeq((dataDesc, data)) - this - } - - /** - * Add one label input with its DataDesc - */ - def addLabelWithDesc(labelDesc: DataDesc, label: NDArray): Builder = { - this.label = this.label ++ IndexedSeq((labelDesc, label)) - this - } - - /** - * Set the batch size of the iterator. - * @param batchSize batch size. - * @return The builder object itself. - */ - def setBatchSize(batchSize: Int): Builder = { - this.dataBatchSize = batchSize - this - } - - /** - * How to handle the last batch. - * @param lastBatchHandle Can be "pad", "discard" or "roll_over". - * @return The builder object itself. - */ - def setLastBatchHandle(lastBatchHandle: String): Builder = { - this.lastBatchHandle = lastBatchHandle - this - } - - /** - * Build the NDArrayIter object. - * @return the built object.
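As a usage sketch for this builder (names and shapes are illustrative; the default "pad" handling is left in place):

val iter = new NDArrayIter.Builder()
  .addData("data", NDArray.ones(Shape(100, 28, 28)))
  .addLabel("softmax_label", NDArray.zeros(Shape(100)))
  .setBatchSize(25)
  .build()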
- */ - def build(): NDArrayIter = { - new NDArrayIter(data, label, dataBatchSize, false, lastBatchHandle) - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/io/PrefetchingIter.scala b/scala-package/core/src/main/scala/org/apache/mxnet/io/PrefetchingIter.scala deleted file mode 100644 index 9cfcd598197c..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/io/PrefetchingIter.scala +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.io - -import org.apache.mxnet._ -import org.slf4j.LoggerFactory -import java.util.concurrent.Semaphore - -import org.apache.mxnet.DType.DType - -import scala.collection.immutable.ListMap - -/** - * Base class for prefetching iterators. Takes one or more DataIters - * and combine them with prefetching. - * - * @param iters list of DataIters - * @param dataNames - * @param labelNames - */ -class PrefetchingIter( - iters: IndexedSeq[DataIter], - dataNames: IndexedSeq[Map[String, String]] = null, - labelNames: IndexedSeq[Map[String, String]] = null) extends DataIter { - - private val logger = LoggerFactory.getLogger(classOf[PrefetchingIter]) - - require(iters.nonEmpty, "Iters length must be greater than 0") - - @deprecated("Please use provideDataDesc instead", "1.3.0") - override def provideData: ListMap[String, Shape] = { - if (dataNames == null) { - iters.map(_.provideData).reduce(_ ++ _) - } else { - iters.map(_.provideData).zip(dataNames).map { case (providedData, names) => - providedData.map { case (oldName, shape) => names(oldName) -> shape } - }.reduceLeft(_ ++ _) - } - } - - @deprecated("Please use provideDataDesc instead", "1.3.0") - override def provideLabel: ListMap[String, Shape] = { - if (labelNames == null) { - iters.map(_.provideLabel).reduce(_ ++ _) - } else { - iters.map(_.provideLabel).zip(labelNames).map { case (providedLabel, names) => - providedLabel.map { case (oldName, shape) => names(oldName) -> shape } - }.reduceLeft(_ ++ _) - } - } - - override def provideDataDesc: IndexedSeq[DataDesc] = { - if (dataNames == null) { - iters.flatMap(_.provideDataDesc) - } else { - iters.map(_.provideDataDesc).zip(dataNames).flatMap { case (providedDataDesc, names) => - providedDataDesc.map(desc => - new DataDesc(names(desc.name), desc.shape, desc.dtype, desc.layout)) - } - } - } - - override def provideLabelDesc: IndexedSeq[DataDesc] = { - if (labelNames == null) { - iters.flatMap(_.provideLabelDesc) - } else { - iters.map(_.provideLabelDesc).zip(labelNames).flatMap { case (providedLabelDesc, names) => - providedLabelDesc.map(desc => - new DataDesc(names(desc.name), desc.shape, desc.dtype, desc.layout)) - } - } - } - - private val _batchSize: Int = this.provideDataDesc.head.shape(0) - private val dataReady: 
IndexedSeq[Semaphore] = - (0 until iters.length).map(i => new Semaphore(0)) - private val dataTaken: IndexedSeq[Semaphore] = - (0 until iters.length).map(i => new Semaphore(1)) - - @volatile private var started: Boolean = true - private var currentBatch: DataBatch = null - private val nextBatch: Array[DataBatch] = (0 until iters.length).map { i => - new DataBatch(null, null, null, 0) - }.toArray - - // thread entry - def prefetchFunc(i: Int): Runnable = new Runnable { - override def run(): Unit = { - while (started) { - dataTaken(i).acquire() - if (started) { - try { - nextBatch(i) = iters(i).next() - } catch { - case ex: NoSuchElementException => nextBatch(i) = null - } - } - dataReady(i).release() - } - } - } - - private val prefetchThreads = - for (i <- 0 until iters.length) yield new Thread(prefetchFunc(i)) - prefetchThreads.foreach(_.start()) - - override def next(): DataBatch = currentBatch - - /** - * reset the iterator - */ - override def reset(): Unit = { - for (e <- dataReady) e.acquire() - for (i <- iters) i.reset() - for (e <- dataTaken) e.release() - } - - override def batchSize: Int = this._batchSize - - /** - * get data of current batch - * @return the data of current batch - */ - override def getData(): IndexedSeq[NDArray] = currentBatch.data - - /** - * Get label of current batch - * @return the label of current batch - */ - override def getLabel(): IndexedSeq[NDArray] = currentBatch.label - - /** - * the index of current batch - * @return - */ - override def getIndex(): IndexedSeq[Long] = currentBatch.index - - /** - * get the number of padding examples - * in current batch - * @return number of padding examples in current batch - */ - override def getPad(): Int = this.currentBatch.pad - - override def hasNext: Boolean = { - for (e <- dataReady) e.acquire() - if (nextBatch(0) == null) { - for (i <- nextBatch) { - assert(i == null, "Number of entry mismatches between iterators") - } - for (e <- dataReady) e.release() - false - } else { - for (batch <- nextBatch) { - assert(batch.pad == nextBatch(0).pad, - "Number of entry mismatches between iterators") - } - val datas = for (batch <- nextBatch) yield batch.data - val labels = for (batch <- nextBatch) yield batch.label - currentBatch = new DataBatch(datas.toIndexedSeq.flatten, - labels.toIndexedSeq.flatten, - nextBatch(0).index, - nextBatch(0).pad) - for (e <- dataTaken) e.release() - true - } - } - - /** - * Stop all its internal prefetching threads. - * The object shall never be used after it is disposed. - */ - def dispose(): Unit = { - started = false - for (e <- dataTaken) e.release() - for (t <- prefetchThreads) t.join() - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/io/ResizeIter.scala b/scala-package/core/src/main/scala/org/apache/mxnet/io/ResizeIter.scala deleted file mode 100644 index 9bc042a7b988..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/io/ResizeIter.scala +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
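A sketch of how the PrefetchingIter above is typically applied, wrapping an existing iterator (baseIter is a placeholder) so that the next batch is produced on a background thread; note that dispose() must be called to stop the prefetch threads:

val prefetched = new PrefetchingIter(IndexedSeq(baseIter))
while (prefetched.hasNext) {
  val batch = prefetched.next()
  // ... consume batch.data / batch.label ...
}
prefetched.dispose()  // stops the internal prefetching threads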
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.io - -import java.util.NoSuchElementException - -import org.apache.mxnet.DType.DType -import org.apache.mxnet._ -import org.slf4j.LoggerFactory - -import scala.collection.immutable.ListMap - - -/** - * Resize a DataIter to given number of batches per epoch. - * May produce incomplete batch in the middle of an epoch due - * to padding from internal iterator. - * - * @param dataIter Internal data iterator. - * @param reSize number of batches per epoch to resize to. - * @param resetInternal whether to reset internal iterator on ResizeIter.reset - */ -class ResizeIter( - dataIter: DataIter, - reSize: Int, - resetInternal: Boolean = true) extends DataIter { - - private val logger = LoggerFactory.getLogger(classOf[ResizeIter]) - - private var currentBatch: DataBatch = null - private var cur = 0 - - - /** - * reset the iterator - */ - override def reset(): Unit = { - cur = 0 - if(resetInternal) { - dataIter.reset() - } - } - - @throws(classOf[NoSuchElementException]) - override def next(): DataBatch = { - if (currentBatch == null) { - iterNext() - } - - if (currentBatch != null) { - val batch = currentBatch - currentBatch = null - batch - } else { - throw new NoSuchElementException - } - } - - private def iterNext(): Boolean = { - if (cur == reSize) { - false - } else { - try { - currentBatch = dataIter.next() - } catch { - case ex: NoSuchElementException => { - dataIter.reset() - currentBatch = dataIter.next() - } - } - cur+=1 - true - } - } - - override def hasNext: Boolean = { - if (currentBatch != null) { - true - } else { - iterNext() - } - } - - /** - * get data of current batch - * @return the data of current batch - */ - override def getData(): IndexedSeq[NDArray] = { - currentBatch.data - } - - /** - * Get label of current batch - * @return the label of current batch - */ - override def getLabel(): IndexedSeq[NDArray] = { - currentBatch.label - } - - /** - * Get the index of current batch - * @return the index of current batch - */ - override def getIndex(): IndexedSeq[Long] = { - currentBatch.index - } - - /** - * Get the number of padding examples - * in current batch - * @return number of padding examples in current batch - */ - override def getPad(): Int = { - currentBatch.pad - } - - override def batchSize: Int = { - dataIter.batchSize - } - - // The name and shape of data provided by this iterator - @deprecated("Please use provideDataDesc instead", "1.3.0") - override def provideData: ListMap[String, Shape] = { - dataIter.provideData - } - - // The name and shape of label provided by this iterator - @deprecated("Please use provideLabelDesc instead", "1.3.0") - override def provideLabel: ListMap[String, Shape] = { - dataIter.provideLabel - } - - // The name and shape of data provided by this iterator - override def provideDataDesc: IndexedSeq[DataDesc] = { - dataIter.provideDataDesc - } - - // The name and shape of label provided by this iterator - override def provideLabelDesc: IndexedSeq[DataDesc] = { - dataIter.provideLabelDesc - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Context.scala 
b/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Context.scala deleted file mode 100644 index 3d397e3fc496..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Context.scala +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.mxnet.javaapi - -import collection.JavaConverters._ -import scala.language.implicitConversions - -/** - * Constructing a context which is used to specify the device and device type that will - * be utilized by the engine. - * - * @param deviceTypeName {'cpu', 'gpu'} String representing the device type - * @param deviceId The device id of the device, needed for GPU - */ -class Context private[mxnet] (val context: org.apache.mxnet.Context) { - - val deviceTypeid: Int = context.deviceTypeid - - def this(deviceTypeName: String, deviceId: Int = 0) - = this(new org.apache.mxnet.Context(deviceTypeName, deviceId)) - - def withScope[T](body: => T): T = context.withScope(body) - - /** - * Return device type of current context. - * @return device_type - */ - def deviceType: String = context.deviceType - - override def toString: String = context.toString - override def equals(other: Any): Boolean = context.equals(other) - override def hashCode: Int = context.hashCode -} - - -object Context { - implicit def fromContext(context: org.apache.mxnet.Context): Context = new Context(context) - implicit def toContext(jContext: Context): org.apache.mxnet.Context = jContext.context - - val cpu: Context = org.apache.mxnet.Context.cpu() - val gpu: Context = org.apache.mxnet.Context.gpu() - val devtype2str = org.apache.mxnet.Context.devtype2str.asJava - val devstr2type = org.apache.mxnet.Context.devstr2type.asJava - def defaultCtx: Context = org.apache.mxnet.Context.defaultCtx -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/DType.scala b/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/DType.scala deleted file mode 100644 index e25cdde7ac73..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/DType.scala +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.mxnet.javaapi - -object DType extends Enumeration { - type DType = org.apache.mxnet.DType.DType - val Float32 = org.apache.mxnet.DType.Float32 - val Float64 = org.apache.mxnet.DType.Float64 - val Float16 = org.apache.mxnet.DType.Float16 - val UInt8 = org.apache.mxnet.DType.UInt8 - val Int32 = org.apache.mxnet.DType.Int32 - val Unknown = org.apache.mxnet.DType.Unknown -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/IO.scala b/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/IO.scala deleted file mode 100644 index d0e10815a1e6..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/IO.scala +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.javaapi -import scala.language.implicitConversions - -class DataDesc private[mxnet] (val dataDesc: org.apache.mxnet.DataDesc) { - - def this(name: String, shape: Shape, dType: DType.DType, layout: String) = - this(new org.apache.mxnet.DataDesc(name, shape, dType, layout)) - - override def toString(): String = dataDesc.toString() -} - -object DataDesc{ - implicit def fromDataDesc(dDesc: org.apache.mxnet.DataDesc): DataDesc = new DataDesc(dDesc) - - implicit def toDataDesc(dataDesc: DataDesc): org.apache.mxnet.DataDesc = dataDesc.dataDesc - - /** - * Get the dimension that corresponds to the batch size. - * @param layout layout string. For example, "NCHW". - * @return An axis indicating the batch_size dimension. When data-parallelism is used, - * the data will be automatically split and concatenate along the batch_size dimension. - * Axis can be -1, which means the whole array will be copied - * for each data-parallelism device. - */ - def getBatchAxis(layout: String): Int = org.apache.mxnet.DataDesc.getBatchAxis(Some(layout)) -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Image.scala b/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Image.scala deleted file mode 100644 index 57a485083f20..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Image.scala +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
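For the getBatchAxis helper above, the batch dimension is wherever 'N' sits in the layout string; a one-line sketch:

val axis = DataDesc.getBatchAxis("NCHW")  // 'N' leads, so this returns 0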
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.javaapi -// scalastyle:off -import java.awt.image.BufferedImage -// scalastyle:on -import java.io.InputStream -import scala.collection.JavaConverters._ - -object Image { - /** - * Decode image with OpenCV. - * Note: return image in RGB by default, instead of OpenCV's default BGR. - * @param buf Buffer containing binary encoded image - * @param flag Convert decoded image to grayscale (0) or color (1). - * @param toRGB Whether to convert decoded image - * to mxnet's default RGB format (instead of opencv's default BGR). - * @return NDArray in HWC format with DType [[DType.UInt8]] - */ - def imDecode(buf: Array[Byte], flag: Int, toRGB: Boolean): NDArray = { - org.apache.mxnet.Image.imDecode(buf, flag, toRGB, None) - } - - /** - * Decode image with OpenCV. - * Note: return image in RGB by default, instead of OpenCV's default BGR. - * @param buf Buffer containing binary encoded image - * @return NDArray in HWC format with DType [[DType.UInt8]] - */ - def imDecode(buf: Array[Byte]): NDArray = { - imDecode(buf, 1, true) - } - - /** - * Same imageDecode with InputStream - * - * @param inputStream the inputStream of the image - * @param flag Convert decoded image to grayscale (0) or color (1). - * @param toRGB Whether to convert decoded image - * @return NDArray in HWC format with DType [[DType.UInt8]] - */ - def imDecode(inputStream: InputStream, flag: Int, toRGB: Boolean): NDArray = { - org.apache.mxnet.Image.imDecode(inputStream, flag, toRGB, None) - } - - /** - * Same imageDecode with InputStream - * - * @param inputStream the inputStream of the image - * @return NDArray in HWC format with DType [[DType.UInt8]] - */ - def imDecode(inputStream: InputStream): NDArray = { - imDecode(inputStream, 1, true) - } - - /** - * Read and decode image with OpenCV. - * Note: return image in RGB by default, instead of OpenCV's default BGR. - * @param filename Name of the image file to be loaded. - * @param flag Convert decoded image to grayscale (0) or color (1). - * @param toRGB Whether to convert decoded image to mxnet's default RGB format - * (instead of opencv's default BGR). - * @return org.apache.mxnet.NDArray in HWC format with DType [[DType.UInt8]] - */ - def imRead(filename: String, flag: Int, toRGB: Boolean): NDArray = { - org.apache.mxnet.Image.imRead(filename, Some(flag), Some(toRGB), None) - } - - /** - * Read and decode image with OpenCV. - * Note: return image in RGB by default, instead of OpenCV's default BGR. - * @param filename Name of the image file to be loaded. - * @return org.apache.mxnet.NDArray in HWC format with DType [[DType.UInt8]] - */ - def imRead(filename: String): NDArray = { - imRead(filename, 1, true) - } - - /** - * Resize image with OpenCV. - * @param src source image in NDArray - * @param w Width of resized image. - * @param h Height of resized image. 
- * @param interp Interpolation method (default=cv2.INTER_LINEAR). - * @return org.apache.mxnet.NDArray - */ - def imResize(src: NDArray, w: Int, h: Int, interp: Integer): NDArray = { - val interpVal = if (interp == null) None else Some(interp.intValue()) - org.apache.mxnet.Image.imResize(src, w, h, interpVal, None) - } - - /** - * Resize image with OpenCV. - * @param src source image in NDArray - * @param w Width of resized image. - * @param h Height of resized image. - * @return org.apache.mxnet.NDArray - */ - def imResize(src: NDArray, w: Int, h: Int): NDArray = { - imResize(src, w, h, null) - } - - /** - * Do a fixed crop on the image - * @param src Src image in NDArray - * @param x0 starting x point - * @param y0 starting y point - * @param w width of the image - * @param h height of the image - * @return cropped NDArray - */ - def fixedCrop(src: NDArray, x0: Int, y0: Int, w: Int, h: Int): NDArray = { - org.apache.mxnet.Image.fixedCrop(src, x0, y0, w, h) - } - - /** - * Convert a NDArray image to a real image - * The time cost will increase if the image resolution is big - * @param src Source image file in RGB - * @return Buffered Image - */ - def toImage(src: NDArray): BufferedImage = { - org.apache.mxnet.Image.toImage(src) - } - - /** - * Draw bounding boxes on the image - * @param src buffered image to draw on - * @param coordinate Contains Map of xmin, xmax, ymin, ymax - * corresponding to top-left and down-right points - * @param names The name set of the bounding box - */ - def drawBoundingBox(src: BufferedImage, - coordinate: java.util.List[ - java.util.Map[java.lang.String, java.lang.Integer]], - names: java.util.List[java.lang.String]): Unit = { - val coord = coordinate.asScala.map( - _.asScala.map{case (name, value) => (name, Integer2int(value))}.toMap).toArray - org.apache.mxnet.Image.drawBoundingBox(src, coord, Option(names.asScala.toArray)) - } - -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Layout.scala b/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Layout.scala deleted file mode 100644 index cfe290c1aff7..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Layout.scala +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
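End to end, the javaapi Image helpers above chain naturally; an illustrative sketch (file path and box coordinates are placeholders, and OpenCV support must be compiled into libmxnet):

val img     = Image.imRead("cat.jpg")          // HWC, UInt8, RGB by default
val resized = Image.imResize(img, 224, 224)
val canvas  = Image.toImage(resized)           // java.awt.image.BufferedImage

val box = new java.util.HashMap[String, Integer]()
box.put("xmin", 10); box.put("ymin", 10); box.put("xmax", 200); box.put("ymax", 200)
Image.drawBoundingBox(canvas, java.util.Arrays.asList(box), java.util.Arrays.asList("cat"))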
- */ -package org.apache.mxnet.javaapi - -/** - * Layout definition of DataDesc - * N Batch size - * C channels - * H Height - * W Width - * T sequence length - * __undefined__ default value of Layout - */ -object Layout { - val UNDEFINED: String = org.apache.mxnet.Layout.UNDEFINED - val NCHW: String = org.apache.mxnet.Layout.NCHW - val NTC: String = org.apache.mxnet.Layout.NTC - val NT: String = org.apache.mxnet.Layout.NT - val N: String = org.apache.mxnet.Layout.N -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/NDArray.scala b/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/NDArray.scala deleted file mode 100644 index 50139ec1be22..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/NDArray.scala +++ /dev/null @@ -1,465 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.javaapi - -import org.apache.mxnet.javaapi.DType.DType - -import collection.JavaConverters._ -import scala.language.implicitConversions - -@AddJNDArrayAPIs(false) -object NDArray extends NDArrayBase { - implicit def fromNDArray(nd: org.apache.mxnet.NDArray): NDArray = new NDArray(nd) - - implicit def toNDArray(jnd: NDArray): org.apache.mxnet.NDArray = jnd.nd - - def waitall(): Unit = org.apache.mxnet.NDArray.waitall() - - /** - * One hot encoding indices into matrix out. - * @param indices An NDArray containing indices of the categorical features. - * @param out The result holder of the encoding. - * @return Same as out. - */ - def onehotEncode(indices: NDArray, out: NDArray): NDArray - = org.apache.mxnet.NDArray.onehotEncode(indices, out) - - /** - * Create an empty uninitialized new NDArray, with specified shape. - * - * @param shape shape of the NDArray. - * @param ctx The context of the NDArray. - * - * @return The created NDArray. - */ - def empty(shape: Shape, ctx: Context, dtype: DType.DType): NDArray - = org.apache.mxnet.NDArray.empty(shape, ctx, dtype) - def empty(ctx: Context, shape: Array[Int]): NDArray - = org.apache.mxnet.NDArray.empty(new Shape(shape), ctx) - def empty(ctx: Context, shape: java.util.List[java.lang.Integer]): NDArray - = org.apache.mxnet.NDArray.empty(new Shape(shape), ctx) - - /** - * Create a new NDArray filled with 0, with specified shape. - * - * @param shape shape of the NDArray. - * @param ctx The context of the NDArray. - * - * @return The created NDArray.
- */ - def zeros(shape: Shape, ctx: Context, dtype: DType.DType): NDArray - = org.apache.mxnet.NDArray.zeros(shape, ctx, dtype) - def zeros(ctx: Context, shape: Array[Int]): NDArray - = org.apache.mxnet.NDArray.zeros(new Shape(shape), ctx) - def zeros(ctx: Context, shape: java.util.List[java.lang.Integer]): NDArray - = org.apache.mxnet.NDArray.zeros(new Shape(shape), ctx) - - /** - * Create a new NDArray filled with 1, with specified shape. - * @param shape shape of the NDArray. - * @param ctx The context of the NDArray. - * @return The created NDArray. - */ - def ones(shape: Shape, ctx: Context, dtype: DType.DType): NDArray - = org.apache.mxnet.NDArray.ones(shape, ctx, dtype) - def ones(ctx: Context, shape: Array[Int]): NDArray - = org.apache.mxnet.NDArray.ones(new Shape(shape), ctx) - def ones(ctx: Context, shape: java.util.List[java.lang.Integer]): NDArray - = org.apache.mxnet.NDArray.ones(new Shape(shape), ctx) - - /** - * Create a new NDArray filled with given value, with specified shape. - * @param shape shape of the NDArray. - * @param value value to be filled with - * @param ctx The context of the NDArray - */ - def full(shape: Shape, value: Float, ctx: Context): NDArray - = org.apache.mxnet.NDArray.full(shape, value, ctx) - - def full(shape: Shape, value: Double, ctx: Context): NDArray - = org.apache.mxnet.NDArray.full(shape, value, ctx) - - def power(lhs: NDArray, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.power(lhs, rhs) - def power(lhs: NDArray, rhs: Float): NDArray = org.apache.mxnet.NDArray.power(lhs, rhs) - def power(lhs: Float, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.power(lhs, rhs) - def power(lhs: NDArray, rhs: Double): NDArray = org.apache.mxnet.NDArray.power(lhs, rhs) - def power(lhs: Double, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.power(lhs, rhs) - - def maximum(lhs: NDArray, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.maximum(lhs, rhs) - def maximum(lhs: NDArray, rhs: Float): NDArray = org.apache.mxnet.NDArray.maximum(lhs, rhs) - def maximum(lhs: Float, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.maximum(lhs, rhs) - def maximum(lhs: NDArray, rhs: Double): NDArray = org.apache.mxnet.NDArray.maximum(lhs, rhs) - def maximum(lhs: Double, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.maximum(lhs, rhs) - - def minimum(lhs: NDArray, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.minimum(lhs, rhs) - def minimum(lhs: NDArray, rhs: Float): NDArray = org.apache.mxnet.NDArray.minimum(lhs, rhs) - def minimum(lhs: Float, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.minimum(lhs, rhs) - def minimum(lhs: NDArray, rhs: Double): NDArray = org.apache.mxnet.NDArray.minimum(lhs, rhs) - def minimum(lhs: Double, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.minimum(lhs, rhs) - - - /** - * Returns the result of element-wise **equal to** (==) comparison operation with broadcasting. - * For each element in input arrays, return 1(true) if corresponding elements are same, - * otherwise return 0(false). - */ - def equal(lhs: NDArray, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.equal(lhs, rhs) - def equal(lhs: NDArray, rhs: Float): NDArray = org.apache.mxnet.NDArray.equal(lhs, rhs) - def equal(lhs: NDArray, rhs: Double): NDArray = org.apache.mxnet.NDArray.equal(lhs, rhs) - - /** - * Returns the result of element-wise **not equal to** (!=) comparison operation - * with broadcasting. - * For each element in input arrays, return 1(true) if corresponding elements are different, - * otherwise return 0(false). 
- */ - def notEqual(lhs: NDArray, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.notEqual(lhs, rhs) - def notEqual(lhs: NDArray, rhs: Float): NDArray = org.apache.mxnet.NDArray.notEqual(lhs, rhs) - def notEqual(lhs: NDArray, rhs: Double): NDArray = org.apache.mxnet.NDArray.notEqual(lhs, rhs) - - /** - * Returns the result of element-wise **greater than** (>) comparison operation - * with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are greater than rhs, - * otherwise return 0(false). - */ - def greater(lhs: NDArray, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.greater(lhs, rhs) - def greater(lhs: NDArray, rhs: Float): NDArray = org.apache.mxnet.NDArray.greater(lhs, rhs) - def greater(lhs: NDArray, rhs: Double): NDArray = org.apache.mxnet.NDArray.greater(lhs, rhs) - - /** - * Returns the result of element-wise **greater than or equal to** (>=) comparison - * operation with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are greater than or equal to rhs, - * otherwise return 0(false). - */ - def greaterEqual(lhs: NDArray, rhs: NDArray): NDArray - = org.apache.mxnet.NDArray.greaterEqual(lhs, rhs) - def greaterEqual(lhs: NDArray, rhs: Float): NDArray - = org.apache.mxnet.NDArray.greaterEqual(lhs, rhs) - def greaterEqual(lhs: NDArray, rhs: Double): NDArray - = org.apache.mxnet.NDArray.greaterEqual(lhs, rhs) - - /** - * Returns the result of element-wise **lesser than** (<) comparison operation - * with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are less than rhs, - * otherwise return 0(false). - */ - def lesser(lhs: NDArray, rhs: NDArray): NDArray = org.apache.mxnet.NDArray.lesser(lhs, rhs) - def lesser(lhs: NDArray, rhs: Float): NDArray = org.apache.mxnet.NDArray.lesser(lhs, rhs) - def lesser(lhs: NDArray, rhs: Double): NDArray = org.apache.mxnet.NDArray.lesser(lhs, rhs) - - /** - * Returns the result of element-wise **lesser than or equal to** (<=) comparison - * operation with broadcasting. - * For each element in input arrays, return 1(true) if lhs elements are - * less than or equal to rhs, otherwise return 0(false). - */ - def lesserEqual(lhs: NDArray, rhs: NDArray): NDArray - = org.apache.mxnet.NDArray.lesserEqual(lhs, rhs) - def lesserEqual(lhs: NDArray, rhs: Float): NDArray - = org.apache.mxnet.NDArray.lesserEqual(lhs, rhs) - def lesserEqual(lhs: NDArray, rhs: Double): NDArray - = org.apache.mxnet.NDArray.lesserEqual(lhs, rhs) - - /** - * Create a new NDArray that copies content from sourceArr. - * @param sourceArr Source data to create NDArray from. - * @param shape shape of the NDArray - * @param ctx The context of the NDArray, defaults to the current default context. - * @return The created NDArray. - */ - def array(sourceArr: java.util.List[java.lang.Float], shape: Shape, ctx: Context = null): NDArray - = org.apache.mxnet.NDArray.array( - sourceArr.asScala.map(ele => Float.unbox(ele)).toArray, shape, ctx) - - /** - * Create a new NDArray that copies content from sourceArr. - * @param sourceArr Source data (list of Doubles) to create NDArray from. - * @param shape shape of the NDArray - * @param ctx The context of the NDArray, defaults to the current default context. - * @return The created NDArray.
- */ - def arrayWithDouble(sourceArr: java.util.List[java.lang.Double], shape: Shape, - ctx: Context = null): NDArray - = org.apache.mxnet.NDArray.array( - sourceArr.asScala.map(ele => Double.unbox(ele)).toArray, shape, ctx) - - /** - * Returns evenly spaced values within a given interval. - * Values are generated within the half-open interval [`start`, `stop`). In other - * words, the interval includes `start` but excludes `stop`. - * @param start Start of interval. - * @param stop End of interval. - * @param step Spacing between values. - * @param repeat Number of times to repeat each element. - * @param ctx Device context. - * @param dType The data type of the `NDArray`. - * @return NDArray of evenly spaced values in the specified range. - */ - def arange(start: Float, stop: Float, step: Float, repeat: Int, - ctx: Context, dType: DType.DType): NDArray = - org.apache.mxnet.NDArray.arange(start, Some(stop), step, repeat, ctx, dType) -} - -/** - * NDArray object in mxnet. - * NDArray is the basic ndarray/tensor-like data structure in mxnet.
- * - * NOTE: NDArray is stored in native memory. Use NDArray within a try-with-resources - * construct or an [[org.apache.mxnet.ResourceScope]] to have it disposed automatically. - * You can also control the lifetime of an NDArray explicitly by calling dispose manually. - * Failing to do either will leak native memory. - * - */ -class NDArray private[mxnet] (val nd: org.apache.mxnet.NDArray) { - - def this(arr: Array[Float], shape: Shape, ctx: Context) = { - this(org.apache.mxnet.NDArray.array(arr, shape, ctx)) - } - - def this(arr: Array[Double], shape: Shape, ctx: Context) = { - this(org.apache.mxnet.NDArray.array(arr, shape, ctx)) - } - - def this(arr: java.util.List[java.lang.Float], shape: Shape, ctx: Context) = { - this(NDArray.array(arr, shape, ctx)) - } - - override def toString: String = nd.toString - - def serialize(): Array[Byte] = nd.serialize() - - /** - * Release the native memory.
- * The NDArrays it depends on will NOT be disposed.
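Getting this contract wrong leaks native memory, so it is worth spelling out. A minimal sketch of manual lifetime management, assuming `Context.cpu()` as above (an [[org.apache.mxnet.ResourceScope]] achieves the same automatically):

    val nd = NDArray.ones(Context.cpu(), Array(2, 2))
    try {
      println(nd.toArray.mkString(", "))   // safe: nd is still live here
    } finally {
      nd.dispose()                         // frees the native memory; nd must not be used afterwards
    }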
- * The object shall never be used after it is disposed. - */ - def dispose(): Unit = nd.dispose() - - /** - * Dispose all NDArrays that helped to construct this array.
- * e.g. (a * b + c).disposeDeps() will dispose a, b, c (including their deps) and a * b - * @return this array - */ - def disposeDeps(): NDArray = nd.disposeDepsExcept() - - /** - * Dispose all NDArrays that helped to construct this array, except those given in the arguments.
- * e.g. (a * b + c).disposeDepsExcept(a, b) - * will dispose c and a * b. - * Note that a and b's dependencies will not be disposed either. - * @param arr the Array of NDArray not to dispose - * @return this array - */ - def disposeDepsExcept(arr: Array[NDArray]): NDArray = - nd.disposeDepsExcept(arr.map(NDArray.toNDArray): _*) - - /** - * Return a sliced NDArray that shares memory with current one. - * NDArray only supports contiguous slicing along axis 0 - * - * @param start Starting index of slice. - * @param stop Finishing index of slice. - * - * @return a sliced NDArray that shares memory with current one. - */ - def slice(start: Int, stop: Int): NDArray = nd.slice(start, stop) - - /** - * Return a sliced NDArray at the ith position of axis0 - * @param i position along axis 0 - * @return a sliced NDArray that shares memory with current one. - */ - def slice(i: Int): NDArray = nd.slice(i) - - /** - * Return a sub NDArray that shares memory with current one. - * the first axis will be rolled up, so its shape differs from that of slice(i, i+1) - * @param idx index of sub array. - */ - def at(idx: Int): NDArray = nd.at(idx) - - def T: NDArray = nd.T - - /** - * Get data type of current NDArray. - * @return class representing type of current ndarray - */ - def dtype: DType = nd.dtype - - /** - * Return a copy of the current array converted to the specified type. - * @param dtype Desired type of result array. - * @return A copy of array content. - */ - def asType(dtype: DType): NDArray = nd.asType(dtype) - - /** - * Return a reshaped NDArray that shares memory with current one. - * @param dims New shape. - * - * @return a reshaped NDArray that shares memory with current one. - */ - def reshape(dims: Array[Int]): NDArray = nd.reshape(dims) - - /** - * Block until all pending write operations on the current NDArray are finished. - * This function will return when all the pending writes to the current - * NDArray finish. There can still be pending reads going on when the - * function returns. - */ - def waitToRead(): Unit = nd.waitToRead() - - /** - * Get context of current NDArray. - * @return The context of current NDArray.
- */ - def context: Context = nd.context - - /** - * Set the values of the NDArray - * @param value Value to set - * @return Current NDArray - */ - def set(value: Float): NDArray = nd.set(value) - def set(value: Double): NDArray = nd.set(value) - def set(other: NDArray): NDArray = nd.set(other) - def set(other: Array[Float]): NDArray = nd.set(other) - def set(other: Array[Double]): NDArray = nd.set(other) - - def add(other: NDArray): NDArray = this.nd + other.nd - def add(other: Float): NDArray = this.nd + other - def add(other: Double): NDArray = this.nd + other - def addInplace(other: NDArray): NDArray = this.nd += other - def addInplace(other: Float): NDArray = this.nd += other - def addInplace(other: Double): NDArray = this.nd += other - def subtract(other: NDArray): NDArray = this.nd - other - def subtract(other: Float): NDArray = this.nd - other - def subtract(other: Double): NDArray = this.nd - other - def subtractInplace(other: NDArray): NDArray = this.nd -= other - def subtractInplace(other: Float): NDArray = this.nd -= other - def subtractInplace(other: Double): NDArray = this.nd -= other - def multiply(other: NDArray): NDArray = this.nd * other - def multiply(other: Float): NDArray = this.nd * other - def multiply(other: Double): NDArray = this.nd * other - def multiplyInplace(other: NDArray): NDArray = this.nd *= other - def multiplyInplace(other: Float): NDArray = this.nd *= other - def multiplyInplace(other: Double): NDArray = this.nd *= other - def div(other: NDArray): NDArray = this.nd / other - def div(other: Float): NDArray = this.nd / other - def div(other: Double): NDArray = this.nd / other - def divInplace(other: NDArray): NDArray = this.nd /= other - def divInplace(other: Float): NDArray = this.nd /= other - def divInplace(other: Double): NDArray = this.nd /= other - def pow(other: NDArray): NDArray = this.nd ** other - def pow(other: Float): NDArray = this.nd ** other - def pow(other: Double): NDArray = this.nd ** other - def powInplace(other: NDArray): NDArray = this.nd **= other - def powInplace(other: Float): NDArray = this.nd **= other - def powInplace(other: Double): NDArray = this.nd **= other - def mod(other: NDArray): NDArray = this.nd % other - def mod(other: Float): NDArray = this.nd % other - def mod(other: Double): NDArray = this.nd % other - def modInplace(other: NDArray): NDArray = this.nd %= other - def modInplace(other: Float): NDArray = this.nd %= other - def modInplace(other: Double): NDArray = this.nd %= other - def greater(other: NDArray): NDArray = this.nd > other - def greater(other: Float): NDArray = this.nd > other - def greater(other: Double): NDArray = this.nd > other - def greaterEqual(other: NDArray): NDArray = this.nd >= other - def greaterEqual(other: Float): NDArray = this.nd >= other - def greaterEqual(other: Double): NDArray = this.nd >= other - def lesser(other: NDArray): NDArray = this.nd < other - def lesser(other: Float): NDArray = this.nd < other - def lesser(other: Double): NDArray = this.nd < other - def lesserEqual(other: NDArray): NDArray = this.nd <= other - def lesserEqual(other: Float): NDArray = this.nd <= other - def lesserEqual(other: Double): NDArray = this.nd <= other - - /** - * Return a copied flat java array of current array (row-major). - * @return A copy of array content. - */ - def toArray: Array[Float] = nd.toArray - - /** - * Return a copied flat java array of current array (row-major). - * @return A copy of array content. 
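The instance-level operators above return new arrays, while the `*Inplace` variants mutate the receiver. A small sketch of how they chain, under the same assumptions as the earlier examples:

    val x = NDArray.zeros(Context.cpu(), Array(3))
    x.set(1f)                               // fill in place: [1, 1, 1]
    val y = x.add(2f).multiply(3f)          // (1 + 2) * 3 = 9, element-wise
    val mask = y.greater(5f)                // 1.0 where the comparison holds
    println(y.toArray.mkString(", "))       // 9.0, 9.0, 9.0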
- */ - def toFloat64Array: Array[Double] = nd.toFloat64Array - - /** - * Return a CPU scalar(float) of current ndarray. - * This ndarray must have shape (1,) - * - * @return The scalar representation of the ndarray. - */ - def toScalar: Float = nd.toScalar - - /** - * Return a CPU scalar(double) of current ndarray. - * This ndarray must have shape (1,) - * - * @return The scalar representation of the ndarray. - */ - def toFloat64Scalar: Double = nd.toFloat64Scalar - - /** - * Copy the content of current array to other. - * - * @param other Target NDArray we want to copy data to. - * @return The copy target NDArray - */ - def copyTo(other: NDArray): NDArray = nd.copyTo(other) - - /** - * Copy the content of current array to a new NDArray in the context. - * - * @param ctx Target context we want to copy data to. - * @return The copy target NDArray - */ - def copyTo(ctx: Context): NDArray = nd.copyTo(ctx) - - /** - * Clone the current array - * @return the copied NDArray in the same context - */ - def copy(): NDArray = copyTo(this.context) - - /** - * Get shape of current NDArray. - * @return an array representing shape of current ndarray - */ - def shape: Shape = nd.shape - - - def size: Int = shape.product - - /** - * Return an `NDArray` that lives in the target context. If the array - * is already in that context, `this` is returned. Otherwise, a copy is made. - * @param context The target context we want the return value to live in. - * @return A copy or `this` as an `NDArray` that lives in the target context. - */ - def asInContext(context: Context): NDArray = nd.asInContext(context) - - override def equals(obj: Any): Boolean = nd.equals(obj) - override def hashCode(): Int = nd.hashCode -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Shape.scala b/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Shape.scala deleted file mode 100644 index b795fe31f726..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/javaapi/Shape.scala +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements.  See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License.  You may obtain a copy of the License at - * - *    http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.mxnet.javaapi - -import collection.JavaConverters._ -import scala.language.implicitConversions - -/** - * Shape of [[NDArray]] or other data - */ - -class Shape private[mxnet] (val shape: org.apache.mxnet.Shape) { - def this(dims: java.util.List[java.lang.Integer]) - = this(new org.apache.mxnet.Shape(dims.asScala.map(Int.unbox))) - def this(dims: Array[Int]) = this(new org.apache.mxnet.Shape(dims)) - - def apply(dim: Int): Int = shape.apply(dim) - def get(dim: Int): Int = apply(dim) - def size: Int = shape.size - def length: Int = shape.length - def drop(dim: Int): Shape = shape.drop(dim) - def slice(from: Int, end: Int): Shape = shape.slice(from, end) - def product: Int = shape.product - def head: Int = shape.head - - def toArray: Array[Int] = shape.toArray - def toVector: java.util.List[Int] = shape.toVector.asJava - - override def toString(): String = shape.toString - override def equals(o: Any): Boolean = shape.equals(o) - override def hashCode(): Int = shape.hashCode() -} - -object Shape { - implicit def fromShape(shape: org.apache.mxnet.Shape): Shape = new Shape(shape) - - implicit def toShape(jShape: Shape): org.apache.mxnet.Shape = jShape.shape -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/module/BaseModule.scala b/scala-package/core/src/main/scala/org/apache/mxnet/module/BaseModule.scala deleted file mode 100644 index f2f4c20b8833..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/module/BaseModule.scala +++ /dev/null @@ -1,727 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements.  See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License.  You may obtain a copy of the License at - * - *    http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.module - -import java.io.IOException - -import org.apache.mxnet.optimizer.SGD -import org.apache.mxnet._ -import org.slf4j.LoggerFactory -import org.slf4j.Logger - -import scala.annotation.varargs -import scala.collection.mutable.ArrayBuffer - -object BaseModule { - /** - * Check that all input names are in symbol's arguments. - */ - @throws(classOf[IllegalArgumentException]) - private[module] def _checkInputNames(symbol: Symbol, names: IndexedSeq[String], - typeName: String, throws: Boolean, logger: Logger): Unit = { - val args = symbol.listArguments() - for (name <- names) { - if (!args.contains(name)) { - val candidates = args.filter(arg => - !arg.endsWith("_weight") && !arg.endsWith("_bias") - && !arg.endsWith("_gamma") && !arg.endsWith("_beta")) - val msg = s"You created Module with Module(..., ${typeName}_names=${names.mkString})" + - s" but input with name \'${name}\' is not found in symbol.listArguments(). " + - s"Did you mean one of:\n${candidates.mkString("\n\t")}" - if (throws) throw new IllegalArgumentException(msg) - else logger.warn(msg) - } - } - } -} - -/** - * The base class of a module. A module represents a computation component. The design
The design - * purpose of a module is that it abstracts a computation "machine", on which one can run forward, - * backward, update parameters, etc. We aim to make the APIs easy to use, especially in the - * case when we need to use the imperative API to work with multiple modules (e.g. stochastic - * depth network). - * - * A module has several states: - * - * - Initial state. Memory is not allocated yet, not ready for computation yet. - * - Binded. Shapes for inputs, outputs, and parameters are all known, memory allocated, - * ready for computation. - * - Parameter initialized. For modules with parameters, doing computation before initializing - * the parameters might result in undefined outputs. - * - Optimizer installed. An optimizer can be installed to a module. After this, the parameters - * of the module can be updated according to the optimizer after gradients are computed - * (forward-backward). - * - * In order for a module to interact with others, it should be able to report the - * following information in its raw stage (before being binded): - * - * - `data_names`: list of string indicating the names of required data. - * - `output_names`: list of string indicating the names of required outputs. - * - * And also the following richer information after binded: - * - * - state information - *   - `binded`: `bool`, indicating whether the memory buffers needed for computation - *     have been allocated. - *   - `forTraining`: whether the module is binded for training (if binded). - *   - `paramsInitialized`: `bool`, indicating whether the parameters of this module - *     have been initialized. - *   - `optimizerInitialized`: `bool`, indicating whether an optimizer is defined - *     and initialized. - *   - `inputsNeedGrad`: `bool`, indicating whether gradients with respect to the - *     input data are needed. Might be useful when implementing composition of modules. - * - * - input/output information - *   - `dataShapes`: a list of `(name, shape)`. In theory, since the memory is allocated, - *     we could directly provide the data arrays. But in the case of data parallelization, - *     the data arrays might not be of the same shape as viewed from the external world. - *   - `labelShapes`: a list of `(name, shape)`. This might be `[]` if the module does - *     not need labels (e.g. it does not contain a loss function at the top), or a module - *     is not binded for training. - *   - `outputShapes`: a list of `(name, shape)` for outputs of the module. - * - * - parameters (for modules with parameters) - *   - `getParams()`: return a tuple `(argParams, auxParams)`. Each of those - *     is a dictionary of name to `NDArray` mapping. Those `NDArray`s always live on - *     CPU. The actual parameters used for computing might live on other devices (GPUs), - *     this function will retrieve (a copy of) the latest parameters. Therefore, modifying - *     the returned dictionaries does not affect the parameters used for computation. - *   - `setParams(argParams, auxParams)`: assign parameters to the devices - *     doing the computation. - *   - `initParams(...)`: a more flexible interface to assign or initialize the parameters. - * - * - setup - *   - `bind()`: prepare environment for computation. - *   - `initOptimizer()`: install optimizer for parameter updating. - * - * - computation - *   - `forward(dataBatch)`: forward operation. - *   - `backward(outGrads=None)`: backward operation. - *   - `update()`: update parameters according to installed optimizer. - *   - `getOutputs()`: get outputs of the previous forward operation. - *   - `getInputGrads()`: get the gradients with respect to the inputs computed - *     in the previous backward operation.
- *   - `updateMetric(metric, labels)`: update the performance metric using the results - *     of the previous forward computation. - * - * - other properties (mostly for backward compatibility) - *   - `symbol`: the underlying symbolic graph for this module (if any) - *     This property is not necessarily constant. For example, for `BucketingModule`, - *     this property is simply the *current* symbol being used. For other modules, - *     this value might not be well defined. - * - * When those intermediate-level APIs are implemented properly, the following - * high-level APIs become automatically available for a module: - * - * - `fit`: train the module parameters on a data set - * - `predict`: run prediction on a data set and collect outputs - * - `score`: run prediction on a data set and evaluate performance - */ -abstract class BaseModule { - private val logger = LoggerFactory.getLogger(classOf[BaseModule]) - - private[module] var binded: Boolean = false - private[module] var forTraining: Boolean = false - private[module] var inputsNeedGrad: Boolean = false - private[module] var paramsInitialized: Boolean = false - private[module] var optimizerInitialized: Boolean = false - private[module] var symbol: Symbol = null - private[module] var execGroup: DataParallelExecutorGroup = null - private[module] var argParams: Map[String, NDArray] = null - private[module] var auxParams: Map[String, NDArray] = null - - // High Level API - def getSymbol: Symbol = this.symbol - - // A convenient function that calls both `forward` and `backward`. - def forwardBackward(dataBatch: DataBatch): Unit = { - forward(dataBatch, isTrain = Option(true)) - backward() - } - - /** - * Run prediction on `evalData` and evaluate the performance according to `evalMetric`. - * @param evalData The `DataIter` to run evaluation on. - * @param evalMetric The `EvalMetric` to accumulate. - * @param numBatch Number of batches to run. Default is `Integer.MAX_VALUE`, - *                 indicating run until the `DataIter` finishes. - * @param batchEndCallback Could also be a list of functions. - * @param reset Default `True`, - *              indicating whether we should reset `evalData` before starting evaluation. - * @param epoch Default 0. For compatibility, this will be passed to callbacks (if any). - *              During training, this will correspond to the training epoch number. - */ - def score(evalData: DataIter, evalMetric: EvalMetric, - numBatch: Int = Integer.MAX_VALUE, - batchEndCallback: Option[BatchEndCallback] = None, - scoreEndCallback: Option[BatchEndCallback] = None, - reset: Boolean = true, epoch: Int = 0): EvalMetric = { - require(evalData != null && evalMetric != null, "evalData and evalMetric must be defined") - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - - if (reset) { - evalData.reset() - } - - evalMetric.reset() - - var nBatch = 0 - while (evalData.hasNext && nBatch < numBatch) { - val evalBatch = evalData.next() - - forward(evalBatch, isTrain = Option(false)) - updateMetric(evalMetric, evalBatch.label) - - batchEndCallback.foreach(callback => { - callback.invoke(epoch, nBatch, evalMetric) - }) - - evalBatch.dispose() - - nBatch += 1 - } - - scoreEndCallback.foreach(callback => { - callback.invoke(epoch, nBatch, evalMetric) - }) - - evalMetric - } - - /** - * Run prediction and collect the outputs. - * @param evalData The `DataIter` to run prediction on. - * @param numBatch Default is -1, indicating running all the batches in the data iterator. - * @param reset Default is `True`, indicating whether we should reset the data iter before - *              starting prediction.
- * @return The return value will be a nested list like - *         `[ [out1_batch1, out2_batch1, ...], [out1_batch2, out2_batch2, ...] ]` - *         This mode is useful because in some cases (e.g. bucketing), - *         the module does not necessarily produce the same number of outputs. - */ - def predictEveryBatch(evalData: DataIter, numBatch: Int = -1, reset: Boolean = true) - : IndexedSeq[IndexedSeq[NDArray]] = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - if (reset) { - evalData.reset() - } - val outputList = ArrayBuffer.empty[IndexedSeq[NDArray]] - - var nBatch = 0 - while (evalData.hasNext && nBatch != numBatch) { - val evalBatch = evalData.next() - outputList.append(predict(evalBatch)) - evalBatch.dispose() - nBatch += 1 - } - - outputList - } - - def predict(batch: DataBatch): IndexedSeq[NDArray] = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - forward(batch, isTrain = Option(false)) - val pad = batch.pad - getOutputsMerged().map(out => { - val withoutPadding = out.slice(0, out.shape(0)-pad) - val copied = withoutPadding.copy() - withoutPadding.dispose() - copied - }) - } - - /** - * Run prediction and collect the outputs. - * @param evalData The `DataIter` to run inference on. - * @param numBatch Default is -1, indicating running all the batches in the data iterator. - * @param reset Default is `True`, indicating whether we should reset the data iter before - *              starting prediction. - * @return The return value will be a list `[out1, out2, out3]`. - *         The concatenation process will be like - *         {{{ - *         outputBatches = [ - *           [a1, a2, a3], // batch a - *           [b1, b2, b3]  // batch b - *         ] - *         result = [ - *           NDArray, // [a1, b1] - *           NDArray, // [a2, b2] - *           NDArray, // [a3, b3] - *         ] - *         }}} - *         where each element is the concatenation of the outputs for all the mini-batches. - */ - def predict(evalData: DataIter, numBatch: Int = -1, reset: Boolean = true) - : IndexedSeq[NDArray] = { - val outputBatches = predictEveryBatch(evalData, numBatch, reset) - val numOutputs = outputBatches.head.size - outputBatches.foreach(out => - require(out.size == numOutputs, - s"Cannot merge batches, as num of outputs $numOutputs is not the same " + - s"in mini-batches (${out.size})." + - "Maybe bucketing is used?") - ) - val oBT = outputBatches.transpose - val concatenatedOutput = oBT.map(out => NDArray.concatenate(out)) - outputBatches.foreach(_.foreach(_.dispose())) - concatenatedOutput - } - - // Symbol information - // A list of names for data required by this module. - def dataNames: IndexedSeq[String] - - // A list of names for the outputs of this module. - def outputNames: IndexedSeq[String] - - // Input/Output information - // A list of (name, shape) pairs specifying the data inputs to this module. - def dataShapes: IndexedSeq[DataDesc] - - /** - * A list of (name, shape) pairs specifying the label inputs to this module. - * If this module does not accept labels -- either it is a module without loss - * function, or it is not binded for training, then this should return an empty - * list `[]`. - */ - def labelShapes: IndexedSeq[DataDesc] - - // A list of (name, shape) pairs specifying the outputs of this module. - def outputShapes: IndexedSeq[(String, Shape)] - - // Parameters of a module - /** - * Get parameters; these are potentially copies of the actual parameters used - * for computation on the device. - * @return `(argParams, auxParams)`, a pair of dictionaries mapping names to values.
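The two prediction variants differ only in whether per-batch outputs are concatenated. A hedged sketch, where `module` and `valIter` are hypothetical stand-ins for a concrete `BaseModule` and a `DataIter`:

    val perBatch = module.predictEveryBatch(valIter, numBatch = 10)  // outputs grouped per batch
    val merged   = module.predict(valIter)                           // one concatenated NDArray per output
    merged.foreach(out => println(out.shape))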
- */ - def getParams: (Map[String, NDArray], Map[String, NDArray]) - - /** - * Initialize the parameters and auxiliary states. - * @param initializer Called to initialize parameters if needed. - * @param argParams If not null, should be a dictionary of existing argParams; - *                  initialization will be copied from it. - * @param auxParams If not null, should be a dictionary of existing auxParams; - *                  initialization will be copied from it. - * @param allowMissing If true, params could contain missing values, and the initializer - *                     will be called to fill those missing params. - * @param forceInit If true, will force re-initialize even if already initialized. - * @param allowExtra Whether to allow extra parameters that are not needed by the symbol. - *                   If true, no error will be thrown when argParams or auxParams - *                   contain extra parameters that are not needed by the executor. - */ - def initParams(initializer: Initializer = new Uniform(0.01f), - argParams: Map[String, NDArray] = null, - auxParams: Map[String, NDArray] = null, - allowMissing: Boolean = false, - forceInit: Boolean = false, - allowExtra: Boolean = false): Unit - - /** - * Assign parameter and aux state values. - * @param argParams Dictionary of name to value (`NDArray`) mapping. - * @param auxParams Dictionary of name to value (`NDArray`) mapping. - * @param allowMissing If true, params could contain missing values, and the initializer - *                     will be called to fill those missing params. - * @param forceInit If true, will force re-initialize even if already initialized. - * @param allowExtra Whether to allow extra parameters that are not needed by the symbol. - *                   If true, no error will be thrown when argParams or auxParams - *                   contain extra parameters that are not needed by the executor. - */ - def setParams(argParams: Map[String, NDArray], - auxParams: Map[String, NDArray], - allowMissing: Boolean = false, - forceInit: Boolean = true, - allowExtra: Boolean = false): Unit = { - initParams(initializer = null, argParams, auxParams, - allowMissing, forceInit, allowExtra) - } - - /** - * Save model parameters to file. - * @param fname Path to output param file. - */ - def saveParams(fname: String): Unit = { - val (argParams, auxParams) = getParams - val saveDict = ( - argParams.map { case (k, v) => (s"arg:$k", v.asInContext(Context.cpu())) } - ++ auxParams.map { case (k, v) => (s"aux:$k", v.asInContext(Context.cpu())) } - ) - NDArray.save(fname, saveDict) - } - - /** - * Load model parameters from file. - * @param fname Path to input param file. - * @throws IOException if param file is invalid - */ - @throws(classOf[IOException]) - def loadParams(fname: String): Unit = { - val saveDict = NDArray.load(fname) - val argParams = scala.collection.mutable.HashMap.empty[String, NDArray] - val auxParams = scala.collection.mutable.HashMap.empty[String, NDArray] - (saveDict._1 zip saveDict._2) foreach { case (key, value) => - key.split(":", 2) match { - case Array(argType, name) if argType == "arg" => argParams.put(name, value) - case Array(argType, name) if argType == "aux" => auxParams.put(name, value) - case _ => throw new IOException("Invalid param file " + fname) - } - } - setParams(argParams.toMap, auxParams.toMap) - } - - /** - * Train the module parameters. - * @param trainData Training `DataIter`. - * @param evalData If not `None`, will be used as validation set and evaluate - *                 the performance after each epoch. - * @param numEpoch Number of epochs to run training.
- * @param fitParams Extra parameters for training. - */ - def fit(trainData: DataIter, evalData: Option[DataIter] = None, numEpoch: Int = 1, - fitParams: FitParams = new FitParams): Unit = { - require(fitParams != null, "Undefined fitParams") - require(numEpoch > 0, s"Invalid number of epochs $numEpoch") - bind(dataShapes = trainData.provideDataDesc, labelShapes = Option(trainData.provideLabelDesc), - forTraining = true, forceRebind = fitParams.forceRebind) - fitParams.monitor.foreach(installMonitor) - initParams(fitParams.initializer, argParams, auxParams, - fitParams.allowMissing, fitParams.forceInit) - initOptimizer(fitParams.kvstore, fitParams.optimizer) - - val valMetric = fitParams.validationMetric.getOrElse(fitParams.evalMetric) - - // training loop - for (epoch <- fitParams.beginEpoch until numEpoch) { - val tic = System.currentTimeMillis - fitParams.evalMetric.reset() - - var nBatch = 0 - while (trainData.hasNext) { - val dataBatch = trainData.next() - - fitParams.monitor.foreach(_.tic()) - forwardBackward(dataBatch) - update() - updateMetric(fitParams.evalMetric, dataBatch.label) - fitParams.monitor.foreach(_.tocPrint()) - - fitParams.batchEndCallback.foreach(callback => - callback.invoke(epoch, nBatch, fitParams.evalMetric) - ) - - dataBatch.dispose() - - nBatch += 1 - } - - // one epoch of training is finished - val (name, value) = fitParams.evalMetric.get - logger.info(s"Epoch[$epoch] Train-${name.head}=${value.head}") - val toc = System.currentTimeMillis - logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") - - // sync aux params across devices - val (argParamsSync, auxParamsSync) = getParams - setParams(argParamsSync, auxParamsSync) - - fitParams.epochEndCallback.foreach(callback => - callback.invoke(epoch, symbol, argParamsSync, auxParamsSync) - ) - - // evaluation on validation set - evalData.foreach(data => { - val res = score(data, valMetric, - scoreEndCallback = fitParams.evalEndCallback, - batchEndCallback = fitParams.evalBatchEndCallback, epoch = epoch) - val (name, value) = res.get - logger.info(s"Epoch[$epoch] Validation-${name.head}=${value.head}") - }) - - // end of 1 epoch, reset the data-iter for another epoch - trainData.reset() - } - } - - // Install monitor on all executors - def installMonitor(monitor: Monitor): Unit - - // Computations - /** - * Forward computation. - * @param dataBatch Could be anything with a similar API implemented. - * @param isTrain Default is `None`, which means `isTrain` takes the value of `this.forTraining`. - */ - def forward(dataBatch: DataBatch, isTrain: Option[Boolean] = None): Unit - - /** - * Forward computation. - * @param dataBatch a batch of data. - * @param isTrain Whether it is for training or not. - */ - def forward(dataBatch: DataBatch, isTrain: Boolean): Unit = { - forward(dataBatch, Option(isTrain)) - } - - /** - * Backward computation. - * @param outGrads Gradient on the outputs to be propagated back. - *                 This parameter is only needed when bind is called - *                 on outputs that are not a loss function. - */ - def backward(outGrads: Array[NDArray] = null): Unit - - /** - * Get outputs of the previous forward computation. - * @return In the case when data-parallelism is used, - *         the outputs will be merged from multiple devices, - *         as if they came from a single executor. - *         The results will look like `[out1, out2]` - */ - def getOutputsMerged(): IndexedSeq[NDArray] - - /** - * Get outputs of the previous forward computation.
- * @return In the case when data-parallelism is used, - *         the outputs will be collected from multiple devices. - *         The results will look like `[ [out1_dev1, out1_dev2], [out2_dev1, out2_dev2] ]`, - *         those `NDArray` might live on different devices. - */ - def getOutputs(): IndexedSeq[IndexedSeq[NDArray]] - - /** - * Get the gradients with respect to the inputs, computed in the previous backward computation. - * @return In the case when data-parallelism is used, - *         the grads will be merged from multiple devices, - *         as if they came from a single executor. - *         The results will look like `[grad1, grad2]` - */ - def getInputGradsMerged(): IndexedSeq[NDArray] - - /** - * Get the gradients with respect to the inputs, computed in the previous backward computation. - * @return In the case when data-parallelism is used, - *         the grads will be collected from multiple devices. - *         The results will look like `[ [grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2] ]`, - *         those `NDArray` might live on different devices. - */ - def getInputGrads(): IndexedSeq[IndexedSeq[NDArray]] - - // Update parameters according to the installed optimizer and the gradients computed - // in the previous forward-backward batch. - def update(): Unit - - /** - * Evaluate and accumulate evaluation metric on outputs of the last forward computation. - * @param evalMetric The metric to update. - * @param labels Typically `DataBatch.label`. - */ - def updateMetric(evalMetric: EvalMetric, labels: IndexedSeq[NDArray]): Unit - - // module setup - /** - * Bind the symbols to construct executors. - * This is necessary before one can perform computation with the module. - * @param dataShapes Typically `DataIter.provideData`. - * @param labelShapes Typically `DataIter.provideLabel`. - * @param forTraining Default is `True`. Whether the executors should be binded for training. - * @param inputsNeedGrad Default is `False`. - *                       Whether the gradients with respect to the input data need to be computed. - *                       Typically this is not needed. - *                       But this might be needed when implementing composition of modules. - * @param forceRebind Default is `False`. This function does nothing - *                    if the executors are already binded. But with this `True`, - *                    the executors will be forced to rebind. - * @param sharedModule Default is `None`. This is used in bucketing. When not `None`, - *                     the shared module essentially corresponds to a different bucket - *                     -- a module with different symbol but with the same sets of parameters - *                     (e.g. unrolled RNNs with different lengths). - * @param gradReq Requirement for gradient accumulation (globally). - *                Can be 'write', 'add', or 'null' (default to 'write'). - */ - def bind(dataShapes: IndexedSeq[DataDesc], labelShapes: Option[IndexedSeq[DataDesc]] = None, - forTraining: Boolean = true, inputsNeedGrad: Boolean = false, - forceRebind: Boolean = false, sharedModule: Option[BaseModule] = None, - gradReq: String = "write"): Unit - - - /** - * Bind the symbols to construct executors. - * This is necessary before one can perform computation with the module. - * @param forTraining Default is `True`. Whether the executors should be binded for training. - * @param inputsNeedGrad Default is `False`. - *                       Whether the gradients with respect to the input data need to be computed. - *                       Typically this is not needed. - *                       But this might be needed when implementing composition of modules. - * @param forceRebind Default is `False`. This function does nothing - *                    if the executors are already binded. But with this `True`, - *                    the executors will be forced to rebind.
- * @param dataShape Typically `DataIter.provideData`. - */ - @varargs def bind(forTraining: Boolean, inputsNeedGrad: Boolean, - forceRebind: Boolean, dataShape: DataDesc*): Unit = { - bind(dataShape.toVector, None, forTraining, inputsNeedGrad, forceRebind, None) - } - - // Install and initialize optimizers. - def initOptimizer(kvstore: String = "local", optimizer: Optimizer = new SGD(), - resetOptimizer: Boolean = true, forceInit: Boolean = false): Unit -} - -class FitParams { - private[module] var evalMetric: EvalMetric = new Accuracy() - private[module] var epochEndCallback: Option[EpochEndCallback] = None - private[module] var batchEndCallback: Option[BatchEndCallback] = None - private[module] var kvstore: String = "local" - private[module] var optimizer: Optimizer = new SGD() - private[module] var evalEndCallback: Option[BatchEndCallback] = None - private[module] var evalBatchEndCallback: Option[BatchEndCallback] = None - private[module] var initializer: Initializer = new Uniform(0.01f) - private[module] var argParams: Map[String, NDArray] = null - private[module] var auxParams: Map[String, NDArray] = null - private[module] var allowMissing: Boolean = false - private[module] var forceRebind: Boolean = false - private[module] var forceInit: Boolean = false - private[module] var beginEpoch: Int = 0 - private[module] var validationMetric: Option[EvalMetric] = None - private[module] var monitor: Option[Monitor] = None - - // The performance measure used to display during training. - def setEvalMetric(evalMetric: EvalMetric): FitParams = { - require(evalMetric != null, "Undefined evalMetric") - this.evalMetric = evalMetric - this - } - - // Each callback will be called with the current - // `epoch`, `symbol`, `argParams` and `auxParams`. - def setEpochEndCallback(epochEndCallback: EpochEndCallback): FitParams = { - this.epochEndCallback = Option(epochEndCallback) - this - } - - // Each callback will be called with a `BatchEndParam`. - def setBatchEndCallback(batchEndCallback: BatchEndCallback): FitParams = { - this.batchEndCallback = Option(batchEndCallback) - this - } - - def setKVStore(kvStore: String): FitParams = { - require(kvStore != null, "Undefined kvStore") - this.kvstore = kvStore - this - } - - def setOptimizer(optimizer: Optimizer): FitParams = { - require(optimizer != null, "Undefined optimizer") - this.optimizer = optimizer - this - } - - // These will be called at the end of each full evaluation, - // with the metrics over the entire evaluation set. - def setEvalEndCallback(evalEndCallback: BatchEndCallback): FitParams = { - this.evalEndCallback = Option(evalEndCallback) - this - } - - // These will be called at the end of each minibatch during evaluation. - def setEvalBatchEndCallback(evalBatchEndCallback: BatchEndCallback): FitParams = { - this.evalBatchEndCallback = Option(evalBatchEndCallback) - this - } - - // Will be called to initialize the module parameters if not already initialized. - def setInitializer(initializer: Initializer): FitParams = { - require(initializer != null, "Undefined Initializer") - this.initializer = initializer - this - } - - // Default `None`, if not `None`, should be existing parameters from a trained - // model or loaded from a checkpoint (previously saved model). In this case, - // the value here will be used to initialize the module parameters, - // unless they are already initialized by the user - // via a call to `initParams` or `fit`. - // `argParams` takes higher priority than `initializer`.
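A minimal sketch of how the builder is meant to be chained, with `module`, `trainIter` and `valIter` as hypothetical stand-ins for a concrete `BaseModule` and two `DataIter`s:

    val params = new FitParams()
      .setEvalMetric(new Accuracy())
      .setOptimizer(new SGD())
      .setKVStore("local")
      .setBeginEpoch(0)

    // Trains for 5 epochs, scoring on the validation iterator after each one.
    module.fit(trainIter, evalData = Some(valIter), numEpoch = 5, fitParams = params)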
- def setArgParams(argParams: Map[String, NDArray]): FitParams = { - this.argParams = argParams - this - } - - // Default `None`. Similar to `argParams`, except for auxiliary states. - def setAuxParams(auxParams: Map[String, NDArray]): FitParams = { - this.auxParams = auxParams - this - } - - // Default `False`. Indicate whether we allow missing parameters - // when `argParams` and `auxParams` are not `None`. - // If this is `True`, then the missing parameters will be - // initialized via the `initializer`. - def setAllowMissing(allowMissing: Boolean): FitParams = { - this.allowMissing = allowMissing - this - } - - // Default `False`. Whether to force rebinding the executors if already binded. - def setForceRebind(forceRebind: Boolean): FitParams = { - this.forceRebind = forceRebind - this - } - - // Default `False`. Indicate whether we should force initialization even if the - // parameters are already initialized. - def setForceInit(forceInit: Boolean): FitParams = { - this.forceInit = forceInit - this - } - - // Default `0`. Indicate the starting epoch. Usually, if we are resuming from a - // checkpoint saved at a previous training phase at epoch N, - // then we should specify this value as N+1. - def setBeginEpoch(beginEpoch: Int): FitParams = { - require(beginEpoch >= 0, s"Invalid epoch $beginEpoch") - this.beginEpoch = beginEpoch - this - } - - def setValidationMetric(metric: EvalMetric): FitParams = { - this.validationMetric = Option(metric) - this - } - - def setMonitor(monitor: Monitor): FitParams = { - this.monitor = Option(monitor) - this - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/module/BucketingModule.scala b/scala-package/core/src/main/scala/org/apache/mxnet/module/BucketingModule.scala deleted file mode 100644 index d5c8c21ea106..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/module/BucketingModule.scala +++ /dev/null @@ -1,406 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements.  See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License.  You may obtain a copy of the License at - * - *    http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.module - -import org.apache.mxnet._ -import org.slf4j.LoggerFactory -import org.slf4j.Logger -import scala.collection.mutable.ArrayBuffer -import org.apache.mxnet.optimizer.SGD -import scala.collection.immutable.ListMap -import org.apache.mxnet.module.BaseModule._ - -/** - * This module helps to deal efficiently with varying-length inputs. - * @param symGen A function that, when called with a bucket key, returns a triple - *               `(symbol, dataNames, labelNames)`. - * @param defaultBucketKey The key for the default bucket. - * @param contexts Default is cpu(). - * @param workLoadList Default `None`, indicating uniform workload. - * @param fixedParamNames Default `None`, indicating no network parameters are fixed.
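A sketch of the `symGen` contract, where `buildUnrolledSymbol` is a hypothetical helper that returns the network `Symbol` unrolled to the sequence length encoded in the bucket key:

    val symGen = (bucketKey: AnyRef) => {
      val seqLen = bucketKey.asInstanceOf[Int]
      (buildUnrolledSymbol(seqLen),       // hypothetical helper
       IndexedSeq("data"),                // data names
       IndexedSeq("softmax_label"))       // label names
    }
    val bucketMod = new BucketingModule(symGen, defaultBucketKey = Int.box(32))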
- */ -class BucketingModule(symGen: AnyRef => (Symbol, IndexedSeq[String], IndexedSeq[String]), - defaultBucketKey: AnyRef, contexts: Array[Context] = Context.cpu(), - workLoadList: Option[IndexedSeq[Float]] = None, - fixedParamNames: Option[Set[String]] = None) extends BaseModule { - private val logger = LoggerFactory.getLogger(classOf[BucketingModule]) - - { - val (sym, dNames, lNames) = symGen(defaultBucketKey) - val dataNameList = if (dNames == null) IndexedSeq.empty[String] else dNames - val labelNameList = if (lNames == null) IndexedSeq.empty[String] else lNames - val fixedParamNameList = fixedParamNames.getOrElse(IndexedSeq.empty[String]).toIndexedSeq - - _checkInputNames(sym, dataNameList, "data", true, logger) - _checkInputNames(sym, labelNameList, "label", false, logger) - _checkInputNames(sym, fixedParamNameList, "fixed_param", true, logger) - } - - private val workLoads = workLoadList.getOrElse(contexts.map(_ => 1f).toIndexedSeq) - require(workLoads.size == contexts.length, - s"workloads size (${workLoads.size}) does not match number of contexts (${contexts.length})") - - private val _buckets = scala.collection.mutable.Map[AnyRef, Module]() - private var _currModule: Module = null - private var _currBucketKey = defaultBucketKey - - private var paramsDirty = false - - // Internal function to reset binded state. - private def resetBind(): Unit = { - this.binded = false - this._buckets.clear() - this._currModule = null - this._currBucketKey = defaultBucketKey - } - - // Symbol information - // A list of names for data required by this module. - override def dataNames: IndexedSeq[String] = { - if (this.binded) this._currModule.dataNames - else this.symGen(this.defaultBucketKey)._2 - } - - // A list of names for the outputs of this module. - override def outputNames: IndexedSeq[String] = { - if (this.binded) this._currModule.outputNames - else this.symGen(this.defaultBucketKey)._1.listOutputs() - } - - // Input/Output information - // A list of (name, shape) pairs specifying the data inputs to this module. - override def dataShapes: IndexedSeq[DataDesc] = { - require(this.binded, "bind() must be called first.") - this._currModule.dataShapes - } - - /** - * A list of (name, shape) pairs specifying the label inputs to this module. - * If this module does not accept labels -- either it is a module without loss - * function, or it is not binded for training, then this should return an empty - * list `[]`. - */ - override def labelShapes: IndexedSeq[DataDesc] = { - require(this.binded, "bind() must be called first.") - this._currModule.labelShapes - } - - // A list of (name, shape) pairs specifying the outputs of this module. - override def outputShapes: IndexedSeq[(String, Shape)] = { - require(this.binded, "bind() must be called first.") - this._currModule.outputShapes - } - - /** - * Get current parameters. - * `(argParams, auxParams)`, each a dictionary mapping names to parameters (`NDArray`). - */ - override def getParams: (Map[String, NDArray], Map[String, NDArray]) = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - this._currModule.paramsDirty = this.paramsDirty - val params = this._currModule.getParams - this.paramsDirty = false - params - } - - /** - * Assign parameter and aux state values. - * @param argParams Dictionary of name to value (`NDArray`) mapping. - * @param auxParams Dictionary of name to value (`NDArray`) mapping.
- * @param allowMissing - *        If true, params could contain missing values, and the initializer will be - *        called to fill those missing params. - * @param forceInit - *        If true, will force re-initialize even if already initialized. - * @param allowExtra - *        Whether to allow extra parameters that are not needed by the symbol. - *        If this is True, no error will be thrown when argParams or auxParams - *        contain extra parameters that are not needed by the executor. - */ - override def setParams(argParams: Map[String, NDArray], - auxParams: Map[String, NDArray], - allowMissing: Boolean = false, - forceInit: Boolean = true, - allowExtra: Boolean = false): Unit = { - if (!allowMissing) { - this.initParams(null, argParams, auxParams, allowMissing, forceInit, allowExtra) - } else if (this.paramsInitialized && !forceInit) { - logger.warn("Parameters already initialized and forceInit=false. " + - "setParams call ignored.") - } else { - this._currModule.setParams( - argParams, auxParams, allowMissing, forceInit, allowExtra) - - // because we didn't update this.argParams, the cached params are dirty now. - this.paramsDirty = true - this.paramsInitialized = true - } - } - - /** - * Initialize the parameters and auxiliary states. - * @param initializer Called to initialize parameters if needed. - * @param argParams If not null, should be a dictionary of existing argParams. - *                  Initialization will be copied from that. - * @param auxParams If not null, should be a dictionary of existing auxParams. - *                  Initialization will be copied from that. - * @param allowMissing If true, params could contain missing values, - *                     and the initializer will be called to fill those missing params. - * @param forceInit If true, will force re-initialize even if already initialized. - * @param allowExtra Whether to allow extra parameters that are not needed by the symbol. - *                   If this is True, no error will be thrown when argParams or auxParams - *                   contain extra parameters that are not needed by the executor. - */ - override def initParams(initializer: Initializer = new Uniform(0.01f), - argParams: Map[String, NDArray] = null, - auxParams: Map[String, NDArray] = null, - allowMissing: Boolean = false, - forceInit: Boolean = false, - allowExtra: Boolean = false): Unit = { - if (!paramsInitialized || forceInit) { - require(binded, "call bind before initializing the parameters") - this._currModule.initParams(initializer, argParams, auxParams, - allowMissing, forceInit, allowExtra) - this.paramsDirty = false - this.paramsInitialized = true - } - } - - /** - * Bind the symbols to construct executors. This is necessary before one - * can perform computation with the module. - * @param dataShapes Typically `dataIter.provideData`. - * @param labelShapes Typically `dataIter.provideLabel`. - * @param forTraining Default is `true`. Whether the executors should be binded for training. - * @param inputsNeedGrad Default is `false`. - *                       Whether the gradients with respect to the input data need to be computed. - *                       Typically this is not needed. - *                       But this might be needed when implementing composition of modules. - * @param forceRebind Default is `false`. - *                    This function does nothing if the executors are already binded. - *                    But with this `true`, the executors will be forced to rebind. - * @param sharedModule Default is `None`. This is used in bucketing. - *                     When not `None`, the shared module essentially corresponds to - *                     a different bucket -- a module with different symbol - *                     but with the same sets of parameters - *                     (e.g. unrolled RNNs with different lengths).
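Binding happens once against the default bucket; per-batch switching is then handled inside `forward()` via `switchBucket()`. A sketch continuing the previous one, with `trainIter` again hypothetical:

    bucketMod.bind(dataShapes = trainIter.provideDataDesc,
                   labelShapes = Option(trainIter.provideLabelDesc),
                   forTraining = true)
    bucketMod.initParams()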
- */ - override def bind(dataShapes: IndexedSeq[DataDesc], - labelShapes: Option[IndexedSeq[DataDesc]] = None, - forTraining: Boolean = true, inputsNeedGrad: Boolean = false, - forceRebind: Boolean = false, sharedModule: Option[BaseModule] = None, - gradReq: String = "write"): Unit = { - // in case we already initialized params, keep them - val (argParams, auxParams) = - if (this.paramsInitialized) this.getParams - else (null, null) - - // force rebinding is typically used when one wants to switch from - // the training to the prediction phase. - if (forceRebind) this.resetBind() - - if (this.binded) { - logger.warn("Already bound, ignoring bind()") - } else { - require(sharedModule.isEmpty, - "sharedModule for BucketingModule is not supported") - - this.forTraining = forTraining - this.inputsNeedGrad = inputsNeedGrad - this.binded = true - - val (sym, dNames, lNames) = this.symGen(this.defaultBucketKey) - val module = new Module(sym, dNames, lNames, this.contexts, - this.workLoadList, this.fixedParamNames) - module.bind(dataShapes, labelShapes, forTraining, inputsNeedGrad, - forceRebind = false, sharedModule = None, gradReq) - this._currModule = module - this._currBucketKey = this.defaultBucketKey - this._buckets(this.defaultBucketKey) = module - - // copy back saved params, if already initialized - if (this.paramsInitialized) { - this.setParams(argParams, auxParams) - } - } - } - - /** - * Switches to a different bucket. This will change `this._currModule`. - * @param bucketKey The key of the target bucket. - * @param dataShapes Typically `dataIter.provideData`. - * @param labelShapes Typically `dataIter.provideLabel`. - */ - def switchBucket(bucketKey: AnyRef, dataShapes: IndexedSeq[DataDesc], - labelShapes: Option[IndexedSeq[DataDesc]] = None): Unit = { - require(this.binded, "call bind before switching bucket") - if (!this._buckets.contains(bucketKey)) { - val (sym, dNames, lNames) = this.symGen(bucketKey) - val module = new Module(sym, dNames, lNames, this.contexts, - this.workLoadList, this.fixedParamNames) - module.bind(dataShapes, labelShapes, this._currModule.forTraining, - this._currModule.inputsNeedGrad, forceRebind = false, - sharedModule = Option(this._buckets(this.defaultBucketKey))) - this._buckets(bucketKey) = module - } - - this._currModule = this._buckets(bucketKey) - this._currBucketKey = bucketKey - } - - /** - * Install and initialize optimizers. - * @param kvstore The kvstore type (default `"local"`). - * @param optimizer The optimizer to install (default `SGD`). - * @param resetOptimizer Default `True`, indicating whether we should set `rescaleGrad` - *                       & `idx2name` for optimizer according to executorGroup - * @param forceInit Default `False`, indicating whether we should force re-initializing - *                  the optimizer in the case an optimizer is already installed. - */ - override def initOptimizer(kvstore: String = "local", optimizer: Optimizer = new SGD(), - resetOptimizer: Boolean = true, forceInit: Boolean = false): Unit = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - if (optimizerInitialized && !forceInit) { - logger.warn("optimizer already initialized, ignoring ...") - } else { - this._currModule.initOptimizer(kvstore, optimizer, resetOptimizer, forceInit) - for (mod <- this._buckets.values) { - if (mod != this._currModule) mod.borrowOptimizer(this._currModule) - } - this.optimizerInitialized = true - } - } - - /** - * Prepares a data batch for forward.
- * @param dataBatch input data - */ - def prepare(dataBatch: DataBatch): Unit = { - // bind() and initParams() must already have been called - require(this.binded && this.paramsInitialized, "bind() and initParams() must be called first.") - val bucketKey = dataBatch.bucketKey - val originalBucketKey = this._currBucketKey - this.switchBucket(bucketKey, dataBatch.provideDataDesc, Option(dataBatch.provideLabelDesc)) - // switch back - this.switchBucket(originalBucketKey, null, None) - } - - /** - * Forward computation. - * @param dataBatch input data - * @param isTrain Default is `None`, which means `isTrain` takes the value of `forTraining`. - */ - override def forward(dataBatch: DataBatch, isTrain: Option[Boolean] = None): Unit = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - this.switchBucket(dataBatch.bucketKey, dataBatch.provideDataDesc, - Option(dataBatch.provideLabelDesc)) - this._currModule.forward(dataBatch, isTrain) - } - - /** - * Backward computation. - * @param outGrads Gradient on the outputs to be propagated back. - *                 This parameter is only needed when bind is called - *                 on outputs that are not a loss function. - */ - override def backward(outGrads: Array[NDArray] = null): Unit = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - this._currModule.backward(outGrads) - } - - // Update parameters according to the installed optimizer and the gradients computed - // in the previous forward-backward cycle. - override def update(): Unit = { - require(binded && paramsInitialized && optimizerInitialized, - "bind(), initParams() and initOptimizer() must be called first.") - this.paramsDirty = true - this._currModule.update() - } - - /** - * Get outputs of the previous forward computation. - * @return In the case when data-parallelism is used, - *         the outputs will be collected from multiple devices. - *         The results will look like `[ [out1_dev1, out1_dev2], [out2_dev1, out2_dev2] ]`, - *         those `NDArray` might live on different devices. - */ - override def getOutputs(): IndexedSeq[IndexedSeq[NDArray]] = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - this._currModule.getOutputs() - } - - /** - * Get outputs of the previous forward computation. - * @return In the case when data-parallelism is used, - *         the outputs will be merged from multiple devices, - *         as if they came from a single executor. - *         The results will look like `[out1, out2]` - */ - override def getOutputsMerged(): IndexedSeq[NDArray] = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - this._currModule.getOutputsMerged() - } - - /** - * Get the gradients with respect to the inputs, computed in the previous backward computation. - * @return In the case when data-parallelism is used, - *         the grads will be collected from multiple devices. - *         The results will look like `[ [grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2] ]`, - *         those `NDArray` might live on different devices. - */ - override def getInputGrads(): IndexedSeq[IndexedSeq[NDArray]] = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - require(inputsNeedGrad, "Call to getInputGrads() but inputsNeedGrad is false") - this._currModule.getInputGrads() - } - - /** - * Get the gradients with respect to the inputs, computed in the previous backward computation.
- * @return In the case when data-parallelism is used, - * the grads will be merged from multiple devices, - * as they look like from a single executor. - * The results will look like `[grad1, grad2]` - */ - override def getInputGradsMerged(): IndexedSeq[NDArray] = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - require(inputsNeedGrad, "Call to getInputGradsMerged() but inputsNeedGrad is false") - this._currModule.getInputGradsMerged() - } - - /** - * Evaluate and accumulate evaluation metric on outputs of the last forward computation. - * @param evalMetric - * @param labels - */ - override def updateMetric(evalMetric: EvalMetric, labels: IndexedSeq[NDArray]): Unit = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - this._currModule.updateMetric(evalMetric, labels) - } - - override def getSymbol: Symbol = { - require(binded, "bind() must be called first.") - this._currModule.symbol - } - - // Install monitor on all executors - override def installMonitor(monitor: Monitor): Unit = { - require(binded, "bind() must be called first.") - for (mod <- this._buckets.values) mod.installMonitor(monitor) - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/module/DataParallelExecutorGroup.scala b/scala-package/core/src/main/scala/org/apache/mxnet/module/DataParallelExecutorGroup.scala deleted file mode 100644 index 4154e121c451..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/module/DataParallelExecutorGroup.scala +++ /dev/null @@ -1,766 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package org.apache.mxnet.module
-
-import org.apache.mxnet.DType.DType
-import org.apache.mxnet._
-import org.apache.mxnet.module.DataParallelExecutorGroup.Builder
-import org.slf4j.{Logger, LoggerFactory}
-
-import scala.collection.mutable
-import scala.collection.mutable.ArrayBuffer
-
-private object DataParallelExecutorGroup {
-  private val logger: Logger = LoggerFactory.getLogger(classOf[DataParallelExecutorGroup])
-  // Load a list of arrays into a list of arrays specified by slices
-  private def loadGeneralMulti(data: Seq[NDArray],
-                               targets: Seq[Array[((Int, Int), NDArray)]],
-                               majorAxis: Seq[Int]): Unit = {
-    for (((dSrc, dTargets), axis) <- data zip targets zip majorAxis) {
-      for (((sliceIdxStart, sliceIdxStop), dDst) <- dTargets) {
-        if (axis >= 0 && (sliceIdxStart > 0 || sliceIdxStop < dSrc.shape(axis))) {
-          // copy slice
-          val shape = dSrc.shape
-          val begin = Array.fill(shape.length)(0)
-          val end = shape.toArray
-          begin(axis) = sliceIdxStart
-          end(axis) = sliceIdxStop
-          if (dSrc.context == dDst.context) {
-            NDArray.crop(Map(
-              "begin" -> new Shape(begin),
-              "end" -> new Shape(end),
-              "out" -> dDst))(dSrc)
-          } else {
-            // on a different device: crop, then do a cross-device copy
-            val dDstCopy: NDArray = NDArray.crop(Map(
-              "begin" -> new Shape(begin),
-              "end" -> new Shape(end)))(dSrc)
-            dDstCopy.copyTo(dDst)
-          }
-        } else {
-          dSrc.copyTo(dDst)
-        }
-      }
-    }
-  }
-
-  private def loadGeneral(data: Seq[NDArray], targets: Seq[NDArray]): Unit = {
-    for ((dSrc, dTarget) <- data zip targets) {
-      dSrc.copyTo(dTarget)
-    }
-  }
-
-  // Load data into sliced arrays
-  private def loadData(batch: DataBatch,
-                       targets: Seq[Array[((Int, Int), NDArray)]],
-                       majorAxis: Seq[Int]): Unit = {
-    loadGeneralMulti(batch.data, targets, majorAxis)
-  }
-
-  // Load label into sliced arrays
-  private def loadLabel(batch: DataBatch,
-                        targets: Seq[Array[((Int, Int), NDArray)]],
-                        majorAxis: Seq[Int]): Unit = {
-    loadGeneralMulti(batch.label, targets, majorAxis)
-  }
-
-  // Merge outputs that live on multiple contexts into one,
-  // so that they look as if they came from a single context.
-  private def mergeMultiContext(outputs: IndexedSeq[IndexedSeq[NDArray]], majorAxis: Seq[Int])
-    : IndexedSeq[NDArray] = {
-    (outputs zip majorAxis).map { case (tensors, axis) =>
-      if (axis >= 0) {
-        NDArray.concatenate(tensors, axis = axis, alwaysCopy = false)
-      } else {
-        // a negative axis means there is no batch_size axis, and all the
-        // results should be the same on each device.
We simply take the first one, - // without checking they are actually the same - tensors(0) - } - } - } - - private object Builder { - private[module] def convertGradReq( - gradReq: String, argNames: IndexedSeq[String], paramNames: IndexedSeq[String], - fixedParamNames: Set[String], dataNames: Seq[String], inputsNeedGrad: Boolean) - : Map[String, String] = { - require(argNames != null, "Invalid argNames") - require(paramNames != null, "Invalid paramNames") - require(fixedParamNames != null, "Invalid fixedParamNames") - require(dataNames != null, "Invalid dataNames") - argNames.map(k => { - if (paramNames.contains(k)) { - (k, if (fixedParamNames.contains(k)) "null" else gradReq) - } else if (dataNames.contains(k)) { - (k, if (inputsNeedGrad) gradReq else "null") - } else { - (k, "null") - } - }).toMap - } - } - - class Builder private[module](private val symbol: Symbol, - private val contexts: Array[Context], - private val paramNames: IndexedSeq[String]) { - - private var workLoadList: IndexedSeq[Float] = null - private var dataShapes: IndexedSeq[DataDesc] = null - private var labelShapes: Option[IndexedSeq[DataDesc]] = None - private var forTraining: Boolean = true - private var inputsNeedGrad: Boolean = false - private var sharedGroup: Option[DataParallelExecutorGroup] = None - private var inputTypes: Option[Map[String, DType]] = None - private var fixedParamNames: Set[String] = Set.empty[String] - private var gradReqs: Map[String, String] = null - - val argNames = symbol.listArguments() - - def setWorkLoadList(workLoad: IndexedSeq[Float]): Builder = { - this.workLoadList = workLoad - this - } - - def setDataShapes(shapes: IndexedSeq[DataDesc]): Builder = { - require(shapes != null, "Invalid shapes") - this.dataShapes = shapes - this - } - - def setDataShapesByName(shapes: IndexedSeq[(String, Shape)]): Builder = { - require(shapes != null, "Invalid shapes") - this.dataShapes = shapes.map { case (k, s) => new DataDesc(k, s) } - this - } - - def setLabelShapes(shapes: IndexedSeq[DataDesc]): Builder = { - this.labelShapes = Option(shapes) - this - } - - def setLabelShapesByName(shapes: IndexedSeq[(String, Shape)]): Builder = { - this.labelShapes = Option(shapes).map(shapesInst => - shapesInst.map { case (k, s) => new DataDesc(k, s) } - ) - this - } - - def setForTraining(forTraining: Boolean): Builder = { - this.forTraining = forTraining - this - } - - def setInputsNeedGrad(needGrad: Boolean): Builder = { - this.inputsNeedGrad = needGrad - this - } - - def setSharedGroup(sharedGroup: DataParallelExecutorGroup): Builder = { - this.sharedGroup = Option(sharedGroup) - this - } - - def setInputTypes(inputTypes: Map[String, DType]): Builder = { - this.inputTypes = Option(inputTypes) - this - } - - def setFixedParamNames(fixedParamNames: Set[String]): Builder = { - this.fixedParamNames = Option(fixedParamNames).getOrElse(Set.empty[String]) - this - } - - def setGradReq(gradReq: Map[String, String]): Builder = { - require(dataShapes != null, "dataShapes must be set first") - val gradReqTmp = mutable.HashMap.empty[String, String] - val dataNames = dataShapes.map(_.name) - for (k <- argNames) { - if (paramNames.contains(k)) { - gradReqTmp.put(k, if (fixedParamNames.contains(k)) "null" else "write") - } else if (dataNames.contains(k)) { - gradReqTmp.put(k, if (inputsNeedGrad) "write" else "null") - } else { - gradReqTmp.put(k, "null") - gradReqTmp ++= gradReq - } - } - this.gradReqs = gradReqTmp.toMap - this - } - - def setGradReq(gradReq: String): Builder = { - require(dataShapes != null, "dataShapes 
must be set first")
-      val dataNames = dataShapes.map(_.name)
-      this.gradReqs = Builder.convertGradReq(
-        gradReq, argNames, paramNames, fixedParamNames, dataNames, inputsNeedGrad)
-      this
-    }
-
-    def setGradReq(gradReq: Seq[(String, String)]): Builder = {
-      require(gradReq.size == argNames.size,
-        s"provided number of gradReq (${gradReq.size}) do not match number of args " +
-        s"(${argNames.size})")
-      this.gradReqs = gradReq.toMap
-      this
-    }
-
-    def build(): DataParallelExecutorGroup = {
-      new DataParallelExecutorGroup(
-        symbol, contexts, workLoadList, dataShapes, labelShapes, paramNames, forTraining,
-        inputsNeedGrad, sharedGroup, inputTypes, fixedParamNames, this.gradReqs)
-    }
-  }
-}
-
-/**
- * DataParallelExecutorGroup is a group of executors that live on a group of devices.
- * This is a helper class used to implement data parallelism. Each mini-batch will
- * be split and run on the devices.
- * @param symbol The common symbolic computation graph for all executors.
- * @param contexts A list of contexts.
- * @param workLoadList If not `None`, could be a list of numbers that
- *                     specify the workload to be assigned to different contexts.
- *                     Larger numbers indicate heavier workloads.
- * @param dataShapes Should be a list of (name, shape) tuples, for the shapes of data.
- *                   Note the order is important and should be the same as the order in which
- *                   the `DataIter` provides the data.
- * @param labelShapes Should be a list of (name, shape) tuples, for the shapes of label.
- *                    Note the order is important and should be the same as the order in which
- *                    the `DataIter` provides the label.
- * @param paramNames A list of strings, indicating the names of parameters
- *                   (e.g. weights, filters, etc.) in the computation graph.
- * @param forTraining Indicate whether the executors should be bound for training.
- *                    When not doing training, the memory for gradients will not be allocated.
- * @param inputsNeedGrad Indicate whether the gradients for the input data should be computed.
- *                       This is currently not used.
- *                       It will be useful for implementing composition of modules.
- * @param sharedGroup Default is `None`. This is used in bucketing. When not `None`,
- *                    it should be an executor group corresponding to a different bucket.
- *                    In other words, it will correspond to a different symbol but
- *                    with the same set of parameters (e.g. unrolled RNNs with different lengths).
- *                    In this case, a lot of memory will be shared.
- * @param inputTypes Default is `None`. When not `None`,
- *                   can be used to specify the data type for each of the data/label inputs.
- * @param fixedParamNames Indicate parameters to be fixed during training.
- *                        Parameters in this list will not allocate space for gradients,
- *                        nor do gradient calculation.
- * @param gradReq Requirement for gradient accumulation. Can be 'write', 'add', or 'null',
- *                and can be specified for each argument.
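For orientation while reading this deleted class: Module.bind() (further down in this diff) was the main caller, assembling a group through the package-private Builder roughly like the following sketch. The shapes and value names here are illustrative, not taken from the original sources.

    // Hypothetical shapes; Builder was private[module], so only module code used it.
    val group = new DataParallelExecutorGroup.Builder(sym, Array(Context.cpu()), paramNames)
      .setWorkLoadList(IndexedSeq(1f))
      .setDataShapes(IndexedSeq(new DataDesc("data", Shape(32, 784))))
      .setLabelShapes(IndexedSeq(new DataDesc("softmax_label", Shape(32))))
      .setForTraining(true)
      .setGradReq("write")
      .build()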
- */ -class DataParallelExecutorGroup private[module]( - symbol: Symbol, - contexts: Array[Context], - workLoadList: IndexedSeq[Float], - var dataShapes: IndexedSeq[DataDesc], - var labelShapes: Option[IndexedSeq[DataDesc]] = None, - private[module] val paramNames: IndexedSeq[String], - forTraining: Boolean, - inputsNeedGrad: Boolean, - sharedGroup: Option[DataParallelExecutorGroup] = None, - inputTypes: Option[Map[String, DType]] = None, - fixedParamNames: Set[String] = Set.empty[String], - gradReq: Map[String, String] = null) { - - require(symbol != null, "Undefined symbol") - require(contexts != null, "Undefined context") - - private val argNames = symbol.listArguments() - private val auxNames = symbol.listAuxiliaryStates() - - private val gradReqRun = - if (!forTraining) { - val dataNames = dataShapes.map(_.name) - Builder.convertGradReq("null", - argNames, paramNames, fixedParamNames, dataNames, inputsNeedGrad) - } else { - gradReq - } - - private val sharedDataArrays: Array[mutable.Map[String, NDArray]] = - sharedGroup.map(_.sharedDataArrays).getOrElse( - Array.fill(contexts.length)(mutable.Map.empty[String, NDArray])) - - private var batchSize: Int = -1 - private var slices: Array[(Int, Int)] = null - private var execs: Array[Executor] = null - private var dataArrays: Seq[Array[((Int, Int), NDArray)]] = null - private var labelArrays: Option[Seq[Array[((Int, Int), NDArray)]]] = None - private[module] var paramArrays: IndexedSeq[Array[NDArray]] = null - private[module] var gradArrays: IndexedSeq[Array[NDArray]] = null - private[module] var auxArrays: IndexedSeq[Array[NDArray]] = null - private var inputGradArrays: IndexedSeq[Array[NDArray]] = null - - private var dataLayouts = decideSlices(dataShapes) - private var labelLayouts = - // call it to make sure labels has the same batch size as data - if (labelShapes != None) decideSlices(labelShapes.get) - else null - - private val outputLayouts = symbol.listOutputs().map(name => { - val sym = symbol.get(name) - val layout = sym.attr("__layout__") - sym.dispose() - DataDesc.getBatchAxis(layout) - } - ) - bindExec(dataShapes, labelShapes, sharedGroup) - - def getBatchSize: Int = batchSize - - /** - * Decide the slices for each context according to the workload. - * @param dataShapes list of DataDesc(name, shape) specifying - * the shapes for the input data or label. - */ - private def decideSlices(dataShapes: Seq[DataDesc]): Seq[Int] = { - require(dataShapes.size > 0, "dataShapes must be non empty") - val majorAxis = dataShapes.map(data => DataDesc.getBatchAxis(Option(data.layout))) - - for ((dataDesc, axis) <- dataShapes.zip(majorAxis)) { - if (axis != -1) { - val batchSize = dataDesc.shape(axis) - if (this.batchSize != -1) { - require(batchSize == this.batchSize, - s"all data must have the same batch size: $batchSize," + - s"but ${dataDesc.name} has shape ${dataDesc.shape}") - } else { - this.batchSize = batchSize - require(this.workLoadList != null, "Undefined workLoadList") - this.slices = ExecutorManager.splitInputSlice(this.batchSize, this.workLoadList) - } - } - } - majorAxis - } - - /** - * Bind executors on their respective devices. - * @param dataShapes DataDesc for input data. - * @param labelShapes DataDesc for input labels. 
- * @param sharedGroup
- * @param reshape
- */
-  def bindExec(dataShapes: IndexedSeq[DataDesc], labelShapes: Option[IndexedSeq[DataDesc]],
-               sharedGroup: Option[DataParallelExecutorGroup], reshape: Boolean = false): Unit = {
-    this.batchSize = -1
-    dataLayouts = decideSlices(dataShapes)
-    labelLayouts = {
-      // call it to make sure labels have the same batch size as data
-      if (labelShapes != None) decideSlices(labelShapes.get)
-      else null
-    }
-    if (reshape) {
-      (0 until contexts.length).foreach { i =>
-        val dataShapesSliced = slicedShape(dataShapes, i, dataLayouts)
-        val labelShapesSliced = labelShapes.map(slicedShape(_, i, labelLayouts))
-        val inputShapes
-          = dataShapesSliced.toMap ++ labelShapesSliced.getOrElse(Map.empty[String, Shape])
-
-        ResourceScope.usingIfScopeExists(execs(i).scope) {
-          val tmpExec = execs(i).reshape(allowUpSizing = true, kwargs = inputShapes)
-          execs(i).dispose()
-          execs(i) = tmpExec
-        }
-      }
-    } else {
-      execs = (0 until contexts.length).map(i =>
-        bindIthExec(i, dataShapes, labelShapes, sharedGroup)
-      ).toArray
-    }
-
-    this.dataShapes = dataShapes
-    this.labelShapes = labelShapes
-
-    // convenient data structures
-    dataArrays = dataShapes.map(dataDesc =>
-      this.execs.zipWithIndex.map { case (e, i) => (this.slices(i), e.argDict(dataDesc.name)) }
-    )
-
-    labelArrays = labelShapes.map(shapes =>
-      shapes.map(labelDesc =>
-        this.execs.zipWithIndex.map { case (e, i) => (this.slices(i), e.argDict(labelDesc.name)) }
-      )
-    )
-
-    paramArrays = argNames.zipWithIndex.withFilter {
-      case (name, i) => paramNames.contains(name)
-    }.map { case (name, i) =>
-      execs.map(_.argArrays(i))
-    }
-
-    gradArrays =
-      if (forTraining) {
-        argNames.zipWithIndex.withFilter {
-          case (name, i) => paramNames.contains(name)
-        }.map { case (name, i) =>
-          execs.map(_.gradArrays(i))
-        }
-      } else {
-        null
-      }
-
-    val dataNames = dataShapes.map(_.name)
-    inputGradArrays =
-      if (inputsNeedGrad) {
-        argNames.zipWithIndex.withFilter {
-          case (name, i) => dataNames.contains(name)
-        }.map { case (name, i) =>
-          execs.map(_.gradArrays(i))
-        }
-      } else {
-        null
-      }
-
-    auxArrays = (0 until auxNames.length).map(i => execs.map(_.auxArrays(i)))
-  }
-
-  /**
-   * Reshape executors.
-   * @param dataShapes
-   * @param labelShapes
-   */
-  def reshape(dataShapes: IndexedSeq[DataDesc], labelShapes: Option[IndexedSeq[DataDesc]]): Unit = {
-    if (!(dataShapes == this.dataShapes && labelShapes == this.labelShapes)) {
-      this.bindExec(dataShapes, labelShapes, None, reshape = true)
-    }
-  }
-
-  /**
-   * Assign, i.e. copy parameters to all the executors.
-   * @param argParams A dictionary of name to `NDArray` parameter mapping.
-   * @param auxParams A dictionary of name to `NDArray` auxiliary variable mapping.
-   * @param allowExtra Whether to allow extra parameters that are not needed by the symbol.
-   *                   If this is True, no error will be thrown when argParams or auxParams
-   *                   contain extra parameters that are not needed by the executor.
-   */
-  def setParams(argParams: Map[String, NDArray], auxParams: Map[String, NDArray],
-                allowExtra: Boolean = false): Unit = {
-    execs.foreach(_.copyParamsFrom(argParams, auxParams, allowExtraParams = allowExtra))
-  }
-
-  /**
-   * Copy data from each executor to `arg_params` and `aux_params`.
-   * @param argParams target parameter arrays
-   * @param auxParams target aux arrays
-   * Note this function will update the NDArrays in arg_params and aux_params in place.
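In other words, callers pre-allocated the target maps on the CPU and the group averaged each parameter block across devices into them; a minimal sketch, assuming cpuArgParams and cpuAuxParams already map each name to a CPU NDArray of the right shape:

    // Assumed to exist: cpuArgParams, cpuAuxParams (name -> CPU NDArray, matching shapes).
    group.getParams(cpuArgParams, cpuAuxParams) // averages device copies into these arrays in place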
- */
-  def getParams(argParams: Map[String, NDArray], auxParams: Map[String, NDArray]): Unit = {
-    for ((name, block) <- paramNames.zip(paramArrays)) {
-      val weight = (block.map(_.copyTo(Context.cpu())).reduce((a: NDArray, b: NDArray) =>
-        (a + b).disposeDeps()
-      ) / block.length).disposeDeps()
-      val weightNewType = weight.asType(argParams(name).dtype)
-      weightNewType.copyTo(argParams(name))
-      weight.dispose()
-      weightNewType.dispose()
-    }
-    for ((name, block) <- auxNames.zip(auxArrays)) {
-      val weight = (block.map(_.copyTo(Context.cpu())).reduce((a: NDArray, b: NDArray) =>
-        (a + b).disposeDeps()
-      ) / block.length).disposeDeps()
-      val weightNewType = weight.asType(auxParams(name).dtype)
-      weightNewType.copyTo(auxParams(name))
-      weight.dispose()
-      weightNewType.dispose()
-    }
-  }
-
-  /**
-   * Split `dataBatch` according to workload and run forward on each device.
-   * @param dataBatch
-   * @param isTrain The hint for the backend, indicating whether we are in the training phase.
-   *                Default is `None`, then the value `self.for_training` will be used.
-   */
-  def forward(dataBatch: DataBatch, isTrain: Option[Boolean] = None): Unit = {
-    DataParallelExecutorGroup.loadData(dataBatch, dataArrays, dataLayouts)
-    val isTrainOpt = isTrain.getOrElse(this.forTraining)
-    labelArrays.foreach(labels => {
-      require(!isTrainOpt || dataBatch.label != null, "label must be defined if in training phase")
-      if (dataBatch.label != null) {
-        require(labelLayouts != null, "label layouts are undefined")
-        DataParallelExecutorGroup.loadLabel(dataBatch, labels, labelLayouts)
-      }
-    })
-    execs.foreach(_.forward(isTrainOpt))
-  }
-
-  // Get the shapes of the outputs.
-  def getOutputShapes: IndexedSeq[(String, Shape)] = {
-    val outputs = execs(0).outputs
-    val shapes = outputs.map(_.shape)
-    (symbol.listOutputs() zip shapes zip outputLayouts) map { case ((key, theShape), axis) =>
-      val shape = theShape.toArray
-      if (axis >= 0) {
-        shape(axis) = batchSize
-      }
-      (key, Shape(shape))
-    }
-  }
-
-  /**
-   * Get outputs of the previous forward computation.
-   * @return In the case when data-parallelism is used,
-   *         the outputs will be collected from multiple devices.
-   *         The results will look like `[ [out1_dev1, out1_dev2], [out2_dev1, out2_dev2] ]`;
-   *         those `NDArray` might live on different devices.
-   */
-  def getOutputs(): IndexedSeq[IndexedSeq[NDArray]] = {
-    (0 until execs(0).outputs.length).map(i => execs.map(_.outputs(i)).toIndexedSeq)
-  }
-
-  /**
-   * Get outputs of the previous forward computation.
-   * @return In the case when data-parallelism is used,
-   *         the outputs will be merged from multiple devices,
-   *         as if they came from a single executor.
-   *         The results will look like `[out1, out2]`
-   */
-  def getOutputsMerged(): IndexedSeq[NDArray] = {
-    DataParallelExecutorGroup.mergeMultiContext(getOutputs(), outputLayouts)
-  }
-
-  /**
-   * Get the gradients to the inputs, computed in the previous backward computation.
-   * @return In the case when data-parallelism is used,
-   *         the grads will be collected from multiple devices.
-   *         The results will look like `[ [grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2] ]`;
-   *         those `NDArray` might live on different devices.
-   */
-  def getInputGrads(): IndexedSeq[IndexedSeq[NDArray]] = {
-    require(inputsNeedGrad, "Cannot get InputGrads when inputNeedGrad is set to false")
-    inputGradArrays.map(_.toIndexedSeq)
-  }
-
-  /**
-   * Get the gradients to the inputs, computed in the previous backward computation.
- * @return In the case when data-parallelism is used, - * the grads will be merged from multiple devices, - * as they look like from a single executor. - * The results will look like `[grad1, grad2]` - */ - def getInputGradsMerged(): IndexedSeq[NDArray] = { - DataParallelExecutorGroup.mergeMultiContext(getInputGrads(), dataLayouts) - } - - /** - * Run backward on all devices. A backward should be called after - * a call to the forward function. Backward cannot be called unless - * `this.for_training` is `True`. - * @param outGrads Gradient on the outputs to be propagated back. - * This parameter is only needed when bind is called - * on outputs that are not a loss function. - */ - def backward(outGrads: Array[NDArray] = null): Unit = { - require(forTraining, "re-bind with forTraining = true to run backward") - - for (((exec, islice), i) <- (execs zip slices).zipWithIndex) { - val outGradsSlice = - if (outGrads != null) { - (outGrads zip outputLayouts).map { case (grad, axis) => - if (axis >= 0) { - val ogMySlice: NDArray = NDArray.slice_axis( - Map("axis" -> axis, "begin" -> islice._1, "end" -> islice._2))(grad) - ogMySlice.asInContext(contexts(i)) - } else { - grad.copyTo(contexts(i)) - } - } - } else { - Array.empty[NDArray] - } - exec.backward(outGrads = outGradsSlice) - } - } - - /** - * Accumulate the performance according to `eval_metric` on all devices. - * @param evalMetric The metric used for evaluation. - * @param labels Typically comes from `label` of a `DataBatch`. - */ - def updateMetric(evalMetric: EvalMetric, labels: IndexedSeq[NDArray]): Unit = { - for ((texec, islice) <- this.execs zip this.slices) { - val labelsSlice = - (labels zip this.labelLayouts) map { case (label, axis) => - if (axis == 0) { - label.slice(islice) - } else if (axis > 0) { - val labelMySlice: NDArray = NDArray.slice_axis(Map( - "axis" -> axis, "begin" -> islice._1, "end" -> islice._2))(label) - .asInContext(label.context) - labelMySlice - } else { - label - } - } - - evalMetric.update(labelsSlice, texec.outputs) - - // Clear up any slices we created (sometimes we don't slice so check for this) - (labels zip labelsSlice).foreach { case (label, labelSlice) => - if (label ne labelSlice) { - labelSlice.dispose() - } - } - } - } - - // Internal utility function to bind the i-th executor. - private def bindIthExec(i: Int, dataShapes: Seq[DataDesc], - labelShapes: Option[Seq[DataDesc]], - sharedGroup: Option[DataParallelExecutorGroup]): Executor = { - val dataShapesSliced = slicedShape(dataShapes, i, dataLayouts) - val labelShapesSliced = labelShapes.map(slicedShape(_, i, labelLayouts)) - val sharedExec = sharedGroup.map(_.execs(i)) - val context = contexts(i) - val sharedDataArrays = this.sharedDataArrays(i) - - val inputShapes - = dataShapesSliced.toMap ++ labelShapesSliced.getOrElse(Map.empty[String, Shape]) - - val (argShapes, _, auxShapes) = symbol.inferShape(inputShapes) - require(argShapes != null, "Shape inference failed." + - s"Known shapes are $inputShapes for symbol arguments ${symbol.listArguments()} " + - s"and aux states ${symbol.listAuxiliaryStates()}") - - val inputTypesGot = inputTypes.getOrElse(inputShapes.map { case (k, v) => - (k, Base.MX_REAL_TYPE) - }) - val (argTypes, _, auxTypes) = symbol.inferType(inputTypesGot) - require(argTypes != null, "Type inference failed." 
+ - s"Known types as $inputTypes for symbol arguments ${symbol.listArguments()} " + - s"and aux states ${symbol.listAuxiliaryStates()}") - - val argArrays = ArrayBuffer.empty[NDArray] - val gradArrayMap = mutable.HashMap.empty[String, NDArray] - - // create or borrow arguments and gradients - for (j <- 0 until argNames.length) { - val name = argNames(j) - val argArr = - if (paramNames.contains(name)) { - // model parameter - sharedExec match { - case None => - val argArr = NDArray.zeros(argShapes(j), context, dtype = argTypes(j)) - if (gradReqRun(name) != "null") { - val gradArr = NDArray.zeros(argShapes(j), context, dtype = argTypes(j)) - gradArrayMap.put(name, gradArr) - } - argArr - case Some(sharedExecInst) => - val argArr = sharedExecInst.argDict(name) - require(argArr.shape == argShapes(j), - s"Shape ${argArr.shape} of argument $name does not match " + - s"inferred shape ${argShapes(j)}") - require(argArr.dtype == argTypes(j), - s"Type ${argArr.dtype} of argument $name does not match " + - s"inferred type ${argTypes(j)}") - if (gradReqRun(name) != "null") { - gradArrayMap.put(name, sharedExecInst.gradDict(name)) - } - argArr - } - } else { - // data or label - val argArr = getOrReshape(name, sharedDataArrays, argShapes(j), argTypes(j), context) - // data might also need grad if inputs_need_grad is True - if (gradReqRun(name) != "null") { - gradArrayMap.put(name, - getOrReshape(s"grad of $name", sharedDataArrays, argShapes(j), argTypes(j), context)) - } - argArr - } - argArrays.append(argArr) - } - - // create or borrow aux variables - val auxArrays = - sharedExec match { - case None => (auxShapes zip auxTypes).map { case (s, t) => - NDArray.zeros(s, context, dtype = t) - }.toArray - case Some(sharedExecInst) => - for ((arr, j) <- sharedExecInst.auxArrays.zipWithIndex) { - require(auxShapes(j) == arr.shape, - s"Shape ${arr.shape} of aux variable ${auxNames(j)} does not match " + - s"inferred shape ${auxShapes(j)}") - require(auxTypes(j) == arr.dtype, - s"Type ${arr.dtype} of aux variable ${auxNames(j)} does not match " + - s"inferred type ${auxTypes(j)}") - } - sharedExecInst.auxArrays.map(identity) - } - symbol.bind(ctx = context, args = argArrays.toSeq, argsGrad = gradArrayMap.toMap, - gradsReq = gradReqRun, auxStates = auxArrays.toSeq, group2ctx = null, - sharedExec = sharedExec.orNull) - } - - /** - * Get the sliced shapes for the i-th executor. - * @param shapes : The original (name, shape) pairs. - * @param i Which executor we are dealing with. 
- * @param majorAxis
- */
-  private def slicedShape(shapes: Seq[DataDesc], i: Int, majorAxis: Seq[Int])
-    : Seq[(String, Shape)] = {
-    (shapes zip majorAxis).map { case (DataDesc(k, shape, _ , _), axis) =>
-      val shapeArr = shape.toArray
-      if (axis >= 0) {
-        shapeArr(axis) = slices(i)._2 - slices(i)._1
-      }
-      (k, Shape(shapeArr))
-    }
-  }
-
-  // Install monitor on all executors
-  def installMonitor(monitor: Monitor): Unit = {
-    execs.foreach(monitor.install)
-  }
-
-  // Internal helper to get a memory block, or re-use one by re-shaping
-  private def getOrReshape(name: String,
-                           sharedDataArrays: mutable.Map[String, NDArray],
-                           argShape: Shape,
-                           argType: DType,
-                           context: Context): NDArray = {
-    if (sharedDataArrays.contains(name)) {
-      val argArr = sharedDataArrays(name)
-      if (argArr.shape.product >= argShape.product) {
-        // nice, we can directly re-use this data blob
-        require(argArr.dtype == argType,
-          s"Type ${argArr.dtype} of argument $name does not match infered type ${argType}")
-        argArr.reshape(argShape)
-      } else {
-        DataParallelExecutorGroup.logger.warn(s"bucketing: data $name has a shape $argShape," +
-          s"which is larger than already allocated shape ${argArr.shape}." +
-          "Need to re-allocate. Consider putting default_bucket_key to be the bucket" +
-          "taking the largest input for better memory sharing.")
-        val argArrNew = NDArray.zeros(argShape, context, dtype = argType)
-        // replace existing shared array because the new one is bigger
-        sharedDataArrays.put(name, argArrNew)
-        argArrNew
-      }
-    } else {
-      val argArrNew = NDArray.zeros(argShape, context, dtype = argType)
-      sharedDataArrays.put(name, argArrNew)
-      argArrNew
-    }
-  }
-}
diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/module/Module.scala b/scala-package/core/src/main/scala/org/apache/mxnet/module/Module.scala
deleted file mode 100644
index 9928f66b2200..000000000000
--- a/scala-package/core/src/main/scala/org/apache/mxnet/module/Module.scala
+++ /dev/null
@@ -1,723 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet.module
-
-import java.io.{BufferedInputStream, BufferedOutputStream, FileInputStream, FileOutputStream}
-
-import org.apache.mxnet.DType.DType
-import org.apache.mxnet._
-import org.apache.mxnet.module.DataParallelExecutorGroup.Builder
-import org.apache.mxnet.optimizer.SGD
-import org.slf4j.LoggerFactory
-
-import scala.annotation.varargs
-
-/**
- * Module is a basic module that wraps a `Symbol`. It is functionally the same
- * as the `FeedForward` model, except under the module API.
- * @param symbolVar Symbol definition.
- * @param dataNames Input data names.
- * @param labelNames Input label names
- * @param contexts Default is cpu().
- * @param workLoadList Default `None`, indicating uniform workload.
- * @param fixedParamNames Default `None`, indicating no network parameters are fixed.
- */
-class Module(symbolVar: Symbol,
-             val dataNames: IndexedSeq[String] = IndexedSeq("data"),
-             labelNames: IndexedSeq[String] = IndexedSeq("softmax_label"),
-             contexts: Array[Context] = Context.cpu(),
-             workLoadList: Option[IndexedSeq[Float]] = None,
-             fixedParamNames: Option[Set[String]] = None) extends BaseModule {
-  private val logger = LoggerFactory.getLogger(classOf[Module])
-
-  require(symbolVar != null, "Undefined symbol")
-  this.symbol = symbolVar
-
-  private val workLoads = workLoadList.getOrElse(contexts.map(_ => 1f).toIndexedSeq)
-  require(workLoads.size == contexts.length,
-    s"workloads size (${workLoads.size}) do not match number of contexts ${contexts.length}")
-
-  private val labelNameList = if (labelNames == null) IndexedSeq.empty[String] else labelNames
-
-  private val argNames = symbol.listArguments()
-  private val inputNames = dataNames ++ labelNameList
-  private val paramNames = argNames.filterNot(inputNames.toSet)
-  private val auxNames = symbol.listAuxiliaryStates()
-  private val outputNamesVar = symbol.listOutputs()
-
-  private[module] var paramsDirty = false
-
-  private var optimizer: Optimizer = null
-  private var kvstore: Option[KVStore] = None
-  private var updateOnKVStore: Boolean = false
-  private var updater: Option[MXKVStoreUpdater] = None
-  private var preloadOptStates: Option[String] = None
-
-  private var dataShapesVar: IndexedSeq[DataDesc] = null
-  private var labelShapesVar: Option[IndexedSeq[DataDesc]] = None
-
-  override def dataShapes: IndexedSeq[DataDesc] = {
-    require(binded, "bind() must be called first.")
-    dataShapesVar
-  }
-
-  override def labelShapes: IndexedSeq[DataDesc] = {
-    require(binded, "bind() must be called first.")
-    labelShapesVar.orNull
-  }
-
-  override def outputShapes: IndexedSeq[(String, Shape)] = {
-    require(binded, "bind() must be called first.")
-    execGroup.getOutputShapes
-  }
-
-  def outputNames: IndexedSeq[String] = outputNamesVar
-
-  /**
-   * Get current parameters.
-   * `(arg_params, aux_params)`, each a dictionary of name to parameters (in
-   * `NDArray`) mapping.
-   */
-  override def getParams: (Map[String, NDArray], Map[String, NDArray]) = {
-    require(binded && paramsInitialized, "bind() and initParams() must be called first.")
-    if (paramsDirty) {
-      syncParamsFromDevices()
-    }
-    (argParams, auxParams)
-  }
-
-  /**
-   * Initialize the parameters and auxiliary states.
-   * @param initializer Called to initialize parameters if needed.
-   * @param argParams If not None, should be a dictionary of existing arg_params.
-   *                  Initialization will be copied from that.
-   * @param auxParams If not None, should be a dictionary of existing aux_params.
-   *                  Initialization will be copied from that.
-   * @param allowMissing If true, params could contain missing values,
-   *                     and the initializer will be called to fill those missing params.
-   * @param forceInit If true, will force re-initialize even if already initialized.
-   * @param allowExtra Whether to allow extra parameters that are not needed by the symbol.
-   *                   If this is True, no error will be thrown when argParams or auxParams
-   *                   contain extra parameters that are not needed by the executor.
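A hedged usage sketch for this method: bind first, then initialize, optionally seeding from a previously saved parameter map (savedArgs here is a hypothetical name, as are the shapes):

    mod.bind(dataShapes = IndexedSeq(new DataDesc("data", Shape(32, 784))))
    mod.initParams(new Uniform(0.01f), argParams = savedArgs, allowMissing = true)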
- */
-  override def initParams(initializer: Initializer = new Uniform(0.01f),
-                          argParams: Map[String, NDArray] = null,
-                          auxParams: Map[String, NDArray] = null,
-                          allowMissing: Boolean = false,
-                          forceInit: Boolean = false,
-                          allowExtra: Boolean = false): Unit = {
-    if (!paramsInitialized || forceInit) {
-      require(binded, "call bind before initializing the parameters")
-
-      if (this.argParams == null) {
-        val paramArrays =
-          execGroup.paramArrays.map(nds => NDArray.zeros(nds(0).shape, dtype = nds(0).dtype))
-        this.argParams = this.paramNames.zip(paramArrays).toMap
-      }
-
-      if (this.auxParams == null) {
-        val auxArrays =
-          execGroup.auxArrays.map(nds => NDArray.zeros(nds(0).shape, dtype = nds(0).dtype))
-        this.auxParams = this.auxNames.zip(auxArrays).toMap
-      }
-
-      this.argParams.foreach { case (name, arr) =>
-        impl(name, arr, allowMissing, Option(initializer), argParams)
-      }
-
-      this.auxParams.foreach { case (name, arr) =>
-        impl(name, arr, allowMissing, Option(initializer), auxParams)
-      }
-
-      this.paramsInitialized = true
-      this.paramsDirty = false
-
-      // copy the initialized parameters to devices
-      this.execGroup.setParams(this.argParams, this.auxParams, allowExtra = allowExtra)
-    }
-  }
-
-  // Internal helper for parameter initialization
-  private def impl(name: String, arr: NDArray, allowMissing: Boolean,
-                   initializer: Option[Initializer] = None,
-                   cache: Map[String, NDArray] = null): Unit = {
-    if (cache != null) {
-      if (cache.contains(name)) {
-        val cacheArr = cache(name) // just in case the cached array is just the target itself
-        if (cacheArr ne arr) {
-          cacheArr.copyTo(arr)
-        }
-      } else {
-        require(allowMissing, s"$name is not presented")
-        initializer.foreach(inst => inst(name, arr))
-      }
-    } else {
-      initializer.foreach(inst => inst(name, arr))
-    }
-  }
-
-  /**
-   * Assign parameter and aux state values.
-   * argParams : dict
-   *     Dictionary of name to value (`NDArray`) mapping.
-   * auxParams : dict
-   *     Dictionary of name to value (`NDArray`) mapping.
-   * allowMissing : bool
-   *     If true, params could contain missing values, and the initializer will be
-   *     called to fill those missing params.
-   * forceInit : bool
-   *     If true, will force re-initialize even if already initialized.
-   * allowExtra : bool
-   *     Whether to allow extra parameters that are not needed by the symbol.
-   *     If this is True, no error will be thrown when argParams or auxParams
-   *     contain extra parameters that are not needed by the executor.
-   */
-  override def setParams(argParams: Map[String, NDArray],
-                         auxParams: Map[String, NDArray],
-                         allowMissing: Boolean = false,
-                         forceInit: Boolean = true,
-                         allowExtra: Boolean = false): Unit = {
-    if (!allowMissing) {
-      this.initParams(null, argParams, auxParams, allowMissing, forceInit, allowExtra)
-    } else if (this.paramsInitialized && !forceInit) {
-      logger.warn("Parameters already initialized and forceInit=false. " +
-        "setParams call ignored.")
-    } else {
-      this.execGroup.setParams(argParams, auxParams, allowExtra)
-
-      // because we didn't update self._arg_params, they are dirty now.
-      this.paramsDirty = true
-      this.paramsInitialized = true
-    }
-  }
-
-  // Internal function to reset the binding state.
-  private def resetBind(): Unit = {
-    binded = false
-    execGroup = null
-    dataShapesVar = null
-    labelShapesVar = None
-  }
-
-  /**
-   * Bind the symbols to construct executors. This is necessary before one
-   * can perform computation with the module.
-   * @param dataShapes Typically is `dataIter.provideData`.
-   * @param labelShapes Typically is `data_iter.provide_label`.
- * @param forTraining Default is `true`. Whether the executors should be bound for training.
- * @param inputsNeedGrad Default is `false`.
- *                       Whether the gradients to the input data need to be computed.
- *                       Typically this is not needed.
- *                       But this might be needed when implementing composition of modules.
- * @param forceRebind Default is `false`.
- *                    This function does nothing if the executors are already bound.
- *                    But with this `true`, the executors will be forced to rebind.
- * @param sharedModule Default is `None`. This is used in bucketing.
- *                     When not `None`, the shared module essentially corresponds to
- *                     a different bucket -- a module with a different symbol
- *                     but with the same sets of parameters
- *                     (e.g. unrolled RNNs with different lengths).
- */
-  override def bind(dataShapes: IndexedSeq[DataDesc],
-                    labelShapes: Option[IndexedSeq[DataDesc]] = None,
-                    forTraining: Boolean = true, inputsNeedGrad: Boolean = false,
-                    forceRebind: Boolean = false, sharedModule: Option[BaseModule] = None,
-                    gradReq: String = "write"): Unit = {
-    // force rebinding is typically used when one wants to switch from training to prediction phase.
-    if (forceRebind) {
-      resetBind()
-    }
-
-    if (binded) {
-      logger.warn("Already binded, ignoring bind()")
-    } else {
-      this.forTraining = forTraining
-      this.inputsNeedGrad = inputsNeedGrad
-      this.binded = true
-
-      if (!forTraining) {
-        require(!inputsNeedGrad, "Invalid inputsNeedGrad (cannot be true if not forTraining)")
-      } else {
-        // this is not enforced, as some modules might not contain a loss function
-        // that consumes the labels
-        // require(labelShapes != None)
-      }
-
-      this.dataShapesVar = dataShapes
-      this.labelShapesVar = labelShapes
-
-      val sharedGroup =
-        sharedModule.map(sharedModuleInst => {
-          require(sharedModuleInst.binded && sharedModuleInst.paramsInitialized,
-            s"bind() and initParams() must be called first on shared module.")
-          sharedModuleInst.execGroup
-        })
-
-      val inputTypes = this.dataShapesVar.map(dataDesc => (dataDesc.name, dataDesc.dtype)).toMap ++
-        labelShapes.map(shapes => shapes.map(dataDesc => (dataDesc.name, dataDesc.dtype)).toMap)
-          .getOrElse(Map.empty[String, DType])
-
-      execGroup = new Builder(symbol, contexts, paramNames)
-        .setWorkLoadList(workLoads)
-        .setDataShapes(dataShapes)
-        .setLabelShapes(labelShapes.orNull)
-        .setForTraining(forTraining)
-        .setInputsNeedGrad(inputsNeedGrad)
-        .setSharedGroup(sharedGroup.orNull)
-        .setFixedParamNames(fixedParamNames.orNull)
-        .setGradReq(gradReq)
-        .setInputTypes(inputTypes)
-        .build()
-
-      if (sharedModule.isDefined) {
-        paramsInitialized = true
-        argParams = sharedModule.get.argParams
-        auxParams = sharedModule.get.auxParams
-      } else if (paramsInitialized) {
-        // if the parameters are already initialized, we are re-binding,
-        // so automatically copy the already initialized params
-        execGroup.setParams(argParams, auxParams)
-      }
-
-      sharedModule.foreach {
-        case sharedModuleInst: Module =>
-          if (sharedModuleInst.optimizerInitialized) {
-            borrowOptimizer(sharedModuleInst)
-          }
-        case _ =>
-      }
-    }
-  }
-
-  /**
-   * Check that input names match the input data descriptors.
-   */
-  @throws(classOf[IllegalArgumentException])
-  private def _checkNamesMatch(dataNames: IndexedSeq[String], dataShapes: IndexedSeq[DataDesc],
-                               name: String, throwEx: Boolean): Unit = {
-    val actual = dataShapes.map(_.name)
-    if (dataNames.sorted != actual.sorted) {
-      val msg = s"Data provided by ${name}_shapes don't match names specified by " +
-        s"${name}_names (${dataShapes.mkString(", ")} vs.
${dataNames.mkString(", ")})" - if (throwEx) throw new IllegalArgumentException(msg) - else logger.warn(msg) - } - } - - /** - * parse data_attrs into DataDesc format and check that names match - */ - @throws(classOf[IllegalArgumentException]) - private def _parseDataDesc(dataNames: IndexedSeq[String], labelNames: IndexedSeq[String], - dataShapes: IndexedSeq[DataDesc], labelShapes: Option[IndexedSeq[DataDesc]]): - (IndexedSeq[DataDesc], Option[IndexedSeq[DataDesc]]) = { - _checkNamesMatch(dataNames, dataShapes, "data", true) - if (labelShapes != None) _checkNamesMatch(labelNames, labelShapes.get, "label", false) - (dataShapes, labelShapes) - } - - /** - * Reshapes the module for new input shapes. - * @param dataShapes Typically is `dataIter.provideData`. - * @param labelShapes Typically is `dataIter.provideLabel`. - */ - def reshape(dataShapes: IndexedSeq[DataDesc], - labelShapes: Option[IndexedSeq[DataDesc]] = None): Unit = { - require(this.binded, "bind() must be called first.") - val (tdataShapes, tlabelShapes) = this._parseDataDesc( - this.dataNames, this.labelNames, dataShapes, labelShapes) - this.dataShapesVar = tdataShapes - this.labelShapesVar = tlabelShapes - this.execGroup.reshape(tdataShapes, tlabelShapes) - } - - /** - * Install and initialize optimizers. - * @param kvstore - * @param optimizer - * @param resetOptimizer Default `True`, indicating whether we should set `rescaleGrad` - * & `idx2name` for optimizer according to executorGroup - * @param forceInit Default `False`, indicating whether we should force re-initializing - * the optimizer in the case an optimizer is already installed. - */ - def initOptimizer(kvstore: String = "local", optimizer: Optimizer = new SGD(), - resetOptimizer: Boolean = true, forceInit: Boolean = false): Unit = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - if (optimizerInitialized && !forceInit) { - logger.warn("optimizer already initialized, ignoring ...") - } else { - val (kvstoreInst, updateOnKVStore) = Model.createKVStore(kvstore, contexts.length, argParams) - val batchSize = execGroup.getBatchSize * ( - if (kvstoreInst != None && kvstoreInst.get.`type` == "dist_sync") { - kvstoreInst.get.numWorkers - } else { - 1 - }) - if (resetOptimizer) { - val idx2name = - if (updateOnKVStore) { - execGroup.paramNames.zipWithIndex.map { case (name, i) => (i, name) }.toMap - } else { - (0 until contexts.length).flatMap(k => - execGroup.paramNames.zipWithIndex.map { case (name, i) => - (i * contexts.length + k, name) - } - ).toMap - } - optimizer.setIdx2Name(idx2name) - optimizer.setRescaleGrad(1f / batchSize) - } - - this.optimizer = optimizer - this.kvstore = kvstoreInst - this.updateOnKVStore = updateOnKVStore - - kvstoreInst.foreach(kv => - // copy initialized local parameters to kvstore - Model.initializeKVStore(kv, execGroup.paramArrays, - argParams, paramNames, updateOnKVStore) - ) - updater = - if (updateOnKVStore) { - kvstoreInst.foreach(_.setOptimizer(this.optimizer)) - None - } else { - Some(Optimizer.getUpdater(optimizer)) - } - - optimizerInitialized = true - preloadOptStates.foreach { optStates => - loadOptimizerStates(optStates) - } - preloadOptStates = None - } - } - - /** - * Borrow optimizer from a shared module. Used in bucketing, where exactly the same - * optimizer (esp. kvstore) is used. 
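As a sketch of that bucketing pattern (module names here are illustrative): one module owns the optimizer and the others reuse its kvstore-backed state, so all buckets step the same parameters.

    defaultBucketMod.initOptimizer(kvstore = "local", optimizer = new SGD())
    otherBucketMod.borrowOptimizer(defaultBucketMod) // share optimizer, kvstore, and updater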
- * @param sharedModule - */ - def borrowOptimizer(sharedModule: Module): Unit = { - require(sharedModule.optimizerInitialized, - "initOptimizer() must be called first for shared module") - optimizer = sharedModule.optimizer - kvstore = sharedModule.kvstore - updateOnKVStore = sharedModule.updateOnKVStore - updater = sharedModule.updater - optimizerInitialized = true - } - - /** - * Forward computation. - * @param dataBatch input data - * @param isTrain Default is `None`, which means `is_train` takes the value of `for_training`. - */ - def forward(dataBatch: DataBatch, isTrain: Option[Boolean] = None): Unit = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - val currDataShapes = this.dataShapes.map(_.shape) - val newDataShapes = dataBatch.data.map(_.shape) - if (currDataShapes != newDataShapes) { - val newDShapes: IndexedSeq[DataDesc] = - if (dataBatch.provideDataDesc != null) dataBatch.provideDataDesc - else { - this.dataShapes.zip(newDataShapes).map { case (i, shape) => - DataDesc(i.name, shape, i.dtype, i.layout) - } - } - val newLShapes: Option[IndexedSeq[DataDesc]] = - if (dataBatch.provideLabelDesc != null) Some(dataBatch.provideLabelDesc) - else if (dataBatch.label != null && dataBatch.label.length > 0 - && this.labelShapes != null) { - Some(this.labelShapes.zip(dataBatch.label).map { case (i, j) => - DataDesc(i.name, j.shape, i.dtype, i.layout) - }) - } else None - this.reshape(newDShapes, newLShapes) - } - execGroup.forward(dataBatch, isTrain) - } - - /** - * Backward computation. - * @param outGrads Gradient on the outputs to be propagated back. - * This parameter is only needed when bind is called - * on outputs that are not a loss function. - */ - def backward(outGrads: Array[NDArray] = null): Unit = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - execGroup.backward(outGrads) - } - - // Update parameters according to the installed optimizer and the gradients computed - // in the previous forward-backward batch. - def update(): Unit = { - require(binded && paramsInitialized && optimizerInitialized, - "bind(), initParams() and initOptimizer() must be called first.") - paramsDirty = true - if (updateOnKVStore) { - Model.updateParamsOnKVStore(execGroup.paramArrays, - execGroup.gradArrays, kvstore, execGroup.paramNames) - } else { - require(updater.isDefined, "Undefined updater") - Model.updateParams(execGroup.paramArrays, - execGroup.gradArrays, updater.orNull, contexts.length, execGroup.paramNames, kvstore) - } - } - - /** - * Get outputs of the previous forward computation. - * @return In the case when data-parallelism is used, - * the outputs will be collected from multiple devices. - * The results will look like `[ [out1_dev1, out1_dev2], [out2_dev1, out2_dev2] ]`, - * those `NDArray` might live on different devices. - */ - def getOutputs(): IndexedSeq[IndexedSeq[NDArray]] = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - execGroup.getOutputs() - } - - /** - * Get outputs of the previous forward computation. - * @return In the case when data-parallelism is used, - * the outputs will be merged from multiple devices, - * as they look like from a single executor. 
- * The results will look like `[out1, out2]` - */ - def getOutputsMerged(): IndexedSeq[NDArray] = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - execGroup.getOutputsMerged() - } - - /** - * Get the gradients to the inputs, computed in the previous backward computation. - * @return In the case when data-parallelism is used, - * the grads will be collected from multiple devices. - * The results will look like `[ [grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2] ]`, - * those `NDArray` might live on different devices. - */ - def getInputGrads(): IndexedSeq[IndexedSeq[NDArray]] = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - require(inputsNeedGrad, "Call to getInputGrads() but inputsNeedGrad is false") - execGroup.getInputGrads() - } - - /** - * Get the gradients to the inputs, computed in the previous backward computation. - * @return In the case when data-parallelism is used, - * the grads will be merged from multiple devices, - * as they look like from a single executor. - * The results will look like `[grad1, grad2]` - */ - def getInputGradsMerged(): IndexedSeq[NDArray] = { - require(binded && paramsInitialized, "bind() and initParams() must be called first.") - require(inputsNeedGrad, "Call to getInputGradsMerged() but inputsNeedGrad is false") - execGroup.getInputGradsMerged() - } - - /** - * Evaluate and accumulate evaluation metric on outputs of the last forward computation. - * @param evalMetric - * @param labels - */ - def updateMetric(evalMetric: EvalMetric, labels: IndexedSeq[NDArray]): Unit = { - execGroup.updateMetric(evalMetric, labels) - } - - // Synchronize parameters from devices to CPU. This function should be called after - // calling `update` that updates the parameters on the devices, before one can read the - // latest parameters from `self._arg_params` and `self._aux_params`. - private def syncParamsFromDevices(): Unit = { - execGroup.getParams(argParams, auxParams) - } - - // Install monitor on all executors - def installMonitor(monitor: Monitor): Unit = { - require(binded, "bind() must be called first.") - execGroup.installMonitor(monitor) - } - - /** - * Save optimizer (updater) state to file - * @param fname Path to output states file. - */ - def saveOptimizerStates(fname: String): Unit = { - require(optimizerInitialized, "Optimizer should be initialized before saving.") - if (updateOnKVStore) { - kvstore.foreach(_.saveOptimizerStates(fname)) - } else { - updater.foreach { - case cachedStates: MXKVStoreCachedStates => - val target = new BufferedOutputStream(new FileOutputStream(fname)) - try { - target.write(cachedStates.serializeState()) - } finally { - target.close() - } - case _ => - logger.warn("Updater does not have states, skip saving to {}", fname) - } - } - } - - /** - * Load optimizer (updater) state from file - * @param fname Path to input states file. 
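Taken together with saveCheckpoint below, the round trip looked roughly like this hedged sketch (the prefix and epoch are illustrative):

    mod.saveCheckpoint("checkpoints/mlp", epoch = 10, saveOptStates = true)
    val restored = Module.loadCheckpoint("checkpoints/mlp", epoch = 10, loadOptimizerStates = true)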
- */ - def loadOptimizerStates(fname: String): Unit = { - require(optimizerInitialized, "Optimizer should be initialized before loading.") - if (updateOnKVStore) { - kvstore.foreach(_.loadOptimizerStates(fname)) - } else { - updater.foreach { - case cachedStates: MXKVStoreCachedStates => - val bis = new BufferedInputStream(new FileInputStream(fname)) - try { - val bArray = Stream.continually(bis.read).takeWhile(-1 !=).map(_.toByte).toArray - cachedStates.deserializeState(bArray) - } finally { - bis.close() - } - case _ => - logger.warn("Updater does not have states, skip loading from {}", fname) - } - } - } - - /** - * Save current progress to checkpoint. - * Use mx.callback.module_checkpoint as epoch_end_callback to save during training. - * @param prefix The file prefix to checkpoint to - * @param epoch The current epoch number - * @param saveOptStates Whether to save optimizer states for continue training - */ - def saveCheckpoint(prefix: String, epoch: Int, saveOptStates: Boolean = false): Unit = { - symbol.save(s"$prefix-symbol.json") - val paramName = "%s-%04d.params".format(prefix, epoch) - saveParams(paramName) - logger.info("Saved checkpoint to {}", paramName) - if (saveOptStates) { - val stateName = "%s-%04d.states".format(prefix, epoch) - saveOptimizerStates(stateName) - logger.info("Saved optimizer state to {}", stateName) - } - } -} - -object Module { - /** - * Create a model from previously saved checkpoint. - * @param prefix Path prefix of saved model files. You should have "prefix-symbol.json", - * "prefix-xxxx.params", and optionally "prefix-xxxx.states", - * where xxxx is the epoch number. - * @param epoch Epoch to load. - * @param loadOptimizerStates Whether to load optimizer states. - * Checkpoint needs to have been made with saveOptimizerStates=True - * @param dataNames Input data names. - * @param labelNames Input label names - * @param contexts Default is cpu(). - * @param workLoadList Default `None`, indicating uniform workload. - * @param fixedParamNames Default `None`, indicating no network parameters are fixed. - */ - def loadCheckpoint(prefix: String, epoch: Int, loadOptimizerStates: Boolean = false, - dataNames: IndexedSeq[String] = IndexedSeq("data"), - labelNames: IndexedSeq[String] = IndexedSeq("softmax_label"), - contexts: Array[Context] = Context.cpu(), - workLoadList: Option[IndexedSeq[Float]] = None, - fixedParamNames: Option[Set[String]] = None): Module = { - val (sym, args, auxs) = Model.loadCheckpoint(prefix, epoch) - val mod = new Module(symbolVar = sym, - dataNames, labelNames, contexts, workLoadList, fixedParamNames) - mod.argParams = args - mod.auxParams = auxs - mod.paramsInitialized = true - if (loadOptimizerStates) { - mod.preloadOptStates = Some("%s-%04d.states".format(prefix, epoch)) - } - mod - } - - /** - * Builder class for Module. - * @param modelDef model definition in Symbol. - */ - class Builder(private val modelDef: Symbol) { - private var dataNames: IndexedSeq[String] = IndexedSeq("data") - private var labelNames: IndexedSeq[String] = IndexedSeq("softmax_label") - private var contexts: Array[Context] = Array(Context.cpu()) - private var workLoadList: IndexedSeq[Float] = _ - private var fixedParamNames: Set[String] = _ - - /** - * Set the context for execution. - * @param ctx a list of contexts. - * @return this. - */ - @varargs def setContext(ctx: Context*): Builder = { - contexts = ctx.toArray - this - } - - /** - * Set the input data names. - * @param name a list of data names. Cannot be null. - * @return this. 
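For reference, the fluent pattern these setters enabled looked roughly like the following (net is a hypothetical Symbol):

    val mod = new Module.Builder(net)
      .setContext(Context.cpu())
      .setDataNames("data")
      .setLabelNames("softmax_label")
      .build()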
- */
-    @varargs def setDataNames(name: String*): Builder = {
-      dataNames = name.toVector
-      this
-    }
-
-    /**
-     * Set the label names.
-     * @param name a list of label names.
-     *             Set to null if no label is required.
-     * @return this.
-     */
-    @varargs def setLabelNames(name: String*): Builder = {
-      labelNames = if (name == null) IndexedSeq.empty[String] else name.toVector
-      this
-    }
-
-    /**
-     * Set the workloads.
-     * @param workloads a list of workloads
-     * @return this.
-     */
-    @varargs def setWorkLoadList(workloads: Float*): Builder = {
-      workLoadList = workloads.toVector
-      this
-    }
-
-    /**
-     * Specify the parameters that need to be fixed.
-     * @param name a list of parameter names.
-     * @return this.
-     */
-    @varargs def setFixedParamNames(name: String*): Builder = {
-      fixedParamNames = name.toSet
-      this
-    }
-
-    def build(): Module = {
-      new Module(modelDef, dataNames, labelNames, contexts,
-        Option(workLoadList), Option(fixedParamNames))
-    }
-  }
-}
diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/module/SequentialModule.scala b/scala-package/core/src/main/scala/org/apache/mxnet/module/SequentialModule.scala
deleted file mode 100644
index d80e6bc6279b..000000000000
--- a/scala-package/core/src/main/scala/org/apache/mxnet/module/SequentialModule.scala
+++ /dev/null
@@ -1,419 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet.module
-
-import org.apache.mxnet._
-import org.slf4j.LoggerFactory
-import scala.collection.mutable.ArrayBuffer
-import org.apache.mxnet.optimizer.SGD
-import scala.collection.immutable.ListMap
-
-/**
- * A SequentialModule is a container module that can chain multiple modules together.
- * Note building a computation graph with this kind of imperative container is less
- * flexible and less efficient than the symbolic graph. So this should only be used
- * as a handy utility.
- */
-class SequentialModule extends BaseModule {
-
-  private val logger = LoggerFactory.getLogger(classOf[SequentialModule])
-
-  private val META_TAKE_LABELS = "take_labels"
-  private val META_AUTO_WIRING = "auto_wiring"
-  private val metaKeys = Set(META_TAKE_LABELS, META_AUTO_WIRING)
-
-  private val modules = ArrayBuffer[BaseModule]()
-  private val metas = ArrayBuffer[Map[String, Boolean]]()
-  private var labelShapesVar: Option[IndexedSeq[DataDesc]] = None
-
-  /**
-   * Add a module to the chain.
-   * An example of adding two modules to a chain:
-   *   val seqMod = new SequentialModule()
-   *   seqMod.add(mod1).add(mod2)
-   * @param module The new module to add.
-   * @param kwargs All the keyword arguments are saved as meta information
-   *               for the added module. The currently known meta includes
-   *               - "take_labels": indicating whether the module expects to
- /** - * Add a module to the chain. - * An example of adding two modules to a chain: - * val seqMod = new SequentialModule() - * seqMod.add(mod1).add(mod2) - * @param module The new module to add. - * @param kwargs All the keyword arguments are saved as meta information - * for the added module. The currently known meta includes - * - "take_labels": indicating whether the module expects to - * take labels when doing computation. Note that any module in - * the chain can take labels (not necessarily only the top - * most one), and they all take the same labels passed - * from the original data batch for the `SequentialModule`. - * @return This function returns `this` to allow us to easily chain a series of `add` calls. - */ - def add(module: BaseModule, kwargs: (String, Boolean)*): SequentialModule = { - this.modules += module - - // a sanity check to avoid typos - kwargs.foreach { case (k, v) => - require(this.metaKeys.contains(k), s"Unknown meta key $k, is it a typo?") - } - - this.metas += kwargs.map(kw => kw._1 -> kw._2).toMap - - // after adding new modules, we are reset back to raw states and need - // to bind, initParams, etc. - this.binded = false - this.paramsInitialized = false - this.optimizerInitialized = false - - this - } - - /** - * @return A list of names for data required by this module. - */ - override def dataNames: IndexedSeq[String] = { - if (this.modules.length > 0) this.modules.head.dataNames - else IndexedSeq[String]() - } - - /** - * @return A list of names for the outputs of this module. - */ - override def outputNames: IndexedSeq[String] = { - if (this.modules.length > 0) this.modules.reverse.head.outputNames - else IndexedSeq[String]() - } - - /** - * Get data shapes. - * @return The data shapes of the first module are the data shapes of a SequentialModule. - */ - override def dataShapes: IndexedSeq[DataDesc] = { - require(this.binded, "bind() must be called first.") - this.modules.head.dataShapes - } - - /** - * Get label shapes. - * @return The return value could be null if - * the module does not need labels, or if the module is not bound for - * training (in this case, label information is not available). - */ - override def labelShapes: IndexedSeq[DataDesc] = { - require(this.binded, "bind() must be called first.") - this.labelShapesVar.orNull - } - - /** - * Get output shapes. - * @return The output shapes of the last - * module are the output shapes of a SequentialModule. - */ - def outputDesc: IndexedSeq[DataDesc] = { - require(this.binded, "bind() must be called first.") - this.modules.reverse.head.dataShapes - } - - /** - * Get output shapes. - * @return The output shapes of the last - * module are the output shapes of a SequentialModule. - */ - override def outputShapes: IndexedSeq[(String, Shape)] = { - require(this.binded, "bind() must be called first.") - this.modules.reverse.head.outputShapes - } - - /** - * Get current parameters. - * @return (argParams, auxParams), - * each a Map of name to parameters (in NDArray) mapping. - */ - override def getParams: (Map[String, NDArray], Map[String, NDArray]) = { - require(this.binded && this.paramsInitialized, "bind() and initParams() must be called first.") - ((Map[String, NDArray](), Map[String, NDArray]()) /: this.modules){ (result, module) => - val (arg, aux) = module.getParams - (result._1 ++ arg, result._2 ++ aux) - } - } -
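As the `add` documentation above notes, modules chain fluently and meta information travels as plain (String, Boolean) pairs; a sketch with two placeholder modules `mod1` and `mod2`:

    val seq = new SequentialModule()
    seq.add(mod1)
       .add(mod2, "take_labels" -> true, "auto_wiring" -> true)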
- /** - * Initialize the parameters and auxiliary states. - * @param initializer Called to initialize parameters if needed. - * @param argParams If not null, should be a map of existing argParams; - * initialization will be copied from it. - * @param auxParams If not null, should be a map of existing auxParams; - * initialization will be copied from it. - * @param allowMissing If true, params could contain missing values, - * and the initializer will be called to fill those missing params. - * @param forceInit If true, will force re-initialization even if already initialized. - * @param allowExtra Whether to allow extra parameters that are not needed by the symbol. - * If this is true, no error will be thrown when argParams or auxParams - * contain extra parameters that are not needed by the executor. - */ - override def initParams(initializer: Initializer = new Uniform(0.01f), - argParams: Map[String, NDArray] = null, - auxParams: Map[String, NDArray] = null, - allowMissing: Boolean = false, - forceInit: Boolean = false, - allowExtra: Boolean = false): Unit = { - if (!this.paramsInitialized || forceInit) { - require(this.binded, "call bind before initializing the parameters") - - for (module <- this.modules) { - module.initParams(initializer = initializer, argParams = argParams, - auxParams = auxParams, allowMissing = allowMissing, - forceInit = forceInit, allowExtra = allowExtra) - } - - // Internal function to help check for duplicated names; - // make sure we do not have duplicated parameter names. - def checkName(knownNames: scala.collection.mutable.Map[String, Int], - newNames: Array[String], modules: ArrayBuffer[BaseModule], i: Int): Unit = { - for (name <- newNames) { - require(!knownNames.contains(name), s"Duplicated parameter names: " + - s"name $name in layer $i (${modules(i).getClass.getName}) is already " + - s"used in layer ${knownNames(name)} " + - s"(${modules(knownNames(name)).getClass.getName})") - knownNames(name) = i - } - } - - val argNames = scala.collection.mutable.Map[String, Int]() - val auxNames = scala.collection.mutable.Map[String, Int]() - for ((module, iLayer) <- this.modules.zipWithIndex) { - val (argParams, auxParams) = module.getParams - checkName(argNames, argParams.keys.toArray, this.modules, iLayer) - checkName(auxNames, auxParams.keys.toArray, this.modules, iLayer) - } - this.paramsInitialized = true - } - } - - /** - * Bind the symbols to construct executors. This is necessary before one - * can perform computation with the module. - * @param dataShapes Typically is `dataIter.provideData`. - * @param labelShapes Typically is `dataIter.provideLabel`. - * @param forTraining Default is `true`. Whether the executors should be bound for training. - * @param inputsNeedGrad Default is `false`. - * Whether the gradients to the input data need to be computed. - * Typically this is not needed. - * But this might be needed when implementing composition of modules. - * @param forceRebind Default is `false`. - * This function does nothing if the executors are already bound. - * But if this is `true`, the executors will be forced to rebind. - * @param sharedModule Default is `None`. This is used in bucketing. - * When not `None`, the shared module essentially corresponds to - * a different bucket -- a module with a different symbol - * but with the same sets of parameters - * (e.g. unrolled RNNs with different lengths). - * @param gradReq Requirement for gradient accumulation (globally). - * Can be 'write', 'add', or 'null' (default to 'write').
- */ - override def bind(dataShapes: IndexedSeq[DataDesc], - labelShapes: Option[IndexedSeq[DataDesc]] = None, - forTraining: Boolean = true, inputsNeedGrad: Boolean = false, - forceRebind: Boolean = false, sharedModule: Option[BaseModule] = None, - gradReq: String = "write"): Unit = { - if (this.binded && !forceRebind) { - logger.warn("Already bound, ignoring bind()") - } else { - if (inputsNeedGrad) { - require(forTraining, "inputsNeedGrad can be set only for training") - } - - require(sharedModule.isEmpty, "Shared module is not supported") - require(this.modules.length > 0, "Attempting to bind an empty SequentialModule") - - this.forTraining = forTraining - this.inputsNeedGrad = inputsNeedGrad - this.binded = true - - // the same label shapes are used for all chained modules - this.labelShapesVar = labelShapes - - var myDataShapes = dataShapes - var myLabelShapes = labelShapes - var anybodyEverNeedsLabel = false - for ((module, iLayer) <- this.modules.zipWithIndex) { - val meta = this.metas(iLayer) - if (meta.contains(META_TAKE_LABELS) && meta(META_TAKE_LABELS)) { - myLabelShapes = labelShapes - anybodyEverNeedsLabel = true - } else myLabelShapes = None - - val myInputsNeedGrad = inputsNeedGrad || (forTraining && iLayer > 0) - if (meta.contains(META_AUTO_WIRING) && meta(META_AUTO_WIRING)) { - val dataNames = module.dataNames - require(dataNames.length == myDataShapes.length, - s"dataNames $dataNames and dataShapes $myDataShapes do not match") - myDataShapes = dataNames.zip(myDataShapes).map { case (newName, dataDes) => - DataDesc(newName, dataDes.shape) - } - } - - module.bind(myDataShapes, myLabelShapes, forTraining, myInputsNeedGrad, - forceRebind, sharedModule = None, gradReq) - // the output of the previous module is the data of the next module - myDataShapes = module.outputShapes.map{case (name, shape) => DataDesc(name, shape)} - } - - if (!anybodyEverNeedsLabel) { - // then I do not need label either - this.labelShapesVar = None - } - } - - } - - /** - * Install and initialize optimizers. - * @param kvstore The kvstore mode, e.g. "local". - * @param optimizer The optimizer to install. - * @param resetOptimizer Default `true`, indicating whether we should set `rescaleGrad` - * & `idx2name` for the optimizer according to the executorGroup. - * @param forceInit Default `false`, indicating whether we should force re-initializing - * the optimizer in the case an optimizer is already installed. - */ - override def initOptimizer(kvstore: String = "local", optimizer: Optimizer = new SGD(), - resetOptimizer: Boolean = true, forceInit: Boolean = false): Unit = { - require(this.binded && this.paramsInitialized, "bind() and initParams() must be called first.") - if (optimizerInitialized && !forceInit) { - logger.warn("optimizer already initialized, ignoring ...") - } else { - for (module <- this.modules) { - module.initOptimizer(kvstore, optimizer, resetOptimizer, forceInit) - } - } - this.optimizerInitialized = true - } -
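The auto-wiring step in `bind` above is essentially a rename: layer i+1's declared data names are zipped onto layer i's output shapes. Isolated as plain, runnable Scala (the hypothetical `Desc` stands in for DataDesc):

    case class Desc(name: String, shape: Seq[Int])
    def autoWire(nextDataNames: Seq[String], prevOutputs: Seq[Desc]): Seq[Desc] = {
      require(nextDataNames.length == prevOutputs.length, "name/shape arity mismatch")
      // keep the producer's shape, adopt the consumer's name
      nextDataNames.zip(prevOutputs).map { case (n, d) => Desc(n, d.shape) }
    }
    // autoWire(Seq("data"), Seq(Desc("fc1_output", Seq(32, 128))))
    //   == Seq(Desc("data", Seq(32, 128)))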
- - /** - * Forward computation. - * @param dataBatch input data - * @param isTrain Default is `None`, which means `isTrain` takes the value of `forTraining`. - */ - override def forward(dataBatch: DataBatch, isTrain: Option[Boolean] = None): Unit = { - require(this.binded && this.paramsInitialized, "bind() and initParams() must be called first.") - - var data = dataBatch - for ((module, iLayer) <- this.modules.zipWithIndex) { - module.forward(data, isTrain = isTrain) - // for the last layer, no need to do the following - if (iLayer < this.modules.length - 1) { - val out = module.getOutputs() - // need to update this, in case the internal module is using bucketing - // or similar - val dataNames = module.outputShapes.map(_._1) - require(dataNames.length == data.data.length, - s"dataNames $dataNames do not match the number of arrays in the batch") - data = new DataBatch(out.head, data.label, data.index, - data.pad, data.bucketKey, outputDesc, data.provideLabelDesc) - } - } - } - - /** - * Backward computation. - * @param outGrads Gradient on the outputs to be propagated back. - * This parameter is only needed when bind is called - * on outputs that are not a loss function. - */ - override def backward(outGrads: Array[NDArray] = null): Unit = { - require(this.binded && this.paramsInitialized, "bind() and initParams() must be called first.") - var grad = outGrads - for ((module, iLayer) <- this.modules.zipWithIndex.reverse) { - module.backward(outGrads = grad) - if (iLayer > 0) { - grad = module.getInputGradsMerged().toArray - } - } - } - - // Update parameters according to the installed optimizer and the gradients computed - // in the previous forward-backward batch. - override def update(): Unit = { - require(this.binded && this.paramsInitialized && this.optimizerInitialized, - "bind(), initParams() and initOptimizer() must be called first.") - this.modules.foreach(_.update()) - } - - /** - * Get outputs of the previous forward computation. - * @return In the case when data-parallelism is used, - * the outputs will be collected from multiple devices. - * The results will look like `[ [out1_dev1, out1_dev2], [out2_dev1, out2_dev2] ]`; - * those `NDArray` might live on different devices. - */ - def getOutputs(): IndexedSeq[IndexedSeq[NDArray]] = { - require(this.binded && this.paramsInitialized, "bind() and initParams() must be called first.") - this.modules.reverse.head.getOutputs() - } - - /** - * Get outputs of the previous forward computation. - * @return In the case when data-parallelism is used, - * the outputs will be merged from multiple devices, - * as they look like from a single executor. - * The results will look like `[out1, out2]`. - */ - def getOutputsMerged(): IndexedSeq[NDArray] = { - require(this.binded && this.paramsInitialized, "bind() and initParams() must be called first.") - this.modules.reverse.head.getOutputsMerged() - } - - /** - * Get the gradients to the inputs, computed in the previous backward computation. - * @return In the case when data-parallelism is used, - * the grads will be collected from multiple devices. - * The results will look like `[ [grad1_dev1, grad1_dev2], [grad2_dev1, grad2_dev2] ]`; - * those `NDArray` might live on different devices. - */ - def getInputGrads(): IndexedSeq[IndexedSeq[NDArray]] = { - require(this.binded && this.paramsInitialized, "bind() and initParams() must be called first.") - require(inputsNeedGrad, "Call to getInputGrads() but inputsNeedGrad is false") - this.modules.head.getInputGrads() - } -
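Taken together, forward/backward/update above support the usual hand-written loop; a sketch assuming a bound and initialized `seq: SequentialModule`, a `trainIter: DataIter`, and a metric `acc` (all placeholders, not part of this diff):

    while (trainIter.hasNext) {
      val batch = trainIter.next()
      seq.forward(batch)                  // isTrain defaults to forTraining
      seq.updateMetric(acc, batch.label)  // only modules flagged take_labels see it
      seq.backward()
      seq.update()
    }
    trainIter.reset()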
- /** - * Get the gradients to the inputs, computed in the previous backward computation. - * @return In the case when data-parallelism is used, - * the grads will be merged from multiple devices, - * as they look like from a single executor. - * The results will look like `[grad1, grad2]`. - */ - def getInputGradsMerged(): IndexedSeq[NDArray] = { - require(this.binded && this.paramsInitialized, "bind() and initParams() must be called first.") - require(inputsNeedGrad, "Call to getInputGradsMerged() but inputsNeedGrad is false") - this.modules.head.getInputGradsMerged() - } - - /** - * Evaluate and accumulate evaluation metric on outputs of the last forward computation. - * @param evalMetric the metric to accumulate into - * @param labels the labels from the data batch - */ - def updateMetric(evalMetric: EvalMetric, labels: IndexedSeq[NDArray]): Unit = { - require(this.binded && this.paramsInitialized, "bind() and initParams() must be called first.") - for ((meta, module) <- this.metas.zip(this.modules)) { - if (meta.contains(META_TAKE_LABELS) && meta(META_TAKE_LABELS)) { - module.updateMetric(evalMetric, labels) - } - } - } - - // Install monitor on all executors - def installMonitor(monitor: Monitor): Unit = { - require(this.binded, "bind() must be called first.") - this.modules.foreach(_.installMonitor(monitor)) - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/AdaDelta.scala b/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/AdaDelta.scala deleted file mode 100644 index 3afe509b9479..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/AdaDelta.scala +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.optimizer - -import org.apache.mxnet.NDArrayConversions._ -import org.apache.mxnet.util.SerializerUtils -import org.apache.mxnet.{NDArray, Optimizer} - -/** - * AdaDelta optimizer as described in Matthew D. Zeiler, 2012. - * http://arxiv.org/abs/1212.5701 - * - * @param rho Decay rate for both squared gradients and delta x. - * @param epsilon The constant as described in the paper - * @param rescaleGradient rescaling factor of gradient. - * @param clipGradient clip gradient in range [-clip_gradient, clip_gradient] - * @param wd L2 regularization coefficient added to all the weights - */ -class AdaDelta(rho: Float = 0.05f, rescaleGradient: Float = 1.0f, - epsilon: Float = 1e-8f, wd: Float = 0.0f, - clipGradient: Float = 0f) extends Optimizer { - - /** - * Update the parameters. - * @param index A unique integer key used to index the parameters - * @param weight weight ndarray - * @param grad grad ndarray - * @param state NDArray or other objects returned by initState - * The auxiliary state used in optimization.
- */ - override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = { - - var resdGrad = grad * this.rescaleGrad - - if (clipGradient != 0f) { - val oldResdGrad = resdGrad - resdGrad = NDArray.clip(resdGrad, -clipGradient, clipGradient) - oldResdGrad.dispose() - } - - val (accG, accDelta) = state.asInstanceOf[(NDArray, NDArray)] - - val newAccG = (this.rho * accG + (1.0f - this.rho) * - resdGrad * resdGrad).disposeDepsExcept(accG, resdGrad) - accG.set(newAccG) - val currentDelta = ( - NDArray.sqrt(accDelta + this.epsilon) / - NDArray.sqrt(accG + this.epsilon) * resdGrad).disposeDepsExcept(accDelta, accG, resdGrad) - val newAccDelta = (this.rho * accDelta + - (1.0f - this.rho) * currentDelta * currentDelta).disposeDepsExcept(accDelta, currentDelta) - accDelta.set(newAccDelta) - - weight *= (1 - this.wd) - weight -= currentDelta - - newAccG.dispose() - newAccDelta.dispose() - resdGrad.dispose() - currentDelta.dispose() - } - - override def createState(index: Int, weight: NDArray): (NDArray, NDArray) = { - (NDArray.zeros(weight.shape, weight.context), // accumulated g - NDArray.zeros(weight.shape, weight.context)) // accumulated delta - } - - // Dispose the state it created - override def disposeState(state: AnyRef): Unit = { - if (state != null) { - val (g, delta) = state.asInstanceOf[(NDArray, NDArray)] - g.dispose() - delta.dispose() - } - } - - override def serializeState(state: AnyRef): Array[Byte] = { - if (state != null) { - val (g, delta) = state.asInstanceOf[(NDArray, NDArray)] - SerializerUtils.serializeNDArrays(g, delta) - } else { - null - } - } - - override def deserializeState(bytes: Array[Byte]): AnyRef = { - if (bytes != null) { - val ndArrays = SerializerUtils.deserializeNDArrays(bytes) - require(ndArrays.size == 2, s"Got ${ndArrays.size} arrays, expected 2.") - val state = (ndArrays(0), ndArrays(1)) - state.asInstanceOf[AnyRef] - } else { - null - } - } -} - diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/AdaGrad.scala b/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/AdaGrad.scala deleted file mode 100644 index ed3c5139229b..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/AdaGrad.scala +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.optimizer - -import org.apache.mxnet.NDArrayConversions._ -import org.apache.mxnet.{NDArray, Optimizer} - -/** - * AdaGrad optimizer as described in Duchi, Hazan & Singer, 2011, - * "Adaptive Subgradient Methods for Online Learning and Stochastic Optimization". - * http://jmlr.org/papers/v12/duchi11a.html - * - * @param learningRate Step size. - * @param epsilon A small float number to make the update numerically stable. - * Default value is set to 1e-7. - * @param rescaleGradient rescaling factor of gradient.
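The NDArray bookkeeping above obscures how small the AdaDelta recurrence is; a scalar restatement in plain Scala (same rho/epsilon roles as the deleted code; the caller applies weight decay as w = (1 - wd) * w - delta):

    // Returns (delta, newAccG, newAccDelta) for one parameter.
    def adaDeltaStep(g: Double, accG: Double, accDelta: Double,
                     rho: Double, eps: Double): (Double, Double, Double) = {
      val accG2 = rho * accG + (1 - rho) * g * g                      // E[g^2]
      val delta = math.sqrt(accDelta + eps) / math.sqrt(accG2 + eps) * g
      val accD2 = rho * accDelta + (1 - rho) * delta * delta          // E[dx^2]
      (delta, accG2, accD2)
    }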
- * @param wd L2 regularization coefficient add to all the weights - */ -class AdaGrad(val learningRate: Float = 0.05f, rescaleGradient: Float = 1.0f, - epsilon: Float = 1e-7f, wd: Float = 0.0f) extends Optimizer { - - /** - * Update the parameters. - * @param index An unique integer key used to index the parameters - * @param weight weight ndarray - * @param grad grad ndarray - * @param state NDArray or other objects returned by initState - * The auxiliary state used in optimization. - */ - override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = { - val lr = getLr(index, this.learningRate) - - val resdGrad = rescaleGradient * grad - val history = state.asInstanceOf[NDArray] - - val gradSquared = resdGrad * resdGrad - history += gradSquared - gradSquared.dispose() - - val newWeight = (-lr * (resdGrad / NDArray.sqrt(history + this.epsilon) + this.wd * weight)) - .disposeDepsExcept(resdGrad, history, weight) - weight += newWeight - newWeight.dispose() - - resdGrad.dispose() - } - - override def createState(index: Int, weight: NDArray): NDArray = { - NDArray.zeros(weight.shape, weight.context) - } - - // Dispose the state it created - override def disposeState(state: AnyRef): Unit = { - if (state != null) { - state.asInstanceOf[NDArray].dispose() - } - } - - override def serializeState(state: AnyRef): Array[Byte] = { - if (state != null) { - state.asInstanceOf[NDArray].serialize() - } else { - null - } - } - - override def deserializeState(bytes: Array[Byte]): AnyRef = { - if (bytes != null) { - NDArray.deserialize(bytes).asInstanceOf[AnyRef] - } else { - null - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/Adam.scala b/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/Adam.scala deleted file mode 100644 index 5a8b3cb4e94c..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/Adam.scala +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.optimizer - -import org.apache.mxnet.NDArrayConversions._ -import org.apache.mxnet.util.SerializerUtils -import org.apache.mxnet.{LRScheduler, NDArray, Optimizer, ResourceScope} - -/** - * Adam optimizer as described in [King2014] - * - * [King2014] Diederik Kingma, Jimmy Ba, - * Adam: A Method for Stochastic Optimization, - * http://arxiv.org/abs/1412.6980 - * - * @param learningRate Float, Step size. - * @param beta1 Float, Exponential decay rate for the first moment estimates. - * @param beta2 Float, Exponential decay rate for the second moment estimates. 
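For comparison, the whole AdaGrad update deleted above reduces to two scalar lines (the history accumulates squared gradients and, as in the NDArray version, is updated before use):

    // Returns (newWeight, newHistory) for one parameter.
    def adaGradStep(w: Double, g: Double, hist: Double,
                    lr: Double, eps: Double, wd: Double): (Double, Double) = {
      val h2 = hist + g * g
      (w - lr * (g / math.sqrt(h2 + eps) + wd * w), h2)
    }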
- * @param epsilon Float - * @param decayFactor Float - * @param wd Float, L2 regularization coefficient add to all the weights - * @param clipGradient Float, clip gradient in range [-clip_gradient, clip_gradient] - * @param lrScheduler The learning rate scheduler - */ -class Adam(val learningRate: Float = 0.002f, beta1: Float = 0.9f, beta2: Float = 0.999f, - epsilon: Float = 1e-8f, decayFactor: Float = 1-1e-8f, wd: Float = 0.0f, - clipGradient: Float = 0f, lrScheduler: LRScheduler = null) extends Optimizer { - - protected var time: Int = 0 - protected var timeFirstIndex: Option[Int] = None - - if (lrScheduler != null) { - lrScheduler.baseLR = learningRate - } - - /** - * Update the parameters. - * @param index An unique integer key used to index the parameters - * @param weight weight ndarray - * @param grad grad ndarray - * @param state NDArray or other objects returned by initState - * The auxiliary state used in optimization. - */ - override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = { - ResourceScope.using() { - var lr = - (if (lrScheduler != null) { - val scheduledLr = lrScheduler(numUpdate) - updateCount(index) - scheduledLr - } else { - this.learningRate - }) - lr = getLr(index, lr) - - val (mean, variance) = state.asInstanceOf[(NDArray, NDArray)] - - // increment time only when the first parameters is called - timeFirstIndex match { - case Some(idx) => - if (idx == index) time += 1 - case None => - timeFirstIndex = Option(index) - time = 0 // all parameters share the same time - } - - val t1: Int = time + 1 - val learningRate = (lr * math.sqrt(1.0 - math.pow(beta2, t1)) / - (1.0 - math.pow(beta1, t1))).toFloat - val beta1t = beta1 * math.pow(decayFactor, t1 - 1).toFloat - - var resdGrad = grad * rescaleGrad - if (clipGradient != 0f) { - val oldResdGrad = resdGrad - resdGrad = NDArray.clip(resdGrad, -clipGradient, clipGradient) - } - - val meanT = (beta1t * mean + (1.0 - beta1t) * resdGrad) - val varianceT = (beta2 * variance + (1.0f - beta2) * resdGrad * resdGrad) - val step = (learningRate * meanT / (NDArray.sqrt(varianceT) + epsilon)) - - val wd = this.getWd(index, this.wd) - if (wd > 0.0f) { - val stepDelta = lr * wd * weight - step += stepDelta - } - - weight -= step - mean.set(meanT) - variance.set(varianceT) - (mean, variance) - } - } - - // Create additional optimizer state: mean, variance - override def createState(index: Int, weight: NDArray): (NDArray, NDArray) = { - timeFirstIndex = None // time is incremented only on the first index - (NDArray.zeros(weight.shape, weight.context), // mean - NDArray.zeros(weight.shape, weight.context)) // variance - } - - // Dispose the state it created - override def disposeState(state: AnyRef): Unit = { - if (state != null) { - val (mean, variance) = state.asInstanceOf[(NDArray, NDArray)] - mean.dispose() - variance.dispose() - } - } - - override def serializeState(state: AnyRef): Array[Byte] = { - if (state != null) { - val (mean, variance) = state.asInstanceOf[(NDArray, NDArray)] - SerializerUtils.serializeNDArrays(mean, variance) - } else { - null - } - } - - override def deserializeState(bytes: Array[Byte]): AnyRef = { - if (bytes != null) { - val ndArrays = SerializerUtils.deserializeNDArrays(bytes) - require(ndArrays.size == 2, s"Got ${ndArrays.size} arrays, expected 2.") - val state = (ndArrays(0), ndArrays(1)) - state.asInstanceOf[AnyRef] - } else { - null - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/DCASGD.scala 
b/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/DCASGD.scala deleted file mode 100644 index af804a580116..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/DCASGD.scala +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.optimizer - -import org.apache.mxnet.{Optimizer, LRScheduler, NDArray} -import org.apache.mxnet.NDArrayConversions._ -import org.apache.mxnet.util.SerializerUtils - -/** - * DCASGD optimizer with momentum and weight regularization. - * Implementation of paper "Asynchronous Stochastic Gradient Descent with - * Delay Compensation for Distributed Deep Learning" - */ -class DCASGD(val learningRate: Float = 0.01f, momentum: Float = 0.0f, - lamda: Float = 0.04f, wd: Float = 0.0f, clipGradient: Float = 0f, - lrScheduler: LRScheduler = null) extends Optimizer { - - if (lrScheduler != null) { - lrScheduler.baseLR = learningRate - } - - /** - * Update the parameters. - * @param index An unique integer key used to index the parameters - * @param weight weight ndarray - * @param grad grad ndarray - * @param state NDArray or other objects returned by initState - * The auxiliary state used in optimization. - */ - override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = { - var lr = - (if (lrScheduler != null) { - val scheduledLr = lrScheduler(numUpdate) - updateCount(index) - scheduledLr - } else { - this.learningRate - }) - lr = getLr(index, lr) - - val wd = getWd(index, this.wd) - var resdGrad = grad * this.rescaleGrad - if (clipGradient != 0f) { - // to get rid of memory leak - val oldResdGrad = resdGrad - resdGrad = NDArray.clip(resdGrad, -clipGradient, clipGradient) - oldResdGrad.dispose() - } - - var (mon, previousWeight) = state.asInstanceOf[(NDArray, NDArray)] - - val monUpdated = -lr * (resdGrad + wd * weight + this.lamda * - resdGrad * resdGrad * (weight - previousWeight)) - monUpdated.disposeDepsExcept(resdGrad, weight, previousWeight) - if (mon != null) { - mon *= this.momentum - mon += monUpdated - } else { - require(this.momentum == 0f, - s"momentum should be zero when state is provided.") - mon = monUpdated - } - previousWeight.set(weight) - weight += mon - resdGrad.dispose() - } - - // Create additional optimizer state such as momentum. 
- override def createState(index: Int, weight: NDArray): (NDArray, NDArray) = { - if (momentum == 0.0f) { - (null, weight.copy()) - } else { - (NDArray.zeros(weight.shape, weight.context, weight.dtype), weight.copy()) - } - } - - // Dispose the state it created - override def disposeState(state: AnyRef): Unit = { - if (state != null) { - val (mon, preWeight) = state.asInstanceOf[(NDArray, NDArray)] - if (mon != null) mon.dispose() - preWeight.dispose() - } - } - - override def serializeState(state: AnyRef): Array[Byte] = { - if (state != null) { - val (mon, preWeight) = state.asInstanceOf[(NDArray, NDArray)] - if (mon != null) SerializerUtils.serializeNDArrays(mon, preWeight) - else preWeight.serialize() - } else { - null - } - } - - override def deserializeState(bytes: Array[Byte]): AnyRef = { - if (bytes != null) { - val ndArrays = SerializerUtils.deserializeNDArrays(bytes) - require(ndArrays.size <= 2, s"Got ${ndArrays.size} arrays, expected <= 2.") - val state = { - if (ndArrays.length == 1) (null, ndArrays(0)) - else (ndArrays(0), ndArrays(1)) - } - state.asInstanceOf[AnyRef] - } else { - null - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/NAG.scala b/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/NAG.scala deleted file mode 100644 index 5ed89548ebd4..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/NAG.scala +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.optimizer - -import org.apache.mxnet.{Optimizer, LRScheduler, NDArray} -import org.apache.mxnet.NDArrayConversions._ - -/** - * SGD with nesterov. - * It is implemented according to - * https://github.com/torch/optim/blob/master/sgd.lua - * - * @param learningRate Float, Step size. - * @param momentum Float, momentum value. - * @param wd Float, L2 regularization coefficient add to all the weights - * @param clipGradient Float, clip gradient in range [-clip_gradient, clip_gradient] - * @param lrScheduler The learning rate scheduler - */ -class NAG(val learningRate: Float = 0.01f, momentum: Float = 0.0f, - wd: Float = 0.0001f, clipGradient: Float = 0f, - lrScheduler: LRScheduler = null) extends Optimizer { - - if (lrScheduler != null) { - lrScheduler.baseLR = learningRate - } - - /** - * Update the parameters. - * @param index An unique integer key used to index the parameters - * @param weight weight ndarray - * @param grad grad ndarray - * @param state NDArray or other objects returned by initState - * The auxiliary state used in optimization. 
- */ - override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = { - // TODO(bing) implement wd_bias, wd_gamma, wd_beta (copy from python package) - var lr = - (if (lrScheduler != null) { - val scheduledLr = lrScheduler(numUpdate) - updateCount(index) - scheduledLr - } else { - this.learningRate - }) - lr = getLr(index, lr) - - val wd = getWd(index, this.wd) - var resdGrad = grad * this.rescaleGrad - if (clipGradient != 0f) { - // to get rid of memory leak - val oldResdGrad = resdGrad - resdGrad = NDArray.clip(resdGrad, -clipGradient, clipGradient) - oldResdGrad.dispose() - } - - if (state != null) { - val mom = state.asInstanceOf[NDArray] - mom *= momentum - resdGrad += wd * weight - mom += resdGrad - resdGrad += momentum * mom - weight += -lr * resdGrad - } else { - require(momentum == 0f, - s"momentum should be zero when state is provided.") - // adder = -lr * (resdGrad + this.wd * weight) - // we write in this way to get rid of memory leak - val adder = this.wd * weight - adder += resdGrad - adder *= (-lr) - weight += adder - adder.dispose() - } - - resdGrad.dispose() - } - - // Create additional optimizer state such as momentum. - override def createState(index: Int, weight: NDArray): AnyRef = { - if (momentum == 0.0f) { - null - } else { - NDArray.zeros(weight.shape, weight.context) - } - } - - // Dispose the state it created - override def disposeState(state: AnyRef): Unit = { - if (state != null) { - state.asInstanceOf[NDArray].dispose() - } - } - - override def serializeState(state: AnyRef): Array[Byte] = { - if (state != null) { - state.asInstanceOf[NDArray].serialize() - } else { - null - } - } - - override def deserializeState(bytes: Array[Byte]): AnyRef = { - if (bytes != null) { - NDArray.deserialize(bytes).asInstanceOf[AnyRef] - } else { - null - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/RMSProp.scala b/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/RMSProp.scala deleted file mode 100644 index 71b20b8c356d..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/RMSProp.scala +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.optimizer - -import org.apache.mxnet.util.SerializerUtils -import org.apache.mxnet.{NDArray, Optimizer, LRScheduler} -import org.apache.mxnet.NDArrayConversions._ - -/** - * RMSProp optimizer as described in Tieleman & Hinton, 2012. - * http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013. - * - * @param learningRate Float, Step size. - * @param rho Float, decay factor of moving average for gradient, gradient^^2. - * @param momentum Float, momentum factor of moving average for gradient. 
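A scalar restatement of the NAG momentum branch above (weight decay folded into the gradient, then the Nesterov lookahead applied):

    // Returns (newWeight, newMomentum) for one parameter.
    def nagStep(w: Double, g0: Double, mom: Double,
                lr: Double, m: Double, wd: Double): (Double, Double) = {
      val g    = g0 + wd * w   // regularized gradient
      val mom2 = m * mom + g   // momentum buffer
      val look = g + m * mom2  // Nesterov lookahead correction
      (w - lr * look, mom2)
    }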
- * @param rescaleGradient Float, rescaling factor of gradient. - * @param wd Float, L2 regularization coefficient add to all the weights - * @param clipGradient Float, clip gradient in range [-clip_gradient, clip_gradient] - * @param lrScheduler The learning rate scheduler - */ -class RMSProp(val learningRate: Float = 0.002f, rescaleGradient: Float = 1.0f, - rho: Float = 0.95f, momentum: Float = 0.9f, wd: Float = 0.0f, - lrScheduler: LRScheduler = null, clipGradient: Float = 0f) extends Optimizer { - - /** - * Update the parameters. - * @param index An unique integer key used to index the parameters - * @param weight weight ndarray - * @param grad grad ndarray - * @param state NDArray or other objects returned by initState - * The auxiliary state used in optimization. - */ - override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = { - val lr = getLr(index, this.learningRate) - val (n, g, delta) = state.asInstanceOf[(NDArray, NDArray, NDArray)] - val wd = getWd(index, this.wd) - - var resdGrad = grad * this.rescaleGrad - if (clipGradient != 0f) { - val oldResdGrad = resdGrad - resdGrad = NDArray.clip(resdGrad, -clipGradient, clipGradient) - oldResdGrad.dispose() - } - - val nUpdated = ((1 - this.rho) * (resdGrad * resdGrad) + this.rho * n) - .disposeDepsExcept(resdGrad, n) - n.set(nUpdated) - nUpdated.dispose() - - val gUpdated = ((1 - this.rho) * resdGrad + this.rho * g) - .disposeDepsExcept(resdGrad, g) - g.set(gUpdated) - gUpdated.dispose() - - val deltaUpdated = - (this.momentum * delta - lr * (resdGrad / NDArray.sqrt(n - g * g + 1e-4f) + wd * weight)) - .disposeDepsExcept(delta, resdGrad, n, g, weight) - delta.set(deltaUpdated) - deltaUpdated.dispose() - - weight += delta - resdGrad.dispose() - } - - override def createState(index: Int, weight: NDArray): (NDArray, NDArray, NDArray) = { - (NDArray.zeros(weight.shape, weight.context), // n - NDArray.zeros(weight.shape, weight.context), // g - NDArray.zeros(weight.shape, weight.context)) // delta - } - - // Dispose the state it created - override def disposeState(state: AnyRef): Unit = { - if (state != null) { - val (n, g, delta) = state.asInstanceOf[(NDArray, NDArray, NDArray)] - n.dispose() - g.dispose() - delta.dispose() - } - } - - override def serializeState(state: AnyRef): Array[Byte] = { - if (state != null) { - val (n, g, delta) = state.asInstanceOf[(NDArray, NDArray, NDArray)] - SerializerUtils.serializeNDArrays(n, g, delta) - } else { - null - } - } - - override def deserializeState(bytes: Array[Byte]): AnyRef = { - if (bytes != null) { - val ndArrays = SerializerUtils.deserializeNDArrays(bytes) - require(ndArrays.size == 3, s"Got ${ndArrays.size} arrays, expected 3.") - val state = (ndArrays(0), ndArrays(1), ndArrays(2)) - state.asInstanceOf[AnyRef] - } else { - null - } - } -} - diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/SGD.scala b/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/SGD.scala deleted file mode 100644 index d349feac3e93..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/SGD.scala +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
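The three-buffer RMSProp variant deleted above (the Graves 2013 form) in scalar terms, including the 1e-4 floor the code keeps inside the square root:

    // Returns (newWeight, n2, gAvg2, delta2) for one parameter.
    def rmsPropStep(w: Double, g: Double, n: Double, gAvg: Double, delta: Double,
                    lr: Double, rho: Double, m: Double, wd: Double)
        : (Double, Double, Double, Double) = {
      val n2     = (1 - rho) * g * g + rho * n        // second-moment average
      val gAvg2  = (1 - rho) * g + rho * gAvg         // first-moment average
      val delta2 = m * delta -
        lr * (g / math.sqrt(n2 - gAvg2 * gAvg2 + 1e-4) + wd * w)
      (w + delta2, n2, gAvg2, delta2)
    }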
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.optimizer - -import org.apache.mxnet._ -import org.apache.mxnet.NDArrayConversions._ - -/** - * A very simple SGD optimizer with momentum and weight regularization. - */ -class SGD(val learningRate: Float = 0.01f, momentum: Float = 0.0f, - wd: Float = 0.0001f, clipGradient: Float = 0f, - lrScheduler: LRScheduler = null) extends Optimizer { - - if (lrScheduler != null) { - lrScheduler.baseLR = learningRate - } - - /** - * Update the parameters. - * @param index An unique integer key used to index the parameters - * @param weight weight ndarray - * @param grad grad ndarray - * @param state NDArray or other objects returned by initState - * The auxiliary state used in optimization. - */ - override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = { - // TODO(bing) implement wd_bias, wd_gamma, wd_beta (copy from python package) - var lr = { - if (lrScheduler != null) { - val scheduledLr = lrScheduler(numUpdate) - updateCount(index) - scheduledLr - } else { - this.learningRate - } - } - lr = getLr(index, lr) - - val wd = getWd(index, this.wd) - var resdGrad = grad * this.rescaleGrad - if (clipGradient != 0f) { - // to get rid of memory leak - val oldResdGrad = resdGrad - resdGrad = NDArray.clip(resdGrad, -clipGradient, clipGradient) - oldResdGrad.dispose() - } - - if (state != null) { - val mom = state.asInstanceOf[NDArray] - mom *= momentum - // adder = -lr * (resdGrad + wd * weight) - // we write in this way to get rid of memory leak - val adder = wd * weight - adder += resdGrad - adder *= (-lr) - mom += adder - weight += mom - adder.dispose() - } else { - require(momentum == 0f, - s"momentum should be zero when state is provided.") - // adder = -lr * (resdGrad + this.wd * weight) - // we write in this way to get rid of memory leak - val adder = this.wd * weight - adder += resdGrad - adder *= (-lr) - weight += adder - adder.dispose() - } - - resdGrad.dispose() - } - - // Create additional optimizer state such as momentum. 
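The momentum branch of the SGD update above, as a two-line scalar recurrence (the no-state branch is the same with m = 0):

    // Returns (newWeight, newMomentum) for one parameter.
    def sgdStep(w: Double, g: Double, mom: Double,
                lr: Double, m: Double, wd: Double): (Double, Double) = {
      val mom2 = m * mom - lr * (g + wd * w)
      (w + mom2, mom2)
    }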
- override def createState(index: Int, weight: NDArray): AnyRef = { - if (momentum == 0.0f) { - null - } else { - val s = NDArray.zeros(weight.shape, weight.context) - // this is created on the fly and shared between runs, - // we don't want it to be dispose from the scope - // and should be handled by the dispose - val scope = ResourceScope.getCurrentScope() - if (scope.isDefined) scope.get.remove(s) - s - } - } - - // Dispose the state it created - override def disposeState(state: AnyRef): Unit = { - if (state != null) { - state.asInstanceOf[NDArray].dispose() - } - } - - override def serializeState(state: AnyRef): Array[Byte] = { - if (state != null) { - state.asInstanceOf[NDArray].serialize() - } else { - null - } - } - - override def deserializeState(bytes: Array[Byte]): AnyRef = { - if (bytes != null) { - NDArray.deserialize(bytes).asInstanceOf[AnyRef] - } else { - null - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/SGLD.scala b/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/SGLD.scala deleted file mode 100644 index 0765716c4b77..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/optimizer/SGLD.scala +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.optimizer - -import org.apache.mxnet.{Optimizer, LRScheduler, NDArray} -import org.apache.mxnet.NDArrayConversions._ -import org.apache.mxnet.Random - -/** - * Stochastic Langevin Dynamics Updater to sample from a distribution. - * - * @param learningRate Float, Step size. - * @param rescaleGradient Float, rescaling factor of gradient. - * @param wd Float, L2 regularization coefficient add to all the weights - * @param clipGradient Float, clip gradient in range [-clip_gradient, clip_gradient] - * @param lrScheduler The learning rate scheduler - */ -class SGLD(val learningRate: Float = 0.01f, rescaleGradient: Float = 1.0f, - wd: Float = 0.0001f, clipGradient: Float = 0f, - lrScheduler: LRScheduler = null) extends Optimizer { - - if (lrScheduler != null) { - lrScheduler.baseLR = learningRate - } - - /** - * Update the parameters. - * @param index An unique integer key used to index the parameters - * @param weight weight ndarray - * @param grad grad ndarray - * @param state NDArray or other objects returned by initState - * The auxiliary state used in optimization. 
- */ - override def update(index: Int, weight: NDArray, grad: NDArray, state: AnyRef): Unit = { - var lr = - (if (lrScheduler != null) { - val scheduledLr = lrScheduler(numUpdate) - updateCount(index) - scheduledLr - } else { - this.learningRate - }) - lr = getLr(index, lr) - - val wd = getWd(index, this.wd) - var resdGrad = grad * this.rescaleGrad - if (clipGradient != 0f) { - // to get rid of memory leak - val oldResdGrad = resdGrad - resdGrad = NDArray.clip(resdGrad, -clipGradient, clipGradient) - oldResdGrad.dispose() - } - - // use the per-index wd computed by getWd above, not the raw this.wd - val adder = wd * weight - adder += resdGrad - adder *= -(lr / 2) - val norm = Random.normal(0f, Math.sqrt(lr).toFloat, weight.shape, weight.context) - adder += norm - weight += adder - adder.dispose() - norm.dispose() - resdGrad.dispose() - } - - // Create additional optimizer state such as momentum. - override def createState(index: Int, weight: NDArray): AnyRef = { - null - } - - // Dispose the state it created - override def disposeState(state: AnyRef): Unit = {} - - override def serializeState(state: AnyRef): Array[Byte] = { - throw new UnsupportedOperationException("SGLD does not have states") - } - - override def deserializeState(bytes: Array[Byte]): AnyRef = { - throw new UnsupportedOperationException("SGLD does not have states") - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/util/NativeLibraryLoader.scala b/scala-package/core/src/main/scala/org/apache/mxnet/util/NativeLibraryLoader.scala deleted file mode 100644 index a523e2d35d19..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/util/NativeLibraryLoader.scala +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.util - -import java.io._ - -import org.slf4j.{Logger, LoggerFactory} - -private[mxnet] class NativeLibraryLoader - -private[mxnet] object NativeLibraryLoader { - private val logger: Logger = LoggerFactory.getLogger(classOf[NativeLibraryLoader]) - private val libPathInJar = "/lib/native/" - private val _tempDir: File = - try { - val tempDir = File.createTempFile("mxnet", "") - if (!tempDir.delete || !tempDir.mkdir) { - throw new IOException(s"Couldn't create directory ${tempDir.getAbsolutePath}") - } - - /* - * Different cleanup strategies for Windows and Linux.
- * TODO: shutdown hook won't work on Windows - */ - if (getUnifiedOSName != "Windows") { - Runtime.getRuntime.addShutdownHook(new Thread() { - override def run(): Unit = { - for (f <- tempDir.listFiles()) { - logger.info("Deleting " + f.getAbsolutePath) - if (!f.delete()) { - logger.warn(s"Couldn't delete temporary file ${f.getAbsolutePath}") - } - } - logger.info(s"Deleting ${tempDir.getAbsolutePath}") - if (!tempDir.delete()) { - logger.warn(s"Couldn't delete temporary directory ${tempDir.getAbsolutePath}") - } - } - }) - tempDir - } else { - throw new RuntimeException("Windows not supported yet.") - } - } catch { - case ex: IOException => - logger.error("Couldn't create temporary directory: {}", ex.getMessage) - null - } - - /** - * Find the library as a resource in the jar, copy it to a tempfile - * and load it using System.load(). The name of the library has to be the - * base name; it is mapped to the corresponding system name using - * System.mapLibraryName(). E.g., the library "foo" is called "libfoo.so" - * under Linux and "foo.dll" under Windows, but you just have to pass "foo" to - * loadLibrary(). - * - * @param libname basename of the library - * @throws UnsatisfiedLinkError if the library cannot be found - */ - @throws(classOf[UnsatisfiedLinkError]) - def loadLibrary(libname: String) { - val mappedLibname = System.mapLibraryName(libname) - val loadLibname: String = - if (mappedLibname.endsWith("dylib")) { - logger.info("Replaced .dylib with .jnilib") - mappedLibname.replace(".dylib", ".jnilib") - } else { - mappedLibname - } - logger.debug(s"Attempting to load $loadLibname") - val libFileInJar = libPathInJar + loadLibname - saveLibraryToTemp("libmxnet.so", "/lib/native/libmxnet.so", true) - saveLibraryToTemp("libtvm_runtime.so", "/lib/native/libtvm_runtime.so", false) - saveLibraryToTemp("libgfortran.so.4", "/lib/native/libgfortran.so.4", false) - saveLibraryToTemp("libquadmath.so.0", "/lib/native/libquadmath.so.0", false) - val tempfile: File = saveLibraryToTemp(libname, libFileInJar, true) - - loadLibraryFromFile(libname, tempfile) - } - - /** - * Translate all Windows variants ("Windows XP", "Windows Vista", "Windows 7", etc.) - * to plain "Windows". - */ - private def unifyOSName(osname: String): String = { - if (osname.startsWith("Windows")) "Windows" else osname - } - - private def getUnifiedOSName: String = { - unifyOSName(System.getProperty("os.name")) - } - - @throws(classOf[IOException]) - private def createTempFile(name: String): File = { - new File(_tempDir, name) - } - - /** - * Load a system library from a local file. - * - * @param libname name of the library (just used in constructing the library name) - * @param tempfile File pointing to the library - */ - private def loadLibraryFromFile(libname: String, tempfile: File) { - try { - logger.debug("Loading library from {}", tempfile.getPath) - System.load(tempfile.getPath) - } catch { - case ule: UnsatisfiedLinkError => - logger.error("Couldn't load copied link file: {}", ule.toString) - throw ule - } - } - - /** - * Save a library resource from the jar to a temp file so it can be loaded.
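The loader's core mechanism above is jar-resource extraction; the same pattern stands alone as sketched below (resource paths and names are illustrative, not the deleted loader's API):

    import java.io.{File, FileOutputStream, InputStream}

    // Copy a classpath resource to a temp file, then System.load it.
    def loadFromResource(resource: String, libFileName: String): Unit = {
      val is: InputStream = getClass.getResourceAsStream(resource)
      require(is != null, s"resource $resource not found on classpath")
      val tmp = File.createTempFile("native-", "-" + libFileName)
      tmp.deleteOnExit()
      val os = new FileOutputStream(tmp)
      try {
        val buf = new Array[Byte](8192)
        var len = is.read(buf)
        while (len > 0) { os.write(buf, 0, len); len = is.read(buf) }
      } finally { os.close(); is.close() }
      System.load(tmp.getAbsolutePath)
    }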
- * - * @param libname name of the library (just used in constructing the library name) - * @param resource String resource path in the jar file - * @param required true if library is required - */ - private def saveLibraryToTemp(libname: String, resource: String, required: Boolean): File = { - try { - val is: InputStream = getClass.getResourceAsStream(resource) - if (is == null) { - if (required) { - throw new UnsatisfiedLinkError(s"Couldn't find the resource $resource") - } else { - null - } - } else { - val tempfile: File = new File(_tempDir, libname) - val os: OutputStream = new FileOutputStream(tempfile) - logger.debug("tempfile.getPath() = {}", tempfile.getPath) - val savedTime: Long = System.currentTimeMillis - val buf: Array[Byte] = new Array[Byte](8192) - var len: Int = is.read(buf) - while (len > 0) { - os.write(buf, 0, len) - len = is.read(buf) - } - os.close() - is.close() - val seconds: Double = (System.currentTimeMillis - savedTime).toDouble / 1e3 - logger.debug(s"Copying $libname took $seconds seconds.") - tempfile - } - } catch { - case io: IOException => - throw new UnsatisfiedLinkError(s"Could not create temp file for $libname") - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/util/OptionConversion.scala b/scala-package/core/src/main/scala/org/apache/mxnet/util/OptionConversion.scala deleted file mode 100644 index c780a9605b12..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/util/OptionConversion.scala +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.util - -import scala.language.implicitConversions - -object OptionConversion { - implicit def someWrapper[A](noSome : A) : Option[A] = Option(noSome) -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/util/SerializerUtils.scala b/scala-package/core/src/main/scala/org/apache/mxnet/util/SerializerUtils.scala deleted file mode 100644 index 343362e05cb9..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/util/SerializerUtils.scala +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.util - -import java.io.{ObjectInputStream, ByteArrayInputStream, ObjectOutputStream, ByteArrayOutputStream} - -import org.apache.mxnet.NDArray - -private[mxnet] object SerializerUtils { - /** - * Serialize NDArrays to bytes - * @param arrays NDArrays to be serialized - * @return serialized bytes - */ - def serializeNDArrays(arrays: NDArray*): Array[Byte] = { - val bos = new ByteArrayOutputStream() - try { - val out = new ObjectOutputStream(bos) - out.writeInt(arrays.length) - arrays.foreach(array => { - val sArray = array.serialize() - out.writeInt(sArray.length) - out.write(sArray) - }) - out.flush() - bos.toByteArray - } finally { - try { - bos.close() - } catch { - case _: Throwable => - } - } - } - - /** - * Deserialize bytes to a list of NDArrays. - * This should be used with SerializerUtils.serializeNDArrays - * @param bytes serialized NDArray bytes - * @return deserialized NDArrays - */ - def deserializeNDArrays(bytes: Array[Byte]): IndexedSeq[NDArray] = { - if (bytes != null) { - val bis = new ByteArrayInputStream(bytes) - var in: ObjectInputStream = null - try { - in = new ObjectInputStream(bis) - val numArrays = in.readInt() - (0 until numArrays).map(_ => { - val len = in.readInt() - val bytes = Array.fill[Byte](len)(0) - in.readFully(bytes) - NDArray.deserialize(bytes) - }) - } finally { - try { - if (in != null) { - in.close() - } - } catch { - case _: Throwable => - } - } - } else { - null - } - } -} diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/util/WarnIfNotDisposed.scala b/scala-package/core/src/main/scala/org/apache/mxnet/util/WarnIfNotDisposed.scala deleted file mode 100644 index dc2c269932c2..000000000000 --- a/scala-package/core/src/main/scala/org/apache/mxnet/util/WarnIfNotDisposed.scala +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.slf4j.{Logger, LoggerFactory} -import scala.util.Try -import scala.collection._ - -private object WarnIfNotDisposed { - private val traceProperty = "mxnet.traceLeakedObjects" - - private val logger: Logger = LoggerFactory.getLogger(classOf[WarnIfNotDisposed]) - - // This set represents the list of classes we've logged a warning about if we're not running - // in tracing mode. This is used to ensure we only log once. 
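SerializerUtils above frames arrays as a count followed by (length, bytes) records. The framing idea in isolation, over raw byte arrays (note: a sketch of the layout only, not byte-compatible with the deleted code, which wrote through ObjectOutputStream):

    import java.io.{ByteArrayInputStream, ByteArrayOutputStream,
                    DataInputStream, DataOutputStream}

    def pack(chunks: Seq[Array[Byte]]): Array[Byte] = {
      val bos = new ByteArrayOutputStream()
      val out = new DataOutputStream(bos)
      out.writeInt(chunks.length)                                 // record count
      chunks.foreach { c => out.writeInt(c.length); out.write(c) }
      out.flush()
      bos.toByteArray
    }

    def unpack(bytes: Array[Byte]): IndexedSeq[Array[Byte]] = {
      val in = new DataInputStream(new ByteArrayInputStream(bytes))
      (0 until in.readInt()).map { _ =>
        val buf = new Array[Byte](in.readInt())
        in.readFully(buf)
        buf
      }
    }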
- // Don't need to synchronize on this set as it's usually used from a single finalizer thread. - private val classesWarned = mutable.Set.empty[String] - - lazy val tracingEnabled = { - val value = Try(System.getProperty(traceProperty).toBoolean).getOrElse(false) - if (value) { - logger.info("Leaked object tracing is enabled (property {} is set)", traceProperty) - } - value - } -} - -// scalastyle:off finalize -protected trait WarnIfNotDisposed { - import WarnIfNotDisposed.logger - import WarnIfNotDisposed.traceProperty - import WarnIfNotDisposed.classesWarned - - protected def isDisposed: Boolean - - protected val creationTrace: Option[Array[StackTraceElement]] = if (tracingEnabled) { - Some(Thread.currentThread().getStackTrace()) - } else { - None - } - - override protected def finalize(): Unit = { - if (!isDisposed) logDisposeWarning() - - super.finalize() - } - - // overridable for testing - protected def tracingEnabled = WarnIfNotDisposed.tracingEnabled - - protected def logDisposeWarning(): Unit = { - // The ":Any" casts below are working around the Slf4j Java API having overloaded methods that - // Scala doesn't resolve automatically. - if (creationTrace.isDefined) { - logger.warn( - "LEAK: An instance of {} was not disposed. Creation point of this resource was:\n\t{}", - getClass(), creationTrace.get.mkString("\n\t"): Any) - } else { - // Tracing disabled but we still warn the first time we see a leak to ensure the code author - // knows. We could warn every time but this can be very noisy. - val className = getClass().getName() - if (!classesWarned.contains(className)) { - logger.warn( - "LEAK: [one-time warning] An instance of {} was not disposed. " + // - "Set property {} to true to enable tracing", - className, traceProperty: Any) - - classesWarned += className - } - } - } -} -// scalastyle:on finalize diff --git a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ContextTest.java b/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ContextTest.java deleted file mode 100644 index abd4b5edb1e6..000000000000 --- a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ContextTest.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
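The WarnIfNotDisposed trait deleted above hooks finalize() to warn about native resources that were garbage-collected without an explicit dispose(). A minimal sketch of how a resource class would mix it in; NativeBuffer is a hypothetical example class, not an API from this diff:

class NativeBuffer extends WarnIfNotDisposed {
  private var disposed = false
  override protected def isDisposed: Boolean = disposed
  def dispose(): Unit = { disposed = true }
}

// A NativeBuffer collected before dispose() triggers a one-time LEAK warning;
// running the JVM with -Dmxnet.traceLeakedObjects=true additionally records and
// logs the creation stack trace of every leaked instance.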
- */ - -package org.apache.mxnet.javaapi; - -import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class ContextTest { - - @Test - public void testCPU() { - Context.cpu(); - } - - @Test - public void testDefault() { - Context.defaultCtx(); - } - - @Test - public void testConstructor() { - new Context("cpu", 0); - } -} diff --git a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/DTypeTest.java b/scala-package/core/src/test/java/org/apache/mxnet/javaapi/DTypeTest.java deleted file mode 100644 index 2e356edf5326..000000000000 --- a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/DTypeTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.javaapi; - -import org.junit.Test; - -public class DTypeTest { - - @Test - public void Float16Test() { - DType.Float16(); - } - - @Test - public void Float32Test() { - DType.Float32(); - } - - @Test - public void Float64Test() { - DType.Float64(); - } - - @Test - public void UnknownTest() { - DType.Unknown(); - } - - @Test - public void Int32Test() { - DType.Int32(); - } - - @Test - public void UInt8Test() { - DType.UInt8(); - } -} diff --git a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/IOTest.java b/scala-package/core/src/test/java/org/apache/mxnet/javaapi/IOTest.java deleted file mode 100644 index f53b5c405642..000000000000 --- a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/IOTest.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
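ContextTest and DTypeTest above are pure smoke tests over the Java API wrappers; the same surface is reachable from Scala. A minimal sketch limited to the calls those deleted tests exercised:

import org.apache.mxnet.javaapi.{Context, DType}

val ctx = new Context("cpu", 0) // the constructor ContextTest covered
val default = Context.defaultCtx()
val f32 = DType.Float32()
val f64 = DType.Float64()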
- */ - -package org.apache.mxnet.javaapi; - -import org.junit.Test; - -public class IOTest { - - @Test - public void testConstructor() { - Shape inputShape = new Shape(new int[] {1, 3, 512, 512}); - new DataDesc("data", inputShape, DType.Float32(), "NCHW"); - } - - @Test - public void testgetBatchAxis() { - DataDesc.getBatchAxis("NCHW"); - } - -} diff --git a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ImageTest.java b/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ImageTest.java deleted file mode 100644 index f5515dc053a8..000000000000 --- a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ImageTest.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.javaapi; - -import org.apache.commons.io.FileUtils; -import org.junit.BeforeClass; -import org.junit.Test; - -import javax.imageio.ImageIO; -import java.awt.image.BufferedImage; -import java.io.File; -import java.net.URL; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.assertArrayEquals; - -public class ImageTest { - - private static String imLocation; - - private static void downloadUrl(String url, String filePath, int maxRetry) throws Exception { - File tmpFile = new File(filePath); - boolean success = false; - if (!tmpFile.exists()) { - while (maxRetry > 0 && !success) { - try { - FileUtils.copyURLToFile(new URL(url), tmpFile); - success = true; - } catch (Exception e) { - maxRetry -= 1; - } - } - } else { - success = true; - } - if (!success) throw new Exception(url + " download failed!"); - } - - @BeforeClass - public static void downloadFile() throws Exception { - String tempDirPath = System.getProperty("java.io.tmpdir"); - imLocation = tempDirPath + "/inputImages/Pug-Cookie.jpg"; - downloadUrl("https://s3.amazonaws.com/model-server/inputs/Pug-Cookie.jpg", - imLocation, 3); - } - - @Test - public void testImageProcess() throws Exception { - NDArray nd = Image.imRead(imLocation, 1, true); - assertArrayEquals(nd.shape().toArray(), new int[]{576, 1024, 3}); - NDArray nd2 = Image.imResize(nd, 224, 224, null); - assertArrayEquals(nd2.shape().toArray(), new int[]{224, 224, 3}); - NDArray cropped = Image.fixedCrop(nd, 0, 0, 224, 224); - Image.toImage(cropped); - BufferedImage buf = ImageIO.read(new File(imLocation)); - Map<String, Integer> map = new HashMap<>(); - map.put("xmin", 190); - map.put("xmax", 850); - map.put("ymin", 50); - map.put("ymax", 450); - List<Map<String, Integer>> box = new ArrayList<>(); - box.add(map); - List<String> names = new ArrayList<>(); - names.add("pug"); - Image.drawBoundingBox(buf, box, names); - } -} diff --git a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/NDArrayTest.java
b/scala-package/core/src/test/java/org/apache/mxnet/javaapi/NDArrayTest.java deleted file mode 100644 index 1b7042d49795..000000000000 --- a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/NDArrayTest.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.javaapi; - -import org.junit.Test; - -import java.util.Arrays; -import java.util.List; -import org.apache.mxnet.javaapi.NDArrayBase.*; - -import static org.junit.Assert.assertTrue; - -public class NDArrayTest { - @Test - public void testCreateNDArray() { - NDArray nd = new NDArray(new float[]{1.0f, 2.0f, 3.0f}, - new Shape(new int[]{1, 3}), - new Context("cpu", 0)); - int[] arr = new int[]{1, 3}; - assertTrue(Arrays.equals(nd.shape().toArray(), arr)); - assertTrue(nd.at(0).at(0).toArray()[0] == 1.0f); - List<Float> list = Arrays.asList(1.0f, 2.0f, 3.0f); - // A second way of creating an NDArray - nd = NDArray.array(list, - new Shape(new int[]{1, 3}), - new Context("cpu", 0)); - assertTrue(Arrays.equals(nd.shape().toArray(), arr)); - - List<Double> list2 = Arrays.asList(1d, 1d, 1d); - nd = NDArray.arrayWithDouble(list2, - new Shape(new int[]{1, 3}), - new Context("cpu", 0)); - - // Float64 assertion - assertTrue(nd.dtype() == DType.Float64()); - - } - - @Test - public void testZeroOneEmpty() { - NDArray ones = NDArray.ones(new Context("cpu", 0), new int[]{100, 100}); - NDArray zeros = NDArray.zeros(new Context("cpu", 0), new int[]{100, 100}); - NDArray empty = NDArray.empty(new Context("cpu", 0), new int[]{100, 100}); - int[] arr = new int[]{100, 100}; - assertTrue(Arrays.equals(ones.shape().toArray(), arr)); - assertTrue(Arrays.equals(zeros.shape().toArray(), arr)); - assertTrue(Arrays.equals(empty.shape().toArray(), arr)); - } - - @Test - public void testComparison() { - NDArray nd = new NDArray(new float[]{1.0f, 2.0f, 3.0f}, new Shape(new int[]{3}), new Context("cpu", 0)); - NDArray nd2 = new NDArray(new float[]{3.0f, 4.0f, 5.0f}, new Shape(new int[]{3}), new Context("cpu", 0)); - nd = nd.add(nd2); - float[] greater = new float[]{1, 1, 1}; - assertTrue(Arrays.equals(nd.greater(nd2).toArray(), greater)); - nd = nd.subtract(nd2); - nd = nd.subtract(nd2); - float[] lesser = new float[]{0, 0, 0}; - assertTrue(Arrays.equals(nd.greater(nd2).toArray(), lesser)); - - NDArray nd3 = new NDArray(new double[]{1.0, 2.0, 3.0}, new Shape(new int[]{3}), new Context("cpu", 0)); - nd3 = nd3.add(1.0); - double[] expected = new double[] {2, 3, 4}; - assertTrue(Arrays.equals(expected, nd3.toFloat64Array())); - - } - - @Test - public void testGenerated() { - NDArray$ NDArray = NDArray$.MODULE$; - float[] arr = new float[]{1.0f, 2.0f, 3.0f}; - NDArray nd = new NDArray(arr, new Shape(new int[]{3}), new Context("cpu", 0)); - float result = NDArray.norm(new
normParam(nd))[0].toArray()[0]; - float cal = 0.0f; - for (float ele : arr) { - cal += ele * ele; - } - cal = (float) Math.sqrt(cal); - assertTrue(Math.abs(result - cal) < 1e-5); - NDArray dotResult = new NDArray(new float[]{0}, new Shape(new int[]{1}), new Context("cpu", 0)); - NDArray.dot(new dotParam(nd, nd).setOut(dotResult)); - assertTrue(Arrays.equals(dotResult.toArray(), new float[]{14.0f})); - } -} diff --git a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ResourceScopeTestSuite.java b/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ResourceScopeTestSuite.java deleted file mode 100644 index 1c246d870e28..000000000000 --- a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ResourceScopeTestSuite.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package org.apache.mxnet.javaapi; - -import org.apache.mxnet.NativeResourceRef; -import org.apache.mxnet.ResourceScope; -import org.junit.Test; - -import java.util.*; -import java.util.concurrent.Callable; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class ResourceScopeTestSuite { - - /** - * This is a placeholder class to test out whether NDArray References get collected or not when using - * try-with-resources in Java. 
- * - */ - class TestNDArray { - NDArray selfArray; - - public TestNDArray(Context context, int[] shape) { - this.selfArray = NDArray.ones(context, shape); - } - - public boolean verifyIsDisposed() { - return this.selfArray.nd().isDisposed(); - } - - public NativeResourceRef getNDArrayReference() { - return this.selfArray.nd().ref(); - } - } - - @Test - public void testNDArrayAutoRelease() { - TestNDArray test = null; - - try (ResourceScope scope = new ResourceScope()) { - test = new TestNDArray(Context.cpu(), new int[]{100, 100}); - } - - assertTrue(test.verifyIsDisposed()); - } - - @Test - public void testObjectReleaseFromList() { - List<TestNDArray> list = new ArrayList<>(); - - try (ResourceScope scope = new ResourceScope()) { - for (int i = 0; i < 10; i++) { - list.add(new TestNDArray(Context.cpu(), new int[] {100, 100})); - } - } - - assertEquals(list.size(), 10); - for (TestNDArray item : list) { - assertTrue(item.verifyIsDisposed()); - } - } - - @Test - public void testObjectReleaseFromMap() { - Map<String, TestNDArray> stringToNDArrayMap = new HashMap<>(); - - try (ResourceScope scope = new ResourceScope()) { - for (int i = 0; i < 10; i++) { - stringToNDArrayMap.put(String.valueOf(i), new TestNDArray(Context.cpu(), new int[] {i, i})); - } - } - - assertEquals(stringToNDArrayMap.size(), 10); - for (Map.Entry<String, TestNDArray> entry : stringToNDArrayMap.entrySet()) { - assertTrue(entry.getValue().verifyIsDisposed()); - } - - Map<TestNDArray, String> ndArrayToStringMap = new HashMap<>(); - - try (ResourceScope scope = new ResourceScope()) { - for (int i = 0; i < 10; i++) { - ndArrayToStringMap.put(new TestNDArray(Context.cpu(), new int[] {i, i}), String.valueOf(i)); - } - } - - assertEquals(ndArrayToStringMap.size(), 10); - for (Map.Entry<TestNDArray, String> entry : ndArrayToStringMap.entrySet()) { - assertTrue(entry.getKey().verifyIsDisposed()); - } - - } -} diff --git a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ShapeTest.java b/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ShapeTest.java deleted file mode 100644 index 8f045b5687ab..000000000000 --- a/scala-package/core/src/test/java/org/apache/mxnet/javaapi/ShapeTest.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
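The try-with-resources pattern tested above works because ResourceScope is AutoCloseable: every NDArray created while a scope is open is registered with it and freed when the scope closes. A rough Scala-side sketch, assuming the ResourceScope.using helper (not shown in this diff):

val kept = ResourceScope.using() {
  val a = NDArray.ones(Shape(100, 100))
  a * 2 // the returned NDArray escapes the scope; intermediates such as a are disposed
}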
- */ - -package org.apache.mxnet.javaapi; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import org.junit.Test; -import java.util.ArrayList; -import java.util.Arrays; - -public class ShapeTest { - @Test - public void testArrayConstructor() - { - new Shape(new int[] {3, 4, 5}); - } - - @Test - public void testListConstructor() - { - ArrayList<Integer> arrList = new ArrayList<Integer>(); - arrList.add(3); - arrList.add(4); - arrList.add(5); - new Shape(arrList); - } - - @Test - public void testApply() - { - Shape jS = new Shape(new int[] {3, 4, 5}); - assertEquals(jS.apply(1), 4); - } - - @Test - public void testGet() - { - Shape jS = new Shape(new int[] {3, 4, 5}); - assertEquals(jS.get(1), 4); - } - - @Test - public void testSize() - { - Shape jS = new Shape(new int[] {3, 4, 5}); - assertEquals(jS.size(), 3); - } - - @Test - public void testLength() - { - Shape jS = new Shape(new int[] {3, 4, 5}); - assertEquals(jS.length(), 3); - } - - @Test - public void testDrop() - { - Shape jS = new Shape(new int[] {3, 4, 5}); - ArrayList<Integer> l = new ArrayList<Integer>(); - l.add(4); - l.add(5); - assertTrue(jS.drop(1).toVector().equals(l)); - } - - @Test - public void testSlice() - { - Shape jS = new Shape(new int[] {3, 4, 5}); - ArrayList<Integer> l = new ArrayList<Integer>(); - l.add(4); - assertTrue(jS.slice(1, 2).toVector().equals(l)); - } - - @Test - public void testProduct() - { - Shape jS = new Shape(new int[] {3, 4, 5}); - assertEquals(jS.product(), 60); - } - - @Test - public void testHead() - { - Shape jS = new Shape(new int[] {3, 4, 5}); - assertEquals(jS.head(), 3); - } - - @Test - public void testToArray() - { - Shape jS = new Shape(new int[] {3, 4, 5}); - assertTrue(Arrays.equals(jS.toArray(), new int[] {3, 4, 5})); - } - - @Test - public void testToVector() - { - Shape jS = new Shape(new int[] {3, 4, 5}); - ArrayList<Integer> l = new ArrayList<Integer>(); - l.add(3); - l.add(4); - l.add(5); - assertTrue(jS.toVector().equals(l)); - } -} diff --git a/scala-package/core/src/test/resources/log4j.properties b/scala-package/core/src/test/resources/log4j.properties deleted file mode 100644 index d82fd7ea4f3d..000000000000 --- a/scala-package/core/src/test/resources/log4j.properties +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License.
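ShapeTest above pins down the small algebra on Shape, and the Scala API it wraps reads the same way. A quick sketch restating the expectations from the deleted test:

val s = Shape(3, 4, 5)
s(1)          // 4
s.product     // 60, i.e. 3 * 4 * 5
s.drop(1)     // Shape(4, 5)
s.slice(1, 2) // Shape(4)
s.toArray     // Array(3, 4, 5)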
- -# for development debugging -log4j.rootLogger = debug, stdout - -log4j.appender.stdout = org.apache.log4j.ConsoleAppender -log4j.appender.stdout.Target = System.out -log4j.appender.stdout.layout = org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} [%t] [%c] [%p] - %m%n diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/AttrScopeSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/AttrScopeSuite.scala deleted file mode 100644 index 02838064caff..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/AttrScopeSuite.scala +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.scalatest.{BeforeAndAfterAll, FunSuite} - -class AttrScopeSuite extends FunSuite with BeforeAndAfterAll { - test("attr basic") { - val (data, gdata) = - AttrScope(Map("group" -> "4", "data" -> "great")).withScope { - val data = Symbol.Variable("data", attr = Map("dtype" -> "data", "group" -> "1")) - val gdata = Symbol.Variable("data2") - (data, gdata) - } - assert(gdata.attr("group").get === "4") - assert(data.attr("group").get === "1") - - val exceedScopeData = Symbol.Variable("data3") - assert(exceedScopeData.attr("group") === None, "No group attr in global attr scope") - } -} diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/CancelTestUtil.scala b/scala-package/core/src/test/scala/org/apache/mxnet/CancelTestUtil.scala deleted file mode 100644 index 5b1e9a4bec69..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/CancelTestUtil.scala +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import java.text.DecimalFormatSymbols -import java.util.Locale - -import org.scalatest.Assertions - -object CancelTestUtil { - /** - * Cancel the test if the system's locale uses a decimal separator other than '.'. Please see - * #18097 for more information. 
- */ - def assumeStandardDecimalSeparator(): Unit = { - val lcNumeric = System.getenv("LC_NUMERIC"); - - val decimalFormatSymbols = if (lcNumeric != null) { - val localeName = lcNumeric.stripSuffix(".UTF-8").stripSuffix(".utf-8") - val locale = Locale.forLanguageTag(localeName) - DecimalFormatSymbols.getInstance(locale) - } else { - DecimalFormatSymbols.getInstance() - } - - val isStandardDecimalPoint = (decimalFormatSymbols.getDecimalSeparator == '.') && - (lcNumeric == null || lcNumeric.toLowerCase != "en_dk.utf-8") // Java doesn't seem to respect - // the decimal separator - // set in en_DK.UTF8, which is - // used in CentOS CI jobs. - if (!isStandardDecimalPoint) { - Assertions.cancel("Some operators " + - "break when the decimal separator is set to anything other than \".\". These operators " + - "should be rewritten to utilize the new FFI. Please see #18097 for more information.") - } - } -} diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/CheckUtils.scala b/scala-package/core/src/test/scala/org/apache/mxnet/CheckUtils.scala deleted file mode 100644 index 7602b53edc9e..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/CheckUtils.scala +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -object CheckUtils { - def reldiff(a: NDArray, b: NDArray): Float = { - val diff = NDArray.sum(NDArray.abs(a - b)).toScalar - val norm = NDArray.sum(NDArray.abs(a)).toScalar - if (diff < Float.MinPositiveValue) diff else diff / norm - } - - def reldiff(a: Array[Float], b: Array[Float]): Float = { - val diff = - (a zip b).map { case (aElem, bElem) => Math.abs(aElem - bElem) }.sum - val norm: Float = a.reduce(Math.abs(_) + Math.abs(_)) - if (diff < Float.MinPositiveValue) diff else diff / norm - } -} diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/ExecutorSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/ExecutorSuite.scala deleted file mode 100644 index ee38cc6251cd..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/ExecutorSuite.scala +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License.
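CheckUtils.reldiff above normalises the L1 distance by the L1 magnitude of the first argument, i.e. reldiff(a, b) = sum(|a - b|) / sum(|a|), falling back to the raw difference when it is effectively zero. A small worked sketch:

val a = NDArray.array(Array(1f, 2f, 3f), shape = Shape(3))
val b = NDArray.array(Array(1f, 2f, 4f), shape = Shape(3))
CheckUtils.reldiff(a, b) // |1-1| + |2-2| + |3-4| = 1, divided by 1 + 2 + 3 = 6, so about 0.1667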
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.scalatest.{BeforeAndAfterAll, FunSuite} -import org.apache.mxnet.CheckUtils._ - -class ExecutorSuite extends FunSuite with BeforeAndAfterAll { - test("bind") { - val shape = Shape(100, 30) - val lhs = Symbol.Variable("lhs") - val rhs = Symbol.Variable("rhs") - val ret = lhs + rhs - assert(ret.listArguments().toArray === Array("lhs", "rhs")) - - val lhsArr = Random.uniform(-10f, 10f, shape) - val rhsArr = Random.uniform(-10f, 10f, shape) - val lhsGrad = NDArray.empty(shape) - val rhsGrad = NDArray.empty(shape) - - val executor = ret.bind(Context.cpu(), args = Seq(lhsArr, rhsArr), - argsGrad = Seq(lhsGrad, rhsGrad)) - val exec3 = ret.bind(Context.cpu(), args = Seq(lhsArr, rhsArr)) - val exec4 = ret.bind(Context.cpu(), args = Map("rhs" -> rhsArr, "lhs" -> lhsArr), - argsGrad = Map("lhs" -> lhsGrad, "rhs" -> rhsGrad)) - executor.forward() - exec3.forward() - exec4.forward() - - val out1 = lhsArr + rhsArr - val out2 = executor.outputs(0) - val out3 = exec3.outputs(0) - val out4 = exec4.outputs(0) - assert(reldiff(out1, out2) < 1e-6) - assert(reldiff(out1, out3) < 1e-6) - assert(reldiff(out1, out4) < 1e-6) - - // test gradient - val outGrad = NDArray.ones(shape) - val (lhsGrad2, rhsGrad2) = (outGrad, outGrad) - executor.backward(Array(outGrad)) - assert(reldiff(lhsGrad, lhsGrad2) < 1e-6) - assert(reldiff(rhsGrad, rhsGrad2) < 1e-6) - } - - test("reshape") { - val x = Symbol.Variable("x") - val y = Symbol.FullyConnected()()(Map("data" -> x, "num_hidden" -> 4)) - - val exec = y.simpleBind(Context.cpu(), "write", shapeDict = Map("x" -> Shape(5, 4))) - exec.argArrays(0).set(1) - exec.argArrays(1).set(1) - exec.argArrays(2).set(0) - - val newExec = exec.reshape(kwargs = Map("x" -> Shape(3, 4))) - newExec.forward(isTrain = false) - // test sub exec forward - assert(newExec.outputs(0).toArray.forall(_ == 4)) - // test shared memory - assert(exec.outputs(0).toArray.take(3).forall(_ == 4)) - // test base exec forward - exec.forward(isTrain = false) - assert(exec.outputs(0).toArray.forall(_ == 4)) - } -} diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/IOSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/IOSuite.scala deleted file mode 100644 index 9839f09e4063..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/IOSuite.scala +++ /dev/null @@ -1,336 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
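The gradient assertions in the deleted bind test rest on d(lhs + rhs)/d lhs = 1: backward with an all-ones head gradient must deposit exactly that array into both argument gradients. A minimal sketch of the same identity, using only calls the test itself exercises:

val x = Symbol.Variable("x")
val y = Symbol.Variable("y")
val gx = NDArray.empty(2, 2)
val gy = NDArray.empty(2, 2)
val exec = (x + y).bind(Context.cpu(),
  args = Seq(NDArray.ones(2, 2), NDArray.ones(2, 2)),
  argsGrad = Seq(gx, gy))
exec.forward()
exec.backward(Array(NDArray.ones(2, 2)))
// gx and gy now both hold all ones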
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.io.{NDArrayIter, ResizeIter, PrefetchingIter} -import org.scalatest.{BeforeAndAfterAll, FunSuite} -import scala.language.postfixOps -import scala.sys.process._ - - -class IOSuite extends FunSuite with BeforeAndAfterAll { - - private val tu = new TestUtil - - test("test MNISTIter & MNISTPack") { - // get data - "./scripts/get_mnist_data.sh" ! - - val params = Map( - "image" -> tu.dataFile("train-images-idx3-ubyte"), - "label" -> tu.dataFile("train-labels-idx1-ubyte"), - "data_shape" -> "(784,)", - "batch_size" -> "100", - "shuffle" -> "1", - "flat" -> "1", - "silent" -> "0", - "seed" -> "10" - ) - - val mnistPack = IO.MNISTPack(params) - // test DataPack - val nBatch = 600 - var batchCount = 0 - for(batch <- mnistPack) { - batchCount += 1 - } - assert(nBatch === batchCount) - - // test DataIter - val mnistIter = mnistPack.iterator - // test provideData - val provideData = mnistIter.provideDataDesc - val provideLabel = mnistIter.provideLabelDesc - assert(provideData.find(_.name == "data").get.shape === Shape(100, 784)) - assert(provideLabel.find(_.name == "label").get.shape === Shape(100)) - // test_loop - mnistIter.reset() - batchCount = 0 - while (mnistIter.hasNext) { - mnistIter.next() - batchCount += 1 - } - // test loop - assert(nBatch === batchCount) - // test reset - mnistIter.reset() - mnistIter.next() - val label0 = mnistIter.getLabel().head.toArray - val data0 = mnistIter.getData().head.toArray - mnistIter.next() - mnistIter.next() - mnistIter.next() - mnistIter.reset() - mnistIter.next() - val label1 = mnistIter.getLabel().head.toArray - val data1 = mnistIter.getData().head.toArray - assert(label0 === label1) - assert(data0 === data1) - } - - - /** - * default skip this test for saving time - */ - test("test ImageRecordIter") { - // get data - "./scripts/get_cifar_data.sh" ! - - val params = Map( - "path_imgrec" -> tu.dataFile("cifar/train.rec"), - "mean_img" -> tu.dataFile("cifar/cifar10_mean.bin"), - "rand_crop" -> "False", - "and_mirror" -> "False", - "shuffle" -> "False", - "data_shape" -> "(3,28,28)", - "batch_size" -> "100", - "preprocess_threads" -> "4", - "prefetch_buffer" -> "1" - ) - val imgRecIter = IO.ImageRecordIter(params) - val nBatch = 500 - var batchCount = 0 - // test provideData - val provideData = imgRecIter.provideDataDesc - val provideLabel = imgRecIter.provideLabelDesc - assert(provideData.find(_.name == "data").get.shape.toArray === Array(100, 3, 28, 28)) - assert(provideLabel.find(_.name == "label").get.shape.toArray === Array(100)) - - imgRecIter.reset() - while (imgRecIter.hasNext) { - imgRecIter.next() - batchCount += 1 - } - // test loop - assert(batchCount === nBatch) - // test reset - imgRecIter.reset() - imgRecIter.next() - val label0 = imgRecIter.getLabel().head.toArray - val data0 = imgRecIter.getData().head.toArray - imgRecIter.reset() - imgRecIter.reset() - imgRecIter.reset() - imgRecIter.reset() - imgRecIter.reset() - val label1 = imgRecIter.getLabel().head.toArray - val data1 = imgRecIter.getData().head.toArray - assert(label0 === label1) - assert(data0 === data1) - } - - test("test ResizeIter") { - // get data - "./scripts/get_mnist_data.sh" ! 
- - val params = Map( - "image" -> tu.dataFile("train-images-idx3-ubyte"), - "label" -> tu.dataFile("train-labels-idx1-ubyte"), - "data_shape" -> "(784,)", - "batch_size" -> "100", - "shuffle" -> "1", - "flat" -> "1", - "silent" -> "0", - "seed" -> "10" - ) - - val mnistIter = IO.MNISTIter(params) - val nBatch = 400 - var batchCount = 0 - val resizeIter = new ResizeIter(mnistIter, nBatch, false) - - while(resizeIter.hasNext) { - resizeIter.next() - batchCount += 1 - } - - assert(batchCount === nBatch) - - batchCount = 0 - resizeIter.reset() - while(resizeIter.hasNext) { - resizeIter.next() - batchCount += 1 - } - - assert(batchCount === nBatch) - } - - test("test PrefetchIter") { - // get data - "./scripts/get_mnist_data.sh" ! - - val params = Map( - "image" -> tu.dataFile("train-images-idx3-ubyte"), - "label" -> tu.dataFile("train-labels-idx1-ubyte"), - "data_shape" -> "(784,)", - "batch_size" -> "100", - "shuffle" -> "1", - "flat" -> "1", - "silent" -> "0", - "seed" -> "10" - ) - - val mnistPack1 = IO.MNISTPack(params) - val mnistPack2 = IO.MNISTPack(params) - - val nBatch = 600 - var batchCount = 0 - - val mnistIter1 = mnistPack1.iterator - val mnistIter2 = mnistPack2.iterator - - var prefetchIter = new PrefetchingIter( - IndexedSeq(mnistIter1, mnistIter2), - IndexedSeq(Map("data" -> "data1"), Map("data" -> "data2")), - IndexedSeq(Map("label" -> "label1"), Map("label" -> "label2")) - ) - - // test loop - while(prefetchIter.hasNext) { - prefetchIter.next() - batchCount += 1 - } - assert(nBatch === batchCount) - - // test provideData - val provideData = prefetchIter.provideDataDesc - val provideLabel = prefetchIter.provideLabelDesc - assert(provideData.find(_.name == "data1").get.shape === Shape(100, 784)) - assert(provideData.find(_.name == "data2").get.shape === Shape(100, 784)) - assert(provideLabel.find(_.name == "label1").get.shape === Shape(100)) - assert(provideLabel.find(_.name == "label2").get.shape === Shape(100)) - - // test reset - prefetchIter.reset() - prefetchIter.next() - val label0 = prefetchIter.getLabel().head.toArray - val data0 = prefetchIter.getData().head.toArray - prefetchIter.next() - prefetchIter.next() - prefetchIter.next() - prefetchIter.reset() - prefetchIter.next() - val label1 = prefetchIter.getLabel().head.toArray - val data1 = prefetchIter.getData().head.toArray - assert(label0 === label1) - assert(data0 === data1) - - prefetchIter.dispose() - } - - test("test NDArrayIter") { - val shape0 = Shape(Array(1000, 2, 2)) - val data = IndexedSeq(NDArray.ones(shape0), NDArray.zeros(shape0)) - val shape1 = Shape(Array(1000, 1)) - val label = IndexedSeq(NDArray.ones(shape1, dtype = DType.Int32)) - val batchData0 = NDArray.ones(Shape(Array(128, 2, 2))) - val batchData1 = NDArray.zeros(Shape(Array(128, 2, 2))) - val batchLabel = NDArray.ones(Shape(Array(128, 1))) - - // test pad - val dataIter0 = new NDArrayIter(data, label, 128, false, "pad", - dataName = "data", labelName = "label") - var batchCount = 0 - val nBatch0 = 8 - while(dataIter0.hasNext) { - val tBatch = dataIter0.next() - batchCount += 1 - - assert(tBatch.data(0).toArray === batchData0.toArray) - assert(tBatch.data(1).toArray === batchData1.toArray) - assert(tBatch.label(0).toArray === batchLabel.toArray) - assert(tBatch.label(0).dtype == DType.Int32) - } - - assert(batchCount === nBatch0) - - // test discard - val dataIter1 = new NDArrayIter.Builder() - .addData("data0", data(0)).addData("data1", data(1)) - .addLabel("label", label(0)) - .setBatchSize(128) - .setLastBatchHandle("discard").build() - val 
nBatch1 = 7 - batchCount = 0 - while(dataIter1.hasNext) { - val tBatch = dataIter1.next() - batchCount += 1 - - assert(tBatch.data(0).toArray === batchData0.toArray) - assert(tBatch.data(1).toArray === batchData1.toArray) - assert(tBatch.label(0).toArray === batchLabel.toArray) - } - - assert(batchCount === nBatch1) - - // test empty label (for prediction) - val dataIter2 = new NDArrayIter(data = data, dataBatchSize = 128, shuffle = false, - lastBatchHandle = "discard") - batchCount = 0 - while(dataIter2.hasNext) { - val tBatch = dataIter2.next() - batchCount += 1 - - assert(tBatch.data(0).toArray === batchData0.toArray) - assert(tBatch.data(1).toArray === batchData1.toArray) - } - - assert(batchCount === nBatch1) - assert(dataIter2.initLabel == IndexedSeq.empty) - - // test implementation with DataDesc - val dataIter3 = new NDArrayIter( - IO.initDataDesc(data, false, "data", DType.Float32, Layout.NTC), - IO.initDataDesc(label, false, "label", DType.Int32, Layout.NT), - 128, false, "pad") - val dataDesc = dataIter3.provideDataDesc - val labelDesc = dataIter3.provideLabelDesc - assert(dataDesc(0).dtype == DType.Float32) - assert(dataDesc(0).layout == Layout.NTC) - assert(labelDesc(0).dtype == DType.Int32) - assert(labelDesc(0).layout == Layout.NT) - - - // Test with passing Float64 hardcoded as Dtype of data - val dataIter4 = new NDArrayIter( - IO.initDataDesc(data, false, "data", DType.Float64, Layout.NTC), - IO.initDataDesc(label, false, "label", DType.Int32, Layout.NT), - 128, false, "pad") - val dataDesc4 = dataIter4.provideDataDesc - val labelDesc4 = dataIter4.provideLabelDesc - assert(dataDesc4(0).dtype == DType.Float64) - assert(dataDesc4(0).layout == Layout.NTC) - assert(labelDesc4(0).dtype == DType.Int32) - assert(labelDesc4(0).layout == Layout.NT) - - // Test with Float64 coming from the data itself - val dataF64 = IndexedSeq(NDArray.ones(shape0, dtype = DType.Float64), - NDArray.zeros(shape0, dtype = DType.Float64)) - - val dataIter5 = new NDArrayIter( - IO.initDataDesc(dataF64, false, "data", DType.Float64, Layout.NTC), - IO.initDataDesc(label, false, "label", DType.Int32, Layout.NT), - 128, false, "pad") - val dataDesc5 = dataIter5.provideDataDesc - assert(dataDesc5(0).dtype == DType.Float64) - assert(dataDesc5(0).dtype != DType.Float32) - assert(dataDesc5(0).layout == Layout.NTC) - - } -} diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/ImageSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/ImageSuite.scala deleted file mode 100644 index dca4ce02ef89..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/ImageSuite.scala +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
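The NDArrayIter cases above come down to batch arithmetic: 1000 samples at batch size 128 yield ceil(1000/128) = 8 batches under "pad" and floor(1000/128) = 7 under "discard". A minimal builder sketch mirroring the deleted test:

val iter = new NDArrayIter.Builder()
  .addData("data", NDArray.ones(Shape(1000, 2, 2)))
  .addLabel("label", NDArray.ones(Shape(1000, 1)))
  .setBatchSize(128)
  .setLastBatchHandle("discard") // 7 full batches; the 104-sample remainder is dropped
  .build()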
- */ - -package org.apache.mxnet - -import java.io.File -import java.net.URL - -import javax.imageio.ImageIO -import org.apache.commons.io.FileUtils -import org.scalatest.{BeforeAndAfterAll, FunSuite} -import org.slf4j.LoggerFactory - -class ImageSuite extends FunSuite with BeforeAndAfterAll { - private var imLocation = "" - private val logger = LoggerFactory.getLogger(classOf[ImageSuite]) - - private def downloadUrl(url: String, filePath: String, maxRetry: Option[Int] = None) : Unit = { - val tmpFile = new File(filePath) - var retry = maxRetry.getOrElse(3) - var success = false - if (!tmpFile.exists()) { - while (retry > 0 && !success) { - try { - FileUtils.copyURLToFile(new URL(url), tmpFile) - success = true - } catch { - case e: Exception => retry -= 1 - } - } - } else { - success = true - } - if (!success) throw new Exception(s"$url Download failed!") - } - - override def beforeAll(): Unit = { - val tempDirPath = System.getProperty("java.io.tmpdir") - imLocation = tempDirPath + "/inputImages/Pug-Cookie.jpg" - downloadUrl("https://s3.amazonaws.com/model-server/inputs/Pug-Cookie.jpg", - imLocation) - } - - test("Test load image") { - val nd = Image.imRead(imLocation) - logger.debug(s"OpenCV load image with shape: ${nd.shape}") - require(nd.shape == Shape(576, 1024, 3), "image shape not Match!") - } - - test("Test load image from Socket") { - val url = new URL("https://s3.amazonaws.com/model-server/inputs/Pug-Cookie.jpg") - val inputStream = url.openStream - val nd = Image.imDecode(inputStream) - logger.debug(s"OpenCV load image with shape: ${nd.shape}") - require(nd.shape == Shape(576, 1024, 3), "image shape not Match!") - } - - test("Test resize image") { - val nd = Image.imRead(imLocation) - val resizeIm = Image.imResize(nd, 224, 224) - logger.debug(s"OpenCV resize image with shape: ${resizeIm.shape}") - require(resizeIm.shape == Shape(224, 224, 3), "image shape not Match!") - } - - test("Test crop image") { - val nd = Image.imRead(imLocation) - val nd2 = Image.fixedCrop(nd, 0, 0, 224, 224) - require(nd2.shape == Shape(224, 224, 3), "image shape not Match!") - } - - test("Test apply border") { - val nd = Image.imRead(imLocation) - val nd2 = Image.copyMakeBorder(nd, 1, 1, 1, 1) - require(nd2.shape == Shape(578, 1026, 3), s"image shape not Match!") - } - - test("Test convert to Image") { - val nd = Image.imRead(imLocation) - val resizeIm = Image.imResize(nd, 224, 224) - val tempDirPath = System.getProperty("java.io.tmpdir") - val img = Image.toImage(resizeIm) - ImageIO.write(img, "png", new File(tempDirPath + "/inputImages/out.png")) - logger.debug(s"converted image stored in ${tempDirPath + "/inputImages/out.png"}") - } - - test("Test draw Bounding box") { - val buf = ImageIO.read(new File(imLocation)) - val box = Array( - Map("xmin" -> 190, "xmax" -> 850, "ymin" -> 50, "ymax" -> 450), - Map("xmin" -> 200, "xmax" -> 350, "ymin" -> 440, "ymax" -> 530) - ) - val names = Array("pug", "cookie") - Image.drawBoundingBox(buf, box, Some(names), fontSizeMult = Some(1.4f)) - val tempDirPath = System.getProperty("java.io.tmpdir") - ImageIO.write(buf, "png", new File(tempDirPath + "/inputImages/out2.png")) - logger.debug(s"converted image stored in ${tempDirPath + "/inputImages/out2.png"}") - for (coord <- box) { - val topLeft = buf.getRGB(coord("xmin"), coord("ymin")) - val downLeft = buf.getRGB(coord("xmin"), coord("ymax")) - val topRight = buf.getRGB(coord("xmax"), coord("ymin")) - val downRight = buf.getRGB(coord("xmax"), coord("ymax")) - require(downLeft == downRight) - require(topRight == 
downRight) - } - } - -} diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/KVStoreSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/KVStoreSuite.scala deleted file mode 100644 index 3eb61414bac1..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/KVStoreSuite.scala +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.scalatest.{BeforeAndAfterAll, FunSuite} - -class KVStoreSuite extends FunSuite with BeforeAndAfterAll { - test("init and pull") { - val kv = KVStore.create() - val shape = Shape(2, 1) - val ndArray = NDArray.zeros(shape) - - kv.init("3", NDArray.ones(shape)) - kv.pull("3", ndArray) - assert(ndArray.toArray === Array(1f, 1f)) - } - - test("push and pull") { - val kv = KVStore.create() - val shape = Shape(2, 1) - val ndArray = NDArray.zeros(shape) - - kv.init("3", NDArray.ones(shape)) - kv.push("3", NDArray.ones(shape) * 4) - kv.pull("3", ndArray) - assert(ndArray.toArray === Array(4f, 4f)) - } - - test("test aggregate") { - val shape = Shape(4, 4) - val keys = Array("b", "c", "d") - val kv = KVStore.create() - kv.init("a", NDArray.zeros(shape)) - kv.init(keys, Array.fill(keys.length)(NDArray.zeros(shape))) - val numDevs = 4 - val devs = (0 until numDevs).map(Context.cpu(_)) - val vals = devs.map(d => NDArray.ones(shape, d)).toArray - kv.push("a", vals) - kv.pull("a", outs = vals) - assert(vals.map(v => v.toArray.map(x => x - numDevs).sum).sum == 0f) - - val valss = keys.map { k => - val tmpVals = devs.map(d => NDArray.ones(shape, d) * 2f).toArray - kv.push(k, tmpVals) - kv.pull(k, outs = tmpVals) - tmpVals - }.flatten - assert(valss.map(v => v.toArray.map(x => x - numDevs * 2f).sum).sum == 0f) - } - - test("updater runs when push") { - val kv = KVStore.create() - val updater = new MXKVStoreUpdater { - override def update(key: Int, input: NDArray, stored: NDArray): Unit = { - stored += input * 2 - } - override def dispose(): Unit = {} - } - kv.setUpdater(updater) - - val shape = Shape(2, 1) - val ndArray = NDArray.zeros(shape) - - kv.init("3", NDArray.ones(shape) * 4) - kv.pull("3", ndArray) - assert(ndArray.toArray === Array(4f, 4f)) - - kv.push("3", NDArray.ones(shape)) - kv.pull("3", ndArray) - assert(ndArray.toArray === Array(6f, 6f)) - } - - test("get type") { - val kv = KVStore.create("local") - assert(kv.`type` === "local") - } - - test("get numWorkers and rank") { - val kv = KVStore.create("local") - assert(kv.numWorkers === 1) - assert(kv.rank === 0) - } -} diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/ModelParallelSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/ModelParallelSuite.scala deleted file mode 100644 index 2962e3b4781c..000000000000 --- 
a/scala-package/core/src/test/scala/org/apache/mxnet/ModelParallelSuite.scala +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.CheckUtils._ -import org.scalatest.{BeforeAndAfterAll, FunSuite} - -class ModelParallelSuite extends FunSuite with BeforeAndAfterAll { - test("chain") { - val n = 2 - val ctx1 = Context.cpu(0) - val ctx2 = Context.cpu(1) - val data1 = Symbol.Variable("data1") - val data2 = Symbol.Variable("data2") - val data3 = Symbol.Variable("data3") - - var net: Symbol = null - new AttrScope(Map("ctx_group" -> "dev1")).withScope { - net = (data1 + data2) * 3 - } - - new AttrScope(Map("ctx_group" -> "dev2")).withScope { - net = net + data3 - } - - val shape = Shape(4, 5) - val arr = (0 until n + 1).map(_ => NDArray.empty(shape, ctx1)) - val arrGrad = (0 until n).map(_ => NDArray.empty(shape, ctx1)) :+ NDArray.empty(shape, ctx2) - - val exec1 = net.bind(ctx1, - args = arr, - argsGrad = arrGrad, - gradReq = "write", - auxStates = Nil, - group2ctx = Map("dev1" -> ctx1, "dev2" -> ctx2), - sharedExec = null) - - arr(0).set(1f) - arr(1).set(2f) - arr(2).set(3f) - - val arr2 = arr.map(_.copyTo(ctx1)) - val arrGrad2 = arrGrad.map(_.copyTo(ctx1)) - val exec2 = net.bind(ctx1, args = arr2, argsGrad = arrGrad2) - - exec1.forward() - exec2.forward() - assert(reldiff(exec1.outputs(0).copyTo(ctx1), - exec2.outputs(0).copyTo(ctx1)) < 1e-6f) - - val outGrad = NDArray.ones(shape, ctx2) - exec1.backward(Array(outGrad)) - exec2.backward(Array(outGrad.copyTo(ctx1))) - (arrGrad zip arrGrad2) foreach { case (a, b) => - assert(reldiff(a.copyTo(ctx1), b) < 1e-6f) - } - } -} diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/ModuleSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/ModuleSuite.scala deleted file mode 100644 index 402509e8ea9a..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/ModuleSuite.scala +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
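ModelParallelSuite above is the entire contract for symbol-level model parallelism: operators built under an AttrScope carrying a ctx_group attribute are placed on whatever device that group is mapped to at bind time. A condensed sketch of the two halves:

val part1 = new AttrScope(Map("ctx_group" -> "dev1")).withScope {
  Symbol.Variable("data1") + Symbol.Variable("data2")
}
// At bind time, group2ctx = Map("dev1" -> Context.cpu(0), "dev2" -> Context.cpu(1))
// pins each tagged subgraph to its device, exactly as the deleted chain test does.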
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.scalatest.{BeforeAndAfterAll, FunSuite} -import org.apache.mxnet.module._ -import org.apache.mxnet.optimizer._ -import org.apache.mxnet.io._ - -class ModuleSuite extends FunSuite with BeforeAndAfterAll { - - class myModule(symbol : Symbol) extends Module (symbol) { - override def predictEveryBatch(evalData: DataIter, - numBatch: Int = 1, reset: Boolean = true): - IndexedSeq[IndexedSeq[NDArray]] = { - val data = IndexedSeq( - NDArray.ones(Shape(1, 10, 1)), - NDArray.ones(Shape(1, 10, 1)), - NDArray.ones(Shape(1, 10, 4)) - ) - List.fill(numBatch)(data).toIndexedSeq - } - } - - test("predict") { - val sym = Symbol.Variable("data") - val mod = new myModule(sym) - val dummyIter = new NDArrayIter(IndexedSeq(NDArray.ones(1))) - var output = mod.predict(dummyIter, 1) - require(output(0).shape == Shape(1, 10, 1)) - require(output(1).shape == Shape(1, 10, 1)) - require(output(2).shape == Shape(1, 10, 4)) - output = mod.predict(dummyIter, 2) - require(output(0).shape == Shape(2, 10, 1)) - require(output(1).shape == Shape(2, 10, 1)) - require(output(2).shape == Shape(2, 10, 4)) - } - - test ("model dtype") { - val dType = DType.Float32 - val dShape = Shape(3, 8, 7) - - var sym = Symbol.Variable("data") - sym = Symbol.Activation(attr = Map("__layout__" -> "TNC"))()( - Map("data" -> sym, "act_type" -> "relu")) - - val mod = new Module(sym, IndexedSeq("data"), null, - contexts = Array(Context.cpu(0), Context.cpu(1))) - mod.bind(dataShapes = IndexedSeq(DataDesc("data", dShape, dType, "TNC"))) - mod.initParams() - mod.forward(new DataBatch( - data = IndexedSeq(NDArray.ones(dShape, dtype = dType)), - label = null, index = null, pad = 0)) - mod.backward(Array(NDArray.ones(dShape, dtype = dType))) - - assert(mod.getOutputs.flatten.forall(_.dtype == dType)) - } - - test ("module input_grads") { - val a = Symbol.Variable("a", kwargs = Map("__layout__" -> "NC")) - val b = Symbol.Variable("b", kwargs = Map("__layout__" -> "NC")) - var c = Symbol.Variable("c", kwargs = Map("__layout__" -> "NC")) - - import SymbolConversions._ - c = a + 2 * b + 3 * c - - val mod = new Module.Builder(c) - .setDataNames("b", "c", "a") - .setLabelNames(null) - .setContext(Context.cpu(0), Context.cpu(1)) - .build() - mod.bind(dataShapes = IndexedSeq( - DataDesc("b", Shape(5, 5), layout = "NT"), - DataDesc("c", Shape(5, 5), layout = "NT"), - DataDesc("a", Shape(5, 5), layout = "NT")), - inputsNeedGrad = true - ) - mod.initParams() - mod.forward(new DataBatch( - data = IndexedSeq( - NDArray.ones(5, 5), NDArray.ones(5, 5), NDArray.ones(5, 5)), - label = null, index = null, pad = 0)) - mod.backward(Array(NDArray.ones(5, 5))) - - val inputGrads = mod.getInputGradsMerged() - val aGrad = inputGrads(0).toArray - val bGrad = inputGrads(1).toArray - val cGrad = inputGrads(2).toArray - - assert(aGrad.forall(_ == 1f)) - assert(bGrad.forall(_ == 2f)) - assert(cGrad.forall(_ == 3f)) - } - - test ("module layout") { - var sym = Symbol.Variable("data") - sym = Symbol.Activation(attr = Map("__layout__" -> "TNC"))()( - Map("data" -> sym, "act_type" -> "relu")) - - val dShape = Shape(3, 8, 7) - val mod = new Module(sym, IndexedSeq("data"), null, - contexts = Array(Context.cpu(0), Context.cpu(1))) - mod.bind(dataShapes = IndexedSeq(DataDesc("data", dShape, layout = "TNC"))) - mod.initParams() - mod.forward(new DataBatch( - data = IndexedSeq(NDArray.ones(dShape)), - label = null, index = null, pad = 0)) - 
mod.backward(Array(NDArray.ones(dShape))) - assert(mod.getOutputsMerged()(0).shape == dShape) - - val hdShape = Shape(3, 4, 7) - for (x <- mod.getOutputs) assert(x(0).shape == hdShape) - } - - test ("save load") { - def mapEqu(a: Map[String, NDArray], b: Map[String, NDArray]): Unit = { - assert(a.toSet == b.toSet) - for (k <- a.keys) assert(a(k) == b(k)) - } - - var sym = Symbol.Variable("data") - sym = Symbol.FullyConnected()()(Map("data" -> sym, "num_hidden" -> 100)) - - // single device - var mod = new Module(sym, IndexedSeq("data"), null) - mod.bind(dataShapes = IndexedSeq(DataDesc("data", Shape(10, 10), layout = "NT"))) - mod.initParams() - mod.initOptimizer(optimizer = new SGD(learningRate = 0.1f, momentum = 0.9f)) - mod.update() - mod.saveCheckpoint("test", 0, saveOptStates = true) - - var mod2 = Module.loadCheckpoint("test", 0, loadOptimizerStates = true) - mod2.bind(dataShapes = IndexedSeq(DataDesc("data", Shape(10, 10), layout = "NT"))) - mod2.initOptimizer(optimizer = new SGD(learningRate = 0.1f, momentum = 0.9f)) - assert(mod.getSymbol.toJson == mod2.getSymbol.toJson) - mapEqu(mod.getParams._1, mod2.getParams._1) - - // multi device - mod = new Module(sym, IndexedSeq("data"), null, - contexts = Array(Context.cpu(0), Context.cpu(1))) - mod.bind(dataShapes = IndexedSeq(DataDesc("data", Shape(10, 10), layout = "NT" ))) - mod.initParams() - mod.initOptimizer(optimizer = new SGD(learningRate = 0.1f, momentum = 0.9f)) - mod.update() - mod.saveCheckpoint("test", 0, saveOptStates = true) - - mod2 = Module.loadCheckpoint("test", 0, loadOptimizerStates = true) - mod2.bind(dataShapes = IndexedSeq(DataDesc("data", Shape(10, 10), layout = "NT"))) - mod2.initOptimizer(optimizer = new SGD(learningRate = 0.1f, momentum = 0.9f)) - assert(mod.getSymbol.toJson == mod2.getSymbol.toJson) - mapEqu(mod.getParams._1, mod2.getParams._1) - } - - test ("module reshape") { - CancelTestUtil.assumeStandardDecimalSeparator() - - var sym = Symbol.Variable("data") - sym = Symbol.FullyConnected("fc")()(Map("data" -> sym, "num_hidden" -> 20)) - - var dShape = Shape(7, 20) - val mod = new Module(sym, IndexedSeq("data"), null, - contexts = Array(Context.cpu(0), Context.cpu(1))) - mod.bind(dataShapes = IndexedSeq(DataDesc("data", dShape, layout = "NT"))) - mod.initParams() - mod.initOptimizer(optimizer = new SGD(learningRate = 1f)) - - mod.forward(new DataBatch( - data = IndexedSeq(NDArray.ones(dShape)), - label = null, index = null, pad = 0)) - mod.backward(Array(NDArray.ones(dShape))) - mod.update() - assert(mod.getOutputsMerged()(0).shape == dShape) - assert(mod.getParams._1("fc_bias").toArray.forall(_ == -1f)) - - // reshape module - dShape = Shape(14, 20) - mod.reshape(IndexedSeq(DataDesc("data", dShape, layout = "NT"))) - mod.forward(new DataBatch( - data = IndexedSeq(NDArray.ones(dShape)), - label = null, index = null, pad = 0)) - mod.backward(Array(NDArray.ones(dShape))) - mod.update() - assert(mod.getOutputsMerged()(0).shape == dShape) - assert(mod.getParams._1("fc_bias").toArray.forall(x => (x - -3f) < 1e-3)) - - // return to original binded shape - dShape = Shape(7, 20) - mod.reshape(IndexedSeq(DataDesc("data", dShape, layout = "NT"))) - mod.forward(new DataBatch( - data = IndexedSeq(NDArray.ones(dShape)), - label = null, index = null, pad = 0)) - mod.backward(Array(NDArray.ones(dShape))) - mod.update() - assert(mod.getOutputsMerged()(0).shape == dShape) - assert(mod.getParams._1("fc_bias").toArray.forall(x => (x - -3f) < 1e-3)) - } -} diff --git 
a/scala-package/core/src/test/scala/org/apache/mxnet/NDArrayCollectorSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/NDArrayCollectorSuite.scala deleted file mode 100644 index f361ee1e4eac..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/NDArrayCollectorSuite.scala +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers} - -class NDArrayCollectorSuite extends FunSuite with BeforeAndAfterAll with Matchers { - - test("auto dispose") { - val a = NDArray.array(Array(-1f, 0f, 1f, 2f, 3f, 4f), shape = Shape(2, 3)) - var b, c: NDArray = null - - val res = NDArrayCollector.auto().withScope { - b = NDArray.relu(a) // [0, 0, 1, 2, 3, 4] - c = a + b // [-1, 0, 2, 4, 6, 8] - c.slice(0, 1) - } - - assert(b.isDisposed) - assert(c.isDisposed) - assert(!res.isDisposed) // smart enough not to dispose the returned NDArray - - assert(res.toArray === Array(-1f, 0f, 2f)) - - res.dispose() - } - - test("manually dispose") { - val a = NDArray.array(Array(-1f, 0f, 1f, 2f, 3f, 4f), shape = Shape(2, 3)) - var b, c: NDArray = null - - val collector = NDArrayCollector.manual() - val res = collector.withScope { - b = NDArray.relu(a) // [0, 0, 1, 2, 3, 4] - c = a + b // [-1, 0, 2, 4, 6, 8] - c.slice(0, 1) - } - - assert(res.toArray === Array(-1f, 0f, 2f)) - - assert(collector.size === 2) // smart enough not to collect the returned NDArray - assert(!b.isDisposed) - assert(!c.isDisposed) - assert(!res.isDisposed) - - collector.foreach(_.dispose()) - assert(b.isDisposed) - assert(c.isDisposed) - assert(!res.isDisposed) - - collector.clear() - assert(collector.size === 0) - - res.dispose() - } -} diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/NDArraySuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/NDArraySuite.scala deleted file mode 100644 index 82b9edc8f4bb..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/NDArraySuite.scala +++ /dev/null @@ -1,1022 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import java.io.File -import java.util.concurrent.atomic.AtomicInteger - -import org.apache.mxnet.NDArrayConversions._ -import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers} -import org.slf4j.LoggerFactory -import scala.collection.mutable.ArrayBuffer - -class NDArraySuite extends FunSuite with BeforeAndAfterAll with Matchers { - private val sequence: AtomicInteger = new AtomicInteger(0) - - private val logger = LoggerFactory.getLogger(classOf[NDArraySuite]) - - test("to java array") { - val ndarray = NDArray.zeros(2, 2) - assert(ndarray.toArray === Array(0f, 0f, 0f, 0f)) - - val float64Array = NDArray.zeros(Shape(2, 2), dtype = DType.Float64) - assert(float64Array.toFloat64Array === Array(0d, 0d, 0d, 0d)) - } - - test("to scalar") { - val ndzeros = NDArray.zeros(1) - assert(ndzeros.toScalar === 0f) - val ndones = NDArray.ones(1) - assert(ndones.toScalar === 1f) - } - - test("to sparse") { - val arr = Array( - Array(1f, 0f, 0f), - Array(0f, 3f, 0f), - Array(0f, 0f, 1f) - ) - val nd = NDArray.toNDArray(arr) - assert(!nd.isSparse) - // row sparse - var ndSparse = nd.toSparse() - assert(ndSparse.getIndices.toArray sameElements Array(0f, 1f, 2f)) - // csr - ndSparse = nd.toSparse(Some(SparseFormat.CSR)) - assert(ndSparse.getIndptr.toArray sameElements Array(0f, 1f, 2f, 3f)) - } - - test("to float 64 scalar") { - val ndzeros = NDArray.zeros(Shape(1), dtype = DType.Float64) - assert(ndzeros.toFloat64Scalar === 0d) - val ndones = NDArray.ones(Shape(1), dtype = DType.Float64) - assert(ndones.toFloat64Scalar === 1d) - } - - test ("call toScalar on an ndarray which is not a scalar") { - intercept[Exception] { NDArray.zeros(1, 1).toScalar } - intercept[Exception] { NDArray.zeros(shape = Shape (1, 1), - dtype = DType.Float64).toFloat64Scalar } - } - - test("size and shape") { - val ndzeros = NDArray.zeros(4, 1) - assert(ndzeros.shape === Shape(4, 1)) - assert(ndzeros.size === 4) - } - - test("dtype") { - val arr = NDArray.zeros(3, 2) - assert(arr.dtype === DType.Float32) - - val float64Array = NDArray.zeros(shape = Shape(3, 2), dtype = DType.Float64) - assert(float64Array.dtype === DType.Float64) - } - - test("set scalar value") { - val ndarray = NDArray.empty(2, 1) - ndarray.set(10f) - assert(ndarray.toArray === Array(10f, 10f)) - - val float64array = NDArray.empty(shape = Shape(2, 1), dtype = DType.Float64) - float64array.set(10d) - assert(float64array.toFloat64Array === Array(10d, 10d)) - - } - - test("copy from java array") { - val ndarray = NDArray.empty(4, 1) - ndarray.set(Array(1f, 2f, 3f, 4f)) - assert(ndarray.toArray === Array(1f, 2f, 3f, 4f)) - } - - test("create NDArray based on Java Matrix") { - def arrayGen(num : Any) : Array[Any] = { - val array = num match { - case f: Float => - (for (_ <- 0 until 100) yield Array(1.0f, 1.0f, 1.0f, 1.0f)).toArray - case d: Double => - (for (_ <- 0 until 100) yield Array(1.0d, 1.0d, 1.0d, 1.0d)).toArray - case _ => throw new IllegalArgumentException(s"Unsupported Type ${num.getClass}") - } - Array( - Array( - array - ), - Array( - array - ) - ) - } - val floatData = 1.0f - var nd = NDArray.toNDArray(arrayGen(floatData)) - require(nd.shape == Shape(2, 1, 100, 4)) - val arr2 = Array(1.0f, 1.0f, 1.0f, 1.0f) - nd = NDArray.toNDArray(arr2) - require(nd.shape == Shape(4)) - val doubleData = 1.0d - nd = NDArray.toNDArray(arrayGen(doubleData)) - require(nd.shape == Shape(2, 1, 100, 4)) - require(nd.dtype == 
DType.Float64) - } - - test("test Visualize") { - var nd = NDArray.ones(Shape(1, 2, 1000, 1)) - var data : String = - """ - |[ - | [ - | [ - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | - | ... with length 1000 - | ] - | [ - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | [1.0] - | - | ... with length 1000 - | ] - | ] - |] - |""".stripMargin - require(nd.toString.split("\\s+").mkString == data.split("\\s+").mkString) - nd = NDArray.ones(Shape(1, 4)) - data = - """ - |[ - | [1.0,1.0,1.0,1.0] - |] - |""".stripMargin - require(nd.toString.split("\\s+").mkString == data.split("\\s+").mkString) - } - - test("plus") { - var ndzeros = NDArray.zeros(2, 1) - var ndones = ndzeros + 1f - assert(ndones.toArray === Array(1f, 1f)) - assert((ndones + ndzeros).toArray === Array(1f, 1f)) - assert((1 + ndones).toArray === Array(2f, 2f)) - // in-place - ndones += ndones - assert(ndones.toArray === Array(2f, 2f)) - - // Float64 method test - ndzeros = NDArray.zeros(shape = Shape(2, 1), dtype = DType.Float64) - ndones = ndzeros + 1d - assert(ndones.toFloat64Array === Array(1d, 1d)) - assert((ndones + ndzeros).toFloat64Array === Array(1d, 1d)) - assert((1d + ndones).toArray === Array(2d, 2d)) - // in-place - ndones += ndones - assert(ndones.toFloat64Array === Array(2d, 2d)) - } - - test("minus") { - var ndones = NDArray.ones(2, 1) - var ndzeros = ndones - 1f - assert(ndzeros.toArray === Array(0f, 0f)) - assert((ndones - ndzeros).toArray === Array(1f, 1f)) - assert((ndzeros - ndones).toArray === Array(-1f, -1f)) - assert((ndones - 1).toArray === Array(0f, 0f)) - // in-place - ndones -= ndones - assert(ndones.toArray === Array(0f, 0f)) - - // Float64 methods test - ndones = NDArray.ones(shape = Shape(2, 1)) - ndzeros = ndones - 1d - assert(ndzeros.toFloat64Array === Array(0d, 0d)) - assert((ndones - ndzeros).toFloat64Array === Array(1d , 1d)) - assert((ndzeros - ndones).toFloat64Array === Array(-1d , -1d)) - assert((ndones - 1).toFloat64Array === Array(0d, 0d)) - // in-place - ndones -= ndones - assert(ndones.toArray === Array(0d, 0d)) - - } - - test("multiplication") { - var ndones = NDArray.ones(2, 1) - var ndtwos = ndones * 2 - assert(ndtwos.toArray === Array(2f, 2f)) - assert((ndones * ndones).toArray === Array(1f, 1f)) - assert((ndtwos * ndtwos).toArray === Array(4f, 4f)) - ndtwos *= ndtwos - // in-place - assert(ndtwos.toArray === Array(4f, 4f)) - - // Float64 methods test - ndones = NDArray.ones(shape = Shape(2, 1), dtype = DType.Float64) - ndtwos = ndones * 2d - assert(ndtwos.toFloat64Array === Array(2d, 2d)) - assert((ndones * ndones).toFloat64Array === Array(1d, 1d)) - assert((ndtwos * ndtwos).toFloat64Array === Array(4d, 4d)) - ndtwos *= ndtwos - // in-place - assert(ndtwos.toFloat64Array === Array(4d, 4d)) - - } - - test("division") { - var ndones = NDArray.ones(2, 1) - var ndzeros = ndones - 1f - var ndhalves = ndones / 2 - assert(ndhalves.toArray === Array(0.5f, 0.5f)) - assert((ndhalves / ndhalves).toArray === Array(1f, 1f)) - assert((ndones / ndones).toArray === Array(1f, 1f)) - assert((ndzeros / ndones).toArray === Array(0f, 0f)) - ndhalves /= ndhalves - // in-place - assert(ndhalves.toArray === Array(1f, 1f)) - - // Float64 methods test - ndones = NDArray.ones(shape = Shape (2, 1), dtype = DType.Float64) - ndzeros = ndones - 1d - ndhalves = ndones / 2d - assert(ndhalves.toFloat64Array === Array(0.5d, 0.5d)) - assert((ndhalves / ndhalves).toFloat64Array === Array(1d, 1d)) - assert((ndones / 
ndones).toFloat64Array === Array(1d, 1d)) - assert((ndzeros / ndones).toFloat64Array === Array(0d, 0d)) - ndhalves /= ndhalves - // in-place - assert(ndhalves.toFloat64Array === Array(1d, 1d)) - } - - test("full") { - var arr = NDArray.full(Shape(1, 2), 3f) - assert(arr.shape === Shape(1, 2)) - assert(arr.toArray === Array(3f, 3f)) - - // Float64 methods test - arr = NDArray.full(Shape(1, 2), value = 5d, Context.cpu()) - assert(arr.toFloat64Array === Array (5d, 5d)) - } - - test("clip") { - var ndarray = NDArray.empty(3, 2) - ndarray.set(Array(1f, 2f, 3f, 4f, 5f, 6f)) - assert(NDArray.clip(ndarray, 2f, 5f).toArray === Array(2f, 2f, 3f, 4f, 5f, 5f)) - - // Float64 methods test - ndarray = NDArray.empty(shape = Shape(3, 2), dtype = DType.Float64) - ndarray.set(Array(1d, 2d, 3d, 4d, 5d, 6d)) - assert(NDArray.clip(ndarray, 2d, 5d).toFloat64Array === Array(2d, 2d, 3d, 4d, 5d, 5d)) - } - - test("sqrt") { - var ndarray = NDArray.empty(4, 1) - ndarray.set(Array(0f, 1f, 4f, 9f)) - assert(NDArray.sqrt(ndarray).toArray === Array(0f, 1f, 2f, 3f)) - - // Float64 methods test - ndarray = NDArray.empty(shape = Shape(4, 1), dtype = DType.Float64) - ndarray.set(Array(0d, 1d, 4d, 9d)) - assert(NDArray.sqrt(ndarray).toFloat64Array === Array(0d, 1d, 2d, 3d)) - } - - test("rsqrt") { - var ndarray = NDArray.array(Array(1f, 4f), shape = Shape(2, 1)) - assert(NDArray.rsqrt(ndarray).toArray === Array(1f, 0.5f)) - - // Float64 methods test - ndarray = NDArray.array(Array(1d, 4d, 25d), shape = Shape(3, 1), Context.cpu()) - assert(NDArray.rsqrt(ndarray).toFloat64Array === Array(1d, 0.5d, 0.2d)) - } - - test("norm") { - var ndarray = NDArray.empty(3, 1) - ndarray.set(Array(1f, 2f, 3f)) - var normed = NDArray.norm(ndarray) - assert(normed.shape === Shape(1)) - assert(normed.toScalar === math.sqrt(14.0).toFloat +- 1e-3f) - - // Float64 methods test - ndarray = NDArray.empty(shape = Shape(3, 1), dtype = DType.Float64) - ndarray.set(Array(1d, 2d, 3d)) - normed = NDArray.norm(ndarray) - assert(normed.get.dtype === DType.Float64) - assert(normed.shape === Shape(1)) - assert(normed.toFloat64Scalar === math.sqrt(14.0) +- 1e-3d) - } - - test("one hot encode") { - val indices = NDArray.array(Array(1f, 0f, 2f), shape = Shape(3)) - val array = NDArray.empty(3, 3) - NDArray.onehotEncode(indices, array) - assert(array.shape === Shape(3, 3)) - assert(array.toArray === Array(0f, 1f, 0f, - 1f, 0f, 0f, - 0f, 0f, 1f)) - } - - test("dot") { - val arr1 = NDArray.array(Array(1f, 2f), shape = Shape(1, 2)) - val arr2 = NDArray.array(Array(3f, 4f), shape = Shape(2, 1)) - val res = NDArray.dot(arr1, arr2) - assert(res.shape === Shape(1, 1)) - assert(res.toArray === Array(11f)) - } - - test("arange") { - for (i <- 0 until 5) { - val start = scala.util.Random.nextFloat() * 5 - val stop = start + scala.util.Random.nextFloat() * 100 - val step = scala.util.Random.nextFloat() * 4 - val repeat = 1 - - val result1 = (start.toDouble until stop.toDouble by step.toDouble) - .flatMap(x => Array.fill[Float](repeat)(x.toFloat)) - val range1 = NDArray.arange(start = start, stop = Some(stop), step = step, - repeat = repeat) - assert(CheckUtils.reldiff(result1.toArray, range1.toArray) <= 1e-4f) - - val result2 = (0.0 until stop.toDouble by step.toDouble) - .flatMap(x => Array.fill[Float](repeat)(x.toFloat)) - val range2 = NDArray.arange(stop, step = step, repeat = repeat) - assert(CheckUtils.reldiff(result2.toArray, range2.toArray) <= 1e-4f) - - val result3 = 0f to stop by 1f - val range3 = NDArray.arange(stop) - assert(CheckUtils.reldiff(result3.toArray, 
range3.toArray) <= 1e-4f) - - val stop4 = Math.abs(stop) - val step4 = stop4 + Math.abs(scala.util.Random.nextFloat()) - val result4 = (0.0 until stop4.toDouble by step4.toDouble) - .flatMap(x => Array.fill[Float](repeat)(x.toFloat)) - val range4 = NDArray.arange(stop4, step = step4, repeat = repeat) - assert(CheckUtils.reldiff(result4.toArray, range4.toArray) <= 1e-4f) - } - } - - test("power") { - var arr = NDArray.array(Array(3f, 5f), shape = Shape(2, 1)) - - var arrPower1 = NDArray.power(2f, arr) - assert(arrPower1.shape === Shape(2, 1)) - assert(arrPower1.toArray === Array(8f, 32f)) - - var arrPower2 = NDArray.power(arr, 2f) - assert(arrPower2.shape === Shape(2, 1)) - assert(arrPower2.toArray === Array(9f, 25f)) - - var arrPower3 = NDArray.power(arr, arr) - assert(arrPower3.shape === Shape(2, 1)) - assert(arrPower3.toArray === Array(27f, 3125f)) - - var arrPower4 = arr ** 2f - - assert(arrPower4.shape === Shape(2, 1)) - assert(arrPower4.toArray === Array(9f, 25f)) - - var arrPower5 = arr ** arr - assert(arrPower5.shape === Shape(2, 1)) - assert(arrPower5.toArray === Array(27f, 3125f)) - - arr **= 2f - assert(arr.shape === Shape(2, 1)) - assert(arr.toArray === Array(9f, 25f)) - - arr.set(Array(3f, 5f)) - arr **= arr - assert(arr.shape === Shape(2, 1)) - assert(arr.toArray === Array(27f, 3125f)) - - // Float64 tests - arr = NDArray.array(Array(3d, 5d), shape = Shape(2, 1)) - - arrPower1 = NDArray.power(2d, arr) - assert(arrPower1.shape === Shape(2, 1)) - assert(arrPower1.dtype === DType.Float64) - assert(arrPower1.toFloat64Array === Array(8d, 32d)) - - arrPower2 = NDArray.power(arr, 2d) - assert(arrPower2.shape === Shape(2, 1)) - assert(arrPower2.dtype === DType.Float64) - assert(arrPower2.toFloat64Array === Array(9d, 25d)) - - arrPower3 = NDArray.power(arr, arr) - assert(arrPower3.shape === Shape(2, 1)) - assert(arrPower3.dtype === DType.Float64) - assert(arrPower3.toFloat64Array === Array(27d, 3125d)) - - arrPower4 = arr ** 2f - assert(arrPower4.shape === Shape(2, 1)) - assert(arrPower4.dtype === DType.Float64) - assert(arrPower4.toFloat64Array === Array(9d, 25d)) - - arrPower5 = arr ** arr - assert(arrPower5.shape === Shape(2, 1)) - assert(arrPower5.dtype === DType.Float64) - assert(arrPower5.toFloat64Array === Array(27d, 3125d)) - - arr **= 2d - assert(arr.shape === Shape(2, 1)) - assert(arr.dtype === DType.Float64) - assert(arr.toFloat64Array === Array(9d, 25d)) - - arr.set(Array(3d, 5d)) - arr **= arr - assert(arr.shape === Shape(2, 1)) - assert(arr.dtype === DType.Float64) - assert(arr.toFloat64Array === Array(27d, 3125d)) - } - - test("equal") { - var arr1 = NDArray.array(Array(1f, 2f, 3f, 5f), shape = Shape(2, 2)) - var arr2 = NDArray.array(Array(1f, 4f, 3f, 6f), shape = Shape(2, 2)) - - var arrEqual1 = NDArray.equal(arr1, arr2) - assert(arrEqual1.shape === Shape(2, 2)) - assert(arrEqual1.toArray === Array(1f, 0f, 1f, 0f)) - - var arrEqual2 = NDArray.equal(arr1, 3f) - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.toArray === Array(0f, 0f, 1f, 0f)) - - - // Float64 methods test - arr1 = NDArray.array(Array(1d, 2d, 3d, 5d), shape = Shape(2, 2)) - arr2 = NDArray.array(Array(1d, 4d, 3d, 6d), shape = Shape(2, 2)) - - arrEqual1 = NDArray.equal(arr1, arr2) - assert(arrEqual1.shape === Shape(2, 2)) - assert(arrEqual1.dtype === DType.Float64) - assert(arrEqual1.toFloat64Array === Array(1d, 0d, 1d, 0d)) - - arrEqual2 = NDArray.equal(arr1, 3d) - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.dtype === DType.Float64) - assert(arrEqual2.toFloat64Array === Array(0d, 
0d, 1d, 0d)) - } - - test("not_equal") { - var arr1 = NDArray.array(Array(1f, 2f, 3f, 5f), shape = Shape(2, 2)) - var arr2 = NDArray.array(Array(1f, 4f, 3f, 6f), shape = Shape(2, 2)) - - var arrEqual1 = NDArray.notEqual(arr1, arr2) - assert(arrEqual1.shape === Shape(2, 2)) - assert(arrEqual1.toArray === Array(0f, 1f, 0f, 1f)) - - var arrEqual2 = NDArray.notEqual(arr1, 3f) - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.toArray === Array(1f, 1f, 0f, 1f)) - - // Float64 methods test - - arr1 = NDArray.array(Array(1d, 2d, 3d, 5d), shape = Shape(2, 2)) - arr2 = NDArray.array(Array(1d, 4d, 3d, 6d), shape = Shape(2, 2)) - - arrEqual1 = NDArray.notEqual(arr1, arr2) - assert(arrEqual1.shape === Shape(2, 2)) - assert(arrEqual1.dtype === DType.Float64) - assert(arrEqual1.toFloat64Array === Array(0d, 1d, 0d, 1d)) - - arrEqual2 = NDArray.notEqual(arr1, 3d) - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.dtype === DType.Float64) - assert(arrEqual2.toFloat64Array === Array(1d, 1d, 0d, 1d)) - - } - - test("greater") { - var arr1 = NDArray.array(Array(1f, 2f, 4f, 5f), shape = Shape(2, 2)) - var arr2 = NDArray.array(Array(1f, 4f, 3f, 6f), shape = Shape(2, 2)) - - var arrEqual1 = arr1 > arr2 - assert(arrEqual1.shape === Shape(2, 2)) - assert(arrEqual1.toArray === Array(0f, 0f, 1f, 0f)) - - var arrEqual2 = arr1 > 2f - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.toArray === Array(0f, 0f, 1f, 1f)) - - // Float64 methods test - arr1 = NDArray.array(Array(1d, 2d, 4d, 5d), shape = Shape(2, 2)) - arr2 = NDArray.array(Array(1d, 4d, 3d, 6d), shape = Shape(2, 2)) - - arrEqual1 = arr1 > arr2 - assert(arrEqual1.shape === Shape(2, 2)) - assert(arrEqual1.dtype === DType.Float64) - assert(arrEqual1.toFloat64Array === Array(0d, 0d, 1d, 0d)) - - arrEqual2 = arr1 > 2d - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.dtype === DType.Float64) - assert(arrEqual2.toFloat64Array === Array(0d, 0d, 1d, 1d)) - } - - test("greater_equal") { - var arr1 = NDArray.array(Array(1f, 2f, 4f, 5f), shape = Shape(2, 2)) - var arr2 = NDArray.array(Array(1f, 4f, 3f, 6f), shape = Shape(2, 2)) - - var arrEqual1 = arr1 >= arr2 - assert(arrEqual1.shape === Shape(2, 2)) - assert(arrEqual1.toArray === Array(1f, 0f, 1f, 0f)) - - var arrEqual2 = arr1 >= 2f - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.toArray === Array(0f, 1f, 1f, 1f)) - - // Float64 methods test - arr1 = NDArray.array(Array(1d, 2d, 4d, 5d), shape = Shape(2, 2)) - arr2 = NDArray.array(Array(1d, 4d, 3d, 6d), shape = Shape(2, 2)) - - arrEqual1 = arr1 >= arr2 - assert(arrEqual1.shape === Shape(2, 2)) - assert(arrEqual1.dtype === DType.Float64) - assert(arrEqual1.toFloat64Array === Array(1d, 0d, 1d, 0d)) - - arrEqual2 = arr1 >= 2d - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.dtype === DType.Float64) - assert(arrEqual2.toFloat64Array === Array(0d, 1d, 1d, 1d)) - } - - test("lesser") { - var arr1 = NDArray.array(Array(1f, 2f, 4f, 5f), shape = Shape(2, 2)) - var arr2 = NDArray.array(Array(1f, 4f, 3f, 6f), shape = Shape(2, 2)) - - var arrEqual1 = arr1 < arr2 - assert(arrEqual1.shape === Shape(2, 2)) - assert(arrEqual1.toArray === Array(0f, 1f, 0f, 1f)) - - var arrEqual2 = arr1 < 2f - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.toArray === Array(1f, 0f, 0f, 0f)) - - // Float64 methods test - arr1 = NDArray.array(Array(1d, 2d, 4d, 5d), shape = Shape(2, 2)) - arr2 = NDArray.array(Array(1d, 4d, 3d, 6d), shape = Shape(2, 2)) - - arrEqual1 = arr1 < arr2 - assert(arrEqual1.shape === Shape(2, 2)) - 
assert(arrEqual1.dtype === DType.Float64) - assert(arrEqual1.toFloat64Array === Array(0d, 1d, 0d, 1d)) - - arrEqual2 = arr1 < 2d - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.dtype === DType.Float64) - assert(arrEqual2.toFloat64Array === Array(1d, 0d, 0d, 0d)) - - } - - test("lesser_equal") { - var arr1 = NDArray.array(Array(1f, 2f, 4f, 5f), shape = Shape(2, 2)) - var arr2 = NDArray.array(Array(1f, 4f, 3f, 6f), shape = Shape(2, 2)) - - var arrEqual1 = arr1 <= arr2 - assert(arrEqual1.shape === Shape(2, 2)) - assert(arrEqual1.toArray === Array(1f, 1f, 0f, 1f)) - - var arrEqual2 = arr1 <= 2f - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.toArray === Array(1f, 1f, 0f, 0f)) - - // Float64 methods test - arr1 = NDArray.array(Array(1d, 2d, 4d, 5d), shape = Shape(2, 2)) - arr2 = NDArray.array(Array(1d, 4d, 3d, 6d), shape = Shape(2, 2)) - - arrEqual1 = arr1 <= arr2 - assert(arrEqual1.shape === Shape(2, 2)) - assert(arrEqual1.dtype === DType.Float64) - assert(arrEqual1.toFloat64Array === Array(1d, 1d, 0d, 1d)) - - arrEqual2 = arr1 <= 2d - assert(arrEqual2.shape === Shape(2, 2)) - assert(arrEqual2.dtype === DType.Float64) - assert(arrEqual2.toFloat64Array === Array(1d, 1d, 0d, 0d)) - } - - test("choose_element_0index") { - val arr = NDArray.array(Array(1f, 2f, 3f, 4f, 6f, 5f), shape = Shape(2, 3)) - val indices = NDArray.array(Array(0f, 1f), shape = Shape(2)) - val res = NDArray.choose_element_0index(arr, indices) - assert(res.toArray === Array(1f, 6f)) - } - - test("copy to") { - var source = NDArray.array(Array(1f, 2f, 3f), shape = Shape(1, 3)) - var dest = NDArray.empty(1, 3) - source.copyTo(dest) - assert(dest.shape === Shape(1, 3)) - assert(dest.toArray === Array(1f, 2f, 3f)) - - // Float64 methods test - source = NDArray.array(Array(1d, 2d, 3d), shape = Shape(1, 3)) - dest = NDArray.empty(shape = Shape(1, 3), dtype = DType.Float64) - source.copyTo(dest) - assert(dest.dtype === DType.Float64) - assert(dest.toFloat64Array === Array(1d, 2d, 3d)) - } - - test("abs") { - val arr = NDArray.array(Array(-1f, -2f, 3f), shape = Shape(3, 1)) - assert(NDArray.abs(arr).toArray === Array(1f, 2f, 3f)) - } - - test("sign") { - val arr = NDArray.array(Array(-1f, -2f, 3f), shape = Shape(3, 1)) - assert(NDArray.sign(arr).toArray === Array(-1f, -1f, 1f)) - } - - test("round") { - val arr = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1)) - assert(NDArray.round(arr).toArray === Array(2f, 2f, 4f)) - } - - test("ceil") { - val arr = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1)) - assert(NDArray.ceil(arr).toArray === Array(2f, 3f, 4f)) - } - - test("floor") { - val arr = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1)) - assert(NDArray.floor(arr).toArray === Array(1f, 2f, 3f)) - } - - test("square") { - val arr = NDArray.array(Array(1f, 2f, 3f), shape = Shape(3, 1)) - assert(NDArray.square(arr).toArray === Array(1f, 4f, 9f)) - } - - test("exp") { - val arr = NDArray.ones(1) - assert(NDArray.exp(arr).toScalar === 2.71828f +- 1e-3f) - } - - test("log") { - val arr = NDArray.empty(1) - arr.set(10f) - assert(NDArray.log(arr).toScalar === 2.302585f +- 1e-5f) - } - - test("cos") { - val arr = NDArray.empty(1) - arr.set(12f) - assert(NDArray.cos(arr).toScalar === 0.8438539f +- 1e-5f) - } - - test("sin") { - val arr = NDArray.empty(1) - arr.set(12f) - assert(NDArray.sin(arr).toScalar === -0.536572918f +- 1e-5f) - } - - test("max") { - val arr = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1)) - assert(NDArray.max(arr).toScalar === 3.7f +- 1e-3f) - } - 
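  // NDArray.max reduces the whole array to one scalar, whereas NDArray.maximum
  // (exercised in the next test) compares elementwise. A minimal sketch of the
  // distinction, using only calls already covered by this suite:
  val xs = NDArray.array(Array(1f, 5f, 3f), shape = Shape(3))
  val ys = NDArray.array(Array(4f, 2f, 6f), shape = Shape(3))
  assert(NDArray.max(xs).toScalar === 5f)                       // one scalar overall
  assert(NDArray.maximum(xs, ys).toArray === Array(4f, 5f, 6f)) // per-element maxima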
- test("maximum") { - val arr1 = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1)) - val arr2 = NDArray.array(Array(4f, 1f, 3.5f), shape = Shape(3, 1)) - val arr = NDArray.maximum(arr1, arr2) - assert(arr.shape === Shape(3, 1)) - assert(arr.toArray === Array(4f, 2.1f, 3.7f)) - - // Float64 methods test - val arr3 = NDArray.array(Array(1d, 2d, 3d), shape = Shape(3, 1)) - val maxArr = NDArray.maximum(arr3, 10d) - assert(maxArr.shape === Shape(3, 1)) - assert(maxArr.toArray === Array(10d, 10d, 10d)) - } - - test("min") { - val arr = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1)) - assert(NDArray.min(arr).toScalar === 1.5f +- 1e-3f) - } - - test("minimum") { - val arr1 = NDArray.array(Array(1.5f, 2.1f, 3.7f), shape = Shape(3, 1)) - val arr2 = NDArray.array(Array(4f, 1f, 3.5f), shape = Shape(3, 1)) - val arr = NDArray.minimum(arr1, arr2) - assert(arr.shape === Shape(3, 1)) - assert(arr.toArray === Array(1.5f, 1f, 3.5f)) - - // Float64 methods test - val arr3 = NDArray.array(Array(4d, 5d, 6d), shape = Shape(3, 1)) - val minArr = NDArray.minimum(arr3, 5d) - assert(minArr.shape === Shape(3, 1)) - assert(minArr.toFloat64Array === Array(4d, 5d, 5d)) - } - - test("sum") { - var arr = NDArray.array(Array(1f, 2f, 3f, 4f), shape = Shape(2, 2)) - assert(NDArray.sum(arr).toScalar === 10f +- 1e-3f) - - } - - test("argmaxChannel") { - val arr = NDArray.array(Array(1f, 2f, 4f, 3f), shape = Shape(2, 2)) - val argmax = NDArray.argmax_channel(arr) - assert(argmax.shape === Shape(2)) - assert(argmax.toArray === Array(1f, 0f)) - } - - test("concatenate axis-0") { - val arr1 = NDArray.array(Array(1f, 2f, 4f, 3f, 3f, 3f), shape = Shape(2, 3)) - val arr2 = NDArray.array(Array(8f, 7f, 6f), shape = Shape(1, 3)) - val arr = NDArray.concatenate(arr1, arr2) - assert(arr.shape === Shape(3, 3)) - assert(arr.toArray === Array(1f, 2f, 4f, 3f, 3f, 3f, 8f, 7f, 6f)) - - // Try concatenating float32 arr with float64 arr. Should get exception - intercept[Exception] { - val arr3 = NDArray.array(Array (5d, 6d, 7d), shape = Shape(1, 3)) - NDArray.concatenate(Array(arr1, arr3)) - } - } - - test("concatenate axis-1") { - val arr1 = NDArray.array(Array(1f, 2f, 3f, 4f), shape = Shape(2, 2)) - val arr2 = NDArray.array(Array(5f, 6f), shape = Shape(2, 1)) - val arr = NDArray.concatenate(Array(arr1, arr2), axis = 1) - assert(arr.shape === Shape(2, 3)) - assert(arr.toArray === Array(1f, 2f, 5f, 3f, 4f, 6f)) - - // Try concatenating float32 arr with float64 arr. 
Should get exception - intercept[Exception] { - val arr3 = NDArray.array(Array (5d, 6d), shape = Shape(2, 1)) - NDArray.concatenate(Array(arr1, arr3), axis = 1) - } - } - - test("transpose") { - val arr = NDArray.array(Array(1f, 2f, 4f, 3f, 3f, 3f), shape = Shape(2, 3)) - assert(arr.toArray === Array(1f, 2f, 4f, 3f, 3f, 3f)) - assert(arr.T.shape === Shape(3, 2)) - assert(arr.T.toArray === Array(1f, 3f, 2f, 3f, 4f, 3f)) - } - - test("save and load with names") { - val filename - = s"${System.getProperty("java.io.tmpdir")}/ndarray-${sequence.getAndIncrement}.bin" - try { - val ndarray = NDArray.array(Array(1f, 2f, 3f), shape = Shape(3, 1)) - NDArray.save(filename, Map("local" -> ndarray)) - val (keys, arrays) = NDArray.load(filename) - assert(keys.length === 1) - assert(keys(0) === "local") - assert(arrays.length === 1) - val loadedArray = arrays(0) - assert(loadedArray.shape === Shape(3, 1)) - assert(loadedArray.toArray === Array(1f, 2f, 3f)) - assert(loadedArray.dtype === DType.Float32) - } finally { - val file = new File(filename) - file.delete() - } - - // Try the same for Float64 array - try { - val ndarray = NDArray.array(Array(1d, 2d, 3d), shape = Shape(3, 1), ctx = Context.cpu()) - NDArray.save(filename, Map("local" -> ndarray)) - val (keys, arrays) = NDArray.load(filename) - assert(keys.length === 1) - assert(keys(0) === "local") - assert(arrays.length === 1) - val loadedArray = arrays(0) - assert(loadedArray.shape === Shape(3, 1)) - assert(loadedArray.toArray === Array(1d, 2d, 3d)) - assert(loadedArray.dtype === DType.Float64) - } finally { - val file = new File(filename) - file.delete() - } - } - - test("save and load without names") { - val filename - = s"${System.getProperty("java.io.tmpdir")}/ndarray-${sequence.getAndIncrement}.bin" - try { - val ndarray = NDArray.array(Array(1f, 2f, 3f), shape = Shape(3, 1)) - NDArray.save(filename, Array(ndarray)) - val (keys, arrays) = NDArray.load(filename) - assert(keys.length === 0) - assert(arrays.length === 1) - val loadedArray = arrays(0) - assert(loadedArray.shape === Shape(3, 1)) - assert(loadedArray.toArray === Array(1f, 2f, 3f)) - assert(loadedArray.dtype === DType.Float32) - } finally { - val file = new File(filename) - file.delete() - } - - // Try the same thing for Float64 array : - - try { - val ndarray = NDArray.array(Array(1d, 2d, 3d), shape = Shape(3, 1), ctx = Context.cpu()) - NDArray.save(filename, Array(ndarray)) - val (keys, arrays) = NDArray.load(filename) - assert(keys.length === 0) - assert(arrays.length === 1) - val loadedArray = arrays(0) - assert(loadedArray.shape === Shape(3, 1)) - assert(loadedArray.toArray === Array(1d, 2d, 3d)) - assert(loadedArray.dtype === DType.Float64) - } finally { - val file = new File(filename) - file.delete() - } - } - - test("get context") { - val ndarray = NDArray.ones(3, 2) - val ctx = ndarray.context - assert(ctx.deviceType === "cpu") - assert(ctx.deviceId === 0) - } - - test("equals") { - val ndarray1 = NDArray.array(Array(1f, 2f, 3f), shape = Shape(3, 1)) - val ndarray2 = NDArray.array(Array(1f, 2f, 3f), shape = Shape(3, 1)) - val ndarray3 = NDArray.array(Array(1f, 2f, 3f), shape = Shape(1, 3)) - val ndarray4 = NDArray.array(Array(3f, 2f, 3f), shape = Shape(3, 1)) - val ndarray5 = NDArray.array(Array(3d, 2d, 3d), shape = Shape(3, 1), ctx = Context.cpu()) - ndarray1 shouldEqual ndarray2 - ndarray1 shouldNot equal(ndarray3) - ndarray1 shouldNot equal(ndarray4) - ndarray5 shouldNot equal(ndarray3) - } - - test("slice") { - val arr = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), shape 
= Shape(3, 2)) - - val arr1 = arr.slice(1) - assert(arr1.shape === Shape(1, 2)) - assert(arr1.toArray === Array(3f, 4f)) - - val arr2 = arr.slice(1, 3) - assert(arr2.shape === Shape(2, 2)) - assert(arr2.toArray === Array(3f, 4f, 5f, 6f)) - } - - test("at") { - val arr = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), shape = Shape(3, 2)) - - val arr1 = arr.at(1) - assert(arr1.shape === Shape(2)) - assert(arr1.toArray === Array(3f, 4f)) - } - - test("reshape") { - var arr = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), shape = Shape(3, 2)) - - var arr1 = arr.reshape(Array(2, 3)) - assert(arr1.shape === Shape(2, 3)) - assert(arr1.toArray === Array(1f, 2f, 3f, 4f, 5f, 6f)) - - arr.set(1f) - assert(arr1.toArray === Array(1f, 1f, 1f, 1f, 1f, 1f)) - - arr = NDArray.ones(1, 384, 1) - arr1 = arr.reshape(Array(0, -3)) - assert(arr1.shape === Shape(1, 384)) - } - - test("dispose deps") { - val arr1 = NDArray.ones(1, 2) - val arr2 = NDArray.ones(1, 2) - val arr3 = NDArray.ones(1, 2) - - val arrWithDeps = (arr1 + arr2) + arr3 - assert(arrWithDeps.dependencies.size === 4) // arr1 + arr2 - assert(arrWithDeps.dependencies.contains(arr1.handle)) - assert(arrWithDeps.dependencies.contains(arr2.handle)) - assert(arrWithDeps.dependencies.contains(arr3.handle)) - assert(!arr1.isDisposed) - assert(!arr2.isDisposed) - assert(!arr3.isDisposed) - - val arrNoDeps = (arr1 + arr2 + arr3).disposeDeps() - assert(arrNoDeps.dependencies.isEmpty) - assert(arr1.isDisposed) - assert(arr2.isDisposed) - assert(arr3.isDisposed) - } - - test("dispose deps except") { - val arr1 = NDArray.ones(1, 2) - val arr2 = NDArray.ones(1, 2) - val arr3 = NDArray.ones(1, 2) - val arr1_2 = arr1 + arr2 - - val arr = (arr1 + arr2 + arr1_2 + arr3).disposeDepsExcept(arr1_2) - // since arr1_2 depends on arr1 & arr2 - // arr1 & arr2 will not be disposed either - assert(arr.dependencies.size === 3) - assert(arr.dependencies.contains(arr1.handle)) - assert(arr.dependencies.contains(arr2.handle)) - assert(arr.dependencies.contains(arr1_2.handle)) - assert(!arr1.isDisposed) - assert(!arr2.isDisposed) - assert(!arr1_2.isDisposed) - assert(arr3.isDisposed) - } - - test("serialize and deserialize") { - val arr = NDArray.ones(1, 2) * 3 - val bytes = arr.serialize() - val arrCopy = NDArray.deserialize(bytes) - assert(arr === arrCopy) - assert(arrCopy.dtype === DType.Float32) - } - - test("dtype int32") { - val arr = NDArray.ones(Shape(1, 2), dtype = DType.Int32) * 2 - assert(arr.dtype === DType.Int32) - assert(arr.internal.getRaw.length === 8) - assert(arr.internal.toFloatArray === Array(2f, 2f)) - assert(arr.internal.toIntArray === Array(2, 2)) - assert(arr.internal.toDoubleArray === Array(2d, 2d)) - assert(arr.internal.toByteArray === Array(2.toByte, 2.toByte)) - } - - test("dtype uint8") { - val arr = NDArray.ones(Shape(1, 2), dtype = DType.UInt8) * 2 - assert(arr.dtype === DType.UInt8) - assert(arr.internal.getRaw.length === 2) - assert(arr.internal.toFloatArray === Array(2f, 2f)) - assert(arr.internal.toIntArray === Array(2, 2)) - assert(arr.internal.toDoubleArray === Array(2d, 2d)) - assert(arr.internal.toByteArray === Array(2.toByte, 2.toByte)) - } - - test("dtype float64") { - val arr = NDArray.ones(Shape(1, 2), dtype = DType.Float64) * 2 - assert(arr.dtype === DType.Float64) - assert(arr.internal.getRaw.length === 16) - assert(arr.internal.toFloatArray === Array(2f, 2f)) - assert(arr.internal.toIntArray === Array(2, 2)) - assert(arr.internal.toDoubleArray === Array(2d, 2d)) - assert(arr.internal.toByteArray === Array(2.toByte, 2.toByte)) - } - - 
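  // The getRaw lengths asserted in the dtype tests above are simply
  // elementCount * bytesPerElement (UInt8 -> 1, Int32 -> 4, Float64 -> 8).
  // A minimal sketch of that invariant written out explicitly:
  val arr64 = NDArray.ones(Shape(1, 2), dtype = DType.Float64)
  val expectedBytes = arr64.size * 8 // 2 elements * 8 bytes per Float64 == 16
  assert(arr64.internal.getRaw.length === expectedBytes)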
test("NDArray random module is generated properly") { - val lam = NDArray.ones(1, 2) - val rnd = NDArray.random.poisson(lam = Some(lam), shape = Some(Shape(3, 4))) - val rnd2 = NDArray.random.poisson(lam = Some(1f), shape = Some(Shape(3, 4)), - dtype = Some("float64")) - assert(rnd.shape === Shape(1, 2, 3, 4)) - assert(rnd2.shape === Shape(3, 4)) - assert(rnd2.head.dtype === DType.Float64) - } - - test("NDArray random module is generated properly - special case of 'normal'") { - val mu = NDArray.ones(1, 2) - val sigma = NDArray.ones(1, 2) * 2 - val rnd = NDArray.random.normal(mu = Some(mu), sigma = Some(sigma), shape = Some(Shape(3, 4))) - val rnd2 = NDArray.random.normal(mu = Some(1f), sigma = Some(2f), shape = Some(Shape(3, 4)), - dtype = Some("float64")) - assert(rnd.shape === Shape(1, 2, 3, 4)) - assert(rnd2.shape === Shape(3, 4)) - assert(rnd2.head.dtype === DType.Float64) - } - - test("Generated api") { - // Without SomeConversion - val arr3 = NDArray.ones(Shape(1, 2), dtype = DType.Float64) - val arr4 = NDArray.ones(Shape(1), dtype = DType.Float64) - val arr5 = NDArray.api.norm(arr3, ord = Some(1), out = Some(arr4)) - // With SomeConversion - import org.apache.mxnet.util.OptionConversion._ - val arr = NDArray.ones(Shape(1, 2), dtype = DType.Float64) - val arr2 = NDArray.ones(Shape(1), dtype = DType.Float64) - NDArray.api.norm(arr, ord = 1, out = arr2) - val result = NDArray.api.dot(arr2, arr2) - } -} diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/NativeResourceSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/NativeResourceSuite.scala deleted file mode 100644 index 81a9f605a887..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/NativeResourceSuite.scala +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package org.apache.mxnet
-
-import java.lang.ref.ReferenceQueue
-import java.util.concurrent.ConcurrentHashMap
-
-import org.apache.mxnet.Base.CPtrAddress
-import org.mockito.Matchers.any
-import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers, TagAnnotation}
-import org.mockito.Mockito._
-
-@TagAnnotation("resource")
-class NativeResourceSuite extends FunSuite with BeforeAndAfterAll with Matchers {
-
-  object TestRef {
-    def getRefQueue: ReferenceQueue[NativeResource] = { NativeResourceRef.refQ}
-    def getRefMap: ConcurrentHashMap[NativeResourceRef, CPtrAddress]
-    = {NativeResourceRef.refMap}
-    def getCleaner: Thread = { NativeResourceRef.cleaner }
-  }
-
-  class TestRef(resource: NativeResource,
-                resourceDeAllocator: CPtrAddress => Int)
-    extends NativeResourceRef(resource, resourceDeAllocator) {
-  }
-
-  test(testName = "test native resource setup/teardown") {
-    val a = spy(NDArray.ones(Shape(2, 3)))
-    val aRef = a.ref
-    val spyRef = spy(aRef)
-
-    assert(TestRef.getRefMap.containsKey(aRef) == true)
-    a.close()
-    verify(a).dispose()
-    verify(a).nativeDeAllocator
-    // resourceDeAllocator does not get called when explicitly closing
-    verify(spyRef, times(0)).resourceDeAllocator
-
-    assert(TestRef.getRefMap.containsKey(aRef) == false)
-    assert(a.isDisposed == true, "isDisposed should be set to true after calling close")
-  }
-
-  test(testName = "test dispose") {
-    val a: NDArray = spy(NDArray.ones(Shape(3, 4)))
-    val aRef = a.ref
-    val spyRef = spy(aRef)
-    a.dispose()
-    verify(a).nativeDeAllocator
-    assert(TestRef.getRefMap.containsKey(aRef) == false)
-    assert(a.isDisposed == true, "isDisposed should be set to true after calling close")
-  }
-}
-
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/NumpyScopeSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/NumpyScopeSuite.scala
deleted file mode 100644
index 0581a9890d84..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/NumpyScopeSuite.scala
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import org.scalatest.{BeforeAndAfterAll, FunSuite}
-
-class NumpyScopeSuite extends FunSuite with BeforeAndAfterAll {
-  test("compatible") {
-    NumpyScope.enableNumpyShape.withScope {
-      assert(NumpyScope.isNumpyShape === true)
-    }
-  }
-
-  test("incompatible") {
-    NumpyScope.disableNumpyShape.withScope {
-      assert(NumpyScope.isNumpyShape === false)
-    }
-  }
-}
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/OperatorSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/OperatorSuite.scala
deleted file mode 100644
index dc11b7bfb9b7..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/OperatorSuite.scala
+++ /dev/null
@@ -1,1035 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import org.apache.mxnet.CheckUtils._
-import org.scalatest.prop.GeneratorDrivenPropertyChecks
-import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}
-import org.scalacheck.Gen
-
-import scala.collection.mutable
-import scala.collection.mutable.ArrayBuffer
-
-class OperatorSuite extends FunSuite with BeforeAndAfterAll
-  with Matchers with GeneratorDrivenPropertyChecks {
-  private def checkElementwiseSumWithShape(shape: Shape, n: Int) = {
-    // forward
-    val inputs = (0 until n).map(i => Symbol.Variable(s"arg $i"))
-    val out = Symbol.ElementWiseSum(name = "esum")(inputs: _*)()
-    val arr = (0 until n).map(_ => Random.uniform(-10, 10, shape))
-    val arrGrad = (0 until n).map(_ => NDArray.empty(shape))
-    val exec = out.bind(Context.cpu(), args = arr, argsGrad = arrGrad)
-    exec.forward()
-    val forwardOutput = exec.outputs(0)
-    val forwardOutputExpected = arr.reduce(_ + _)
-    assert(reldiff(forwardOutput, forwardOutputExpected) < 5e-5)
-
-    // backward
-    val outGrad = Random.uniform(-10, 10, shape)
-    exec.backward(outGrad)
-    arrGrad.foreach(grad => assert(grad === outGrad))
-  }
-
-  test("elementwise sum") {
-    checkElementwiseSumWithShape(Shape(5, 5, 3), 4)
-    forAll (Gen.choose(1, 4), Gen.choose(1, 8)) { (dim, n) =>
-      forAll (Gen.listOfN(dim, Gen.choose(1, Math.pow(1000, 1.0 / dim).toInt))) { shape =>
-        checkElementwiseSumWithShape(Shape(shape), n)
-      }
-    }
-  }
-
-  // TODO: checkSliceChannel
-
-  private def checkConcatWithShape(shapes: Seq[Shape], dimension: Int, skipSecond: Boolean) = {
-    // if skipSecond is true, second argument will not have gradient.
- // it is to test #1130 - // forward - val targetDim = shapes.map(_(dimension)).sum - - val inputs = (0 until shapes.size).map(i => Symbol.Variable(s"arg$i")) - val out = Symbol.Concat(name = "conc")(inputs: _*)(Map("dim" -> dimension)) - val arr = shapes.map { shape => - val nd = NDArray.empty(shape) - nd.set(shape(dimension)) - } - val arrNp = arr.map(_.copy()) - val arrGrad = shapes.map(NDArray.empty(_)) - val argNames = out.listArguments() - val dictGrad = - (argNames zip arrGrad).filter { case (name, d) => - !skipSecond || name != "arg1" - }.toMap - - val args = out.listArguments() - val (argShapes, outShapes, auxShapes) = out.inferShape(args.zip(shapes).toMap) - val outGrad = NDArray.empty(outShapes(0)) - val exec1 = out.bind(Context.cpu(), arr, dictGrad) - exec1.forward() - val out1 = exec1.outputs(0) - // FIXME: only support concatenate at axis0 - val ret = NDArray.concatenate(arr) - assert(out1 === ret) - - // backward - out1.copyTo(outGrad) - outGrad += 1 - exec1.backward(outGrad) - argNames.zipWithIndex.foreach { case (name, i) => - if (!skipSecond || name != "arg1") { - val grad = dictGrad(name) - val npGrad = arrNp(i) - assert(grad === npGrad + 1) - } - } - } - - test("concat") { - val merge = Array(2, 3, 4, 5, 6) - forAll (Gen.choose(2, 5)) { dim => - val shapes = mutable.ArrayBuffer.empty[Shape] - for (i <- 0 until dim) { - shapes += Shape(merge(i), 2) - } - // TODO: check dimension > 0 - checkConcatWithShape(shapes, 0, skipSecond = true) - checkConcatWithShape(shapes, 0, skipSecond = false) - } - } - - // TODO: test softmax - - test("swap axes") { - val data = Symbol.Variable("data") - val shape = Shape(2, 3, 4) - val arrData = NDArray.ones(shape) - arrData.slice(0).set(1f) - arrData.slice(1).set(2f) - // arrData = - // - // [[[ 1., 1., 1., 1.], - // [ 1., 1., 1., 1.], - // [ 1., 1., 1., 1.]], - // - // [[ 2., 2., 2., 2.], - // [ 2., 2., 2., 2.], - // [ 2., 2., 2., 2.]]] - val swap0 = Symbol.SwapAxis()()(Map("data" -> data, "dim1" -> 0, "dim2" -> 2)) - val swap = Symbol.SwapAxis()()(Map("data" -> swap0, "dim1" -> 1, "dim2" -> 2)) - val exec = swap.bind(Context.cpu(), args = Array(arrData)) - exec.forward() - val out = exec.outputs(0) - - // After swapaxes(swapaxes(arrData, 0, 2), 1, 2) - // out should be - // [[[ 1., 1., 1.], - // [ 2., 2., 2.]], - // - // [[ 1., 1., 1.], - // [ 2., 2., 2.]], - // - // [[ 1., 1., 1.], - // [ 2., 2., 2.]], - // - // [[ 1., 1., 1.], - // [ 2., 2., 2.]]] - assert(out.shape === Shape(4, 2, 3)) - for (i <- 0 until 4) { - val axis0 = out.slice(i) - assert(CheckUtils.reldiff(axis0.toArray, Array(1f, 1f, 1f, 2f, 2f, 2f)) < 1e-6f) - } - } - - test("scalar op") { - CancelTestUtil.assumeStandardDecimalSeparator() - - val data = Symbol.Variable("data") - val shape = Shape(3, 4) - val dataTmp = NDArray.ones(shape) * 5 - - val test = { - import org.apache.mxnet.SymbolConversions._ - 2 / (4 - ((1 + data + 1) * 2 / 5) - 0.2) - } - - val (npout1, npout) = { - import org.apache.mxnet.NDArrayConversions._ - val npout1 = 4 - ((1 + dataTmp + 1) * 2 / 5) - 0.2f - val npout = 2 / npout1 - (npout1, npout) - } - - checkSymbolicForward(test, Array(dataTmp), Array(npout)) - - val npoutGrad = new NDArrayConversions(2f * (2f * 2f / 5f)) / (npout1 * npout1) - - checkSymbolicBackward(test, Array(dataTmp), Array(NDArray.ones(shape) * 2), Array(npoutGrad)) - } - - test("ones") { - val ones = Symbol.ones(shape = Shape(2, 2)) - val exe = ones.simpleBind(ctx = Context.cpu(), gradReq = "write", shapeDict = Map()) - exe.forward(isTrain = false) - 
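    // simpleBind infers and allocates every executor array from the symbol plus
    // shapeDict (empty here because Symbol.ones already fixes its own shape);
    // with gradReq = "write", gradients are overwritten on each backward call.
    // A hedged sketch of inspecting that allocation (cf. the arange test below):
    //   exe.outputs.head.shape // Shape(2, 2), inferred from the symbol itself
    //   exe.gradArrays.length  // 0, since ones() has no differentiable inputs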
assert(CheckUtils.reldiff(Array(1f, 1f, 1f, 1f), exe.outputs.head.toArray) <= 1e-5f) - } - - test("zeros") { - val zeros = Symbol.zeros(shape = Shape(2, 2)) - val exe = zeros.simpleBind(ctx = Context.cpu(), gradReq = "write", shapeDict = Map()) - exe.forward(isTrain = false) - assert(Array(0f, 0f, 0f, 0f) === exe.outputs.head.toArray) - } - - test("arange") { - for (i <- 0 until 5) { - val start = scala.util.Random.nextFloat() * 5 - val stop = start + scala.util.Random.nextFloat() * 100 - val step = scala.util.Random.nextFloat() * 4 - val repeat = 1 - val result = (start.toDouble until stop.toDouble by step.toDouble) - .flatMap(x => Array.fill[Float](repeat)(x.toFloat)) - val x = Symbol.arange(start = start, stop = Some(stop), step = step, repeat = repeat) - var exe = x.simpleBind(ctx = Context.cpu(), gradReq = "write", shapeDict = Map()) - exe.forward(isTrain = false) - assert(exe.gradArrays.length == 0) - assert(CheckUtils.reldiff(result.toArray, exe.outputs.head.toArray) <= 1e-4f) - } - } - - test("scalar pow") { - val data = Symbol.Variable("data") - val shape = Shape(1, 1) - val dataTmp = NDArray.ones(shape) * 3 - val dataTmpPowered = NDArray.ones(shape) * 9 - val test = data ** 2 - // TODO: check numeric gradient - checkSymbolicForward(test, Array(dataTmp), Array(dataTmpPowered)) - checkSymbolicBackward(test, Array(dataTmp), Array(NDArray.ones(shape)), Array(dataTmp * 2)) - } - - test("symbol pow") { - CancelTestUtil.assumeStandardDecimalSeparator() - - val shape = Shape(1, 1) - - val data = Symbol.Variable("data") - val dataTmp = NDArray.ones(shape) * 2 - - val exp = Symbol.Variable("exp") - val expTmp = NDArray.ones(shape) * 3 - - val test = data ** exp - - // TODO: check numeric gradient - checkSymbolicForward(test, Seq(dataTmp, expTmp), Seq(NDArray.ones(shape) * 8)) - - val dataDir = NDArray.ones(shape) * 4 * expTmp // dataTmp**(expTmp - 1) * expTmp - // expDir = dataTmp**(expTmp) * log(dataTmp) - val expDir = NDArray.ones(shape) * 8 * (NDArray.ones(shape) * Math.log(2).toFloat) - checkSymbolicBackward(test, Seq(dataTmp, expTmp), - Seq(NDArray.ones(shape)), Seq(dataDir, expDir)) - } - - test("pow fn") { - CancelTestUtil.assumeStandardDecimalSeparator() - - val shape = Shape(3, 4) - val exp = Symbol.Variable("exp") - import SymbolConversions._ - val y = 2 ** exp - val x = NDArray.ones(shape) * 3 - // TODO: check numeric gradient - checkSymbolicForward(y, Seq(x), Seq(NDArray.ones(shape) * 8)) // 2**x - checkSymbolicBackward(y, Seq(x), Seq(NDArray.ones(shape)), - // log(2) * 2**x - Seq(NDArray.ones(shape) * 8 * Math.log(2).toFloat)) - } - - test("scalar equal") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(0f, 1f, 0f, 0f), shape) - val test = Symbol.equal(data, 2f) - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("symbol equal") { - val data = Symbol.Variable("datas") - val data2 = Symbol.Variable("datas2") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(1f, 0f, 0f, 0f), shape) - val test = Symbol.equal(data, data2) - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", - shapeDict = Map("datas" -> shape, "datas2" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - 
exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("scalar equal 2") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(0f, 1f, 0f, 0f), shape) - val test = Symbol.equal(2f, data) - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("scalar not_equal") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(1f, 0f, 1f, 1f), shape) - val test = Symbol.notEqual(data, 2f) - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("symbol not_equal") { - val data = Symbol.Variable("datas") - val data2 = Symbol.Variable("datas2") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(0f, 1f, 1f, 1f), shape) - val test = Symbol.notEqual(data, data2) - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", - shapeDict = Map("datas" -> shape, "datas2" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("scalar not_equal 2") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(1f, 0f, 1f, 1f), shape) - val test = Symbol.notEqual(2f, data) - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("scalar greater") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(0f, 0f, 1f, 1f), shape) - val test = data > 2f - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("symbol greater") { - val data = Symbol.Variable("datas") - val data2 = Symbol.Variable("datas2") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(0f, 0f, 1f, 0f), shape) - val test = data > data2 - - val 
exec = test.simpleBind(Context.cpu(), gradReq = "write", - shapeDict = Map("datas" -> shape, "datas2" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("scalar greater 2") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(1f, 0f, 0f, 0f), shape) - import SymbolConversions._ - val test = 2f > data - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("scalar greater_equal") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(0f, 1f, 1f, 1f), shape) - val test = data >= 2f - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("symbol greater_equal") { - val data = Symbol.Variable("datas") - val data2 = Symbol.Variable("datas2") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(1f, 0f, 1f, 0f), shape) - val test = data >= data2 - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", - shapeDict = Map("datas" -> shape, "datas2" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("scalar greater_equal 2") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(1f, 1f, 0f, 0f), shape) - import SymbolConversions._ - val test = 2f >= data - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("scalar lesser") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(1f, 0f, 0f, 0f), shape) - val test = data < 2f - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("symbol lesser") { - val data = 
Symbol.Variable("datas") - val data2 = Symbol.Variable("datas2") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(0f, 1f, 0f, 1f), shape) - val test = data < data2 - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", - shapeDict = Map("datas" -> shape, "datas2" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("scalar lesser 2") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(0f, 0f, 1f, 1f), shape) - import SymbolConversions._ - val test = 2f < data - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("scalar lesser_equal") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(1f, 1f, 0f, 0f), shape) - val test = data <= 2f - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("symbol lesser_equal") { - val data = Symbol.Variable("datas") - val data2 = Symbol.Variable("datas2") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(1f, 1f, 0f, 1f), shape) - val test = data <= data2 - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", - shapeDict = Map("datas" -> shape, "datas2" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - exec.argDict("datas2").set(Array(1f, 3f, 2f, 6f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - assert(exec.gradDict("datas2").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("scalar lesser_equal 2") { - val data = Symbol.Variable("datas") - val shape = Shape(2, 2) - val dataTmpExpected = NDArray.array(Array(0f, 1f, 1f, 1f), shape) - import SymbolConversions._ - val test = 2f <= data - - val exec = test.simpleBind(Context.cpu(), gradReq = "write", shapeDict = Map("datas" -> shape)) - exec.argDict("datas").set(Array(1f, 2f, 3f, 4f)) - - exec.forward() - assert(reldiff(exec.outputs.head, dataTmpExpected) <= 1e-5f) - - exec.backward(NDArray.ones(shape)) - assert(exec.gradDict("datas").toArray === Array.fill[Float](shape.product)(0f)) - } - - test("embedding") { - val inDim = 10 - val outDim = 4 - val batch = 24 - - val data = Symbol.Variable("data") - val embed = Symbol.Embedding(name = "embed")()( - Map("data" -> data, "input_dim" -> inDim, "output_dim" -> outDim)) - // TODO - } - - // check ops handle duplicate input correctly. 
- test("binary op duplicate input") { - val data = Symbol.Variable("data") - val shape = Shape(3, 4) - val dataTmp = NDArray.ones(shape) * 5 - val arrData = dataTmp.copy() - val arrGrad = NDArray.ones(shape) * 3 - val outGrad = NDArray.ones(shape) - val square = data * data - val exeSquare = square.bind(Context.cpu(), args = Array(arrData), argsGrad = Array(arrGrad)) - exeSquare.forward() - assert(reldiff(exeSquare.outputs.head, dataTmp * dataTmp) < 1e-6f) - exeSquare.backward(outGrad) - assert(reldiff(arrGrad, dataTmp * 2f) < 1e-6f) - } - - test("sign") { - val data = Symbol.Variable("data") - val shape = Shape(3, 4) - val dataTmp = NDArray.ones(shape) * 5 - val arrData = dataTmp.copy() - val arrGrad = NDArray.ones(shape) * 3 - - val test = Symbol.sign()(data)() - val exeTest = test.bind(Context.cpu(), args = Array(arrData), argsGrad = Array(arrGrad)) - exeTest.forward() - val out = exeTest.outputs.head - val npout = NDArray.sign(dataTmp) - assert(reldiff(out, npout) < 1e-6) - - val outGrad = NDArray.ones(shape) * 2 - exeTest.backward(outGrad) - arrGrad.toArray.foreach(elem => assert(elem === 0f +- 1e-3f)) - } - - test("round, ceil, floor") { - val data = Symbol.Variable("data") - val shape = Shape(3, 4) - val dataTmp = NDArray.ones(shape) * 5.543f - val arrData = dataTmp.copy() - val arrGrad = NDArray.ones(shape) * 2 - - val test = Symbol.round()(data)() + Symbol.ceil()(data)() + Symbol.floor()(data)() - val exeTest = test.bind(Context.cpu(), args = Array(arrData)) - exeTest.forward() - val out = exeTest.outputs.head - val npout = NDArray.round(dataTmp) + NDArray.ceil(dataTmp) + NDArray.floor(dataTmp) - assert(reldiff(out, npout) < 1e-6) - } - - test("rsqrt, cos, sin") { - val data = Symbol.Variable("data") - val shape = Shape(3, 4) - val dataTmp = NDArray.ones(shape) * 5 - val arrData = dataTmp.copy() - val arrGrad = NDArray.ones(shape) * 3 - - val test = Symbol.rsqrt()(data)() + Symbol.cos()(data)() + Symbol.sin()(data)() - val exeTest = test.bind(Context.cpu(), args = Array(arrData), argsGrad = Array(arrGrad)) - exeTest.forward() - val out = exeTest.outputs.head - val npout = { - import org.apache.mxnet.NDArrayConversions._ - 1 / NDArray.sqrt(dataTmp) + NDArray.cos(dataTmp) + NDArray.sin(dataTmp) - } - assert(reldiff(out, npout) < 1e-6) - - val outGrad = NDArray.ones(shape) * 2 - val npoutGrad = { - import org.apache.mxnet.NDArrayConversions._ - outGrad * -(1 / (2 * dataTmp * NDArray.sqrt(dataTmp))) + - outGrad * -1 * NDArray.sin(dataTmp) + outGrad * NDArray.cos(dataTmp) - } - exeTest.backward(outGrad) - assert(reldiff(arrGrad, npoutGrad) < 1e-6) - } - - test("maximum") { - val data1 = Symbol.Variable("data1") - val data2 = Symbol.Variable("data2") - val shape = Shape(3, 4) - val dataTmp1 = Random.uniform(0, 100, shape) - val dataTmp2 = Random.uniform(0, 100, shape) - - val arrData1 = dataTmp1.copy() - val arrData2 = dataTmp2.copy() - - val test = Symbol.max(data1, data2) - val exeTest = test.bind(Context.cpu(), args = Array(arrData1, arrData2)) - exeTest.forward() - val out = exeTest.outputs.head - val expected = (dataTmp1.toArray zip dataTmp2.toArray).map { case (a, b) => Math.max(a, b) } - assert(reldiff(out.toArray, expected) < 1e-6) - } - - test("minimum") { - val data1 = Symbol.Variable("data1") - val data2 = Symbol.Variable("data2") - val shape = Shape(3, 4) - val dataTmp1 = Random.uniform(0, 100, shape) - val dataTmp2 = Random.uniform(0, 100, shape) - - val arrData1 = dataTmp1.copy() - val arrData2 = dataTmp2.copy() - - val test = Symbol.min(data1, data2) - val exeTest = 
-    val exeTest = test.bind(Context.cpu(), args = Array(arrData1, arrData2))
-    exeTest.forward()
-    val out = exeTest.outputs.head
-    val expected = (dataTmp1.toArray zip dataTmp2.toArray).map { case (a, b) => Math.min(a, b) }
-    assert(reldiff(out.toArray, expected) < 1e-6)
-  }
-
-  test("transpose") {
-    val data = Symbol.Variable("data")
-    val test = Symbol.transpose()(data)()
-
-    val shape = Shape(3, 4)
-    val ctx = Context.cpu()
-    val arrData = Random.uniform(0, 100, shape, ctx)
-
-    val trans: Array[Float] = {
-      val tmp = arrData.toArray.toList.grouped(4).toList
-      for (i <- 0 until 4) yield {
-        List(tmp(0)(i), tmp(1)(i), tmp(2)(i))
-      }
-    }.flatten.toArray
-
-    val exeTest = test.bind(ctx, args = Map("data" -> arrData))
-    exeTest.forward(isTrain = false)
-    val out = exeTest.outputs.head
-
-    assert(out.shape == Shape(4, 3))
-    assert(reldiff(out.toArray, trans) < 1e-6)
-  }
-
-  test("smooth_l1 & makeloss") {
-    val data = Symbol.Variable("data")
-    val smoothL1 = Symbol.smooth_l1()()(Map("data" -> data, "scalar" -> 1.0f))
-    val loss = Symbol.MakeLoss()()(Map("data" -> smoothL1))
-
-    val shape = Shape(2, 6)
-    val ctx = Context.cpu()
-    val input = NDArray.empty(ctx, shape.toArray: _*)
-    val grad = NDArray.empty(ctx, shape.toArray: _*)
-    val array = Array[Float](
-      -3.5f, -2.5f, -1.5f, -0.5f, -0.3f, -0.1f,
-      0.1f, 0.3f, 0.5f, 1.5f, 2.5f, 3.5f)
-    input.set(array)
-
-    val arrTmp = Array[Float](
-      3.0f, 2.0f, 1.0f, 0.125f, 0.045f, 0.005f,
-      0.005f, 0.045f, 0.125f, 1.0f, 2.0f, 3.0f)
-    val gradTmp = Array[Float](
-      -1.0f, -1.0f, -1.0f, -0.5f, -0.3f, -0.1f,
-      0.1f, 0.3f, 0.5f, 1.0f, 1.0f, 1.0f)
-
-    val exeTest =
-      loss.bind(ctx, args = Map("data" -> input), argsGrad = Map("data" -> grad))
-    exeTest.forward(isTrain = true)
-    val out = exeTest.outputs.head
-
-    assert(reldiff(out.toArray, arrTmp) < 1e-6)
-
-    exeTest.backward()
-
-    assert(reldiff(grad.toArray, gradTmp) < 1e-6)
-  }
-
-  test("maximum minimum scalar") {
-    val data = Symbol.Variable("data")
-    val shape = Shape(3, 4)
-    val dataTmp = NDArray.ones(shape) * 2
-
-    val arrData = dataTmp.copy()
-
-    val test = Symbol.max(data, 3) + Symbol.max(9, data) + Symbol.min(5, data) + Symbol.min(data, 4)
-    val exeTest = test.bind(Context.cpu(), args = Array(arrData))
-    exeTest.forward()
-    val out = exeTest.outputs.head
-    // 3 + 9 + 2 + 2
-    assert(reldiff(out, NDArray.ones(shape) * 16) < 1e-6)
-  }
-
-  test("abs") {
-    val data = Symbol.Variable("data")
-    val shape = Shape(3, 4)
-    val dataTmp = NDArray.ones(shape) * 5
-    val arrData = dataTmp.copy()
-    val arrGrad = NDArray.ones(shape) * 3
-
-    val test = Symbol.abs()(data)()
-    val exeTest = test.bind(Context.cpu(), args = Array(arrData), argsGrad = Array(arrGrad))
-    exeTest.forward()
-    val out = exeTest.outputs.head
-    val npout = NDArray.abs(dataTmp)
-    assert(reldiff(out, npout) < 1e-6)
-
-    val outGrad = NDArray.ones(shape) * 2
-    val npoutGrad = outGrad * NDArray.sign(dataTmp)
-    exeTest.backward(outGrad)
-    assert(reldiff(arrGrad, npoutGrad) < 1e-6)
-  }
-
-  // configure A: input --> conv --> deconv --> output.
-  // The convolution and deconvolution have similar parameters, which ensures
-  // that the input shape is the same as the output shape, and that conv and
-  // deconv share the same weights;
-  // if the input values of forward() and backward() are the same, then
-  // their output values should also be the same.
-  private def checkDeconvolutionForwardBackward(inputShape: Shape,
-                                                numFilter: Int,
-                                                kernel: (Int, Int),
-                                                stride: (Int, Int),
-                                                pad: (Int, Int)): Unit = {
-    require(inputShape(1) == numFilter)
-    val data = Symbol.Variable(name = "data")
-    val conv = Symbol.Convolution(name = "conv")()(Map(
-      "data" -> data, "kernel" -> kernel, "stride" -> stride, "pad" -> pad,
-      "num_filter" -> numFilter, "no_bias" -> "true"))
-    val deconv = Symbol.Deconvolution(name = "deconv")()(Map(
-      "data" -> conv, "kernel" -> kernel, "stride" -> stride, "pad" -> pad,
-      "num_filter" -> numFilter, "no_bias" -> "true"))
-
-    val argNames = deconv.listArguments()
-    val (argShapes, outShapes, _) = deconv.inferShape(Map("data" -> inputShape))
-    val inputData = Random.uniform(-5, 5, inputShape)
-    val outGrad = inputData
-    val convWeight = Random.normal(0, 1, Shape(numFilter, inputShape(1), kernel._1, kernel._2))
-    val args: Map[String, NDArray] =
-      Map("data" -> inputData, "conv_weight" -> convWeight, "deconv_weight" -> convWeight)
-    val argsGrad: Seq[NDArray] = argShapes.map(NDArray.empty(_))
-
-    val exe = deconv.bind(Context.cpu(), args = args, argsGrad = argsGrad)
-    exe.forward()
-    val out = exe.outputs.head
-    exe.backward(outGrad)
-    assert(reldiff(out, argsGrad.head) < 1e-6)
-  }
-
-  test("deconvolution forward & backward") {
-    checkDeconvolutionForwardBackward(
-      inputShape = Shape(1, 1, 5, 5),
-      numFilter = 1,
-      kernel = (3, 3),
-      stride = (1, 1),
-      pad = (1, 1)
-    )
-    checkDeconvolutionForwardBackward(
-      inputShape = Shape(32, 3, 28, 28),
-      numFilter = 3,
-      kernel = (3, 3),
-      stride = (1, 1),
-      pad = (1, 1)
-    )
-    checkDeconvolutionForwardBackward(
-      inputShape = Shape(10, 3, 403, 403),
-      numFilter = 3,
-      kernel = (7, 7),
-      stride = (5, 5),
-      pad = (2, 2)
-    )
-  }
-
-  // configure A: input --> conv --> output.
-  // configure B: input --> deconv --> output.
-  // The convolution and deconvolution have similar parameters, which ensures
-  // that the input shape is the same as the output shape;
-  // during backward(), if the input of A equals the output of B, and the output
-  // of A equals the input of B, then the weight gradients should be the same.
-  private def checkDeconvolutionGradient(inputShape: Shape,
-                                         numFilter: Int,
-                                         pad: (Int, Int)): Unit = {
-    val stride = (1, 1)
-    val kernel = (2 * pad._1 + 1, 2 * pad._2 + 1)
-    val dataConv = Symbol.Variable(name = "data_conv")
-    val conv = Symbol.Convolution(name = "conv")()(Map(
-      "data" -> dataConv, "kernel" -> kernel, "stride" -> stride, "pad" -> pad,
-      "num_filter" -> numFilter, "no_bias" -> "true"))
-    val dataDeconv = Symbol.Variable(name = "data_deconv")
-    val deconv = Symbol.Deconvolution(name = "deconv")()(Map(
-      "data" -> dataDeconv, "kernel" -> kernel, "stride" -> stride, "pad" -> pad,
-      "num_filter" -> numFilter, "no_bias" -> "true"))
-
-    val convData = Random.uniform(-5, 5, inputShape)
-    val convArgs = Map("data_conv" -> convData,
-      "conv_weight" -> Random.normal(0, 1, Shape(numFilter, inputShape(1), kernel._1, kernel._2)))
-
-    val convArgsGrad = Seq(NDArray.zeros(convData.shape),
-      NDArray.zeros(Shape(numFilter, inputShape(1), kernel._1, kernel._2)))
-    val exeConv = conv.bind(Context.cpu(), args = convArgs, argsGrad = convArgsGrad)
-    val convOutGrad = Random.normal(0, 2, exeConv.outputs.head.shape)
-    exeConv.forward()
-    exeConv.backward(convOutGrad)
-
-    val deconvData = convOutGrad
-    val deconvArgs = Map("data_deconv" -> deconvData, "deconv_weight" -> convArgs("conv_weight"))
-    val deconvArgsGrad = Seq(NDArray.zeros(deconvData.shape),
-      NDArray.zeros(Shape(numFilter, inputShape(1), kernel._1, kernel._2)))
-    val exeDeconv = deconv.bind(Context.cpu(), args = deconvArgs, argsGrad = deconvArgsGrad)
-    val deconvOutGrad = convData
-    exeDeconv.forward()
-    exeDeconv.backward(deconvOutGrad)
-    assert(reldiff(convArgsGrad(1), deconvArgsGrad(1)) < 1e-5)
-  }
-
-  test("deconvolution gradient") {
-    checkDeconvolutionGradient(
-      inputShape = Shape(1, 3, 5, 5),
-      numFilter = 3,
-      pad = (1, 1)
-    )
-    checkDeconvolutionGradient(
-      inputShape = Shape(5, 3, 100, 100),
-      numFilter = 3,
-      pad = (3, 3)
-    )
-  }
-
-  private def checkNearestUpSamplingWithShape(shapes: Seq[Shape],
-                                              scale: Int,
-                                              rootScale: Int): Unit = {
-    val arr = shapes.zipWithIndex.map { case (shape, i) =>
-      (s"arg_$i", Random.uniform(-10, 10, shape))
-    }.toMap
-
-    val arrGrad = shapes.zipWithIndex.map { case (shape, i) =>
-      (s"arg_$i", NDArray.zeros(shape))
-    }.toMap
-
-    val upArgs = (0 until shapes.size).map(i => Symbol.Variable(s"arg_$i"))
-    val up = Symbol.UpSampling()(upArgs: _*)(Map("sample_type" -> "nearest", "scale" -> rootScale))
-    val exe = up.bind(Context.cpu(), args = arr, argsGrad = arrGrad)
-    exe.forward(isTrain = true)
-    exe.backward(exe.outputs)
-    for (k <- 0 until shapes.size) {
-      val name = s"arg_$k"
-      val expected =
-        arr(name).toArray.map(_ * Math.pow(rootScale, 2).toFloat * Math.pow(scale, 2 * k).toFloat)
-      val real = arrGrad(name).toArray
-      (expected zip real) foreach { case (e, r) =>
-        assert(r === e +- 0.1f)
-      }
-    }
-  }
-
-  test("nearest upsampling") {
-    for (rootScale <- 1 to 3) {
-      for (scale <- 1 to 3) {
-        for (numShape <- 1 to 3) {
-          for (base <- 1 to 3) {
-            val shapes = (0 until numShape).map(i =>
-              Shape(1, 3, base * rootScale * Math.pow(scale, numShape - 1 - i).toInt,
-                base * rootScale * Math.pow(scale, numShape - 1 - i).toInt))
-            checkNearestUpSamplingWithShape(shapes, scale, rootScale)
-          }
-        }
-      }
-    }
-  }
-
-  test("batch norm") {
-    val data = Symbol.Variable("data")
-    val test = Symbol.BatchNorm(name = "bn")()(Map("data" -> data, "fix_gamma" -> "False"))
-    // TODO: check numeric gradient
-  }
-
-  /**
-   * Compare forward call to expected value.
-   * @param sym output symbol
-   * @param location list of numpy arrays corresponding to sym.list_arguments
-   * @param expected list of arrays corresponding to sym.outputs
-   * @param checkEps relative error to check to
-   */
-  private def checkSymbolicForward(sym: Symbol,
-                                   location: Seq[NDArray],
-                                   expected: Seq[NDArray],
-                                   checkEps: Float = 1e-5f): Unit = {
-    val arrData = location.map(_.copy())
-    val arrGrad = location.map(array => NDArray.empty(array.shape))
-
-    val executor = sym.bind(Context.cpu(), args = arrData, argsGrad = arrGrad)
-
-    val inps = executor.argArrays
-    assert(inps.size === location.size,
-      s"Executor argArrays and location lengths do not match. " +
-        s"Got ${inps.size} inputs and ${location.size} locations")
-
-    for ((inp, source) <- location zip executor.argArrays) {
-      source.set(inp)
-    }
-    for (g <- executor.gradArrays) {
-      if (g != null) {
-        g.set(0f)
-      }
-    }
-
-    assert(executor.outputs.length === 1)
-
-    executor.forward()
-
-    for ((expect, output) <- expected zip executor.outputs) {
-      assert(reldiff(expect, output) <= checkEps)
-    }
-  }
-
-  /**
-   * Compare backwards call to expected value.
-   * @param sym output symbol
-   * @param location list of numpy arrays corresponding to sym.list_arguments
-   * @param grad list of numpy arrays corresponding to sym.outputs for incoming gradient
-   * @param expected list of arrays corresponding to sym.outputs
-   * @param checkEps relative error to check to
-   */
-  private def checkSymbolicBackward(sym: Symbol,
-                                    location: Seq[NDArray],
-                                    grad: Seq[NDArray],
-                                    expected: Seq[NDArray],
-                                    checkEps: Float = 1e-5f): Unit = {
-    val arrData = location.map(_.copy())
-    val arrGrad = location.map(array => NDArray.empty(array.shape))
-    val outGrad = grad.map(_.copy()).toArray
-
-    val executor = sym.bind(Context.cpu(), args = arrData, argsGrad = arrGrad)
-
-    val inps = executor.argArrays
-    assert(inps.size === location.size,
-      s"Executor argArrays and location lengths do not match. " +
-        s"Got ${inps.size} inputs and ${location.size} locations")
-    for ((inp, source) <- location zip executor.argArrays) {
-      source.set(inp)
-    }
-    for (g <- executor.gradArrays) {
-      if (g != null) {
-        g.set(0f)
-      }
-    }
-
-    executor.forward()
-    executor.backward(outGrad)
-
-    for ((expect, grad) <- expected zip executor.gradArrays) {
-      assert(reldiff(expect, grad) <= checkEps)
-    }
-  }
-}
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/RandomSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/RandomSuite.scala
deleted file mode 100644
index 1f7b2a42a57b..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/RandomSuite.scala
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import org.scalatest.{BeforeAndAfterAll, FunSuite}
-
-class RandomSuite extends FunSuite with BeforeAndAfterAll {
-  test("uniform on cpu") {
-    Context.cpu().withScope {
-      val (a, b) = (-10, 10)
-      val shape = Shape(100, 100)
-      Random.seed(128)
-      val un1 = Random.uniform(a, b, shape)
-      Random.seed(128)
-      val un2 = Random.uniform(a, b, shape)
-      assert(un1 === un2)
-      assert(Math.abs(un1.toArray.sum / un1.size - (a + b) / 2f) < 0.1)
-    }
-  }
-
-  test("normal on cpu") {
-    val (mu, sigma) = (10f, 2f)
-    val shape = Shape(100, 100)
-    Random.seed(128)
-    val ret1 = Random.normal(mu, sigma, shape)
-    Random.seed(128)
-    val ret2 = Random.normal(mu, sigma, shape)
-    assert(ret1 === ret2)
-
-    val array = ret1.toArray
-    val mean = array.sum / ret1.size
-    val devs = array.map(score => (score - mean) * (score - mean))
-    val stddev = Math.sqrt(devs.sum / ret1.size)
-
-    assert(Math.abs(mean - mu) < 0.1)
-    assert(Math.abs(stddev - sigma) < 0.1)
-  }
-}
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/RecordIOSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/RecordIOSuite.scala
deleted file mode 100644
index ca85b56d8d74..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/RecordIOSuite.scala
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import org.scalatest.{BeforeAndAfterAll, FunSuite}
-import java.io._
-
-class RecordIOSuite extends FunSuite with BeforeAndAfterAll {
-  test("test RecordIO") {
-    val fRec = File.createTempFile("tmpFile", ".tmp")
-    val N = 255
-
-    val writer = new MXRecordIO(fRec.getAbsolutePath, MXRecordIO.IOWrite)
-    for (i <- 0 until N) {
-      writer.write(s"$i")
-    }
-    writer.close()
-
-    val reader = new MXRecordIO(fRec.getAbsolutePath, MXRecordIO.IORead)
-    for (i <- 0 until N) {
-      val res = reader.read()
-      assert(res === s"$i")
-    }
-  }
-
-  test("test IndexedRecordIO") {
-    val fIdxRec = File.createTempFile("tmpIdxFile", ".tmp")
-    val fIdx = File.createTempFile("tmpIdx", ".tmp")
-    val N = 255
-
-    val writer = new MXIndexedRecordIO(fIdx.getAbsolutePath,
-      fIdxRec.getAbsolutePath, MXRecordIO.IOWrite)
-    for (i <- 0 until N) {
-      writer.writeIdx(i, s"$i")
-    }
-    writer.close()
-
-    val reader = new MXIndexedRecordIO(fIdx.getAbsolutePath,
-      fIdxRec.getAbsolutePath, MXRecordIO.IORead)
-    var keys = reader.keys().map(_.asInstanceOf[Int]).toList.sorted
-    assert(keys.zip(0 until N).forall(x => x._1 == x._2))
-    keys = scala.util.Random.shuffle(keys)
-    for (k <- keys) {
-      val res = reader.readIdx(k)
-      assert(res === s"$k")
-    }
-  }
-
-  test("test RecordIOPackLabel") {
-    val fRec = File.createTempFile("tmpFile", ".tmp")
-    val N = 255
-
-    val charsDigits =
-      (0 until 26).map(x => ('A' + x).toChar.toString).toArray ++ (0 to 9).map(_.toString)
-
-    for (i <- 1 until N) {
-      for (j <- 0 until N) {
-        val content = {
-          val idx = scala.util.Random.shuffle(charsDigits.indices.toList).take(j)
-          idx.map(charsDigits(_)).mkString
-        }
-        val label = (0 until i).map(x => scala.util.Random.nextFloat()).toArray
-        val header = MXRecordIO.IRHeader(0, label, 0, 0)
-        val s = MXRecordIO.pack(header, content)
-        val (rHeader, rContent) = MXRecordIO.unpack(s)
-        assert(label === rHeader.label)
-        assert(content === rContent)
-      }
-    }
-  }
-}
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/ResourceScopeSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/ResourceScopeSuite.scala
deleted file mode 100644
index 19162385f0f7..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/ResourceScopeSuite.scala
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import java.lang.ref.ReferenceQueue
-import java.util.concurrent.ConcurrentHashMap
-
-import org.apache.mxnet.Base.CPtrAddress
-import org.apache.mxnet.ResourceScope.logger
-import org.mockito.Matchers.any
-import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}
-import org.mockito.Mockito._
-import scala.collection.mutable.HashMap
-
-class ResourceScopeSuite extends FunSuite with BeforeAndAfterAll with Matchers {
-
-  class TestNativeResource extends NativeResource {
-    /**
-     * native Address associated with this object
-     */
-    override def nativeAddress: CPtrAddress = hashCode()
-
-    /**
-     * Function Pointer to the NativeDeAllocator of nativeAddress
-     */
-    override def nativeDeAllocator: CPtrAddress => Int = TestNativeResource.deAllocator
-
-    /** Call NativeResource.register to get the reference
-     */
-    override val ref: NativeResourceRef = super.register()
-    /**
-     * Off-Heap Bytes Allocated for this object
-     */
-    override val bytesAllocated: Long = 0
-  }
-  object TestNativeResource {
-    def deAllocator(handle: CPtrAddress): Int = 0
-  }
-
-  object TestPhantomRef {
-    def getRefQueue: ReferenceQueue[NativeResource] = { NativeResourceRef.refQ }
-    def getRefMap: ConcurrentHashMap[NativeResourceRef, CPtrAddress]
-      = { NativeResourceRef.refMap }
-    def getCleaner: Thread = { NativeResourceRef.cleaner }
-  }
-
-  class TestPhantomRef(resource: NativeResource,
-                       resourceDeAllocator: CPtrAddress => Int)
-    extends NativeResourceRef(resource, resourceDeAllocator) {
-  }
-
-  test(testName = "test NDArray Auto Release") {
-    var a: NDArray = null
-    var aRef: NativeResourceRef = null
-    var b: NDArray = null
-
-    ResourceScope.using() {
-      b = ResourceScope.using() {
-        a = NDArray.ones(Shape(3, 4))
-        aRef = a.ref
-        val x = NDArray.ones(Shape(3, 4))
-        x
-      }
-      val bRef: NativeResourceRef = b.ref
-      assert(a.isDisposed == true,
-        "objects created within scope should have isDisposed set to true")
-      assert(b.isDisposed == false,
-        "returned NativeResource should not be released")
-      assert(TestPhantomRef.getRefMap.containsKey(aRef) == false,
-        "reference of resource in Scope should be removed refMap")
-      assert(TestPhantomRef.getRefMap.containsKey(bRef) == true,
-        "reference of resource outside scope should be not removed refMap")
-    }
-    assert(b.isDisposed, "resource returned from inner scope should be released in outer scope")
-  }
-
-  test("test return object release from outer scope") {
-    var a: TestNativeResource = null
-    ResourceScope.using() {
-      a = ResourceScope.using() {
-        new TestNativeResource()
-      }
-      assert(a.isDisposed == false, "returned object should not be disposed within Using")
-    }
-    assert(a.isDisposed == true, "returned object should be disposed in the outer scope")
-  }
-
-  /**
-   * Tests passing a scope to using and creating new resources within.
- */ - test("test moving scope of native resource to scope of another") { - var a: TestNativeResource = null - var b: TestNativeResource = null - var c: TestNativeResource = null - var d: TestNativeResource = null - - ResourceScope.using() { - a = new TestNativeResource() - ResourceScope.using() { - b = new TestNativeResource() - ResourceScope.usingIfScopeExists(a.scope) { - c = new TestNativeResource() - ResourceScope.using() { - d = new TestNativeResource() - assert(c.scope == a.scope) - } - assert(d.isDisposed == true) - } - assert(b.isDisposed == false) - assert(c.isDisposed == false) - } - assert(a.isDisposed == false) - assert(b.isDisposed == true) - assert(c.isDisposed == false) - } - assert(a.isDisposed == true) - assert(b.isDisposed == true) - assert(c.isDisposed == true) - } - - test(testName = "test NativeResources in returned Lists are not disposed") { - var ndListRet: IndexedSeq[TestNativeResource] = null - ResourceScope.using() { - ndListRet = ResourceScope.using() { - val ndList: IndexedSeq[TestNativeResource] = - IndexedSeq(new TestNativeResource(), new TestNativeResource()) - ndList - } - ndListRet.foreach(nd => assert(nd.isDisposed == false, - "NativeResources within a returned collection should not be disposed")) - } - ndListRet.foreach(nd => assert(nd.isDisposed == true, - "NativeResources returned from inner scope should be disposed in outer scope")) - } - - test("test native resource inside a map") { - var nRInKeyOfMap: HashMap[TestNativeResource, String] = null - var nRInValOfMap: HashMap[String, TestNativeResource] = HashMap[String, TestNativeResource]() - - ResourceScope.using() { - nRInKeyOfMap = ResourceScope.using() { - val ret = HashMap[TestNativeResource, String]() - ret.put(new TestNativeResource, "hello") - ret - } - assert(!nRInKeyOfMap.isEmpty) - - nRInKeyOfMap.keysIterator.foreach(it => assert(it.isDisposed == false, - "NativeResources returned in Traversable should not be disposed")) - } - - nRInKeyOfMap.keysIterator.foreach(it => assert(it.isDisposed)) - - ResourceScope.using() { - - nRInValOfMap = ResourceScope.using() { - val ret = HashMap[String, TestNativeResource]() - ret.put("world!", new TestNativeResource) - ret - } - assert(!nRInValOfMap.isEmpty) - nRInValOfMap.valuesIterator.foreach(it => assert(it.isDisposed == false, - "NativeResources returned in Collection should not be disposed")) - } - nRInValOfMap.valuesIterator.foreach(it => assert(it.isDisposed)) - } - -} diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/SerializerSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/SerializerSuite.scala deleted file mode 100644 index 86e229b0b143..000000000000 --- a/scala-package/core/src/test/scala/org/apache/mxnet/SerializerSuite.scala +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import org.apache.mxnet.optimizer.SGD
-import org.scalatest.{Matchers, BeforeAndAfterAll, FunSuite}
-
-class SerializerSuite extends FunSuite with BeforeAndAfterAll with Matchers {
-  test("serialize and deserialize optimizer") {
-    val optimizer: Optimizer = new SGD(learningRate = 0.1f, momentum = 0.9f, wd = 0.0005f)
-    val optSerialized: String = Serializer.encodeBase64String(
-      Serializer.getSerializer.serialize(optimizer))
-    assert(optSerialized.length > 0)
-
-    val bytes = Serializer.decodeBase64String(optSerialized)
-    val optDeserialized = Serializer.getSerializer.deserialize[Optimizer](bytes)
-
-    assert(optDeserialized.isInstanceOf[SGD])
-    val sgd = optDeserialized.asInstanceOf[SGD]
-
-    val learningRate = classOf[SGD].getDeclaredField("learningRate")
-    learningRate.setAccessible(true)
-    assert(learningRate.get(sgd).asInstanceOf[Float] === 0.1f +- 1e-6f)
-
-    val momentum = classOf[SGD].getDeclaredField("momentum")
-    momentum.setAccessible(true)
-    assert(momentum.get(sgd).asInstanceOf[Float] === 0.9f +- 1e-6f)
-
-    val wd = classOf[SGD].getDeclaredField("wd")
-    wd.setAccessible(true)
-    assert(wd.get(sgd).asInstanceOf[Float] === 0.0005f +- 1e-6f)
-  }
-}
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/ShapeSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/ShapeSuite.scala
deleted file mode 100644
index b186653e3db9..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/ShapeSuite.scala
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import org.scalatest.{BeforeAndAfterAll, FunSuite}
-
-class ShapeSuite extends FunSuite with BeforeAndAfterAll {
-  test("to string") {
-    val s = Shape(1, 2, 3)
-    assert(s.toString === "(1,2,3)")
-  }
-
-  test("equals") {
-    assert(Shape(1, 2, 3) === Shape(1, 2, 3))
-    assert(Shape(1, 2) != Shape(1, 2, 3))
-  }
-
-  test("drop") {
-    val s = Shape(1, 2, 3)
-    val s2 = s.drop(1)
-    assert(s == Shape(1, 2, 3))
-    assert(s2 == Shape(2, 3))
-    val s3 = s.drop(2)
-    assert(s3 == Shape(3))
-  }
-
-  test("slice") {
-    val s = Shape(1, 2, 3)
-    val s2 = s.slice(0, 1)
-    assert(s2 == Shape(1))
-  }
-}
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/SparseNDArraySuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/SparseNDArraySuite.scala
deleted file mode 100644
index f9968efd80c5..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/SparseNDArraySuite.scala
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import org.apache.mxnet.io.NDArrayIter
-import org.scalatest.FunSuite
-import org.slf4j.LoggerFactory
-
-class SparseNDArraySuite extends FunSuite {
-
-  private val logger = LoggerFactory.getLogger(classOf[SparseNDArraySuite])
-
-  test("create CSR NDArray") {
-    val data = Array(7f, 8f, 9f)
-    val indices = Array(0f, 2f, 1f)
-    val indptr = Array(0f, 2f, 2f, 3f)
-    val shape = Shape(3, 4)
-    val sparseND = SparseNDArray.csrMatrix(data, indices, indptr, shape, Context.cpu())
-    assert(sparseND.shape == Shape(3, 4))
-    assert(sparseND.toArray
-      sameElements Array(7.0f, 0.0f, 8.0f, 0.0f,
-        0.0f, 0.0f, 0.0f, 0.0f,
-        0.0f, 9.0f, 0.0f, 0.0f))
-    assert(sparseND.sparseFormat == SparseFormat.CSR)
-    assert(sparseND.getIndptr.toArray sameElements indptr)
-    assert(sparseND.getIndices.toArray sameElements indices)
-  }
-
-  test("create Row Sparse NDArray") {
-    val data = Array(
-      Array(1f, 2f),
-      Array(3f, 4f)
-    )
-    val indices = Array(1f, 4f)
-    val shape = Shape(6, 2)
-    val sparseND = SparseNDArray.rowSparseArray(data, indices, shape, Context.cpu())
-    assert(sparseND.sparseFormat == SparseFormat.ROW_SPARSE)
-    assert(sparseND.shape == Shape(6, 2))
-    assert(sparseND.at(1).toArray sameElements Array(1f, 2f))
-    assert(sparseND.getIndices.toArray sameElements indices)
-  }
-
-  test("Test retain") {
-    val arr = Array(
-      Array(1f, 2f),
-      Array(3f, 4f),
-      Array(5f, 6f)
-    )
-    val indices = Array(0f, 1f, 3f)
-    val rspIn = SparseNDArray.rowSparseArray(arr, indices, Shape(4, 2), Context.cpu())
-    val toRetain = Array(0f, 3f)
-    val rspOut = SparseNDArray.retain(rspIn, toRetain)
-    assert(rspOut.getData.toArray sameElements Array(1f, 2f, 5f, 6f))
-    assert(rspOut.getIndices.toArray sameElements Array(0f, 3f))
-  }
-
-  test("Test add") {
-    val nd = NDArray.array(Array(1f, 2f, 3f), Shape(3)).toSparse(Some(SparseFormat.ROW_SPARSE))
-    val nd2 = nd + nd
-    assert(nd2.isInstanceOf[SparseNDArray])
-    assert(nd2.toArray sameElements Array(2f, 4f, 6f))
-  }
-
-  test("Test DataIter") {
-    val nd = NDArray.array(Array(1f, 2f, 3f), Shape(1, 3)).toSparse(Some(SparseFormat.CSR))
-    val arr = IndexedSeq(nd, nd, nd, nd)
-    val iter = new NDArrayIter(arr)
-    while (iter.hasNext) {
-      val tempArr = iter.next().data
-      tempArr.foreach(ele => {
-        assert(ele.sparseFormat == SparseFormat.CSR)
-        assert(ele.shape == Shape(1, 3))
-      })
-    }
-  }
-
-}
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/SymbolSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/SymbolSuite.scala
deleted file mode 100644
index 6a0f0ecba089..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/SymbolSuite.scala
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import org.scalatest.{BeforeAndAfterAll, FunSuite}
-
-class SymbolSuite extends FunSuite with BeforeAndAfterAll {
-
-  test("symbol compose") {
-    val data = Symbol.Variable("data")
-
-    var net1 = Symbol.FullyConnected(name = "fc1")()(Map("data" -> data, "num_hidden" -> 10))
-    net1 = Symbol.FullyConnected(name = "fc2")()(Map("data" -> net1, "num_hidden" -> 100))
-    assert(net1.listArguments().toArray ===
-      Array("data", "fc1_weight", "fc1_bias", "fc2_weight", "fc2_bias"))
-
-    var net2 = Symbol.FullyConnected(name = "fc3")()(Map("num_hidden" -> 10))
-    net2 = Symbol.Activation()()(Map("data" -> net2, "act_type" -> "relu"))
-    net2 = Symbol.FullyConnected(name = "fc4")()(Map("data" -> net2, "num_hidden" -> 20))
-
-    val composed = net2(name = "composed", Map("fc3_data" -> net1))
-    val multiOut = Symbol.Group(composed, net1)
-    assert(multiOut.listOutputs().length === 2)
-  }
-
-  test("symbol internal") {
-    val data = Symbol.Variable("data")
-    val oldfc = Symbol.FullyConnected(name = "fc1")()(Map("data" -> data, "num_hidden" -> 10))
-    val net1 = Symbol.FullyConnected(name = "fc2")()(Map("data" -> oldfc, "num_hidden" -> 100))
-    assert(net1.listArguments().toArray
-      === Array("data", "fc1_weight", "fc1_bias", "fc2_weight", "fc2_bias"))
-    val internal = net1.getInternals()
-    val fc1 = internal.get("fc1_output")
-    assert(fc1.listArguments() === oldfc.listArguments())
-  }
-
-  test("symbol copy") {
-    val data = Symbol.Variable("data")
-    val data2 = data.clone()
-    assert(data.toJson === data2.toJson)
-  }
-
-  test("Symbol random module is generated properly") {
-    val lam = Symbol.Variable("lam")
-    val rnd = Symbol.random.poisson(lam = Some(lam), shape = Some(Shape(2, 2)))
-    val rnd2 = Symbol.random.poisson(lam = Some(1f), shape = Some(Shape(2, 2)))
-  }
-
-  test("Symbol random module is generated properly - special case of 'normal'") {
-    val loc = Symbol.Variable("loc")
-    val scale = Symbol.Variable("scale")
-    val rnd = Symbol.random.normal(mu = Some(loc), sigma = Some(scale), shape = Some(Shape(2, 2)))
-    val rnd2 = Symbol.random.normal(mu = Some(1f), sigma = Some(2f), shape = Some(Shape(2, 2)))
-  }
-}
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/TestUtil.scala b/scala-package/core/src/test/scala/org/apache/mxnet/TestUtil.scala
deleted file mode 100644
index 4fc8ec9826c1..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/TestUtil.scala
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-class TestUtil {
-
-  /**
-   * Allow override of the data path. Default is /data
-   * @return Data directory path (may be relative)
-   */
-  def getDataDirectory: String = {
-    var dataDir = System.getenv("MXNET_HOME")
-    if (dataDir == null) {
-      dataDir = "data"
-    } else {
-      if (dataDir.isEmpty) {
-        dataDir = "data"
-      }
-    }
-    dataDir
-  }
-
-  /**
-   * Create a data file path based upon getDataDirectory
-   * @param relFile relative file name
-   * @return file path
-   */
-  def dataFile(relFile: String): String = {
-    getDataDirectory + "/" + relFile
-  }
-
-}
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/gpu/RtcSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/gpu/RtcSuite.scala
deleted file mode 100644
index 90d187d6f631..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/gpu/RtcSuite.scala
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import org.scalatest.{Ignore, BeforeAndAfterAll, FunSuite}
-
-@Ignore
-class RtcSuite extends FunSuite with BeforeAndAfterAll {
-  test("test kernel 1") {
-    val ctx = Context.gpu(0)
-    val x = NDArray.empty(ctx, 10)
-    x.set(1f)
-    val y = NDArray.empty(ctx, 10)
-    y.set(2f)
-    val rtc = new Rtc("abc", Array(("x", x)), Array(("y", y)), """
-        __shared__ float s_rec[10];
-        s_rec[threadIdx.x] = x[threadIdx.x];
-        y[threadIdx.x] = expf(s_rec[threadIdx.x]*5.0);""")
-
-    rtc.push(Array(x), Array(y), (1, 1, 1), (10, 1, 1))
-
-    val gt = x.toArray.map(x => Math.exp(x * 5.0).toFloat)
-
-    rtc.dispose()
-    assert(CheckUtils.reldiff(y.toArray, gt) < 1e-5f)
-  }
-
-  test("test kernel 2") {
-    val ctx = Context.gpu(0)
-    val x = NDArray.empty(ctx, 33554430)
-    x.set(1f)
-    val y = NDArray.empty(ctx, 33554430)
-    y.set(2f)
-    val z = NDArray.empty(ctx, 33554430)
-
-    val rtc = new Rtc("multiplyNumbers", Array(("x", x), ("y", y)), Array(("z", z)), """
-        int tid = (blockIdx.y * 128 * 256) + blockIdx.x * 256 + threadIdx.x;
-        z[tid] = sqrt(x[tid] * y[tid] / 2.5);""")
-
-    rtc.push(Array(x, y), Array(z), (128, 1024, 1), (256, 1, 1))
-
-    val xArr = x.toArray
-    val yArr = y.toArray
-    val gt = xArr.indices.map(i => Math.sqrt(xArr(i) * yArr(i) / 2.5f).toFloat)
-
-    rtc.dispose()
-    assert(CheckUtils.reldiff(z.toArray, gt.toArray) < 1e-7f)
-  }
-}
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/util/SerializerUtilsSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/util/SerializerUtilsSuite.scala
deleted file mode 100644
index c5cdc435a25e..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/util/SerializerUtilsSuite.scala
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet.util
-
-import org.apache.mxnet.NDArray
-import org.scalatest.{BeforeAndAfterAll, FunSuite}
-
-class SerializerUtilsSuite extends FunSuite with BeforeAndAfterAll {
-  test("serialize & deserialize NDArrays") {
-    val a = NDArray.zeros(2, 3)
-    val b = NDArray.ones(3, 1)
-    val bytes = SerializerUtils.serializeNDArrays(a, b)
-    val ndArrays = SerializerUtils.deserializeNDArrays(bytes)
-    assert(ndArrays.size === 2)
-    assert(ndArrays(0) === a)
-    assert(ndArrays(1) === b)
-  }
-}
diff --git a/scala-package/core/src/test/scala/org/apache/mxnet/util/WarnIfNotDiposedSuite.scala b/scala-package/core/src/test/scala/org/apache/mxnet/util/WarnIfNotDiposedSuite.scala
deleted file mode 100644
index 835bd2984832..000000000000
--- a/scala-package/core/src/test/scala/org/apache/mxnet/util/WarnIfNotDiposedSuite.scala
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet
-
-import org.scalatest.{BeforeAndAfterAll, FunSuite}
-
-// scalastyle:off finalize
-class Leakable(enableTracing: Boolean = false, markDisposed: Boolean = false)
-  extends WarnIfNotDisposed {
-  def isDisposed: Boolean = markDisposed
-  override protected def tracingEnabled = enableTracing
-
-  var warningWasLogged: Boolean = false
-  def getCreationTrace: Option[Array[StackTraceElement]] = creationTrace
-
-  override def finalize(): Unit = super.finalize()
-  override protected def logDisposeWarning() = {
-    warningWasLogged = true
-  }
-}
-// scalastyle:on finalize
-
-class WarnIfNotDisposedSuite extends FunSuite with BeforeAndAfterAll {
-  test("trace collected if tracing enabled") {
-    val leakable = new Leakable(enableTracing = true)
-
-    val trace = leakable.getCreationTrace
-    assert(trace.isDefined)
-    assert(trace.get.exists(el => el.getClassName() == getClass().getName()))
-  }
-
-  test("trace not collected if tracing disabled") {
-    val leakable = new Leakable(enableTracing = false)
-    assert(!leakable.getCreationTrace.isDefined)
-  }
-
-  test("no warning logged if object disposed") {
-    val notLeaked = new Leakable(markDisposed = true)
-    notLeaked.finalize()
-    assert(!notLeaked.warningWasLogged)
-  }
-
-  test("warning logged if object not disposed") {
-    val leaked = new Leakable(markDisposed = false)
-    leaked.finalize()
-    assert(leaked.warningWasLogged)
-  }
-}
diff --git a/scala-package/deploy/pom.xml b/scala-package/deploy/pom.xml
deleted file mode 100644
index 74338f2173da..000000000000
--- a/scala-package/deploy/pom.xml
+++ /dev/null
@@ -1,124 +0,0 @@
-
-
-  4.0.0
-
-  org.apache.mxnet
-  mxnet-parent
-  INTERNAL
-  ../pom.xml
-
-
-  mxnet-deployment
-  ${revision}
-  MXNet Scala Package - Full ${platform}-only
-  pom
-
-  Scala Package for Apache MXNet (Incubating) - flexible and efficient library for deep learning.
-
-
-
-  ${project.parent.basedir}/..
-  mxnet-full_2.11-${platform}-${flavor}
-  ${base.revision}-SNAPSHOT
-  apache.snapshots.https
-
-
-
-
-
-  org.apache.mxnet
-  mxnet-full_2.11
-  INTERNAL
-
-
-
-
-
-  staging
-
-  ${base.revision}
-  apache.releases.https
-  https://repository.apache.org/service/local/staging/deploy/maven2
-  jar.asc,asc,asc,pom.asc
-  ,sources.jar,javadoc.jar,
-
-  ../assembly/target/mxnet-full_2.11-INTERNAL.jar.asc,../assembly/target/mxnet-full_2.11-INTERNAL-src.jar.asc,../assembly/target/mxnet-full_2.11-INTERNAL-bundle.jar.asc,../externalPom/target/deploy.xml.asc
-
-
-
-  nightly
-
-  apache.snapshots.https
-  https://repository.apache.org/content/repositories/snapshots
-
-
-
-
-
-
-
-  org.apache.maven.plugins
-  maven-assembly-plugin
-
-  false
-
-
-
-
-  org.apache.maven.plugins
-  maven-deploy-plugin
-
-
-  deploy-file
-  deploy
-
-  deploy-file
-
-
-  ${project.description}
-  false
-  ${repositoryId}
-  ${repo_url}
-  ${project.groupId}
-  ${ARTIFACT_ID}
-  ${project.version}
-  jar
-  ${rootdir}/externalPom/target/deploy.xml
-  ${rootdir}/assembly/target/mxnet-full_2.11-INTERNAL.jar
-  ${rootdir}/assembly/target/mxnet-full_2.11-INTERNAL-src.jar
-  ${rootdir}/assembly/target/mxnet-full_2.11-INTERNAL-bundle.jar
-  ${deploy_asc_types}
-  ${deploy_asc_classifers}
-  ${deploy_asc_files}
-
-
-
-
-  true
-
-
-
-
-
diff --git a/scala-package/dev/change-artifact-id.sh b/scala-package/dev/change-artifact-id.sh
deleted file mode 100755
index 4b7305fd71cd..000000000000
--- a/scala-package/dev/change-artifact-id.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# (Yizhi) This is mainly inspired by the script in apache/spark.
-# I did some modification to adapt it to our project.
-#
-
-set -e
-
-if [[ ($# -ne 2) || ( $1 == "--help") || $1 == "-h" ]]; then
-  echo "Usage: $(basename $0) [-h|--help] " 1>&2
-  exit 1
-fi
-
-FROM_ARTIFACT_ID=$1
-TO_ARTIFACT_ID=$2
-
-sed_i() {
-  perl -p -000 -e "$1" "$2" > "$2.tmp" && mv "$2.tmp" "$2"
-}
-
-export -f sed_i
-
-BASEDIR=$(dirname $0)/..
- -find "$BASEDIR" -name 'pom.xml' -not -path '*target*' -print \ - -exec bash -c \ - "sed_i 's/('$FROM_ARTIFACT_ID'(<\/artifactId>)/\1>'$TO_ARTIFACT_ID'\2/g' {}" \; - -# Change assembly including settings -# -# org.apache.mxnet:libmxnet-scala-linux-x86_64-cpu:so -# -find "$BASEDIR" -name 'assembly.xml' -not -path '*target*' -print \ - -exec bash -c \ - "sed_i 's/(.*mxnet):'$FROM_ARTIFACT_ID'(:.*<\/include>)/\1:'$TO_ARTIFACT_ID'\2/g' {}" \; diff --git a/scala-package/dev/change-proj-version.sh b/scala-package/dev/change-proj-version.sh deleted file mode 100755 index 953e7b758288..000000000000 --- a/scala-package/dev/change-proj-version.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# (Yizhi) This is mainly inspired by the script in apache/spark. -# I did some modificaiton to get it with our project. -# - -set -e - -if [[ ($# -ne 2) || ( $1 == "--help") || $1 == "-h" ]]; then - echo "Usage: $(basename $0) [-h|--help] " 1>&2 - exit 1 -fi - -FROM_VERSION=$1 -TO_VERSION=$2 - -sed_i() { - perl -p -000 -e "$1" "$2" > "$2.tmp" && mv "$2.tmp" "$2" -} - -export -f sed_i - -BASEDIR=$(dirname $0)/.. -find "$BASEDIR" -name 'pom.xml' -not -path '*target*' -print \ - -exec bash -c \ - "sed_i 's/(.*mxnet-.*<\/artifactId>\s+'$FROM_VERSION'(<\/version>)/\1>'$TO_VERSION'\2/g' {}" \; diff --git a/scala-package/dev/change-scala-version.sh b/scala-package/dev/change-scala-version.sh deleted file mode 100755 index 34c314f52289..000000000000 --- a/scala-package/dev/change-scala-version.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# (Yizhi) This is mainly inspired by the script in apache/spark. -# I did some modificaiton to get it with our project. -# - -set -e - -if [[ ($# -ne 2) || ( $1 == "--help") || $1 == "-h" ]]; then - echo "Usage: $(basename $0) [-h|--help] " 1>&2 - exit 1 -fi - -FROM_VERSION=$1 -TO_VERSION=$2 - -sed_i() { - sed -e "$1" "$2" > "$2.tmp" && mv "$2.tmp" "$2" -} - -export -f sed_i - -BASEDIR=$(dirname $0)/.. 
-find "$BASEDIR" -name 'pom.xml' -not -path '*target*' -print \ - -exec bash -c "sed_i 's/\(artifactId.*\)_'$FROM_VERSION'/\1_'$TO_VERSION'/g' {}" \; - -find "$BASEDIR/.." -name 'Makefile' -not -path '*target*' -print \ - -exec bash -c "sed_i 's/'$FROM_VERSION'/'$TO_VERSION'/g' {}" \; - -# Also update in parent POM -# Match any scala binary version to ensure idempotency -sed_i '1,/[0-9]*\.[0-9]*[0-9]*\.[0-9]*'$TO_VERSION' " 1>&2 - exit 1 -fi -PLATFORM=$1 -MXNETDIR=$2 - - -# below routine shamelessly copied from -# https://github.com/apache/incubator-mxnet/blob/master/setup-utils/install-mxnet-osx-python.sh -# This routine executes a command, -# prints error message on the console on non-zero exit codes and -# returns the exit code to the caller. -chkret() { - cmd=$* - echo "$cmd" - $cmd - ret=$? - if [[ ${ret} != 0 ]]; then - echo " " - echo "ERROR: Return value non-zero for: $cmd" - echo " " - exit 1 - fi -} # chkret() - -UNAME=`uname -s` -chkret pushd $MXNETDIR - -set +e -git submodule update --init --recursive -set -e - -# don't want to overwrite an existing config file -cp make/config.mk ./config.mk - -if [[ $PLATFORM == "osx-x86_64-cpu" ]]; -then - echo "Building MXNet Backend on MAC OS" - echo "ADD_CFLAGS += -I/usr/local/opt/opencv/include" >> ./config.mk - echo "ADD_CFLAGS += -I/usr/local/opt/openblas/include" >> ./config.mk - echo "ADD_LDFLAGS += -L/usr/local/opt/opencv/lib" >> ./config.mk - echo "ADD_LDFLAGS += -L/usr/local/opt/openblas/lib" >> ./config.mk - echo "USE_OPENMP = 0" >> ./config.mk - echo "USE_LAPACK_PATH = /usr/local/opt/lapack/lib" >> ./config.mk - make -j$(sysctl -n hw.ncpu) -elif [[ $PLATFORM == "linux-x86_64-cpu" ]]; -then - echo "Building MXNet Backend on Linux CPU" - echo "ADD_CFLAGS += -I/usr/local/include/opencv" >> ./config.mk - echo "ADD_LDFLAGS += -L/usr/local/lib" >> ./config.mk - echo "USE_OPENCV=1" >> ./config.mk - echo "USE_OPENMP=1" >> ./config.mk - echo "USE_BLAS=openblas" >> ./config.mk - echo "USE_LAPACK=1" >> ./config.mk - echo "USE_DIST_KVSTORE=1" >> ./config.mk - echo "USE_S3=1" >> ./config.mk - make -j$(cat /proc/cpuinfo | awk '/^processor/{print $3}' | tail -1) -elif [[ $PLATFORM == "linux-x86_64-gpu" ]] -then - echo "Building MXNet Backend on Linux GPU" - echo "ADD_CFLAGS += -I/usr/local/include/opencv" >> ./config.mk - echo "ADD_LDFLAGS += -L/usr/local/lib" >> ./config.mk - echo "USE_OPENCV=1" >> ./config.mk - echo "USE_OPENMP=1" >> ./config.mk - echo "USE_BLAS=openblas" >> ./config.mk - echo "USE_LAPACK=1" >> ./config.mk - echo "USE_DIST_KVSTORE=1" >> ./config.mk - echo "USE_S3=1" >> ./config.mk - echo "USE_CUDA=1" >> ./config.mk - echo "USE_CUDNN=1" >> ./config.mk - echo "ADD_CFLAGS += -I/usr/local/cuda/include" >> ./config.mk - echo "ADD_LDFLAGS += -L/usr/local/cuda/lib64/ " >> ./config.mk - #update th nccl version approriately - echo "ADD_LDFLAGS += -L/lib/nccl/cuda-9.0/lib " >> ./config.mk - eval "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/lib/nccl/cuda-9.0/lib" - eval "export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH" - make -j$(cat /proc/cpuinfo | awk '/^processor/{print $3}' | tail -1) - echo "Building MXNet Backend on Linux GPU" -else - echo "MY ALIEN OVERLOADS HAVE NOT TOLD WHAT TO DO FOR INVALID INPUT !!!" 
- echo "Currently supported platforms: osx-x86_64-cpu or linux-x86_64-cpu or linux-x86_64-gpu" -fi -chkret popd -echo "done building MXNet Backend" -exit 0 diff --git a/scala-package/examples/pom.xml b/scala-package/examples/pom.xml deleted file mode 100644 index 257529199176..000000000000 --- a/scala-package/examples/pom.xml +++ /dev/null @@ -1,150 +0,0 @@ - - - - 4.0.0 - - org.apache.mxnet - mxnet-parent - INTERNAL - ../pom.xml - - - mxnet-examples - MXNet Scala Package - Examples - - - true - ${skipTests} - - - - - - maven-resources-plugin - - - copy-resources - validate - - copy-resources - - - ${project.build.outputDirectory} - - - src/main/resources - true - - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - copy-dependencies - package - - copy-dependencies - - - ${project.build.outputDirectory}/lib - runtime - test,provided - false - false - true - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - net.alchim31.maven - scala-maven-plugin - - - org.scalatest - scalatest-maven-plugin - - ${skipTests} - - -Djava.library.path=${project.parent.basedir}/native/target \ - -Dlog4j.configuration=file://${project.basedir}/src/test/resources/log4j.properties - - - - - org.scalastyle - scalastyle-maven-plugin - - - - - - org.apache.mxnet - mxnet-core - INTERNAL - provided - - - org.apache.mxnet - mxnet-infer - INTERNAL - provided - - - com.sksamuel.scrimage - scrimage-core_2.11 - 2.1.8 - - - com.sksamuel.scrimage - scrimage-io-extra_2.11 - 2.1.8 - - - com.sksamuel.scrimage - scrimage-filters_2.11 - 2.1.8 - - - nu.pattern - opencv - 2.4.9-7 - - - org.slf4j - slf4j-simple - 1.7.5 - - - com.google.code.gson - gson - 2.8.5 - - - diff --git a/scala-package/examples/scripts/benchmark/run_image_inference_bm.sh b/scala-package/examples/scripts/benchmark/run_image_inference_bm.sh deleted file mode 100755 index 7bbf86ea15fc..000000000000 --- a/scala-package/examples/scripts/benchmark/run_image_inference_bm.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set -e - -MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*:$MXNET_ROOT/scala-package/infer/target/* - -MODEL_NAME=$2 - -RUNS=$3 - -BATCHSIZE=$4 - -# model dir -MODEL_PATH_PREFIX=$5 -# input image -INPUT_IMG=$6 -# which input image dir -INPUT_DIR=$7 - -java -Xmx8G -Dmxnet.traceLeakedObjects=true -cp $CLASS_PATH \ - org.apache.mxnetexamples.benchmark.ScalaInferenceBenchmark \ - --example $MODEL_NAME \ - --count $RUNS \ - --batchSize $BATCHSIZE \ - --model-path-prefix $MODEL_PATH_PREFIX \ - --input-image $INPUT_IMG \ - --input-dir $INPUT_DIR \ - diff --git a/scala-package/examples/scripts/benchmark/run_java_inference_bm.sh b/scala-package/examples/scripts/benchmark/run_java_inference_bm.sh deleted file mode 100644 index c62a7438df85..000000000000 --- a/scala-package/examples/scripts/benchmark/run_java_inference_bm.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -e - -MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/* - -java -Xmx8G -Dmxnet.traceLeakedObjects=true -cp $CLASS_PATH \ - org.apache.mxnetexamples.javaapi.benchmark.JavaBenchmark $@ - diff --git a/scala-package/examples/scripts/benchmark/run_text_charrnn_bm.sh b/scala-package/examples/scripts/benchmark/run_text_charrnn_bm.sh deleted file mode 100755 index 6b7c10758db9..000000000000 --- a/scala-package/examples/scripts/benchmark/run_text_charrnn_bm.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set -e - -MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*:$MXNET_ROOT/scala-package/infer/target/* - -MODEL_NAME=$2 - -RUNS=$3 - -# model dir -MODEL_PATH_PREFIX=$4 -# input image -DATA_PATH=$5 - -# feel free to change the starter sentence -STARTER_SENTENCE="The joke" - -java -Xmx8G -Dmxnet.traceLeakedObjects=false -cp $CLASS_PATH \ - org.apache.mxnetexamples.benchmark.ScalaInferenceBenchmark \ - --example $MODEL_NAME \ - --count $RUNS \ - --model-prefix $MODEL_PATH_PREFIX \ - --data-path $DATA_PATH \ - --starter-sentence "$STARTER_SENTENCE" - diff --git a/scala-package/examples/scripts/customop/run_customop.sh b/scala-package/examples/scripts/customop/run_customop.sh deleted file mode 100644 index 1b5caf5e2aba..000000000000 --- a/scala-package/examples/scripts/customop/run_customop.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/* - -# which gpu card to use, -1 means cpu -GPU=$1 - -# the mnist data path -# you can get the mnist data using the script core/scripts/get_mnist_data.sh -DATA_PATH=$2 - -java -Xmx4G -cp $CLASS_PATH \ - org.apache.mxnetexamples.customop.ExampleCustomOp \ - --data-path $DATA_PATH \ - --gpu $GPU diff --git a/scala-package/examples/scripts/customop/run_customopwithrtc.sh b/scala-package/examples/scripts/customop/run_customopwithrtc.sh deleted file mode 100644 index 432eff5622d2..000000000000 --- a/scala-package/examples/scripts/customop/run_customopwithrtc.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- - -MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/* - -# which gpu card to use -GPU=0 - -# the mnist data path -# you can get the mnist data using the script core/scripts/get_mnist_data.sh -DATA_PATH=$1 - -java -Xmx4G -cp $CLASS_PATH \ - org.apache.mxnetexamples.customop.ExampleCustomOpWithRtc \ - --data-path $DATA_PATH \ - --gpu $GPU diff --git a/scala-package/examples/scripts/infer/bert/get_bert_data.sh b/scala-package/examples/scripts/infer/bert/get_bert_data.sh deleted file mode 100755 index 609aae27cc66..000000000000 --- a/scala-package/examples/scripts/infer/bert/get_bert_data.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -e - -MXNET_ROOT=$(cd "$(dirname $0)/../../.."; pwd) - -data_path=$MXNET_ROOT/scripts/infer/models/static-bert-qa/ - -if [ ! -d "$data_path" ]; then - mkdir -p "$data_path" - curl https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/BertQA/vocab.json -o $data_path/vocab.json - curl https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/BertQA/static_bert_qa-0002.params -o $data_path/static_bert_qa-0002.params - curl https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/BertQA/static_bert_qa-symbol.json -o $data_path/static_bert_qa-symbol.json -fi diff --git a/scala-package/examples/scripts/infer/bert/run_bert_qa_example.sh b/scala-package/examples/scripts/infer/bert/run_bert_qa_example.sh deleted file mode 100755 index d8ba092c5c1b..000000000000 --- a/scala-package/examples/scripts/infer/bert/run_bert_qa_example.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set -e - -MXNET_ROOT=$(cd "$(dirname $0)/../../../../.."; pwd) - -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/* - -java -Xmx8G -Dmxnet.traceLeakedObjects=true -cp $CLASS_PATH \ - org.apache.mxnetexamples.javaapi.infer.bert.BertQA $@ diff --git a/scala-package/examples/scripts/infer/imageclassifier/get_resnet_18_data.sh b/scala-package/examples/scripts/infer/imageclassifier/get_resnet_18_data.sh deleted file mode 100755 index b12993210ed2..000000000000 --- a/scala-package/examples/scripts/infer/imageclassifier/get_resnet_18_data.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -e - -MXNET_ROOT=$(cd "$(dirname $0)/../../.."; pwd) - -data_path=$MXNET_ROOT/scripts/infer/models/resnet-18/ - -image_path=$MXNET_ROOT/scripts/infer/images/ - -if [ ! -d "$data_path" ]; then - mkdir -p "$data_path" -fi - -if [ ! -d "$image_path" ]; then - mkdir -p "$image_path" -fi - -if [ ! -f "$data_path" ]; then - curl https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/resnet-18-symbol.json -o $data_path/resnet-18-symbol.json - curl https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/resnet-18-0000.params -o $data_path/resnet-18-0000.params - curl https://s3.us-east-2.amazonaws.com/scala-infer-models/resnet-18/synset.txt -o $data_path/synset.txt - curl https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/kitten.jpg -o $image_path/kitten.jpg -fi diff --git a/scala-package/examples/scripts/infer/imageclassifier/get_resnet_data.sh b/scala-package/examples/scripts/infer/imageclassifier/get_resnet_data.sh deleted file mode 100755 index dc37bdbd73ef..000000000000 --- a/scala-package/examples/scripts/infer/imageclassifier/get_resnet_data.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set -e - -MXNET_ROOT=$(cd "$(dirname $0)/../../.."; pwd) - -data_path=$MXNET_ROOT/scripts/infer/models/resnet-152/ - -image_path=$MXNET_ROOT/scripts/infer/images/ - -if [ ! -d "$data_path" ]; then - mkdir -p "$data_path" -fi - -if [ ! -d "$image_path" ]; then - mkdir -p "$image_path" -fi - -if [ ! -f "$data_path" ]; then - curl https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/resnet-152-0000.params -o $data_path/resnet-152-0000.params - curl https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/resnet-152-symbol.json -o $data_path/resnet-152-symbol.json - curl https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/synset.txt -o $data_path/synset.txt - curl https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/resnet152/kitten.jpg -o $image_path/kitten.jpg -fi diff --git a/scala-package/examples/scripts/infer/imageclassifier/run_classifier_example.sh b/scala-package/examples/scripts/infer/imageclassifier/run_classifier_example.sh deleted file mode 100755 index aa021478b27e..000000000000 --- a/scala-package/examples/scripts/infer/imageclassifier/run_classifier_example.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -e - -MXNET_ROOT=$(cd "$(dirname $0)/../../../../.."; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*:$MXNET_ROOT/scala-package/infer/target/* - -# model dir -MODEL_PATH_PREFIX=$1 -# input image -INPUT_IMG=$2 -# which input image dir -INPUT_DIR=$3 - -java -Xmx8G -Dmxnet.traceLeakedObjects=true -cp $CLASS_PATH \ - org.apache.mxnetexamples.infer.imageclassifier.ImageClassifierExample \ - --model-path-prefix $MODEL_PATH_PREFIX \ - --input-image $INPUT_IMG \ - --input-dir $INPUT_DIR diff --git a/scala-package/examples/scripts/infer/objectdetector/get_ssd_data.sh b/scala-package/examples/scripts/infer/objectdetector/get_ssd_data.sh deleted file mode 100755 index e901fc80792f..000000000000 --- a/scala-package/examples/scripts/infer/objectdetector/get_ssd_data.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -set -e - -MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd) - -data_path=$MXNET_ROOT/examples/scripts/infer/models/resnet50_ssd - -image_path=$MXNET_ROOT/examples/scripts/infer/images - -if [ ! -d "$data_path" ]; then - mkdir -p "$data_path" -fi - -if [ ! -d "$image_path" ]; then - mkdir -p "$image_path" -fi - -if [ ! -f "$data_path" ]; then - curl https://s3.amazonaws.com/model-server/models/resnet50_ssd/resnet50_ssd_model-symbol.json -o $data_path/resnet50_ssd_model-symbol.json - curl https://s3.amazonaws.com/model-server/models/resnet50_ssd/resnet50_ssd_model-0000.params -o $data_path/resnet50_ssd_model-0000.params - curl https://s3.amazonaws.com/model-server/models/resnet50_ssd/synset.txt -o $data_path/synset.txt - curl https://cloud.githubusercontent.com/assets/3307514/20012566/cbb53c76-a27d-11e6-9aaa-91939c9a1cd5.jpg -o $image_path/000001.jpg - curl https://cloud.githubusercontent.com/assets/3307514/20012567/cbb60336-a27d-11e6-93ff-cbc3f09f5c9e.jpg -o $image_path/dog.jpg - curl https://cloud.githubusercontent.com/assets/3307514/20012563/cbb41382-a27d-11e6-92a9-18dab4fd1ad3.jpg -o $image_path/person.jpg -fi - diff --git a/scala-package/examples/scripts/infer/objectdetector/run_ssd_example.sh b/scala-package/examples/scripts/infer/objectdetector/run_ssd_example.sh deleted file mode 100755 index 9f290416bb32..000000000000 --- a/scala-package/examples/scripts/infer/objectdetector/run_ssd_example.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -set -x - -MXNET_ROOT=$(cd "$(dirname $0)/../../../../../"; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/* - -# model dir and prefix -MODEL_DIR=$1 -# input image -INPUT_IMG=$2 -# which input image dir -INPUT_DIR=$3 - -java -Xmx8G -cp $CLASS_PATH \ - org.apache.mxnetexamples.infer.objectdetector.SSDClassifierExample \ - --model-path-prefix $MODEL_DIR \ - --input-image $INPUT_IMG \ - --input-dir $INPUT_DIR diff --git a/scala-package/examples/scripts/infer/objectdetector/run_ssd_java_example.sh b/scala-package/examples/scripts/infer/objectdetector/run_ssd_java_example.sh deleted file mode 100755 index 07328b50dce6..000000000000 --- a/scala-package/examples/scripts/infer/objectdetector/run_ssd_java_example.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -MXNET_ROOT=$(cd "$(dirname $0)/../../../../../"; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*:$MXNET_ROOT/scala-package/infer/target/*:$MXNET_ROOT/scala-package/examples/src/main/scala/org/apache/mxnetexamples/api/java/infer/imageclassifier/* - -# model dir and prefix -MODEL_DIR=$1 -# input image -INPUT_IMG=$2 -# which input image dir -INPUT_DIR=$3 - -java -Xmx8G -cp $CLASS_PATH \ - org.apache.mxnetexamples.javaapi.infer.objectdetector.SSDClassifierExample \ - --model-path-prefix $MODEL_DIR \ - --input-image $INPUT_IMG \ - --input-dir $INPUT_DIR diff --git a/scala-package/examples/scripts/infer/predictor/run_predictor_java_example.sh b/scala-package/examples/scripts/infer/predictor/run_predictor_java_example.sh deleted file mode 100755 index 8e8752ee87f4..000000000000 --- a/scala-package/examples/scripts/infer/predictor/run_predictor_java_example.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -MXNET_ROOT=$(cd "$(dirname $0)/../../../../../"; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*:$MXNET_ROOT/scala-package/infer/target/* - -# model dir and prefix -MODEL_DIR=$1 -# input image -INPUT_IMG=$2 - -java -Xmx8G -cp $CLASS_PATH \ - org.apache.mxnetexamples.javaapi.infer.predictor.PredictorExample \ - --model-path-prefix $MODEL_DIR \ - --input-image $INPUT_IMG diff --git a/scala-package/examples/scripts/module/mnist_mlp.sh b/scala-package/examples/scripts/module/mnist_mlp.sh deleted file mode 100755 index 907552a45b46..000000000000 --- a/scala-package/examples/scripts/module/mnist_mlp.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -ROOT_DIR=$(cd `dirname $0`/../../..; pwd) -CLASSPATH=$ROOT_DIR/assembly/target/*:$ROOT_DIR/examples/target/*:$ROOT_DIR/examples/target/classes/lib/* - -mkdir -p model -java -Xmx4G -cp $CLASSPATH \ - org.apache.mxnetexamples.module.MnistMlp \ - --data-dir "$ROOT_DIR/core/data/" \ - --batch-size 10 \ - --num-epoch 5 diff --git a/scala-package/examples/scripts/module/run_sequential_module.sh b/scala-package/examples/scripts/module/run_sequential_module.sh deleted file mode 100644 index c7507850d40b..000000000000 --- a/scala-package/examples/scripts/module/run_sequential_module.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -ROOT_DIR=$(cd `dirname $0`/../../..; pwd) -CLASSPATH=$ROOT_DIR/assembly/target/*:$ROOT_DIR/examples/target/*:$ROOT_DIR/examples/target/classes/lib/* - -DATA_DIR=$ROOT_DIR/core/data - -SAVE_MODEL_PATH=. 
- -# LOAD_MODEL=seqModule-0001.params - -java -Xmx4G -cp $CLASSPATH \ - org.apache.mxnetexamples.module.SequentialModuleEx \ - --data-dir $DATA_DIR \ - --batch-size 10 \ - --num-epoch 2 \ - --lr 0.01 \ - --save-model-path $SAVE_MODEL_PATH \ - # --load-model-path $LOAD_MODEL diff --git a/scala-package/examples/scripts/neuralstyle_end2end/run_test_end2end.sh b/scala-package/examples/scripts/neuralstyle_end2end/run_test_end2end.sh deleted file mode 100644 index a040ba7ca99b..000000000000 --- a/scala-package/examples/scripts/neuralstyle_end2end/run_test_end2end.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/* - -INPUT_IMG=$1 -MODEL_DIR=$2 -OUTPUT_DIR=$3 -GPU=0 - -java -Xmx1024m -cp $CLASS_PATH \ - org.apache.mxnetexamples.neuralstyle.end2end.BoostInference \ - --model-path $MODEL_DIR \ - --input-image $INPUT_IMG \ - --output-path $OUTPUT_DIR \ - --gpu $GPU diff --git a/scala-package/examples/scripts/neuralstyle_end2end/run_train_end2end.sh b/scala-package/examples/scripts/neuralstyle_end2end/run_train_end2end.sh deleted file mode 100644 index d63be1d406ab..000000000000 --- a/scala-package/examples/scripts/neuralstyle_end2end/run_train_end2end.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-
-
-MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd)
-CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
-
-# for more details, please refer to
-# https://github.com/Ldpe2G/mxnet/blob/develop/example/neural-style/end_to_end/README.md
-TRAIN_DATA_PATH=$1
-STYLE_IMG=$2
-VGG_MODEL_PATH=$3
-SAVE_MODEL_DIR=$4
-GPU=0
-
-java -Xmx1024m -cp $CLASS_PATH \
-    org.apache.mxnetexamples.neuralstyle.end2end.BoostTrain \
-    --data-path $TRAIN_DATA_PATH \
-    --vgg-model-path $VGG_MODEL_PATH \
-    --save-model-path $SAVE_MODEL_DIR \
-    --style-image $STYLE_IMG \
-    --gpu $GPU
diff --git a/scala-package/examples/scripts/profiler/run_profiler_matmul.sh b/scala-package/examples/scripts/profiler/run_profiler_matmul.sh
deleted file mode 100644
index 2f7828573aa5..000000000000
--- a/scala-package/examples/scripts/profiler/run_profiler_matmul.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd)
-CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
-
-# which gpu card to use, -1 means cpu
-GPU=0
-
-MODE="symbolic"
-OUTPUT_PATH="."
-# Just load the trace file at chrome://tracing in your Chrome browser
-FILE_NAME="profile_matmul_20iter.json"
-
-java -Xmx4G -cp $CLASS_PATH \
-    org.apache.mxnetexamples.profiler.ProfilerMatMul \
-    --gpu $GPU \
-    --profiler-mode $MODE \
-    --output-path $OUTPUT_PATH \
-    --profile-filename $FILE_NAME
-
diff --git a/scala-package/examples/scripts/profiler/run_profiler_ndarray.sh b/scala-package/examples/scripts/profiler/run_profiler_ndarray.sh
deleted file mode 100644
index 3b8cf22a5abd..000000000000
--- a/scala-package/examples/scripts/profiler/run_profiler_ndarray.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
- - -MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/* - - -MODE="all" -OUTPUT_PATH="." -# Just load the trace file at chrome://tracing in your Chrome browser -FILE_NAME="profile_ndarray.json" - -java -Xmx4G -cp $CLASS_PATH \ - org.apache.mxnetexamples.profiler.ProfilerNDArray \ - --profiler-mode $MODE \ - --output-path $OUTPUT_PATH \ - --profile-filename $FILE_NAME - diff --git a/scala-package/examples/scripts/rnn/run_lstm_bucketing.sh b/scala-package/examples/scripts/rnn/run_lstm_bucketing.sh deleted file mode 100644 index 508dcac80cff..000000000000 --- a/scala-package/examples/scripts/rnn/run_lstm_bucketing.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/* - -DATA_TRAIN=$1 -DATA_VAL=$2 -NUM_EPOCH=5 -GPUS="0" -SAVE_MODEL_PATH=./model/lstm - -java -Xmx4G -cp $CLASS_PATH \ - org.apache.mxnetexamples.rnn.LstmBucketing \ - --data-train $DATA_TRAIN \ - --data-val $DATA_VAL \ - --num-epoch $NUM_EPOCH \ - --gpus $GPUS \ - --save-model-path $SAVE_MODEL_PATH diff --git a/scala-package/examples/scripts/rnn/run_train_charrnn.sh b/scala-package/examples/scripts/rnn/run_train_charrnn.sh deleted file mode 100755 index f8c9a3534923..000000000000 --- a/scala-package/examples/scripts/rnn/run_train_charrnn.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- - -MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/* - -# which gpu card to use, -1 means cpu -GPU=$1 -# you can get the training data file using the following command -# curl -O http://data.mxnet.io/data/char_lstm.zip -# unzip -o char_lstm.zip -# for example ./datas/obama.txt -DATA_PATH=$2 -# for example ./models -SAVE_MODEL_PATH=$3 - -java -Xmx4G -cp $CLASS_PATH \ - org.apache.mxnetexamples.rnn.TrainCharRnn \ - --data-path $DATA_PATH \ - --save-model-path $SAVE_MODEL_PATH \ - --gpu $GPU \ diff --git a/scala-package/examples/scripts/run_neuralstyle.sh b/scala-package/examples/scripts/run_neuralstyle.sh deleted file mode 100644 index fb482437654f..000000000000 --- a/scala-package/examples/scripts/run_neuralstyle.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - -MXNET_ROOT=$(cd "$(dirname $0)/../../.."; pwd) -CLASS_PATH=$MXNET_ROOT/scala-package/assembly/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/* -INPUT_IMG=$1 -STYLE_IMG=$2 -MODEL_PATH=$MXNET_ROOT/example/neural-style/model/vgg19.params -OUTPUT_DIR=$MXNET_ROOT/example/neural-style/output - -java -Xmx1024m -cp $CLASS_PATH \ - org.apache.mxnetexamples.neuralstyle.NeuralStyle \ - --content-image $INPUT_IMG \ - --style-image $STYLE_IMG \ - --model-path $MODEL_PATH \ - --output-dir $OUTPUT_DIR diff --git a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/benchmark/InferBase.java b/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/benchmark/InferBase.java deleted file mode 100644 index fdcde6b4152c..000000000000 --- a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/benchmark/InferBase.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.mxnetexamples.javaapi.benchmark; - -import org.apache.mxnet.javaapi.Context; -import org.kohsuke.args4j.Option; - -import java.util.List; - -abstract class InferBase { - @Option(name = "--num-runs", usage = "Number of runs") - public int numRun = 1; - @Option(name = "--model-name", usage = "Name of the model") - public String modelName = ""; - @Option(name = "--batchsize", usage = "Size of the batch") - public int batchSize = 1; - - public abstract void preProcessModel(List context); - public abstract void runSingleInference(); - public abstract void runBatchInference(); -} diff --git a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/benchmark/JavaBenchmark.java b/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/benchmark/JavaBenchmark.java deleted file mode 100644 index 4a6bb2dd38bf..000000000000 --- a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/benchmark/JavaBenchmark.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnetexamples.javaapi.benchmark; - -import org.apache.mxnet.javaapi.Context; -import org.kohsuke.args4j.CmdLineParser; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -public class JavaBenchmark { - - private static boolean runBatch = false; - - private static void parse(Object inst, String[] args) { - CmdLineParser parser = new CmdLineParser(inst); - try { - parser.parseArgument(args); - } catch (Exception e) { - System.err.println(e.getMessage() + e); - parser.printUsage(System.err); - System.exit(1); - } - } - - private static long percentile(int p, long[] seq) { - Arrays.sort(seq); - int k = (int) Math.ceil((seq.length - 1) * (p / 100.0)); - return seq[k]; - } - - private static void printStatistics(long[] inferenceTimesRaw, String metricsPrefix) { - long[] inferenceTimes = inferenceTimesRaw; - // remove head and tail - if (inferenceTimes.length > 2) { - inferenceTimes = Arrays.copyOfRange(inferenceTimesRaw, - 1, inferenceTimesRaw.length - 1); - } - double p50 = percentile(50, inferenceTimes) / 1.0e6; - double p99 = percentile(99, inferenceTimes) / 1.0e6; - double p90 = percentile(90, inferenceTimes) / 1.0e6; - long sum = 0; - for (long time: inferenceTimes) sum += time; - double average = sum / (inferenceTimes.length * 1.0e6); - - System.out.println( - String.format("\n%s_p99 %fms\n%s_p90 %fms\n%s_p50 %fms\n%s_average %1.2fms", - metricsPrefix, p99, metricsPrefix, p90, - metricsPrefix, p50, metricsPrefix, average) - ); - - } - - private static List bindToDevice() { - List context = new ArrayList(); - if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && - Integer.valueOf(System.getenv("SCALA_TEST_ON_GPU")) == 1) { - context.add(Context.gpu()); - } else { - context.add(Context.cpu()); - } - return context; - } - - public static void main(String[] args) { - if (args.length < 2) { - StringBuilder sb = new StringBuilder(); - sb.append("Please follow the format:"); - sb.append("\n --model-name "); - sb.append("\n --num-runs "); - sb.append("\n --batchsize "); - System.out.println(sb.toString()); - return; - } - String modelName = args[1]; - InferBase model = null; - switch(modelName) { - case "ObjectDetection": - runBatch = true; - ObjectDetectionBenchmark inst = new ObjectDetectionBenchmark(); - parse(inst, args); - model = inst; - break; - default: - System.err.println("Model name not found! 
" + modelName); - System.exit(1); - } - List context = bindToDevice(); - long[] result = new long[model.numRun]; - model.preProcessModel(context); - if (runBatch) { - for (int i =0;i < model.numRun; i++) { - long currTime = System.nanoTime(); - model.runBatchInference(); - result[i] = System.nanoTime() - currTime; - } - System.out.println("Batchsize: " + model.batchSize); - System.out.println("Num of runs: " + model.numRun); - printStatistics(result, modelName +"batch_inference"); - } - - model.batchSize = 1; - model.preProcessModel(context); - result = new long[model.numRun]; - for (int i = 0; i < model.numRun; i++) { - long currTime = System.nanoTime(); - model.runSingleInference(); - result[i] = System.nanoTime() - currTime; - } - System.out.println("Num of runs: " + model.numRun); - printStatistics(result, modelName + "single_inference"); - } -} diff --git a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/benchmark/ObjectDetectionBenchmark.java b/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/benchmark/ObjectDetectionBenchmark.java deleted file mode 100644 index 257ea3241626..000000000000 --- a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/benchmark/ObjectDetectionBenchmark.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnetexamples.javaapi.benchmark; - -import org.apache.mxnet.infer.javaapi.ObjectDetector; -import org.apache.mxnet.javaapi.*; -import org.kohsuke.args4j.Option; - -import java.util.ArrayList; -import java.util.List; - -class ObjectDetectionBenchmark extends InferBase { - @Option(name = "--model-path-prefix", usage = "input model directory and prefix of the model") - public String modelPathPrefix = "/model/ssd_resnet50_512"; - @Option(name = "--input-image", usage = "the input image") - public String inputImagePath = "/images/dog.jpg"; - - private ObjectDetector objDet; - private NDArray img; - private NDArray$ NDArray = NDArray$.MODULE$; - - public void preProcessModel(List context) { - Shape inputShape = new Shape(new int[] {this.batchSize, 3, 512, 512}); - List inputDescriptors = new ArrayList<>(); - inputDescriptors.add(new DataDesc("data", inputShape, DType.Float32(), "NCHW")); - objDet = new ObjectDetector(modelPathPrefix, inputDescriptors, context, 0); - img = ObjectDetector.bufferedImageToPixels( - ObjectDetector.reshapeImage( - ObjectDetector.loadImageFromFile(inputImagePath), 512, 512 - ), - new Shape(new int[] {1, 3, 512, 512}) - ); - } - - public void runSingleInference() { - List nd = new ArrayList<>(); - nd.add(img); - objDet.objectDetectWithNDArray(nd, 3); - } - - public void runBatchInference() { - List nd = new ArrayList<>(); - NDArray[] temp = new NDArray[batchSize]; - for (int i = 0; i < batchSize; i++) temp[i] = img.copy(); - NDArray batched = NDArray.concat(temp, batchSize, 0, null)[0]; - nd.add(batched); - objDet.objectDetectWithNDArray(nd, 3); - } -} diff --git a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/bert/BertDataParser.java b/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/bert/BertDataParser.java deleted file mode 100644 index 440670afc098..000000000000 --- a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/bert/BertDataParser.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package org.apache.mxnetexamples.javaapi.infer.bert;
-
-import java.io.FileReader;
-import java.util.*;
-
-import com.google.gson.Gson;
-import com.google.gson.JsonArray;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-
-/**
- * This is the utility for pre-processing the data for the BERT model.
- * You can use it to parse the vocabulary JSON into a Java array and dictionary,
- * clean and tokenize sentences, and pad the text.
- */
-public class BertDataParser {
-
-    private Map<String, Integer> token2idx;
-    private List<String> idx2token;
-
-    /**
-     * Parse the vocabulary JSON file.
-     * [PAD], [CLS], [SEP], [MASK], [UNK] are reserved tokens.
-     * @param jsonFile the filePath of the vocab.json
-     * @throws Exception
-     */
-    void parseJSON(String jsonFile) throws Exception {
-        Gson gson = new Gson();
-        token2idx = new HashMap<>();
-        idx2token = new LinkedList<>();
-        JsonObject jsonObject = gson.fromJson(new FileReader(jsonFile), JsonObject.class);
-        JsonArray arr = jsonObject.getAsJsonArray("idx_to_token");
-        for (JsonElement element : arr) {
-            idx2token.add(element.getAsString());
-        }
-        JsonObject preMap = jsonObject.getAsJsonObject("token_to_idx");
-        for (String key : preMap.keySet()) {
-            token2idx.put(key, preMap.get(key).getAsInt());
-        }
-    }
-
-    /**
-     * Tokenize the input: split on all kinds of whitespace and
-     * separate the end-of-sentence symbols: . , ? !
-     * @param input The input string
-     * @return List of tokens
-     */
-    List<String> tokenizer(String input) {
-        String[] step1 = input.split("\\s+");
-        List<String> finalResult = new LinkedList<>();
-        for (String item : step1) {
-            if (item.length() != 0) {
-                if ((item + "a").split("[.,?!]+").length > 1) {
-                    finalResult.add(item.substring(0, item.length() - 1));
-                    finalResult.add(item.substring(item.length() - 1));
-                } else {
-                    finalResult.add(item);
-                }
-            }
-        }
-        return finalResult;
-    }
-
-    /**
-     * Pad the tokens to the required length.
-     * @param tokens input tokens
-     * @param padItem things to pad at the end
-     * @param num total length after padding
-     * @return List of padded tokens
-     */
-    <E> List<E> pad(List<E> tokens, E padItem, int num) {
-        if (tokens.size() >= num) return tokens;
-        List<E> padded = new LinkedList<>(tokens);
-        for (int i = 0; i < num - tokens.size(); i++) {
-            padded.add(padItem);
-        }
-        return padded;
-    }
-
-    /**
-     * Convert tokens to indexes.
-     * @param tokens input tokens
-     * @return List of indexes
-     */
-    List<Integer> token2idx(List<String> tokens) {
-        List<Integer> indexes = new ArrayList<>();
-        for (String token : tokens) {
-            if (token2idx.containsKey(token)) {
-                indexes.add(token2idx.get(token));
-            } else {
-                indexes.add(token2idx.get("[UNK]"));
-            }
-        }
-        return indexes;
-    }
-
-    /**
-     * Convert indexes to tokens.
-     * @param indexes List of indexes
-     * @return List of tokens
-     */
-    List<String> idx2token(List<Integer> indexes) {
-        List<String> tokens = new ArrayList<>();
-        for (int index : indexes) {
-            tokens.add(idx2token.get(index));
-        }
-        return tokens;
-    }
-}
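Together these helpers define the example's whole pre-processing contract: `tokenizer` splits on whitespace and peels one trailing `.`, `,`, `?` or `!` off a token, `pad` right-pads to the fixed sequence length, and `token2idx` falls back to `[UNK]` for unknown words. A minimal, hypothetical scratch class (not part of the original diff; it assumes the same package so it can reach the package-private helpers) makes the behavior concrete:

```java
package org.apache.mxnetexamples.javaapi.infer.bert;

import java.util.List;

// Hypothetical demo class, assuming it sits in the same package as the
// package-private BertDataParser helpers deleted above.
public class BertPreprocessDemo {
    public static void main(String[] args) {
        BertDataParser util = new BertDataParser();

        // "broadcasting?" is split into "broadcasting" and "?"
        List<String> q = util.tokenizer("when did bbc japan start broadcasting?");
        System.out.println(q); // [when, did, bbc, japan, start, broadcasting, ?]

        // BertQA then wraps the question as [CLS] ... [SEP] and right-pads
        // with [PAD] up to the fixed sequence length (12 here for brevity)
        q.add(0, "[CLS]");
        q.add("[SEP]");
        System.out.println(util.pad(q, "[PAD]", 12));
        // [[CLS], when, did, bbc, japan, start, broadcasting, ?, [SEP], [PAD], [PAD], [PAD]]
    }
}
```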
diff --git a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/bert/BertQA.java b/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/bert/BertQA.java
deleted file mode 100644
index dd17b1d4a0a5..000000000000
--- a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/bert/BertQA.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnetexamples.javaapi.infer.bert;
-
-import org.apache.mxnet.infer.javaapi.Predictor;
-import org.apache.mxnet.javaapi.*;
-import org.kohsuke.args4j.CmdLineParser;
-import org.kohsuke.args4j.Option;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.*;
-
-/**
- * This is an example of using BERT for general question-and-answer inference jobs.
- * Users can provide a question together with a paragraph that contains the answer,
- * and the model will find the best answer in that paragraph.
- */
-public class BertQA {
-    @Option(name = "--model-path-prefix", usage = "input model directory and prefix of the model")
-    private String modelPathPrefix = "/model/static_bert_qa";
-    @Option(name = "--model-epoch", usage = "Epoch number of the model")
-    private int epoch = 2;
-    @Option(name = "--model-vocab", usage = "the vocabulary used in the model")
-    private String modelVocab = "/model/vocab.json";
-    @Option(name = "--input-question", usage = "the input question")
-    private String inputQ = "When did BBC Japan start broadcasting?";
-    @Option(name = "--input-answer", usage = "the input answer")
-    private String inputA =
-            "BBC Japan was a general entertainment Channel.\n" +
-            " Which operated between December 2004 and April 2006.\n" +
-            "It ceased operations after its Japanese distributor folded.";
-    @Option(name = "--seq-length", usage = "the maximum length of the sequence")
-    private int seqLength = 384;
-
-    private final static Logger logger = LoggerFactory.getLogger(BertQA.class);
-    private static NDArray$ NDArray = NDArray$.MODULE$;
-
-    private static int argmax(float[] prob) {
-        int maxIdx = 0;
-        for (int i = 0; i < prob.length; i++) {
-            if (prob[maxIdx] < prob[i]) maxIdx = i;
-        }
-        return maxIdx;
-    }
-
-    /**
-     * Do the post-processing on the output: apply softmax to get the probabilities,
-     * reshape, and get the most probable index.
-     * @param result prediction result
-     * @param tokens word tokens
-     * @return Answers clipped from the original paragraph
-     */
-    static List<String> postProcessing(NDArray result, List<String> tokens) {
-        NDArray[] output = NDArray.split(
-                new splitParam(result, 2).setAxis(2));
-        // Get the formatted logits result
-        NDArray startLogits = output[0].reshape(new int[]{0, -3});
-        NDArray endLogits = output[1].reshape(new int[]{0, -3});
-        // Get probability distribution
-        float[] startProb = NDArray.softmax(
-                new softmaxParam(startLogits))[0].toArray();
-        float[] endProb = NDArray.softmax(
-                new softmaxParam(endLogits))[0].toArray();
-        int startIdx = argmax(startProb);
-        int endIdx = argmax(endProb);
-        return tokens.subList(startIdx, endIdx + 1);
-    }
-
-    public static void main(String[] args) throws Exception {
-        BertQA inst = new BertQA();
-        CmdLineParser parser = new CmdLineParser(inst);
-        parser.parseArgument(args);
-        BertDataParser util = new BertDataParser();
-        Context context = Context.cpu();
-        if (System.getenv().containsKey("SCALA_TEST_ON_GPU") &&
-                Integer.valueOf(System.getenv("SCALA_TEST_ON_GPU")) == 1) {
-            context = Context.gpu();
-        }
-        if (System.getenv().containsKey("SCALA_TEST_ON_GPU") &&
-                Integer.valueOf(System.getenv("SCALA_TEST_ON_GPU")) == 1) {
-            context = Context.gpu();
-        }
-        // pre-processing - tokenize sentence
-        List<String> tokenQ = util.tokenizer(inst.inputQ.toLowerCase());
-        List<String> tokenA = util.tokenizer(inst.inputA.toLowerCase());
-        int validLength = tokenQ.size() + tokenA.size();
-        logger.info("Valid length: " + validLength);
-        // generate token types [0000...1111....0000]
-        List<Float> QAEmbedded = new ArrayList<>();
-        util.pad(QAEmbedded, 0f, tokenQ.size()).addAll(
-                util.pad(new ArrayList<Float>(), 1f, tokenA.size())
-        );
-        List<Float> tokenTypes = util.pad(QAEmbedded, 0f, inst.seqLength);
-        // make BERT pre-processing standard
-        tokenQ.add("[SEP]");
-        tokenQ.add(0, "[CLS]");
-        tokenA.add("[SEP]");
-        tokenQ.addAll(tokenA);
-        List<String> tokens = util.pad(tokenQ, "[PAD]", inst.seqLength);
-        logger.info("Pre-processed tokens: " + Arrays.toString(tokenQ.toArray()));
-        // pre-processing - token to index translation
-        util.parseJSON(inst.modelVocab);
-        List<Integer> indexes = util.token2idx(tokens);
-        List<Float> indexesFloat = new ArrayList<>();
-        for (int integer : indexes) {
-            indexesFloat.add((float) integer);
-        }
-        // Preparing the input data
-        List<NDArray> inputBatch = Arrays.asList(
-                new NDArray(indexesFloat,
-                        new Shape(new int[]{1, inst.seqLength}), context),
-                new NDArray(tokenTypes,
-                        new Shape(new int[]{1, inst.seqLength}), context),
-                new NDArray(new float[]{validLength},
-                        new Shape(new int[]{1}), context)
-        );
-        // Build the model
-        List<Context> contexts = new ArrayList<>();
-        contexts.add(context);
-        List<DataDesc> inputDescs = Arrays.asList(
-                new DataDesc("data0",
-                        new Shape(new int[]{1, inst.seqLength}), DType.Float32(), Layout.NT()),
-                new DataDesc("data1",
-                        new Shape(new int[]{1, inst.seqLength}), DType.Float32(), Layout.NT()),
-                new DataDesc("data2",
-                        new Shape(new int[]{1}), DType.Float32(), Layout.N())
-        );
-        Predictor bertQA = new Predictor(inst.modelPathPrefix, inputDescs, contexts, inst.epoch);
-        // Start prediction
-        NDArray result = bertQA.predictWithNDArray(inputBatch).get(0);
-        List<String> answer = postProcessing(result, tokens);
-        logger.info("Question: " + inst.inputQ);
-        logger.info("Answer paragraph: " + inst.inputA);
-        logger.info("Answer: " + Arrays.toString(answer.toArray()));
-    }
-}
diff --git a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/bert/README.md b/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/bert/README.md
deleted file mode 100644
index 7925a259f48f..000000000000
--- a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/bert/README.md
+++ /dev/null
@@ -1,103 +0,0 @@
-<!--- Licensed to the Apache Software Foundation (ASF) under one -->
-<!--- or more contributor license agreements.  See the NOTICE file -->
-<!--- distributed with this work for additional information -->
-<!--- regarding copyright ownership.  The ASF licenses this file -->
-<!--- to you under the Apache License, Version 2.0 (the -->
-<!--- "License"); you may not use this file except in compliance -->
-<!--- with the License.  You may obtain a copy of the License at -->
-<!--- -->
-<!---   http://www.apache.org/licenses/LICENSE-2.0 -->
-<!--- -->
-<!--- Unless required by applicable law or agreed to in writing, -->
-<!--- software distributed under the License is distributed on an -->
-<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
-<!--- KIND, either express or implied.  See the License for the -->
-<!--- specific language governing permissions and limitations -->
-<!--- under the License. -->
-
-# Run BERT QA model using Java Inference API
-
-In this tutorial, we will walk through the BERT QA model trained with MXNet.
-Users can provide a question together with a paragraph containing the answer to the model and
-the model will be able to find the best answer from the answer paragraph.
-
-Example:
-```text
-Q: When did BBC Japan start broadcasting?
-```
-
-Answer paragraph:
-```text
-BBC Japan was a general entertainment channel, which operated between December 2004 and April 2006.
-It ceased operations after its Japanese distributor folded.
-```
-And it picked the right one:
-```text
-A: December 2004
-```
-
-## Setup Guide
-
-### Step 1: Download the model
-
-For this tutorial, you can get the model and vocabulary by running the following bash script. This script will use `wget` to download these artifacts from AWS S3.
-
-From the `scala-package/examples/scripts/infer/bert/` folder run:
-
-```bash
-./get_bert_data.sh
-```
-
-### Step 2: Setup the data path and parameters of the model
-
-The available arguments are as follows:
-
-| Argument                      | Comments                                 |
-| ----------------------------- | ---------------------------------------- |
-| `--model-path-prefix`         | Folder path with prefix to the model (including json, params). |
-| `--model-vocab`               | Vocabulary path |
-| `--model-epoch`               | Epoch number of the model |
-| `--input-question`            | Question to ask the model |
-| `--input-answer`              | Paragraph that contains the answer |
-| `--seq-length`                | Sequence length of the model (384 by default) |
-
-### Step 3: Run Inference
-After the previous steps, you should be able to run the code using the following script, which will pass all of the required parameters to the Infer API.
-
-From the `scala-package/examples/scripts/infer/bert/` folder run:
-
-```bash
-./run_bert_qa_example.sh --model-path-prefix ../models/static-bert-qa/static_bert_qa \
-                         --model-vocab ../models/static-bert-qa/vocab.json \
-                         --model-epoch 2
-```
-
-## Background
-
-To learn more about how BERT works in MXNet, please follow this [MXNet Gluon tutorial on NLP using BERT](https://medium.com/apache-mxnet/gluon-nlp-bert-6a489bdd3340).
-
-The model was extracted from MXNet GluonNLP with static length settings.
-
-[Download link for the script](https://gluon-nlp.mxnet.io/_downloads/bert.zip)
-
-The original description can be found in the [MXNet GluonNLP model zoo](https://gluon-nlp.mxnet.io/model_zoo/bert/index.html#bert-base-on-squad-1-1).
-```bash
-python static_finetune_squad.py --optimizer adam --accumulate 2 --batch_size 6 --lr 3e-5 --epochs 2 --gpu 0 --export
-```
-This script will generate the `json` and `param` files that are the standard MXNet model files.
-By default, it uses the `bert_12_768_12` model with extra layers for QA jobs.
-
-After that, to be able to use it in Java, we need to export the dictionary from the script so that the text can be parsed into actual indexes. Please add the following lines after [this line](https://github.com/dmlc/gluon-nlp/blob/master/scripts/bert/staticbert/static_finetune_squad.py#L262).
-```python
-with open("vocab.json", "w") as f:
-    f.write(vocab.to_json())
-```
-This will export the token vocabulary in JSON format.
-Once you have these three files, you will be able to run this example without problems.
diff --git a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/objectdetector/README.md b/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/objectdetector/README.md
deleted file mode 100644
index 21c062938e93..000000000000
--- a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/objectdetector/README.md
+++ /dev/null
@@ -1,114 +0,0 @@
-<!--- Licensed to the Apache Software Foundation (ASF) under one -->
-<!--- or more contributor license agreements.  See the NOTICE file -->
-<!--- distributed with this work for additional information -->
-<!--- regarding copyright ownership.  The ASF licenses this file -->
-<!--- to you under the Apache License, Version 2.0 (the -->
-<!--- "License"); you may not use this file except in compliance -->
-<!--- with the License.  You may obtain a copy of the License at -->
-<!--- -->
-<!---   http://www.apache.org/licenses/LICENSE-2.0 -->
-<!--- -->
-<!--- Unless required by applicable law or agreed to in writing, -->
-<!--- software distributed under the License is distributed on an -->
-<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
-<!--- KIND, either express or implied.  See the License for the -->
-<!--- specific language governing permissions and limitations -->
-<!--- under the License. -->
-
-# Single Shot Multi Object Detection using Java Inference API
-
-In this example, you will learn how to use the Java Inference API to run inference on a pre-trained Single Shot Multi Object Detection (SSD) MXNet model.
-
-The model is trained on the [Pascal VOC 2012 dataset](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html). The network is an SSD model built with Resnet50 as the base network to extract image features.
-The model is trained to detect the following entities (classes): ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']. For more details about the model, you can refer to the [MXNet SSD example](https://github.com/apache/incubator-mxnet/tree/master/example/ssd).
-
-
-## Contents
-
-1. [Prerequisites](#prerequisites)
-2. [Download artifacts](#download-artifacts)
-3. [Setup datapath and parameters](#setup-datapath-and-parameters)
-4. [Run the image inference example](#run-the-image-inference-example)
-5. [Infer APIs](#infer-api-details)
-6. [Next steps](#next-steps)
-
-
-## Prerequisites
-
-1. [Build MXNet](https://mxnet.apache.org/get_started/ubuntu_setup)
-2. [Build the MXNet Scala/Java Package](https://mxnet.apache.org/get_started/scala_setup)
-3. [IntelliJ IDE (or alternative IDE) project setup](https://mxnet.apache.org/tutorials/java/mxnet_java_on_intellij.html) with the MXNet Scala/Java Package
-4. wget
-
-
-## Setup Guide
-
-### Download Artifacts
-#### Step 1
-You can download the files using the script `get_ssd_data.sh`. It will download and place the model files in a `model` folder and the test image files in an `image` folder in the current directory.
-From the `scala-package/examples/scripts/infer/objectdetector/` folder run:
-
-```bash
-./get_ssd_data.sh
-```
-
-**Note**: You may need to run `chmod +x get_ssd_data.sh` before running this script.
-
-In the pre-trained model, the `input_name` is `data` and the shape is `(1, 3, 512, 512)`.
-This shape translates to: a batch of `1` image, the image has color and uses `3` channels (RGB), and the image has the dimensions of `512` pixels in height by `512` pixels in width.
-
-`image/jpeg` is the expected input type, since this example's image pre-processor only supports the handling of binary JPEG images.
-
-The output shape is `(1, 6132, 6)`. As with the input, the `1` is the number of images. `6132` is the number of prediction results, and `6` is the size of each prediction. Each prediction contains the following components:
-- `Class`
-- `Accuracy`
-- `Xmin`
-- `Ymin`
-- `Xmax`
-- `Ymax`
-
-A sketch of decoding these raw rows by hand appears after the run instructions below.
-
-
-### Setup Datapath and Parameters
-#### Step 2
-The following parameters are defined for this example; you can find more information in the `SSDClassifierExample` class.
-
-| Argument                      | Comments                                 |
-| ----------------------------- | ---------------------------------------- |
-| `model-path-prefix`           | Folder path with prefix to the model (including json, params, and any synset file). |
-| `input-image`                 | The image to run inference on. |
-| `input-dir`                   | The directory of images to run inference on. |
-
-
-## How to Run Inference
-After the previous steps, you should be able to run the code using the following script, which will pass all of the required parameters to the Infer API.
-
-From the `scala-package/examples/scripts/infer/objectdetector/` folder run:
-
-```bash
-./run_ssd_java_example.sh ../models/resnet50_ssd/resnet50_ssd_model ../images/dog.jpg ../images
-```
-
-**Notes**:
-* These are relative paths to this script.
-* You may need to run `chmod +x run_ssd_java_example.sh` before running this script.
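As described in the output-shape discussion above, each of the 6132 prediction rows has the form `[class, accuracy, xmin, ymin, xmax, ymax]`, with coordinates normalized to the image size. The `ObjectDetector` API decodes this for you; the following is a minimal, self-contained Java sketch of doing it by hand, where the `rawOutput` array, the class-name list, and the 0.5 confidence threshold are illustrative assumptions rather than part of the example:

```java
// Decode SSD prediction rows of the form [classId, confidence, xmin, ymin, xmax, ymax].
// Coordinates are normalized to [0, 1]; rows with a negative class id are padding.
static void printDetections(float[][] rawOutput, String[] classNames, int imgW, int imgH) {
    final float threshold = 0.5f;  // assumed confidence cutoff for this sketch
    for (float[] row : rawOutput) {
        int classId = (int) row[0];
        if (classId < 0 || row[1] < threshold) {
            continue;  // skip padded rows and low-confidence detections
        }
        System.out.printf("Class: %s%nProbability: %.4f%nBox: (%.1f, %.1f, %.1f, %.1f)%n",
                classNames[classId], row[1],
                row[2] * imgW, row[3] * imgH,   // xmin, ymin scaled to pixel coordinates
                row[4] * imgW, row[5] * imgH);  // xmax, ymax scaled to pixel coordinates
    }
}
```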
-
-The example should give expected output as shown below:
-```
-Class: car
-Probabilities: 0.99847263
-(Coord:,312.21335,72.0291,456.01443,150.66176)
-Class: bicycle
-Probabilities: 0.90473825
-(Coord:,155.95807,149.96362,383.8369,418.94513)
-Class: dog
-Probabilities: 0.8226818
-(Coord:,83.82353,179.13998,206.63783,476.7875)
-```
-The outputs are for the input image, with the top 3 predictions picked.
-
-
-## Infer API Details
-This example uses the `ObjectDetector` class provided by MXNet's Java Infer API. It provides methods to load images, create an `NDArray` out of a Java `BufferedImage`, and run prediction using the Classifier and Predictor APIs.
-
-
-## References
-This documentation used the model and inference setup guide from the [MXNet Model Server SSD example](https://github.com/awslabs/mxnet-model-server/blob/master/examples/ssd/README.md).
diff --git a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/objectdetector/SSDClassifierExample.java b/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/objectdetector/SSDClassifierExample.java
deleted file mode 100644
index 31b8514de345..000000000000
--- a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/objectdetector/SSDClassifierExample.java
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnetexamples.javaapi.infer.objectdetector;
-
-import org.apache.mxnet.infer.javaapi.ObjectDetectorOutput;
-import org.kohsuke.args4j.CmdLineParser;
-import org.kohsuke.args4j.Option;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.mxnet.javaapi.*;
-import org.apache.mxnet.infer.javaapi.ObjectDetector;
-
-// scalastyle:off
-import javax.imageio.ImageIO;
-import java.awt.image.BufferedImage;
-// scalastyle:on
-
-import java.util.*;
-
-import java.io.File;
-
-public class SSDClassifierExample {
-    @Option(name = "--model-path-prefix", usage = "input model directory and prefix of the model")
-    private String modelPathPrefix = "/model/ssd_resnet50_512";
-    @Option(name = "--input-image", usage = "the input image")
-    private String inputImagePath = "/images/dog.jpg";
-    @Option(name = "--input-dir", usage = "the input batch of images directory")
-    private String inputImageDir = "/images/";
-
-    final static Logger logger = LoggerFactory.getLogger(SSDClassifierExample.class);
-
-    static List<List<ObjectDetectorOutput>>
-    runObjectDetectionSingle(String modelPathPrefix, String inputImagePath, List<Context> context) {
-        Shape inputShape = new Shape(new int[]{1, 3, 512, 512});
-        List<DataDesc> inputDescriptors = new ArrayList<DataDesc>();
-        inputDescriptors.add(new DataDesc("data", inputShape, DType.Float32(), "NCHW"));
-        BufferedImage img = ObjectDetector.loadImageFromFile(inputImagePath);
-        ObjectDetector objDet = new ObjectDetector(modelPathPrefix, inputDescriptors, context, 0);
-        return objDet.imageObjectDetect(img, 3);
-    }
-
-    static List<List<List<ObjectDetectorOutput>>>
-    runObjectDetectionBatch(String modelPathPrefix, String inputImageDir, List<Context> context) {
-        Shape inputShape = new Shape(new int[]{1, 3, 512, 512});
-        List<DataDesc> inputDescriptors = new ArrayList<DataDesc>();
-        inputDescriptors.add(new DataDesc("data", inputShape, DType.Float32(), "NCHW"));
-        ObjectDetector objDet = new ObjectDetector(modelPathPrefix, inputDescriptors, context, 0);
-
-        // Loading batch of images from the directory path
-        List<List<String>> batchFiles = generateBatches(inputImageDir, 20);
-        List<List<List<ObjectDetectorOutput>>> outputList
-                = new ArrayList<List<List<ObjectDetectorOutput>>>();
-
-        for (List<String> batchFile : batchFiles) {
-            List<BufferedImage> imgList = ObjectDetector.loadInputBatch(batchFile);
-            // Running inference on batch of images loaded in previous step
-            List<List<ObjectDetectorOutput>> tmp
-                    = objDet.imageBatchObjectDetect(imgList, 5);
-            outputList.add(tmp);
-        }
-        return outputList;
-    }
-
-    static List<List<String>> generateBatches(String inputImageDirPath, int batchSize) {
-        File dir = new File(inputImageDirPath);
-
-        List<List<String>> output = new ArrayList<List<String>>();
-        List<String> batch = new ArrayList<String>();
-        for (File imgFile : dir.listFiles()) {
-            batch.add(imgFile.getPath());
-            if (batch.size() == batchSize) {
-                output.add(batch);
-                batch = new ArrayList<String>();
-            }
-        }
-        if (batch.size() > 0) {
-            output.add(batch);
-        }
-        return output;
-    }
-
-    public static void main(String[] args) {
-        SSDClassifierExample inst = new SSDClassifierExample();
-        CmdLineParser parser = new CmdLineParser(inst);
-        try {
-            parser.parseArgument(args);
-        } catch (Exception e) {
-            logger.error(e.getMessage(), e);
-            parser.printUsage(System.err);
-            System.exit(1);
-        }
-
-        String mdprefixDir = inst.modelPathPrefix;
-        String imgPath = inst.inputImagePath;
-        String imgDir = inst.inputImageDir;
-
-        if (!checkExist(Arrays.asList(mdprefixDir + "-symbol.json", imgDir, imgPath))) {
-            logger.error("Model or input image path does not exist");
-            System.exit(1);
-        }
-
-        List<Context> context = new ArrayList<Context>();
-        if (System.getenv().containsKey("SCALA_TEST_ON_GPU") &&
-                Integer.valueOf(System.getenv("SCALA_TEST_ON_GPU")) == 1) {
-            context.add(Context.gpu());
-        } else {
-            context.add(Context.cpu());
-        }
-
-        try {
-            Shape inputShape = new Shape(new int[]{1, 3, 512, 512});
-            Shape outputShape = new Shape(new int[]{1, 6132, 6});
-
-            StringBuilder outputStr = new StringBuilder().append("\n");
-
-            List<List<ObjectDetectorOutput>> output
-                    = runObjectDetectionSingle(mdprefixDir, imgPath, context);
-
-            // Creating the bounding box material
-            BufferedImage buf = ImageIO.read(new File(imgPath));
-            int width = buf.getWidth();
-            int height = buf.getHeight();
-            List<Map<String, Integer>> boxes = new ArrayList<>();
-            List<String> names = new ArrayList<>();
-            for (List<ObjectDetectorOutput> ele : output) {
-                for (ObjectDetectorOutput i : ele) {
-                    outputStr.append("Class: " + i.getClassName() + "\n");
-                    outputStr.append("Probabilities: " + i.getProbability() + "\n");
-                    names.add(i.getClassName());
-                    Map<String, Integer> map = new HashMap<>();
-                    float xmin = i.getXMin() * width;
-                    float xmax = i.getXMax() * width;
-                    float ymin = i.getYMin() * height;
-                    float ymax = i.getYMax() * height;
-                    List<Float> coord = Arrays.asList(xmin, xmax, ymin, ymax);
-                    map.put("xmin", (int) xmin);
-                    map.put("xmax", (int) xmax);
-                    map.put("ymin", (int) ymin);
-                    map.put("ymax", (int) ymax);
-                    boxes.add(map);
-                    StringBuilder sb = new StringBuilder();
-                    for (float c : coord) {
-                        sb.append(", ").append(c);
-                    }
-                    outputStr.append("Coord:" + sb.substring(2) + "\n");
-                }
-            }
-            logger.info(outputStr.toString());
-
-            // Convert to image
-            Image.drawBoundingBox(buf, boxes, names);
-            File outputFile = new File("boundingImage.png");
-            ImageIO.write(buf, "png", outputFile);
-
-            List<List<List<ObjectDetectorOutput>>> outputList =
-                    runObjectDetectionBatch(mdprefixDir, imgDir, context);
-
-            outputStr = new StringBuilder().append("\n");
-            int index = 0;
-            for (List<List<ObjectDetectorOutput>> i : outputList) {
-                for (List<ObjectDetectorOutput> j : i) {
-                    outputStr.append("*** Image " + (index + 1) + "***" + "\n");
-                    for (ObjectDetectorOutput k : j) {
-                        outputStr.append("Class: " + k.getClassName() + "\n");
-                        outputStr.append("Probabilities: " + k.getProbability() + "\n");
-                        // Scale x coordinates by the width and y coordinates by the height.
-                        // Note: width and height are taken from the single-image run above.
-                        List<Float> coord = Arrays.asList(k.getXMin() * width,
-                                k.getXMax() * width, k.getYMin() * height, k.getYMax() * height);
-
-                        StringBuilder sb = new StringBuilder();
-                        for (float c : coord) {
-                            sb.append(", ").append(c);
-                        }
-                        outputStr.append("Coord:" + sb.substring(2) + "\n");
-                    }
-                    index++;
-                }
-            }
-            logger.info(outputStr.toString());
-        } catch (Exception e) {
-            logger.error(e.getMessage(), e);
-            parser.printUsage(System.err);
-            System.exit(1);
-        }
-        System.exit(0);
-    }
-
-    static Boolean checkExist(List<String> arr) {
-        Boolean exist = true;
-        for (String item : arr) {
-            if (!(new File(item).exists())) {
-                logger.error("Cannot find: " + item);
-                exist = false;
-            }
-        }
-        return exist;
-    }
-}
diff --git a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/predictor/PredictorExample.java b/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/predictor/PredictorExample.java
deleted file mode 100644
index c5d209998d32..000000000000
--- a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/predictor/PredictorExample.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnetexamples.javaapi.infer.predictor;
-
-import org.apache.mxnet.infer.javaapi.Predictor;
-import org.apache.mxnet.javaapi.*;
-import org.kohsuke.args4j.CmdLineParser;
-import org.kohsuke.args4j.Option;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.awt.image.BufferedImage;
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * This class is a demo showing how users can use the Predictor APIs to do
- * image classification with all hand-crafted pre-processing.
- * All helper functions for image pre-processing are
- * currently available in the ObjectDetector class.
- */
-public class PredictorExample {
-    @Option(name = "--model-path-prefix", usage = "input model directory and prefix of the model")
-    private String modelPathPrefix = "/model/ssd_resnet50_512";
-    @Option(name = "--input-image", usage = "the input image")
-    private String inputImagePath = "/images/dog.jpg";
-
-    final static Logger logger = LoggerFactory.getLogger(PredictorExample.class);
-    private static NDArray$ NDArray = NDArray$.MODULE$;
-
-    /**
-     * Helper method to print the class with the maximum prediction probability.
-     * @param probabilities The float array of probabilities
-     * @param modelPathPrefix model path prefix, needed to locate synset.txt
-     * @return the formatted top probability and class
-     */
-    private static String printMaximumClass(float[] probabilities,
-                                            String modelPathPrefix) throws IOException {
-        String synsetFilePath = modelPathPrefix.substring(0,
-                1 + modelPathPrefix.lastIndexOf(File.separator)) + "/synset.txt";
-        BufferedReader reader = new BufferedReader(new FileReader(synsetFilePath));
-        ArrayList<String> list = new ArrayList<>();
-        String line = reader.readLine();
-
-        while (line != null) {
-            list.add(line);
-            line = reader.readLine();
-        }
-        reader.close();
-
-        int maxIdx = 0;
-        for (int i = 1; i < probabilities.length; i++) {
-            if (probabilities[i] > probabilities[maxIdx]) {
-                maxIdx = i;
-            }
-        }
-
-        return "Probability : " + probabilities[maxIdx] + " Class : " + list.get(maxIdx);
-    }
-
-    public static void main(String[] args) {
-        PredictorExample inst = new PredictorExample();
-        CmdLineParser parser = new CmdLineParser(inst);
-        try {
-            parser.parseArgument(args);
-        } catch (Exception e) {
-            logger.error(e.getMessage(), e);
-            parser.printUsage(System.err);
-            System.exit(1);
-        }
-        // Prepare the model
-        List<Context> context = new ArrayList<Context>();
-        if (System.getenv().containsKey("SCALA_TEST_ON_GPU") &&
-                Integer.valueOf(System.getenv("SCALA_TEST_ON_GPU")) == 1) {
-            context.add(Context.gpu());
-        } else {
-            context.add(Context.cpu());
-        }
-        List<DataDesc> inputDesc = new ArrayList<>();
-        Shape inputShape = new Shape(new int[]{1, 3, 224, 224});
-        inputDesc.add(new DataDesc("data", inputShape, DType.Float32(), "NCHW"));
-        Predictor predictor = new Predictor(inst.modelPathPrefix, inputDesc, context, 0);
-        // Prepare data
-        NDArray img = Image.imRead(inst.inputImagePath, 1, true);
-        img = Image.imResize(img, 224, 224, null);
-        // predict
-        float[][] result = predictor.predict(new float[][]{img.toArray()});
-        try {
-            System.out.println("Predict with Float input");
-            System.out.println(printMaximumClass(result[0], inst.modelPathPrefix));
-        } catch (IOException e) {
-            System.err.println(e);
-        }
-        // predict with NDArray
-        NDArray nd = img;
-        nd = NDArray.transpose(nd, new Shape(new int[]{2, 0, 1}), null)[0];
-        nd = NDArray.expand_dims(nd, 0, null)[0];
-        nd = nd.asType(DType.Float32());
-        List<NDArray> ndList = new ArrayList<>();
-        ndList.add(nd);
-        List<NDArray> ndResult = predictor.predictWithNDArray(ndList);
-        try {
-            System.out.println("Predict with NDArray");
-            System.out.println(printMaximumClass(ndResult.get(0).toArray(), inst.modelPathPrefix));
-        } catch (IOException e) {
-            System.err.println(e);
-        }
-    }
-
-}
diff --git a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/predictor/README.md b/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/predictor/README.md
deleted file mode 100644
index 141d55a636d0..000000000000
--- a/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer/predictor/README.md
+++ /dev/null
@@ -1,78 +0,0 @@
-<!--- Licensed to the Apache Software Foundation (ASF) under one -->
-<!--- or more contributor license agreements.  See the NOTICE file -->
-<!--- distributed with this work for additional information -->
-<!--- regarding copyright ownership.  The ASF licenses this file -->
-<!--- to you under the Apache License, Version 2.0 (the -->
-<!--- "License"); you may not use this file except in compliance -->
-<!--- with the License.  You may obtain a copy of the License at -->
-<!--- -->
-<!---   http://www.apache.org/licenses/LICENSE-2.0 -->
-<!--- -->
-<!--- Unless required by applicable law or agreed to in writing, -->
-<!--- software distributed under the License is distributed on an -->
-<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
-<!--- KIND, either express or implied.  See the License for the -->
-<!--- specific language governing permissions and limitations -->
-<!--- under the License. -->
-
-# Image Classification using Java Predictor
-
-In this example, you will learn how to use the Java Inference API to
-load and run a pre-trained ResNet-18 model.
-
-## Contents
-
-1. [Prerequisites](#prerequisites)
-2. [Download artifacts](#download-artifacts)
-3. [Setup datapath and parameters](#setup-datapath-and-parameters)
-4. [Run the image classifier example](#run-the-image-classifier-example)
-
-## Prerequisites
-
-1. Build from source with [MXNet](https://mxnet.apache.org/install/index.html)
-2. [IntelliJ IDE (or alternative IDE) project setup](https://github.com/apache/incubator-mxnet/blob/master/docs/tutorials/java/mxnet_java_on_intellij.md) with the MXNet Java Package
-3. wget
-
-## Download Artifacts
-
-For this tutorial, you can get the model and sample input image by running the following bash script. This script will use `wget` to download these artifacts from AWS S3.
-
-From the `scala-package/examples/scripts/infer/imageclassifier/` folder run:
-
-```bash
-./get_resnet_18_data.sh
-```
-
-**Note**: You may need to run `chmod +x get_resnet_18_data.sh` before running this script.
-
-### Setup Datapath and Parameters
-
-The available arguments are as follows:
-
-| Argument                      | Comments                                 |
-| ----------------------------- | ---------------------------------------- |
-| `model-path-prefix`           | Folder path with prefix to the model (including json, params, and any synset file). |
-| `input-image`                 | The image to run inference on. |
-
-## Run the image classifier example
-
-After the previous steps, you should be able to run the code using the following script, which will pass all of the required parameters to the Predictor API.
-
-From the `scala-package/examples/scripts/infer/predictor/` folder run:
-
-```bash
-bash run_predictor_java_example.sh ../models/resnet-18/resnet-18 ../images/kitten.jpg
-```
-
-**Notes**:
-* These are relative paths to this script.
-* You may need to run `chmod +x run_predictor_java_example.sh` before running this script.
-
-The example should give an output similar to the one shown below:
-```
-Predict with Float input
-Probability : 0.30337515 Class : n02123159 tiger cat
-Predict with NDArray
-Probability : 0.30337515 Class : n02123159 tiger cat
-```
-The outputs are for the input image, with the top 1 prediction picked.
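The `transpose` and `expand_dims` calls in `PredictorExample` above convert the decoded image from HWC layout to a batched NCHW tensor before prediction. For readers reimplementing that pre-processing by hand, here is a plain-Java sketch of the same layout change (a hypothetical helper, independent of the MXNet API, assuming an RGB `BufferedImage` already resized to the model's input size):

```java
import java.awt.image.BufferedImage;

public class LayoutUtil {
    // Convert an RGB image from HWC pixel layout to a flat NCHW array of shape (1, 3, H, W):
    // three contiguous channel planes, each laid out row by row.
    static float[] toNCHW(BufferedImage img) {
        int h = img.getHeight(), w = img.getWidth();
        float[] out = new float[3 * h * w];  // the leading batch dimension of 1 is implicit
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                int rgb = img.getRGB(x, y);
                out[0 * h * w + y * w + x] = (rgb >> 16) & 0xFF;  // red plane
                out[1 * h * w + y * w + x] = (rgb >> 8) & 0xFF;   // green plane
                out[2 * h * w + y * w + x] = rgb & 0xFF;          // blue plane
            }
        }
        return out;
    }
}
```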
diff --git a/scala-package/examples/src/main/resources/log4j.properties b/scala-package/examples/src/main/resources/log4j.properties deleted file mode 100644 index ef523cb7bc4f..000000000000 --- a/scala-package/examples/src/main/resources/log4j.properties +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# for development debugging -log4j.rootLogger = info, stdout - -log4j.appender.stdout = org.apache.log4j.ConsoleAppender -log4j.appender.stdout.Target = System.out -log4j.appender.stdout.layout = org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} [%t] [%c] [%p] - %m%n diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/InferBase.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/InferBase.scala deleted file mode 100644 index 36a85c227f6e..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/InferBase.scala +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples - -import org.apache.mxnet._ - -trait InferBase { - - def loadModel(context: Array[Context], batchInference : Boolean): Any - def loadSingleData(): Any - def loadBatchFileList(batchSize: Int): List[Any] - def loadInputBatch(source: Any): Any - def runSingleInference(loadedModel: Any, input: Any): Any - def runBatchInference(loadedModel: Any, input: Any): Any -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/Util.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/Util.scala deleted file mode 100644 index dba343160bff..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/Util.scala +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnetexamples
-
-import java.io.File
-import java.net.URL
-
-import org.apache.commons.io.FileUtils
-
-object Util {
-
-  def downloadUrl(url: String, filePath: String, maxRetry: Int = 3) : Unit = {
-    val tmpFile = new File(filePath)
-    var retry = maxRetry
-    var success = false
-    if (!tmpFile.exists()) {
-      while (retry > 0 && !success) {
-        try {
-          FileUtils.copyURLToFile(new URL(url), tmpFile)
-          success = true
-        } catch {
-          case e: Exception => retry -= 1
-        }
-      }
-    } else {
-      success = true
-    }
-    if (!success) throw new Exception(s"$url Download failed!")
-  }
-}
diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/benchmark/README.md b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/benchmark/README.md
deleted file mode 100644
index 2b32dd6664bc..000000000000
--- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/benchmark/README.md
+++ /dev/null
@@ -1,83 +0,0 @@
-<!--- Licensed to the Apache Software Foundation (ASF) under one -->
-<!--- or more contributor license agreements.  See the NOTICE file -->
-<!--- distributed with this work for additional information -->
-<!--- regarding copyright ownership.  The ASF licenses this file -->
-<!--- to you under the Apache License, Version 2.0 (the -->
-<!--- "License"); you may not use this file except in compliance -->
-<!--- with the License.  You may obtain a copy of the License at -->
-<!--- -->
-<!---   http://www.apache.org/licenses/LICENSE-2.0 -->
-<!--- -->
-<!--- Unless required by applicable law or agreed to in writing, -->
-<!--- software distributed under the License is distributed on an -->
-<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
-<!--- KIND, either express or implied.  See the License for the -->
-<!--- specific language governing permissions and limitations -->
-<!--- under the License. -->
-
-# Benchmarking Scala Inference APIs
-
-This folder contains a base class [ScalaInferenceBenchmark](https://github.com/apache/incubator-mxnet/tree/master/scala-package/examples/src/main/scala/org/apache/mxnetexamples/benchmark/) and provides a mechanism for benchmarking [MXNet Inference APIs](https://github.com/apache/incubator-mxnet/tree/master/scala-package/infer) in Scala.
-The benchmarking scripts provided run an experiment for single inference calls and batch inference calls. They collect the time taken to perform an inference operation and emit the P99, P90, P50, and average values for these metrics. One can easily add new examples to, or modify existing examples in, the ScalaInferenceBenchmark framework in order to get the benchmark numbers for inference calls.
-Currently the ScalaInferenceBenchmark script supports two Scala examples:
-1. [ImageClassification using ResNet-152](https://github.com/apache/incubator-mxnet/blob/master/scala-package/mxnet-demo/src/main/scala/sample/ImageClassificationExample.scala)
-2. [Object Detection Example](https://github.com/apache/incubator-mxnet/blob/master/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala)
-
-This script can easily be placed in an automated environment to run benchmark regressions on the Scala APIs. The script automatically detects whether it is running on a CPU machine or on a GPU machine and uses the appropriate context.
-
-## Contents
-
-1. [Prerequisites](#prerequisites)
-2. [Scripts](#scripts)
-
-## Prerequisites
-
-1. MXNet
-2. MXNet Scala Package
-3. [IntelliJ IDE (or alternative IDE) project setup](https://mxnet.apache.org/api/scala/docs/tutorials/mxnet_scala_on_intellij) with the MXNet Scala Package
-4. Model files and datasets for the model one will try to benchmark
-
-## Scripts
-To help you easily run the benchmarks, a starter shell script has been provided for each of the examples mentioned above. The percentile metrics these scripts report are computed as sketched below.
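The P50/P90/P99 numbers reported by these scripts are nearest-rank percentiles over the recorded per-call latencies, as computed by the `percentile` helper in `ScalaInferenceBenchmark.scala`. A small Java sketch of the same computation (the latency values are made up for illustration):

```java
import java.util.Arrays;

public class LatencyStats {
    // Nearest-rank percentile, mirroring ScalaInferenceBenchmark.percentile:
    // k = ceil((n - 1) * p / 100) over the sorted latencies.
    static long percentile(int p, long[] latenciesMs) {
        long[] sorted = latenciesMs.clone();
        Arrays.sort(sorted);
        int k = (int) Math.ceil((sorted.length - 1) * (p / 100.0));
        return sorted[k];
    }

    public static void main(String[] args) {
        long[] times = {12, 15, 11, 240, 13, 14, 12, 16, 13, 12};  // made-up latencies in ms
        System.out.printf("p99 %d, p90 %d, p50 %d, average %.2f%n",
                percentile(99, times), percentile(90, times), percentile(50, times),
                Arrays.stream(times).average().orElse(0));
    }
}
```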
-The scripts can be found [here](https://github.com/apache/incubator-mxnet/blob/master/scala-package/examples/scripts/benchmark).
-Each script takes some parameters as input; details can be found either in the bash scripts or in the example classes themselves.
-
-* *ImageClassification Example*
-The following shows an example of running ImageClassifierExample under the benchmark script. The script takes as parameters the platform type (cpu/gpu), the number of iterations for inference calls, the batch size for batch inference calls, the model path, the input file, and the input directory.
-For more details on running ImageClassifierExample as a standalone file, refer to the [README](https://github.com/apache/incubator-mxnet/blob/master/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/README.md) for ImageClassifierExample.
-You may need to run ```chmod u+x run_image_inference_bm.sh``` before running this script.
-  ```bash
-  cd <mxnet_root>/scala-package/examples/scripts/infer/imageclassifier
-  ./get_resnet_data.sh
-  cd <mxnet_root>/scala-package/examples/scripts/benchmark
-  ./run_image_inference_bm.sh gpu ImageClassifierExample 100 10 ../infer/models/resnet-152/resnet-152 ../infer/images/kitten.jpg ../infer/images/
-  ./run_image_inference_bm.sh cpu ImageClassifierExample 100 10 ../infer/models/resnet-152/resnet-152 ../infer/images/kitten.jpg ../infer/images/
-  ```
-  Upon running this script, you might see an output like this:
-  ```
-  [main] INFO org.apache.mxnetexamples.benchmark.CLIParserBase -
-  single_inference_latency p99 1663, single_inference_p50 729, single_inference_average 755.17
-  ...
-  INFO org.apache.mxnetexamples.benchmark.CLIParserBase -
-  batch_inference_latency p99 4241, batch_inference_p50 4241, batch_inference_average 4241.00
-  ```
-
-* *Object Detection Example*
-The following shows an example of running SSDClassifierExample under the benchmark script. The script takes as parameters the platform type (cpu/gpu), the number of iterations for inference calls, the batch size for batch inference calls, the model path, the input file, and the input directory.
-For more details on running SSDClassifierExample as a standalone file, refer to the [README](https://github.com/apache/incubator-mxnet/blob/master/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/README.md) for SSDClassifierExample.
-You may need to run ```chmod u+x run_image_inference_bm.sh``` before running this script.
-  ```bash
-  cd <mxnet_root>/scala-package/examples/scripts/infer/objectdetector
-  ./get_ssd_data.sh
-  cd <mxnet_root>/scala-package/examples/scripts/benchmark
-  ./run_image_inference_bm.sh cpu ObjectDetectionExample 100 10 ../infer/models/resnet50_ssd/resnet50_ssd_model ../infer/images/dog.jpg ../infer/images/
-  ```
-  Upon running this script, you might see an output like this:
-  ```
-  [main] INFO org.apache.mxnetexamples.benchmark.CLIParserBase -
-  single_inference_latency p99 1663, single_inference_p50 729, single_inference_average 755.17
-  ...
-  INFO org.apache.mxnetexamples.benchmark.CLIParserBase -
-  batch_inference_latency p99 4241, batch_inference_p50 4241, batch_inference_average 4241.00
-  ```
diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/benchmark/ScalaInferenceBenchmark.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/benchmark/ScalaInferenceBenchmark.scala
deleted file mode 100644
index ba6c4b8ca7a9..000000000000
--- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/benchmark/ScalaInferenceBenchmark.scala
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnetexamples.benchmark
-
-import org.apache.mxnetexamples.InferBase
-import org.apache.mxnetexamples.infer.imageclassifier.ImageClassifierExample
-import org.apache.mxnet._
-import org.apache.mxnetexamples.infer.objectdetector.SSDClassifierExample
-import org.kohsuke.args4j.{CmdLineParser, Option}
-import org.slf4j.LoggerFactory
-
-import scala.collection.JavaConverters._
-
-object ScalaInferenceBenchmark {
-
-  private val logger = LoggerFactory.getLogger(classOf[CLIParserBase])
-
-  def loadModel(objectToRun: InferBase, context: Array[Context], batchInference: Boolean):
-  Any = {
-    objectToRun.loadModel(context, batchInference)
-  }
-
-  def loadDataSet(objectToRun: InferBase):
-  Any = {
-    objectToRun.loadSingleData()
-  }
-
-  def loadBatchDataSet(objectToRun: InferBase, batchSize: Int):
-  List[Any] = {
-    objectToRun.loadBatchFileList(batchSize)
-  }
-
-  def runInference(objectToRun: InferBase, loadedModel: Any, dataSet: Any, totalRuns: Int):
-  List[Long] = {
-    var inferenceTimes: List[Long] = List()
-    for (i <- 1 to totalRuns) {
-      ResourceScope.using() {
-        val startTimeSingle = System.currentTimeMillis()
-        objectToRun.runSingleInference(loadedModel, dataSet)
-        val estimatedTimeSingle = System.currentTimeMillis() - startTimeSingle
-        inferenceTimes = estimatedTimeSingle :: inferenceTimes
-        logger.info("Inference time at iteration: %d is : %d \n".format(i, estimatedTimeSingle))
-      }
-    }
-
-    inferenceTimes
-  }
-
-  def runBatchInference(objectToRun: InferBase, loadedModel: Any, dataSetBatches: List[Any]):
-  List[Long] = {
-
-    var inferenceTimes: List[Long] = List()
-    for (batch <- dataSetBatches) {
-      ResourceScope.using() {
-        val loadedBatch = objectToRun.loadInputBatch(batch)
-        val startTimeSingle = System.currentTimeMillis()
-        objectToRun.runBatchInference(loadedModel, loadedBatch)
-        val estimatedTimeSingle = System.currentTimeMillis() - startTimeSingle
-        inferenceTimes = estimatedTimeSingle :: inferenceTimes
-        logger.info("Batch Inference time is : %d \n".format(estimatedTimeSingle))
-      }
-    }
-
-    inferenceTimes
-  }
-
-  def percentile(p: Int, seq: Seq[Long]): Long = {
-    val sorted = seq.sorted
-    val k = math.ceil((seq.length - 1) * (p / 100.0)).toInt
-    sorted(k)
-  }
-
-  def printStatistics(inferenceTimes: List[Long], metricsPrefix: String) {
-
-    val times: Seq[Long] = inferenceTimes
-    val p50 = percentile(50, times)
-    val p99 = percentile(99, times)
-    val p90 = percentile(90, times)
-    val average = times.sum / (times.length * 1.0)
-
-    logger.info("\n%s_p99 %d, %s_p90 %d, %s_p50 %d, %s_average %1.2f".format(metricsPrefix,
-      p99, metricsPrefix, p90, metricsPrefix, p50, metricsPrefix, average))
-
-  }
-
-  def main(args: Array[String]): Unit = {
-
-    var context = Context.cpu()
-    if (System.getenv().containsKey("SCALA_TEST_ON_GPU") &&
-      System.getenv("SCALA_TEST_ON_GPU").toInt == 1) {
-      context = Context.gpu()
-    }
-    var baseCLI: CLIParserBase = null
-    try {
-      val exampleName = args(1)
-      val exampleToBenchmark: InferBase = exampleName match {
-        case "ImageClassifierExample" => {
-          val imParser = new org.apache.mxnetexamples.infer.imageclassifier.CLIParser
-          baseCLI = imParser
-          val parsedVals = new CmdLineParser(imParser).parseArgument(args.toList.asJava)
-          new ImageClassifierExample(imParser)
-        }
-        case "ObjectDetectionExample" => {
-          val imParser = new org.apache.mxnetexamples.infer.objectdetector.CLIParser
-          baseCLI = imParser
-          val parsedVals = new CmdLineParser(imParser).parseArgument(args.toList.asJava)
-          new SSDClassifierExample(imParser)
-        }
-        case _ => throw new Exception("Invalid example name to run")
Exception("Invalid example name to run") - } - - logger.info("Running single inference call") - // Benchmarking single inference call - ResourceScope.using() { - val loadedModel = loadModel(exampleToBenchmark, context, false) - val dataSet = loadDataSet(exampleToBenchmark) - val inferenceTimes = runInference(exampleToBenchmark, loadedModel, dataSet, baseCLI.count) - printStatistics(inferenceTimes, "single_inference") - } - - if (baseCLI.batchSize != 0) { - logger.info("Running for batch inference call") - // Benchmarking batch inference call - ResourceScope.using() { - val loadedModel = loadModel(exampleToBenchmark, context, true) - val batchDataSet = loadBatchDataSet(exampleToBenchmark, baseCLI.batchSize) - val inferenceTimes = runBatchInference(exampleToBenchmark, loadedModel, batchDataSet) - printStatistics(inferenceTimes, "batch_inference") - } - } - - } catch { - case ex: Exception => { - logger.error(ex.getMessage, ex) - new CmdLineParser(baseCLI).printUsage(System.err) - sys.exit(1) - } - } - } - -} - -class CLIParserBase { - @Option(name = "--example", usage = "The scala example to benchmark") - val exampleName: String = "ImageClassifierExample" - @Option(name = "--count", usage = "number of times to run inference") - val count: Int = 1000 - @Option(name = "--batchSize", usage = "BatchSize to run batchinference calls", required = false) - val batchSize: Int = 0 -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/Data.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/Data.scala deleted file mode 100644 index d61269c131ff..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/Data.scala +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnetexamples.customop - -import org.apache.mxnet.{DataIter, IO, Shape} - -object Data { - // return train and val iterators for mnist - def mnistIterator(dataPath: String, batchSize: Int, inputShape: Shape): (DataIter, DataIter) = { - val flat = if (inputShape.length == 3) "False" else "True" - val trainParams = Map( - "image" -> s"$dataPath/train-images-idx3-ubyte", - "label" -> s"$dataPath/train-labels-idx1-ubyte", - "input_shape" -> inputShape.toString(), - "batch_size" -> s"$batchSize", - "shuffle" -> "True", - "flat" -> flat - ) - val trainDataIter = IO.MNISTIter(trainParams) - val testParams = Map( - "image" -> s"$dataPath/t10k-images-idx3-ubyte", - "label" -> s"$dataPath/t10k-labels-idx1-ubyte", - "input_shape" -> inputShape.toString(), - "batch_size" -> s"$batchSize", - "flat" -> flat - ) - val testDataIter = IO.MNISTIter(testParams) - (trainDataIter, testDataIter) - } -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/ExampleCustomOp.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/ExampleCustomOp.scala deleted file mode 100644 index 42922f212c11..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/ExampleCustomOp.scala +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnetexamples.customop - -import org.apache.mxnet.Callback.Speedometer -import org.apache.mxnet.DType.DType -import org.apache.mxnet.{Accuracy, Context, CustomOp, CustomOpProp, NDArray, Operator, ResourceScope, Shape, Symbol, Xavier} -import org.apache.mxnet.optimizer.RMSProp -import org.kohsuke.args4j.{CmdLineParser, Option} -import org.slf4j.LoggerFactory - -import scala.collection.JavaConverters._ -import scala.collection.mutable - -/** - * Example of CustomOp - */ -object ExampleCustomOp { - private val logger = LoggerFactory.getLogger(classOf[ExampleCustomOp]) - - class Softmax(_param: Map[String, String]) extends CustomOp { - - override def forward(sTrain: Boolean, req: Array[String], inData: Array[NDArray], - outData: Array[NDArray], aux: Array[NDArray]): Unit = { - val xShape = inData(0).shape - val x = inData(0).toArray.grouped(xShape(1)).toArray - val yArr = x.map { it => - val max = it.max - val tmp = it.map(e => Math.exp(e.toDouble - max).toFloat) - val sum = tmp.sum - tmp.map(_ / sum) - }.flatten - val y = NDArray.empty(xShape, outData(0).context) - y.set(yArr) - this.assign(outData(0), req(0), y) - y.dispose() - } - - override def backward(req: Array[String], outGrad: Array[NDArray], - inData: Array[NDArray], outData: Array[NDArray], - inGrad: Array[NDArray], aux: Array[NDArray]): Unit = { - val l = inData(1).toArray.map(_.toInt) - val oShape = outData(0).shape - val yArr = outData(0).toArray.grouped(oShape(1)).toArray - l.indices.foreach { i => - yArr(i)(l(i)) -= 1.0f - } - val y = NDArray.empty(oShape, inGrad(0).context) - y.set(yArr.flatten) - this.assign(inGrad(0), req(0), y) - y.dispose() - } - } - - class SoftmaxProp(needTopGrad: Boolean = false) - extends CustomOpProp(needTopGrad) { - - override def listArguments(): Array[String] = Array("data", "label") - - override def listOutputs(): Array[String] = Array("output") - - override def inferShape(inShape: Array[Shape]): - (Array[Shape], Array[Shape], Array[Shape]) = { - val dataShape = inShape(0) - val labelShape = Shape(dataShape(0)) - val outputShape = dataShape - (Array(dataShape, labelShape), Array(outputShape), null) - } - - override def inferType(inType: Array[DType]): - (Array[DType], Array[DType], Array[DType]) = { - (inType, inType.take(1), null) - } - - override def createOperator(ctx: String, inShapes: Array[Array[Int]], - inDtypes: Array[Int]): CustomOp = new Softmax(this.kwargs) - } - - Operator.register("softmax", new SoftmaxProp) - - def test(dataPath : String, ctx : Context) : Float = { - val data = Symbol.Variable("data") - val label = Symbol.Variable("label") - val fc1 = Symbol.api.FullyConnected(data = Some(data), num_hidden = 128, name = "fc1") - val act1 = Symbol.api.Activation (data = Some(fc1), "relu", name = "relu") - val fc2 = Symbol.api.FullyConnected(Some(act1), None, None, 64, name = "fc2") - val act2 = Symbol.api.Activation(data = Some(fc2), "relu", name = "relu2") - val fc3 = Symbol.api.FullyConnected(Some(act2), None, None, 10, name = "fc3") - val kwargs = mutable.Map[String, Any]("label" -> label, "data" -> fc3) - val mlp = Symbol.api.Custom(op_type = "softmax", name = "softmax", kwargs = kwargs) - - val (trainIter, testIter) = - Data.mnistIterator(dataPath, batchSize = 100, inputShape = Shape(784)) - - val datasAndLabels = trainIter.provideDataDesc ++ trainIter.provideLabelDesc - val (argShapes, outputShapes, auxShapes) = mlp.inferShape(datasAndLabels) - - val initializer = new Xavier(factorType = "in", magnitude = 2.34f) - val argNames = mlp.listArguments() - 
val argDict = argNames.zip(argShapes.map(s => NDArray.empty(s, ctx))).toMap - - val gradDict = argNames.zip(argShapes).filter { case (name, shape) => - !datasAndLabels.contains(name) - }.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap - - argDict.foreach { case (name, ndArray) => - if (!datasAndLabels.contains(name)) { - initializer.initWeight(name, ndArray) - } - } - - val executor = mlp.bind(ctx, argDict, gradDict) - val lr = 0.001f - val opt = new RMSProp(learningRate = lr, wd = 0.00001f) - val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) => - (idx, name, grad, opt.createState(idx, argDict(name))) - } - - val evalMetric = new Accuracy - val batchEndCallback = new Speedometer(100, 100) - val numEpoch = 10 - var validationAcc = 0.0f - - for (epoch <- 0 until numEpoch) { - val tic = System.currentTimeMillis - evalMetric.reset() - var nBatch = 0 - var epochDone = false - ResourceScope.using() { - trainIter.reset() - while (!epochDone) { - var doReset = true - while (doReset && trainIter.hasNext) { - val dataBatch = trainIter.next() - argDict("data").set(dataBatch.data(0)) - argDict("label").set(dataBatch.label(0)) - executor.forward(isTrain = true) - executor.backward() - paramsGrads.foreach { case (idx, name, grad, optimState) => - opt.update(idx, argDict(name), grad, optimState) - } - evalMetric.update(dataBatch.label, executor.outputs) - nBatch += 1 - batchEndCallback.invoke(epoch, nBatch, evalMetric) - } - if (doReset) { - trainIter.reset() - } - epochDone = true - } - val (name, value) = evalMetric.get - name.zip(value).foreach { case (n, v) => - logger.info(s"Epoch[$epoch] Train-accuracy=$v") - } - val toc = System.currentTimeMillis - logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") - - evalMetric.reset() - testIter.reset() - while (testIter.hasNext) { - val evalBatch = testIter.next() - argDict("data").set(evalBatch.data(0)) - argDict("label").set(evalBatch.label(0)) - executor.forward(isTrain = true) - evalMetric.update(evalBatch.label, executor.outputs) - evalBatch.dispose() - } - val (names, values) = evalMetric.get - names.zip(values).foreach { case (n, v) => - logger.info(s"Epoch[$epoch] Validation-accuracy=$v") - validationAcc = Math.max(validationAcc, v) - } - } - } - executor.dispose() - validationAcc - } - - def main(args: Array[String]): Unit = { - val leop = new ExampleCustomOp - val parser: CmdLineParser = new CmdLineParser(leop) - try { - parser.parseArgument(args.toList.asJava) - assert(leop.dataPath != null) - - val ctx = if (leop.gpu >= 0) Context.gpu(0) else Context.cpu() - - val dataName = Array("data") - val labelName = Array("softmax_label") - test(leop.dataPath, ctx) - - } catch { - case ex: Exception => { - logger.error(ex.getMessage, ex) - parser.printUsage(System.err) - sys.exit(1) - } - } - } -} - -class ExampleCustomOp { - @Option(name = "--data-path", usage = "the mnist data path") - private val dataPath: String = null - @Option(name = "--gpu", usage = "which gpu card to use, default is -1, means using cpu") - private val gpu: Int = -1 -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/ExampleCustomOpWithRtc.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/ExampleCustomOpWithRtc.scala deleted file mode 100644 index 7b0fb349373d..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/ExampleCustomOpWithRtc.scala +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * 
contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.customop - -import org.apache.mxnet.Callback.Speedometer -import org.apache.mxnet.DType.DType -import org.apache.mxnet.{Accuracy, Context, CustomOp, CustomOpProp, NDArray, Operator, Rtc, Shape, Symbol, Xavier} -import org.apache.mxnet.optimizer.RMSProp -import org.kohsuke.args4j.{CmdLineParser, Option} -import org.slf4j.LoggerFactory - -import scala.collection.JavaConverters._ -import scala.collection.mutable - -/** - * Example of CustomOp with Rtc - */ -object ExampleCustomOpWithRtc { - private val logger = LoggerFactory.getLogger(classOf[ExampleCustomOpWithRtc]) - - class Softmax(_param: Map[String, String]) extends CustomOp { - - private var fwdKernel: Rtc = null - private var bwdKernel: Rtc = null - - override def forward(sTrain: Boolean, req: Array[String], - inData: Array[NDArray], outData: Array[NDArray], aux: Array[NDArray]): Unit = { - if (fwdKernel == null) { - fwdKernel = new Rtc("softmax", Array(("x", inData(0))), Array(("y", outData(0))), """ - int i = threadIdx.x + blockIdx.x*blockDim.x; - float max_x = x[i*x_dims[1]]; - for (int j = 1; j < x_dims[1]; ++j) { - if (max_x < x[i*x_dims[1]+j]) { - max_x = x[i*x_dims[1]+j]; - } - } - float sum = 0.0f; - for (int j = 0; j < x_dims[1]; ++j) { - sum += expf(x[i*x_dims[1]+j]-max_x); - } - for (int j = 0; j < x_dims[1]; ++j) { - y[i*x_dims[1]+j] = expf(x[i*x_dims[1]+j]-max_x)/sum; - } - """) - } - val blockDim = { - val dimStr = this._param("forwardBlockDim") - val dims = dimStr.substring(1, dimStr.length() - 1).split(",").map(_.toInt) - (dims(0), dims(1), dims(2)) - } - fwdKernel.push(Array(inData(0)), Array(outData(0)), blockDim, (inData(0).shape(0), 1, 1)) - } - - override def backward(req: Array[String], outGrad: Array[NDArray], - inData: Array[NDArray], outData: Array[NDArray], - inGrad: Array[NDArray], aux: Array[NDArray]): Unit = { - val l = inData(1) - val y = outData(0) - val dx = inGrad(0) - if (bwdKernel == null) { - bwdKernel = new Rtc("softmax_grad", Array(("y", y), ("l", l)), Array(("dx", dx)), """ - int i = blockIdx.x; - int j = threadIdx.x; - int k = static_cast(l[i]); - if (j == k) { - dx[i*dx_dims[1]+j] = y[i*dx_dims[1]+j] - 1.0f; - } else { - dx[i*dx_dims[1]+j] = y[i*dx_dims[1]+j]; - } - """) - } - bwdKernel.push(Array(y, l), Array(dx), (y.shape(0), 1, 1), (y.shape(1), 1, 1)) - } - } - - class SoftmaxProp(needTopGrad: Boolean = false) - extends CustomOpProp(needTopGrad) { - - override def listArguments(): Array[String] = Array("data", "label") - - override def listOutputs(): Array[String] = Array("output") - - override def inferShape(inShape: Array[Shape]): - (Array[Shape], Array[Shape], Array[Shape]) = { - val dataShape = inShape(0) - val labelShape = Shape(dataShape(0)) - val outputShape = dataShape - (Array(dataShape, labelShape), Array(outputShape), null) - } 
- - override def inferType(inType: Array[DType]): - (Array[DType], Array[DType], Array[DType]) = { - (inType, inType.take(1), null) - } - - override def createOperator(ctx: String, inShapes: Array[Array[Int]], - inDtypes: Array[Int]): CustomOp = new Softmax(this.kwargs) - } - - Operator.register("softmax", new SoftmaxProp) - - def test(dataPath : String, ctx : Context) : Float = { - val data = Symbol.Variable("data") - val label = Symbol.Variable("label") - val fc1 = Symbol.api.FullyConnected(data = Some(data), num_hidden = 128, name = "fc1") - val act1 = Symbol.api.Activation (data = Some(fc1), "relu", name = "relu") - val fc2 = Symbol.api.FullyConnected(Some(act1), None, None, 64, name = "fc2") - val act2 = Symbol.api.Activation(data = Some(fc2), "relu", name = "relu2") - val fc3 = Symbol.api.FullyConnected(Some(act2), None, None, 10, name = "fc3") - val kwargs = mutable.Map[String, Any]("label" -> label, "data" -> fc3, - "forwardBlockDim" -> new Shape(1, 1, 1)) - val mlp = Symbol.api.Custom(op_type = "softmax", name = "softmax", kwargs = kwargs) - - val (trainIter, testIter) = - Data.mnistIterator(dataPath, batchSize = 100, inputShape = Shape(784)) - val datasAndLabels = trainIter.provideDataDesc ++ trainIter.provideLabelDesc - val (argShapes, outputShapes, auxShapes) = mlp.inferShape(datasAndLabels) - - val initializer = new Xavier(factorType = "in", magnitude = 2.34f) - - val argNames = mlp.listArguments() - val argDict = argNames.zip(argShapes.map(s => NDArray.empty(s, ctx))).toMap - val gradDict = argNames.zip(argShapes).filter { case (name, shape) => - !datasAndLabels.contains(name) - }.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap - argDict.foreach { case (name, ndArray) => - if (!datasAndLabels.contains(name)) { - initializer.initWeight(name, ndArray) - } - } - - val executor = mlp.bind(ctx, argDict, gradDict) - val lr = 0.001f - val opt = new RMSProp(learningRate = lr, wd = 0.00001f) - val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) => - (idx, name, grad, opt.createState(idx, argDict(name))) - } - val evalMetric = new Accuracy - val batchEndCallback = new Speedometer(100, 100) - val numEpoch = 10 - var validationAcc = 0.0f - - for (epoch <- 0 until numEpoch) { - val tic = System.currentTimeMillis - evalMetric.reset() - var nBatch = 0 - var epochDone = false - - trainIter.reset() - while (!epochDone) { - var doReset = true - while (doReset && trainIter.hasNext) { - val dataBatch = trainIter.next() - argDict("data").set(dataBatch.data(0)) - argDict("label").set(dataBatch.label(0)) - executor.forward(isTrain = true) - executor.backward() - paramsGrads.foreach { case (idx, name, grad, optimState) => - opt.update(idx, argDict(name), grad, optimState) - } - evalMetric.update(dataBatch.label, executor.outputs) - nBatch += 1 - batchEndCallback.invoke(epoch, nBatch, evalMetric) - } - if (doReset) { - trainIter.reset() - } - epochDone = true - } - val (name, value) = evalMetric.get - name.zip(value).foreach { case (n, v) => - logger.info(s"Epoch[$epoch] Train-accuracy=$v") - } - val toc = System.currentTimeMillis - logger.info(s"Epoch[$epoch] Time cost=${toc - tic}") - - evalMetric.reset() - testIter.reset() - while (testIter.hasNext) { - val evalBatch = testIter.next() - argDict("data").set(evalBatch.data(0)) - argDict("label").set(evalBatch.label(0)) - executor.forward(isTrain = true) - evalMetric.update(evalBatch.label, executor.outputs) - evalBatch.dispose() - } - val (names, values) = evalMetric.get - names.zip(values).foreach { case (n, v) => - 
logger.info(s"Epoch[$epoch] Validation-accuracy=$v")
-        validationAcc = Math.max(validationAcc, v)
-      }
-    }
-    executor.dispose()
-    validationAcc
-  }
-
-  def main(args: Array[String]): Unit = {
-    val leop = new ExampleCustomOpWithRtc
-    val parser: CmdLineParser = new CmdLineParser(leop)
-    try {
-      parser.parseArgument(args.toList.asJava)
-      assert(leop.dataPath != null && leop.gpu >= 0)
-
-      val ctx = Context.gpu(leop.gpu)
-
-      val dataName = Array("data")
-      val labelName = Array("softmax_label")
-      test(leop.dataPath, ctx)
-
-    } catch {
-      case ex: Exception => {
-        logger.error(ex.getMessage, ex)
-        parser.printUsage(System.err)
-        sys.exit(1)
-      }
-    }
-  }
-}
-
-class ExampleCustomOpWithRtc {
-  @Option(name = "--data-path", usage = "the mnist data path")
-  private val dataPath: String = null
-  @Option(name = "--gpu", usage = "which gpu card to use, default is 0; this example requires a gpu")
-  private val gpu: Int = 0
-}
diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/README.md b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/README.md
deleted file mode 100644
index a3952aabfb44..000000000000
--- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/customop/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# Custom Operator Example for Scala
-This example shows how to use a custom operator with the type-safe Scala API.
-In the example, a `Softmax` operator is implemented to run the MNIST example.
-
-There is also an example using RTC. However, the rtc module is deprecated and can no longer be used. Contributions that port it to the CudaModule operator are welcome.
-
-## Setup
-### Download the source File
-```bash
-https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/mnist/mnist.zip
-```
-### Unzip the file
-```bash
-unzip mnist.zip
-```
-### Argument Configuration
-Then you need to define the arguments that you would like to pass to the model:
-```bash
---data-path
-```
-
-you can find more details [here](https://github.com/apache/incubator-mxnet/blob/scala-package/examples/src/main/scala/org/apache/mxnet/examples/customop/ExampleCustomOp.scala#L218-L221)
\ No newline at end of file
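The pattern the deleted example implements outlives the RTC module: a `CustomOpProp` declares the arguments, outputs, and shapes; a `CustomOp` implements `forward`/`backward`; and `Operator.register` plus `Symbol.api.Custom` splice it into a graph. Below is a minimal CPU-only sketch of that pattern, with hypothetical names `CpuSoftmax` and `cpu_softmax`; it assumes the `assign` helper on the Scala `CustomOp` base class and uses plain Scala arrays in place of the RTC kernels:

```scala
import scala.collection.mutable
import org.apache.mxnet.{CustomOp, CustomOpProp, NDArray, Operator, Shape, Symbol}

object CpuSoftmaxSketch {

  class CpuSoftmax(param: Map[String, String]) extends CustomOp {

    override def forward(isTrain: Boolean, req: Array[String],
                         inData: Array[NDArray], outData: Array[NDArray],
                         aux: Array[NDArray]): Unit = {
      val shape = inData(0).shape
      // Row-wise softmax on the CPU, with the usual max-subtraction for stability.
      val y = inData(0).toArray.grouped(shape(1)).flatMap { row =>
        val m = row.max
        val e = row.map(v => math.exp(v - m).toFloat)
        val s = e.sum
        e.map(_ / s)
      }.toArray
      val out = NDArray.empty(shape, outData(0).context)
      out.set(y)
      this.assign(outData(0), req(0), out)  // honor the requested write mode
      out.dispose()
    }

    override def backward(req: Array[String], outGrad: Array[NDArray],
                          inData: Array[NDArray], outData: Array[NDArray],
                          inGrad: Array[NDArray], aux: Array[NDArray]): Unit = {
      val shape = outData(0).shape
      val label = inData(1).toArray
      // dL/dx = y - onehot(label), the same gradient the RTC kernel computed.
      val dx = outData(0).toArray.grouped(shape(1)).toArray.zipWithIndex.flatMap {
        case (row, i) =>
          row.zipWithIndex.map { case (v, j) => if (j == label(i).toInt) v - 1.0f else v }
      }
      val grad = NDArray.empty(shape, inGrad(0).context)
      grad.set(dx)
      this.assign(inGrad(0), req(0), grad)
      grad.dispose()
    }
  }

  class CpuSoftmaxProp extends CustomOpProp(needTopGrad = false) {
    override def listArguments(): Array[String] = Array("data", "label")
    override def listOutputs(): Array[String] = Array("output")
    override def inferShape(inShape: Array[Shape]):
      (Array[Shape], Array[Shape], Array[Shape]) = {
      val dataShape = inShape(0)
      // label is one scalar per row; output has the same shape as the data.
      (Array(dataShape, Shape(dataShape(0))), Array(dataShape), null)
    }
    override def createOperator(ctx: String, inShapes: Array[Array[Int]],
                                inDtypes: Array[Int]): CustomOp = new CpuSoftmax(this.kwargs)
  }

  def buildNet(logits: Symbol, label: Symbol): Symbol = {
    Operator.register("cpu_softmax", new CpuSoftmaxProp)
    Symbol.api.Custom(op_type = "cpu_softmax", name = "softmax",
      kwargs = mutable.Map[String, Any]("data" -> logits, "label" -> label))
  }
}
```

If your MXNet version lacks `assign`, writing through `outData(0).set(...)` covers the common write-request case.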
diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExample.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExample.scala
deleted file mode 100644
index 48e55004cf7b..000000000000
--- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/ImageClassifierExample.scala
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnetexamples.infer.imageclassifier
-
-import org.apache.mxnet._
-import org.kohsuke.args4j.{CmdLineParser, Option}
-import org.slf4j.LoggerFactory
-import org.apache.mxnet.infer.{Classifier, ImageClassifier}
-
-import scala.collection.JavaConverters._
-import java.io.File
-
-import org.apache.mxnetexamples.benchmark.CLIParserBase
-// scalastyle:off
-import java.awt.image.BufferedImage
-// scalastyle:on
-
-import org.apache.mxnetexamples.InferBase
-
-import scala.collection.mutable.ListBuffer
-
-// scalastyle:off
-/**
- *

- * Example inference showing usage of the Infer package on a resnet-152 model. - * @see Instructions to run this example - */ -// scalastyle:on -object ImageClassifierExample { - - private val logger = LoggerFactory.getLogger(classOf[ImageClassifierExample]) - - - def runInferenceOnSingleImage(modelPathPrefix: String, inputImagePath: String, - context: Array[Context]): - IndexedSeq[IndexedSeq[(String, Float)]] = { - ResourceScope.using() { - val dType = DType.Float32 - val inputShape = Shape(1, 3, 224, 224) - - val inputDescriptor = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) - - // Create object of ImageClassifier class - val imgClassifier: ImageClassifier = new - ImageClassifier(modelPathPrefix, inputDescriptor, context) - - // Loading single image from file and getting BufferedImage - val img = ImageClassifier.loadImageFromFile(inputImagePath) - - // Running inference on single image - val output = imgClassifier.classifyImage(img, Some(5)) - output - } - } - - def runInferenceOnBatchOfImage(modelPathPrefix: String, inputImageDir: String, - context: Array[Context]): - IndexedSeq[IndexedSeq[(String, Float)]] = { - ResourceScope.using() { - val dType = DType.Float32 - val inputShape = Shape(1, 3, 224, 224) - - val inputDescriptor = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) - - // Create object of ImageClassifier class - val imgClassifier: ImageClassifier = new - ImageClassifier(modelPathPrefix, inputDescriptor, context) - - // Loading batch of images from the directory path - val batchFiles = generateBatches(inputImageDir, 20) - var outputList = IndexedSeq[IndexedSeq[(String, Float)]]() - - for (batchFile <- batchFiles) { - val imgList = ImageClassifier.loadInputBatch(batchFile) - // Running inference on batch of images loaded in previous step - outputList ++= imgClassifier.classifyImageBatch(imgList, Some(5)) - } - - outputList - } - } - - def generateBatches(inputImageDirPath: String, batchSize: Int = 100): List[List[String]] = { - val dir = new File(inputImageDirPath) - require(dir.exists && dir.isDirectory, - "input image directory: %s not found".format(inputImageDirPath)) - val output = ListBuffer[List[String]]() - var batch = ListBuffer[String]() - for (imgFile: File <- dir.listFiles()){ - batch += imgFile.getPath - if (batch.length == batchSize) { - output += batch.toList - batch = ListBuffer[String]() - } - } - if (batch.length > 0) { - output += batch.toList - } - output.toList - } - - def main(args: Array[String]): Unit = { - val inst = new CLIParser - val parser: CmdLineParser = new CmdLineParser(inst) - - var context = Context.cpu() - if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && - System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { - context = Context.gpu() - } - - try { - parser.parseArgument(args.toList.asJava) - - - val modelPathPrefix = if (inst.modelPathPrefix == null) System.getenv("MXNET_HOME") - else inst.modelPathPrefix - - val inputImagePath = if (inst.inputImagePath == null) System.getenv("MXNET_HOME") - else inst.inputImagePath - - val inputImageDir = if (inst.inputImageDir == null) System.getenv("MXNET_HOME") - else inst.inputImageDir - - val singleOutput = runInferenceOnSingleImage(modelPathPrefix, inputImagePath, context) - - // Printing top 5 class probabilities - for (i <- singleOutput) { - printf("Classes with top 5 probability = %s \n", i) - } - - val batchOutput = runInferenceOnBatchOfImage(modelPathPrefix, inputImageDir, context) - - val d = new File(inputImageDir) - val filenames = d.listFiles.filter(_.isFile).toList - - 
// Printing filename and inference class with top 5 probabilities - for ((f, inferOp) <- (filenames zip batchOutput)) { - printf("Input image %s ", f) - printf("Class with probability =%s \n", inferOp) - } - } catch { - case ex: Exception => { - logger.error(ex.getMessage, ex) - parser.printUsage(System.err) - sys.exit(1) - } - } - } -} - -class CLIParser extends CLIParserBase{ - @Option(name = "--model-path-prefix", usage = "the input model directory") - val modelPathPrefix: String = "/resnet-152/resnet-152" - @Option(name = "--input-image", usage = "the input image") - val inputImagePath: String = "/images/kitten.jpg" - @Option(name = "--input-dir", usage = "the input batch of images directory") - val inputImageDir: String = "/images/" -} - -class ImageClassifierExample(CLIParser: CLIParser) extends InferBase{ - - override def loadModel(context: Array[Context], - batchInference : Boolean = false): Classifier = { - val dType = DType.Float32 - val batchSize = if (batchInference) CLIParser.batchSize else 1 - val inputShape = Shape(batchSize, 3, 224, 224) - - val inputDescriptor = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) - - // Create object of ImageClassifier class - val imgClassifier: ImageClassifier = new ImageClassifier(CLIParser.modelPathPrefix, - inputDescriptor, context) - imgClassifier - } - - override def loadSingleData(): Any = { - val img = ImageClassifier.loadImageFromFile(CLIParser.inputImagePath) - img - } - - override def loadBatchFileList(batchSize: Int): List[Any] = { - val dir = new File(CLIParser.inputImageDir) - require(dir.exists && dir.isDirectory, - "input image directory: %s not found".format(CLIParser.inputImageDir)) - val output = ListBuffer[List[String]]() - var batch = ListBuffer[String]() - for (imgFile: File <- dir.listFiles()){ - batch += imgFile.getPath - if (batch.length == batchSize) { - output += batch.toList - batch = ListBuffer[String]() - } - } - if (batch.length > 0) { - output += batch.toList - } - output.toList - } - - override def loadInputBatch(inputPaths: Any): Any = { - val batchFile = inputPaths.asInstanceOf[List[String]] - ImageClassifier.loadInputBatch(batchFile) - } - - override def runSingleInference(loadedModel: Any, input: Any): Any = { - // Running inference on single image - val imageModel = loadedModel.asInstanceOf[ImageClassifier] - val imgInput = input.asInstanceOf[BufferedImage] - val output = imageModel.classifyImage(imgInput, Some(5)) - output - } - - override def runBatchInference(loadedModel: Any, input: Any): Any = { - val imageModel = loadedModel.asInstanceOf[ImageClassifier] - val imgInput = input.asInstanceOf[Traversable[BufferedImage]] - val output = imageModel.classifyImageBatch(imgInput, Some(5)) - output - } - -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/README.md b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/README.md deleted file mode 100644 index 6b26e316eda9..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier/README.md +++ /dev/null @@ -1,105 +0,0 @@ - - - - - - - - - - - - - - - - - -# Image Classification - -This folder contains an example for image classification with the [MXNet Scala Infer API](https://github.com/apache/incubator-mxnet/tree/master/scala-package/infer). -The goal of image classification is to identify the objects contained in images. 
-The following example shows recognized object classes with corresponding probabilities using a pre-trained model.
-
-
-## Contents
-
-1. [Prerequisites](#prerequisites)
-2. [Download artifacts](#download-artifacts)
-3. [Run the image inference example](#run-the-image-inference-example)
-4. [Pretrained models](#pretrained-models)
-5. [Infer APIs](#infer-api-details)
-6. [Next steps](#next-steps)
-
-
-## Prerequisites
-
-1. MXNet
-2. MXNet Scala Package
-3. [IntelliJ IDE (or alternative IDE) project setup](https://mxnet.apache.org/api/scala/docs/tutorials/mxnet_scala_on_intellij) with the MXNet Scala Package
-4. wget
-
-
-## Download Artifacts
-
-For this tutorial, you can get the model and sample input image by running the following bash script. The script uses `wget` to download these artifacts from AWS S3.
-
-From the `scala-package/examples/scripts/infer/imageclassifier/` folder run:
-
-```bash
-./get_resnet_data.sh
-```
-
-**Note**: You may need to run `chmod +x get_resnet_data.sh` before running this script.
-
-
-## Run the Image Inference Example
-
-Now that you have the model files and the test kitten image, you can run the following script, which passes the necessary parameters to the JVM for this inference example.
-
-```bash
-./run_classifier_example.sh \
-../resnet/resnet-152 ../images/kitten.jpg ../images/
-```
-
-**Notes**:
-* These are relative paths to this script.
-* You may need to run `chmod +x run_classifier_example.sh` before running this script.
-
-There are a few options you can provide when running the example. Use the `--help` argument to list them.
-
-```bash
-./run_classifier_example.sh --help
-```
-
-The available arguments are as follows:
-
-| Argument | Comments |
-| ----------------------------- | ---------------------------------------- |
-| `model-dir` | Folder path with prefix to the model (including json, params, and any synset file). |
-| `input-image` | The image to run inference on. |
-| `input-dir` | The directory of images to run inference on. |
-
-* You must provide `model-dir`.
-* You must provide both `input-image` and `input-dir`, since this example runs single-image inference as well as batch inference.
-
-
-## Pretrained Models
-
-The MXNet project repository provides several [pre-trained models on various datasets](https://github.com/apache/incubator-mxnet/tree/master/example/image-classification#pre-trained-models) and examples on how to train them. You may use the [modelzoo.py](https://github.com/apache/incubator-mxnet/blob/master/example/image-classification/common/modelzoo.py) helper script to download these models. Many ImageNet models may also be downloaded directly from [http://data.mxnet.io/models/imagenet/](http://data.mxnet.io/models/imagenet/).
-
-
-## Infer API Details
-
-This example uses the [ImageClassifier](https://github.com/apache/incubator-mxnet/blob/master/scala-package/infer/src/main/scala/org/apache/mxnet/infer/ImageClassifier.scala)
-class provided by the [MXNet Scala Infer API](https://github.com/apache/incubator-mxnet/tree/master/scala-package/infer).
-It provides methods to load the images, create an NDArray out of a `BufferedImage`, and run prediction using the following Infer APIs:
-* [Classifier](https://github.com/apache/incubator-mxnet/blob/master/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Classifier.scala)
-* [Predictor](https://github.com/apache/incubator-mxnet/blob/master/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Predictor.scala)
-
-
-## Next Steps
-
-Check out the following related tutorials and examples for the Infer API:
-
-* [Single Shot Detector with the MXNet Scala Infer API](../objectdetector/README.md)
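The single-image path that `run_classifier_example.sh` exercises reduces to a few Infer API calls, as the deleted `ImageClassifierExample` above shows. A minimal sketch, assuming the resnet-152 artifacts downloaded by `get_resnet_data.sh` sit under the hypothetical prefix `/tmp/resnet/resnet-152` with a test image at `/tmp/images/kitten.jpg`:

```scala
import org.apache.mxnet.{Context, DType, DataDesc, Shape}
import org.apache.mxnet.infer.ImageClassifier

object ClassifyOneImage {
  def main(args: Array[String]): Unit = {
    // One RGB image at the 224x224 resolution resnet-152 expects, NCHW layout.
    val inputDesc = IndexedSeq(DataDesc("data", Shape(1, 3, 224, 224), DType.Float32, "NCHW"))
    // Prefix pointing at resnet-152-symbol.json / resnet-152-0000.params (hypothetical path).
    val classifier = new ImageClassifier("/tmp/resnet/resnet-152", inputDesc, Array(Context.cpu()))
    val img = ImageClassifier.loadImageFromFile("/tmp/images/kitten.jpg")
    // IndexedSeq (one entry per output layer) of top-5 (label, probability) pairs.
    val topK = classifier.classifyImage(img, Some(5))
    topK.flatten.foreach { case (label, prob) => println(f"$label%s -> $prob%.4f") }
  }
}
```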
diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/README.md b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/README.md
deleted file mode 100644
index 062448981256..000000000000
--- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/README.md
+++ /dev/null
@@ -1,121 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# Single Shot Multi Object Detection using Scala Inference API
-
-In this example, you will learn how to use the Scala Inference API to run inference on a pre-trained Single Shot Multi Object Detection (SSD) MXNet model.
-
-The model is trained on the [Pascal VOC 2012 dataset](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html). The network is an SSD model built on Resnet50 as the base network to extract image features. The model is trained to detect the following entities (classes): ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']. For more details about the model, you can refer to the [MXNet SSD example](https://github.com/apache/incubator-mxnet/tree/master/example/ssd).
-
-
-## Contents
-
-1. [Prerequisites](#prerequisites)
-2. [Download artifacts](#download-artifacts)
-3. [Setup datapath and parameters](#setup-datapath-and-parameters)
-4. [Run the image inference example](#run-the-image-inference-example)
-5. [Infer APIs](#infer-api-details)
-6. [Next steps](#next-steps)
-
-
-## Prerequisites
-
-1. MXNet
-2. MXNet Scala Package
-3. [IntelliJ IDE (or alternative IDE) project setup](https://mxnet.apache.org/tutorials/scala/mxnet_scala_on_intellij.html) with the MXNet Scala Package
-4. wget
-
-
-## Setup Guide
-
-### Download Artifacts
-#### Step 1
-You can download the files using the script `get_ssd_data.sh`. It will download and place the model files in a `model` folder and the test image files in an `image` folder in the current directory.
-From the `scala-package/examples/scripts/infer/objectdetector/` folder run:
-
-```bash
-./get_ssd_data.sh
-```
-
-**Note**: You may need to run `chmod +x get_ssd_data.sh` before running this script.
-
-In the pre-trained model, the `input_name` is `data` and the shape is `(1, 3, 512, 512)`.
-This shape translates to: a batch of `1` image, the image has color and uses `3` channels (RGB), and the image has the dimensions of `512` pixels in height by `512` pixels in width.
-
-`image/jpeg` is the expected input type, since this example's image pre-processor only supports the handling of binary JPEG images.
-
-The output shape is `(1, 6132, 6)`. As with the input, the `1` is the number of images. `6132` is the number of prediction results, and `6` is the size of each prediction. Each prediction contains the following components:
-- `Class`
-- `Accuracy`
-- `Xmin`
-- `Ymin`
-- `Xmax`
-- `Ymax`
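In code, the deleted `SSDClassifierExample` further down in this diff turns each returned entry, a `(className, Array(prob, xmin, ymin, xmax, ymax))` pair with corners normalized to `[0, 1]`, back into pixel coordinates by scaling with the 512x512 input size. A small helper in the same spirit:

```scala
// Pretty-print ObjectDetector output: one (className, Array(prob, xmin, ymin, xmax, ymax))
// pair per detection, with corner coordinates normalized to [0, 1].
def printDetections(detections: IndexedSeq[IndexedSeq[(String, Array[Float])]],
                    width: Int = 512, height: Int = 512): Unit =
  for {
    image <- detections
    (className, arr) <- image
  } {
    val prob = arr(0)
    val (xmin, ymin) = (arr(1) * width, arr(2) * height)
    val (xmax, ymax) = (arr(3) * width, arr(4) * height)
    println(f"Class: $className%s p=$prob%.3f box=($xmin%.1f, $ymin%.1f, $xmax%.1f, $ymax%.1f)")
  }
```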
-
-
-### Setup Datapath and Parameters
-#### Step 2
-The following parameters are defined for this example; you can find more information in the `SSDClassifierExample` class.
-
-| Argument | Comments |
-| ----------------------------- | ---------------------------------------- |
-| `model-path-prefix` | Folder path with prefix to the model (including json, params, and any synset file). |
-| `input-image` | The image to run inference on. |
-| `input-dir` | The directory of images to run inference on. |
-
-
-## How to Run Inference
-After the previous steps, you should be able to run the code using the following script, which passes all of the required parameters to the Infer API.
-
-From the `scala-package/examples/scripts/infer/objectdetector/` folder run:
-
-```bash
-./run_ssd_example.sh ../models/resnet50_ssd/resnet50_ssd_model ../images/dog.jpg ../images
-```
-
-**Notes**:
-* These are relative paths to this script.
-* You may need to run `chmod +x run_ssd_example.sh` before running this script.
-
-The example should give output like the following:
-```
-Class: car
-Probabilties: 0.99847263
-(Coord:,312.21335,72.0291,456.01443,150.66176)
-Class: bicycle
-Probabilties: 0.90473825
-(Coord:,155.95807,149.96362,383.8369,418.94513)
-Class: dog
-Probabilties: 0.8226818
-(Coord:,83.82353,179.13998,206.63783,476.7875)
-```
-The output shows the top 3 predictions for the input image.
-
-
-## Infer API Details
-This example uses the ObjectDetector class provided by the MXNet Scala Infer API. It provides methods to load the images, create an NDArray out of a Java BufferedImage, and run prediction using the Classifier and Predictor APIs.
-
-
-## References
-This documentation used the model and inference setup guide from the [MXNet Model Server SSD example](https://github.com/awslabs/mxnet-model-server/blob/master/examples/ssd/README.md).
-
-
-## Next Steps
-
-Check out the following related tutorials and examples for the Infer API:
-
-* [Image Classification with the MXNet Scala Infer API](../imageclassifier/README.md)
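Programmatically, the run above boils down to a few lines against `ObjectDetector`, as the deleted `SSDClassifierExample` below shows. A minimal sketch, assuming the example's default model prefix `/model/ssd_resnet50_512` and input image `/images/dog.jpg`:

```scala
import org.apache.mxnet.{Context, DType, DataDesc, Shape}
import org.apache.mxnet.infer.{ImageClassifier, ObjectDetector}

object DetectOneImage {
  def main(args: Array[String]): Unit = {
    // The SSD model consumes one 3x512x512 RGB image per forward pass.
    val inputDesc = IndexedSeq(DataDesc("data", Shape(1, 3, 512, 512), DType.Float32, "NCHW"))
    val detector = new ObjectDetector("/model/ssd_resnet50_512", inputDesc, Array(Context.cpu()))
    val img = ImageClassifier.loadImageFromFile("/images/dog.jpg")
    // Top-3 detections: (class name, Array(prob, xmin, ymin, xmax, ymax)).
    val detections = detector.imageObjectDetect(img, Some(3))
    detections.flatten.foreach { case (cls, arr) =>
      println(s"$cls -> ${arr.mkString(", ")}")
    }
  }
}
```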
diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala
deleted file mode 100644
index 8c5366d6279a..000000000000
--- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/objectdetector/SSDClassifierExample.scala
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnetexamples.infer.objectdetector
-// scalastyle:off
-import java.awt.image.BufferedImage
-
-import org.apache.mxnetexamples.benchmark.CLIParserBase
-// scalastyle:on
-import java.io.File
-
-import org.apache.mxnet._
-import org.apache.mxnet.infer._
-import org.kohsuke.args4j.{CmdLineParser, Option}
-import org.slf4j.LoggerFactory
-
-import scala.collection.JavaConverters._
-import java.nio.file.{Files, Paths}
-
-import org.apache.mxnetexamples.InferBase
-
-import scala.collection.mutable.ListBuffer
-
-// scalastyle:off
-/**
- *

- * Example single shot detector (SSD) using the Infer package - * on a ssd_resnet50_512 model. - * @see Instructions to run this example - */ -// scalastyle:on -object SSDClassifierExample { - - private val logger = LoggerFactory.getLogger(classOf[SSDClassifierExample]) - private type SSDOut = (String, Array[Float]) - - def runObjectDetectionSingle(modelPathPrefix: String, inputImagePath: String, - context: Array[Context]): - IndexedSeq[IndexedSeq[(String, Array[Float])]] = { - ResourceScope.using() { - val dType = DType.Float32 - val inputShape = Shape(1, 3, 512, 512) - // ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...]) - val outputShape = Shape(1, 6132, 6) - val inputDescriptors = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) - val img = ImageClassifier.loadImageFromFile(inputImagePath) - val objDetector = new ObjectDetector(modelPathPrefix, inputDescriptors, context) - val output = objDetector.imageObjectDetect(img, Some(3)) - - output - } - } - - def runObjectDetectionBatch(modelPathPrefix: String, inputImageDir: String, - context: Array[Context]): - IndexedSeq[IndexedSeq[(String, Array[Float])]] = { - ResourceScope.using() { - val dType = DType.Float32 - val inputShape = Shape(1, 3, 512, 512) - // ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...]) - val outputShape = Shape(1, 6132, 6) - val inputDescriptors = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) - val objDetector = new ObjectDetector(modelPathPrefix, inputDescriptors, context) - // Loading batch of images from the directory path - val batchFiles = generateBatches(inputImageDir, 20) - var outputList = IndexedSeq[IndexedSeq[(String, Array[Float])]]() - - for (batchFile <- batchFiles) { - val imgList = ImageClassifier.loadInputBatch(batchFile) - // Running inference on batch of images loaded in previous step - outputList ++= objDetector.imageBatchObjectDetect(imgList, Some(5)) - } - outputList - } - } - - def generateBatches(inputImageDirPath: String, batchSize: Int = 100): List[List[String]] = { - val dir = new File(inputImageDirPath) - require(dir.exists && dir.isDirectory, - "input image directory: %s not found".format(inputImageDirPath)) - val output = ListBuffer[List[String]]() - var batch = ListBuffer[String]() - for (imgFile: File <- dir.listFiles()){ - batch += imgFile.getPath - if (batch.length == batchSize) { - output += batch.toList - batch = ListBuffer[String]() - } - } - if (batch.length > 0) { - output += batch.toList - } - output.toList - } - - def main(args: Array[String]): Unit = { - val inst = new CLIParser - val parser : CmdLineParser = new CmdLineParser(inst) - parser.parseArgument(args.toList.asJava) - val mdprefixDir = inst.modelPathPrefix - val imgPath = inst.inputImagePath - val imgDir = inst.inputImageDir - if (!checkExist(Array(mdprefixDir + "-symbol.json", imgDir, imgPath))) { - logger.error("Model or input image path does not exist") - sys.exit(1) - } - - var context = Context.cpu() - if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && - System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { - context = Context.gpu() - } - - try { - val inputShape = Shape(1, 3, 512, 512) - val outputShape = Shape(1, 6132, 6) - - val width = inputShape(2) - val height = inputShape(3) - var outputStr : String = "\n" - - val output = runObjectDetectionSingle(mdprefixDir, imgPath, context) - - - for (ele <- output) { - for (i <- ele) { - outputStr += "Class: " + i._1 + "\n" - val arr = i._2 - outputStr += "Probabilties: " + arr(0) + "\n" - val coord = Array[Float]( - arr(1) * 
width, arr(2) * height, - arr(3) * width, arr(4) * height - ) - outputStr += "Coord:" + coord.mkString(",") + "\n" - } - } - logger.info(outputStr) - - val outputList = runObjectDetectionBatch(mdprefixDir, imgDir, context) - - outputStr = "\n" - for (idx <- outputList.indices) { - outputStr += "*** Image " + (idx + 1) + "***" + "\n" - for (i <- outputList(idx)) { - outputStr += "Class: " + i._1 + "\n" - val arr = i._2 - outputStr += "Probabilties: " + arr(0) + "\n" - val coord = Array[Float]( - arr(1) * width, arr(2) * height, - arr(3) * width, arr(4) * height - ) - outputStr += "Coord:" + coord.mkString(",") + "\n" - } - } - logger.info(outputStr) - - } catch { - case ex: Exception => { - logger.error(ex.getMessage, ex) - parser.printUsage(System.err) - sys.exit(1) - } - } - sys.exit(0) - } - - - def checkExist(arr : Array[String]) : Boolean = { - var exist : Boolean = true - for (item <- arr) { - if (!(Files.exists(Paths.get(item)))) { - logger.error("Cannot find: " + item) - exist = false - } - } - exist - } - -} - -class CLIParser extends CLIParserBase { - @Option(name = "--model-path-prefix", usage = "the input model directory and prefix of the model") - val modelPathPrefix: String = "/model/ssd_resnet50_512" - @Option(name = "--input-image", usage = "the input image") - val inputImagePath: String = "/images/dog.jpg" - @Option(name = "--input-dir", usage = "the input batch of images directory") - val inputImageDir: String = "/images/" -} - -class SSDClassifierExample(CLIParser: CLIParser) - extends InferBase { - override def loadModel(context: Array[Context], batchInference: Boolean = false): Any = { - val dType = DType.Float32 - val batchSize = if (batchInference) CLIParser.batchSize else 1 - val inputShape = Shape(batchSize, 3, 512, 512) - val inputDescriptors = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW")) - new ObjectDetector(CLIParser.modelPathPrefix, inputDescriptors, context) - } - override def loadSingleData(): Any = { - val img = ImageClassifier.loadImageFromFile(CLIParser.inputImagePath) - img - } - - override def runSingleInference(loadedModel: Any, input: Any): Any = { - val detector = loadedModel.asInstanceOf[ObjectDetector] - val imgInput = input.asInstanceOf[BufferedImage] - detector.imageObjectDetect(imgInput) - } - - override def loadInputBatch(inputPaths: Any): Any = { - val batchFile = inputPaths.asInstanceOf[List[String]] - ImageClassifier.loadInputBatch(batchFile) - } - - override def loadBatchFileList(batchSize: Int): List[Any] = { - val dir = new File(CLIParser.inputImageDir) - require(dir.exists && dir.isDirectory, - "input image directory: %s not found".format(CLIParser.inputImageDir)) - val output = ListBuffer[List[String]]() - var batch = ListBuffer[String]() - for (imgFile: File <- dir.listFiles()){ - batch += imgFile.getPath - if (batch.length == batchSize) { - output += batch.toList - batch = ListBuffer[String]() - } - } - if (batch.length > 0) { - output += batch.toList - } - output.toList - } - - override def runBatchInference(loadedModel: Any, input: Any): Any = { - val model = loadedModel.asInstanceOf[ObjectDetector] - val imgInput = input.asInstanceOf[Traversable[BufferedImage]] - val output = model.imageBatchObjectDetect(imgInput, Some(5)) - output - } -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/predictor/PredictorExample.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/predictor/PredictorExample.scala deleted file mode 100644 index be90936ce51b..000000000000 --- 
a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/predictor/PredictorExample.scala +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.infer.predictor - -import java.io.File - -import scala.io -import org.apache.mxnet._ -import org.apache.mxnet.infer.Predictor -import org.apache.mxnetexamples.benchmark.CLIParserBase -import org.kohsuke.args4j.{CmdLineParser, Option} - -import scala.collection.JavaConverters._ - -object PredictorExample { - - def loadModel(modelPathPrefix : String, inputDesc : IndexedSeq[DataDesc], - context : Context, epoch : Int): Predictor = { - new Predictor(modelPathPrefix, inputDesc, context, Some(epoch)) - } - - def doInference(predictor : Predictor, imageND : NDArray): IndexedSeq[NDArray] = { - predictor.predictWithNDArray(IndexedSeq(imageND)) - } - - def preProcess(imagePath: String, h: Int, w: Int) : NDArray = { - var img = Image.imRead(imagePath) - img = Image.imResize(img, h, w) - // HWC -> CHW - img = NDArray.api.transpose(img, Some(Shape(2, 0, 1))) - img = NDArray.api.expand_dims(img, 0) - img.asType(DType.Float32) - } - - def postProcess(modelPathPrefix : String, result : Array[Float]) : String = { - val dirPath = modelPathPrefix.substring(0, 1 + modelPathPrefix.lastIndexOf(File.separator)) - val d = new File(dirPath) - require(d.exists && d.isDirectory, s"directory: $dirPath not found") - val f = io.Source.fromFile(dirPath + "synset.txt") - val s = f.getLines().toIndexedSeq - val maxIdx = result.zipWithIndex.maxBy(_._1)._2 - printf(s"Predict Result ${s(maxIdx)} with prob ${result(maxIdx)}\n") - s(maxIdx) - } - - def main(args : Array[String]): Unit = { - val inst = new CLIParser - val parser: CmdLineParser = new CmdLineParser(inst) - - parser.parseArgument(args.toList.asJava) - - var context = Context.cpu() - if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && - System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { - context = Context.gpu() - } - - val imgWidth = 224 - val imgHeight = 224 - - val inputDesc = IndexedSeq(new DataDesc("data", Shape(1, 3, imgHeight, imgWidth), - DType.Float32, Layout.NCHW)) - - val predictor = loadModel(inst.modelPathPrefix, inputDesc, context, 0) - val img = preProcess(inst.inputImagePath, imgHeight, imgWidth) - val result = doInference(predictor, img)(0).toArray - postProcess(inst.modelPathPrefix, result) - } - -} - -class CLIParser extends CLIParserBase{ - @Option(name = "--model-path-prefix", usage = "the input model directory") - val modelPathPrefix: String = "/resnet-152/resnet-152" - @Option(name = "--input-image", usage = "the input image") - val inputImagePath: String = "/images/kitten.jpg" -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/multitask/Data.scala 
b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/multitask/Data.scala deleted file mode 100644 index 068aa6314f89..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/multitask/Data.scala +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.multitask - -import org.apache.mxnet.Shape -import org.apache.mxnet.IO -import org.apache.mxnet.DataIter - -object Data { - - // return train and val iterators for mnist - def mnistIterator(dataPath: String, batchSize: Int, inputShape: Shape): (DataIter, DataIter) = { - val flat = if (inputShape.length == 3) "False" else "True" - val trainParams = Map( - "image" -> s"$dataPath/train-images-idx3-ubyte", - "label" -> s"$dataPath/train-labels-idx1-ubyte", - "input_shape" -> inputShape.toString(), - "batch_size" -> s"$batchSize", - "shuffle" -> "True", - "flat" -> flat - ) - val trainDataIter = IO.MNISTIter(trainParams) - val testParams = Map( - "image" -> s"$dataPath/t10k-images-idx3-ubyte", - "label" -> s"$dataPath/t10k-labels-idx1-ubyte", - "input_shape" -> inputShape.toString(), - "batch_size" -> s"$batchSize", - "flat" -> flat - ) - val testDataIter = IO.MNISTIter(testParams) - (trainDataIter, testDataIter) - } -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/ModelVgg19.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/ModelVgg19.scala deleted file mode 100644 index ca4c242ab1ce..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/ModelVgg19.scala +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnetexamples.neuralstyle - -import org.apache.mxnet.{Context, Executor, NDArray, Shape, Symbol} - -/** - * Definition for the neuralstyle network and initialize it with pretrained weight - */ -object ModelVgg19 { - case class ConvExecutor(executor: Executor, data: NDArray, dataGrad: NDArray, - style: Array[NDArray], content: NDArray, argDict: Map[String, NDArray]) - - def ConvRelu(data : Symbol, convName : String, reluName : String, - numFilter : Int, kernel : (Int, Int) = (3, 3), - stride : (Int, Int) = (1, 1)) : Symbol = { - val conv = Symbol.api.Convolution(data = Some(data), num_filter = numFilter, - pad = Some(Shape(1, 1)), kernel = Shape(kernel._1, kernel._2), - stride = Some(Shape(stride._1, stride._2)), no_bias = Some(false), - workspace = Some(1024), name = convName) - val relu = Symbol.api.relu(data = Some(conv), name = reluName) - conv.dispose() - relu - } - - def getSymbol: (Symbol, Symbol) = { - getVggSymbol() - } - - def getVggSymbol(prefix: String = "", contentOnly: Boolean = false): (Symbol, Symbol) = { - // declare symbol - val data = Symbol.Variable(s"${prefix}data") - - val relu1_1 = ConvRelu(data, s"${prefix}conv1_1", s"${prefix}relu1_1", 64) - val relu1_2 = ConvRelu(relu1_1, s"${prefix}conv1_2", s"${prefix}relu1_2", 64) - val pool1 = Symbol.api.Pooling(data = Some(relu1_2), pad = Some(Shape(0, 0)), - kernel = Some(Shape(2, 2)), stride = Some(Shape(2, 2)), pool_type = Some("avg"), - name = s"${prefix}pool1") - - val relu2_1 = ConvRelu(pool1, s"${prefix}conv2_1", s"${prefix}relu2_1", 128) - val relu2_2 = ConvRelu(relu2_1, s"${prefix}conv2_2", s"${prefix}relu2_2", 128) - val pool2 = Symbol.api.Pooling(data = Some(relu2_2), pad = Some(Shape(0, 0)), - kernel = Some(Shape(2, 2)), stride = Some(Shape(2, 2)), pool_type = Some("avg"), - name = s"${prefix}pool2") - - val relu3_1 = ConvRelu(pool2, s"${prefix}conv3_1", s"${prefix}relu3_1", 256) - val relu3_2 = ConvRelu(relu3_1, s"${prefix}conv3_2", s"${prefix}relu3_2", 256) - val relu3_3 = ConvRelu(relu3_2, s"${prefix}conv3_3", s"${prefix}relu3_3", 256) - val relu3_4 = ConvRelu(relu3_3, s"${prefix}conv3_4", s"${prefix}relu3_4", 256) - val pool3 = Symbol.api.Pooling(data = Some(relu3_4), pad = Some(Shape(0, 0)), - kernel = Some(Shape(2, 2)), stride = Some(Shape(2, 2)), pool_type = Some("avg"), - name = s"${prefix}pool3") - - val relu4_1 = ConvRelu(pool3, s"${prefix}conv4_1", s"${prefix}relu4_1", 512) - val relu4_2 = ConvRelu(relu4_1, s"${prefix}conv4_2", s"${prefix}relu4_2", 512) - val relu4_3 = ConvRelu(relu4_2, s"${prefix}conv4_3", s"${prefix}relu4_3", 512) - val relu4_4 = ConvRelu(relu4_3, s"${prefix}conv4_4", s"${prefix}relu4_4", 512) - val pool4 = Symbol.api.Pooling(data = Some(relu4_4), pad = Some(Shape(0, 0)), - kernel = Some(Shape(2, 2)), stride = Some(Shape(2, 2)), pool_type = Some("avg"), - name = s"${prefix}pool4") - - val relu5_1 = ConvRelu(pool4, s"${prefix}conv5_1", s"${prefix}relu5_1", 512) - - // style and content layers - val style = if (contentOnly) null else Symbol.Group(relu1_1, relu2_1, relu3_1, relu4_1, relu5_1) - val content = Symbol.Group(relu4_2) - (style, content) - } - - def getExecutor(style: Symbol, content: Symbol, modelPath: String, - inputSize: (Int, Int), ctx: Context): ConvExecutor = { - val out = Symbol.Group(style, content) - // make executor - val (argShapes, outputShapes, auxShapes) = out.inferShape( - Map("data" -> Shape(1, 3, inputSize._1, inputSize._2))) - val argNames = out.listArguments() - val argDict = argNames.zip(argShapes.map(NDArray.zeros(_, ctx))).toMap - 
val gradDict = Map("data" -> argDict("data").copyTo(ctx)) - // init with pretrained weight - val pretrained = NDArray.load2Map(modelPath) - argNames.filter(_ != "data").foreach { name => - val key = s"arg:$name" - if (pretrained.contains(key)) argDict(name).set(pretrained(key)) - } - pretrained.foreach(ele => ele._2.dispose()) - val executor = out.bind(ctx, argDict, gradDict) - out.dispose() - val outArray = executor.outputs - ConvExecutor(executor = executor, - data = argDict("data"), - dataGrad = gradDict("data"), - style = outArray.take(outArray.length - 1), - content = outArray(outArray.length - 1), - argDict = argDict) - } - - def getModel(modelPath: String, inputSize: (Int, Int), ctx: Context): ConvExecutor = { - val (style, content) = getSymbol - getExecutor(style, content, modelPath, inputSize, ctx) - } -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyle.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyle.scala deleted file mode 100644 index beb80ced9d4e..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyle.scala +++ /dev/null @@ -1,326 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnetexamples.neuralstyle - -import java.io.File - -import com.sksamuel.scrimage.{Image, Pixel} -import com.sksamuel.scrimage.filter.GaussianBlurFilter -import com.sksamuel.scrimage.nio.JpegWriter -import org.apache.mxnet._ -import org.apache.mxnet.optimizer.Adam -import org.kohsuke.args4j.{CmdLineParser, Option} -import org.slf4j.LoggerFactory - -import scala.collection.JavaConverters._ -import scala.collection.mutable.ListBuffer - -/** - * An Implementation of the paper A Neural Algorithm of Artistic Style - */ -object NeuralStyle { - case class NSExecutor(executor: Executor, data: NDArray, dataGrad: NDArray) - - private val logger = LoggerFactory.getLogger(classOf[NeuralStyle]) - - def preprocessContentImage(path: String, longEdge: Int, ctx: Context): NDArray = { - val img = Image.fromFile(new File(path)) - logger.info(s"load the content image, size = ${(img.height, img.width)}") - val factor = longEdge.toFloat / Math.max(img.height, img.width) - val (newHeight, newWidth) = ((img.height * factor).toInt, (img.width * factor).toInt) - val resizedImg = img.scaleTo(newWidth, newHeight) - val sample = NDArray.empty(Shape(1, 3, newHeight, newWidth), ctx) - val datas = { - val rgbs = resizedImg.iterator.toArray.map { p => - (p.red, p.green, p.blue) - } - val r = rgbs.map(_._1 - 123.68f) - val g = rgbs.map(_._2 - 116.779f) - val b = rgbs.map(_._3 - 103.939f) - r ++ g ++ b - } - sample.set(datas) - logger.info(s"resize the content image to ${(newHeight, newWidth)}") - sample - } - - def preprocessStyleImage(path: String, shape: Shape, ctx: Context): NDArray = { - val img = Image.fromFile(new File(path)) - val resizedImg = img.scaleTo(shape(3), shape(2)) - val sample = NDArray.empty(Shape(1, 3, shape(2), shape(3)), ctx) - val datas = { - val rgbs = resizedImg.iterator.toArray.map { p => - (p.red, p.green, p.blue) - } - val r = rgbs.map(_._1 - 123.68f) - val g = rgbs.map(_._2 - 116.779f) - val b = rgbs.map(_._3 - 103.939f) - r ++ g ++ b - } - sample.set(datas) - sample - } - - def clip(array: Array[Float]): Array[Float] = array.map { a => - if (a < 0) 0f - else if (a > 255) 255f - else a - } - - def postprocessImage(img: NDArray): Image = { - val datas = img.toArray - val spatialSize = img.shape(2) * img.shape(3) - val r = clip(datas.take(spatialSize).map(_ + 123.68f)) - val g = clip(datas.drop(spatialSize).take(spatialSize).map(_ + 116.779f)) - val b = clip(datas.takeRight(spatialSize).map(_ + 103.939f)) - val pixels = for (i <- 0 until spatialSize) - yield Pixel(r(i).toInt, g(i).toInt, b(i).toInt, 255) - Image(img.shape(3), img.shape(2), pixels.toArray) - } - - def saveImage(img: NDArray, filename: String, radius: Int): Unit = { - logger.info(s"save output to $filename") - val out = postprocessImage(img) - val gauss = GaussianBlurFilter(radius).op - val result = Image(out.width, out.height) - gauss.filter(out.awt, result.awt) - result.output(filename)(JpegWriter()) - } - - def styleGramSymbol(inputSize: (Int, Int), style: Symbol): (Symbol, List[Int]) = { - val (_, outputShape, _) = style.inferShape( - Map("data" -> Shape(1, 3, inputSize._1, inputSize._2))) - var gramList = List[Symbol]() - var gradScale = List[Int]() - for (i <- 0 until style.listOutputs().length) { - val shape = outputShape(i) - val x = Symbol.api.Reshape(data = Some(style.get(i)), - target_shape = Some(Shape(shape(1), shape(2) * shape(3)))) - val gram = Symbol.api.FullyConnected(data = Some(x), weight = Some(x), - no_bias = Some(true), num_hidden = shape(1)) - x.dispose() - gramList = gramList :+ 
gram
-      gradScale = gradScale :+ (shape(1) * shape(2) * shape(3) * shape(1))
-    }
-    (Symbol.Group(gramList: _*), gradScale)
-  }
-
-  def getLoss(gram: Symbol, content: Symbol): (Symbol, Symbol) = {
-    var gramLoss = ListBuffer[Symbol]()
-    for (i <- 0 until gram.listOutputs().length) {
-      val gvar = Symbol.Variable(s"target_gram_$i")
-      Symbol.api.square(data = Some(gvar - gram.get(i)))
-      gramLoss += Symbol.api.sum(
-        Some(Symbol.api.square(data = Some(gvar - gram.get(i))))
-      )
-      gvar.dispose()
-    }
-    gram.dispose()
-    val cvar = Symbol.Variable("target_content")
-    val contentLoss = Symbol.api.sum(
-      Some(Symbol.api.square(Some(cvar - content)))
-    )
-    (Symbol.Group(gramLoss: _*), contentLoss)
-  }
-
-  def getTvGradExecutor(img: NDArray, ctx: Context, tvWeight: Float): scala.Option[Executor] = {
-    // create TV gradient executor with input bound to img
-    if (tvWeight <= 0.0f) return None
-
-    val nChannel = img.shape(1)
-    val sImg = Symbol.Variable("img")
-    val sKernel = Symbol.Variable("kernel")
-    val channels = Symbol.api.SliceChannel(data = Some(sImg), num_outputs = nChannel)
-    val result = (0 until nChannel).map { i =>
-      Symbol.api.Convolution(data = Some(channels.get(i)), weight = Some(sKernel),
-        num_filter = 1, kernel = Shape(3, 3), pad = Some(Shape(1, 1)), no_bias = Some(true),
-        stride = Some(Shape(1, 1)))
-    }.toArray
-    val out = Symbol.api.Concat(result, result.length) * tvWeight
-    val kernel = {
-      val tmp = NDArray.empty(Shape(1, 1, 3, 3), ctx)
-      tmp.set(Array[Float](0, -1, 0, -1, 4, -1, 0, -1, 0)) // discrete Laplacian for the TV penalty
-      tmp / 0.8f
-    }
-    Some(out.bind(ctx, Map("img" -> img, "kernel" -> kernel)))
-  }
-
-  def twoNorm(array: Array[Float]): Float = {
-    Math.sqrt(array.map(x => x * x).sum.toDouble).toFloat
-  }
-
-  //scalastyle:off
-  def runTraining(model : String, contentImage : String, styleImage: String, dev : Context,
-    modelPath : String, outputDir : String, styleWeight : Float,
-    contentWeight : Float, tvWeight : Float, gaussianRadius : Int,
-    lr: Float, maxNumEpochs: Int, maxLongEdge: Int,
-    saveEpochs : Int, stopEps: Float) : Unit = {
-    ResourceScope.using() {
-      val contentNp = preprocessContentImage(contentImage, maxLongEdge, dev)
-      val styleNp = preprocessStyleImage(styleImage, contentNp.shape, dev)
-      val size = (contentNp.shape(2), contentNp.shape(3))
-
-      val (style, content) = ModelVgg19.getSymbol
-      val (gram, gScale) = styleGramSymbol(size, style)
-      var modelExecutor = ModelVgg19.getExecutor(gram, content, modelPath, size, dev)
-
-      modelExecutor.data.set(styleNp)
-      modelExecutor.executor.forward()
-
-      val styleArray = modelExecutor.style.map(_.copyTo(Context.cpu()))
-      modelExecutor.data.set(contentNp)
-      modelExecutor.executor.forward()
-      val contentArray = modelExecutor.content.copyTo(Context.cpu())
-
-      // delete the executor
-      modelExecutor.argDict.foreach(ele => ele._2.dispose())
-      modelExecutor.content.dispose()
-      modelExecutor.data.dispose()
-      modelExecutor.dataGrad.dispose()
-      modelExecutor.style.foreach(_.dispose())
-      modelExecutor.executor.dispose()
-      modelExecutor = null
-
-      val (styleLoss, contentLoss) = getLoss(gram, content)
-      modelExecutor = ModelVgg19.getExecutor(
-        styleLoss, contentLoss, modelPath, size, dev)
-
-      val gradArray = {
-        var tmpGA = Array[NDArray]()
-        for (i <- 0 until styleArray.length) {
-          modelExecutor.argDict(s"target_gram_$i").set(styleArray(i))
-          tmpGA = tmpGA :+ NDArray.ones(Shape(1), dev) * (styleWeight / gScale(i))
-        }
-        tmpGA :+ NDArray.ones(Shape(1), dev) * contentWeight
-      }
-
-      modelExecutor.argDict("target_content").set(contentArray)
-
-      // train
-      val img = Random.uniform(-0.1f, 0.1f, contentNp.shape, dev)
-      val lrFS = new FactorScheduler(step = 10, factor = 0.9f)
-
-      saveImage(contentNp, s"${outputDir}/input.jpg", gaussianRadius)
-      saveImage(styleNp, s"${outputDir}/style.jpg", gaussianRadius)
-
-      val optimizer = new Adam(
-        learningRate = lr,
-        wd = 0.005f,
-        lrScheduler = lrFS)
-      val optimState = optimizer.createState(0, img)
-
-      logger.info("start training")
-
-      var oldImg = img.copyTo(dev)
-      val clipNorm = img.shape.toVector.reduce(_ * _)
-      val tvGradExecutor = getTvGradExecutor(img, dev, tvWeight)
-      var eps = 0f
-      var trainingDone = false
-      var e = 0
-      while (e < maxNumEpochs && !trainingDone) {
-        modelExecutor.data.set(img)
-        modelExecutor.executor.forward()
-        modelExecutor.executor.backward(gradArray)
-
-        val gNorm = NDArray.norm(modelExecutor.dataGrad).toScalar
-        if (gNorm > clipNorm) {
-          modelExecutor.dataGrad.set(modelExecutor.dataGrad * (clipNorm / gNorm))
-        }
-        tvGradExecutor match {
-          case Some(executor) => {
-            executor.forward()
-            optimizer.update(0, img,
-              modelExecutor.dataGrad + executor.outputs(0),
-              optimState)
-          }
-          case None =>
-            optimizer.update(0, img, modelExecutor.dataGrad, optimState)
-        }
-        eps = (NDArray.norm(oldImg - img) / NDArray.norm(img)).toScalar
-        oldImg.set(img)
-        logger.info(s"epoch $e, relative change $eps")
-
-        if (eps < stopEps) {
-          logger.info("eps < args.stop_eps, training finished")
-          trainingDone = true
-        }
-        if ((e + 1) % saveEpochs == 0) {
-          saveImage(img, s"${outputDir}/tmp_${e + 1}.jpg", gaussianRadius)
-        }
-        e = e + 1
-      }
-      saveImage(img, s"${outputDir}/out.jpg", gaussianRadius)
-      logger.info("Finish fit ...")
-    }
-  }
-
-  def main(args: Array[String]): Unit = {
-    val alle = new NeuralStyle
-    val parser: CmdLineParser = new CmdLineParser(alle)
-    try {
-      parser.parseArgument(args.toList.asJava)
-      assert(alle.contentImage != null && alle.styleImage != null
-        && alle.modelPath != null && alle.outputDir != null)
-
-      val dev = if (alle.gpu >= 0) Context.gpu(alle.gpu) else Context.cpu(0)
-      runTraining(alle.model, alle.contentImage, alle.styleImage, dev, alle.modelPath,
-        alle.outputDir, alle.styleWeight, alle.contentWeight, alle.tvWeight,
-        alle.gaussianRadius, alle.lr, alle.maxNumEpochs, alle.maxLongEdge,
-        alle.saveEpochs, alle.stopEps)
-    } catch {
-      case ex: Exception => {
-        logger.error(ex.getMessage, ex)
-        parser.printUsage(System.err)
-        sys.exit(1)
-      }
-    }
-  }
-}
-
-class NeuralStyle {
-  @Option(name = "--model", usage = "the pretrained model to use: ['vgg']")
-  private val model: String = "vgg19"
-  @Option(name = "--content-image", usage = "the content image")
-  private val contentImage: String = null
-  @Option(name = "--style-image", usage = "the style image")
-  private val styleImage: String = null
-  @Option(name = "--model-path", usage = "the model file path")
-  private val modelPath: String = null
-  @Option(name = "--stop-eps", usage = "stop if the relative change is less than eps")
-  private val stopEps: Float = 0.0005f
-  @Option(name = "--content-weight", usage = "the weight for the content image")
-  private val contentWeight: Float = 20f
-  @Option(name = "--style-weight", usage = "the weight for the style image")
-  private val styleWeight: Float = 1f
-  @Option(name = "--tv-weight", usage = "the magnitude of the TV loss")
-  private val tvWeight: Float = 0.01f
-  @Option(name = "--max-num-epochs", usage = "the maximal number of training epochs")
-  private val maxNumEpochs: Int = 1000
-  @Option(name = "--max-long-edge", usage = "resize the content image")
-  private val maxLongEdge: Int = 600
-  @Option(name = "--lr", usage = "the initial learning rate")
-  private val lr: Float = 10f
-  @Option(name = "--gpu", usage = "which gpu card to use, -1 means using cpu")
-  private val gpu: Int = 0
-  @Option(name = "--output-dir", usage = "the output directory")
-  private val outputDir: String = null
-  @Option(name = "--save-epochs", usage = "save the output every n epochs")
-  private val saveEpochs: Int = 50
-  @Option(name = "--gaussian-radius", usage = "the gaussian blur filter radius")
-  private val gaussianRadius: Int = 1
-}
diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/README.md b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/README.md
deleted file mode 100644
index 0dc4fb892691..000000000000
--- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/README.md
+++ /dev/null
@@ -1,100 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-# Neural Style Example for Scala
-
-## Introduction
-This example contains three important components:
-- Boost Inference
-- Boost Training
-- Neural Style conversion
-
-You can use the prebuilt VGG model to do the conversion.
-By adding a style image, you can create several interesting images.
-
-Original Image | Style Image
-:-------------------------:|:-------------------------:
-![](https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/NeuralStyle/IMG_4343.jpg) | ![](https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/NeuralStyle/starry_night.jpg)
-
-Boost Inference Image (pretrained) | Epoch 150 Image
-:-------------------------:|:-------------------------:
-![](https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/NeuralStyle/out_3.jpg) | ![](https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/NeuralStyle/tmp_150.jpg)
-
-## Setup
-Please download the input image and style image following the links below:
-
-Input image
-```bash
-https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/NeuralStyle/IMG_4343.jpg
-```
-Style image
-```bash
-https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/NeuralStyle/starry_night.jpg
-```
-
-VGG model (Boost Inference)
-```bash
-https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/NeuralStyle/model.zip
-```
-
-VGG model (Boost Training)
-```bash
-https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/NeuralStyle/vgg19.params
-```
-
-Please unzip the model before you use it.
-
-## Boost Inference Example
-
-Please provide the corresponding arguments before you execute the program:
-```bash
---input-image
-<path>/IMG_4343.jpg
---model-path
-<path>/model
---output-path
-<output-path>
-```
-
-## Boost Training Example
-Please download your own training data for boost training.
-You can use 26k images sampled from [MIT Place dataset](http://places.csail.mit.edu/).
-```bash
---style-image
-<path>/starry_night.jpg
---data-path
-<path>/images
---vgg-model-path
-<path>/vgg19.params
---save-model-path
-<save-model-path>
-```
-
-## NeuralStyle Example
-Please provide the corresponding arguments before you execute the program:
-```bash
---model-path
-<path>/vgg19.params
---content-image
-<path>/IMG_4343.jpg
---style-image
-<path>/starry_night.jpg
---gpu
-<gpu-id>
---output-dir
-<output-dir>
-```
\ No newline at end of file
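In every mode, the example feeds VGG-19 mean-centered pixels: it subtracts the per-channel RGB means (123.68, 116.779, 103.939) and packs the result as `(1, 3, H, W)`. A condensed sketch of that preprocessing, matching `NeuralStyle.preprocessContentImage` above (the helper name is ours):

```scala
import org.apache.mxnet.{Context, NDArray, Shape}

object VggPreprocessSketch {
  // Per-channel RGB means the example subtracts before feeding VGG-19.
  private val MeanRgb = Array(123.68f, 116.779f, 103.939f)

  // Pack already-resized per-channel pixel arrays into the (1, 3, H, W) layout
  // the VGG executor is bound against, mean-centering each channel.
  def toVggInput(r: Array[Float], g: Array[Float], b: Array[Float],
                 height: Int, width: Int, ctx: Context): NDArray = {
    require(r.length == height * width, "channel size must match H*W")
    val sample = NDArray.empty(Shape(1, 3, height, width), ctx)
    sample.set(r.map(_ - MeanRgb(0)) ++ g.map(_ - MeanRgb(1)) ++ b.map(_ - MeanRgb(2)))
    sample
  }
}
```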
diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/Basic.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/Basic.scala
deleted file mode 100644
index 56303253f33d..000000000000
--- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/Basic.scala
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnetexamples.neuralstyle.end2end
-
-import org.apache.mxnet.{Context, Initializer, NDArray, Shape, Symbol}
-import org.apache.mxnetexamples.neuralstyle.ModelVgg19
-import org.slf4j.LoggerFactory
-
-
-object Basic {
-
-  class PretrainedInit(prefix: String, params: Map[String, NDArray],
-    verbose: Boolean = false) extends Initializer {
-
-    private val logger = LoggerFactory.getLogger(classOf[PretrainedInit])
-
-    private val prefixLen = prefix.length() + 1
-    private val argParams = params.filter(_._1.startsWith("arg:"))
-    private val auxParams = params.filter(_._1.startsWith("aux:"))
-    private val argNames = argParams.keySet.map(_.substring(4))
-    private val auxNames = auxParams.keySet.map(_.substring(4))
-
-    override def initWeight(name: String, arr: NDArray): Unit = {
-      val key = name.substring(prefixLen)
-      if (this.argNames.contains(key)) {
-        if (verbose) logger.info(s"Init $name")
-        arr.set(this.argParams(s"arg:$key"))
-      } else if (this.auxNames.contains(key)) {
-        if (verbose) logger.info(s"Init $name")
-        arr.set(this.auxParams(s"aux:$key"))
-      } else {
-        logger.info(s"Unknown params: $name, init with 0")
-        arr.set(0f)
-      }
-    }
-
-    override def initDefault(name: String, arr: NDArray): Unit = {
-    }
-  }
-
-  def getStyleModule(prefix: String, dShape: Shape,
-    ctx: Context, params: Map[String, NDArray]): Module = {
-    val inputShape = Map(s"${prefix}_data" -> dShape)
-    val (style, content) = ModelVgg19.getVggSymbol(prefix + "_")
-    val (gram, gScale) = styleGramSymbol(inputShape, style)
-    val init = new PretrainedInit(prefix, params, true)
-    new Module(symbol = gram, context = ctx,
-      dataShapes = Map(s"${prefix}_data" -> dShape),
-      initializer = init, forTraining = false)
-  }
-
-  def styleGramSymbol(inputShape: Map[String, Shape], style: Symbol): (Symbol, List[Int]) = {
-    val (_, outputShape, _) = style.inferShape(inputShape)
-    var gramList = List[Symbol]()
-    var gradScale = List[Int]()
-    for (i <- 0 until
style.listOutputs().length) { - val shape = outputShape(i) - val x = Symbol.api.Reshape(data = Some(style.get(i)), - shape = Some(Shape(shape(1), shape(2) * shape(3)))) - val gram = Symbol.api.FullyConnected(data = Some(x), weight = Some(x), - no_bias = Some(true), num_hidden = shape(1)) - gramList = gramList :+ gram - gradScale = gradScale :+ (shape(1) * shape(2) * shape(3) * shape(1)) - } - (Symbol.Group(gramList: _*), gradScale) - } - - def getLoss(gram: Symbol, content: Symbol): (Symbol, Symbol) = { - var gramLoss = List[Symbol]() - for (i <- 0 until gram.listOutputs().length) { - val gvar = Symbol.Variable(s"target_gram_$i") - gramLoss = gramLoss :+ Symbol.api.sum(Some( - Symbol.api.square(Some(gvar - gram.get(i))) - )) - } - val cvar = Symbol.Variable("target_content") - val contentLoss = Symbol.api.sum(Some(Symbol.api.square(Some(cvar - content)))) - (Symbol.Group(gramLoss: _*), contentLoss) - } - - def getContentModule(prefix: String, dShape: Shape, - ctx: Context, params: Map[String, NDArray]): Module = { - val (_, sym) = ModelVgg19.getVggSymbol(prefix + "_", true) - val init = new PretrainedInit(prefix, params) - new Module(symbol = sym, context = ctx, - dataShapes = Map(s"${prefix}_data" -> dShape), - initializer = init, forTraining = false) - } - - def getLossModule(prefix: String, dShape: Shape, - ctx: Context, params: Map[String, NDArray]): (Module, List[Int]) = { - val inputShape = Map(s"${prefix}_data" -> dShape) - val (style, content) = ModelVgg19.getVggSymbol(prefix + "_") - val (gram, gScale) = styleGramSymbol(inputShape, style) - val (styleLoss, contentLoss) = getLoss(gram, content) - val sym = Symbol.Group(styleLoss, contentLoss) - val init = new PretrainedInit(prefix, params, true) - val mod = new Module(symbol = sym, context = ctx, - dataShapes = Map(s"${prefix}_data" -> dShape), - initializer = init, forTraining = true, - inputsNeedGrad = true) - (mod, gScale) - } -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostInference.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostInference.scala deleted file mode 100644 index cd1ed59b6e6d..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostInference.scala +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnetexamples.neuralstyle.end2end - -import org.apache.mxnet.{Context, ResourceScope, Shape} -import org.kohsuke.args4j.{CmdLineParser, Option} -import org.slf4j.LoggerFactory - -import scala.collection.JavaConverters._ - -object BoostInference { - - private val logger = LoggerFactory.getLogger(classOf[BoostInference]) - - def runInference(modelPath: String, outputPath: String, gaussianRadius : Int, - inputImage : String, ctx : Context): Unit = { - ResourceScope.using() { - val dShape = Shape(1, 3, 480, 640) - val clipNorm = 1.0f * dShape.product - // generator - val gens = Array( - GenV4.getModule("g0", dShape, ctx, isTrain = false), - GenV3.getModule("g1", dShape, ctx, isTrain = false), - GenV3.getModule("g2", dShape, ctx, isTrain = false), - GenV4.getModule("g3", dShape, ctx, isTrain = false) - ) - gens.zipWithIndex.foreach { case (gen, i) => - gen.loadParams(s"$modelPath/$i/v3_0002-0026000.params") - } - - val contentNp = - DataProcessing.preprocessContentImage(s"$inputImage", dShape, ctx) - var data = Array(contentNp) - for (i <- 0 until gens.length) { - ResourceScope.using() { - gens(i).forward(data.takeRight(1)) - val newImg = gens(i).getOutputs()(0) - data :+= newImg - DataProcessing.saveImage(newImg, s"$outputPath/out_$i.jpg", gaussianRadius) - logger.info(s"Converted image: $outputPath/out_$i.jpg") - } - } - } - } - - def main(args: Array[String]): Unit = { - val stce = new BoostInference - val parser: CmdLineParser = new CmdLineParser(stce) - try { - parser.parseArgument(args.toList.asJava) - assert(stce.modelPath != null - && stce.inputImage != null - && stce.outputPath != null) - - val ctx = if (stce.gpu == -1) Context.cpu() else Context.gpu(stce.gpu) - - runInference(stce.modelPath, stce.outputPath, stce.gaussianRadius, stce.inputImage, ctx) - - } catch { - case ex: Exception => { - logger.error(ex.getMessage, ex) - parser.printUsage(System.err) - sys.exit(1) - } - } - } -} - -class BoostInference { - @Option(name = "--model-path", usage = "the saved model path") - private val modelPath: String = null - @Option(name = "--input-image", usage = "the content image to convert") - private val inputImage: String = null - @Option(name = "--output-path", usage = "the output result path") - private val outputPath: String = null - @Option(name = "--gpu", usage = "which gpu card to use; default is -1, which means using cpu") - private val gpu: Int = -1 - @Option(name = "--gaussian-radius", usage = "the gaussian blur filter radius") - private val gaussianRadius: Int = 2 -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostTrain.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostTrain.scala deleted file mode 100644 index 1c9adbaf7560..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/BoostTrain.scala +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.neuralstyle.end2end - -import java.io.File - -import org.apache.mxnet.{Context, Executor, NDArray, ResourceScope, Shape, Symbol} -import org.apache.mxnet.optimizer.SGD -import org.kohsuke.args4j.{CmdLineParser, Option} -import org.slf4j.LoggerFactory - -import scala.collection.JavaConverters._ -import scala.util.Random - - -object BoostTrain { - - private val logger = LoggerFactory.getLogger(classOf[BoostTrain]) - - def getTvGradExecutor(img: NDArray, ctx: Context, tvWeight: Float): Executor = { - // create TV gradient executor with its input bound to img - if (tvWeight <= 0.0f) return null - - val nChannel = img.shape(1) - val sImg = Symbol.Variable("img") - val sKernel = Symbol.Variable("kernel") - val channels = Symbol.api.SliceChannel(data = Some(sImg), num_outputs = nChannel) - val toConcat = (0 until nChannel).map( i => - Symbol.api.Convolution(data = Some(channels.get(i)), weight = Some(sKernel), - num_filter = 1, kernel = Shape(3, 3), pad = Some(Shape(1, 1)), - no_bias = Some(true), stride = Some(Shape(1, 1))) - ).toArray - val out = Symbol.api.Concat(data = toConcat, num_args = toConcat.length) * tvWeight - val kernel = { - val tmp = NDArray.empty(Shape(1, 1, 3, 3), ctx) - tmp.set(Array[Float](0, -1, 0, -1, 4, -1, 0, -1, 0)) - tmp / 8.0f - } - out.bind(ctx, Map("img" -> img, "kernel" -> kernel)) - } - - def runTraining(dataPath : String, vggModelPath: String, ctx : Context, - styleImage : String, saveModelPath : String) : Unit = { - ResourceScope.using() { - // params - val vggParams = NDArray.load2Map(vggModelPath) - val styleWeight = 1.2f - val contentWeight = 10f - val dShape = Shape(1, 3, 384, 384) - val clipNorm = 0.05f * dShape.product - val modelPrefix = "v3" - // init style - val styleNp = DataProcessing.preprocessStyleImage(styleImage, dShape, ctx) - var styleMod = Basic.getStyleModule("style", dShape, ctx, vggParams) - styleMod.forward(Array(styleNp)) - val styleArray = styleMod.getOutputs().map(_.copyTo(Context.cpu())) - styleMod.dispose() - styleMod = null - - // content - val contentMod = Basic.getContentModule("content", dShape, ctx, vggParams) - - // loss - val (loss, gScale) = Basic.getLossModule("loss", dShape, ctx, vggParams) - val extraArgs = (0 until styleArray.length) - .map(i => s"target_gram_$i" -> styleArray(i)).toMap - loss.setParams(extraArgs) - var gradArray = Array[NDArray]() - for (i <- 0 until styleArray.length) { - gradArray = gradArray :+ (NDArray.ones(Shape(1), ctx) * (styleWeight / gScale(i))) - } - gradArray = gradArray :+ (NDArray.ones(Shape(1), ctx) * contentWeight) - - // generator - val gens = Array( - GenV4.getModule("g0", dShape, ctx), - GenV3.getModule("g1", dShape, ctx), - GenV3.getModule("g2", dShape, ctx), - GenV4.getModule("g3", dShape, ctx) - ) - gens.foreach { gen => - val opt = new SGD(learningRate = 1e-4f, - momentum = 0.9f, - wd = 5e-3f, - clipGradient = 5f) - gen.initOptimizer(opt) - } - - var filelist = new File(dataPath).list().toList - val numImage = filelist.length - logger.info(s"Dataset size: $numImage") - - val tvWeight = 1e-2f - - val startEpoch = 0 - val endEpoch = 3 - - for (k
<- 0 until gens.length) { - val path = new File(s"${saveModelPath}/$k") - if (!path.exists()) path.mkdir() - } - - // train - for (i <- startEpoch until endEpoch) { - ResourceScope.using() { - filelist = Random.shuffle(filelist) - for (idx <- filelist.indices) { - var dataArray = Array[NDArray]() - var lossGradArray = Array[NDArray]() - val data = - DataProcessing.preprocessContentImage(s"${dataPath}/${filelist(idx)}", dShape, ctx) - dataArray = dataArray :+ data - // get content - contentMod.forward(Array(data)) - // set target content - loss.setParams(Map("target_content" -> contentMod.getOutputs()(0))) - // gen_forward - for (k <- 0 until gens.length) { - gens(k).forward(dataArray.takeRight(1)) - dataArray = dataArray :+ gens(k).getOutputs()(0) - // loss forward - loss.forward(dataArray.takeRight(1)) - loss.backward(gradArray) - lossGradArray = lossGradArray :+ loss.getInputGrads()(0) - } - val grad = NDArray.zeros(data.shape, ctx) - for (k <- gens.length - 1 to 0 by -1) { - val tvGradExecutor = getTvGradExecutor(gens(k).getOutputs()(0), ctx, tvWeight) - tvGradExecutor.forward() - grad += lossGradArray(k) + tvGradExecutor.outputs(0) - val gNorm = NDArray.norm(grad) - if (gNorm.toScalar > clipNorm) { - grad *= clipNorm / gNorm.toScalar - } - gens(k).backward(Array(grad)) - gens(k).update() - gNorm.dispose() - tvGradExecutor.dispose() - } - grad.dispose() - if (idx % 20 == 0) { - logger.info(s"Epoch $i: Image $idx") - for (k <- 0 until gens.length) { - val n = NDArray.norm(gens(k).getInputGrads()(0)) - logger.info(s"Data Norm : ${n.toScalar / dShape.product}") - n.dispose() - } - } - if (idx % 1000 == 0) { - for (k <- 0 until gens.length) { - gens(k).saveParams( - s"${saveModelPath}/$k/${modelPrefix}_" + - s"${"%04d".format(i)}-${"%07d".format(idx)}.params") - } - } - data.dispose() - } - } - } - } - } - - def main(args: Array[String]): Unit = { - val stin = new BoostTrain - val parser: CmdLineParser = new CmdLineParser(stin) - try { - parser.parseArgument(args.toList.asJava) - assert(stin.dataPath != null - && stin.vggModelPath != null - && stin.saveModelPath != null - && stin.styleImage != null) - - val ctx = if (stin.gpu == -1) Context.cpu() else Context.gpu(stin.gpu) - runTraining(stin.dataPath, stin.vggModelPath, ctx, stin.styleImage, stin.saveModelPath) - } catch { - case ex: Exception => { - logger.error(ex.getMessage, ex) - parser.printUsage(System.err) - sys.exit(1) - } - } - } -} - -class BoostTrain { - @Option(name = "--data-path", usage = "the input train data path") - private val dataPath: String = null - @Option(name = "--vgg-model-path", usage = "the pretrained model to use: ['vgg']") - private val vggModelPath: String = null - @Option(name = "--save-model-path", usage = "the save model path") - private val saveModelPath: String = null - @Option(name = "--style-image", usage = "the style image") - private val styleImage: String = null - @Option(name = "--gpu", usage = "which gpu card to use, default is -1, means using cpu") - private val gpu: Int = -1 -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/DataProcessing.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/DataProcessing.scala deleted file mode 100644 index 5b01d2016467..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/DataProcessing.scala +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license 
agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.neuralstyle.end2end - -import java.io.File - -import com.sksamuel.scrimage.{Image, Pixel} -import com.sksamuel.scrimage.filter.GaussianBlurFilter -import com.sksamuel.scrimage.nio.JpegWriter -import org.apache.mxnet.{Context, NDArray, Shape} - - -object DataProcessing { - - def preprocessContentImage(path: String, - dShape: Shape = null, ctx: Context): NDArray = { - val img = Image.fromFile(new File(path)) - val resizedImg = img.scaleTo(dShape(3), dShape(2)) - val sample = NDArray.empty(Shape(1, 3, resizedImg.height, resizedImg.width), ctx) - val datas = { - val rgbs = resizedImg.iterator.toArray.map { p => - (p.red, p.green, p.blue) - } - val r = rgbs.map(_._1 - 123.68f) - val g = rgbs.map(_._2 - 116.779f) - val b = rgbs.map(_._3 - 103.939f) - r ++ g ++ b - } - sample.set(datas) - sample - } - - def preprocessStyleImage(path: String, shape: Shape, ctx: Context): NDArray = { - val img = Image.fromFile(new File(path)) - val resizedImg = img.scaleTo(shape(3), shape(2)) - val sample = NDArray.empty(Shape(1, 3, shape(2), shape(3)), ctx) - val datas = { - val rgbs = resizedImg.iterator.toArray.map { p => - (p.red, p.green, p.blue) - } - val r = rgbs.map(_._1 - 123.68f) - val g = rgbs.map(_._2 - 116.779f) - val b = rgbs.map(_._3 - 103.939f) - r ++ g ++ b - } - sample.set(datas) - sample - } - - def clip(array: Array[Float]): Array[Float] = array.map { a => - if (a < 0) 0f - else if (a > 255) 255f - else a - } - - def postprocessImage(img: NDArray): Image = { - val datas = img.toArray - val spatialSize = img.shape(2) * img.shape(3) - val r = clip(datas.take(spatialSize).map(_ + 123.68f)) - val g = clip(datas.drop(spatialSize).take(spatialSize).map(_ + 116.779f)) - val b = clip(datas.takeRight(spatialSize).map(_ + 103.939f)) - val pixels = for (i <- 0 until spatialSize) - yield Pixel(r(i).toInt, g(i).toInt, b(i).toInt, 255) - Image(img.shape(3), img.shape(2), pixels.toArray) - } - - def saveImage(img: NDArray, filename: String, radius: Int): Unit = { - val out = postprocessImage(img) - val gauss = GaussianBlurFilter(radius).op - val result = Image(out.width, out.height) - gauss.filter(out.awt, result.awt) - result.output(filename)(JpegWriter()) - } -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/GenV3.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/GenV3.scala deleted file mode 100644 index d7ab59e28402..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/GenV3.scala +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.neuralstyle.end2end - -import org.apache.mxnet.{Context, Shape, Symbol, Xavier} - - -object GenV3 { - def Conv(data: Symbol, numFilter: Int, kernel: (Int, Int) = (5, 5), - pad: (Int, Int) = (2, 2), stride: (Int, Int) = (2, 2)): Symbol = { - val sym1 = Symbol.api.Convolution(data = Some(data), num_filter = numFilter, - kernel = Shape(kernel._1, kernel._2), stride = Some(Shape(stride._1, stride._2)), - pad = Some(Shape(pad._1, pad._2)), no_bias = Some(false)) - val sym2 = Symbol.api.BatchNorm(data = Some(sym1), fix_gamma = Some(false)) - val sym3 = Symbol.api.LeakyReLU(data = Some(sym2), act_type = Some("leaky")) - sym2.dispose() - sym1.dispose() - sym3 - } - - def Deconv(data: Symbol, numFilter: Int, imHw: (Int, Int), - kernel: (Int, Int) = (7, 7), pad: (Int, Int) = (2, 2), stride: (Int, Int) = (2, 2), - crop: Boolean = true, out: Boolean = false): Symbol = { - var sym = Symbol.api.Deconvolution(data = Some(data), num_filter = numFilter, - kernel = Shape(kernel._1, kernel._2), stride = Some(Shape(stride._1, stride._2)), - pad = Some(Shape(pad._1, pad._2)), no_bias = Some(true)) - if (crop) sym = Symbol.api.Crop(data = Array(sym), offset = Some(Shape(1, 1)), - h_w = Some(Shape(imHw._1, imHw._2)), num_args = 1) - sym = Symbol.api.BatchNorm(data = Some(sym), fix_gamma = Some(false)) - if (out == false) Symbol.api.LeakyReLU(data = Some(sym), act_type = Some("leaky")) - else Symbol.api.Activation(data = Some(sym), act_type = "tanh") - } - - def getGenerator(prefix: String, imHw: (Int, Int)): Symbol = { - val data = Symbol.Variable(s"${prefix}_data") - val conv1 = Conv(data, 64) // 192 - val conv1_1 = Conv(conv1, 48, kernel = (3, 3), pad = (1, 1), stride = (1, 1)) - val conv2 = Conv(conv1_1, 128) // 96 - val conv2_1 = Conv(conv2, 96, kernel = (3, 3), pad = (1, 1), stride = (1, 1)) - val conv3 = Conv(conv2_1, 256) // 48 - val conv3_1 = Conv(conv3, 192, kernel = (3, 3), pad = (1, 1), stride = (1, 1)) - val deconv1 = Deconv(conv3_1, 128, (imHw._1 / 4, imHw._2 / 4)) + conv2 - val conv4_1 = Conv(deconv1, 160, kernel = (3, 3), pad = (1, 1), stride = (1, 1)) - val deconv2 = Deconv(conv4_1, 64, (imHw._1 / 2, imHw._2 / 2)) + conv1 - val conv5_1 = Conv(deconv2, 96, kernel = (3, 3), pad = (1, 1), stride = (1, 1)) - val deconv3 = Deconv(conv5_1, 3, imHw, kernel = (8, 8), pad = (3, 3), out = true, crop = false) - val rawOut = (deconv3 * 128) + 128 - val norm = Symbol.api.SliceChannel(data = Some(rawOut), num_outputs = 3) - val rCh = norm.get(0) - 123.68f - val gCh = norm.get(1) - 116.779f - val bCh = norm.get(2) - 103.939f - val normOut = Symbol.api.Concat(data = Array(rCh, gCh, bCh), num_args = 3) - normOut * 0.4f + data * 0.6f - } - - def getModule(prefix: String, dShape: Shape, ctx: Context, isTrain: Boolean = true): Module = { - val sym = getGenerator(prefix, (dShape(2), dShape(3))) - val (dataShapes, forTraining, inputsNeedGrad) = { - val dataShape = Map(s"${prefix}_data" -> dShape) - if 
(isTrain) (dataShape, true, true) - else (dataShape, false, false) - } - val mod = new Module(symbol = sym, context = ctx, - dataShapes = dataShapes, - initializer = new Xavier(magnitude = 2f), - forTraining = forTraining, inputsNeedGrad = inputsNeedGrad) - mod - } -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/GenV4.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/GenV4.scala deleted file mode 100644 index 82fc9b6ce109..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/GenV4.scala +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.neuralstyle.end2end - -import org.apache.mxnet.{Context, Shape, Symbol, Xavier} - - -object GenV4 { - - def Conv(data: Symbol, numFilter: Int, workspace : Long, kernel: (Int, Int) = (5, 5), - pad: (Int, Int) = (2, 2)): Symbol = { - val sym1 = Symbol.api.Convolution(data = Some(data), num_filter = numFilter, - kernel = Shape(kernel._1, kernel._2), workspace = Some(workspace), - pad = Some(Shape(pad._1, pad._2)), no_bias = Some(false)) - val sym2 = Symbol.api.BatchNorm(data = Some(sym1), fix_gamma = Some(false)) - val sym3 = Symbol.api.LeakyReLU(data = Some(sym2), act_type = Some("leaky")) - sym2.dispose() - sym1.dispose() - sym3 - } - - def getGenerator(prefix: String, imHw: (Int, Int)): Symbol = { - val data = Symbol.Variable(s"${prefix}_data") - - var conv1_1 = Conv(data, 48, 4096) - val conv2_1 = Conv(conv1_1, 32, 4096) - var conv3_1 = Conv(conv2_1, 64, 4096, (3, 3), (1, 1)) - var conv4_1 = Conv(conv3_1, 32, 4096) - var conv5_1 = Conv(conv4_1, 48, 4096) - var conv6_1 = Conv(conv5_1, 32, 4096) - var out = Symbol.api.Convolution(data = Some(conv6_1), num_filter = 3, kernel = Shape(3, 3), - pad = Some(Shape(1, 1)), no_bias = Some(true), workspace = Some(4096)) - out = Symbol.api.BatchNorm(data = Some(out), fix_gamma = Some(false)) - out = Symbol.api.Activation(data = Some(out), act_type = "tanh") - val rawOut = (out * 128) + 128 - val norm = Symbol.api.SliceChannel(data = Some(rawOut), num_outputs = 3) - val rCh = norm.get(0) - 123.68f - val gCh = norm.get(1) - 116.779f - val bCh = norm.get(2) - 103.939f - val normOut = Symbol.api.Concat(data = Array(rCh, gCh, bCh), num_args = 3) - normOut * 0.4f + data * 0.6f - } - - def getModule(prefix: String, dShape: Shape, ctx: Context, isTrain: Boolean = true): Module = { - val sym = getGenerator(prefix, (dShape(2), dShape(3))) - val (dataShapes, forTraining, inputsNeedGrad) = { - val dataShape = Map(s"${prefix}_data" -> dShape) - if (isTrain) (dataShape, true, true) - else (dataShape, false, false) - } - val mod = new Module(symbol = sym, context = ctx, - dataShapes 
= dataShapes, - initializer = new Xavier(magnitude = 2f), - forTraining = forTraining, inputsNeedGrad = inputsNeedGrad) - mod - } -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/Module.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/Module.scala deleted file mode 100644 index 1d11f8864063..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/neuralstyle/end2end/Module.scala +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.neuralstyle.end2end - -import org.apache.mxnet.{Context, Initializer, NDArray, Optimizer, Shape, Symbol, Uniform} -import org.slf4j.LoggerFactory - -class Module(symbol: Symbol, - context: Context, - dataShapes: Map[String, Shape], - labelShapes: Map[String, Shape] = Map[String, Shape](), - initializer: Initializer = new Uniform(0.01f), - forTraining: Boolean = true, - inputsNeedGrad: Boolean = false) { - - private val logger = LoggerFactory.getLogger(classOf[Module]) - - private val dataLabelShape = dataShapes ++ labelShapes - private val (argDict, gradDict, auxDict) = { - val (argShapes, outShapes, auxShapes) = symbol.inferShape(dataLabelShape) - val argNames = symbol.listArguments() - val argDict = argNames.zip(argShapes.map(NDArray.empty(_, context))).toMap - - val filterShapes = if (inputsNeedGrad) labelShapes else dataLabelShape - val gradDict = argNames.zip(argShapes).filter { case (name, shape) => - !filterShapes.contains(name) - }.map(x => x._1 -> NDArray.empty(x._2, context) ).toMap - - val auxDict = symbol.listAuxiliaryStates().zip(auxShapes.map(NDArray.empty(_, context))).toMap - - (argDict, gradDict, auxDict) - } - - private val dataArrs = dataShapes.keys.toArray.map(argDict(_)) - private val labelArrs = labelShapes.keys.toArray.map(argDict(_)) - private val dataGrads = { - if (inputsNeedGrad) dataShapes.keys.toArray.map(gradDict(_)) - else null - } - - argDict.foreach { case (name, ndArray) => - if (!dataLabelShape.contains(name)) initializer(name, ndArray) - } - - private val executor = symbol.bind(context, argDict, gradDict, "write", auxDict, null, null) - - private var optimizer: Optimizer = null - private var paramsGrads: List[(Int, String, NDArray, AnyRef)] = null - private var optimizerInitialized: Boolean = false - - def initOptimizer(opt: Optimizer): Unit = { - this.optimizer = opt - this.paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) => - (idx, name, grad, this.optimizer.createState(idx, argDict(name))) - } - this.optimizerInitialized = true - } - - def forward(datas: Array[NDArray], labels: Array[NDArray] = Array[NDArray]()): Unit = { - datas.zip(this.dataArrs).foreach { case (src, 
dest) => dest.set(src) } - labels.zip(this.labelArrs).foreach { case (src, dest) => dest.set(src) } - this.executor.forward(isTrain = forTraining) - } - - def backward(outGrads: Array[NDArray]): Unit = { - this.executor.backward(outGrads) - } - - def update(): Unit = { - assert(this.optimizerInitialized) - paramsGrads.foreach { case (idx, name, grad, optimState) => - this.optimizer.update(idx, argDict(name), grad, optimState) - } - } - - def dispose(): Unit = { - this.executor.dispose() - this.argDict.foreach(_._2.dispose()) - this.gradDict.foreach(_._2.dispose()) - this.auxDict.foreach(_._2.dispose()) - } - - def setParams(params: Map[String, NDArray]): Unit = { - params.foreach { case (name, arr) => - if (this.argDict.contains(name)) { - this.argDict(name).set(arr) - } - else if (this.auxDict.contains(name)) { - this.auxDict(name).set(arr) - } - else logger.info(name) - } - } - - def loadParams(fName: String): Unit = { - val saveDict = NDArray.load2Map(fName) - var params = Map[String, NDArray]() - saveDict.foreach { case (k, v) => - val (argType, name) = { - val tmp = k.split(":") - (tmp(0), tmp(1)) - } - if (argType == "arg" || argType == "aux") { - params += name -> v - } - } - this.setParams(params) - } - - def saveParams(fName: String): Unit = { - val saveDict = { - argDict.filter(x => !dataLabelShape.contains(x._1)) - .map { case (k, v) => s"arg:$k" -> v } ++ - auxDict.map { case (k, v) => s"aux:$k" -> v } - } - NDArray.save(fName, saveDict) - } - - def getOutputs(): Array[NDArray] = this.executor.outputs - - def getInputGrads(): Array[NDArray] = this.dataGrads -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/profiler/ProfilerMatMul.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/profiler/ProfilerMatMul.scala deleted file mode 100644 index 95c5d77c8777..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/profiler/ProfilerMatMul.scala +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnetexamples.profiler - -import org.kohsuke.args4j.{CmdLineParser, Option} -import org.slf4j.LoggerFactory -import scala.collection.JavaConverters._ -import org.apache.mxnet.Context -import org.apache.mxnet.Profiler -import java.io.File -import org.apache.mxnet.Symbol -import org.apache.mxnet.Shape -import org.apache.mxnet.Random - -/** - * @author Depeng Liang - */ -object ProfilerMatMul { - private val logger = LoggerFactory.getLogger(classOf[ProfilerMatMul]) - - def main(args: Array[String]): Unit = { - val erul = new ProfilerMatMul - val parser: CmdLineParser = new CmdLineParser(erul) - try { - parser.parseArgument(args.toList.asJava) - val ctx = if (erul.gpu >= 0) Context.gpu(erul.gpu) else Context.cpu() - - val path = s"${erul.outputPath}${File.separator}${erul.profilerName}" - val kwargs = Map("file_name" -> path, "profile_" + erul.profilerMode -> "1") - Profiler.profilerSetConfig(kwargs) - logger.info(s"profile file save to $path") - - val A = Symbol.Variable("A") - val B = Symbol.Variable("B") - val C = Symbol.api.dot(Some(A), Some(B)) - - val executor = C.simpleBind(ctx, "write", - Map("A" -> Shape(4096, 4096), "B" -> Shape(4096, 4096))) - - val a = Random.uniform(-1.0f, 1.0f, shape = Shape(4096, 4096)) - val b = Random.uniform(-1.0f, 1.0f, shape = Shape(4096, 4096)) - - a.copyTo(executor.argDict("A")) - b.copyTo(executor.argDict("B")) - - logger.info("execution begin") - var t0 = 0L - var t1 = 0L - for (i <- 0 until erul.iterNum) { - if (i == erul.beginProfilingIter) { - t0 = System.currentTimeMillis() - Profiler.profilerSetState("run") - } - if (i == erul.endProfilingIter) { - t1 = System.currentTimeMillis() - Profiler.profilerSetState("stop") - } - executor.forward() - executor.outputs(0).waitToRead() - } - logger.info("execution end") - val duration = t1 - t0 - logger.info(s"duration: ${duration / 1000f}s") - logger.info(s"${duration.toFloat / (erul.endProfilingIter - erul.beginProfilingIter)}ms/operator") - } catch { - case ex: Exception => { - logger.error(ex.getMessage, ex) - parser.printUsage(System.err) - sys.exit(1) - } - } - } -} - -class ProfilerMatMul { - @Option(name = "--profiler-mode", usage = "the profiler mode, can be \"symbolic\"" - + ", \"imperative\", \"api\", \"mem\", etc.") - private val profilerMode: String = "symbolic" - @Option(name = "--output-path", usage = "the profile file output directory.") - private val outputPath: String = "." - @Option(name = "--profile-filename", usage = "the profile file name.") - private val profilerName: String = "profile_matmul_20iter.json" - @Option(name = "--iter-num", usage = "the number of iterations.") - private val iterNum: Int = 100 - @Option(name = "--begin-profiling-iter", usage = "the iteration at which to start the profiler.") - private val beginProfilingIter: Int = 50 - @Option(name = "--end-profiling-iter", usage = "the iteration at which to stop the profiler.") - private val endProfilingIter: Int = 70 - @Option(name = "--gpu", usage = "which gpu card to use; default is -1, which means using cpu") - private val gpu: Int = -1 -} diff --git a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/profiler/ProfilerNDArray.scala b/scala-package/examples/src/main/scala/org/apache/mxnetexamples/profiler/ProfilerNDArray.scala deleted file mode 100644 index 3e8034007257..000000000000 --- a/scala-package/examples/src/main/scala/org/apache/mxnetexamples/profiler/ProfilerNDArray.scala +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements.
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.profiler - -import org.kohsuke.args4j.{CmdLineParser, Option} -import org.slf4j.LoggerFactory -import scala.collection.JavaConverters._ -import java.io.File -import org.apache.mxnet.Profiler -import org.apache.mxnet.Random -import org.apache.mxnet.Shape -import org.apache.mxnet.NDArray -import org.apache.mxnet.Context - -/** - * @author Depeng Liang - */ -object ProfilerNDArray { - private val logger = LoggerFactory.getLogger(classOf[ProfilerNDArray]) - - def testBroadcast(): Unit = { - val sampleNum = 1000 - def testBroadcastTo(): Unit = { - for (i <- 0 until sampleNum) { - val nDim = scala.util.Random.nextInt(2) + 1 - val targetShape = Shape((0 until nDim).map(i => scala.util.Random.nextInt(10) + 1)) - val shape = targetShape.toArray.map { s => - if (scala.util.Random.nextInt(2) == 1) 1 - else s - } - val dat = NDArray.empty(shape: _*) - val randomRet = (0 until shape.product) - .map(r => scala.util.Random.nextFloat() - 0.5f).toArray - dat.set(randomRet) - val ndArrayRet = NDArray.api.broadcast_to(dat, Some(targetShape)) - require(ndArrayRet.shape == targetShape) - val err = { - // implementation of broadcast - val ret = { - (randomRet /: shape.zipWithIndex.reverse){ (acc, elem) => elem match { case (s, i) => - if (s != targetShape(i)) { - acc.grouped(shape.takeRight(shape.length - i).product).map {g => - (0 until targetShape(i)).map(x => g).flatten - }.flatten.toArray - } else acc - }} - } - val tmp = ndArrayRet.toArray.zip(ret).map{ case (l, r) => Math.pow(l - r, 2) } - tmp.sum / tmp.length - } - require(err < 1E-8) - ndArrayRet.dispose() - dat.dispose() - } - } - testBroadcastTo() - } - - def randomNDArray(dim: Int): NDArray = { - val tmp = Math.pow(1000, 1.0 / dim).toInt - val shape = Shape((0 until dim).map(d => scala.util.Random.nextInt(tmp) + 1)) - Random.uniform(-10f, 10f, shape) - } - - def testNDArraySaveload(): Unit = { - val maxDim = 5 - val nRepeat = 10 - val fileName = s"${System.getProperty("java.io.tmpdir")}/tmpList.bin" - for (repeat <- 0 until nRepeat) { - try { - val data = (0 until 10).map(i => randomNDArray(scala.util.Random.nextInt(4) + 1)) - NDArray.save(fileName, data) - val data2 = NDArray.load2Array(fileName) - require(data.length == data2.length) - for ((x, y) <- data.zip(data2)) { - val tmp = x - y - require(tmp.toArray.sum == 0) - tmp.dispose() - } - val dMap = data.zipWithIndex.map { case (arr, i) => - s"NDArray xx $i" -> arr - }.toMap - NDArray.save(fileName, dMap) - val dMap2 = NDArray.load2Map(fileName) - require(dMap.size == dMap2.size) - for ((k, x) <- dMap) { - val y = dMap2(k) - val tmp = x - y - require(tmp.toArray.sum == 0) - tmp.dispose() - } - data.foreach(_.dispose()) - } finally { - val file = new File(fileName) - file.delete() - } - } - } - - def testNDArrayCopy(): Unit = { - val c = Random.uniform(-10f, 10f, Shape(10, 
10)) - val d = c.copyTo(Context.cpu(0)) - val tmp = c - d - require(tmp.toArray.map(Math.abs).sum == 0) - c.dispose() - d.dispose() - } - - def reldiff(a: NDArray, b: NDArray): Float = { - val diff = NDArray.api.sum(NDArray.api.abs(a - b)).toScalar - val norm = NDArray.api.sum(NDArray.api.abs(a)).toScalar - diff / norm - } - - def reldiff(a: Array[Float], b: Array[Float]): Float = { - val diff = - (a zip b).map { case (aElem, bElem) => Math.abs(aElem - bElem) }.sum - val norm: Float = a.reduce(Math.abs(_) + Math.abs(_)) - diff / norm - } - - def testNDArrayNegate(): Unit = { - val rand = Random.uniform(-10f, 10f, Shape(2, 3, 4)) - val npy = rand.toArray - val arr = NDArray.empty(Shape(2, 3, 4)) - arr.set(npy) - require(reldiff(npy, arr.toArray) < 1e-6f) - val negativeArr = -arr - require(reldiff(npy.map(_ * -1f), negativeArr.toArray) < 1e-6f) - // a final check to make sure the negation (-) is not implemented - // as inplace operation, so the contents of arr does not change after - // we compute (-arr) - require(reldiff(npy, arr.toArray) < 1e-6f) - rand.dispose() - arr.dispose() - negativeArr.dispose() - } - - def testNDArrayScalar(): Unit = { - val c = NDArray.empty(10, 10) - val d = NDArray.empty(10, 10) - c.set(0.5f) - d.set(1.0f) - d -= c * 2f / 3f * 6f - c += 0.5f - require(c.toArray.sum - 100f < 1e-5f) - require(d.toArray.sum + 100f < 1e-5f) - c.set(2f) - require(c.toArray.sum - 200f < 1e-5f) - d.set(-c + 2f) - require(d.toArray.sum < 1e-5f) - c.dispose() - d.dispose() - } - - def testClip(): Unit = { - val shape = Shape(10) - val A = Random.uniform(-10f, 10f, shape) - val B = NDArray.api.clip(A, -2f, 2f) - val B1 = B.toArray - require(B1.forall { x => x >= -2f && x <= 2f }) - } - - def testDot(): Unit = { - val a = Random.uniform(-3f, 3f, Shape(3, 4)) - val b = Random.uniform(-3f, 3f, Shape(4, 5)) - val c = NDArray.api.dot(a, b) - val A = a.toArray.grouped(4).toArray - val B = b.toArray.grouped(5).toArray - val C = (Array[Array[Float]]() /: A)((acc, row) => acc :+ row.zip(B).map(z => - z._2.map(_ * z._1)).reduceLeft(_.zip(_).map(x => x._1 + x._2))).flatten - require(reldiff(c.toArray, C) < 1e-5f) - a.dispose() - b.dispose() - c.dispose() - } - - def testNDArrayOnehot(): Unit = { - val shape = Shape(100, 20) - var npy = (0 until shape.product).toArray.map(_.toFloat) - val arr = NDArray.empty(shape) - arr.set(npy) - val nRepeat = 3 - for (repeat <- 0 until nRepeat) { - val indices = (0 until shape(0)).map(i => scala.util.Random.nextInt(shape(1))) - npy = npy.map(i => 0f) - for (i <- 0 until indices.length) npy(i * shape(1) + indices(i)) = 1f - val ind = NDArray.empty(shape(0)) - ind.set(indices.toArray.map(_.toFloat)) - NDArray.onehotEncode(ind, arr) - require(arr.toArray.zip(npy).map(x => x._1 - x._2).sum == 0f) - ind.dispose() - } - arr.dispose() - } - - def main(args: Array[String]): Unit = { - val eray = new ProfilerNDArray - val parser: CmdLineParser = new CmdLineParser(eray) - try { - parser.parseArgument(args.toList.asJava) - - val path = s"${eray.outputPath}${File.separator}${eray.profilerName}" - val kwargs = Map("file_name" -> path, "profile_" + eray.profilerMode -> "1") - Profiler.profilerSetConfig(kwargs) - logger.info(s"profile file save to $path") - - Profiler.profilerSetState("run") - testBroadcast() - testNDArraySaveload() - testNDArrayCopy() - testNDArrayNegate() - testNDArrayScalar() - testClip() - testDot() - testNDArrayOnehot() - Profiler.profilerSetState("stop") - - } catch { - case ex: Exception => { - logger.error(ex.getMessage, ex) - 
parser.printUsage(System.err) - sys.exit(1) - } - } - } -} - -class ProfilerNDArray { - @Option(name = "--profiler-mode", usage = "the profiler mode, can be \"symbolic\"" - + ", \"imperative\", \"api\", \"mem\", etc.") - private val profilerMode: String = "symbolic" - @Option(name = "--output-path", usage = "the profile file output directory.") - private val outputPath: String = "." - @Option(name = "--profile-filename", usage = "the profile file name.") - private val profilerName: String = "profile_ndarray.json" -} diff --git a/scala-package/examples/src/test/java/org/apache/mxnetexamples/javaapi/infer/predictor/BertExampleTest.java b/scala-package/examples/src/test/java/org/apache/mxnetexamples/javaapi/infer/predictor/BertExampleTest.java deleted file mode 100644 index 0518254c297d..000000000000 --- a/scala-package/examples/src/test/java/org/apache/mxnetexamples/javaapi/infer/predictor/BertExampleTest.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.javaapi.infer.predictor; - -import org.apache.mxnetexamples.Util; -import org.apache.mxnetexamples.javaapi.infer.bert.BertQA; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; - -/** - * Test on BERT QA model - */ -public class BertExampleTest { - final static Logger logger = LoggerFactory.getLogger(BertExampleTest.class); - private static String modelPathPrefix = ""; - private static String vocabPath = ""; - - @BeforeClass - public static void downloadFile() { - logger.info("Downloading Bert QA Model"); - String tempDirPath = System.getProperty("java.io.tmpdir"); - logger.info(String.format("tempDirPath: %s", tempDirPath)); - - String baseUrl = "https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/BertQA"; - Util.downloadUrl(baseUrl + "/static_bert_qa-symbol.json", - tempDirPath + "/static_bert_qa/static_bert_qa-symbol.json", 3); - Util.downloadUrl(baseUrl + "/static_bert_qa-0002.params", - tempDirPath + "/static_bert_qa/static_bert_qa-0002.params", 3); - Util.downloadUrl(baseUrl + "/vocab.json", - tempDirPath + "/static_bert_qa/vocab.json", 3); - modelPathPrefix = tempDirPath + File.separator + "static_bert_qa/static_bert_qa"; - vocabPath = tempDirPath + File.separator + "static_bert_qa/vocab.json"; - } - - @Test - public void testBertQA() throws Exception { - BertQA bert = new BertQA(); - String Q = "When did BBC Japan start broadcasting?"; - String A = "BBC Japan was a general entertainment Channel.\n" + - " Which operated between December 2004 and April 2006.\n" + - "It ceased operations after its Japanese distributor folded."; - String[] args = new String[] { - "--model-path-prefix", modelPathPrefix, - "--model-vocab", vocabPath,
- "--model-epoch", "2", - "--input-question", Q, - "--input-answer", A, - "--seq-length", "384" - }; - bert.main(args); - } -} diff --git a/scala-package/examples/src/test/java/org/apache/mxnetexamples/javaapi/infer/predictor/PredictorExampleTest.java b/scala-package/examples/src/test/java/org/apache/mxnetexamples/javaapi/infer/predictor/PredictorExampleTest.java deleted file mode 100644 index 30bc8db447d8..000000000000 --- a/scala-package/examples/src/test/java/org/apache/mxnetexamples/javaapi/infer/predictor/PredictorExampleTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.javaapi.infer.predictor; - -import org.junit.BeforeClass; -import org.junit.Test; -import org.apache.mxnetexamples.Util; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; - -public class PredictorExampleTest { - - final static Logger logger = LoggerFactory.getLogger(PredictorExampleTest.class); - private static String modelPathPrefix = ""; - private static String inputImagePath = ""; - - @BeforeClass - public static void downloadFile() { - logger.info("Downloading resnet-18 model"); - - String tempDirPath = System.getProperty("java.io.tmpdir"); - logger.info("tempDirPath: %s".format(tempDirPath)); - - String baseUrl = "https://s3.us-east-2.amazonaws.com/scala-infer-models"; - - Util.downloadUrl(baseUrl + "/resnet-18/resnet-18-symbol.json", - tempDirPath + "/resnet18/resnet-18-symbol.json", 3); - Util.downloadUrl(baseUrl + "/resnet-18/resnet-18-0000.params", - tempDirPath + "/resnet18/resnet-18-0000.params", 3); - Util.downloadUrl(baseUrl + "/resnet-18/synset.txt", - tempDirPath + "/resnet18/synset.txt", 3); - Util.downloadUrl("https://s3.amazonaws.com/model-server/inputs/Pug-Cookie.jpg", - tempDirPath + "/inputImages/resnet18/Pug-Cookie.jpg", 3); - - modelPathPrefix = tempDirPath + File.separator + "resnet18/resnet-18"; - inputImagePath = tempDirPath + File.separator + - "inputImages/resnet18/Pug-Cookie.jpg"; - } - - @Test - public void testPredictor(){ - PredictorExample example = new PredictorExample(); - String[] args = new String[]{ - "--model-path-prefix", modelPathPrefix, - "--input-image", inputImagePath - }; - example.main(args); - } - -} diff --git a/scala-package/examples/src/test/resources/log4j.properties b/scala-package/examples/src/test/resources/log4j.properties deleted file mode 100644 index ef523cb7bc4f..000000000000 --- a/scala-package/examples/src/test/resources/log4j.properties +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# for development debugging -log4j.rootLogger = info, stdout - -log4j.appender.stdout = org.apache.log4j.ConsoleAppender -log4j.appender.stdout.Target = System.out -log4j.appender.stdout.layout = org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} [%t] [%c] [%p] - %m%n diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/customop/CustomOpExampleSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/customop/CustomOpExampleSuite.scala deleted file mode 100644 index b6894f148b11..000000000000 --- a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/customop/CustomOpExampleSuite.scala +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.mxnetexamples.customop - -import java.io.File -import java.net.URL - -import org.apache.commons.io.FileUtils -import org.apache.mxnet.Context -import org.apache.mxnet.ResourceScope; -import org.apache.mxnetexamples.Util -import org.scalatest.{BeforeAndAfterAll, FunSuite} -import org.slf4j.LoggerFactory - -import scala.language.postfixOps -import scala.sys.process.Process - -class CustomOpExampleSuite extends FunSuite with BeforeAndAfterAll { - private val logger = LoggerFactory.getLogger(classOf[CustomOpExampleSuite]) - - test("Example CI: Test Customop MNIST") { - // This test is CPU only - if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && - System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { - logger.info("CPU test only, skipped...") - } else { - ResourceScope.using() { - logger.info("Downloading mnist model") - val baseUrl = "https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci" - val tempDirPath = System.getProperty("java.io.tmpdir") - val modelDirPath = tempDirPath + File.separator + "mnist/" - val tmpFile = new File(tempDirPath + "/mnist/mnist.zip") - if (!tmpFile.exists()) { - FileUtils.copyURLToFile(new URL(baseUrl + "/mnist/mnist.zip"), - tmpFile) - } - // TODO: Need to confirm with Windows - Process("unzip " + tempDirPath + "/mnist/mnist.zip -d " - + tempDirPath + "/mnist/") ! 
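- // With the archive unzipped, ExampleCustomOp.test trains on the MNIST data in modelDirPath using the custom operator and returns an accuracy that must reach at least 0.95.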
- val context = Context.cpu() - val output = ExampleCustomOp.test(modelDirPath, context) - assert(output >= 0.95f) - } - } - } - - test("Example CI: Test CustomopRtc MNIST") { - // This test is GPU only - // TODO: RTC is deprecated, need to change to CUDA Module - val RTC_fixed = false - if (RTC_fixed) { - if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && - System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { - ResourceScope.using() { - logger.info("Downloading mnist model") - val baseUrl = "https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci" - val tempDirPath = System.getProperty("java.io.tmpdir") - val modelDirPath = tempDirPath + File.separator + "mnist/" - Util.downloadUrl(baseUrl + "/mnist/mnist.zip", - tempDirPath + "/mnist/mnist.zip") - // TODO: Need to confirm with Windows - Process("unzip " + tempDirPath + "/mnist/mnist.zip -d " - + tempDirPath + "/mnist/") ! - val context = Context.gpu() - val output = ExampleCustomOpWithRtc.test(modelDirPath, context) - assert(output >= 0.95f) - } - } else { - logger.info("GPU test only, skipped...") - } - } else { - logger.warn("RTC module is not up to date, please don't use it." + - "\nUse CudaModule instead.") - } - } -} diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/objectdetector/ObjectDetectorExampleSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/objectdetector/ObjectDetectorExampleSuite.scala deleted file mode 100644 index 918fb835f76e..000000000000 --- a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/infer/objectdetector/ObjectDetectorExampleSuite.scala +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.mxnetexamples.infer.objectdetector - -import java.io.File -import org.apache.mxnet.Context -import org.apache.mxnetexamples.Util -import org.scalatest.{BeforeAndAfterAll, FunSuite} -import org.slf4j.LoggerFactory - -import scala.language.postfixOps -import scala.sys.process.Process - -class ObjectDetectorExampleSuite extends FunSuite with BeforeAndAfterAll { - private val logger = LoggerFactory.getLogger(classOf[ObjectDetectorExampleSuite]) - - test("testObjectDetectionExample") { - logger.info("Downloading resnetssd model") - val tempDirPath = System.getProperty("java.io.tmpdir") - - logger.info("tempDirPath: %s".format(tempDirPath)) - - val modelBase = "https://s3.amazonaws.com/model-server/models/resnet50_ssd/" - val imageBase = "https://s3.amazonaws.com/model-server/inputs/" - - Util.downloadUrl(modelBase + "resnet50_ssd_model-symbol.json", - tempDirPath + "/resnetssd/resnet50_ssd_model-symbol.json") - Util.downloadUrl(modelBase + "resnet50_ssd_model-0000.params", - tempDirPath + "/resnetssd/resnet50_ssd_model-0000.params") - Util.downloadUrl(modelBase + "synset.txt", - tempDirPath + "/resnetssd/synset.txt") - Util.downloadUrl(imageBase + "dog-ssd.jpg", - tempDirPath + "/inputImages/resnetssd/dog-ssd.jpg") - - val modelDirPath = tempDirPath + File.separator + "resnetssd/" - val inputImagePath = tempDirPath + File.separator + - "inputImages/resnetssd/dog-ssd.jpg" - val inputImageDir = tempDirPath + File.separator + "inputImages/resnetssd/" - - var context = Context.cpu() - if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && - System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { - context = Context.gpu() - } - - val output = SSDClassifierExample.runObjectDetectionSingle(modelDirPath + "resnet50_ssd_model", - inputImagePath, context) - - val outputList = SSDClassifierExample.runObjectDetectionBatch( - modelDirPath + "resnet50_ssd_model", - inputImageDir, context) - - Process("rm -rf " + modelDirPath + " " + inputImageDir) ! - - assert(output(0)(0)._1 === "car") - assert(outputList(0)(0)._1 === "car") - - } -} diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyleSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyleSuite.scala deleted file mode 100644 index 5264769c6e5d..000000000000 --- a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/neuralstyle/NeuralStyleSuite.scala +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.apache.mxnetexamples.neuralstyle - -import org.apache.mxnet.{Context, ResourceScope} -import org.apache.mxnetexamples.Util -import org.apache.mxnetexamples.neuralstyle.end2end.{BoostInference, BoostTrain} -import org.scalatest.{BeforeAndAfterAll, FunSuite} -import org.slf4j.LoggerFactory - -import scala.language.postfixOps -import scala.sys.process.Process - -/** - * Neural Suite Test package - * Currently there is no plan to run to test accuracy - * This test is just to verify the model is runnable - */ -class NeuralStyleSuite extends FunSuite with BeforeAndAfterAll { - private val logger = LoggerFactory.getLogger(classOf[NeuralStyleSuite]) - - - override def beforeAll(): Unit = { - logger.info("Downloading vgg model") - val tempDirPath = System.getProperty("java.io.tmpdir") - logger.info("tempDirPath: %s".format(tempDirPath)) - val baseUrl = "https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/NeuralStyle/" - Util.downloadUrl(baseUrl + "IMG_4343.jpg", tempDirPath + "/NS/IMG_4343.jpg") - Util.downloadUrl(baseUrl + "starry_night.jpg", tempDirPath + "/NS/starry_night.jpg") - Util.downloadUrl(baseUrl + "model.zip", tempDirPath + "/NS/model.zip") - Util.downloadUrl(baseUrl + "vgg19.params", tempDirPath + "/NS/vgg19.params") - // TODO: Need to confirm with Windows - Process(s"unzip $tempDirPath/NS/model.zip -d $tempDirPath/NS/") ! - - Process(s"mkdir $tempDirPath/NS/images") ! - - for (i <- 0 until 20) { - Process(s"cp $tempDirPath/NS/IMG_4343.jpg $tempDirPath/NS/images/img$i.jpg") ! - } - } - - test("Example CI: Test Boost Inference") { - val tempDirPath = System.getProperty("java.io.tmpdir") - var ctx = Context.cpu() - if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && - System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { - ctx = Context.gpu() - } - ResourceScope.using() { - BoostInference.runInference(tempDirPath + "/NS/model", tempDirPath + "/NS", 2, - tempDirPath + "/NS/IMG_4343.jpg", ctx) - } - } - - test("Example CI: Test Boost Training") { - val tempDirPath = System.getProperty("java.io.tmpdir") - if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && - System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { - val ctx = Context.gpu() - ResourceScope.using() { - BoostTrain.runTraining(tempDirPath + "/NS/images", tempDirPath + "/NS/vgg19.params", ctx, - tempDirPath + "/NS/starry_night.jpg", tempDirPath + "/NS") - } - } else { - logger.info("GPU test only, skip CPU...") - } - } - - test("Example CI: Test Neural Style") { - val tempDirPath = System.getProperty("java.io.tmpdir") - if (System.getenv().containsKey("SCALA_TEST_ON_GPU") && - System.getenv("SCALA_TEST_ON_GPU").toInt == 1) { - val ctx = Context.gpu() - ResourceScope.using() { - NeuralStyle.runTraining("vgg19", tempDirPath + "/NS/IMG_4343.jpg", - tempDirPath + "/NS/starry_night.jpg", - ctx, tempDirPath + "/NS/vgg19.params", tempDirPath + "/NS", - 1f, 20f, 0.01f, 1, 10f, 60, 600, 50, 0.0005f) - } - } else { - logger.info("GPU test only, skip CPU") - } - } -} diff --git a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/profiler/ProfilerSuite.scala b/scala-package/examples/src/test/scala/org/apache/mxnetexamples/profiler/ProfilerSuite.scala deleted file mode 100644 index 67638b8fac0f..000000000000 --- a/scala-package/examples/src/test/scala/org/apache/mxnetexamples/profiler/ProfilerSuite.scala +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnetexamples.profiler - -import org.scalatest.{BeforeAndAfterAll, FunSuite} -import org.slf4j.LoggerFactory -import java.io.File - -import org.apache.mxnet.Profiler -import org.apache.mxnet.Context - -/** - * Integration test for profiler example. - */ -class ProfilerSuite extends FunSuite with BeforeAndAfterAll { - private val logger = LoggerFactory.getLogger(classOf[ProfilerSuite]) - - override def beforeAll(): Unit = { - logger.info("Running profiler test...") - val eray = new ProfilerNDArray - val path = System.getProperty("java.io.tmpdir") - val kwargs = Map("file_name" -> path) - logger.info(s"profile file save to $path") - - Profiler.profilerSetState("run") - } - - override def afterAll(): Unit = { - Profiler.profilerSetState("stop") - } - - test("Profiler Broadcast test") { - ProfilerNDArray.testBroadcast() - } - - test("Profiler NDArray Saveload test") { - ProfilerNDArray.testNDArraySaveload() - } - - test("Profiler NDArray Copy") { - ProfilerNDArray.testNDArrayCopy() - } - - test("Profiler NDArray Negate") { - ProfilerNDArray.testNDArrayNegate() - } - - test("Profiler NDArray Scalar") { - ProfilerNDArray.testNDArrayScalar() - } - - test("Profiler NDArray Onehot") { - ProfilerNDArray.testNDArrayOnehot() - } - - test("Profiler Clip") { - ProfilerNDArray.testClip() - } - - test("Profiler Dot") { - ProfilerNDArray.testDot() - } -} diff --git a/scala-package/externalPom/pom.xml b/scala-package/externalPom/pom.xml deleted file mode 100644 index 515beeb94f40..000000000000 --- a/scala-package/externalPom/pom.xml +++ /dev/null @@ -1,152 +0,0 @@ - - - - 4.0.0 - - org.apache.mxnet - mxnet-parent - INTERNAL - ../pom.xml - - - mxnet-external-pom - ${revision} - MXNet Scala Package - Full ${platform}-only - pom - - Scala Package for Apache MXNet (Incubating) - flexible and efficient library for deep learning. - - - - ${project.parent.basedir}/.. 
- mxnet-full_2.11-${platform}-${flavor} - ${base.revision}-SNAPSHOT - true - - - - - org.apache.mxnet - mxnet-full_2.11 - INTERNAL - - - - - - staging - - ${base.revision} - false - - - - - - - - org.apache.maven.plugins - maven-assembly-plugin - - false - - - - - com.google.code.maven-replacer-plugin - replacer - 1.5.3 - - - deploy - - replace - - - - - ${basedir}/src/main/deploy/deploy.xml - ${project.build.directory}/deploy.xml - - - DESCRIPTION - ${project.description} - - - ARTIFACT_ID - ${ARTIFACT_ID} - - - PROJECT_VERSION - ${project.version} - - - SCALA_VERSION - ${scala.version} - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 3.0.0 - - - attach-artifacts - deploy - - attach-artifact - - - - - target/deploy.xml - xml - - - - - - - - - org.apache.maven.plugins - maven-gpg-plugin - 1.6 - - - sign-artifacts - deploy - - sign - - - - - ${skipGpg} - - - - - - - diff --git a/scala-package/externalPom/src/main/deploy/deploy.xml b/scala-package/externalPom/src/main/deploy/deploy.xml deleted file mode 100644 index f5b624885cbb..000000000000 --- a/scala-package/externalPom/src/main/deploy/deploy.xml +++ /dev/null @@ -1,69 +0,0 @@ - - - - 4.0.0 - org.apache.mxnet - ARTIFACT_ID - PROJECT_VERSION - DESCRIPTION - - - org.scala-lang - scala-library - SCALA_VERSION - - - org.scala-lang - scala-reflect - SCALA_VERSION - - - org.scala-lang.modules - scala-parser-combinators_2.11 - 1.0.5 - - - org.scala-lang - scala-compiler - SCALA_VERSION - - - commons-codec - commons-codec - 1.10 - - - commons-io - commons-io - 2.1 - - - org.slf4j - slf4j-api - 1.7.5 - - - args4j - args4j - 2.0.29 - - - diff --git a/scala-package/infer/pom.xml b/scala-package/infer/pom.xml deleted file mode 100644 index 81e93932e83f..000000000000 --- a/scala-package/infer/pom.xml +++ /dev/null @@ -1,80 +0,0 @@ - - - - 4.0.0 - - mxnet-parent - org.apache.mxnet - INTERNAL - ../pom.xml - - - - false - - - mxnet-infer - MXNet Scala Package - Inference - - - - - org.apache.maven.plugins - maven-jar-plugin - - - META-INF/*.SF - META-INF/*.DSA - META-INF/*.RSA - - - - - org.scalatest - scalatest-maven-plugin - - - -Djava.library.path=${project.parent.basedir}/native/target \ - -Dlog4j.configuration=file://${project.basedir}/src/test/resources/log4j.properties - - - - - org.scalastyle - scalastyle-maven-plugin - - - - - - org.apache.mxnet - mxnet-core - INTERNAL - provided - - - - org.mockito - mockito-all - 1.10.19 - test - - - diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Classifier.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Classifier.scala deleted file mode 100644 index 38fdc0028a7a..000000000000 --- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Classifier.scala +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.infer - -import org.apache.mxnet._ -import java.io.File - -import org.apache.mxnet.MX_PRIMITIVES.MX_PRIMITIVE_TYPE -import org.slf4j.LoggerFactory - -import scala.io -import scala.collection.mutable.ListBuffer -import scala.collection.parallel.mutable.ParArray - -trait ClassifierBase { - - /** - * Takes an array of floats and returns corresponding (Label, Score) tuples - * @tparam T The Scala equivalent of the DType used for the input array and return value - * @param input Indexed sequence one-dimensional array of floats/doubles - * @param topK (Optional) How many result (sorting based on the last axis) - * elements to return. Default returns unsorted output. - * @return Indexed sequence of (Label, Score) tuples - */ - def classify[@specialized (Base.MX_PRIMITIVES) T](input: IndexedSeq[Array[T]], - topK: Option[Int] = None): IndexedSeq[(String, T)] - - /** - * Takes a sequence of NDArrays and returns (Label, Score) tuples - * @param input Indexed sequence of NDArrays - * @param topK (Optional) How many result (sorting based on the last axis) - * elements to return. Default returns unsorted output. - * @return Traversable sequence of (Label, Score) tuple - */ - def classifyWithNDArray(input: IndexedSeq[NDArray], - topK: Option[Int] = None): IndexedSeq[IndexedSeq[(String, Float)]] -} - -/** - * A class for classifier tasks - * @param modelPathPrefix Path prefix from where to load the model artifacts - * These include the symbol, parameters, and synset.txt - * Example: file://model-dir/resnet-152 (containing - * resnet-152-symbol.json, resnet-152-0000.params, and synset.txt) - * @param inputDescriptors Descriptors defining the input node names, shape, - * layout and type parameters - * @param contexts Device contexts on which you want to run inference; defaults to CPU - * @param epoch Model epoch to load; defaults to 0 - */ -class Classifier(modelPathPrefix: String, - protected val inputDescriptors: IndexedSeq[DataDesc], - protected val contexts: Array[Context] = Context.cpu(), - protected val epoch: Option[Int] = Some(0)) - extends ClassifierBase { - - private val logger = LoggerFactory.getLogger(classOf[Classifier]) - - protected[infer] val predictor: PredictBase = getPredictor() - - protected[infer] val synsetFilePath = getSynsetFilePath(modelPathPrefix) - - protected[infer] val synset = readSynsetFile(synsetFilePath) - - protected[infer] val handler = MXNetHandler() - - /** - * Takes flat arrays as input and returns (Label, Score) tuples. - * @param input Indexed sequence one-dimensional array of floats/doubles - * @param topK (Optional) How many result (sorting based on the last axis) - * elements to return. Default returns unsorted output. 
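- *
- * A usage sketch for illustration (names such as `classifier` are assumed
- * here, not defined in this file; it presumes a model with a single
- * 3x224x224 float input):
- * {{{
- * val input = IndexedSeq(Array.fill(3 * 224 * 224)(0.5f))
- * val top5: IndexedSeq[(String, Float)] = classifier.classify(input, Some(5))
- * }}}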
- * @return Indexed sequence of (Label, Score) tuples - */ - override def classify[@specialized (Base.MX_PRIMITIVES) T](input: IndexedSeq[Array[T]], - topK: Option[Int] = None): IndexedSeq[(String, T)] = { - - // considering only the first output - val result = input(0)(0) match { - case d: Double => { - classifyImpl(input.asInstanceOf[IndexedSeq[Array[Double]]], topK) - } - case _ => { - classifyImpl(input.asInstanceOf[IndexedSeq[Array[Float]]], topK) - } - } - - result.asInstanceOf[IndexedSeq[(String, T)]] - } - - private def classifyImpl[B, A <: MX_PRIMITIVE_TYPE] - (input: IndexedSeq[Array[B]], topK: Option[Int] = None)(implicit ev: B => A) - : IndexedSeq[(String, B)] = { - - // considering only the first output - val predictResult = predictor.predict(input)(0) - - var result: IndexedSeq[(String, B)] = IndexedSeq.empty - - if (topK.isDefined) { - val sortedIndex = predictResult.zipWithIndex.sortBy(-_._1).map(_._2).take(topK.get) - result = sortedIndex.map(i => (synset(i), predictResult(i))).toIndexedSeq - } else { - result = synset.zip(predictResult).toIndexedSeq - } - result - } - - /** - * Perform multiple classification operations on NDArrays. - * Also works with batched input. - * @param input Indexed sequence of NDArrays - * @param topK (Optional) How many result (sorting based on the last axis) - * elements to return. Default returns unsorted output. - * @return Traversable sequence of (Label, Score) tuples. - */ - override def classifyWithNDArray(input: IndexedSeq[NDArray], topK: Option[Int] = None) - : IndexedSeq[IndexedSeq[(String, Float)]] = { - - // considering only the first output - // Copy NDArray to CPU to avoid frequent GPU to CPU copying - val predictResultND: NDArray = - predictor.predictWithNDArray(input)(0).asInContext(Context.cpu()) - // Parallel Execution with ParArray for better performance - val predictResultPar: ParArray[Array[Float]] = - new ParArray[Array[Float]](predictResultND.shape(0)) - - // iterating over the individual items(batch size is in axis 0) - (0 until predictResultND.shape(0)).toVector.par.foreach( i => { - val r = predictResultND.at(i) - predictResultPar(i) = r.toArray - r.dispose() - }) - - val predictResult = predictResultPar.toArray - var result: ListBuffer[IndexedSeq[(String, Float)]] = - ListBuffer.empty[IndexedSeq[(String, Float)]] - - if (topK.isDefined) { - val sortedIndices = predictResult.map(r => - r.zipWithIndex.sortBy(-_._1).map(_._2).take(topK.get) - ) - for (i <- sortedIndices.indices) { - result += sortedIndices(i).map(sIndx => - (synset(sIndx), predictResult(i)(sIndx))).toIndexedSeq - } - } else { - for (i <- predictResult.indices) { - result += synset.zip(predictResult(i)).toIndexedSeq - } - } - - handler.execute(predictResultND.dispose()) - - result.toIndexedSeq - } - - /** - * Gives the path to the standard location of the synset.txt file - * @throws IllegalArgumentException Thrown when the file does not exist - * @param modelPathPrefix The path to the model directory - * @return The path to the synset.txt file - */ - private[infer] def getSynsetFilePath(modelPathPrefix: String): String = { - val dirPath = modelPathPrefix.substring(0, 1 + modelPathPrefix.lastIndexOf(File.separator)) - val d = new File(dirPath) - require(d.exists && d.isDirectory, s"directory: $dirPath not found") - - val s = new File(dirPath + "synset.txt") - require(s.exists() && s.isFile, - s"File synset.txt should exist inside modelPath: ${dirPath + "synset.txt"}") - - s.getCanonicalPath - } - - /** - * Parses the labels from a synset file - * @param 
synsetFilePath The path to the synset file. Can be gotten from getSynsetFilePath - * @return A IndexedSeq of each element in the file - */ - private[infer] def readSynsetFile(synsetFilePath: String): IndexedSeq[String] = { - val f = io.Source.fromFile(synsetFilePath) - try { - f.getLines().toIndexedSeq - } finally { - f.close - } - } - - /** - * Creates a predictor with the same modelPath, inputDescriptors, contexts, - * and epoch as the classifier - * @return The new Predictor - */ - private[infer] def getPredictor(): PredictBase = { - new Predictor(modelPathPrefix, inputDescriptors, contexts, epoch) - } - -} diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/ImageClassifier.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/ImageClassifier.scala deleted file mode 100644 index fb5f39fb2096..000000000000 --- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/ImageClassifier.scala +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.infer - -import org.apache.mxnet.DType.DType -import org.apache.mxnet._ - -import scala.collection.mutable.ListBuffer - -// scalastyle:off -import java.awt.image.BufferedImage -// scalastyle:on -import java.io.File - -import javax.imageio.ImageIO - - -/** - * A class for image classification tasks. - * Contains helper methods. - * - * @param modelPathPrefix Path prefix from where to load the model artifacts. - * These include the symbol, parameters, and synset.txt. - * Example: file://model-dir/resnet-152 (containing - * resnet-152-symbol.json, resnet-152-0000.params, and synset.txt). 
- * @param inputDescriptors Descriptors defining the input node names, shape,
- * layout and type parameters
- * @param contexts Device contexts on which you want to run inference; defaults to CPU
- * @param epoch Model epoch to load; defaults to 0
- */
-class ImageClassifier(modelPathPrefix: String,
-                      inputDescriptors: IndexedSeq[DataDesc],
-                      contexts: Array[Context] = Context.cpu(),
-                      epoch: Option[Int] = Some(0))
-  extends Classifier(modelPathPrefix,
-                     inputDescriptors, contexts, epoch) {
-
-  require(inputDescriptors.nonEmpty, "Please provide input descriptor")
-  require(inputDescriptors.head.layout == "NCHW", "Provided layout doesn't match NCHW format")
-
-  protected[infer] val inputLayout = inputDescriptors.head.layout
-
-  protected[infer] val inputShape = inputDescriptors.head.shape
-
-  // 'NCHW' is considered the default layout when none is provided;
-  // otherwise each axis is taken from the layout string.
-  // [TODO] if the layout differs from the BufferedImage layout,
-  // transpose to match the inputDescriptor shape
-  protected[infer] val batch = inputShape(inputLayout.indexOf('N'))
-  protected[infer] val channel = inputShape(inputLayout.indexOf('C'))
-  protected[infer] val height = inputShape(inputLayout.indexOf('H'))
-  protected[infer] val width = inputShape(inputLayout.indexOf('W'))
-
-  /**
-   * Get the names and shapes that would be returned by a classify call
-   * @return a list of (name, shape) tuples
-   */
-  def outputShapes: IndexedSeq[(String, Shape)] = predictor.outputShapes
-
-  /**
-   * Classify the image according to the provided model
-   *
-   * @param inputImage The loaded input image to classify
-   * @param topK Number of result elements to return, sorted by probability
-   * @param dType The precision at which to run the inference.
-   *              Specify the DType as DType.Float64 for Double precision.
-   *              Defaults to DType.Float32
-   * @return List of list of tuples of (Label, Probability)
-   */
-  def classifyImage
-  (inputImage: BufferedImage, topK: Option[Int] = None, dType: DType = DType.Float32):
-  IndexedSeq[IndexedSeq[(String, Float)]] = {
-
-    val scaledImage = ImageClassifier.reshapeImage(inputImage, width, height)
-    val imageShape = inputShape.drop(1)
-    val pixelsNDArray = ImageClassifier.bufferedImageToPixels(scaledImage, imageShape, dType)
-    val imgWithBatchNum = NDArray.api.expand_dims(pixelsNDArray, 0)
-    inputImage.flush()
-    scaledImage.flush()
-    handler.execute(pixelsNDArray.dispose())
-
-    val output = super.classifyWithNDArray(IndexedSeq(imgWithBatchNum), topK)
-
-    handler.execute(imgWithBatchNum.dispose())
-
-    IndexedSeq(output(0))
-  }
-
-  /**
-   * Classify a batch of input images according to the provided model
-   *
-   * @param inputBatch Input array of buffered images
-   * @param topK Number of result elements to return, sorted by probability
-   * @param dType The precision at which to run the inference.
-   *              Specify the DType as DType.Float64 for Double precision.
- * Defaults to DType.Float32 - * @return List of list of tuples of (Label, Probability) - */ - def classifyImageBatch(inputBatch: Traversable[BufferedImage], topK: Option[Int] = None, - dType: DType = DType.Float32): IndexedSeq[IndexedSeq[(String, Float)]] = { - - val inputBatchSeq = inputBatch.toIndexedSeq - val imageBatch = inputBatchSeq.indices.par.map(idx => { - val scaledImage = ImageClassifier.reshapeImage(inputBatchSeq(idx), width, height) - val imageShape = inputShape.drop(1) - val imgND = ImageClassifier.bufferedImageToPixels(scaledImage, imageShape, dType) - val imgWithBatch = NDArray.api.expand_dims(imgND, 0).get - handler.execute(imgND.dispose()) - imgWithBatch - }).toList - val op = NDArray.concatenate(imageBatch) - val result = super.classifyWithNDArray(IndexedSeq(op), topK) - handler.execute(op.dispose()) - handler.execute(imageBatch.foreach(_.dispose())) - - result - } - - /** - * Creates a Classifier - * - * @param modelPathPrefix Path prefix from where to load the model artifacts. - * These include the symbol, parameters, and synset.txt. - * Example: file://model-dir/resnet-152 (containing - * resnet-152-symbol.json, resnet-152-0000.params, and synset.txt). - * @param inputDescriptors Descriptors defining the input node names, shape, - * layout and type parameters - * @param contexts Device contexts on which you want to run inference; defaults to CPU - * @param epoch Model epoch to load; defaults to 0 - * @return A Classifier to perform inference with - */ - private[infer] def getClassifier(modelPathPrefix: String, - inputDescriptors: IndexedSeq[DataDesc], - contexts: Array[Context] = Context.cpu(), - epoch: Option[Int] = Some(0)): Classifier = { - new Classifier(modelPathPrefix, inputDescriptors, contexts, epoch) - } -} - -object ImageClassifier { - - /** - * Reshape the input image to a new shape - * - * @param img Input image - * @param newWidth New width for rescaling - * @param newHeight New height for rescaling - * @return Rescaled BufferedImage - */ - def reshapeImage(img: BufferedImage, newWidth: Int, newHeight: Int): BufferedImage = { - val resizedImage = new BufferedImage(newWidth, newHeight, BufferedImage.TYPE_INT_RGB) - val g = resizedImage.createGraphics() - g.drawImage(img, 0, 0, newWidth, newHeight, null) - g.dispose() - - resizedImage - } - - /** - * Convert input BufferedImage to NDArray of input shape - * Note: Caller is responsible to dispose the NDArray - * returned by this method after the use. - * - * @param resizedImage BufferedImage to get pixels from - * @param inputImageShape Input shape; for example for resnet it is (3,224,224). - * Should be same as inputDescriptor shape. - * @param dType The DataType of the NDArray created from the image - * that should be returned. 
- * Currently it defaults to Dtype.Float32 - * @return NDArray pixels array with shape (3, 224, 224) in CHW format - */ - def bufferedImageToPixels(resizedImage: BufferedImage, inputImageShape: Shape, - dType : DType = DType.Float32): NDArray = { - - if (dType == DType.Float64) { - val result = getFloatPixelsArray(resizedImage) - NDArray.array(result.map(_.toDouble), shape = inputImageShape) - } - else { - val result = getFloatPixelsArray(resizedImage) - NDArray.array(result, shape = inputImageShape) - } - } - - private def getFloatPixelsArray(resizedImage: BufferedImage): Array[Float] = { - - // Get height and width of the image - val w = resizedImage.getWidth() - val h = resizedImage.getHeight() - - // get an array of integer pixels in the default RGB color mode - val pixels = resizedImage.getRGB(0, 0, w, h, null, 0, w) - - // 3 times height and width for R,G,B channels - val result = new Array[Float](3 * h * w) - var row = 0 - // copy pixels to array vertically - while (row < h) { - var col = 0 - // copy pixels to array horizontally - while (col < w) { - val rgb = pixels(row * w + col) - // getting red color - result(0 * h * w + row * w + col) = (rgb >> 16) & 0xFF - // getting green color - result(1 * h * w + row * w + col) = (rgb >> 8) & 0xFF - // getting blue color - result(2 * h * w + row * w + col) = rgb & 0xFF - col += 1 - } - row += 1 - } - - resizedImage.flush() - - result - } - - /** - * Loads an input images from file - * @param inputImagePath Path of single input image - * @return BufferedImage Buffered image - */ - def loadImageFromFile(inputImagePath: String): BufferedImage = { - val img = ImageIO.read(new File(inputImagePath)) - img - } - - /** - * Loads a batch of images from a folder - * @param inputImagePaths Path to a folder of images - * @return List of buffered images - */ - def loadInputBatch(inputImagePaths: List[String]): Traversable[BufferedImage] = { - inputImagePaths.map(path => ImageIO.read(new File(path))) - } -} diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/MXNetHandler.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/MXNetHandler.scala deleted file mode 100644 index 593bab66bf12..000000000000 --- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/MXNetHandler.scala +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet.infer - -import java.util.concurrent._ - -import org.slf4j.LoggerFactory - -private[infer] trait MXNetHandler { - - /** - * Executes a function within a thread-safe executor - * @param f The function to execute - * @tparam T The return type of the function - * @return Returns the result of the function f - */ - def execute[T](f: => T): T - - val executor: ExecutorService - -} - -private[infer] object MXNetHandlerType extends Enumeration { - - /** - * The internal type of the MXNetHandlerType enumeration - */ - type MXNetHandlerType = Value - - val SingleThreadHandler = Value("MXNetSingleThreadHandler") - val OneThreadPerModelHandler = Value("MXNetOneThreadPerModelHandler") -} - -private[infer] class MXNetThreadPoolHandler(numThreads: Int = 1) - extends MXNetHandler { - - require(numThreads > 0, s"Invalid numThreads $numThreads") - - private val logger = LoggerFactory.getLogger(classOf[MXNetThreadPoolHandler]) - private var threadCount: Int = 0 - - private val threadFactory = new ThreadFactory { - override def newThread(r: Runnable): Thread = new Thread(r) { - setName(classOf[MXNetThreadPoolHandler].getCanonicalName - + "-%d".format(threadCount)) - // setting to daemon threads to exit along with the main threads - setDaemon(true) - threadCount += 1 - } - } - - override val executor: ExecutorService = - Executors.newFixedThreadPool(numThreads, threadFactory) - - private val creatorThread = executor.submit(new Callable[Thread] { - override def call(): Thread = Thread.currentThread() - }).get() - - override def execute[T](f: => T): T = { - - if (Thread.currentThread() eq creatorThread) { - f - } else { - - val task = new Callable[T] { - override def call(): T = { - logger.debug("threadId: %s".format(Thread.currentThread().getId())) - f - } - } - - val result = executor.submit(task) - try { - result.get() - } catch { - case e : InterruptedException => throw e - // unwrap the exception thrown by the task - case e1: Exception => throw e1.getCause() - } - } - } - -} - -private[infer] object MXNetSingleThreadHandler extends MXNetThreadPoolHandler(1) { - -} - -private[infer] object MXNetHandler { - - /** - * Creates a handler based on the handlerType - * @return A ThreadPool or Thread Handler - */ - def apply(): MXNetHandler = { - if (handlerType == MXNetHandlerType.OneThreadPerModelHandler) { - new MXNetThreadPoolHandler(1) - } else { - MXNetSingleThreadHandler - } - } -} diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/ObjectDetector.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/ObjectDetector.scala deleted file mode 100644 index b78cfbccd987..000000000000 --- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/ObjectDetector.scala +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.infer - -// scalastyle:off -import java.awt.image.BufferedImage - -import org.apache.mxnet.Shape - -import scala.collection.parallel.mutable.ParArray -// scalastyle:on -import org.apache.mxnet.NDArray -import org.apache.mxnet.DataDesc -import org.apache.mxnet.Context - -/** - * The ObjectDetector class helps to run ObjectDetection tasks where the goal - * is to find bounding boxes and corresponding labels for objects in a image. - * - * @param modelPathPrefix Path prefix from where to load the model artifacts. - * These include the symbol, parameters, and synset.txt. - * Example: file://model-dir/ssd_resnet50_512 (containing - * ssd_resnet50_512-symbol.json, ssd_resnet50_512-0000.params, - * and synset.txt) - * @param inputDescriptors Descriptors defining the input node names, shape, - * layout and type parameters - * @param contexts Device contexts on which you want to run inference. - * Defaults to CPU. - * @param epoch Model epoch to load; defaults to 0 - */ -class ObjectDetector(modelPathPrefix: String, - inputDescriptors: IndexedSeq[DataDesc], - contexts: Array[Context] = Context.cpu(), - epoch: Option[Int] = Some(0)) { - - protected[infer] val imgClassifier: ImageClassifier = - getImageClassifier(modelPathPrefix, inputDescriptors, contexts, epoch) - - protected[infer] val inputShape = imgClassifier.inputShape - - protected[infer] val handler = imgClassifier.handler - - protected[infer] val predictor = imgClassifier.predictor - - protected[infer] val synset = imgClassifier.synset - - protected[infer] val height = imgClassifier.height - - protected[infer] val width = imgClassifier.width - - /** - * Detects objects and returns bounding boxes with corresponding class/label - * - * @param inputImage Path prefix of the input image - * @param topK Number of result elements to return, sorted by probability - * @return List of list of tuples of - * (class, [probability, xmin, ymin, xmax, ymax]) - */ - def imageObjectDetect(inputImage: BufferedImage, - topK: Option[Int] = None) - : IndexedSeq[IndexedSeq[(String, Array[Float])]] = { - - val scaledImage = ImageClassifier.reshapeImage(inputImage, width, height) - val imageShape = inputShape.drop(1) - val pixelsNDArray = ImageClassifier.bufferedImageToPixels(scaledImage, imageShape) - val pixelsNDWithBatch = NDArray.api.expand_dims(pixelsNDArray, 0) - handler.execute(pixelsNDArray.dispose()) - val output = objectDetectWithNDArray(IndexedSeq(pixelsNDWithBatch), topK) - handler.execute(pixelsNDWithBatch.dispose()) - output - } - - /** - * Takes input images as NDArrays. Useful when you want to perform multiple operations on - * the input array, or when you want to pass a batch of input images. - * - * @param input Indexed Sequence of NDArrays - * @param topK (Optional) How many top_k (sorting will be based on the last axis) - * elements to return. If not passed, returns all unsorted output. 
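- *
- * A hedged sketch for illustration (`detector` and `img` are assumed names;
- * `img` is presumed to be an already preprocessed NDArray of shape
- * (1, 3, 512, 512)):
- * {{{
- * val batchResults = detector.objectDetectWithNDArray(IndexedSeq(img), Some(3))
- * batchResults(0).foreach { case (label, box) =>
- *   println(s"$label: prob=${box(0)} box=(${box(1)}, ${box(2)}, ${box(3)}, ${box(4)})")
- * }
- * }}}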
- * @return List of list of tuples of - * (class, [probability, xmin, ymin, xmax, ymax]) - */ - def objectDetectWithNDArray(input: IndexedSeq[NDArray], topK: Option[Int]) - : IndexedSeq[IndexedSeq[(String, Array[Float])]] = { - - // Copy NDArray to CPU to avoid frequent GPU to CPU copying - val predictResult = predictor.predictWithNDArray(input)(0).asInContext(Context.cpu()) - // Parallel Execution with ParArray for better performance - var batchResult = new ParArray[IndexedSeq[(String, Array[Float])]](predictResult.shape(0)) - (0 until predictResult.shape(0)).toArray.par.foreach( i => { - val r = predictResult.at(i) - batchResult(i) = sortAndReformat(r, topK) - handler.execute(r.dispose()) - }) - handler.execute(predictResult.dispose()) - batchResult.toIndexedSeq - } - - /** - * Formats detection results by sorting in descending order of accuracy (topK only) - * and combining with synset labels - * @param predictResultND The results from the objectDetect call - * @param topK The number of top results to return or None for all - * @return The top predicted results as (className, [Accuracy, Xmin, Ymin, Xmax, Ymax]) - */ - private[infer] def sortAndReformat(predictResultND: NDArray, topK: Option[Int]) - : IndexedSeq[(String, Array[Float])] = { - // iterating over the all the predictions - val length = predictResultND.shape(0) - - val predictResult = (0 until length).toArray.par.flatMap( i => { - val r = predictResultND.at(i) - val tempArr = r.toArray - val res = if (tempArr(0) != -1.0) { - Array[Array[Float]](tempArr) - } else { - // Ignore the minus 1 part - Array[Array[Float]]() - } - handler.execute(r.dispose()) - res - }).toArray - var result = IndexedSeq[(String, Array[Float])]() - if (topK.isDefined) { - var sortedIndices = predictResult.zipWithIndex.sortBy(-_._1(1)).map(_._2) - sortedIndices = sortedIndices.take(topK.get) - // takeRight(5) would provide the output as Array[Accuracy, Xmin, Ymin, Xmax, Ymax] - result = sortedIndices.map(idx - => (synset(predictResult(idx)(0).toInt), - predictResult(idx).takeRight(5))).toIndexedSeq - } else { - result = predictResult.map(ele - => (synset(ele(0).toInt), ele.takeRight(5))).toIndexedSeq - } - result - } - - /** - * To classify batch of input images according to the provided model - * - * @param inputBatch Input array of buffered images - * @param topK Number of result elements to return, sorted by probability - * @return List of list of tuples of (class, probability) - */ - def imageBatchObjectDetect(inputBatch: Traversable[BufferedImage], topK: Option[Int] = None): - IndexedSeq[IndexedSeq[(String, Array[Float])]] = { - - val inputBatchSeq = inputBatch.toIndexedSeq - val imageBatch = inputBatchSeq.indices.par.map(idx => { - val scaledImage = ImageClassifier.reshapeImage(inputBatchSeq(idx), width, height) - val imageShape = inputShape.drop(1) - val pixelsND = ImageClassifier.bufferedImageToPixels(scaledImage, imageShape) - val pixelsNDWithBatch = NDArray.api.expand_dims(pixelsND, 0).get - handler.execute(pixelsND.dispose()) - pixelsNDWithBatch - }) - val op = NDArray.concatenate(imageBatch.toList) - - val result = objectDetectWithNDArray(IndexedSeq(op), topK) - handler.execute(op.dispose()) - handler.execute(imageBatch.foreach(_.dispose())) - result - } - - /** - * Creates an image classifier from the object detector model - * @param modelPathPrefix Path prefix from where to load the model artifacts. - * These include the symbol, parameters, and synset.txt. 
- * Example: file://model-dir/resnet-152 (containing
- * resnet-152-symbol.json, resnet-152-0000.params, and synset.txt).
- * @param inputDescriptors Descriptors defining the input node names, shape,
- * layout and type parameters
- * @param contexts Device contexts on which you want to run inference; defaults to CPU
- * @param epoch Model epoch to load; defaults to 0
- * @return The corresponding image classifier
- */
-  private[infer] def getImageClassifier(modelPathPrefix: String,
-                                        inputDescriptors: IndexedSeq[DataDesc],
-                                        contexts: Array[Context] = Context.cpu(),
-                                        epoch: Option[Int] = Some(0)):
-  ImageClassifier = {
-    val imageClassifier: ImageClassifier =
-      new ImageClassifier(modelPathPrefix, inputDescriptors, contexts, epoch)
-
-    val shapes: IndexedSeq[(String, Shape)] = imageClassifier.outputShapes
-    if (shapes.length != inputDescriptors.length) {
-      // the model must produce one output per input descriptor
-      throw new IllegalStateException(s"Invalid output shapes, expected:" +
-        s" ${inputDescriptors.length}, actual: ${shapes.length}.")
-    }
-    shapes.map(_._2).foreach(shape => {
-      if (shape.length < 3) {
-        throw new IllegalArgumentException("Invalid output shapes, the model doesn't"
-          + " support object detection.")
-      }
-      if (shape.get(2) < 6) {
-        throw new IllegalArgumentException("Invalid output shapes, the model doesn't"
-          + " support object detection with bounding box.")
-      }
-    })
-
-    imageClassifier
-  }
-}
diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Predictor.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Predictor.scala
deleted file mode 100644
index cb27c930903d..000000000000
--- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/Predictor.scala
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet.infer
-
-import org.apache.mxnet.MX_PRIMITIVES.MX_PRIMITIVE_TYPE
-import org.apache.mxnet.io.NDArrayIter
-import org.apache.mxnet._
-import org.apache.mxnet.module.Module
-
-import scala.collection.mutable.ListBuffer
-import scala.util.Try
-import org.slf4j.LoggerFactory
-
-
-/**
- * Base Trait for MXNet Predictor classes.
- */
-private[infer] trait PredictBase {
-
- /**
- * Converts indexed sequences of 1-D arrays to NDArrays.
- * This method takes an IndexedSeq of one-dimensional arrays and creates the
- * NDArray needed for inference. The array is reshaped based on the input descriptors.
- * @tparam T The Scala equivalent of the DType used for the input array and return value
- * @param input An indexed sequence of one-dimensional arrays of datatype
- *              Float or Double.
- *              An IndexedSeq is needed when the model has more than one input.
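- *
- * For illustration only (a sketch; `predictor` is an assumed name and the
- * model is presumed to take a single 1x3x224x224 float input):
- * {{{
- * val flat = Array.fill[Float](3 * 224 * 224)(0f)
- * val out = predictor.predict(IndexedSeq(flat)) // IndexedSeq[Array[Float]]
- * }}}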
- * @return Indexed sequence array of outputs - */ - def predict[@specialized (Base.MX_PRIMITIVES) T](input: IndexedSeq[Array[T]]) - : IndexedSeq[Array[T]] - - /** - * Predict using NDArray as input. - *

- * This method is useful when the input is a batch of data - * or when multiple operations on the input have to performed. - * Note: User is responsible for managing allocation/deallocation of NDArrays. - * @param input IndexedSequence NDArrays. - * @return Output of predictions as NDArrays. - */ - def predictWithNDArray(input: IndexedSeq[NDArray]): IndexedSeq[NDArray] - - /** - * Get model output shapes. - * @return model output shapes. - */ - def outputShapes: IndexedSeq[(String, Shape)] -} - -/** - * Implementation of prediction routines. - * - * @param modelPathPrefix Path prefix from where to load the model artifacts. - * These include the symbol, parameters, and synset.txt - * Example: file://model-dir/resnet-152 (containing - * resnet-152-symbol.json, resnet-152-0000.params, and synset.txt). - * @param inputDescriptors Descriptors defining the input node names, shape, - * layout and type parameters - *

Note: If the input Descriptors are missing a batchSize
- * ('N' in layout), a batchSize of 1 is assumed for the model.
- * @param contexts Device contexts on which you want to run inference; defaults to CPU
- * @param epoch Model epoch to load; defaults to 0
-
- */
-class Predictor(modelPathPrefix: String,
-                protected val inputDescriptors: IndexedSeq[DataDesc],
-                protected val contexts: Array[Context] = Context.cpu(),
-                protected val epoch: Option[Int] = Some(0))
-  extends PredictBase {
-
-  private val logger = LoggerFactory.getLogger(classOf[Predictor])
-
-  /*
-    Setting -Dmxnet.disableShapeCheck=true disables the data shape check
-    in the predictor. Some models accept inputs of varying length, such as
-    Seq2Seq; however, there is a risk of crashes if the length falls outside
-    the range the model can accept.
-   */
-  private val traceProperty = "mxnet.disableShapeCheck"
-  private lazy val shapeCheckDisabled = {
-    val value = Try(System.getProperty(traceProperty).toBoolean).getOrElse(false)
-    if (value) {
-      logger.warn("Shape check is disabled (property {} is set)", traceProperty)
-    }
-    value
-  }
-
-  require(inputDescriptors.head.layout.size != 0, "layout size should not be zero")
-
-  protected[infer] var batchIndex = inputDescriptors(0).layout.indexOf('N')
-  protected[infer] var batchSize = if (batchIndex != -1) inputDescriptors(0).shape(batchIndex)
-  else 1
-
-  protected[infer] var iDescriptors = inputDescriptors
-
-  inputDescriptors.foreach((f: DataDesc) => require(f.layout.indexOf('N') == batchIndex,
-    "batch size should be in the same index for all inputs"))
-
-  if (batchIndex != -1) {
-    inputDescriptors.foreach((f: DataDesc) => require(f.shape(batchIndex) == batchSize,
-      "batch size should be same for all inputs"))
-  } else {
-    // Note: this assumes that the input needs a batch dimension
-    logger.warn("InputDescriptor does not have batchSize, using 1 as the default batchSize")
-    iDescriptors = inputDescriptors.map((f: DataDesc) => new DataDesc(f.name,
-      Shape(1 +: f.shape.toVector), f.dtype, 'N' +: f.layout))
-    batchIndex = 1
-  }
-
-  protected[infer] val mxNetHandler = MXNetHandler()
-
-  protected[infer] val mod = loadModule()
-
-  override def outputShapes: IndexedSeq[(String, Shape)] = mod.outputShapes
-
-  /**
-   * Takes input as an IndexedSeq of one-dimensional arrays and creates the NDArray
-   * needed for inference. The array is reshaped based on the input descriptors.
-   *
-   * @param input An IndexedSeq of one-dimensional arrays
-   *              of data type Float or Double.
-   *              An IndexedSeq is needed when the model has more than one input.
- * @return Indexed sequence array of outputs - */ - override def predict[@specialized (Base.MX_PRIMITIVES) T](input: IndexedSeq[Array[T]]) - : IndexedSeq[Array[T]] = { - require(input.length == inputDescriptors.length, - s"number of inputs provided: ${input.length} does not match number of inputs " + - s"in inputDescriptors: ${inputDescriptors.length}") - - for((i, d) <- input.zip(inputDescriptors)) { - require(i.length == d.shape.product / batchSize, - s"number of elements:${i.length} in the input does not match the shape:" + - s"${d.shape.toString()}") - } - - // Infer the dtype of input and call relevant method - val result = input(0)(0) match { - case d: Double => predictImpl(input.asInstanceOf[IndexedSeq[Array[Double]]]) - case _ => predictImpl(input.asInstanceOf[IndexedSeq[Array[Float]]]) - } - - result.asInstanceOf[IndexedSeq[Array[T]]] - } - - private def predictImpl[B, A <: MX_PRIMITIVE_TYPE] - (input: IndexedSeq[Array[B]])(implicit ev: B => A) - : IndexedSeq[Array[B]] = { - - var inputND: ListBuffer[NDArray] = ListBuffer.empty[NDArray] - - for((i, d) <- input.zip(inputDescriptors)) { - val shape = d.shape.toVector.patch(from = batchIndex, patch = Vector(1), replaced = 1) - if (d.dtype == DType.Float64) { - inputND += mxNetHandler.execute(NDArray.array(i.asInstanceOf[Array[Double]], Shape(shape))) - } - else { - inputND += mxNetHandler.execute(NDArray.array(i.asInstanceOf[Array[Float]], Shape(shape))) - } - } - - // rebind with batchsize 1 - if (batchSize != 1) { - val desc = iDescriptors.map((f : DataDesc) => new DataDesc(f.name, - Shape(f.shape.toVector.patch(batchIndex, Vector(1), 1)), f.dtype, f.layout) ) - mxNetHandler.execute(mod.bind(desc, forceRebind = true, - forTraining = false)) - } - - val resultND = mxNetHandler.execute(mod.predict(new NDArrayIter( - inputND.toIndexedSeq, dataBatchSize = 1))) - - val result = - resultND.map((f : NDArray) => if (f.dtype == DType.Float64) f.toFloat64Array else f.toArray) - - mxNetHandler.execute(inputND.foreach(_.dispose)) - mxNetHandler.execute(resultND.foreach(_.dispose)) - - // rebind to batchSize - if (batchSize != 1) { - mxNetHandler.execute(mod.bind(inputDescriptors, forTraining = false, forceRebind = true)) - } - - result.asInstanceOf[IndexedSeq[Array[B]]] - } - - - - /** - * Predict using NDArray as input - * This method is useful when the input is a batch of data - * Note: User is responsible for managing allocation/deallocation of input/output NDArrays. - * - * @param inputBatch IndexedSequence NDArrays - * @return Output of predictions as NDArrays - */ - override def predictWithNDArray(inputBatch: IndexedSeq[NDArray]): IndexedSeq[NDArray] = { - - require(inputBatch.length == inputDescriptors.length, - s"number of inputs provided: ${inputBatch.length} do not match number " + - s"of inputs in inputDescriptors: ${inputDescriptors.length}") - - // Shape validation, remove this when backend throws better error messages. 
- for((i, d) <- inputBatch.zip(iDescriptors)) { - require(inputBatch(0).shape(batchIndex) == i.shape(batchIndex), - "All inputs should be of same batch size") - if (!shapeCheckDisabled) { - require(i.shape.drop(batchIndex + 1) == d.shape.drop(batchIndex + 1), - s"Input Data Shape: ${i.shape} should match the inputDescriptor " + - s"shape: ${d.shape} except batchSize") - } - } - - val inputBatchSize = inputBatch(0).shape(batchIndex) - - // rebind with the new batchSize - if (batchSize != inputBatchSize) { - logger.info(s"Latency increased due to batchSize mismatch $batchSize vs $inputBatchSize") - val desc = inputBatch.zip(iDescriptors).map(f => new DataDesc(f._2.name, - f._1.shape, f._2.dtype, f._2.layout)) - mxNetHandler.execute(mod.bind(desc, forceRebind = true, - forTraining = false)) - } - - val resultND = mxNetHandler.execute(mod.predict(new NDArrayIter( - inputBatch, dataBatchSize = inputBatchSize))) - - if (batchSize != inputBatchSize) { - mxNetHandler.execute(mod.bind(iDescriptors, forceRebind = true, - forTraining = false)) - } - resultND - } - - /** - * Creates the module backing the Predictor with the same path, epoch, contexts, and inputs - * @return The Module - */ - private[infer] def loadModule(): Module = { - val mod = mxNetHandler.execute(Module.loadCheckpoint(modelPathPrefix, epoch.get, - contexts = contexts, dataNames = inputDescriptors.map(desc => desc.name))) - mxNetHandler.execute(mod.bind(inputDescriptors, forTraining = false)) - mod - } -} diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/ObjectDetector.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/ObjectDetector.scala deleted file mode 100644 index 8131273eca94..000000000000 --- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/ObjectDetector.scala +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.infer.javaapi - -// scalastyle:off -import java.awt.image.BufferedImage -// scalastyle:on - -import org.apache.mxnet.javaapi.{Context, DataDesc, NDArray, Shape} - -import scala.collection.JavaConverters -import scala.collection.JavaConverters._ -import scala.language.implicitConversions - -/** - * The ObjectDetector class helps to run ObjectDetection tasks where the goal - * is to find bounding boxes and corresponding labels for objects in a image. - * - * @param objDetector A source Scala Object detector - */ -class ObjectDetector private[mxnet] (val objDetector: org.apache.mxnet.infer.ObjectDetector){ - - /** - * - * @param modelPathPrefix Path prefix from where to load the model artifacts. - * These include the symbol, parameters, and synset.txt. 
- * Example: file://model-dir/ssd_resnet50_512 (containing - * ssd_resnet50_512-symbol.json, ssd_resnet50_512-0000.params, - * and synset.txt) - * @param inputDescriptors Descriptors defining the input node names, shape, - * layout and type parameters - * @param contexts Device contexts on which you want to run inference. - * Defaults to CPU. - * @param epoch Model epoch to load; defaults to 0 - */ - def this(modelPathPrefix: String, inputDescriptors: java.lang.Iterable[DataDesc], contexts: - java.lang.Iterable[Context], epoch: Int) - = this { - val informationDesc = JavaConverters.asScalaIteratorConverter(inputDescriptors.iterator) - .asScala.toIndexedSeq map {a => a: org.apache.mxnet.DataDesc} - val inContexts = (contexts.asScala.toList map {a => a: org.apache.mxnet.Context}).toArray - // scalastyle:off - new org.apache.mxnet.infer.ObjectDetector(modelPathPrefix, informationDesc, inContexts, Some(epoch)) - // scalastyle:on - } - - /** - * Detects objects and returns bounding boxes with corresponding class/label - * - * @param inputImage Path prefix of the input image - * @param topK Number of result elements to return, sorted by probability - * @return List of list of tuples of - * (class, [probability, xmin, ymin, xmax, ymax]) - */ - def imageObjectDetect(inputImage: BufferedImage, topK: Int): - java.util.List[java.util.List[ObjectDetectorOutput]] = { - val ret = objDetector.imageObjectDetect(inputImage, Some(topK)) - (ret map {a => (a map {e => new ObjectDetectorOutput(e._1, e._2)}).asJava}).asJava - } - - /** - * Takes input images as NDArrays. Useful when you want to perform multiple operations on - * the input array, or when you want to pass a batch of input images. - * - * @param input Indexed Sequence of NDArrays - * @param topK (Optional) How many top_k (sorting will be based on the last axis) - * elements to return. If not passed, returns all unsorted output. 
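- *
- * Usage sketch (illustrative; shown from Scala since the Java-friendly wrapper
- * accepts any java.lang.Iterable of NDArray; `detector` and `ndArrays` are
- * assumed names for a detector and preprocessed input):
- * {{{
- * val results = detector.objectDetectWithNDArray(ndArrays, 3)
- * val best = results.get(0).get(0)
- * println(best.getClassName + " " + best.getProbability)
- * }}}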
- * @return List of list of tuples of - * (class, [probability, xmin, ymin, xmax, ymax]) - */ - def objectDetectWithNDArray(input: java.lang.Iterable[NDArray], topK: Int): - java.util.List[java.util.List[ObjectDetectorOutput]] = { - val ret = objDetector.objectDetectWithNDArray(convert(input.asScala.toIndexedSeq), Some(topK)) - (ret map {a => (a map {e => new ObjectDetectorOutput(e._1, e._2)}).asJava}).asJava - } - - /** - * To classify batch of input images according to the provided model - * - * @param inputBatch Input array of buffered images - * @param topK Number of result elements to return, sorted by probability - * @return List of list of tuples of (class, probability) - */ - def imageBatchObjectDetect(inputBatch: java.lang.Iterable[BufferedImage], topK: Int): - java.util.List[java.util.List[ObjectDetectorOutput]] = { - val ret = objDetector.imageBatchObjectDetect(inputBatch.asScala, Some(topK)) - (ret map {a => (a map {e => new ObjectDetectorOutput(e._1, e._2)}).asJava}).asJava - } - - /** - * Helper to map an implicit conversion - * @param l The value to convert - * @tparam B The desired type - * @tparam A The input type - * @return The converted result - */ - def convert[B, A <% B](l: IndexedSeq[A]): IndexedSeq[B] = l map { a => a: B } - -} - - -object ObjectDetector { - - /** - * Loads an input images from file - * @param inputImagePath Path of single input image - * @return BufferedImage Buffered image - */ - def loadImageFromFile(inputImagePath: String): BufferedImage = { - org.apache.mxnet.infer.ImageClassifier.loadImageFromFile(inputImagePath) - } - - /** - * Reshape the input image to a new shape - * - * @param img Input image - * @param newWidth New width for rescaling - * @param newHeight New height for rescaling - * @return Rescaled BufferedImage - */ - def reshapeImage(img : BufferedImage, newWidth: Int, newHeight: Int): BufferedImage = { - org.apache.mxnet.infer.ImageClassifier.reshapeImage(img, newWidth, newHeight) - } - - /** - * Convert input BufferedImage to NDArray of input shape - * Note: Caller is responsible to dispose the NDArray - * returned by this method after the use. - * - * @param resizedImage BufferedImage to get pixels from - * @param inputImageShape Input shape; for example for resnet it is (3,224,224). - * Should be same as inputDescriptor shape. 
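- *
- * Sketch for illustration (the file path is hypothetical and the javaapi
- * Shape is assumed to be constructible from an Array[Int]):
- * {{{
- * val img = ObjectDetector.loadImageFromFile("/tmp/dog.jpg")
- * val resized = ObjectDetector.reshapeImage(img, 512, 512)
- * val nd = ObjectDetector.bufferedImageToPixels(resized, new Shape(Array(3, 512, 512)))
- * }}}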
- * @return NDArray pixels array with shape (3, 224, 224) in CHW format
- */
-  def bufferedImageToPixels(resizedImage: BufferedImage, inputImageShape: Shape): NDArray = {
-    org.apache.mxnet.infer.ImageClassifier.bufferedImageToPixels(resizedImage, inputImageShape)
-  }
-
-  /**
-   * Loads a batch of images from a folder
-   * @param inputImagePaths Path to a folder of images
-   * @return List of buffered images
-   */
-  def loadInputBatch(inputImagePaths: java.lang.Iterable[String]): java.util.List[BufferedImage] = {
-    org.apache.mxnet.infer.ImageClassifier
-      .loadInputBatch(inputImagePaths.asScala.toList).toList.asJava
-  }
-
-  /**
-   * Implicitly converts a Scala ObjectDetector to a Java ObjectDetector
-   * @param OD The Scala ObjectDetector
-   * @return The Java ObjectDetector
-   */
-  implicit def fromObjectDetector(OD: org.apache.mxnet.infer.ObjectDetector):
-    ObjectDetector = new ObjectDetector(OD)
-
-  /**
-   * Implicitly converts a Java ObjectDetector to a Scala ObjectDetector
-   * @param jOD The Java ObjectDetector
-   * @return The Scala ObjectDetector
-   */
-  implicit def toObjectDetector(jOD: ObjectDetector):
-    org.apache.mxnet.infer.ObjectDetector = jOD.objDetector
-}
diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/ObjectDetectorOutput.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/ObjectDetectorOutput.scala
deleted file mode 100644
index 32fd87e05f69..000000000000
--- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/ObjectDetectorOutput.scala
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.mxnet.infer.javaapi
-
-/**
- * The ObjectDetectorOutput class is a simple POJO helper class that is used to simplify
- * the interactions with ObjectDetector predict results. The class stores the bounding box
- * coordinates, the name of the predicted class, and the probability.
- */
-class ObjectDetectorOutput(className: String, args: Array[Float]) {
-
-  /**
-   * Gets the predicted class's name.
-   *
-   * @return String representing the name of the predicted class
-   */
-  def getClassName: String = className
-
-  /**
-   * Gets the probability of the predicted class.
-   *
-   * @return Float representing the probability of the predicted class
-   */
-  def getProbability: Float = args(0)
-
-  /**
-   * Gets the minimum X coordinate for the bounding box containing the predicted object.
-   *
-   * @return Float of the min X coordinate for the object bounding box
-   */
-  def getXMin: Float = args(1)
-
-  /**
-   * Gets the maximum X coordinate for the bounding box containing the predicted object.
- * - * @return Float of the max X coordinate for the object bounding box - */ - def getXMax: Float = args(3) - - /** - * Gets the minimum Y coordinate for the bounding box containing the predicted object. - * - * @return Float of the min Y coordinate for the object bounding box - */ - def getYMin: Float = args(2) - - /** - * Gets the maximum Y coordinate for the bounding box containing the predicted object. - * - * @return Float of the max Y coordinate for the object bounding box - */ - def getYMax: Float = args(4) - -} diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/Predictor.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/Predictor.scala deleted file mode 100644 index e1505a4da821..000000000000 --- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/javaapi/Predictor.scala +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.infer.javaapi - -import org.apache.mxnet.javaapi.{Context, DataDesc, NDArray} - -import scala.collection.JavaConverters -import scala.collection.JavaConverters._ - -/** - * Implementation of prediction routines. - * - * @param predictor The underlying Scala predictor - */ - -// JavaDoc description of class to be updated in https://issues.apache.org/jira/browse/MXNET-1178 -class Predictor private[mxnet] (val predictor: org.apache.mxnet.infer.Predictor){ - - /** - * - * @param modelPathPrefix Path prefix from where to load the model artifacts. - * These include the symbol, parameters, and synset.txt - * Example: file://model-dir/resnet-152 (containing - * resnet-152-symbol.json, resnet-152-0000.params, and synset.txt). - * @param inputDescriptors Descriptors defining the input node names, shape, - * layout and type parameters - *

Note: If the input Descriptors is missing batchSize - * ('N' in layout), a batchSize of 1 is assumed for the model. - * @param contexts Device contexts on which you want to run inference; defaults to CPU - * @param epoch Model epoch to load; defaults to 0 - */ - def this(modelPathPrefix: String, inputDescriptors: java.lang.Iterable[DataDesc], - contexts: java.lang.Iterable[Context], epoch: Int) - = this { - val informationDesc = JavaConverters.asScalaIteratorConverter(inputDescriptors.iterator) - .asScala.toIndexedSeq map {a => a: org.apache.mxnet.DataDesc} - val inContexts = (contexts.asScala.toList map {a => a: org.apache.mxnet.Context}).toArray - new org.apache.mxnet.infer.Predictor(modelPathPrefix, informationDesc, inContexts, Some(epoch)) - } - - /** - * Takes input as Array of one dimensional arrays and creates the NDArray needed for inference - * The array will be reshaped based on the input descriptors. Example of calling in Java: - * - * {{{ - * float tmp[][] = new float[1][224]; - * for (int x = 0; x < 1; x++) - * for (int y = 0; y < 224; y++) - * tmp[x][y] = (int)(Math.random()*10); - * predictor.predict(tmp); - * }}} - * - * @param input An Array of a one-dimensional array. - * An extra Array is needed for when the model has more than one input. - * @return Indexed sequence array of outputs - */ - def predict(input: Array[Array[Float]]): - Array[Array[Float]] = { - predictor.predict(input).toArray - } - - /** - * Takes input as Array of one dimensional arrays and creates the NDArray needed for inference - * The array will be reshaped based on the input descriptors. Example of calling in Java: - * - * {{{ - * double tmp[][] = new double[1][224]; - * for (int x = 0; x < 1; x++) - * for (int y = 0; y < 224; y++) - * tmp[x][y] = (int)(Math.random()*10); - * predictor.predict(tmp); - * }}} - * - * @param input An Array of a one-dimensional array. - * An extra Array is needed for when the model has more than one input. - * @return Indexed sequence array of outputs - */ - - def predict(input: Array[Array[Double]]): - Array[Array[Double]] = { - predictor.predict(input).toArray - } - - /** - * Takes input as List of one dimensional iterables and creates the NDArray needed for inference - * The array will be reshaped based on the input descriptors. - * - * @param input A List of a one-dimensional iterables of DType Float. - * An extra List is needed for when the model has more than one input. - * @return Indexed sequence array of outputs - */ - def predict(input: java.util.List[java.util.List[java.lang.Float]]): - java.util.List[java.util.List[java.lang.Float]] = { - val in = JavaConverters.asScalaIteratorConverter(input.iterator).asScala.toIndexedSeq - (predictor.predict(in map {a => a.asScala.map(Float2float).toArray}) - map {b => b.map(float2Float).toList.asJava}).asJava - } - - - - /** - * Predict using NDArray as input - * This method is useful when the input is a batch of data - * Note: User is responsible for managing allocation/deallocation of input/output NDArrays. - * - * @param input Iterable of NDArrays - * @return Output of predictions as NDArrays - */ - def predictWithNDArray(input: java.lang.Iterable[NDArray]): - java.util.List[NDArray] = { - val ret = predictor.predictWithNDArray(convert(JavaConverters - .asScalaIteratorConverter(input.iterator).asScala.toIndexedSeq)) - // TODO: For some reason the implicit wasn't working here when trying to use convert. - // So did it this way. 
Needs to be figured out - (ret map {a => new NDArray(a)}).asJava - } - - private def convert[B, A <% B](l: IndexedSeq[A]): IndexedSeq[B] = l map { a => a: B } -} diff --git a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/package.scala b/scala-package/infer/src/main/scala/org/apache/mxnet/infer/package.scala deleted file mode 100644 index 75b48647ec95..000000000000 --- a/scala-package/infer/src/main/scala/org/apache/mxnet/infer/package.scala +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -package object infer { - private[mxnet] val handlerType = MXNetHandlerType.SingleThreadHandler -} diff --git a/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorOutputTest.java b/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorOutputTest.java deleted file mode 100644 index 6f3df86b8e74..000000000000 --- a/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorOutputTest.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet.infer.javaapi; - -import org.junit.Assert; -import org.junit.Test; - -public class ObjectDetectorOutputTest { - - private String predictedClassName = "lion"; - - private float delta = 0.00001f; - - @Test - public void testConstructor() { - - float[] arr = new float[]{0f, 1f, 2f, 3f, 4f}; - - ObjectDetectorOutput odOutput = new ObjectDetectorOutput(predictedClassName, arr); - - Assert.assertEquals(odOutput.getClassName(), predictedClassName); - Assert.assertEquals("Threshold not matching", odOutput.getProbability(), 0f, delta); - Assert.assertEquals("Threshold not matching", odOutput.getXMin(), 1f, delta); - Assert.assertEquals("Threshold not matching", odOutput.getXMax(), 3f, delta); - Assert.assertEquals("Threshold not matching", odOutput.getYMin(), 2f, delta); - Assert.assertEquals("Threshold not matching", odOutput.getYMax(), 4f, delta); - - } - - @Test (expected = ArrayIndexOutOfBoundsException.class) - public void testIncompleteArgsConstructor() { - - float[] arr = new float[]{0f, 1f}; - - ObjectDetectorOutput odOutput = new ObjectDetectorOutput(predictedClassName, arr); - - Assert.assertEquals(odOutput.getClassName(), predictedClassName); - Assert.assertEquals("Threshold not matching", odOutput.getProbability(), 0f, delta); - Assert.assertEquals("Threshold not matching", odOutput.getXMin(), 1f, delta); - - // This is where exception will be thrown - odOutput.getXMax(); - } -} diff --git a/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorTest.java b/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorTest.java deleted file mode 100644 index 3219b5aac8e1..000000000000 --- a/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/ObjectDetectorTest.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet.infer.javaapi; - -import org.apache.mxnet.Layout; -import org.apache.mxnet.javaapi.DType; -import org.apache.mxnet.javaapi.DataDesc; -import org.apache.mxnet.javaapi.NDArray; -import org.apache.mxnet.javaapi.Shape; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.awt.image.BufferedImage; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -public class ObjectDetectorTest { - - private List inputDesc; - private BufferedImage inputImage; - - private List> expectedResult; - - private ObjectDetector objectDetector; - - private int batchSize = 1; - - private int channels = 3; - - private int imageHeight = 512; - - private int imageWidth = 512; - - private String dataName = "data"; - - private int topK = 5; - - private String predictedClassName = "lion"; // Random string - - private Shape getTestShape() { - - return new Shape(new int[] {batchSize, channels, imageHeight, imageWidth}); - } - - @Before - public void setUp() { - - inputDesc = new ArrayList<>(); - inputDesc.add(new DataDesc(dataName, getTestShape(), DType.Float32(), Layout.NCHW())); - inputImage = new BufferedImage(imageWidth, imageHeight, BufferedImage.TYPE_INT_RGB); - objectDetector = Mockito.mock(ObjectDetector.class); - expectedResult = new ArrayList<>(); - expectedResult.add(new ArrayList()); - expectedResult.get(0).add(new ObjectDetectorOutput(predictedClassName, new float[]{})); - } - - @Test - public void testObjectDetectorWithInputImage() { - - Mockito.when(objectDetector.imageObjectDetect(inputImage, topK)).thenReturn(expectedResult); - List> actualResult = objectDetector.imageObjectDetect(inputImage, topK); - Mockito.verify(objectDetector, Mockito.times(1)).imageObjectDetect(inputImage, topK); - Assert.assertEquals(expectedResult, actualResult); - } - - - @Test - public void testObjectDetectorWithBatchImage() { - - List batchImage = new ArrayList<>(); - batchImage.add(inputImage); - Mockito.when(objectDetector.imageBatchObjectDetect(batchImage, topK)).thenReturn(expectedResult); - List> actualResult = objectDetector.imageBatchObjectDetect(batchImage, topK); - Mockito.verify(objectDetector, Mockito.times(1)).imageBatchObjectDetect(batchImage, topK); - Assert.assertEquals(expectedResult, actualResult); - } - - @Test - public void testObjectDetectorWithIterableOfBatchImage() { - - Set batchImage = new HashSet<>(); - batchImage.add(inputImage); - Mockito.when(objectDetector.imageBatchObjectDetect(batchImage, topK)).thenReturn(expectedResult); - List> actualResult = objectDetector.imageBatchObjectDetect(batchImage, topK); - Mockito.verify(objectDetector, Mockito.times(1)).imageBatchObjectDetect(batchImage, topK); - Assert.assertEquals(expectedResult, actualResult); - } - - @Test - public void testObjectDetectorWithNDArrayInput() { - - NDArray inputArr = ObjectDetector.bufferedImageToPixels(inputImage, getTestShape()); - List inputL = new ArrayList<>(); - inputL.add(inputArr); - Mockito.when(objectDetector.objectDetectWithNDArray(inputL, 5)).thenReturn(expectedResult); - List> actualResult = objectDetector.objectDetectWithNDArray(inputL, topK); - Mockito.verify(objectDetector, Mockito.times(1)).objectDetectWithNDArray(inputL, topK); - Assert.assertEquals(expectedResult, actualResult); - } - - @Test - public void testObjectDetectorWithIterableOfNDArrayInput() { - - NDArray inputArr = ObjectDetector.bufferedImageToPixels(inputImage, getTestShape()); - Set inputL = new 
HashSet<>(); - inputL.add(inputArr); - Mockito.when(objectDetector.objectDetectWithNDArray(inputL, 5)).thenReturn(expectedResult); - List> actualResult = objectDetector.objectDetectWithNDArray(inputL, topK); - Mockito.verify(objectDetector, Mockito.times(1)).objectDetectWithNDArray(inputL, topK); - Assert.assertEquals(expectedResult, actualResult); - } -} diff --git a/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/PredictorTest.java b/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/PredictorTest.java deleted file mode 100644 index 0d83c74fe901..000000000000 --- a/scala-package/infer/src/test/java/org/apache/mxnet/infer/javaapi/PredictorTest.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.infer.javaapi; - -import org.apache.mxnet.javaapi.Context; -import org.apache.mxnet.javaapi.NDArray; -import org.apache.mxnet.javaapi.Shape; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.*; - -public class PredictorTest { - - Predictor mockPredictor; - - @Before - public void setUp() { - mockPredictor = Mockito.mock(Predictor.class); - } - - @Test - public void testPredictWithFloatArray() { - - float tmp[][] = new float[1][224]; - for (int x = 0; x < 1; x++) { - for (int y = 0; y < 224; y++) - tmp[x][y] = (int) (Math.random() * 10); - } - - float [][] expectedResult = new float[][] {{1f, 2f}}; - Mockito.when(mockPredictor.predict(tmp)).thenReturn(expectedResult); - float[][] actualResult = mockPredictor.predict(tmp); - - Mockito.verify(mockPredictor, Mockito.times(1)).predict(tmp); - Assert.assertArrayEquals(expectedResult, actualResult); - } - - @Test - public void testPredictWithNDArray() { - - float[] tmpArr = new float[224]; - for (int y = 0; y < 224; y++) - tmpArr[y] = (int) (Math.random() * 10); - - NDArray arr = new org.apache.mxnet.javaapi.NDArray(tmpArr, new Shape(new int[] {1, 1, 1, 224}), new Context("cpu", 0)); - - List inputList = new ArrayList<>(); - inputList.add(arr); - - NDArray expected = new NDArray(tmpArr, new Shape(new int[] {1, 1, 1, 224}), new Context("cpu", 0)); - List expectedResult = new ArrayList<>(); - expectedResult.add(expected); - - Mockito.when(mockPredictor.predictWithNDArray(inputList)).thenReturn(expectedResult); - - List actualOutput = mockPredictor.predictWithNDArray(inputList); - - Mockito.verify(mockPredictor, Mockito.times(1)).predictWithNDArray(inputList); - - Assert.assertEquals(expectedResult, actualOutput); - } - - @Test - public void testPredictWithIterablesNDArray() { - - float[] tmpArr = new float[224]; - for (int y = 0; y < 224; y++) - tmpArr[y] = (int) (Math.random() * 10); - - NDArray arr = new 
org.apache.mxnet.javaapi.NDArray(tmpArr, new Shape(new int[] {1, 1, 1, 224}), new Context("cpu", 0)); - - Set inputSet = new HashSet<>(); - inputSet.add(arr); - - NDArray expected = new NDArray(tmpArr, new Shape(new int[] {1, 1, 1, 224}), new Context("cpu", 0)); - List expectedResult = new ArrayList<>(); - expectedResult.add(expected); - - Mockito.when(mockPredictor.predictWithNDArray(inputSet)).thenReturn(expectedResult); - - List actualOutput = mockPredictor.predictWithNDArray(inputSet); - - Mockito.verify(mockPredictor, Mockito.times(1)).predictWithNDArray(inputSet); - - Assert.assertEquals(expectedResult, actualOutput); - } - - @Test - public void testPredictWithListOfFloatsAsInput() { - List> input = new ArrayList<>(); - - input.add(Arrays.asList(new Float[] {1f, 2f})); - - List> expectedOutput = new ArrayList<>(input); - - Mockito.when(mockPredictor.predict(input)).thenReturn(expectedOutput); - - List> actualOutput = mockPredictor.predict(input); - - Mockito.verify(mockPredictor, Mockito.times(1)).predict(input); - - Assert.assertEquals(expectedOutput, actualOutput); - - } -} \ No newline at end of file diff --git a/scala-package/infer/src/test/resources/log4j.properties b/scala-package/infer/src/test/resources/log4j.properties deleted file mode 100644 index d82fd7ea4f3d..000000000000 --- a/scala-package/infer/src/test/resources/log4j.properties +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# for development debugging -log4j.rootLogger = debug, stdout - -log4j.appender.stdout = org.apache.log4j.ConsoleAppender -log4j.appender.stdout.Target = System.out -log4j.appender.stdout.layout = org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} [%t] [%c] [%p] - %m%n diff --git a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ClassifierSuite.scala b/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ClassifierSuite.scala deleted file mode 100644 index 11d418002744..000000000000 --- a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ClassifierSuite.scala +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.infer - -import java.io.File -import java.nio.file.{Files, Paths} -import java.util - -import org.apache.mxnet.module.Module -import org.apache.mxnet.{Context, DType, DataDesc, NDArray, Shape} -import org.mockito.Matchers._ -import org.mockito.Mockito -import org.scalatest.{BeforeAndAfterAll, FunSuite} -import org.slf4j.LoggerFactory - -import scala.io - -class ClassifierSuite extends FunSuite with BeforeAndAfterAll { - - private val logger = LoggerFactory.getLogger(classOf[Predictor]) - - var modelPath = "" - - var synFilePath = "" - - def createTempModelFiles(): Unit = { - val tempDirPath = System.getProperty("java.io.tmpdir") - logger.info("tempDirPath: %s".format(tempDirPath)) - - val modelDirPath = tempDirPath + File.separator + "model" - val synPath = tempDirPath + File.separator + "synset.txt" - val synsetFile = new File(synPath) - synsetFile.createNewFile() - val lines: util.List[String] = util.Arrays. - asList("class1 label1", "class2 label2", "class3 label3", "class4 label4") - val path = Paths.get(synPath) - Files.write(path, lines) - - this.modelPath = modelDirPath - this.synFilePath = synsetFile.getCanonicalPath - logger.info("modelPath: %s".format(this.modelPath)) - logger.info("synFilePath: %s".format(this.synFilePath)) - } - - override def beforeAll() { - createTempModelFiles - } - - override def afterAll() { - new File(synFilePath).delete() - } - - class MyClassyPredictor(val modelPathPrefix: String, - override val inputDescriptors: IndexedSeq[DataDesc]) - extends Predictor(modelPathPrefix, inputDescriptors, epoch = Some(0)) { - - override def loadModule(): Module = mockModule - - val getIDescriptor: IndexedSeq[DataDesc] = iDescriptors - val getBatchSize: Int = batchSize - val getBatchIndex: Int = batchIndex - - lazy val mockModule: Module = Mockito.mock(classOf[Module]) - } - - class MyClassifier(modelPathPrefix: String, - protected override val inputDescriptors: IndexedSeq[DataDesc]) - extends Classifier(modelPathPrefix, inputDescriptors, Context.cpu(), Some(0)) { - - override def getPredictor(): MyClassyPredictor = { - Mockito.mock(classOf[MyClassyPredictor]) - } - def getSynset(): IndexedSeq[String] = synset - } - - test("ClassifierSuite-getSynsetFilePath") { - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2))) - val testClassifer = new MyClassifier(modelPath, inputDescriptor) - - assertResult(this.synFilePath) { - testClassifer.synsetFilePath - } - } - - test("ClassifierSuite-readSynsetFile") { - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2))) - val testClassifer = new MyClassifier(modelPath, inputDescriptor) - - assertResult(io.Source.fromFile(this.synFilePath).getLines().toList) { - testClassifer.getSynset() - } - } - - test("ClassifierSuite-flatArray-topK") { - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2))) - val inputData = Array.fill[Float](12)(1) - - val predictResult : IndexedSeq[Array[Float]] = - IndexedSeq[Array[Float]](Array(.98f, 0.97f, 0.96f, 0.99f)) - - val testClassifier = new MyClassifier(modelPath, 
inputDescriptor) - - Mockito.doReturn(predictResult).when(testClassifier.predictor) - .predict(any(classOf[IndexedSeq[Array[Float]]])) - - val result: IndexedSeq[(String, Float)] = testClassifier. - classify(IndexedSeq(inputData), topK = Some(10)) - - assertResult(predictResult(0).sortBy(-_)) { - result.map(_._2).toArray - } - - } - - test("ClassifierSuite-flatFloat64Array-topK") { - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2))) - val inputData = Array.fill[Double](12)(1d) - - val predictResult : IndexedSeq[Array[Double]] = - IndexedSeq[Array[Double]](Array(.98d, 0.97d, 0.96d, 0.99d)) - - val testClassifier = new MyClassifier(modelPath, inputDescriptor) - - Mockito.doReturn(predictResult).when(testClassifier.predictor) - .predict(any(classOf[IndexedSeq[Array[Double]]])) - - val result: IndexedSeq[(String, Double)] = testClassifier. - classify(IndexedSeq(inputData), topK = Some(10)) - - assert((result(0)._2).getClass == 1d.getClass) - - assertResult(predictResult(0).sortBy(-_)) { - result.map(_._2).toArray - } - - } - - test("ClassifierSuite-flatArrayInput") { - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2))) - val inputData = Array.fill[Float](12)(1) - - val predictResult : IndexedSeq[Array[Float]] = - IndexedSeq[Array[Float]](Array(.98f, 0.97f, 0.96f, 0.99f)) - - val testClassifier = new MyClassifier(modelPath, inputDescriptor) - - Mockito.doReturn(predictResult).when(testClassifier.predictor) - .predict(any(classOf[IndexedSeq[Array[Float]]])) - - val result: IndexedSeq[(String, Float)] = testClassifier. - classify(IndexedSeq(inputData)) - - assertResult(predictResult(0)) { - result.map(_._2).toArray - } - } - - test("ClassifierSuite-flatArrayFloat64Input") { - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2))) - val inputData = Array.fill[Double](12)(1d) - - val predictResult : IndexedSeq[Array[Double]] = - IndexedSeq[Array[Double]](Array(.98d, 0.97d, 0.96d, 0.99d)) - - val testClassifier = new MyClassifier(modelPath, inputDescriptor) - - Mockito.doReturn(predictResult).when(testClassifier.predictor) - .predict(any(classOf[IndexedSeq[Array[Double]]])) - - val result: IndexedSeq[(String, Double)] = testClassifier. - classify(IndexedSeq(inputData)) - - assert((result(0)._2).getClass == 1d.getClass) - - assertResult(predictResult(0)) { - result.map(_._2).toArray - } - } - - test("ClassifierSuite-NDArray1InputWithoutTopK") { - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2))) - val inputDataShape = Shape(1, 3, 2, 2) - val inputData = NDArray.ones(inputDataShape) - val predictResult: IndexedSeq[Array[Float]] = - IndexedSeq[Array[Float]](Array(.98f, 0.97f, 0.96f, 0.99f)) - - val predictResultND: NDArray = NDArray.array(predictResult.flatten.toArray, Shape(1, 4)) - - val testClassifier = new MyClassifier(modelPath, inputDescriptor) - - Mockito.doReturn(IndexedSeq(predictResultND)).when(testClassifier.predictor) - .predictWithNDArray(any(classOf[IndexedSeq[NDArray]])) - - val result: IndexedSeq[IndexedSeq[(String, Float)]] = testClassifier. 
- classifyWithNDArray(IndexedSeq(inputData)) - - assert(predictResult.size == result.size) - - for(i <- predictResult.indices) { - assertResult(predictResult(i)) { - result(i).map(_._2).toArray - } - } - } - - test("ClassifierSuite-NDArray3InputWithTopK") { - - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2))) - val inputDataShape = Shape(3, 3, 2, 2) - val inputData = NDArray.ones(inputDataShape) - - val predictResult: IndexedSeq[Array[Float]] = - IndexedSeq[Array[Float]](Array(.98f, 0.97f, 0.96f, 0.99f), - Array(.98f, 0.97f, 0.96f, 0.99f), Array(.98f, 0.97f, 0.96f, 0.99f)) - - val predictResultND: NDArray = NDArray.array(predictResult.flatten.toArray, Shape(3, 4)) - - val testClassifier = new MyClassifier(modelPath, inputDescriptor) - - Mockito.doReturn(IndexedSeq(predictResultND)).when(testClassifier.predictor) - .predictWithNDArray(any(classOf[IndexedSeq[NDArray]])) - - val result: IndexedSeq[IndexedSeq[(String, Float)]] = testClassifier. - classifyWithNDArray(IndexedSeq(inputData), topK = Some(10)) - - assert(predictResult.size == result.size) - - for(i <- predictResult.indices) { - assertResult(predictResult(i).sortBy(-_)) { - result(i).map(_._2).toArray - } - } - } - -} diff --git a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ImageClassifierSuite.scala b/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ImageClassifierSuite.scala deleted file mode 100644 index 5198c4a1f309..000000000000 --- a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ImageClassifierSuite.scala +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet.infer - -import org.apache.mxnet.{DType, DataDesc, Shape, NDArray, Context} - -import org.mockito.Matchers._ -import org.mockito.Mockito -import org.scalatest.BeforeAndAfterAll - -// scalastyle:off -import java.awt.image.BufferedImage -// scalastyle:on - -/** - * Unit tests for ImageClassifier - */ -class ImageClassifierSuite extends ClassifierSuite with BeforeAndAfterAll { - - class MyImageClassifier(modelPathPrefix: String, - inputDescriptors: IndexedSeq[DataDesc]) - extends ImageClassifier(modelPathPrefix, inputDescriptors) { - - override def getPredictor(): MyClassyPredictor = { - Mockito.mock(classOf[MyClassyPredictor]) - } - - override def getClassifier(modelPathPrefix: String, inputDescriptors: - IndexedSeq[DataDesc], contexts: Array[Context] = Context.cpu(), - epoch: Option[Int] = Some(0)): Classifier = { - Mockito.mock(classOf[Classifier]) - } - - def getSynset(): IndexedSeq[String] = synset - } - - test("ImageClassifierSuite-testRescaleImage") { - val image1 = new BufferedImage(100, 200, BufferedImage.TYPE_BYTE_GRAY) - val image2 = ImageClassifier.reshapeImage(image1, 1000, 2000) - - assert(image2.getWidth === 1000) - assert(image2.getHeight === 2000) - } - - test("ImageClassifierSuite-testConvertBufferedImageToNDArray") { - val dType = DType.Float32 - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc(modelPath, Shape(1, 3, 2, 2), - dType, "NCHW")) - - val image1 = new BufferedImage(100, 200, BufferedImage.TYPE_BYTE_GRAY) - val image2 = ImageClassifier.reshapeImage(image1, 2, 2) - - val result = ImageClassifier.bufferedImageToPixels(image2, Shape(3, 2, 2)) - - assert(result.shape == inputDescriptor(0).shape.drop(1)) - assert(result.dtype == DType.Float32) - - val resultFloat64 = ImageClassifier.bufferedImageToPixels(image2, Shape(3, 2, 2), DType.Float64) - assert(resultFloat64.dtype == DType.Float64) - } - - test("ImageClassifierSuite-testWithInputImage") { - val dType = DType.Float32 - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc(modelPath, Shape(1, 3, 512, 512), - dType, "NCHW")) - - val inputImage = new BufferedImage(224, 224, BufferedImage.TYPE_INT_RGB) - - val testImageClassifier: ImageClassifier = - new MyImageClassifier(modelPath, inputDescriptor) - - val predictExpected: IndexedSeq[Array[Float]] = - IndexedSeq[Array[Float]](Array(.98f, 0.97f, 0.96f, 0.99f)) - - val synset = testImageClassifier.synset - - val predictExpectedOp: List[(String, Float)] = - List[(String, Float)]((synset(1), .98f), (synset(2), .97f), - (synset(3), .96f), (synset(0), .99f)) - - val predictExpectedND: NDArray = NDArray.array(predictExpected.flatten.toArray, Shape(1, 4)) - - Mockito.doReturn(IndexedSeq(predictExpectedND)).when(testImageClassifier.predictor) - .predictWithNDArray(any(classOf[IndexedSeq[NDArray]])) - - Mockito.doReturn(IndexedSeq(predictExpectedOp)) - .when(testImageClassifier.getClassifier(modelPath, inputDescriptor)) - .classifyWithNDArray(any(classOf[IndexedSeq[NDArray]]), Some(anyInt())) - - val predictResult: IndexedSeq[IndexedSeq[(String, Float)]] = - testImageClassifier.classifyImage(inputImage, Some(4)) - - for (i <- predictExpected.indices) { - assertResult(predictExpected(i).sortBy(-_)) { - predictResult(i).map(_._2).toArray - } - } - - } - - - test("ImageClassifierSuite-testWithInputBatchImage") { - val dType = DType.Float32 - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc(modelPath, Shape(1, 3, 512, 512), - dType, "NCHW")) - - val inputImage = new BufferedImage(224, 224, BufferedImage.TYPE_INT_RGB) - val 
imageBatch = IndexedSeq[BufferedImage](inputImage, inputImage) - - val testImageClassifier: ImageClassifier = - new MyImageClassifier(modelPath, inputDescriptor) - - val predictExpected: IndexedSeq[Array[Array[Float]]] = - IndexedSeq[Array[Array[Float]]](Array(Array(.98f, 0.97f, 0.96f, 0.99f), - Array(.98f, 0.97f, 0.96f, 0.99f))) - - val synset = testImageClassifier.synset - - val predictExpectedOp: List[List[(String, Float)]] = - List[List[(String, Float)]](List((synset(1), .98f), (synset(2), .97f), - (synset(3), .96f), (synset(0), .99f)), - List((synset(1), .98f), (synset(2), .97f), - (synset(3), .96f), (synset(0), .99f))) - - val predictExpectedND: NDArray = NDArray.array(predictExpected.flatten.flatten.toArray, - Shape(2, 4)) - - Mockito.doReturn(IndexedSeq(predictExpectedND)).when(testImageClassifier.predictor) - .predictWithNDArray(any(classOf[IndexedSeq[NDArray]])) - - Mockito.doReturn(IndexedSeq(predictExpectedOp)) - .when(testImageClassifier.getClassifier(modelPath, inputDescriptor)) - .classifyWithNDArray(any(classOf[IndexedSeq[NDArray]]), Some(anyInt())) - - val result: IndexedSeq[IndexedSeq[(String, Float)]] = - testImageClassifier.classifyImageBatch(imageBatch, Some(4)) - - for (i <- predictExpected.indices) { - for (idx <- predictExpected(i).indices) { - assertResult(predictExpected(i)(idx).sortBy(-_)) { - result(i).map(_._2).toArray - } - } - } - } - -} diff --git a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ObjectDetectorSuite.scala b/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ObjectDetectorSuite.scala deleted file mode 100644 index 39139f8d3d2e..000000000000 --- a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/ObjectDetectorSuite.scala +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet.infer - -// scalastyle:off -import java.awt.image.BufferedImage - -import org.apache.mxnet.{DType, Layout} -// scalastyle:on -import org.apache.mxnet.Context -import org.apache.mxnet.DataDesc -import org.apache.mxnet.{NDArray, Shape} -import org.mockito.Matchers.any -import org.mockito.Mockito -import org.scalatest.BeforeAndAfterAll - - -class ObjectDetectorSuite extends ClassifierSuite with BeforeAndAfterAll { - - class MyObjectDetector(modelPathPrefix: String, - inputDescriptors: IndexedSeq[DataDesc]) - extends ObjectDetector(modelPathPrefix, inputDescriptors) { - - override def getImageClassifier(modelPathPrefix: String, inputDescriptors: - IndexedSeq[DataDesc], contexts: Array[Context] = Context.cpu(), - epoch: Option[Int] = Some(0)): ImageClassifier = { - new MyImageClassifier(modelPathPrefix, inputDescriptors) - } - - } - - class MyImageClassifier(modelPathPrefix: String, - protected override val inputDescriptors: IndexedSeq[DataDesc]) - extends ImageClassifier(modelPathPrefix, inputDescriptors, Context.cpu(), Some(0)) { - - override def getPredictor(): MyClassyPredictor = { - Mockito.mock(classOf[MyClassyPredictor]) - } - - override def getClassifier(modelPathPrefix: String, inputDescriptors: IndexedSeq[DataDesc], - contexts: Array[Context] = Context.cpu(), - epoch: Option[Int] = Some(0)): - Classifier = { - new MyClassifier(modelPathPrefix, inputDescriptors) - } - } - - class MyClassifier(modelPathPrefix: String, - protected override val inputDescriptors: IndexedSeq[DataDesc]) - extends Classifier(modelPathPrefix, inputDescriptors) { - - override def getPredictor(): MyClassyPredictor = { - Mockito.mock(classOf[MyClassyPredictor]) - } - def getSynset(): IndexedSeq[String] = synset - } - - test("objectDetectWithInputImage") { - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc(modelPath, Shape(1, 3, 512, 512), - DType.Float32, Layout.NCHW)) - val inputImage = new BufferedImage(512, 512, BufferedImage.TYPE_INT_RGB) - val testObjectDetector: ObjectDetector = - new MyObjectDetector(modelPath, inputDescriptor) - - val predictRaw: IndexedSeq[Array[Array[Float]]] = - IndexedSeq[Array[Array[Float]]](Array( - Array(1.0f, 0.42f, 0.45f, 0.66f, 0.72f, 0.88f), - Array(2.0f, 0.88f, 0.21f, 0.33f, 0.45f, 0.66f), - Array(3.0f, 0.62f, 0.50f, 0.42f, 0.68f, 0.99f) - )) - val predictResultND: NDArray = - NDArray.array(predictRaw.flatten.flatten.toArray, Shape(1, 3, 6)) - - val synset = testObjectDetector.synset - - val predictResult: IndexedSeq[IndexedSeq[(String, Array[Float])]] = - IndexedSeq[IndexedSeq[(String, Array[Float])]]( - IndexedSeq[(String, Array[Float])]( - (synset(2), Array(0.88f, 0.21f, 0.33f, 0.45f, 0.66f)), - (synset(3), Array(0.62f, 0.50f, 0.42f, 0.68f, 0.99f)), - (synset(1), Array(0.42f, 0.45f, 0.66f, 0.72f, 0.88f)) - ) - ) - - Mockito.doReturn(IndexedSeq(predictResultND)).when(testObjectDetector.predictor) - .predictWithNDArray(any(classOf[IndexedSeq[NDArray]])) - - val result: IndexedSeq[IndexedSeq[(String, Array[Float])]] = - testObjectDetector.imageObjectDetect(inputImage, Some(3)) - - for (idx <- predictResult(0).indices) { - assert(predictResult(0)(idx)._1 == result(0)(idx)._1) - for (arridx <- predictResult(0)(idx)._2.indices) { - assert(predictResult(0)(idx)._2(arridx) == result(0)(idx)._2(arridx)) - } - } - } - - test("objectDetectWithBatchImages") { - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc(modelPath, Shape(1, 3, 512, 512), - DType.Float32, Layout.NCHW)) - val inputImage = new BufferedImage(224, 224, 
BufferedImage.TYPE_INT_RGB) - val imageBatch = IndexedSeq[BufferedImage](inputImage, inputImage) - - val testObjectDetector: ObjectDetector = - new MyObjectDetector(modelPath, inputDescriptor) - - val predictRaw: IndexedSeq[Array[Array[Float]]] = - IndexedSeq[Array[Array[Float]]]( - Array( - Array(1.0f, 0.42f, 0.45f, 0.66f, 0.72f, 0.88f), - Array(2.0f, 0.88f, 0.21f, 0.33f, 0.45f, 0.66f), - Array(3.0f, 0.62f, 0.50f, 0.42f, 0.68f, 0.99f) - ), - Array( - Array(0.0f, 0.42f, 0.45f, 0.66f, 0.72f, 0.88f), - Array(2.0f, 0.23f, 0.21f, 0.33f, 0.45f, 0.66f), - Array(2.0f, 0.94f, 0.50f, 0.42f, 0.68f, 0.99f) - ) - ) - val predictResultND: NDArray = - NDArray.array(predictRaw.flatten.flatten.toArray, Shape(2, 3, 6)) - - val synset = testObjectDetector.synset - - val predictResult: IndexedSeq[IndexedSeq[(String, Array[Float])]] = - IndexedSeq[IndexedSeq[(String, Array[Float])]]( - IndexedSeq[(String, Array[Float])]( - (synset(2), Array(0.88f, 0.21f, 0.33f, 0.45f, 0.66f)), - (synset(3), Array(0.62f, 0.50f, 0.42f, 0.68f, 0.99f)), - (synset(1), Array(0.42f, 0.45f, 0.66f, 0.72f, 0.88f)) - ), - IndexedSeq[(String, Array[Float])]( - (synset(2), Array(0.94f, 0.50f, 0.42f, 0.68f, 0.99f)), - (synset(0), Array(0.42f, 0.45f, 0.66f, 0.72f, 0.88f)), - (synset(2), Array(0.23f, 0.21f, 0.33f, 0.45f, 0.66f)) - ) - ) - - Mockito.doReturn(IndexedSeq(predictResultND)).when(testObjectDetector.predictor) - .predictWithNDArray(any(classOf[IndexedSeq[NDArray]])) - - val result: IndexedSeq[IndexedSeq[(String, Array[Float])]] = - testObjectDetector.imageBatchObjectDetect(imageBatch, Some(3)) - for (preidx <- predictResult.indices) { - for (idx <- predictResult(preidx).indices) { - assert(predictResult(preidx)(idx)._1 == result(preidx)(idx)._1) - for (arridx <- predictResult(preidx)(idx)._2.indices) { - assert(predictResult(preidx)(idx)._2(arridx) == result(preidx)(idx)._2(arridx)) - } - } - } - - } - -} diff --git a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/PredictorSuite.scala b/scala-package/infer/src/test/scala/org/apache/mxnet/infer/PredictorSuite.scala deleted file mode 100644 index 9afbc9b3d4a8..000000000000 --- a/scala-package/infer/src/test/scala/org/apache/mxnet/infer/PredictorSuite.scala +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet.infer - -import org.apache.mxnet.io.NDArrayIter -import org.apache.mxnet.module.{BaseModule, Module} -import org.apache.mxnet._ -import org.mockito.Matchers._ -import org.mockito.Mockito -import org.scalatest.{BeforeAndAfterAll, FunSuite} - -class PredictorSuite extends FunSuite with BeforeAndAfterAll { - - class MyPredictor(val modelPathPrefix: String, - override val inputDescriptors: IndexedSeq[DataDesc]) - extends Predictor(modelPathPrefix, inputDescriptors, epoch = Some(0)) { - - override def loadModule(): Module = mockModule - - val getIDescriptor: IndexedSeq[DataDesc] = iDescriptors - val getBatchSize: Int = batchSize - val getBatchIndex: Int = batchIndex - - lazy val mockModule: Module = Mockito.mock(classOf[Module]) - } - - test("PredictorSuite-testPredictorConstruction") { - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(1, 3, 2, 2), - layout = Layout.NCHW)) - - val mockPredictor = new MyPredictor("xyz", inputDescriptor) - - assert(mockPredictor.getBatchSize == 1) - assert(mockPredictor.getBatchIndex == inputDescriptor(0).layout.indexOf('N')) - - val inputDescriptor2 = IndexedSeq[DataDesc](new DataDesc("data", Shape(1, 3, 2, 2), - layout = Layout.NCHW), - new DataDesc("data", Shape(2, 3, 2, 2), layout = Layout.NCHW)) - - assertThrows[IllegalArgumentException] { - val mockPredictor = new MyPredictor("xyz", inputDescriptor2) - } - - // batchsize is defaulted to 1 - val iDesc2 = IndexedSeq[DataDesc](new DataDesc("data", Shape(3, 2, 2), layout = "CHW")) - val p2 = new MyPredictor("xyz", inputDescriptor) - assert(p2.getBatchSize == 1, "should use a default batch size of 1") - - } - - test("PredictorSuite-testWithFlatArrays") { - - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2), - layout = Layout.NCHW)) - val inputData = Array.fill[Float](12)(1) - - // this will disposed at the end of the predict call on Predictor. - val predictResult = IndexedSeq(NDArray.ones(Shape(1, 3, 2, 2))) - - val testPredictor = new MyPredictor("xyz", inputDescriptor) - - Mockito.doReturn(predictResult).when(testPredictor.mockModule) - .predict(any(classOf[NDArrayIter]), any[Int], any[Boolean]) - - val testFun = testPredictor.predict(IndexedSeq(inputData)) - - assert(testFun.size == 1, "output size should be 1 ") - - assert(Array.fill[Float](12)(1).mkString == testFun(0).mkString) - - // Verify that the module was bound with batch size 1 and rebound back to the original - // input descriptor. the number of times is twice here because loadModule overrides the - // initial bind. - Mockito.verify(testPredictor.mockModule, Mockito.times(2)).bind(any[IndexedSeq[DataDesc]], - any[Option[IndexedSeq[DataDesc]]], any[Boolean], any[Boolean], any[Boolean] - , any[Option[BaseModule]], any[String]) - } - - test("PredictorSuite-testWithFlatFloat64Arrays") { - - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2), - layout = Layout.NCHW, dtype = DType.Float64)) - val inputData = Array.fill[Double](12)(1d) - - // this will disposed at the end of the predict call on Predictor. 
- val predictResult = IndexedSeq(NDArray.ones(Shape(1, 3, 2, 2), dtype = DType.Float64)) - - val testPredictor = new MyPredictor("xyz", inputDescriptor) - - Mockito.doReturn(predictResult).when(testPredictor.mockModule) - .predict(any(classOf[NDArrayIter]), any[Int], any[Boolean]) - - val testFun = testPredictor.predict(IndexedSeq(inputData)) - - assert(testFun.size == 1, "output size should be 1 ") - - assert(testFun(0)(0).getClass == 1d.getClass) - - assert(Array.fill[Double](12)(1d).mkString == testFun(0).mkString) - - // Verify that the module was bound with batch size 1 and rebound back to the original - // input descriptor. the number of times is twice here because loadModule overrides the - // initial bind. - Mockito.verify(testPredictor.mockModule, Mockito.times(2)).bind(any[IndexedSeq[DataDesc]], - any[Option[IndexedSeq[DataDesc]]], any[Boolean], any[Boolean], any[Boolean] - , any[Option[BaseModule]], any[String]) - } - - test("PredictorSuite-testWithNDArray") { - val inputDescriptor = IndexedSeq[DataDesc](new DataDesc("data", Shape(2, 3, 2, 2), - layout = Layout.NCHW)) - val inputData = NDArray.ones(Shape(1, 3, 2, 2)) - - // this will disposed at the end of the predict call on Predictor. - val predictResult = IndexedSeq(NDArray.ones(Shape(1, 3, 2, 2))) - - val testPredictor = new MyPredictor("xyz", inputDescriptor) - - Mockito.doReturn(predictResult).when(testPredictor.mockModule) - .predict(any(classOf[NDArrayIter]), any[Int], any[Boolean]) - - val testFun = testPredictor.predictWithNDArray(IndexedSeq(inputData)) - - assert(testFun.size == 1, "output size should be 1") - - assert(Array.fill[Float](12)(1).mkString == testFun(0).toArray.mkString) - - Mockito.verify(testPredictor.mockModule, Mockito.times(2)).bind(any[IndexedSeq[DataDesc]], - any[Option[IndexedSeq[DataDesc]]], any[Boolean], any[Boolean], any[Boolean] - , any[Option[BaseModule]], any[String]) - } -} diff --git a/scala-package/init-native/pom.xml b/scala-package/init-native/pom.xml deleted file mode 100644 index 1254f615c64b..000000000000 --- a/scala-package/init-native/pom.xml +++ /dev/null @@ -1,190 +0,0 @@ - - - - 4.0.0 - - org.apache.mxnet - mxnet-parent - INTERNAL - ../pom.xml - - - libmxnet-init-scala - MXNet Scala Package - Initializer Native - - - ${project.parent.basedir}/.. 
- - - ${libtype} - - - - osx-x86_64 - - mac - - - - - org.codehaus.mojo - native-maven-plugin - true - - darwin - generic-classic - ${cxx} - ${cxx} - - - src/main/native - - org_apache_mxnet_init_native_c_api.cc - - - - - -std=c++0x - - - -I${MXNET_DIR}/include - -I${MXNET_DIR}/3rdparty/dmlc-core/include - -I${MXNET_DIR}/3rdparty/mshadow - -I${MXNET_DIR}/3rdparty/dlpack/include - -I${MXNET_DIR}/3rdparty/tvm/nnvm/include - -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0 - -g -O0 -fPIC -msse3 -mf16c - -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs - - - -shared - - - -framework JavaVM - -Wl,-exported_symbol,_Java_* - -Wl,-x - - - -lmxnet -L${MXNET_DIR}/lib - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.6.0 - - - post-native-build - package - - exec - - - install_name_tool - -add_rpath @loader_path ${project.build.directory}/${project.artifactId}.jnilib - - - - - - - - - linux-x86_64 - - - unix - Linux - - - - - - org.codehaus.mojo - native-maven-plugin - true - - linux - generic-classic - ${cxx} - ${cxx} - - - src/main/native - - org_apache_mxnet_init_native_c_api.cc - - - - - -std=c++0x - - - -I${MXNET_DIR}/include - -I${MXNET_DIR}/3rdparty/dmlc-core/include - -I${MXNET_DIR}/3rdparty/mshadow - -I${MXNET_DIR}/3rdparty/dlpack/include - -I${MXNET_DIR}/3rdparty/tvm/nnvm/include - -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0 - -O3 -DNDEBUG=1 -fPIC -msse3 -mf16c - -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs - - - -shared - - - -Wl,--whole-archive - -Wl,--no-whole-archive -pthread -lm -fopenmp -lrt - - - -Wl,-rpath=${dollar}ORIGIN -lmxnet -L${MXNET_DIR}/lib - - - - - - - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.6.0 - - - link-native-lib - generate-resources - - exec - - - bash - -c 'ln -sf ${MXNET_DIR}/lib/* ${project.build.directory}/' - - - - - - - diff --git a/scala-package/init-native/src/main/native/org_apache_mxnet_init_native_c_api.cc b/scala-package/init-native/src/main/native/org_apache_mxnet_init_native_c_api.cc deleted file mode 100644 index b689521bcad1..000000000000 --- a/scala-package/init-native/src/main/native/org_apache_mxnet_init_native_c_api.cc +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! 
- * Copyright (c) 2015 by Contributors - * \file org_apache_mxnet_native_c_api.cc - * \brief JNI function implementations - */ -#include "org_apache_mxnet_init_native_c_api.h" // generated by javah -#include -#include - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_mxSymbolListAtomicSymbolCreators - (JNIEnv *env, jobject obj, jobject symbolList) { - mx_uint outSize; - AtomicSymbolCreator *outArray; - int ret = MXSymbolListAtomicSymbolCreators(&outSize, &outArray); - - jclass longCls = env->FindClass("java/lang/Long"); - jmethodID longConst = env->GetMethodID(longCls, "", "(J)V"); - - jclass listCls = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listCls, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - - for (size_t i = 0; i < outSize; ++i) { - env->CallObjectMethod(symbolList, listAppend, - env->NewObject(longCls, longConst, outArray[i])); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_mxSymbolGetAtomicSymbolInfo - (JNIEnv *env, jobject obj, jlong symbolPtr, jobject name, jobject desc, jobject numArgs, - jobject argNames, jobject argTypes, jobject argDescs, jobject keyVarNumArgs) { - - const char *cName; - const char *cDesc; - mx_uint cNumArgs; - const char **cArgNames; - const char **cArgTypes; - const char **cArgDescs; - const char *cKeyVarNumArgs; - - int ret = MXSymbolGetAtomicSymbolInfo(reinterpret_cast(symbolPtr), - &cName, &cDesc, &cNumArgs, - &cArgNames, &cArgTypes, &cArgDescs, - &cKeyVarNumArgs); - - jclass refIntClass = env->FindClass("org/apache/mxnet/init/Base$RefInt"); - jfieldID valueInt = env->GetFieldID(refIntClass, "value", "I"); - - jclass refStringClass = env->FindClass("org/apache/mxnet/init/Base$RefString"); - jfieldID valueStr = env->GetFieldID(refStringClass, "value", "Ljava/lang/String;"); - - // scala.collection.mutable.ListBuffer append method - jclass listClass = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listClass, "$plus$eq", - "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - - env->SetObjectField(name, valueStr, env->NewStringUTF(cName)); - env->SetObjectField(desc, valueStr, env->NewStringUTF(cDesc)); - env->SetObjectField(keyVarNumArgs, valueStr, env->NewStringUTF(cKeyVarNumArgs)); - env->SetIntField(numArgs, valueInt, static_cast(cNumArgs)); - for (size_t i = 0; i < cNumArgs; ++i) { - env->CallObjectMethod(argNames, listAppend, env->NewStringUTF(cArgNames[i])); - env->CallObjectMethod(argTypes, listAppend, env->NewStringUTF(cArgTypes[i])); - env->CallObjectMethod(argDescs, listAppend, env->NewStringUTF(cArgDescs[i])); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_mxListAllOpNames - (JNIEnv *env, jobject obj, jobject nameList) { - mx_uint outSize; - const char **outArray; - int ret = MXListAllOpNames(&outSize, &outArray); - - jclass listCls = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listCls, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - for (size_t i = 0; i < outSize; ++i) { - env->CallObjectMethod(nameList, listAppend, env->NewStringUTF(outArray[i])); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_nnGetOpHandle - (JNIEnv *env, jobject obj, jstring jopname, jobject jhandle) { - - OpHandle handle; - const char *opname = env->GetStringUTFChars(jopname, 0); - int ret = NNGetOpHandle(opname, &handle); - 
env->ReleaseStringUTFChars(jopname, opname); - - jclass refClass = env->FindClass("org/apache/mxnet/init/Base$RefLong"); - jfieldID refFid = env->GetFieldID(refClass, "value", "J"); - env->SetLongField(jhandle, refFid, reinterpret_cast(handle)); - - return ret; -} diff --git a/scala-package/init-native/src/main/native/org_apache_mxnet_init_native_c_api.h b/scala-package/init-native/src/main/native/org_apache_mxnet_init_native_c_api.h deleted file mode 100644 index 6ff6ae6a107c..000000000000 --- a/scala-package/init-native/src/main/native/org_apache_mxnet_init_native_c_api.h +++ /dev/null @@ -1,45 +0,0 @@ -/* DO NOT EDIT THIS FILE - it is machine generated */ -#include -/* Header for class org_apache_mxnet_init_LibInfo */ - -#ifndef _Included_org_apache_mxnet_init_LibInfo -#define _Included_org_apache_mxnet_init_LibInfo -#ifdef __cplusplus -extern "C" { -#endif -/* - * Class: org_apache_mxnet_init_LibInfo - * Method: mxSymbolListAtomicSymbolCreators - * Signature: (Lscala/collection/mutable/ListBuffer;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_mxSymbolListAtomicSymbolCreators - (JNIEnv *, jobject, jobject); - -/* - * Class: org_apache_mxnet_init_LibInfo - * Method: mxSymbolGetAtomicSymbolInfo - * Signature: (JLorg/apache/mxnet/init/Base/RefString;Lorg/apache/mxnet/init/Base/RefString;Lorg/apache/mxnet/init/Base/RefInt;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lorg/apache/mxnet/init/Base/RefString;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_mxSymbolGetAtomicSymbolInfo - (JNIEnv *, jobject, jlong, jobject, jobject, jobject, jobject, jobject, jobject, jobject); - -/* - * Class: org_apache_mxnet_init_LibInfo - * Method: mxListAllOpNames - * Signature: (Lscala/collection/mutable/ListBuffer;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_mxListAllOpNames - (JNIEnv *, jobject, jobject); - -/* - * Class: org_apache_mxnet_init_LibInfo - * Method: nnGetOpHandle - * Signature: (Ljava/lang/String;Lorg/apache/mxnet/init/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_init_LibInfo_nnGetOpHandle - (JNIEnv *, jobject, jstring, jobject); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/scala-package/init/pom.xml b/scala-package/init/pom.xml deleted file mode 100644 index fb9e3d3535fb..000000000000 --- a/scala-package/init/pom.xml +++ /dev/null @@ -1,89 +0,0 @@ - - - - 4.0.0 - - org.apache.mxnet - mxnet-parent - INTERNAL - ../pom.xml - - - mxnet-scala-init - MXNet Scala Package - Initializer - - - - - org.codehaus.mojo - native-maven-plugin - true - - - javah - verify - - default - ${project.build.directory}/custom-javah - ${basedir} - org_apache_mxnet_init_native_c_api.h - - org.apache.mxnet.init.LibInfo - - - - javah - - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.6.0 - - - verify-javah - verify - - exec - - - diff - - ${project.build.directory}/custom-javah/org_apache_mxnet_init_native_c_api.h - ${project.parent.basedir}/init-native/src/main/native/org_apache_mxnet_init_native_c_api.h - - - - - - - org.apache.maven.plugins - maven-deploy-plugin - - true - - - - - - diff --git a/scala-package/init/src/main/scala/org/apache/mxnet/init/Base.scala b/scala-package/init/src/main/scala/org/apache/mxnet/init/Base.scala deleted file mode 100644 index e3fa28fb2a59..000000000000 --- a/scala-package/init/src/main/scala/org/apache/mxnet/init/Base.scala +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or 
more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.init - -import java.io.File - -object Base { - tryLoadInitLibrary() - val _LIB = new LibInfo - - // type definitions - class RefInt(val value: Int = 0) - class RefLong(val value: Long = 0) - class RefFloat(val value: Float = 0) - class RefString(val value: String = null) - - /** - * This C Pointer Address point to the - * actual memory piece created in MXNet Engine - */ - type CPtrAddress = Long - - /** - * NDArrayHandle is the C pointer to - * the NDArray - */ - type NDArrayHandle = CPtrAddress - /** - * FunctionHandle is the C pointer to - * the ids of the operators - */ - type FunctionHandle = CPtrAddress - /** - * KVStoreHandle is the C pointer to - * the KVStore - */ - type KVStoreHandle = CPtrAddress - /** - * ExecutorHandle is the C pointer to - * the Executor - */ - type ExecutorHandle = CPtrAddress - /** - * SymbolHandle is the C pointer to - * the Symbol - */ - type SymbolHandle = CPtrAddress - - @throws(classOf[UnsatisfiedLinkError]) - private def tryLoadInitLibrary(): Unit = { - val userDir : File = new File(System.getProperty("user.dir")) - var nativeDir : File = new File(userDir, "init-native") - if (!nativeDir.exists()) { - nativeDir = new File(userDir.getParent, "init-native") - if (!nativeDir.exists()) { - throw new IllegalStateException("scala-init should be executed inside scala-package folder") - } - } - val baseDir = nativeDir.getAbsolutePath - - val os = System.getProperty("os.name") - if (os.startsWith("Linux")) { - System.load(s"$baseDir/target/libmxnet-init-scala.so") - } else if (os.startsWith("Mac")) { - System.load(s"$baseDir/target/libmxnet-init-scala.jnilib") - } else { - // TODO(yizhi) support windows later - throw new UnsatisfiedLinkError() - } - } -} diff --git a/scala-package/init/src/main/scala/org/apache/mxnet/init/LibInfo.scala b/scala-package/init/src/main/scala/org/apache/mxnet/init/LibInfo.scala deleted file mode 100644 index c813d449f652..000000000000 --- a/scala-package/init/src/main/scala/org/apache/mxnet/init/LibInfo.scala +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.init - -import org.apache.mxnet.init.Base._ - -import scala.collection.mutable.ListBuffer - -class LibInfo { - /** - * Get the list of the symbol ids - * @param symbolList Pass in an empty ListBuffer and obtain a list of operator IDs - * @return Callback result - */ - @native def mxSymbolListAtomicSymbolCreators(symbolList: ListBuffer[SymbolHandle]): Int - - /** - * Get the detailed information of an operator - * @param handle The ID of the operator - * @param name Name of the operator - * @param desc Description of the operator - * @param numArgs Number of arguments - * @param argNames Argument names - * @param argTypes Argument types - * @param argDescs Argument descriptions - * @param keyVarNumArgs Kwargs number - * @return Callback result - */ - @native def mxSymbolGetAtomicSymbolInfo(handle: SymbolHandle, - name: RefString, - desc: RefString, - numArgs: RefInt, - argNames: ListBuffer[String], - argTypes: ListBuffer[String], - argDescs: ListBuffer[String], - keyVarNumArgs: RefString): Int - /** - * Get the name list of all operators - * @param names Names of all operator - * @return Callback result - */ - @native def mxListAllOpNames(names: ListBuffer[String]): Int - - /** - * Get operator ID from its name - * @param opName Operator name - * @param opHandle Operator ID - * @return Callback result - */ - @native def nnGetOpHandle(opName: String, opHandle: RefLong): Int -} diff --git a/scala-package/macros/pom.xml b/scala-package/macros/pom.xml deleted file mode 100644 index cb84824bb4e2..000000000000 --- a/scala-package/macros/pom.xml +++ /dev/null @@ -1,73 +0,0 @@ - - - - 4.0.0 - - org.apache.mxnet - mxnet-parent - INTERNAL - ../pom.xml - - - mxnet-macros - MXNet Scala Package - Macros - - - - org.apache.mxnet - mxnet-scala-init - INTERNAL - provided - - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - META-INF/*.SF - META-INF/*.DSA - META-INF/*.RSA - - - - - org.scalatest - scalatest-maven-plugin - - - ${project.parent.basedir}/init-native - - - -Djava.library.path=${project.parent.basedir}/native/target \ - -Dlog4j.configuration=file://${project.basedir}/src/test/resources/log4j.properties - - - - - org.scalastyle - scalastyle-maven-plugin - - - - diff --git a/scala-package/macros/src/main/scala/org/apache/mxnet/APIDocGenerator.scala b/scala-package/macros/src/main/scala/org/apache/mxnet/APIDocGenerator.scala deleted file mode 100644 index e939b2ebf9e7..000000000000 --- a/scala-package/macros/src/main/scala/org/apache/mxnet/APIDocGenerator.scala +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet - -import java.io._ -import java.security.MessageDigest - -import scala.collection.mutable.ListBuffer - -/** - * This object will generate the Scala documentation of the Scala/Java APIs - * The code will be executed during Macros stage and file live in Core stage - */ -private[mxnet] object APIDocGenerator extends GeneratorBase with RandomHelpers { - - /** - * Main method used to generate code and write to files - * A hash check placed at the end to verify changes - * @param args Input args - */ - def main(args: Array[String]): Unit = { - val FILE_PATH = args(0) - val hashCollector = ListBuffer[String]() - hashCollector += typeSafeClassGen(FILE_PATH, true) - hashCollector += typeSafeClassGen(FILE_PATH, false) - hashCollector += typeSafeRandomClassGen(FILE_PATH, true) - hashCollector += typeSafeRandomClassGen(FILE_PATH, false) - hashCollector += nonTypeSafeClassGen(FILE_PATH, true) - hashCollector += nonTypeSafeClassGen(FILE_PATH, false) - hashCollector += javaClassGen(FILE_PATH) - val finalHash = hashCollector.mkString("\n") - } - - /** - * Generate MD5 result from an input string - * Encoded in UTF-8 - * @param input The input string - * @return A MD5 value from the string - */ - def MD5Generator(input: String): String = { - val md = MessageDigest.getInstance("MD5") - md.update(input.getBytes("UTF-8")) - val digest = md.digest() - org.apache.commons.codec.binary.Base64.encodeBase64URLSafeString(digest) - } - - /** - * Type-safe class body generation for NDArray/Symbol - * @param FILE_PATH File path write the file to - * @param isSymbol Check if write the Symbol API, NDArray otherwise - * @return MD5 String - */ - def typeSafeClassGen(FILE_PATH: String, isSymbol: Boolean): String = { - val generated = typeSafeFunctionsToGenerate(isSymbol, isContrib = false) - .map { func => - val scalaDoc = generateAPIDocFromBackend(func) - val decl = generateAPISignature(func, isSymbol) - s"$scalaDoc\n$decl" - } - - writeFile( - FILE_PATH, - "package org.apache.mxnet", - if (isSymbol) "SymbolAPIBase" else "NDArrayAPIBase", - "import org.apache.mxnet.annotation.Experimental", - generated) - } - - /** - * Generate the Random classes for Symbol/NDArray - * @param FILE_PATH File path write the file to - * @param isSymbol Check if write the Symbol API, NDArray otherwise - * @return MD5 String - */ - def typeSafeRandomClassGen(FILE_PATH: String, isSymbol: Boolean): String = { - val generated = typeSafeRandomFunctionsToGenerate(isSymbol) - .map { func => - val scalaDoc = generateAPIDocFromBackend(func) - val typeParameter = randomGenericTypeSpec(isSymbol, false) - val decl = generateAPISignature(func, isSymbol, typeParameter) - s"$scalaDoc\n$decl" - } - - writeFile( - FILE_PATH, - "package org.apache.mxnet", - if (isSymbol) "SymbolRandomAPIBase" else "NDArrayRandomAPIBase", - """import org.apache.mxnet.annotation.Experimental - |import scala.reflect.ClassTag""".stripMargin, - generated) - } - - /** - * Non Type-safe interface of Scala Symbol/NDArray - * It includes class definition : e.g class SymbolBase - * and function definitions : e.g def softmax(...)(...)(...) : NDArray - * Users can directly use the api by calling NDArray. 
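- * e.g. NDArray.relu(x) with positional input, or NDArray.relu(Map("data" -> x))() with Map input (op name illustrative)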
- * It support both positional input or Map input - * @param FILE_PATH File path write the file to - * @param isSymbol Check if write the Symbol API, NDArray otherwise - * @return MD5 String - */ - def nonTypeSafeClassGen(FILE_PATH: String, isSymbol: Boolean): String = { - val absFuncs = functionsToGenerate(isSymbol, isContrib = false) - .map { func => - val scalaDoc = generateAPIDocFromBackend(func, false) - if (isSymbol) { - s"""$scalaDoc - |def ${func.name}(name : String = null, attr : Map[String, String] = null) - | (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): - | org.apache.mxnet.Symbol - """.stripMargin - } else { - s"""$scalaDoc - |def ${func.name}(kwargs: Map[String, Any] = null) - | (args: Any*): org.apache.mxnet.NDArrayFuncReturn - | - |$scalaDoc - |def ${func.name}(args: Any*): org.apache.mxnet.NDArrayFuncReturn - """.stripMargin - } - } - - writeFile( - FILE_PATH, - "package org.apache.mxnet", - if (isSymbol) "SymbolBase" else "NDArrayBase", - "import org.apache.mxnet.annotation.Experimental", - absFuncs) - } - - /** - * Type-safe interface of Java NDArray - * @param FILE_PATH File path write the file to - * @return MD5 String - */ - def javaClassGen(FILE_PATH : String) : String = { - val notGenerated = Set("Custom") - val absClassFunctions = functionsToGenerate(false, false, true) - val (absFuncs, paramClassUncleaned) = - absClassFunctions.filterNot(ele => notGenerated.contains(ele.name)) - .groupBy(_.name.toLowerCase).map(ele => { - /* Pattern matching for not generating deprecated method - * Group all method name in lowercase - * Kill the capital lettered method such as Cast vs cast - * As it defined by default it deprecated - */ - if (ele._2.length == 1) ele._2.head - else { - if (ele._2.head.name.head.isLower) ele._2.head - else ele._2.last - } - }).map(absClassFunction => { - generateJavaAPISignature(absClassFunction) - }).toSeq.unzip - val paramClass = paramClassUncleaned.filterNot(_.isEmpty) - val packageName = "NDArrayBase" - val packageDef = "package org.apache.mxnet.javaapi" - writeFile( - FILE_PATH + "javaapi/", - packageDef, - packageName, - "import org.apache.mxnet.annotation.Experimental", - absFuncs, Some(paramClass)) - } - - /** - * Generate Scala docs from the function description - * @param func The function case class - * @param withParam Whether to generate param field - * @return A formatted string for the function description - */ - def generateAPIDocFromBackend(func: Func, withParam: Boolean = true): String = { - def fixDesc(desc: String): String = { - var curDesc = desc - var prevDesc = "" - while ( curDesc != prevDesc ) { - prevDesc = curDesc - curDesc = curDesc.replace("[[", "`[ [").replace("]]", "] ]") - } - curDesc - } - val desc = fixDesc(func.desc).split("\n") - .mkString(" *\n * {{{\n *\n * ", "\n * ", "\n * }}}\n * ") - - val params = func.listOfArgs.map { absClassArg => - s" * @param ${absClassArg.safeArgName}\t\t${fixDesc(absClassArg.argDesc)}" - } - - val returnType = s" * @return ${func.returnType}" - - if (withParam) { - s""" /** - |$desc - |${params.mkString("\n")} - |$returnType - | */""".stripMargin - } else { - s""" /** - |$desc - |$returnType - | */""".stripMargin - } - } - - /** - * Generate the function interface - * e.g: def softmax(data: NDArray, name ...): NDArrayFunctionReturn - * @param func The function case class - * @param isSymbol Check if generate Symbol function, NDArray otherwise - * @param typeParameter Type param specifically used in Random Module - * @return Formatted string for the function - 
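- * e.g. for the Symbol API this appends "name"/"attr" parameters; for the NDArray API it appends "out : Option[NDArray] = None"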
*/ - def generateAPISignature(func: Func, isSymbol: Boolean, typeParameter: String = ""): String = { - val argDef = ListBuffer[String]() - - argDef ++= typedFunctionCommonArgDef(func) - - if (isSymbol) { - argDef += "name : String = null" - argDef += "attr : Map[String, String] = null" - } else { - argDef += "out : Option[NDArray] = None" - - } - - val returnType = func.returnType - - s"""@Experimental - |def ${func.name}$typeParameter (${argDef.mkString(", ")}): $returnType""".stripMargin - } - - /** - * Generate Java function interface - * @param func The function case class - * @return A formatted string for the function - */ - def generateJavaAPISignature(func : Func) : (String, String) = { - val useParamObject = func.listOfArgs.count(arg => arg.isOptional) >= 2 - var argDef = ListBuffer[String]() - var classDef = ListBuffer[String]() - var requiredParam = ListBuffer[String]() - func.listOfArgs.foreach(absClassArg => { - val currArgName = absClassArg.safeArgName - // scalastyle:off - if (absClassArg.isOptional && useParamObject) { - classDef += - s"""private var $currArgName: ${absClassArg.argType} = null - |/** - | * @param $currArgName\t\t${absClassArg.argDesc} - | */ - |def set${currArgName.capitalize}($currArgName : ${absClassArg.argType}): ${func.name}Param = { - | this.$currArgName = $currArgName - | this - | }""".stripMargin - } - else { - requiredParam += s" * @param $currArgName\t\t${absClassArg.argDesc}" - argDef += s"$currArgName : ${absClassArg.argType}" - } - classDef += s"def get${currArgName.capitalize}() = this.$currArgName" - // scalastyle:on - }) - val experimentalTag = "@Experimental" - val returnType = "Array[NDArray]" - val scalaDoc = generateAPIDocFromBackend(func) - val scalaDocNoParam = generateAPIDocFromBackend(func, false) - if(useParamObject) { - classDef += - s"""private var out : org.apache.mxnet.NDArray = null - |def setOut(out : NDArray) : ${func.name}Param = { - | this.out = out - | this - | } - | def getOut() = this.out - | """.stripMargin - (s"""$scalaDocNoParam - | $experimentalTag - | def ${func.name}(po: ${func.name}Param) : $returnType - | """.stripMargin, - s"""/** - | * This Param Object is specifically used for ${func.name} - | ${requiredParam.mkString("\n")} - | */ - | class ${func.name}Param(${argDef.mkString(",")}) { - | ${classDef.mkString("\n ")} - | }""".stripMargin) - } else { - argDef += "out : NDArray" - (s"""$scalaDoc - |$experimentalTag - | def ${func.name}(${argDef.mkString(", ")}) : $returnType - | """.stripMargin, "") - } - } - - /** - * Write the formatted string to file - * @param FILE_PATH Location of the file writes to - * @param packageDef Package definition - * @param className Class name - * @param imports Packages need to import - * @param absFuncs All formatted functions - * @return A MD5 string - */ - def writeFile(FILE_PATH: String, packageDef: String, className: String, - imports: String, absFuncs: Seq[String], - paramClass: Option[Seq[String]] = None): String = { - - val finalStr = - s"""/* - | * Licensed to the Apache Software Foundation (ASF) under one or more - | * contributor license agreements. See the NOTICE file distributed with - | * this work for additional information regarding copyright ownership. - | * The ASF licenses this file to You under the Apache License, Version 2.0 - | * (the "License"); you may not use this file except in compliance with - | * the License. 
You may obtain a copy of the License at - | * - | * http://www.apache.org/licenses/LICENSE-2.0 - | * - | * Unless required by applicable law or agreed to in writing, software - | * distributed under the License is distributed on an "AS IS" BASIS, - | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - | * See the License for the specific language governing permissions and - | * limitations under the License. - | */ - | - |$packageDef - | - |$imports - | - |// scalastyle:off - |abstract class $className { - |${absFuncs.mkString("\n")} - |} - |${paramClass.getOrElse(Seq()).mkString("\n")} - |""".stripMargin - - - val pw = new PrintWriter(new File(FILE_PATH + s"$className.scala")) - pw.write(finalStr) - pw.close() - MD5Generator(finalStr) - } - -} diff --git a/scala-package/macros/src/main/scala/org/apache/mxnet/GeneratorBase.scala b/scala-package/macros/src/main/scala/org/apache/mxnet/GeneratorBase.scala deleted file mode 100644 index cc3992c2ebc3..000000000000 --- a/scala-package/macros/src/main/scala/org/apache/mxnet/GeneratorBase.scala +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet - -import org.apache.mxnet.init.Base.{CPtrAddress, RefInt, RefLong, RefString, _LIB} -import org.apache.mxnet.utils.CToScalaUtils - -import scala.collection.mutable.ListBuffer -import scala.reflect.macros.blackbox - -private[mxnet] abstract class GeneratorBase { - - case class Arg(argName: String, argType: String, argDesc: String, isOptional: Boolean) { - /** - * Filter the arg name with the Scala keyword that are not allow to use as arg name, - * such as var and type listed in here. 
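- * e.g. a backend argument named "var" is emitted as "vari", and one named "type" as "typeOf".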
This is due to the diff between C and Scala - * @return argname that works in Scala - */ - def safeArgName: String = argName match { - case "var" => "vari" - case "type" => "typeOf" - case _ => argName - } - } - - case class Func(name: String, desc: String, listOfArgs: List[Arg], returnType: String) - - /** - * Non Type-safe function generation method - * This method will filter all "_" functions - * @param isSymbol Check if generate the Symbol method - * @param isContrib Check if generate the contrib method - * @param isJava Check if generate Corresponding Java method - * @return List of functions - */ - def functionsToGenerate(isSymbol: Boolean, isContrib: Boolean, - isJava: Boolean = false): List[Func] = { - val l = getBackEndFunctions(isSymbol, isJava) - if (isContrib) { - l.filter(func => func.name.startsWith("_contrib_") || !func.name.startsWith("_")) - } else { - l.filterNot(_.name.startsWith("_")) - } - } - - /** - * Filter the operators to generate in the type-safe Symbol.api and NDArray.api - * @param isSymbol Check if generate the Symbol method - * @param isContrib Check if generate the contrib method - * @return List of functions - */ - protected def typeSafeFunctionsToGenerate(isSymbol: Boolean, isContrib: Boolean): List[Func] = { - // Operators that should not be generated - val notGenerated = Set("Custom") - - val l = getBackEndFunctions(isSymbol) - val res = if (isContrib) { - l.filter(func => func.name.startsWith("_contrib_") || !func.name.startsWith("_")) - } else { - l.filterNot(_.name.startsWith("_")) - } - res.filterNot(ele => notGenerated.contains(ele.name)) - } - - /** - * Extract and format the functions obtained from C API - * @param isSymbol Check if generate for Symbol - * @param isJava Check if extracting in Java format - * @return List of functions - */ - protected def getBackEndFunctions(isSymbol: Boolean, isJava: Boolean = false): List[Func] = { - val opNames = ListBuffer.empty[String] - _LIB.mxListAllOpNames(opNames) - opNames.map(opName => { - val opHandle = new RefLong - _LIB.nnGetOpHandle(opName, opHandle) - makeAtomicFunction(opHandle.value, opName, isSymbol, isJava) - }).toList - } - - private def makeAtomicFunction(handle: CPtrAddress, aliasName: String, - isSymbol: Boolean, isJava: Boolean): Func = { - val name = new RefString - val desc = new RefString - val keyVarNumArgs = new RefString - val numArgs = new RefInt - val argNames = ListBuffer.empty[String] - val argTypes = ListBuffer.empty[String] - val argDescs = ListBuffer.empty[String] - - _LIB.mxSymbolGetAtomicSymbolInfo( - handle, name, desc, numArgs, argNames, argTypes, argDescs, keyVarNumArgs) - val extraDoc: String = if (keyVarNumArgs.value != null && keyVarNumArgs.value.length > 0) { - s"This function support variable length of positional input (${keyVarNumArgs.value})." 
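- // keyVarNumArgs, when set, names the vararg parameter of ops that accept a variable number of inputs (e.g. "num_args" on Concat).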
- } else { - "" - } - - val argList = argNames zip argTypes zip argDescs map { case ((argName, argType), argDesc) => - val family = if (isJava) "org.apache.mxnet.javaapi.NDArray" - else if (isSymbol) "org.apache.mxnet.Symbol" - else "org.apache.mxnet.NDArray" - val typeAndOption = - CToScalaUtils.argumentCleaner(argName, argType, family, isJava) - Arg(argName, typeAndOption._1, argDesc, typeAndOption._2) - } - val returnType = - if (isJava) "Array[org.apache.mxnet.javaapi.NDArray]" - else if (isSymbol) "org.apache.mxnet.Symbol" - else "org.apache.mxnet.NDArrayFuncReturn" - Func(aliasName, desc.value, argList.toList, returnType) - } - - /** - * Generate class structure for all function APIs - * - * @param c Context used for generation - * @param funcDef DefDef type of function definitions - * @param annottees Annottees used to define Class or Module - * @return Expr used for code generation - */ - protected def structGeneration(c: blackbox.Context) - (funcDef: List[c.universe.DefDef], annottees: c.Expr[Any]*) - : c.Expr[Nothing] = { - import c.universe._ - val inputs = annottees.map(_.tree).toList - // pattern match on the inputs - val modDefs = inputs map { - case ClassDef(mods, name, something, template) => - val q = template match { - case Template(superMaybe, emptyValDef, defs) => - Template(superMaybe, emptyValDef, defs ++ funcDef) - case ex => - throw new IllegalArgumentException(s"Invalid template: $ex") - } - ClassDef(mods, name, something, q) - case ModuleDef(mods, name, template) => - val q = template match { - case Template(superMaybe, emptyValDef, defs) => - Template(superMaybe, emptyValDef, defs ++ funcDef) - case ex => - throw new IllegalArgumentException(s"Invalid template: $ex") - } - ModuleDef(mods, name, q) - case ex => - throw new IllegalArgumentException(s"Invalid macro input: $ex") - } - // wrap the result up in an Expr, and return it - val result = c.Expr(Block(modDefs, Literal(Constant(())))) - result - } - - /** - * Build function argument definition, with optionality, and safe names - * @param func Functions - * @return List of string representing the functions interface - */ - protected def typedFunctionCommonArgDef(func: Func): List[String] = { - func.listOfArgs.map(arg => - if (arg.isOptional) { - // let's avoid a stupid Option[Array[...]] - if (arg.argType.startsWith("Array[")) { - s"${arg.safeArgName} : ${arg.argType} = Array.empty" - } else { - s"${arg.safeArgName} : Option[${arg.argType}] = None" - } - } - else { - s"${arg.safeArgName} : ${arg.argType}" - } - ) - } -} - -// a mixin to ease generating the Random module -private[mxnet] trait RandomHelpers { - self: GeneratorBase => - -/** - * A generic type spec used in Symbol.random and NDArray.random modules - * @param isSymbol Check if generate for Symbol - * @param fullPackageSpec Check if leave the full name of the classTag - * @return A formatted string for random Symbol/NDArray - */ - protected def randomGenericTypeSpec(isSymbol: Boolean, fullPackageSpec: Boolean): String = { - val classTag = if (fullPackageSpec) "scala.reflect.ClassTag" else "ClassTag" - if (isSymbol) s"[T: SymbolOrScalar : $classTag]" - else s"[T: NDArrayOrScalar : $classTag]" - } - -/** - * Filter the operators to generate in the type-safe Symbol.random and NDArray.random - * @param isSymbol Check if generate Symbol functions - * @return List of functions - */ - protected def typeSafeRandomFunctionsToGenerate(isSymbol: Boolean): List[Func] = { - getBackEndFunctions(isSymbol) - .filter(f => f.name.startsWith("_sample_") || 
f.name.startsWith("_random_")) - .map(f => f.copy(name = f.name.stripPrefix("_"))) - // unify _random and _sample - .map(f => unifyRandom(f, isSymbol)) - // deduplicate - .groupBy(_.name) - .mapValues(_.head) - .values - .toList - } - - // unify call targets (random_xyz and sample_xyz) and unify their argument types - private def unifyRandom(func: Func, isSymbol: Boolean): Func = { - var typeConv = Set("org.apache.mxnet.NDArray", "org.apache.mxnet.Symbol", - "Float", "Int") - - func.copy( - name = func.name.replaceAll("(random|sample)_", ""), - listOfArgs = func.listOfArgs - .map(hackNormalFunc) - .map(arg => - if (typeConv(arg.argType)) arg.copy(argType = "T") - else arg - ) - // TODO: some functions are non consistent in random_ vs sample_ regarding optionality - // we may try to unify that as well here. - ) - } - - /** - * Hacks to manage the fact that random_normal and sample_normal have - * non-consistent parameter naming in the back-end - * this first one, merge loc/scale and mu/sigma - * @param arg Argument need to modify - * @return Arg case class with clean arg names - */ - protected def hackNormalFunc(arg: Arg): Arg = { - if (arg.argName == "loc") arg.copy(argName = "mu") - else if (arg.argName == "scale") arg.copy(argName = "sigma") - else arg - } - - /** - * This second one reverts this merge prior to back-end call - * @param func Function case class - * @return A string contains the implementation of random args - */ - protected def unhackNormalFunc(func: Func): String = { - if (func.name.equals("normal")) { - s"""if(target.equals("random_normal")) { - | if(map.contains("mu")) { map("loc") = map("mu"); map.remove("mu") } - | if(map.contains("sigma")) { map("scale") = map("sigma"); map.remove("sigma") } - |} - """.stripMargin - } else { - "" - } - - } - -} diff --git a/scala-package/macros/src/main/scala/org/apache/mxnet/NDArrayMacro.scala b/scala-package/macros/src/main/scala/org/apache/mxnet/NDArrayMacro.scala deleted file mode 100644 index c9c10f50c01f..000000000000 --- a/scala-package/macros/src/main/scala/org/apache/mxnet/NDArrayMacro.scala +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet - -import scala.annotation.StaticAnnotation -import scala.language.experimental.macros -import scala.reflect.macros.blackbox - -private[mxnet] class AddNDArrayFunctions(isContrib: Boolean) extends StaticAnnotation { -/** - * Generate non-typesafe method for NDArray operations - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - private[mxnet] def macroTransform(annottees: Any*) = macro NDArrayMacro.addDefs -} - -private[mxnet] class AddNDArrayAPIs(isContrib: Boolean) extends StaticAnnotation { -/** - * Generate typesafe method for NDArray operations - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - private[mxnet] def macroTransform(annottees: Any*) = macro TypedNDArrayAPIMacro.typeSafeAPIDefs -} - -private[mxnet] class AddNDArrayRandomAPIs(isContrib: Boolean) extends StaticAnnotation { -/** - * Generate typesafe method for Random Symbol - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - private[mxnet] def macroTransform(annottees: Any*) = - macro TypedNDArrayRandomAPIMacro.typeSafeAPIDefs -} - -/** - * For non-typed NDArray API - */ -private[mxnet] object NDArrayMacro extends GeneratorBase { - /** - * Methods that check the ``isContrib`` and call code generation - * @param c Context used for code gen - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - def addDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = { - import c.universe._ - val isContrib: Boolean = c.prefix.tree match { - case q"new AddNDArrayFunctions($b)" => c.eval[Boolean](c.Expr(b)) - } - - impl(c)(isContrib, annottees: _*) - } - - private def impl(c: blackbox.Context) - (isContrib: Boolean, annottees: c.Expr[Any]*): c.Expr[Nothing] = { - import c.universe._ - - val functions = functionsToGenerate(isSymbol = false, isContrib) - - val functionDefs = functions.flatMap { NDArrayfunction => - val funcName = NDArrayfunction.name - val termName = TermName(funcName) - Seq( - // e.g def transpose(kwargs: Map[String, Any] = null)(args: Any*) - q""" - def $termName(kwargs: Map[String, Any] = null)(args: Any*) = { - genericNDArrayFunctionInvoke($funcName, args, kwargs) - } - """.asInstanceOf[DefDef], - // e.g def transpose(args: Any*) - q""" - def $termName(args: Any*) = { - genericNDArrayFunctionInvoke($funcName, args, null) - } - """.asInstanceOf[DefDef] - ) - } - - structGeneration(c)(functionDefs, annottees: _*) - } -} - -/** - * NDArray.api code generation - */ -private[mxnet] object TypedNDArrayAPIMacro extends GeneratorBase { - /** - * Methods that check the ``isContrib`` and call code generation - * @param c Context used for code gen - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = { - import c.universe._ - val isContrib: Boolean = c.prefix.tree match { - case q"new AddNDArrayAPIs($b)" => c.eval[Boolean](c.Expr(b)) - } - - val functionDefs = typeSafeFunctionsToGenerate(isSymbol = false, isContrib) - .map(f => buildTypedFunction(c)(f)) - - structGeneration(c)(functionDefs, annottees: _*) - } - - /** - * Methods that construct the code and build the syntax tree - * @param c Context used for code gen - * @param function Case class that store all information of the single function - * @return Generated syntax tree - 
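- * e.g. emits: def relu(data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn (op name illustrative)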
*/ - protected def buildTypedFunction(c: blackbox.Context) - (function: Func): c.universe.DefDef = { - import c.universe._ - - val returnType = "org.apache.mxnet.NDArrayFuncReturn" - - // Construct API arguments declaration - val argDecl = super.typedFunctionCommonArgDef(function) :+ "out : Option[NDArray] = None" - - // Map API input args to backend args - val backendArgsMapping = - function.listOfArgs.map { arg => - // ndarrays go to args, other types go to kwargs - if (arg.argType.equals(s"Array[org.apache.mxnet.NDArray]")) { - s"args ++= ${arg.safeArgName}.toSeq" - } else { - val base = if (arg.argType.equals("org.apache.mxnet.NDArray")) { - s"args += ${arg.safeArgName}" - } else { - s"""map("${arg.argName}") = ${arg.safeArgName}""" - } - if (arg.isOptional) s"if (!${arg.safeArgName}.isEmpty) $base.get" - else base - } - } - - val impl = - s""" - |def ${function.name} - | (${argDecl.mkString(",")}): $returnType = { - | - | val map = scala.collection.mutable.Map[String, Any]() - | val args = scala.collection.mutable.ArrayBuffer.empty[org.apache.mxnet.NDArray] - | - | if (!out.isEmpty) map("out") = out.get - | - | ${backendArgsMapping.mkString("\n")} - | - | org.apache.mxnet.NDArray.genericNDArrayFunctionInvoke( - | "${function.name}", args.toSeq, map.toMap) - |} - """.stripMargin - - c.parse(impl).asInstanceOf[DefDef] - } -} - - -/** - * NDArray.random code generation - */ -private[mxnet] object TypedNDArrayRandomAPIMacro extends GeneratorBase - with RandomHelpers { - /** - * methods that check the ``isContrib`` and call code generation - * @param c Context used for code gen - * @param annottees annottees used to define Class or Module - * @return generated code for injection - */ - def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = { - // Note: no contrib managed in this module - - val functionDefs = typeSafeRandomFunctionsToGenerate(isSymbol = false) - .map(f => buildTypedFunction(c)(f)) - - structGeneration(c)(functionDefs, annottees: _*) - } - - /** - * Methods that construct the code and build the syntax tree - * @param c Context used for code gen - * @param function Case class that store all information of the single function - * @return Generated syntax tree - */ - protected def buildTypedFunction(c: blackbox.Context) - (function: Func): c.universe.DefDef = { - import c.universe._ - - val returnType = "org.apache.mxnet.NDArrayFuncReturn" - - // Construct API arguments declaration - val argDecl = super.typedFunctionCommonArgDef(function) :+ "out : Option[NDArray] = None" - - // Map API input args to backend args - val backendArgsMapping = - function.listOfArgs.map { arg => - // ndarrays go to args, other types go to kwargs - if (arg.argType.equals("Array[org.apache.mxnet.NDArray]")) { - s"args ++= ${arg.safeArgName}.toSeq" - } else { - if (arg.argType.equals("T")) { - if (arg.isOptional) { - s"""if(${arg.safeArgName}.isDefined) { - | if(isScalar) { - | map("${arg.argName}") = ${arg.safeArgName}.get - | } else { - | args += ${arg.safeArgName}.get.asInstanceOf[org.apache.mxnet.NDArray] - | } - |} - """.stripMargin - } else { - s"""if(isScalar) { - | map("${arg.argName}") = ${arg.safeArgName} - |} else { - | args += ${arg.safeArgName}.asInstanceOf[org.apache.mxnet.NDArray] - |} - """.stripMargin - } - } else { - if (arg.isOptional) { - s"""if (${arg.safeArgName}.isDefined) map("${arg.argName}")=${arg.safeArgName}.get""" - } else { - s"""map("${arg.argName}") = ${arg.safeArgName}""" - } - } - } - } - - val impl = - s""" - |def 
${function.name}${randomGenericTypeSpec(false, true)} - | (${argDecl.mkString(",")}): $returnType = { - | - | val map = scala.collection.mutable.Map[String, Any]() - | val args = scala.collection.mutable.ArrayBuffer.empty[org.apache.mxnet.NDArray] - | val isScalar = NDArrayOrScalar[T].isScalar - | - | if(out.isDefined) map("out") = out.get - | - | ${backendArgsMapping.mkString("\n")} - | - | val target = if(isScalar) { - | "random_${function.name}" - | } else { - | "sample_${function.name}" - | } - | - | ${unhackNormalFunc(function)} - | - | org.apache.mxnet.NDArray.genericNDArrayFunctionInvoke( - | target, args.toSeq, map.toMap) - |} - """.stripMargin - - c.parse(impl).asInstanceOf[DefDef] - } - - -} diff --git a/scala-package/macros/src/main/scala/org/apache/mxnet/SymbolMacro.scala b/scala-package/macros/src/main/scala/org/apache/mxnet/SymbolMacro.scala deleted file mode 100644 index 1a0405cfd63d..000000000000 --- a/scala-package/macros/src/main/scala/org/apache/mxnet/SymbolMacro.scala +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet - - -import scala.annotation.StaticAnnotation -import scala.language.experimental.macros -import scala.reflect.macros.blackbox - -private[mxnet] class AddSymbolFunctions(isContrib: Boolean) extends StaticAnnotation { -/** - * Generate non-typesafe method for Symbol operations - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - private[mxnet] def macroTransform(annottees: Any*) = macro SymbolMacro.addDefs -} - -private[mxnet] class AddSymbolAPIs(isContrib: Boolean) extends StaticAnnotation { -/** - * Generate typesafe method for Symbol - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - private[mxnet] def macroTransform(annottees: Any*) = macro TypedSymbolAPIMacro.typeSafeAPIDefs -} - -private[mxnet] class AddSymbolRandomAPIs(isContrib: Boolean) extends StaticAnnotation { -/** - * Generate typesafe method for Random Symbol - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - private[mxnet] def macroTransform(annottees: Any*) = - macro TypedSymbolRandomAPIMacro.typeSafeAPIDefs -} - -/** - * For non-typed Symbol API - */ -private[mxnet] object SymbolMacro extends GeneratorBase { - - /** - * Methods that check the ``isContrib`` and call code generation - * @param c Context used for code gen - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - def addDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = { - import c.universe._ - val isContrib: Boolean = c.prefix.tree match { - case q"new AddSymbolFunctions($b)" => c.eval[Boolean](c.Expr(b)) - } - - impl(c)(isContrib, annottees: _*) - } - - private def impl(c: blackbox.Context) - (isContrib: Boolean, annottees: c.Expr[Any]*): c.Expr[Nothing] = { - import c.universe._ - - val functions = functionsToGenerate(isSymbol = false, isContrib) - - val functionDefs = functions.map { symbolfunction => - val funcName = symbolfunction.name - val tName = TermName(funcName) - q""" - def $tName(name : String = null, attr : Map[String, String] = null) - (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null) - : org.apache.mxnet.Symbol = { - createSymbolGeneral($funcName,name,attr,args,kwargs) - } - """.asInstanceOf[DefDef] - } - - structGeneration(c)(functionDefs, annottees: _*) - } -} - -/** - * Symbol.api code generation - */ -private[mxnet] object TypedSymbolAPIMacro extends GeneratorBase { - - /** - * Methods that check the ``isContrib`` and call code generation - * @param c Context used for code gen - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = { - import c.universe._ - val isContrib: Boolean = c.prefix.tree match { - case q"new AddSymbolAPIs($b)" => c.eval[Boolean](c.Expr(b)) - } - - val functionDefs = typeSafeFunctionsToGenerate(isSymbol = true, isContrib) - .map(f => buildTypedFunction(c)(f)) - - structGeneration(c)(functionDefs, annottees: _*) - } - - /** - * Methods that construct the code and build the syntax tree - * @param c Context used for code gen - * @param function Case class that store all information of the single function - * @return Generated syntax tree - */ - protected def buildTypedFunction(c: blackbox.Context) - (function: Func): c.universe.DefDef = { - import c.universe._ - - val returnType = 
"org.apache.mxnet.Symbol" - - // Construct API arguments declaration - val argDecl = super.typedFunctionCommonArgDef(function) :+ - "name : String = null" :+ - "attr : Map[String, String] = null" - - // Map API input args to backend args - val backendArgsMapping = - function.listOfArgs.map { arg => - if (arg.argType.equals(s"Array[org.apache.mxnet.Symbol]")) { - s"args = ${arg.safeArgName}.toSeq" - } else { - // all go in kwargs - if (arg.isOptional) { - s"""if (!${arg.safeArgName}.isEmpty) map("${arg.argName}") = ${arg.safeArgName}.get""" - } else { - s"""map("${arg.argName}") = ${arg.safeArgName}""" - } - } - } - - val impl = - s""" - |def ${function.name} - | (${argDecl.mkString(",")}): $returnType = { - | - | val map = scala.collection.mutable.Map[String, Any]() - | var args = scala.collection.Seq[org.apache.mxnet.Symbol]() - | - | ${backendArgsMapping.mkString("\n")} - | - | org.apache.mxnet.Symbol.createSymbolGeneral( - | "${function.name}", name, attr, args, map.toMap) - |} - """.stripMargin - - c.parse(impl).asInstanceOf[DefDef] - } -} - - -/** - * Symbol.random code generation - */ -private[mxnet] object TypedSymbolRandomAPIMacro extends GeneratorBase - with RandomHelpers { - - /** - * Methods that check the ``isContrib`` and call code generation - * @param c Context used for code gen - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*): c.Expr[Any] = { - val functionDefs = typeSafeRandomFunctionsToGenerate(isSymbol = true) - .map(f => buildTypedFunction(c)(f)) - - structGeneration(c)(functionDefs, annottees: _*) - } - - /** - * Methods that construct the code and build the syntax tree - * @param c Context used for code gen - * @param function Case class that store all information of the single function - * @return Generated syntax tree - */ - protected def buildTypedFunction(c: blackbox.Context) - (function: Func): c.universe.DefDef = { - import c.universe._ - - val returnType = "org.apache.mxnet.Symbol" - - // Construct API arguments declaration - val argDecl = super.typedFunctionCommonArgDef(function) :+ - "name : String = null" :+ - "attr : Map[String, String] = null" - - // Map API input args to backend args - val backendArgsMapping = - function.listOfArgs.map { arg => - if (arg.argType.equals(s"Array[org.apache.mxnet.Symbol]")) { - s"args = ${arg.safeArgName}.toSeq" - } else { - // all go in kwargs - if (arg.isOptional) { - s"""if (${arg.safeArgName}.isDefined) map("${arg.argName}") = ${arg.safeArgName}.get""" - } else { - s"""map("${arg.argName}") = ${arg.safeArgName}""" - } - } - } - - val impl = - s""" - |def ${function.name}${randomGenericTypeSpec(true, true)} - | (${argDecl.mkString(",")}): $returnType = { - | - | val map = scala.collection.mutable.Map[String, Any]() - | var args = scala.collection.Seq[org.apache.mxnet.Symbol]() - | val isScalar = SymbolOrScalar[T].isScalar - | - | ${backendArgsMapping.mkString("\n")} - | - | val target = if(isScalar) { - | "random_${function.name}" - | } else { - | "sample_${function.name}" - | } - | - | ${unhackNormalFunc(function)} - | - | org.apache.mxnet.Symbol.createSymbolGeneral( - | target, name, attr, args, map.toMap) - |} - """.stripMargin - - c.parse(impl).asInstanceOf[DefDef] - } -} - diff --git a/scala-package/macros/src/main/scala/org/apache/mxnet/javaapi/JavaNDArrayMacro.scala b/scala-package/macros/src/main/scala/org/apache/mxnet/javaapi/JavaNDArrayMacro.scala deleted file mode 100644 index 
29206247296d..000000000000 --- a/scala-package/macros/src/main/scala/org/apache/mxnet/javaapi/JavaNDArrayMacro.scala +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.javaapi - -import org.apache.mxnet.GeneratorBase - -import scala.annotation.StaticAnnotation -import scala.collection.mutable.ListBuffer -import scala.language.experimental.macros -import scala.reflect.macros.blackbox - -private[mxnet] class AddJNDArrayAPIs(isContrib: Boolean) extends StaticAnnotation { -/** - * Generate typesafe method for Java NDArray operations - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - private[mxnet] def macroTransform(annottees: Any*) = macro JavaNDArrayMacro.typeSafeAPIDefs -} - -private[mxnet] object JavaNDArrayMacro extends GeneratorBase { - - /** - * Methods that call code generation - * @param c Context used for code gen - * @param annottees Annottees used to define Class or Module - * @return Generated code for injection - */ - def typeSafeAPIDefs(c: blackbox.Context)(annottees: c.Expr[Any]*) : c.Expr[Any] = { - typeSafeAPIImpl(c)(annottees: _*) - } - - private def typeSafeAPIImpl(c: blackbox.Context)(annottees: c.Expr[Any]*) : c.Expr[Nothing] = { - import c.universe._ - - val isContrib: Boolean = c.prefix.tree match { - case q"new AddJNDArrayAPIs($b)" => c.eval[Boolean](c.Expr(b)) - } - // Defines Operators that should not generated - val notGenerated = Set("Custom") - - val newNDArrayFunctions = functionsToGenerate(false, false, true) - .filterNot(ele => notGenerated.contains(ele.name)).groupBy(_.name.toLowerCase).map(ele => { - /* Pattern matching for not generating deprecated method - * Group all method name in lowercase - * Kill the capital lettered method such as Cast vs cast - * As it defined by default it deprecated - */ - if (ele._2.length == 1) ele._2.head - else { - if (ele._2.head.name.head.isLower) ele._2.head - else ele._2.last - } - }) - - val functionDefs = ListBuffer[DefDef]() - val classDefs = ListBuffer[ClassDef]() - - newNDArrayFunctions.foreach { ndarrayfunction => - - val useParamObject = ndarrayfunction.listOfArgs.count(arg => arg.isOptional) >= 2 - // Construct argument field with all required args - var argDef = ListBuffer[String]() - // Construct function Implementation field (e.g norm) - var impl = ListBuffer[String]() - impl += "val map = scala.collection.mutable.Map[String, Any]()" - impl += - "val args= scala.collection.mutable.ArrayBuffer.empty[org.apache.mxnet.NDArray]" - ndarrayfunction.listOfArgs.foreach({ ndarrayArg => - // var is a special word used to define variable in Scala, - // need to changed to something else in order to make it work - var currArgName = ndarrayArg.safeArgName - if 
(useParamObject) currArgName = s"po.get${currArgName.capitalize}()" - argDef += s"$currArgName : ${ndarrayArg.argType}" - // NDArray arg implementation - val returnType = "org.apache.mxnet.javaapi.NDArray" - val base = - if (ndarrayArg.argType.equals(returnType)) { - s"args += $currArgName" - } else if (ndarrayArg.argType.equals(s"Array[$returnType]")){ - s"$currArgName.foreach(args+=_)" - } else { - "map(\"" + ndarrayArg.argName + "\") = " + currArgName - } - impl.append( - if (ndarrayArg.isOptional) s"if ($currArgName != null) $base" - else base - ) - }) - // add default out parameter - argDef += s"out: org.apache.mxnet.javaapi.NDArray" - if (useParamObject) { - impl += "if (po.getOut() != null) map(\"out\") = po.getOut().nd" - } else { - impl += "if (out != null) map(\"out\") = out.nd" - } - val returnType = "Array[org.apache.mxnet.javaapi.NDArray]" - // scalastyle:off - // Combine and build the function string - impl += "val finalArr = org.apache.mxnet.NDArray.genericNDArrayFunctionInvoke(\"" + - ndarrayfunction.name + "\", args.toSeq, map.toMap).arr" - impl += "finalArr.map(ele => new NDArray(ele))" - if (useParamObject) { - val funcDef = - s"""def ${ndarrayfunction.name}(po: ${ndarrayfunction.name}Param): $returnType = { - | ${impl.mkString("\n")} - | }""".stripMargin - functionDefs += c.parse(funcDef).asInstanceOf[DefDef] - } else { - val funcDef = - s"""def ${ndarrayfunction.name}(${argDef.mkString(",")}): $returnType = { - | ${impl.mkString("\n")} - | }""".stripMargin - functionDefs += c.parse(funcDef).asInstanceOf[DefDef] - } - } - structGeneration(c)(functionDefs.toList, annottees : _*) - } -} diff --git a/scala-package/macros/src/main/scala/org/apache/mxnet/utils/CToScalaUtils.scala b/scala-package/macros/src/main/scala/org/apache/mxnet/utils/CToScalaUtils.scala deleted file mode 100644 index 7c9d4d347f01..000000000000 --- a/scala-package/macros/src/main/scala/org/apache/mxnet/utils/CToScalaUtils.scala +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.mxnet.utils - -private[mxnet] object CToScalaUtils { - - private val javaType = Map( - "float" -> "java.lang.Float", - "int" -> "java.lang.Integer", - "long" -> "java.lang.Long", - "double" -> "java.lang.Double", - "bool" -> "java.lang.Boolean") - private val scalaType = Map( - "float" -> "Float", - "int" -> "Int", - "long" -> "Long", - "double" -> "Double", - "bool" -> "Boolean") - - /** - * Convert C++ Types to Scala Types - * @param in Input raw string that contains C type docs - * @param argType Arg type that used for error messaging - * @param argName Arg name used for error messaging - * @param returnType The type that NDArray/Symbol should be - * @param isJava Check if generating for Java - * @return String that contains right Scala/Java types - */ - def typeConversion(in : String, argType : String = "", argName : String, - returnType : String, isJava : Boolean) : String = { - val header = returnType.split("\\.").dropRight(1) - val types = if (isJava) javaType else scalaType - in match { - case "Shape(tuple)" | "ShapeorNone" => s"${header.mkString(".")}.Shape" - case "Symbol" | "NDArray" | "NDArray-or-Symbol" => returnType - case "Symbol[]" | "NDArray[]" | "NDArray-or-Symbol[]" | "SymbolorSymbol[]" - => s"Array[$returnType]" - case "float" | "real_t" | "floatorNone" => types("float") - case "int" | "intorNone" | "int(non-negative)" => types("int") - case "long" | "long(non-negative)" => types("long") - case "double" | "doubleorNone" => types("double") - case "string" => "String" - case "boolean" | "booleanorNone" => types("bool") - case "tupleof"| "tupleof" | "tupleof" | "tupleof" | - "tupleof" | "tupleof>" | "tupleof" | - "tupleof<>" | "ptr" | "" => "Any" - case default => throw new IllegalArgumentException( - s"Invalid type for args: $default\nString argType: $argType\nargName: $argName") - } - } - - - /** - * By default, the argType come from the C++ API is a description more than a single word - * For Example: - * , , - * The three field shown above do not usually come at the same time - * This function used the above format to determine if the argument is - * optional, what is it Scala type and possibly pass in a default value - * @param argName The name of the argument - * @param argType Raw arguement Type description - * @param returnType Return type of the function (Symbol/NDArray) - * @param isJava Check if Java args should be generated - * @return (Scala_Type, isOptional) - */ - def argumentCleaner(argName: String, argType : String, - returnType : String, isJava : Boolean) : (String, Boolean) = { - val spaceRemoved = argType.replaceAll("\\s+", "") - var commaRemoved : Array[String] = new Array[String](0) - // Deal with the case e.g: stype : {'csr', 'default', 'row_sparse'} - if (spaceRemoved.charAt(0)== '{') { - val endIdx = spaceRemoved.indexOf('}') - commaRemoved = spaceRemoved.substring(endIdx + 1).split(",") - commaRemoved(0) = "string" - } else { - commaRemoved = spaceRemoved.split(",") - } - // Optional Field - if (commaRemoved.length >= 3) { - // arg: Type, optional, default = Null - require(commaRemoved(1).equals("optional"), - s"""expected "optional" got ${commaRemoved(1)}""") - require(commaRemoved(2).startsWith("default="), - s"""expected "default=..." 
got ${commaRemoved(2)}""") - (typeConversion(commaRemoved(0), argType, argName, returnType, isJava), true) - } else if (commaRemoved.length == 2 || commaRemoved.length == 1) { - val tempType = typeConversion(commaRemoved(0), argType, argName, returnType, isJava) - val tempOptional = tempType.equals("org.apache.mxnet.Symbol") - (tempType, tempOptional) - } else { - throw new IllegalArgumentException( - s"Unrecognized arg field: $argType, ${commaRemoved.length}") - } - - } -} diff --git a/scala-package/macros/src/test/resources/log4j.properties b/scala-package/macros/src/test/resources/log4j.properties deleted file mode 100644 index d82fd7ea4f3d..000000000000 --- a/scala-package/macros/src/test/resources/log4j.properties +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# for development debugging -log4j.rootLogger = debug, stdout - -log4j.appender.stdout = org.apache.log4j.ConsoleAppender -log4j.appender.stdout.Target = System.out -log4j.appender.stdout.layout = org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss,SSS} [%t] [%c] [%p] - %m%n diff --git a/scala-package/macros/src/test/scala/org/apache/mxnet/MacrosSuite.scala b/scala-package/macros/src/test/scala/org/apache/mxnet/MacrosSuite.scala deleted file mode 100644 index 4069bba25220..000000000000 --- a/scala-package/macros/src/test/scala/org/apache/mxnet/MacrosSuite.scala +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-package org.apache.mxnet
-
-import org.apache.mxnet.utils.CToScalaUtils
-import org.scalatest.{BeforeAndAfterAll, FunSuite}
-import org.slf4j.LoggerFactory
-
-class MacrosSuite extends FunSuite with BeforeAndAfterAll {
-
-  private val logger = LoggerFactory.getLogger(classOf[MacrosSuite])
-
-
-  test("MacrosSuite-testArgumentCleaner") {
-    val input = List(
-      "Symbol, optional, default = Null",
-      "int, required",
-      "Shape(tuple), optional, default = []",
-      "{'csr', 'default', 'row_sparse'}, optional, default = 'csr'",
-      ", required"
-    )
-    val output = List(
-      ("org.apache.mxnet.Symbol", true),
-      ("Int", false),
-      ("org.apache.mxnet.Shape", true),
-      ("String", true),
-      ("Any", false)
-    )
-
-    for (idx <- input.indices) {
-      val result = CToScalaUtils.argumentCleaner("Sample", input(idx),
-        "org.apache.mxnet.Symbol", false)
-      assert(result._1 === output(idx)._1 && result._2 === output(idx)._2)
-    }
-  }
-
-}
diff --git a/scala-package/memory-management.md b/scala-package/memory-management.md
deleted file mode 100644
index b97bcbcb9026..000000000000
--- a/scala-package/memory-management.md
+++ /dev/null
@@ -1,135 +0,0 @@
- - - - - - - - - - - - - - - - -
-# JVM Memory Management
-The Scala and Java bindings of Apache MXNet use native memory (memory from the C++ heap, in either RAM or GPU memory) for most of the MXNet objects, such as NDArray, Symbol, Executor, KVStore, and Data Iterators.
-The associated Scala classes act only as wrappers. The operations done on these wrapper objects are directed to the high-performance MXNet C++ backend via the Java Native Interface (JNI), so the bytes are stored in the C++ native heap, which allows for fast access.
-
-However, the JVM Garbage Collector only manages objects allocated in the JVM heap and is not aware of the memory footprint of these objects in native memory. Hence, the allocation and deallocation of native memory must be managed by MXNet Scala.
-Allocating native memory is straightforward and is done during the construction of the object by calling the associated C++ API through JNI. However, since JVM languages do not have destructors, the deallocation of these objects must be done explicitly.
-MXNet Scala provides a few easy modes of operation, which are explained in detail below.
-
-## Memory Management in Scala
-### 1. [ResourceScope.using](https://github.com/apache/incubator-mxnet/blob/master/scala-package/core/src/main/scala/org/apache/mxnet/ResourceScope.scala#L106) (Recommended)
-`ResourceScope.using` provides the familiar Java try-with-resources primitive in Scala and will automatically manage the memory of all the MXNet objects created in the associated code block (`body`). It works by tracking the allocations performed inside the code block and deallocating them when exiting the block.
-Passing MXNet objects out of a using block is easily accomplished by simply returning an object or an iterable containing multiple MXNet objects. If you have nested using blocks, the returned objects are moved into the parent scope as well.
-
-**Usage**
-```scala
-ResourceScope.using() {
-    val (r3, r4) = ResourceScope.using() {
-        val r1 = NDArray.ones(Shape(2, 2))
-        val r2 = NDArray.ones(Shape(3, 4))
-        val r3 = NDArray.ones(Shape(5, 6))
-        val r4 = NDArray.ones(Shape(7, 8))
-        (r3, r4)
-    }
-    r4
-}
-```
-In the example above, we have two ResourceScopes stacked together. In the inner scope, four NDArrays `(r1, r2, r3, r4)` are created, and the NDArrays `(r3, r4)` are returned.
-The inner ResourceScope recognizes that it should not deallocate these objects and automatically moves `r3` and `r4` to the outer scope. When the outer scope returns `r4` from its code block, it deallocates only `r3` and removes `r4` from its list of objects to be deallocated. All other objects are automatically released by calling the C++ backend to free the native memory.
-
-**Note:**
-You should consider nesting ResourceScopes when you have layers of functionality in your application code or when you create a lot of MXNet objects, such as NDArrays.
-For example, holding onto all the memory created during an entire training loop can result in running out of memory, especially when training on GPUs, which might only have 8 to 16 GB.
-It is recommended not to use a single ResourceScope block that spans the entire training code. You should instead nest multiple scopes: an innermost scope where you run forward-backward passes on each batch, a middle scope for each epoch, and an outer scope that runs the entire training script. This is demonstrated in the example below:
-
-```scala
-ResourceScope.using() {
-    val m = Module()
-    m.bind()
-    val k = KVStore(...)
-    ResourceScope.using() {
-        val itr = MXIterator(..)
-        val num_epochs: Int = 100
-        //...
-        for (i <- 0 until num_epochs) {
-            ResourceScope.using() {
-                while (itr.hasNext) {
-                    val dataBatch = itr.next()
-                    m.forward(dataBatch)
-                    m.backward(dataBatch)
-                    m.update()
-                }
-                itr.reset()
-            }
-        }
-    }
-}
-```
-
-### 2. Using Phantom References (Recommended for some use cases)
-
-Apache MXNet uses [Phantom References](https://docs.oracle.com/javase/8/docs/api/java/lang/ref/PhantomReference.html) to track all MXNet objects that have native memory associated with them.
-When the Garbage Collector runs, it identifies unreachable Scala/Java objects in the JVM heap and finalizes them.
-It then enqueues objects that are ready to be reclaimed into a reference queue. We take advantage of this and do a
-pre-mortem cleanup on these wrapper objects by freeing the corresponding native memory as well.
-
-This approach is automatic and does not require any special code to clean up the native memory. However, the Garbage Collector is not aware of the potentially large amount of native memory in use, and therefore may not free up memory often enough with its standard behavior.
-You can control the frequency of garbage collection by calling System.gc() at strategic points, such as the end of an epoch or the end of a mini-batch, as sketched below.
-
-This approach could be suitable for some use cases, such as inference on CPUs, where you have a large amount of memory (RAM) on your system.
-
-**Note:**
-Calling GC too frequently can also cause your application to perform poorly. This approach might not be suitable
-for use cases that quickly allocate a large number of large NDArrays, such as when training a GAN model.
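A minimal sketch of that pattern (the epoch body is a hypothetical placeholder; only the placement of the `System.gc()` call is the point here):

```java
public class GcPerEpochSketch {
    public static void main(String[] args) {
        int numEpochs = 3; // illustrative value
        for (int epoch = 0; epoch < numEpochs; epoch++) {
            // ... run forward/backward over all batches of the epoch here ...

            // Request a collection at the epoch boundary so unreachable MXNet
            // wrappers are enqueued and their native memory can be freed.
            System.gc();
        }
    }
}
```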
-
-### 3. Using the dispose Pattern (least recommended)
-
-There might be situations where you want to manually manage the lifecycle of Apache MXNet objects. For such use cases, we have provided the `dispose()` method, which manually deallocates the associated native memory when called. We have also
-made all MXNet objects [AutoCloseable](https://docs.oracle.com/javase/8/docs/api/java/lang/AutoCloseable.html). If you are using Java 8 or above, you can use them with try-with-resources or call close() in a finally block.
-
-**Note:**
-We recommend you avoid manually managing MXNet objects and instead use `ResourceScope.using`. Manual management makes for less readable code and can leak memory if you miss calling dispose (until the object is eventually cleaned up by the Garbage Collector through Phantom References).
-
-```scala
-def showDispose(): Unit = {
-    val r = NDArray.ones(Shape(2, 2))
-    r.dispose()
-}
-```
-
-## Memory Management in Java
-Memory management in MXNet Java is similar to Scala. We recommend you use [ResourceScope](https://github.com/apache/incubator-mxnet/blob/master/scala-package/core/src/main/scala/org/apache/mxnet/ResourceScope.scala#L32) in a `try-with-resources` block or in a `try-finally` block.
-The [try-with-resources](https://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html) statement tracks the resources declared in the try block and automatically closes them upon exiting (supported from Java 7 onwards).
-The ResourceScope discussed above implements AutoCloseable and tracks all MXNet objects created at a thread-local scope level.
-
-```java
-try (ResourceScope scope = new ResourceScope()) {
-    NDArray test = NDArray.ones(Context.cpu(), new int[]{2, 2});
-}
-```
-or
-```java
-ResourceScope scope = new ResourceScope();
-try {
-    NDArray test = NDArray.ones(Context.cpu(), new int[]{2, 2});
-} finally {
-    scope.close();
-}
-```
-
-**Note:**
-A ResourceScope within a try block tracks all MXNet native object allocations (NDArray, Symbol, Executor, etc.) and deallocates them at
-the end of the try block. This is also true of the objects that are returned; in the example above, the native memory associated with `test` would be deallocated even if it were returned.
-If you use the object outside of the try block, the process might crash due to illegal memory access.
-
-To retain certain objects created within try blocks, you should explicitly remove them from the scope by calling `scope.moveToOuterScope`.
-It is highly recommended to nest multiple try-with-resources ResourceScopes so you do not have to explicitly manage the lifecycle of the native objects.
-
diff --git a/scala-package/mvnw b/scala-package/mvnw
deleted file mode 100755
index 5551fde8e7db..000000000000
--- a/scala-package/mvnw
+++ /dev/null
@@ -1,286 +0,0 @@
-#!/bin/sh
-# ----------------------------------------------------------------------------
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# ----------------------------------------------------------------------------
-
-# ----------------------------------------------------------------------------
-# Maven2 Start Up Batch script
-#
-# Required ENV vars:
-# ------------------
-#   JAVA_HOME - location of a JDK home dir
-#
-# Optional ENV vars
-# -----------------
-#   M2_HOME - location of maven2's installed home dir
-#   MAVEN_OPTS - parameters passed to the Java VM when running Maven
-#     e.g.
to debug Maven itself, use -# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -# MAVEN_SKIP_RC - flag to disable loading of mavenrc files -# ---------------------------------------------------------------------------- - -if [ -z "$MAVEN_SKIP_RC" ] ; then - - if [ -f /etc/mavenrc ] ; then - . /etc/mavenrc - fi - - if [ -f "$HOME/.mavenrc" ] ; then - . "$HOME/.mavenrc" - fi - -fi - -# OS specific support. $var _must_ be set to either true or false. -cygwin=false; -darwin=false; -mingw=false -case "`uname`" in - CYGWIN*) cygwin=true ;; - MINGW*) mingw=true;; - Darwin*) darwin=true - # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home - # See https://developer.apple.com/library/mac/qa/qa1170/_index.html - if [ -z "$JAVA_HOME" ]; then - if [ -x "/usr/libexec/java_home" ]; then - export JAVA_HOME="`/usr/libexec/java_home`" - else - export JAVA_HOME="/Library/Java/Home" - fi - fi - ;; -esac - -if [ -z "$JAVA_HOME" ] ; then - if [ -r /etc/gentoo-release ] ; then - JAVA_HOME=`java-config --jre-home` - fi -fi - -if [ -z "$M2_HOME" ] ; then - ## resolve links - $0 may be a link to maven's home - PRG="$0" - - # need this for relative symlinks - while [ -h "$PRG" ] ; do - ls=`ls -ld "$PRG"` - link=`expr "$ls" : '.*-> \(.*\)$'` - if expr "$link" : '/.*' > /dev/null; then - PRG="$link" - else - PRG="`dirname "$PRG"`/$link" - fi - done - - saveddir=`pwd` - - M2_HOME=`dirname "$PRG"`/.. - - # make it fully qualified - M2_HOME=`cd "$M2_HOME" && pwd` - - cd "$saveddir" - # echo Using m2 at $M2_HOME -fi - -# For Cygwin, ensure paths are in UNIX format before anything is touched -if $cygwin ; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --unix "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --unix "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --unix "$CLASSPATH"` -fi - -# For Mingw, ensure paths are in UNIX format before anything is touched -if $mingw ; then - [ -n "$M2_HOME" ] && - M2_HOME="`(cd "$M2_HOME"; pwd)`" - [ -n "$JAVA_HOME" ] && - JAVA_HOME="`(cd "$JAVA_HOME"; pwd)`" - # TODO classpath? -fi - -if [ -z "$JAVA_HOME" ]; then - javaExecutable="`which javac`" - if [ -n "$javaExecutable" ] && ! [ "`expr \"$javaExecutable\" : '\([^ ]*\)'`" = "no" ]; then - # readlink(1) is not available as standard on Solaris 10. - readLink=`which readlink` - if [ ! `expr "$readLink" : '\([^ ]*\)'` = "no" ]; then - if $darwin ; then - javaHome="`dirname \"$javaExecutable\"`" - javaExecutable="`cd \"$javaHome\" && pwd -P`/javac" - else - javaExecutable="`readlink -f \"$javaExecutable\"`" - fi - javaHome="`dirname \"$javaExecutable\"`" - javaHome=`expr "$javaHome" : '\(.*\)/bin'` - JAVA_HOME="$javaHome" - export JAVA_HOME - fi - fi -fi - -if [ -z "$JAVACMD" ] ; then - if [ -n "$JAVA_HOME" ] ; then - if [ -x "$JAVA_HOME/jre/sh/java" ] ; then - # IBM's JDK on AIX uses strange locations for the executables - JAVACMD="$JAVA_HOME/jre/sh/java" - else - JAVACMD="$JAVA_HOME/bin/java" - fi - else - JAVACMD="`which java`" - fi -fi - -if [ ! -x "$JAVACMD" ] ; then - echo "Error: JAVA_HOME is not defined correctly." >&2 - echo " We cannot execute $JAVACMD" >&2 - exit 1 -fi - -if [ -z "$JAVA_HOME" ] ; then - echo "Warning: JAVA_HOME environment variable is not set." 
-fi - -CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher - -# traverses directory structure from process work directory to filesystem root -# first directory with .mvn subdirectory is considered project base directory -find_maven_basedir() { - - if [ -z "$1" ] - then - echo "Path not specified to find_maven_basedir" - return 1 - fi - - basedir="$1" - wdir="$1" - while [ "$wdir" != '/' ] ; do - if [ -d "$wdir"/.mvn ] ; then - basedir=$wdir - break - fi - # workaround for JBEAP-8937 (on Solaris 10/Sparc) - if [ -d "${wdir}" ]; then - wdir=`cd "$wdir/.."; pwd` - fi - # end of workaround - done - echo "${basedir}" -} - -# concatenates all lines of a file -concat_lines() { - if [ -f "$1" ]; then - echo "$(tr -s '\n' ' ' < "$1")" - fi -} - -BASE_DIR=`find_maven_basedir "$(pwd)"` -if [ -z "$BASE_DIR" ]; then - exit 1; -fi - -########################################################################################## -# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -# This allows using the maven wrapper in projects that prohibit checking in binary data. -########################################################################################## -if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found .mvn/wrapper/maven-wrapper.jar" - fi -else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." - fi - jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" - while IFS="=" read key value; do - case "$key" in (wrapperUrl) jarUrl="$value"; break ;; - esac - done < "$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" - if [ "$MVNW_VERBOSE" = true ]; then - echo "Downloading from: $jarUrl" - fi - wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" - - if command -v wget > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found wget ... using wget" - fi - wget "$jarUrl" -O "$wrapperJarPath" - elif command -v curl > /dev/null; then - if [ "$MVNW_VERBOSE" = true ]; then - echo "Found curl ... using curl" - fi - curl -o "$wrapperJarPath" "$jarUrl" - else - if [ "$MVNW_VERBOSE" = true ]; then - echo "Falling back to using Java to download" - fi - javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" - if [ -e "$javaClass" ]; then - if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Compiling MavenWrapperDownloader.java ..." - fi - # Compiling the Java class - ("$JAVA_HOME/bin/javac" "$javaClass") - fi - if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then - # Running the downloader - if [ "$MVNW_VERBOSE" = true ]; then - echo " - Running MavenWrapperDownloader.java ..." 
- fi - ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") - fi - fi - fi -fi -########################################################################################## -# End of extension -########################################################################################## - -export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} -if [ "$MVNW_VERBOSE" = true ]; then - echo $MAVEN_PROJECTBASEDIR -fi -MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" - -# For Cygwin, switch paths to Windows format before running java -if $cygwin; then - [ -n "$M2_HOME" ] && - M2_HOME=`cygpath --path --windows "$M2_HOME"` - [ -n "$JAVA_HOME" ] && - JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"` - [ -n "$CLASSPATH" ] && - CLASSPATH=`cygpath --path --windows "$CLASSPATH"` - [ -n "$MAVEN_PROJECTBASEDIR" ] && - MAVEN_PROJECTBASEDIR=`cygpath --path --windows "$MAVEN_PROJECTBASEDIR"` -fi - -WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -exec "$JAVACMD" \ - $MAVEN_OPTS \ - -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ - "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ - ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/scala-package/mvnw.cmd b/scala-package/mvnw.cmd deleted file mode 100755 index 48363fa60b93..000000000000 --- a/scala-package/mvnw.cmd +++ /dev/null @@ -1,161 +0,0 @@ -@REM ---------------------------------------------------------------------------- -@REM Licensed to the Apache Software Foundation (ASF) under one -@REM or more contributor license agreements. See the NOTICE file -@REM distributed with this work for additional information -@REM regarding copyright ownership. The ASF licenses this file -@REM to you under the Apache License, Version 2.0 (the -@REM "License"); you may not use this file except in compliance -@REM with the License. You may obtain a copy of the License at -@REM -@REM http://www.apache.org/licenses/LICENSE-2.0 -@REM -@REM Unless required by applicable law or agreed to in writing, -@REM software distributed under the License is distributed on an -@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -@REM KIND, either express or implied. See the License for the -@REM specific language governing permissions and limitations -@REM under the License. -@REM ---------------------------------------------------------------------------- - -@REM ---------------------------------------------------------------------------- -@REM Maven2 Start Up Batch script -@REM -@REM Required ENV vars: -@REM JAVA_HOME - location of a JDK home dir -@REM -@REM Optional ENV vars -@REM M2_HOME - location of maven2's installed home dir -@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands -@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a key stroke before ending -@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven -@REM e.g. 
to debug Maven itself, use -@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files -@REM ---------------------------------------------------------------------------- - -@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' -@echo off -@REM set title of command window -title %0 -@REM enable echoing my setting MAVEN_BATCH_ECHO to 'on' -@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% - -@REM set %HOME% to equivalent of $HOME -if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") - -@REM Execute a user defined script before this one -if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre -@REM check for pre script, once with legacy .bat ending and once with .cmd ending -if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" -if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" -:skipRcPre - -@setlocal - -set ERROR_CODE=0 - -@REM To isolate internal variables from possible post scripts, we use another setlocal -@setlocal - -@REM ==== START VALIDATION ==== -if not "%JAVA_HOME%" == "" goto OkJHome - -echo. -echo Error: JAVA_HOME not found in your environment. >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -:OkJHome -if exist "%JAVA_HOME%\bin\java.exe" goto init - -echo. -echo Error: JAVA_HOME is set to an invalid directory. >&2 -echo JAVA_HOME = "%JAVA_HOME%" >&2 -echo Please set the JAVA_HOME variable in your environment to match the >&2 -echo location of your Java installation. >&2 -echo. -goto error - -@REM ==== END VALIDATION ==== - -:init - -@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". -@REM Fallback to current working directory if not found. - -set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% -IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir - -set EXEC_DIR=%CD% -set WDIR=%EXEC_DIR% -:findBaseDir -IF EXIST "%WDIR%"\.mvn goto baseDirFound -cd .. -IF "%WDIR%"=="%CD%" goto baseDirNotFound -set WDIR=%CD% -goto findBaseDir - -:baseDirFound -set MAVEN_PROJECTBASEDIR=%WDIR% -cd "%EXEC_DIR%" -goto endDetectBaseDir - -:baseDirNotFound -set MAVEN_PROJECTBASEDIR=%EXEC_DIR% -cd "%EXEC_DIR%" - -:endDetectBaseDir - -IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig - -@setlocal EnableExtensions EnableDelayedExpansion -for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a -@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% - -:endReadAdditionalConfig - -SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" -set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" -set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain - -set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.4.2/maven-wrapper-0.4.2.jar" -FOR /F "tokens=1,2 delims==" %%A IN (%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties) DO ( - IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B -) - -@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central -@REM This allows using the maven wrapper in projects that prohibit checking in binary data. -if exist %WRAPPER_JAR% ( - echo Found %WRAPPER_JAR% -) else ( - echo Couldn't find %WRAPPER_JAR%, downloading it ... 
-    echo Downloading from: %DOWNLOAD_URL%
-    powershell -Command "(New-Object Net.WebClient).DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"
-    echo Finished downloading %WRAPPER_JAR%
-)
-@REM End of extension
-
-%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %*
-if ERRORLEVEL 1 goto error
-goto end
-
-:error
-set ERROR_CODE=1
-
-:end
-@endlocal & set ERROR_CODE=%ERROR_CODE%
-
-if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost
-@REM check for post script, once with legacy .bat ending and once with .cmd ending
-if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat"
-if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd"
-:skipRcPost
-
-@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on'
-if "%MAVEN_BATCH_PAUSE%" == "on" pause
-
-if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE%
-
-exit /B %ERROR_CODE%
diff --git a/scala-package/mxnet-demo/java-demo/README.md b/scala-package/mxnet-demo/java-demo/README.md
deleted file mode 100644
index d265dde4f218..000000000000
--- a/scala-package/mxnet-demo/java-demo/README.md
+++ /dev/null
@@ -1,124 +0,0 @@
- - - - - - - - - - - - - - - - -
-# MXNet Java Sample Project
-This is a project demonstrating how to use the Maven-published Scala/Java MXNet package.
-The examples provided include:
-* Hello World
-* NDArray creation
-* NDArray operation
-* Object Detection using the Inference API
-* Image Classification using the Predictor API
-
-## Setup
-You are required to use Maven to build the package, with the following commands run under `java-demo`:
-```
-mvn package
-```
-This command picks the default values specified in the [pom](https://github.com/apache/incubator-mxnet/blob/master/scala-package/mxnet-demo/java-demo/pom.xml) file.
-
-Note: If you are planning to use a GPU, please add `-Dmxnet.profile=linux-x86_64-gpu`.
-
-Note: The Maven package is built with CUDA 9.2.
-
-### Use customized version set
-Alternatively, you can build the package with an explicit version set by
-using the following commands:
-```Bash
-export SCALA_VERSION_PROFILE=2.11
-export SCALA_PKG_PROFILE=
-mvn package -Dmxnet.profile=$SCALA_PKG_PROFILE \
-    -Dmxnet.scalaprofile=$SCALA_VERSION_PROFILE
-```
-These environment variables (`SCALA_PKG_PROFILE`, `SCALA_VERSION_PROFILE`)
-should be set before executing the line above.
-`SCALA_PKG_PROFILE` should be chosen from `osx-x86_64-cpu`, `linux-x86_64-cpu` or `linux-x86_64-gpu`.
-
-
-## Run
-### NDArrayCreation
-The samples are plain Java classes executed with `java`. You can execute the `NDArrayCreation` example as follows:
-```Bash
-bash bin/java_sample.sh
-```
-You can also run the following command manually:
-```Bash
-java -cp $CLASSPATH mxnet.NDArrayCreation
-```
-However, you have to define the classpath before you run the demo code. More information can be found in `bin/java_sample.sh`.
-The `CLASSPATH` should point to the jar files you have downloaded; the library is then loaded automatically and the example runs.
-
-In order to use a `Param` object, we require the user to place this line at the top of the class:
-```
-static NDArray$ NDArray = NDArray$.MODULE$;
-```
-This makes the `NDArray` companion object statically accessible from the outside, as in the sketch below.
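As a minimal sketch of that pattern (reusing the `random_uniformParam` builder that appears in the `NDArrayCreation` sample later in this diff):

```java
import org.apache.mxnet.javaapi.*;

public class ParamObjectSketch {
    // Make the operators on the NDArray companion object statically accessible
    static NDArray$ NDArray = NDArray$.MODULE$;

    public static void main(String[] args) {
        // Each generated operator takes a *Param builder carrying its arguments
        NDArray random = NDArray.random_uniform(
                new random_uniformParam()
                        .setLow(0.0f)
                        .setHigh(2.0f)
                        .setShape(new Shape(new int[]{2, 2})))[0];
        System.out.println(random);
    }
}
```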
-
-### Object Detection using the Inference API
-We also provide an object detection example, which downloads an ImageNet-trained resnet50 SSD model and runs inference on an image to return detection results such as:
-```Bash
-Class: car
-Probabilities: 0.99847263
-Coord: 312.21335, 72.02908, 456.01443, 150.66176
-Class: bicycle
-Probabilities: 0.9047381
-Coord: 155.9581, 149.96365, 383.83694, 418.94516
-Class: dog
-Probabilities: 0.82268167
-Coord: 83.82356, 179.14001, 206.63783, 476.78754
-```
-
-You can run it using the command shown below:
-```Bash
-bash bin/run_od.sh
-```
-or, alternatively:
-```Bash
-java -cp $CLASSPATH mxnet.ObjectDetection
-```
-
-If you want to run on a GPU, set an environment variable as follows:
-```Bash
-export SCALA_TEST_ON_GPU=1
-```
-## Clean up
-Cleaning up the Maven package is simple:
-```Bash
-mvn clean
-```
-
-## Convert to Eclipse project (Optional)
-You can convert the Maven project to an Eclipse one by running the following command:
-```
-mvn eclipse:eclipse
-```
-
-## Q & A
-If you are facing an OpenCV issue on Ubuntu, try the following to install OpenCV 3.4 (required by the 1.2.0 package and above):
-```Bash
-sudo add-apt-repository ppa:timsc/opencv-3.4
-sudo apt-get update
-sudo apt install libopencv-imgcodecs3.4
-```
-
-Is there any other version available?
-
-You can find nightly release versions [here](https://repository.apache.org/#nexus-search;gav~org.apache.mxnet~~2.0.0-SNAPSHOT~~).
-Please keep the same version in the pom file, or pick one of the [other versions here](https://repository.apache.org/#nexus-search;gav~org.apache.mxnet~~~~), to run this demo.
diff --git a/scala-package/mxnet-demo/java-demo/bin/java_sample.sh b/scala-package/mxnet-demo/java-demo/bin/java_sample.sh
deleted file mode 100755
index fb1795f20f9d..000000000000
--- a/scala-package/mxnet-demo/java-demo/bin/java_sample.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#!/bin/bash
-CURR_DIR=$(cd $(dirname $0)/../; pwd)
-CLASSPATH=$CLASSPATH:$CURR_DIR/target/*:$CLASSPATH:$CURR_DIR/target/dependency/*
-java -Xmx8G -cp $CLASSPATH mxnet.NDArrayCreation
diff --git a/scala-package/mxnet-demo/java-demo/bin/run_od.sh b/scala-package/mxnet-demo/java-demo/bin/run_od.sh
deleted file mode 100755
index 4370518dc8cd..000000000000
--- a/scala-package/mxnet-demo/java-demo/bin/run_od.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -#!/bin/bash -CURR_DIR=$(cd $(dirname $0)/../; pwd) -CLASSPATH=$CLASSPATH:$CURR_DIR/target/*:$CLASSPATH:$CURR_DIR/target/dependency/* -java -Xmx8G -cp $CLASSPATH mxnet.ObjectDetection diff --git a/scala-package/mxnet-demo/java-demo/pom.xml b/scala-package/mxnet-demo/java-demo/pom.xml deleted file mode 100644 index d9d60a50055f..000000000000 --- a/scala-package/mxnet-demo/java-demo/pom.xml +++ /dev/null @@ -1,117 +0,0 @@ - - - - 4.0.0 - Demo - mxnet-java-demo - 1.0-SNAPSHOT - MXNet Java Demo - - - 1.8 - 1.8 - [2.0.0-SNAPSHOT, ) - 2.11 - - - - - ci-nightly - - - Apache Snapshot - https://repository.apache.org/content/groups/snapshots - - - - [2.0.0-SNAPSHOT, ) - - - - osx-x86_64 - - - mac - - - - osx-x86_64-cpu - - - - linux-x86_64 - - - unix - Linux - - - - linux-x86_64-cpu - - - - - - - Apache Snapshot - https://repository.apache.org/content/groups/snapshots - - - - - - org.apache.mxnet - mxnet-full_${mxnet.scalaprofile}-${mxnet.profile} - ${mxnet.version} - - - org.apache.mxnet - mxnet-full_${mxnet.scalaprofile}-${mxnet.profile} - ${mxnet.version} - sources - - - commons-io - commons-io - 2.4 - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - 2.9 - - - copy-dependencies - package - - copy-dependencies - - - - - - - - diff --git a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/HelloWorld.java b/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/HelloWorld.java deleted file mode 100644 index e119e56e67c4..000000000000 --- a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/HelloWorld.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-package mxnet;
-
-import org.apache.mxnet.javaapi.*;
-import java.util.Arrays;
-
-public class HelloWorld {
-    static NDArray$ NDArray = NDArray$.MODULE$;
-
-    public static void main(String[] args) {
-        System.out.println("Hello World!");
-        NDArray nd = new NDArray(new float[]{2.0f, 3.0f}, new Shape(new int[]{1, 2}), Context.cpu());
-        System.out.println(nd.shape());
-        NDArray nd2 = NDArray.dot(new dotParam(nd, nd.T()))[0];
-        System.out.println(Arrays.toString(nd2.toArray()));
-    }
-}
diff --git a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/ImageClassification.java b/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/ImageClassification.java
deleted file mode 100644
index 8cb58da5c2e6..000000000000
--- a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/ImageClassification.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package mxnet;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.mxnet.infer.javaapi.Predictor;
-import org.apache.mxnet.javaapi.*;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.List;
-
-public class ImageClassification {
-    private static String modelPath;
-    private static String imagePath;
-
-    private static void downloadUrl(String url, String filePath) {
-        File tmpFile = new File(filePath);
-        if (!tmpFile.exists()) {
-            try {
-                FileUtils.copyURLToFile(new URL(url), tmpFile);
-            } catch (Exception exception) {
-                System.err.println(exception);
-            }
-        }
-    }
-
-    public static void downloadModelImage() {
-        String tempDirPath = System.getProperty("java.io.tmpdir");
-        String baseUrl = "https://s3.us-east-2.amazonaws.com/scala-infer-models";
-        downloadUrl(baseUrl + "/resnet-18/resnet-18-symbol.json",
-                tempDirPath + "/resnet18/resnet-18-symbol.json");
-        downloadUrl(baseUrl + "/resnet-18/resnet-18-0000.params",
-                tempDirPath + "/resnet18/resnet-18-0000.params");
-        downloadUrl(baseUrl + "/resnet-18/synset.txt",
-                tempDirPath + "/resnet18/synset.txt");
-        downloadUrl("https://s3.amazonaws.com/model-server/inputs/Pug-Cookie.jpg",
-                tempDirPath + "/inputImages/resnet18/Pug-Cookie.jpg");
-        modelPath = tempDirPath + File.separator + "resnet18/resnet-18";
-        imagePath = tempDirPath + File.separator +
-                "inputImages/resnet18/Pug-Cookie.jpg";
-    }
-
-    /**
-     * Helper method to print the class with the maximum predicted probability
-     * @param probabilities The float array of probabilities
-     * @param modelPathPrefix The model path prefix, needed to locate synset.txt
-     */
-    private static String printMaximumClass(float[] probabilities,
-                                            String modelPathPrefix) throws IOException {
-        String synsetFilePath = modelPathPrefix.substring(0,
-                1 + modelPathPrefix.lastIndexOf(File.separator)) + "/synset.txt";
-        BufferedReader reader = new BufferedReader(new FileReader(synsetFilePath));
-        ArrayList<String> list = new ArrayList<>();
-        String line = reader.readLine();
-
-        while (line != null) {
-            list.add(line);
-            line = reader.readLine();
-        }
-        reader.close();
-
-        int maxIdx = 0;
-        for (int i = 1; i < probabilities.length; i++) {
-            if (probabilities[i] > probabilities[maxIdx]) {
-                maxIdx = i;
-            }
-        }
-
-        return "Probability : " + probabilities[maxIdx] + " Class : " + list.get(maxIdx);
-    }
-
-    public static void main(String[] args) {
-        // Download the model and image
-        downloadModelImage();
-
-        // Prepare the model
-        List<Context> context = new ArrayList<>();
-        context.add(Context.cpu());
-        List<DataDesc> inputDesc = new ArrayList<>();
-        Shape inputShape = new Shape(new int[]{1, 3, 224, 224});
-        inputDesc.add(new DataDesc("data", inputShape, DType.Float32(), "NCHW"));
-        Predictor predictor = new Predictor(modelPath, inputDesc, context, 0);
-
-        // Prepare data
-        NDArray nd = Image.imRead(imagePath, 1, true);
-        nd = Image.imResize(nd, 224, 224, null);
-        nd = NDArray.transpose(nd, new Shape(new int[]{2, 0, 1}), null)[0]; // HWC to CHW
-        nd = NDArray.expand_dims(nd, 0, null)[0]; // Add N -> NCHW
-        nd = nd.asType(DType.Float32()); // Inference with Float32
-
-        // Predict directly
-        float[][] result = predictor.predict(new float[][]{nd.toArray()});
-        try {
-            System.out.println("Predict with Float input");
-            System.out.println(printMaximumClass(result[0], modelPath));
-        } catch (IOException e) {
-            System.err.println(e);
-        }
-
-        // Predict with NDArray
-        List<NDArray> ndList = new ArrayList<>();
-        ndList.add(nd);
-        List<NDArray> ndResult = predictor.predictWithNDArray(ndList);
-        try {
-            System.out.println("Predict with NDArray");
-            System.out.println(printMaximumClass(ndResult.get(0).toArray(), modelPath));
-        } catch (IOException e) {
-            System.err.println(e);
-        }
-    }
-}
diff --git a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/NDArrayCreation.java b/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/NDArrayCreation.java
deleted file mode 100644
index 4361c06edf32..000000000000
--- a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/NDArrayCreation.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package mxnet; - -import org.apache.mxnet.javaapi.*; - -public class NDArrayCreation { - static NDArray$ NDArray = NDArray$.MODULE$; - public static void main(String[] args) { - - // Create new NDArray - NDArray nd = new NDArray(new float[]{2.0f, 3.0f}, new Shape(new int[]{1, 2}), Context.cpu()); - System.out.println(nd); - - // create new Double NDArray - NDArray ndDouble = new NDArray(new double[]{2.0d, 3.0d}, new Shape(new int[]{2, 1}), Context.cpu()); - System.out.println(ndDouble); - - // create ones - NDArray ones = NDArray.ones(Context.cpu(), new int[] {1, 2, 3}); - System.out.println(ones); - - // random - NDArray random = NDArray.random_uniform( - new random_uniformParam() - .setLow(0.0f) - .setHigh(2.0f) - .setShape(new Shape(new int[]{10, 10})) - )[0]; - System.out.println(random); - } -} diff --git a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/NDArrayOperation.java b/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/NDArrayOperation.java deleted file mode 100644 index 646adf5550b1..000000000000 --- a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/NDArrayOperation.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package mxnet; - -import org.apache.mxnet.javaapi.*; - -public class NDArrayOperation { - static NDArray$ NDArray = NDArray$.MODULE$; - public static void main(String[] args) { - NDArray nd = new NDArray(new float[]{2.0f, 3.0f}, new Shape(new int[]{1, 2}), Context.cpu()); - - // Transpose - NDArray ndT = nd.T(); - System.out.println(nd); - System.out.println(ndT); - - // change Data Type - NDArray ndInt = nd.asType(DType.Int32()); - System.out.println(ndInt); - - // element add - NDArray eleAdd = NDArray.elemwise_add(nd, nd, null)[0]; - System.out.println(eleAdd); - - // norm (L2 Norm) - NDArray normed = NDArray.norm(new normParam(nd))[0]; - System.out.println(normed); - } -} diff --git a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/ObjectDetection.java b/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/ObjectDetection.java deleted file mode 100644 index 65fe286aa2c7..000000000000 --- a/scala-package/mxnet-demo/java-demo/src/main/java/mxnet/ObjectDetection.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package mxnet;
-import org.apache.mxnet.infer.javaapi.ObjectDetectorOutput;
-import org.apache.mxnet.javaapi.*;
-import org.apache.mxnet.infer.javaapi.ObjectDetector;
-import org.apache.commons.io.FileUtils;
-import java.io.File;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-public class ObjectDetection {
-    private static String modelPath;
-    private static String imagePath;
-
-    private static void downloadUrl(String url, String filePath) {
-        File tmpFile = new File(filePath);
-        if (!tmpFile.exists()) {
-            try {
-                FileUtils.copyURLToFile(new URL(url), tmpFile);
-            } catch (Exception exception) {
-                System.err.println(exception);
-            }
-        }
-    }
-
-    public static void downloadModelImage() {
-        String tempDirPath = System.getProperty("java.io.tmpdir");
-        System.out.println(String.format("tempDirPath: %s", tempDirPath));
-        imagePath = tempDirPath + "/inputImages/resnetssd/dog-ssd.jpg";
-        String imgURL = "https://s3.amazonaws.com/model-server/inputs/dog-ssd.jpg";
-        downloadUrl(imgURL, imagePath);
-        modelPath = tempDirPath + "/resnetssd/resnet50_ssd_model";
-        System.out.println("Download model files, this can take a while...");
-        String modelURL = "https://s3.amazonaws.com/model-server/models/resnet50_ssd/";
-        downloadUrl(modelURL + "resnet50_ssd_model-symbol.json",
-                tempDirPath + "/resnetssd/resnet50_ssd_model-symbol.json");
-        downloadUrl(modelURL + "resnet50_ssd_model-0000.params",
-                tempDirPath + "/resnetssd/resnet50_ssd_model-0000.params");
-        downloadUrl(modelURL + "synset.txt",
-                tempDirPath + "/resnetssd/synset.txt");
-    }
-
-    static List<List<ObjectDetectorOutput>>
-    runObjectDetectionSingle(String modelPathPrefix, String inputImagePath, List<Context> context) {
-        Shape inputShape = new Shape(new int[] {1, 3, 512, 512});
-        List<DataDesc> inputDescriptors = new ArrayList<>();
-        inputDescriptors.add(new DataDesc("data", inputShape, DType.Float32(), "NCHW"));
-        ObjectDetector objDet = new ObjectDetector(modelPathPrefix, inputDescriptors, context, 0);
-        return objDet.imageObjectDetect(ObjectDetector.loadImageFromFile(inputImagePath), 3);
-    }
-
-    public static void main(String[] args) {
-        List<Context> context = new ArrayList<>();
-        context.add(Context.cpu());
-        downloadModelImage();
-
-        List<List<ObjectDetectorOutput>> output
-                = runObjectDetectionSingle(modelPath, imagePath, context);
-
-        Shape inputShape = new Shape(new int[] {1, 3, 512, 512});
-        Shape outputShape = new Shape(new int[] {1, 6132, 6});
-        int width = inputShape.get(2);
-        int height = inputShape.get(3);
-        String outputStr = "\n";
-
-        for (List<ObjectDetectorOutput> ele : output) {
-            for (ObjectDetectorOutput i : ele) {
-                outputStr += "Class: " + i.getClassName() + "\n";
-                outputStr += "Probabilities: " + i.getProbability() + "\n";
-
-                List<Float> coord = Arrays.asList(i.getXMin() * width,
-                        i.getXMax() * height, i.getYMin() * width, i.getYMax() * height);
-                StringBuilder sb = new StringBuilder();
-                for (float c : coord) {
-                    sb.append(", ").append(c);
-                }
-                outputStr += "Coord: " + sb.substring(2) + "\n";
-            }
-        }
-        System.out.println(outputStr);
-    }
-}
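The sample above hardcodes `Context.cpu()`. As a sketch (illustrative, not part of the deleted sources), the same `SCALA_TEST_ON_GPU` environment-variable switch used by the Scala demo elsewhere in this diff would look like this in Java:

```java
import org.apache.mxnet.javaapi.Context;

public class ContextSelection {
    // Mirrors the SCALA_TEST_ON_GPU check from the Scala demo code
    static Context chooseContext() {
        String onGpu = System.getenv("SCALA_TEST_ON_GPU");
        return (onGpu != null && Integer.parseInt(onGpu) == 1)
                ? Context.gpu() : Context.cpu();
    }

    public static void main(String[] args) {
        System.out.println(chooseContext());
    }
}
```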
diff --git a/scala-package/mxnet-demo/scala-demo/README.md b/scala-package/mxnet-demo/scala-demo/README.md
deleted file mode 100644
index 6c638194687c..000000000000
--- a/scala-package/mxnet-demo/scala-demo/README.md
+++ /dev/null
@@ -1,86 +0,0 @@
- - - - - - - - - - - - - - - - -
-# MXNet Scala Sample Project
-This is a project created to use the Maven-published Scala package, with two Scala examples.
-## Setup
-You are required to use Maven to build the package, by running the following:
-```
-mvn package
-```
-This command picks the default values specified in the pom file.
-
-Note: If you are planning to use a GPU, please add `-Dmxnet.profile=linux-x86_64-gpu`.
-
-### Use customized version set
-You can override the defaults with the environment variables shown below:
-```Bash
-export SCALA_VERSION_PROFILE=2.11 SCALA_VERSION=2.11.8
-export SCALA_PKG_PROFILE=
-mvn package -Dmxnet.profile=$(SCALA_PKG_PROFILE) \
-    -Dmxnet.scalaprofile=$(SCALA_VERSION_PROFILE) \
-    -Dscala.version=$(SCALA_VERSION)
-```
-These environment variables (`SCALA_PKG_PROFILE`, `SCALA_VERSION_PROFILE`, `SCALA_VERSION`)
-should be set before executing the line above.
-
-To obtain the most recent MXNet version, see [here](https://mvnrepository.com/search?q=org.apache.mxnet).
-
-## Run
-### Hello World
-The compiled Scala classes are executed with `java`. You can execute the HelloWorld example as follows:
-```Bash
-java -cp $CLASSPATH sample.HelloWorld
-```
-However, you have to define the classpath before you run the demo code. More information can be found in `demo.sh`, and you can run the bash script as follows:
-```Bash
-bash bin/demo.sh
-```
-It will load the library automatically and run the example.
-### Image Classification using the Inference API
-We also provide an image classification example, which downloads an ImageNet-trained resnet18 model and runs inference on a cute puppy to return classification results such as:
-```Bash
-Classes with top 5 probability = Vector((n02110958 pug, pug-dog,0.49161583), (n02108422 bull mastiff,0.40025946), (n02108089 boxer,0.04657662), (n04409515 tennis ball,0.028773671), (n02109047 Great Dane,0.009004086))
-```
-You can review the complete example [here](https://github.com/apache/incubator-mxnet/tree/master/scala-package/examples/src/main/scala/org/apache/mxnetexamples/infer/imageclassifier)
-
-You can run it using the command shown below:
-```Bash
-java -cp $CLASSPATH sample.ImageClassificationExample
-```
-or via the script:
-```Bash
-bash bin/run_im.sh
-```
-
-If you want to run on a GPU, set an environment variable as follows:
-```Bash
-export SCALA_TEST_ON_GPU=1
-```
-## Clean up
-To clean up the Maven package, run the following:
-```Bash
-mvn clean
-```
-
-## Q & A
-If you are facing an OpenCV issue on Ubuntu, try the following to install OpenCV 3.4 (required by the 1.2.0 package and above):
-```Bash
-sudo add-apt-repository ppa:timsc/opencv-3.4
-sudo apt-get update
-sudo apt install libopencv-imgcodecs3.4
-```
\ No newline at end of file
diff --git a/scala-package/mxnet-demo/scala-demo/bin/demo.sh b/scala-package/mxnet-demo/scala-demo/bin/demo.sh
deleted file mode 100644
index d5e567b432ce..000000000000
--- a/scala-package/mxnet-demo/scala-demo/bin/demo.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -#!/bin/bash -CURR_DIR=$(cd $(dirname $0)/../; pwd) -CLASSPATH=$CLASSPATH:$CURR_DIR/target/*:$CLASSPATH:$CURR_DIR/target/classes/lib/* -java -Xmx8G -cp $CLASSPATH sample.HelloWorld diff --git a/scala-package/mxnet-demo/scala-demo/bin/run_im.sh b/scala-package/mxnet-demo/scala-demo/bin/run_im.sh deleted file mode 100644 index 68beb8941766..000000000000 --- a/scala-package/mxnet-demo/scala-demo/bin/run_im.sh +++ /dev/null @@ -1,21 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -#!/bin/bash -CURR_DIR=$(cd $(dirname $0)/../; pwd) - -CLASSPATH=$CLASSPATH:$CURR_DIR/target/*:$CLASSPATH:$CURR_DIR/target/classes/lib/* -java -Xmx8G -cp $CLASSPATH sample.ImageClassificationExample \ No newline at end of file diff --git a/scala-package/mxnet-demo/scala-demo/pom.xml b/scala-package/mxnet-demo/scala-demo/pom.xml deleted file mode 100644 index 98653ec95370..000000000000 --- a/scala-package/mxnet-demo/scala-demo/pom.xml +++ /dev/null @@ -1,163 +0,0 @@ - - - - 4.0.0 - Demo - mxnet-scala-demo - 1.0-SNAPSHOT - MXNet Scala Demo - pom - - - - ci-nightly - - - Apache Snapshot - https://repository.apache.org/content/groups/snapshots - - - - [2.0.0-SNAPSHOT, ) - - - - osx-x86_64 - - - mac - - - - osx-x86_64-cpu - - - - linux-x86_64 - - - unix - Linux - - - - linux-x86_64-cpu - - - - - - 2.11 - [1.4.0, ) - 2.11.8 - - - - - org.apache.mxnet - mxnet-full_${mxnet.scalaprofile}-${mxnet.profile} - ${mxnet.version} - - - org.scala-lang - scala-library - ${scala.version} - - - commons-io - commons-io - 2.4 - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.3 - - 1.6 - 1.6 - UTF-8 - - - - maven-resources-plugin - 2.7 - - - org.apache.maven.plugins - maven-dependency-plugin - 2.9 - - - copy-dependencies - package - - copy-dependencies - - - ${project.build.outputDirectory}/lib - runtime - test,provided - false - false - true - - - - - - org.apache.maven.plugins - maven-jar-plugin - 2.5 - - - package - - jar - - - - **/* - - - - - - - net.alchim31.maven - scala-maven-plugin - 3.2.2 - - - compile - - compile - - compile - - - - - - - diff --git a/scala-package/mxnet-demo/scala-demo/src/main/scala/sample/HelloWorld.scala b/scala-package/mxnet-demo/scala-demo/src/main/scala/sample/HelloWorld.scala deleted file mode 100644 index c625b6d0e812..000000000000 --- a/scala-package/mxnet-demo/scala-demo/src/main/scala/sample/HelloWorld.scala +++ /dev/null @@ -1,26 +0,0 
@@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package sample -import org.apache.mxnet._ - -object HelloWorld { - def main(args: Array[String]): Unit = { - println("hello World") - val arr = NDArray.ones(2, 3) - println(arr.shape) - } -} \ No newline at end of file diff --git a/scala-package/mxnet-demo/scala-demo/src/main/scala/sample/ImageClassificationExample.scala b/scala-package/mxnet-demo/scala-demo/src/main/scala/sample/ImageClassificationExample.scala deleted file mode 100644 index bb6114dfede4..000000000000 --- a/scala-package/mxnet-demo/scala-demo/src/main/scala/sample/ImageClassificationExample.scala +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sample - -import org.apache.mxnet.{Context, DType, DataDesc, Shape} -import org.kohsuke.args4j.{CmdLineParser, Option} -import org.slf4j.LoggerFactory -import org.apache.mxnet.infer.{ImageClassifier, _} - -import scala.collection.JavaConverters._ -import java.io.File -import java.net.URL -import org.apache.commons.io._ - -import scala.collection.mutable.ListBuffer - -/** - * Example showing usage of Infer package to do inference on resnet-18 model - * Follow instructions in README.md to run this example. 
- */
-object ImageClassificationExample {
-
-  def downloadUrl(url: String, filePath: String) : Unit = {
-    val tmpFile = new File(filePath)
-    if (!tmpFile.exists()) {
-      FileUtils.copyURLToFile(new URL(url), tmpFile)
-    }
-  }
-
-  def downloadModelImage() : (String, String) = {
-    val tempDirPath = System.getProperty("java.io.tmpdir")
-    printf("tempDirPath: %s\n", tempDirPath)
-    val imgPath = tempDirPath + "/inputImages/resnet18/Pug-Cookie.jpg"
-    val imgURL = "https://s3.amazonaws.com/model-server/inputs/Pug-Cookie.jpg"
-    downloadUrl(imgURL, imgPath)
-
-    val baseUrl = "https://s3.us-east-2.amazonaws.com/scala-infer-models"
-    var tmpPath = tempDirPath + "/resnet18/resnet-18-symbol.json"
-    var tmpUrl = baseUrl + "/resnet-18/resnet-18-symbol.json"
-    downloadUrl(tmpUrl, tmpPath)
-
-    tmpPath = tempDirPath + "/resnet18/resnet-18-0000.params"
-    tmpUrl = baseUrl + "/resnet-18/resnet-18-0000.params"
-    downloadUrl(tmpUrl, tmpPath)
-
-    tmpPath = tempDirPath + "/resnet18/synset.txt"
-    tmpUrl = baseUrl + "/resnet-18/synset.txt"
-    downloadUrl(tmpUrl, tmpPath)
-
-    (imgPath, tempDirPath + "/resnet18/resnet-18")
-  }
-
-  def main(args: Array[String]): Unit = {
-
-    var context = Context.cpu()
-    if (System.getenv().containsKey("SCALA_TEST_ON_GPU") &&
-        System.getenv("SCALA_TEST_ON_GPU").toInt == 1) {
-      context = Context.gpu()
-    }
-    val (inputImagePath, modelPathPrefix) = downloadModelImage()
-
-    val dType = DType.Float32
-    val inputShape = Shape(1, 3, 224, 224)
-    val inputDescriptor = IndexedSeq(DataDesc("data", inputShape, dType, "NCHW"))
-
-    // Create an ImageClassifier instance
-    val imgClassifier: ImageClassifier = new
-        ImageClassifier(modelPathPrefix, inputDescriptor, context)
-
-    // Load a single image from file as a BufferedImage
-    val img = ImageClassifier.loadImageFromFile(inputImagePath)
-
-    // Run inference on the single image
-    val output = imgClassifier.classifyImage(img, Some(5))
-
-    // Print the top 5 class probabilities
-    for (i <- output) {
-      printf("Classes with top 5 probability = %s \n", i)
-    }
-
-  }
-}
diff --git a/scala-package/native/README.md b/scala-package/native/README.md
deleted file mode 100644
index 4ca6e5f75eaa..000000000000
--- a/scala-package/native/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
- - - - - - - - - - - - - - - - -
-# MXNet Scala JNI
-
-MXNet Scala JNI is a thin wrapper layer over the underlying libmxnet.so.
-
-## javah
-JNI native code requires a header file that matches the Java/Scala interface.
-This file is usually generated with javah.
-
-In our case, org_apache_mxnet_native_c_api.h is generated and used to compile the native code.
-
-To improve build performance, we check in the generated org_apache_mxnet_native_c_api.h file,
-and we added a check that detects mismatches between the Scala code and the generated header. The checker
-makes sure we don't forget to update the org_apache_mxnet_native_c_api.h file.
-
-
-## Linker options
-
-The Scala JNI library (libmxnet-scala.so/libmxnet-scala.jnilib) is dynamically linked to libmxnet.so.
-MXNet Scala first tries to load libmxnet.so from the system LD_LIBRARY_PATH.
-If that fails, it tries to resolve libmxnet.so in the same location as the libmxnet-scala.so file.
-
-### Linux
-```
--Wl,-rpath=$ORIGIN -lmxnet
-```
-The option above tells the dynamic linker to look for libmxnet.so in the same location as the Scala JNI library.
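A tiny stand-alone probe of the first half of that lookup order (a sketch, not part of the package):

```java
public class LoadProbe {
    public static void main(String[] args) {
        // The JVM resolves libmxnet-scala.so via java.library.path (seeded from
        // LD_LIBRARY_PATH on Linux); the dynamic linker then resolves its
        // libmxnet.so dependency as described above, falling back to the rpath.
        System.out.println(System.getProperty("java.library.path"));
        System.loadLibrary("mxnet-scala"); // throws UnsatisfiedLinkError if not found
        System.out.println("loaded libmxnet-scala");
    }
}
```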
-
-
-### Mac OSX
-On Mac, we have to run the install_name_tool command to change the library loading path:
-```bash
-install_name_tool -change lib/libmxnet.so @loader_path/libmxnet.so libmxnet-scala.jnilib
-```
-
-Other linker options:
-* -shared : link as a shared library
-* -Wl,-install_name,libmxnet-scala.jnilib : avoid using the build machine's absolute path
-* -framework JavaVM : standard JNI option on Mac
-* -Wl,-exported_symbol,_Java_* : standard JNI option on Mac
-* -Wl,-x : do not put non-global symbols in the output file's symbol table.
-
-
-## Compiler flags
-
-The Scala JNI code technically does not depend on any of the MXNet make flags;
-however, the c_api.h header pulls in many other dependency headers,
-which requires us to add -DMSHADOW_USE_MKL and -DMSHADOW_USE_CUDA to compile the JNI code.
-These flags are not actually used by the JNI code and do not affect Scala's behavior.
-
-
-### Linux
-
-```
--DMSHADOW_USE_MKL=0
--DMSHADOW_USE_CUDA=0
--O3 -DNDEBUG=1 -fPIC -msse3 -mf16c
--Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs
-```
-
-### Mac OSX
-
-```
--DMSHADOW_USE_MKL=0
--DMSHADOW_USE_CUDA=0
--g -O0 -fPIC -msse3 -mf16c
--Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs
-```
diff --git a/scala-package/native/pom.xml b/scala-package/native/pom.xml
deleted file mode 100644
index cc8c1cc86daf..000000000000
--- a/scala-package/native/pom.xml
+++ /dev/null
@@ -1,185 +0,0 @@
- - - 4.0.0 - - org.apache.mxnet - mxnet-parent - INTERNAL - ../pom.xml - - - libmxnet-scala - MXNet Scala Package - Native - ${libtype} - - - ${project.parent.basedir}/.. - - - - - osx-x86_64 - - mac - - - - - org.codehaus.mojo - native-maven-plugin - true - - darwin - generic-classic - ${cxx} - ${cxx} - - - src/main/native - - org_apache_mxnet_native_c_api.cc - - - - - -std=c++0x - - - -I${MXNET_DIR}/include - -I${MXNET_DIR}/3rdparty/dmlc-core/include - -I${MXNET_DIR}/3rdparty/mshadow - -I${MXNET_DIR}/3rdparty/dlpack/include - -I${MXNET_DIR}/3rdparty/tvm/nnvm/include - -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0 - -g -O0 -fPIC -msse3 -mf16c - -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs - - - -shared - - - -framework JavaVM - -Wl,-exported_symbol,_Java_* - -Wl,-x - - - -Wl,-install_name,libmxnet-scala.jnilib -lmxnet -L${MXNET_DIR}/lib - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.6.0 - - - post-native-build - package - - exec - - - install_name_tool - -add_rpath @loader_path ${project.build.directory}/${project.artifactId}.jnilib - - - - - - - - - linux-x86_64 - - - unix - Linux - - - - - - org.codehaus.mojo - native-maven-plugin - true - - linux - generic-classic - ${cxx} - ${cxx} - - - src/main/native - - org_apache_mxnet_native_c_api.cc - - - - - -std=c++0x - - - -I${MXNET_DIR}/include - -I${MXNET_DIR}/3rdparty/dmlc-core/include - -I${MXNET_DIR}/3rdparty/mshadow - -I${MXNET_DIR}/3rdparty/dlpack/include - -I${MXNET_DIR}/3rdparty/tvm/nnvm/include - -DMSHADOW_USE_MKL=0 -DMSHADOW_USE_CUDA=0 - -O3 -DNDEBUG=1 -fPIC -msse3 -mf16c - -Wall -Wsign-compare -Wno-unused-parameter -Wno-unknown-pragmas -Wno-unused-local-typedefs - - - -shared - - - -Wl,-rpath=${dollar}ORIGIN -lmxnet -L${MXNET_DIR}/lib - - - - - - - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.6.0 - - - link-native-lib - generate-resources - - exec - - - bash - -c 'ln -sf ${MXNET_DIR}/lib/* ${project.build.directory}/' - - - - - - -
diff --git a/scala-package/native/src/main/native/jni_helper_func.h b/scala-package/native/src/main/native/jni_helper_func.h
deleted file mode 100644
index a30e94dda09b..000000000000
--- a/scala-package/native/src/main/native/jni_helper_func.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * Copyright (c) 2015 by Contributors
- * \file jni_helper_func.h
- * \brief Helper functions for operating JVM objects
- */
-#include <jni.h>
-
-#ifndef MXNET_JNICPP_MAIN_NATIVE_JNI_HELPER_FUNC_H_
-#define MXNET_JNICPP_MAIN_NATIVE_JNI_HELPER_FUNC_H_
-
-jlong GetLongField(JNIEnv *env, jobject obj) {
-  jclass refClass = env->FindClass("org/apache/mxnet/Base$RefLong");
-  jfieldID refFid = env->GetFieldID(refClass, "value", "J");
-  jlong ret = env->GetLongField(obj, refFid);
-  env->DeleteLocalRef(refClass);
-  return ret;
-}
-
-jint GetIntField(JNIEnv *env, jobject obj) {
-  jclass refClass = env->FindClass("org/apache/mxnet/Base$RefInt");
-  jfieldID refFid = env->GetFieldID(refClass, "value", "I");
-  jint ret = env->GetIntField(obj, refFid);
-  env->DeleteLocalRef(refClass);
-  return ret;
-}
-
-void SetIntField(JNIEnv *env, jobject obj, jint value) {
-  jclass refClass = env->FindClass("org/apache/mxnet/Base$RefInt");
-  jfieldID refFid = env->GetFieldID(refClass, "value", "I");
-  env->SetIntField(obj, refFid, value);
-  env->DeleteLocalRef(refClass);
-}
-
-void SetLongField(JNIEnv *env, jobject obj, jlong value) {
-  jclass refClass = env->FindClass("org/apache/mxnet/Base$RefLong");
-  jfieldID refFid = env->GetFieldID(refClass, "value", "J");
-  env->SetLongField(obj, refFid, value);
-  env->DeleteLocalRef(refClass);
-}
-
-void SetStringField(JNIEnv *env, jobject obj, const char *value) {
-  jclass refClass = env->FindClass("org/apache/mxnet/Base$RefString");
-  jfieldID refFid = env->GetFieldID(refClass, "value", "Ljava/lang/String;");
-  env->SetObjectField(obj, refFid, env->NewStringUTF(value));
-  env->DeleteLocalRef(refClass);
-}
-#endif  // MXNET_JNICPP_MAIN_NATIVE_JNI_HELPER_FUNC_H_
diff --git a/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.cc b/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.cc
deleted file mode 100644
index 26eea3dd062b..000000000000
--- a/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.cc
+++ /dev/null
@@ -1,2792 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2015 by Contributors - * \file org_apache_mxnet_native_c_api.cc - * \brief JNI function implementations - */ -#include "org_apache_mxnet_native_c_api.h" // generated by javah -#include -#include -#include -#include -#include <../src/common/cuda_utils.h> -#include -#include -#include -#include -#include -#include -#include "jni_helper_func.h" - -JavaVM *_jvm; - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_nativeLibInit - (JNIEnv *env, jobject obj) { - return env->GetJavaVM(&_jvm); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxListAllOpNames - (JNIEnv *env, jobject obj, jobject nameList) { - mx_uint outSize; - const char **outArray; - int ret = MXListAllOpNames(&outSize, &outArray); - - jclass listCls = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listCls, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - for (size_t i = 0; i < outSize; ++i) { - env->CallObjectMethod(nameList, listAppend, env->NewStringUTF(outArray[i])); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_nnGetOpHandle - (JNIEnv *env, jobject obj, jstring jopname, jobject jhandle) { - OpHandle handle; - const char *opname = env->GetStringUTFChars(jopname, 0); - int ret = NNGetOpHandle(opname, &handle); - env->ReleaseStringUTFChars(jopname, opname); - - jclass refClass = env->FindClass("org/apache/mxnet/Base$RefLong"); - jfieldID refFid = env->GetFieldID(refClass, "value", "J"); - env->SetLongField(jhandle, refFid, reinterpret_cast(handle)); - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayCreateNone - (JNIEnv *env, jobject obj, jobject ndArrayHandle) { - NDArrayHandle out; - int ret = MXNDArrayCreateNone(&out); - SetLongField(env, ndArrayHandle, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayCreateEx - (JNIEnv *env, jobject obj, jintArray shape, jint ndim, jint devType, - jint devId, jint delayAlloc, jint dtype, jobject ndArrayHandle) { - jint *shapeArr = env->GetIntArrayElements(shape, NULL); - NDArrayHandle out; - int ret = MXNDArrayCreateEx(reinterpret_cast(shapeArr), static_cast(ndim), - devType, devId, delayAlloc, dtype, &out); - env->ReleaseIntArrayElements(shape, shapeArr, 0); - SetLongField(env, ndArrayHandle, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayCreateSparseEx - (JNIEnv *env, jobject obj, jint storageType, jintArray shape, jint ndim, jint devType, - jint devId, jint delayAlloc, jint dtype, jint numAux, jintArray auxTypes, - jintArray auxNdims, jintArray auxShapes, jobject ndArrayHandle) { - jint *shapeArr = env->GetIntArrayElements(shape, NULL); - jint *auxTypesArr = env->GetIntArrayElements(auxTypes, NULL); - jint *auxNdimsArr = env->GetIntArrayElements(auxNdims, NULL); - jint *auxShapesArr = env->GetIntArrayElements(auxShapes, NULL); - NDArrayHandle out; - int ret = MXNDArrayCreateSparseEx(storageType, - reinterpret_cast(shapeArr), - static_cast(ndim), - devType, devId, 
delayAlloc, dtype, - static_cast(numAux), - reinterpret_cast(auxTypesArr), - reinterpret_cast(auxNdimsArr), - reinterpret_cast(auxShapesArr), &out); - env->ReleaseIntArrayElements(shape, shapeArr, 0); - env->ReleaseIntArrayElements(auxTypes, auxTypesArr, 0); - env->ReleaseIntArrayElements(auxNdims, auxNdimsArr, 0); - env->ReleaseIntArrayElements(auxShapes, auxShapesArr, 0); - SetLongField(env, ndArrayHandle, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayWaitAll(JNIEnv *env, jobject obj) { - return MXNDArrayWaitAll(); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayWaitToRead - (JNIEnv *env, jobject obj, jlong arrayPtr) { - return MXNDArrayWaitToRead(reinterpret_cast(arrayPtr)); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxListFunctions - (JNIEnv *env, jobject obj, jobject functions) { - jclass longCls = env->FindClass("java/lang/Long"); - jmethodID longConst = env->GetMethodID(longCls, "", "(J)V"); - - // scala.collection.mutable.ListBuffer append method - jclass listClass = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - - // Get function list - FunctionHandle *outArray; - mx_uint outSize; - int ret = MXListFunctions(&outSize, &outArray); - for (size_t i = 0; i < outSize; ++i) { - env->CallObjectMethod(functions, listAppend, - env->NewObject(longCls, longConst, outArray[i])); - } - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncDescribe - (JNIEnv *env, jobject obj, jlong funcPtr, jobject nUsedVars, - jobject nScalars, jobject nMutateVars, jobject typeMask) { - mx_uint numUseVars; - mx_uint numScalars; - mx_uint numMutateVars; - int type; - int ret = MXFuncDescribe(reinterpret_cast(funcPtr), &numUseVars, - &numScalars, &numMutateVars, &type); - - jclass refIntClass = env->FindClass("org/apache/mxnet/Base$RefInt"); - jfieldID value = env->GetFieldID(refIntClass, "value", "I"); - env->SetIntField(nUsedVars, value, static_cast(numUseVars)); - env->SetIntField(nScalars, value, static_cast(numScalars)); - env->SetIntField(nMutateVars, value, static_cast(numMutateVars)); - env->SetIntField(typeMask, value, static_cast(type)); - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncGetInfo - (JNIEnv *env, jobject obj, jlong funcPtr, jobject name, jobject desc, - jobject numArgs, jobject argNames, jobject argTypes, jobject argDescs) { - const char *cName; - const char *cDesc; - mx_uint cNumArgs; - const char **cArgNames; - const char **cArgTypes; - const char **cArgDescs; - int ret = MXFuncGetInfo(reinterpret_cast(funcPtr), - &cName, &cDesc, &cNumArgs, - &cArgNames, &cArgTypes, &cArgDescs); - - jclass refIntClass = env->FindClass("org/apache/mxnet/Base$RefInt"); - jfieldID valueInt = env->GetFieldID(refIntClass, "value", "I"); - - jclass refStringClass = env->FindClass("org/apache/mxnet/Base$RefString"); - jfieldID valueStr = env->GetFieldID(refStringClass, "value", "Ljava/lang/String;"); - - // scala.collection.mutable.ListBuffer append method - jclass listClass = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listClass, "$plus$eq", - "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - - env->SetObjectField(name, valueStr, env->NewStringUTF(cName)); - env->SetObjectField(desc, valueStr, env->NewStringUTF(cDesc)); - env->SetIntField(numArgs, valueInt, 
static_cast(cNumArgs)); - for (size_t i = 0; i < cNumArgs; ++i) { - env->CallObjectMethod(argNames, listAppend, env->NewStringUTF(cArgNames[i])); - env->CallObjectMethod(argTypes, listAppend, env->NewStringUTF(cArgTypes[i])); - env->CallObjectMethod(argDescs, listAppend, env->NewStringUTF(cArgDescs[i])); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxImperativeInvokeEx - (JNIEnv *env, jobject obj, jlong funcPtr, jlongArray inputs, - jlongArray outputsGiven, jobject outputs, jint numParams, - jobjectArray paramKeys, jobjectArray paramVals, jobject outStypes) { - - const char **cParamKeys = NULL; - const char **cParamVals = NULL; - if (numParams > 0) { - cParamKeys = new const char *[numParams]; - cParamVals = new const char *[numParams]; - for (int i = 0; i < numParams; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(paramKeys, i)); - const char *key = env->GetStringUTFChars(jkey, 0); - cParamKeys[i] = key; - env->DeleteLocalRef(jkey); - jstring jval = reinterpret_cast(env->GetObjectArrayElement(paramVals, i)); - const char *val = env->GetStringUTFChars(jval, 0); - cParamVals[i] = val; - env->DeleteLocalRef(jval); - } - } - - int numOutputs = 0; - jlong *cOutputsGiven = NULL; - NDArrayHandle *cOutputs = NULL; - const int *cOutStypes; - if (outputsGiven) { - cOutputsGiven = env->GetLongArrayElements(outputsGiven, NULL); - cOutputs = reinterpret_cast(cOutputsGiven); - numOutputs = static_cast(env->GetArrayLength(outputsGiven)); - } - jlong *cInputs = env->GetLongArrayElements(inputs, NULL); - jsize numInputs = env->GetArrayLength(inputs); - int ret = MXImperativeInvokeEx(reinterpret_cast(funcPtr), - static_cast(numInputs), - reinterpret_cast(cInputs), - &numOutputs, - &cOutputs, - static_cast(numParams), - cParamKeys, - cParamVals, - &cOutStypes); - env->ReleaseLongArrayElements(inputs, cInputs, 0); - - // release allocated memory - if (numParams > 0) { - for (int i = 0; i < numParams; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(paramKeys, i)); - env->ReleaseStringUTFChars(jkey, cParamKeys[i]); - env->DeleteLocalRef(jkey); - jstring jval = reinterpret_cast(env->GetObjectArrayElement(paramVals, i)); - env->ReleaseStringUTFChars(jval, cParamVals[i]); - env->DeleteLocalRef(jval); - } - delete[] cParamKeys; - delete[] cParamVals; - } - - if (cOutputs) { - jclass longCls = env->FindClass("java/lang/Long"); - jclass intCls = env->FindClass("java/lang/Integer"); - jmethodID longConst = env->GetMethodID(longCls, "", "(J)V"); - jmethodID intConst = env->GetMethodID(intCls, "", "(I)V"); - // scala.collection.mutable.ListBuffer append method - jclass listClass = env->FindClass("scala/collection/mutable/ArrayBuffer"); - jmethodID listAppend = env->GetMethodID(listClass, "$plus$eq", - "(Ljava/lang/Object;)Lscala/collection/mutable/ArrayBuffer;"); - for (int i = 0; i < numOutputs; ++i) { - env->CallObjectMethod(outputs, listAppend, - env->NewObject(longCls, longConst, - reinterpret_cast(cOutputs[i]))); - env->CallObjectMethod(outStypes, listAppend, - env->NewObject(intCls, intConst, - cOutStypes[i])); - } - } - - if (cOutputsGiven) { - env->ReleaseLongArrayElements(outputsGiven, cOutputsGiven, 0); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncInvoke - (JNIEnv *env, jobject obj, jlong funcPtr, jlongArray useVars, - jfloatArray scalarArgs, jlongArray mutateVars) { - jlong *cUseVars = env->GetLongArrayElements(useVars, NULL); - jfloat *cScalarArgs = env->GetFloatArrayElements(scalarArgs, NULL); 
- jlong *cMutateVars = env->GetLongArrayElements(mutateVars, NULL); - int ret = MXFuncInvoke(reinterpret_cast(funcPtr), - reinterpret_cast(cUseVars), - reinterpret_cast(cScalarArgs), - reinterpret_cast(cMutateVars)); - env->ReleaseLongArrayElements(useVars, cUseVars, 0); - env->ReleaseFloatArrayElements(scalarArgs, cScalarArgs, 0); - env->ReleaseLongArrayElements(mutateVars, cMutateVars, 0); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncInvokeEx - (JNIEnv *env, jobject obj, jlong funcPtr, jlongArray useVars, - jfloatArray scalarArgs, jlongArray mutateVars, - jint numParams, jobjectArray paramKeys, jobjectArray paramVals) { - jlong *cUseVars = env->GetLongArrayElements(useVars, NULL); - jfloat *cScalarArgs = env->GetFloatArrayElements(scalarArgs, NULL); - jlong *cMutateVars = env->GetLongArrayElements(mutateVars, NULL); - jbyte **cParamKeys = NULL; - jbyte **cParamVals = NULL; - if (numParams > 0) { - cParamKeys = new jbyte *[numParams]; - cParamVals = new jbyte *[numParams]; - for (int i = 0; i < numParams; i++) { - jbyteArray jkey = reinterpret_cast(env->GetObjectArrayElement(paramKeys, i)); - jbyte *cParamKey = env->GetByteArrayElements(jkey, NULL); - cParamKeys[i] = cParamKey; - env->DeleteLocalRef(jkey); - jbyteArray jval = reinterpret_cast(env->GetObjectArrayElement(paramVals, i)); - jbyte *cParamVal = env->GetByteArrayElements(jval, NULL); - cParamVals[i] = cParamVal; - env->DeleteLocalRef(jval); - } - } - int ret = MXFuncInvokeEx(reinterpret_cast(funcPtr), - reinterpret_cast(cUseVars), - reinterpret_cast(cScalarArgs), - reinterpret_cast(cMutateVars), - static_cast(numParams), - reinterpret_cast(cParamKeys), - reinterpret_cast(cParamVals)); - env->ReleaseLongArrayElements(useVars, cUseVars, 0); - env->ReleaseFloatArrayElements(scalarArgs, cScalarArgs, 0); - env->ReleaseLongArrayElements(mutateVars, cMutateVars, 0); - if (numParams > 0) { - for (int i = 0; i < numParams; i++) { - jbyteArray jkey = reinterpret_cast(env->GetObjectArrayElement(paramKeys, i)); - env->ReleaseByteArrayElements(jkey, cParamKeys[i], 0); - env->DeleteLocalRef(jkey); - jbyteArray jval = reinterpret_cast(env->GetObjectArrayElement(paramVals, i)); - env->ReleaseByteArrayElements(jval, cParamVals[i], 0); - env->DeleteLocalRef(jval); - } - delete[] cParamKeys; - delete[] cParamVals; - } - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySaveRawBytes - (JNIEnv *env, jobject obj, jlong ndArrayPtr, jobject dataBuf) { - size_t length; - const char *pdata; - int ret = MXNDArraySaveRawBytes(reinterpret_cast(ndArrayPtr), &length, &pdata); - - // fill dataBuf - jclass byteClass = env->FindClass("java/lang/Byte"); - jmethodID newByte = env->GetMethodID(byteClass, "", "(B)V"); - jclass arrayClass = env->FindClass("scala/collection/mutable/ArrayBuffer"); - jmethodID arrayAppend = env->GetMethodID(arrayClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ArrayBuffer;"); - for (size_t i = 0; i < length; ++i) { - jobject data = env->NewObject(byteClass, newByte, static_cast(pdata[i])); - env->CallObjectMethod(dataBuf, arrayAppend, data); - env->DeleteLocalRef(data); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayLoadFromRawBytes - (JNIEnv *env, jobject obj, jbyteArray bytes, jobject handleRef) { - int size = env->GetArrayLength(bytes); - jbyte *byteArr = env->GetByteArrayElements(bytes, NULL); - NDArrayHandle out; - int ret = MXNDArrayLoadFromRawBytes(reinterpret_cast(byteArr), - static_cast(size), &out); - 
env->ReleaseByteArrayElements(bytes, byteArr, 0); - SetLongField(env, handleRef, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetShape - (JNIEnv *env, jobject obj, jlong ndArrayPtr, jobject ndimRef, jobject dataBuf) { - int ndim; - const int *pdata; - int ret = MXNDArrayGetShapeEx(reinterpret_cast(ndArrayPtr), &ndim, &pdata); - - // fill dataBuf - jclass integerClass = env->FindClass("java/lang/Integer"); - jmethodID newInteger = env->GetMethodID(integerClass, "", "(I)V"); - - jclass arrayClass = env->FindClass("scala/collection/mutable/ArrayBuffer"); - jmethodID arrayAppend = env->GetMethodID(arrayClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ArrayBuffer;"); - for (int i = 0; i < ndim; ++i) { - jobject data = env->NewObject(integerClass, newInteger, pdata[i]); - env->CallObjectMethod(dataBuf, arrayAppend, data); - env->DeleteLocalRef(data); - } - - // set ndimRef - jclass refIntClass = env->FindClass("org/apache/mxnet/Base$RefInt"); - jfieldID valueInt = env->GetFieldID(refIntClass, "value", "I"); - env->SetIntField(ndimRef, valueInt, ndim); - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySyncCopyFromNDArray - (JNIEnv *env, jobject obj, jlong dstPtr, jlong srcPtr, jint locator) { - int ret = MXNDArraySyncCopyFromNDArray(reinterpret_cast(dstPtr), - reinterpret_cast(srcPtr), - locator); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySyncCopyToCPU - (JNIEnv *env, jobject obj, jlong ndArrayPtr, jbyteArray data, jint size) { - jbyte *pdata = env->GetByteArrayElements(data, NULL); - int ret = MXNDArraySyncCopyToCPU(reinterpret_cast(ndArrayPtr), - reinterpret_cast(pdata), size); - env->ReleaseByteArrayElements(data, pdata, 0); // copy back to java array automatically - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySlice - (JNIEnv *env, jobject obj, jlong ndArrayPtr, jint start, jint end, jobject slicedHandle) { - NDArrayHandle out; - int ret = MXNDArraySlice(reinterpret_cast(ndArrayPtr), start, end, &out); - SetLongField(env, slicedHandle, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayAt - (JNIEnv *env, jobject obj, jlong ndArrayPtr, jint idx, jobject jout) { - NDArrayHandle out; - int ret = MXNDArrayAt(reinterpret_cast(ndArrayPtr), idx, &out); - SetLongField(env, jout, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayReshape64 - (JNIEnv *env, jobject obj, jlong ndArrayPtr, jint ndim, - jlongArray dims, jboolean reverse, jobject reshapedHandle) { - NDArrayHandle out; - jlong *pdims = env->GetLongArrayElements(dims, NULL); - int ret = MXNDArrayReshape64(reinterpret_cast(ndArrayPtr), ndim, - reinterpret_cast(pdims), reverse, &out); - SetLongField(env, reshapedHandle, reinterpret_cast(out)); - env->ReleaseLongArrayElements(dims, pdims, 0); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySyncCopyFromCPU - (JNIEnv *env, jobject obj, jlong arrayPtr, jfloatArray sourceArr, jint arrSize) { - jfloat *sourcePtr = env->GetFloatArrayElements(sourceArr, NULL); - int ret = MXNDArraySyncCopyFromCPU(reinterpret_cast(arrayPtr), - static_cast(sourcePtr), arrSize); - env->ReleaseFloatArrayElements(sourceArr, sourcePtr, 0); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFloat64NDArraySyncCopyFromCPU - (JNIEnv *env, jobject obj, jlong arrayPtr, 
jdoubleArray sourceArr, jint arrSize) { - jdouble *sourcePtr = env->GetDoubleArrayElements(sourceArr, NULL); - int ret = MXNDArraySyncCopyFromCPU(reinterpret_cast(arrayPtr), - static_cast(sourcePtr), arrSize); - env->ReleaseDoubleArrayElements(sourceArr, sourcePtr, 0); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetDataNDArray - (JNIEnv *env, jobject obj, jlong arrayPtr, jobject ndArrayHandle) { - NDArrayHandle out; - int ret = MXNDArrayGetDataNDArray(reinterpret_cast(arrayPtr), - &out); - SetLongField(env, ndArrayHandle, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetAuxNDArray - (JNIEnv *env, jobject obj, jlong arrayPtr, jint location, jobject ndArrayHandle) { - NDArrayHandle out; - int ret = MXNDArrayGetAuxNDArray(reinterpret_cast(arrayPtr), - static_cast(location), - &out); - SetLongField(env, ndArrayHandle, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetContext - (JNIEnv *env, jobject obj, jlong arrayPtr, jobject devTypeId, jobject devId) { - int outDevType; - int outDevId; - int ret = MXNDArrayGetContext(reinterpret_cast(arrayPtr), &outDevType, &outDevId); - jclass refClass = env->FindClass("org/apache/mxnet/Base$RefInt"); - jfieldID refFid = env->GetFieldID(refClass, "value", "I"); - env->SetIntField(devTypeId, refFid, outDevType); - env->SetIntField(devId, refFid, outDevId); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayFree - (JNIEnv * env, jobject obj, jlong ndArrayHandle) { - return MXNDArrayFree(reinterpret_cast(ndArrayHandle)); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayLoad - (JNIEnv * env, jobject obj, jstring jfname, jobject joutSize, - jobject jhandles, jobject joutNameSize, jobject jnames) { - mx_uint outSize; - NDArrayHandle *outArr; - mx_uint outNameSize; - const char **outNames; - - const char *fname = env->GetStringUTFChars(jfname, 0); - int ret = MXNDArrayLoad(fname, &outSize, &outArr, &outNameSize, &outNames); - env->ReleaseStringUTFChars(jfname, fname); - - if (ret) { - return ret; - } - - // fill sizes - jclass refIntClass = env->FindClass("org/apache/mxnet/Base$RefInt"); - jfieldID valueInt = env->GetFieldID(refIntClass, "value", "I"); - env->SetIntField(joutSize, valueInt, outSize); - env->SetIntField(joutNameSize, valueInt, outNameSize); - - jclass arrayClass = env->FindClass("scala/collection/mutable/ArrayBuffer"); - jmethodID arrayAppend = env->GetMethodID(arrayClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ArrayBuffer;"); - - // fill handles - jclass longCls = env->FindClass("java/lang/Long"); - jmethodID longConst = env->GetMethodID(longCls, "", "(J)V"); - for (size_t i = 0; i < outSize; ++i) { - jobject handle = env->NewObject(longCls, longConst, outArr[i]); - env->CallObjectMethod(jhandles, arrayAppend, handle); - env->DeleteLocalRef(handle); - } - - // fill names - for (size_t i = 0; i < outNameSize; ++i) { - jstring jname = env->NewStringUTF(outNames[i]); - env->CallObjectMethod(jnames, arrayAppend, jname); - env->DeleteLocalRef(jname); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySave - (JNIEnv * env, jobject obj, jstring jfname, jlongArray jhandles, jobjectArray jkeys) { - int numArgs = env->GetArrayLength(jhandles); - const char **keys = NULL; - if (jkeys != NULL) { - keys = new const char *[numArgs]; - for (int i = 0; i < numArgs; i++) { - jstring jkey = 
reinterpret_cast(env->GetObjectArrayElement(jkeys, i)); - const char *key = env->GetStringUTFChars(jkey, 0); - keys[i] = key; - env->DeleteLocalRef(jkey); - } - } - - const char *fname = env->GetStringUTFChars(jfname, 0); - jlong *handles = env->GetLongArrayElements(jhandles, NULL); - - int ret = MXNDArraySave(fname, static_cast(numArgs), - reinterpret_cast(handles), keys); - - env->ReleaseLongArrayElements(jhandles, handles, 0); - env->ReleaseStringUTFChars(jfname, fname); - - // release allocated memory - if (jkeys != NULL) { - for (int i = 0; i < numArgs; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(jkeys, i)); - env->ReleaseStringUTFChars(jkey, keys[i]); - env->DeleteLocalRef(jkey); - } - delete[] keys; - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetDType - (JNIEnv * env, jobject obj, jlong jhandle, jobject jdtype) { - int dtype; - int ret = MXNDArrayGetDType(reinterpret_cast(jhandle), &dtype); - SetIntField(env, jdtype, dtype); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetStorageType - (JNIEnv * env, jobject obj, jlong jhandle, jobject jstype) { - int stype; - int ret = MXNDArrayGetStorageType(reinterpret_cast(jhandle), &stype); - SetIntField(env, jstype, stype); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxInitPSEnv - (JNIEnv *env, jobject obj, jobjectArray jkeys, jobjectArray jvals) { - // keys and values - int paramSize = env->GetArrayLength(jkeys); - const char** keys = new const char*[paramSize]; - const char** vals = new const char*[paramSize]; - jstring jkey, jval; - // use strcpy and release char* created by JNI inplace - for (int i = 0; i < paramSize; i++) { - jkey = reinterpret_cast(env->GetObjectArrayElement(jkeys, i)); - const char* ckey = env->GetStringUTFChars(jkey, 0); - keys[i] = ckey; - env->DeleteLocalRef(jkey); - - jval = reinterpret_cast(env->GetObjectArrayElement(jvals, i)); - const char* cval = env->GetStringUTFChars(jval, 0); - vals[i] = cval; - env->DeleteLocalRef(jval); - } - - int ret = MXInitPSEnv(static_cast(paramSize), - static_cast(keys), - static_cast(vals)); - - // release keys and vals - for (int i = 0; i < paramSize; i++) { - jstring key = reinterpret_cast(env->GetObjectArrayElement(jkeys, i)); - env->ReleaseStringUTFChars(key, keys[i]); - env->DeleteLocalRef(key); - - jstring value = reinterpret_cast(env->GetObjectArrayElement(jvals, i)); - env->ReleaseStringUTFChars(value, vals[i]); - env->DeleteLocalRef(value); - } - delete[] keys; - delete[] vals; - - return ret; -} - -extern "C" void KVStoreServerControllerFunc - (int head, const char *body, void *handle) { - jobject controllerObjGlb = static_cast(handle); - - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - - // find java controller method - jclass ctrlClass = env->GetObjectClass(controllerObjGlb); - jmethodID ctrlFunc = env->GetMethodID(ctrlClass, "invoke", "(ILjava/lang/String;)V"); - - jstring jbody = env->NewStringUTF(body); - env->CallVoidMethod(controllerObjGlb, ctrlFunc, head, jbody); - env->DeleteLocalRef(jbody); - - env->DeleteLocalRef(ctrlClass); - // FIXME(Yizhi): This function can be called multiple times, - // can we find a way to safely destroy this object ? 
- // env->DeleteGlobalRef(controllerObjGlb); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreRunServer - (JNIEnv *env, jobject obj, jlong kvStorePtr, jobject controllerObj) { - jobject controllerObjGlb = env->NewGlobalRef(controllerObj); - return MXKVStoreRunServer(reinterpret_cast(kvStorePtr), - KVStoreServerControllerFunc, - reinterpret_cast(controllerObjGlb)); -} - -extern "C" void KVStoreUpdaterCallbackFunc - (int key, NDArrayHandle recv, NDArrayHandle local, void *handle) { - jobject updaterFuncObjGlb = static_cast(handle); - - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - - // find java updater method - jclass updtClass = env->GetObjectClass(updaterFuncObjGlb); - jmethodID updtFunc = env->GetMethodID(updtClass, - "update", "(ILorg/apache/mxnet/NDArray;Lorg/apache/mxnet/NDArray;)V"); - - // find java NDArray constructor - jclass ndObjClass = env->FindClass("org/apache/mxnet/NDArray"); - jmethodID ndObjConstructor = env->GetMethodID(ndObjClass, "", "(JZZ)V"); - - jobject ndRecv = env->NewObject(ndObjClass, ndObjConstructor, - reinterpret_cast(recv), true); - jobject ndLocal = env->NewObject(ndObjClass, ndObjConstructor, - reinterpret_cast(local), true); - - env->CallVoidMethod(updaterFuncObjGlb, updtFunc, key, ndRecv, ndLocal); - - env->DeleteLocalRef(ndLocal); - env->DeleteLocalRef(ndRecv); - env->DeleteLocalRef(ndObjClass); - env->DeleteLocalRef(updtClass); - // FIXME(Yizhi): This function can be called multiple times, - // can we find a way to safely destroy this object ? - // env->DeleteGlobalRef(updaterFuncObjGlb); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreSetUpdater - (JNIEnv *env, jobject obj, jlong kvStorePtr, jobject updaterFuncObj) { - jobject updaterFuncObjGlb = env->NewGlobalRef(updaterFuncObj); - return MXKVStoreSetUpdater(reinterpret_cast(kvStorePtr), - KVStoreUpdaterCallbackFunc, - reinterpret_cast(updaterFuncObjGlb)); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreIsWorkerNode - (JNIEnv *env, jobject obj, jobject isWorkerRef) { - int isWorker; - int ret = MXKVStoreIsWorkerNode(&isWorker); - SetIntField(env, isWorkerRef, isWorker); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreCreate - (JNIEnv *env, jobject obj, jstring name, jobject kvStoreHandle) { - jclass refLongClass = env->FindClass("org/apache/mxnet/Base$RefLong"); - jfieldID refLongFid = env->GetFieldID(refLongClass, "value", "J"); - - KVStoreHandle out; - const char *type = env->GetStringUTFChars(name, 0); - int ret = MXKVStoreCreate(type, &out); - env->ReleaseStringUTFChars(name, type); - - env->SetLongField(kvStoreHandle, refLongFid, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreInit - (JNIEnv *env, jobject obj, jlong kvStorePtr, jint len, jintArray keys, jlongArray values) { - jint *keyArray = env->GetIntArrayElements(keys, NULL); - jlong *valueArray = env->GetLongArrayElements(values, NULL); - int ret = MXKVStoreInit(reinterpret_cast(kvStorePtr), - static_cast(len), - static_cast(keyArray), - reinterpret_cast(valueArray)); - env->ReleaseIntArrayElements(keys, keyArray, 0); - env->ReleaseLongArrayElements(values, valueArray, 0); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreInitEx - (JNIEnv *env, jobject obj, jlong kvStorePtr, jint len, jobjectArray keys, jlongArray values) { - const char **keyArray = new const char *[len]; - for (int i = 0; i < len; i++) { - jstring jkey = 
reinterpret_cast(env->GetObjectArrayElement(keys, i)); - const char *key = env->GetStringUTFChars(jkey, 0); - keyArray[i] = key; - env->DeleteLocalRef(jkey); - } - jlong *valueArray = env->GetLongArrayElements(values, NULL); - int ret = MXKVStoreInitEx(reinterpret_cast(kvStorePtr), - static_cast(len), - keyArray, - reinterpret_cast(valueArray)); - env->ReleaseLongArrayElements(values, valueArray, 0); - for (int i = 0; i < len; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(keys, i)); - env->ReleaseStringUTFChars(jkey, keyArray[i]); - env->DeleteLocalRef(jkey); - } - delete[] keyArray; - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePush - (JNIEnv *env, jobject obj, jlong kvStorePtr, jint len, jintArray keys, - jlongArray values, jint priority) { - jint *keyArray = env->GetIntArrayElements(keys, NULL); - jlong *valueArray = env->GetLongArrayElements(values, NULL); - int ret = MXKVStorePush(reinterpret_cast(kvStorePtr), - static_cast(len), - static_cast(keyArray), - reinterpret_cast(valueArray), - priority); - env->ReleaseLongArrayElements(values, valueArray, 0); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePushEx - (JNIEnv *env, jobject obj, jlong kvStorePtr, jint len, jobjectArray keys, - jlongArray values, jint priority) { - const char **keyArray = new const char *[len]; - for (int i = 0; i < len; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(keys, i)); - const char *key = env->GetStringUTFChars(jkey, 0); - keyArray[i] = key; - env->DeleteLocalRef(jkey); - } - jlong *valueArray = env->GetLongArrayElements(values, NULL); - int ret = MXKVStorePushEx(reinterpret_cast(kvStorePtr), - static_cast(len), - keyArray, - reinterpret_cast(valueArray), - priority); - env->ReleaseLongArrayElements(values, valueArray, 0); - for (int i = 0; i < len; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(keys, i)); - env->ReleaseStringUTFChars(jkey, keyArray[i]); - env->DeleteLocalRef(jkey); - } - delete[] keyArray; - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePull - (JNIEnv *env, jobject obj, jlong kvStorePtr, jint len, jintArray keys, - jlongArray outs, jint priority) { - jint *keyArray = env->GetIntArrayElements(keys, NULL); - jlong *outArray = env->GetLongArrayElements(outs, NULL); - int ret = MXKVStorePull(reinterpret_cast(kvStorePtr), - static_cast(len), - static_cast(keyArray), - reinterpret_cast(outArray), - priority); - env->ReleaseIntArrayElements(keys, keyArray, 0); - env->ReleaseLongArrayElements(outs, outArray, 0); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePullEx - (JNIEnv *env, jobject obj, jlong kvStorePtr, jint len, jobjectArray keys, - jlongArray outs, jint priority) { - const char **keyArray = new const char *[len]; - for (int i = 0; i < len; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(keys, i)); - const char *key = env->GetStringUTFChars(jkey, 0); - keyArray[i] = key; - env->DeleteLocalRef(jkey); - } - jlong *outArray = env->GetLongArrayElements(outs, NULL); - int ret = MXKVStorePullEx(reinterpret_cast(kvStorePtr), - static_cast(len), - keyArray, - reinterpret_cast(outArray), - priority); - env->ReleaseLongArrayElements(outs, outArray, 0); - for (int i = 0; i < len; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(keys, i)); - env->ReleaseStringUTFChars(jkey, keyArray[i]); - env->DeleteLocalRef(jkey); - } - delete[] keyArray; - return ret; -} - 
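Every string-keyed KVStore wrapper above (mxKVStoreInitEx, mxKVStorePushEx, mxKVStorePullEx) repeats the same marshalling dance: copy the `jobjectArray` of keys into a `const char **`, invoke the C API, then release each UTF buffer and local reference. A condensed sketch of just that pattern follows; `MXFakeCall` is a hypothetical stand-in for an MXNet C-API entry point, not a real function.

```c++
#include <jni.h>

// Hypothetical stand-in for a string-keyed MXNet C-API call (illustration only).
static int MXFakeCall(int num, const char **keys) {
  (void)num; (void)keys;
  return 0;
}

// The marshalling pattern used by the *Ex KVStore wrappers above.
// GetStringUTFChars pins a UTF-8 copy that stays valid until the matching
// ReleaseStringUTFChars; local refs are deleted eagerly inside the loops
// so long key arrays cannot overflow the JNI local-reference table.
static jint CallWithStringKeys(JNIEnv *env, jobjectArray jkeys) {
  jsize len = env->GetArrayLength(jkeys);
  const char **keys = new const char *[len];
  for (jsize i = 0; i < len; ++i) {
    jstring jkey = static_cast<jstring>(env->GetObjectArrayElement(jkeys, i));
    keys[i] = env->GetStringUTFChars(jkey, nullptr);
    env->DeleteLocalRef(jkey);
  }
  int ret = MXFakeCall(static_cast<int>(len), keys);
  // Re-fetch each element so the UTF buffer is released against the same
  // string object, mirroring the wrappers above.
  for (jsize i = 0; i < len; ++i) {
    jstring jkey = static_cast<jstring>(env->GetObjectArrayElement(jkeys, i));
    env->ReleaseStringUTFChars(jkey, keys[i]);
    env->DeleteLocalRef(jkey);
  }
  delete[] keys;
  return static_cast<jint>(ret);
}
```

Note that the integer-keyed mxKVStorePush above releases only its value array and never calls ReleaseIntArrayElements on the pinned key array; the sketch pairs every Get with a Release, which is the behavior the other wrappers in this file follow.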
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetType - (JNIEnv *env, jobject obj, jlong kvStorePtr, jobject kvType) { - const char *type; - int ret = MXKVStoreGetType(reinterpret_cast(kvStorePtr), &type); - jclass refStringClass = env->FindClass("org/apache/mxnet/Base$RefString"); - jfieldID valueStr = env->GetFieldID(refStringClass, "value", "Ljava/lang/String;"); - env->SetObjectField(kvType, valueStr, env->NewStringUTF(type)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreSendCommmandToServers - (JNIEnv *env, jobject obj, jlong kvStorePtr, jint head, jstring body) { - const char *bodyCStr = env->GetStringUTFChars(body, 0); - int ret = MXKVStoreSendCommmandToServers( - reinterpret_cast(kvStorePtr), head, bodyCStr); - env->ReleaseStringUTFChars(body, bodyCStr); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreBarrier - (JNIEnv *env, jobject obj, jlong kvStorePtr) { - return MXKVStoreBarrier((KVStoreHandle)kvStorePtr); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetGroupSize - (JNIEnv *env, jobject obj, jlong kvStorePtr, jobject sizeRef) { - int size; - int ret = MXKVStoreGetGroupSize(reinterpret_cast(kvStorePtr), &size); - SetIntField(env, sizeRef, size); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetRank - (JNIEnv *env, jobject obj, jlong kvStorePtr, jobject rankRef) { - int rank; - int ret = MXKVStoreGetRank(reinterpret_cast(kvStorePtr), &rank); - SetIntField(env, rankRef, rank); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetNumDeadNode - (JNIEnv * env, jobject obj, jlong kvStorePtr, jint nodeId, jobject numberRef) { - int number; - int ret = MXKVStoreGetNumDeadNode(reinterpret_cast(kvStorePtr), - static_cast(nodeId), - &number); - SetIntField(env, numberRef, number); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreSetBarrierBeforeExit - (JNIEnv * env, jobject obj, jlong kvStorePtr, jint doBarrier) { - return MXKVStoreSetBarrierBeforeExit(reinterpret_cast(kvStorePtr), - static_cast(doBarrier)); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreFree - (JNIEnv * env, jobject obj, jlong ptr) { - return MXKVStoreFree(reinterpret_cast(ptr)); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorOutputs - (JNIEnv *env, jobject obj, jlong executorPtr, jobject outputs) { - mx_uint outSize; - NDArrayHandle *out; - int ret = MXExecutorOutputs(reinterpret_cast(executorPtr), &outSize, &out); - - jclass longCls = env->FindClass("java/lang/Long"); - jmethodID longConst = env->GetMethodID(longCls, "", "(J)V"); - - // fill java outputs - jclass arrayClass = env->FindClass("scala/collection/mutable/ArrayBuffer"); - jmethodID arrayAppend = env->GetMethodID(arrayClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ArrayBuffer;"); - for (size_t i = 0; i < outSize; ++i) { - env->CallObjectMethod(outputs, arrayAppend, - env->NewObject(longCls, longConst, out[i])); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorFree - (JNIEnv * env, jobject obj, jlong ptr) { - return MXExecutorFree(reinterpret_cast(ptr)); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorForward - (JNIEnv * env, jobject obj, jlong ptr, jint isTrain) { - return MXExecutorForward(reinterpret_cast(ptr), static_cast(isTrain)); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorBackward - (JNIEnv * env, 
jobject obj, jlong executorPtr, jlongArray grads) { - int gradsSize = env->GetArrayLength(grads); - jlong *gradArr = env->GetLongArrayElements(grads, NULL); - int ret = MXExecutorBackward(reinterpret_cast(executorPtr), - static_cast(gradsSize), - reinterpret_cast(gradArr)); - env->ReleaseLongArrayElements(grads, gradArr, 0); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorReshape - (JNIEnv * env, jobject obj, - jint partialReshaping, jint allowUpSizing, jint devType, jint devId, - jobjectArray jmapKeys, jintArray jmapDevTypes, jintArray jmapDevIds, - jobjectArray jprovidedArgShapeNames, jintArray jprovidedArgShapeData, - jintArray jprovidedArgShapeIdx, jobject jrefInArgs, jobject jrefArgGrads, - jobject jrefAuxStates, jlong jsharedExec, jobject jrefOut) { - CHECK(jmapKeys != NULL); - CHECK(jprovidedArgShapeNames != NULL); - - int numMapKeys = env->GetArrayLength(jmapKeys); - jint *mapDevTypes = env->GetIntArrayElements(jmapDevTypes, NULL); - jint *mapDevIds = env->GetIntArrayElements(jmapDevIds, NULL); - const char **mapKeys = NULL; - if (numMapKeys > 0) { - mapKeys = new const char*[numMapKeys]; - for (int i = 0; i < numMapKeys; ++i) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(jmapKeys, i)); - mapKeys[i] = env->GetStringUTFChars(jkey, 0); - env->DeleteLocalRef(jkey); - } - } - - int numProvidedArgShapes = env->GetArrayLength(jprovidedArgShapeNames); - jint *providedArgShapeData = env->GetIntArrayElements(jprovidedArgShapeData, NULL); - jint *providedArgShapeIdx = env->GetIntArrayElements(jprovidedArgShapeIdx, NULL); - const char **providedArgShapeNames = NULL; - if (numProvidedArgShapes > 0) { - providedArgShapeNames = new const char*[numProvidedArgShapes]; - for (int i = 0; i < numProvidedArgShapes; ++i) { - jstring jkey = reinterpret_cast( - env->GetObjectArrayElement(jprovidedArgShapeNames, i)); - providedArgShapeNames[i] = env->GetStringUTFChars(jkey, 0); - env->DeleteLocalRef(jkey); - } - } - - mx_uint numInArgs = 0; - NDArrayHandle *inArgs; - NDArrayHandle *argGrads; - - mx_uint numAuxStates = 0; - NDArrayHandle *auxStates; - - ExecutorHandle out; - - int ret = MXExecutorReshapeEx(partialReshaping, - allowUpSizing, - devType, - devId, - static_cast(numMapKeys), - mapKeys, - static_cast(mapDevTypes), - static_cast(mapDevIds), - static_cast(numProvidedArgShapes), - providedArgShapeNames, - static_cast(providedArgShapeData), - reinterpret_cast(providedArgShapeIdx), - &numInArgs, - &inArgs, - &argGrads, - &numAuxStates, - &auxStates, - reinterpret_cast(jsharedExec), - &out); - - jclass longCls = env->FindClass("java/lang/Long"); - jmethodID newLong = env->GetMethodID(longCls, "", "(J)V"); - - jclass arrayClass = env->FindClass("scala/collection/mutable/ArrayBuffer"); - jmethodID arrayAppend = env->GetMethodID(arrayClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ArrayBuffer;"); - - for (size_t i = 0; i < numInArgs; ++i) { - jobject inArg = env->NewObject(longCls, newLong, inArgs[i]); - env->CallObjectMethod(jrefInArgs, arrayAppend, inArg); - env->DeleteLocalRef(inArg); - - jobject argGrad = env->NewObject(longCls, newLong, argGrads[i]); - env->CallObjectMethod(jrefArgGrads, arrayAppend, argGrad); - env->DeleteLocalRef(argGrad); - } - - for (size_t i = 0; i < numAuxStates; ++i) { - jobject auxState = env->NewObject(longCls, newLong, auxStates[i]); - env->CallObjectMethod(jrefAuxStates, arrayAppend, auxState); - env->DeleteLocalRef(auxState); - } - - SetLongField(env, jrefOut, reinterpret_cast(out)); - - // release 
allocated memory - for (int i = 0; i < numMapKeys; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(jmapKeys, i)); - env->ReleaseStringUTFChars(jkey, mapKeys[i]); - env->DeleteLocalRef(jkey); - } - if (mapKeys != NULL) { - delete[] mapKeys; - } - - for (int i = 0; i < numProvidedArgShapes; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(jprovidedArgShapeNames, i)); - env->ReleaseStringUTFChars(jkey, providedArgShapeNames[i]); - env->DeleteLocalRef(jkey); - } - if (providedArgShapeNames != NULL) { - delete[] providedArgShapeNames; - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorPrint - (JNIEnv * env, jobject obj, jlong ptr, jobject debugStr) { - const char *retDebugStr; - int ret = MXExecutorPrint(reinterpret_cast(ptr), &retDebugStr); - SetStringField(env, debugStr, retDebugStr); - return ret; -} - -extern "C" void ExecutorMonitorCallbackFunc - (const char *name, NDArrayHandle arr, void *handle) { - jobject callbackFuncObjGlb = static_cast(handle); - - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - - // find java callback method - jclass callbackClass = env->GetObjectClass(callbackFuncObjGlb); - jmethodID callbackFunc = env->GetMethodID(callbackClass, "invoke", "(Ljava/lang/String;J)V"); - - // invoke java callback method - jstring jname = env->NewStringUTF(name); - env->CallVoidMethod(callbackFuncObjGlb, callbackFunc, jname, reinterpret_cast(arr)); - env->DeleteLocalRef(jname); - - env->DeleteLocalRef(callbackClass); - // FIXME(Yizhi): This function can be called multiple times, - // can we find a way to safely destroy this global ref ? - // env->DeleteGlobalRef(callbackFuncObjGlb); -} -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorSetMonitorCallback - (JNIEnv *env, jobject obj, jlong executorPtr, jobject callbackFuncObj) { - jobject callbackFuncObjGlb = env->NewGlobalRef(callbackFuncObj); - return MXExecutorSetMonitorCallback(reinterpret_cast(executorPtr), - ExecutorMonitorCallbackFunc, - reinterpret_cast(callbackFuncObjGlb)); -} - -JNIEXPORT jstring JNICALL Java_org_apache_mxnet_LibInfo_mxGetLastError(JNIEnv * env, jobject obj) { - return env->NewStringUTF(MXGetLastError()); -} - -// IO funcs -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxListDataIters - (JNIEnv * env, jobject obj, jobject creators) { - jclass longCls = env->FindClass("java/lang/Long"); - jmethodID longConst = env->GetMethodID(longCls, "", "(J)V"); - - // scala.collection.mutable.ListBuffer append method - jclass listClass = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - - // Get function list - DataIterCreator *outArray; - mx_uint outSize; - int ret = MXListDataIters(&outSize, &outArray); - for (size_t i = 0; i < outSize; ++i) { - env->CallObjectMethod(creators, listAppend, - env->NewObject(longCls, longConst, - reinterpret_cast(outArray[i]))); - } - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterCreateIter - (JNIEnv * env, jobject obj, jlong creator, jobjectArray jkeys, - jobjectArray jvals, jobject dataIterHandleRef) { - // keys and values - int paramSize = env->GetArrayLength(jkeys); - const char** keys = new const char*[paramSize]; - const char** vals = new const char*[paramSize]; - jstring jkey, jval; - // use strcpy and release char* created by JNI inplace - for (int i = 0; i < paramSize; i++) { - jkey = 
reinterpret_cast(env->GetObjectArrayElement(jkeys, i)); - const char* ckey = env->GetStringUTFChars(jkey, 0); - keys[i] = ckey; - env->DeleteLocalRef(jkey); - - jval = reinterpret_cast(env->GetObjectArrayElement(jvals, i)); - const char* cval = env->GetStringUTFChars(jval, 0); - vals[i] = cval; - env->DeleteLocalRef(jval); - } - - // create iter - DataIterHandle out; - int ret = MXDataIterCreateIter(reinterpret_cast(creator), - static_cast(paramSize), - static_cast(keys), - static_cast(vals), - &out); - SetLongField(env, dataIterHandleRef, reinterpret_cast(out)); - - // release keys and vals - for (int i = 0; i < paramSize; i++) { - jstring key = reinterpret_cast(env->GetObjectArrayElement(jkeys, i)); - env->ReleaseStringUTFChars(key, keys[i]); - env->DeleteLocalRef(key); - - jstring value = reinterpret_cast(env->GetObjectArrayElement(jvals, i)); - env->ReleaseStringUTFChars(value, vals[i]); - env->DeleteLocalRef(value); - } - delete[] keys; - delete[] vals; - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetIterInfo - (JNIEnv * env, jobject obj, jlong creator, jobject jname, - jobject jdesc, jobject jargNames, jobject jargTypeInfos, jobject jargDescs) { - const char* name; - const char* description; - mx_uint numArgs; - const char** argNames; - const char** argTypeInfos; - const char** argDescs; - int ret = MXDataIterGetIterInfo(reinterpret_cast(creator), - &name, - &description, - &numArgs, - &argNames, - &argTypeInfos, - &argDescs); - - jclass refStringClass = env->FindClass("org/apache/mxnet/Base$RefString"); - jfieldID valueStr = env->GetFieldID(refStringClass, "value", "Ljava/lang/String;"); - // set params - env->SetObjectField(jname, valueStr, env->NewStringUTF(name)); - env->SetObjectField(jdesc, valueStr, env->NewStringUTF(description)); - jclass listClass = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - for (size_t i = 0; i < numArgs; i++) { - env->CallObjectMethod(jargNames, listAppend, env->NewStringUTF(argNames[i])); - env->CallObjectMethod(jargTypeInfos, listAppend, env->NewStringUTF(argTypeInfos[i])); - env->CallObjectMethod(jargDescs, listAppend, env->NewStringUTF(argDescs[i])); - } - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterFree - (JNIEnv *env, jobject obj, jlong handle) { - int ret = MXDataIterFree(reinterpret_cast(handle)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterBeforeFirst - (JNIEnv *env, jobject obj, jlong handle) { - int ret = MXDataIterBeforeFirst(reinterpret_cast(handle)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterNext - (JNIEnv *env, jobject obj, jlong handle, jobject out) { - int cout; - int ret = MXDataIterNext(reinterpret_cast(handle), &cout); - SetIntField(env, out, cout); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetLabel - (JNIEnv *env, jobject obj, jlong handle, jobject ndArrayHandleRef) { - NDArrayHandle out; - int ret = MXDataIterGetLabel(reinterpret_cast(handle), &out); - SetLongField(env, ndArrayHandleRef, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetData - (JNIEnv *env, jobject obj, jlong handle, jobject ndArrayHandleRef) { - NDArrayHandle out; - int ret = MXDataIterGetData(reinterpret_cast(handle), &out); - SetLongField(env, ndArrayHandleRef, 
reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetIndex - (JNIEnv *env, jobject obj, jlong handle, jobject outIndex, jobject outSize) { - uint64_t* coutIndex; - uint64_t coutSize; - int ret = MXDataIterGetIndex(reinterpret_cast(handle), &coutIndex, &coutSize); - // set field - SetLongField(env, outSize, static_cast(coutSize)); - // scala.collection.mutable.ListBuffer append method - jclass listClass = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - - // long class - jclass longCls = env->FindClass("java/lang/Long"); - jmethodID longConst = env->GetMethodID(longCls, "", "(J)V"); - - for (size_t i = 0; i < coutSize; i++) { - env->CallObjectMethod(outIndex, listAppend, - env->NewObject(longCls, longConst, coutIndex[i])); - } - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetPadNum - (JNIEnv *env, jobject obj, jlong handle, jobject pad) { - int cpad; - int ret = MXDataIterGetPadNum((DataIterHandle)handle, &cpad); - SetIntField(env, pad, cpad); - return ret; -} - -// Symbol functions -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolFree - (JNIEnv * env, jobject obj, jlong ptr) { - return MXSymbolFree((SymbolHandle) ptr); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAtomicSymbolCreators - (JNIEnv *env, jobject obj, jobject symbolList) { - mx_uint outSize; - AtomicSymbolCreator *outArray; - int ret = MXSymbolListAtomicSymbolCreators(&outSize, &outArray); - - jclass longCls = env->FindClass("java/lang/Long"); - jmethodID longConst = env->GetMethodID(longCls, "", "(J)V"); - - jclass listCls = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listCls, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - - for (size_t i = 0; i < outSize; ++i) { - env->CallObjectMethod(symbolList, listAppend, - env->NewObject(longCls, longConst, outArray[i])); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetAtomicSymbolInfo - (JNIEnv *env, jobject obj, jlong symbolPtr, jobject name, jobject desc, jobject numArgs, - jobject argNames, jobject argTypes, jobject argDescs, jobject keyVarNumArgs) { - - const char *cName; - const char *cDesc; - mx_uint cNumArgs; - const char **cArgNames; - const char **cArgTypes; - const char **cArgDescs; - const char *cKeyVarNumArgs; - - int ret = MXSymbolGetAtomicSymbolInfo(reinterpret_cast(symbolPtr), - &cName, &cDesc, &cNumArgs, - &cArgNames, &cArgTypes, &cArgDescs, - &cKeyVarNumArgs); - - jclass refIntClass = env->FindClass("org/apache/mxnet/Base$RefInt"); - jfieldID valueInt = env->GetFieldID(refIntClass, "value", "I"); - - jclass refStringClass = env->FindClass("org/apache/mxnet/Base$RefString"); - jfieldID valueStr = env->GetFieldID(refStringClass, "value", "Ljava/lang/String;"); - - // scala.collection.mutable.ListBuffer append method - jclass listClass = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listClass, "$plus$eq", - "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - - env->SetObjectField(name, valueStr, env->NewStringUTF(cName)); - env->SetObjectField(desc, valueStr, env->NewStringUTF(cDesc)); - env->SetObjectField(keyVarNumArgs, valueStr, env->NewStringUTF(cKeyVarNumArgs)); - env->SetIntField(numArgs, valueInt, 
static_cast(cNumArgs)); - for (size_t i = 0; i < cNumArgs; ++i) { - env->CallObjectMethod(argNames, listAppend, env->NewStringUTF(cArgNames[i])); - env->CallObjectMethod(argTypes, listAppend, env->NewStringUTF(cArgTypes[i])); - env->CallObjectMethod(argDescs, listAppend, env->NewStringUTF(cArgDescs[i])); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateAtomicSymbol - (JNIEnv *env, jobject obj, jlong symbolPtr, jobjectArray paramKeys, - jobjectArray paramVals, jobject symbolRef) { - int paramSize = env->GetArrayLength(paramKeys); - const char **keys = new const char*[paramSize]; - const char **vals = new const char*[paramSize]; - for (int i = 0; i < paramSize; i++) { - jstring key = reinterpret_cast(env->GetObjectArrayElement(paramKeys, i)); - const char *rawKey = env->GetStringUTFChars(key, 0); - keys[i] = rawKey; - env->DeleteLocalRef(key); - - jstring value = reinterpret_cast(env->GetObjectArrayElement(paramVals, i)); - const char *rawValue = env->GetStringUTFChars(value, 0); - vals[i] = rawValue; - env->DeleteLocalRef(value); - } - - SymbolHandle out; - int ret = MXSymbolCreateAtomicSymbol(reinterpret_cast(symbolPtr), - static_cast(paramSize), keys, vals, &out); - SetLongField(env, symbolRef, reinterpret_cast(out)); - - // release keys and vals - for (int i = 0; i < paramSize; i++) { - jstring key = reinterpret_cast(env->GetObjectArrayElement(paramKeys, i)); - env->ReleaseStringUTFChars(key, keys[i]); - env->DeleteLocalRef(key); - - jstring value = reinterpret_cast(env->GetObjectArrayElement(paramVals, i)); - env->ReleaseStringUTFChars(value, vals[i]); - env->DeleteLocalRef(value); - } - delete[] keys; - delete[] vals; - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolSetAttr - (JNIEnv *env, jobject obj, jlong symbolPtr, jstring jkey, jstring jvalue) { - const char *ckey = env->GetStringUTFChars(jkey, 0); - const char *cvalue = env->GetStringUTFChars(jvalue, 0); - int ret = MXSymbolSetAttr(reinterpret_cast(symbolPtr), ckey, cvalue); - env->ReleaseStringUTFChars(jkey, ckey); - env->ReleaseStringUTFChars(jvalue, cvalue); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAttrShallow - (JNIEnv *env, jobject obj, jlong symbolPtr, jobject joutSize, jobject jout) { - mx_uint outSize; - const char** out; - - int ret = MXSymbolListAttrShallow(reinterpret_cast(symbolPtr), &outSize, &out); - - jclass refIntClass = env->FindClass("org/apache/mxnet/Base$RefInt"); - jfieldID valueInt = env->GetFieldID(refIntClass, "value", "I"); - env->SetIntField(joutSize, valueInt, static_cast(outSize)); - - jclass arrayClass = env->FindClass("scala/collection/mutable/ArrayBuffer"); - jmethodID arrayAppend = env->GetMethodID(arrayClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ArrayBuffer;"); - for (size_t i = 0; i < outSize * 2; ++i) { - jstring jtmp = env->NewStringUTF(out[i]); - env->CallObjectMethod(jout, arrayAppend, jtmp); - env->DeleteLocalRef(jtmp); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAttr - (JNIEnv *env, jobject obj, jlong symbolPtr, jobject joutSize, jobject jout) { - mx_uint outSize; - const char** out; - - int ret = MXSymbolListAttr(reinterpret_cast(symbolPtr), &outSize, &out); - - jclass refIntClass = env->FindClass("org/apache/mxnet/Base$RefInt"); - jfieldID valueInt = env->GetFieldID(refIntClass, "value", "I"); - env->SetIntField(joutSize, valueInt, static_cast(outSize)); - - jclass arrayClass = 
env->FindClass("scala/collection/mutable/ArrayBuffer"); - jmethodID arrayAppend = env->GetMethodID(arrayClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ArrayBuffer;"); - for (size_t i = 0; i < outSize * 2; ++i) { - jstring jtmp = env->NewStringUTF(out[i]); - env->CallObjectMethod(jout, arrayAppend, jtmp); - env->DeleteLocalRef(jtmp); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCompose - (JNIEnv *env, jobject obj, jlong symbolPtr, jstring jname, - jobjectArray jkeys, jlongArray jargs) { - int argSize = env->GetArrayLength(jargs); - const char **keys = NULL; - if (jkeys != NULL) { - keys = new const char*[argSize]; - for (int i = 0; i < argSize; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(jkeys, i)); - const char *key = env->GetStringUTFChars(jkey, 0); - keys[i] = key; - env->DeleteLocalRef(jkey); - } - } - jlong *args = env->GetLongArrayElements(jargs, NULL); - const char *name = env->GetStringUTFChars(jname, 0); - int ret = MXSymbolCompose(reinterpret_cast(symbolPtr), - name, static_cast(argSize), keys, - reinterpret_cast(args)); - env->ReleaseStringUTFChars(jname, name); - env->ReleaseLongArrayElements(jargs, args, 0); - // release allocated memory - if (jkeys != NULL) { - for (int i = 0; i < argSize; i++) { - jstring jkey = (jstring) env->GetObjectArrayElement(jkeys, i); - env->ReleaseStringUTFChars(jkey, keys[i]); - env->DeleteLocalRef(jkey); - } - delete[] keys; - } - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateVariable - (JNIEnv *env, jobject obj, jstring jname, jobject handle) { - SymbolHandle out; - const char *name = env->GetStringUTFChars(jname, 0); - int ret = MXSymbolCreateVariable(name, &out); - env->ReleaseStringUTFChars(jname, name); - SetLongField(env, handle, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetAttr - (JNIEnv *env, jobject obj, jlong symbolPtr, jstring jkey, jobject retRef, jobject successRef) { - const char *out; - int success; - const char *key = env->GetStringUTFChars(jkey, 0); - int ret = MXSymbolGetAttr(reinterpret_cast(symbolPtr), key, &out, &success); - env->ReleaseStringUTFChars(jkey, key); - - SetStringField(env, retRef, out); - SetIntField(env, successRef, success); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListArguments - (JNIEnv *env, jobject obj, jlong symbolPtr, jobject arguments) { - mx_uint outSize; - const char **outStrArray; - int ret = MXSymbolListArguments( - reinterpret_cast(symbolPtr), &outSize, &outStrArray); - - jclass arrayClass = env->FindClass("scala/collection/mutable/ArrayBuffer"); - jmethodID arrayAppend = env->GetMethodID(arrayClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ArrayBuffer;"); - for (size_t i = 0; i < outSize; i++) { - jstring argument = env->NewStringUTF(outStrArray[i]); - env->CallObjectMethod(arguments, arrayAppend, argument); - env->DeleteLocalRef(argument); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListOutputs - (JNIEnv *env, jobject obj, jlong symbolPtr, jobject outputs) { - mx_uint outSize; - const char **outStrArray; - int ret = MXSymbolListOutputs(reinterpret_cast(symbolPtr), &outSize, &outStrArray); - - jclass arrayClass = env->FindClass("scala/collection/mutable/ArrayBuffer"); - jmethodID arrayAppend = env->GetMethodID(arrayClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ArrayBuffer;"); - for (size_t i 
= 0; i < outSize; i++) { - jstring output = env->NewStringUTF(outStrArray[i]); - env->CallObjectMethod(outputs, arrayAppend, output); - env->DeleteLocalRef(output); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAuxiliaryStates - (JNIEnv *env, jobject obj, jlong symbolPtr, jobject outputs) { - mx_uint outSize; - const char **outStrArray; - int ret = MXSymbolListAuxiliaryStates( - reinterpret_cast(symbolPtr), &outSize, &outStrArray); - - jclass arrayClass = env->FindClass("scala/collection/mutable/ArrayBuffer"); - jmethodID arrayAppend = env->GetMethodID(arrayClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ArrayBuffer;"); - for (size_t i = 0; i < outSize; i++) { - jstring output = env->NewStringUTF(outStrArray[i]); - env->CallObjectMethod(outputs, arrayAppend, output); - env->DeleteLocalRef(output); - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCopy - (JNIEnv *env, jobject obj, jlong symbolPtr, jobject clonedSymbolRef) { - SymbolHandle clonedSymbol; - int ret = MXSymbolCopy(reinterpret_cast(symbolPtr), &clonedSymbol); - SetLongField(env, clonedSymbolRef, reinterpret_cast(clonedSymbol)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateGroup - (JNIEnv *env, jobject obj, jlongArray jsymbols, jobject out) { - int numSymbols = env->GetArrayLength(jsymbols); - SymbolHandle handle; - jlong *symbols = env->GetLongArrayElements(jsymbols, NULL); - int ret = MXSymbolCreateGroup(numSymbols, reinterpret_cast(symbols), &handle); - env->ReleaseLongArrayElements(jsymbols, symbols, 0); - SetLongField(env, out, reinterpret_cast(handle)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolPrint - (JNIEnv *env, jobject obj, jlong symbolPtr, jobject out) { - const char *outStr; - int ret = MXSymbolPrint(reinterpret_cast(symbolPtr), &outStr); - SetStringField(env, out, outStr); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetOutput - (JNIEnv *env, jobject obj, jlong symbolPtr, jint index, jobject jout) { - SymbolHandle out; - int ret = MXSymbolGetOutput(reinterpret_cast(symbolPtr), - static_cast(index), &out); - SetLongField(env, jout, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetInternals - (JNIEnv *env, jobject obj, jlong symbolPtr, jobject jout) { - SymbolHandle out; - int ret = MXSymbolGetInternals(reinterpret_cast(symbolPtr), &out); - SetLongField(env, jout, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolInferType - (JNIEnv *env, jobject obj, jlong symbolPtr, jobjectArray jkeys, jintArray jvals, - jobject jargTypeData, jobject joutTypeData, jobject jauxTypeData, jobject jcomplete) { - int numArgs = env->GetArrayLength(jvals); - const char **keys = NULL; - if (jkeys != NULL) { - keys = new const char *[numArgs]; - for (int i = 0; i < numArgs; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(jkeys, i)); - const char *key = env->GetStringUTFChars(jkey, 0); - keys[i] = key; - env->DeleteLocalRef(jkey); - } - } - - mx_uint inTypeSize; - const int *inTypeData; - mx_uint outTypeSize; - const int *outTypeData; - mx_uint auxTypeSize; - const int *auxTypeData; - int complete; - - jint *vals = env->GetIntArrayElements(jvals, NULL); - int ret = MXSymbolInferType(reinterpret_cast(symbolPtr), - static_cast(numArgs), keys, - static_cast(vals), - &inTypeSize, &inTypeData, - 
&outTypeSize, &outTypeData, - &auxTypeSize, &auxTypeData, - &complete); - env->ReleaseIntArrayElements(jvals, vals, 0); - - jclass integerClass = env->FindClass("java/lang/Integer"); - jmethodID newInteger = env->GetMethodID(integerClass, "", "(I)V"); - - jclass listClass = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - - for (size_t i = 0; i < inTypeSize; ++i) { - jobject data = env->NewObject(integerClass, newInteger, inTypeData[i]); - env->CallObjectMethod(jargTypeData, listAppend, data); - env->DeleteLocalRef(data); - } - for (size_t i = 0; i < outTypeSize; ++i) { - jobject data = env->NewObject(integerClass, newInteger, outTypeData[i]); - env->CallObjectMethod(joutTypeData, listAppend, data); - env->DeleteLocalRef(data); - } - for (size_t i = 0; i < auxTypeSize; ++i) { - jobject data = env->NewObject(integerClass, newInteger, auxTypeData[i]); - env->CallObjectMethod(jauxTypeData, listAppend, data); - env->DeleteLocalRef(data); - } - - SetIntField(env, jcomplete, complete); - - // release allocated memory - if (jkeys != NULL) { - for (int i = 0; i < numArgs; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(jkeys, i)); - env->ReleaseStringUTFChars(jkey, keys[i]); - env->DeleteLocalRef(jkey); - } - delete[] keys; - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolSaveToJSON - (JNIEnv *env, jobject obj, jlong symbolPtr, jobject jout) { - const char *out; - int ret = MXSymbolSaveToJSON(reinterpret_cast(symbolPtr), &out); - SetStringField(env, jout, out); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateFromJSON - (JNIEnv *env, jobject obj, jstring json, jobject jhandleRef) { - const char *str = env->GetStringUTFChars(json, 0); - SymbolHandle out; - int ret = MXSymbolCreateFromJSON(str, &out); - SetLongField(env, jhandleRef, reinterpret_cast(out)); - env->ReleaseStringUTFChars(json, str); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolSaveToFile - (JNIEnv *env, jobject obj, jlong symbolPtr, jstring jfname) { - const char *fname = env->GetStringUTFChars(jfname, 0); - int ret = MXSymbolSaveToFile(reinterpret_cast(symbolPtr), fname); - env->ReleaseStringUTFChars(jfname, fname); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateFromFile - (JNIEnv *env, jobject obj, jstring jfname, jobject jhandleRef) { - const char *fname = env->GetStringUTFChars(jfname, 0); - SymbolHandle out; - int ret = MXSymbolCreateFromFile(fname, &out); - SetLongField(env, jhandleRef, reinterpret_cast(out)); - env->ReleaseStringUTFChars(jfname, fname); - return ret; -} - -int FillSymbolInferShape - (JNIEnv *env, jmethodID listAppend, jobject joutData, - int shapeSize, const int *shapeNdim, const int **shapeData) { - for (int i = 0; i < shapeSize; ++i) { - jintArray jshape = NULL; - if (shapeNdim[i] >= 0) { - jshape = env->NewIntArray(shapeNdim[i]); - if (jshape == NULL) { - // TODO(Yizhi): out of memory error thrown, return a specific error code ? 
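// [Illustrative sketch, not from the original file] NewIntArray returning NULL
// means the JVM has already queued an OutOfMemoryError on this thread. The usual
// recovery, if the caller reports failure through the int return code instead of
// letting the Java exception propagate, looks like:
//
//   jintArray jshape = env->NewIntArray(ndim);
//   if (jshape == NULL) {
//     if (env->ExceptionCheck()) {  // pending OutOfMemoryError
//       env->ExceptionDescribe();   // log it
//       env->ExceptionClear();      // clear it so later JNI calls stay legal
//     }
//     return -1;                    // the sentinel this helper uses
//   }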
- return -1; - } - env->SetIntArrayRegion(jshape, 0, shapeNdim[i], reinterpret_cast(shapeData[i])); - } - env->CallObjectMethod(joutData, listAppend, jshape); - env->DeleteLocalRef(jshape); - } - return 0; -} - -int SymbolInferShapeHelper(JNIEnv *env, jobject obj, jlong symbolPtr, jint jnumArgs, - jobjectArray jkeys, jintArray jargIndPtr, jintArray jargShapeData, - jobject jinShapeData, jobject joutShapeData, jobject jauxShapeData, - jobject jcomplete, bool partial) { - const char **keys = NULL; - if (jkeys != NULL) { - keys = new const char *[jnumArgs]; - for (int i = 0; i < jnumArgs; i++) { - jstring jkey = (jstring) env->GetObjectArrayElement(jkeys, i); - const char *key = env->GetStringUTFChars(jkey, 0); - keys[i] = key; - env->DeleteLocalRef(jkey); - } - } - - mx_uint inShapeSize; - const int *inShapeNdim; - const int **inShapeData; - - mx_uint outShapeSize; - const int *outShapeNdim; - const int **outShapeData; - - mx_uint auxShapeSize; - const int *auxShapeNdim; - const int **auxShapeData; - - int complete; - - jint *argIndPtr = env->GetIntArrayElements(jargIndPtr, NULL); - jint *argShapeData = env->GetIntArrayElements(jargShapeData, NULL); - int ret; - if (!partial) { - ret = MXSymbolInferShapeEx(reinterpret_cast(symbolPtr), - static_cast(jnumArgs), - keys, - reinterpret_cast(argIndPtr), - reinterpret_cast(argShapeData), - &inShapeSize, - &inShapeNdim, - &inShapeData, - &outShapeSize, - &outShapeNdim, - &outShapeData, - &auxShapeSize, - &auxShapeNdim, - &auxShapeData, - &complete); - } else { - ret = MXSymbolInferShapePartialEx(reinterpret_cast(symbolPtr), - static_cast(jnumArgs), - keys, - reinterpret_cast(argIndPtr), - reinterpret_cast(argShapeData), - &inShapeSize, - &inShapeNdim, - &inShapeData, - &outShapeSize, - &outShapeNdim, - &outShapeData, - &auxShapeSize, - &auxShapeNdim, - &auxShapeData, - &complete); - } - env->ReleaseIntArrayElements(jargShapeData, argShapeData, 0); - env->ReleaseIntArrayElements(jargIndPtr, argIndPtr, 0); - - if (ret == 0) { - jclass listClass = env->FindClass("scala/collection/mutable/ListBuffer"); - jmethodID listAppend = env->GetMethodID(listClass, - "$plus$eq", "(Ljava/lang/Object;)Lscala/collection/mutable/ListBuffer;"); - - if (FillSymbolInferShape( - env, listAppend, jinShapeData, inShapeSize, inShapeNdim, inShapeData)) { - // TODO(Yizhi): out of memory error thrown, return a specific error code ? - return -1; - } - if (FillSymbolInferShape( - env, listAppend, joutShapeData, outShapeSize, outShapeNdim, outShapeData)) { - // TODO(Yizhi): out of memory error thrown, return a specific error code ? - return -1; - } - if (FillSymbolInferShape( - env, listAppend, jauxShapeData, auxShapeSize, auxShapeNdim, auxShapeData)) { - // TODO(Yizhi): out of memory error thrown, return a specific error code ? 
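// [Illustrative sketch, not from the original file] The argIndPtr/argShapeData
// pair consumed by MXSymbolInferShapeEx above is a CSR-style encoding: shape k
// occupies argShapeData[argIndPtr[k] .. argIndPtr[k+1]). For example, the two
// shapes (2, 3) and (5,) flatten to:
//
//   jint argIndPtr[]    = {0, 2, 3};  // numArgs + 1 offsets
//   jint argShapeData[] = {2, 3, 5};  // all dimensions, concatenated in order
//
// so shape k has argIndPtr[k + 1] - argIndPtr[k] dimensions.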
- return -1; - } - - SetIntField(env, jcomplete, complete); - } - - // release allocated memory - if (jkeys != NULL) { - for (int i = 0; i < jnumArgs; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(jkeys, i)); - env->ReleaseStringUTFChars(jkey, keys[i]); - env->DeleteLocalRef(jkey); - } - delete[] keys; - } - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolInferShape - (JNIEnv *env, jobject obj, jlong symbolPtr, jint jnumArgs, jobjectArray jkeys, - jintArray jargIndPtr, jintArray jargShapeData, - jobject jinShapeData, jobject joutShapeData, jobject jauxShapeData, jobject jcomplete) { - - return SymbolInferShapeHelper(env, obj, symbolPtr, jnumArgs, jkeys, jargIndPtr, jargShapeData, - jinShapeData, joutShapeData, jauxShapeData, jcomplete, false); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolInferShapePartial - (JNIEnv *env, jobject obj, jlong symbolPtr, jint jnumArgs, jobjectArray jkeys, - jintArray jargIndPtr, jintArray jargShapeData, - jobject jinShapeData, jobject joutShapeData, jobject jauxShapeData, jobject jcomplete) { - - return SymbolInferShapeHelper(env, obj, symbolPtr, jnumArgs, jkeys, jargIndPtr, jargShapeData, - jinShapeData, joutShapeData, jauxShapeData, jcomplete, true); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorBindX - (JNIEnv *env, jobject obj, jlong symbolPtr, jint deviceTypeId, jint deviceID, jint numCtx, - jobjectArray jctxMapKeys, jintArray jctxMapDevTypes, jintArray jctxMapDevIDs, jint numArgs, - jlongArray jargsHandle, jlongArray jargsGradHandle, jintArray jreqsArray, - jlongArray jauxArgsHandle, jobject jexecOut) { - ExecutorHandle out; - int auxStatesLen = env->GetArrayLength(jauxArgsHandle); - - const char **mapKeys = new const char *[numCtx]; - for (int i = 0; i < numCtx; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(jctxMapKeys, i)); - const char *key = env->GetStringUTFChars(jkey, 0); - mapKeys[i] = key; - env->DeleteLocalRef(jkey); - } - jlong *auxStates = env->GetLongArrayElements(jauxArgsHandle, NULL); - jint *gradReqType = env->GetIntArrayElements(jreqsArray, NULL); - jlong *inArgs = env->GetLongArrayElements(jargsHandle, NULL); - jlong *argGradStore = env->GetLongArrayElements(jargsGradHandle, NULL); - jint *mapDevTypes = env->GetIntArrayElements(jctxMapDevTypes, NULL); - jint *mapDevIDs = env->GetIntArrayElements(jctxMapDevIDs, NULL); - int ret = MXExecutorBindX(reinterpret_cast(symbolPtr), - deviceTypeId, - deviceID, - static_cast(numCtx), - mapKeys, - mapDevTypes, - mapDevIDs, - static_cast(numArgs), - reinterpret_cast(inArgs), - reinterpret_cast(argGradStore), - reinterpret_cast(gradReqType), - static_cast(auxStatesLen), - reinterpret_cast(auxStates), - &out); - env->ReleaseIntArrayElements(jctxMapDevIDs, mapDevIDs, 0); - env->ReleaseIntArrayElements(jctxMapDevTypes, mapDevTypes, 0); - env->ReleaseLongArrayElements(jargsGradHandle, argGradStore, 0); - env->ReleaseLongArrayElements(jargsHandle, inArgs, 0); - env->ReleaseIntArrayElements(jreqsArray, gradReqType, 0); - env->ReleaseLongArrayElements(jauxArgsHandle, auxStates, 0); - for (int i = 0; i < numCtx; i++) { - jstring jkey = (jstring) env->GetObjectArrayElement(jctxMapKeys, i); - env->ReleaseStringUTFChars(jkey, mapKeys[i]); - env->DeleteLocalRef(jkey); - } - delete[] mapKeys; - - SetLongField(env, jexecOut, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorBindEX - (JNIEnv *env, jobject obj, jlong symbolPtr, jint 
deviceTypeId, jint deviceID, jint numCtx, - jobjectArray jctxMapKeys, jintArray jctxMapDevTypes, jintArray jctxMapDevIDs, jint numArgs, - jlongArray jargsHandle, jlongArray jargsGradHandle, jintArray jreqsArray, - jlongArray jauxArgsHandle, jlong jsharedExec, jobject jexecOut) { - ExecutorHandle out; - int auxStatesLen = env->GetArrayLength(jauxArgsHandle); - ExecutorHandle sharedExec = nullptr; - if ((int32_t)jsharedExec != 0) sharedExec = reinterpret_cast(jsharedExec); - - const char **mapKeys = new const char *[numCtx]; - for (int i = 0; i < numCtx; i++) { - jstring jkey = reinterpret_cast(env->GetObjectArrayElement(jctxMapKeys, i)); - const char *key = env->GetStringUTFChars(jkey, 0); - mapKeys[i] = key; - env->DeleteLocalRef(jkey); - } - jlong *auxStates = env->GetLongArrayElements(jauxArgsHandle, NULL); - jint *gradReqType = env->GetIntArrayElements(jreqsArray, NULL); - jlong *inArgs = env->GetLongArrayElements(jargsHandle, NULL); - jlong *argGradStore = env->GetLongArrayElements(jargsGradHandle, NULL); - jint *mapDevTypes = env->GetIntArrayElements(jctxMapDevTypes, NULL); - jint *mapDevIDs = env->GetIntArrayElements(jctxMapDevIDs, NULL); - int ret = MXExecutorBindEX(reinterpret_cast(symbolPtr), - deviceTypeId, - deviceID, - static_cast(numCtx), - mapKeys, - mapDevTypes, - mapDevIDs, - static_cast(numArgs), - reinterpret_cast(inArgs), - reinterpret_cast(argGradStore), - reinterpret_cast(gradReqType), - static_cast(auxStatesLen), - reinterpret_cast(auxStates), - sharedExec, - &out); - env->ReleaseIntArrayElements(jctxMapDevIDs, mapDevIDs, 0); - env->ReleaseIntArrayElements(jctxMapDevTypes, mapDevTypes, 0); - env->ReleaseLongArrayElements(jargsGradHandle, argGradStore, 0); - env->ReleaseLongArrayElements(jargsHandle, inArgs, 0); - env->ReleaseIntArrayElements(jreqsArray, gradReqType, 0); - env->ReleaseLongArrayElements(jauxArgsHandle, auxStates, 0); - for (int i = 0; i < numCtx; i++) { - jstring jkey = (jstring) env->GetObjectArrayElement(jctxMapKeys, i); - env->ReleaseStringUTFChars(jkey, mapKeys[i]); - env->DeleteLocalRef(jkey); - } - delete[] mapKeys; - - SetLongField(env, jexecOut, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRandomSeed - (JNIEnv *env, jobject obj, jint seed) { - return MXRandomSeed(seed); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNotifyShutdown - (JNIEnv *env, jobject obj) { - return MXNotifyShutdown(); -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterCreate - (JNIEnv *env, jobject obj, jstring juri, jobject handle) { - RecordIOHandle out; - const char *uri = env->GetStringUTFChars(juri, 0); - int ret = MXRecordIOWriterCreate(uri, &out); - env->ReleaseStringUTFChars(juri, uri); - SetLongField(env, handle, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderCreate - (JNIEnv *env, jobject obj, jstring juri, jobject handle) { - RecordIOHandle out; - const char *uri = env->GetStringUTFChars(juri, 0); - int ret = MXRecordIOReaderCreate(uri, &out); - env->ReleaseStringUTFChars(juri, uri); - SetLongField(env, handle, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterFree - (JNIEnv *env, jobject obj, jlong handle) { - RecordIOHandle recordIOHandle = reinterpret_cast(handle); - int ret = MXRecordIOWriterFree(recordIOHandle); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderFree - (JNIEnv *env, jobject obj, jlong 
handle) {
-  RecordIOHandle recordIOHandle = reinterpret_cast<RecordIOHandle>(handle);
-  int ret = MXRecordIOReaderFree(&recordIOHandle);
-  return ret;
-}
-
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterWriteRecord
-  (JNIEnv *env, jobject obj, jlong handle, jstring jbuf, jint size) {
-  const char *buf = env->GetStringUTFChars(jbuf, 0);
-  RecordIOHandle *recordIOHandle = reinterpret_cast<RecordIOHandle *>(handle);
-  int ret = MXRecordIOWriterWriteRecord(recordIOHandle, buf, size);
-  env->ReleaseStringUTFChars(jbuf, buf);
-  return ret;
-}
-
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderReadRecord
-  (JNIEnv *env, jobject obj, jlong handle, jobject buf) {
-  RecordIOHandle *recordIOHandle = reinterpret_cast<RecordIOHandle *>(handle);
-  size_t size;
-  char const *out;
-  int ret = MXRecordIOReaderReadRecord(recordIOHandle, &out, &size);
-  SetStringField(env, buf, out);
-  return ret;
-}
-
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterTell
-  (JNIEnv *env, jobject obj, jlong handle, jobject jpos) {
-  RecordIOHandle *recordIOHandle = reinterpret_cast<RecordIOHandle *>(handle);
-  size_t pos;
-  int ret = MXRecordIOWriterTell(recordIOHandle, &pos);
-  SetIntField(env, jpos, pos);
-  return ret;
-}
-
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderSeek
-  (JNIEnv *env, jobject obj, jlong handle, jint pos) {
-  RecordIOHandle *recordIOHandle = reinterpret_cast<RecordIOHandle *>(handle);
-  int ret = MXRecordIOReaderSeek(recordIOHandle, pos);
-  return ret;
-}
-
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRtcCreate
-  (JNIEnv *env, jobject obj, jstring jname, jobjectArray jinputNames,
-   jobjectArray joutputNames, jlongArray jinputs, jlongArray joutputs,
-   jstring jkernel, jobject jhandle) {
-  RtcHandle out;
-  char *name = const_cast<char *>(env->GetStringUTFChars(jname, 0));
-  int num_input = env->GetArrayLength(jinputNames);
-  char **inputNames = new char *[num_input];
-  for (int i = 0; i < num_input; i++) {
-    jstring jinname = reinterpret_cast<jstring>(env->GetObjectArrayElement(jinputNames, i));
-    char *inname = const_cast<char *>(env->GetStringUTFChars(jinname, 0));
-    inputNames[i] = inname;
-    env->DeleteLocalRef(jinname);
-  }
-  int num_output = env->GetArrayLength(joutputNames);
-  char **outputNames = new char *[num_output];
-  for (int i = 0; i < num_output; i++) {
-    jstring joutname = reinterpret_cast<jstring>(env->GetObjectArrayElement(joutputNames, i));
-    char *outname = const_cast<char *>(env->GetStringUTFChars(joutname, 0));
-    outputNames[i] = outname;
-    env->DeleteLocalRef(joutname);
-  }
-  jlong *inputs = env->GetLongArrayElements(jinputs, NULL);
-  jlong *outputs = env->GetLongArrayElements(joutputs, NULL);
-  char *kernel = const_cast<char *>(env->GetStringUTFChars(jkernel, 0));
-
-  int ret = MXRtcCreate(name,
-                        static_cast<mx_uint>(num_input),
-                        static_cast<mx_uint>(num_output),
-                        inputNames,
-                        outputNames,
-                        reinterpret_cast<NDArrayHandle *>(inputs),
-                        reinterpret_cast<NDArrayHandle *>(outputs),
-                        kernel,
-                        &out);
-
-  // release allocated memory
-  env->ReleaseStringUTFChars(jname, name);
-  env->ReleaseStringUTFChars(jkernel, kernel);
-  env->ReleaseLongArrayElements(jinputs, inputs, 0);
-  env->ReleaseLongArrayElements(joutputs, outputs, 0);
-  for (int i = 0; i < num_input; i++) {
-    jstring jinname = reinterpret_cast<jstring>(env->GetObjectArrayElement(jinputNames, i));
-    env->ReleaseStringUTFChars(jinname, inputNames[i]);
-    env->DeleteLocalRef(jinname);
-  }
-  delete[] inputNames;
-  for (int i = 0; i < num_output; i++) {
-    jstring joutname = reinterpret_cast<jstring>(env->GetObjectArrayElement(joutputNames, i));
-    env->ReleaseStringUTFChars(joutname, outputNames[i]);
-    env->DeleteLocalRef(joutname);
-  }
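// [Illustrative sketch, not from the original file] The acquire/release pairs in
// mxRtcCreate are exactly what the JNIString/JNIStringArray RAII helpers defined
// further down in this file encapsulate. Hypothetical usage, assuming the helpers
// were hoisted above this point (and modulo a const_cast, since MXRtcCreate takes
// mutable char **):
//
//   {
//     JNIStringArray inNames(env, jinputNames);   // acquires every UTF-8 buffer
//     JNIStringArray outNames(env, joutputNames);
//     // ... call MXRtcCreate with inNames() / outNames() ...
//   }                                             // destructors release them all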
- delete[] outputNames; - - SetLongField(env, jhandle, reinterpret_cast(out)); - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRtcPush - (JNIEnv *env, jobject obj, jlong jhandle, jlongArray jinputs, - jlongArray joutputs, jint gridDimX, jint gridDimY, jint gridDimZ, - jint blockDimX, jint blockDimY, jint blockDimZ) { - - RtcHandle handle = reinterpret_cast(jhandle); - jlong *inputs = env->GetLongArrayElements(jinputs, NULL); - jlong *outputs = env->GetLongArrayElements(joutputs, NULL); - int num_input = env->GetArrayLength(jinputs); - int num_output = env->GetArrayLength(joutputs); - - int ret = MXRtcPush(handle, - static_cast(num_input), - static_cast(num_output), - reinterpret_cast(inputs), - reinterpret_cast(outputs), - static_cast(gridDimX), - static_cast(gridDimY), - static_cast(gridDimZ), - static_cast(blockDimX), - static_cast(blockDimY), - static_cast(blockDimZ)); - - // release allocated memory - env->ReleaseLongArrayElements(jinputs, inputs, 0); - env->ReleaseLongArrayElements(joutputs, outputs, 0); - - return ret; -} - -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRtcFree - (JNIEnv *env, jobject obj, jlong jhandle) { - RtcHandle handle = reinterpret_cast(jhandle); - int ret = MXRtcFree(handle); - return ret; -} - -// store the user defined CustomOpProp object reference with its name -std::unordered_map globalOpPropMap; -// store the user defined CustomOp object reference with its name -std::unordered_map globalOpMap; -// used for thread safty when insert elements into -// or erase elements from the std::unordered_map -std::mutex mutex_opprop; -std::mutex mutex_op; - -// Registers a custom operator when called -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxCustomOpRegister - (JNIEnv *env, jobject obj, jstring jregName, jobject jopProp) { - const char *regName = env->GetStringUTFChars(jregName, 0); - std::string key(regName); - - std::unique_lock lock(mutex_opprop); - globalOpPropMap.insert({ key, env->NewGlobalRef(jopProp) }); - lock.unlock(); - - // lambda function to initialize the operator and create all callbacks - auto creatorLambda = [](const char *opType, const int numKwargs, - const char **keys, const char **values, MXCallbackList *ret) { - int success = true; - - std::string opPropKey(opType); - if (globalOpPropMap.find(opPropKey) == globalOpPropMap.end()) { - LOG(WARNING) << "CustomOpProp: " << opPropKey << " not found"; - success = false; - } else { - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - jclass opPropClass = env->GetObjectClass(globalOpPropMap.at(opPropKey)); - jmethodID midInit = env->GetMethodID(opPropClass, - "init", "([Ljava/lang/String;[Ljava/lang/String;)V"); - if (NULL == midInit) { - LOG(WARNING) << "could not find CustomOpProp method init."; - success = false; - } else { - // call init and set CustomOpProp.kwargs - jclass strCls = env->FindClass("Ljava/lang/String;"); - jobjectArray keysArr = env->NewObjectArray(numKwargs, strCls, NULL); - jobjectArray valuesArr = env->NewObjectArray(numKwargs, strCls, NULL); - for (int i = 0; i < numKwargs; ++i) { - jstring keyStr = env->NewStringUTF(keys[i]); - jstring valueStr = env->NewStringUTF(values[i]); - env->SetObjectArrayElement(keysArr, i, keyStr); - env->SetObjectArrayElement(valuesArr, i, valueStr); - env->DeleteLocalRef(keyStr); - env->DeleteLocalRef(valueStr); - } - env->CallVoidMethod(globalOpPropMap.at(opPropKey), midInit, keysArr, valuesArr); - env->DeleteLocalRef(keysArr); - env->DeleteLocalRef(valuesArr); - } - 
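// [Illustrative sketch, not from the original file] Every callback in this
// section that can fire on an engine worker thread uses the same envelope:
// attach the thread to the JVM to get a valid JNIEnv, make the Java calls,
// then detach. Minimal form, assuming the cached JavaVM pointer (_jvm) that
// this file initializes elsewhere:
//
//   JNIEnv *env;
//   if (_jvm->AttachCurrentThread(reinterpret_cast<void **>(&env), NULL) != JNI_OK)
//     return false;                  // could not enter the JVM
//   // ... CallObjectMethod / CallBooleanMethod are valid here ...
//   _jvm->DetachCurrentThread();     // required, otherwise the thread stays pinned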
_jvm->DetachCurrentThread(); - } - - // list_arguments callback - auto opPropListArgument = [](char ***args, void *state) { - int success = true; - std::string key(reinterpret_cast(state)); - if (globalOpPropMap.find(key) == globalOpPropMap.end()) { - LOG(WARNING) << "CustomOpProp: " << key << " not found"; - success = false; - } else { - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - jclass opPropClass = env->GetObjectClass(globalOpPropMap.at(key)); - jmethodID midListArguments = env->GetMethodID( - opPropClass, "listArguments", "()[Ljava/lang/String;"); - if (NULL == midListArguments) { - LOG(WARNING) << "could not find opProp method listArguments."; - success = false; - } else { - jobjectArray jargs =(jobjectArray)(env->CallObjectMethod( - globalOpPropMap.at(key), midListArguments)); - int len = env->GetArrayLength(jargs); - *args = new char *[len+1]; - for (int i = 0; i < len; ++i) { - jstring jarg = reinterpret_cast(env->GetObjectArrayElement(jargs, i)); - const char *arg = env->GetStringUTFChars(jarg, 0); - (*args)[i] = const_cast(arg); - env->DeleteLocalRef(jarg); - } - (*args)[len] = NULL; - } - _jvm->DetachCurrentThread(); - } - return success; - }; - - // list_outputs callback - auto opPropListOutputs = [](char ***outputs, void *state) { - int success = true; - std::string key(reinterpret_cast(state)); - if (globalOpPropMap.find(key) == globalOpPropMap.end()) { - LOG(WARNING) << "CustomOpProp: " << key << " not found"; - success = false; - } else { - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - jclass opPropClass = env->GetObjectClass(globalOpPropMap.at(key)); - jmethodID midListOutputs = env->GetMethodID( - opPropClass, "listOutputs", "()[Ljava/lang/String;"); - if (NULL == midListOutputs) { - LOG(WARNING) << "could not find opProp method listOutputs."; - success = false; - } else { - jobjectArray joutputs = (jobjectArray)(env->CallObjectMethod( - globalOpPropMap.at(key), midListOutputs)); - int len = env->GetArrayLength(joutputs); - *outputs = new char *[len + 1]; - for (int i = 0; i < len; ++i) { - jstring joutput = reinterpret_cast(env->GetObjectArrayElement(joutputs, i)); - const char *output = env->GetStringUTFChars(joutput, 0); - (*outputs)[i] = const_cast(output); - env->DeleteLocalRef(joutput); - } - (*outputs)[len] = NULL; - } - _jvm->DetachCurrentThread(); - } - return success; - }; - - // list_auxiliary_states callback - auto opPropListAuxStates = [](char ***auxs, void *state) { - int success = true; - std::string key(reinterpret_cast(state)); - if (globalOpPropMap.find(key) == globalOpPropMap.end()) { - LOG(WARNING) << "CustomOpProp: " << key << " not found"; - success = false; - } else { - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - jclass opPropClass = env->GetObjectClass(globalOpPropMap.at(key)); - jmethodID midListAuxStates = env->GetMethodID( - opPropClass, "listAuxiliaryStates", "()[Ljava/lang/String;"); - if (NULL == midListAuxStates) { - LOG(WARNING) << "could not find opProp method listAuxiliaryStates."; - success = false; - } else { - auto obj = env->CallObjectMethod(globalOpPropMap.at(key), midListAuxStates); - if (obj != NULL) { - jobjectArray jauxs = (jobjectArray)obj; - int len = env->GetArrayLength(jauxs); - *auxs = new char *[len+1]; - for (int i = 0; i < len; ++i) { - jstring jaux = reinterpret_cast(env->GetObjectArrayElement(jauxs, i)); - const char *aux = env->GetStringUTFChars(jaux, 0); - (*auxs)[i] = const_cast(aux); - env->DeleteLocalRef(jaux); - } - 
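// [Illustrative sketch, not from the original file] The list_* callbacks hand
// the engine a NULL-terminated char **, so the array is sized len + 1 and the
// engine walks entries until it hits NULL. In miniature:
//
//   char **names = new char *[2 + 1];
//   names[0] = const_cast<char *>("weight");
//   names[1] = const_cast<char *>("bias");
//   names[2] = NULL;                 // terminator the engine stops on
//
// The GetStringUTFChars buffers stored this way are never released, which is
// part of the leak flagged by the TODO(eric) notes further down.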
(*auxs)[len] = NULL; - } else { - (*auxs) = new char *[1]; - (*auxs)[0] = NULL; - } - } - _jvm->DetachCurrentThread(); - } - return success; - }; - - // declare_backward_dependency callback - auto opPropDeclareBkDep = [](const int *outGrad, const int *inData, - const int *outData, int *numDeps, int **rdeps, void *state) { - int success = true; - std::string key(reinterpret_cast(state)); - if (globalOpPropMap.find(key) == globalOpPropMap.end()) { - LOG(WARNING) << "CustomOpProp: " << key << " not found"; - success = false; - } else { - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - jclass opPropClass = env->GetObjectClass(globalOpPropMap.at(key)); - jmethodID midDeclareBkDep = env->GetMethodID( - opPropClass, "declareBackwardDependency", "([I[I[I)[I"); - if (NULL == midDeclareBkDep) { - LOG(WARNING) << "could not find opProp method declareBackwardDependency."; - success = false; - } else { - jmethodID midListOutputs = env->GetMethodID( - opPropClass, "listOutputs", "()[Ljava/lang/String;"); - jobjectArray joutputs = (jobjectArray)(env->CallObjectMethod( - globalOpPropMap.at(key), midListOutputs)); - int outLen = env->GetArrayLength(joutputs); - jmethodID midListArguments = env->GetMethodID( - opPropClass, "listArguments", "()[Ljava/lang/String;"); - jobjectArray jargs = (jobjectArray)(env->CallObjectMethod( - globalOpPropMap.at(key), midListArguments)); - int intLen = env->GetArrayLength(jargs); - - jintArray outGradArr = env->NewIntArray(outLen); - env->SetIntArrayRegion(outGradArr, (jsize)0, (jsize)outLen, outGrad); - jintArray inDataArr = env->NewIntArray(intLen); - env->SetIntArrayRegion(inDataArr, (jsize)0, (jsize)intLen, inData); - jintArray outDataArr = env->NewIntArray(outLen); - env->SetIntArrayRegion(outDataArr, (jsize)0, (jsize)outLen, outData); - - auto obj = env->CallObjectMethod(globalOpPropMap.at(key), midDeclareBkDep, - outGradArr, - inDataArr, - outDataArr); - jintArray jrdeps = (jintArray)obj; - jint *rdepsArr = env->GetIntArrayElements(jrdeps, NULL); - - *numDeps = env->GetArrayLength(jrdeps); - *rdeps = new int[(* numDeps)]; - for (int i = 0 ; i < (*numDeps); ++i) { - (*rdeps)[i] = rdepsArr[i]; - } - env->DeleteLocalRef(outGradArr); - env->DeleteLocalRef(inDataArr); - env->DeleteLocalRef(outDataArr); - env->ReleaseIntArrayElements(jrdeps, rdepsArr, 0); - } - _jvm->DetachCurrentThread(); - } - return success; - }; - - // infer_shape callback - auto opPropInferShape = [](int numInput, int *ndims, - unsigned **shapes, void *state) { - int success = true; - std::string key(reinterpret_cast(state)); - if (globalOpPropMap.find(key) == globalOpPropMap.end()) { - LOG(WARNING) << "CustomOpProp: " << key << " not found"; - success = false; - } else { - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - jclass opPropClass = env->GetObjectClass(globalOpPropMap.at(key)); - jmethodID midInferShape = env->GetMethodID(opPropClass, "inferShapeEntry", "(I[[I)[[I"); - if (NULL == midInferShape) { - LOG(WARNING) << "could not find opProp method inferShapeEntry."; - success = false; - } else { - jmethodID midListArguments = env->GetMethodID( - opPropClass, "listArguments", "()[Ljava/lang/String;"); - jobjectArray jargs = (jobjectArray)(env->CallObjectMethod( - globalOpPropMap.at(key), midListArguments)); - int intLen = env->GetArrayLength(jargs); - jintArray *ts = new jintArray[intLen]; - auto tmp = env->NewIntArray(1); - jclass arrayClass = env->GetObjectClass(tmp); - env->DeleteLocalRef(tmp); - jobjectArray tensorShapes = 
env->NewObjectArray(intLen, arrayClass, NULL); - for (int i = 0; i < intLen; ++i) { - ts[i] = env->NewIntArray(ndims[i]); - env->SetIntArrayRegion( - ts[i], (jsize)0, (jsize)ndims[i], reinterpret_cast(shapes[i])); - env->SetObjectArrayElement(tensorShapes, i, (jobject)(ts[i])); - } - jobjectArray ret = (jobjectArray)(env->CallObjectMethod( - globalOpPropMap.at(key), midInferShape, - numInput, - tensorShapes)); - for (int i = 0; i < numInput; ++i) { - jintArray jarr = reinterpret_cast(env->GetObjectArrayElement(ret, i)); - int len = env->GetArrayLength(jarr); - jint *arr = env->GetIntArrayElements(jarr, NULL); - ndims[i] = len; - shapes[i] = new unsigned[len]; - for (int j = 0; j < len; ++j) shapes[i][j] = (unsigned)(arr[j]); - env->DeleteLocalRef(jarr); - } - for (int i = 0; i < intLen; ++i) { - env->DeleteLocalRef(ts[i]); - } - delete[] ts; - } - _jvm->DetachCurrentThread(); - } - return success; - }; - - // infer_type callback - auto opPropInferType = [](int numInput, int* types, void* state) { - int success = true; - std::string key(reinterpret_cast(state)); - if (globalOpPropMap.find(key) == globalOpPropMap.end()) { - LOG(WARNING) << "CustomOpProp: " << key << " not found"; - success = false; - } else { - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - jclass opPropClass = env->GetObjectClass(globalOpPropMap.at(key)); - jmethodID midInferType = env->GetMethodID(opPropClass, "inferTypeEntry", "(I[I)[I"); - if (NULL == midInferType) { - LOG(WARNING) << "could not find opProp method inferTypeEntry."; - success = false; - } else { - jmethodID midListArguments = env->GetMethodID( - opPropClass, "listArguments", "()[Ljava/lang/String;"); - jobjectArray jargs = (jobjectArray)(env->CallObjectMethod( - globalOpPropMap.at(key), midListArguments)); - - int intLen = env->GetArrayLength(jargs); - jintArray ts = env->NewIntArray(intLen); - int *tmp = new int[intLen]; - for (int i = 0; i < intLen; ++i) tmp[i] = types[i]; - env->SetIntArrayRegion(ts, (jsize)0, (jsize)intLen, tmp); - - jintArray ret = (jintArray)(env->CallObjectMethod( - globalOpPropMap.at(key), midInferType, - numInput, - ts)); - jint *arr = env->GetIntArrayElements(ret, NULL); - for (int i = 0; i < numInput; ++i) { - types[i] = static_cast(arr[i]); - } - - delete[] tmp; - env->ReleaseIntArrayElements(ret, arr, 0); - env->DeleteLocalRef(ret); - env->DeleteLocalRef(ts); - } - _jvm->DetachCurrentThread(); - } - return success; - }; - - // create_operator callback - auto opPropCreateOp = [](const char *ctx, int numInputs, - unsigned **shapes, int *ndims, int *dtypes, MXCallbackList *ret, void *state) { - int success = true; - std::string key(reinterpret_cast(state)); - if (globalOpPropMap.find(key) == globalOpPropMap.end()) { - LOG(WARNING) << "CustomOpProp: " << key << " not found"; - success = false; - } else { - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - jclass opPropClass = env->GetObjectClass(globalOpPropMap.at(key)); - jmethodID midCreateOp = env->GetMethodID( - opPropClass, "createOperator", "(Ljava/lang/String;[[I[I)Lorg/apache/mxnet/CustomOp;"); - if (NULL == midCreateOp) { - LOG(WARNING) << "could not find opProp method createOperator."; - success = false; - } else { - jstring jctx = env->NewStringUTF(ctx); - jintArray *ts = new jintArray[numInputs]; - auto tmp = env->NewIntArray(1); - jclass arrayClass = env->GetObjectClass(tmp); - env->DeleteLocalRef(tmp); - jobjectArray inputShapes = env->NewObjectArray(numInputs, arrayClass, NULL); - for (int i = 0; i < numInputs; 
++i) { - ts[i] = env->NewIntArray(ndims[i]); - env->SetIntArrayRegion( - ts[i], (jsize)0, (jsize)ndims[i], reinterpret_cast(shapes[i])); - env->SetObjectArrayElement(inputShapes, i, (jobject)(ts[i])); - } - jintArray jdtypes = env->NewIntArray(numInputs); - env->SetIntArrayRegion(jdtypes, (jsize)0, (jsize)numInputs, dtypes); - // get operator - jobject jOp = env->CallObjectMethod(globalOpPropMap.at(key), midCreateOp, - jctx, - inputShapes, - jdtypes); - env->DeleteLocalRef(jctx); - for (int i = 0; i < numInputs; ++i) { - env->DeleteLocalRef(ts[i]); - } - delete[] ts; - - std::unique_lock lock(mutex_op); - globalOpMap.insert({ key, env->NewGlobalRef(jOp) }); - lock.unlock(); - - _jvm->DetachCurrentThread(); - - // forward callback - auto forwardEntry = [](int size, void **ptrs, int *tags, - const int *reqs, const int isTrain, void *state) { - std::string key(reinterpret_cast(state)); - int success = true; - if (globalOpMap.find(key) == globalOpMap.end()) { - LOG(WARNING) << "op: " << key << " not found"; - success = false; - } else { - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - jclass opClass = env->GetObjectClass(globalOpMap.at(key)); - jmethodID midForward = env->GetMethodID(opClass, "forwardEntry", "(I[J[I[IZ)Z"); - if (NULL == midForward) { - LOG(WARNING) << "could not find op method forwardEntry."; - success = false; - } else { - jintArray tagsArr = env->NewIntArray(size); - env->SetIntArrayRegion(tagsArr, (jsize)0, (jsize)size, tags); - int reqSize = 0; - for (int i = 0; i < size; ++i) { - if (tags[i] == 1) reqSize++; - } - jintArray reqsArr = env->NewIntArray(reqSize); - env->SetIntArrayRegion(reqsArr, (jsize)0, (jsize)reqSize, reqs); - jlongArray ptrsArr = env->NewLongArray(size); - env->SetLongArrayRegion( - ptrsArr, (jsize)0, (jsize)size, reinterpret_cast(ptrs)); -#if MXNET_USE_CUDA - mxnet::NDArray* tmp = reinterpret_cast(ptrs[0]); - if (tmp->ctx().dev_type == mxnet::Context::kGPU - || tmp->ctx().dev_type == mxnet::Context::kCPUPinned) { - CUDA_CALL(cudaSetDevice(tmp->ctx().dev_id)); - } -#endif - bool is_train = true; - if (isTrain == 0) is_train = false; - success = env->CallBooleanMethod(globalOpMap.at(key), midForward, - size, - ptrsArr, - tagsArr, - reqsArr, - is_train); - env->DeleteLocalRef(tagsArr); - env->DeleteLocalRef(reqsArr); - env->DeleteLocalRef(ptrsArr); - } - _jvm->DetachCurrentThread(); - } - return success; - }; - - // backward callback - auto backwardEntry = [](int size, void **ptrs, int *tags, - const int *reqs, const int isTrain, void *state) { - std::string key(reinterpret_cast(state)); - int success = true; - if (globalOpMap.find(key) == globalOpMap.end()) { - LOG(WARNING) << "op: " << key << " not found"; - success = false; - } else { - JNIEnv *env; - _jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); - jclass opClass = env->GetObjectClass(globalOpMap.at(key)); - jmethodID midBackward = env->GetMethodID(opClass, "backwardEntry", "(I[J[I[IZ)Z"); - if (NULL == midBackward) { - LOG(WARNING) << "could not find op method backwardEntry."; - success = false; - } else { - jintArray tagsArr = env->NewIntArray(size); - env->SetIntArrayRegion(tagsArr, (jsize)0, (jsize)size, tags); - - int reqSize = 0; - for (int i = 0; i < size; ++i) { - if (tags[i] == 2) reqSize++; - } - jintArray reqsArr = env->NewIntArray(reqSize); - env->SetIntArrayRegion(reqsArr, (jsize)0, (jsize)reqSize, reqs); - jlongArray ptrsArr = env->NewLongArray(size); - env->SetLongArrayRegion( - ptrsArr, (jsize)0, (jsize)size, reinterpret_cast(ptrs)); - bool 
is_train = true;
-            if (isTrain == 0) is_train = false;
-            success = env->CallBooleanMethod(globalOpMap.at(key), midBackward,
-                                             size,
-                                             ptrsArr,
-                                             tagsArr,
-                                             reqsArr,
-                                             is_train);
-            env->DeleteLocalRef(tagsArr);
-            env->DeleteLocalRef(reqsArr);
-            env->DeleteLocalRef(ptrsArr);
-          }
-          _jvm->DetachCurrentThread();
-        }
-        return success;
-      };
-
-      // del callback
-      auto delEntry = [](void *state) {
-        std::string key(reinterpret_cast<char *>(state));
-        int success = true;
-        std::unique_lock<std::mutex> lock(mutex_op);
-        if (globalOpMap.find(key) == globalOpMap.end()) {
-          LOG(WARNING) << "op: " << key << " not found";
-          success = false;
-        } else {
-          JNIEnv *env;
-          _jvm->AttachCurrentThread(reinterpret_cast<void **>(&env), NULL);
-          env->DeleteGlobalRef(globalOpMap.at(key));
-          _jvm->DetachCurrentThread();
-          for (auto it = globalOpMap.begin(); it != globalOpMap.end(); ) {
-            if (it->first == key) {
-              it = globalOpMap.erase(it);
-            } else {
-              ++it;
-            }
-          }
-        }
-        lock.unlock();
-        return success;
-      };
-
-      // TODO(eric): Memory leak here. Refactor later and delete in delEntry
-      ret->num_callbacks = 3;
-      ret->callbacks = new MXGenericCallback[ret->num_callbacks];
-      ret->callbacks[kCustomOpDelete] =
-        reinterpret_cast<MXGenericCallback>(static_cast<CustomOpDelFunc>(delEntry));
-      ret->callbacks[kCustomOpForward] =
-        reinterpret_cast<MXGenericCallback>(
-          static_cast<CustomOpFBFunc>(
-            forwardEntry));
-      ret->callbacks[kCustomOpBackward] =
-        reinterpret_cast<MXGenericCallback>(
-          static_cast<CustomOpFBFunc>(
-            backwardEntry));
-      ret->contexts = new void*[ret->num_callbacks];
-      ret->contexts[kCustomOpDelete] = state;
-      ret->contexts[kCustomOpForward] = state;
-      ret->contexts[kCustomOpBackward] = state;
-    }
-    }
-    return success;
-  };
-
-  // del callback
-  auto opPropDel = [](void *state) {
-    /*
-     * This method seems to be called by the engine to clean up after multiple calls were made
-     * to the creator lambda. The current creator function isn't allocating a new object but is
-     * instead reinitializing the object which was created when register was called. This means
-     * that there doesn't seem to be anything to clean up here (previous efforts were actually
-     * deregistering the operator).
-     */
-    return 1;
-  };
-
-  // TODO(eric): Memory leak. Missing infertype.
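// [Illustrative sketch, not from the original file] An MXCallbackList is a pair
// of parallel arrays indexed by the kCustomOpProp* enum: callbacks[i] holds the
// type-erased function pointer and contexts[i] the opaque state passed back as
// its last argument. Wiring a single entry looks like (typedef names as declared
// in mxnet/c_api.h):
//
//   ret->num_callbacks = 1;
//   ret->callbacks = new MXGenericCallback[1];
//   ret->callbacks[kCustomOpPropDelete] =
//       reinterpret_cast<MXGenericCallback>(static_cast<CustomOpDelFunc>(opPropDel));
//   ret->contexts = new void *[1];
//   ret->contexts[kCustomOpPropDelete] = const_cast<char *>(opType);  // the state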
-  ret->num_callbacks = 8;
-  ret->callbacks = new MXGenericCallback[ret->num_callbacks];
-  ret->callbacks[kCustomOpPropDelete] =
-    reinterpret_cast<MXGenericCallback>(
-      static_cast<CustomOpDelFunc>(opPropDel));
-  ret->callbacks[kCustomOpPropListArguments] =
-    reinterpret_cast<MXGenericCallback>(
-      static_cast<CustomOpListFunc>(opPropListArgument));
-  ret->callbacks[kCustomOpPropListOutputs] =
-    reinterpret_cast<MXGenericCallback>(
-      static_cast<CustomOpListFunc>(opPropListOutputs));
-  ret->callbacks[kCustomOpPropListAuxiliaryStates] =
-    reinterpret_cast<MXGenericCallback>(
-      static_cast<CustomOpListFunc>(opPropListAuxStates));
-  ret->callbacks[kCustomOpPropInferShape] =
-    reinterpret_cast<MXGenericCallback>(
-      static_cast<CustomOpInferShapeFunc>(opPropInferShape));
-  ret->callbacks[kCustomOpPropDeclareBackwardDependency] =
-    reinterpret_cast<MXGenericCallback>(
-      static_cast<CustomOpBkDepFunc>(
-        opPropDeclareBkDep));
-  ret->callbacks[kCustomOpPropCreateOperator] =
-    reinterpret_cast<MXGenericCallback>(
-      static_cast<CustomOpCreateFunc>(
-        opPropCreateOp));
-  ret->callbacks[kCustomOpPropInferType] =
-    reinterpret_cast<MXGenericCallback>(
-      static_cast<CustomOpInferTypeFunc>(opPropInferType));
-
-  ret->contexts = new void*[ret->num_callbacks];
-  ret->contexts[kCustomOpPropDelete] =
-    reinterpret_cast<void *>(const_cast<char *>(opType));
-  ret->contexts[kCustomOpPropListArguments] =
-    reinterpret_cast<void *>(const_cast<char *>(opType));
-  ret->contexts[kCustomOpPropListOutputs] =
-    reinterpret_cast<void *>(const_cast<char *>(opType));
-  ret->contexts[kCustomOpPropListAuxiliaryStates] =
-    reinterpret_cast<void *>(const_cast<char *>(opType));
-  ret->contexts[kCustomOpPropInferShape] =
-    reinterpret_cast<void *>(const_cast<char *>(opType));
-  ret->contexts[kCustomOpPropDeclareBackwardDependency] =
-    reinterpret_cast<void *>(const_cast<char *>(opType));
-  ret->contexts[kCustomOpPropCreateOperator] =
-    reinterpret_cast<void *>(const_cast<char *>(opType));
-  ret->contexts[kCustomOpPropInferType] =
-    reinterpret_cast<void *>(const_cast<char *>(opType));
-  return success;
-  };
-
-  CustomOpPropCreator creator =
-    static_cast<CustomOpPropCreator>(
-      creatorLambda);
-  return MXCustomOpRegister(regName, creator);
-}
-
-struct JNIString {
-  JNIEnv *env_;
-  jstring java_string_;
-  const char *str_;
-  inline JNIString(JNIEnv *env, const jstring& java_string)
-    : env_(env)
-    , java_string_(java_string) {
-    str_ = env_->GetStringUTFChars(java_string_, 0);
-  }
-  inline ~JNIString() {
-    if (str_) {
-      env_->ReleaseStringUTFChars(java_string_, str_);
-    }
-  }
-  inline const char *operator ()() const {
-    return str_;
-  }
-};
-
-struct JNIStringArray {
-  std::vector<std::unique_ptr<JNIString>> jni_strings_;
-  std::vector<const char *> strings_;
-  JNIStringArray(JNIEnv *env, const jobjectArray& stringArray) {
-    const int count = env->GetArrayLength(stringArray);
-    jni_strings_.reserve(count);
-    strings_.reserve(count);
-    for (int i = 0; i < count; ++i) {
-      jstring string = static_cast<jstring>(env->GetObjectArrayElement(stringArray, i));
-      jni_strings_.emplace_back(std::unique_ptr<JNIString>(new JNIString(env, string)));
-      strings_.emplace_back((*jni_strings_.rbegin())->str_);
-    }
-  }
-  const char * const* operator ()() const { return &strings_[0]; }
-};
-
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSetProfilerConfig
-  (JNIEnv *env, jobject obj, jobjectArray keys, jobjectArray vals) {
-  const int stringCount = env->GetArrayLength(keys);
-  CHECK_EQ(stringCount, env->GetArrayLength(vals)) << "Key and value arrays must be the same size";
-
-  JNIStringArray the_keys(env, keys), the_vals(env, vals);
-
-  const int ret = MXSetProfilerConfig(stringCount, the_keys(), the_vals());
-  return ret;
-}
-
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSetProfilerState
-  (JNIEnv *env, jobject obj, jint jstate) {
-  return MXSetProfilerState(jstate);
-}
-
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDumpProfile
-  (JNIEnv *env, jobject obj, jint finished) {
-  return MXDumpProfile(finished);
-}
-
-// Numpy
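// [Illustrative sketch, not from the original file] The two bindings below use
// the file's standard out-parameter idiom: the C API writes through an int *,
// and the shim copies the result into the "value" field of a boxed ref object
// (org.apache.mxnet.Base$RefInt) supplied by the Scala caller:
//
//   int prev;                                  // C-side out parameter
//   int ret = MXSetIsNumpyShape(isNpComp, &prev);
//   SetIntField(env, prevRef, prev);           // writes prevRef.value on the JVM side
//
// Hypothetical Scala call site: val prev = new Base.RefInt;
// _LIB.mxSetIsNumpyShape(1, prev); prev.value then holds the previous flag.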
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxIsNumpyShape
-  (JNIEnv *env, jobject obj, jobject compatibleRef) {
-  int isNumpyShape;
-  int ret = MXIsNumpyShape(&isNumpyShape);
-  SetIntField(env, compatibleRef, isNumpyShape);
-  return ret;
-}
-
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSetIsNumpyShape
-  (JNIEnv *env, jobject obj, jint isNpComp, jobject prevRef) {
-  int prev;
-  int ret = MXSetIsNumpyShape(isNpComp, &prev);
-  SetIntField(env, prevRef, prev);
-  return ret;
-}
diff --git a/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.h b/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.h
deleted file mode 100644
index c8ee0ce8d22b..000000000000
--- a/scala-package/native/src/main/native/org_apache_mxnet_native_c_api.h
+++ /dev/null
@@ -1,933 +0,0 @@
-/* DO NOT EDIT THIS FILE - it is machine generated */
-#include <jni.h>
-/* Header for class org_apache_mxnet_LibInfo */
-
-#ifndef _Included_org_apache_mxnet_LibInfo
-#define _Included_org_apache_mxnet_LibInfo
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    nativeLibInit
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_nativeLibInit
-  (JNIEnv *, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxGetLastError
- * Signature: ()Ljava/lang/String;
- */
-JNIEXPORT jstring JNICALL Java_org_apache_mxnet_LibInfo_mxGetLastError
-  (JNIEnv *, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxListAllOpNames
- * Signature: (Lscala/collection/mutable/ListBuffer;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxListAllOpNames
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    nnGetOpHandle
- * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_nnGetOpHandle
-  (JNIEnv *, jobject, jstring, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxImperativeInvokeEx
- * Signature: (J[J[JLscala/collection/mutable/ArrayBuffer;I[Ljava/lang/String;[Ljava/lang/String;Lscala/collection/mutable/ArrayBuffer;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxImperativeInvokeEx
-  (JNIEnv *, jobject, jlong, jlongArray, jlongArray, jobject, jint, jobjectArray, jobjectArray, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayFree
- * Signature: (J)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayFree
-  (JNIEnv *, jobject, jlong);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayCreateNone
- * Signature: (Lorg/apache/mxnet/Base/RefLong;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayCreateNone
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayCreateEx
- * Signature: ([IIIIIILorg/apache/mxnet/Base/RefLong;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayCreateEx
-  (JNIEnv *, jobject, jintArray, jint, jint, jint, jint, jint, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayCreateSparseEx
- * Signature: (I[IIIIIII[I[I[ILorg/apache/mxnet/Base/RefLong;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayCreateSparseEx
-  (JNIEnv *, jobject, jint, jintArray, jint, jint, jint, jint, jint, jint, jintArray, jintArray, jintArray, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayWaitAll
- * Signature: ()I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayWaitAll
-  (JNIEnv *, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayWaitToRead
- * Signature: (J)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayWaitToRead
-  (JNIEnv *, jobject, jlong);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxListFunctions
- * Signature: (Lscala/collection/mutable/ListBuffer;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxListFunctions
-  (JNIEnv *, jobject, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxFuncDescribe
- * Signature: (JLorg/apache/mxnet/Base/RefInt;Lorg/apache/mxnet/Base/RefInt;Lorg/apache/mxnet/Base/RefInt;Lorg/apache/mxnet/Base/RefInt;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncDescribe
-  (JNIEnv *, jobject, jlong, jobject, jobject, jobject, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxFuncGetInfo
- * Signature: (JLorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncGetInfo
-  (JNIEnv *, jobject, jlong, jobject, jobject, jobject, jobject, jobject, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxFuncInvoke
- * Signature: (J[J[F[J)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncInvoke
-  (JNIEnv *, jobject, jlong, jlongArray, jfloatArray, jlongArray);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxFuncInvokeEx
- * Signature: (J[J[F[JI[[B[[B)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFuncInvokeEx
-  (JNIEnv *, jobject, jlong, jlongArray, jfloatArray, jlongArray, jint, jobjectArray, jobjectArray);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayGetShape
- * Signature: (JLorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ArrayBuffer;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetShape
-  (JNIEnv *, jobject, jlong, jobject, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArraySyncCopyFromNDArray
- * Signature: (JJI)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySyncCopyFromNDArray
-  (JNIEnv *, jobject, jlong, jlong, jint);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArraySyncCopyToCPU
- * Signature: (J[BI)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySyncCopyToCPU
-  (JNIEnv *, jobject, jlong, jbyteArray, jint);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArraySlice
- * Signature: (JIILorg/apache/mxnet/Base/RefLong;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySlice
-  (JNIEnv *, jobject, jlong, jint, jint, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayAt
- * Signature: (JILorg/apache/mxnet/Base/RefLong;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayAt
-  (JNIEnv *, jobject, jlong, jint, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayReshape64
- * Signature: (JI[JZLorg/apache/mxnet/Base/RefLong;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayReshape64
-  (JNIEnv *, jobject, jlong, jint, jlongArray, jboolean, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArraySyncCopyFromCPU
- * Signature: (J[FI)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySyncCopyFromCPU
-  (JNIEnv *, jobject, jlong, jfloatArray, jint);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxFloat64NDArraySyncCopyFromCPU
- * Signature: (J[DI)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxFloat64NDArraySyncCopyFromCPU
-  (JNIEnv *, jobject, jlong, jdoubleArray, jint);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayLoad
- * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ArrayBuffer;Lorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ArrayBuffer;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayLoad
-  (JNIEnv *, jobject, jstring, jobject, jobject, jobject, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArraySave
- * Signature: (Ljava/lang/String;[J[Ljava/lang/String;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySave
-  (JNIEnv *, jobject, jstring, jlongArray, jobjectArray);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayGetDataNDArray
- * Signature: (JLorg/apache/mxnet/Base/RefLong;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetDataNDArray
-  (JNIEnv *, jobject, jlong, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayGetAuxNDArray
- * Signature: (JILorg/apache/mxnet/Base/RefLong;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetAuxNDArray
-  (JNIEnv *, jobject, jlong, jint, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayGetContext
- * Signature: (JLorg/apache/mxnet/Base/RefInt;Lorg/apache/mxnet/Base/RefInt;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetContext
-  (JNIEnv *, jobject, jlong, jobject, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArraySaveRawBytes
- * Signature: (JLscala/collection/mutable/ArrayBuffer;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArraySaveRawBytes
-  (JNIEnv *, jobject, jlong, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayLoadFromRawBytes
- * Signature: ([BLorg/apache/mxnet/Base/RefLong;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayLoadFromRawBytes
-  (JNIEnv *, jobject, jbyteArray, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayGetDType
- * Signature: (JLorg/apache/mxnet/Base/RefInt;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetDType
-  (JNIEnv *, jobject, jlong, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxNDArrayGetStorageType
- * Signature: (JLorg/apache/mxnet/Base/RefInt;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNDArrayGetStorageType
-  (JNIEnv *, jobject, jlong, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxInitPSEnv
- * Signature: ([Ljava/lang/String;[Ljava/lang/String;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxInitPSEnv
-  (JNIEnv *, jobject, jobjectArray, jobjectArray);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxKVStoreRunServer
- * Signature: (JLorg/apache/mxnet/KVServerControllerCallback;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreRunServer
-  (JNIEnv *, jobject, jlong, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxKVStoreGetNumDeadNode
- * Signature: (JILorg/apache/mxnet/Base/RefInt;)I
- */
-JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetNumDeadNode
-  (JNIEnv *, jobject, jlong, jint, jobject);
-
-/*
- * Class:     org_apache_mxnet_LibInfo
- * Method:    mxKVStoreCreate
- * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I
- */
JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreCreate - (JNIEnv *, jobject, jstring, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStoreInit - * Signature: (JI[I[J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreInit - (JNIEnv *, jobject, jlong, jint, jintArray, jlongArray); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStoreInitEx - * Signature: (JI[Ljava/lang/String;[J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreInitEx - (JNIEnv *, jobject, jlong, jint, jobjectArray, jlongArray); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStorePush - * Signature: (JI[I[JI)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePush - (JNIEnv *, jobject, jlong, jint, jintArray, jlongArray, jint); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStorePushEx - * Signature: (JI[Ljava/lang/String;[JI)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePushEx - (JNIEnv *, jobject, jlong, jint, jobjectArray, jlongArray, jint); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStorePull - * Signature: (JI[I[JI)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePull - (JNIEnv *, jobject, jlong, jint, jintArray, jlongArray, jint); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStorePullEx - * Signature: (JI[Ljava/lang/String;[JI)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStorePullEx - (JNIEnv *, jobject, jlong, jint, jobjectArray, jlongArray, jint); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStoreSetUpdater - * Signature: (JLorg/apache/mxnet/MXKVStoreUpdater;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreSetUpdater - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStoreIsWorkerNode - * Signature: (Lorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreIsWorkerNode - (JNIEnv *, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStoreGetType - * Signature: (JLorg/apache/mxnet/Base/RefString;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetType - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStoreSendCommmandToServers - * Signature: (JILjava/lang/String;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreSendCommmandToServers - (JNIEnv *, jobject, jlong, jint, jstring); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStoreBarrier - * Signature: (J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreBarrier - (JNIEnv *, jobject, jlong); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStoreGetGroupSize - * Signature: (JLorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetGroupSize - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStoreGetRank - * Signature: (JLorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreGetRank - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStoreSetBarrierBeforeExit - * Signature: (JI)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxKVStoreSetBarrierBeforeExit - (JNIEnv *, jobject, jlong, jint); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxKVStoreFree - * Signature: (J)I - */ -JNIEXPORT jint JNICALL 
Java_org_apache_mxnet_LibInfo_mxKVStoreFree - (JNIEnv *, jobject, jlong); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxListDataIters - * Signature: (Lscala/collection/mutable/ListBuffer;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxListDataIters - (JNIEnv *, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxDataIterCreateIter - * Signature: (J[Ljava/lang/String;[Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterCreateIter - (JNIEnv *, jobject, jlong, jobjectArray, jobjectArray, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxDataIterGetIterInfo - * Signature: (JLorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefString;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetIterInfo - (JNIEnv *, jobject, jlong, jobject, jobject, jobject, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxDataIterFree - * Signature: (J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterFree - (JNIEnv *, jobject, jlong); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxDataIterBeforeFirst - * Signature: (J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterBeforeFirst - (JNIEnv *, jobject, jlong); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxDataIterNext - * Signature: (JLorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterNext - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxDataIterGetLabel - * Signature: (JLorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetLabel - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxDataIterGetData - * Signature: (JLorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetData - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxDataIterGetIndex - * Signature: (JLscala/collection/mutable/ListBuffer;Lorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetIndex - (JNIEnv *, jobject, jlong, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxDataIterGetPadNum - * Signature: (JLorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDataIterGetPadNum - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxExecutorOutputs - * Signature: (JLscala/collection/mutable/ArrayBuffer;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorOutputs - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxExecutorFree - * Signature: (J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorFree - (JNIEnv *, jobject, jlong); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxExecutorForward - * Signature: (JI)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorForward - (JNIEnv *, jobject, jlong, jint); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxExecutorBackward - * Signature: (J[J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorBackward - (JNIEnv *, jobject, jlong, jlongArray); - -/* 
- * Class: org_apache_mxnet_LibInfo - * Method: mxExecutorPrint - * Signature: (JLorg/apache/mxnet/Base/RefString;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorPrint - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxExecutorSetMonitorCallback - * Signature: (JLorg/apache/mxnet/MXMonitorCallback;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorSetMonitorCallback - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxExecutorReshape - * Signature: (IIII[Ljava/lang/String;[I[I[Ljava/lang/String;[I[ILscala/collection/mutable/ArrayBuffer;Lscala/collection/mutable/ArrayBuffer;Lscala/collection/mutable/ArrayBuffer;JLorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorReshape - (JNIEnv *, jobject, jint, jint, jint, jint, jobjectArray, jintArray, jintArray, jobjectArray, jintArray, jintArray, jobject, jobject, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolListAtomicSymbolCreators - * Signature: (Lscala/collection/mutable/ListBuffer;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAtomicSymbolCreators - (JNIEnv *, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolGetAtomicSymbolInfo - * Signature: (JLorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lorg/apache/mxnet/Base/RefString;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetAtomicSymbolInfo - (JNIEnv *, jobject, jlong, jobject, jobject, jobject, jobject, jobject, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolCreateAtomicSymbol - * Signature: (J[Ljava/lang/String;[Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateAtomicSymbol - (JNIEnv *, jobject, jlong, jobjectArray, jobjectArray, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolSetAttr - * Signature: (JLjava/lang/String;Ljava/lang/String;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolSetAttr - (JNIEnv *, jobject, jlong, jstring, jstring); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolListAttrShallow - * Signature: (JLorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ArrayBuffer;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAttrShallow - (JNIEnv *, jobject, jlong, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolListAttr - * Signature: (JLorg/apache/mxnet/Base/RefInt;Lscala/collection/mutable/ArrayBuffer;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAttr - (JNIEnv *, jobject, jlong, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolCompose - * Signature: (JLjava/lang/String;[Ljava/lang/String;[J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCompose - (JNIEnv *, jobject, jlong, jstring, jobjectArray, jlongArray); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolCreateVariable - * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateVariable - (JNIEnv *, jobject, jstring, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: 
mxSymbolGetAttr - * Signature: (JLjava/lang/String;Lorg/apache/mxnet/Base/RefString;Lorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetAttr - (JNIEnv *, jobject, jlong, jstring, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolListArguments - * Signature: (JLscala/collection/mutable/ArrayBuffer;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListArguments - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolCopy - * Signature: (JLorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCopy - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolListAuxiliaryStates - * Signature: (JLscala/collection/mutable/ArrayBuffer;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListAuxiliaryStates - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolListOutputs - * Signature: (JLscala/collection/mutable/ArrayBuffer;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolListOutputs - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolCreateGroup - * Signature: ([JLorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateGroup - (JNIEnv *, jobject, jlongArray, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolPrint - * Signature: (JLorg/apache/mxnet/Base/RefString;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolPrint - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolGetInternals - * Signature: (JLorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetInternals - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolInferType - * Signature: (J[Ljava/lang/String;[ILscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolInferType - (JNIEnv *, jobject, jlong, jobjectArray, jintArray, jobject, jobject, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolInferShape - * Signature: (JI[Ljava/lang/String;[I[ILscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolInferShape - (JNIEnv *, jobject, jlong, jint, jobjectArray, jintArray, jintArray, jobject, jobject, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolInferShapePartial - * Signature: (JI[Ljava/lang/String;[I[ILscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lscala/collection/mutable/ListBuffer;Lorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolInferShapePartial - (JNIEnv *, jobject, jlong, jint, jobjectArray, jintArray, jintArray, jobject, jobject, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolGetOutput - * Signature: (JILorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolGetOutput - (JNIEnv *, jobject, jlong, jint, jobject); - -/* - * 
Class: org_apache_mxnet_LibInfo - * Method: mxSymbolSaveToJSON - * Signature: (JLorg/apache/mxnet/Base/RefString;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolSaveToJSON - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolCreateFromJSON - * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateFromJSON - (JNIEnv *, jobject, jstring, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxExecutorBindX - * Signature: (JIII[Ljava/lang/String;[I[II[J[J[I[JLorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorBindX - (JNIEnv *, jobject, jlong, jint, jint, jint, jobjectArray, jintArray, jintArray, jint, jlongArray, jlongArray, jintArray, jlongArray, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxExecutorBindEX - * Signature: (JIII[Ljava/lang/String;[I[II[J[J[I[JJLorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxExecutorBindEX - (JNIEnv *, jobject, jlong, jint, jint, jint, jobjectArray, jintArray, jintArray, jint, jlongArray, jlongArray, jintArray, jlongArray, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolSaveToFile - * Signature: (JLjava/lang/String;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolSaveToFile - (JNIEnv *, jobject, jlong, jstring); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolCreateFromFile - * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolCreateFromFile - (JNIEnv *, jobject, jstring, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSymbolFree - * Signature: (J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSymbolFree - (JNIEnv *, jobject, jlong); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxRandomSeed - * Signature: (I)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRandomSeed - (JNIEnv *, jobject, jint); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxNotifyShutdown - * Signature: ()I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxNotifyShutdown - (JNIEnv *, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxRecordIOWriterCreate - * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterCreate - (JNIEnv *, jobject, jstring, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxRecordIOReaderCreate - * Signature: (Ljava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderCreate - (JNIEnv *, jobject, jstring, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxRecordIOWriterFree - * Signature: (J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterFree - (JNIEnv *, jobject, jlong); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxRecordIOReaderFree - * Signature: (J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderFree - (JNIEnv *, jobject, jlong); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxRecordIOWriterWriteRecord - * Signature: (JLjava/lang/String;I)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterWriteRecord - (JNIEnv *, jobject, jlong, jstring, jint); - -/* - * Class: 
org_apache_mxnet_LibInfo - * Method: mxRecordIOReaderReadRecord - * Signature: (JLorg/apache/mxnet/Base/RefString;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderReadRecord - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxRecordIOWriterTell - * Signature: (JLorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOWriterTell - (JNIEnv *, jobject, jlong, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxRecordIOReaderSeek - * Signature: (JI)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRecordIOReaderSeek - (JNIEnv *, jobject, jlong, jint); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxRtcCreate - * Signature: (Ljava/lang/String;[Ljava/lang/String;[Ljava/lang/String;[J[JLjava/lang/String;Lorg/apache/mxnet/Base/RefLong;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRtcCreate - (JNIEnv *, jobject, jstring, jobjectArray, jobjectArray, jlongArray, jlongArray, jstring, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxRtcPush - * Signature: (J[J[JIIIIII)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRtcPush - (JNIEnv *, jobject, jlong, jlongArray, jlongArray, jint, jint, jint, jint, jint, jint); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxRtcFree - * Signature: (J)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxRtcFree - (JNIEnv *, jobject, jlong); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxCustomOpRegister - * Signature: (Ljava/lang/String;Lorg/apache/mxnet/CustomOpProp;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxCustomOpRegister - (JNIEnv *, jobject, jstring, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSetProfilerConfig - * Signature: ([Ljava/lang/String;[Ljava/lang/String;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSetProfilerConfig - (JNIEnv *, jobject, jobjectArray, jobjectArray); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSetProfilerState - * Signature: (I)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSetProfilerState - (JNIEnv *, jobject, jint); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxDumpProfile - * Signature: (I)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxDumpProfile - (JNIEnv *, jobject, jint); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxIsNumpyShape - * Signature: (Lorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxIsNumpyShape - (JNIEnv *, jobject, jobject); - -/* - * Class: org_apache_mxnet_LibInfo - * Method: mxSetIsNumpyShape - * Signature: (ILorg/apache/mxnet/Base/RefInt;)I - */ -JNIEXPORT jint JNICALL Java_org_apache_mxnet_LibInfo_mxSetIsNumpyShape - (JNIEnv *, jobject, jint, jobject); - -#ifdef __cplusplus -} -#endif -#endif diff --git a/scala-package/packageTest/Makefile b/scala-package/packageTest/Makefile deleted file mode 100644 index 8c12c1d04189..000000000000 --- a/scala-package/packageTest/Makefile +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -SCALA_VERSION_PROFILE := 2.11 -SCALA_VERSION := 2.11.8 -MXNET_VERSION := "[1.3.0-SNAPSHOT,)" - -MXNET_REPO = https://repository.apache.org/content/repositories/snapshots - -ifeq ($(OS),Windows_NT) - UNAME_S := Windows -else - UNAME_S := $(shell uname -s) -endif - -ifeq ($(UNAME_S), Windows) - # TODO: currently scala package does not support windows - SCALA_PKG_PROFILE := windows -else - ifeq ($(UNAME_S), Darwin) - SCALA_PKG_PROFILE := osx-x86_64-cpu - else - SCALA_PKG_PROFILE := linux-x86_64 - ifeq ($(USE_CUDA), 1) - SCALA_PKG_PROFILE := $(SCALA_PKG_PROFILE)-gpu - else - SCALA_PKG_PROFILE := $(SCALA_PKG_PROFILE)-cpu - endif - endif -endif - -ifeq ($(CI), 1) - MAVEN_ARGS := -B -endif - -PROFILES := -Ptest -ifeq ($(UNIT), 1) - PROFILES := "$(PROFILES),unittest" -endif -ifeq ($(INTEGRATION), 1) - PROFILES := "$(PROFILES),integrationtest" -endif - -ifneq ($(UNIT), 1) - ifneq ($(INTEGRATION), 1) - PROFILES := "$(PROFILES),unittest,integrationtest" - endif -endif - - -clean: - (mvn $(MAVEN_ARGS) clean -Dmxnet.profile=$(SCALA_PKG_PROFILE) \ - -Dmxnet.scalaprofile=$(SCALA_VERSION_PROFILE) \ - -Dmxnet.version=$(MXNET_VERSION) \ - -Dscala.version=$(SCALA_VERSION)) - -testinstall: - (mvn $(MAVEN_ARGS) integration-test -Dmxnet.profile=$(SCALA_PKG_PROFILE) \ - $(PROFILES) \ - -Dmxnet.scalaprofile=$(SCALA_VERSION_PROFILE) \ - -Dmxnet.version=$(MXNET_VERSION) \ - -Dscala.version=$(SCALA_VERSION)) - -testlocal: - (mvn $(MAVEN_ARGS) integration-test -Dmxnet.profile=$(SCALA_PKG_PROFILE) \ - $(PROFILES),fromLocal \ - -Dmxnet.scalaprofile=$(SCALA_VERSION_PROFILE) \ - -Dmxnet.version=$(MXNET_VERSION) \ - -Dscala.version=$(SCALA_VERSION)) - -testsnapshot: - (mvn $(MAVEN_ARGS) integration-test -Dmxnet.profile=$(SCALA_PKG_PROFILE) \ - $(PROFILES),fromSnapshots \ - -Dmxnet.scalaprofile=$(SCALA_VERSION_PROFILE) \ - -Dmxnet.repo=$(MXNET_REPO) \ - -Dmxnet.version=$(MXNET_VERSION) \ - -Dscala.version=$(SCALA_VERSION)) diff --git a/scala-package/packageTest/README.md b/scala-package/packageTest/README.md deleted file mode 100644 index 4ca5568beb27..000000000000 --- a/scala-package/packageTest/README.md +++ /dev/null @@ -1,91 +0,0 @@ - - - - - - - - - - - - - - - - - -# MXNet Scala Package Test - -This is a project created to run the test suite against a fully packaged mxnet jar. The test suite is found locally, but mxnet itself comes from the target jar file. - -## General Setup - -To set up the packageTest, you must first build your tests. To build the tests, follow these steps from the mxnet main directory: - -1. Build MXNet and the scala package from source following the directions [here](https://mxnet.apache.org/get_started/scala_setup#source) -2. Build the tests by running `mvn test-compile`. -3. Follow the setup instructions below for your testing goal - -## Running - -There are three modes of operation for testing, based on where the jar is located and where it comes from: - -### Test Installed Jars - -If you have a jar file, you can install it to your maven cache repository (`~/.m2/repository`). This might be useful if you acquire the .jar file from elsewhere.
To install, it is easiest to use `mvn install:install-file -Dfile=<path-to-jar> -DpomFile=<path-to-pom>`. If the pom file is not available, you can also run `mvn install:install-file -Dfile=<path-to-jar> -DgroupId=<group-id> -DartifactId=<artifact-id> -Dversion=<version> -Dpackaging=<packaging>`. With the full mxnet jar, this might look like `mvn install:install-file -Dfile=<path-to-jar> -DgroupId=org.apache.mxnet -DartifactId=mxnet-full_2.11-linux-x86_64-cpu -Dversion=1.3.0 -Dpackaging=jar`. - -You can also run `mvn install` to install from a local build. - -After installing, run `make testinstall` in the package test directory to run the tests. Note that unless you also install an additional mxnetexamples jar, you can only run the unit tests. - -### Test Local Deployment - -To test the jars that would be produced by a deployment, you can run `mvn deploy` from the main mxnet directory. This produces a local snapshot located at `scala-package/deploy/target/repo`. To test this local snapshot, run `make testlocal`. It also installs the component packages needed for testing the examples in `scala-package/*/target/repo`. - -### Remote Repository Snapshot - -This mode tests a jar located in a remote repository. The default repository is the apache snapshot repository located at `https://repository.apache.org/content/repositories/snapshots`. Note that the actual jar in a repository should be located at `$repoUrl/org/apache/mxnet/mxnet-full_$scalaVersion-$osMode/$version/*.jar`. - -Test the snapshot repo using `make testsnapshot` or a different repo using `make testsnapshot MXNET_REPO=$NEW_REPO_URL`. - -### Options - -You are able to run unit tests, integration tests, or both using this utility. To run the unit tests, add the flag `UNIT=1` to make (e.g. `make testsnapshot UNIT=1`). Use `INTEGRATION=1` for integration tests. The default behavior is to run both the unit and integration tests. However, the integration tests require that the mxnet examples be installed in addition to the full mxnet package (see the test mode instructions above). - -For running on GPU, add the flag `USE_CUDA=1`. - -As an additional option, you can specify the mxnet version with `MXNET_VERSION=1.4.0-SNAPSHOT`. - -## Cleaning Up - -You can clean temporary files and target artifacts by running `make clean`. - -## Troubleshooting - -### Missing Examples - -If the build fails with the following error -``` -[ERROR] Failed to execute goal org.scalatest:scalatest-maven-plugin:1.0:test (test) on project mxnet-scala-packagetest-examples_2.11: There are test failures -> [Help 1] -[ERROR] -[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch. -[ERROR] Re-run Maven using the -X switch to enable full debug logging. -[ERROR] -[ERROR] For more information about the errors and possible solutions, please read the following articles: -[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException -[ERROR] -[ERROR] After correcting the problems, you can resume the build with the command -[ERROR] mvn -rf :mxnet-scala-packagetest-examples_2.11 -Makefile:57: recipe for target 'scalaintegrationtest' failed -make: *** [scalaintegrationtest] Error 1 -``` - -and the stack trace begins with the following, - -``` -*** RUN ABORTED *** - java.lang.NoClassDefFoundError: org/apache/mxnetexamples/Util$ -``` - -you are missing the mxnetexamples package. See your test mode installation section for details.
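For quick reference, here is a minimal sketch of the three flows described above, run from the `scala-package/packageTest` directory; the jar file name and version below are illustrative placeholders, not values mandated by this repository:

```bash
# Mode 1: install a prebuilt jar into ~/.m2/repository, then test against it
mvn install:install-file -Dfile=mxnet-full.jar -DgroupId=org.apache.mxnet \
    -DartifactId=mxnet-full_2.11-linux-x86_64-cpu -Dversion=1.3.0 -Dpackaging=jar
make testinstall

# Mode 2: test a local `mvn deploy` snapshot, unit tests only, on GPU
make testlocal UNIT=1 USE_CUDA=1

# Mode 3: test a pinned version from a remote snapshot repository
make testsnapshot INTEGRATION=1 \
    MXNET_REPO=https://repository.apache.org/content/repositories/snapshots \
    MXNET_VERSION=1.4.0-SNAPSHOT
```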
diff --git a/scala-package/packageTest/core/pom.xml b/scala-package/packageTest/core/pom.xml deleted file mode 100644 index f21c04a3346c..000000000000 --- a/scala-package/packageTest/core/pom.xml +++ /dev/null @@ -1,55 +0,0 @@ - - - - 4.0.0 - - PackageTest - mxnet-scala-packagetest_2.11 - 1.0-SNAPSHOT - ../pom.xml - - - mxnet-scala-packagetest-core_2.11 - MXNet Scala Package Test - pom - - - - unittest - - false - - - - - - - - org.scalatest - scalatest-maven-plugin - 1.0 - - ${project.build.outputDirectory},${project.build.testOutputDirectory},../../core/target/test-classes - - - - - - diff --git a/scala-package/packageTest/core/scripts b/scala-package/packageTest/core/scripts deleted file mode 120000 index f806668aa847..000000000000 --- a/scala-package/packageTest/core/scripts +++ /dev/null @@ -1 +0,0 @@ -../../core/scripts \ No newline at end of file diff --git a/scala-package/packageTest/examples/pom.xml b/scala-package/packageTest/examples/pom.xml deleted file mode 100644 index dc3500a00181..000000000000 --- a/scala-package/packageTest/examples/pom.xml +++ /dev/null @@ -1,111 +0,0 @@ - - - - 4.0.0 - - PackageTest - mxnet-scala-packagetest_2.11 - 1.0-SNAPSHOT - ../pom.xml - - - mxnet-scala-packagetest-examples_2.11 - MXNet Scala Package Test - pom - - - - integrationtest - - false - - - - fromLocal - - - parent - file://${basedir}/../../target/repo - - true - - - - init - file://${basedir}/../../init/target/repo - - true - - - - macros - file://${basedir}/../../macros/target/repo - - true - - - - core - file://${basedir}/../../core/target/repo - - true - - - - infer - file://${basedir}/../../infer/target/repo - - true - - - - examples - file://${basedir}/../../examples/target/repo - - true - - - - - - - - - - org.scalatest - scalatest-maven-plugin - 1.0 - - ${project.build.outputDirectory},${project.build.testOutputDirectory},../../examples/target/test-classes - - - - - - - - org.apache.mxnet - mxnet-examples - INTERNAL - test - - - - diff --git a/scala-package/packageTest/examples/scripts b/scala-package/packageTest/examples/scripts deleted file mode 120000 index 2bba4eeece74..000000000000 --- a/scala-package/packageTest/examples/scripts +++ /dev/null @@ -1 +0,0 @@ -../../examples/scripts \ No newline at end of file diff --git a/scala-package/packageTest/infer/pom.xml b/scala-package/packageTest/infer/pom.xml deleted file mode 100644 index 137d8aed0d10..000000000000 --- a/scala-package/packageTest/infer/pom.xml +++ /dev/null @@ -1,54 +0,0 @@ - - - - 4.0.0 - - PackageTest - mxnet-scala-packagetest_2.11 - 1.0-SNAPSHOT - ../pom.xml - - - mxnet-scala-packagetest-infer_2.11 - MXNet Scala Package Test - pom - - - - unittest - - false - - - - - - - - org.scalatest - scalatest-maven-plugin - 1.0 - - ${project.build.outputDirectory},${project.build.testOutputDirectory},../../infer/target/test-classes - - - - - diff --git a/scala-package/packageTest/pom.xml b/scala-package/packageTest/pom.xml deleted file mode 100644 index 37b3a9a23931..000000000000 --- a/scala-package/packageTest/pom.xml +++ /dev/null @@ -1,212 +0,0 @@ - - - - 4.0.0 - PackageTest - mxnet-scala-packagetest_2.11 - 1.0-SNAPSHOT - MXNet Scala Package Test - pom - - - core - infer - - - - - test - - - integrationtest - - examples - - - - fromSnapshots - - - apache-snapshots - ${mxnet.repo} - default - - true - - - - - - fromLocal - - - full - file://${basedir}/../deploy/target/repo - - true - - - - - - - - true - - - - - org.apache.mxnet - mxnet-full_${mxnet.scalaprofile}-${mxnet.profile} - ${mxnet.version} - - - org.scala-lang - 
scala-library - ${scala.version} - - - commons-io - commons-io - 2.4 - - - org.scalatest - scalatest_${mxnet.scalaprofile} - 3.0.4 - test - - - org.scalacheck - scalacheck_${mxnet.scalaprofile} - 1.13.5 - test - - - org.mockito - mockito-all - 1.10.19 - test - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.3 - - 1.6 - 1.6 - UTF-8 - - - - maven-resources-plugin - 2.7 - - - org.apache.maven.plugins - maven-dependency-plugin - 2.9 - - - copy-dependencies - package - - copy-dependencies - - - ${project.build.outputDirectory}/lib - runtime - test,provided - false - false - true - - - - - - org.apache.maven.plugins - maven-jar-plugin - 2.5 - - - package - - jar - - - - **/* - - - - - - - net.alchim31.maven - scala-maven-plugin - 3.2.2 - - - compile - - compile - - compile - - - - - org.apache.maven.plugins - maven-surefire-plugin - 2.19 - - true - - - - org.scalatest - scalatest-maven-plugin - 1.0 - - ${skipTests} - ${project.build.directory}/surefire-reports - . - F - WDF TestSuite.txt - - - - test - integration-test - - test - - - - - - - - diff --git a/scala-package/pom.xml b/scala-package/pom.xml deleted file mode 100644 index 9fdbd60306b1..000000000000 --- a/scala-package/pom.xml +++ /dev/null @@ -1,477 +0,0 @@ - - - - 4.0.0 - - org.apache - apache - 19 - - - org.apache.mxnet - mxnet-parent - INTERNAL - MXNet Scala Package - Parent - https://github.com/apache/incubator-mxnet/tree/master/scala-package - - Scala Package for Apache MXNet (Incubating) - flexible and efficient library for deep learning. - - - The Apache Software Foundation - https://www.apache.org/ - - - - Apache License, Version 2.0 - https://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - scm:git:git@github.com:apache/incubator-mxnet.git - scm:git:git@github.com:apache/incubator-mxnet.git - https://github.com/apache/incubator-mxnet - HEAD - - - - 1.7 - 2.11.8 - - 2.11 - 2.0.0 - - g++ - $ - ${project.basedir}/.. 
- true - file://${project.build.directory}/repo - - - pom - - init - init-native - macros - native - core - infer - examples - spark - assembly - externalPom - deploy - - - - - scala-2.11 - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - enforce-versions - - enforce - - - - - - *:*_2.12 - *:*_2.10 - - - - - - - - - - - - - osx-x86_64 - - - mac - - - - osx-x86_64 - jnilib - cpu - - - - linux-x86_64 - - - unix - Linux - - - - linux-x86_64 - so - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.6.0 - - - init-build-flavor - initialize - - exec - - - bash - -c 'mkdir -p ${project.build.directory}; if [[ $(ldd ${MXNET_DIR}/lib/libmxnet.so | grep libcuda | wc -l) == "0" ]]; then echo flavor=cpu > ${project.build.directory}/flavor.properties; else echo flavor=gpu > ${project.build.directory}/flavor.properties; fi' - - - - - - org.codehaus.mojo - properties-maven-plugin - 1.0.0 - - - read-properties - initialize - - read-project-properties - - - - ${project.build.directory}/flavor.properties - - - - - - - - - - staging - - - - org.apache.maven.plugins - maven-deploy-plugin - - true - - - - - - - nightly - - - - org.apache.maven.plugins - maven-deploy-plugin - - true - - - - - - - - - - - org.commonjava.maven.plugins - directory-maven-plugin - 0.1 - - - directories - - directory-of - - initialize - - rootdir - - org.apache.mxnet - mxnet-parent - - - - - - - org.apache.maven.plugins - maven-source-plugin - 2.2.1 - - - package - attach-sources - - jar-no-fork - - - - - - org.codehaus.mojo - native-maven-plugin - 1.0-alpha-7 - - - org.apache.maven.plugins - maven-compiler-plugin - 3.3 - - ${java.version} - ${java.version} - UTF-8 - true - true - - - - maven-resources-plugin - 2.7 - - - org.apache.maven.plugins - maven-dependency-plugin - 2.9 - - - org.apache.maven.plugins - maven-assembly-plugin - 3.1.0 - - - org.apache.maven.plugins - maven-surefire-plugin - 2.22.0 - - ${skipJavaTests} - - -Djava.library.path=${project.parent.basedir}/native/target - - false - - - - org.apache.maven.plugins - maven-install-plugin - 2.5.2 - - - - org.scalatest - scalatest-maven-plugin - 1.0 - - ${project.build.directory}/surefire-reports - . 
- F - WDF TestSuite.txt - - - - test - - integration-test - - test - - - - - - org.scalastyle - scalastyle-maven-plugin - 1.0.0 - - false - true - true - false - ${basedir}/src/main/scala - ${basedir}/src/test/scala - ${rootdir}/scalastyle-config.xml - ${basedir}/target/scalastyle-output.xml - UTF-8 - - - - - check - - - - - - net.alchim31.maven - scala-maven-plugin - 3.4.4 - - ${java.version} - ${java.version} - - - org.scalamacros - paradise_${scala.version} - 2.1.0 - - - - -feature - -deprecation - - - - - compile - - add-source - compile - testCompile - doc-jar - - - - presite - pre-site - - add-source - - - - - - - org.apache.maven.plugins - maven-deploy-plugin - - false - deployrepo::default::${repo_url} - - - - - - - - commons-codec - commons-codec - 1.10 - - - commons-io - commons-io - 2.1 - - - org.apache.logging.log4j - log4j-core - 2.11.1 - provided - - - org.slf4j - slf4j-api - 1.7.5 - - - org.slf4j - slf4j-log4j12 - 1.7.7 - provided - - - org.scalatest - scalatest_2.11 - 3.0.2 - test - - - args4j - args4j - 2.0.29 - - - org.scalacheck - scalacheck_2.11 - 1.13.5 - test - - - junit - junit - 4.11 - test - - - - - org.scala-lang - scala-library - ${scala.version} - - - org.scala-lang - scala-reflect - ${scala.version} - - - org.scala-lang.modules - scala-parser-combinators_2.11 - 1.0.5 - - - org.scala-lang.modules - scala-xml_2.11 - 1.0.6 - - - org.scala-lang - scala-compiler - ${scala.version} - - - diff --git a/scala-package/scalastyle-config.xml b/scala-package/scalastyle-config.xml deleted file mode 100644 index 71e246490fe2..000000000000 --- a/scala-package/scalastyle-config.xml +++ /dev/null @@ -1,170 +0,0 @@ - - - - - Scalastyle standard configuration - - - - - - - - - - - - - - - - - - - - - true - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ARROW, EQUALS, ELSE, TRY, CATCH, FINALLY, LARROW, RARROW - - - - - - ARROW, EQUALS, COMMA, COLON, IF, ELSE, DO, WHILE, FOR, MATCH, TRY, CATCH, FINALLY, LARROW, RARROW - - - - - - - - - ^println$ - - - - - Class\.forName - - - - - - JavaConversions - Instead of importing implicits in scala.collection.JavaConversions._, import - scala.collection.JavaConverters._ and use .asScala / .asJava methods - - - - - - - - - - - diff --git a/scala-package/spark/README.md b/scala-package/spark/README.md deleted file mode 100644 index 957b05f99b0e..000000000000 --- a/scala-package/spark/README.md +++ /dev/null @@ -1,108 +0,0 @@ - - - - - - - - - - - - - - - - - - Deep Learning on Spark -===== - -This is MXNet on [Spark](http://spark.apache.org/). -It is built on the MXNet Scala Package and brings deep learning to Spark. - -Now you have an end-to-end solution for large-scale deep models: you can take advantage of both the flexible parallel training approaches and GPU support in MXNet, and the fast data processing flow in Spark, to build a full pipeline from raw data to efficient deep learning. - -MXNet on Spark is still at an *experimental stage*. Any suggestions or contributions will be highly appreciated. - -Build ------------- - -Check out the [Installation Guide](http://mxnet.apache.org/get_started) for instructions to install mxnet. Remember to enable distributed training, i.e., set `USE_DIST_KVSTORE=1`. - -Compile the Scala Package by - -```bash -cd scala-package -mvn package -``` - -This will automatically build the `spark` submodule. Now you can submit Spark jobs with these built jars.
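As a rough sketch, a submission along these lines might look like the following; the jar discovery paths here are illustrative placeholders, and the `run-mnist-example.sh` script removed below shows a complete, working version:

```bash
# Locate the assembly and spark-module jars built by `mvn package`
# (paths are placeholders; adjust the OS suffix for your environment).
SCALA_JAR=$(find assembly/linux-x86_64/target -maxdepth 1 -name "*.jar" | grep -v -E '(javadoc|sources)')
SPARK_JAR=$(find spark/target -name "*.jar" | grep -v -E '(javadoc|sources)')

$SPARK_HOME/bin/spark-submit --master local[*] \
  --class org.apache.mxnet.spark.example.ClassificationExample \
  --jars ${SCALA_JAR} \
  ${SPARK_JAR} \
  --jars ${SCALA_JAR},${SPARK_JAR} \
  --num-server 1 --num-worker 2 \
  --java $JAVA_HOME/bin/java
```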
- -You can find a sample submit script in the `bin` directory of the `spark` module. Remember to set variables and versions according to your own environment. - -Usage ------------- -Here is an example of what training a deep network as a Spark job looks like. - -First define the parameters for the training procedure, - -```scala -val conf = new SparkConf().setAppName("MXNet") -val sc = new SparkContext(conf) - -val mxnet = new MXNet() - .setBatchSize(128) - .setLabelName("softmax_label") - .setContext(Context.cpu()) // or GPU if you like - .setDimension(Shape(784)) - .setNetwork(network) // e.g. MLP model - .setNumEpoch(10) - .setNumServer(2) - .setNumWorker(4) - // These jars are required by the KVStores at runtime. - // They will be uploaded and distributed to each node automatically - .setExecutorJars(cmdLine.jars) -``` - -Now load the data and run distributed training, - -```scala -val trainData = parseRawData(sc, cmdLine.input) -val model = mxnet.fit(trainData) -``` - -In this example, the PS scheduler is started on the driver and 2 servers are launched. The input data will be split into 4 pieces and trained in `dist_async` mode. - -To save the output model, simply call the `save` method, - -```scala -model.save(sc, cmdLine.output + "/model") -``` - -Predicting is straightforward, - -```scala -val valData = parseRawData(sc, cmdLine.inputVal) -val brModel = sc.broadcast(model) -val res = valData.mapPartitions { data => - val probArrays = brModel.value.predict(data.toIterator) - require(probArrays.length == 1) - val prob = probArrays(0) - val py = NDArray.argmax_channel(prob.get) - val labels = py.toArray.mkString(",") - py.dispose() - prob.get.dispose() - Iterator(labels) -} -res.saveAsTextFile(cmdLine.output + "/data") -``` - -Pitfalls ------------- - -- Sometimes you have to specify the `java` argument to help MXNet find the right java binary on worker nodes. -- MXNet and [ps-lite](https://github.com/dmlc/ps-lite) currently do NOT support multiple instances in one process (we will fix this issue in the future, but with lower priority), thus you must run Spark jobs in cluster mode (standalone, yarn-client, yarn-cluster). Local mode is NOT supported because it runs tasks in multiple threads within one process, which will block the initialization of the KVStore. -(Hint: If you only have one physical node and want to test the Spark package, you can start N workers on one node by setting `export SPARK_WORKER_INSTANCES=N` in `spark-env.sh`.) -Also, remember to set `--executor-cores 1` to ensure only one task runs in each Spark executor. -- Fault tolerance is not fully supported. If some of your tasks fail, please restart the whole application. We will solve it soon. diff --git a/scala-package/spark/bin/run-mnist-example.sh b/scala-package/spark/bin/run-mnist-example.sh deleted file mode 100755 index 4f747f2c91a1..000000000000 --- a/scala-package/spark/bin/run-mnist-example.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -set -x - -CURR_DIR=$(cd `dirname $0`; pwd) -SPARK_MODULE_DIR=$(cd $CURR_DIR/../; pwd) -SCALA_PKG_DIR=$(cd $CURR_DIR/../../; pwd) - -OS="" - -if [ "$(uname)" == "Darwin" ]; then - # Do something under Mac OS X platform - OS='osx-x86_64' -elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then - OS='linux-x86_64' -fi - -LIB_DIR=${SPARK_MODULE_DIR}/target/classes/lib -SPARK_JAR=`find ${SPARK_MODULE_DIR}/target -name "*.jar" -type f -exec ls "{}" + | grep -v -E '(javadoc|sources)'` -SCALA_JAR=`find ${SCALA_PKG_DIR}/assembly/$OS/target -maxdepth 1 -name "*.jar" -type f -exec ls "{}" + | grep -v -E '(javadoc|sources)'` - -SPARK_OPTS+=" --name mxnet-spark-mnist" -SPARK_OPTS+=" --driver-memory 2g" -SPARK_OPTS+=" --jars ${SCALA_JAR}" - -# Download training and test set -if [ ! -f ./train.txt ]; then - wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/Spark/train.txt -fi - -if [ ! -f ./val.txt ]; then - wget https://s3.us-east-2.amazonaws.com/mxnet-scala/scala-example-ci/Spark/val.txt -fi - -# running opts -RUN_OPTS+=" --input train.txt" -RUN_OPTS+=" --input-val val.txt" -RUN_OPTS+=" --output ./" -# These jars are required by the KVStores at runtime. -# They will be uploaded and distributed to each node automatically. -RUN_OPTS+=" --jars $SCALA_JAR,$SPARK_JAR" -RUN_OPTS+=" --num-server 1" -RUN_OPTS+=" --num-worker 2" -RUN_OPTS+=" --java $JAVA_HOME/bin/java" -RUN_OPTS+=" --model mlp" -RUN_OPTS+=" --cpus 0,1" -RUN_OPTS+=" --num-epoch 5" - -# check if SPARK_HOME is set -if [ -z "$SPARK_HOME" ]; then - echo "SPARK_HOME is unset"; - exit 1 -fi - -HOST=`hostname` - -$SPARK_HOME/bin/spark-submit --master local[*] \ - --class org.apache.mxnet.spark.example.ClassificationExample \ - ${SPARK_OPTS} \ - ${SPARK_JAR} \ - ${RUN_OPTS} diff --git a/scala-package/spark/pom.xml b/scala-package/spark/pom.xml deleted file mode 100644 index b61a427f83ef..000000000000 --- a/scala-package/spark/pom.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - 4.0.0 - - org.apache.mxnet - mxnet-parent - INTERNAL - ../pom.xml - - - mxnet-spark - MXNet Scala Package - Spark ML - - - 1.6.3 - - - - - org.apache.mxnet - mxnet-core - INTERNAL - provided - - - org.apache.spark - spark-mllib_2.11 - ${spark.version} - - - args4j - args4j - 2.33 - - - org.json4s - json4s-core_2.11 - 3.5.1 - - - diff --git a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNDArray.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNDArray.scala deleted file mode 100644 index 0d4c18c2b8e9..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNDArray.scala +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.spark - -import org.apache.mxnet.NDArray - -/** - * A wrapper to serialize & deserialize ``org.apache.mxnet.NDArray`` in Spark jobs - * @author Yizhi Liu - */ -class MXNDArray(@transient private var ndArray: NDArray) extends Serializable { - require(ndArray != null, "Undefined ndArray") - private val arrayBytes: Array[Byte] = ndArray.serialize() - - def get: NDArray = { - if (ndArray == null) { - ndArray = NDArray.deserialize(arrayBytes) - } - ndArray - } -} - -object MXNDArray { - def apply(ndArray: NDArray): MXNDArray = new MXNDArray(ndArray) -} diff --git a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNet.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNet.scala deleted file mode 100644 index 4952ca2626da..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNet.scala +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet.spark - -import org.apache.mxnet._ -import org.apache.mxnet.optimizer.SGD -import org.apache.mxnet.spark.io.LabeledPointIter - -import org.slf4j.{Logger, LoggerFactory} - -import org.apache.spark.mllib.regression.LabeledPoint -import org.apache.spark.rdd.RDD -import org.apache.spark.SparkContext - -class MXNet extends Serializable { - - class MXNetControllingThread( - schedulerIP: String, - schedulerPort: Int, - sparkContext: SparkContext, - triggerOfComponent: (String, Int, SparkContext) => Unit) extends Thread { - override def run() { - triggerOfComponent(schedulerIP, schedulerPort, sparkContext) - } - } - - private val logger: Logger = LoggerFactory.getLogger(classOf[MXNet]) - private val params: MXNetParams = new MXNetParams - - @transient private var psServerThread: MXNetControllingThread = _ - @transient private var psSchedulerThread: MXNetControllingThread = _ - - def setBatchSize(batchSize: Int): this.type = { - params.batchSize = batchSize - this - } - - def setNumEpoch(numEpoch: Int): this.type = { - params.numEpoch = numEpoch - this - } - - def setDimension(dimension: Shape): this.type = { - params.dimension = dimension - this - } - - def setNetwork(network: Symbol): this.type = { - params.setNetwork(network) - this - } - - def setContext(ctx: Array[Context]): this.type = { - params.context = ctx - this - } - - def setNumWorker(numWorker: Int): this.type = { - params.numWorker = numWorker - this - } - - def setNumServer(numServer: Int): this.type = { - params.numServer = numServer - this - } - - def setDataName(name: String): this.type = { - params.dataName = name - this - } - - def setLabelName(name: String): this.type = { - params.labelName = name - this - } - - /** - * The application (including parameter scheduler & servers) - * will exit if it hasn't received a heartbeat for over `timeout` seconds - * @param timeout timeout in seconds (default 300) - */ - def setTimeout(timeout: Int): this.type = { - params.timeout = timeout - this - } - - /** - * These jars are required by the KVStores at runtime. - * They will be uploaded and distributed to each node automatically - * @param jars jars required by the KVStore at runtime. 
- */ - def setExecutorJars(jars: String): this.type = { - params.jars = jars.split(",|:") - this - } - - def setJava(java: String): this.type = { - params.javabin = java - this - } - - private def startPSServers( - schedulerIP: String, - schedulerPort: Int, - sc: SparkContext) = { - def startPSServersInner( - schedulerIP: String, - schedulerPort: Int, - sc: SparkContext): Unit = { - sc.parallelize(1 to params.numServer, params.numServer).foreachPartition { p => - logger.info("Starting server ...") - val server = new ParameterServer(params.runtimeClasspath, - role = "server", - rootUri = schedulerIP, - rootPort = schedulerPort, - numServer = params.numServer, - numWorker = params.numWorker, - timeout = params.timeout, - java = params.javabin) - val exitCode = server.startProcess() - require(exitCode == 0, s"ps server process quit with exit code $exitCode") - } - } - psServerThread = new MXNetControllingThread(schedulerIP, schedulerPort, sc, startPSServersInner) - psServerThread.start() - } - - private def startPSScheduler( - schedulerIP: String, - schedulerPort: Int, - sc: SparkContext) = { - def startPSSchedulerInner( - schedulerIP: String, - schedulerPort: Int, - sc: SparkContext): Unit = { - // TODO: check ip & port available - logger.info("Starting scheduler on {}:{}", schedulerIP, schedulerPort) - val scheduler = new ParameterServer(params.runtimeClasspath, role = "scheduler", - rootUri = schedulerIP, rootPort = schedulerPort, - numServer = params.numServer, numWorker = params.numWorker, - timeout = params.timeout, java = params.javabin) - val exitCode = scheduler.startProcess() - require(exitCode == 0, s"Failed to start ps scheduler process with exit code $exitCode") - } - psSchedulerThread = new MXNetControllingThread(schedulerIP, schedulerPort, sc, - startPSSchedulerInner) - psSchedulerThread.start() - } - - private def setFeedForwardModel( - optimizer: Optimizer, - numExamples: Int, - kv: KVStore, - inputInPartition: LabeledPointIter): FeedForward = { - logger.debug("Define model") - val model = new FeedForward(ctx = params.context, - symbol = params.getNetwork, - numEpoch = params.numEpoch, - optimizer = optimizer, - initializer = new Xavier(factorType = "in", magnitude = 2.34f), - argParams = null, - auxParams = null, - beginEpoch = 0, - epochSize = numExamples / params.batchSize / kv.numWorkers) - logger.info("Start training ...") - model.fit(trainData = inputInPartition, - evalData = null, - evalMetric = new Accuracy(), - kvStore = kv) - model - } - - private def setupKVStore(schedulerIP: String, schedulerPort: Int): KVStore = { - KVStoreServer.init(ParameterServer.buildEnv(role = "worker", - rootUri = schedulerIP, rootPort = schedulerPort, - numServer = params.numServer, - numWorker = params.numWorker)) - val kv = KVStore.create("dist_async") - kv.setBarrierBeforeExit(false) - kv - } - - private def reclaimResources(dataIter: LabeledPointIter, kv: KVStore): Unit = { - dataIter.dispose() - kv.setBarrierBeforeExit(true) - kv.dispose() - } - - private def trainModel( - trainData: RDD[LabeledPoint], - schedulerIP: String, - schedulerPort: Int): MXNetModel = { - val job = trainData.mapPartitions { partition => - val dataIter = new LabeledPointIter( - partition, params.dimension, - params.batchSize, - dataName = params.dataName, - labelName = params.labelName) - // TODO: more natural way to get the # of examples? 
- var numExamples = 0 - while (dataIter.hasNext) { - val dataBatch = dataIter.next() - numExamples += dataBatch.label.head.shape(0) - } - logger.debug("Number of samples: {}", numExamples) - dataIter.reset() - - logger.info("Launching worker ...") - logger.info("Batch {}", params.batchSize) - // give enough time for ps-lite to detect the dead nodes - Thread.sleep(20000) - val kv = setupKVStore(schedulerIP, schedulerPort) - val optimizer = new SGD(learningRate = 0.01f, momentum = 0.9f, wd = 0.00001f) - val model = setFeedForwardModel(optimizer, numExamples, kv, dataIter) - logger.info("Training finished, waiting for other workers ...") - reclaimResources(dataIter, kv) - Iterator(new MXNetModel( - model, params.dimension, params.batchSize, - dataName = params.dataName, labelName = params.labelName)) - }.cache() - // force job to run - job.foreachPartition(_ => ()) - job.first() - } - - def fit(data: RDD[LabeledPoint]): MXNetModel = { - val sc = data.context - // distribute native jars - if (params.jars != null) { - params.jars.foreach(jar => sc.addFile(jar)) - } - val trainData = { - if (params.numWorker != data.partitions.length) { - logger.info("repartitioning training set to {} partitions", params.numWorker) - data.repartition(params.numWorker) - } else { - data - } - } - val schedulerIP = utils.Network.ipAddress - val schedulerPort = utils.Network.availablePort - startPSScheduler(schedulerIP, schedulerPort, sc) - startPSServers(schedulerIP, schedulerPort, sc) - val mxModel = trainModel(trainData, schedulerIP, schedulerPort) - logger.info("Waiting for scheduler ...") - psSchedulerThread.join() - psServerThread.join() - mxModel - } -} diff --git a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNetModel.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNetModel.scala deleted file mode 100644 index 234e9a597cf5..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNetModel.scala +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.mxnet.spark - -import org.apache.mxnet.spark.io.PointIter -import org.apache.mxnet.{FeedForward, NDArray, Shape} -import org.apache.spark.SparkContext -import org.apache.spark.mllib.linalg.Vector - -/** - * Wrapper for ``org.apache.mxnet.Model``, which is used in Spark applications - * @author Yizhi Liu - */ -class MXNetModel private[mxnet]( - @transient private var model: FeedForward, - private val dimension: Shape, - private val batchSize: Int, - private val dataName: String = "data", - private val labelName: String = "label") extends Serializable { - require(model != null, "Undefined model") - require(dimension != null, "Undefined dimension") - require(batchSize > 0, s"Invalid batchSize: $batchSize") - val serializedModel = model.serialize() - - /** - * Get inner model [[FeedForward]]

[[FeedForward]]
- * @return the underlying model used to train & predict - */ - def innerModel: FeedForward = { - if (model == null) { - model = FeedForward.deserialize(serializedModel) - } - model - } - - /** - * Predict a bunch of Vectors - * @param dataset points - * @return predicted results. - */ - def predict(dataset: Iterator[Vector]): Array[MXNDArray] = { - val dt = new PointIter(dataset, dimension, batchSize, dataName, labelName) - val results = innerModel.predict(dt) - results.map(arr => MXNDArray(arr)) - } - - def predict(data: Vector): Array[MXNDArray] = { - predict(Iterator(data)) - } - - /** - * Save [[MXNetModel]] as object file - * @param sc SparkContext - * @param path output path - */ - def save(sc: SparkContext, path: String): Unit = { - sc.parallelize(Seq(this), 1).saveAsObjectFile(path) - } -} - -object MXNetModel { - /** - * Load [[MXNetModel]] from path - * @param sc SparkContext - * @param path input path - * @return Loaded [[MXNetModel]] - */ - def load(sc: SparkContext, path: String): MXNetModel = { - sc.objectFile[MXNetModel](path, 1).first() - } -} diff --git a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNetParams.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNetParams.scala deleted file mode 100644 index f72e56e9efb5..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/MXNetParams.scala +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
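A sketch of the save/load round trip the wrapper above enables; the path and the all-zero feature vector are illustrative only:

```scala
import org.apache.mxnet.spark.MXNetModel
import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.Vectors

def scoreOne(sc: SparkContext): Double = {
  val model = MXNetModel.load(sc, "hdfs:///tmp/mxnet-model")  // Spark object file
  val outputs = model.predict(Vectors.dense(Array.fill(784)(0.0)))
  val arr = outputs(0).get.toArray  // unwrap the first MXNDArray
  arr.indexOf(arr.max)              // argmax over class probabilities
}
```

The round trip works because the constructor eagerly captures model.serialize() into a serializable field, while the @transient native FeedForward is rebuilt lazily by innerModel.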
- */ - -package org.apache.mxnet.spark - -import java.io.File - -import org.apache.mxnet.{Context, Shape, Symbol} -import org.apache.spark.SparkFiles - -/** - * MXNet on Spark training arguments - * @author Yizhi Liu - */ -private[mxnet] class MXNetParams extends Serializable { - // training batch size - var batchSize: Int = 128 - // dimension of input data - var dimension: Shape = null - // number of training epochs - var numEpoch: Int = 10 - - // network architecture - private var network: String = null - def setNetwork(net: Symbol): Unit = { - network = net.toJson - } - def getNetwork: Symbol = { - if (network == null) { - null - } else { - Symbol.loadJson(network) - } - } - - // executor running context - var context: Array[Context] = Context.cpu() - - var numWorker: Int = 1 - var numServer: Int = 1 - - var dataName: String = "data" - var labelName: String = "label" - - var timeout: Int = 300 - - // jars on executors for running mxnet application - var jars: Array[String] = null - def runtimeClasspath: String = { - if (jars != null) { - jars.map(jar => SparkFiles.get(new File(jar).getName)).mkString(":") - } else { - "" - } - } - - // java binary - var javabin: String = "java" -} diff --git a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/ParameterServer.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/ParameterServer.scala deleted file mode 100644 index 45033d48c6ac..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/ParameterServer.scala +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
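The network-as-JSON design in MXNetParams is the notable detail: a Symbol wraps a native handle and cannot be Java-serialized out to executors, so only its JSON rendering travels. A minimal sketch, with a bare variable standing in for a real network:

```scala
import org.apache.mxnet.Symbol

val net: Symbol = Symbol.Variable("data")  // stand-in for a full network
val json = net.toJson                      // what setNetwork stores
val restored = Symbol.loadJson(json)       // what getNetwork reconstructs
```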
- */ - -package org.apache.mxnet.spark - -import java.io.{IOException, InputStream, OutputStream} -import java.util.concurrent.atomic.AtomicReference - -import org.apache.mxnet.KVStoreServer -import org.kohsuke.args4j.{Option, CmdLineParser} -import org.slf4j.{LoggerFactory, Logger} - -import scala.collection.mutable -import scala.collection.JavaConverters._ - -/** - * Start ps scheduler/server in a new process - */ -private[mxnet] object ParameterServer { - private val logger: Logger = LoggerFactory.getLogger(classOf[ParameterServer]) - def main(args: Array[String]): Unit = { - val cmdLine = new CommandLine - val parser: CmdLineParser = new CmdLineParser(cmdLine) - try { - parser.parseArgument(args.toList.asJava) - cmdLine.checkArguments() - KVStoreServer.init(buildEnv( - cmdLine.role, cmdLine.rootUri, cmdLine.rootPort, - cmdLine.numServer, cmdLine.numWorker)) - KVStoreServer.start(dieIfOthersGoOutTimeout = cmdLine.timeout) - } catch { - case e: Throwable => - logger.error(e.getMessage, e) - sys.exit(-1) - } - } - - def buildEnv(role: String, rootUri: String, rootPort: Int, - numServer: Int, numWorker: Int): Map[String, String] = { - val envs = mutable.HashMap.empty[String, String] - envs.put("DMLC_ROLE", role) - envs.put("DMLC_PS_ROOT_URI", rootUri) - envs.put("DMLC_PS_ROOT_PORT", rootPort.toString) - envs.put("DMLC_NUM_SERVER", numServer.toString) - envs.put("DMLC_NUM_WORKER", numWorker.toString) - envs.toMap - } - - private class CommandLine { - @Option(name = "--role", usage = "PS role") - val role: String = null - @Option(name = "--root-uri", usage = "PS scheduler address") - val rootUri: String = null - @Option(name = "--root-port", usage = "PS scheduler port") - val rootPort: Int = -1 - @Option(name = "--num-server", usage = "PS server number") - val numServer: Int = 1 - @Option(name = "--num-worker", usage = "PS worker number") - val numWorker: Int = 1 - @Option(name = "--timeout", usage = "PS go out timeout") - val timeout: Int = 0 - - def checkArguments(): Unit = { - require(role != null, "Undefined role") - require(rootUri != null, "Undefined root uri") - require(rootPort > 0, s"Invalid root port $rootPort") - require(numServer > 0, s"Invalid number of servers: $numServer") - require(numWorker > 0, s"Invalid number of workers: $numWorker") - } - } -} - -class ParameterServer( - classpath: String, - role: String, - rootUri: String, - rootPort: Int, - numServer: Int = 1, - numWorker: Int = 1, - timeout: Int = 0, - java: String = "java", - jvmOpts: String = "") { - - private val logger: Logger = LoggerFactory.getLogger(classOf[ParameterServer]) - private val psProcess: AtomicReference[Process] = new AtomicReference[Process] - - /** - * A utility class to redirect the child process's stdout or stderr. 
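What buildEnv hands to KVStoreServer.init is the standard DMLC process environment; a sketch with illustrative host and port (the object is private[mxnet], so this only compiles inside the org.apache.mxnet package):

```scala
import org.apache.mxnet.spark.ParameterServer

val env = ParameterServer.buildEnv(
  role = "scheduler", rootUri = "10.0.0.1", rootPort = 9091,
  numServer = 1, numWorker = 2)
// => Map(DMLC_ROLE -> scheduler, DMLC_PS_ROOT_URI -> 10.0.0.1,
//        DMLC_PS_ROOT_PORT -> 9091, DMLC_NUM_SERVER -> 1, DMLC_NUM_WORKER -> 2)
```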
- */ - private class RedirectThread( - in: InputStream, - out: OutputStream, - name: String, - propagateEof: Boolean = false) - extends Thread(name) { - - setDaemon(true) - override def run() { - val buf = new Array[Byte](1024) - var len = in.read(buf) - while (len != -1) { - out.write(buf, 0, len) - out.flush() - len = in.read(buf) - } - if (propagateEof) { - out.close() - } - } - } - - private def startLoggingThreads(rootUri: String, rootPort: Int): Unit = { - val inputStream = psProcess.get().getInputStream - val errorStream = psProcess.get().getErrorStream - logger.info(s"Starting InputStream-Redirecter Thread for $rootUri:$rootPort") - new RedirectThread(inputStream, System.out, "InputStream-Redirecter", false).start() - logger.info(s"Starting ErrorStream-Redirecter Thread for $rootUri:$rootPort") - new RedirectThread(errorStream, System.err, "ErrorStream-Redirecter", false).start() - } - - def startProcess(): Int = { - val cp = if (classpath == null) "" else s"-cp $classpath" - val cmd = s"$java $jvmOpts $cp $runningClass " + - s"--role=$role --root-uri=$rootUri --root-port=$rootPort " + - s"--num-server=$numServer --num-worker=$numWorker --timeout=$timeout" - try { - val childProcess = Runtime.getRuntime.exec(cmd) - logger.info(s"Started process: $cmd at $rootUri:$rootPort") - psProcess.set(childProcess) - startLoggingThreads(rootUri, rootPort) - psProcess.get().waitFor() - } catch { - case ioe: IOException => - ioe.printStackTrace() - 1 - } finally { - stop() - } - } - - def stop() { - if (psProcess.get != null && psProcess.get().isAlive) { - psProcess.get.destroy() - } - } - - private def runningClass: String = { - // trick to remove the last '$' - classOf[ParameterServer].getName.replace("$", "") - } -} diff --git a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/io/LabeledPointIter.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/io/LabeledPointIter.scala deleted file mode 100644 index 44d6f3345bdf..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/io/LabeledPointIter.scala +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
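startProcess above boils down to shelling out to a second JVM; a sketch of the command string it assembles, with hypothetical JVM options, classpath, host, and port:

```scala
val java = "java"
val jvmOpts = "-Xmx1g"                             // hypothetical JVM options
val cp = "-cp /opt/jars/mxnet-spark-assembly.jar"  // hypothetical classpath
val cmd = s"$java $jvmOpts $cp org.apache.mxnet.spark.ParameterServer " +
  "--role=server --root-uri=10.0.0.1 --root-port=9091 " +
  "--num-server=1 --num-worker=2 --timeout=300"
// Runtime.getRuntime.exec(cmd) then blocks in waitFor(), while the two
// RedirectThreads mirror the child's stdout/stderr into the parent's streams.
```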
- */ - -package org.apache.mxnet.spark.io - -import org.apache.mxnet.DType.DType -import org.apache.mxnet._ -import org.apache.spark.mllib.regression.LabeledPoint - -import scala.collection.immutable.ListMap -import scala.collection.mutable.ArrayBuffer - -/** - * A helper converter for LabeledPoint - */ -class LabeledPointIter private[mxnet]( - private val points: Iterator[LabeledPoint], - private val dimension: Shape, - private val _batchSize: Int, - private val dataName: String = "data", - private val labelName: String = "label") extends DataIter { - - private val cache: ArrayBuffer[DataBatch] = ArrayBuffer.empty[DataBatch] - private var index: Int = -1 - private val dataShape = Shape(_batchSize) ++ dimension - - def dispose(): Unit = { - cache.foreach(_.dispose()) - } - - /** - * reset the iterator - */ - override def reset(): Unit = { - index = -1 - } - - @throws(classOf[NoSuchElementException]) - override def next(): DataBatch = { - if (!hasNext) { - throw new NoSuchElementException("No more data") - } - index += 1 - if (index >= 0 && index < cache.size) { - cache(index) - } else { - val dataBuilder = NDArray.empty(dataShape) - val labelBuilder = NDArray.empty(_batchSize) - var instNum = 0 - while (instNum < batchSize && points.hasNext) { - val point = points.next() - val features = point.features.toArray.map(_.toFloat) - require(features.length == dimension.product, - s"Dimension mismatch: ${features.length} != $dimension") - dataBuilder.slice(instNum).set(features) - labelBuilder.slice(instNum).set(Array(point.label.toFloat)) - instNum += 1 - } - val pad = batchSize - instNum - val dataBatch = new LongLivingDataBatch( - IndexedSeq(dataBuilder), IndexedSeq(labelBuilder), null, pad) - cache += dataBatch - dataBatch - } - } - - /** - * get data of current batch - * @return the data of current batch - */ - override def getData(): IndexedSeq[NDArray] = { - if (index >= 0 && index < cache.size) { - cache(index).data - } else { - null - } - } - - /** - * Get label of current batch - * @return the label of current batch - */ - override def getLabel(): IndexedSeq[NDArray] = { - if (index >= 0 && index < cache.size) { - cache(index).label - } else { - null - } - } - - /** - * Get the index of current batch - * @return the index of current batch - */ - override def getIndex(): IndexedSeq[Long] = { - if (index >= 0 && index < cache.size) { - cache(index).index - } else { - null - } - } - - // The name and shape of label provided by this iterator - @deprecated("Use provideLabelDesc instead", "1.3.0") - override def provideLabel: ListMap[String, Shape] = { - ListMap(labelName -> Shape(_batchSize)) - } - - // The name and shape of data provided by this iterator - @deprecated("Use provideDataDesc instead", "1.3.0") - override def provideData: ListMap[String, Shape] = { - ListMap(dataName -> dataShape) - } - - override def provideDataDesc: IndexedSeq[DataDesc] = { - // TODO: need to allow user to specify DType and Layout - IndexedSeq(new DataDesc(dataName, dataShape, DType.Float32, Layout.UNDEFINED)) - } - - override def provideLabelDesc: IndexedSeq[DataDesc] = { - // TODO: need to allow user to specify DType and Layout - IndexedSeq(new DataDesc(labelName, Shape(_batchSize), DType.Float32, Layout.UNDEFINED)) - } - - /** - * Get the number of padding examples - * in current batch - * @return number of padding examples in current batch - */ - override def getPad(): Int = 0 - - override def batchSize: Int = _batchSize - - override def hasNext: Boolean = { - points.hasNext || (index < cache.size - 1 && 
cache.size > 0) - } -} diff --git a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/io/LongLivingDataBatch.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/io/LongLivingDataBatch.scala deleted file mode 100644 index abf82f6e510c..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/io/LongLivingDataBatch.scala +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.spark.io - -import org.apache.mxnet.DType.DType -import org.apache.mxnet.{DataBatch, NDArray} - -/** - * Dispose only when 'disposeForce' called - * @author Yizhi Liu - */ -class LongLivingDataBatch( - override val data: IndexedSeq[NDArray], - override val label: IndexedSeq[NDArray], - override val index: IndexedSeq[Long], - override val pad: Int) extends DataBatch(data, label, index, pad) { - override def dispose(): Unit = {} - def disposeForce(): Unit = super.dispose() -} diff --git a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/io/PointIter.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/io/PointIter.scala deleted file mode 100644 index 1ca23927e123..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/io/PointIter.scala +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
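The padding arithmetic in next() above deserves a worked example: the final partial batch is filled out to batchSize, and the filler count rides on the LongLivingDataBatch itself (the iterator's own getPad() always returns 0):

```scala
// 1000 labeled points at batchSize 128: the 8th batch has 104 real rows.
val numPoints   = 1000
val batchSize   = 128
val fullBatches = numPoints / batchSize                // 7 complete batches
val lastRows    = numPoints - fullBatches * batchSize  // 104 real rows
val pad         = batchSize - lastRows                 // 24 filler slots
assert(pad == 24)
```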
- */ - -package org.apache.mxnet.spark.io - -import org.apache.mxnet.DType.DType -import org.apache.mxnet._ -import org.apache.spark.mllib.linalg.Vector - -import scala.collection.immutable.ListMap -import scala.collection.mutable.ArrayBuffer - -/** - * A temporary helper implementation for predicting Vectors - */ -class PointIter private[mxnet]( - private val points: Iterator[Vector], - private val dimension: Shape, - private val _batchSize: Int, - private val dataName: String = "data", - private val labelName: String = "label") extends DataIter { - - private val cache: ArrayBuffer[DataBatch] = ArrayBuffer.empty[DataBatch] - private var index: Int = -1 - private val dataShape = Shape(_batchSize) ++ dimension - - def dispose(): Unit = { - cache.foreach(_.dispose()) - } - - /** - * reset the iterator - */ - override def reset(): Unit = { - index = -1 - } - - @throws(classOf[NoSuchElementException]) - override def next(): DataBatch = { - if (!hasNext) { - throw new NoSuchElementException("No more data") - } - index += 1 - if (index >= 0 && index < cache.size) { - cache(index) - } else { - val dataBuilder = NDArray.empty(dataShape) - val labelBuilder = NDArray.empty(_batchSize) - var instNum = 0 - while (instNum < batchSize && points.hasNext) { - val point = points.next().toArray.map(_.toFloat) - require(point.length == dimension.product, - s"Dimension mismatch: ${point.length} != $dimension") - dataBuilder.slice(instNum).set(point) - labelBuilder.slice(instNum).set(Array(-1f)) // fake label - instNum += 1 - } - val pad = batchSize - instNum - val dataBatch = new LongLivingDataBatch( - IndexedSeq(dataBuilder), IndexedSeq(labelBuilder), null, pad) - cache += dataBatch - dataBatch - } - } - - /** - * get data of current batch - * @return the data of current batch - */ - override def getData(): IndexedSeq[NDArray] = { - if (index >= 0 && index < cache.size) { - cache(index).data - } else { - null - } - } - - /** - * Get label of current batch - * @return the label of current batch - */ - override def getLabel(): IndexedSeq[NDArray] = { - if (index >= 0 && index < cache.size) { - cache(index).label - } else { - null - } - } - - /** - * Get the index of current batch - * @return the index of current batch - */ - override def getIndex(): IndexedSeq[Long] = { - if (index >= 0 && index < cache.size) { - cache(index).index - } else { - null - } - } - - // The name and shape of label provided by this iterator - @deprecated("Use provideLabelDesc instead", "1.3.0") - override def provideLabel: ListMap[String, Shape] = { - ListMap(labelName -> Shape(_batchSize)) - } - - // The name and shape of data provided by this iterator - @deprecated("Use provideDataDesc instead", "1.3.0") - override def provideData: ListMap[String, Shape] = { - ListMap(dataName -> dataShape) - } - - override def provideDataDesc: IndexedSeq[DataDesc] = { - // TODO: Make DType, Layout configurable - IndexedSeq(new DataDesc(dataName, dataShape, DType.Float32, Layout.UNDEFINED)) - } - - override def provideLabelDesc: IndexedSeq[DataDesc] = { - IndexedSeq(new DataDesc(labelName, Shape(_batchSize), - DType.Float32, Layout.UNDEFINED)) - } - - /** - * Get the number of padding examples - * in current batch - * @return number of padding examples in current batch - */ - override def getPad(): Int = 0 - - override def batchSize: Int = _batchSize - - override def hasNext: Boolean = { - points.hasNext || (index < cache.size - 1 && cache.size > 0) - } -} diff --git 
a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/transformer/MXNet.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/transformer/MXNet.scala deleted file mode 100644 index ca141498cf32..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/transformer/MXNet.scala +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.spark.transformer - -import java.util.UUID - -import org.apache.mxnet.spark.{MXNetModel, MXNetParams} -import org.apache.mxnet.{Context, Shape, Symbol} -import org.apache.spark.SparkContext -import org.apache.spark.ml.param.ParamMap -import org.apache.spark.ml.util.{MLReadable, MLReader, MLWritable, MLWriter} -import org.apache.spark.ml.{PredictionModel, Predictor} -import org.apache.spark.mllib.linalg.Vector -import org.apache.spark.mllib.regression.LabeledPoint -import org.apache.spark.sql.DataFrame -import org.slf4j.{Logger, LoggerFactory} - - -class MXNet extends Predictor[Vector, MXNet, MXNetModelWrap] { - - private val logger: Logger = LoggerFactory.getLogger(classOf[MXNet]) - private val p: MXNetParams = new MXNetParams - private var _featuresCol: String = _ - private var _labelCol: String = _ - - override val uid = UUID.randomUUID().toString - - override def train(dataset: DataFrame) : MXNetModelWrap = { - val lps = dataset.select(getFeaturesCol, getLabelCol).rdd - .map(row => new LabeledPoint(row.getAs[Double](getLabelCol), - row.getAs[Vector](getFeaturesCol))) - val mxNet = new org.apache.mxnet.spark.MXNet() - .setBatchSize(p.batchSize) - .setLabelName(p.labelName) - .setContext(p.context) - .setDimension(p.dimension) - .setNetwork(p.getNetwork) - .setNumEpoch(p.numEpoch) - .setNumServer(p.numServer) - .setNumWorker(p.numWorker) - .setExecutorJars(p.jars.mkString(",")) - val fitted = mxNet.fit(lps) - new MXNetModelWrap(lps.sparkContext, fitted, uid) - } - - override def copy(extra: ParamMap) : MXNet = defaultCopy(extra) - - def setBatchSize(batchSize: Int): this.type = { - p.batchSize = batchSize - this - } - - def setNumEpoch(numEpoch: Int): this.type = { - p.numEpoch = numEpoch - this - } - - def setDimension(dimension: Shape): this.type = { - p.dimension = dimension - this - } - - def setNetwork(network: Symbol): this.type = { - p.setNetwork(network) - this - } - - def setContext(ctx: Array[Context]): this.type = { - p.context = ctx - this - } - - def setNumWorker(numWorker: Int): this.type = { - p.numWorker = numWorker - this - } - - def setNumServer(numServer: Int): this.type = { - p.numServer = numServer - this - } - - def setDataName(name: String): this.type = { - p.dataName = name - this - } - - def setLabelName(name: String): this.type = { - p.labelName = name - this - } - - /** - * The application 
(including parameter scheduler & servers) - * will exit if it hasn't received a heartbeat for over timeout seconds - * @param timeout timeout in seconds (default 300) - */ - def setTimeout(timeout: Int): this.type = { - p.timeout = timeout - this - } - - /** - * These jars are required by the KVStores at runtime. - * They will be uploaded and distributed to each node automatically - * @param jars jars required by the KVStore at runtime. - */ - def setExecutorJars(jars: String): this.type = { - p.jars = jars.split(",|:") - this - } - - def setJava(java: String): this.type = { - p.javabin = java - this - } - -} - -class MXNetModelWrap(sc: SparkContext, mxNet: MXNetModel, uuid: String) - extends PredictionModel[Vector, MXNetModelWrap] with Serializable with MLWritable { - - override def copy(extra: ParamMap): MXNetModelWrap = { - copyValues(new MXNetModelWrap(sc, mxNet, uuid)).setParent(parent) - } - - override val uid: String = uuid - - override def predict(features: Vector) : Double = { - val probArrays = mxNet.predict(features) - val prob = probArrays(0) - val arr = prob.get.toArray - if (arr.length == 1) { - arr(0) - } else { - arr.indexOf(arr.max) - } - - } - - protected[MXNetModelWrap] class MXNetModelWriter(instance: MXNetModelWrap) extends MLWriter { - override protected def saveImpl(path: String): Unit = { - mxNet.save(sc, path) - } - } - - override def write: MLWriter = new MXNetModelWriter(this) - - object MXNetModelWrap extends MLReadable[MXNetModel] { - override def read: MLReader[MXNetModel] = new MXNetModelReader - override def load(path: String): MXNetModel = super.load(path) - private class MXNetModelReader extends MLReader[MXNetModel] { - override def load(path: String): MXNetModel = MXNetModel.load(sc, path) - } - } - -} diff --git a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/utils/Img2Vector.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/utils/Img2Vector.scala deleted file mode 100644 index a9ab1d732746..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/utils/Img2Vector.scala +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
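The scoring rule in MXNetModelWrap.predict above collapses to one decision: a single output is returned as a regression value, anything wider is classified by argmax. Restated as a tiny standalone function:

```scala
def toPrediction(arr: Array[Float]): Double =
  if (arr.length == 1) arr(0) else arr.indexOf(arr.max)

toPrediction(Array(0.5f))               // 0.5  (regression head)
toPrediction(Array(0.1f, 0.8f, 0.1f))   // 1.0  (argmax class index)
```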
*/ - -package org.apache.mxnet.spark.utils - -import javax.imageio.ImageIO - -import scala.collection.mutable.ArrayBuffer - -import org.apache.spark.SparkContext -import org.apache.spark.rdd.RDD -import org.apache.spark.SparkConf -import org.apache.spark.input._ -import org.apache.spark.mllib.linalg.{Vector, Vectors} - -/** - * Img2Vector tools convert an image directory into a vectorized RDD, for example: - * Images stored in hdfs://namenode:9000/user/xxx/images/ - * val sc = new SparkContext(conf) - * val imagesArrayRDD = Img2Vector.getRGBArray(sc, "hdfs://namenode:9000/user/xxx/images/") - * val imagesVectorRDD = Img2Vector.getRGBVector(sc, "hdfs://namenode:9000/user/xxx/images/") - * @author Yuance.Li - */ -object Img2Vector{ - def getImgRGB(PDS: PortableDataStream, fullcolor: Boolean): Array[Double] = { - val img = ImageIO.read(PDS.open()) - val R = ArrayBuffer[Double]() - val G = ArrayBuffer[Double]() - val B = ArrayBuffer[Double]() - val RGB = ArrayBuffer[Double]() - val w = img.getWidth - val h = img.getHeight - if (fullcolor) { - for (x <- 0 until w){ - for (y <- 0 until h) { - val color = img.getRGB(w - x - 1, y) & 0xffffff - R += (color & 0xff0000) / 65536 - G += (color & 0xff00) / 256 - B += (color & 0xff) - } - } - RGB ++= R ++= G ++= B - RGB.toArray - } else { - for (x <- 0 until w) { - for (y <- 0 until h){ - val color = img.getRGB(w - x - 1, y) & 0xffffff - R += (color & 0xff0000) / 65536 * 0.3 - G += (color & 0xff00) / 256 * 0.59 - B += (color & 0xff) * 0.11 - } - } - val grayArr = new Array[Double](w * h) - for (i <- 0 until w * h) { - grayArr(i) = R(i) + G(i) + B(i) - } - grayArr - } - } - - def getRGBArray(sc: SparkContext, path: String, fullcolor: Boolean = true): RDD[Array[Double]] = { - val rgbArray = sc.binaryFiles(path).map(_._2).map(getImgRGB(_, fullcolor)) - rgbArray - } - - def getRGBVector(sc: SparkContext, path: String, fullcolor: Boolean = true): RDD[Vector] = { - val rgbArray = sc.binaryFiles(path).map(_._2).map(getImgRGB(_, fullcolor)) - val rgbVector = rgbArray.map(x => Vectors.dense(x)) - rgbVector - } -} diff --git a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/utils/Network.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/utils/Network.scala deleted file mode 100644 index 836901f69f8f..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/utils/Network.scala +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
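The channel extraction in getImgRGB is plain bit masking; dividing by 65536 and 256 is shifting right by 16 and 8 bits. A worked pixel:

```scala
val color = 0xCCAA33 & 0xffffff
val r = (color & 0xff0000) / 65536  // 0xCC = 204
val g = (color & 0xff00) / 256      // 0xAA = 170
val b = color & 0xff                // 0x33 = 51
assert((r, g, b) == (204, 170, 51))
// The grayscale branch weights the channels 0.3 / 0.59 / 0.11 before summing.
```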
- */ - -package org.apache.mxnet.spark.utils - -import java.io.IOException -import java.net.{ServerSocket, NetworkInterface} -import java.util.regex.Pattern -import scala.collection.JavaConverters._ - -/** - * Helper functions to decide ip address / port - * @author Yizhi - */ -object Network { - private val IPADDRESS_PATTERN = Pattern.compile( - "^([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\." + - "([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\." + - "([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\." + - "([01]?\\d\\d?|2[0-4]\\d|25[0-5])$") - - def ipAddress: String = { - val interfaces = NetworkInterface.getNetworkInterfaces.asScala - val interface = interfaces.toStream.flatMap( - _.getInetAddresses.asScala.toStream.flatMap( - address => { - val ip = address.getHostAddress - Option(ip).filter(ip => !ip.startsWith("127.") && IPADDRESS_PATTERN.matcher(ip).matches()) - } - ) - ).headOption - interface.getOrElse("127.0.0.1") - } - - def availablePort: Int = { - try { - val serverSocket = new ServerSocket(0) - val port = serverSocket.getLocalPort - try { - serverSocket.close() - } catch { - case _: IOException => // do nothing - } - port - } catch { - case ex: Throwable => throw new IOException("Cannot find an available port") - } - } -} diff --git a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/utils/RepIterator.scala b/scala-package/spark/src/main/scala/org/apache/mxnet/spark/utils/RepIterator.scala deleted file mode 100644 index f95333aaef31..000000000000 --- a/scala-package/spark/src/main/scala/org/apache/mxnet/spark/utils/RepIterator.scala +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.mxnet.spark.utils - -import scala.collection.Iterator - -/** - * Repeatable Iterator useful in mapPartitions - * @author Yuance.Li - */ -class RepIterator[T](iteratorInternal: Iterator[T], repetition: Int = 1) extends Iterator[T] { - assert(repetition > 0) - var counter = repetition - 1 - var (currentIter, backupIter) = iteratorInternal.duplicate - - override def hasNext: Boolean = { - currentIter.hasNext || counter > 0 - } - - override def next(): T = { - assert(hasNext) - if(currentIter.hasNext) { - currentIter.next() - } else if (counter > 0) { - counter = counter - 1 - var iterTuple = backupIter.duplicate - currentIter = iterTuple._1 - backupIter = iterTuple._2 - currentIter.next() - } else { - throw new NoSuchElementException("No element in this collection") - } - } -} diff --git a/src/c_api/c_api_executor.cc b/src/c_api/c_api_executor.cc deleted file mode 100644 index 79806f922ea0..000000000000 --- a/src/c_api/c_api_executor.cc +++ /dev/null @@ -1,1227 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
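RepIterator earns its keep inside mapPartitions, where the partition iterator can be consumed only once; Iterator.duplicate lets it replay the stream. A usage sketch:

```scala
import org.apache.mxnet.spark.utils.RepIterator

val rep = new RepIterator(Iterator(1, 2, 3), repetition = 2)
assert(rep.toList == List(1, 2, 3, 1, 2, 3))
```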
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! - * Copyright (c) 2016 by Contributors - * \file c_api_executor.cc - * \brief C API of mxnet - */ -#include -#include -#include -#include -#include "./c_api_common.h" -#include "../executor/graph_executor.h" -#include "../common/utils.h" - -int MXExecutorPrint(ExecutorHandle handle, const char **out_str) { - Executor *exec = static_cast(handle); - MXAPIThreadLocalEntry<> *ret = MXAPIThreadLocalStore<>::Get(); - API_BEGIN(); - std::ostringstream os; - exec->Print(os); - ret->ret_str = os.str(); - *out_str = (ret->ret_str).c_str(); - API_END(); -} - -int MXExecutorFree(ExecutorHandle handle) { - API_BEGIN(); - delete static_cast(handle); - API_END(); -} - -int MXExecutorForward(ExecutorHandle handle, int is_train) { - API_BEGIN(); - Executor *exec = static_cast(handle); - exec->Forward(is_train != 0); - API_END(); -} - -int MXExecutorBackward(ExecutorHandle handle, - uint32_t len, - NDArrayHandle *head_grads) { - return MXExecutorBackwardEx(handle, len, head_grads, true); -} - -int MXExecutorBackwardEx(ExecutorHandle handle, - uint32_t len, - NDArrayHandle *head_grads, - int is_train) { - API_BEGIN(); - Executor *exec = static_cast(handle); - std::vector ndarrays; - NDArray **args_ptr = reinterpret_cast(head_grads); - for (uint32_t i = 0; i < len; ++i) { - ndarrays.push_back(*args_ptr[i]); - } - exec->Backward(ndarrays, is_train); - API_END(); -} - -int MXExecutorOutputs(ExecutorHandle handle, - uint32_t *out_size, - NDArrayHandle **out) { - MXAPIThreadLocalEntry<> *ret = MXAPIThreadLocalStore<>::Get(); - API_BEGIN(); - Executor *exec = static_cast(handle); - std::vector heads = exec->outputs(); - ret->ret_handles.resize(heads.size()); - for (size_t i = 0; i < heads.size(); ++i) { - NDArray *ptr = new NDArray(); - *ptr = heads[i]; - ret->ret_handles[i] = ptr; - } - *out_size = heads.size(); - *out = dmlc::BeginPtr(ret->ret_handles); - API_END(); -} - -int MXExecutorBind(SymbolHandle symbol_handle, - int dev_type, - int dev_id, - uint32_t len, - NDArrayHandle *in_args, - NDArrayHandle *arg_grad_store, - uint32_t *grad_req_type, - uint32_t aux_states_len, - NDArrayHandle *aux_states, - ExecutorHandle *out) { - return MXExecutorBindX(symbol_handle, - dev_type, dev_id, - 0, nullptr, nullptr, nullptr, - len, in_args, arg_grad_store, grad_req_type, - aux_states_len, aux_states, out); -} - -int MXExecutorBindX(SymbolHandle symbol_handle, - int dev_type, - int dev_id, - uint32_t num_map_keys, - const char** map_keys, - const int* map_dev_types, - const int* map_dev_ids, - uint32_t len, - NDArrayHandle *in_args, - NDArrayHandle *arg_grad_store, - uint32_t *grad_req_type, - uint32_t aux_states_len, - NDArrayHandle *aux_states, - ExecutorHandle *out) { - return MXExecutorBindEX(symbol_handle, - dev_type, dev_id, - num_map_keys, map_keys, map_dev_types, map_dev_ids, - len, in_args, arg_grad_store, 
grad_req_type, - aux_states_len, aux_states, - nullptr, out); -} - -int MXExecutorBindEX(SymbolHandle symbol_handle, - int dev_type, - int dev_id, - uint32_t num_map_keys, - const char** map_keys, - const int* map_dev_types, - const int* map_dev_ids, - uint32_t len, - NDArrayHandle *in_args, - NDArrayHandle *arg_grad_store, - uint32_t *grad_req_type, - uint32_t aux_states_len, - NDArrayHandle *aux_states, - ExecutorHandle shared_exec, - ExecutorHandle *out) { - API_BEGIN(); - nnvm::Symbol *symb = static_cast(symbol_handle); - Context ctx = Context::Create(static_cast(dev_type), dev_id); - std::map ctx_map; - for (uint32_t i = 0; i < num_map_keys; ++i) { - ctx_map[std::string(map_keys[i])] = Context::Create( - static_cast(map_dev_types[i]), map_dev_ids[i]); - } - NDArray **in_args_ptr = reinterpret_cast(in_args); - NDArray **arg_grad_ptr = reinterpret_cast(arg_grad_store); - NDArray **aux_states_ptr = reinterpret_cast(aux_states); - std::vector in_args_vec; - std::vector arg_grad_vec; - std::vector grad_req_vec; - std::vector aux_states_vec; - for (uint32_t i = 0; i < len; ++i) { - in_args_vec.push_back(*(in_args_ptr[i])); - if (arg_grad_ptr[i] == nullptr) { - arg_grad_vec.emplace_back(); - grad_req_vec.push_back(kNullOp); - } else { - arg_grad_vec.push_back(*(arg_grad_ptr[i])); - grad_req_vec.push_back(static_cast(grad_req_type[i])); - } - } - for (uint32_t i = 0; i < aux_states_len; ++i) { - aux_states_vec.push_back(*(aux_states_ptr[i])); - } - *out = Executor::Bind(*symb, ctx, ctx_map, in_args_vec, - arg_grad_vec, grad_req_vec, aux_states_vec, - reinterpret_cast(shared_exec)); - API_END(); -} - -/*! - * \brief DEPRECATED. Use MXExecutorSimpleBindEx instead. - * \param symbol_handle symbol handle - * \param dev_type default device type - * \param dev_id default device id - * \param num_g2c_keys number of group2ctx keys - * \param g2c_keys key list of group2ctx - * \param g2c_dev_types device type list of group2ctx - * \param g2c_dev_ids id list of group2ctx - * \param provided_grad_req_list_len grad_req length provided by users in front-end - * \param provided_grad_req_names grad_req names provided by users in front-end - * \param provided_grad_req_types req types provided by users in front-end - * \param num_provided_arg_shapes number of user provided in_arg and aux_state shapes - * \param provided_arg_shape_names name list of provided shapes - * \param provided_arg_shape_data provided shape data - * \param provided_arg_shape_idx provided shape data index - * \param num_provided_arg_dtypes number of user provided in_arg and axu_state dtypes - * \param provided_arg_dtype_names argument name list of provided dtypes - * \param provided_arg_dtypes data of provided dtypes - * \param num_provided_arg_stypes number of user provided in_arg and axu_state storage types - * \param provided_arg_stype_names argument name list of provided storage types - * \param provided_arg_stypes data of provided storage types - * \param num_shared_arg_names number of parameter names passed from _bind_ith_exec - * \param shared_arg_name_list parameter name list passed from _bind_ith_exec - * \param shared_buffer_len number of shared data arrays passed from _bind_ith_exec - * \param shared_buffer_name_list shared data array names passed from _bind_ith_exec - * \param shared_buffer_handle_list shared data array handles passed from _bind_ith_exec - * \param updated_shared_buffer_name_list updated shared data array names after binding - * \param updated_shared_buffer_handle_list updated shared data arrays after binding 
- * \param num_in_args number of input arguments of this sym - * \param in_args list_arguments associated with the current executor - * \param arg_grads list of gradients of in_args associated with the current executor - * \param num_aux_states number of aux states of this sym - * \param aux_states list_auxiliary_states associated with the current executor - * \param shared_exec_handle shared excutor handle passed from _bind_ith_exec - * \param out the handle of the executor to be created - */ -int MXExecutorSimpleBind(SymbolHandle symbol_handle, - int dev_type, - int dev_id, - const uint32_t num_g2c_keys, - const char** g2c_keys, - const int* g2c_dev_types, - const int* g2c_dev_ids, - const uint32_t provided_grad_req_list_len, - const char** provided_grad_req_names, - const char** provided_grad_req_types, - const uint32_t num_provided_arg_shapes, - const char** provided_arg_shape_names, - const uint32_t* provided_arg_shape_data, - const uint32_t* provided_arg_shape_idx, - const uint32_t num_provided_arg_dtypes, - const char** provided_arg_dtype_names, - const int* provided_arg_dtypes, - const uint32_t num_provided_arg_stypes, - const char** provided_arg_stype_names, - const int* provided_arg_stypes, - const uint32_t num_shared_arg_names, - const char** shared_arg_name_list, - int* shared_buffer_len, - const char** shared_buffer_name_list, - NDArrayHandle* shared_buffer_handle_list, - const char*** updated_shared_buffer_name_list, - NDArrayHandle** updated_shared_buffer_handle_list, - uint32_t* num_in_args, - NDArrayHandle** in_args, - NDArrayHandle** arg_grads, - uint32_t* num_aux_states, - NDArrayHandle** aux_states, - ExecutorHandle shared_exec_handle, - ExecutorHandle* out) { - MXAPIThreadLocalEntry<> *ret = MXAPIThreadLocalStore<>::Get(); - API_BEGIN(); - nnvm::Symbol *sym = static_cast(symbol_handle); - - // get in_arg names - std::vector in_arg_names = sym->ListInputNames(nnvm::Symbol::kReadOnlyArgs); - std::vector aux_state_names = sym->ListInputNames(nnvm::Symbol::kAuxiliaryStates); - - // attr_dict for setting up type_dict and arg/aux ctx - std::unordered_map> attr_dict; - if (nullptr == provided_arg_dtypes || nullptr != g2c_keys || nullptr == provided_arg_stypes) { - std::vector> attrs = - sym->ListAttrsRecursive(); - attr_dict.reserve(attrs.size()); - for (const auto& tp : attrs) { - attr_dict[std::get<0>(tp)][std::get<1>(tp)] = std::get<2>(tp); - } - } - - // setup arg_dtype_map - std::unordered_map arg_dtype_map; - if (nullptr == provided_arg_dtypes) { // use attr_dict - for (const auto& arg_name : in_arg_names) { - const auto it = attr_dict.find(arg_name); - if (it == attr_dict.end() || !it->second.count("__dtype__")) { - arg_dtype_map[arg_name] = mshadow::kFloat32; - } - } - } else { // use user input type_dict - // create dtype map for in_args and aux_states - arg_dtype_map.reserve(num_provided_arg_dtypes); - for (uint32_t i = 0; i < num_provided_arg_dtypes; ++i) { - arg_dtype_map[provided_arg_dtype_names[i]] = provided_arg_dtypes[i]; - } - } - - // setup arg_stype_map - std::unordered_map arg_stype_map; - if (nullptr == provided_arg_stypes) { // use attr_dict - for (const auto& arg_name : in_arg_names) { - const auto it = attr_dict.find(arg_name); - if (it == attr_dict.end() || !it->second.count("__storage_type__")) { - arg_stype_map[arg_name] = kDefaultStorage; - } - } - } else { // use user input type_dict - // create stype map for in_args and aux_states - arg_stype_map.reserve(num_provided_arg_stypes); - for (uint32_t i = 0; i < num_provided_arg_stypes; ++i) { - 
arg_stype_map[provided_arg_stype_names[i]] = provided_arg_stypes[i]; - } - } - - // create default ctx - Context ctx = Context::Create(static_cast(dev_type), dev_id); - // create ctx map - std::map ctx_map; - std::vector in_arg_ctx_vec(in_arg_names.size(), ctx); - std::vector aux_state_ctx_vec(aux_state_names.size(), ctx); - if (nullptr != g2c_keys) { // use user input group2ctx dict - for (uint32_t i = 0; i < num_g2c_keys; ++i) { - ctx_map[g2c_keys[i]] = Context::Create( - static_cast(g2c_dev_types[i]), g2c_dev_ids[i]); - } - - // initialize in_arg_ctx_vec using group2ctx if there are any - for (size_t i = 0; i < in_arg_ctx_vec.size(); ++i) { - const auto it1 = attr_dict.find(in_arg_names[i]); - if (it1 != attr_dict.end()) { - const auto it2 = it1->second.find("__ctx_group__"); - if (it2 != it1->second.end()) { - const auto it3 = ctx_map.find(it2->second); - if (it3 != ctx_map.end()) { - in_arg_ctx_vec[i] = it3->second; - } - } - } - } - - // initialize aux_state_ctx_vec using group2ctx if there are any - for (size_t i = 0; i < aux_state_ctx_vec.size(); ++i) { - const auto it1 = attr_dict.find(aux_state_names[i]); - if (it1 != attr_dict.end()) { - const auto it2 = it1->second.find("__ctx_group__"); - if (it2 != it1->second.end()) { - const auto it3 = ctx_map.find(it2->second); - if (it3 != ctx_map.end()) { - aux_state_ctx_vec[i] = it3->second; - } - } - } - } - } - - // create provided_grad_req_map - const std::map req_map = - {{"null", kNullOp}, {"write", kWriteTo}, {"add", kAddTo}}; - std::unordered_map provided_grad_req_map; - std::string grad_req_type; - if (0 == provided_grad_req_list_len - && nullptr == provided_grad_req_names - && nullptr != provided_grad_req_types) { // string, grad_req='write' - CHECK_EQ(req_map.count(provided_grad_req_types[0]), 1U) - << "grad_req=" << provided_grad_req_types[0] << " is not a valid input in simple_bind; " - "only \'null\', \'write\', and \'add\' are supported"; - grad_req_type = "string"; - } else if (provided_grad_req_list_len > 0 - && nullptr == provided_grad_req_names - && nullptr != provided_grad_req_types) { // list, grad_req=['null', 'write'] - grad_req_type = "list"; - CHECK_EQ(provided_grad_req_list_len, in_arg_names.size()) - << "The length of grad_req list does not match the number of input arguments in simple_bind, " - "expected " << in_arg_names.size() << ", provided " << provided_grad_req_list_len; - } else if (provided_grad_req_list_len > 0 - && nullptr != provided_grad_req_names - && nullptr != provided_grad_req_types) { // dict, grad_req=['lhs': 'null', 'rhs': 'write'] - grad_req_type = "dict"; - provided_grad_req_map.reserve(provided_grad_req_list_len); - for (uint32_t i = 0; i < provided_grad_req_list_len; ++i) { - CHECK_EQ(req_map.count(provided_grad_req_types[i]), 1U) - << "grad_req=" << provided_grad_req_types[i] << " is not a valid input in simple_bind; " - "only \'null\', \'write\', and \'add\' are supported"; - provided_grad_req_map[provided_grad_req_names[i]] = provided_grad_req_types[i]; - } - } else { // grad_req is None - grad_req_type = "none"; - } - - // initialize arg_grad_ctx_vec and grad_req_type_vec - std::vector arg_grad_ctx_vec(in_arg_names.size(), ctx); - std::vector grad_req_type_vec(in_arg_names.size(), kNullOp); - if ("none" != grad_req_type) { - for (size_t i = 0; i < in_arg_names.size(); ++i) { - OpReqType cur_req = kNullOp; - if ("string" == grad_req_type) { - cur_req = req_map.at(provided_grad_req_types[0]); - } else if ("list" == grad_req_type) { - CHECK_EQ(req_map.count(provided_grad_req_types[i]), 
1U) - << "grad_req=" << provided_grad_req_types[i] << " is not a valid input in simple_bind; " - "only \'null\', \'write\', and \'add\' are supported"; - cur_req = req_map.at(provided_grad_req_types[i]); - } else if ("dict" == grad_req_type) { - const auto it = provided_grad_req_map.find(in_arg_names[i]); - if (it != provided_grad_req_map.end()) { - cur_req = req_map.at(it->second); - } - } - if (kNullOp != cur_req) { - arg_grad_ctx_vec[i] = in_arg_ctx_vec[i]; - grad_req_type_vec[i] = static_cast(cur_req); - } - } - } - - // create shape map for in_args and aux_states - std::unordered_map arg_shape_map(num_provided_arg_shapes); - for (uint32_t i = 0; i < num_provided_arg_shapes; ++i) { - auto p = arg_shape_map.emplace(provided_arg_shape_names[i], - mxnet::TShape(provided_arg_shape_data+provided_arg_shape_idx[i], - provided_arg_shape_data+provided_arg_shape_idx[i+1])); - CHECK(p.second) << "Duplicate shapes are provided for argument " - << provided_arg_shape_names[i] << " in simple_bind"; - } - if (!Imperative::Get()->is_np_shape()) { - for (auto &kv : arg_shape_map) { - common::ConvertToNumpyShape(&kv.second); - } - } - - // create para name set for sharing data array memory - std::unordered_set shared_arg_name_set(num_shared_arg_names); - for (uint32_t i = 0; i < num_shared_arg_names; ++i) { - shared_arg_name_set.insert(shared_arg_name_list[i]); - } - - // create shared_buffer_map - std::unordered_map shared_buffer_map; - bool use_shared_buffer = (*shared_buffer_len >= 0); - if (*shared_buffer_len > 0) { - // create shared_buffer_map - shared_buffer_map.reserve(*shared_buffer_len); - NDArray** shared_buffer_ptrs = - reinterpret_cast(shared_buffer_handle_list); - for (int i = 0; i < *shared_buffer_len; ++i) { - shared_buffer_map[shared_buffer_name_list[i]] = *(shared_buffer_ptrs[i]); - } - } - - // create temporary place holders for the initialized NDArrays - // to be passed back to front end - std::vector in_arg_vec; - std::vector arg_grad_vec; - std::vector aux_state_vec; - *out = Executor::SimpleBind(*sym, ctx, ctx_map, in_arg_ctx_vec, arg_grad_ctx_vec, - aux_state_ctx_vec, arg_shape_map, arg_dtype_map, arg_stype_map, - grad_req_type_vec, shared_arg_name_set, &in_arg_vec, - &arg_grad_vec, &aux_state_vec, - use_shared_buffer ? 
&shared_buffer_map : nullptr, - reinterpret_cast(shared_exec_handle)); - - // copy ndarray ptrs to ret->handles so that front end - // can access them - ret->ret_handles.clear(); - ret->ret_handles.reserve(in_arg_vec.size()+arg_grad_vec.size()+aux_state_vec.size() - +shared_buffer_map.size()); - size_t nd_idx = 0; - for (const auto& nd : in_arg_vec) { - if (nd.is_none()) { - LOG(FATAL) << "Input argument NDArray cannot be un-allocated"; - } - ret->ret_handles.push_back(new NDArray(nd)); - } - if (in_arg_vec.size() > 0) { - *num_in_args = in_arg_vec.size(); - *in_args = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - - for (const auto& nd : arg_grad_vec) { - if (nd.is_none()) { - ret->ret_handles.push_back(nullptr); - } else { - ret->ret_handles.push_back(new NDArray(nd)); - } - } - if (arg_grad_vec.size() > 0) { - *arg_grads = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - - for (const auto& nd : aux_state_vec) { - if (nd.is_none()) { - LOG(FATAL) << "Auxiliary argument NDArray cannot be un-allocated"; - } - ret->ret_handles.push_back(new NDArray(nd)); - } - if (aux_state_vec.size() > 0) { - *num_aux_states = aux_state_vec.size(); - *aux_states = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - - if (use_shared_buffer) { - ret->ret_vec_str.clear(); - ret->ret_vec_str.reserve(shared_buffer_map.size()); - ret->ret_vec_charp.clear(); - ret->ret_vec_charp.reserve(shared_buffer_map.size()); - for (const auto& kv : shared_buffer_map) { - if (kv.second.is_none()) { - LOG(FATAL) << "Shared data NDArray cannot be un-allocated"; - } - ret->ret_handles.push_back(new NDArray(kv.second)); - ret->ret_vec_str.emplace_back(kv.first); - ret->ret_vec_charp.push_back(ret->ret_vec_str.back().c_str()); - } - *shared_buffer_len = shared_buffer_map.size(); - *updated_shared_buffer_handle_list = &(ret->ret_handles[nd_idx]); - *updated_shared_buffer_name_list = &(ret->ret_vec_charp[0]); - } - - API_END(); -} - - -namespace mxnet { - -template -int _SimpleBindImpl(SymbolHandle symbol_handle, - int dev_type, - int dev_id, - const uint32_t num_g2c_keys, - const char** g2c_keys, - const int* g2c_dev_types, - const int* g2c_dev_ids, - const uint32_t provided_grad_req_list_len, - const char** provided_grad_req_names, - const char** provided_grad_req_types, - const uint32_t num_provided_arg_shapes, - const char** provided_arg_shape_names, - const DType* provided_arg_shape_data, - const uint32_t* provided_arg_shape_idx, - const uint32_t num_provided_arg_dtypes, - const char** provided_arg_dtype_names, - const int* provided_arg_dtypes, - const uint32_t num_provided_arg_stypes, - const char** provided_arg_stype_names, - const int* provided_arg_stypes, - const uint32_t num_shared_arg_names, - const char** shared_arg_name_list, - int* shared_buffer_len, - const char** shared_buffer_name_list, - NDArrayHandle* shared_buffer_handle_list, - const char*** updated_shared_buffer_name_list, - NDArrayHandle** updated_shared_buffer_handle_list, - uint32_t* num_in_args, - NDArrayHandle** in_args, - NDArrayHandle** arg_grads, - uint32_t* num_aux_states, - NDArrayHandle** aux_states, - ExecutorHandle shared_exec_handle, - ExecutorHandle* out) { - MXAPIThreadLocalEntry<> *ret = MXAPIThreadLocalStore<>::Get(); - API_BEGIN(); - nnvm::Symbol *sym = static_cast(symbol_handle); - - // get in_arg names - std::vector in_arg_names = sym->ListInputNames(nnvm::Symbol::kReadOnlyArgs); - std::vector aux_state_names = sym->ListInputNames(nnvm::Symbol::kAuxiliaryStates); - - // 
attr_dict for setting up type_dict and arg/aux ctx - std::unordered_map> attr_dict; - if (nullptr == provided_arg_dtypes || nullptr != g2c_keys || nullptr == provided_arg_stypes) { - std::vector> attrs = - sym->ListAttrsRecursive(); - attr_dict.reserve(attrs.size()); - for (const auto& tp : attrs) { - attr_dict[std::get<0>(tp)][std::get<1>(tp)] = std::get<2>(tp); - } - } - - // setup arg_dtype_map - std::unordered_map arg_dtype_map; - if (nullptr == provided_arg_dtypes) { // use attr_dict - for (const auto& arg_name : in_arg_names) { - const auto it = attr_dict.find(arg_name); - if (it == attr_dict.end() || !it->second.count("__dtype__")) { - arg_dtype_map[arg_name] = mshadow::kFloat32; - } - } - } else { // use user input type_dict - // create dtype map for in_args and aux_states - arg_dtype_map.reserve(num_provided_arg_dtypes); - for (uint32_t i = 0; i < num_provided_arg_dtypes; ++i) { - arg_dtype_map[provided_arg_dtype_names[i]] = provided_arg_dtypes[i]; - } - } - - // setup arg_stype_map - std::unordered_map arg_stype_map; - if (nullptr == provided_arg_stypes) { // use attr_dict - for (const auto& arg_name : in_arg_names) { - const auto it = attr_dict.find(arg_name); - if (it == attr_dict.end() || !it->second.count("__storage_type__")) { - arg_stype_map[arg_name] = kDefaultStorage; - } - } - } else { // use user input type_dict - // create stype map for in_args and aux_states - arg_stype_map.reserve(num_provided_arg_stypes); - for (uint32_t i = 0; i < num_provided_arg_stypes; ++i) { - arg_stype_map[provided_arg_stype_names[i]] = provided_arg_stypes[i]; - } - } - - // create default ctx - Context ctx = Context::Create(static_cast(dev_type), dev_id); - // create ctx map - std::map ctx_map; - std::vector in_arg_ctx_vec(in_arg_names.size(), ctx); - std::vector aux_state_ctx_vec(aux_state_names.size(), ctx); - if (nullptr != g2c_keys) { // use user input group2ctx dict - for (uint32_t i = 0; i < num_g2c_keys; ++i) { - ctx_map[g2c_keys[i]] = Context::Create( - static_cast(g2c_dev_types[i]), g2c_dev_ids[i]); - } - - // initialize in_arg_ctx_vec using group2ctx if there are any - for (size_t i = 0; i < in_arg_ctx_vec.size(); ++i) { - const auto it1 = attr_dict.find(in_arg_names[i]); - if (it1 != attr_dict.end()) { - const auto it2 = it1->second.find("__ctx_group__"); - if (it2 != it1->second.end()) { - const auto it3 = ctx_map.find(it2->second); - if (it3 != ctx_map.end()) { - in_arg_ctx_vec[i] = it3->second; - } - } - } - } - - // initialize aux_state_ctx_vec using group2ctx if there are any - for (size_t i = 0; i < aux_state_ctx_vec.size(); ++i) { - const auto it1 = attr_dict.find(aux_state_names[i]); - if (it1 != attr_dict.end()) { - const auto it2 = it1->second.find("__ctx_group__"); - if (it2 != it1->second.end()) { - const auto it3 = ctx_map.find(it2->second); - if (it3 != ctx_map.end()) { - aux_state_ctx_vec[i] = it3->second; - } - } - } - } - } - - // create provided_grad_req_map - const std::map req_map = - {{"null", kNullOp}, {"write", kWriteTo}, {"add", kAddTo}}; - std::unordered_map provided_grad_req_map; - std::string grad_req_type; - if (0 == provided_grad_req_list_len - && nullptr == provided_grad_req_names - && nullptr != provided_grad_req_types) { // string, grad_req='write' - CHECK_EQ(req_map.count(provided_grad_req_types[0]), 1U) - << "grad_req=" << provided_grad_req_types[0] << " is not a valid input in simple_bind; " - "only \'null\', \'write\', and \'add\' are supported"; - grad_req_type = "string"; - } else if (provided_grad_req_list_len > 0 - && nullptr == 
provided_grad_req_names - && nullptr != provided_grad_req_types) { // list, grad_req=['null', 'write'] - grad_req_type = "list"; - CHECK_EQ(provided_grad_req_list_len, in_arg_names.size()) - << "The length of grad_req list does not match the number of input arguments in simple_bind, " - "expected " << in_arg_names.size() << ", provided " << provided_grad_req_list_len; - } else if (provided_grad_req_list_len > 0 - && nullptr != provided_grad_req_names - && nullptr != provided_grad_req_types) { // dict, grad_req={'lhs': 'null', 'rhs': 'write'} - grad_req_type = "dict"; - provided_grad_req_map.reserve(provided_grad_req_list_len); - for (uint32_t i = 0; i < provided_grad_req_list_len; ++i) { - CHECK_EQ(req_map.count(provided_grad_req_types[i]), 1U) - << "grad_req=" << provided_grad_req_types[i] << " is not a valid input in simple_bind; " - "only \'null\', \'write\', and \'add\' are supported"; - provided_grad_req_map[provided_grad_req_names[i]] = provided_grad_req_types[i]; - } - } else { // grad_req is None - grad_req_type = "none"; - } - - // initialize arg_grad_ctx_vec and grad_req_type_vec - std::vector<Context> arg_grad_ctx_vec(in_arg_names.size(), ctx); - std::vector<OpReqType> grad_req_type_vec(in_arg_names.size(), kNullOp); - if ("none" != grad_req_type) { - for (size_t i = 0; i < in_arg_names.size(); ++i) { - OpReqType cur_req = kNullOp; - if ("string" == grad_req_type) { - cur_req = req_map.at(provided_grad_req_types[0]); - } else if ("list" == grad_req_type) { - CHECK_EQ(req_map.count(provided_grad_req_types[i]), 1U) - << "grad_req=" << provided_grad_req_types[i] << " is not a valid input in simple_bind; " - "only \'null\', \'write\', and \'add\' are supported"; - cur_req = req_map.at(provided_grad_req_types[i]); - } else if ("dict" == grad_req_type) { - const auto it = provided_grad_req_map.find(in_arg_names[i]); - if (it != provided_grad_req_map.end()) { - cur_req = req_map.at(it->second); - } - } - if (kNullOp != cur_req) { - arg_grad_ctx_vec[i] = in_arg_ctx_vec[i]; - grad_req_type_vec[i] = static_cast<OpReqType>(cur_req); - } - } - } - - // create shape map for in_args and aux_states - std::unordered_map<std::string, mxnet::TShape> arg_shape_map(num_provided_arg_shapes); - for (uint32_t i = 0; i < num_provided_arg_shapes; ++i) { - auto p = arg_shape_map.emplace(provided_arg_shape_names[i], - mxnet::TShape(provided_arg_shape_data+provided_arg_shape_idx[i], - provided_arg_shape_data+provided_arg_shape_idx[i+1])); - CHECK(p.second) << "Duplicate shapes are provided for argument " - << provided_arg_shape_names[i] << " in simple_bind"; - } - if (!Imperative::Get()->is_np_shape()) { - for (auto &kv : arg_shape_map) { - common::ConvertToNumpyShape(&kv.second); - } - } - - // create param name set for sharing data array memory - std::unordered_set<std::string> shared_arg_name_set(num_shared_arg_names); - for (uint32_t i = 0; i < num_shared_arg_names; ++i) { - shared_arg_name_set.insert(shared_arg_name_list[i]); - } - - // create shared_buffer_map - std::unordered_map<std::string, NDArray> shared_buffer_map; - bool use_shared_buffer = (*shared_buffer_len >= 0); - if (*shared_buffer_len > 0) { - // create shared_buffer_map - shared_buffer_map.reserve(*shared_buffer_len); - NDArray** shared_buffer_ptrs = - reinterpret_cast<NDArray**>(shared_buffer_handle_list); - for (int i = 0; i < *shared_buffer_len; ++i) { - shared_buffer_map[shared_buffer_name_list[i]] = *(shared_buffer_ptrs[i]); - } - } - - // create temporary place holders for the initialized NDArrays - // to be passed back to front end - std::vector<NDArray> in_arg_vec; - std::vector<NDArray> arg_grad_vec; - std::vector<NDArray> aux_state_vec; - *out = 
Executor::SimpleBind(*sym, ctx, ctx_map, in_arg_ctx_vec, arg_grad_ctx_vec, - aux_state_ctx_vec, arg_shape_map, arg_dtype_map, arg_stype_map, - grad_req_type_vec, shared_arg_name_set, &in_arg_vec, - &arg_grad_vec, &aux_state_vec, - use_shared_buffer ? &shared_buffer_map : nullptr, - reinterpret_cast<Executor*>(shared_exec_handle)); - // copy ndarray ptrs to ret->handles so that front end - // can access them - ret->ret_handles.clear(); - ret->ret_handles.reserve(in_arg_vec.size()+arg_grad_vec.size()+aux_state_vec.size() - +shared_buffer_map.size()); - size_t nd_idx = 0; - for (const auto& nd : in_arg_vec) { - if (nd.is_none()) { - LOG(FATAL) << "Input argument NDArray cannot be un-allocated"; - } - ret->ret_handles.push_back(new NDArray(nd)); - } - if (in_arg_vec.size() > 0) { - *num_in_args = in_arg_vec.size(); - *in_args = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - - for (const auto& nd : arg_grad_vec) { - if (nd.is_none()) { - ret->ret_handles.push_back(nullptr); - } else { - ret->ret_handles.push_back(new NDArray(nd)); - } - } - if (arg_grad_vec.size() > 0) { - *arg_grads = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - - for (const auto& nd : aux_state_vec) { - if (nd.is_none()) { - LOG(FATAL) << "Auxiliary argument NDArray cannot be un-allocated"; - } - ret->ret_handles.push_back(new NDArray(nd)); - } - if (aux_state_vec.size() > 0) { - *num_aux_states = aux_state_vec.size(); - *aux_states = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - - if (use_shared_buffer) { - ret->ret_vec_str.clear(); - ret->ret_vec_str.reserve(shared_buffer_map.size()); - ret->ret_vec_charp.clear(); - ret->ret_vec_charp.reserve(shared_buffer_map.size()); - for (const auto& kv : shared_buffer_map) { - if (kv.second.is_none()) { - LOG(FATAL) << "Shared data NDArray cannot be un-allocated"; - } - ret->ret_handles.push_back(new NDArray(kv.second)); - ret->ret_vec_str.emplace_back(kv.first); - ret->ret_vec_charp.push_back(ret->ret_vec_str.back().c_str()); - } - *shared_buffer_len = shared_buffer_map.size(); - *updated_shared_buffer_handle_list = &(ret->ret_handles.at(nd_idx)); - *updated_shared_buffer_name_list = &(ret->ret_vec_charp[0]); - } - - API_END(); -} - -} // namespace mxnet - - -/*! 
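The templated _SimpleBindImpl above receives every per-argument shape through a flattened CSR-style pair of arrays (provided_arg_shape_data plus provided_arg_shape_idx) and decodes them with the loop TShape(data + idx[i], data + idx[i+1]). The following is a minimal, MXNet-free sketch of that encoding convention only; the argument names and shape values are made-up examples, and std::vector<int> stands in for mxnet::TShape.

    // Standalone C++17 sketch of the CSR-style shape encoding used by the
    // simple_bind C entry points: all shapes are flattened into one data
    // array, and idx holds N+1 offsets marking where each shape begins/ends.
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    int main() {
      // Two hypothetical arguments: data -> (32, 3, 224, 224), fc_weight -> (10, 512).
      const char* names[] = {"data", "fc_weight"};
      std::vector<int> shape_data = {32, 3, 224, 224, 10, 512};
      std::vector<uint32_t> shape_idx = {0, 4, 6};  // N+1 offsets into shape_data

      // Mirrors the decoding loop in _SimpleBindImpl:
      //   TShape(data + idx[i], data + idx[i+1])
      std::unordered_map<std::string, std::vector<int>> arg_shape_map;
      for (uint32_t i = 0; i < 2; ++i) {
        arg_shape_map.emplace(names[i],
                              std::vector<int>(shape_data.begin() + shape_idx[i],
                                               shape_data.begin() + shape_idx[i + 1]));
      }
      for (const auto& kv : arg_shape_map) {
        std::cout << kv.first << ": ndim=" << kv.second.size() << '\n';
      }
      return 0;
    }

The same encoding is what lets a single DType template parameter switch the whole entry point between 32-bit and 64-bit shape data, as the Ex/Ex64 wrappers below do.
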
- * \brief Executor for simple_bind - * when USE_INT64_TENSOR_SIZE = OFF - * \param symbol_handle symbol handle - * \param dev_type default device type - * \param dev_id default device id - * \param num_g2c_keys number of group2ctx keys - * \param g2c_keys key list of group2ctx - * \param g2c_dev_types device type list of group2ctx - * \param g2c_dev_ids id list of group2ctx - * \param provided_grad_req_list_len grad_req length provided by users in front-end - * \param provided_grad_req_names grad_req names provided by users in front-end - * \param provided_grad_req_types req types provided by users in front-end - * \param num_provided_arg_shapes number of user provided in_arg and aux_state shapes - * \param provided_arg_shape_names name list of provided shapes - * \param provided_arg_shape_data provided shape data - * \param provided_arg_shape_idx provided shape data index - * \param num_provided_arg_dtypes number of user provided in_arg and aux_state dtypes - * \param provided_arg_dtype_names argument name list of provided dtypes - * \param provided_arg_dtypes data of provided dtypes - * \param num_provided_arg_stypes number of user provided in_arg and aux_state storage types - * \param provided_arg_stype_names argument name list of provided storage types - * \param provided_arg_stypes data of provided storage types - * \param num_shared_arg_names number of parameter names passed from _bind_ith_exec - * \param shared_arg_name_list parameter name list passed from _bind_ith_exec - * \param shared_buffer_len number of shared data arrays passed from _bind_ith_exec - * \param shared_buffer_name_list shared data array names passed from _bind_ith_exec - * \param shared_buffer_handle_list shared data array handles passed from _bind_ith_exec - * \param updated_shared_buffer_name_list updated shared data array names after binding - * \param updated_shared_buffer_handle_list updated shared data arrays after binding - * \param num_in_args number of input arguments of this sym - * \param in_args list_arguments associated with the current executor - * \param arg_grads list of gradients of in_args associated with the current executor - * \param num_aux_states number of aux states of this sym - * \param aux_states list_auxiliary_states associated with the current executor - * \param shared_exec_handle shared executor handle passed from _bind_ith_exec - * \param out the handle of the executor to be created - */ -int MXExecutorSimpleBindEx(SymbolHandle symbol_handle, - int dev_type, - int dev_id, - const uint32_t num_g2c_keys, - const char** g2c_keys, - const int* g2c_dev_types, - const int* g2c_dev_ids, - const uint32_t provided_grad_req_list_len, - const char** provided_grad_req_names, - const char** provided_grad_req_types, - const uint32_t num_provided_arg_shapes, - const char** provided_arg_shape_names, - const int* provided_arg_shape_data, - const uint32_t* provided_arg_shape_idx, - const uint32_t num_provided_arg_dtypes, - const char** provided_arg_dtype_names, - const int* provided_arg_dtypes, - const uint32_t num_provided_arg_stypes, - const char** provided_arg_stype_names, - const int* provided_arg_stypes, - const uint32_t num_shared_arg_names, - const char** shared_arg_name_list, - int* shared_buffer_len, - const char** shared_buffer_name_list, - NDArrayHandle* shared_buffer_handle_list, - const char*** updated_shared_buffer_name_list, - NDArrayHandle** updated_shared_buffer_handle_list, - uint32_t* num_in_args, - NDArrayHandle** in_args, - NDArrayHandle** arg_grads, - uint32_t* num_aux_states, - 
NDArrayHandle** aux_states, - ExecutorHandle shared_exec_handle, - ExecutorHandle* out) { - return mxnet::_SimpleBindImpl(symbol_handle, - dev_type, dev_id, - num_g2c_keys, g2c_keys, g2c_dev_types, g2c_dev_ids, - provided_grad_req_list_len, provided_grad_req_names, - provided_grad_req_types, - num_provided_arg_shapes, provided_arg_shape_names, - provided_arg_shape_data, provided_arg_shape_idx, - num_provided_arg_dtypes, provided_arg_dtype_names, provided_arg_dtypes, - num_provided_arg_stypes, provided_arg_stype_names, provided_arg_stypes, - num_shared_arg_names, shared_arg_name_list, - shared_buffer_len, shared_buffer_name_list, - shared_buffer_handle_list, updated_shared_buffer_name_list, - updated_shared_buffer_handle_list, - num_in_args, in_args, arg_grads, - num_aux_states, aux_states, - shared_exec_handle, out); -} - - -// TODO(ChaiBapchya): add API doc for rest of C APIs for int64 -/*! - * \brief Large tensor specific implementation for simple_bind executor - * when USE_INT64_TENSOR_SIZE = ON - * \param symbol_handle symbol handle - * \param dev_type default device type - * \param dev_id default device id - * \param num_g2c_keys number of group2ctx keys - * \param g2c_keys key list of group2ctx - * \param g2c_dev_types device type list of group2ctx - * \param g2c_dev_ids id list of group2ctx - * \param provided_grad_req_list_len grad_req length provided by users in front-end - * \param provided_grad_req_names grad_req names provided by users in front-end - * \param provided_grad_req_types req types provided by users in front-end - * \param num_provided_arg_shapes number of user provided in_arg and aux_state shapes - * \param provided_arg_shape_names name list of provided shapes - * \param provided_arg_shape_data provided shape data - * \param provided_arg_shape_idx provided shape data index - * \param num_provided_arg_dtypes number of user provided in_arg and aux_state dtypes - * \param provided_arg_dtype_names argument name list of provided dtypes - * \param provided_arg_dtypes data of provided dtypes - * \param num_provided_arg_stypes number of user provided in_arg and aux_state storage types - * \param provided_arg_stype_names argument name list of provided storage types - * \param provided_arg_stypes data of provided storage types - * \param num_shared_arg_names number of parameter names passed from _bind_ith_exec - * \param shared_arg_name_list parameter name list passed from _bind_ith_exec - * \param shared_buffer_len number of shared data arrays passed from _bind_ith_exec - * \param shared_buffer_name_list shared data array names passed from _bind_ith_exec - * \param shared_buffer_handle_list shared data array handles passed from _bind_ith_exec - * \param updated_shared_buffer_name_list updated shared data array names after binding - * \param updated_shared_buffer_handle_list updated shared data arrays after binding - * \param num_in_args number of input arguments of this sym - * \param in_args list_arguments associated with the current executor - * \param arg_grads list of gradients of in_args associated with the current executor - * \param num_aux_states number of aux states of this sym - * \param aux_states list_auxiliary_states associated with the current executor - * \param shared_exec_handle shared executor handle passed from _bind_ith_exec - * \param out the handle of the executor to be created - */ -int MXExecutorSimpleBindEx64(SymbolHandle symbol_handle, - int dev_type, - int dev_id, - const uint32_t num_g2c_keys, - const char** g2c_keys, - const int* g2c_dev_types, - 
const int* g2c_dev_ids, - const uint32_t provided_grad_req_list_len, - const char** provided_grad_req_names, - const char** provided_grad_req_types, - const uint32_t num_provided_arg_shapes, - const char** provided_arg_shape_names, - const int64_t* provided_arg_shape_data, - const uint32_t* provided_arg_shape_idx, - const uint32_t num_provided_arg_dtypes, - const char** provided_arg_dtype_names, - const int* provided_arg_dtypes, - const uint32_t num_provided_arg_stypes, - const char** provided_arg_stype_names, - const int* provided_arg_stypes, - const uint32_t num_shared_arg_names, - const char** shared_arg_name_list, - int* shared_buffer_len, - const char** shared_buffer_name_list, - NDArrayHandle* shared_buffer_handle_list, - const char*** updated_shared_buffer_name_list, - NDArrayHandle** updated_shared_buffer_handle_list, - uint32_t* num_in_args, - NDArrayHandle** in_args, - NDArrayHandle** arg_grads, - uint32_t* num_aux_states, - NDArrayHandle** aux_states, - ExecutorHandle shared_exec_handle, - ExecutorHandle* out) { - return mxnet::_SimpleBindImpl(symbol_handle, - dev_type, dev_id, - num_g2c_keys, g2c_keys, g2c_dev_types, g2c_dev_ids, - provided_grad_req_list_len, provided_grad_req_names, - provided_grad_req_types, - num_provided_arg_shapes, provided_arg_shape_names, - provided_arg_shape_data, provided_arg_shape_idx, - num_provided_arg_dtypes, provided_arg_dtype_names, provided_arg_dtypes, - num_provided_arg_stypes, provided_arg_stype_names, provided_arg_stypes, - num_shared_arg_names, shared_arg_name_list, - shared_buffer_len, shared_buffer_name_list, - shared_buffer_handle_list, updated_shared_buffer_name_list, - updated_shared_buffer_handle_list, - num_in_args, in_args, arg_grads, - num_aux_states, aux_states, - shared_exec_handle, out); -} - - -int MXExecutorReshape(int partial_shaping, - int allow_up_sizing, - int dev_type, - int dev_id, - uint32_t num_map_keys, - const char** map_keys, - const int* map_dev_types, - const int* map_dev_ids, - const uint32_t num_provided_arg_shapes, - const char** provided_arg_shape_names, - const uint32_t* provided_arg_shape_data, - const uint32_t* provided_arg_shape_idx, - uint32_t* num_in_args, - NDArrayHandle** in_args, - NDArrayHandle** arg_grads, - uint32_t* num_aux_states, - NDArrayHandle** aux_states, - ExecutorHandle shared_exec, - ExecutorHandle *out) { - Executor* new_exec = nullptr; - - MXAPIThreadLocalEntry<> *ret = MXAPIThreadLocalStore<>::Get(); - API_BEGIN(); - *out = nullptr; // ensure we can know whether to free executor on early abort - // create shape map for in_args and aux_states - std::unordered_map kwargs(num_provided_arg_shapes); - for (uint32_t i = 0; i < num_provided_arg_shapes; ++i) { - auto p = kwargs.emplace(provided_arg_shape_names[i], - mxnet::TShape(provided_arg_shape_data+provided_arg_shape_idx[i], - provided_arg_shape_data+provided_arg_shape_idx[i+1])); - CHECK(p.second) << "Duplicate shapes are provided for argument " - << provided_arg_shape_names[i] << " in reshape of executor"; - } - - Context ctx = Context::Create(static_cast(dev_type), dev_id); - std::map ctx_map; - for (uint32_t i = 0; i < num_map_keys; ++i) { - ctx_map[std::string(map_keys[i])] = Context::Create( - static_cast(map_dev_types[i]), map_dev_ids[i]); - } - std::vector in_arg_vec; - std::vector arg_grad_vec; - std::vector aux_state_vec; - - Executor* exec = static_cast(shared_exec); - new_exec = exec->Reshape(partial_shaping, allow_up_sizing, ctx, ctx_map, kwargs, - &in_arg_vec, &arg_grad_vec, &aux_state_vec); - *out = new_exec; - - 
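Both the simple_bind entry points above and the Reshape entry points here finish with the same copy-back idiom: results are heap-copied into a thread-local scratch vector, and the C out-parameters are set to interior pointers of that vector, with nullptr marking a gradient whose grad_req was 'null'. Below is a minimal, MXNet-free sketch of just that idiom; the Handle alias, CopyBack name, and plain ints standing in for NDArray are all illustrative, and none of the real MXAPIThreadLocalStore machinery appears.

    // Sketch of the handle copy-back contract of the executor C API.
    #include <cstddef>
    #include <iostream>
    #include <vector>

    using Handle = int*;  // stand-in for NDArrayHandle

    struct ThreadLocalEntry {
      std::vector<Handle> ret_handles;  // scratch; the caller frees the new'd copies
    };

    void CopyBack(const std::vector<int>& in_args, const std::vector<int>& grads,
                  size_t* num_in_args, Handle** in_args_out, Handle** grads_out) {
      thread_local ThreadLocalEntry ret;
      ret.ret_handles.clear();
      // Reserving the full size up front matters: the out-parameters are interior
      // pointers, and a later reallocation would invalidate them.
      ret.ret_handles.reserve(in_args.size() + grads.size());
      size_t nd_idx = 0;
      for (int v : in_args) ret.ret_handles.push_back(new int(v));
      *num_in_args = in_args.size();
      *in_args_out = &ret.ret_handles[nd_idx];  // real code guards with size() > 0
      nd_idx = ret.ret_handles.size();
      for (int g : grads) {
        // grad_req == 'null' produces an unallocated entry, returned as nullptr
        ret.ret_handles.push_back(g == 0 ? nullptr : new int(g));
      }
      *grads_out = &ret.ret_handles[nd_idx];
    }

    int main() {
      size_t n = 0;
      Handle* args = nullptr;
      Handle* grads = nullptr;
      CopyBack({1, 2}, {0, 7}, &n, &args, &grads);
      std::cout << n << " args, first=" << *args[0]
                << ", grad[0] is null: " << (grads[0] == nullptr) << '\n';
      return 0;
    }

This also explains why the front end, not the C API, owns the returned NDArray handles: only the pointer slots live in the thread-local vector.
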
ret->ret_handles.clear(); - ret->ret_handles.reserve(in_arg_vec.size()+arg_grad_vec.size()+aux_state_vec.size()); - - size_t nd_idx = 0; - for (const auto& nd : in_arg_vec) { - if (nd.is_none()) { - LOG(FATAL) << "Input argument NDArray cannot be un-allocated"; - } - ret->ret_handles.push_back(new NDArray(nd)); - } - if (in_arg_vec.size() > 0) { - *num_in_args = in_arg_vec.size(); - *in_args = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - - for (const auto& nd : arg_grad_vec) { - if (nd.is_none()) { - ret->ret_handles.push_back(nullptr); - } else { - ret->ret_handles.push_back(new NDArray(nd)); - } - } - if (arg_grad_vec.size() > 0) { - *arg_grads = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - - for (const auto& nd : aux_state_vec) { - if (nd.is_none()) { - LOG(FATAL) << "Auxiliary argument NDArray cannot be un-allocated"; - } - ret->ret_handles.push_back(new NDArray(nd)); - } - if (aux_state_vec.size() > 0) { - *num_aux_states = aux_state_vec.size(); - *aux_states = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - API_END_HANDLE_ERROR(delete new_exec); -} - -int MXExecutorReshapeEx(int partial_shaping, - int allow_up_sizing, - int dev_type, - int dev_id, - uint32_t num_map_keys, - const char** map_keys, - const int* map_dev_types, - const int* map_dev_ids, - const uint32_t num_provided_arg_shapes, - const char** provided_arg_shape_names, - const int* provided_arg_shape_data, - const uint32_t* provided_arg_shape_idx, - uint32_t* num_in_args, - NDArrayHandle** in_args, - NDArrayHandle** arg_grads, - uint32_t* num_aux_states, - NDArrayHandle** aux_states, - ExecutorHandle shared_exec, - ExecutorHandle *out) { - Executor* new_exec = nullptr; - - MXAPIThreadLocalEntry<> *ret = MXAPIThreadLocalStore<>::Get(); - API_BEGIN(); - *out = nullptr; // ensure we can know whether to free executor on early abort - // create shape map for in_args and aux_states - std::unordered_map kwargs(num_provided_arg_shapes); - for (uint32_t i = 0; i < num_provided_arg_shapes; ++i) { - auto p = kwargs.emplace(provided_arg_shape_names[i], - mxnet::TShape(provided_arg_shape_data+provided_arg_shape_idx[i], - provided_arg_shape_data+provided_arg_shape_idx[i+1])); - CHECK(p.second) << "Duplicate shapes are provided for argument " - << provided_arg_shape_names[i] << " in reshape of executor"; - } - - Context ctx = Context::Create(static_cast(dev_type), dev_id); - std::map ctx_map; - for (uint32_t i = 0; i < num_map_keys; ++i) { - ctx_map[std::string(map_keys[i])] = Context::Create( - static_cast(map_dev_types[i]), map_dev_ids[i]); - } - std::vector in_arg_vec; - std::vector arg_grad_vec; - std::vector aux_state_vec; - - Executor* exec = static_cast(shared_exec); - new_exec = exec->Reshape(partial_shaping, allow_up_sizing, ctx, ctx_map, kwargs, - &in_arg_vec, &arg_grad_vec, &aux_state_vec); - *out = new_exec; - - ret->ret_handles.clear(); - ret->ret_handles.reserve(in_arg_vec.size()+arg_grad_vec.size()+aux_state_vec.size()); - - size_t nd_idx = 0; - for (const auto& nd : in_arg_vec) { - if (nd.is_none()) { - LOG(FATAL) << "Input argument NDArray cannot be un-allocated"; - } - ret->ret_handles.push_back(new NDArray(nd)); - } - if (in_arg_vec.size() > 0) { - *num_in_args = in_arg_vec.size(); - *in_args = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - - for (const auto& nd : arg_grad_vec) { - if (nd.is_none()) { - ret->ret_handles.push_back(nullptr); - } else { - ret->ret_handles.push_back(new NDArray(nd)); - } - } - if 
(arg_grad_vec.size() > 0) { - *arg_grads = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - - for (const auto& nd : aux_state_vec) { - if (nd.is_none()) { - LOG(FATAL) << "Auxiliary argument NDArray cannot be un-allocated"; - } - ret->ret_handles.push_back(new NDArray(nd)); - } - if (aux_state_vec.size() > 0) { - *num_aux_states = aux_state_vec.size(); - *aux_states = &(ret->ret_handles[nd_idx]); - nd_idx = ret->ret_handles.size(); - } - API_END_HANDLE_ERROR(delete new_exec); -} - -int MXExecutorGetOptimizedSymbol(ExecutorHandle handle, - SymbolHandle *out) { - auto s = new nnvm::Symbol(); - API_BEGIN(); - - auto exec = static_cast<Executor*>(handle); - *s = exec->GetOptimizedSymbol(); - *out = s; - - API_END_HANDLE_ERROR(delete s); -} - -int MXExecutorSetMonitorCallback(ExecutorHandle handle, - ExecutorMonitorCallback callback, - void* callback_handle) { - API_BEGIN(); - ExecutorMonitorCallback callback_temp = callback; - void* callback_handle_temp = callback_handle; - std::function<void(const char*, void*)> clbk - = [callback_temp, callback_handle_temp](const char *name, void* handle) { - callback_temp(name, handle, callback_handle_temp); - }; - Executor *exec = static_cast<Executor*>(handle); - exec->SetMonitorCallback(clbk, false); - API_END(); -} - -int MXExecutorSetMonitorCallbackEX(ExecutorHandle handle, - ExecutorMonitorCallback callback, - void* callback_handle, - bool monitor_all) { - API_BEGIN(); - ExecutorMonitorCallback callback_temp = callback; - void* callback_handle_temp = callback_handle; - std::function<void(const char*, void*)> clbk - = [callback_temp, callback_handle_temp](const char *name, void* handle) { - callback_temp(name, handle, callback_handle_temp); - }; - Executor *exec = static_cast<Executor*>(handle); - exec->SetMonitorCallback(clbk, monitor_all); - API_END(); -} diff --git a/src/c_api/c_api_symbolic.cc b/src/c_api/c_api_symbolic.cc index 74455388d0e8..963a2d52ad3e 100644 --- a/src/c_api/c_api_symbolic.cc +++ b/src/c_api/c_api_symbolic.cc @@ -32,7 +32,7 @@ #include "./c_api_common.h" #include "../common/exec_utils.h" #include "../operator/operator_common.h" -#include "../executor/exec_pass.h" +#include "../imperative/exec_pass.h" #include "../operator/subgraph/subgraph_property.h" namespace mxnet { @@ -703,7 +703,7 @@ inline void SymbolInferShape(const char** keys, } /*! - * \brief Executor for Symbol Shape Inference + * \brief Symbol shape inference * This api is available when MXNet is built with flag * USE_INT64_TENSOR_SIZE=0 (by default) * \param sym symbol handle diff --git a/src/c_api/c_predict_api.cc b/src/c_api/c_predict_api.cc deleted file mode 100644 index efbe63207a71..000000000000 --- a/src/c_api/c_predict_api.cc +++ /dev/null @@ -1,680 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! 
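The two monitor-callback setters just above show a small but easy-to-miss pattern: a C function pointer plus an opaque user handle are captured by value into a std::function, so the C++ executor never needs to know about the C ABI. A self-contained sketch of that bridging follows; the Engine struct and SetMonitor name are hypothetical stand-ins for the executor.

    // MXNet-free sketch of C-callback bridging via std::function capture.
    #include <functional>
    #include <iostream>

    using CMonitorCallback = void (*)(const char* name, void* array, void* user);

    struct Engine {
      std::function<void(const char*, void*)> monitor;
      void Run() {
        int fake_array = 42;
        if (monitor) monitor("fc1_output", &fake_array);  // fires per monitored value
      }
    };

    void SetMonitor(Engine* e, CMonitorCallback cb, void* user_handle) {
      // Capture both pointers by value, exactly like the lambda in the C API above.
      e->monitor = [cb, user_handle](const char* name, void* array) {
        cb(name, array, user_handle);
      };
    }

    int main() {
      Engine eng;
      int user_state = 7;
      SetMonitor(&eng, [](const char* name, void* array, void* user) {
        std::cout << "monitor: " << name
                  << " (user=" << *static_cast<int*>(user) << ")\n";
      }, &user_state);
      eng.Run();
      return 0;
    }

Capturing by value is what keeps the callback valid after the C API call returns; only the opaque handle's lifetime remains the caller's responsibility.
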
- * Copyright (c) 2015 by Contributors - * \file c_predict_api.cc - * \brief C predict API of mxnet - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "./c_api_common.h" -#include "../operator/operator_common.h" -#include "../executor/exec_pass.h" - -using namespace mxnet; - -// predictor interface -struct MXAPIPredictor { - // output arrays - std::vector out_arrays; - // argument arrays - std::vector arg_arrays; - // auxiliary arrays - std::vector aux_arrays; - // output shapes - mxnet::ShapeVector out_shapes; - // output types - nnvm::DTypeVector out_dtypes; - - // uint32_t buffer for output shapes - std::vector out_shapes_buffer; - // key to arguments - std::unordered_map key2arg; - // executor - std::unique_ptr exec; - // symbol - nnvm::Symbol sym; - // Context - Context ctx; -}; - -struct MXAPINDList { - std::vector keys; - mxnet::ShapeVector shapes; - std::vector shapes_buffer; - std::vector indptr; - std::vector data; -}; - -inline void _CreateExecutor(PredictorHandle pred_hnd) { - MXAPIPredictor *pred = static_cast(pred_hnd); - if (pred->exec == nullptr) { - auto sym = pred->sym; - auto ctx = pred->ctx; - auto key2arg = pred->key2arg; - auto arg_arrays = pred->arg_arrays; - auto aux_arrays = pred->aux_arrays; - std::map ctx_map; - std::vector grad_store(arg_arrays.size()); - std::vector grad_req(arg_arrays.size(), kNullOp); - pred->exec.reset(Executor::Bind(sym, ctx, ctx_map, arg_arrays, - grad_store, grad_req, aux_arrays)); - pred->out_arrays = pred->exec->outputs(); - } -} - -int _CreatePartialOut(const char* symbol_json_str, - const void* param_bytes, - int param_size, - int dev_type, int dev_id, - const uint32_t num_input_nodes, - const char** input_keys, - const uint32_t* input_shape_indptr, - const uint32_t* input_shape_data, - uint32_t num_output_nodes, - const char** output_keys, - // This is used for parallel inference. - int num_threads, - bool lazy, - const uint32_t num_provided_arg_dtypes, - const char** provided_arg_dtype_names, - const int* provided_arg_dtypes, - PredictorHandle* out) { - using nnvm::Symbol; - - API_BEGIN(); - Symbol sym; - // make sure symbols are registered - { - uint32_t outSize; - const char **outArray; - MXListAllOpNames(&outSize, &outArray); - } - // load in the symbol. 
- { - nnvm::Graph g; - g.attrs["json"] = std::make_shared(std::string(symbol_json_str)); - sym.outputs = nnvm::ApplyPass(g, "LoadLegacyJSON").outputs; - } - // looks likely to output the internal results - if (num_output_nodes != 0) { - Symbol internal = sym.GetInternals(); - std::vector all_out = internal.ListOutputNames(); - std::vector out_syms(num_output_nodes); - for (uint32_t i = 0; i < num_output_nodes; ++i) { - std::string out_key(output_keys[i]); - out_key += "_output"; - for (size_t j = 0; j < all_out.size(); ++j) { - if (all_out[j] == out_key) { - out_syms[i] = internal[j]; - break; - } - CHECK_NE(j, all_out.size() - 1) << "didn't find node name: " << out_key; - } - } - sym = nnvm::Symbol::CreateGroup(out_syms); - } - - // load the parameters - std::unordered_map arg_params, aux_params; - std::unordered_map arg_types, aux_types; - { - std::unordered_set arg_names, aux_names; - std::vector arg_names_vec = sym.ListInputNames(Symbol::kReadOnlyArgs); - std::vector aux_names_vec = sym.ListInputNames(Symbol::kAuxiliaryStates); - for (const auto &arg_name : arg_names_vec) { - arg_names.insert(arg_name); - } - for (const auto &aux_name : aux_names_vec) { - aux_names.insert(aux_name); - } - std::vector data; - std::vector names; - dmlc::MemoryFixedSizeStream fi((void*)param_bytes, param_size); // NOLINT(*) - NDArray::Load(&fi, &data, &names); - CHECK_EQ(names.size(), data.size()) - << "Invalid param file format"; - for (size_t i = 0; i < names.size(); ++i) { - if (!strncmp(names[i].c_str(), "aux:", 4)) { - std::string name(names[i].c_str() + 4); - if (aux_names.count(name) != 0) { - aux_params[name] = data[i]; - aux_types[name] = data[i].dtype(); - } - } - if (!strncmp(names[i].c_str(), "arg:", 4)) { - std::string name(names[i].c_str() + 4); - if (arg_names.count(name) != 0) { - arg_params[name] = data[i]; - arg_types[name] = data[i].dtype(); - } - } - } - - if (num_provided_arg_dtypes > 0) { - for (uint32_t i = 0; i < num_provided_arg_dtypes; ++i) { - if (aux_types.count(provided_arg_dtype_names[i]) == 0 && - arg_types.count(provided_arg_dtype_names[i]) == 0) { - arg_types[provided_arg_dtype_names[i]] = provided_arg_dtypes[i]; - } - } - } - } - - // shape inference and bind - std::unordered_map known_shape; - for (uint32_t i = 0; i < num_input_nodes; ++i) { - known_shape[std::string(input_keys[i])] = - mxnet::TShape(input_shape_data + input_shape_indptr[i], - input_shape_data + input_shape_indptr[i + 1]); - } - std::vector arg_names = sym.ListInputNames(Symbol::kReadOnlyArgs); - std::vector aux_names = sym.ListInputNames(Symbol::kAuxiliaryStates); - mxnet::ShapeVector out_shapes(sym.ListOutputNames().size()); - mxnet::ShapeVector aux_shapes(aux_names.size()); - mxnet::ShapeVector arg_shapes; - nnvm::DTypeVector result_arg_types, result_out_types, result_aux_types; - std::unordered_map key2arg; - for (size_t i = 0; i < arg_names.size(); ++i) { - std::string key = arg_names[i]; - key2arg[key] = i; - } - - try { - mxnet::ShapeVector in_shapes; - nnvm::DTypeVector in_types; - for (std::string key : sym.ListInputNames(Symbol::kAll)) { - if (known_shape.count(key) != 0) { - in_shapes.push_back(known_shape[key]); - } else { - in_shapes.emplace_back(); - } - } - - for (std::string key : sym.ListInputNames(Symbol::kAll)) { - if (arg_types.count(key) != 0) { - in_types.push_back(arg_types[key]); - } else if (aux_types.count(key) != 0) { - in_types.push_back(aux_types[key]); - } else { - // if key not in arg_types or aux_types set to FP32 - in_types.push_back(0); - } - } - nnvm::Graph g; g.outputs 
= sym.outputs; - g = mxnet::exec::InferShape(std::move(g), std::move(in_shapes), "__shape__"); - g = mxnet::exec::InferType(std::move(g), std::move(in_types), "__dtype__"); - bool infer_complete = (g.GetAttr("shape_num_unknown_nodes") == 0); - // This is tricky for AMP Use case, for example, with only weights input types - // cannot be inferred in AMP. Thus for AMP converted model type_dict will be - // required - bool infer_type_complete = (g.GetAttr("dtype_num_unknown_nodes") == 0); - CHECK(infer_complete) - << "The shape information of is not enough to get the shapes"; - CHECK(infer_type_complete) - << "The type information is not enough, please provide input arg_types " - "with provided_arg_dtype_names and provided_arg_dtypes." - "If using amalgamation python frontend you can use type_dict in Predictor API" - "to provide this information"; - CopyAttr(g.indexed_graph(), - g.GetAttr("shape"), - &arg_shapes, &out_shapes, &aux_shapes); - CopyAttr(g.indexed_graph(), - g.GetAttr("dtype"), - &result_arg_types, &result_out_types, &result_aux_types); - } catch (const mxnet::op::InferShapeError &err) { - throw dmlc::Error(err.msg); - } - - Context ctx = Context::Create(static_cast(dev_type), dev_id); - - std::vector arg_arrays, aux_arrays; - for (size_t i = 0; i < arg_shapes.size(); ++i) { - NDArray nd; - if (result_arg_types[i] != -1) { - nd = NDArray(arg_shapes[i], ctx, false, result_arg_types[i]); - } else { - nd = NDArray(arg_shapes[i], ctx); - } - if (arg_params.count(arg_names[i]) != 0) { - CopyFromTo(arg_params[arg_names[i]], &nd); - } - arg_arrays.push_back(nd); - } - - for (size_t i = 0; i < aux_shapes.size(); ++i) { - NDArray nd; - if (result_aux_types[i] != -1) { - nd = NDArray(aux_shapes[i], ctx, false, result_aux_types[i]); - } else { - nd = NDArray(aux_shapes[i], ctx); - } - if (aux_params.count(aux_names[i]) != 0) { - CopyFromTo(aux_params[aux_names[i]], &nd); - } - aux_arrays.push_back(nd); - } - - // bind - for (int i = 0; i < num_threads; i++) { - std::unique_ptr ret(new MXAPIPredictor()); - ret->sym = sym; - ret->ctx = ctx; - ret->key2arg = key2arg; - ret->arg_arrays = arg_arrays; - ret->aux_arrays = aux_arrays; - ret->out_shapes = out_shapes; - ret->out_dtypes = result_out_types; - - if (!lazy) { - std::map ctx_map; - std::vector grad_store(arg_arrays.size()); - std::vector grad_req(arg_arrays.size(), kNullOp); - ret->exec.reset(Executor::Bind(sym, ctx, ctx_map, - arg_arrays, - grad_store, grad_req, - aux_arrays)); - ret->out_arrays = ret->exec->outputs(); - } - out[i] = ret.release(); - } - API_END_HANDLE_ERROR(); -} - -int MXPredCreatePartialOut(const char* symbol_json_str, - const void* param_bytes, - int param_size, - int dev_type, int dev_id, - uint32_t num_input_nodes, - const char** input_keys, - const uint32_t* input_shape_indptr, - const uint32_t* input_shape_data, - uint32_t num_output_nodes, - const char** output_keys, - PredictorHandle* out) { - return _CreatePartialOut( - symbol_json_str, - param_bytes, - param_size, - dev_type, dev_id, - num_input_nodes, - input_keys, - input_shape_indptr, - input_shape_data, - num_output_nodes, - output_keys, - 1, - false, - 0, - nullptr, - nullptr, - out); -} - -int MXPredCreate(const char* symbol_json_str, - const void* param_bytes, - int param_size, - int dev_type, int dev_id, - uint32_t num_input_nodes, - const char** input_keys, - const uint32_t* input_shape_indptr, - const uint32_t* input_shape_data, - PredictorHandle* out) { - return _CreatePartialOut( - symbol_json_str, - param_bytes, - param_size, - dev_type, - dev_id, 
- num_input_nodes, - input_keys, - input_shape_indptr, - input_shape_data, - 0, - nullptr, - 1, - false, - 0, - nullptr, - nullptr, - out); -} - -int MXPredCreateEx(const char* symbol_json_str, - const void* param_bytes, - int param_size, - int dev_type, int dev_id, - uint32_t num_input_nodes, - const char** input_keys, - const uint32_t* input_shape_indptr, - const uint32_t* input_shape_data, - const uint32_t num_provided_arg_dtypes, - const char** provided_arg_dtype_names, - const int* provided_arg_dtypes, - PredictorHandle* out) { - return _CreatePartialOut( - symbol_json_str, - param_bytes, - param_size, - dev_type, - dev_id, - num_input_nodes, - input_keys, - input_shape_indptr, - input_shape_data, - 0, - nullptr, - 1, - false, - num_provided_arg_dtypes, - provided_arg_dtype_names, - provided_arg_dtypes, - out); -} - -int MXPredCreateMultiThread(const char* symbol_json_str, - const void* param_bytes, - int param_size, - int dev_type, int dev_id, - uint32_t num_input_nodes, - const char** input_keys, - const uint32_t* input_shape_indptr, - const uint32_t* input_shape_data, - // This is used for paralle inference. - int num_threads, - PredictorHandle* out) { - const char *type = getenv("MXNET_ENGINE_TYPE"); - std::string stype; - if (type) - stype = type; - CHECK(stype == "NaiveEngine") << "Multithread inference only works with NaiveEngine.\n" - << "Please set MXNET_ENGINE_TYPE to NaiveEngine" - << std::endl; - return _CreatePartialOut( - symbol_json_str, - param_bytes, - param_size, - dev_type, - dev_id, - num_input_nodes, - input_keys, - input_shape_indptr, - input_shape_data, - 0, - nullptr, - num_threads, - true, - 0, - nullptr, - nullptr, - out); -} - -int MXPredReshape(uint32_t num_input_nodes, - const char** input_keys, - const uint32_t* input_shape_indptr, - const uint32_t* input_shape_data, - PredictorHandle handle, - PredictorHandle* out) { - _CreateExecutor(handle); - MXAPIPredictor* p = static_cast(handle); - std::unique_ptr ret(new MXAPIPredictor()); - - API_BEGIN(); - // shape inference - std::unordered_map new_shape; - for (uint32_t i = 0; i < num_input_nodes; ++i) { - new_shape[std::string(input_keys[i])] = - mxnet::TShape(input_shape_data + input_shape_indptr[i], - input_shape_data + input_shape_indptr[i + 1]); - } - ret->sym = p->sym; - std::vector arg_names = ret->sym.ListInputNames(Symbol::kReadOnlyArgs); - std::vector aux_names = ret->sym.ListInputNames(Symbol::kAuxiliaryStates); - mxnet::ShapeVector out_shapes(ret->sym.ListOutputNames().size()); - mxnet::ShapeVector aux_shapes(aux_names.size()); - mxnet::ShapeVector arg_shapes; - ret->key2arg = p->key2arg; - - try { - mxnet::ShapeVector in_shapes; - in_shapes.reserve(arg_names.size()); - for (std::string key : ret->sym.ListInputNames(Symbol::kAll)) { - if (new_shape.count(key) != 0) { - in_shapes.push_back(new_shape[key]); - } else { - in_shapes.emplace_back(); - } - } - nnvm::Graph g; g.outputs = ret->sym.outputs; - g = mxnet::exec::InferShape(std::move(g), std::move(in_shapes), "__shape__"); - bool infer_complete = (g.GetAttr("shape_num_unknown_nodes") == 0); - CHECK(infer_complete) - << "The shape information of is not enough to get the shapes"; - CopyAttr(g.indexed_graph(), - g.GetAttr("shape"), - &arg_shapes, &out_shapes, &aux_shapes); - } catch (const mxnet::op::InferShapeError &err) { - throw dmlc::Error(err.msg); - } - - ret->arg_arrays = p->arg_arrays; - ret->ctx = p->ctx; - for (size_t i=0; i < arg_names.size(); ++i) { - mxnet::TShape newShape = arg_shapes[i]; - NDArray &arr = p->arg_arrays[i]; - if 
(new_shape.count(arg_names[i]) != 0) { - ret->arg_arrays[i].ReshapeAndAlloc(newShape); - } else { - CHECK_EQ(newShape.Size(), arr.shape().Size()) - << "arg " << arg_names[i] - << " shape has been changed, only allow to change the shape of input data."; - } - } - - for (size_t i=0; i < aux_names.size(); ++i) { - mxnet::TShape newShape = aux_shapes[i]; - NDArray &arr = p->aux_arrays[i]; - CHECK_EQ(newShape.Size(), arr.shape().Size()) - << "aux " << aux_names[i] - << " shape has been changed, only allow to change the shape of input data."; - } - ret->aux_arrays = p->aux_arrays; - - // bind - { - std::map ctx_map; - std::vector grad_store; - grad_store.reserve(ret->arg_arrays.size()); - std::vector grad_req(ret->arg_arrays.size(), kNullOp); - - ret->exec.reset(Executor::Bind(ret->sym, ret->ctx, ctx_map, - ret->arg_arrays, - grad_store, grad_req, - ret->aux_arrays, - p->exec.get())); - ret->out_shapes = out_shapes; - ret->out_arrays = ret->exec->outputs(); - ret->out_dtypes = p->out_dtypes; - } - *out = ret.release(); - API_END(); -} - -int MXPredGetOutputShape(PredictorHandle handle, - uint32_t out_index, - uint32_t** shape_data, - uint32_t* shape_ndim) { - MXAPIPredictor* p = static_cast(handle); - API_BEGIN(); - CHECK_LT(out_index, p->out_arrays.size()) - << "Index exceed number of outputs"; - - const mxnet::TShape& s = p->out_shapes[out_index]; - CHECK_GE(s.ndim(), 0); - p->out_shapes_buffer.resize(s.ndim()); - nnvm::ShapeTypeCast(s.begin(), s.end(), p->out_shapes_buffer.data()); - *shape_data = p->out_shapes_buffer.data(); - *shape_ndim = p->out_shapes[out_index].ndim(); - API_END(); -} - -int MXPredGetOutputType(PredictorHandle handle, - uint32_t out_index, - int* out_dtype) { - MXAPIPredictor* p = static_cast(handle); - API_BEGIN(); - CHECK_LT(out_index, p->out_arrays.size()) - << "Index exceed number of outputs, provided out_index should be less than " - << p->out_arrays.size(); - - const int s = p->out_dtypes[out_index]; - CHECK_GE(s, 0); - out_dtype[out_index] = s; - API_END(); -} - -int MXPredSetInput(PredictorHandle handle, - const char* key, - const float* data, - uint32_t size) { - MXAPIPredictor* p = static_cast(handle); - API_BEGIN(); - auto it = p->key2arg.find(key); - if (it == p->key2arg.end()) { - LOG(FATAL) << "cannot find input key " << key; - } - NDArray& nd = p->arg_arrays[it->second]; - nd.SyncCopyFromCPU(data, size); - API_END(); -} - -int MXPredForward(PredictorHandle handle) { - _CreateExecutor(handle); - MXAPIPredictor* p = static_cast(handle); - API_BEGIN(); - p->exec->Forward(false); - API_END(); -} - -int MXPredPartialForward(PredictorHandle handle, int step, int* step_left) { - _CreateExecutor(handle); - MXAPIPredictor* p = static_cast(handle); - API_BEGIN(); - p->exec->PartialForward(false, step, step_left); - API_END(); -} - -int MXPredGetOutput(PredictorHandle handle, - uint32_t index, - float* data, - uint32_t size) { - MXAPIPredictor* p = static_cast(handle); - API_BEGIN(); - CHECK_LT(index, p->out_arrays.size()) - << "Output index out of range"; - const NDArray& nd = p->out_arrays[index]; - nd.SyncCopyToCPU(data, size); - API_END(); -} - -int MXPredFree(PredictorHandle handle) { - API_BEGIN(); - delete static_cast(handle); - API_END(); -} - -int MXNDListCreate(const char* nd_file_bytes, - int nd_file_size, - NDListHandle *out, - uint32_t* out_length) { - MXAPINDList* ret = new MXAPINDList(); - API_BEGIN(); - std::vector arrays; - dmlc::MemoryFixedSizeStream fi((void*)nd_file_bytes, nd_file_size); // NOLINT(*) - NDArray::Load(&fi, - &(arrays), - 
&(ret->keys)); - if (ret->keys.size() == 0) { - ret->keys.resize(arrays.size()); - } - ret->indptr.push_back(0); - for (auto &array : arrays) { - mxnet::TShape shape = array.shape(); - size_t begin = ret->data.size(); - size_t size = shape.Size(); - ret->shapes.push_back(shape); - ret->data.resize(begin + size); - array.SyncCopyToCPU(dmlc::BeginPtr(ret->data) + begin, size); - ret->indptr.push_back(begin + size); - } - *out = ret; - *out_length = static_cast(arrays.size()); - API_END(); -} - -int MXNDListGet(NDListHandle handle, - uint32_t index, - const char** out_key, - const float** out_data, - const uint32_t** out_shape, - uint32_t* out_ndim) { - MXAPINDList* p = static_cast(handle); - API_BEGIN(); - CHECK_LT(index, p->shapes.size()) - << "Index out of range"; - *out_key = p->keys[index].c_str(); - *out_data = dmlc::BeginPtr(p->data) + p->indptr[index]; - const mxnet::TShape& s = p->shapes[index]; - p->shapes_buffer.resize(s.ndim()); - nnvm::ShapeTypeCast(s.begin(), s.end(), p->shapes_buffer.data()); - *out_shape = p->shapes_buffer.data(); - *out_ndim = p->shapes[index].ndim(); - API_END(); -} - -int MXPredSetMonitorCallback(PredictorHandle handle, - PredMonitorCallback callback, - void* callback_handle, - bool monitor_all) { - MXAPIPredictor* p = static_cast(handle); - API_BEGIN(); - PredMonitorCallback callback_temp = callback; - void* callback_handle_temp = callback_handle; - std::function clbk - = [callback_temp, callback_handle_temp](const char* name, void* handle) { - callback_temp(name, handle, callback_handle_temp); - }; - p->exec->SetMonitorCallback(clbk, monitor_all); - API_END(); -} - -int MXNDListFree(NDListHandle handle) { - API_BEGIN(); - delete static_cast(handle); - API_END(); -} diff --git a/src/common/exec_utils.h b/src/common/exec_utils.h index 3bd2ef3597a9..ff1c477990e5 100644 --- a/src/common/exec_utils.h +++ b/src/common/exec_utils.h @@ -31,7 +31,7 @@ #include #include #include "../common/utils.h" -#include "../executor/exec_pass.h" +#include "../imperative/exec_pass.h" namespace mxnet { namespace common { diff --git a/src/executor/graph_executor.cc b/src/executor/graph_executor.cc deleted file mode 100644 index 6519ab21acab..000000000000 --- a/src/executor/graph_executor.cc +++ /dev/null @@ -1,2160 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/*! 
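For readers who relied on the file deleted above, this is a typical lifecycle of the C predict API, reconstructed from the signatures visible in this diff. It only builds against the pre-removal mxnet/c_predict_api.h; error handling is omitted, and the symbol JSON, parameter blob, and shapes are placeholders that real callers load from files.

    // Lifecycle sketch of the removed MXPred* API (pre-removal header required).
    #include <mxnet/c_predict_api.h>
    #include <cstdio>
    #include <vector>

    int main() {
      const char* symbol_json = "...";   // placeholder: contents of model-symbol.json
      std::vector<char> params;          // placeholder: contents of model-0000.params
      const char* input_keys[] = {"data"};
      const uint32_t shape_indptr[] = {0, 4};
      const uint32_t shape_data[] = {1, 3, 224, 224};

      PredictorHandle pred = nullptr;
      MXPredCreate(symbol_json, params.data(), static_cast<int>(params.size()),
                   /*dev_type=*/1, /*dev_id=*/0,  // 1 == CPU
                   /*num_input_nodes=*/1, input_keys, shape_indptr, shape_data,
                   &pred);

      std::vector<float> image(1 * 3 * 224 * 224, 0.f);
      MXPredSetInput(pred, "data", image.data(), static_cast<uint32_t>(image.size()));
      MXPredForward(pred);

      uint32_t* out_shape = nullptr;
      uint32_t out_ndim = 0;
      MXPredGetOutputShape(pred, 0, &out_shape, &out_ndim);
      size_t out_size = 1;
      for (uint32_t i = 0; i < out_ndim; ++i) out_size *= out_shape[i];

      std::vector<float> out(out_size);
      MXPredGetOutput(pred, 0, out.data(), static_cast<uint32_t>(out.size()));
      MXPredFree(pred);
      std::printf("got %zu output values\n", out_size);
      return 0;
    }

Note how the input shapes reuse the same indptr/data encoding as the executor API, and how MXPredGetOutputShape must be queried before sizing the output buffer.
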
- * Copyright (c) 2015 by Contributors - * \file graph_executor.cc - * \brief graph executor - */ -#include -#include -#include -#include -#include -#include - -#include "./exec_pass.h" -#include "./graph_executor.h" -#include "../profiler/profiler.h" -#include "../common/utils.h" -#include "../common/exec_utils.h" -#include "../operator/subgraph/subgraph_property.h" -#include "../operator/operator_common.h" - -namespace mxnet { -namespace exec { - -using namespace mxnet::common; - -static const std::string GetDefaultSubgraphBackend() { -#if MXNET_USE_MKLDNN == 1 - return std::string("MKLDNN"); -#else - return std::string(); -#endif -} - -GraphExecutor::GraphExecutor(const nnvm::Symbol& symbol) { - log_verbose_ = dmlc::GetEnv("MXNET_EXEC_VERBOSE_LOGGING", false); - need_grad_ = false; - is_dynamic_ = false; - subgraph_property_ = dmlc::GetEnv("MXNET_SUBGRAPH_BACKEND", GetDefaultSubgraphBackend()); - if (subgraph_property_ == "NONE") { - subgraph_property_ = std::string(); - LOG(INFO) << "MXNET_SUBGRAPH_BACKEND=NONE is detected, subgraph backend is not in use"; - } - engine_ref_ = Engine::_GetSharedRef(); - symbol_ = symbol.Copy(); -} - -GraphExecutor::~GraphExecutor() { - for (auto& n : op_nodes_) { - if (n.cached_opr != nullptr) { - Engine::Get()->DeleteOperator(n.cached_opr); - } - } - // clean up seg ops - for (auto& seg : cached_seg_opr_) { - if (seg.opr != nullptr) { - Engine::Get()->DeleteOperator(seg.opr); - } - } -} - -void GraphExecutor::Forward(bool is_train) { - RunOps(is_train, 0, num_forward_nodes_); -} - -void GraphExecutor::PartialForward(bool is_train, int step, int *step_left) { - size_t sstep = static_cast(step); - if (sstep >= num_forward_nodes_) { - *step_left = 0; return; - } - RunOps(is_train, sstep, sstep + 1); - *step_left = static_cast(num_forward_nodes_ - sstep - 1); -} - -void GraphExecutor::Backward(const std::vector& head_grads, bool is_train) { - { - const auto& idx = graph_.indexed_graph(); - if (num_forward_inputs_ != idx.input_nodes().size()) { - for (size_t i = 0; i < head_grad_array_.size(); ++i) { - if (!head_grad_array_[i].is_none()) { - CHECK(i < head_grads.size() && !head_grads[i].is_none()) - << "Because the last operator is not Loss function, " - << "head_gradient is required when calling backward. 
" - << "If you are attempting to minimize the output as " - << "an objective, please modify your network and " - << "pass it through the make_loss symbol."; - const NDArray &from = head_grads[i]; - NDArray &to = head_grad_array_[i]; - if (this->is_dynamic_) { - to.WaitToRead(); - if (!shape_is_known(to.shape())) { - to.Init(from.shape()); - } - } - CopyFromTo(from, &to); - } - } - } - } - if (this->is_dynamic_) { - graph_ = InferShape(std::move(graph_), {}, ""); - mxnet::ShapeVector rshape = graph_.MoveCopyAttr("shape"); - const auto& idx = graph_.indexed_graph(); - for (size_t nid = 0; nid < idx.num_nodes(); ++nid) { - const auto& inode = idx[nid]; - if (inode.source->is_variable()) continue; - OpNode& opnode = op_nodes_[nid]; - if (opnode.skip_exec_node) continue; - for (NDArray &array : opnode.exec->in_array) { - array.WaitToRead(); - if (!shape_is_known(array.shape())) { - array.SetShapeFromChunk(); - } - } - int i = 0; - for (NDArray &array : opnode.exec->in_array) { - array.WaitToRead(); - if (!shape_is_known(array.shape())) { - array.SetShapeFromChunk(); - } - if (!shape_is_known(array.shape())) { - mxnet::TShape shape = rshape[idx.entry_id(inode.inputs[i])]; - if (shape_is_known(shape)) { - array.ReshapeAndAlloc(shape); - } - } - ++i; - } - i = 0; - for (NDArray &array : opnode.exec->out_array) { - array.WaitToRead(); - if (!shape_is_known(array.shape())) { - array.SetShapeFromChunk(); - } - if (!shape_is_known(array.shape())) { - mxnet::TShape shape = rshape[idx.entry_id(nid, i)]; - if (shape_is_known(shape)) { - array.ReshapeAndAlloc(shape); - } - } - ++i; - } - } - graph_.attrs["shape"] = std::make_shared(rshape); - } - const auto& idx = graph_.indexed_graph(); - RunOps(is_train, num_forward_nodes_, idx.num_nodes()); -} - -void GraphExecutor::Print(std::ostream &os) const { // NOLINT(*) - nnvm::Symbol s; s.outputs = graph_.outputs; - s.Print(os); - // message to be backward compatible with the memonger - size_t total_bytes = graph_.GetAttr("storage_allocated_bytes"); - os << "Total " << (total_bytes >> 20UL) <<" MB allocated\n"; - os << "Total " << 11 << " TempSpace resource requested\n"; -} - -/*! - * \brief Return the "optimized" symbol contained in the executor graph. 
- */ -nnvm::Symbol GraphExecutor::GetOptimizedSymbol() { - Symbol ret; - ret.outputs = std::vector(graph_.outputs.begin(), - graph_.outputs.begin() + num_forward_outputs_); - return ret.Copy(); -} - -void GraphExecutor::SetMonitorCallback(const MonitorCallback& callback, bool monitor_all) { - CHECK(callback) << "invalid callback"; - monitor_callback_ = callback; - monitor_all_ = monitor_all; -} - -const std::vector& GraphExecutor::outputs() const { - if (this->is_dynamic_) { - for (const NDArray &array : output_arrays_) { - array.WaitToRead(); - if (!shape_is_known(array.shape())) { - const_cast(array).SetShapeFromChunk(); - } - } - } - return output_arrays_; -} - -const std::unordered_map& GraphExecutor::in_arg_map() const { - return in_arg_map_; -} - -const std::unordered_map& GraphExecutor::arg_grad_map() const { - return arg_grad_map_; -} - -const std::unordered_map& GraphExecutor::aux_state_map() const { - return aux_state_map_; -} - -static nnvm::NodeEntry AttrHint(nnvm::NodeEntry src, nnvm::NodeEntry like) { - static const Op* id_like = Op::Get("_identity_with_attr_like_rhs"); - nnvm::ObjectPtr n = nnvm::Node::Create(); - n->attrs.op = id_like; - n->attrs.name = src.node->attrs.name + "_id"; - n->inputs = {src, like}; - return nnvm::NodeEntry{n, 0, 0}; -} - -nnvm::NodeEntry AggregateGradient(std::vector&& v) { - using nnvm::Op; - static size_t inplace_sum_cap = dmlc::GetEnv("MXNET_EXEC_INPLACE_GRAD_SUM_CAP", 8); - static const Op* ewise_plus_op = Op::Get("_grad_add"); - static const Op* ewise_sum_op = Op::Get("ElementWiseSum"); - static const Op* identity_op = Op::Get("identity"); - static const Op* zeros_op = Op::Get("_zeros"); - static const Op* zeros_like_op = Op::Get("zeros_like"); - - if (v.empty()) { - nnvm::ObjectPtr ng = nnvm::Node::Create(); - ng->attrs.op = Op::Get("_zeros_without_dtype"); - ng->attrs.name = "zeros_without_dtype"; - ng->attrs.op->attr_parser(&(ng->attrs)); - return nnvm::NodeEntry(std::move(ng), 0, 0); - } - - // remove zero in the sum. at least keep 1. - auto begin = std::remove_if(v.begin(), v.end(), [](const nnvm::NodeEntry& nodeEntry) { - CHECK(nodeEntry.node); - return nodeEntry.node->op() == zeros_op || nodeEntry.node->op() == zeros_like_op; - }); - if (begin == v.begin()) ++begin; - v.erase(begin, v.end()); - CHECK(!v.empty()); - - if (v.size() == 1) { - return std::move(v[0]); - } else { - if (v.size() < inplace_sum_cap) { - nnvm::ObjectPtr sum_node = nnvm::Node::Create(); - sum_node->attrs.op = ewise_sum_op; - sum_node->attrs.name = "sum_grad"; - sum_node->attrs.dict["num_args"] = std::to_string(v.size()); - sum_node->attrs.op->attr_parser(&(sum_node->attrs)); - sum_node->inputs = std::move(v); - return nnvm::NodeEntry(std::move(sum_node), 0, 0); - } else { - // use a stream line of plus instead - nnvm::NodeEntry ret = v[0]; - for (size_t i = 1; i < v.size(); ++i) { - // Add control flow dependency from to previous node - // This enforces the gradient sum order will be in the inverse - // order of forward traversal - // NOTE: adding control dependency can be dangerous and cause cycle in the dep. - // The curent usage is correct, because of the following invariant: - // assert: v[i-1] do not depend on v[i] - // To put in plain text: v is gradient vector that get pushed in the order - // that can generate them, which means if v[i] is not yet pushed, - // all previous gradient cannot depend on it. 
- // Note: For a symbol like the following: - // data = mx.sym.Variable('data') - // sym = data + data + data + data + data + data + data - // the node entries v passed in here are of the same node of - // op _identity_with_attr_like_rhs. We should skip adding a node - // to its own control_deps. - if (v[i-1].node != v[i].node) { - v[i].node->control_deps.push_back(ret.node); - } - - std::ostringstream os; - os << "sum_grad_" << i; - nnvm::ObjectPtr x = nnvm::Node::Create(); - x->attrs.op = ewise_plus_op; - x->attrs.name = os.str(); - x->inputs = {ret, v[i]}; - ret = nnvm::NodeEntry(std::move(x), 0, 0); - } - // identity node is used to avoid exposure of dummy plus node - // when its output get assigned to another space. - nnvm::ObjectPtr id_node = nnvm::Node::Create(); - id_node->attrs.op = identity_op; - id_node->attrs.name = "sum_grad_final"; - id_node->inputs = {ret}; - return nnvm::NodeEntry{id_node, 0, 0}; - } - } -} - - -/*! - * \brief Create the graph for backward pass. - * This is triggered by both simple_bind and bind flows. - */ -nnvm::Graph GraphExecutor::InitFullGraph(nnvm::Symbol symbol, - const std::vector& grad_req_types, - const ShapeVector& in_arg_shapes, - const nnvm::DTypeVector& in_arg_dtypes) { - using nnvm::ObjectPtr; - using nnvm::NodeEntry; - // initial information - num_forward_outputs_ = symbol.outputs.size(); - num_forward_inputs_ = symbol.ListInputs(nnvm::Symbol::kAll).size(); - - nnvm::Graph g; - g.outputs = symbol.outputs; - bool do_elim_common_expr = dmlc::GetEnv("MXNET_ELIMINATE_COMMON_EXPR", true); - if (do_elim_common_expr) - g = exec::EliminateCommonExpr(std::move(g)); - need_grad_ = false; - for (OpReqType req : grad_req_types) { - if (req != kNullOp) need_grad_ = true; - } - if (!need_grad_) return g; - for (size_t i = 0; i < g.outputs.size(); ++i) { - NodeEntry ngrad(nnvm::Node::Create(), 0, 0); - const nnvm::NodeAttrs& attrs = g.outputs[i].node->attrs; - ngrad.node->attrs.name = attrs.name + "_head_grad"; - ngrad.node->attrs.dict["__profiler_scope__"] = common::NodeAttrsGetProfilerScope(attrs); - head_grad_entry_.emplace_back(AttrHint(ngrad, g.outputs[i])); - head_grad_map_[ngrad.node.get()] = i; - } - std::vector args = symbol.ListInputs(nnvm::Symbol::kReadOnlyArgs); - std::vector xs; - for (size_t i = 0; i < grad_req_types.size(); ++i) { - if (grad_req_types[i] != kNullOp) { - xs.emplace_back(args[i]); - } - } - - std::function need_mirror = - [](const nnvm::Node& node) -> int { - if (node.is_variable()) return false; - const std::string& type = node.attrs.op->name; - if (type == "Dropout") return false; - // We follow the hidden key attribute "force_mirroring" if it is - // explicitly set. - auto iter = node.attrs.dict.find("__force_mirroring__"); - if (iter != node.attrs.dict.end()) { - bool do_mirror; - dmlc::parameter::FieldEntry e; - e.Init("__force_mirroring__", &do_mirror, do_mirror); - e.Set(&do_mirror, iter->second); - return do_mirror; - } - if (type == "Embedding") return false; - if (type == "Convolution") return false; - if (type == "FullyConnected") return false; - if (type == "Concat") return false; - return true; - }; - - std::vector zero_ops; - zero_ops.push_back(nnvm::Op::Get("zeros_like")); - zero_ops.push_back(nnvm::Op::Get("_zeros")); - - // take gradient - nnvm::Graph g_grad = nnvm::pass::MXGradient( - g, symbol.outputs, xs, head_grad_entry_, - AggregateGradient, - (dmlc::GetEnv("MXNET_BACKWARD_DO_MIRROR", 0) || - dmlc::GetEnv("MXNET_MEMORY_OPT", 0)) ? 
need_mirror : nullptr, - zero_ops, "_copy", - in_arg_shapes, in_arg_dtypes); - - CHECK_EQ(g_grad.outputs.size(), xs.size()); - for (const auto &e : g_grad.outputs) { - g.outputs.push_back(e); - } - - return g; -} - -/*! - * \brief GraphExecutor initializer for regular bind flow in which - * input arguments and gradients are provided by users. This initializer - * uses the user provided NDArrays to populate data entries of the graph. - */ -void GraphExecutor::Init(nnvm::Symbol symbol, - const Context& default_ctx, - const std::map& ctx_map, - const std::vector& in_args, - const std::vector& arg_grad_store, - const std::vector& grad_req_types, - const std::vector& aux_states, - Executor* shared_exec, - const nnvm::NodeEntryMap& feed_dict) { - // create in_arg_ctxes, arg_grad_ctxes, aux_state_ctxes - auto get_ctx1 = [](const NDArray& nd) { return nd.ctx(); }; - auto get_ctx2 = [default_ctx](const NDArray& nd) -> Context { - if (nd.is_none()) return default_ctx; - return nd.ctx(); - }; - std::vector in_arg_ctxes(in_args.size()); - std::transform(in_args.begin(), in_args.end(), in_arg_ctxes.begin(), get_ctx1); - std::vector arg_grad_ctxes(arg_grad_store.size()); - std::transform(arg_grad_store.begin(), arg_grad_store.end(), arg_grad_ctxes.begin(), get_ctx2); - std::vector aux_state_ctxes(aux_states.size()); - std::transform(aux_states.begin(), aux_states.end(), aux_state_ctxes.begin(), get_ctx1); - - // Record the shapes and data types of the input arguments in the source graph - // (i.e., the graph prior to the Gradient pass). Such information is need by - // the backward mirroring algorithm for shape and data type inference. - nnvm::Graph src; - src.outputs = symbol.outputs; - const nnvm::IndexedGraph& src_idx = src.indexed_graph(); - const std::unordered_set& src_mutable_nodes = src_idx.mutable_input_nodes(); - size_t src_arg_top = 0, src_aux_top = 0; - ShapeVector src_arg_shapes; - nnvm::DTypeVector src_arg_dtypes; - const size_t src_num_forward_inputs = symbol.ListInputs(nnvm::Symbol::kAll).size(); - - for (size_t i = 0; i < src_num_forward_inputs; ++i) { - const uint32_t nid = src_idx.input_nodes().at(i); - - if (src_mutable_nodes.count(nid)) { - CHECK_LT(src_aux_top, aux_states.size()); - src_arg_shapes.push_back(aux_states[src_aux_top].shape()); - src_arg_dtypes.push_back(aux_states[src_aux_top].dtype()); - ++src_aux_top; - } else { - CHECK_LT(src_arg_top, in_args.size()); - src_arg_shapes.push_back(in_args[src_arg_top].shape()); - src_arg_dtypes.push_back(in_args[src_arg_top].dtype()); - ++src_arg_top; - } - } - - nnvm::Graph g = InitGraph(symbol, default_ctx, ctx_map, in_arg_ctxes, - arg_grad_ctxes, aux_state_ctxes, grad_req_types, - src_arg_shapes, src_arg_dtypes); - - // create arg_shapes and arg_dtypes for shape and type inferences - const auto& idx = g.indexed_graph(); - const auto& mutable_nodes = idx.mutable_input_nodes(); - size_t arg_top = 0, aux_top = 0; - data_entry_.resize(idx.num_node_entries()); - mxnet::ShapeVector arg_shapes; - nnvm::DTypeVector arg_dtypes; - StorageTypeVector arg_stypes(idx.num_node_entries(), -1); - for (size_t i = 0; i < num_forward_inputs_; ++i) { - const uint32_t nid = idx.input_nodes().at(i); - const std::string& arg_name = idx[nid].source->attrs.name; - size_t eid = idx.entry_id(nid, 0); - if (mutable_nodes.count(nid)) { - CHECK_LT(aux_top, aux_states.size()); - data_entry_[eid] = aux_states[aux_top]; - arg_shapes.push_back(aux_states[aux_top].shape()); - arg_dtypes.push_back(aux_states[aux_top].dtype()); - arg_stypes[eid] = 
aux_states[aux_top].storage_type(); - aux_state_map_.emplace(arg_name, aux_states[aux_top]); - ++aux_top; - } else { - CHECK_LT(arg_top, in_args.size()); - data_entry_[eid] = in_args[arg_top]; - arg_shapes.push_back(in_args[arg_top].shape()); - arg_dtypes.push_back(in_args[arg_top].dtype()); - arg_stypes[eid] = in_args[arg_top].storage_type(); - in_arg_map_.emplace(arg_name, in_args[arg_top]); - if (kNullOp != grad_req_types[arg_top]) { - auto grad_oid = grad_store_.size() + num_forward_outputs_; - auto grad_eid = idx.entry_id(idx.outputs()[grad_oid]); - arg_stypes[grad_eid] = arg_grad_store[arg_top].storage_type(); - grad_store_.emplace_back(grad_req_types[arg_top], arg_grad_store[arg_top]); - arg_grad_map_.emplace(arg_name, arg_grad_store[arg_top]); - if (log_verbose_) { - LOG(INFO) << "\tassign data entry\t" << grad_eid << " as " - << common::stype_string(arg_stypes[grad_eid]) << " (grad)"; - } - } - ++arg_top; - } - if (log_verbose_) { - LOG(INFO) << "\tassign data entry\t" << eid << " as " - << common::stype_string(data_entry_[eid].storage_type()) << " (input)"; - } - } - - // expand arg_shapes and arg_dtypes to contain backward inputs - arg_shapes.resize(idx.input_nodes().size(), mxnet::TShape()); - g = InferShape(std::move(g), std::move(arg_shapes), "__shape__"); - if (g.GetAttr("shape_num_unknown_nodes") != 0U) { - this->is_dynamic_ = true; - } - - arg_dtypes.resize(idx.input_nodes().size(), -1); - g = InferType(std::move(g), std::move(arg_dtypes), "__dtype__"); - if (g.GetAttr("dtype_num_unknown_nodes") != 0U) { - HandleInferTypeError(num_forward_inputs_, g.indexed_graph(), - g.GetAttr("dtype")); - } - - g.attrs["storage_type"] = std::make_shared(std::move(arg_stypes)); - g = InferStorageType(std::move(g), StorageTypeVector(), ""); - if (g.GetAttr("storage_type_num_unknown_nodes") != 0U) { - HandleInferStorageTypeError(num_forward_inputs_, g.indexed_graph(), - g.GetAttr("storage_type")); - } - - // Initialize the rest attributes of the graph. - // This function can be called by regular bind - // operation flow as well. - FinishInitGraph(symbol, g, shared_exec, feed_dict); -} - -/*! - * \brief Initialize in_args, arg_grads, and aux_states - * and their data_entry_ of the executor. This function - * is called for regular simple_bind flow, i.e. no - * shared data arrays are provided. 
- */ -void GraphExecutor::InitArguments(const nnvm::IndexedGraph& idx, - const mxnet::ShapeVector& inferred_shapes, - const nnvm::DTypeVector& inferred_dtypes, - const StorageTypeVector& inferred_stypes, - const std::vector& in_arg_ctxes, - const std::vector& arg_grad_ctxes, - const std::vector& aux_state_ctxes, - const std::vector& grad_req_types, - std::vector* in_arg_vec, - std::vector* arg_grad_vec, - std::vector* aux_state_vec) { - // initialize in_args, arg_grads, and aux_states - // populate grad_store_ - data_entry_.resize(idx.num_node_entries()); - size_t arg_top = 0, aux_top = 0; - const auto& mutable_nodes = idx.mutable_input_nodes(); - for (size_t i = 0; i < num_forward_inputs_; ++i) { - const uint32_t nid = idx.input_nodes().at(i); - const uint32_t eid = idx.entry_id(nid, 0); - const mxnet::TShape& inferred_shape = inferred_shapes[eid]; - const int inferred_dtype = inferred_dtypes[eid]; - const NDArrayStorageType inferred_stype = (NDArrayStorageType) inferred_stypes[eid]; - const std::string& arg_name = idx[nid].source->attrs.name; - const std::string profiler_scope = common::NodeAttrsGetProfilerScope(idx[nid].source->attrs); - if (mutable_nodes.count(nid)) { // aux_states - EmplaceBackZeros(inferred_stype, inferred_shape, aux_state_ctxes[aux_top], - inferred_dtype, aux_state_vec); - aux_state_vec->back().AssignStorageInfo(profiler_scope + "aux_state:", arg_name); - data_entry_[eid] = aux_state_vec->back(); - aux_state_map_.emplace(arg_name, aux_state_vec->back()); - ++aux_top; - if (log_verbose_) { - LOG(INFO) << "\tassign aux entry\t" << eid << "\t as " - << common::stype_string(inferred_stype); - } - } else { // in_args - EmplaceBackZeros(inferred_stype, inferred_shape, in_arg_ctxes[arg_top], - inferred_dtype, in_arg_vec); - in_arg_vec->back().AssignStorageInfo(profiler_scope + "in_arg:", arg_name); - data_entry_[eid] = in_arg_vec->back(); - if (log_verbose_) { - LOG(INFO) << "\tassign data entry\t" << eid << "\tas " - << common::stype_string(inferred_stype); - } - // Get the storage type for grad - if (kNullOp == grad_req_types[arg_top]) { - arg_grad_vec->emplace_back(); - } else { - // Init based on storage type - auto grad_oid = grad_store_.size() + num_forward_outputs_; - auto grad_eid = idx.entry_id(idx.outputs()[grad_oid]); - auto grad_stype = (NDArrayStorageType) inferred_stypes[grad_eid]; - EmplaceBackZeros(grad_stype, inferred_shape, arg_grad_ctxes[arg_top], - inferred_dtype, arg_grad_vec); - arg_grad_vec->back().AssignStorageInfo(profiler_scope + "arg_grad:", arg_name); - if (log_verbose_) { - LOG(INFO) << "\tassign grad entry\t" << grad_eid << "\tas " - << common::stype_string(grad_stype); - } - grad_store_.emplace_back(grad_req_types[arg_top], arg_grad_vec->back()); - arg_grad_map_.emplace(arg_name, arg_grad_vec->back()); - } - in_arg_map_.emplace(arg_name, in_arg_vec->back()); - ++arg_top; - } - } -} - -/*! - * \brief Initialize in_args, arg_grads, and aux_states - * and their data_entry_ of the executor using - * shared_buffer from DataParallelExecutorGroup - * and shared_exec if available. 
- */ -void GraphExecutor::InitArguments(const nnvm::IndexedGraph& idx, - const mxnet::ShapeVector& inferred_shapes, - const nnvm::DTypeVector& inferred_dtypes, - const StorageTypeVector& inferred_stypes, - const std::vector<Context>& in_arg_ctxes, - const std::vector<Context>& arg_grad_ctxes, - const std::vector<Context>& aux_state_ctxes, - const std::vector<OpReqType>& grad_req_types, - const std::unordered_set<std::string>& shared_arg_names, - const Executor* shared_exec, - std::unordered_map<std::string, NDArray>* shared_buffer, - std::vector<NDArray>* in_arg_vec, - std::vector<NDArray>* arg_grad_vec, - std::vector<NDArray>* aux_state_vec) { - // initialize in_args, arg_grads, and aux_states and populate grad_store_ - data_entry_.resize(idx.num_node_entries()); - size_t arg_top = 0, aux_top = 0; - const auto& mutable_nodes = idx.mutable_input_nodes(); - for (size_t i = 0; i < num_forward_inputs_; ++i) { - const uint32_t nid = idx.input_nodes().at(i); - const uint32_t eid = idx.entry_id(nid, 0); - const mxnet::TShape& inferred_shape = inferred_shapes[eid]; - const int inferred_dtype = inferred_dtypes[eid]; - const NDArrayStorageType inferred_stype = (NDArrayStorageType) inferred_stypes[eid]; - const std::string& arg_name = idx[nid].source->attrs.name; - const std::string profiler_scope = common::NodeAttrsGetProfilerScope(idx[nid].source->attrs); - // aux_states - if (mutable_nodes.count(nid)) { - if (nullptr != shared_exec) { - const NDArray& aux_nd = shared_exec->aux_state_map().at(arg_name); - CHECK(inferred_stype == kDefaultStorage && aux_nd.storage_type() == kDefaultStorage) - << "Non-default storage type detected when creating auxiliary NDArray. The allocated " - << "memory of shared_exec.aux_array cannot be reused for argument: " - << arg_name << " for the current executor"; - CHECK_EQ(inferred_shape, aux_nd.shape()) - << "Inferred shape does not match shared_exec.aux_array's shape." - " Therefore, the allocated memory for shared_exec.aux_array cannot" - " be reused for creating the auxiliary NDArray of the argument: " - << arg_name << " for the current executor"; - CHECK_EQ(inferred_dtype, aux_nd.dtype()) - << "Inferred dtype does not match shared_exec.aux_array's dtype." - " Therefore, the allocated memory for shared_exec.aux_array cannot" - " be reused for creating the auxiliary NDArray of the argument: " - << arg_name << " for the current executor"; - aux_state_vec->emplace_back(aux_nd); - } else { - EmplaceBackZeros(inferred_stype, inferred_shape, aux_state_ctxes[aux_top], - inferred_dtype, aux_state_vec); - aux_state_vec->back().AssignStorageInfo(profiler_scope + "aux_state:", arg_name); - } // if (has_shared_exec) - data_entry_[eid] = aux_state_vec->back(); - aux_state_map_.emplace(arg_name, aux_state_vec->back()); - ++aux_top; - } else { // in_args and grad for in_args - if (shared_arg_names.count(arg_name)) { // model parameter - if (nullptr != shared_exec) { - const NDArray& in_arg_nd = shared_exec->in_arg_map().at(arg_name); - auto arg_nd_stype = in_arg_nd.storage_type(); - // for model parameters, both default storage and row_sparse storage can be shared - bool shareable_arg_stype = inferred_stype == kDefaultStorage || - inferred_stype == kRowSparseStorage; - // try to reuse memory from shared_exec - CHECK(shareable_arg_stype) << "Inferred storage type " - << common::stype_string(inferred_stype) - << " does not support memory sharing with shared_exec.arg_array"; - CHECK_EQ(inferred_stype, arg_nd_stype) - << "Inferred stype does not match shared_exec.arg_array's stype." - " Therefore, the allocated memory for shared_exec.arg_array cannot" - " be reused for creating the NDArray of the argument " - << arg_name << " for the current executor"; - CHECK_EQ(inferred_shape, in_arg_nd.shape()) - << "Inferred shape does not match shared_exec.arg_array's shape." - " Therefore, the allocated memory for shared_exec.arg_array cannot" - " be reused for creating the NDArray of the argument " - << arg_name << " for the current executor"; - CHECK_EQ(inferred_dtype, in_arg_nd.dtype()) - << "Inferred dtype does not match shared_exec.arg_array's dtype." - " Therefore, the allocated memory for shared_exec.arg_array cannot" - " be reused for creating the NDArray of the argument " - << arg_name << " for the current executor"; - in_arg_vec->emplace_back(in_arg_nd); - } else { - // doesn't have shared_exec, or non-default storage - EmplaceBackZeros(inferred_stype, inferred_shape, in_arg_ctxes[arg_top], - inferred_dtype, in_arg_vec); - in_arg_vec->back().AssignStorageInfo(profiler_scope + "in_arg:", arg_name); - } - // gradient for model parameter - if (kNullOp == grad_req_types[arg_top]) { - arg_grad_vec->emplace_back(); - } else { - auto grad_oid = grad_store_.size() + num_forward_outputs_; - auto grad_eid = idx.entry_id(idx.outputs()[grad_oid]); - auto grad_stype = (NDArrayStorageType) inferred_stypes[grad_eid]; - if (nullptr != shared_exec && grad_stype == kDefaultStorage && - shared_exec->arg_grad_map().at(arg_name).storage_type() == kDefaultStorage) { - // try to reuse memory from shared_exec - arg_grad_vec->emplace_back(shared_exec->arg_grad_map().at(arg_name)); - } else { - // no need to reuse memory from shared_exec for gradients of non-default storage - EmplaceBackZeros(grad_stype, inferred_shape, arg_grad_ctxes[arg_top], - inferred_dtype, arg_grad_vec); - arg_grad_vec->back().AssignStorageInfo(profiler_scope + "arg_grad:", arg_name); - } - grad_store_.emplace_back(grad_req_types[arg_top], arg_grad_vec->back()); - } - } else { // !shared_arg_names.count(arg_name) - // model parameter, row_sparse ndarray sharing enabled - bool enable_row_sparse_sharing = true; - in_arg_vec->emplace_back(ReshapeOrCreate(arg_name, inferred_shape, inferred_dtype, - inferred_stype,
in_arg_ctxes[arg_top], - shared_buffer, enable_row_sparse_sharing)); - in_arg_vec->back().AssignStorageInfo(profiler_scope + "in_arg:", arg_name); - // gradient for model parameter, row_sparse ndarray sharing disabled - if (kNullOp == grad_req_types[arg_top]) { - arg_grad_vec->emplace_back(); - } else { - auto grad_oid = grad_store_.size() + num_forward_outputs_; - auto grad_eid = idx.entry_id(idx.outputs()[grad_oid]); - auto grad_stype = (NDArrayStorageType) inferred_stypes[grad_eid]; - bool enable_row_sparse_sharing = false; - arg_grad_vec->emplace_back(ReshapeOrCreate("grad of " + arg_name, inferred_shape, - inferred_dtype, grad_stype, - arg_grad_ctxes[arg_top], shared_buffer, - enable_row_sparse_sharing)); - arg_grad_vec->back().AssignStorageInfo(profiler_scope + "arg_grad:", arg_name); - grad_store_.emplace_back(grad_req_types[arg_top], arg_grad_vec->back()); - } // if (kNullOp == grad_req_types[arg_top]) - } // if (shared_arg_names.count(arg_name)) - in_arg_map_.emplace(arg_name, in_arg_vec->back()); - if (!arg_grad_vec->back().is_none()) { - arg_grad_map_.emplace(arg_name, arg_grad_vec->back()); - } - data_entry_[eid] = in_arg_vec->back(); - ++arg_top; - } - } -} - -/*! - * \brief Finish graph initialization after shape and dtype inferences. - * This function is used by both simple_bind and bind flows. - */ -void GraphExecutor::FinishInitGraph(nnvm::Symbol symbol, - nnvm::Graph g, - Executor* shared_exec, - const nnvm::NodeEntryMap& feed_dict) { - const auto& idx = g.indexed_graph(); - const auto& vstorage_type = g.GetAttr("storage_type"); - - // data entries for output gradients - for (size_t j = num_forward_outputs_; j < idx.outputs().size(); ++j) { - data_entry_[idx.entry_id(idx.outputs()[j])] = grad_store_[j - num_forward_outputs_].second; - } - - { - // memory allocator - nnvm::StorageVector arg_storage_id(idx.num_node_entries(), kBadStorageID); - for (size_t j = num_forward_outputs_; j < idx.outputs().size(); ++j) { - arg_storage_id[idx.entry_id(idx.outputs()[j])] = kExternalStorageID; - } - for (const auto& kv : feed_dict) { - uint32_t eid = idx.entry_id(kv.first); - data_entry_[eid] = kv.second; - arg_storage_id[eid] = kExternalStorageID; - } - for (size_t i = 0; i < idx.num_node_entries(); i++) { - if (vstorage_type[i] != kDefaultStorage) arg_storage_id[i] = kDynamicStorageID; - } - g.attrs["storage"] = std::make_shared(std::move(arg_storage_id)); - g = nnvm::ApplyPass(g, "MXPlanMemory"); - } - g = DetectInplaceAddTo(g); - - // log the static memory plan of the graph - static bool mem_log_verbose = dmlc::GetEnv("MXNET_MEM_PLAN_VERBOSE_LOGGING", false); - if (mem_log_verbose) { - common::LogMemoryPlan(g); - } - - g = AttachOpExecs(g); - AttachOpResources(g); - graph_ = std::move(g); - - if (shared_exec != nullptr) { - this->InitDataEntryMemory(&(dynamic_cast(shared_exec)->data_pool_)); - } else { - this->InitDataEntryMemory(nullptr); - } - - { - // initialize output arrays - auto& idx = graph_.indexed_graph(); - for (size_t i = 0; i < num_forward_outputs_; ++i) { - auto& e = idx.outputs()[i]; - output_arrays_.push_back(data_entry_[idx.entry_id(e)]); - } - // initialize head gradient array - head_grad_array_.resize(symbol.outputs.size()); - for (size_t i = num_forward_inputs_; i < idx.input_nodes().size(); ++i) { - uint32_t nid = idx.input_nodes().at(i); - uint32_t oid = head_grad_map_.at(idx[nid].source); - head_grad_array_[oid] = data_entry_[idx.entry_id(nid, 0)]; - } - } - this->InitCachedOps(); - this->InitOpSegs(); -} - -/*! 
- * \brief GraphExecutor initializer for the simple bind flow in - * which only certain input shapes and dtypes are provided by users. - * The initializer uses these shapes and dtypes to perform - * shape and dtype inference, and then creates NDArrays - * to populate data entries of the graph. The created NDArrays - * for in_args, arg_grads and aux_states are passed to the - * front end to attach the created executor. - * In the front end, if the simple_bind flow is triggered by - * _bind_ith_exec, the shared data arrays of DataParallelExecutorGroup - * and the shared executor will be taken into account in creating - * NDArrays for in_args, arg_grads, and aux_states for reusing - * already-allocated memory. - */ -void GraphExecutor::Init(nnvm::Symbol symbol, - const Context& default_ctx, - const std::map<std::string, Context>& ctx_map, - const std::vector<Context>& in_arg_ctxes, - const std::vector<Context>& arg_grad_ctxes, - const std::vector<Context>& aux_state_ctxes, - const std::unordered_map<std::string, mxnet::TShape>& arg_shape_map, - const std::unordered_map<std::string, int>& arg_dtype_map, - const std::unordered_map<std::string, int>& arg_stype_map, - const std::vector<OpReqType>& grad_req_types, - const std::unordered_set<std::string>& shared_arg_names, - std::vector<NDArray>* in_arg_vec, - std::vector<NDArray>* arg_grad_vec, - std::vector<NDArray>* aux_state_vec, - std::unordered_map<std::string, NDArray>* shared_buffer, - Executor* shared_exec, - const nnvm::NodeEntryMap<NDArray>& feed_dict) { - // Record the shapes and data types of the input arguments in the source graph - // (i.e., the graph prior to the Gradient pass). Such information is needed by - // the backward mirroring algorithm for shape and data type inference. - nnvm::Graph src; - src.outputs = symbol.outputs; - const nnvm::IndexedGraph& src_idx = src.indexed_graph(); - ShapeVector src_arg_shapes(src_idx.input_nodes().size(), TShape()); - nnvm::DTypeVector src_arg_dtypes(src_idx.input_nodes().size(), -1); - const size_t src_num_forward_inputs = symbol.ListInputs(nnvm::Symbol::kAll).size(); - - for (size_t i = 0; i < src_num_forward_inputs; ++i) { - const uint32_t nid = src_idx.input_nodes().at(i); - const std::string& name = src_idx[nid].source->attrs.name; - std::unordered_map<std::string, mxnet::TShape>::const_iterator - arg_shape_iter = arg_shape_map.find(name); - std::unordered_map<std::string, int>::const_iterator - arg_dtype_iter = arg_dtype_map.find(name); - if (arg_shape_iter != arg_shape_map.end()) { - src_arg_shapes[i] = arg_shape_iter->second; - } - if (arg_dtype_iter != arg_dtype_map.end()) { - src_arg_dtypes[i] = arg_dtype_iter->second; - } - } - - nnvm::Graph g = InitGraph(symbol, default_ctx, ctx_map, in_arg_ctxes, arg_grad_ctxes, - aux_state_ctxes, grad_req_types, - src_arg_shapes, src_arg_dtypes); - - // The following shape and dtype inference and argument initialization - // code is for simple_bind only. The regular bind operation - // does this differently. - - // Initialize arg_shapes and arg_dtypes for shape and type inference. - // It contains all in_args and aux_states' shapes and types in a certain order.
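The loop that follows flattens the name-keyed attribute maps into positional vectors ordered by the graph's input nodes, leaving sentinel defaults (empty TShape, -1, kUndefinedStorage) in slots the user did not provide so inference can fill them in. A minimal standalone sketch of that flattening step, with made-up input names and plain int codes standing in for MXNet's shape/dtype types:

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

int main() {
  // Input-node order of a hypothetical graph.
  const std::vector<std::string> input_names = {"data", "fc1_weight", "fc1_bias"};
  // User-provided dtypes, keyed by name (0 standing in for float32).
  const std::unordered_map<std::string, int> arg_dtype_map = {{"data", 0}};
  // Positional vector consumed by inference; -1 marks "unknown, infer me".
  std::vector<int> arg_dtypes(input_names.size(), -1);
  for (size_t i = 0; i < input_names.size(); ++i) {
    auto it = arg_dtype_map.find(input_names[i]);
    if (it != arg_dtype_map.end()) arg_dtypes[i] = it->second;
  }
  for (size_t i = 0; i < input_names.size(); ++i) {
    std::cout << input_names[i] << " -> " << arg_dtypes[i] << "\n";
  }
  return 0;
}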
- const nnvm::IndexedGraph& idx = g.indexed_graph(); - mxnet::ShapeVector arg_shapes(idx.input_nodes().size(), mxnet::TShape()); - nnvm::DTypeVector arg_dtypes(idx.input_nodes().size(), -1); - StorageTypeVector arg_stypes(idx.input_nodes().size(), kUndefinedStorage); - for (size_t i = 0; i < num_forward_inputs_; ++i) { - const uint32_t nid = idx.input_nodes().at(i); - const std::string& name = idx[nid].source->attrs.name; - auto it1 = arg_shape_map.find(name); - if (arg_shape_map.end() != it1) { - arg_shapes[i] = it1->second; - } - auto it2 = arg_dtype_map.find(name); - if (arg_dtype_map.end() != it2) { - arg_dtypes[i] = it2->second; - } - auto it3 = arg_stype_map.find(name); - if (arg_stype_map.end() != it3) { - arg_stypes[i] = it3->second; - } - } - g = InferShape(std::move(g), std::move(arg_shapes), "__shape__"); - if (g.GetAttr("shape_num_unknown_nodes") != 0U) { - HandleInferShapeError(num_forward_inputs_, g.indexed_graph(), - g.GetAttr("shape")); - } - - g = InferType(std::move(g), std::move(arg_dtypes), "__dtype__"); - if (g.GetAttr("dtype_num_unknown_nodes") != 0U) { - HandleInferTypeError(num_forward_inputs_, g.indexed_graph(), - g.GetAttr("dtype")); - } - - g = InferStorageType(std::move(g), std::move(arg_stypes), "__storage_type__"); - if (g.GetAttr("storage_type_num_unknown_nodes") != 0U) { - HandleInferStorageTypeError(num_forward_inputs_, g.indexed_graph(), - g.GetAttr("storage_type")); - } - - // Create in_args, arg_grads, and aux_states using - // the inferred shapes and dtypes. - if (nullptr == shared_buffer) { // regular simple bind - InitArguments(idx, g.GetAttr("shape"), - g.GetAttr("dtype"), - g.GetAttr("storage_type"), - in_arg_ctxes, arg_grad_ctxes, aux_state_ctxes, - grad_req_types, in_arg_vec, arg_grad_vec, aux_state_vec); - } else { // simple bind using shared data arrays and shared_exec - InitArguments(idx, g.GetAttr("shape"), - g.GetAttr("dtype"), - g.GetAttr("storage_type"), - in_arg_ctxes, arg_grad_ctxes, aux_state_ctxes, - grad_req_types, shared_arg_names, shared_exec, - shared_buffer, in_arg_vec, arg_grad_vec, aux_state_vec); - } - // The above code of shape and dtype inferences and argument - // initialization is for simple_bind only. Regular bind operation - // should do this differently. - - // Initialize the rest attributes of the graph. - // This function can be called by regular bind - // operation flow as well. - FinishInitGraph(symbol, g, shared_exec, feed_dict); -} - -/*! - * \brief Return a new executor with the same symbol and shared memory, - * but different input/output shapes. - * For runtime reshaping, variable length sequences, etc. - * The returned executor shares state with the current one, - * and cannot be used in parallel with it. 
- */ -Executor* GraphExecutor::Reshape(const bool partial_shaping, - const bool allow_up_sizing, - const Context& default_ctx, - const std::map<std::string, Context>& ctx_map, - const std::unordered_map<std::string, mxnet::TShape>& - provided_arg_shapes, - std::vector<NDArray>* in_args, - std::vector<NDArray>* arg_grads, - std::vector<NDArray>* aux_states) { - nnvm::Graph g; - nnvm::Symbol symbol; - symbol.outputs = symbol_.outputs; - g.outputs = symbol_.outputs; - const nnvm::IndexedGraph& idx = g.indexed_graph(); - mxnet::ShapeVector arg_shapes(idx.input_nodes().size(), mxnet::TShape()); - for (size_t i = 0; i < num_forward_inputs_; ++i) { - const uint32_t nid = idx.input_nodes().at(i); - const std::string& name = idx[nid].source->attrs.name; - auto it = provided_arg_shapes.find(name); - if (provided_arg_shapes.end() != it) { - arg_shapes[i] = it->second; - } - } - g = InferShape(std::move(g), std::move(arg_shapes), "__shape__"); - if (g.GetAttr<size_t>("shape_num_unknown_nodes") != 0U) { - this->is_dynamic_ = true; - } - const mxnet::ShapeVector& shape_vec = g.GetAttr<mxnet::ShapeVector>("shape"); - std::vector<OpReqType> grad_req_types; - size_t grad_top = 0; - const size_t num_args = in_arg_map_.size(); - const size_t num_aux = aux_state_map_.size(); - in_args->reserve(num_args); - grad_req_types.reserve(num_args); - arg_grads->reserve(num_args); - aux_states->reserve(num_aux); - for (uint32_t nid : idx.input_nodes()) { - std::string name = idx[nid].source->attrs.name; - const mxnet::TShape& new_shape = shape_vec[idx.entry_id(nid, 0)]; - if (idx.mutable_input_nodes().count(nid) == 0) { - NDArray& arr = in_arg_map_.at(name); - auto it = arg_grad_map_.find(name); - if (partial_shaping || provided_arg_shapes.count(name) || new_shape == arr.shape()) { - if (new_shape.Size() > arr.shape().Size()) { - CHECK(allow_up_sizing) << "New shape of arg: " << name << " is larger than the original. " - << "First making a big executor and then down-sizing it " - << "is more efficient than the reverse. " - << "If you really want to up-size, set allow_up_sizing=True " - << "to enable allocation of new arrays."; - in_args->emplace_back(new_shape, arr.ctx(), false, arr.dtype()); - if (it != arg_grad_map_.end()) { - NDArray& darr = it->second; - arg_grads->emplace_back(new_shape, darr.ctx(), false, darr.dtype()); - grad_req_types.push_back(grad_store_.at(grad_top++).first); - } else { - arg_grads->emplace_back(); - grad_req_types.push_back(kNullOp); - } - } else { - in_args->push_back(arr.Reshape(new_shape)); - if (it != arg_grad_map_.end()) { - NDArray& darr = it->second; - arg_grads->push_back(darr.Reshape(new_shape)); - grad_req_types.push_back(grad_store_.at(grad_top++).first); - } else { - arg_grads->emplace_back(); - grad_req_types.push_back(kNullOp); - } - } - } else { - LOG(FATAL) << "Shape of unspecified arg: " << name << " changed. " - << "This can cause the new executor to not share parameters " - << "with the old one. Please check for errors in the network. " - << "If this is intended, set partial_shaping=True to suppress this error."; - } - } else { - NDArray& arr = aux_state_map_.at(name); - if (partial_shaping || new_shape == arr.shape()) { - if (new_shape.Size() > arr.shape().Size()) { - CHECK(allow_up_sizing) << "New shape of arg: " << name << " is larger than the original. " - << "First making a big executor and then down-sizing it " - << "is more efficient than the reverse. " - << "If you really want to up-size, set allow_up_sizing=True " - << "to enable allocation of new arrays."; - aux_states->emplace_back(new_shape, arr.ctx(), false, arr.dtype()); - } else { - aux_states->push_back(arr.Reshape(new_shape)); - } - } else { - LOG(FATAL) << "Shape of unspecified arg: " << name << " changed. " - << "This can cause the new executor to not share parameters " - << "with the old one. Please check for errors in the network. " - << "If this is intended, set partial_shaping=True to suppress this error."; - } - } - } - auto exec = new GraphExecutor(symbol); - exec->Init(symbol.Copy(), default_ctx, ctx_map, - *in_args, *arg_grads, grad_req_types, *aux_states, - this); - return exec; -} - -/*! - * \brief This function is triggered by both simple_bind - * and bind flows. - * Setup backward graph, create device and context - * attributes in the graph, and calculate the number - * of forward nodes. - */ -Graph GraphExecutor::InitGraph(nnvm::Symbol symbol, - const Context& default_ctx, - const std::map<std::string, Context>& ctx_map, - const std::vector<Context>& in_arg_ctxes, - const std::vector<Context>& arg_grad_ctxes, - const std::vector<Context>& aux_state_ctxes, - const std::vector<OpReqType>& grad_req_types, - const ShapeVector& in_arg_shapes, - const nnvm::DTypeVector& in_arg_dtypes) { - // setup gradient - nnvm::Graph g = InitFullGraph(symbol, grad_req_types, - in_arg_shapes, in_arg_dtypes); - -#if MXNET_USE_CUDA && MXNET_ENABLE_CUDA_RTC && !defined(_WIN32) - if (default_ctx.dev_mask() == Context::kGPU && dmlc::GetEnv("MXNET_USE_FUSION", true)) { - nnvm::Graph unoptimized_graph; - common::CopyGraph(&unoptimized_graph, g, false); - - if (common::CheckForInputNameDuplicates(unoptimized_graph.indexed_graph())) { - g.attrs["num_forward_outputs"] = std::make_shared<nnvm::any>(num_forward_outputs_); - g = FusePointwiseForward(std::move(g)); - g.attrs["num_forward_outputs"] = std::make_shared<nnvm::any>(num_forward_outputs_); - g = FusePointwiseBackward(std::move(g)); - // Check the topological order of inputs - const auto &original_inputs = unoptimized_graph.indexed_graph().input_nodes(); - const auto &new_inputs = g.indexed_graph().input_nodes(); - if (original_inputs.size() != new_inputs.size()) { - LOG(WARNING) - << "Number of inputs after fusion does not match original number of inputs. " - << "This is most probably a bug.
Disabling fusion for this run."; - g = unoptimized_graph; - } else { - for (size_t i = 0; i < new_inputs.size(); ++i) { - if (unoptimized_graph.indexed_graph()[original_inputs[i]].source->attrs.name != - g.indexed_graph()[new_inputs[i]].source->attrs.name) { - LOG(WARNING) << "Disabling fusion due to altered topological order of inputs."; - g = unoptimized_graph; - break; - } - } - } - } else { - LOG(WARNING) - << "Graph contains duplicate names for some of its inputs - fusion is NOT enabled!"; - } - } -#else - // Only warn user if MXNET_USE_FUSION env var is explicitly set - if (default_ctx.dev_mask() == Context::kGPU && dmlc::GetEnv("MXNET_USE_FUSION", false)) { - WarnFusionNotSupported(); - } -#endif // MXNET_USE_CUDA && MXNET_ENABLE_CUDA_RTC && !defined(_WIN32) - - // create "device" and "context" attrs for the graph - g = AssignContext(g, default_ctx, ctx_map, - in_arg_ctxes, - arg_grad_ctxes, - aux_state_ctxes, - grad_req_types, - num_forward_inputs_, - num_forward_outputs_); - - const auto& idx = g.indexed_graph(); - // get number of nodes used in forward pass - num_forward_nodes_ = 0; - for (size_t i = 0; i < num_forward_outputs_; ++i) { - num_forward_nodes_ = std::max( - num_forward_nodes_, static_cast(idx.outputs()[i].node_id + 1)); - } - return g; -} - -// initialize the memory of each entries -void GraphExecutor::InitDataEntryMemory(std::vector* shared_pool) { - using nnvm::DTypeVector; - using mxnet::ShapeVector; - using nnvm::StorageVector; - // get the graph - const auto& idx = graph_.indexed_graph(); - // get the storage - const auto& vdtype = graph_.GetAttr("dtype"); - const auto& vshape = graph_.GetAttr("shape"); - const auto& vstorage = graph_.GetAttr("storage_id"); - const auto& vstorage_type = graph_.GetAttr("storage_type"); - const auto& vctx = graph_.GetAttr("context"); - CHECK_EQ(idx.num_node_entries(), vshape.size()); - CHECK_EQ(idx.num_node_entries(), vdtype.size()); - CHECK_EQ(idx.num_node_entries(), vstorage.size()); - CHECK_EQ(data_entry_.size(), vshape.size()); - std::vector data_context(idx.num_node_entries()); - std::vector data_storage_type(idx.num_node_entries(), kUndefinedStorage); - std::vector data_storage_profiler_scope(idx.num_node_entries()); - std::vector data_storage_name(idx.num_node_entries()); - for (uint32_t nid = 0; nid < idx.num_nodes(); ++nid) { - const std::string profiler_scope = common::NodeAttrsGetProfilerScope(idx[nid].source->attrs); - for (uint32_t i = 0; i < idx[nid].source->num_outputs(); ++i) { - auto eid = idx.entry_id(nid, i); - data_context[eid] = vctx[nid]; - CHECK_NE(vstorage_type[eid], kUndefinedStorage); - data_storage_type[eid] = (NDArrayStorageType) vstorage_type[eid]; - data_storage_profiler_scope[eid] = profiler_scope; - data_storage_name[eid] = idx[nid].source->attrs.name; - } - } - - // information about the pool - struct PoolEntry { - Context ctx; - size_t bytes; - NDArrayStorageType stype; - std::string profiler_scope; - std::string name; - }; - std::vector pool_info; - - // assign array to head gradient - for (size_t i = num_forward_inputs_; i < idx.input_nodes().size(); ++i) { - uint32_t nid = idx.input_nodes().at(i); - uint32_t oid = head_grad_map_.at(idx[nid].source); - uint32_t eid = idx.entry_id(idx.outputs()[oid]); - NDArrayStorageType stype = (NDArrayStorageType) vstorage_type[eid]; - bool unknown_shape = !shape_is_known(vshape[eid]); - CHECK_NE(vdtype[eid], -1); - auto data_eid = idx.entry_id(nid, 0); - // initialize based on storage_type - if (stype != kDefaultStorage) { - data_entry_[data_eid] = 
NDArray(stype, vshape[eid], data_context[eid], true, vdtype[eid]); - } else if (!unknown_shape) { - data_entry_[data_eid] = NDArray(vshape[eid], data_context[eid], false, vdtype[eid]); - } else { - data_entry_[data_eid] = NDArray(data_context[eid], vdtype[eid]); - } - data_entry_[data_eid].AssignStorageInfo(data_storage_profiler_scope[data_eid], - data_storage_name[data_eid]); - if (log_verbose_) { - LOG(INFO) << "\tinit head_grad entry\t" << data_eid << "\tas " - << common::stype_string(stype); - } - } - // get maximum bytes in each pool - for (size_t i = 0; i < vshape.size(); ++i) { - if (!data_entry_[i].is_none()) continue; - size_t shape_size = 0; - if (shape_is_known(vshape[i])) { - shape_size = vshape[i].Size(); - } - size_t bytes = shape_size * mshadow::mshadow_sizeof(vdtype[i]); - int storage_id = vstorage[i]; - // skip pool allocation for kBadStorageID, kExternalStorageID and kDynamicStorageID - if (storage_id < 0) continue; - size_t sid = static_cast<size_t>(storage_id); - if (sid >= pool_info.size()) { - pool_info.resize(sid + 1, PoolEntry{Context::CPU(), size_t(0), kUndefinedStorage, - MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR, - MXNET_STORAGE_DEFAULT_NAME_CSTR}); - } - PoolEntry& info = pool_info[sid]; - if (info.bytes == 0) { - info = PoolEntry{data_context[i], bytes, data_storage_type[i], - data_storage_profiler_scope[i], data_storage_name[i]}; - } else { - info.bytes = std::max(info.bytes, bytes); - } - } - // construct the re-use pool, if needed - std::multimap<size_t, NDArray> free_pool; - if (shared_pool != nullptr) { - for (const NDArray& nd : *shared_pool) { - size_t bytes = 0; - if (shape_is_known(nd.shape())) { - bytes = nd.shape().Size() * mshadow::mshadow_sizeof(nd.dtype()); - } - free_pool.insert(std::make_pair(bytes, nd)); - } - } - // remake the data pool - data_pool_.clear(); - data_pool_.resize(pool_info.size()); - - // sort the pool info in descending order before allocating memory - std::vector<size_t> sorted_pool_index; - for (size_t i = 0; i < pool_info.size(); i++) { - sorted_pool_index.push_back(i); - } - auto pool_comparator = [&pool_info](size_t lhs, size_t rhs){ - return pool_info[lhs].bytes > pool_info[rhs].bytes; - }; - std::sort(sorted_pool_index.begin(), sorted_pool_index.end(), pool_comparator); - - for (size_t i : sorted_pool_index) { - const Context& ctx = pool_info[i].ctx; - size_t bytes = pool_info[i].bytes; - bool allocated = false; - for (auto it = free_pool.lower_bound(bytes); it != free_pool.end(); ++it) { - if (it->second.ctx() == ctx && it->first >= bytes) { - data_pool_[i] = it->second; - free_pool.erase(it); - allocated = true; - break; - } - } - if (!allocated) { - size_t nword = (bytes + 3) / 4; - CHECK_LE(nword, std::numeric_limits<nnvm::dim_t>::max()); - // allocate float arrays - mxnet::TShape shape{static_cast<nnvm::dim_t>(nword)}; - // TODO(junwu): adding delay_alloc=true to create nd - // is a temporary solution.
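The best-fit reuse a few lines above keeps free blocks in a std::multimap keyed by size and scans forward from lower_bound until it finds one on the right context. A minimal standalone sketch of that lookup (Block is a hypothetical stand-in for a pooled NDArray; the context check is omitted):

#include <cstddef>
#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-in for a pooled allocation; the real pool holds NDArrays.
struct Block {
  std::string name;
  std::size_t bytes;
};

int main() {
  std::multimap<std::size_t, Block> free_pool;
  free_pool.insert({256, Block{"a", 256}});
  free_pool.insert({1024, Block{"b", 1024}});
  const std::size_t request = 300;
  auto it = free_pool.lower_bound(request);  // smallest free block >= request
  if (it != free_pool.end()) {
    std::cout << "reuse " << it->second.name << " (" << it->first
              << " bytes) for a " << request << "-byte request\n";
    free_pool.erase(it);  // the block is no longer free
  } else {
    std::cout << "no fit: allocate " << request << " fresh bytes\n";
  }
  return 0;
}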
- NDArray nd(shape, ctx, true); - nd.AssignStorageInfo(pool_info[i].profiler_scope, - pool_info[i].name); - data_pool_[i] = nd; - // put the new allocated arrays to shared pool - if (shared_pool != nullptr) { - shared_pool->push_back(nd); - } - } - } - CHECK_EQ(data_pool_.size(), pool_info.size()); - // assign the data entries - for (size_t i = 0; i < data_entry_.size(); ++i) { - // avoid pre-allocated arrays - if (!data_entry_[i].is_none()) continue; - // assign allocated array by storage id - int storage_id = vstorage[i]; - auto storage_type = (NDArrayStorageType) vstorage_type[i]; - if (storage_type == kDefaultStorage) { - if (!shape_is_known(vshape[i])) { - data_entry_[i] = NDArray(data_context[i], vdtype[i]); - data_entry_[i].AssignStorageInfo(data_storage_profiler_scope[i], - data_storage_name[i]); - } else { - CHECK_GE(storage_id, 0) << "Do not support runtime shape op yet"; - const NDArray& src = data_pool_.at(storage_id); - data_entry_[i] = src.AsArray(vshape[i], vdtype[i]); - } - } else { - data_entry_[i] = NDArray(storage_type, vshape[i], data_context[i], - true, vdtype[i]); - data_entry_[i].AssignStorageInfo(data_storage_profiler_scope[i], - data_storage_name[i]); - } - if (log_verbose_) { - LOG(INFO) << "\tinit data entry\t" << i << "\tas " << common::stype_string(storage_type); - } - } -} - - -void GraphExecutor::InitCachedOps() { - // get the graph - const auto& idx = graph_.indexed_graph(); - const auto& vstorage_inplace = - graph_.GetAttr >("storage_inplace_index"); - const auto& op_execs = - graph_.GetAttr("op_execs"); - const auto& vctx = graph_.GetAttr("context"); - const auto& addto_entry = graph_.GetAttr >("addto_entry"); - const auto& skip_plus_node = graph_.GetAttr >("skip_plus_node"); - - op_nodes_.resize(idx.num_nodes()); - // setup the array and requirements. - for (uint32_t nid = 0; nid < idx.num_nodes(); ++nid) { - const auto& inode = idx[nid]; - if (inode.source->is_variable()) continue; - op_nodes_[nid].opr_name = inode.source->op()->name.c_str(); - if (skip_plus_node.at(nid)) { - op_nodes_[nid].skip_exec_node = true; continue; - } - - op_nodes_[nid].exec = op_execs[nid]; - op_nodes_[nid].ctx = vctx[nid]; - auto& exec = op_nodes_[nid].exec; - CHECK_EQ(exec->in_array.size(), 0U); - CHECK_EQ(exec->out_array.size(), 0U); - for (const auto& e : inode.inputs) { - exec->in_array.push_back(data_entry_[idx.entry_id(e)]); - } - // detect inplace requirement - for (uint32_t index = 0; index < inode.source->num_outputs(); ++index) { - uint32_t eid = idx.entry_id(nid, index); - exec->out_array.push_back(data_entry_[eid]); - if (addto_entry.at(eid) != 0) { - exec->req.push_back(kAddTo); - } else if (vstorage_inplace[eid] >= 0) { - exec->req.push_back(kWriteInplace); - } else if (vstorage_inplace[eid] == -2) { - // -2 indicate that the entry is never referenced. 
- exec->req.push_back(kNullOp); - } else { - exec->req.push_back(kWriteTo); - } - } - } - // Note that this modifies the requirement of kWriteInplace - for (size_t j = num_forward_outputs_; j < idx.outputs().size(); ++j) { - auto& e = idx.outputs()[j]; - op_nodes_[e.node_id].exec->req[e.index] = - grad_store_[j - num_forward_outputs_].first; - } - for (uint32_t nid = 0; nid < idx.num_nodes(); ++nid) { - const auto& inode = idx[nid]; - if (inode.source->is_variable()) continue; - if (op_nodes_[nid].skip_exec_node) continue; - auto& exec = op_nodes_[nid].exec; - bool is_async = op_nodes_[nid].exec->exec_type() == ExecType::kAsync; - bool is_gpu = op_nodes_[nid].ctx.dev_mask() == gpu::kDevMask; - - // the variables - std::vector use_vars, mutate_vars; - for (const auto& nd : exec->in_array) { - use_vars.push_back(nd.var()); - } - for (const auto& r : exec->op_ctx.requested) { - mutate_vars.push_back(r.var); - } - for (const auto& nd : exec->out_array) { - mutate_vars.push_back(nd.var()); - } - if (exec->var() != nullptr) { - mutate_vars.push_back(exec->var()); - } - // dedup vars - Engine::Get()->DeduplicateVarHandle(&use_vars, &mutate_vars); - // all vars include both mutate vars and use vars - std::vector all_vars(use_vars); - std::copy(mutate_vars.begin(), mutate_vars.end(), - std::inserter(all_vars, all_vars.end())); - // setup exec vars - Engine::Get()->PushAsync( - [exec](RunContext rctx, Engine::CallbackOnComplete on_complete) { - exec->Setup(); - on_complete(); - }, Context::CPU(), {}, all_vars, FnProperty::kNormal, 0, - "SetupExec"); - auto exec_fun = [exec, is_async, is_gpu] ( - RunContext ctx, Engine::CallbackOnComplete on_complete) { - if (is_async) { - exec->op_ctx.async_on_complete = on_complete; - } - exec->Run(ctx, is_gpu); - // call on complete only if it is async op - if (!is_async) { - if (is_gpu) { - #if MXNET_USE_CUDA - // Wait GPU kernel to finish. - ctx.get_stream()->Wait(); - #else - LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; - #endif - } - on_complete(); - } - }; - // setup the vars - op_nodes_[nid].cached_opr = Engine::Get()->NewOperator( - exec_fun, use_vars, mutate_vars, FnProperty::kNormal, - op_nodes_[nid].opr_name); - op_nodes_[nid].mutate_vars = mutate_vars; - op_nodes_[nid].use_vars = use_vars; - } -} - -void GraphExecutor::InitOpSegs() { - size_t total_num_nodes = graph_.indexed_graph().num_nodes(); - cached_seg_opr_.clear(); - CachedSegOpr p; - cached_seg_opr_.resize(total_num_nodes, p); - if (monitor_callback_) return; - - // Symbolic bulking is set by the same environment variables as Imperative bulking. 
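The write-request classification in InitCachedOps above picks, per output entry, kAddTo when another pass accumulates into it, kWriteInplace when it shares storage with an input, kNullOp when the entry is never referenced (inplace index -2), and kWriteTo otherwise. A standalone sketch of that decision order, with made-up decision inputs (the enum names mirror the source):

#include <iostream>

enum OpReq { kNullOp, kWriteTo, kWriteInplace, kAddTo };

OpReq ClassifyOutput(bool addto, int inplace_index) {
  if (addto) return kAddTo;
  if (inplace_index >= 0) return kWriteInplace;  // output reuses an input's storage
  if (inplace_index == -2) return kNullOp;       // entry is never referenced
  return kWriteTo;
}

int main() {
  std::cout << ClassifyOutput(true, -1) << " "    // kAddTo (3)
            << ClassifyOutput(false, 0) << " "    // kWriteInplace (2)
            << ClassifyOutput(false, -2) << " "   // kNullOp (0)
            << ClassifyOutput(false, -1) << "\n"; // kWriteTo (1)
  return 0;
}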
- // Generate segments based on the graph structure - bool prefer_bulk_exec_inference = Imperative::PreferBulkExecInference(); - // Whether to perform bulk exec for training - const profiler::Profiler *prof = profiler::Profiler::Get(); - bool prefer_bulk_exec_train = Imperative::PreferBulkExecTrain() - && (!prof || !prof->AggregateEnabled()); - if (this->is_dynamic_) { - prefer_bulk_exec_inference = false; - prefer_bulk_exec_train = false; - } - bool is_training = num_forward_nodes_ != total_num_nodes; - - if (prefer_bulk_exec_train && is_training) { - // Bulk the forward portion of the graph per the bulk segment max size for forward training - this->BulkOpSegs(0, num_forward_nodes_, Imperative::BulkExecMaxNodeTrainFwd()); - // Bulk the backward portion of the graph per the bulk segment max size for backward training - this->BulkOpSegs(num_forward_nodes_, total_num_nodes, Imperative::BulkExecMaxNodeTrainBwd()); - } - - if (prefer_bulk_exec_inference && !is_training) { - // Bulk the entire graph as one bulk segment if possible - this->BulkOpSegs(0, total_num_nodes, total_num_nodes); - } -} - - -void GraphExecutor::BulkOpSegs(size_t from_node, size_t up_to_node, size_t segment_num_nodes_max) { - size_t topo_start = from_node; - size_t segment_node_count = 0; - for (size_t nid = from_node; nid < up_to_node; nid++) { - auto &node = graph_.indexed_graph()[nid].source; - auto &op_node = op_nodes_[nid]; - // Variables, such as learned weights, are ignored in the segment_node_count - bool ignore_node = node->is_variable() || op_node.skip_exec_node || op_node.exec == nullptr; - if (!ignore_node) - segment_node_count++; - bool can_bulk = ignore_node || op_node.exec->exec_type() == ExecType::kSync; - // check if we need to create the segment based on properties of this node - if (!can_bulk || nid == up_to_node - 1 || segment_node_count >= segment_num_nodes_max) { - // Create a new segment for the previous nodes- include also this node if it's bulkable - cached_seg_opr_[topo_start] = this->CreateCachedSegOpr(topo_start, can_bulk ? 
nid + 1 : nid); - topo_start = nid + 1; - segment_node_count = 0; - } - } -} - -void GraphExecutor::ExecuteMonInputCallback(size_t nid) { - static const auto& flist_inputs = - nnvm::Op::GetAttr("FListInputNames"); - const auto& idx = graph_.indexed_graph(); - std::vector input_names; - OpNode& opnode = op_nodes_[nid]; - const auto& inode = idx[nid]; - const auto& node = idx[nid].source; - if (flist_inputs.count(node->op())) { - input_names = flist_inputs[node->op()](node->attrs); - } else { - for (size_t i = 0; i < node->num_inputs(); ++i) { - input_names.emplace_back("input" + std::to_string(i)); - } - } - CHECK_EQ(opnode.exec->in_array.size(), input_names.size()); - for (size_t i = 0; i < opnode.exec->in_array.size(); ++i) { - if (node->inputs[i].node->is_variable()) { - // Monitor variable - NDArray *cpy = new NDArray(opnode.exec->in_array[i]); - std::string name = node->inputs[i].node->attrs.name; - this->monitor_callback_(name.c_str(), reinterpret_cast(cpy)); - } - NDArray *cpy = new NDArray(opnode.exec->in_array[i]); - std::string name = inode.source->attrs.name + "_" + input_names[i]; - this->monitor_callback_(name.c_str(), reinterpret_cast(cpy)); - } -} - -void GraphExecutor::ExecuteMonOutputCallback(size_t nid) { - const auto& idx = graph_.indexed_graph(); - OpNode& opnode = op_nodes_[nid]; - const auto& node = idx[nid].source; - for (size_t i = 0; i < opnode.exec->out_array.size(); ++i) { - NDArray *cpy = new NDArray(opnode.exec->out_array[i]); - nnvm::ObjectPtr node_ptr = std::make_shared(*node); - std::string name = GetOutputName({node_ptr, static_cast(i), 0}); - this->monitor_callback_(name.c_str(), reinterpret_cast(cpy)); - } -} - -void GraphExecutor::RunOps(bool is_train, size_t topo_start, size_t topo_end) { - static auto& finfer_shape = nnvm::Op::GetAttr("FInferShape"); - static auto& is_backward = Op::GetAttr("TIsBackward"); - // Update context - const auto& idx = graph_.indexed_graph(); - for (size_t nid = topo_start; nid < topo_end; ++nid) { - OpNode& opnode = op_nodes_[nid]; - if (opnode.skip_exec_node) continue; - const auto& inode = idx[nid]; - if (inode.source->is_variable()) continue; - opnode.exec->op_ctx.is_train = is_train; - opnode.exec->op_ctx.need_grad = need_grad_; - } - - mxnet::ShapeVector rshape = graph_.MoveCopyAttr("shape"); - // Push Ops - for (size_t nid = topo_start; nid < topo_end; ++nid) { - auto seg_op = cached_seg_opr_[nid]; - // Check segments first - if (monitor_callback_ == nullptr && seg_op.opr != nullptr && seg_op.topo_end <= topo_end) { - bool profiling = profiler::Profiler::Get()->GetState() == profiler::Profiler::kRunning; - Engine::Get()->Push(seg_op.opr, seg_op.ctx, 0, profiling); - nid = seg_op.topo_end - 1; - continue; - } - // Normal mode - const auto& inode = idx[nid]; - const uint32_t num_inputs = inode.inputs.size(); - const uint32_t num_outputs = inode.source->num_outputs(); - if (inode.source->is_variable()) continue; - OpNode& opnode = op_nodes_[nid]; - if (op_nodes_[nid].skip_exec_node) continue; - // Monitor callbacks - if (monitor_callback_ && monitor_all_) { - ExecuteMonInputCallback(nid); - } - if (this->is_dynamic_) { - const auto &op = inode.source->op(); - { - for (NDArray &array : opnode.exec->in_array) { - array.WaitToRead(); - if (!shape_is_known(array.shape())) { - array.SetShapeFromChunk(); - } - } - int i = 0; - for (NDArray &array : opnode.exec->out_array) { - array.WaitToRead(); - if (!shape_is_known(array.shape())) { - array.SetShapeFromChunk(); - } - if (!shape_is_known(array.shape())) { - mxnet::TShape shape 
= rshape[idx.entry_id(nid, i)]; - if (shape_is_known(shape)) { - array.ReshapeAndAlloc(shape); - } - } - ++i; - } - } - if (finfer_shape.count(op)) { - mxnet::ShapeVector in_shapes; - mxnet::ShapeVector out_shapes; - for (NDArray &array : opnode.exec->in_array) { - in_shapes.push_back(array.shape()); - } - for (NDArray &array : opnode.exec->out_array) { - out_shapes.push_back(array.shape()); - } - auto finfer = finfer_shape[op]; - try { - bool success = finfer(inode.source->attrs, &in_shapes, &out_shapes); - CHECK(success) << "InferShape failed in operator " << inode.source->attrs.name; - } catch (const std::exception& e) { - throw dmlc::Error("Error in operator " + inode.source->attrs.name + ": " + e.what()); - } - int n_out = out_shapes.size(); - for (int i = 0; i < n_out; ++i) { - NDArray &array = opnode.exec->out_array[i]; - if (!shape_is_known(array.shape())) { - array.Init(out_shapes[i]); - } - } - } else if (is_backward.get(inode.source->op(), false) && inode.control_deps.size()) { - CHECK_GE(inode.control_deps.size(), 1U) << - "BackwardOp need to have control_deps to its forward op"; - uint32_t fid = inode.control_deps[0]; - const OpNode& fopnode = op_nodes_[fid]; - CHECK_EQ(fopnode.exec->in_array.size(), opnode.exec->out_array.size()); - int nelem = fopnode.exec->in_array.size(); - std::vector &from = fopnode.exec->in_array; - std::vector &to = opnode.exec->out_array; - for (int i = 0; i < nelem; ++i) { - if (!shape_is_known(to[i].shape())) { - to[i].Init(from[i].shape()); - } - } - } - } - opnode.exec->op_ctx.is_train = is_train; - opnode.exec->op_ctx.need_grad = need_grad_; - if (opnode.exec->exec_type() == ExecType::kCrossDeviceCopy) { - CHECK_EQ(inode.inputs.size(), 1U); - CHECK_EQ(opnode.exec->in_array.size(), 1U); - CHECK_EQ(opnode.exec->out_array.size(), 1U); - CopyFromTo(opnode.exec->in_array[0], &(opnode.exec->out_array[0])); - } else if (opnode.exec->exec_type() == ExecType::kSubgraphExec) { - // If the node contains a subgraph, we can't execute it in the engine. 
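Earlier in RunOps, a valid cached segment is pushed to the engine once and the loop jumps past the nodes it covers (nid = seg_op.topo_end - 1). A standalone sketch of that dispatch pattern, with Seg as a hypothetical stand-in for CachedSegOpr:

#include <iostream>
#include <vector>

struct Seg {
  size_t topo_end = 0;  // one past the last node the segment covers
  bool valid = false;
};

int main() {
  // Segments indexed by their starting node; node 0 bulks nodes [0, 3).
  std::vector<Seg> segs(5);
  segs[0] = {3, true};
  for (size_t nid = 0; nid < segs.size(); ++nid) {
    if (segs[nid].valid) {
      std::cout << "push bulked segment [" << nid << ", " << segs[nid].topo_end << ")\n";
      nid = segs[nid].topo_end - 1;  // skip nodes the segment already covers
      continue;
    }
    std::cout << "push single node " << nid << "\n";
  }
  return 0;
}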
- opnode.exec->Run(opnode.exec->op_ctx.run_ctx, false); - } else if (opnode.cached_opr != nullptr) { - bool profiling = profiler::Profiler::Get()->GetState() == profiler::Profiler::kRunning; - Engine::Get()->Push(opnode.cached_opr, opnode.ctx, 0, profiling); - if (this->is_dynamic_) { - for (NDArray &array : opnode.exec->out_array) { - array.WaitToRead(); - if (!shape_is_known(array.shape())) { - array.SetShapeFromChunk(); - } - } - } - } else { - LOG(FATAL) << "Not accessed"; - } - for (uint32_t i = 0; i < num_inputs; ++i) { - int eid = idx.entry_id(inode.inputs[i]); - if (!shape_is_known(rshape[eid])) { - rshape[eid] = opnode.exec->in_array[i].shape(); - } - } - for (uint32_t i = 0; i < num_outputs; ++i) { - int eid = idx.entry_id(nid, i); - if (!shape_is_known(rshape[eid])) { - rshape[eid] = opnode.exec->out_array[i].shape(); - } - } - // Monitor callbacks - if (monitor_callback_) { - ExecuteMonOutputCallback(nid); - } - } - graph_.attrs["shape"] = std::make_shared(rshape); -} - -GraphExecutor::CachedSegOpr GraphExecutor::CreateCachedSegOpr(size_t topo_start, size_t topo_end) { - std::vector use_vars; - std::vector mutate_vars; - Context *pctx = nullptr; - GraphExecutor::CachedSegOpr ret; - ret.topo_start = topo_start; - ret.topo_end = topo_end; - auto& exec_list = ret.exec_list; - // invalid segment - if (topo_end <= topo_start) { - return ret; - } - std::string opr_names = "["; - - const auto& idx = graph_.indexed_graph(); - for (size_t nid = topo_start; nid < topo_end; ++nid) { - std::vector all_vars; - const auto& inode = idx[nid]; - OpNode& op_node = op_nodes_[nid]; - if (op_node.skip_exec_node) continue; - if (inode.source->is_variable()) continue; - if (op_node.exec->exec_type() != ExecType::kSync) { - return ret; - } - if (pctx == nullptr) pctx = &(op_node.ctx); - if (*pctx != op_node.ctx) { - return ret; - } - auto& exec = op_nodes_[nid].exec; - std::copy(op_node.mutate_vars.begin(), op_node.mutate_vars.end(), - std::inserter(mutate_vars, mutate_vars.end())); - std::copy(op_node.use_vars.begin(), op_node.use_vars.end(), - std::inserter(use_vars, use_vars.end())); - ret.exec_list.push_back(exec); - opr_names += inode.source->op()->name + ","; - } - - if (pctx == nullptr) return ret; - ret.ctx = *pctx; - Engine::Get()->DeduplicateVarHandle(&use_vars, &mutate_vars); - - bool is_gpu = pctx->dev_mask() == gpu::kDevMask; - auto exec_fun = [exec_list, is_gpu] ( - RunContext ctx, Engine::CallbackOnComplete on_complete) { - // Run all opr in the sub-graph - for (auto &exec : exec_list) { - exec->Run(ctx, is_gpu); - } - if (is_gpu) { -#if MXNET_USE_CUDA - // Wait GPU kernel to finish. 
- ctx.get_stream()->Wait(); -#else - LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; -#endif - } - on_complete(); - }; - opr_names.pop_back(); - opr_names += "]"; - ret.opr = Engine::Get()->NewOperator( - exec_fun, use_vars, mutate_vars, FnProperty::kNormal, - opr_names.c_str()); - return ret; -} - -// Infer shapes, dtypes, stypes, contexts for the forward graph -static nnvm::Graph InferForwardAttrs(nnvm::Graph g, - mxnet::ShapeVector arg_shapes, - nnvm::DTypeVector arg_dtypes, - StorageTypeVector arg_stypes, - const Context& default_ctx, - const std::map& ctx_map, - const std::vector& in_arg_ctxes, - const std::vector& aux_state_ctxes, - bool partial_shape = false) { - const auto& indexed_graph = g.indexed_graph(); - const auto num_forward_inputs = indexed_graph.input_nodes().size(); - g = AssignContext(g, default_ctx, ctx_map, in_arg_ctxes, {}, - aux_state_ctxes, {}, num_forward_inputs, g.outputs.size()); - g = InferShape(std::move(g), std::move(arg_shapes), "__shape__"); - if (g.GetAttr("shape_num_unknown_nodes") != 0U) { - if (!partial_shape) { - HandleInferShapeError(num_forward_inputs, indexed_graph, - g.GetAttr("shape")); - } - } - g = InferType(std::move(g), std::move(arg_dtypes), "__dtype__"); - if (g.GetAttr("dtype_num_unknown_nodes") != 0U) { - HandleInferTypeError(num_forward_inputs, indexed_graph, - g.GetAttr("dtype")); - } - g = InferStorageType(std::move(g), std::move(arg_stypes), "__storage_type__"); - if (g.GetAttr("storage_type_num_unknown_nodes") != 0U) { - HandleInferStorageTypeError(num_forward_inputs, indexed_graph, - g.GetAttr("storage_type")); - } - return g; -} - -static bool SubgraphBackendCheck(const op::SubgraphBackendPtr& backend, - const Context& default_ctx, - int verbose = 1) { - if (backend->HasAttr("enable") && (backend->GetAttr("enable") != true)) { - if (verbose > 1) { - LOG(INFO) << "Subgraph backend " << backend->GetName() - << " isn't activated."; - } - return false; - } - if (backend->HasAttr("context") && backend->GetAttr("context") != default_ctx) { - if (verbose > 1) { - LOG(INFO) << "Subgraph backend " << backend->GetName() - << " isn't activated as context mismatch."; - } - return false; - } - return true; -} - -static bool SubgraphPropertyCheck(const std::string& backend_name, - const op::SubgraphPropertyPtr& prop, bool need_grad, - int verbose = 1) { - auto full_name = - prop->HasAttr("property_name") ? prop->GetAttr("property_name") : std::string(); - if (prop->HasAttr("disable") && prop->GetAttr("disable") == true) { - LOG(INFO) << "subgraph property " << full_name << " from backend " << backend_name - << " is disabled."; - return false; - } - if (prop->HasAttr("inference_only") && prop->GetAttr("inference_only") == true) { - if (need_grad) { - if (verbose > 1) { - LOG(INFO) << "skip partitioning graph with subgraph property " << full_name - << " from backend " << backend_name << " as it requires `grad_req=null`."; - } - return false; - } - } - return true; -} - -// Given input attr arrays, partition the graph using the backend name equal to prop_name. -// This is a common function for bind and simple_bind flows. 
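SubgraphBackendCheck and SubgraphPropertyCheck above gate partitioning on attributes: a backend must be enabled and match the default context, and an inference-only property is skipped when gradients are required. A minimal standalone sketch of that gating, with a plain map standing in for the HasAttr/GetAttr interface:

#include <iostream>
#include <string>
#include <unordered_map>

using Attrs = std::unordered_map<std::string, bool>;

bool ShouldRun(const Attrs& attrs, bool need_grad) {
  auto it = attrs.find("disable");
  if (it != attrs.end() && it->second) return false;           // explicitly disabled
  it = attrs.find("inference_only");
  if (it != attrs.end() && it->second && need_grad) return false;  // needs grad_req=null
  return true;
}

int main() {
  std::cout << ShouldRun({{"inference_only", true}}, /*need_grad=*/true)    // 0
            << ShouldRun({{"inference_only", true}}, /*need_grad=*/false)   // 1
            << ShouldRun({{"disable", true}}, /*need_grad=*/false) << "\n"; // 0
  return 0;
}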
-static nnvm::Symbol BuildSubgraph(const nnvm::Symbol& src, op::SubgraphPropertyPtr subgraph_prop, - const mxnet::ShapeVector& arg_shapes, - const nnvm::DTypeVector& arg_dtypes, - const StorageTypeVector& arg_stypes, const Context& default_ctx, - const std::map& ctx_map, - const std::vector& in_arg_ctxes, - const std::vector& aux_state_ctxes) { - nnvm::Symbol ret = src.Copy(); - nnvm::Graph g; - g.outputs = ret.outputs; - g = InferForwardAttrs(g, arg_shapes, arg_dtypes, arg_stypes, default_ctx, ctx_map, in_arg_ctxes, - aux_state_ctxes, true); - subgraph_prop->SetAttr("graph", g); - g.attrs["subgraph_property"] = std::make_shared(subgraph_prop); - g = ApplyPass(std::move(g), "BuildSubgraph"); - subgraph_prop->RemoveAttr("graph"); - g.attrs.erase("subgraph_property"); - ret.outputs = g.outputs; - return ret; -} - -// Given input attr dicts, partition the graph using the backend. -// This is for simple_bind flow. -static nnvm::Symbol BuildSubgraph( - const nnvm::Symbol& src, const op::SubgraphBackendPtr backend, - const std::unordered_map& arg_shape_map, - const std::unordered_map& arg_dtype_map, - const std::unordered_map& arg_stype_map, const Context& default_ctx, - const std::map& ctx_map, std::vector* in_arg_ctxes, - std::vector* arg_grad_ctxes, std::vector* grad_req_types, - std::vector* aux_state_ctxes, int verbose = 1) { - // setup map for in_arg_ctxes, arg_grad_ctxes, aux_state_ctxes and grad_req_types - std::unordered_map in_arg_ctx_map; - std::unordered_map arg_grad_ctx_map; - std::unordered_map aux_state_ctx_map; - std::unordered_map grad_req_type_map; - - auto arg_names = src.ListInputNames(nnvm::Symbol::kReadOnlyArgs); - auto aux_names = src.ListInputNames(nnvm::Symbol::kAuxiliaryStates); - for (size_t i = 0; i < arg_names.size(); ++i) { - const auto& name = arg_names[i]; - in_arg_ctx_map[name] = in_arg_ctxes->at(i); - arg_grad_ctx_map[name] = arg_grad_ctxes->at(i); - grad_req_type_map[name] = grad_req_types->at(i); - } - - for (size_t i = 0; i < aux_names.size(); ++i) { - aux_state_ctx_map[aux_names[i]] = aux_state_ctxes->at(i); - } - - bool need_grad = false; - for (OpReqType req : *grad_req_types) { - if (req != kNullOp) { - need_grad = true; - break; - } - } - nnvm::Symbol ret = src.Copy(); - std::unordered_set op_names_set; - const auto& backend_name = backend->GetName(); - const auto it = op::SubgraphPropertyOpNameSet::Get()->find(backend_name); - // assign a op name set to the subgraph property if it has been provided by users - if (it != op::SubgraphPropertyOpNameSet::Get()->end()) { - LOG(INFO) << "SubgraphPropertyOpNameSet for subgraph property " << backend_name - << " has been assigned a value. 
Please make sure it is initialized" - " only for the testing purpose."; - op_names_set = it->second; - } - - const auto& subgraph_prop_list = backend->GetSubgraphProperties(); - for (auto& subgraph_prop : subgraph_prop_list) { - if (SubgraphPropertyCheck(backend_name, subgraph_prop, need_grad, verbose)) { - subgraph_prop->SetAttr("op_names", op_names_set); - const std::vector input_names = ret.ListInputNames(Symbol::kAll); - mxnet::ShapeVector arg_shapes(input_names.size(), mxnet::TShape()); - nnvm::DTypeVector arg_dtypes(input_names.size(), -1); - StorageTypeVector arg_stypes(input_names.size(), kUndefinedStorage); - for (size_t i = 0; i < input_names.size(); ++i) { - const auto& input_name = input_names[i]; - const auto it1 = arg_shape_map.find(input_name); - if (arg_shape_map.end() != it1) { - arg_shapes[i] = it1->second; - } - const auto it2 = arg_dtype_map.find(input_name); - if (arg_dtype_map.end() != it2) { - arg_dtypes[i] = it2->second; - } - const auto it3 = arg_stype_map.find(input_name); - if (arg_stype_map.end() != it3) { - arg_stypes[i] = it3->second; - } - } - ret = BuildSubgraph(ret, subgraph_prop, arg_shapes, arg_dtypes, arg_stypes, default_ctx, - ctx_map, *in_arg_ctxes, *aux_state_ctxes); - // Reorder in_arg_ctxes, arg_grad_ctxes, aux_state_ctxes and grad_req_types according to - // partitioned symbol input sequence - in_arg_ctxes->clear(); - arg_grad_ctxes->clear(); - aux_state_ctxes->clear(); - grad_req_types->clear(); - auto new_arg_names = ret.ListInputNames(nnvm::Symbol::kReadOnlyArgs); - auto new_aux_names = ret.ListInputNames(nnvm::Symbol::kAuxiliaryStates); - for (const auto& arg_name : new_arg_names) { - CHECK(in_arg_ctx_map.count(arg_name)); - in_arg_ctxes->push_back(in_arg_ctx_map[arg_name]); - arg_grad_ctxes->push_back(arg_grad_ctx_map[arg_name]); - grad_req_types->push_back(grad_req_type_map[arg_name]); - } - for (const auto& arg_name : new_aux_names) { - CHECK(aux_state_ctx_map.count(arg_name)); - aux_state_ctxes->push_back(aux_state_ctx_map[arg_name]); - } - } - } - return ret; -} - -// Given input ndarrays, partition the graph using backend. -// This is for bind flow. 
-static nnvm::Symbol BuildSubgraph(const nnvm::Symbol& src, const op::SubgraphBackendPtr backend,
-                                  const Context& default_ctx,
-                                  const std::map<std::string, Context>& ctx_map,
-                                  std::vector<NDArray>* in_args,
-                                  std::vector<NDArray>* arg_grad_store,
-                                  std::vector<OpReqType>* grad_req_type,
-                                  std::vector<NDArray>* aux_states, int verbose = 1) {
-  // setup map for in_args, arg_grad_store, grad_req_type and aux_states
-  std::unordered_map<std::string, NDArray> in_args_map;
-  std::unordered_map<std::string, NDArray> arg_grad_store_map;
-  std::unordered_map<std::string, OpReqType> grad_req_type_map;
-  std::unordered_map<std::string, NDArray> aux_states_map;
-  const std::vector<std::string> arg_names = src.ListInputNames(nnvm::Symbol::kReadOnlyArgs);
-  const std::vector<std::string> aux_names = src.ListInputNames(nnvm::Symbol::kAuxiliaryStates);
-  for (size_t i = 0; i < arg_names.size(); ++i) {
-    in_args_map[arg_names[i]] = in_args->at(i);
-  }
-
-  for (size_t i = 0; i < aux_names.size(); ++i) {
-    aux_states_map[aux_names[i]] = aux_states->at(i);
-  }
-
-  if (arg_grad_store->size()) {
-    for (size_t i = 0; i < arg_names.size(); ++i) {
-      const auto& name = arg_names[i];
-      arg_grad_store_map[name] = arg_grad_store->at(i);
-      grad_req_type_map[name] = grad_req_type->at(i);
-    }
-  }
-
-  bool need_grad = false;
-  for (OpReqType req : *grad_req_type) {
-    if (req != kNullOp) {
-      need_grad = true;
-      break;
-    }
-  }
-  nnvm::Symbol ret = src.Copy();
-  std::unordered_set<std::string> op_names_set;
-  const auto& backend_name = backend->GetName();
-  auto it = op::SubgraphPropertyOpNameSet::Get()->find(backend_name);
-  // assign an op name set to the subgraph property if it has been provided by users
-  if (it != op::SubgraphPropertyOpNameSet::Get()->end()) {
-    LOG(INFO) << "SubgraphPropertyOpNameSet for subgraph property " << backend_name
-              << " has been assigned a value. Please make sure it is initialized"
-                 " only for the testing purpose.";
-    op_names_set = it->second;
-  }
-  const auto& subgraph_prop_list = backend->GetSubgraphProperties();
-
-  for (auto subgraph_prop : subgraph_prop_list) {
-    if (SubgraphPropertyCheck(backend_name, subgraph_prop, need_grad, verbose)) {
-      subgraph_prop->SetAttr("op_names", op_names_set);
-      const std::vector<std::string> input_names = ret.ListInputNames(Symbol::kAll);
-      const std::vector<std::string> arg_names = ret.ListInputNames(nnvm::Symbol::kReadOnlyArgs);
-      const std::vector<std::string> aux_names = ret.ListInputNames(nnvm::Symbol::kAuxiliaryStates);
-      CHECK_EQ(arg_names.size(), in_args_map.size());
-      CHECK_EQ(aux_names.size(), aux_states_map.size());
-      mxnet::ShapeVector arg_shapes;  // all input shapes
-      arg_shapes.reserve(input_names.size());
-      nnvm::DTypeVector arg_dtypes;  // all input dtypes
-      arg_dtypes.reserve(input_names.size());
-      StorageTypeVector arg_stypes;  // all input stypes
-      arg_stypes.reserve(input_names.size());
-      std::vector<Context> in_arg_ctxes(in_args_map.size());
-      std::vector<Context> aux_state_ctxes(aux_states_map.size());
-
-      size_t i1 = 0, i2 = 0;
-      for (const auto& input_name : input_names) {
-        if (i2 < aux_names.size() && aux_names[i2] == input_name) {
-          const auto& aux_st = aux_states_map[input_name];
-          arg_shapes.push_back(aux_st.shape());
-          arg_dtypes.push_back(aux_st.dtype());
-          arg_stypes.push_back(aux_st.storage_type());
-          aux_state_ctxes[i2] = aux_st.ctx();
-          ++i2;
-        } else {
-          CHECK(i1 < arg_names.size());
-          CHECK_EQ(arg_names[i1], input_name);
-          const auto& in_arg = in_args_map[input_name];
-          arg_shapes.push_back(in_arg.shape());
-          arg_dtypes.push_back(in_arg.dtype());
-          arg_stypes.push_back(in_arg.storage_type());
-          in_arg_ctxes[i1] = in_arg.ctx();
-          ++i1;
-        }
-      }
-
-      ret = BuildSubgraph(ret, subgraph_prop, arg_shapes, arg_dtypes, arg_stypes, default_ctx,
-                          ctx_map, in_arg_ctxes, aux_state_ctxes);
-    }
-  }
-  // Reorder in_args, arg_grad_store, grad_req_type and aux_states according to partitioned symbol
-  // input sequence
-  const auto new_arg_names = ret.ListInputNames(nnvm::Symbol::kReadOnlyArgs);
-  const auto new_aux_names = ret.ListInputNames(nnvm::Symbol::kAuxiliaryStates);
-  CHECK_EQ(arg_names.size(), new_arg_names.size());
-  CHECK_EQ(aux_names.size(), new_aux_names.size());
-  in_args->clear();
-  aux_states->clear();
-  for (const auto& arg_name : new_arg_names) {
-    CHECK(in_args_map.count(arg_name));
-    in_args->push_back(in_args_map[arg_name]);
-  }
-
-  for (const auto& arg_name : new_aux_names) {
-    CHECK(aux_states_map.count(arg_name));
-    aux_states->push_back(aux_states_map[arg_name]);
-  }
-
-  if (arg_grad_store->size()) {
-    arg_grad_store->clear();
-    grad_req_type->clear();
-    for (const auto& arg_name : new_arg_names) {
-      arg_grad_store->push_back(arg_grad_store_map[arg_name]);
-      grad_req_type->push_back(grad_req_type_map[arg_name]);
-    }
-  }
-  return ret;
-}
-}  // namespace exec
-
-Executor *Executor::SimpleBind(nnvm::Symbol symbol,
-                               const Context& default_ctx,
-                               const std::map<std::string, Context>& group2ctx,
-                               const std::vector<Context>& in_arg_ctxes,
-                               const std::vector<Context>& arg_grad_ctxes,
-                               const std::vector<Context>& aux_state_ctxes,
-                               const std::unordered_map<std::string, mxnet::TShape>& arg_shape_map,
-                               const std::unordered_map<std::string, int>& arg_dtype_map,
-                               const std::unordered_map<std::string, int>& arg_stype_map,
-                               const std::vector<OpReqType>& grad_req_types,
-                               const std::unordered_set<std::string>& shared_arg_names,
-                               std::vector<NDArray>* in_args,
-                               std::vector<NDArray>* arg_grads,
-                               std::vector<NDArray>* aux_states,
-                               std::unordered_map<std::string, NDArray>* shared_buffer,
-                               Executor* shared_exec) {
-  auto exec = new exec::GraphExecutor(symbol);
-  bool init = false;
-  if (!exec->subgraph_property().empty()) {
-    static int verbose = dmlc::GetEnv("MXNET_SUBGRAPH_VERBOSE", 1);
-    const auto& backend_name = exec->subgraph_property();
-    const auto& backend = op::SubgraphBackendRegistry::Get()->GetSubgraphBackend(backend_name);
-    if (exec::SubgraphBackendCheck(backend, default_ctx, verbose)) {
-      if (verbose) LOG(INFO) << "Subgraph backend " << backend_name << " is activated.";
-      std::vector<Context> tmp_in_arg_ctxes = in_arg_ctxes;
-      std::vector<Context> tmp_arg_grad_ctxes = arg_grad_ctxes;
-      std::vector<Context> tmp_aux_state_ctxes = aux_state_ctxes;
-      std::vector<OpReqType> tmp_grad_req_types = grad_req_types;
-      std::vector<NDArray> tmp_in_args;
-      std::vector<NDArray> tmp_arg_grads;
-      std::vector<NDArray> tmp_aux_states;
-      const auto arg_names = symbol.ListInputNames(nnvm::Symbol::kReadOnlyArgs);
-      const auto aux_names = symbol.ListInputNames(nnvm::Symbol::kAuxiliaryStates);
-      symbol = exec::BuildSubgraph(symbol, backend, arg_shape_map, arg_dtype_map, arg_stype_map,
-                                   default_ctx, group2ctx, &tmp_in_arg_ctxes, &tmp_arg_grad_ctxes,
-                                   &tmp_grad_req_types, &tmp_aux_state_ctxes, verbose);
-      // Subgraph cannot be recreated from unoptimized symbol
-      exec = new exec::GraphExecutor(symbol);
-      exec->Init(symbol.Copy(), default_ctx, group2ctx, tmp_in_arg_ctxes, tmp_arg_grad_ctxes,
-                 tmp_aux_state_ctxes, arg_shape_map, arg_dtype_map, arg_stype_map,
-                 tmp_grad_req_types, shared_arg_names, &tmp_in_args, &tmp_arg_grads,
-                 &tmp_aux_states, shared_buffer, shared_exec);
-      init = true;
-      const auto new_arg_names = symbol.ListInputNames(nnvm::Symbol::kReadOnlyArgs);
-      const auto new_aux_names = symbol.ListInputNames(nnvm::Symbol::kAuxiliaryStates);
-      std::unordered_map<std::string, size_t> new_arg_names_idx_map;
-      std::unordered_map<std::string, size_t> new_aux_names_idx_map;
-      for (size_t i = 0; i != new_arg_names.size(); ++i) {
-        new_arg_names_idx_map[new_arg_names[i]] = i;
-      }
-      for (size_t i = 0; i != new_aux_names.size(); ++i) {
-        new_aux_names_idx_map[new_aux_names[i]] = i;
-      }
-
-      in_args->reserve(arg_names.size());
-      arg_grads->reserve(arg_names.size());
-      for (size_t i = 0; i != arg_names.size(); ++i) {
-        const auto& arg_name = arg_names[i];
-        const auto& it = new_arg_names_idx_map.find(arg_name);
-        CHECK(it != new_arg_names_idx_map.end())
-            << "Subgraph doesn't support remove any input node for now.";
-        in_args->emplace_back(std::move(tmp_in_args[it->second]));
-        arg_grads->emplace_back(std::move(tmp_arg_grads[it->second]));
-      }
-
-      aux_states->reserve(aux_names.size());
-      for (size_t i = 0; i != aux_names.size(); ++i) {
-        const auto& aux_name = aux_names[i];
-        const auto& it = new_aux_names_idx_map.find(aux_name);
-        CHECK(it != new_aux_names_idx_map.end())
-            << "Subgraph doesn't support remove any input node for now.";
-        aux_states->emplace_back(std::move(tmp_aux_states[it->second]));
-      }
-    }
-  }
-  if (!init) {
-    // init without subgraph
-    exec->Init(symbol.Copy(), default_ctx, group2ctx, in_arg_ctxes, arg_grad_ctxes, aux_state_ctxes,
-               arg_shape_map, arg_dtype_map, arg_stype_map, grad_req_types, shared_arg_names,
-               in_args, arg_grads, aux_states, shared_buffer, shared_exec);
-  }
-  return exec;
-}
-
-Executor *Executor::Bind(nnvm::Symbol symbol,
-                         const Context& default_ctx,
-                         const std::map<std::string, Context>& group2ctx,
-                         const std::vector<NDArray> &in_args,
-                         const std::vector<NDArray> &arg_grad_store,
-                         const std::vector<OpReqType> &grad_req_type,
-                         const std::vector<NDArray> &aux_states,
-                         Executor* shared_exec) {
-  auto exec = new exec::GraphExecutor(symbol);
-  static int verbose = dmlc::GetEnv("MXNET_SUBGRAPH_VERBOSE", 1);
-  std::vector<NDArray> tmp_in_args = in_args;
-  std::vector<NDArray> tmp_arg_grad_store = arg_grad_store;
-  std::vector<OpReqType> tmp_grad_req_type = grad_req_type;
-  std::vector<NDArray> tmp_aux_states = aux_states;
-
-  if (!exec->subgraph_property().empty()) {
-    const auto& backend_name = exec->subgraph_property();
-    const auto& backend = op::SubgraphBackendRegistry::Get()->GetSubgraphBackend(backend_name);
-    if (exec::SubgraphBackendCheck(backend, default_ctx, verbose)) {
-      if (verbose) LOG(INFO) << "Subgraph backend " << backend_name << " is activated.";
-      symbol = exec::BuildSubgraph(symbol, backend, default_ctx, group2ctx, &tmp_in_args,
-                                   &tmp_arg_grad_store, &tmp_grad_req_type, &tmp_aux_states,
-                                   verbose);
-      // Subgraph cannot be recreated from unoptimized symbol
-      exec = new exec::GraphExecutor(symbol);
-    }
-  }
-  exec->Init(symbol.Copy(), default_ctx, group2ctx, tmp_in_args, tmp_arg_grad_store,
-             tmp_grad_req_type, tmp_aux_states, reinterpret_cast<Executor*>(shared_exec));
-  return exec;
-}
-}  // namespace mxnet
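Both the SimpleBind and Bind paths above finish by re-matching NDArrays to inputs by name, because subgraph partitioning may reorder (but, per the CHECK messages, never drop) the symbol's inputs. A minimal standalone sketch of that remapping, with a generic T standing in for NDArray (ReorderByName is illustrative, not MXNet API):

    #include <cassert>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Rearranges `values` (parallel to `old_names`) into the order given by
    // `new_names`, mirroring the name-keyed remapping in the code above.
    template <typename T>
    std::vector<T> ReorderByName(const std::vector<std::string>& old_names,
                                 const std::vector<T>& values,
                                 const std::vector<std::string>& new_names) {
      std::unordered_map<std::string, T> by_name;
      for (size_t i = 0; i < old_names.size(); ++i) by_name.emplace(old_names[i], values[i]);
      std::vector<T> out;
      out.reserve(new_names.size());
      for (const auto& name : new_names) {
        assert(by_name.count(name) && "partitioning must not drop inputs");
        out.push_back(by_name.at(name));
      }
      return out;
    }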
diff --git a/src/executor/graph_executor.h b/src/executor/graph_executor.h
deleted file mode 100644
index ed6eeaa11f4f..000000000000
--- a/src/executor/graph_executor.h
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * Copyright (c) 2016 by Contributors
- * \file graph_executor.h
- * \brief Executor to execute the computation graph.
- */
-#ifndef MXNET_EXECUTOR_GRAPH_EXECUTOR_H_
-#define MXNET_EXECUTOR_GRAPH_EXECUTOR_H_
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "./exec_pass.h"
-
-namespace mxnet {
-
-// forward declaration
-namespace exec {
-class GraphExecutor;
-}
-
-namespace exec {
-
-using nnvm::Graph;
-
-nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v);
-
-// graph executors
-class GraphExecutor : public Executor {
- public:
-  using Executor::MonitorCallback;
-
-  explicit GraphExecutor(const nnvm::Symbol& symbol);
-  virtual ~GraphExecutor();
-  void Forward(bool is_train) override;
-  void PartialForward(bool is_train, int step, int *step_left) override;
-  void Backward(const std::vector<NDArray> &head_grads, bool is_train = true) override;
-  const std::vector<NDArray>& outputs() const override;
-  const std::unordered_map<std::string, NDArray>& in_arg_map() const override;
-  const std::unordered_map<std::string, NDArray>& arg_grad_map() const override;
-  const std::unordered_map<std::string, NDArray>& aux_state_map() const override;
-  void Print(std::ostream &os) const override;  // NOLINT(*)
-  nnvm::Symbol GetOptimizedSymbol();
-  void SetMonitorCallback(const MonitorCallback& callback, bool monitor_all = false) override;
-  // Initialize the rest of attributes
-  // after setting up arguments.
-  void FinishInitGraph(nnvm::Symbol symbol, nnvm::Graph g,
-                       Executor* shared_exec = nullptr,
-                       const nnvm::NodeEntryMap<NDArray>& feed_dict
-                         = nnvm::NodeEntryMap<NDArray>());
-
-  // initialize executor for bind
-  void Init(nnvm::Symbol symbol,
-            const Context& default_ctx,
-            const std::map<std::string, Context>& ctx_map,
-            const std::vector<NDArray>& in_args,
-            const std::vector<NDArray>& arg_grad_store,
-            const std::vector<OpReqType>& grad_req_types,
-            const std::vector<NDArray>& aux_states,
-            Executor* shared_exec = nullptr,
-            const nnvm::NodeEntryMap<NDArray>& feed_dict
-              = nnvm::NodeEntryMap<NDArray>());
-  // initialize executor for simple bind
-  void Init(nnvm::Symbol symbol,
-            const Context& default_ctx,
-            const std::map<std::string, Context>& ctx_map,
-            const std::vector<Context>& in_arg_ctxes,
-            const std::vector<Context>& arg_grad_ctxes,
-            const std::vector<Context>& aux_state_ctxes,
-            const std::unordered_map<std::string, mxnet::TShape>& arg_shape_map,
-            const std::unordered_map<std::string, int>& arg_dtype_map,
-            const std::unordered_map<std::string, int>& arg_stype_map,
-            const std::vector<OpReqType>& grad_req_types,
-            const std::unordered_set<std::string>& shared_arg_names,
-            std::vector<NDArray>* in_arg_vec,
-            std::vector<NDArray>* arg_grad_vec,
-            std::vector<NDArray>* aux_state_vec,
-            std::unordered_map<std::string, NDArray>* shared_buffer = nullptr,
-            Executor* shared_exec = nullptr,
-            const nnvm::NodeEntryMap<NDArray>& feed_dict
-              = nnvm::NodeEntryMap<NDArray>());
-
-  Executor* Reshape(const bool partial_shaping,
-                    const bool allow_up_sizing,
-                    const Context& default_ctx,
-                    const std::map<std::string, Context>& ctx_map,
-                    const std::unordered_map<std::string, mxnet::TShape>&
-                      provided_arg_shapes,
-                    std::vector<NDArray>* in_args,
-                    std::vector<NDArray>* arg_grads,
-                    std::vector<NDArray>* aux_states) override;
-
-  const std::string& subgraph_property() const { return subgraph_property_; }
-
- protected:
-  friend class mxnet::Imperative;
-  // Information about operational node
-  struct OpNode {
-    // The name of the operator
-    const char* opr_name;
-    // the context of the node
-    Context ctx;
-    // The executor
-    std::shared_ptr<OpExecutor> exec;
-    // skip the execution of this node
-    bool skip_exec_node{false};
-    // cached operator handle
-    Engine::OprHandle cached_opr{nullptr};
-    // cached const vars, used for seg ops creation
-    std::vector<Engine::VarHandle> use_vars;
-    // cached mutate vars, used for seg ops creation
-    std::vector<Engine::VarHandle> mutate_vars;
-  };
-  // a cached segment operator that executes a segment
-  struct CachedSegOpr {
-    // context of the operator
-    Context ctx;
-    // begin in topo order
-    size_t topo_start;
-    // end in topo order
-    size_t topo_end;
-    // the cached operator
-    Engine::OprHandle opr = nullptr;
-    // list of op executors
-    std::vector<std::shared_ptr<OpExecutor> > exec_list;
-  };
-  // Initialize in_args, arg_grads, and aux_states
-  void InitArguments(const nnvm::IndexedGraph& idx,
-                     const mxnet::ShapeVector& inferred_shapes,
-                     const nnvm::DTypeVector& inferred_dtypes,
-                     const StorageTypeVector& inferred_stypes,
-                     const std::vector<Context>& in_arg_ctxes,
-                     const std::vector<Context>& arg_grad_ctxes,
-                     const std::vector<Context>& aux_state_ctxes,
-                     const std::vector<OpReqType>& grad_req_types,
-                     std::vector<NDArray>* in_arg_vec,
-                     std::vector<NDArray>* arg_grad_vec,
-                     std::vector<NDArray>* aux_state_vec);
-  // Initialize in_args, arg_grads and aux_states with
-  // shared_buffer and shared_exec
-  virtual void InitArguments(const nnvm::IndexedGraph& idx,
-                             const mxnet::ShapeVector& inferred_shapes,
-                             const nnvm::DTypeVector& inferred_dtypes,
-                             const StorageTypeVector& inferred_stypes,
-                             const std::vector<Context>& in_arg_ctxes,
-                             const std::vector<Context>& arg_grad_ctxes,
-                             const std::vector<Context>& aux_state_ctxes,
-                             const std::vector<OpReqType>& grad_req_types,
-                             const std::unordered_set<std::string>& shared_arg_names,
-                             const Executor* shared_exec,
-                             std::unordered_map<std::string, NDArray>* shared_buffer,
-                             std::vector<NDArray>* in_arg_vec,
-                             std::vector<NDArray>* arg_grad_vec,
-                             std::vector<NDArray>* aux_state_vec);
-
-  // internal initialization of the graph for simple bind
-  Graph InitGraph(nnvm::Symbol symbol,
-                  const Context& default_ctx,
-                  const std::map<std::string, Context>& ctx_map,
-                  const std::vector<Context>& in_arg_ctxes,
-                  const std::vector<Context>& arg_grad_ctxes,
-                  const std::vector<Context>& aux_state_ctxes,
-                  const std::vector<OpReqType>& grad_req_types,
-                  const ShapeVector& in_arg_shapes,
-                  const nnvm::DTypeVector& in_arg_dtypes);
-  // initialize the full graph for simple bind, including gradient
-  Graph InitFullGraph(nnvm::Symbol symbol,
-                      const std::vector<OpReqType>& grad_req_types,
-                      const ShapeVector& in_arg_shapes,
-                      const nnvm::DTypeVector& in_arg_dtypes);
-  // initialize the cached operator
-  void InitCachedOps();
-  // initialize the opr segments for bulk exec
-  void InitOpSegs();
-  // initialize the resources in the graph
-  // initialize the memory of data entries
-  // shared_pool: extra memory shared from other parts
-  void InitDataEntryMemory(std::vector<NDArray>* shared_pool);
-  // run ops from topo order start to end
-  void RunOps(bool is_train, size_t topo_start, size_t topo_end);
-  /*!
-   * \brief Try to create a cached operator to run segments between start and end
-   * \param topo_start beginning of segment
-   * \param topo_end end of segment
-   * \return the cached operator.
-   *  ret.opr Can be nullptr if creation failed.
-   */
-  CachedSegOpr CreateCachedSegOpr(size_t topo_start, size_t topo_end);
-  // run the monitor callback for input of node `nid`
-  void ExecuteMonInputCallback(size_t nid);
-  // run the monitor callback for output of node `nid`
-  void ExecuteMonOutputCallback(size_t nid);
-  // perform bulking and segmentation on the region [from_node, up_to_node) of a graph
-  void BulkOpSegs(size_t from_node, size_t up_to_node, size_t segment_num_nodes_max);
-  // When infer shape fails, fall back to ensure dynamic-shaped operators executed correctly.
-  bool is_dynamic_;
-  // indicate whether there is a backward graph for gradients.
-  bool need_grad_;
-  // internal graph
-  nnvm::Graph graph_;
-  // operator node
-  std::vector<OpNode> op_nodes_;
-  // internal data entry of each node
-  std::vector<NDArray> data_entry_;
-  // internal data pool of allocated entries.
-  // these allocated entries can be used for static memory sharing between executors.
-  std::vector<NDArray> data_pool_;
-  // output arrays
-  std::vector<NDArray> output_arrays_;
-  // input argument map, key is arg name, value is arg's NDArray
-  std::unordered_map<std::string, NDArray> in_arg_map_;
-  // arg grad map, key is arg name, value is arg grad NDArray
-  std::unordered_map<std::string, NDArray> arg_grad_map_;
-  // aux state map, key is aux state name, value is aux state NDArray
-  std::unordered_map<std::string, NDArray> aux_state_map_;
-  // gradient store
-  std::vector<std::pair<OpReqType, NDArray> > grad_store_;
-  // array to hold head gradient.
-  std::vector<NDArray> head_grad_array_;
-  // entry to hold head gradient
-  std::vector<nnvm::NodeEntry> head_grad_entry_;
-  // the index map of entry to map.
-  std::unordered_map<const nnvm::Node*, size_t> head_grad_map_;
-  // number of outputs.
-  size_t num_forward_outputs_{0};
-  // number of inputs
-  size_t num_forward_inputs_{0};
-  // number of forward nodes
-  size_t num_forward_nodes_{0};
-  // monitor call back
-  std::function<void(const char*, void*)> monitor_callback_{nullptr};
-  // monitor both input and output from monitor call back
-  bool monitor_all_{false};
-  // whether to enable bulk execution
-  bool prefer_bulk_execution_;
-  // cached segment operator
-  std::vector<CachedSegOpr> cached_seg_opr_;
-  // verbose logging
-  bool log_verbose_ = false;
-  // subgraph property name
-  std::string subgraph_property_;
-  // ref of engine
-  std::shared_ptr<Engine> engine_ref_;
-  // Unoptimized copy of the symbol for sharing with
-  // child executors
-  nnvm::Symbol symbol_;
-};
-
-}  // namespace exec
-}  // namespace mxnet
-#endif  // MXNET_EXECUTOR_GRAPH_EXECUTOR_H_
diff --git a/src/executor/attach_op_execs_pass.cc b/src/imperative/attach_op_execs_pass.cc
similarity index 100%
rename from src/executor/attach_op_execs_pass.cc
rename to src/imperative/attach_op_execs_pass.cc
diff --git a/src/executor/attach_op_resource_pass.cc b/src/imperative/attach_op_resource_pass.cc
similarity index 100%
rename from src/executor/attach_op_resource_pass.cc
rename to src/imperative/attach_op_resource_pass.cc
diff --git a/src/imperative/cached_op.cc b/src/imperative/cached_op.cc
index 3ad392280ea0..83e8d31fe4ec 100644
--- a/src/imperative/cached_op.cc
+++ b/src/imperative/cached_op.cc
@@ -20,7 +20,7 @@
 #include
 #include "./imperative_utils.h"
 #include "./cached_op.h"
-#include "../executor/exec_pass.h"
+#include "./exec_pass.h"
 #include "../profiler/profiler.h"
 #include "../operator/operator_common.h"
 #include "../operator/subgraph/common.h"
@@ -33,7 +33,7 @@ DMLC_REGISTER_PARAMETER(CachedOpConfig);
 constexpr uint32_t kEidNotExist = std::numeric_limits<uint32_t>::max();
 
 nnvm::Symbol CachedOp::GetOptimizedSymbol() const {
-  Symbol ret;
+  nnvm::Symbol ret;
   ret.outputs = std::vector<nnvm::NodeEntry>(full_graph_.outputs.begin(),
                                              full_graph_.outputs.begin() + num_outputs());
   return ret.Copy();
diff --git a/src/imperative/cached_op.h b/src/imperative/cached_op.h
index 5153620ee693..936000c92a8e 100644
--- a/src/imperative/cached_op.h
+++ b/src/imperative/cached_op.h
@@ -47,6 +47,86 @@ std::string AddPrefix(const std::string& prefix,
   return prefix + "_" + s;
 }
 
+nnvm::NodeEntry AggregateGradient(std::vector<nnvm::NodeEntry>&& v) {
+  using nnvm::Op;
+  static size_t inplace_sum_cap = dmlc::GetEnv("MXNET_EXEC_INPLACE_GRAD_SUM_CAP", 8);
+  static const Op* ewise_plus_op = Op::Get("_grad_add");
+  static const Op* ewise_sum_op = Op::Get("ElementWiseSum");
+  static const Op* identity_op = Op::Get("identity");
+  static const Op* zeros_op = Op::Get("_zeros");
+  static const Op* zeros_like_op = Op::Get("zeros_like");
+
+  if (v.empty()) {
+    nnvm::ObjectPtr ng = nnvm::Node::Create();
+    ng->attrs.op = Op::Get("_zeros_without_dtype");
+    ng->attrs.name = "zeros_without_dtype";
+    ng->attrs.op->attr_parser(&(ng->attrs));
+    return nnvm::NodeEntry(std::move(ng), 0, 0);
+  }
+
+  // remove zero in the sum. at least keep 1.
+  auto begin = std::remove_if(v.begin(), v.end(), [](const nnvm::NodeEntry& nodeEntry) {
+    CHECK(nodeEntry.node);
+    return nodeEntry.node->op() == zeros_op || nodeEntry.node->op() == zeros_like_op;
+  });
+  if (begin == v.begin()) ++begin;
+  v.erase(begin, v.end());
+  CHECK(!v.empty());
+
+  if (v.size() == 1) {
+    return std::move(v[0]);
+  } else {
+    if (v.size() < inplace_sum_cap) {
+      nnvm::ObjectPtr sum_node = nnvm::Node::Create();
+      sum_node->attrs.op = ewise_sum_op;
+      sum_node->attrs.name = "sum_grad";
+      sum_node->attrs.dict["num_args"] = std::to_string(v.size());
+      sum_node->attrs.op->attr_parser(&(sum_node->attrs));
+      sum_node->inputs = std::move(v);
+      return nnvm::NodeEntry(std::move(sum_node), 0, 0);
+    } else {
+      // use a stream line of plus instead
+      nnvm::NodeEntry ret = v[0];
+      for (size_t i = 1; i < v.size(); ++i) {
+        // Add a control flow dependency to the previous node.
+        // This enforces that the gradient sum order will be in the inverse
+        // order of forward traversal.
+        // NOTE: adding control dependency can be dangerous and cause cycle in the dep.
+        // The current usage is correct, because of the following invariant:
+        //   assert: v[i-1] does not depend on v[i]
+        // To put it in plain text: v is a gradient vector that gets pushed in the order
+        // in which gradients can be generated, which means if v[i] is not yet pushed,
+        // none of the previous gradients can depend on it.
+        // Note: For a symbol like the following:
+        //   data = mx.sym.Variable('data')
+        //   sym = data + data + data + data + data + data + data
+        // the node entries v passed in here are of the same node of
+        // op _identity_with_attr_like_rhs. We should skip adding a node
+        // to its own control_deps.
+        if (v[i-1].node != v[i].node) {
+          v[i].node->control_deps.push_back(ret.node);
+        }
+
+        std::ostringstream os;
+        os << "sum_grad_" << i;
+        nnvm::ObjectPtr x = nnvm::Node::Create();
+        x->attrs.op = ewise_plus_op;
+        x->attrs.name = os.str();
+        x->inputs = {ret, v[i]};
+        ret = nnvm::NodeEntry(std::move(x), 0, 0);
+      }
+      // identity node is used to avoid exposure of dummy plus node
+      // when its output gets assigned to another space.
+      nnvm::ObjectPtr id_node = nnvm::Node::Create();
+      id_node->attrs.op = identity_op;
+      id_node->attrs.name = "sum_grad_final";
+      id_node->inputs = {ret};
+      return nnvm::NodeEntry{id_node, 0, 0};
+    }
+  }
+}
+
+
 /* \brief collect pointers to input and output ndarrays
  * into a single data structure, this data structure can
  * be used for Memory allocation pass*/
@@ -168,7 +248,7 @@ void CreateBackwardGraph(nnvm::Graph* fwd_graph,
   try {
     *grad_graph = pass::MXGradient(
         *fwd_graph, fwd_graph->outputs, xs, *ograd_entries,
-        exec::AggregateGradient, nullptr,
+        mxnet::AggregateGradient, nullptr,
         zero_ops, "_copy");
   } catch (const nnvm::pass::InvalidGraphError &e) {
     *grad_graph = nnvm::Graph();
diff --git a/src/imperative/cached_op_threadsafe.cc b/src/imperative/cached_op_threadsafe.cc
index bf08d311c74f..a820c06b901f 100644
--- a/src/imperative/cached_op_threadsafe.cc
+++ b/src/imperative/cached_op_threadsafe.cc
@@ -20,7 +20,7 @@
 #include
 #include
 #include "./imperative_utils.h"
-#include "../executor/exec_pass.h"
+#include "./exec_pass.h"
 #include "./cached_op_threadsafe.h"
 #include "../profiler/profiler.h"
 #include "../operator/operator_common.h"
diff --git a/src/executor/eliminate_common_expr_pass.cc b/src/imperative/eliminate_common_expr_pass.cc
similarity index 100%
rename from src/executor/eliminate_common_expr_pass.cc
rename to src/imperative/eliminate_common_expr_pass.cc
diff --git a/src/executor/exec_pass.h b/src/imperative/exec_pass.h
similarity index 99%
rename from src/executor/exec_pass.h
rename to src/imperative/exec_pass.h
index 270c546f0f49..888e19f3b244 100644
--- a/src/executor/exec_pass.h
+++ b/src/imperative/exec_pass.h
@@ -22,8 +22,8 @@
  * \file exec_pass.h
  * \brief All the execution related pass and data structures.
  */
-#ifndef MXNET_EXECUTOR_EXEC_PASS_H_
-#define MXNET_EXECUTOR_EXEC_PASS_H_
+#ifndef MXNET_IMPERATIVE_EXEC_PASS_H_
+#define MXNET_IMPERATIVE_EXEC_PASS_H_
 
 #include
 #include
@@ -320,4 +320,4 @@ inline Graph MXGradient(
 }  // namespace pass
 }  // namespace nnvm
 
-#endif  // MXNET_EXECUTOR_EXEC_PASS_H_
+#endif  // MXNET_IMPERATIVE_EXEC_PASS_H_
diff --git a/src/imperative/imperative.cc b/src/imperative/imperative.cc
index 0fb5a97ca385..45fdf549b0ed 100644
--- a/src/imperative/imperative.cc
+++ b/src/imperative/imperative.cc
@@ -332,7 +332,7 @@ void Imperative::RecordDeferredCompute(nnvm::NodeAttrs &&attrs,
 }
 
 nnvm::Symbol Imperative::GetDeferredComputeSymbol(const std::vector<NDArray *> &outputs) {
-  Symbol s;
+  nnvm::Symbol s;
   s.outputs.reserve(outputs.size());
   for (NDArray * ndoutput : outputs) {
     CHECK(!Imperative::DCInfo::IsNone(*ndoutput))
@@ -456,7 +456,7 @@ std::vector<NDArray*> Imperative::Backward(
   Graph g_graph = pass::MXGradient(
       graph, graph.outputs, xs, ograd_entries,
-      exec::AggregateGradient, nullptr,
+      mxnet::AggregateGradient, nullptr,
       zero_ops, "_copy");
   CHECK_EQ(g_graph.outputs.size(), xs.size());
   for (const auto& e : g_graph.outputs) {
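AggregateGradient, now shared by the imperative path, picks between two graph shapes: below MXNET_EXEC_INPLACE_GRAD_SUM_CAP addends it emits a single n-ary ElementWiseSum node, and at or above the cap it emits a chain of in-place _grad_add nodes ordered by control dependencies. A self-contained sketch of the same policy on plain doubles (both branches compute the same value here; in the graph setting only the shape of the computation and its memory behavior differ):

    #include <cstdlib>
    #include <numeric>
    #include <vector>

    // Standalone illustration of the summation policy above; not MXNet API.
    double SumGradients(const std::vector<double>& grads) {
      static const size_t inplace_sum_cap = [] {
        const char* v = std::getenv("MXNET_EXEC_INPLACE_GRAD_SUM_CAP");
        return v ? static_cast<size_t>(std::atoi(v)) : size_t{8};
      }();
      if (grads.size() < inplace_sum_cap) {
        // one ElementWiseSum-style n-ary reduction (one temporary)
        return std::accumulate(grads.begin(), grads.end(), 0.0);
      }
      // chained _grad_add-style running accumulation; in the graph this is
      // where control dependencies pin the evaluation order
      double acc = grads[0];
      for (size_t i = 1; i < grads.size(); ++i) acc += grads[i];
      return acc;
    }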
diff --git a/src/imperative/imperative_utils.h b/src/imperative/imperative_utils.h
index d6850ad3e569..1bda6be3363d 100644
--- a/src/imperative/imperative_utils.h
+++ b/src/imperative/imperative_utils.h
@@ -17,7 +17,6 @@
  * under the License.
  */
 #include
-#include
 #include
 #include
 #include
@@ -25,8 +24,7 @@
 #include
 #include
 #include
-#include "../executor/graph_executor.h"
-#include "../executor/exec_pass.h"
+#include "./exec_pass.h"
 #include "../c_api/c_api_common.h"
 #include "../common/utils.h"
 #include "../common/exec_utils.h"
diff --git a/src/executor/infer_graph_attr_pass.cc b/src/imperative/infer_graph_attr_pass.cc
similarity index 100%
rename from src/executor/infer_graph_attr_pass.cc
rename to src/imperative/infer_graph_attr_pass.cc
diff --git a/src/executor/inplace_addto_detect_pass.cc b/src/imperative/inplace_addto_detect_pass.cc
similarity index 100%
rename from src/executor/inplace_addto_detect_pass.cc
rename to src/imperative/inplace_addto_detect_pass.cc
diff --git a/src/imperative/naive_cached_op.cc b/src/imperative/naive_cached_op.cc
index 84425e3f8068..ff3e899bbfed 100644
--- a/src/imperative/naive_cached_op.cc
+++ b/src/imperative/naive_cached_op.cc
@@ -20,7 +20,7 @@
 #include
 #include "./imperative_utils.h"
 #include "./naive_cached_op.h"
-#include "../executor/exec_pass.h"
+#include "./exec_pass.h"
 #include "../profiler/profiler.h"
 #include "../operator/operator_common.h"
 #include "../operator/subgraph/common.h"
diff --git a/src/executor/pointwise_fusion_pass.cc b/src/imperative/pointwise_fusion_pass.cc
similarity index 100%
rename from src/executor/pointwise_fusion_pass.cc
rename to src/imperative/pointwise_fusion_pass.cc
diff --git a/src/executor/simple_partition_pass.h b/src/imperative/simple_partition_pass.h
similarity index 99%
rename from src/executor/simple_partition_pass.h
rename to src/imperative/simple_partition_pass.h
index 1ca0086dbc53..c57bd64328da 100644
--- a/src/executor/simple_partition_pass.h
+++ b/src/imperative/simple_partition_pass.h
@@ -23,8 +23,8 @@
 * \brief Simple pass for partitioning a graph.
 * \author Clement Fuji Tsang
 */
-#ifndef MXNET_EXECUTOR_SIMPLE_PARTITION_PASS_H_
-#define MXNET_EXECUTOR_SIMPLE_PARTITION_PASS_H_
+#ifndef MXNET_IMPERATIVE_SIMPLE_PARTITION_PASS_H_
+#define MXNET_IMPERATIVE_SIMPLE_PARTITION_PASS_H_
 
 #include
 #include
@@ -472,4 +472,4 @@ std::vector GetCompatibleSubsets(const Graph& g, FCompatible is_compatible)
 }  // namespace exec
 }  // namespace mxnet
 
-#endif  // MXNET_EXECUTOR_SIMPLE_PARTITION_PASS_H_
+#endif  // MXNET_IMPERATIVE_SIMPLE_PARTITION_PASS_H_
diff --git a/src/nnvm/gradient.cc b/src/nnvm/gradient.cc
index a8a836ea71b8..09c02b2aa26f 100644
--- a/src/nnvm/gradient.cc
+++ b/src/nnvm/gradient.cc
@@ -38,7 +38,7 @@
 #include
 
 #include "error.h"
-#include "../executor/exec_pass.h"
+#include "../imperative/exec_pass.h"
 
 namespace nnvm {
 namespace pass {
@@ -47,7 +47,6 @@ extern size_t MXGetDTypeSize(const int type_flag);  // defined in plan_memory.cc
 
 namespace {
-
 /*! Auxiliary Data Structure for Gradient Entries */
 struct GradEntry {
   NodeEntry sum = NodeEntry(nullptr, 0, 0);
@@ -709,5 +708,6 @@ NNVM_REGISTER_PASS(MXGradient)
 .depend_graph_attr("grad_ys_out_grad");
 
 }  // namespace
+
 }  // namespace pass
 }  // namespace nnvm
diff --git a/src/operator/control_flow.cc b/src/operator/control_flow.cc
index ccf83a5c038f..e6cc90ac13dc 100644
--- a/src/operator/control_flow.cc
+++ b/src/operator/control_flow.cc
@@ -65,7 +65,7 @@ class ForeachState: public LoopState {
   ForeachParam params;
   int num_iterations;
 
-  ForeachState(const Symbol &g, const ForeachParam &params) : LoopState(g) {
+  ForeachState(const nnvm::Symbol &g, const ForeachParam &params) : LoopState(g) {
     this->params = params;
   }
 };
@@ -531,7 +531,7 @@ class WhileLoopState: public LoopState {
   // indicates to which index the output of `func' will be copied to the input of `cond'
   std::vector<int> oi_map;
 
-  WhileLoopState(const WhileLoopParam &params, const Symbol &cond, const Symbol &func) :
+  WhileLoopState(const WhileLoopParam &params, const nnvm::Symbol &cond, const nnvm::Symbol &func) :
     LoopState(func),
     params(params),
     n_iterations(0U),
@@ -866,9 +866,9 @@ class CondState {
   int branch_selection;  // 1 if then branch; 0 if else branch; -1 if undefined
 
   CondState(const CondParam &params,
-            const Symbol &cond,
-            const Symbol &then_sym,
-            const Symbol &else_sym):
+            const nnvm::Symbol &cond,
+            const nnvm::Symbol &then_sym,
+            const nnvm::Symbol &else_sym):
     params(params),
     cond_op(LoopState::MakeSharedOp(cond)),
     then_branch(then_sym),
@@ -1028,7 +1028,8 @@ static bool BackwardCondStorageType(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(out_attrs->size() + 3U, (size_t) params.num_args);
   CHECK_EQ(attrs.subgraphs.size(), 3U);
   static const std::function<bool(const int &)> is_udf = is_stype_udf;
-  auto sub_pass = [&](const std::shared_ptr<Symbol> &subg, const mxnet::Tuple<dim_t> &input_locs) {
+  auto sub_pass = [&](const std::shared_ptr<Symbol> &subg,
+                      const mxnet::Tuple<dim_t> &input_locs) {
     // A. first construct subg_in_attrs
     // need subg_in_attrs as subg_bwd_out (copy), subg_fwd_in (extract), subg_fwd_out (copy)
     std::vector<int> subg_in_attrs;
diff --git a/src/operator/fusion/fused_op.cc b/src/operator/fusion/fused_op.cc
index ee470cf1de42..596f4e7146e0 100644
--- a/src/operator/fusion/fused_op.cc
+++ b/src/operator/fusion/fused_op.cc
@@ -21,7 +21,7 @@
 
 #include "./fused_op.h"
 #include "../operator_common.h"
-#include "../../executor/exec_pass.h"
+#include "../../imperative/exec_pass.h"
 
 #if MXNET_USE_CUDA && MXNET_ENABLE_CUDA_RTC
diff --git a/src/operator/fusion/fused_op.cu b/src/operator/fusion/fused_op.cu
index 3d7caab2fb31..fe667946a0c4 100644
--- a/src/operator/fusion/fused_op.cu
+++ b/src/operator/fusion/fused_op.cu
@@ -30,7 +30,7 @@
 #include "./fused_op-inl.h"
 #include "../operator_common.h"
 #include "../elemwise_op_common.h"
-#include "../../executor/exec_pass.h"
+#include "../../imperative/exec_pass.h"
 #include "../../common/cuda_utils.h"
 
 namespace mxnet {
diff --git a/src/operator/subgraph/common.h b/src/operator/subgraph/common.h
index 740c8d409ccd..5550ad284751 100644
--- a/src/operator/subgraph/common.h
+++ b/src/operator/subgraph/common.h
@@ -24,7 +24,7 @@
 #include
 #include
 #include "../elemwise_op_common.h"
-#include "../../executor/exec_pass.h"
+#include "../../imperative/exec_pass.h"
 
 namespace mxnet {
 namespace op {
diff --git a/src/operator/subgraph_op_common.cc b/src/operator/subgraph_op_common.cc
index 619aaca08f94..d8e74cfa7852 100644
--- a/src/operator/subgraph_op_common.cc
+++ b/src/operator/subgraph_op_common.cc
@@ -189,7 +189,7 @@ bool is_type_udf(const int &x) {
   return x == -1;
 }
 
-LoopState::LoopState(const Symbol &g) {
+LoopState::LoopState(const nnvm::Symbol &g) {
   this->subgraph_sym = g;
   this->subgraph.outputs = g.outputs;
   this->iter_op = LoopState::MakeSharedOp(g);
diff --git a/src/operator/subgraph_op_common.h b/src/operator/subgraph_op_common.h
index 19528349c0c7..78bf3de307ac 100644
--- a/src/operator/subgraph_op_common.h
+++ b/src/operator/subgraph_op_common.h
@@ -135,11 +135,11 @@ class LoopState {
   // which will be used in the backward.
   std::vector<NDArray> all_states;
   CachedOpPtr iter_op;
-  Symbol subgraph_sym;
+  nnvm::Symbol subgraph_sym;
   nnvm::Graph subgraph;
 
  public:
-  explicit LoopState(const Symbol &g);
+  explicit LoopState(const nnvm::Symbol &g);
 
   void Forward(int iter_no,
               const std::vector<NDArray> &inputs,
@@ -155,7 +155,7 @@ class LoopState {
     all_inputs.clear();
     all_states.clear();
   }
-  static CachedOpPtr MakeSharedOp(const Symbol &sym) {
+  static CachedOpPtr MakeSharedOp(const nnvm::Symbol &sym) {
     // We turn on static_alloc for two reasons.
     // It avoids the overhead of unnecessary memory allocation.
     // only static_alloc supports nested call of CachedOp.
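LoopState above keeps one CachedOp per loop (MakeSharedOp, with static_alloc turned on) and records per-iteration inputs and states so the backward pass can replay each step. A toy, MXNet-free sketch of that bookkeeping, with doubles standing in for NDArrays and a plain function standing in for the shared CachedOp:

    #include <functional>
    #include <vector>

    // ToyLoopState is illustrative only; names and structure are assumptions
    // modeled on the LoopState interface shown in the hunk above.
    struct ToyLoopState {
      std::function<double(double)> step;  // shared across iterations, like iter_op
      std::vector<double> all_inputs;      // recorded per forward iteration
      std::vector<double> all_outputs;     // recorded for the backward pass

      double Forward(double x) {
        all_inputs.push_back(x);
        double y = step(x);
        all_outputs.push_back(y);
        return y;
      }
      void Cleanup() {  // mirrors clearing all_inputs/all_states after backward
        all_inputs.clear();
        all_outputs.clear();
      }
    };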
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index a221f058a1dd..3209aa0cae1a 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -24,7 +24,6 @@ if(GTEST_FOUND AND NOT MSVC)
 
   include_directories(${GTEST_INCLUDE_DIR})
   include_directories(cpp/include)
-  include_directories(../cpp-package/include)
 
   if (NOT PRIVATE_RUNTIME_DIR)
     set(PRIVATE_RUNTIME_DIR ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
@@ -33,9 +32,6 @@ if(GTEST_FOUND AND NOT MSVC)
 
   add_executable(${PROJECT_NAME}_unit_tests ${UNIT_TEST_SOURCE})
   set_property(TARGET ${PROJECT_NAME}_unit_tests PROPERTY RUNTIME_OUTPUT_DIRECTORY ${PRIVATE_RUNTIME_DIR})
-  if (USE_CPP_PACKAGE)
-    add_dependencies(${PROJECT_NAME}_unit_tests cpp_package_op_h)
-  endif()
 
   target_link_libraries(${PROJECT_NAME}_unit_tests
     ${GTEST_LIBRARY}
diff --git a/tests/cpp/operator/batchnorm_test.cc b/tests/cpp/operator/batchnorm_test.cc
index 92bd54beffc4..22bcb70387a8 100644
--- a/tests/cpp/operator/batchnorm_test.cc
+++ b/tests/cpp/operator/batchnorm_test.cc
@@ -30,7 +30,7 @@
 #include "../../src/operator/operator_common.h"
 #include "./test_legacy_op.h"
 #include "./test_core_op.h"
-#include "executor/exec_pass.h"
+#include "imperative/exec_pass.h"
 
 using namespace mxnet;
diff --git a/tests/cpp/thread_safety/thread_safety_test.cc b/tests/cpp/thread_safety/thread_safety_test.cc
deleted file mode 100644
index 7ab9aaa3d8c0..000000000000
--- a/tests/cpp/thread_safety/thread_safety_test.cc
+++ /dev/null
@@ -1,652 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*!
- * \file thread_safety_test.cc
- * \brief test thread safety at the dependency engine level and cached op level
- */
-
-#if MXNET_USE_CPP_PACKAGE == 1
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "../src/engine/engine_impl.h"
-#include "../src/imperative/imperative_utils.h"
-#include "../include/test_util.h"
-#include "mxnet-cpp/MxNetCpp.h"
-
-/*
- * Prepares input data for the ops/models used in this file
- */
-void prepare_input_data(const mxnet::cpp::Shape& shape, const mxnet::cpp::Context& ctx,
-                        int num_threads,
-                        std::vector<mxnet::cpp::NDArray>* data_arr,
-                        bool random_uniform = false) {
-  for (size_t i = 0; i < num_threads; ++i) {
-    data_arr->emplace_back(shape, ctx, false, 0);
-    int begin = i * 100;
-    int end = begin + 100;
-    if (random_uniform) {
-      mxnet::cpp::Operator("_random_uniform")(begin, end).Invoke((*data_arr)[i]);
-    }
-    mxnet::cpp::NDArray::WaitAll();
-  }
-}
-
-void prepare_output_data(const mxnet::cpp::Shape& shape, const mxnet::cpp::Context& ctx,
-                         int num_threads,
-                         std::vector<mxnet::cpp::NDArray>* output_arr) {
-  for (size_t i = 0; i < num_threads; ++i) {
-    output_arr->emplace_back(shape, ctx, false, 0);
-    mxnet::cpp::NDArray::WaitAll();
-  }
-}
-
-/*
- * Prepare backend ndarrays from cpp frontend ndarrays
- */
-void prepare_backend_data(const std::vector<mxnet::cpp::NDArray> &input_cpp_arrs,
-                          int num_threads,
-                          std::vector<NDArray*> *output_backend_arrs) {
-  output_backend_arrs->resize(num_threads);
-  for (size_t i = 0; i < num_threads; ++i) {
-    (*output_backend_arrs)[i] = static_cast<NDArray*>(input_cpp_arrs[i].GetHandle());
-  }
-}
-
-/*
- * Create and Invoke CachedOp for given data
- */
-void get_expected_results(const mxnet::cpp::Symbol &sym,
-                          const std::vector<std::string> &flag_keys,
-                          const std::vector<std::string> &flag_vals,
-                          int num_threads,
-                          std::vector<std::vector<NDArrayHandle>> *arr_handles,
-                          std::vector<NDArray*> *result_expected,
-                          CachedOpHandle* hdl) {
-  // prepare flag_keys and flag_vals
-  std::vector<const char*> flag_key_cstrs, flag_val_cstrs;
-  flag_key_cstrs.reserve(flag_keys.size());
-  for (size_t i = 0; i < flag_keys.size(); ++i) {
-    flag_key_cstrs.emplace_back(flag_keys[i].c_str());
-  }
-  for (size_t i = 0; i < flag_vals.size(); ++i) {
-    flag_val_cstrs.emplace_back(flag_vals[i].c_str());
-  }
-
-  // Create CachedOp
-  int ret1 = MXCreateCachedOpEx(sym.GetHandle(), flag_keys.size(),
-                                flag_key_cstrs.data(), flag_val_cstrs.data(),
-                                hdl);
-  if (ret1 < 0) {
-    LOG(FATAL) << MXGetLastError();
-  }
-
-  std::vector<NDArrayHandle*> nd_ptrs(num_threads);
-
-  // Invoke CachedOp same number of times as number of threads
-  for (size_t i = 0; i < num_threads; ++i) {
-    int num_output = 0;
-    const int *stypes;
-    int ret4 = MXInvokeCachedOpEx(*hdl, (*arr_handles)[i].size(), (*arr_handles)[i].data(),
-                                  cpu::kDevMask, 0, &num_output, &nd_ptrs[i], &stypes);
-    if (ret4 < 0) {
-      LOG(FATAL) << MXGetLastError();
-    }
-    mxnet::cpp::NDArray::WaitAll();
-    (*result_expected)[i] = static_cast<NDArray*>(*nd_ptrs[i]);
-  }
-}
-
-/*
- * Create and Invoke CachedOp for multiple threads, each thread with multiple
- * inferences
- */
-inline void get_expected_results_multiple(
-    const mxnet::cpp::Symbol &sym,
-    const std::vector<std::string> &flag_keys, const std::vector<std::string> &flag_vals,
-    std::vector<std::vector<std::vector<NDArrayHandle>>> *arr_handles,
-    int num_threads,
-    std::vector<std::vector<NDArray*>> *result_expected,
-    CachedOpHandle *hdl) {
-  // prepare flag_keys and flag_vals
-  std::vector<const char*> flag_key_cstrs, flag_val_cstrs;
-  flag_key_cstrs.reserve(flag_keys.size());
-  flag_val_cstrs.reserve(flag_vals.size());
-  for (size_t i = 0; i < flag_keys.size(); ++i) {
-    flag_key_cstrs.emplace_back(flag_keys[i].c_str());
-  }
-  for (size_t i = 0; i < flag_vals.size(); ++i) {
-    flag_val_cstrs.emplace_back(flag_vals[i].c_str());
-  }
-
-  // Create CachedOp
-  int ret1 =
-      MXCreateCachedOpEX(sym.GetHandle(), flag_keys.size(),
-                         flag_key_cstrs.data(), flag_val_cstrs.data(), hdl, false);
-  if (ret1 < 0) {
-    LOG(FATAL) << MXGetLastError();
-  }
-  std::vector<std::vector<NDArrayHandle*>> nd_ptrs((*arr_handles).size());
-
-  // Invoke CachedOp same number of times as number of threads
-  for (size_t i = 0; i < (*arr_handles).size(); ++i) {
-    nd_ptrs[i].resize(num_threads);
-    (*result_expected)[i].resize(num_threads);
-    for (size_t j = 0; j < num_threads; ++j) {
-      int num_output = 0;
-      const int *stypes;
-      int ret4 = MXInvokeCachedOpEx(*hdl, (*arr_handles)[i][j].size(),
-                                    (*arr_handles)[i][j].data(), cpu::kDevMask, 0,
-                                    &num_output, &nd_ptrs[i][j], &stypes);
-      if (ret4 < 0) {
-        LOG(FATAL) << MXGetLastError();
-      }
-      mxnet::cpp::NDArray::WaitAll();
-      (*result_expected)[i][j] = static_cast<NDArray*>(*nd_ptrs[i][j]);
-    }
-  }
-}
-
-void run_inference(const std::string& model,
-                   int num_inf_per_thread = 1, bool random_sleep = false,
-                   int num_threads = 1, bool static_alloc = false,
-                   bool static_shape = false) {
-  // Load model
-  LOG(INFO) << "Running inference for " + model +
-               " num_threads: " + std::to_string(num_threads) +
-               " num_inf_per_thread: " + std::to_string(num_inf_per_thread) +
-               " random_sleep: " + std::to_string(random_sleep) +
-               " static_alloc: " + std::to_string(static_alloc) +
-               " static_shape: " + std::to_string(static_shape);
-  auto out = mxnet::cpp::Symbol::Load(model + "-symbol.json");
-  std::string static_alloc_str = static_alloc ? "true" : "false";
-  std::string static_shape_str = static_shape ? "true" : "false";
-
-  // Prepare context
-#if MXNET_USE_CUDA == 1
-  Context backend_ctx;
-  mxnet::cpp::Context ctx = mxnet::cpp::Context::gpu(0);
-  if (!mxnet::test::thread_safety_force_cpu) {
-    backend_ctx = Context::GPU(0);
-    ctx = mxnet::cpp::Context::gpu(0);
-  } else {
-    backend_ctx = Context::CPU();
-    ctx = mxnet::cpp::Context::cpu();
-  }
-#else
-  Context backend_ctx = Context::CPU(0);
-  mxnet::cpp::Context ctx = mxnet::cpp::Context::cpu(0);
-#endif
-
-  // Prepare input data and parameters
-  std::vector<std::vector<mxnet::cpp::NDArray>> data_arr(num_inf_per_thread);
-  std::vector<std::vector<mxnet::cpp::NDArray>> softmax_arr(num_inf_per_thread);
-  std::vector<mxnet::cpp::NDArray> params;
-  mxnet::cpp::Shape data_shape = mxnet::cpp::Shape(1, 3, 224, 224);
-  mxnet::cpp::Shape softmax_shape = mxnet::cpp::Shape(1);
-  for (size_t i = 0; i < num_inf_per_thread; ++i) {
-    prepare_input_data(data_shape, ctx, num_threads, &(data_arr[i]), true);
-    prepare_input_data(softmax_shape, ctx, num_threads, &(softmax_arr[i]));
-  }
-  std::map<std::string, mxnet::cpp::NDArray> parameters;
-  mxnet::cpp::NDArray::Load(model + "-0000.params", 0, &parameters);
-
-  for (std::string name : out.ListInputs()) {
-    if (name == "arg:data") {
-      continue;
-    }
-    if (parameters.find("arg:" + name) != parameters.end()) {
-      params.push_back(parameters["arg:" + name].Copy(ctx));
-    } else if (parameters.find("aux:" + name) != parameters.end()) {
-      params.push_back(parameters["aux:" + name].Copy(ctx));
-    }
-  }
-
-  // Prepare data_indices, param_indices and get_expected_results
-  std::vector<std::string> flag_keys{"data_indices", "param_indices",
-                                     "static_alloc", "static_shape"};
-  std::string param_indices = "[";
-  std::vector<std::vector<NDArray*>> result_expected(num_inf_per_thread);
-  int num_inputs = out.ListInputs().size();
-  for (size_t i = 1; i < num_inputs; ++i) {
-    param_indices += std::to_string(i);
-    param_indices += std::string(", ");
-  }
-  param_indices += "]";
-  std::vector<std::string> flag_vals{"[0]", param_indices, static_alloc_str, static_shape_str};
-  std::vector<std::vector<std::vector<NDArrayHandle>>> arr_handles(num_inf_per_thread);
-  for (size_t i = 0; i < num_inf_per_thread; ++i) {
-    arr_handles[i].resize(num_threads);
-    for (size_t j = 0; j < num_threads; ++j) {
-      arr_handles[i][j].push_back(data_arr[i][j].GetHandle());
-      for (size_t k = 1; k < num_inputs - 1; k++) {
-        arr_handles[i][j].push_back(params[k - 1].GetHandle());
-      }
-      arr_handles[i][j].push_back(softmax_arr[i][j].GetHandle());
-    }
-  }
-  CachedOpHandle hdl = CachedOpHandle();
-  get_expected_results_multiple(out, flag_keys, flag_vals, &arr_handles,
-                                num_threads, &result_expected, &hdl);
-
-  // Create thread safe cached op
-  CachedOpHandle hdl2 = CachedOpHandle();
-  std::vector<const char*> flag_key_cstrs, flag_val_cstrs;
-  flag_key_cstrs.reserve(flag_keys.size());
-  for (size_t i = 0; i < flag_keys.size(); ++i) {
-    flag_key_cstrs.emplace_back(flag_keys[i].c_str());
-  }
-  for (size_t i = 0; i < flag_vals.size(); ++i) {
-    flag_val_cstrs.emplace_back(flag_vals[i].c_str());
-  }
-
-  int ret1 = MXCreateCachedOpEX(out.GetHandle(), flag_keys.size(),
-                                flag_key_cstrs.data(), flag_val_cstrs.data(),
-                                &hdl2, true);
-  if (ret1 < 0) {
-    LOG(FATAL) << MXGetLastError();
-  }
-
-  // Prepare data structures and lambda to run in different threads
-  std::vector<NDArrayHandle*> cached_op_handles(num_threads * num_inf_per_thread);
-  std::vector<std::vector<std::vector<NDArray>>> temp(num_inf_per_thread);
-  std::vector<std::vector<NDArray*>> output_mx_arr(num_inf_per_thread);
-  for (size_t i = 0; i < num_inf_per_thread; i++) {
-    output_mx_arr[i].resize(num_threads);
-    temp[i].resize(num_threads);
-    for (size_t j = 0; j < num_threads; ++j) {
-      temp[i][j].resize(1000);
-    }
-  }
-
-  std::vector<std::vector<std::vector<NDArrayHandle>>> arr_handles2(num_inf_per_thread);
-  for (size_t i = 0; i < num_inf_per_thread; ++i) {
-    arr_handles2[i].resize(num_threads);
-    for (size_t j = 0; j < num_threads; ++j) {
-      arr_handles2[i][j].reserve(num_inputs);
-      arr_handles2[i][j].emplace_back(data_arr[i][j].GetHandle());
-      for (size_t k = 1; k < num_inputs - 1; ++k) {
-        arr_handles2[i][j].emplace_back(params[k - 1].GetHandle());
-      }
-      arr_handles2[i][j].emplace_back(softmax_arr[i][j].GetHandle());
-    }
-  }
-  std::vector<NDArray*> data(num_inf_per_thread * num_threads);
-  auto func = [&](int num) {
-    unsigned next = num;
-    for (size_t i = 0; i < num_inf_per_thread; ++i) {
-      if (random_sleep) {
-        static thread_local std::mt19937 generator;
-        std::uniform_int_distribution<int> distribution(0, 5);
-        int sleep_time = distribution(generator);
-        std::this_thread::sleep_for(std::chrono::seconds(sleep_time));
-      }
-      int num_output = 0;
-      const int *stypes;
-      int ret = MXInvokeCachedOpEx(
-          hdl2, arr_handles2[i][num].size(), arr_handles2[i][num].data(),
-          cpu::kDevMask, 0, &num_output, &(cached_op_handles[i * num_threads + num]),
-          &stypes);
-      if (ret < 0) {
-        LOG(FATAL) << MXGetLastError();
-      }
-      output_mx_arr[i][num] = static_cast<NDArray*>(
-          *cached_op_handles[i * num_threads + num]);
-    }
-  };
-
-  // Spawn multiple threads, join and wait for all threads to complete
-  std::vector<std::thread> worker_threads(num_threads);
-  int count = 0;
-  for (auto &&i : worker_threads) {
-    i = std::thread(func, count);
-    count++;
-  }
-
-  for (auto &&i : worker_threads) {
-    i.join();
-  }
-
-  mxnet::cpp::NDArray::WaitAll();
-  for (size_t i = 0; i < num_inf_per_thread; i++) {
-    mxnet::test::AssertEqual(output_mx_arr[i], result_expected[i], 1e-2, 1e-5);
-  }
-  mxnet::cpp::NDArray::WaitAll();
-  int ret2 = MXFreeCachedOp(hdl);
-  if (ret2 < 0) {
-    LOG(FATAL) << MXGetLastError();
-  }
-
-  ret2 = MXFreeCachedOp(hdl2);
-  if (ret2 < 0) {
-    LOG(FATAL) << MXGetLastError();
-  }
-}
-
-void run_inference_unsupported(const std::string& model,
-                               int num_inf_per_thread = 1, bool random_sleep = false,
-                               int num_threads = 1, bool static_alloc = false,
-                               bool static_shape = false) {
-  // Load model
-  LOG(INFO) << "Running inference for " + model +
-               " num_threads: " + std::to_string(num_threads) +
-               " num_inf_per_thread: " + std::to_string(num_inf_per_thread) +
-               " random_sleep: " + std::to_string(random_sleep) +
-               " static_alloc: " + std::to_string(static_alloc) +
-               " static_shape: " + std::to_string(static_shape);
-  auto out = mxnet::cpp::Symbol::Load(model + "-symbol.json");
-  std::string static_alloc_str = static_alloc ? "true" : "false";
-  std::string static_shape_str = static_shape ? "true" : "false";
-
-  // Prepare context
-#if MXNET_USE_CUDA == 1
-  Context backend_ctx;
-  mxnet::cpp::Context ctx = mxnet::cpp::Context::gpu(0);
-  if (!mxnet::test::thread_safety_force_cpu) {
-    backend_ctx = Context::GPU(0);
-    ctx = mxnet::cpp::Context::gpu(0);
-  } else {
-    backend_ctx = Context::CPU();
-    ctx = mxnet::cpp::Context::cpu();
-  }
-#else
-  Context backend_ctx = Context::CPU(0);
-  mxnet::cpp::Context ctx = mxnet::cpp::Context::cpu(0);
-#endif
-
-  // Prepare input data and parameters
-  std::vector<std::vector<mxnet::cpp::NDArray>> data_arr(num_inf_per_thread);
-  std::vector<std::vector<mxnet::cpp::NDArray>> softmax_arr(num_inf_per_thread);
-  std::vector<mxnet::cpp::NDArray> params;
-  mxnet::cpp::Shape data_shape = mxnet::cpp::Shape(1, 3, 224, 224);
-  mxnet::cpp::Shape softmax_shape = mxnet::cpp::Shape(1);
-  for (size_t i = 0; i < num_inf_per_thread; ++i) {
-    prepare_input_data(data_shape, ctx, num_threads, &(data_arr[i]), true);
-    prepare_input_data(softmax_shape, ctx, num_threads, &(softmax_arr[i]));
-  }
-  std::map<std::string, mxnet::cpp::NDArray> parameters;
-  mxnet::cpp::NDArray::Load(model + "-0000.params", 0, &parameters);
-
-  for (std::string name : out.ListInputs()) {
-    if (name == "arg:data") {
-      continue;
-    }
-    if (parameters.find("arg:" + name) != parameters.end()) {
-      params.push_back(parameters["arg:" + name].Copy(ctx));
-    } else if (parameters.find("aux:" + name) != parameters.end()) {
-      params.push_back(parameters["aux:" + name].Copy(ctx));
-    }
-  }
-
-  // Prepare data_indices, param_indices and get_expected_results
-  std::vector<std::string> flag_keys{"data_indices", "param_indices",
-                                     "static_alloc", "static_shape"};
-  std::string param_indices = "[";
-  std::vector<std::vector<NDArray*>> result_expected(num_inf_per_thread);
-  int num_inputs = out.ListInputs().size();
-  for (size_t i = 1; i < num_inputs; ++i) {
-    param_indices += std::to_string(i);
-    param_indices += std::string(", ");
-  }
-  param_indices += "]";
-  std::vector<std::string> flag_vals{"[0]", param_indices, static_alloc_str, static_shape_str};
-  std::vector<std::vector<std::vector<NDArrayHandle>>> arr_handles(num_inf_per_thread);
-  for (size_t i = 0; i < num_inf_per_thread; ++i) {
-    arr_handles[i].resize(num_threads);
-    for (size_t j = 0; j < num_threads; ++j) {
-      arr_handles[i][j].push_back(data_arr[i][j].GetHandle());
-      for (size_t k = 1; k < num_inputs - 1; k++) {
-        arr_handles[i][j].push_back(params[k - 1].GetHandle());
-      }
-      arr_handles[i][j].push_back(softmax_arr[i][j].GetHandle());
-    }
-  }
-  CachedOpHandle hdl = CachedOpHandle();
-  get_expected_results_multiple(out, flag_keys, flag_vals, &arr_handles,
-                                num_threads, &result_expected, &hdl);
-
-  // Create thread safe cached op
-  CachedOpHandle hdl2 = CachedOpHandle();
-
-  // Prepare data structures and lambda to run in different threads
-  std::vector<NDArrayHandle*> cached_op_handles(num_threads * num_inf_per_thread);
-  std::vector<std::vector<NDArray*>> output_mx_arr(num_inf_per_thread);
-  for (size_t i = 0; i < num_inf_per_thread; i++) {
-    output_mx_arr[i].resize(num_threads);
-  }
-
-  std::vector<std::vector<std::vector<NDArrayHandle>>> arr_handles2(num_inf_per_thread);
-  for (size_t i = 0; i < num_inf_per_thread; ++i) {
-    arr_handles2[i].resize(num_threads);
-    for (size_t j = 0; j < num_threads; ++j) {
-      arr_handles2[i][j].reserve(num_inputs);
-      arr_handles2[i][j].emplace_back(data_arr[i][j].GetHandle());
-      for (size_t k = 1; k < num_inputs - 1; ++k) {
-        arr_handles2[i][j].emplace_back(params[k - 1].GetHandle());
-      }
-      arr_handles2[i][j].emplace_back(softmax_arr[i][j].GetHandle());
-    }
-  }
-  std::vector<NDArray*> data(num_inf_per_thread * num_threads);
-  std::mutex mutex_;
-  auto func = [&](int num) {
-    std::vector<const char*> flag_key_cstrs, flag_val_cstrs;
-    flag_key_cstrs.reserve(flag_keys.size());
-    for (size_t i = 0; i < flag_keys.size(); ++i) {
-      flag_key_cstrs.emplace_back(flag_keys[i].c_str());
-    }
-    for (size_t i = 0; i < flag_vals.size(); ++i) {
-      flag_val_cstrs.emplace_back(flag_vals[i].c_str());
-    }
-
-    {
-      // Uncomment these lines to work around the same issue
-      /*
-      std::lock_guard<std::mutex> lock{mutex_};
-      */
-
-      if (hdl2 == nullptr) {
-        int ret1 = MXCreateCachedOpEX(out.GetHandle(), flag_keys.size(),
-                                      flag_key_cstrs.data(),
-                                      flag_val_cstrs.data(), &hdl2, true);
-        if (ret1 < 0) {
-          LOG(FATAL) << MXGetLastError();
-        }
-      }
-    }
-
-    unsigned next = num;
-    for (size_t i = 0; i < num_inf_per_thread; ++i) {
-      if (random_sleep) {
-        static thread_local std::mt19937 generator;
-        std::uniform_int_distribution<int> distribution(0, 5);
-        int sleep_time = distribution(generator);
-        std::this_thread::sleep_for(std::chrono::seconds(sleep_time));
-      }
-      int num_output = 0;
-      const int *stypes;
-      int ret = MXInvokeCachedOpEx(
-          hdl2, arr_handles2[i][num].size(), arr_handles2[i][num].data(),
-          cpu::kDevMask, 0, &num_output, &(cached_op_handles[i * num_threads + num]),
-          &stypes);
-      if (ret < 0) {
-        LOG(FATAL) << MXGetLastError();
-      }
-      mxnet::cpp::NDArray::WaitAll();
-      output_mx_arr[i][num] = static_cast<NDArray*>(
-          *cached_op_handles[i * num_threads + num]);
-    }
-  };
-
-  // Spawn multiple threads, join and wait for all threads to complete
-  std::vector<std::thread> worker_threads(num_threads);
-  int count = 0;
-  for (auto &&i : worker_threads) {
-    i = std::thread(func, count);
-    count++;
-  }
-
-  for (auto &&i : worker_threads) {
-    i.join();
-  }
-
-  mxnet::cpp::NDArray::WaitAll();
-  for (size_t i = 0; i < num_inf_per_thread; i++) {
-    mxnet::test::AssertEqual(output_mx_arr[i], result_expected[i], 1e-2, 1e-5);
-  }
-  mxnet::cpp::NDArray::WaitAll();
-  int ret2 = MXFreeCachedOp(hdl);
-  if (ret2 < 0) {
-    LOG(FATAL) << MXGetLastError();
-  }
-
-  ret2 = MXFreeCachedOp(hdl2);
-  if (ret2 < 0) {
-    LOG(FATAL) << MXGetLastError();
-  }
-}
-
-/**
- * Verifying engine thread safety by pushing ops from multiple threads to the
- * dependency engine
- */
-TEST(ThreadSafety, Engine) {
-  int num_threads = 20;
-#if MXNET_USE_CUDA == 1
-  Context backend_ctx;
-  mxnet::cpp::Context ctx = mxnet::cpp::Context::gpu(0);
-  DispatchMode dispatch_mode;
-  if (!mxnet::test::thread_safety_force_cpu) {
-    backend_ctx = Context::GPU(0);
-    ctx = mxnet::cpp::Context::gpu(0);
-    dispatch_mode = DispatchMode::kFCompute;
-  } else {
-    backend_ctx = Context::CPU();
-    ctx = mxnet::cpp::Context::cpu();
-    dispatch_mode = DispatchMode::kFComputeEx;
-  }
-#else
-  Context backend_ctx = Context::CPU(0);
-  mxnet::cpp::Context ctx = mxnet::cpp::Context::cpu(0);
-  DispatchMode dispatch_mode = DispatchMode::kFComputeEx;
-#endif
-  // Prepare convolution op and parse attrs
-  const nnvm::Op *op = Op::Get("Convolution");
-  nnvm::NodeAttrs attrs;
-  attrs.op = op;
-  attrs.name = "conv_node1";
-  std::unordered_map<std::string, std::string> params = {
-      {"kernel", "(2,2)"}, {"no_bias", "0"}, {"dilate", "(1,1)"},
-      {"num_group", "1"}, {"layout", "NCHW"}, {"stride", "(1,1)"},
-      {"pad", "(0,0)"}, {"num_filter", "10"}};
-  attrs.dict = params;
-  op->attr_parser(&attrs);
-
-  // Prepare input data
-  std::vector<mxnet::cpp::NDArray> data_arr, weight_arr, bias_arr, output_arr;
-  mxnet::cpp::Shape data_shape(2, 4, 10, 10);
-  mxnet::cpp::Shape weight_shape(10, 4, 2, 2);
-  mxnet::cpp::Shape bias_shape(10);
-  mxnet::cpp::Shape output_shape(2, 10, 9, 9);
-
-  prepare_input_data(data_shape, ctx, num_threads, &data_arr, true);
-  prepare_input_data(weight_shape, ctx, num_threads, &weight_arr, true);
-  prepare_input_data(bias_shape, ctx, num_threads, &bias_arr, true);
-  prepare_output_data(output_shape, ctx, num_threads, &output_arr);
-
-  // Prepare symbol
-  mxnet::cpp::Symbol data = mxnet::cpp::Symbol::Variable("data");
-  mxnet::cpp::Symbol weight = mxnet::cpp::Symbol::Variable("weight");
-  mxnet::cpp::Symbol bias = mxnet::cpp::Symbol::Variable("bias");
-  auto out = mxnet::cpp::Operator("Convolution")
-                 .SetParam("kernel", mxnet::cpp::Shape(2, 2))
-                 .SetParam("no_bias", false)
-                 .SetParam("dilate", mxnet::cpp::Shape(1, 1))
-                 .SetParam("num_group", 1)
-                 .SetParam("layout", "NCHW")
-                 .SetParam("stride", mxnet::cpp::Shape(1, 1))
-                 .SetParam("pad", mxnet::cpp::Shape(0, 0))
-                 .SetParam("num_filter", 10)
-                 .SetInput("data", data)
-                 .SetInput("weight", weight)
-                 .SetInput("bias", bias)
-                 .CreateSymbol("fwd");
-
-  // Prepare data_indices, param_indices and get_expected_results
-  std::vector<std::string> flag_keys{"data_indices", "param_indices"};
-  std::vector<std::string> flag_vals{"[0]", "[1,2]"};
-  std::vector<NDArray*> result_expected(num_threads);
-
-  std::vector<std::vector<NDArrayHandle>> arr_handles(num_threads);
-  for (size_t i = 0; i < num_threads; ++i) {
-    arr_handles[i].push_back(data_arr[i].GetHandle());
-    arr_handles[i].push_back(weight_arr[i].GetHandle());
-    arr_handles[i].push_back(bias_arr[i].GetHandle());
-  }
-  CachedOpHandle hdl = CachedOpHandle();
-  get_expected_results(out, flag_keys, flag_vals, num_threads,
-                       &arr_handles, &result_expected, &hdl);
-
-  // Prepare backend NDArray inputs
-  std::vector<NDArray*> data_mx_arr, weight_mx_arr, bias_mx_arr, output_mx_arr;
-  prepare_backend_data(data_arr, num_threads, &data_mx_arr);
-  prepare_backend_data(weight_arr, num_threads, &weight_mx_arr);
-  prepare_backend_data(bias_arr, num_threads, &bias_mx_arr);
-  prepare_backend_data(output_arr, num_threads, &output_mx_arr);
-
-  // Prepare func which Invokes op
-  auto func = [&](int num) {
-    std::vector<NDArray*> tmp_inputs, tmp_outputs;
-    tmp_inputs.emplace_back(data_mx_arr[num]);
-    tmp_inputs.emplace_back(weight_mx_arr[num]);
-    tmp_inputs.emplace_back(bias_mx_arr[num]);
-    tmp_outputs.emplace_back(output_mx_arr[num]);
-    std::vector<OpReqType> reqs;
-    reqs.push_back(kWriteTo);
-    Imperative::Get()->InvokeOp(backend_ctx, attrs, tmp_inputs, tmp_outputs,
-                                reqs, dispatch_mode, OpStatePtr());
-  };
-
-  // Spawn multiple threads
-  std::vector<std::thread> worker_threads(num_threads);
-  int count = 0;
-  for (auto &&i : worker_threads) {
-    i = std::thread(func, count);
-    count++;
-  }
-
-  for (auto &&i : worker_threads) {
-    i.join();
-  }
-
-  mxnet::cpp::NDArray::WaitAll();
-  mxnet::test::AssertEqual(output_mx_arr, result_expected, 1e-2, 1e-5);
-  mxnet::cpp::NDArray::WaitAll();
-}
-#endif
diff --git a/tests/cpp/unittest.mk b/tests/cpp/unittest.mk
index 8534db91b52a..704ee41fdc4c 100644
--- a/tests/cpp/unittest.mk
+++ b/tests/cpp/unittest.mk
@@ -27,7 +27,6 @@ GTEST_HEADERS = $(GTEST_DIR)/include/gtest/*.h \
 
 TEST_CFLAGS = -Itests/cpp/include -Isrc $(CFLAGS)
 TEST_LDFLAGS = $(LDFLAGS) -Llib -lmxnet
-TEST_CPPFLAGS = -Icpp-package/include
 
 ifeq ($(USE_BREAKPAD), 1)
 TEST_CFLAGS += -I/usr/local/include/breakpad
@@ -67,11 +66,6 @@ build/tests/cpp/engine/%.o : tests/cpp/engine/%.cc | mkldnn
 	$(CXX) -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/engine/$* $< > build/tests/cpp/engine/$*.d
 	$(CXX) -c -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -o build/tests/cpp/engine/$*.o $(filter %.cc %.a, $^)
 
-build/tests/cpp/thread_safety/%.o : tests/cpp/thread_safety/%.cc | mkldnn
-	@mkdir -p $(@D)
-	$(CXX) -std=c++17 $(TEST_CFLAGS) $(TEST_CPPFLAGS) -I$(GTEST_INC) -MM -MT tests/cpp/thread_safety/$* $< > build/tests/cpp/thread_safety/$*.d
-	$(CXX) -c -std=c++17 $(TEST_CFLAGS) $(TEST_CPPFLAGS) -I$(GTEST_INC) -o build/tests/cpp/thread_safety/$*.o $(filter %.cc %.a, $^)
-
 $(TEST): $(TEST_OBJ) lib/libmxnet.so $(TEST_LIB_DEP)
 	$(CXX) -std=c++17 $(TEST_CFLAGS) -I$(GTEST_INC) -o $@ $^ $(TEST_LDFLAGS)
 
@@ -85,4 +79,3 @@ testclean:
 -include build/tests/cpp/operator/*.d
 -include build/tests/cpp/storage/*.d
 -include build/tests/cpp/engine/*.d
--include build/tests/cpp/thread_safety/*.d
diff --git a/tests/nightly/Jenkinsfile b/tests/nightly/Jenkinsfile
index 191a84431ca8..32fa15d5dd84 100755
--- a/tests/nightly/Jenkinsfile
+++ b/tests/nightly/Jenkinsfile
@@ -30,70 +30,6 @@ utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu', linux_
 utils.main_wrapper(
 core_logic: {
   stage('NightlyTests'){
-    'Amalgamation-atlas: CPU': {
-      node(NODE_LINUX_CPU) {
-        ws('workspace/nt-amalgamation1') {
-          utils.init_git()
-          utils.docker_run('ubuntu_cpu', 'nightly_test_amalgamation USE_BLAS=atlas', false)
-        }
-      }
-    },
-    'Amalgamation-atlas-min: CPU': {
-      node(NODE_LINUX_CPU) {
-        ws('workspace/nt-amalgamation2') {
-          utils.init_git()
-          utils.docker_run('ubuntu_cpu', 'nightly_test_amalgamation USE_BLAS=atlas MIN=1', false)
-        }
-      }
-    },
-    'Amalgamation-atlas-mkl: CPU': {
-      node(NODE_LINUX_CPU) {
-        ws('workspace/nt-amalgamation3') {
-          utils.init_git()
-          utils.docker_run('ubuntu_cpu', 'nightly_test_amalgamation USE_BLAS=atlas MSHADOW_USE_MKL=1', false)
-        }
-      }
-    },
-    'Amalgamation-atlas-cuda: CPU': {
-      node(NODE_LINUX_CPU) {
-        ws('workspace/nt-amalgamation4') {
-          utils.init_git()
-          utils.docker_run('ubuntu_cpu', 'nightly_test_amalgamation USE_BLAS=atlas MSHADOW_USE_CUDA=1', false)
-        }
-      }
-    },
-    'Amalgamation-atlas-openmp: CPU': {
-      node(NODE_LINUX_CPU) {
-        ws('workspace/nt-amalgamation5') {
-          utils.init_git()
-          utils.docker_run('ubuntu_cpu', 'nightly_test_amalgamation USE_BLAS=atlas DISABLE_OPENMP=0', false)
-        }
-      }
-    },
-    'Java Demo: CPU': {
-      node(NODE_LINUX_CPU) {
-        ws('workspace/java-demo') {
-          utils.init_git()
-          utils.docker_run('ubuntu_cpu', 'nightly_java_demo_test_cpu', false)
-        }
-      }
-    },
-    'Scala Demo: CPU': {
-      node(NODE_LINUX_CPU) {
-        ws('workspace/scala-demo') {
-          utils.init_git()
-          utils.docker_run('ubuntu_cpu', 'nightly_scala_demo_test_cpu', false)
-        }
-      }
-    },
-    'MXNetJS: CPU': {
-      node(NODE_LINUX_CPU) {
-        ws('workspace/nt-mxnetjs') {
-          utils.init_git()
-          utils.docker_run('ubuntu_cpu', 'nightly_test_javascript', false)
-        }
-      }
-    }
   }
 }
 ,
 //This is a Jenkinsfile for nightly tests. The format and some functions have been picked up from the top-level Jenkinsfile
-mx_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, build/3rdparty/openmp/runtime/src/libomp.so, build/cpp-package/example/*'
+mx_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, build/3rdparty/openmp/runtime/src/libomp.so'
 
 node('utility') { // Loading the utilities requires a node context unfortunately
@@ -51,15 +51,7 @@ core_logic: {
   }
 
   stage('NightlyTests'){
-    parallel 'ImageNet Inference: GPU': {
-      node(NODE_LINUX_GPU) {
-        ws('workspace/nt-ImageInferenceTest') {
-          utils.unpack_and_init('gpu', mx_lib)
-          utils.docker_run('ubuntu_build_cuda', 'nightly_test_imagenet_inference', true)
-        }
-      }
-    },
-    'KVStore_SingleNode: GPU': {
+    parallel 'KVStore_SingleNode: GPU': {
       node('mxnetlinux-gpu-p3-8xlarge') {
         ws('workspace/nt-KVStoreTest') {
           utils.unpack_and_init('gpu', mx_lib)
diff --git a/tests/nightly/README.md b/tests/nightly/README.md
index bc483d2e5ee1..f601db116a95 100644
--- a/tests/nightly/README.md
+++ b/tests/nightly/README.md
@@ -49,11 +49,10 @@ If the test runs on MXNet binaries modify tests/nightly/JenkinsfileForBinaries -
 ### Currently Running Tests
 
 #### Tests on Source
-1. Amalgamation Tests
-2. Compilation Warnings
-3. Installation Guide
-4. MXNet Javascript Test
-5. Apache RAT check
+1. Compilation Warnings
+2. Installation Guide
+3. MXNet Javascript Test
+4. Apache RAT check
 
 #### Tests on Binaries
 1. Image Classification Test
diff --git a/tests/python/gpu/test_predictor.py b/tests/python/gpu/test_predictor.py
deleted file mode 100644
index 7992f59f6210..000000000000
--- a/tests/python/gpu/test_predictor.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-from __future__ import print_function
-import sys, os
-curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.append(os.path.join(curr_path, "../../../amalgamation/python/"))
-from mxnet_predict import Predictor, load_ndarray_file
-
-import ctypes
-import numpy as np
-import mxnet as mx
-import mxnet.ndarray as nd
-from mxnet.ndarray import NDArray
-from mxnet import gluon
-from mxnet.test_utils import assert_almost_equal
-from mxnet.contrib.amp import amp
-from mxnet.base import NDArrayHandle, py_str
-sys.path.insert(0, os.path.join(curr_path, '../unittest'))
-from common import setup_module, with_seed, teardown_module
-
-@with_seed()
-def test_predictor_with_dtype():
-    prefix = 'test_predictor_simple_dense'
-    symbol_file = "%s-symbol.json" % prefix
-    param_file = "%s-0000.params" % prefix
-
-    input1 = np.random.uniform(size=(1, 3))
-    input1 = input1.astype(np.float16)
-
-    block = mx.gluon.nn.HybridSequential()
-    block.add(mx.gluon.nn.Dense(7))
-    block.add(mx.gluon.nn.Dense(3))
-    block.cast(np.float16)
-    block.hybridize()
-    block.initialize(ctx=mx.gpu(0))
-    tmp = mx.nd.array(input1, dtype=np.float16, ctx=mx.gpu(0))
-    out1 = block.forward(tmp)
-    block.export(prefix)
-
-    predictor = Predictor(open(symbol_file, "r").read(),
-                          open(param_file, "rb").read(),
-                          {"data": input1.shape},
-                          dev_type="gpu",
-                          dev_id=0,
-                          type_dict={"data": input1.dtype})
-    predictor.forward(data=input1)
-    predictor_out1 = predictor.get_output(0)
-
-    assert_almost_equal(out1.asnumpy(), predictor_out1, rtol=1e-5, atol=1e-6)
diff --git a/tests/python/predict/mxnet_predict_example.py b/tests/python/predict/mxnet_predict_example.py
deleted file mode 100644
index 1db3f5c29954..000000000000
--- a/tests/python/predict/mxnet_predict_example.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import sys, os
-curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.append("../../../amalgamation/python/")
-
-from mxnet_predict import Predictor, load_ndarray_file
-import logging
-import numpy as np
-from skimage import io, transform
-
-# Load the pre-trained model
-prefix = "resnet/resnet-18"
-num_round = 0
-symbol_file = "%s-symbol.json" % prefix
-param_file = "%s-0000.params" % prefix
-predictor = Predictor(open(symbol_file, "r").read(),
-                      open(param_file, "rb").read(),
-                      {'data':(1, 3, 224, 224)})
-
-synset = [l.strip() for l in open('resnet/synset.txt').readlines()]
-
-def PreprocessImage(path, show_img=False):
-    # load image
-    img = io.imread(path)
-    print("Original Image Shape: ", img.shape)
-    # we crop image from center
-    short_egde = min(img.shape[:2])
-    yy = int((img.shape[0] - short_egde) / 2)
-    xx = int((img.shape[1] - short_egde) / 2)
-    crop_img = img[yy : yy + short_egde, xx : xx + short_egde]
-    # resize to 224, 224
-    resized_img = transform.resize(crop_img, (224, 224))
-    # convert to numpy.ndarray
-    sample = np.asarray(resized_img) * 255
-    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
-    sample = np.swapaxes(sample, 0, 2)
-    sample = np.swapaxes(sample, 1, 2)
-
-    # sub mean
-    return sample
-
-# Get preprocessed batch (single image batch)
-batch = PreprocessImage('./download.jpg', True)
-
-predictor.forward(data=batch)
-prob = predictor.get_output(0)[0]
-
-pred = np.argsort(prob)[::-1]
-# Get top1 label
-top1 = synset[pred[0]]
-print("Top1: ", top1)
-# Get top5 label
-top5 = [synset[pred[i]] for i in range(5)]
-print("Top5: ", top5)
-
diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py
index de363d2ee69f..6a2845e0fb24 100644
--- a/tests/python/unittest/test_numpy_interoperability.py
+++ b/tests/python/unittest/test_numpy_interoperability.py
@@ -3228,6 +3228,8 @@ def check_interoperability(op_list):
         if name in ['shares_memory', 'may_share_memory', 'empty_like', '__version__', 'dtype', '_NoValue']:  # skip list
             continue
+        if name in ['delete']:  # https://github.com/apache/incubator-mxnet/issues/18600
+            continue
         if name in ['full_like', 'zeros_like', 'ones_like'] and \
             StrictVersion(platform.python_version()) < StrictVersion('3.0.0'):
             continue
diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index 6440dec7d342..5224404c9242 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -4194,6 +4194,7 @@ def hybrid_forward(self, F, x):
 
 @with_seed()
 @use_np
+@pytest.mark.skip(reason='https://github.com/apache/incubator-mxnet/issues/18600')
 def test_np_delete():
     class TestDelete(HybridBlock):
         def __init__(self, obj, axis=None):
@@ -4755,6 +4756,7 @@ def _test_gamma_exception(shape, scale):
 
 @with_seed()
 @use_np
+@pytest.mark.skip(reason='https://github.com/apache/incubator-mxnet/issues/18600')
 def test_np_random_beta():
     class TestRandomBeta(HybridBlock):
         def __init__(self, size=None, dtype=None, ctx=None):
@@ -4799,6 +4801,7 @@ def _test_random_beta_range(output):
 
 @with_seed()
 @use_np
+@pytest.mark.skip(reason='https://github.com/apache/incubator-mxnet/issues/18600')
 def test_np_random_f():
     class TestRandomF(HybridBlock):
         def __init__(self, size=None):
@@ -4830,6 +4833,7 @@ def hybrid_forward(self, F, dfnum, dfden):
 
 @with_seed()
 @use_np
+@pytest.mark.skip(reason='https://github.com/apache/incubator-mxnet/issues/18600')
 def test_np_random_chisquare():
     class TestRandomChisquare(HybridBlock):
         def __init__(self, size=None, dtype=None, ctx=None):
diff --git a/tests/python/unittest/test_predictor.py b/tests/python/unittest/test_predictor.py
deleted file mode 100644
index 325b830e4226..000000000000
--- a/tests/python/unittest/test_predictor.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-from __future__ import print_function
-import sys, os
-curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
-sys.path.append(os.path.join(curr_path, "../../../amalgamation/python/"))
-from mxnet_predict import Predictor, load_ndarray_file
-
-import numpy as np
-import mxnet as mx
-import mxnet.ndarray as nd
-from mxnet import gluon
-from mxnet.test_utils import assert_almost_equal
-from common import setup_module, with_seed, teardown_module
-
-@with_seed()
-def test_predictor():
-    prefix = 'test_predictor_simple_dense'
-    symbol_file = "%s-symbol.json" % prefix
-    param_file = "%s-0000.params" % prefix
-
-    # two inputs with different batch sizes
-    input1 = np.random.uniform(size=(1,3))
-    input2 = np.random.uniform(size=(3,3))
-
-    # define a simple model
-    block = gluon.nn.HybridSequential()
-    block.add(gluon.nn.Dense(7))
-    block.add(gluon.nn.Dense(3))
-    block.hybridize()
-    block.initialize()
-    out1 = block.forward(nd.array(input1))
-    out2 = block.forward(nd.array(input2))
-    block.export(prefix)
-
-    # create a predictor
-    predictor = Predictor(open(symbol_file, "r").read(),
-                          open(param_file, "rb").read(),
-                          {'data':input1.shape})
-
-    # forward and get output
-    predictor.forward(data=input1)
-    predictor_out1 = predictor.get_output(0)
-    assert_almost_equal(out1.asnumpy(), predictor_out1, rtol=1e-5, atol=1e-6)
-
-    # reshape
-    predictor.reshape({'data':input2.shape})
-    predictor.forward(data=input2)
-    predictor_out2 = predictor.get_output(0)
-    assert_almost_equal(out2.asnumpy(), predictor_out2, rtol=1e-5, atol=1e-6)
-
-    # destroy the predictor
-    del predictor
-
-@with_seed()
-def test_load_ndarray():
-    nd_file = 'test_predictor_load_ndarray.params'
-    a = nd.random.uniform(shape=(7, 3))
-    b = nd.random.uniform(shape=(7,))
-    nd_data = {'a':a, 'b':b}
-    nd.save(nd_file, nd_data)
-
-    # test load_ndarray_file
-    nd_load = load_ndarray_file(open(nd_file, "rb").read())
-    assert(set(nd_data.keys()) == set(nd_load.keys()))
-    for k in nd_data.keys():
-        assert_almost_equal(nd_data[k].asnumpy(), nd_load[k], rtol=1e-5, atol=1e-6)
-
diff --git a/tools/license_header.py b/tools/license_header.py
index 0211c2c9feb5..2a89221f0455 100755
--- a/tools/license_header.py
+++ b/tools/license_header.py
@@ -80,10 +80,6 @@
     # Code shared with project by author - see file for details
     'src/operator/special_functions-inl.h',
 
-    # Code generated by scala-package, checked in, and verified
-    'scala-package/init-native/src/main/native/org_apache_mxnet_init_native_c_api.h',
-    'scala-package/native/src/main/native/org_apache_mxnet_native_c_api.h',
-
     # Licensed under Caffe header
     'src/operator/nn/pool.h',
     'src/operator/contrib/psroi_pooling-inl.h',
@@ -101,15 +97,6 @@
     'cmake/upstream/FindCUDAToolkit.cmake',
     'cmake/upstream/select_compute_arch.cmake',
     'src/operator/numpy/np_einsum_op-inl.h',
-
-    # Licensed under 2-Clause BSD in header
-    'example/ssd/dataset/pycocotools/coco.py',
-
-    # Julia package metadata, generated by Pkg3.jl
-    'julia/Project.toml',
-
-    # Licensed under Apache 2.0 license
-    'example/image-classification/predict-cpp/image-classification-predict.cc'
 ]
 
 # language extensions and the according commment mark
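
Note on the deleted thread-safety test: the core concurrency pattern it exercised is a plain fork/join around a per-thread op invocation. Below is a minimal, self-contained sketch of that pattern for reference — it is not MXNet code; `do_work` and `outputs` are hypothetical stand-ins for the `Imperative::Get()->InvokeOp(...)` call and the pre-allocated per-thread output arrays in the removed test.

    #include <thread>
    #include <vector>

    int main() {
      const int num_threads = 4;
      // One pre-allocated output slot per worker, so threads never share state.
      std::vector<int> outputs(num_threads, 0);

      // Hypothetical stand-in for the per-thread op invocation.
      auto do_work = [&](int num) { outputs[num] = num * num; };

      // Spawn one worker per slot, then join before inspecting results --
      // the same spawn/join structure the deleted test wrapped around InvokeOp.
      std::vector<std::thread> workers;
      workers.reserve(num_threads);
      for (int i = 0; i < num_threads; ++i) {
        workers.emplace_back(do_work, i);
      }
      for (auto &w : workers) {
        w.join();
      }
      return 0;
    }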