diff --git a/CODEOWNERS b/.github/CODEOWNERS similarity index 51% rename from CODEOWNERS rename to .github/CODEOWNERS index 0e22cff91e0b..5d0e94533bf4 100644 --- a/CODEOWNERS +++ b/.github/CODEOWNERS @@ -5,13 +5,23 @@ * @dmlc/tvm-committers # LLVM backends -src/llvm/* @aatluri +src/codegen/llvm/* @aatluri # ROCM runtime src/runtime/rocm/* @aatluri +# SGX support +src/runtime/sgx/* @nhynes +apps/sgx/* @nhynes + # JVM language -jvm/* @javelinjs +jvm/* @yzhliu + +# WebGL backends +src/runtime/opengl/* @phisiart +src/codegen/*opengl* @phisiart # TOPI topi/python/topi/* @Laurawly @Huyuwei + + diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 000000000000..0e2a130d489e --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,7 @@ +Thanks for participating in the TVM community! We use https://discuss.tvm.ai for any general usage questions and discussions. The issue tracker is used for actionable items such as feature proposal discussions, roadmaps, and bug tracking. You are always welcome to post on the forum first :) + +Issues that are inactive for a period of time may get closed. We adopt this policy so that we won't lose track of actionable issues that may fall to the bottom of the pile. Feel free to open a new issue if there is an additional problem that needs attention after an old one gets closed. + +For bug reports, to help the developer act on the issue, please include a description of your environment and, preferably, a minimal script to reproduce the problem. + +For feature proposals, list clear, small actionable items so we can track the progress of the change. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000000..313b776b0824 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1 @@ +Thanks for contributing to TVM! Please refer to the guidelines at https://docs.tvm.ai/contribute/ for useful information and tips. After the pull request is submitted, please request code reviews from others in the community.
diff --git a/.gitignore b/.gitignore index f59a58552f8d..3c968eb3ed47 100644 --- a/.gitignore +++ b/.gitignore @@ -98,7 +98,6 @@ build_* Win32 *.dir perf -nnvm *.wasm .emscripten @@ -132,13 +131,63 @@ xcuserdata/ .emscripten* .m2 +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app + ## Other *.moved-aside *.xccheckout *.xcscmblueprint .DS_Store tags +cscope* +*.lock # vim temporary files *.swp *.swo + +# TVM generated code +perf +.bash_history +*.json +*.params +*.onnx +*.h5 +synset.txt +cat.jpg +docs.tgz +cat.png +*.mlmodel +# Mac OS X +.DS_Store +build* + +# Jetbrain +.idea + +# tmp file +.nfs* diff --git a/CMakeLists.txt b/CMakeLists.txt index f42705ae7fda..39776d53d1f1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,11 +1,12 @@ -cmake_minimum_required(VERSION 3.5) +cmake_minimum_required(VERSION 3.2) project(tvm C CXX) -if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/build/private/local_config.cmake) - include(${CMAKE_CURRENT_SOURCE_DIR}/build/private/local_config.cmake) -endif() - -include(cmake/Util.cmake) +# Utility functions +include(cmake/util/Util.cmake) +include(cmake/util/FindCUDA.cmake) +include(cmake/util/FindVulkan.cmake) +include(cmake/util/FindLLVM.cmake) +include(cmake/util/FindROCM.cmake) if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/config.cmake) include(${CMAKE_CURRENT_BINARY_DIR}/config.cmake) @@ -19,33 +20,47 @@ endif() # You can create a config.cmake at build folder # and add set(OPTION VALUE) to override these build options. # Alernatively, use cmake -DOPTION=VALUE through command-line. - tvm_option(USE_CUDA "Build with CUDA" OFF) tvm_option(USE_OPENCL "Build with OpenCL" OFF) +tvm_option(USE_VULKAN "Build with Vulkan" OFF) +tvm_option(USE_OPENGL "Build with OpenGL" OFF) tvm_option(USE_METAL "Build with Metal" OFF) +tvm_option(USE_ROCM "Build with ROCM" OFF) +tvm_option(ROCM_PATH "The path to rocm" /opt/rocm) tvm_option(USE_RPC "Build with RPC" ON) +tvm_option(USE_LLVM "Build with LLVM, can be set to specific llvm-config path" OFF) tvm_option(USE_GRAPH_RUNTIME "Build with tiny graph runtime" ON) -tvm_option(USE_LLVM "Build with LLVM" OFF) +tvm_option(USE_GRAPH_RUNTIME_DEBUG "Build with tiny graph runtime debug mode" OFF) tvm_option(USE_RTTI "Build with RTTI" ON) tvm_option(USE_MSVC_MT "Build with MT" OFF) tvm_option(INSTALL_DEV "Install compiler infrastructure" OFF) +# Contrib library options +tvm_option(USE_BLAS "The blas library to be linked" none) +tvm_option(USE_MKL_PATH "MKL root path when use MKL blas" none) +tvm_option(USE_CUDNN "Build with cuDNN" OFF) +tvm_option(USE_CUBLAS "Build with cuBLAS" OFF) +tvm_option(USE_MIOPEN "Build with ROCM:MIOpen" OFF) +tvm_option(USE_ROCBLAS "Build with ROCM:RoCBLAS" OFF) +tvm_option(USE_SORT "Build with sort support" OFF) +tvm_option(USE_NNPACK "Build with nnpack support" OFF) +tvm_option(USE_RANDOM "Build with random support" OFF) + +# include directories include_directories("include") -include_directories("HalideIR/src") include_directories("dlpack/include") +include_directories("dmlc-core/include") - +# initial variables set(TVM_LINKER_LIBS "") set(TVM_RUNTIME_LINKER_LIBS "") -# compile +# Generic compilation options if(MSVC) add_definitions(-DWIN32_LEAN_AND_MEAN) add_definitions(-D_CRT_SECURE_NO_WARNINGS) add_definitions(-D_SCL_SECURE_NO_WARNINGS) - add_definitions(-DTVM_EXPORTS) add_definitions(-DHalide_SHARED) - add_definitions(-DHalide_EXPORTS) 
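As a rough illustration of the option mechanism introduced above, the sketch below configures the new CMake build either through a `config.cmake` placed in the build directory or through `-D` flags on the command line, as the comment in the diff notes. The specific values (`USE_CUDA`, `USE_LLVM llvm-config-6.0`) are only examples borrowed from the Jenkinsfile changes later in this diff; any of the `tvm_option` flags listed above can be set the same way.

```bash
# Sketch: configure the new CMake build by appending overrides to config.cmake.
mkdir -p build && cd build
cp ../cmake/config.cmake .                    # template copied into the build dir, as the Jenkinsfile does
echo 'set(USE_CUDA ON)' >> config.cmake       # example values only
echo 'set(USE_LLVM llvm-config-6.0)' >> config.cmake
cmake .. && make -j4

# Alternative mentioned in the diff: pass the same options on the cmake command line,
# e.g.  cmake -DUSE_CUDA=ON -DUSE_LLVM=llvm-config-6.0 ..
```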
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /bigobj") @@ -61,146 +76,142 @@ if(MSVC) else(MSVC) include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-std=c++11" SUPPORT_CXX11) - set(CMAKE_C_FLAGS "-O3 -Wall -std=c++11 -fPIC") - set(CMAKE_CXX_FLAGS ${CMAKE_C_FLAGS}) + set(CMAKE_C_FLAGS "-O2 -Wall -fPIC ${CMAKE_C_FLAGS}") + set(CMAKE_CXX_FLAGS "-O2 -Wall -fPIC -std=c++11 ${CMAKE_CXX_FLAGS}") + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND + CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0) + set(CMAKE_CXX_FLAGS "-faligned-new ${CMAKE_CXX_FLAGS}") + endif() endif(MSVC) # add source group -FILE(GLOB_RECURSE GROUP_SOURCE "src/*.cc" "HalideIR/src/*.cpp") -FILE(GLOB_RECURSE GROUP_Include "src/*.h" "include/*.h" "HalideIR/src/*.h") +FILE(GLOB_RECURSE GROUP_SOURCE "src/*.cc" "HalideIR/src/*.cpp" "nnvm/src/*.cc") +FILE(GLOB_RECURSE GROUP_INCLUDE "src/*.h" "include/*.h" "HalideIR/src/*.h" + "nnvm/src/*.h" "nnvm/include/*.h") assign_source_group("Source" ${GROUP_SOURCE}) -assign_source_group("Include" ${GROUP_Include}) +assign_source_group("Include" ${GROUP_INCLUDE}) +# Source file lists file(GLOB COMPILER_SRCS src/api/*.cc src/arithmetic/*.cc + src/autotvm/*.cc src/codegen/*.cc src/codegen/stack_vm/*.cc src/lang/*.cc src/pass/*.cc src/op/*.cc src/schedule/*.cc + ) + +if(NOT MSVC) + file(GLOB COMPILER_VERILOG_SRCS src/codegen/verilog/*.cc) + list(APPEND COMPILER_SRCS ${COMPILER_VERILOG_SRCS}) +endif() + +file(GLOB_RECURSE NNVM_COMPILER_SRCS + nnvm/src/c_api/*.cc + nnvm/src/core/*.cc + nnvm/src/pass/*.cc + nnvm/src/compiler/*.cc + nnvm/src/top/*.cc + ) + +file(GLOB TOPI_SRCS + topi/src/*.cc ) file(GLOB_RECURSE HALIDEIR_SRCS HalideIR/src/*.cpp) list(APPEND COMPILER_SRCS ${HALIDEIR_SRCS}) file(GLOB RUNTIME_SRCS src/runtime/*.cc) -file(GLOB COMPILER_LLVM_SRCS src/codegen/llvm/*.cc) -file(GLOB RUNTIME_CUDA_SRCS src/runtime/cuda/*.cc) -file(GLOB RUNTIME_OPENCL_SRCS src/runtime/opencl/*.cc) -file(GLOB RUNTIME_METAL_SRCS src/runtime/metal/*.mm) -file(GLOB RUNTIME_RPC_SRCS src/runtime/rpc/*.cc) -file(GLOB RUNTIME_GRAPH_SRCS src/runtime/graph/*.cc) - -if(USE_CUDA) -find_package(CUDA) -# Find CUDA doesn't find all the libraries we need, add the extra ones -find_library(CUDA_CUDA_LIBRARIES cuda - PATHS ${CUDA_TOOLKIT_ROOT_DIR} - PATH_SUFFIXES lib lib64 targets/x86_64-linux/lib targets/x86_64-linux/lib/stubs) -find_library(CUDA_NVRTC_LIBRARIES nvrtc - PATHS ${CUDA_TOOLKIT_ROOT_DIR} - PATH_SUFFIXES lib lib64 targets/x86_64-linux/lib targets/x86_64-linux/lib/stubs) - set(CUDA_CUDA_LIBRARY ${CUDA_CUDA_LIBRARIES}) - - find_package(CUDA QUIET REQUIRED) - message(STATUS "Build with CUDA support") - include_directories(${CUDA_INCLUDE_DIRS}) - list(APPEND TVM_RUNTIME_LINKER_LIBS ${CUDA_CUDART_LIBRARY}) - list(APPEND TVM_RUNTIME_LINKER_LIBS ${CUDA_CUDA_LIBRARY}) - list(APPEND RUNTIME_SRCS ${RUNTIME_CUDA_SRCS}) - if(MSVC) - find_library(CUDA_NVRTC_LIB nvrtc - ${CUDA_TOOLKIT_ROOT_DIR}/lib/x64 - ${CUDA_TOOLKIT_ROOT_DIR}/lib/win32) - list(APPEND TVM_LINKER_LIBS ${CUDA_NVRTC_LIB}) - else(MSVC) - find_library(CUDA_NVRTC_LIB nvrtc - ${CUDA_TOOLKIT_ROOT_DIR}/lib64 - ${CUDA_TOOLKIT_ROOT_DIR}/lib) - list(APPEND TVM_LINKER_LIBS ${CUDA_NVRTC_LIB}) - endif(MSVC) - add_definitions(-DTVM_CUDA_RUNTIME=1) -else(USE_CUDA) - add_definitions(-DTVM_CUDA_RUNTIME=0) -endif(USE_CUDA) - -if(USE_OPENCL) - find_package(OpenCL QUIET REQUIRED) - message(STATUS "Build with OpenCL support") - include_directories(${OPENCL_INCLUDE_DIRS}) - list(APPEND 
TVM_RUNTIME_LINKER_LIBS ${OpenCL_LIBRARIES}) - list(APPEND RUNTIME_SRCS ${RUNTIME_OPENCL_SRCS}) - add_definitions(-DTVM_OPENCL_RUNTIME=1) -else(USE_OPENCL) - add_definitions(-DTVM_OPENCL_RUNTIME=0) -endif(USE_OPENCL) - -if(USE_METAL) - find_package(OpenCL QUIET REQUIRED) - message(STATUS "Build with Metal support") - FIND_LIBRARY(METAL_LIB Metal) - FIND_LIBRARY(FOUNDATION_LIB Foundation) - list(APPEND TVM_RUNTIME_LINKER_LIBS ${METAL_LIB} ${FOUNDATION_LIB}) - list(APPEND RUNTIME_SRCS ${RUNTIME_METAL_SRCS}) - add_definitions(-DTVM_METAL_RUNTIME=1) -else(USE_METAL) - add_definitions(-DTVM_METAL_RUNTIME=0) -endif(USE_METAL) + +# Package runtime rules +if(NOT USE_RTTI) + add_definitions(-DDMLC_ENABLE_RTTI=0) +endif() if(USE_RPC) message(STATUS "Build with RPC support...") + file(GLOB RUNTIME_RPC_SRCS src/runtime/rpc/*.cc) list(APPEND RUNTIME_SRCS ${RUNTIME_RPC_SRCS}) endif(USE_RPC) if(USE_GRAPH_RUNTIME) message(STATUS "Build with Graph runtime support...") + file(GLOB RUNTIME_GRAPH_SRCS src/runtime/graph/*.cc) list(APPEND RUNTIME_SRCS ${RUNTIME_GRAPH_SRCS}) -endif(USE_GRAPH_RUNTIME) -if(USE_LLVM) - find_package(LLVM CONFIG REQUIRED) - include_directories(${LLVM_INCLUDE_DIRS}) - add_definitions(${LLVM_DEFINITIONS}) - set(TVM_LLVM_VERSION ${LLVM_VERSION_MAJOR}${LLVM_VERSION_MINOR}) - message(STATUS "Build with LLVM " ${LLVM_PACKAGE_VERSION}) - message(STATUS "Set TVM_LLVM_VERSION=" ${TVM_LLVM_VERSION}) - add_definitions(-DTVM_LLVM_VERSION=${TVM_LLVM_VERSION}) - add_definitions(-DDMLC_USE_FOPEN64=0) - llvm_map_components_to_libnames(LLVM_LIBS all) - list(REMOVE_ITEM LLVM_LIBS LTO) - list(APPEND TVM_LINKER_LIBS ${LLVM_LIBS}) - list(APPEND COMPILER_SRCS ${COMPILER_LLVM_SRCS}) - if(NOT MSVC) - set_property(SOURCE ${COMPILER_LLVM_SRCS} APPEND_STRING PROPERTY COMPILE_FLAGS - "-fno-rtti -DDMLC_ENABLE_RTTI=0") - endif() -endif(USE_LLVM) - -if(NOT USE_RTTI) - add_definitions(-DDMLC_ENABLE_RTTI=0) -endif() + if(USE_GRAPH_RUNTIME_DEBUG) + set_source_files_properties(${RUNTIME_GRAPH_SRCS} + PROPERTIES COMPILE_DEFINITIONS "TVM_GRAPH_RUNTIME_DEBUG") + endif(USE_GRAPH_RUNTIME_DEBUG) +endif(USE_GRAPH_RUNTIME) -if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/CMakeLists.txt) - include_directories(${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/include) - if (INSTALL_DEV) - install( - DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/dmlc-core/include/." 
DESTINATION "include" - FILES_MATCHING - PATTERN "*.h" - ) - endif() -elseif(DMLC_CORE_PATH) - include_directories(${DMLC_CORE_PATH}/include) -endif() +# Module rules +include(cmake/modules/VTA.cmake) +include(cmake/modules/CUDA.cmake) +include(cmake/modules/OpenCL.cmake) +include(cmake/modules/OpenGL.cmake) +include(cmake/modules/Vulkan.cmake) +include(cmake/modules/Metal.cmake) +include(cmake/modules/ROCM.cmake) +include(cmake/modules/LLVM.cmake) +include(cmake/modules/contrib/BLAS.cmake) +include(cmake/modules/contrib/Random.cmake) +include(cmake/modules/contrib/Sort.cmake) +include(cmake/modules/contrib/NNPack.cmake) -list(APPEND RUNTIME_SRCS ${GROUP_Include}) add_library(tvm SHARED ${COMPILER_SRCS} ${RUNTIME_SRCS}) +add_library(tvm_topi SHARED ${TOPI_SRCS}) add_library(tvm_runtime SHARED ${RUNTIME_SRCS}) +add_library(nnvm_compiler SHARED ${NNVM_COMPILER_SRCS}) + target_link_libraries(tvm ${TVM_LINKER_LIBS} ${TVM_RUNTIME_LINKER_LIBS}) -target_link_libraries(tvm_runtime ${TVM_RUNTIME_LINKER_LIBS}) +target_link_libraries(tvm_topi tvm ${TVM_LINKER_LIBS} ${TVM_RUNTIME_LINKER_LIBS}) +target_link_libraries(tvm_runtime ${TVM_RUNTIME_LINKER_LIBS}) +target_link_libraries(nnvm_compiler tvm) + +# Related headers +target_include_directories( + tvm + PUBLIC "HalideIR/src" + PUBLIC "topi/include") +target_include_directories( + tvm_topi + PUBLIC "topi/include") +target_include_directories( + nnvm_compiler + PUBLIC "nnvm/include" + PUBLIC "topi/include") + +# Tests +set(TEST_EXECS "") +file(GLOB TEST_SRCS tests/cpp/*.cc) +find_library(GTEST_LIB gtest) + +if(GTEST_LIB) + foreach(__srcpath ${TEST_SRCS}) + get_filename_component(__srcname ${__srcpath} NAME) + string(REPLACE ".cc" "" __execname ${__srcname}) + add_executable(${__execname} ${__srcpath}) + list(APPEND TEST_EXECS ${__execname}) + target_link_libraries(${__execname} + tvm ${GTEST_LIB} pthread) + set_target_properties(${__execname} PROPERTIES EXCLUDE_FROM_ALL 1) + set_target_properties(${__execname} PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD 1) + endforeach() + add_custom_target(cpptest DEPENDS ${TEST_EXECS}) +endif() + +# Custom targets +add_custom_target(runtime DEPENDS tvm_runtime) + +# Installation rules +install(TARGETS tvm DESTINATION lib${LIB_SUFFIX}) +install(TARGETS tvm_topi DESTINATION lib${LIB_SUFFIX}) install(TARGETS tvm_runtime DESTINATION lib${LIB_SUFFIX}) +install(TARGETS nnvm_compiler DESTINATION lib${LIB_SUFFIX}) + if (INSTALL_DEV) - install(TARGETS tvm DESTINATION lib${LIB_SUFFIX}) install( DIRECTORY "include/." DESTINATION "include" FILES_MATCHING @@ -220,11 +231,25 @@ if (INSTALL_DEV) DIRECTORY "dlpack/include/." DESTINATION "include" FILES_MATCHING PATTERN "*.h" - ) + ) + install( + DIRECTORY "nnvm/include/." DESTINATION "include" + FILES_MATCHING + PATTERN "*.h" + ) else(INSTALL_DEV) install( DIRECTORY "include/tvm/runtime/." DESTINATION "include/tvm/runtime" FILES_MATCHING PATTERN "*.h" - ) + ) endif(INSTALL_DEV) + +# More target definitions +if(MSVC) + target_compile_definitions(tvm PRIVATE -DHalide_EXPORTS) + target_compile_definitions(tvm_runtime PRIVATE -DHalide_EXPORTS) + target_compile_definitions(tvm PRIVATE -DTVM_EXPORTS) + target_compile_definitions(tvm_runtime PRIVATE -DTVM_EXPORTS) + target_compile_definitions(nnvm_compiler PRIVATE -DNNVM_EXPORTS) +endif() diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index ab9950a9f31d..6e3cf55b94b0 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -1,37 +1,42 @@ -Contributors of TVM -=================== -TVM adopts Apache style committer model. 
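The target definitions above also add a `runtime` convenience target and a gtest-based `cpptest` target whose test executables are excluded from the default build. A minimal sketch of how these might be exercised, assuming libgtest is installed where `find_library` can locate it (otherwise the `cpptest` target is simply not generated); the binary name at the end is illustrative only, since each executable is named after its source file under `tests/cpp/`.

```bash
# Sketch: build only the runtime, then build and run the C++ unit tests.
mkdir -p build && cd build && cmake ..
make -j4 runtime     # builds just libtvm_runtime
make -j4 cpptest     # builds one gtest executable per tests/cpp/*.cc (requires libgtest)
# The binaries land in the build directory, named after their sources,
# e.g. tests/cpp/some_test.cc -> ./some_test (name here is illustrative).
./some_test
```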
The package is developed and used by the community. +TVM Contributors +================ +TVM adopts the Apache style model and governs by merit. We believe that it is important to create an inclusive community where everyone can use, +contribute to, and influence the direction of the project. We actively invite contributors who have earned the merit to be part of the development community. -We actively seek committers that comes from contributors who: -- Made substantial contribution to the project. -- Willing to spent time on maintaining and lead the project. +See the [community structure document](http://docs.tvm.ai/contribute/community.html) for the explanation of community structure and contribution guidelines. -How to Contribute ------------------ -See [Contributor guide](docs/how_to/contribute.md) on how to contribute - -Committers ----------- -Committers are people who have made substantial contribution to the project and granted write access to the project. -- [Tianqi Chen](https://github.com/tqchen), University of Washington -- [Thierry Moreau](http://homes.cs.washington.edu/~moreau/), University of Washington -- [Haichen Shen](http://homes.cs.washington.edu/~haichen/), University of Washington -- [Ziheng Jiang](https://github.com/ZihengJiang), Fudan University - -Code Owners ------------ -[Code owners](CODEOWNERS) are people who make substantial contribution to a module -and are qualified to lead development and review changes of the owned module. +## Committers +- [Tianqi Chen](https://github.com/tqchen) (PMC) +- [Thierry Moreau](http://homes.cs.washington.edu/~moreau/) +- [Ziheng Jiang](https://github.com/ZihengJiang) +- [Haichen Shen](http://homes.cs.washington.edu/~haichen/) +- [Yizhi Liu](https://github.com/yzhliu) +## Code Owners - [Aditya Atluri](https://github.com/adityaatluri) ROCM - [Leyuan Wang](https://github.com/Laurawly) TOPI - [Yuwei Hu](https://github.com/Huyuwei) TOPI -- [Yizhi Liu](https://github.com/javelinjs) JVM package +- [Zhixun Tan](https://github.com/phisiart) OpenGL/WebGL backend +- [Nick Hynes](https://github.com/nhynes) SGX and secured computing + +## Reviewers +- [Masahiro Masuda](https://github.com/masahi) +- [Kazutaka Morita](https://github.com/kazum) +- [Pariksheet Pinjari](https://github.com/PariksheetPinjari909) +- [Siva](https://github.com/srkreddy1238) +- [Alex Weaver](https://github.com/alex-weaver) +- [Eddie Yan](https://github.com/eqy) +- [Joshua Z. Zhang](https://github.com/zhreshold) +- [Lianmin Zheng](https://github.com/merrymercy) -List of Contributors --------------------- +## List of Contributors - [Full List of Contributors](https://github.com/dmlc/tvm/graphs/contributors) - To contributors: please add your name to the list. 
- [Qiao Zhang](https://github.com/zhangqiaorjc) - [Jian Weng](https://github.com/were) - [Masahiro Masuda](https://github.com/masahi) +- [Haolong Zhang](https://github.com/haolongzhangm) +- [Cody Hao Yu](https://github.com/comaniac) +- [Chris Nuernberger](https://github.com/cnuernber) +- [Tatsuya Nishiyama](https://github.com/nishi-t) +- [Kazutaka Morita](https://github.com/kazum) diff --git a/HalideIR b/HalideIR index d91cf97d5d6c..a0b9563f4571 160000 --- a/HalideIR +++ b/HalideIR @@ -1 +1 @@ -Subproject commit d91cf97d5d6cd2b47ec408bb08e978b88cbf6ab7 +Subproject commit a0b9563f45719553adf4d39fe3c14db1af0e1f40 diff --git a/Jenkinsfile b/Jenkinsfile index ef9666351ba5..8d76ebedeaae 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -4,13 +4,14 @@ // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ // tvm libraries -tvm_runtime = "lib/libtvm_runtime.so, config.mk" -tvm_lib = "lib/libtvm.so, " + tvm_runtime +tvm_runtime = "build/libtvm_runtime.so, build/config.cmake" +tvm_lib = "build/libtvm.so, " + tvm_runtime // LLVM upstream lib -tvm_multilib = "lib/libtvm_llvm40.so, lib/libtvm_llvm50.so, lib/libtvm_llvm60.so, " + tvm_runtime +tvm_multilib = "build/libtvm.so, " + + "build/libvta.so, build/libtvm_topi.so, build/libnnvm_compiler.so, " + tvm_runtime // command to start a docker container -docker_run = 'tests/ci_build/ci_build.sh' +docker_run = 'docker/bash.sh' // timeout in minutes max_time = 60 @@ -38,7 +39,7 @@ stage("Sanity Check") { node('linux') { ws('workspace/tvm/sanity') { init_git() - sh "${docker_run} lint ./tests/scripts/task_lint.sh" + sh "${docker_run} tvmai/ci-lint ./tests/scripts/task_lint.sh" } } } @@ -47,14 +48,14 @@ stage("Sanity Check") { // Run make. First try to do an incremental make from a previous workspace in hope to // accelerate the compilation. If something wrong, clean the workspace and then // build from scratch. -def make(docker_type, make_flag) { +def make(docker_type, path, make_flag) { timeout(time: max_time, unit: 'MINUTES') { try { - sh "${docker_run} ${docker_type} make ${make_flag}" + sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}" } catch (exc) { echo 'Incremental compilation failed. Fall back to build from scratch' - sh "${docker_run} ${docker_type} make clean" - sh "${docker_run} ${docker_type} make ${make_flag}" + sh "${docker_run} ${docker_type} ./tests/scripts/task_clean.sh ${path}" + sh "${docker_run} ${docker_type} ./tests/scripts/task_build.sh ${path} ${make_flag}" } } } @@ -84,30 +85,35 @@ stage('Build') { ws('workspace/tvm/build-gpu') { init_git() sh """ - cp make/config.mk . - echo USE_CUDNN=1 >> config.mk - echo USE_CUDA=1 >> config.mk - echo USE_OPENCL=1 >> config.mk - echo LLVM_CONFIG=llvm-config-4.0 >> config.mk - echo USE_RPC=1 >> config.mk - echo USE_GRAPH_RUNTIME=1 >> config.mk - echo USE_BLAS=openblas >> config.mk - rm -f lib/libtvm_runtime.so lib/libtvm.so + mkdir -p build + cd build + cp ../cmake/config.cmake . 
+ echo set\\(USE_CUBLAS ON\\) >> config.cmake + echo set\\(USE_CUDNN ON\\) >> config.cmake + echo set\\(USE_CUDA ON\\) >> config.cmake + echo set\\(USE_OPENGL ON\\) >> config.cmake + echo set\\(USE_LLVM llvm-config-6.0\\) >> config.cmake + echo set\\(USE_RPC ON\\) >> config.cmake + echo set\\(USE_SORT ON\\) >> config.cmake + echo set\\(USE_GRAPH_RUNTIME ON\\) >> config.cmake + echo set\\(USE_BLAS openblas\\) >> config.cmake + echo set\\(CMAKE_CXX_COMPILER g++\\) >> config.cmake + echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake """ - make('gpu', '-j2') - sh "mv lib/libtvm.so lib/libtvm_llvm40.so" - sh "echo LLVM_CONFIG=llvm-config-5.0 >> config.mk" - make('gpu', '-j2') - sh "mv lib/libtvm.so lib/libtvm_llvm50.so" - sh "echo LLVM_CONFIG=llvm-config-6.0 >> config.mk" - make('gpu', '-j2') - sh "mv lib/libtvm.so lib/libtvm_llvm60.so" + make('tvmai/ci-gpu', 'build', '-j2') pack_lib('gpu', tvm_multilib) + // compiler test sh """ - echo USE_ROCM=1 >> config.mk - echo ROCM_PATH=/opt/rocm >> config.mk + mkdir -p build2 + cd build2 + cp ../cmake/config.cmake . + echo set\\(USE_OPENCL ON\\) >> config.cmake + echo set\\(USE_ROCM ON\\) >> config.cmake + echo set\\(USE_VULKAN ON\\) >> config.cmake + echo set\\(CMAKE_CXX_COMPILER clang-6.0\\) >> config.cmake + echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake """ - make('gpu', '-j2') + make('tvmai/ci-gpu', 'build2', '-j2') } } }, @@ -116,13 +122,20 @@ stage('Build') { ws('workspace/tvm/build-cpu') { init_git() sh """ - cp make/config.mk . - echo USE_CUDA=0 >> config.mk - echo USE_OPENCL=0 >> config.mk - echo USE_RPC=0 >> config.mk + mkdir -p build + cd build + cp ../cmake/config.cmake . + echo set\\(USE_SORT ON\\) >> config.cmake + echo set\\(USE_LLVM llvm-config-4.0\\) >> config.cmake + echo set\\(CMAKE_CXX_COMPILER g++\\) >> config.cmake + echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake """ - make('cpu', '-j2') + make('tvmai/ci-cpu', 'build', '-j2') pack_lib('cpu', tvm_lib) + timeout(time: max_time, unit: 'MINUTES') { + sh "${docker_run} tvmai/ci-cpu ./tests/scripts/task_cpp_unittest.sh" + sh "${docker_run} tvmai/ci-cpu ./tests/scripts/task_python_vta.sh" + } } } }, @@ -131,48 +144,19 @@ stage('Build') { ws('workspace/tvm/build-i386') { init_git() sh """ - cp make/config.mk . - echo USE_CUDA=0 >> config.mk - echo USE_OPENCL=0 >> config.mk - echo LLVM_CONFIG=llvm-config-4.0 >> config.mk - echo USE_RPC=1 >> config.mk + mkdir -p build + cd build + cp ../cmake/config.cmake . + echo set\\(USE_SORT ON\\) >> config.cmake + echo set\\(USE_RPC ON\\) >> config.cmake + echo set\\(USE_LLVM llvm-config-5.0\\) >> config.cmake + echo set\\(CMAKE_CXX_COMPILER g++\\) >> config.cmake + echo set\\(CMAKE_CXX_FLAGS -Werror\\) >> config.cmake """ - make('i386', '-j2') - sh "mv lib/libtvm.so lib/libtvm_llvm40.so" - sh "echo LLVM_CONFIG=llvm-config-5.0 >> config.mk" - make('i386', '-j2') - sh "mv lib/libtvm.so lib/libtvm_llvm50.so" - sh "echo LLVM_CONFIG=llvm-config-6.0 >> config.mk" - make('i386', '-j2') - sh "mv lib/libtvm.so lib/libtvm_llvm60.so" + make('tvmai/ci-i386', 'build', '-j2') pack_lib('i386', tvm_multilib) } } - }, - 'web': { - node('emcc') { - ws('workspace/tvm/build-weblib') { - init_git() - sh """ - cp make/config.mk . - echo USE_CUDA=0 >> config.mk - echo USE_OPENCL=0 >> config.mk - echo LLVM_CONFIG=llvm-config >> config.mk - echo USE_RPC=0 >> config.mk - """ - sh "${docker_run} emscripten echo testing javascript..." 
- timeout(time: max_time, unit: 'MINUTES') { - try { - sh "${docker_run} emscripten ./tests/scripts/task_web_build.sh" - } catch (exc) { - echo 'Incremental compilation failed. Fall back to build from scratch' - sh "${docker_run} emscripten make clean" - sh "${docker_run} emscripten ./tests/scripts/task_web_build.sh" - } - } - pack_lib('weblib', tvm_lib) - } - } } } @@ -182,14 +166,8 @@ stage('Unit Test') { ws('workspace/tvm/ut-python-gpu') { init_git() unpack_lib('gpu', tvm_multilib) - sh "cp lib/libtvm_llvm40.so lib/libtvm.so" timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} gpu ./tests/scripts/task_python_unittest.sh" - } - // Test on the lastest mainline. - sh "cp lib/libtvm_llvm60.so lib/libtvm.so" - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} gpu ./tests/scripts/task_python_unittest.sh" + sh "${docker_run} tvmai/ci-gpu ./tests/scripts/task_python_unittest.sh" } } } @@ -199,26 +177,10 @@ stage('Unit Test') { ws('workspace/tvm/ut-python-i386') { init_git() unpack_lib('i386', tvm_multilib) - sh "cp lib/libtvm_llvm40.so lib/libtvm.so" - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} i386 ./tests/scripts/task_python_unittest.sh" - sh "${docker_run} i386 ./tests/scripts/task_python_integration.sh" - } - // Test on llvm 5.0 - sh "cp lib/libtvm_llvm50.so lib/libtvm.so" timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} i386 ./tests/scripts/task_python_integration.sh" - } - } - } - }, - 'cpp': { - node('linux') { - ws('workspace/tvm/ut-cpp') { - init_git() - unpack_lib('cpu', tvm_lib) - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} cpu ./tests/scripts/task_cpp_unittest.sh" + sh "${docker_run} tvmai/ci-i386 ./tests/scripts/task_python_unittest.sh" + sh "${docker_run} tvmai/ci-i386 ./tests/scripts/task_python_integration.sh" + sh "${docker_run} tvmai/ci-i386 ./tests/scripts/task_python_vta.sh" } } } @@ -228,9 +190,8 @@ stage('Unit Test') { ws('workspace/tvm/ut-java') { init_git() unpack_lib('gpu', tvm_multilib) - sh "cp lib/libtvm_llvm40.so lib/libtvm.so" timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} gpu ./tests/scripts/task_java_unittest.sh" + sh "${docker_run} tvmai/ci-gpu ./tests/scripts/task_java_unittest.sh" } } } @@ -243,22 +204,11 @@ stage('Integration Test') { ws('workspace/tvm/it-python-gpu') { init_git() unpack_lib('gpu', tvm_multilib) - sh "cp lib/libtvm_llvm40.so lib/libtvm.so" - timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} gpu ./tests/scripts/task_python_integration.sh" - sh "${docker_run} gpu ./tests/scripts/task_python_topi.sh" - } - } - } - }, - 'web': { - node('emcc') { - ws('workspace/tvm/it-weblib') { - init_git() - unpack_lib('weblib', tvm_lib) - sh "${docker_run} emscripten echo testing javascript..." 
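Since the pipeline above now invokes every step as `docker/bash.sh <image> <script>` with explicit `tvmai/ci-*` image names, the same commands can in principle be replayed outside Jenkins. A sketch under the assumption that Docker is available and the `tvmai` CI images can be pulled or have been built locally (their availability is not stated in the diff itself):

```bash
# Sketch: reproduce individual CI steps locally, mirroring the Jenkinsfile invocations.
./docker/bash.sh tvmai/ci-lint ./tests/scripts/task_lint.sh
./docker/bash.sh tvmai/ci-cpu  ./tests/scripts/task_cpp_unittest.sh
./docker/bash.sh tvmai/ci-gpu  ./tests/scripts/task_python_unittest.sh  # assumes an NVIDIA GPU host
```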
timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} emscripten ./tests/scripts/task_web_test.sh" + sh "${docker_run} tvmai/ci-gpu ./tests/scripts/task_python_integration.sh" + sh "${docker_run} tvmai/ci-gpu ./tests/scripts/task_python_topi.sh" + sh "${docker_run} tvmai/ci-gpu ./tests/scripts/task_cpp_topi.sh" + sh "${docker_run} tvmai/ci-gpu ./tests/scripts/task_python_nnvm.sh" } } } @@ -268,9 +218,8 @@ stage('Integration Test') { ws('workspace/tvm/docs-python-gpu') { init_git() unpack_lib('gpu', tvm_multilib) - sh "cp lib/libtvm_llvm40.so lib/libtvm.so" timeout(time: max_time, unit: 'MINUTES') { - sh "${docker_run} gpu ./tests/scripts/task_python_docs.sh" + sh "${docker_run} tvmai/ci-gpu ./tests/scripts/task_python_docs.sh" } pack_lib('mydocs', 'docs.tgz') } diff --git a/Makefile b/Makefile index 4a16d5162102..2d3d4843c4c0 100644 --- a/Makefile +++ b/Makefile @@ -1,16 +1,7 @@ ROOTDIR = $(CURDIR) -ifndef config -ifneq ("$(wildcard ./config.mk)","") - config ?= config.mk -else - config ?= make/config.mk -endif -endif - -include $(config) - -.PHONY: clean install installdev all test doc pylint cpplint lint verilog cython cython2 cython3 web runtime +.PHONY: clean all test doc pylint cpplint lint\ + cython cython2 cython3 web runtime vta ifndef DMLC_CORE_PATH DMLC_CORE_PATH = $(ROOTDIR)/dmlc-core @@ -20,242 +11,65 @@ ifndef DLPACK_PATH DLPACK_PATH = $(ROOTDIR)/dlpack endif -UNAME_S := $(shell uname -s) - -# The flags -LLVM_CFLAGS= -fno-rtti -DDMLC_ENABLE_RTTI=0 -DDMLC_USE_FOPEN64=0 -LDFLAGS = -pthread -lm -ldl -INCLUDE_FLAGS = -Iinclude -I$(DLPACK_PATH)/include -I$(DMLC_CORE_PATH)/include -IHalideIR/src -Itopi/include -CFLAGS = -std=c++11 -Wall -O2 $(INCLUDE_FLAGS) -fPIC -FRAMEWORKS = -OBJCFLAGS = -fno-objc-arc -EMCC_FLAGS= -s RESERVED_FUNCTION_POINTERS=2 -s NO_EXIT_RUNTIME=1 -s MAIN_MODULE=1 -DDMLC_LOG_STACK_TRACE=0\ - -std=c++11 -Oz $(INCLUDE_FLAGS) - -# llvm configuration -ifdef LLVM_CONFIG - LLVM_VERSION=$(shell $(LLVM_CONFIG) --version| cut -b 1,3) - LLVM_INCLUDE=$(filter -I%, $(shell $(LLVM_CONFIG) --cxxflags)) - LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags --libs --system-libs) - LLVM_CFLAGS += $(LLVM_INCLUDE) -DTVM_LLVM_VERSION=$(LLVM_VERSION) -else - LLVM_VERSION=00 -endif - -# The source code dependencies -LIB_HALIDEIR = HalideIR/lib/libHalideIR.a +INCLUDE_FLAGS = -Iinclude -I$(DLPACK_PATH)/include -I$(DMLC_CORE_PATH)/include +PKG_CFLAGS = -std=c++11 -Wall -O2 $(INCLUDE_FLAGS) -fPIC +PKG_LDFLAGS = -CC_SRC = $(filter-out src/contrib/%.cc src/runtime/%.cc src/codgen/llvm/%.cc,\ - $(wildcard src/*/*.cc src/*/*/*.cc)) -LLVM_SRC = $(wildcard src/codegen/llvm/*.cc src/codegen/llvm/*/*.cc) -METAL_SRC = $(wildcard src/runtime/metal/*.mm) -CUDA_SRC = $(wildcard src/runtime/cuda/*.cc) -ROCM_SRC = $(wildcard src/runtime/rocm/*.cc) -OPENCL_SRC = $(wildcard src/runtime/opencl/*.cc) -RPC_SRC = $(wildcard src/runtime/rpc/*.cc) -GRAPH_SRC = $(wildcard src/runtime/graph/*.cc) -RUNTIME_SRC = $(wildcard src/runtime/*.cc) -# Objectives -LLVM_BUILD = build/llvm${LLVM_VERSION} -LLVM_OBJ = $(patsubst src/%.cc, ${LLVM_BUILD}/%.o, $(LLVM_SRC)) -METAL_OBJ = $(patsubst src/%.mm, build/%.o, $(METAL_SRC)) -CUDA_OBJ = $(patsubst src/%.cc, build/%.o, $(CUDA_SRC)) -ROCM_OBJ = $(patsubst src/%.cc, build/%.o, $(ROCM_SRC)) -OPENCL_OBJ = $(patsubst src/%.cc, build/%.o, $(OPENCL_SRC)) -RPC_OBJ = $(patsubst src/%.cc, build/%.o, $(RPC_SRC)) -GRAPH_OBJ = $(patsubst src/%.cc, build/%.o, $(GRAPH_SRC)) -CC_OBJ = $(patsubst src/%.cc, build/%.o, $(CC_SRC)) $(LLVM_OBJ) -RUNTIME_OBJ = $(patsubst src/%.cc, build/%.o, 
$(RUNTIME_SRC)) -CONTRIB_OBJ = +all: + @mkdir -p build && cd build && cmake .. && $(MAKE) -# Deps -ALL_DEP = $(CC_OBJ) $(CONTRIB_OBJ) $(LIB_HALIDEIR) -RUNTIME_DEP = $(RUNTIME_OBJ) +runtime: + @mkdir -p build && cd build && cmake .. && $(MAKE) runtime -# Dependency specific rules -ifdef CUDA_PATH - NVCC=$(CUDA_PATH)/bin/nvcc - CFLAGS += -I$(CUDA_PATH)/include - LDFLAGS += -L$(CUDA_PATH)/lib64 -endif +vta: + @mkdir -p build && cd build && cmake .. && $(MAKE) vta -ifeq ($(USE_CUDA), 1) - CFLAGS += -DTVM_CUDA_RUNTIME=1 - LDFLAGS += -lcuda -lcudart -lnvrtc - RUNTIME_DEP += $(CUDA_OBJ) -else - CFLAGS += -DTVM_CUDA_RUNTIME=0 -endif +cpptest: + @mkdir -p build && cd build && cmake .. && $(MAKE) cpptest -ifdef ROCM_PATH - CFLAGS += -I$(ROCM_PATH)/include - LDFLAGS += -L$(ROCM_PATH)/lib -endif +# EMCC; Web related scripts +EMCC_FLAGS= -std=c++11 -DDMLC_LOG_STACK_TRACE=0\ + -Oz -s RESERVED_FUNCTION_POINTERS=2 -s MAIN_MODULE=1 -s NO_EXIT_RUNTIME=1\ + -s TOTAL_MEMORY=1073741824\ + -s EXTRA_EXPORTED_RUNTIME_METHODS="['cwrap','getValue','setValue','addFunction']"\ + -s USE_GLFW=3 -s USE_WEBGL2=1 -lglfw\ + $(INCLUDE_FLAGS) -ifeq ($(USE_ROCM), 1) - CFLAGS += -DTVM_ROCM_RUNTIME=1 -D__HIP_PLATFORM_HCC__=1 - LDFLAGS += -lhip_hcc - RUNTIME_DEP += $(ROCM_OBJ) -else - CFLAGS += -DTVM_ROCM_RUNTIME=0 -endif - -ifeq ($(USE_OPENCL), 1) - CFLAGS += -DTVM_OPENCL_RUNTIME=1 - ifeq ($(UNAME_S), Darwin) - FRAMEWORKS += -framework OpenCL - else - LDFLAGS += -lOpenCL - endif - RUNTIME_DEP += $(OPENCL_OBJ) -else - CFLAGS += -DTVM_OPENCL_RUNTIME=0 -endif - -ifeq ($(USE_METAL), 1) - CFLAGS += -DTVM_METAL_RUNTIME=1 - LDFLAGS += -lobjc - RUNTIME_DEP += $(METAL_OBJ) - FRAMEWORKS += -framework Metal -framework Foundation -else - CFLAGS += -DTVM_METAL_RUNTIME=0 -endif - -ifeq ($(USE_RPC), 1) - RUNTIME_DEP += $(RPC_OBJ) -endif - -ifeq ($(USE_GRAPH_RUNTIME), 1) - RUNTIME_DEP += $(GRAPH_OBJ) -endif - -include make/contrib/cblas.mk -include make/contrib/nnpack.mk -include make/contrib/cudnn.mk - -ifdef ADD_CFLAGS - CFLAGS += $(ADD_CFLAGS) -endif - -ifdef ADD_LDFLAGS - LDFLAGS += $(ADD_LDFLAGS) -endif - -ifeq ($(OS),Windows_NT) - JVM_PKG_PROFILE := windows - SHARED_LIBRARY_SUFFIX := dll -else - UNAME_S := $(shell uname -s) - ifeq ($(UNAME_S), Darwin) - JVM_PKG_PROFILE := osx-x86_64 - SHARED_LIBRARY_SUFFIX := dylib - else - JVM_PKG_PROFILE := linux-x86_64 - SHARED_LIBRARY_SUFFIX := so - endif -endif +web: build/libtvm_web_runtime.js build/libtvm_web_runtime.bc -JVM_TEST_ARGS := $(if $(JVM_TEST_ARGS),$(JVM_TEST_ARGS),-DskipTests -Dcheckstyle.skip=true) - -ifeq ($(USE_CUDA), 1) - JVM_PKG_PROFILE := $(JVM_PKG_PROFILE)-gpu -else ifeq ($(USE_OPENCL), 1) - JVM_PKG_PROFILE := $(JVM_PKG_PROFILE)-gpu -else ifeq ($(USE_METAL), 1) - JVM_PKG_PROFILE := $(JVM_PKG_PROFILE)-gpu -else - JVM_PKG_PROFILE := $(JVM_PKG_PROFILE)-cpu -endif - -BUILD_TARGETS ?= lib/libtvm.$(SHARED_LIBRARY_SUFFIX) lib/libtvm_runtime.$(SHARED_LIBRARY_SUFFIX) -all: ${BUILD_TARGETS} -runtime: lib/libtvm_runtime.$(SHARED_LIBRARY_SUFFIX) -web: lib/libtvm_web_runtime.js lib/libtvm_web_runtime.bc - -include tests/cpp/unittest.mk - -test: $(TEST) - -include verilog/verilog.mk -verilog: $(VER_LIBS) - -# Special rules for LLVM related modules. 
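The rewritten Makefile shown in this hunk reduces the old hand-maintained build rules to thin wrapper targets (`all`, `runtime`, `vta`, `cpptest`) that create `./build` and delegate to CMake. A short sketch of typical invocations; forwarding `-j` parallelism to the CMake-generated sub-make is an assumption about GNU make rather than something stated in the diff:

```bash
make             # configure with CMake and build everything under ./build
make runtime     # runtime-only build (libtvm_runtime)
make vta         # VTA library
make cpptest     # C++ unit tests; requires gtest, as noted in CMakeLists.txt
make -j4         # assumed: GNU make shares its jobserver with the sub-make
```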
-${LLVM_BUILD}/codegen/llvm/%.o: src/codegen/llvm/%.cc - @mkdir -p $(@D) - $(CXX) $(CFLAGS) $(LLVM_CFLAGS) -MM -MT ${LLVM_BUILD}/codegen/llvm/$*.o $< >${LLVM_BUILD}/codegen/llvm/$*.d - $(CXX) -c $(CFLAGS) $(LLVM_CFLAGS) -c $< -o $@ - -build/runtime/metal/%.o: src/runtime/metal/%.mm - @mkdir -p $(@D) - $(CXX) $(OBJCFLAGS) $(CFLAGS) -MM -MT build/runtime/metal/$*.o $< >build/runtime/metal/$*.d - $(CXX) $(OBJCFLAGS) -c $(CFLAGS) -c $< -o $@ - -build/%.o: src/%.cc - @mkdir -p $(@D) - $(CXX) $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d - $(CXX) -c $(CFLAGS) -c $< -o $@ - -lib/libtvm.dylib: $(ALL_DEP) $(RUNTIME_DEP) - @mkdir -p $(@D) - $(CXX) $(CFLAGS) $(FRAMEWORKS) -shared -o $@ $(filter %.o %.a, $^) $(LDFLAGS) - -lib/libtvm_runtime.dylib: $(RUNTIME_DEP) - @mkdir -p $(@D) - $(CXX) $(CFLAGS) $(FRAMEWORKS) -shared -o $@ $(filter %.o %.a, $^) $(LDFLAGS) - -lib/libtvm.so: $(ALL_DEP) $(RUNTIME_DEP) - @mkdir -p $(@D) - $(CXX) $(CFLAGS) $(FRAMEWORKS) -shared -o $@ $(filter %.o %.a, $^) $(LDFLAGS) - -lib/libtvm_runtime.so: $(RUNTIME_DEP) - @mkdir -p $(@D) - $(CXX) $(CFLAGS) $(FRAMEWORKS) -shared -o $@ $(filter %.o %.a, $^) $(LDFLAGS) - -lib/libtvm_web_runtime.bc: web/web_runtime.cc +build/libtvm_web_runtime.bc: web/web_runtime.cc @mkdir -p build/web @mkdir -p $(@D) - $(CXX) $(CFLAGS) -MM -MT lib/libtvm_web_runtime.bc $< >build/web/web_runtime.d + emcc $(EMCC_FLAGS) -MM -MT build/libtvm_web_runtime.bc $< >build/web/web_runtime.d emcc $(EMCC_FLAGS) -o $@ web/web_runtime.cc -lib/libtvm_web_runtime.js: lib/libtvm_web_runtime.bc +build/libtvm_web_runtime.js: build/libtvm_web_runtime.bc @mkdir -p $(@D) - emcc $(EMCC_FLAGS) -o $@ lib/libtvm_web_runtime.bc - -$(LIB_HALIDEIR): LIBHALIDEIR - -LIBHALIDEIR: - + cd HalideIR; make lib/libHalideIR.a DMLC_CORE_PATH=../dmlc-core; cd $(ROOTDIR) + emcc $(EMCC_FLAGS) -o $@ build/libtvm_web_runtime.bc +# Lint scripts cpplint: - python dmlc-core/scripts/lint.py topi cpp topi/include; - python dmlc-core/scripts/lint.py tvm cpp include src verilog\ + python3 dmlc-core/scripts/lint.py vta cpp vta/include vta/src + python3 dmlc-core/scripts/lint.py topi cpp topi/include; + python3 dmlc-core/scripts/lint.py nnvm cpp nnvm/include nnvm/src; + python3 dmlc-core/scripts/lint.py tvm cpp include src verilog\ examples/extension/src examples/graph_executor/src pylint: - pylint python/tvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc - pylint topi/python/topi --rcfile=$(ROOTDIR)/tests/lint/pylintrc + python3 -m pylint python/tvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc + python3 -m pylint topi/python/topi --rcfile=$(ROOTDIR)/tests/lint/pylintrc + python3 -m pylint nnvm/python/nnvm --rcfile=$(ROOTDIR)/tests/lint/pylintrc + python3 -m pylint vta/python/vta --rcfile=$(ROOTDIR)/tests/lint/pylintrc jnilint: - python dmlc-core/scripts/lint.py tvm4j-jni cpp jvm/native/src + python3 dmlc-core/scripts/lint.py tvm4j-jni cpp jvm/native/src lint: cpplint pylint jnilint doc: doxygen docs/Doxyfile -install: lib/libtvm_runtime.$(SHARED_LIBRARY_SUFFIX) - mkdir -p $(DESTDIR)$(PREFIX)/include/tvm/runtime - cp -R include/tvm/runtime/. 
$(DESTDIR)$(PREFIX)/include/tvm/runtime - cp lib/libtvm_runtime.$(SHARED_LIBRARY_SUFFIX) $(DESTDIR)$(PREFIX)/lib - -installdev: lib/libtvm.$(SHARED_LIBRARY_SUFFIX) lib/libtvm_runtime.$(SHARED_LIBRARY_SUFFIX) lib/libtvm.a - mkdir -p $(DESTDIR)$(PREFIX)/include - cp -R include/tvm $(DESTDIR)$(PREFIX)/include - cp lib/libtvm.$(SHARED_LIBRARY_SUFFIX) $(DESTDIR)$(PREFIX)/lib - cp lib/libtvm_runtime.$(SHARED_LIBRARY_SUFFIX) $(DESTDIR)$(PREFIX)/lib - cp lib/libtvm.a $(DESTDIR)$(PREFIX)/lib - # Cython build cython: cd python; python setup.py build_ext --inplace @@ -269,22 +83,34 @@ cython3: cyclean: rm -rf python/tvm/*/*/*.so python/tvm/*/*/*.dylib python/tvm/*/*/*.cpp +# JVM build rules +ifeq ($(OS),Windows_NT) + JVM_PKG_PROFILE := windows + SHARED_LIBRARY_SUFFIX := dll +else + UNAME_S := $(shell uname -s) + ifeq ($(UNAME_S), Darwin) + JVM_PKG_PROFILE := osx-x86_64 + SHARED_LIBRARY_SUFFIX := dylib + else + JVM_PKG_PROFILE := linux-x86_64 + SHARED_LIBRARY_SUFFIX := so + endif +endif + +JVM_TEST_ARGS := $(if $(JVM_TEST_ARGS),$(JVM_TEST_ARGS),-DskipTests -Dcheckstyle.skip=true) + jvmpkg: (cd $(ROOTDIR)/jvm; \ mvn clean package -P$(JVM_PKG_PROFILE) -Dcxx="$(CXX)" \ - -Dcflags="$(CFLAGS)" -Dldflags="$(LDFLAGS)" \ - -Dcurrent_libdir="$(ROOTDIR)/lib" $(JVM_TEST_ARGS)) + -Dcflags="$(PKG_CFLAGS)" -Dldflags="$(PKG_LDFLAGS)" \ + -Dcurrent_libdir="$(ROOTDIR)/build" $(JVM_TEST_ARGS)) jvminstall: (cd $(ROOTDIR)/jvm; \ mvn install -P$(JVM_PKG_PROFILE) -Dcxx="$(CXX)" \ - -Dcflags="$(CFLAGS)" -Dldflags="$(LDFLAGS)" \ - -Dcurrent_libdir="$(ROOTDIR)/lib" $(JVM_TEST_ARGS)) + -Dcflags="$(PKG_CFLAGS)" -Dldflags="$(PKG_LDFLAGS)" \ + -Dcurrent_libdir="$(ROOTDIR)/build" $(JVM_TEST_ARGS)) +# clean rule clean: - $(RM) -rf build lib bin *~ */*~ */*/*~ */*/*/*~ */*.o */*/*.o */*/*/*.o */*.d */*/*.d */*/*/*.d - cd HalideIR; make clean; cd $(ROOTDIR) - --include build/*.d --include build/*/*.d --include build/*/*/*.d --include build/*/*/*/*.d + @mkdir -p build && cd build && cmake .. && $(MAKE) clean diff --git a/NEWS.md b/NEWS.md index 6bc97b163ab1..567aabf3fcbd 100644 --- a/NEWS.md +++ b/NEWS.md @@ -3,11 +3,104 @@ TVM Change Log This file records the changes in TVM library in reverse chronological order. +## On-going version -## On onging verison +Refer to the Roadmap issue for complete list on on-going version features. +If you check in something that is not reflected in Roadmap issue, please reply +to that issue so it can get added. + +## 0.3 + +This release features numerous improvements in TOPI and backends. We make the first step toward object detection support in TOPI, featuring operators necessary for YOLO and SSDs. The topi now supports numpy-style API and operator overloading. RPC is significantly improved to support resource allocation and using a pool of devices. We are adding two new backends: WebGL for running GPUs on the browser, and Vulkan for running on next-generation graphics API. 
+ +- TOPI Vision operators + - SSD support + - YOLO support + - NMS operator support in vision +- TOPI general numpy-style operators + - numpy style operator overload in topi + - more operators: flip, take + - dilation support on conv2d and depthwise +- 8bit support + - ARM 8bit gemm + - ARM 8bit conv +- Low bit operator support + - popcount intrinsics + - 1-bit fully connected +- Contrib: MPSDNN fully-connected and conv2d support +- Better RPC support + - RPC Tracker support to allow centralized resource management + - RPC protocol upgrade (this is a non-backward compatible change) to support timeout in the proxy + - This is a breaking change, need to use the latest version of TVM runtime with the RPC + - Fault-tolerant to early server termination with correct exception propagated + - RPC support enabled for ROCm AMDGPUs +- Tutorials and docs + - How to deploy to android devices. +- Optimizations for hardware backends + - intel CPU (AVX and AVX512) +- Schedule Primitives + - rfactor now support factor_axis to specify the factored dimension in the result + - cache_write now support multiple output operators + - enable warp memory which generates shuffle instructions +- Framework bridge + - MXNet bridge supported +- C++ compiler API support + - build migration + - topi migration to c++ + - Target system in c++ +- WebGL backend + - runtime and codegen + - topi integration + - end to end pipeline on the browser +- Vulkan backend + - vulkan runtime + - spirv code generator +- Security + - intel SGX runtime support + - multi-threaded SGX runtime +- LLVM 7.0 support +- Robustness + - VerifyMemory to verify incorrect GPU schedules that writes into GPU memory from cpu + - Verify compute formulas +- Better CPU parallel runtime + +## 0.2 + +This release comes with a complete set of TOPI support for NNVM compiler, which allows compilation of end to end workloads. +We also make major improvements in supporting new backends: ROCm for AMDGPUs and ARM GPU. + +- Backend support + - Support LLVM mainline(4.0, 5.0, 6.0) + - Support ROCM stack for AMD GPUs + - More robust OpenCL support for ARM GPUs +- Android RPC runtime +- Multi-threading optimization for ARM + - multi-threaded depthwise + - multi-threaded conv2d +- New schedule primitives + - storage_align for shared memory alignment + - double_buffer - UnrollLoop : more robust version of unroll loop, count maximum steps that can be unrolled. +- Full set of TOPI operators + - Introduce tvm.target to specify target options for compilation better. 
+ - broadcast/ reduction operators + - pooling and global pooling + - Generic target support for topi + - schedule with external libraries +- End to end deep learning pipelines for CPU, GPU, ARM GPU +- Tutorials + - How to load compiled module in any language runtime + - How to use java runtime +- Contrib library: MIOpen, CuDNN +- Ongoing items that contains functioning pieces + - WebGL backend + - C++ compiler support + - MPS DNN + - low bit support, introduced popcount + + +## 0.1 -## 0.1rc - Language runtime - python - javascript diff --git a/README.md b/README.md index 07e550d76043..561ca91d5abe 100644 --- a/README.md +++ b/README.md @@ -1,33 +1,27 @@ -TVM: Tensor IR Stack for Deep Learning Systems + Open Deep Learning Compiler Stack ============================================== -[![GitHub license](http://dmlc.github.io/img/apache2.svg)](./LICENSE) -[![Build Status](http://mode-gpu.cs.washington.edu:8080/buildStatus/icon?job=dmlc/tvm/master)](http://mode-gpu.cs.washington.edu:8080/job/dmlc/job/tvm/job/master/) +[![GitHub license](https://dmlc.github.io/img/apache2.svg)](./LICENSE) +[![Build Status](http://mode-gpu.cs.washington.edu:8080/buildStatus/icon?job=tvm/master)](http://mode-gpu.cs.washington.edu:8080/job/tvm/job/master/) -[Installation](docs/how_to/install.md) | -[Documentation](http://docs.tvmlang.org) | -[Tutorials](http://tutorials.tvmlang.org) | -[Operator Inventory](topi) | -[FAQ](docs/faq.md) | +[Documentation](https://docs.tvm.ai) | [Contributors](CONTRIBUTORS.md) | +[Community](https://tvm.ai/community.html) | [Release Notes](NEWS.md) -TVM is a Tensor intermediate representation(IR) stack for deep learning systems. It is designed to close the gap between the +TVM is a compiler stack for deep learning systems. It is designed to close the gap between the productivity-focused deep learning frameworks, and the performance- and efficiency-focused hardware backends. TVM works with deep learning frameworks to provide end to end compilation to different backends. -Checkout our [announcement](http://tvmlang.org/2017/08/17/tvm-release-announcement.html) for more details. +Checkout the [tvm stack homepage](https://tvm.ai/) for more information. License ------- -© Contributors, 2017. Licensed under an [Apache-2.0](https://github.com/dmlc/tvm/blob/master/LICENSE) license. +© Contributors Licensed under an [Apache-2.0](https://github.com/dmlc/tvm/blob/master/LICENSE) license. Contribute to TVM ----------------- TVM adopts apache committer model, we aim to create an open source project that is maintained and owned by the community. - -- [Contributor Guide](docs/how_to/contribute.md) -- Please add your name to [CONTRIBUTORS.md](CONTRIBUTORS.md) -- Please also update [NEWS.md](NEWS.md) on changes and improvements in API and codes. +Checkout the [Contributor Guide](https://docs.tvm.ai/contribute/) Acknowledgement --------------- diff --git a/apps/README.md b/apps/README.md index 254f8c26a510..2345cc3ab548 100644 --- a/apps/README.md +++ b/apps/README.md @@ -3,9 +3,9 @@ This folder contains various extension projects using TVM, they also serve as examples on how to use TVM in your own project. If you are interested in writing optimized kernels with TVM, checkout [TOPI: TVM Operator Inventory](../topi). -If you are interested in end to end deep learning model compilation, checkout [NNVM Compiler](https://github.com/dmlc/nnvm). - [extension](extension) How to extend TVM C++ api along with python API. - [ios_rpc](ios_rpc) iOS RPC server. - [android_rpc](android_rpc) Android RPC server. 
+- [benchmark](benchmark) Example end-to-end compilation benchmarks - [howto_deploy](howto_deploy) Tutorial on how to deploy TVM with minimum code dependency. diff --git a/apps/android_deploy/.gitignore b/apps/android_deploy/.gitignore new file mode 100644 index 000000000000..39fb081a42a8 --- /dev/null +++ b/apps/android_deploy/.gitignore @@ -0,0 +1,9 @@ +*.iml +.gradle +/local.properties +/.idea/workspace.xml +/.idea/libraries +.DS_Store +/build +/captures +.externalNativeBuild diff --git a/apps/android_deploy/README.md b/apps/android_deploy/README.md new file mode 100644 index 000000000000..801ca8bdf95c --- /dev/null +++ b/apps/android_deploy/README.md @@ -0,0 +1,119 @@ +# Android TVM Demo + +This folder contains an Android demo app that shows how to deploy a model with the TVM runtime API on an Android phone. + +You will need [JDK](http://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html), [Android SDK](https://developer.android.com/studio/index.html), [Android NDK](https://developer.android.com/ndk) and an Android device to use this. + +## Build and Installation + +### Build APK + +We use [Gradle](https://gradle.org) to build. Please follow [the installation instruction](https://gradle.org/install) for your operating system. + +Before you build the Android application, please refer to the [TVM4J Installation Guide](https://github.com/dmlc/tvm/blob/master/jvm/README.md) and install tvm4j-core to your local maven repository. You can find the tvm4j dependency declared in `app/build.gradle`. Modify it if necessary. + +``` +dependencies { + compile fileTree(dir: 'libs', include: ['*.jar']) + androidTestCompile('com.android.support.test.espresso:espresso-core:2.2.2', { + exclude group: 'com.android.support', module: 'support-annotations' + }) + compile 'com.android.support:appcompat-v7:26.0.1' + compile 'com.android.support.constraint:constraint-layout:1.0.2' + compile 'com.android.support:design:26.0.1' + compile 'ml.dmlc.tvm:tvm4j-core:0.0.1-SNAPSHOT' + testCompile 'junit:junit:4.12' +} +``` + +By default the application uses the CPU flavor of the TVM runtime; follow the instructions below to set it up. +In `app/src/main/jni/make` you will find the JNI Makefile config `config.mk`; copy it to `app/src/main/jni` and modify it. + +```bash +cd apps/android_deploy/app/src/main/jni +cp make/config.mk . +``` + +Here is an example `config.mk`. + +```makefile +APP_ABI = arm64-v8a + +APP_PLATFORM = android-17 + +# whether enable OpenCL during compile +USE_OPENCL = 0 +``` + +Now use Gradle to compile the JNI code, resolve Java dependencies and build the Android application together with tvm4j. Run the following script to generate the APK file. + +```bash +export ANDROID_HOME=[Path to your Android SDK, e.g., ~/Android/sdk] +cd apps/android_deploy +gradle clean build +``` + +In `app/build/outputs/apk` you'll find `app-release-unsigned.apk`; use `dev_tools/gen_keystore.sh` to generate a signing keystore and `dev_tools/sign_apk.sh` to get the signed APK file `app/build/outputs/apk/tvmdemo-release.apk`. + +Upload `tvmdemo-release.apk` to your Android device and install it. + +### Build with OpenCL + +The application does not link against the OpenCL library unless you configure it to. Modify the JNI Makefile config in `app/src/main/jni` with the proper OpenCL configuration for your target. + +Here is an example `config.mk`.
+ +```makefile +APP_ABI = arm64-v8a + +APP_PLATFORM = android-17 + +# whether enable OpenCL during compile +USE_OPENCL = 1 + +# the additional include headers you want to add, e.g., SDK_PATH/adrenosdk/Development/Inc +ADD_C_INCLUDES = /opt/adrenosdk-osx/Development/Inc + +# the additional link libs you want to add, e.g., ANDROID_LIB_PATH/libOpenCL.so +ADD_LDLIBS = libOpenCL.so +``` + +Note that you should specify the correct GPU development headers for your Android device. Run `adb shell dumpsys | grep GLES` to find out what GPU your Android device uses. It is very likely the library (libOpenCL.so) is already present on the mobile device. For instance, I found it under `/system/vendor/lib64`. You can do `adb pull /system/vendor/lib64/libOpenCL.so ./` to get the file to your desktop. + +After you set up `config.mk`, follow the instructions in [Build APK](#buildapk) to build the Android package with the OpenCL flavor. + +## Cross Compile and Run on Android Devices + +### Architecture and Android Standalone Toolchain + +In order to cross compile a shared library (.so) for your Android device, you have to know the target triple for the device. (Refer to [Cross-compilation using Clang](https://clang.llvm.org/docs/CrossCompilation.html) for more information). Run `adb shell cat /proc/cpuinfo` to list the device's CPU information. + +Now use the NDK to generate a standalone toolchain for your device. For my test device, I use the following command. + +```bash +cd /opt/android-ndk/build/tools/ +./make-standalone-toolchain.sh --platform=android-24 --use-llvm --arch=arm64 --install-dir=/opt/android-toolchain-arm64 +``` + +If everything goes well, you will find the compile tools in `/opt/android-toolchain-arm64/bin`. For example, `bin/aarch64-linux-android-g++` can be used to compile C++ source code and create shared libraries for arm64 Android devices. + +### Place compiled model on Android application assets folder + +Follow the instructions [here](http://docs.tvm.ai/deploy/android.html) to get a compiled model for the Android target. + +Copy the compiled model files deploy_lib.so, deploy_graph.json and deploy_param.params to apps/android_deploy/app/src/main/assets/ and set the TVM flavor in [MainActivity.java](https://github.com/dmlc/tvm/blob/master/apps/android_deploy/app/src/main/java/ml/dmlc/tvm/android/demo/MainActivity.java#L81). + +`CPU Version flavor` +``` + private static final boolean EXE_GPU = false; +``` + +`OpenCL Version flavor` +``` + private static final boolean EXE_GPU = true; +``` + + +Install the compiled Android application on your phone and enjoy the image classifier demo using the extraction model. + +You can define your own TVM operators and deploy them via this demo application on your Android device to find the most optimized TVM schedule. diff --git a/apps/android_deploy/app/.gitignore b/apps/android_deploy/app/.gitignore new file mode 100644 index 000000000000..796b96d1c402 --- /dev/null +++ b/apps/android_deploy/app/.gitignore @@ -0,0 +1 @@ +/build diff --git a/apps/android_deploy/app/build.gradle b/apps/android_deploy/app/build.gradle new file mode 100644 index 000000000000..6790308a9ec4 --- /dev/null +++ b/apps/android_deploy/app/build.gradle @@ -0,0 +1,56 @@ +// import DownloadModels task +project.ext.ASSET_DIR = projectDir.toString() + '/src/main/assets' +project.ext.TMP_DIR = project.buildDir.toString() + '/downloads' + +// Download default models (darknet framework extraction model, compiled version); +// if you wish to use your own models then place them in the "assets" directory +// and comment out this line.
+apply from: "download-models.gradle" + +apply plugin: 'com.android.application' + +task buildJni(type: Exec, description: 'Build JNI libs') { + commandLine 'sh', 'src/main/jni/build.sh' +} + +tasks.withType(JavaCompile) { + compileTask -> compileTask.dependsOn buildJni +} + +android { + compileSdkVersion 26 + buildToolsVersion "26.0.1" + defaultConfig { + applicationId "ml.dmlc.tvm.android.demo" + minSdkVersion 17 + targetSdkVersion 26 + versionCode 1 + versionName "1.0" + testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner" + } + buildTypes { + release { + minifyEnabled false + proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' + } + } + sourceSets { + main { + jni.srcDirs = [] + jniLibs.srcDirs = ['src/main/libs'] + assets.srcDirs = [project.ext.ASSET_DIR] + } + } +} + +dependencies { + compile fileTree(dir: 'libs', include: ['*.jar']) + androidTestCompile('com.android.support.test.espresso:espresso-core:2.2.2', { + exclude group: 'com.android.support', module: 'support-annotations' + }) + compile 'com.android.support:appcompat-v7:26.0.1' + compile 'com.android.support.constraint:constraint-layout:1.0.2' + compile 'com.android.support:design:26.0.1' + compile 'ml.dmlc.tvm:tvm4j-core:0.0.1-SNAPSHOT' + testCompile 'junit:junit:4.12' +} diff --git a/apps/android_deploy/app/download-models.gradle b/apps/android_deploy/app/download-models.gradle new file mode 100644 index 000000000000..5b0509fbca2b --- /dev/null +++ b/apps/android_deploy/app/download-models.gradle @@ -0,0 +1,64 @@ +/* + * download-models.gradle + * Downloads model files from ${MODEL_URL} into application's asset folder + * Input: + * project.ext.TMP_DIR: absolute path to hold downloaded zip files + * project.ext.ASSET_DIR: absolute path to save unzipped model files + * Output: + * 3 model files will be downloaded into given folder of ext.ASSET_DIR + */ +// hard coded model files +def models = ['extraction.zip'] + +// Root URL for model archives +def MODEL_URL = 'https://github.com/PariksheetPinjari909/TVM_models/blob/master/extraction_model' +buildscript { + repositories { + jcenter() + } + dependencies { + classpath 'de.undercouch:gradle-download-task:3.2.0' + } +} + +import de.undercouch.gradle.tasks.download.Download +task downloadFile(type: Download){ + for (f in models) { + src "${MODEL_URL}/" + f + "?raw=true" + dest new File(project.ext.TMP_DIR + "/" + f) + } + overwrite true +} + +task extractModels(type: Copy) { + def needDownload = false + for (f in models) { + def localFile = f.split("/")[-1] + if (!(new File(project.ext.TMP_DIR + '/' + localFile)).exists()) { + needDownload = true + } + } + + if (needDownload) { + dependsOn downloadFile + } + + for (f in models) { + def localFile = f.split("/")[-1] + from zipTree(project.ext.TMP_DIR + '/' + localFile) + } + + into file(project.ext.ASSET_DIR) + fileMode 0644 + exclude '**/LICENSE' +} + +tasks.whenTaskAdded { task -> + if (task.name == 'assembleDebug') { + task.dependsOn 'extractModels' + } + if (task.name == 'assembleRelease') { + task.dependsOn 'extractModels' + } +} + diff --git a/apps/android_deploy/app/src/main/AndroidManifest.xml b/apps/android_deploy/app/src/main/AndroidManifest.xml new file mode 100644 index 000000000000..bac82ee90faa --- /dev/null +++ b/apps/android_deploy/app/src/main/AndroidManifest.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/apps/android_deploy/app/src/main/java/ml/dmlc/tvm/android/demo/MainActivity.java 
b/apps/android_deploy/app/src/main/java/ml/dmlc/tvm/android/demo/MainActivity.java new file mode 100644 index 000000000000..f3cdefe1c2ff --- /dev/null +++ b/apps/android_deploy/app/src/main/java/ml/dmlc/tvm/android/demo/MainActivity.java @@ -0,0 +1,633 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ml.dmlc.tvm.android.demo; + +import android.Manifest; +import android.content.Intent; +import android.content.pm.PackageManager; +import android.content.res.AssetManager; +import android.app.AlertDialog; +import android.app.ProgressDialog; +import android.content.DialogInterface; +import android.graphics.Bitmap; +import android.graphics.BitmapFactory; +import android.graphics.Canvas; +import android.graphics.Matrix; +import android.net.Uri; +import android.os.AsyncTask; +import android.os.Build; +import android.os.Bundle; +import android.os.Environment; +import android.os.SystemClock; +import android.provider.MediaStore; +import android.support.v4.content.FileProvider; +import android.support.v7.app.AppCompatActivity; +import android.support.v7.widget.Toolbar; +import android.util.Log; +import android.view.View; +import android.widget.ImageView; +import android.widget.TextView; +import android.widget.Toast; + +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileOutputStream; +import java.io.InputStream; +import java.io.IOException; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.Vector; + +import ml.dmlc.tvm.Function; +import ml.dmlc.tvm.Module; +import ml.dmlc.tvm.NDArray; +import ml.dmlc.tvm.TVMContext; +import ml.dmlc.tvm.TVMValue; +import ml.dmlc.tvm.TVMType; + +public class MainActivity extends AppCompatActivity { + private static final String TAG = MainActivity.class.getSimpleName(); + + private static final int PERMISSIONS_REQUEST = 100; + private static final int PICTURE_FROM_GALLERY = 101; + private static final int PICTURE_FROM_CAMERA = 102; + private static final int IMAGE_PREVIEW_WIDTH = 960; + private static final int IMAGE_PREVIEW_HEIGHT = 720; + + // TVM constants + private static final int OUTPUT_INDEX = 0; + private static final int IMG_CHANNEL = 3; + private static final String INPUT_NAME = "data"; + + // Configuration values for extraction model. Note that the graph, lib and params is not + // included with TVM and must be manually placed in the assets/ directory by the user. + // Graphs and models downloaded from https://github.com/pjreddie/darknet/blob/ may be + // converted e.g. via define_and_compile_model.py. 
+    private static final boolean EXE_GPU = false;
+    private static final int MODEL_INPUT_SIZE = 224;
+    private static final String MODEL_CL_LIB_FILE = "file:///android_asset/deploy_lib_opencl.so";
+    private static final String MODEL_CPU_LIB_FILE = "file:///android_asset/deploy_lib_cpu.so";
+    private static final String MODEL_GRAPH_FILE = "file:///android_asset/deploy_graph.json";
+    private static final String MODEL_PARAM_FILE = "file:///android_asset/deploy_param.params";
+    private static final String MODEL_LABEL_FILE = "file:///android_asset/imagenet.shortnames.list";
+
+    private Uri mCameraImageUri;
+    private ImageView mImageView;
+    private TextView mResultView;
+    private AssetManager assetManager;
+    private Module graphRuntimeModule;
+    private Vector<String> labels = new Vector<String>();
+
+    @Override
+    protected void onCreate(Bundle savedInstanceState) {
+        super.onCreate(savedInstanceState);
+        setContentView(R.layout.activity_main);
+        Toolbar toolbar = findViewById(R.id.toolbar);
+        setSupportActionBar(toolbar);
+        assetManager = getAssets();
+
+        mImageView = (ImageView) findViewById(R.id.imageView);
+        mResultView = (TextView) findViewById(R.id.resultTextView);
+        findViewById(R.id.btnPickImage).setOnClickListener(new View.OnClickListener() {
+            @Override
+            public void onClick(View v) {
+                showPictureDialog();
+            }
+        });
+
+        if (hasPermission()) {
+            // instantiate the TVM runtime and set up the environment in the background
+            // once the application starts
+            new LoadModleAsyncTask().execute();
+        } else {
+            requestPermission();
+        }
+    }
+
+    /*
+        Load the precompiled model into the TVM graph runtime and initialize the system.
+    */
+    private class LoadModleAsyncTask extends AsyncTask<Void, Void, Integer> {
+        ProgressDialog dialog = new ProgressDialog(MainActivity.this);
+
+        @Override
+        protected Integer doInBackground(Void... args) {
+
+            // load the synset (label) names
+            String labelFilename = MODEL_LABEL_FILE.split("file:///android_asset/")[1];
+            Log.i(TAG, "Reading synset name from: " + labelFilename);
+            try {
+                String labelsContent = new String(getBytesFromFile(assetManager, labelFilename));
+                for (String line : labelsContent.split("\\r?\\n")) {
+                    labels.add(line);
+                }
+            } catch (IOException e) {
+                Log.e(TAG, "Problem reading synset name file! " + e);
+                return -1; // failure
+            }
+
+            // load the JSON graph
+            String modelGraph = null;
+            String graphFilename = MODEL_GRAPH_FILE.split("file:///android_asset/")[1];
+            Log.i(TAG, "Reading json graph from: " + graphFilename);
+            try {
+                modelGraph = new String(getBytesFromFile(assetManager, graphFilename));
+            } catch (IOException e) {
+                Log.e(TAG, "Problem reading json graph file! " + e);
+                return -1; // failure
+            }
+
+            // copy the compiled TVM library into the application cache folder
+            String libCacheFilePath = null;
+            String libFilename = EXE_GPU ? MODEL_CL_LIB_FILE.split("file:///android_asset/")[1] :
+                    MODEL_CPU_LIB_FILE.split("file:///android_asset/")[1];
+            Log.i(TAG, "Copying compiled library to cache folder");
+            try {
+                libCacheFilePath = getTempLibFilePath(libFilename);
+                byte[] modelLibByte = getBytesFromFile(assetManager, libFilename);
+                FileOutputStream fos = new FileOutputStream(libCacheFilePath);
+                fos.write(modelLibByte);
+                fos.close();
+            } catch (IOException e) {
+                Log.e(TAG, "Problem copying compiled library! " + e);
+                return -1; // failure
+            }
+
+            // load the parameters
+            byte[] modelParams = null;
+            String paramFilename = MODEL_PARAM_FILE.split("file:///android_asset/")[1];
+            try {
+                modelParams = getBytesFromFile(assetManager, paramFilename);
+            } catch (IOException e) {
+                Log.e(TAG, "Problem reading params file! " + e);
+                return -1; // failure
+            }
+
+            // create the Java TVM context
+            TVMContext tvmCtx = EXE_GPU ? TVMContext.opencl() : TVMContext.cpu();
+
+            // TVM module holding the compiled functions
+            Module modelLib = Module.load(libCacheFilePath);
+
+            // get the global function that creates the graph runtime
+            Function runtimeCreFun = Function.getFunction("tvm.graph_runtime.create");
+            TVMValue runtimeCreFunRes = runtimeCreFun.pushArg(modelGraph)
+                    .pushArg(modelLib)
+                    .pushArg(tvmCtx.deviceType)
+                    .pushArg(tvmCtx.deviceId)
+                    .invoke();
+            graphRuntimeModule = runtimeCreFunRes.asModule();
+
+            // get the function from the module (load parameters)
+            Function loadParamFunc = graphRuntimeModule.getFunction("load_params");
+            loadParamFunc.pushArg(modelParams).invoke();
+
+            // release local TVM handles
+            modelLib.release();
+            loadParamFunc.release();
+            runtimeCreFun.release();
+
+            return 0; // success
+        }
+
+        @Override
+        protected void onPreExecute() {
+            dialog.setCancelable(false);
+            dialog.setMessage("Loading Model...");
+            dialog.show();
+            super.onPreExecute();
+        }
+
+        @Override
+        protected void onPostExecute(Integer status) {
+            if (dialog != null && dialog.isShowing()) {
+                dialog.dismiss();
+            }
+            if (status != 0) {
+                showDialog("Error", "Failed to initialize the model; check the compiled model");
+            }
+        }
+    }
+
+    /*
+        Run prediction on the decoded input bitmap using the TVM graph runtime.
+    */
+    private class ModelRunAsyncTask extends AsyncTask<Bitmap, Void, Integer> {
+        ProgressDialog dialog = new ProgressDialog(MainActivity.this);
+
+        @Override
+        protected Integer doInBackground(Bitmap... bitmaps) {
+            if (null != graphRuntimeModule) {
+                int count = bitmaps.length;
+                for (int i = 0; i < count; i++) {
+                    long processingTimeMs = SystemClock.uptimeMillis();
+                    Log.i(TAG, "Decode JPEG image content");
+
+                    // extract the JPEG content
+                    ByteArrayOutputStream stream = new ByteArrayOutputStream();
+                    bitmaps[i].compress(Bitmap.CompressFormat.JPEG, 100, stream);
+                    byte[] byteArray = stream.toByteArray();
+                    Bitmap imageBitmap = BitmapFactory.decodeByteArray(byteArray, 0, byteArray.length);
+
+                    // crop the input image at the centre to the model input size.
+                    // Note for production deployments: resize the image to the model
+                    // input size instead of cropping, so that no image content is lost.
+                    Bitmap cropImageBitmap = Bitmap.createBitmap(MODEL_INPUT_SIZE, MODEL_INPUT_SIZE, Bitmap.Config.ARGB_8888);
+                    Matrix frameToCropTransform = getTransformationMatrix(imageBitmap.getWidth(), imageBitmap.getHeight(),
+                            MODEL_INPUT_SIZE, MODEL_INPUT_SIZE, 0, true);
+                    Canvas canvas = new Canvas(cropImageBitmap);
+                    canvas.drawBitmap(imageBitmap, frameToCropTransform, null);
+
+                    // image pixel int values
+                    int[] pixelValues = new int[MODEL_INPUT_SIZE * MODEL_INPUT_SIZE];
+                    // image RGB float values
+                    float[] imgRgbValues = new float[MODEL_INPUT_SIZE * MODEL_INPUT_SIZE * IMG_CHANNEL];
+                    // transposed image RGB float values
+                    float[] imgRgbTranValues = new float[MODEL_INPUT_SIZE * MODEL_INPUT_SIZE * IMG_CHANNEL];
+
+                    // convert the 0-255 integer pixel values to normalized floats
+                    cropImageBitmap.getPixels(pixelValues, 0, MODEL_INPUT_SIZE, 0, 0, MODEL_INPUT_SIZE, MODEL_INPUT_SIZE);
+                    for (int j = 0; j < pixelValues.length; ++j) {
+                        imgRgbValues[j * 3 + 0] = ((pixelValues[j] >> 16) & 0xFF) / 255.0f;
+                        imgRgbValues[j * 3 + 1] = ((pixelValues[j] >> 8) & 0xFF) / 255.0f;
+                        imgRgbValues[j * 3 + 2] = (pixelValues[j] & 0xFF) / 255.0f;
+                    }
+
+                    // transpose the RGB data into the layout expected by the model
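+                    // The loop below converts the interleaved HWC layout filled above
+                    // (channel k of pixel j sits at index j * 3 + k) into the planar
+                    // CHW layout of the NCHW model input: the same value is written to
+                    // index k * MODEL_INPUT_SIZE * MODEL_INPUT_SIZE + j, i.e. a full
+                    // R plane, then G, then B.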
+                    for (int k = 0; k < IMG_CHANNEL; ++k) {
+                        for (int l = 0; l < MODEL_INPUT_SIZE; ++l) {
+                            for (int m = 0; m < MODEL_INPUT_SIZE; ++m) {
+                                int dst_index = m + MODEL_INPUT_SIZE * l + MODEL_INPUT_SIZE * MODEL_INPUT_SIZE * k;
+                                int src_index = k + IMG_CHANNEL * m + IMG_CHANNEL * MODEL_INPUT_SIZE * l;
+                                imgRgbTranValues[dst_index] = imgRgbValues[src_index];
+                            }
+                        }
+                    }
+
+                    // get the function from the module (set input data)
+                    Log.i(TAG, "set input data");
+                    NDArray inputNdArray = NDArray.empty(new long[]{1, IMG_CHANNEL, MODEL_INPUT_SIZE, MODEL_INPUT_SIZE}, new TVMType("float32"));
+                    inputNdArray.copyFrom(imgRgbTranValues);
+                    Function setInputFunc = graphRuntimeModule.getFunction("set_input");
+                    setInputFunc.pushArg(INPUT_NAME).pushArg(inputNdArray).invoke();
+                    // release local TVM handles
+                    inputNdArray.release();
+                    setInputFunc.release();
+
+                    // get the function from the module (run it)
+                    Log.i(TAG, "run function on target");
+                    Function runFunc = graphRuntimeModule.getFunction("run");
+                    runFunc.invoke();
+                    // release local TVM handles
+                    runFunc.release();
+
+                    // get the function from the module (get output data)
+                    Log.i(TAG, "get output data");
+                    NDArray outputNdArray = NDArray.empty(new long[]{1000}, new TVMType("float32"));
+                    Function getOutputFunc = graphRuntimeModule.getFunction("get_output");
+                    getOutputFunc.pushArg(OUTPUT_INDEX).pushArg(outputNdArray).invoke();
+                    float[] output = outputNdArray.asFloatArray();
+                    // release local TVM handles
+                    outputNdArray.release();
+                    getOutputFunc.release();
+
+                    // display the result extracted from the output data
+                    if (null != output) {
+                        int maxPosition = -1;
+                        float maxValue = 0;
+                        for (int j = 0; j < output.length; ++j) {
+                            if (output[j] > maxValue) {
+                                maxValue = output[j];
+                                maxPosition = j;
+                            }
+                        }
+                        processingTimeMs = SystemClock.uptimeMillis() - processingTimeMs;
+                        String label = "Prediction Result : ";
+                        label += (maxPosition >= 0 && maxPosition < labels.size()) ? labels.get(maxPosition) : "unknown";
+                        label += "\nPrediction Time : " + processingTimeMs + "ms";
+                        mResultView.setText(label);
+                    }
+                    Log.i(TAG, "prediction finished");
+                }
+                return 0;
+            }
+            return -1;
+        }
+
+        @Override
+        protected void onPreExecute() {
+            dialog.setCancelable(false);
+            dialog.setMessage("Prediction running on image...");
+            dialog.show();
+            super.onPreExecute();
+        }
+
+        @Override
+        protected void onPostExecute(Integer status) {
+            if (dialog != null && dialog.isShowing()) {
+                dialog.dismiss();
+            }
+            if (status != 0) {
+                showDialog("Error", "Failed to run prediction on the image; GraphRuntime exception");
+            }
+        }
+    }
+
+    @Override
+    protected void onDestroy() {
+        // release the graph runtime module
+        if (null != graphRuntimeModule)
+            graphRuntimeModule.release();
+        super.onDestroy();
+    }
+
+    /**
+     * Read a file from assets and return its content as a byte array.
+     *
+     * @param assets The asset manager to be used to load assets.
+     * @param fileName The path of the file to read.
+ * @return byte[] file content + * @throws IOException + */ + private byte[] getBytesFromFile(AssetManager assets, String fileName) throws IOException { + InputStream is = assets.open(fileName); + int length = is.available(); + byte[] bytes = new byte[length]; + // Read in the bytes + int offset = 0; + int numRead = 0; + try { + while (offset < bytes.length + && (numRead = is.read(bytes, offset, bytes.length - offset)) >= 0) { + offset += numRead; + } + } finally { + is.close(); + } + // Ensure all the bytes have been read in + if (offset < bytes.length) { + throw new IOException("Could not completely read file " + fileName); + } + return bytes; + } + + /** + * Dialog show pick option for select image from Gallery or Camera. + */ + private void showPictureDialog(){ + AlertDialog.Builder pictureDialog = new AlertDialog.Builder(this); + pictureDialog.setTitle("Select Action"); + String[] pictureDialogItems = { + "Select photo from gallery", + "Capture photo from camera" }; + pictureDialog.setItems(pictureDialogItems, + new DialogInterface.OnClickListener() { + @Override + public void onClick(DialogInterface dialog, int which) { + switch (which) { + case 0: + choosePhotoFromGallery(); + break; + case 1: + takePhotoFromCamera(); + break; + } + } + }); + pictureDialog.show(); + } + + /** + * Request to pick image from Gallery. + */ + public void choosePhotoFromGallery() { + Intent galleryIntent = new Intent(Intent.ACTION_PICK, + android.provider.MediaStore.Images.Media.EXTERNAL_CONTENT_URI); + + startActivityForResult(galleryIntent, PICTURE_FROM_GALLERY); + } + + /** + * Request to capture image from Camera. + */ + private void takePhotoFromCamera() { + Intent intent = new Intent(android.provider.MediaStore.ACTION_IMAGE_CAPTURE); + + if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) { + mCameraImageUri = Uri.fromFile(createImageFile()); + } else { + File file = new File(createImageFile().getPath()); + mCameraImageUri = FileProvider.getUriForFile(getApplicationContext(), getApplicationContext().getPackageName() + ".provider", file); + } + + intent.putExtra(MediaStore.EXTRA_OUTPUT, mCameraImageUri); + startActivityForResult(intent, PICTURE_FROM_CAMERA); + } + + @Override + public void onActivityResult(int requestCode, int resultCode, Intent data) { + super.onActivityResult(requestCode, resultCode, data); + if (resultCode == this.RESULT_CANCELED) { + return; + } + Uri contentURI = null; + if (requestCode == PICTURE_FROM_GALLERY) { + if (data != null) { + contentURI = data.getData(); + } + } else if (requestCode == PICTURE_FROM_CAMERA) { + contentURI = mCameraImageUri; + } + if (null != contentURI) { + try { + Bitmap bitmap = MediaStore.Images.Media.getBitmap(this.getContentResolver(), contentURI); + Bitmap scaled = Bitmap.createScaledBitmap(bitmap, IMAGE_PREVIEW_HEIGHT, IMAGE_PREVIEW_WIDTH, true); + mImageView.setImageBitmap(scaled); + new ModelRunAsyncTask().execute(scaled); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + /** + * Get application cache path where to place compiled functions. + * + * @param fileName library file name. 
+ * @return String application cache folder path + * @throws IOException + */ + private final String getTempLibFilePath(String fileName) throws IOException { + File tempDir = File.createTempFile("tvm4j_demo_", ""); + if (!tempDir.delete() || !tempDir.mkdir()) { + throw new IOException("Couldn't create directory " + tempDir.getAbsolutePath()); + } + return (tempDir + File.separator + fileName); + } + + /** + * Create image file under storage where camera application save captured image. + * + * @return File image file under sdcard where camera can save image + */ + private File createImageFile() { + // Create an image file name + String timeStamp = new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date()); + String imageFileName = "JPEG_" + timeStamp + "_"; + File storageDir = Environment.getExternalStoragePublicDirectory( + Environment.DIRECTORY_PICTURES); + try { + File image = File.createTempFile( + imageFileName, // prefix + ".jpg", // suffix + storageDir // directory + ); + return image; + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } + + /** + * Show dialog to user. + * + * @param title dialog display title + * @param msg dialog display message + */ + private void showDialog(String title, String msg) { + AlertDialog.Builder builder = new AlertDialog.Builder(this); + builder.setTitle(title); + builder.setMessage(msg); + builder.setCancelable(true); + builder.setNeutralButton(android.R.string.ok, + new DialogInterface.OnClickListener() { + public void onClick(DialogInterface dialog, int id) { + dialog.cancel(); + finish(); + } + }); + builder.create().show(); + } + + @Override + public void onRequestPermissionsResult (final int requestCode, final String[] permissions, final int[] grantResults){ + if (requestCode == PERMISSIONS_REQUEST) { + if (grantResults.length > 0 + && grantResults[0] == PackageManager.PERMISSION_GRANTED + && grantResults[1] == PackageManager.PERMISSION_GRANTED) { + // instantiate tvm runtime and setup environment on background after application begin + new LoadModleAsyncTask().execute(); + } else { + requestPermission(); + } + } + } + + /** + * Whether application has required mandatory permissions to run. + */ + private boolean hasPermission() { + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) { + return checkSelfPermission(Manifest.permission.CAMERA) == PackageManager.PERMISSION_GRANTED && + checkSelfPermission(Manifest.permission.WRITE_EXTERNAL_STORAGE) == PackageManager.PERMISSION_GRANTED; + } else { + return true; + } + } + + /** + * Request required mandatory permission for application to run. + */ + private void requestPermission() { + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) { + if (shouldShowRequestPermissionRationale(Manifest.permission.CAMERA) || + shouldShowRequestPermissionRationale(Manifest.permission.WRITE_EXTERNAL_STORAGE)) { + Toast.makeText(this, + "Camera AND storage permission are required for this demo", Toast.LENGTH_LONG).show(); + } + requestPermissions(new String[] {Manifest.permission.CAMERA, Manifest.permission.WRITE_EXTERNAL_STORAGE}, PERMISSIONS_REQUEST); + } + } + + /** + * Returns a transformation matrix from one reference frame into another. + * Handles cropping (if maintaining aspect ratio is desired) and rotation. + * + * @param srcWidth Width of source frame. + * @param srcHeight Height of source frame. + * @param dstWidth Width of destination frame. + * @param dstHeight Height of destination frame. + * @param applyRotation Amount of rotation to apply from one frame to another. 
+ * Must be a multiple of 90. + * @param maintainAspectRatio If true, will ensure that scaling in x and y remains constant, + * cropping the image if necessary. + * @return The transformation fulfilling the desired requirements. + */ + public static Matrix getTransformationMatrix( + final int srcWidth, + final int srcHeight, + final int dstWidth, + final int dstHeight, + final int applyRotation, + final boolean maintainAspectRatio) { + final Matrix matrix = new Matrix(); + + if (applyRotation != 0) { + if (applyRotation % 90 != 0) { + Log.w(TAG, "Rotation of %d % 90 != 0 " + applyRotation); + } + + // Translate so center of image is at origin. + matrix.postTranslate(-srcWidth / 2.0f, -srcHeight / 2.0f); + + // Rotate around origin. + matrix.postRotate(applyRotation); + } + + // Account for the already applied rotation, if any, and then determine how + // much scaling is needed for each axis. + final boolean transpose = (Math.abs(applyRotation) + 90) % 180 == 0; + + final int inWidth = transpose ? srcHeight : srcWidth; + final int inHeight = transpose ? srcWidth : srcHeight; + + // Apply scaling if necessary. + if (inWidth != dstWidth || inHeight != dstHeight) { + final float scaleFactorX = dstWidth / (float) inWidth; + final float scaleFactorY = dstHeight / (float) inHeight; + + if (maintainAspectRatio) { + // Scale by minimum factor so that dst is filled completely while + // maintaining the aspect ratio. Some image may fall off the edge. + final float scaleFactor = Math.max(scaleFactorX, scaleFactorY); + matrix.postScale(scaleFactor, scaleFactor); + } else { + // Scale exactly to fill dst from src. + matrix.postScale(scaleFactorX, scaleFactorY); + } + } + + if (applyRotation != 0) { + // Translate back from origin centered reference to destination frame. + matrix.postTranslate(dstWidth / 2.0f, dstHeight / 2.0f); + } + + return matrix; + } +} \ No newline at end of file diff --git a/apps/android_deploy/app/src/main/jni/Android.mk b/apps/android_deploy/app/src/main/jni/Android.mk new file mode 100644 index 000000000000..a99517f90332 --- /dev/null +++ b/apps/android_deploy/app/src/main/jni/Android.mk @@ -0,0 +1,42 @@ +LOCAL_PATH := $(call my-dir) +MY_PATH := $(LOCAL_PATH) + +include $(CLEAR_VARS) + +LOCAL_PATH := $(MY_PATH) +ROOT_PATH := $(MY_PATH)/../../../../../.. 
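+# ROOT_PATH is expected to resolve to the TVM repository root (six directories up),
+# since the LOCAL_C_INCLUDES entries below pull headers from include/, dlpack/,
+# dmlc-core/, HalideIR/ and topi/ relative to it.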
+
+ifndef config
+  ifneq ("$(wildcard ./config.mk)","")
+    config ?= config.mk
+  else
+    config ?= make/config.mk
+  endif
+endif
+
+include $(config)
+
+LOCAL_SRC_FILES := ml_dmlc_tvm_native_c_api.cc
+LOCAL_LDFLAGS := -L$(SYSROOT)/usr/lib/ -llog
+
+LOCAL_C_INCLUDES := $(ROOT_PATH)/include \
+                    $(ROOT_PATH)/dlpack/include \
+                    $(ROOT_PATH)/dmlc-core/include \
+                    $(ROOT_PATH)/HalideIR/src \
+                    $(ROOT_PATH)/topi/include
+
+LOCAL_MODULE = tvm4j_runtime_packed
+
+LOCAL_CPP_FEATURES += exceptions
+LOCAL_LDLIBS += -latomic
+LOCAL_ARM_MODE := arm
+
+ifdef ADD_C_INCLUDES
+  LOCAL_C_INCLUDES += $(ADD_C_INCLUDES)
+endif
+
+ifdef ADD_LDLIBS
+  LOCAL_LDLIBS += $(ADD_LDLIBS)
+endif
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/apps/android_deploy/app/src/main/jni/Application.mk b/apps/android_deploy/app/src/main/jni/Application.mk
new file mode 100644
index 000000000000..8e81a8d6a81c
--- /dev/null
+++ b/apps/android_deploy/app/src/main/jni/Application.mk
@@ -0,0 +1,16 @@
+ifndef config
+  ifneq ("$(wildcard ./config.mk)","")
+    config ?= config.mk
+  else
+    config ?= make/config.mk
+  endif
+endif
+
+include $(config)
+
+APP_STL := c++_static
+
+APP_CPPFLAGS += -DDMLC_LOG_STACK_TRACE=0 -DTVM4J_ANDROID=1 -std=c++11 -Oz -frtti
+ifeq ($(USE_OPENCL), 1)
+  APP_CPPFLAGS += -DTVM_OPENCL_RUNTIME=1
+endif
diff --git a/apps/android_deploy/app/src/main/jni/build.sh b/apps/android_deploy/app/src/main/jni/build.sh
new file mode 100644
index 000000000000..1ca38ae5bd12
--- /dev/null
+++ b/apps/android_deploy/app/src/main/jni/build.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+PATH="$PATH:/usr/local/bin"
+CURR_DIR=$(cd `dirname $0`; pwd)
+ROOT_DIR="$CURR_DIR/../../../../../.."
+javah -o $CURR_DIR/ml_dmlc_tvm_native_c_api.h -cp "$ROOT_DIR/jvm/core/target/*" ml.dmlc.tvm.LibInfo || exit -1
+cp -f $ROOT_DIR/jvm/native/src/main/native/ml_dmlc_tvm_native_c_api.cc $CURR_DIR/ || exit -1
+cp -f $ROOT_DIR/jvm/native/src/main/native/jni_helper_func.h $CURR_DIR/ || exit -1
+rm -rf $CURR_DIR/../libs
+ndk-build --directory=$CURR_DIR
diff --git a/apps/android_deploy/app/src/main/jni/make/config.mk b/apps/android_deploy/app/src/main/jni/make/config.mk
new file mode 100644
index 000000000000..8d6f5a56dd5b
--- /dev/null
+++ b/apps/android_deploy/app/src/main/jni/make/config.mk
@@ -0,0 +1,26 @@
+#-------------------------------------------------------------------------------
+# Template configuration for compiling
+#
+# If you want to change the configuration, please use the following
+# steps. Assume you are in the root directory. First copy this
+# file so that any local changes will be ignored by git
+#
+# cp make/config.mk .
+#
+# Next modify the corresponding entries, and then compile by
+#
+# ./build.sh
+#
+#-------------------------------------------------------------------------------
+APP_ABI = all
+
+APP_PLATFORM = android-17
+
+# whether to enable OpenCL during compilation
+USE_OPENCL = 0
+
+# the additional include headers you want to add, e.g., SDK_PATH/adrenosdk/Development/Inc
+ADD_C_INCLUDES =
+
+# the additional link libs you want to add, e.g., ANDROID_LIB_PATH/libOpenCL.so
+ADD_LDLIBS =
diff --git a/apps/android_deploy/app/src/main/jni/tvm_runtime.h b/apps/android_deploy/app/src/main/jni/tvm_runtime.h
new file mode 100644
index 000000000000..0b5f4ee67237
--- /dev/null
+++ b/apps/android_deploy/app/src/main/jni/tvm_runtime.h
@@ -0,0 +1,27 @@
+/*!
+ * Copyright (c) 2018 by Contributors + * \file tvm_runtime.h + * \brief Pack all tvm runtime source files + */ +#include +#include + +#include "../src/runtime/c_runtime_api.cc" +#include "../src/runtime/cpu_device_api.cc" +#include "../src/runtime/workspace_pool.cc" +#include "../src/runtime/module_util.cc" +#include "../src/runtime/system_lib_module.cc" +#include "../src/runtime/module.cc" +#include "../src/runtime/registry.cc" +#include "../src/runtime/file_util.cc" +#include "../src/runtime/dso_module.cc" +#include "../src/runtime/thread_pool.cc" +#include "../src/runtime/threading_backend.cc" +#include "../src/runtime/ndarray.cc" + +#include "../src/runtime/graph/graph_runtime.cc" + +#ifdef TVM_OPENCL_RUNTIME +#include "../src/runtime/opencl/opencl_device_api.cc" +#include "../src/runtime/opencl/opencl_module.cc" +#endif diff --git a/apps/android_deploy/app/src/main/res/layout/activity_main.xml b/apps/android_deploy/app/src/main/res/layout/activity_main.xml new file mode 100644 index 000000000000..b16a5c2548a6 --- /dev/null +++ b/apps/android_deploy/app/src/main/res/layout/activity_main.xml @@ -0,0 +1,27 @@ + + + + + + + + + + + + + diff --git a/apps/android_deploy/app/src/main/res/layout/content_main.xml b/apps/android_deploy/app/src/main/res/layout/content_main.xml new file mode 100644 index 000000000000..34de93843645 --- /dev/null +++ b/apps/android_deploy/app/src/main/res/layout/content_main.xml @@ -0,0 +1,46 @@ + + + + + +