Commit 6c0e9c8

Merge branch 'master' into master

eric-haibin-lin authored Jun 19, 2017
2 parents 24105cf + 423490c
Showing 110 changed files with 8,333 additions and 708 deletions.
11 changes: 11 additions & 0 deletions CMakeLists.txt
@@ -13,6 +13,7 @@ mxnet_option(USE_OPENCV "Build with OpenCV support" ON)
mxnet_option(USE_OPENMP "Build with Openmp support" ON)
mxnet_option(USE_CUDA "Build with CUDA support" ON)
mxnet_option(USE_CUDNN "Build with cudnn support" ON) # one could set CUDNN_ROOT for search path
mxnet_option(USE_LAPACK "Build with lapack support" ON)
mxnet_option(USE_MKL_IF_AVAILABLE "Use MKL if found" ON)
mxnet_option(USE_MKLML_MKL "Use MKLML variant of MKL (if MKL found)" ON IF USE_MKL_IF_AVAILABLE AND UNIX AND (NOT APPLE))
mxnet_option(USE_MKL_EXPERIMENTAL "Use experimental MKL (if MKL enabled and found)" OFF)
@@ -199,6 +200,16 @@ if(USE_OPENMP)
endif()
endif()

if(USE_LAPACK)
add_definitions(-DMXNET_USE_LAPACK=1)
else(USE_LAPACK)
# Workaround for Windows until using new Jenkinsfile.
if(USE_BLAS STREQUAL "open")
add_definitions(-DMXNET_USE_LAPACK=1)
endif()
endif()


if(UNIX)
find_library(RTLIB rt)
if(RTLIB)
5 changes: 5 additions & 0 deletions CONTRIBUTORS.md
@@ -137,3 +137,8 @@ List of Contributors
* [Roshani Nagmote](https://github.com/Roshrini)
* [Chetan Khatri](https://github.com/chetkhatri/)
* [James Liu](https://github.com/jamesliu/)
* [Yuwen Xiong](https://github.com/Orpine/)
* [Haozhi Qi](https://github.com/Oh233/)
* [Yi Li](https://github.com/liyi14/)
* [Guodong Zhang](https://github.com/gd-zhang/)
* [Xizhou Zhu](https://github.com/einsiedler0408/)
4 changes: 2 additions & 2 deletions Jenkinsfile
@@ -157,7 +157,7 @@ USE_CPP_PACKAGE=1 \
init_git_win()
bat """mkdir build_vc14_cpu
cd build_vc14_cpu
cmake -G \"Visual Studio 14 2015 Win64\" -DUSE_CUDA=0 -DUSE_CUDNN=0 -DUSE_NVRTC=0 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_PROFILER=1 -DUSE_BLAS=open -DUSE_DIST_KVSTORE=0 ${env.WORKSPACE}"""
cmake -G \"Visual Studio 14 2015 Win64\" -DUSE_CUDA=0 -DUSE_CUDNN=0 -DUSE_NVRTC=0 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_PROFILER=1 -DUSE_BLAS=open -DUSE_LAPACK=1 -DUSE_DIST_KVSTORE=0 ${env.WORKSPACE}"""
bat 'C:\\mxnet\\build_vc14_cpu.bat'

bat '''rmdir /s/q pkg_vc14_gpu
@@ -188,7 +188,7 @@ del /Q *.7z
bat """mkdir build_vc14_gpu
call "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\bin\\x86_amd64\\vcvarsx86_amd64.bat"
cd build_vc14_gpu
cmake -G \"NMake Makefiles JOM\" -DUSE_CUDA=1 -DUSE_CUDNN=1 -DUSE_NVRTC=1 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_PROFILER=1 -DUSE_BLAS=open -DUSE_DIST_KVSTORE=0 -DCUDA_ARCH_NAME=All -DCMAKE_CXX_FLAGS_RELEASE="/FS /MD /O2 /Ob2 /DNDEBUG" -DCMAKE_BUILD_TYPE=Release ${env.WORKSPACE}"""
cmake -G \"NMake Makefiles JOM\" -DUSE_CUDA=1 -DUSE_CUDNN=1 -DUSE_NVRTC=1 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_PROFILER=1 -DUSE_BLAS=open -DUSE_LAPACK=1 -DUSE_DIST_KVSTORE=0 -DCUDA_ARCH_NAME=All -DCMAKE_CXX_FLAGS_RELEASE="/FS /MD /O2 /Ob2 /DNDEBUG" -DCMAKE_BUILD_TYPE=Release ${env.WORKSPACE}"""
bat 'C:\\mxnet\\build_vc14_gpu.bat'
bat '''rmdir /s/q pkg_vc14_gpu
mkdir pkg_vc14_gpu\\lib
29 changes: 29 additions & 0 deletions Makefile
@@ -106,6 +106,35 @@ else
endif
endif

# verify existence of a separate lapack library when using blas/openblas/atlas
# switch off lapack support in case it can't be found
# issues covered by this check:
# - for Ubuntu 14.04 or lower, lapack is not automatically installed with openblas
# - for Ubuntu, installing atlas will not automatically install the atlas-provided lapack library
# lapack is switched off silently rather than failing the build, to preserve backward compatibility
ifeq ($(USE_LAPACK), 1)
ifeq ($(USE_BLAS),$(filter $(USE_BLAS),blas openblas atlas))
ifeq (,$(wildcard /lib/liblapack.a))
ifeq (,$(wildcard /usr/lib/liblapack.a))
ifeq (,$(wildcard $(USE_LAPACK_PATH)/liblapack.a))
USE_LAPACK = 0
endif
endif
endif
endif
endif

# lapack settings.
ifeq ($(USE_LAPACK), 1)
ifneq ($(USE_LAPACK_PATH), )
LDFLAGS += -L$(USE_LAPACK_PATH)
endif
ifeq ($(USE_BLAS),$(filter $(USE_BLAS),blas openblas atlas))
LDFLAGS += -llapack
endif
CFLAGS += -DMXNET_USE_LAPACK
endif

ifeq ($(USE_CUDNN), 1)
CFLAGS += -DMSHADOW_USE_CUDNN=1
LDFLAGS += -lcudnn
4 changes: 2 additions & 2 deletions R-package/R/context.R
@@ -1,6 +1,6 @@
# Initialize the global context
init.context.default <- function() {
assign("mx.ctx.internal.default.value", mx.cpu(), envir = .MXNetEnv)
.MXNetEnv[["mx.ctx.internal.default.value"]] <- mx.cpu()
}

#' Set/Get default context for array creation.
@@ -11,7 +11,7 @@ init.context.default <- function() {
#' @export
mx.ctx.default <- function(new = NULL) {
if (!is.null(new)) {
assign("mx.ctx.internal.default.value", new, envir = .MXNetEnv)
.MXNetEnv[["mx.ctx.internal.default.value"]] <- new
}
return (.MXNetEnv$mx.ctx.internal.default.value)
}
7 changes: 0 additions & 7 deletions R-package/R/model.R
@@ -503,14 +503,7 @@ predict.MXFeedForwardModel <- function(model, X, ctx=NULL, array.batch.size=128,
X$reset()
if (!X$iter.next()) stop("Cannot predict on empty iterator")
dlist = X$value()
arg_names <- arguments(model$symbol)
tmp <- unlist(lapply(arg_names, function(a) {
mxnet:::mx.util.str.endswith(a, "label")
}))
label_name <- arg_names[tmp]
arg_lst <- list(symbol = model$symbol, ctx = ctx, data = dim(dlist$data), grad.req="null")
arg_lst[[label_name]] <- dim(dlist$label)


pexec <- do.call(mx.simple.bind, arg_lst)
mx.exec.update.arg.arrays(pexec, model$arg.params, match.name=TRUE)
48 changes: 24 additions & 24 deletions R-package/R/optimizer.R
@@ -32,12 +32,12 @@ mx.opt.sgd <- function(learning.rate,
lr <- sgd$lr
## update count
indexKey <- paste0('ik', index)
if (!exists(envir = sgd, x = indexKey)){
assign(x = indexKey, value = 0, envir = sgd)
if (!exists(envir = sgd, x = indexKey, inherits = FALSE)){
sgd[[indexKey]] <- 0
} else {
indexValue <- get(envir = sgd, x = indexKey)
assign(x = indexKey, value = indexValue + 1, envir = sgd)
sgd$num_update <- max(sgd$num_update, get(envir = sgd, x = indexKey))
indexValue <- sgd[[indexKey]]
sgd[[indexKey]] <- indexValue + 1
sgd$num_update <- max(sgd$num_update, sgd[[indexKey]])
}
}
grad <- grad * rescale.grad
@@ -114,12 +114,12 @@ mx.opt.rmsprop <- function(learning.rate=0.002,
lr <- rmsprop$lr
## update count
indexKey <- paste0('ik', index)
if (!exists(envir = rmsprop, x = indexKey)){
assign(x = indexKey, value = 0, envir = rmsprop)
if (!exists(envir = rmsprop, x = indexKey, inherits = FALSE)){
rmsprop[[indexKey]] <- 0
} else {
indexValue <- get(envir = rmsprop, x = indexKey)
assign(x = indexKey, value = indexValue + 1, envir = rmsprop)
rmsprop$num_update <- max(rmsprop$num_update, get(envir = rmsprop, x = indexKey))
indexValue <- rmsprop[[indexKey]]
rmsprop[[indexKey]] <- indexValue + 1
rmsprop$num_update <- max(rmsprop$num_update, rmsprop[[indexKey]])
}
}
grad <- grad * rescale.grad
@@ -201,23 +201,23 @@ mx.opt.adam <- function(learning.rate=0.001,
lr <- adam$lr
## update count
indexKey <- paste0('ik', index)
if (!exists(envir = adam, x = indexKey)){
assign(x = indexKey, value = 0, envir = adam)
if (!exists(envir = adam, x = indexKey, inherits = FALSE)){
adam[[indexKey]] <- 0
} else {
indexValue <- get(envir = adam, x = indexKey)
assign(x = indexKey, value = indexValue + 1, envir = adam)
adam$num_update <- max(adam$num_update, get(envir = adam, x = indexKey))
indexValue <- adam[[indexKey]]
adam[[indexKey]] <- indexValue + 1
adam$num_update <- max(adam$num_update, adam[[indexKey]])
}
}

# increment time
time.key <- paste0('t', index)
if (!exists(envir = adam, x = time.key)){
assign(x = time.key, value = 0, envir = adam)
if (!exists(envir = adam, x = time.key, inherits = FALSE)){
adam[[time.key]] <- 0
}
t <- get(envir = adam, x = time.key)
t <- adam[[time.key]]
t <- t + 1
assign(x = time.key, value = t, envir = adam)
adam[[time.key]] <- t

mean <- state$mean
variance <- state$variance
@@ -297,12 +297,12 @@ mx.opt.adagrad <- function(learning.rate=0.05,
lr <- adagrad$lr
## update count
indexKey <- paste0('ik', index)
if (!exists(envir = adagrad, x = indexKey)){
assign(x = indexKey, value = 0, envir = adagrad)
if (!exists(envir = adagrad, x = indexKey, inherits = FALSE)){
adagrad[[indexKey]] <- 0
} else {
indexValue <- get(envir = adagrad, x = indexKey)
assign(x = indexKey, value = indexValue + 1, envir = adagrad)
adagrad$num_update <- max(adagrad$num_update, get(envir = adagrad, x = indexKey))
indexValue <- adagrad[[indexKey]]
adagrad[[indexKey]] <- indexValue + 1
adagrad$num_update <- max(adagrad$num_update, adagrad[[indexKey]])
}
}

2 changes: 1 addition & 1 deletion appveyor.yml
@@ -52,7 +52,7 @@ before_build:
set OpenCV_DIR=%APPVEYOR_BUILD_FOLDER%/%MXNET_OPENCV_DIR%/build
cmake .. -DOPENCV_DIR=%OpenCV_DIR% -DUSE_PROFILER=1 -DUSE_CUDA=0 -DUSE_CUDNN=0 -DUSE_NVRTC=0 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_BLAS=open -DUSE_DIST_KVSTORE=0 -G "Visual Studio 12 2013 Win64"
cmake .. -DOPENCV_DIR=%OpenCV_DIR% -DUSE_PROFILER=1 -DUSE_CUDA=0 -DUSE_CUDNN=0 -DUSE_NVRTC=0 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_BLAS=open -DUSE_LAPACK=1 -DUSE_DIST_KVSTORE=0 -G "Visual Studio 12 2013 Win64"
build_script:
- cmd: >-
6 changes: 3 additions & 3 deletions cpp-package/example/charRNN.cpp
@@ -115,7 +115,7 @@ Symbol LSTMUnroll(int num_lstm_layer, int sequence_length, int input_dim,

auto label = Symbol::Variable("softmax_label");
label = transpose(label);
label = Reshape(label, Shape(), false, Shape(-1), false); // -1: infer from graph
label = Reshape(label, Shape(), false, Shape(0), false); // infer from graph
auto sm = SoftmaxOutput("softmax", pred, label);
if (isTrain)
return sm;
@@ -141,7 +141,7 @@ Symbol LSTMWithBuiltInRNNOp(int num_lstm_layer, int sequence_length, int input_d
auto label = Symbol::Variable("softmax_label");
label = transpose(label);
label = Reshape(label, Shape(), false,
Shape(-1), false); // FullyConnected requires one dimension
Shape(0), false); // FullyConnected requires one dimension
if (!TIME_MAJOR && isTrain)
embed = SwapAxis(embed, 0, 1); // Change to time-major as cuDNN requires

@@ -151,7 +151,7 @@ Symbol LSTMWithBuiltInRNNOp(int num_lstm_layer, int sequence_length, int input_d
auto rnn_params = Symbol::Variable("LSTM_parameters"); // See explanations near RNNXavier class
auto rnn = RNN(embed, rnn_params, rnn_h_init, rnn_c_init, num_hidden, num_lstm_layer,
RNNMode::kLstm, false, dropout, !isTrain);
auto hidden = Reshape(rnn[0], Shape(), false, Shape(-1, num_hidden), false);
auto hidden = Reshape(rnn[0], Shape(), false, Shape(0, num_hidden), false);

auto cls_weight = Symbol::Variable("cls_weight");
auto cls_bias = Symbol::Variable("cls_bias");
2 changes: 1 addition & 1 deletion docker/install/cpp.sh
@@ -2,7 +2,7 @@
# libraries for building mxnet c++ core on ubuntu

apt-get update && apt-get install -y \
build-essential git libatlas-base-dev libopencv-dev \
build-essential git libatlas-base-dev libopencv-dev python-opencv \
libcurl4-openssl-dev libgtest-dev cmake wget unzip

cd /usr/src/gtest && cmake CMakeLists.txt && make && cp *.a /usr/lib
2 changes: 1 addition & 1 deletion docs/Dockerfile
@@ -5,7 +5,7 @@ MAINTAINER Mu Li <[email protected]>
# First, build MXNet binaries (ref mxnet/docker/cpu/Dockerfile)
#

RUN apt-get update && apt-get install -y build-essential git libopenblas-dev libopencv-dev
RUN apt-get update && apt-get install -y build-essential git libopenblas-dev liblapack-dev libopencv-dev
RUN git clone --recursive https://github.com/dmlc/mxnet/ && cd mxnet && \
cp make/config.mk . && \
echo "USE_BLAS=openblas" >>config.mk && \
18 changes: 18 additions & 0 deletions docs/_static/mxnet.css
@@ -849,6 +849,24 @@ dt {
border-bottom: solid #0079b2;
}

dl.method dt {
background: #f0f0f0;
border-bottom: solid #ccc;
}

dl.method dt code.descname {
color:#555;
}

dl.attribute dt {
background: #f0f0f0;
border-bottom: solid #ccc;
}

dl.attribute dt code.descname {
color:#555;
}

dt em {
font-weight: normal;
font-style: normal;
1 change: 1 addition & 0 deletions docs/api/python/ndarray.md
@@ -321,6 +321,7 @@ In the rest of this document, we first overview the methods provided by the
fix
floor
ceil
trunc
```
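
A quick illustration of the newly listed `trunc` operator, which rounds element-wise toward zero (the input values here are illustrative):

```python
import mxnet as mx

# trunc drops the fractional part, rounding toward zero.
x = mx.nd.array([-1.7, -0.5, 0.5, 1.7])
print(mx.nd.trunc(x).asnumpy())  # expected output: [-1. -0.  0.  1.]
```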


7 changes: 7 additions & 0 deletions docs/api/python/rnn.md
@@ -250,6 +250,13 @@ outputs, _ = fused_lstm_cell.unroll(length=sequence_length, \
of `FusedRNNCell` is twice the size specified by `num_hidden`.
```

When training a deep, complex model *on multiple GPUs*, it is recommended to stack
fused RNN cells (one layer per cell) instead of using a single cell with all layers.
The reason is that fused RNN cells do not mark gradients as ready until the
computation for the entire layer is complete. Breaking a multi-layer fused RNN
cell into several one-layer cells allows gradients to be processed earlier, as in
the sketch below. This reduces communication overhead, especially with multiple GPUs.
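
A minimal sketch of this pattern, assuming the `mx.rnn.FusedRNNCell` and `mx.rnn.SequentialRNNCell` APIs shown in this document (the layer count, hidden size, and prefixes below are illustrative):

```python
import mxnet as mx

data = mx.sym.Variable('data')
sequence_length = 35  # illustrative value

# Stack three one-layer fused cells instead of one cell with num_layers=3,
# so each layer's gradients become ready as soon as that layer finishes.
stacked_cell = mx.rnn.SequentialRNNCell()
for i in range(3):
    stacked_cell.add(mx.rnn.FusedRNNCell(num_hidden=512, num_layers=1,
                                         mode='lstm', prefix='lstm_l%d_' % i))

outputs, _ = stacked_cell.unroll(length=sequence_length, inputs=data)
```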

The `unfuse()` method can be used to convert the `FusedRNNCell` into an equivalent
and CPU-compatible `SequentialRNNCell` that mirrors the settings of the `FusedRNNCell`.
```python
16 changes: 16 additions & 0 deletions docs/api/python/symbol.md
@@ -314,6 +314,7 @@ Composite multiple symbols into a new one by an operator.
fix
floor
ceil
trunc
```


@@ -392,6 +393,21 @@ Composite multiple symbols into a new one by an operator.
argmin
```

### Linear Algebra

```eval_rst
.. autosummary::
:nosignatures:
linalg_gemm
linalg_gemm2
linalg_potrf
linalg_potri
linalg_trmm
linalg_trsm
linalg_sumlogdiag
```
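
A short sketch of how these operators compose, using the flat names from the list above (the matrix values and shapes are illustrative assumptions):

```python
import mxnet as mx
import numpy as np

a = np.array([[4.0, 1.0], [1.0, 3.0]])  # a symmetric positive-definite matrix

A = mx.sym.Variable('A')
L = mx.sym.linalg_potrf(A)            # Cholesky factorization: A = L * L^T
logdet = mx.sym.linalg_sumlogdiag(L)  # sum(log(diag(L))) = 0.5 * log(det(A))

ex = logdet.bind(mx.cpu(), {'A': mx.nd.array(a)})
print(ex.forward()[0].asnumpy())      # approx. 0.5 * log(11.0) ~= 1.199
```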

### Miscellaneous

```eval_rst
4 changes: 2 additions & 2 deletions docs/architecture/note_engine.md
@@ -136,8 +136,8 @@ training one batch on a two-layer neural network.
# aggregate gradient and update
fc1_wgrad[cpu] = fc1_wgrad[gpu0] + fc1_wgrad[gpu1]
fc2_wgrad[cpu] = fc2_wgrad[gpu0] + fc2_wgrad[gpu1]
fc1_weight[cpu] -= lr * fc1_wgrad[gpu0]
fc2_weight[cpu] -= lr * fc2_wgrad[gpu0]
fc1_weight[cpu] -= lr * fc1_wgrad[cpu]
fc2_weight[cpu] -= lr * fc2_wgrad[cpu]
fc1_weight[cpu].copyto(fc1_weight[gpu0] , fc1_weight[gpu1])
fc2_weight[cpu].copyto(fc2_weight[gpu0] , fc2_weight[gpu1])
```
16 changes: 8 additions & 8 deletions docs/get_started/install.md
@@ -209,9 +209,9 @@ $ sudo apt-get install -y build-essential git

**Step 2** Install OpenBLAS.

*MXNet* uses [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) library for accelerated numerical computations on CPU machine. There are several flavors of BLAS libraries - [OpenBLAS](http://www.openblas.net/), [ATLAS](http://math-atlas.sourceforge.net/) and [MKL](https://software.intel.com/en-us/intel-mkl). In this step we install OpenBLAS. You can choose to install ATLAS or MKL.
*MXNet* uses the [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) and [LAPACK](https://en.wikipedia.org/wiki/LAPACK) libraries for accelerated numerical computations on the CPU. There are several flavors of BLAS/LAPACK libraries - [OpenBLAS](http://www.openblas.net/), [ATLAS](http://math-atlas.sourceforge.net/) and [MKL](https://software.intel.com/en-us/intel-mkl). In this step we install OpenBLAS. You can choose to install ATLAS or MKL instead.
```bash
$ sudo apt-get install -y libopenblas-dev
$ sudo apt-get install -y libopenblas-dev liblapack-dev
```

**Step 3** Install OpenCV.
@@ -429,9 +429,9 @@ $ sudo apt-get install -y build-essential git
```
**Step 2** Install OpenBLAS.

*MXNet* uses [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) library for accelerated numerical computations. There are several flavors of BLAS libraries - [OpenBLAS](http://www.openblas.net/), [ATLAS](http://math-atlas.sourceforge.net/) and [MKL](https://software.intel.com/en-us/intel-mkl). In this step we install OpenBLAS. You can choose to install ATLAS or MKL.
*MXNet* uses the [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) and [LAPACK](https://en.wikipedia.org/wiki/LAPACK) libraries for accelerated numerical computations on the CPU. There are several flavors of BLAS/LAPACK libraries - [OpenBLAS](http://www.openblas.net/), [ATLAS](http://math-atlas.sourceforge.net/) and [MKL](https://software.intel.com/en-us/intel-mkl). In this step we install OpenBLAS. You can choose to install ATLAS or MKL instead.
```bash
$ sudo apt-get install -y libopenblas-dev
$ sudo apt-get install -y libopenblas-dev liblapack-dev
```

**Step 3** Install OpenCV.
@@ -751,9 +751,9 @@ $ sudo apt-get install -y build-essential git

**Step 2** Install OpenBLAS.

*MXNet* uses [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) library for accelerated numerical computations on CPU machine. There are several flavors of BLAS libraries - [OpenBLAS](http://www.openblas.net/), [ATLAS](http://math-atlas.sourceforge.net/) and [MKL](https://software.intel.com/en-us/intel-mkl). In this step we install OpenBLAS. You can choose to install ATLAS or MKL.
*MXNet* uses the [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) and [LAPACK](https://en.wikipedia.org/wiki/LAPACK) libraries for accelerated numerical computations on the CPU. There are several flavors of BLAS/LAPACK libraries - [OpenBLAS](http://www.openblas.net/), [ATLAS](http://math-atlas.sourceforge.net/) and [MKL](https://software.intel.com/en-us/intel-mkl). In this step we install OpenBLAS. You can choose to install ATLAS or MKL instead.
```bash
$ sudo apt-get install -y libopenblas-dev
$ sudo apt-get install -y libopenblas-dev liblapack-dev
```

**Step 3** Install OpenCV.
@@ -823,9 +823,9 @@ $ sudo apt-get install -y build-essential git
```
**Step 2** Install OpenBLAS.

*MXNet* uses [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) library for accelerated numerical computations. There are several flavors of BLAS libraries - [OpenBLAS](http://www.openblas.net/), [ATLAS](http://math-atlas.sourceforge.net/) and [MKL](https://software.intel.com/en-us/intel-mkl). In this step we install OpenBLAS. You can choose to install ATLAS or MKL.
*MXNet* uses the [BLAS](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) and [LAPACK](https://en.wikipedia.org/wiki/LAPACK) libraries for accelerated numerical computations on the CPU. There are several flavors of BLAS/LAPACK libraries - [OpenBLAS](http://www.openblas.net/), [ATLAS](http://math-atlas.sourceforge.net/) and [MKL](https://software.intel.com/en-us/intel-mkl). In this step we install OpenBLAS. You can choose to install ATLAS or MKL instead.
```bash
$ sudo apt-get install -y libopenblas-dev
$ sudo apt-get install -y libopenblas-dev liblapack-dev
```

**Step 3** Install OpenCV.