[Topi,x86] Split MKL from BLAS.
Make cblas and mkl separate entities in cmake and topi, allowing users
to use both a BLAS library and MKL at the same time. In the future,
MKL-specific functions can be added easily. MKLDNN is also split off
from MKL and BLAS for the same reasons (see the configuration sketch
below).

Other improvements:
  - cblas and mkl strategies are now applied only when they are viable.
  - compile_engine now logs which implementation it has chosen and why.
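
With this split, `config.cmake` can enable both backends at once, e.g. `set(USE_BLAS openblas)` together with `set(USE_MKL ON)`. A minimal sketch for checking at runtime which contrib backends a given build registered, assuming a build with these options enabled (the packed-function names come from the sources touched in this diff):

```python
import tvm

# Each contrib backend registers its packed functions only when the
# corresponding cmake option was enabled at build time, so a missing
# lookup simply returns None.
for name in ["tvm.contrib.cblas.matmul",
             "tvm.contrib.mkl.matmul",
             "tvm.contrib.mkldnn.matmul"]:
    func = tvm.get_global_func(name, allow_missing=True)
    print(name, "->", "available" if func is not None else "not built")
```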
tkonolige committed Aug 4, 2020
1 parent f11abf2 commit 5e9583d
Showing 11 changed files with 600 additions and 178 deletions.
45 changes: 23 additions & 22 deletions cmake/modules/contrib/BLAS.cmake
@@ -15,15 +15,29 @@
# specific language governing permissions and limitations
# under the License.

# Plugin rules for cblas
file(GLOB CBLAS_CONTRIB_SRC src/runtime/contrib/cblas/*.cc)

if(USE_BLAS STREQUAL "openblas")
find_library(BLAS_LIBRARY openblas)
list(APPEND TVM_RUNTIME_LINKER_LIBS ${BLAS_LIBRARY})
list(APPEND RUNTIME_SRCS ${CBLAS_CONTRIB_SRC})
list(APPEND RUNTIME_SRCS src/runtime/contrib/cblas/cblas.cc)
message(STATUS "Using BLAS library " ${BLAS_LIBRARY})
elseif(USE_BLAS STREQUAL "atlas" OR USE_BLAS STREQUAL "blas")
find_library(BLAS_LIBRARY cblas)
list(APPEND TVM_RUNTIME_LINKER_LIBS ${BLAS_LIBRARY})
list(APPEND RUNTIME_SRCS src/runtime/contrib/cblas/cblas.cc)
message(STATUS "Use BLAS library " ${BLAS_LIBRARY})
elseif(USE_BLAS STREQUAL "apple")
find_library(BLAS_LIBRARY Accelerate)
include_directories(${BLAS_LIBRARY}/Versions/Current/Frameworks/vecLib.framework/Versions/Current/Headers/)
list(APPEND TVM_RUNTIME_LINKER_LIBS ${BLAS_LIBRARY})
list(APPEND RUNTIME_SRCS src/runtime/contrib/cblas/cblas.cc)
message(STATUS "Use BLAS library " ${BLAS_LIBRARY})
elseif(USE_BLAS STREQUAL "mkl")
elseif(USE_BLAS STREQUAL "none")
# pass
else()
message(FATAL_ERROR "Invalid option: USE_BLAS=" ${USE_BLAS})
endif()

if(USE_MKL)
if(NOT IS_DIRECTORY ${USE_MKL_PATH})
set(USE_MKL_PATH /opt/intel/mkl)
endif()
@@ -36,24 +50,9 @@ elseif(USE_BLAS STREQUAL "mkl")
endif()
include_directories(${USE_MKL_PATH}/include)
list(APPEND TVM_RUNTIME_LINKER_LIBS ${BLAS_LIBRARY_MKL})
list(APPEND RUNTIME_SRCS ${CBLAS_CONTRIB_SRC})
list(APPEND RUNTIME_SRCS src/runtime/contrib/cblas/mkl.cc)
add_definitions(-DUSE_MKL_BLAS=1)
message(STATUS "Use BLAS library " ${BLAS_LIBRARY_MKL})
elseif(USE_BLAS STREQUAL "atlas" OR USE_BLAS STREQUAL "blas")
find_library(BLAS_LIBRARY cblas)
list(APPEND TVM_RUNTIME_LINKER_LIBS ${BLAS_LIBRARY})
list(APPEND RUNTIME_SRCS ${CBLAS_CONTRIB_SRC})
message(STATUS "Use BLAS library " ${BLAS_LIBRARY})
elseif(USE_BLAS STREQUAL "apple")
find_library(BLAS_LIBRARY Accelerate)
include_directories(${BLAS_LIBRARY}/Versions/Current/Frameworks/vecLib.framework/Versions/Current/Headers/)
list(APPEND TVM_RUNTIME_LINKER_LIBS ${BLAS_LIBRARY})
list(APPEND RUNTIME_SRCS ${CBLAS_CONTRIB_SRC})
message(STATUS "Use BLAS library " ${BLAS_LIBRARY})
elseif(USE_BLAS STREQUAL "none")
# pass
else()
message(FATAL_ERROR "Invalid option: USE_BLAS=" ${USE_BLAS})
message(STATUS "Use MKL library " ${BLAS_LIBRARY_MKL})
endif()

if(IS_DIRECTORY ${USE_MKLDNN})
@@ -63,6 +62,7 @@ if(IS_DIRECTORY ${USE_MKLDNN})
else()
include_directories(${USE_MKLDNN}/include)
list(APPEND TVM_RUNTIME_LINKER_LIBS ${MKLDNN_LIBRARY})
list(APPEND RUNTIME_SRCS src/runtime/contrib/cblas/mkldnn.cc)
add_definitions(-DUSE_DNNL=1)
message(STATUS "Use MKLDNN library " ${MKLDNN_LIBRARY})
endif()
@@ -74,6 +74,7 @@ elseif(USE_MKLDNN STREQUAL "ON")
list(APPEND TVM_RUNTIME_LINKER_LIBS ${MKLDNN_LIBRARY})
add_definitions(-DUSE_DNNL=1)
message(STATUS "Use MKLDNN library " ${MKLDNN_LIBRARY})
list(APPEND RUNTIME_SRCS src/runtime/contrib/cblas/mkldnn.cc)
endif()
elseif(USE_MKLDNN STREQUAL "OFF")
# pass
33 changes: 0 additions & 33 deletions python/tvm/contrib/cblas.py
@@ -52,39 +52,6 @@ def matmul(lhs, rhs, transa=False, transb=False, **kwargs):
)


def matmul_u8s8s32(lhs, rhs, transa=False, transb=False, **kwargs):
"""Create an extern op that compute matrix mult of A and rhs with CrhsLAS
This function serves as an example on how to call external libraries.
Parameters
----------
lhs: Tensor
The left matrix operand
rhs: Tensor
The right matrix operand
transa: bool
Whether to transpose lhs
transb: bool
Whether to transpose rhs
Returns
-------
C: Tensor
The result tensor.
"""
n = lhs.shape[1] if transa else lhs.shape[0]
m = rhs.shape[0] if transb else rhs.shape[1]
return te.extern(
(n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cblas.matmul_u8s8s32", ins[0], ins[1], outs[0], transa, transb
),
name="C",
**kwargs
)


def batch_matmul(lhs, rhs, transa=False, transb=False, iterative=False, **kwargs):
"""Create an extern op that compute batched matrix mult of A and rhs with CBLAS
This function serves as an example on how to call external libraries.
126 changes: 126 additions & 0 deletions python/tvm/contrib/mkl.py
@@ -0,0 +1,126 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to BLAS libraries."""
import tvm
from tvm import te


def matmul(lhs, rhs, transa=False, transb=False, **kwargs):
"""Create an extern op that compute matrix mult of A and rhs with CrhsLAS
This function serves as an example on how to call external libraries.
Parameters
----------
lhs: Tensor
The left matrix operand
rhs: Tensor
The right matrix operand
transa: bool
Whether to transpose lhs
transb: bool
Whether to transpose rhs
Returns
-------
C: Tensor
The result tensor.
"""
n = lhs.shape[1] if transa else lhs.shape[0]
m = rhs.shape[0] if transb else rhs.shape[1]
return te.extern(
(n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.mkl.matmul", ins[0], ins[1], outs[0], transa, transb
),
name="C",
**kwargs
)
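
A short usage sketch for the new module, assuming TVM was built with `USE_MKL` enabled so that the `tvm.contrib.mkl.matmul` packed function is registered:

```python
import numpy as np
import tvm
from tvm import te
from tvm.contrib import mkl

# C(128, 32) = A(128, 64) x B(64, 32), offloaded to MKL through te.extern.
A = te.placeholder((128, 64), name="A", dtype="float32")
B = te.placeholder((64, 32), name="B", dtype="float32")
C = mkl.matmul(A, B)

s = te.create_schedule(C.op)
f = tvm.build(s, [A, B, C], target="llvm")

ctx = tvm.cpu(0)
a = tvm.nd.array(np.random.rand(128, 64).astype("float32"), ctx)
b = tvm.nd.array(np.random.rand(64, 32).astype("float32"), ctx)
c = tvm.nd.array(np.zeros((128, 32), dtype="float32"), ctx)
f(a, b, c)
np.testing.assert_allclose(c.asnumpy(), a.asnumpy() @ b.asnumpy(), rtol=1e-4)
```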


def matmul_u8s8s32(lhs, rhs, transa=False, transb=False, **kwargs):
"""Create an extern op that compute matrix mult of A and rhs with CrhsLAS
This function serves as an example on how to call external libraries.
Parameters
----------
lhs: Tensor
The left matrix operand
rhs: Tensor
The right matrix operand
transa: bool
Whether to transpose lhs
transb: bool
Whether to transpose rhs
Returns
-------
C: Tensor
The result tensor.
"""
n = lhs.shape[1] if transa else lhs.shape[0]
m = rhs.shape[0] if transb else rhs.shape[1]
return te.extern(
(n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.mkl.matmul_u8s8s32", ins[0], ins[1], outs[0], transa, transb
),
name="C",
**kwargs
)
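
The `u8s8s32` variant is the quantized GEMM entry point. The dtypes in the sketch below (`uint8` lhs, `int8` rhs, `int32` output) follow the layout the name suggests and are an assumption here, not something spelled out in this diff:

```python
import tvm
from tvm import te
from tvm.contrib import mkl

# Quantized GEMM: uint8 x int8 accumulated into int32 (assumed dtype layout).
A = te.placeholder((128, 64), name="A", dtype="uint8")
B = te.placeholder((64, 32), name="B", dtype="int8")
C = mkl.matmul_u8s8s32(A, B, dtype="int32")  # dtype is forwarded to te.extern
```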


def batch_matmul(lhs, rhs, transa=False, transb=False, iterative=False, **kwargs):
"""Create an extern op that compute batched matrix mult of A and rhs with mkl
This function serves as an example on how to call external libraries.
Parameters
----------
lhs: Tensor
The left matrix operand
rhs: Tensor
The right matrix operand
transa: bool
Whether to transpose lhs
transb: bool
Whether to transpose rhs
iterative: bool
Whether to use the iterative variant of batch_matmul
Returns
-------
C: Tensor
The result tensor.
"""
b = lhs.shape[0]
n = lhs.shape[2] if transa else lhs.shape[1]
m = rhs.shape[1] if transb else rhs.shape[2]
return te.extern(
(b, n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.mkl.batch_matmul"
if not iterative
else "tvm.contrib.mkl.batch_matmul_iterative",
ins[0],
ins[1],
outs[0],
transa,
transb,
),
name="C",
**kwargs
)
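
A sketch of the batched form under the same MKL-enabled build assumption; the leading axis is the batch dimension, and `iterative=True` dispatches to `tvm.contrib.mkl.batch_matmul_iterative` instead of `tvm.contrib.mkl.batch_matmul`:

```python
import tvm
from tvm import te
from tvm.contrib import mkl

# Batched C[b] = A[b] x B[b]; the first axis is the batch dimension.
A = te.placeholder((8, 128, 64), name="A", dtype="float32")
B = te.placeholder((8, 64, 32), name="B", dtype="float32")

C_batched = mkl.batch_matmul(A, B)
C_iterative = mkl.batch_matmul(A, B, iterative=True)
```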
52 changes: 52 additions & 0 deletions python/tvm/contrib/mkldnn.py
@@ -0,0 +1,52 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to BLAS libraries."""
import tvm
from tvm import te


def matmul(lhs, rhs, transa=False, transb=False, **kwargs):
"""Create an extern op that compute matrix mult of A and rhs with CrhsLAS
This function serves as an example on how to call external libraries.
Parameters
----------
lhs: Tensor
The left matrix operand
rhs: Tensor
The right matrix operand
transa: bool
Whether to transpose lhs
transb: bool
Whether to transpose rhs
Returns
-------
C: Tensor
The result tensor.
"""
n = lhs.shape[1] if transa else lhs.shape[0]
m = rhs.shape[0] if transb else rhs.shape[1]
return te.extern(
(n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.mkl.matmul", ins[0], ins[1], outs[0], transa, transb
),
name="C",
**kwargs
)
27 changes: 22 additions & 5 deletions python/tvm/relay/backend/compile_engine.py
@@ -181,11 +181,14 @@ def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True)
"""
all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)

best_plevel_impl = None
for impl in all_impls:
if best_plevel_impl is None or impl.plevel > best_plevel_impl.plevel:
best_plevel_impl = impl
best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
if not use_autotvm:
logger.info(
"Using %s for %s based on highest priority (%d)",
best_plevel_impl.name,
op.name,
best_plevel_impl.plevel,
)
outs = best_plevel_impl.compute(attrs, inputs, out_type)
return best_plevel_impl, outs

@@ -207,12 +210,21 @@ def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True)
if cfg.is_fallback:
# Skip fallback config
continue
logger.info(
"Implementation %s for %s has cost %.2e", impl.name, op.name, cfg.cost
)
if best_cfg is None or best_cfg.cost > cfg.cost:
best_autotvm_impl = impl
best_cfg = cfg
autotvm.GLOBAL_SCOPE.silent = False
if best_autotvm_impl:
# The best autotvm implementation definitely doesn't use fallback config
logger.info(
"Using %s for %s based on lowest cost (%.2e)",
best_autotvm_impl.name,
op.name,
best_cfg.cost,
)
return best_autotvm_impl, outputs[best_autotvm_impl]
# Use the implementation with highest plevel
if workloads[best_plevel_impl] is not None:
@@ -222,6 +234,12 @@ def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True)
if msg not in autotvm.task.DispatchContext.warning_messages:
autotvm.task.DispatchContext.warning_messages.add(msg)
autotvm_logger.warning(msg)
logger.info(
"Using %s for %s based on highest priority (%s)",
best_plevel_impl.name,
op.name,
best_plevel_impl.plevel,
)
return best_plevel_impl, outputs[best_plevel_impl]


@@ -261,7 +279,6 @@ def lower_call(call, inputs, target):
if not is_dyn:
best_impl, outputs = select_implementation(
op, call.attrs, inputs, ret_type, target)
logger.info("Use implementation %s for op %s", best_impl.name, op.name)
else:
# TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes.
# Currently, we just use the implementation with highest plevel
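
The new selection messages are emitted at INFO level, so they can be surfaced with standard Python logging. A minimal sketch, assuming the module-level logger in compile_engine.py is named `compile_engine` (the sample output line is illustrative, not captured from a real run):

```python
import logging

# Surface compile_engine's implementation-selection messages.
logging.basicConfig(level=logging.INFO)
logging.getLogger("compile_engine").setLevel(logging.INFO)

# Example line printed during relay.build (illustrative):
#   Using dense_mkl.x86 for nn.dense based on highest priority (15)
```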