From a8e63ac465d3166849e7de114165373d218dd6b5 Mon Sep 17 00:00:00 2001
From: Bing Xu
Date: Fri, 27 Feb 2015 16:12:11 -0700
Subject: [PATCH 1/3] add static mkl flag

---
 make/mshadow.mk | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/make/mshadow.mk b/make/mshadow.mk
index 5a0a65109573..d892248ca170 100644
--- a/make/mshadow.mk
+++ b/make/mshadow.mk
@@ -2,7 +2,7 @@
 # mshadow configuration script
 #
 # include mshadow.mk after the variables are set
-# 
+#
 # Add MSHADOW_CFLAGS to the compile flags
 # Add MSHADOW_LDFLAGS to the linker flags
 # Add MSHADOW_NVCCFLAGS to the nvcc compile flags
@@ -10,8 +10,8 @@
 MSHADOW_CFLAGS = -msse3 -funroll-loops -Wno-unused-parameter -Wno-unknown-pragmas
 MSHADOW_LDFLAGS = -lm
-MSHADOW_NVCCFLAGS = 
-
+MSHADOW_NVCCFLAGS =
+MKLROOT =
 ifeq ($(USE_CUDA), 0)
 	MSHADOW_CFLAGS += -DMSHADOW_USE_CUDA=0
 else
@@ -34,7 +34,16 @@ ifneq ($(USE_INTEL_PATH), NONE)
 	endif
 	MSHADOW_CFLAGS += -I$(USE_INTEL_PATH)/mkl/include
 endif
+ifneq ($(USE_STATIC_MKL), NONE)
+ifeq ($(USE_INTEL_PATH), NONE)
+	MKLROOT = /opt/intel/mkl
+else
+	MKLROOT = $(USE_INTEL_PATH)/mkl
+endif
+	MSHADOW_LDFLAGS += -Wl,--start-group ${MKLROOT}/lib/intel64/libmkl_intel_lp64.a ${MKLROOT}/lib/intel64/libmkl_core.a ${MKLROOT}/lib/intel64/libmkl_intel_thread.a -Wl,--end-group -liomp5 -ldl -lpthread -lm
+else
 	MSHADOW_LDFLAGS += -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5
+endif
 else
 	MSHADOW_CFLAGS += -DMSHADOW_USE_CBLAS=1 -DMSHADOW_USE_MKL=0
 endif

From 9b7cb2fc0e68048546ab49bdb9d70ef8bde1e0ae Mon Sep 17 00:00:00 2001
From: tqchen
Date: Fri, 27 Feb 2015 16:44:54 -0800
Subject: [PATCH 2/3] checkin tensor holder

---
 mshadow/tensor.h        |   3 +-
 mshadow/tensor_holder.h | 245 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 247 insertions(+), 1 deletion(-)
 create mode 100644 mshadow/tensor_holder.h

diff --git a/mshadow/tensor.h b/mshadow/tensor.h
index 773094dd4637..ee62eb4f2a26 100644
--- a/mshadow/tensor.h
+++ b/mshadow/tensor.h
@@ -52,7 +52,7 @@ struct Shape {
     for (int i = 0; i < kDimension; ++i) {
       this->shape_[i] = s[i];
     }
-  } 
+  }
   /*!
    * \brief get corresponding index
    * \param idx dimension index
@@ -630,6 +630,7 @@ inline void MapReduceKeepHighDim(TRValue *dst,
 #include "./tensor_cpu-inl.h"
 #include "./tensor_gpu-inl.h"
 #include "./io.h"
+#include "./tensor_holder.h"
 #include "./tensor_container.h"
 #include "./random.h"
 // add definition of scalar related operators
diff --git a/mshadow/tensor_holder.h b/mshadow/tensor_holder.h
new file mode 100644
index 000000000000..10f15e42e355
--- /dev/null
+++ b/mshadow/tensor_holder.h
@@ -0,0 +1,245 @@
+/*!
+ * Copyright (c) 2014 by Contributors
+ * \file tensor_holder.h
+ * \brief tensor holder that holds a common representation of
+ *  a tensor of arbitrary dimension; it can be transformed into a normal
+ *  fixed-dimension tensor
+ * \author Tianqi Chen
+ */
+#ifndef MSHADOW_TENSOR_HOLDER_H_
+#define MSHADOW_TENSOR_HOLDER_H_
+#include <vector>
+#include "./tensor.h"
+
+namespace mshadow {
+/*!
+ * \brief holder class that can be used to hold a shape of any dimension;
+ *  the holder itself does not involve any arithmetic operations and
+ *  can be converted to a shape of fixed dimension for further operations
+ */
+struct ShapeHolder {
+  /*! \brief shape of the tensor */
+  std::vector<index_t> shape_;
+  /*! \brief return number of dimensions of the tensor inside */
+  inline index_t ndim(void) const {
+    return static_cast<index_t>(shape_.size());
+  }
+  /*!
+   * \brief get corresponding index
+   * \param i dimension index
+   * \return the corresponding dimension size
+   */
+  inline index_t &operator[](index_t i) {
+    return shape_[i];
+  }
+  /*!
+   * \brief get corresponding index
+   * \param i dimension index
+   * \return the corresponding dimension size
+   */
+  inline const index_t &operator[](index_t i) const {
+    return shape_[i];
+  }
+  /*!
+   * \brief flatten the higher dimensions together, returning a 2D shape
+   * \return the flat 2d shape
+   */
+  inline Shape<2> FlatTo2D(void) const {
+    Shape<2> s;
+    if (shape_.size() == 0) {
+      return Shape2(0, 0);
+    }
+    s.shape_[1] = this->shape_[shape_.size() - 1];
+    index_t ymax = 1;
+    for (size_t i = 1; i < shape_.size(); ++i) {
+      ymax *= this->shape_[i - 1];
+    }
+    s.shape_[0] = ymax;
+    return s;
+  }
+  /*!
+   * \brief get the shape of the tensor, specifying ndim
+   * \return the shape requested
+   * \tparam ndim dimension of the tensor
+   */
+  template<int ndim>
+  inline mshadow::Shape<ndim> get(void) const {
+    utils::Check(ndim == shape_.size(),
+                 "dimension does not match target dimension");
+    Shape<ndim> s;
+    for (index_t i = 0; i < ndim; ++i) {
+      s[i] = shape_[i];
+    }
+    return s;
+  }
+  /*!
+   * \brief assignment from shape
+   * \param shape source shape
+   * \tparam ndim shape dimension
+   */
+  template<int ndim>
+  inline ShapeHolder &operator=(const Shape<ndim> &shape) {
+    shape_.resize(ndim);
+    for (index_t i = 0; i < ndim; ++i) {
+      shape_[i] = shape[i];
+    }
+    return *this;
+  }
+  /*!
+   * \return whether two shapes are equal
+   * \param s the shape to compare against
+   */
+  inline bool operator==(const ShapeHolder &s) const {
+    if (shape_.size() != s.shape_.size()) return false;
+    for (size_t i = 0; i < shape_.size(); ++i) {
+      if (shape_[i] != s.shape_[i]) return false;
+    }
+    return true;
+  }
+  /*!
+   * \return whether two shapes are equal
+   * \param s the shape to compare against
+   * \tparam ndim dimension of the shape to compare against
+   */
+  template<int ndim>
+  inline bool operator==(const Shape<ndim> &s) const {
+    if (shape_.size() != ndim) return false;
+    for (index_t i = 0; i < ndim; ++i) {
+      if (shape_[i] != s.shape_[i]) return false;
+    }
+    return true;
+  }
+};
+
+/*!
+ * \brief holder class that can be used to hold a tensor of any dimension;
+ *  the holder itself does not involve any arithmetic operations and
+ *  can be converted, via fixdim, to a tensor of fixed dimension for
+ *  further operations
+ *
+ *  Like Tensor, this data structure behaves like a pointer class and does
+ *  not implicitly allocate or de-allocate space.
+ *  This data structure is helpful for holding tensors of different
+ *  dimensions that await further processing.
+ * \tparam Device which device the tensor is on
+ * \tparam DType the type of elements in the tensor
+ */
+template<typename Device, typename DType>
+class TensorHolder {
+ public:
+  /*! \brief pointer to the data */
+  DType *dptr_;
+  /*! \brief shape of the tensor */
+  ShapeHolder shape_;
+  /*!
+   * \brief storing the stride information in x dimension
+   */
+  index_t stride_;
+  /*!
+   * \brief stream where the computation lies;
+   *  a stream is a device-dependent concept in which each computation is carried out
+   */
+  Stream<Device> *stream_;
+  /*! \brief default constructor, default copy assign will work */
+  TensorHolder(void) : dptr_(NULL), stream_(NULL) {
+  }
+  /*!
+   * \brief constructor from tensor
+   * \param src source tensor
+   * \tparam ndim tensor dimension
+   */
+  template<int ndim>
+  TensorHolder(const Tensor<Device, ndim, DType> &src) {
+    *this = src;
+  }
+  /*!
+   * \brief assignment from tensor
+   * \param src source tensor
+   * \tparam ndim tensor dimension
+   */
+  template<int ndim>
+  inline TensorHolder<Device, DType>
+  &operator=(const Tensor<Device, ndim, DType> &src) {
+    dptr_ = src.dptr_;
+    shape_ = src.shape_;
+    stride_ = src.stride_;
+    stream_ = src.stream_;
+    return *this;
+  }
+  /*!
+   * \brief set the stream to do computation of current tensor
+   * \param stream the computation stream
+   */
+  inline void set_stream(Stream<Device> *stream) {
+    this->stream_ = stream;
+  }
+  /*!
+   * \return whether the tensor's memory is contiguous
+   */
+  inline bool CheckContiguous(void) const {
+    return shape_[shape_.shape_.size() - 1] == stride_;
+  }
+  /*!
+   * \brief flatten the tensor to 2 dimensions, collapsing the higher dimensions together
+   * \return tensor after flatten
+   */
+  inline Tensor<Device, 2, DType> FlatTo2D(void) const {
+    return Tensor<Device, 2, DType>(dptr_, shape_.FlatTo2D(), stride_, stream_);
+  }
+  /*! \brief return number of dimensions of the tensor inside */
+  inline int ndim(void) const {
+    return shape_.ndim();
+  }
+  /*!
+   * \brief return size of i-th dimension, counting from the highest dimension
+   * \param idx the dimension index, counted from the highest dimension
+   * \return the size
+   */
+  inline index_t size(index_t idx) const {
+    return shape_[idx];
+  }
+  /*!
+   * \brief fetch the tensor, with respect to a specific dimension;
+   *  if ndim does not match the stored dimension, an error will be issued
+   * \return the tensor requested
+   * \tparam ndim dimension of the tensor
+   */
+  template<int ndim>
+  inline Tensor<Device, ndim, DType> get(void) const {
+    return Tensor<Device, ndim, DType>(dptr_, shape_.get<ndim>(),
+                                       stride_, stream_);
+  }
+  /*!
+   * \brief allocate space for the tensor holder
+   * \param pad whether padding is required
+   */
+  inline void AllocSpace(bool pad = MSHADOW_ALLOC_PAD) {
+    if (this->ndim() == 0) return;
+    Tensor<Device, 2, DType> ts = this->FlatTo2D();
+    mshadow::AllocSpace(&ts, pad);
+    dptr_ = ts.dptr_;
+    stride_ = ts.stride_;
+  }
+  /*! \brief free the space held by this tensor holder */
+  inline void FreeSpace(void) {
+    if (this->ndim() == 0) return;
+    Tensor<Device, 2, DType> ts = this->FlatTo2D();
+    mshadow::FreeSpace(&ts);
+  }
+};
+/*!
+ * \brief fetch the tensor, with respect to a specific dimension;
+ *  if ndim does not match the stored dimension, an error will be issued
+ * \param src the source tensor holder
+ * \return the tensor requested
+ * \tparam ndim dimension of the tensor
+ * \tparam Device the device where the tensor lies
+ * \tparam DType the data type of the tensor
+ */
+template<int ndim, typename Device, typename DType>
+inline Tensor<Device, ndim, DType>
+fixdim(const TensorHolder<Device, DType> &src) {
+  const ShapeHolder &s = src.shape_;
+  return Tensor<Device, ndim, DType>(src.dptr_, s.get<ndim>(),
+                                     src.stride_, src.stream_);
+}
+}  // namespace mshadow
+#endif  // MSHADOW_TENSOR_HOLDER_H_

From fb34669b47ab61b63240d75e462a95b1ff970d74 Mon Sep 17 00:00:00 2001
From: tqchen
Date: Sat, 14 Mar 2015 21:37:03 -0700
Subject: [PATCH 3/3] fix uninitialization error

---
 mshadow-ps/ps_local-inl.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mshadow-ps/ps_local-inl.h b/mshadow-ps/ps_local-inl.h
index f61ac3896ea1..937ba211b5b9 100644
--- a/mshadow-ps/ps_local-inl.h
+++ b/mshadow-ps/ps_local-inl.h
@@ -39,6 +39,7 @@ class LocalModel : public ISharedModel<xpu, DType> {
     nthread_reduction = 8;
     use_pin_memory = 1;
     test_on_server = 0;
+    update_on_server = 0;
     destroy_signal = false;
     custom_server = NULL;
   }
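
As a quick illustration of the TensorHolder API introduced in PATCH 2/3, here is a minimal usage sketch. It is not part of the patch series; it assumes a CPU-only mshadow build (USE_CUDA=0 with a CBLAS or MKL backend), and the file name and main() scaffolding are hypothetical.

// usage_sketch.cc: hypothetical example, not part of the patches above.
// Assumes a CPU-only mshadow build (USE_CUDA=0) with a BLAS/MKL backend.
#include "mshadow/tensor.h"

int main(void) {
  using namespace mshadow;
  InitTensorEngine<cpu>();
  // Describe a 3D tensor; the holder erases the compile-time dimension.
  TensorHolder<cpu, float> holder;
  holder = Tensor<cpu, 3, float>(NULL, Shape3(2, 3, 4));
  holder.AllocSpace();  // allocates through the flattened 2D view
  // Recover a typed, fixed-dimension view; utils::Check fires if the
  // requested ndim does not match the stored dimension.
  Tensor<cpu, 3, float> t3 = fixdim<3>(holder);
  t3 = 1.0f;  // ordinary mshadow expression on the recovered tensor
  // Higher dimensions collapse into rows: shape (2, 3, 4) -> (6, 4).
  Tensor<cpu, 2, float> flat = holder.FlatTo2D();
  flat[5][3] = 2.0f;  // aliases t3[1][2][3]
  holder.FreeSpace();
  ShutdownTensorEngine<cpu>();
  return 0;
}

The point of the design is that TensorHolder stores tensors of different dimensions behind one type, while fixdim<ndim> re-checks the dimension at the boundary where typed computation resumes.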