Merge changes from github.
Change: 151046259
martinwicke authored and tensorflower-gardener committed Mar 23, 2017
1 parent 8ca0714 commit bc456e3
Showing 141 changed files with 4,407 additions and 602 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -1,6 +1,7 @@
.DS_Store
.ipynb_checkpoints
node_modules
/.bazelrc
/bazel-*
/third_party/py/numpy/numpy_include
/tools/bazel.rc
@@ -13,4 +14,4 @@ node_modules
*.pyc
__pycache__
*.swp
.vscode/
.vscode/
9 changes: 9 additions & 0 deletions RELEASE.md
@@ -1,3 +1,10 @@
# Release 1.0.1

## Bug Fixes and Other Changes
* Change GraphConstructor to not increase the version when importing, but instead take the min of all versions.
* Google Cloud Storage fixes.
* Removed `tf.core` and `tf.python` modules from the API. These were never intended to be exposed. Please use the same objects through the top-level `tf` module instead.

# Release 1.0.0

## Major Features and Improvements
@@ -88,6 +95,8 @@ To help you upgrade your existing TensorFlow Python code to match the API change
from the tensorflow::ops namespace to tensorflow.
* Change arg order for `{softmax,sparse_softmax,sigmoid}_cross_entropy_with_logits` to be (labels, predictions), and force use of named args.
* `tf.nn.rnn_cell.*` and most functions in `tf.nn.rnn.*` (with the exception of `dynamic_rnn` and `raw_rnn`) are temporarily in `tf.contrib.rnn`. They will be moved back into core for TF 1.1.
* `tf.nn.sampled_softmax_loss` and `tf.nn.nce_loss` have both changed their APIs: the `inputs` and `labels` arguments swap positions to (`labels`, `inputs`).
* The `shape` keyword argument of the `SparseTensor` constructor was renamed to `dense_shape` between TensorFlow 0.12 and TensorFlow 1.0. A combined migration sketch for these argument changes is shown below.
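  A minimal migration sketch covering the three argument changes above (variable names, shapes, and sizes are illustrative placeholders, not part of this release):

  ```python
  import tensorflow as tf

  # Cross-entropy losses: pass labels and logits by keyword, labels first.
  labels = tf.placeholder(tf.float32, [None, 1000])
  logits = tf.placeholder(tf.float32, [None, 1000])
  loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)

  # sampled_softmax_loss / nce_loss: `labels` now precedes `inputs`,
  # so pass both by keyword to stay unambiguous.
  weights = tf.Variable(tf.zeros([1000, 128]))
  biases = tf.Variable(tf.zeros([1000]))
  inputs = tf.placeholder(tf.float32, [None, 128])
  class_ids = tf.placeholder(tf.int64, [None, 1])  # shape [batch, num_true]
  sampled = tf.nn.sampled_softmax_loss(weights=weights, biases=biases,
                                       labels=class_ids, inputs=inputs,
                                       num_sampled=64, num_classes=1000)

  # SparseTensor: the former `shape` keyword is now `dense_shape`.
  st = tf.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[3, 4])
  ```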

## Bug Fixes and Other Changes
* Numerous C++ API updates.
7 changes: 1 addition & 6 deletions WORKSPACE
@@ -14,12 +14,7 @@ load("@io_bazel_rules_closure//closure:defs.bzl", "closure_repositories")

closure_repositories()

load("//tensorflow:workspace.bzl", "check_version", "tf_workspace")

# We must check the bazel version before trying to parse any other BUILD files,
# in case the parsing of those build files depends on the bazel version we
# require here.
check_version("0.4.2")
load("//tensorflow:workspace.bzl", "tf_workspace")

# Uncomment and update the paths in these entries to build the Android demo.
#android_sdk_repository(
65 changes: 23 additions & 42 deletions configure
@@ -8,6 +8,9 @@ pushd `dirname $0` > /dev/null
SOURCE_BASE_DIR=`pwd -P`
popd > /dev/null

# This file contains customized config settings.
touch .bazelrc

PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"

function is_linux() {
@@ -36,15 +39,11 @@ function is_windows() {
}

function bazel_clean_and_fetch() {
# bazel clean --expunge currently doesn't work on Windows
# TODO(pcloudy): Re-enable it after bazel clean --expunge is fixed.
if ! is_windows; then
bazel clean --expunge
fi
if [ -z "$TF_BAZEL_TARGETS" ]; then
TF_BAZEL_TARGETS="//tensorflow/... -//tensorflow/contrib/nccl/... -//tensorflow/examples/android/..."
bazel fetch "//tensorflow/... -//tensorflow/contrib/nccl/... -//tensorflow/examples/android/..."
else
bazel fetch $TF_BAZEL_TARGETS
fi
bazel fetch "$TF_BAZEL_TARGETS"
}

function sed_hyphen_i() {
@@ -102,8 +101,8 @@ if false; then # Disable building with MKL for now

if [ "$TF_NEED_MKL" == "1" ]; then # TF_NEED_MKL
DST=`dirname $0`
ARCHIVE_BASENAME=mklml_lnx_2017.0.2.20170110.tgz
GITHUB_RELEASE_TAG=v0.3
ARCHIVE_BASENAME=mklml_lnx_2017.0.2.20170209.tgz
GITHUB_RELEASE_TAG=v0.5
MKLURL="https://github.com/01org/mkl-dnn/releases/download/$GITHUB_RELEASE_TAG/$ARCHIVE_BASENAME"
if ! [ -e "$DST/third_party/mkl/$ARCHIVE_BASENAME" ]; then
wget --no-check-certificate -P $DST/third_party/mkl/ $MKLURL
@@ -182,13 +181,12 @@ else
TF_NEED_JEMALLOC=0
fi

if [ "$TF_NEED_JEMALLOC" == "1" ]; then
sed_hyphen_i -e "s/WITH_JEMALLOC = False/WITH_JEMALLOC = True/" tensorflow/core/platform/default/build_config.bzl
else
sed_hyphen_i -e "s/WITH_JEMALLOC = True/WITH_JEMALLOC = False/" tensorflow/core/platform/default/build_config.bzl
sed_hyphen_i -e "/with_jemalloc/d" .bazelrc
if [[ "$TF_NEED_JEMALLOC" == "1" ]]; then
echo 'build --define with_jemalloc=true' >>.bazelrc
fi

while [ "$TF_NEED_GCP" == "" ]; do
while [[ "$TF_NEED_GCP" == "" ]]; do
read -p "Do you wish to build TensorFlow with "\
"Google Cloud Platform support? [y/N] " INPUT
case $INPUT in
@@ -202,23 +200,12 @@
esac
done

if [ "$TF_NEED_GCP" == "1" ]; then
## Verify that libcurl header files are available.
# Only check Linux, since on MacOS the header files are installed with XCode.
if is_linux && [[ ! -f "/usr/include/curl/curl.h" ]]; then
echo "ERROR: It appears that the development version of libcurl is not "\
"available. Please install the libcurl3-dev package."
exit 1
fi

# Update Bazel build configuration.
sed_hyphen_i -e "s/WITH_GCP_SUPPORT = False/WITH_GCP_SUPPORT = True/" tensorflow/core/platform/default/build_config.bzl
else
# Update Bazel build configuration.
sed_hyphen_i -e "s/WITH_GCP_SUPPORT = True/WITH_GCP_SUPPORT = False/" tensorflow/core/platform/default/build_config.bzl
sed_hyphen_i -e "/with_gcp_support/d" .bazelrc
if [[ "$TF_NEED_GCP" == "1" ]]; then
echo 'build --define with_gcp_support=true' >>.bazelrc
fi

while [ "$TF_NEED_HDFS" == "" ]; do
while [[ "$TF_NEED_HDFS" == "" ]]; do
read -p "Do you wish to build TensorFlow with "\
"Hadoop File System support? [y/N] " INPUT
case $INPUT in
@@ -232,16 +219,13 @@
esac
done

if [ "$TF_NEED_HDFS" == "1" ]; then
# Update Bazel build configuration.
sed_hyphen_i -e "s/WITH_HDFS_SUPPORT = False/WITH_HDFS_SUPPORT = True/" tensorflow/core/platform/default/build_config.bzl
else
# Update Bazel build configuration.
sed_hyphen_i -e "s/WITH_HDFS_SUPPORT = True/WITH_HDFS_SUPPORT = False/" tensorflow/core/platform/default/build_config.bzl
sed_hyphen_i -e "/with_hdfs_support/d" .bazelrc
if [[ "$TF_NEED_HDFS" == "1" ]]; then
echo 'build --define with_hdfs_support=true' >>.bazelrc
fi

## Enable XLA.
while [ "$TF_ENABLE_XLA" == "" ]; do
while [[ "$TF_ENABLE_XLA" == "" ]]; do
read -p "Do you wish to build TensorFlow with the XLA just-in-time compiler (experimental)? [y/N] " INPUT
case $INPUT in
[Yy]* ) echo "XLA JIT support will be enabled for TensorFlow"; TF_ENABLE_XLA=1;;
@@ -251,12 +235,9 @@
esac
done

if [ "$TF_ENABLE_XLA" == "1" ]; then
# Update Bazel build configuration.
sed_hyphen_i -e "s/^WITH_XLA_SUPPORT = [FT].*/WITH_XLA_SUPPORT = True/" tensorflow/core/platform/default/build_config_root.bzl
else
# Update Bazel build configuration.
sed_hyphen_i -e "s/^WITH_XLA_SUPPORT = [FT].*/WITH_XLA_SUPPORT = False/" tensorflow/core/platform/default/build_config_root.bzl
sed_hyphen_i -e "/with_xla_support/d" .bazelrc
if [[ "$TF_ENABLE_XLA" == "1" ]]; then
echo 'build --define with_xla_support=true' >>.bazelrc
fi


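The net effect of the configure changes above: optional features are no longer toggled by rewriting `build_config.bzl` with sed, but recorded as `--define` flags appended to the freshly created (and now git-ignored) `.bazelrc`. For illustration, answering yes to jemalloc and XLA would leave a `.bazelrc` containing something like:

```
build --define with_jemalloc=true
build --define with_xla_support=true
```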
30 changes: 30 additions & 0 deletions tensorflow/BUILD
@@ -110,6 +110,34 @@ config_setting(
visibility = ["//visibility:public"],
)

# TODO(jhseu): Enable on other platforms other than Linux.
config_setting(
name = "with_jemalloc",
values = {
"cpu": "k8",
"define": "with_jemalloc=true",
},
visibility = ["//visibility:public"],
)

config_setting(
name = "with_gcp_support",
values = {"define": "with_gcp_support=true"},
visibility = ["//visibility:public"],
)

config_setting(
name = "with_hdfs_support",
values = {"define": "with_hdfs_support=true"},
visibility = ["//visibility:public"],
)

config_setting(
name = "with_xla_support",
values = {"define": "with_xla_support=true"},
visibility = ["//visibility:public"],
)

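These `config_setting` targets are the build-side counterpart of the `--define` flags configure now writes to `.bazelrc`; rules can branch on them with `select()`. A hypothetical consumer sketch (only the `config_setting` labels come from this commit; the target name and `@jemalloc` label are assumptions):

```python
# Hypothetical BUILD rule toggling a dependency on the new setting.
cc_library(
    name = "malloc_impl",  # illustrative target name
    srcs = ["malloc_impl.cc"],
    deps = select({
        "//tensorflow:with_jemalloc": ["@jemalloc//:jemalloc"],  # assumed external label
        "//conditions:default": [],  # fall back to the system allocator
    }),
)
```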
package_group(
name = "internal",
packages = ["//tensorflow/..."],
@@ -321,6 +349,8 @@ cc_binary(
deps = [
"//tensorflow/c:c_api",
"//tensorflow/cc:cc_ops",
"//tensorflow/cc:client_session",
"//tensorflow/cc:scope",
"//tensorflow/core:tensorflow",
],
)
3 changes: 2 additions & 1 deletion tensorflow/compiler/xla/service/allocation_tracker.cc
@@ -138,7 +138,8 @@ tensorflow::Status AllocationTracker::DeallocateShape(
TF_RET_CHECK(ShapeUtil::TupleElementCount(shape) == elements.size())
<< "tuple has unexpected number of elements: " << elements.size()
<< " != " << ShapeUtil::TupleElementCount(shape);
for (int i = 0; i < elements.size(); ++i) {
for (std::vector<se::DeviceMemoryBase>::size_type i = 0;
i < elements.size(); ++i) {
VLOG(2) << "recursing onto the tuple elements";
TF_RETURN_IF_ERROR(DeallocateShape(backend, device_ordinal, &elements[i],
shape.tuple_shapes(i),
4 changes: 2 additions & 2 deletions tensorflow/compiler/xla/service/generic_transfer_manager.cc
@@ -118,10 +118,10 @@ GenericTransferManager::ShallowCopyTupleFromDevice(

// Create a DeviceMemoryBase from each void* pointer.
std::vector<se::DeviceMemoryBase> destination;
for (int i = 0; i < element_pointers.size(); ++i) {
for (std::vector<void*>::size_type i = 0; i < element_pointers.size(); ++i) {
if (element_pointers[i] == nullptr &&
!ShapeUtil::HasZeroElements(shape.tuple_shapes(i))) {
return FailedPrecondition("tuple contains nullptr at element %d", i);
return FailedPrecondition("tuple contains nullptr at element %lu", i);
}
int64 buffer_size = ShapeUtil::ByteSizeOf(shape.tuple_shapes(i),
/*pointer_size=*/sizeof(void*));
16 changes: 10 additions & 6 deletions tensorflow/compiler/xla/service/service.cc
@@ -256,7 +256,8 @@ StatusOr<std::vector<const Allocation*>> Service::ResolveAndValidateArguments(
tensorflow::gtl::ArraySlice<const GlobalDataHandle*> arguments,
const Backend* backend, int device_ordinal) {
std::vector<const Allocation*> allocations;
for (int i = 0; i < arguments.size(); ++i) {
for (tensorflow::gtl::ArraySlice<const GlobalDataHandle*>::size_type i = 0;
i < arguments.size(); ++i) {
auto allocation_status = allocation_tracker_.Resolve(*arguments[i]);
if (!allocation_status.ok()) {
return Status(allocation_status.status().code(),
@@ -269,7 +270,7 @@
if (allocation->backend() != backend ||
allocation->device_ordinal() != device_ordinal) {
return InvalidArgument(
"argument %d is on device %s but computation will be executed "
"argument %lu is on device %s but computation will be executed "
"on device %s",
i,
allocation->backend()
@@ -295,13 +296,14 @@ StatusOr<std::unique_ptr<HloModuleConfig>> Service::CreateModuleConfig(
program_shape.parameters_size(), arguments.size());
}

for (int i = 0; i < arguments.size(); ++i) {
for (tensorflow::gtl::ArraySlice<const Allocation*>::size_type i = 0;
i < arguments.size(); ++i) {
// Verify that shape of arguments matches the shape of the arguments in the
// ProgramShape.
if (!ShapeUtil::Compatible(arguments[i]->shape(),
program_shape.parameters(i))) {
return InvalidArgument(
"computation expects parameter %d to have shape %s, given shape %s",
"computation expects parameter %lu to have shape %s, given shape %s",
i, ShapeUtil::HumanString(program_shape.parameters(i)).c_str(),
ShapeUtil::HumanString(arguments[i]->shape()).c_str());
}
@@ -383,7 +385,8 @@ StatusOr<std::vector<std::unique_ptr<Executable>>> Service::BuildExecutables(
hlo_dumper, std::move(executors)));

if (!other_directory_path.empty()) {
for (int64 i = 0; i < versioned_handles.size(); ++i) {
for (std::vector<VersionedComputationHandle>::size_type i = 0;
i < versioned_handles.size(); ++i) {
executables[i]->set_session_module(std::move(session_modules[i]));
}
}
@@ -523,7 +526,8 @@

// Asynchronously launch all executables.
std::vector<GlobalDataHandle> result_handles;
for (int64 i = 0; i < executables.size(); i++) {
for (tensorflow::gtl::ArraySlice<Executable*>::size_type i = 0;
i < executables.size(); i++) {
TF_ASSIGN_OR_RETURN(
perftools::gputools::DeviceMemoryBase result,
executables[i]->ExecuteAsyncOnStream(&run_options[i], arguments[i]));
6 changes: 5 additions & 1 deletion tensorflow/contrib/android/BUILD
@@ -72,13 +72,17 @@ LINKER_SCRIPT = "//tensorflow/contrib/android:jni/version_script.lds"
cc_binary(
name = "libtensorflow_inference.so",
srcs = [],
copts = tf_copts(),
copts = tf_copts() + [
"-ffunction-sections",
"-fdata-sections",
],
linkopts = if_android([
"-landroid",
"-llog",
"-lm",
"-z defs",
"-s",
"-Wl,--gc-sections",
"-Wl,--version-script", # This line must be directly followed by LINKER_SCRIPT.
LINKER_SCRIPT,
]),
4 changes: 3 additions & 1 deletion tensorflow/contrib/cmake/CMakeLists.txt
@@ -56,9 +56,10 @@ mark_as_advanced(DOWNLOAD_LOCATION)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
add_definitions(-DEIGEN_AVOID_STL_ARRAY)
if(WIN32)
add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00 -DLANG_CXX11 -DCOMPILER_MSVC -D__VERSION__=\"MSVC\")
add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00 -DLANG_CXX11 -DCOMPILER_MSVC)
add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN64 -DWIN32_LEAN_AND_MEAN -DNOGDI -DPLATFORM_WINDOWS)
add_definitions(-DTENSORFLOW_USE_EIGEN_THREADPOOL -DEIGEN_HAS_C99_MATH -D_ITERATOR_DEBUG_LEVEL=0)
add_definitions(-DTF_COMPILE_LIBRARY)
add_definitions(-DNDEBUG /O2) # Equivalent of -c opt in Bazel.
add_definitions(/bigobj /nologo /EHsc /GF /FC /MP /Gm-)
# Suppress warnings to reduce build log size.
@@ -190,6 +191,7 @@ if (tensorflow_ENABLE_GPU)
${CUDA_TOOLKIT_TARGET_DIR}/include/cuda.h ${CUDA_TOOLKIT_TARGET_DIR}/include/cuComplex.h
${CUDA_TOOLKIT_TARGET_DIR}/include/cublas_v2.h ${CUDNN_HOME}/include/cudnn.h
${CUDA_TOOLKIT_TARGET_DIR}/include/cufft.h ${CUDA_TOOLKIT_TARGET_DIR}/include/curand.h
${CUDA_TOOLKIT_TARGET_DIR}/include/cuda_runtime_api.h
DESTINATION ${tensorflow_source_dir}/third_party/gpus/cuda/include
)
include_directories(${tensorflow_source_dir}/third_party/gpus)
2 changes: 1 addition & 1 deletion tensorflow/contrib/cmake/README.md
@@ -13,7 +13,7 @@ Linux.
Current Status
--------------

CMake can be used to build TensorFlow on Windows. See the [getting started documentation](https://www.tensorflow.org/get_started/os_setup.html#pip-installation-on-windows)
CMake can be used to build TensorFlow on Windows. See the [getting started documentation](https://www.tensorflow.org/install/install_windows)
for instructions on how to install a pre-built TensorFlow package on Windows.

### Current known limitations
40 changes: 40 additions & 0 deletions tensorflow/contrib/cmake/tf_cc_ops.cmake
@@ -120,3 +120,43 @@ list(REMOVE_ITEM tf_cc_srcs ${tf_cc_test_srcs})

add_library(tf_cc OBJECT ${tf_cc_srcs})
add_dependencies(tf_cc tf_cc_framework tf_cc_ops)

set (pywrap_tensorflow_lib "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE}/pywrap_tensorflow_internal.lib")
add_custom_target(tf_extension_ops)

function(AddUserOps)
cmake_parse_arguments(_AT "" "" "TARGET;SOURCES;GPUSOURCES;DEPENDS;DISTCOPY" ${ARGN})
if (tensorflow_ENABLE_GPU AND _AT_GPUSOURCES)
# if gpu build is enabled and we have gpu specific code,
# hint to cmake that this needs to go to nvcc
set (gpu_source ${_AT_GPUSOURCES})
set (gpu_lib "${_AT_TARGET}_gpu")
set_source_files_properties(${gpu_source} PROPERTIES CUDA_SOURCE_PROPERTY_FORMAT OBJ)
cuda_compile(gpu_lib ${gpu_source})
endif()
# create shared library from source and cuda obj
add_library(${_AT_TARGET} SHARED ${_AT_SOURCES} ${gpu_lib})
target_link_libraries(${_AT_TARGET} ${pywrap_tensorflow_lib})
if(WIN32)
if (tensorflow_ENABLE_GPU AND _AT_GPUSOURCES)
# some ops call out to cuda directly; need to link libs for the cuda dlls
target_link_libraries(${_AT_TARGET} ${CUDA_LIBRARIES})
endif()
if (_AT_DISTCOPY)
add_custom_command(TARGET ${_AT_TARGET} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:${_AT_TARGET}> ${_AT_DISTCOPY}/)
endif()
endif()
if (_AT_DEPENDS)
add_dependencies(${_AT_TARGET} ${_AT_DEPENDS})
endif()
# make sure TF_COMPILE_LIBRARY is not defined for this target
get_target_property(target_compile_flags ${_AT_TARGET} COMPILE_FLAGS)
if(target_compile_flags STREQUAL "target_compile_flags-NOTFOUND")
set(target_compile_flags "/UTF_COMPILE_LIBRARY")
else()
set(target_compile_flags "${target_compile_flags} /UTF_COMPILE_LIBRARY")
endif()
set_target_properties(${_AT_TARGET} PROPERTIES COMPILE_FLAGS ${target_compile_flags})
add_dependencies(tf_extension_ops ${_AT_TARGET})
endfunction(AddUserOps)