Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Assume row-major order for indices #10

Merged
merged 8 commits into from
May 20, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .github/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@

### Breaking Changes

* Indices are now specified in row-major order. [(#10)](https://github.com/XanaduAI/jet/pull/10)

### Bug Fixes

* The output of `TensorNetwork::Contract()` and `TaskBasedCpuContractor::Contract()` now agree with one another. [(#6)](https://github.com/XanaduAI/jet/pull/6)
Expand Down
12 changes: 6 additions & 6 deletions include/jet/Tensor.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -234,7 +234,7 @@ Tensor<T> Reshape(const Tensor<T> &old_tensor,
using namespace Utilities;

JET_ABORT_IF_NOT(old_tensor.GetSize() ==
TensorHelpers::ShapeToSize(new_shape),
Jet::Utilities::ShapeToSize(new_shape),
"Size is inconsistent between tensors.");
Tensor<T> new_tensor(new_shape);
Utilities::FastCopy(old_tensor.GetData(), new_tensor.GetData());
Expand Down Expand Up @@ -482,7 +482,7 @@ template <class T> class Tensor {
* @param shape Dimension of each `%Tensor` index.
*/
Tensor(const std::vector<size_t> &shape)
: data_(TensorHelpers::ShapeToSize(shape))
: data_(Jet::Utilities::ShapeToSize(shape))
{
using namespace Utilities;
std::vector<std::string> indices(shape.size());
Expand All @@ -504,7 +504,7 @@ template <class T> class Tensor {
*/
Tensor(const std::vector<std::string> &indices,
const std::vector<size_t> &shape)
: data_(TensorHelpers::ShapeToSize(shape))
: data_(Jet::Utilities::ShapeToSize(shape))
{
InitIndicesAndShape(indices, shape);
}
Expand Down Expand Up @@ -690,7 +690,7 @@ template <class T> class Tensor {
/**
* @brief Sets the `%Tensor` data value at the given n-dimensional index.
*
* @param indices n-dimensional `%Tensor` data index.
* @param indices n-dimensional `%Tensor` data index in row-major order.
* @param value Data value to set at given index.
*/
void SetValue(const std::vector<size_t> &indices, const T &value)
Expand All @@ -701,7 +701,7 @@ template <class T> class Tensor {
/**
* @brief Returns the `%Tensor` data value at the given n-dimensional index.
*
* @param indices n-dimensional `%Tensor` data index.
* @param indices n-dimensional `%Tensor` data index in row-major order.
*
* @returns Complex data value.
*/
Expand Down Expand Up @@ -737,7 +737,7 @@ template <class T> class Tensor {
*
* @return Number of data elements.
*/
size_t GetSize() const { return TensorHelpers::ShapeToSize(shape_); }
size_t GetSize() const { return Jet::Utilities::ShapeToSize(shape_); }

/**
* @brief Returns a single scalar value from the `%Tensor` object.
Expand Down
13 changes: 0 additions & 13 deletions include/jet/TensorHelpers.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -167,18 +167,5 @@ inline void MultiplyTensorData(const std::vector<ComplexPrecision> &A,
}
}

/**
* @brief Calulate the size of data from the tensor size.
*
* @param tensor_shape Size of each tensor index label.
*/
inline size_t ShapeToSize(const std::vector<size_t> &tensor_shape)
{
size_t total_dim = 1;
for (const auto &dim : tensor_shape)
total_dim *= dim;
return total_dim;
}

}; // namespace TensorHelpers
}; // namespace Jet
85 changes: 48 additions & 37 deletions include/jet/Utilities.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -402,61 +402,72 @@ inline size_t Factorial(size_t n)
}

/**
 * @brief Returns the size of a shape.
 *
 * @param shape Index dimensions.
 * @return Product of the index dimensions in the shape.
 */
inline size_t ShapeToSize(const std::vector<size_t> &shape) noexcept
{
    // An empty (rank-0) shape has size 1, matching the scalar convention.
    size_t size = 1;
    for (const auto &dim : shape) {
        size *= dim;
    }
    return size;
}

/**
* @brief Converts a linear index into a multi-dimensional index.
*
* @warning If the given list of index dimension sizes is empty, the provided
* linear index is returned (wrapped in a vector of size 1).
* The multi-dimensional index is written in row-major order.
*
* @param linear_index Linear index to be unraveled.
* @param multi_index_sizes Maximum size of each index dimension.
* Example: To compute the multi-index (i, j) of an element in a 2x2 matrix
* given a linear index of 2, `shape` would be {2, 2} and the result
* would be `{1, 0}`.
* \code{.cpp}
* std::vector<size_t> multi_index = UnravelIndex(2, {2, 2}); // {1, 0}
* \endcode
*
* @param index Linear index to be unraveled.
* @param shape Size of each index dimension.
* @return Multi-index associated with the linear index.
*/
inline std::vector<size_t>
UnravelIndex(unsigned long long linear_index,
const std::vector<size_t> &multi_index_sizes)
inline std::vector<size_t> UnravelIndex(unsigned long long index,
const std::vector<size_t> &shape)
{
if (multi_index_sizes.empty()) {
return {linear_index};
}

std::vector<size_t> multi_index(multi_index_sizes.size());
multi_index[0] = linear_index % multi_index_sizes[0];
const size_t size = ShapeToSize(shape);
JET_ABORT_IF(size <= index, "Linear index does not fit in the shape.");

for (size_t i = 1; i < multi_index.size(); i++) {
linear_index -= multi_index[i - 1];
linear_index /= multi_index_sizes[i - 1];
multi_index[i] = linear_index % multi_index_sizes[i];
std::vector<size_t> multi_index(shape.size());
for (int i = multi_index.size() - 1; i >= 0; i--) {
multi_index[i] = index % shape[i];
index /= shape[i];
}
return multi_index;
}

/**
* Converts a multi-dimensional index into a linear index.
* @brief Converts a multi-dimensional index into a linear index.
*
* @see UnravelIndex
* @note This function is the inverse of UnravelIndex().
*
* @param multi_index Multi-index to be raveled.
* @param multi_index_sizes Maximum size of each index dimension.
* @return Smallest linear index associated with the multi-index.
* @param index Multi-index to be raveled, expressed in row-major order.
* @param shape Size of each index dimension.
* @return Linear index associated with the multi-index.
*/
inline unsigned long long
RavelIndex(const std::vector<size_t> &multi_index,
const std::vector<size_t> &multi_index_sizes)
inline unsigned long long RavelIndex(const std::vector<size_t> &index,
const std::vector<size_t> &shape)
{
JET_ABORT_IF_NOT(multi_index.size() == multi_index_sizes.size(),
"Size of multi-index and index dimensions must match.");
JET_ABORT_IF_NOT(index.size() == shape.size(),
"Number of index and shape dimensions must match.");

unsigned long long linear_index = 0;
size_t multiplier = 1;
for (size_t i = 0; i < multi_index_sizes.size(); i++) {
linear_index += (multi_index[i] % multi_index_sizes[i]) * multiplier;
multiplier *= multi_index_sizes[i];

unsigned long long linear_index = 0;
for (int i = index.size() - 1; i >= 0; i--) {
JET_ABORT_IF(index[i] >= shape[i], "Index does not fit in the shape.");
linear_index += index[i] * multiplier;
multiplier *= shape[i];
}
return linear_index;
}
Expand Down
4 changes: 2 additions & 2 deletions python/src/Tensor.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ template <class T> void AddBindingsForTensor(py::module_ &m, const char *name)
Returns the tensor data value at the given n-dimensional index.

Args:
indices: n-dimensional tensor data index.
indices: n-dimensional tensor data index in row-major order.

Returns:
Complex data value at the given index.
Expand All @@ -215,7 +215,7 @@ template <class T> void AddBindingsForTensor(py::module_ &m, const char *name)
Sets the tensor data value at the given n-dimensional index.

Args:
indices: n-dimensional tensor data index.
indices: n-dimensional tensor data index in row-major order.
value: value to set at the data index.
)");

Expand Down
4 changes: 2 additions & 2 deletions python/tests/test_tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,8 +85,8 @@ def test_get_value(self, Tensor):
"""Tests that the value of a tensor at a multi-dimensional index can be retrieved."""
tensor = Tensor(shape=[2, 2], indices=["i", "j"], data=range(4))
assert tensor.get_value(indices=[0, 0]) == 0
assert tensor.get_value(indices=[1, 0]) == 1
assert tensor.get_value(indices=[0, 1]) == 2
assert tensor.get_value(indices=[0, 1]) == 1
assert tensor.get_value(indices=[1, 0]) == 2
assert tensor.get_value(indices=[1, 1]) == 3

def test_set_value(self, Tensor):
Expand Down
32 changes: 11 additions & 21 deletions test/Test_Tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include <catch2/catch.hpp>

#include "jet/Tensor.hpp"
#include "jet/Utilities.hpp"

using c_fp64 = std::complex<double>;
using c_fp32 = std::complex<float>;
Expand Down Expand Up @@ -274,19 +275,19 @@ TEST_CASE("Tensor::FillRandom", "[Tensor]")

TEST_CASE("Tensor::GetValue", "[Tensor]")
{
    // Shape {2, 3}: two rows of three columns; data is laid out in
    // row-major order, so index {r, c} maps to data[r * 3 + c].
    std::vector<std::size_t> t_shape{2, 3};
    std::vector<std::string> t_indices{"a", "b"};
    std::vector<c_fp32> data{{0, 0.5}, {1, 0.5}, {2, 0.5},
                             {3, 0.5}, {4, 0.5}, {5, 0.5}};

    Tensor tensor(t_indices, t_shape, data);

    CHECK(tensor.GetValue({0, 0}) == data[0]);
    CHECK(tensor.GetValue({0, 1}) == data[1]);
    CHECK(tensor.GetValue({0, 2}) == data[2]);
    CHECK(tensor.GetValue({1, 0}) == data[3]);
    CHECK(tensor.GetValue({1, 1}) == data[4]);
    CHECK(tensor.GetValue({1, 2}) == data[5]);
}

TEST_CASE("Tensor::RenameIndex", "[Tensor]")
Expand Down Expand Up @@ -316,17 +317,6 @@ TEST_CASE("Tensor::SetValue", "[Tensor]")
CHECK(tensor.GetData() == data_expected);
}

TEST_CASE("Inline helper ShapeToSize", "[Tensor]")
{
std::vector<std::size_t> t_shape_1{2, 3, 4};
std::vector<std::size_t> t_shape_2{3, 4, 2};
std::vector<std::size_t> t_shape_3{2, 2};

CHECK(TensorHelpers::ShapeToSize(t_shape_1) == 24);
CHECK(TensorHelpers::ShapeToSize(t_shape_2) == 24);
CHECK(TensorHelpers::ShapeToSize(t_shape_3) == 4);
}

TEST_CASE("Inline helper MultiplyTensorData", "[Tensor]")
{
SECTION("Matrix-vector product")
Expand Down Expand Up @@ -399,17 +389,17 @@ TEMPLATE_TEST_CASE("ContractTensors", "[Tensor]", c_fp32, c_fp64)
{"j"}, {2},
{
r_ji.GetValue({0, 0}) * s_i.GetValue({0}) +
r_ji.GetValue({0, 1}) * s_i.GetValue({1}),
r_ji.GetValue({1, 0}) * s_i.GetValue({0}) +
r_ji.GetValue({1, 0}) * s_i.GetValue({1}),
r_ji.GetValue({0, 1}) * s_i.GetValue({0}) +
r_ji.GetValue({1, 1}) * s_i.GetValue({1}),
});
// R_{j,i} S_i == S_i R_{i,j}
Tensor<TestType> expected_rji_si(
{"j"}, {2},
{
r_ji.GetValue({0, 0}) * s_i.GetValue({0}) +
r_ji.GetValue({1, 0}) * s_i.GetValue({1}),
r_ji.GetValue({0, 1}) * s_i.GetValue({0}) +
r_ji.GetValue({0, 1}) * s_i.GetValue({1}),
r_ji.GetValue({1, 0}) * s_i.GetValue({0}) +
r_ji.GetValue({1, 1}) * s_i.GetValue({1}),
});

Expand Down Expand Up @@ -671,6 +661,6 @@ TEST_CASE("Reshape", "[Tensor]")
Tensor tensor_r({"?a", "?b"}, {3, 2}, t_data);
CHECK_THROWS_WITH(Reshape(tensor, {3, 3}),
Contains("Size is inconsistent between tensors."));
CHECK(tensor_r.GetSize() != TensorHelpers::ShapeToSize({3, 3}));
CHECK(tensor_r.GetSize() != Jet::Utilities::ShapeToSize({3, 3}));
}
}
4 changes: 2 additions & 2 deletions test/Test_TensorNetwork.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -276,7 +276,7 @@ TEST_CASE("TensorNetwork::SliceIndices", "[TensorNetwork]")

SECTION("Slice [1, :, 2]")
{
tn.SliceIndices({"A0", "C2"}, 1 + 2 * 2);
tn.SliceIndices({"A0", "C2"}, 1 * 4 + 2);
const auto &node = tn.GetNodes().front();

const std::string have_name = node.name;
Expand All @@ -302,7 +302,7 @@ TEST_CASE("TensorNetwork::SliceIndices", "[TensorNetwork]")

SECTION("Slice [1, 2, 3]")
{
tn.SliceIndices({"A0", "B1", "C2"}, 1 + 2 * 2 + 3 * 2 * 3);
tn.SliceIndices({"A0", "B1", "C2"}, 1 * 3 * 4 + 2 * 4 + 3);
const auto &node = tn.GetNodes().front();

const std::string have_name = node.name;
Expand Down
Loading