TTENSOR: Improve test coverage and fix the corresponding bugs discovered.
nickj committed Feb 4, 2023
1 parent 9f48eb2 commit 3ac89ea
Showing 2 changed files with 186 additions and 23 deletions.
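
For context on the diff below: a ttensor stores a Tucker decomposition as a core tensor plus one factor matrix per mode, and full() recomposes the dense tensor by applying each factor matrix along its mode. A minimal numpy sketch of that recomposition, with shapes and variable names invented here for illustration rather than taken from pyttb:

import numpy as np

# Hypothetical 3-way Tucker pieces: a 2x3x4 core and one factor matrix per mode.
G = np.random.random((2, 3, 4))
U = [np.random.random((5, 2)), np.random.random((6, 3)), np.random.random((7, 4))]

# Multiply the core by each factor along its mode; conceptually this is what
# ttensor.full() computes (expressed here with einsum purely for illustration).
X = np.einsum("abc,ia,jb,kc->ijk", G, U[0], U[1], U[2])
print(X.shape)  # (5, 6, 7)
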
33 changes: 16 additions & 17 deletions pyttb/ttensor.py
@@ -15,6 +15,8 @@
import textwrap
import warnings

ALT_CORE_ERROR = "TTensor doesn't support non-tensor cores yet"

class ttensor(object):
"""
TTENSOR Class for Tucker tensors (decomposed).
@@ -55,7 +57,7 @@ def from_data(cls, core, factors):
>>> import numpy as np
Set up input data
# Create sptensor with explicit data description
# Create ttensor with explicit data description
>>> core_values = np.ones((2,2,2))
>>> core = ttb.tensor.from_data(core_values)
@@ -98,6 +100,8 @@ def _validate_ttensor(self):
"""
# Confirm all factors are matrices
for factor_idx, factor in enumerate(self.u):
if not isinstance(factor, np.ndarray):
raise ValueError(f"Factor matrices must be numpy arrays but factor {factor_idx} was {type(factor)}")
if len(factor.shape) != 2:
raise ValueError(f"Factor matrix {factor_idx} has shape {factor.shape} and is not a matrix!")

@@ -156,7 +160,7 @@ def full(self):

# There is a small chance tensor could be sparse so ensure we cast that to dense.
if not isinstance(recomposed_tensor, tensor):
recomposed_tensor = tensor(recomposed_tensor)
raise ValueError(ALT_CORE_ERROR)
return recomposed_tensor

def double(self):
@@ -331,7 +335,7 @@ def ttv(self, vector, dims=None):
# Check that each multiplicand is the right size.
for i in range(dims.size):
if vector[vidx[i]].shape != (self.shape[dims[i]],):
assert False, "Multiplicand is wrong size"
raise ValueError("Multiplicand is wrong size")

# Get remaining dimensions when we're done
remdims = np.setdiff1d(np.arange(0, self.ndims), dims)
@@ -364,11 +368,7 @@ def mttkrp(self, U, n):
-------
:class:`numpy.ndarray`
"""

if n == 0:
R = U[1].shape[-1]
else:
R = U[0].shape[-1]
# NOTE: MATLAB version calculates an unused R here

W = [None] * self.ndims
for i in range(0, self.ndims):
@@ -444,10 +444,6 @@ def ttm(self, matrix, dims=None, transpose=False):

# Check that each multiplicand is the right size.
for i in range(len(dims)):
import logging
logging.warning(
f"Matrix shape: \n\t{matrix[vidx[i]].shape}"
)
if matrix[vidx[i]].shape[size_idx] != self.shape[dims[i]]:
raise ValueError(f"Multiplicand {i} is wrong size")

@@ -482,7 +478,7 @@ def reconstruct(self, samples=None, modes=None):
if modes is None:
modes = np.arange(self.ndims)
elif isinstance(modes, list):
modes = np.array([modes])
modes = np.array(modes)
elif np.isscalar(modes):
modes = np.array([modes])

@@ -500,7 +496,10 @@

full_samples = [np.array([])] * self.ndims
for sample, mode in zip(samples, modes):
full_samples[mode] = sample
if np.isscalar(sample):
full_samples[mode] = np.array([sample])
else:
full_samples[mode] = sample

shape = self.shape
new_u = []
@@ -510,7 +509,7 @@
new_u.append(self.u[k])
continue
elif len(full_samples[k].shape) == 2 and full_samples[k].shape[-1] == shape[k]:
new_u.append(full_samples[k] * self.u[k])
new_u.append(full_samples[k].dot(self.u[k]))
else:
new_u.append(self.u[k][full_samples[k], :])

@@ -540,14 +539,14 @@ def nvecs(self, n, r, flipsign = True):
H = self.core.ttm(V)

if isinstance(H, sptensor):
raise NotImplementedError("TTensor doesn't support sparse core yet")
raise NotImplementedError(ALT_CORE_ERROR)
else:
HnT = tenmat.from_tensor_type(H.full(), cdims=np.array([n])).double()

G = self.core

if isinstance(G, sptensor):
raise NotImplementedError("TTensor doesn't support sparse core yet")
raise NotImplementedError(ALT_CORE_ERROR)
else:
GnT = tenmat.from_tensor_type(G.full(), cdims=np.array([n])).double()

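
To make the new error paths in pyttb/ttensor.py concrete, here is a rough usage sketch mirroring the negative tests below. It assumes the from_data constructor shown in the docstring above; the factor shapes are chosen only for illustration.

import numpy as np
import pyttb as ttb

core = ttb.tensor.from_data(np.ones((2, 2, 2)))
factors = [np.ones((4, 2)) for _ in range(3)]  # one factor matrix per mode
tt = ttb.ttensor.from_data(core, factors)      # valid: all factors are numpy matrices

# The added isinstance check rejects any factor that is not a numpy array.
bad_factors = factors[:-1] + [1]
try:
    ttb.ttensor.from_data(core, bad_factors)
except ValueError as err:
    print(err)  # e.g. "Factor matrices must be numpy arrays but factor 2 was <class 'int'>"
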
176 changes: 170 additions & 6 deletions tests/test_ttensor.py
@@ -5,6 +5,7 @@
import numpy as np
import pyttb as ttb
import pytest
import scipy.sparse as sparse

@pytest.fixture()
def sample_ttensor():
@@ -41,6 +42,30 @@ def test_ttensor_initialization_from_data(sample_ttensor):
assert isinstance(ttensorInstance.core, ttb.tensor)
assert all([isinstance(a_factor, np.ndarray) for a_factor in ttensorInstance.u])

# Negative Tests
non_array_factor = ttensorInstance.u + [1]
with pytest.raises(ValueError):
ttb.ttensor.from_data(ttensorInstance.core, non_array_factor[1:])

non_matrix_factor = ttensorInstance.u + [np.array([1])]
with pytest.raises(ValueError):
ttb.ttensor.from_data(ttensorInstance.core, non_matrix_factor[1:])

too_few_factors = ttensorInstance.u.copy()
too_few_factors.pop()
with pytest.raises(ValueError):
ttb.ttensor.from_data(ttensorInstance.core, too_few_factors)

wrong_shape_factor = ttensorInstance.u.copy()
row, col = wrong_shape_factor[0].shape
wrong_shape_factor[0] = np.random.random((row+1, col+1))
with pytest.raises(ValueError):
ttb.ttensor.from_data(ttensorInstance.core, wrong_shape_factor)

# Enforce error until sptensor core/other cores supported
with pytest.raises(ValueError):
ttb.ttensor.from_data(ttb.sptensor.from_tensor_type(ttensorInstance.core), ttensorInstance.u)

@pytest.mark.indevelopment
def test_ttensor_initialization_from_tensor_type(sample_ttensor):

@@ -58,6 +83,17 @@ def test_ttensor_full(sample_ttensor):
# This sanity check only works for all 1's
assert tensor.double() == np.prod(ttensorInstance.core.shape)

# Negative tests
sparse_core = ttb.sptensor()
sparse_core.shape = ttensorInstance.core.shape
sparse_u = [sparse.coo_matrix(np.zeros(factor.shape)) for factor in ttensorInstance.u]
# We could probably make these properties to avoid this edge case but expect to eventually cover these alternate
# cores
ttensorInstance.core = sparse_core
ttensorInstance.u = sparse_u
with pytest.raises(ValueError):
ttensorInstance.full()

@pytest.mark.indevelopment
def test_ttensor_double(sample_ttensor):
ttensorInstance = sample_ttensor
@@ -87,17 +123,45 @@ def test_sptensor__neg__(sample_ttensor):
assert ttensorInstance.isequal(ttensorInstance3)

@pytest.mark.indevelopment
def test_ttensor_innerproduct(sample_ttensor):
def test_ttensor_innerproduct(sample_ttensor, random_ttensor):
ttensorInstance = sample_ttensor

# TODO: these are overly simplistic edge cases for ttensors that are a single float

# ttensor innerprod ttensor
assert ttensorInstance.innerprod(ttensorInstance) == ttensorInstance.double()**2
core_dim = ttensorInstance.core.shape[0] + 1
ndim = ttensorInstance.ndims
large_core_ttensor = ttb.ttensor.from_data(
ttb.tensor.from_data(np.ones((core_dim,)*ndim)),
[np.ones((1, core_dim))] * ndim
)
assert large_core_ttensor.innerprod(ttensorInstance) == ttensorInstance.full().innerprod(large_core_ttensor.full())

# ttensor innerprod tensor
assert ttensorInstance.innerprod(ttensorInstance.full()) == ttensorInstance.double() ** 2

# ttensor innerprod ktensor
ktensorInstance = ttb.ktensor.from_data(np.array([8.]), [np.array([[1.]])]*3)
assert ttensorInstance.innerprod(ktensorInstance) == ttensorInstance.double() ** 2

# ttensor innerprod tensor (shape larger than core)
random_ttensor.innerprod(random_ttensor.full())

# Negative Tests
ttensor_extra_factors = ttb.ttensor.from_tensor_type(ttensorInstance)
ttensor_extra_factors.u.extend(ttensorInstance.u)
with pytest.raises(ValueError):
ttensorInstance.innerprod(ttensor_extra_factors)

tensor_extra_dim = ttb.tensor.from_data(np.ones(ttensorInstance.shape + (1,)))
with pytest.raises(ValueError):
ttensorInstance.innerprod(tensor_extra_dim)

invalid_option = []
with pytest.raises(ValueError):
ttensorInstance.innerprod(invalid_option)

@pytest.mark.indevelopment
def test_ttensor__mul__(sample_ttensor):
ttensorInstance = sample_ttensor
@@ -107,6 +171,10 @@ def test_ttensor__mul__(sample_ttensor):
assert (ttensorInstance * mul_factor).double() == np.prod(ttensorInstance.core.shape) * mul_factor
assert (ttensorInstance * float(2)).double() == np.prod(ttensorInstance.core.shape) * float(mul_factor)

# Negative tests
with pytest.raises(ValueError):
_ = ttensorInstance * 'some_string'

@pytest.mark.indevelopment
def test_ttensor__rmul__(sample_ttensor):
ttensorInstance = sample_ttensor
@@ -116,6 +184,10 @@ def test_ttensor__rmul__(sample_ttensor):
assert (mul_factor * ttensorInstance).double() == np.prod(ttensorInstance.core.shape) * mul_factor
assert (float(2) * ttensorInstance).double() == np.prod(ttensorInstance.core.shape) * float(mul_factor)

# Negative tests
with pytest.raises(ValueError):
_ = 'some_string' * ttensorInstance

@pytest.mark.indevelopment
def test_ttensor_ttv(sample_ttensor):
ttensorInstance = sample_ttensor
@@ -124,6 +196,17 @@ def test_ttensor_ttv(sample_ttensor):
final_value = sample_ttensor.ttv(trivial_vectors)
assert final_value == np.prod(ttensorInstance.core.shape)

assert np.allclose(
ttensorInstance.ttv(trivial_vectors[0], 0).double(),
ttensorInstance.full().ttv(trivial_vectors[0], 0).double()
)

# Negative tests
wrong_shape_vector = trivial_vectors.copy()
wrong_shape_vector[0] = np.array([mul_factor, mul_factor])
with pytest.raises(ValueError):
sample_ttensor.ttv(wrong_shape_vector)

@pytest.mark.indevelopment
def test_ttensor_mttkrp(random_ttensor):
ttensorInstance = random_ttensor
@@ -133,23 +216,32 @@ def test_ttensor_mttkrp(random_ttensor):
]
final_value = ttensorInstance.mttkrp(vectors, 2)
full_value = ttensorInstance.full().mttkrp(vectors, 2)
assert np.all(np.isclose(final_value, full_value)), (
assert np.allclose(final_value, full_value), (
f"TTensor value is: \n{final_value}\n\n"
f"Full value is: \n{full_value}"
)

@pytest.mark.indevelopment
def test_ttensor_norm(random_ttensor):
def test_ttensor_norm(sample_ttensor, random_ttensor):
ttensorInstance = random_ttensor
assert np.isclose(ttensorInstance.norm(), ttensorInstance.full().norm())

# Core larger than full tensor
ttensorInstance = sample_ttensor
assert np.isclose(ttensorInstance.norm(), ttensorInstance.full().norm())

@pytest.mark.indevelopment
def test_ttensor_permute(random_ttensor):
ttensorInstance = random_ttensor
original_order = np.arange(0, len(ttensorInstance.core.shape))
permuted_tensor = ttensorInstance.permute(original_order)
assert ttensorInstance.isequal(permuted_tensor)

# Negative Tests
with pytest.raises(ValueError):
bad_permutation_order = np.arange(0, len(ttensorInstance.core.shape) + 1)
ttensorInstance.permute(bad_permutation_order)

@pytest.mark.indevelopment
def test_ttensor_ttm(random_ttensor):
ttensorInstance = random_ttensor
@@ -163,19 +255,91 @@ def test_ttensor_ttm(random_ttensor):
f"TTensor value is: \n{final_value}\n\n"
f"Full value is: \n{reverse_value}"
)
final_value = ttensorInstance.ttm(matrices) # No dims
assert final_value.isequal(reverse_value)
final_value = ttensorInstance.ttm(matrices, list(range(len(matrices)))) # Dims as list
assert final_value.isequal(reverse_value)


single_tensor_result = ttensorInstance.ttm(matrices[0], 0)
single_tensor_full_result = ttensorInstance.full().ttm(matrices[0], 0)
assert np.allclose(single_tensor_result.double(), single_tensor_full_result.double()), (
f"TTensor value is: \n{single_tensor_result.full()}\n\n"
f"Full value is: \n{single_tensor_full_result}"
)

transposed_matrices = [matrix.transpose() for matrix in matrices]
transpose_value = ttensorInstance.ttm(transposed_matrices, np.arange(len(matrices)), transpose=True)
assert final_value.isequal(transpose_value)

# Negative Tests
big_wrong_size = 123
matrices[0] = np.random.random((big_wrong_size, big_wrong_size))
with pytest.raises(ValueError):
_ = ttensorInstance.ttm(matrices, np.arange(len(matrices)))


@pytest.mark.indevelopment
def test_ttensor_reconstruct(random_ttensor):
ttensorInstance = random_ttensor
# TODO: This slice drops the singleton dimension, should it? If so should ttensor squeeze during reconstruct?
full_slice = ttensorInstance.full()[:, 1, :]
ttensor_slice = ttensorInstance.reconstruct(1, 1)
assert np.all(np.isclose(full_slice.double(), ttensor_slice.squeeze().double()))
assert np.allclose(full_slice.double(), ttensor_slice.squeeze().double())
assert ttensorInstance.reconstruct().isequal(ttensorInstance.full())
sample_all_modes = [np.array([0])] * len(ttensorInstance.shape)
sample_all_modes[-1] = 0 # Make raw scalar
reconstruct_scalar = ttensorInstance.reconstruct(sample_all_modes).full().double()
full_scalar = ttensorInstance.full()[tuple(sample_all_modes)]
assert np.isclose(reconstruct_scalar, full_scalar)

scale = np.random.random(ttensorInstance.u[1].shape).transpose()
_ = ttensorInstance.reconstruct(scale, 1)
# FIXME: from the MATLAB docs it wasn't totally clear how to validate this

# Negative Tests
with pytest.raises(ValueError):
_ = ttensorInstance.reconstruct(1, [0, 1])

@pytest.mark.indevelopment
def test_ttensor_nvecs(random_ttensor):
ttensorInstance = random_ttensor
ttensor_eigvals = ttensorInstance.nvecs(0, 2)
full_eigvals = ttensorInstance.full().nvecs(0, 2)
n = 0
r = 2
ttensor_eigvals = ttensorInstance.nvecs(n, r)
full_eigvals = ttensorInstance.full().nvecs(n, r)
assert np.allclose(ttensor_eigvals, full_eigvals)

# Test for eig vals larger than shape-1
n = 1
r = 2
full_eigvals = ttensorInstance.full().nvecs(n, r)
with pytest.warns(Warning) as record:
ttensor_eigvals = ttensorInstance.nvecs(n, r)
assert 'Greater than or equal to tensor.shape[n] - 1 eigenvectors requires cast to dense to solve' \
in str(record[0].message)
assert np.allclose(ttensor_eigvals, full_eigvals)

# Negative Tests
sparse_core = ttb.sptensor()
sparse_core.shape = ttensorInstance.core.shape
ttensorInstance.core = sparse_core

# Sparse core
with pytest.raises(NotImplementedError):
ttensorInstance.nvecs(0, 1)

# Sparse factors
sparse_u = [sparse.coo_matrix(np.zeros(factor.shape)) for factor in ttensorInstance.u]
ttensorInstance.u = sparse_u
with pytest.raises(NotImplementedError):
ttensorInstance.nvecs(0, 1)

@pytest.mark.indevelopment
def test_sptensor_isequal(sample_ttensor):
ttensorInstance = sample_ttensor
# Negative Tests
assert not ttensorInstance.isequal(ttensorInstance.full())
ttensor_extra_factors = ttb.ttensor.from_tensor_type(ttensorInstance)
ttensor_extra_factors.u.extend(ttensorInstance.u)
assert not ttensorInstance.isequal(ttensor_extra_factors)
