diff --git a/docs/source/ktensor.rst b/docs/source/ktensor.rst index f340daf..6bcaa4f 100644 --- a/docs/source/ktensor.rst +++ b/docs/source/ktensor.rst @@ -4,5 +4,5 @@ pyttb.ktensor .. autoclass:: pyttb.ktensor :members: :special-members: - :exclude-members: __dict__,__weakref__, __slots__, __init__ + :exclude-members: __dict__, __weakref__, __slots__, __init__ :show-inheritance: \ No newline at end of file diff --git a/docs/source/sptensor.rst b/docs/source/sptensor.rst index a4ae236..14b8afd 100644 --- a/docs/source/sptensor.rst +++ b/docs/source/sptensor.rst @@ -1,8 +1,8 @@ pyttb.sptensor ---------------------- -.. automodule:: pyttb.sptensor +.. autoclass:: pyttb.sptensor :members: :special-members: - :exclude-members: __dict__,__weakref__, __slots__, __init__ + :exclude-members: __dict__, __weakref__, __slots__, __init__ :show-inheritance: diff --git a/docs/source/tenmat.rst b/docs/source/tenmat.rst index 4a3ba08..8ab519d 100644 --- a/docs/source/tenmat.rst +++ b/docs/source/tenmat.rst @@ -1,7 +1,7 @@ pyttb.tenmat ==================== -.. automodule:: pyttb.tenmat +.. autoclass:: pyttb.tenmat :members: :special-members: :exclude-members: __dict__, __weakref__, __slots__, __init__ diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst index d24231f..8edf6b3 100644 --- a/docs/source/tensor.rst +++ b/docs/source/tensor.rst @@ -1,7 +1,7 @@ pyttb.tensor -------------------- -.. automodule:: pyttb.tensor +.. autoclass:: pyttb.tensor :members: :special-members: :exclude-members: __dict__, __weakref__, __slots__, __init__ diff --git a/docs/source/ttensor.rst b/docs/source/ttensor.rst index 2240a83..8c088b3 100644 --- a/docs/source/ttensor.rst +++ b/docs/source/ttensor.rst @@ -1,7 +1,7 @@ pyttb.ttensor -------------------- -.. automodule:: pyttb.ttensor +.. 
autoclass:: pyttb.ttensor :members: :special-members: :exclude-members: __dict__, __weakref__, __slots__, __init__ diff --git a/pyttb/ktensor.py b/pyttb/ktensor.py index a4f8335..ded3897 100644 --- a/pyttb/ktensor.py +++ b/pyttb/ktensor.py @@ -43,9 +43,8 @@ class ktensor: dimension i and r is the rank of the tensor (as well as the length of the weights vector). - Although the constructor `__init__()` can be used to create an empty - :class:`pyttb.ktensor`, there are several class methods that can be used - to create an instance of this class: + Instances of :class:`pyttb.ktensor` can be created using `__init__()` or + one of the following methods: * :meth:`from_function` * :meth:`from_vector` @@ -1279,11 +1278,11 @@ def normalize( def nvecs(self, n: int, r: int, flipsign: bool = True) -> np.ndarray: """ - Compute the leading mode-n vectors for a :class:`pyttb.ktensor`. + Compute the leading mode-n vectors of the ktensor. Computes the `r` leading eigenvectors of Xn*Xn.T (where Xn is the mode-`n` matricization/unfolding of self), which provides information - about the mode-N fibers. In two-dimensions, the `r` leading mode-1 + about the mode-n fibers. In two-dimensions, the `r` leading mode-1 vectors are the same as the `r` left singular vectors and the `r` leading mode-2 vectors are the same as the `r` right singular vectors. 
By default, this method computes the top `r` eigenvectors diff --git a/pyttb/sptensor.py b/pyttb/sptensor.py index ccae499..1bd4605 100644 --- a/pyttb/sptensor.py +++ b/pyttb/sptensor.py @@ -2511,7 +2511,7 @@ def __repr__(self): # pragma: no cover return s s = f"Sparse tensor of shape {self.shape}" - s += f" with {nz} nonzeros \n" + s += f" with {nz} nonzeros\n" # Stop insane printouts if nz > 10000: @@ -2519,7 +2519,6 @@ def __repr__(self): # pragma: no cover if r.upper() != "Y": return s for i in range(0, self.subs.shape[0]): - s += "\t" s += "[" idx = self.subs[i, :] s += str(idx.tolist())[1:] diff --git a/pyttb/tensor.py b/pyttb/tensor.py index bd6b113..a2b6eb3 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -1,4 +1,4 @@ -"""Dense Tensor Implementation""" +"""Classes and functions for working with dense tensors.""" # Copyright 2022 National Technology & Engineering Solutions of Sandia, # LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the # U.S. Government retains certain rights in this software. @@ -31,6 +31,23 @@ class tensor: """ TENSOR Class for dense tensors. + + Contains the following data members: + + ``data``: :class:`numpy.ndarray` dense array containing the data elements + of the tensor. + + Instances of :class:`pyttb.tensor` can be created using `__init__()` or + the following method: + + * :meth:`from_function` + + Examples + -------- + For all examples listed below, the following module imports are assumed: + + >>> import pyttb as ttb + >>> import numpy as np """ __slots__ = ("data", "shape") @@ -42,33 +59,42 @@ def __init__( copy: bool = True, ): """ - Creates a tensor from explicit description. Note that 1D tensors (i.e., - when len(shape)==1) contains a data array that follow the Numpy convention - of being a row vector, which is different than in the Matlab Tensor Toolbox. 
+ Creates a :class:`pyttb.tensor` from a :class:`numpy.ndarray` + Note that 1D tensors (i.e., when len(shape)==1) contains a data + array that follow the Numpy convention of being a row vector. Parameters ---------- data: - Tensor source data + Tensor source data. shape: - Shape of resulting tensor if not the same as data shape + Shape of resulting tensor if not the same as data shape. copy: - Whether to make a copy of provided data or just reference it + Whether to make a copy of provided data or just reference it. - Returns + Examples ------- - Constructed tensor + Create an empty :class:`pyttb.tensor`: - Example - ------- - >>> X = ttb.tensor(np.ones((2,2))) - >>> Y = ttb.tensor(np.ones((2,2)), shape=(4,1)) + >>> T = ttb.tensor() + >>> print(T) + empty tensor of shape () + data = [] + + Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray`: + >>> T = ttb.tensor(np.array([[1,2],[3,4]])) + >>> print(T) + tensor of shape (2, 2) + data[:, :] = + [[1 2] + [3 4]] """ if data is None: # EMPTY / DEFAULT CONSTRUCTOR self.data: np.ndarray = np.array([]) self.shape: Tuple = () return + # CONVERT A MULTIDIMENSIONAL ARRAY if not issubclass(data.dtype.type, np.number) and not issubclass( data.dtype.type, np.bool_ @@ -111,25 +137,42 @@ def from_function( shape: Tuple[int, ...], ) -> tensor: """ - Creates a tensor from a function handle and size + Construct a :class:`pyttb.tensor` whose data entries are set using + a function. Parameters ---------- - function_handle: - Function to generate data to construct tensor + fun: function, required + A function that can accept a shape (i.e., :class:`tuple` of + dimension sizes) and return a :class:`numpy.ndarray` of that shape. + Example functions include `numpy.random.random_sample`, + `numpy.zeros`, `numpy.ones`. shape: - Shape of resulting tensor + Shape of the resulting tensor. Returns ------- - Constructed tensor + Constructed tensor.
- Example - ------- - >>> X = ttb.tensor.from_function(lambda a_shape: np.ones(a_shape), (2,2)) - """ - # FUNCTION HANDLE AND SIZE + Examples + -------- + + + Create a :class:`pyttb.tensor` with entries equal to 1: + + >>> T = ttb.tensor.from_function(np.ones, (2, 3, 4)) + >>> print(T) + tensor of shape (2, 3, 4) + data[0, :, :] = + [[1. 1. 1. 1.] + [1. 1. 1. 1.] + [1. 1. 1. 1.]] + data[1, :, :] = + [[1. 1. 1. 1.] + [1. 1. 1. 1.] + [1. 1. 1. 1.]] + """ # Check size if not isinstance(shape, tuple): assert False, "TTB:BadInput, Shape must be a tuple" @@ -149,13 +192,13 @@ def copy(self) -> tensor: Examples -------- - >>> first = ttb.tensor(np.ones((3,2))) - >>> second = first - >>> third = second.copy() - >>> first[0,0] = 3 - >>> first[0,0] == second[0,0] + >>> T1 = ttb.tensor(np.ones((3,2))) + >>> T2 = T1 + >>> T3 = T2.copy() + >>> T1[0,0] = 3 + >>> T1[0,0] == T2[0,0] True - >>> first[0,0] == third[0,0] + >>> T1[0,0] == T3[0,0] False """ return ttb.tensor(self.data, self.shape, copy=True) @@ -174,21 +217,27 @@ def collapse( Parameters ---------- dims: - Dimensions to collapse + Dimensions to collapse. fun: - Method used to collapse dimensions + Method used to collapse dimensions. Returns ------- - Collapsed value + Collapsed value. - Example - ------- - >>> X = ttb.tensor(np.ones((2,2))) - >>> X.collapse() + Examples + -------- + >>> T = ttb.tensor(np.ones((2,2))) + >>> T.collapse() 4.0 - >>> X.collapse(np.arange(X.ndims), sum) + >>> T.collapse(np.array([0])) + tensor of shape (2,) + data[:] = + [2. 2.]
+ >>> T.collapse(np.arange(T.ndims), sum) 4.0 + >>> T.collapse(np.arange(T.ndims), np.prod) + 1.0 """ if self.data.size == 0: return np.array([]) @@ -220,31 +269,52 @@ def collapse( ## Form and return the final result return ttb.tensor(B, newshape, copy=False) - def contract(self, i: int, j: int) -> Union[np.ndarray, tensor]: + def contract(self, i1: int, i2: int) -> Union[np.ndarray, tensor]: """ Contract tensor along two dimensions (array trace). Parameters ---------- - i: + i1: First dimension - j: + i2: Second dimension Returns ------- - Contracted tensor + Contracted tensor. - Example - ------- - >>> X = ttb.tensor(np.ones((2,2))) - >>> X.contract(0, 1) + Examples + -------- + >>> T = ttb.tensor(np.ones((2,2))) + >>> T.contract(0, 1) 2.0 + >>> T = ttb.tensor(np.array([[[1,2],[3,4]],[[5,6],[7,8]]])) + >>> print(T) + tensor of shape (2, 2, 2) + data[0, :, :] = + [[1 2] + [3 4]] + data[1, :, :] = + [[5 6] + [7 8]] + >>> T.contract(0,1) + tensor of shape (2,) + data[:] = + [ 8. 10.] + >>> T.contract(0,2) + tensor of shape (2,) + data[:] = + [ 7. 11.] + >>> T.contract(1,2) + tensor of shape (2,) + data[:] = + [ 5. 13.] 
""" - if self.shape[i] != self.shape[j]: + if self.shape[i1] != self.shape[i2]: assert False, "Must contract along equally sized dimensions" - if i == j: + if i1 == i2: assert False, "Must contract along two different dimensions" # Easy case - returns a scalar @@ -252,7 +322,7 @@ def contract(self, i: int, j: int) -> Union[np.ndarray, tensor]: return np.trace(self.data) # Remaining dimensions after trace - remdims = np.setdiff1d(np.arange(0, self.ndims), np.array([i, j])).astype(int) + remdims = np.setdiff1d(np.arange(0, self.ndims), np.array([i1, i2])).astype(int) # Size for return newsize = tuple(np.array(self.shape)[remdims]) @@ -261,10 +331,10 @@ def contract(self, i: int, j: int) -> Union[np.ndarray, tensor]: m = np.prod(newsize) # Number of items to add for trace - n = self.shape[i] + n = self.shape[i1] # Permute trace dimensions to the end - x = self.permute(np.concatenate((remdims, np.array([i, j])))) + x = self.permute(np.concatenate((remdims, np.array([i1, i2])))) # Reshape data to be 3D data = np.reshape(x.data, (m, n, n), order="F") @@ -282,16 +352,16 @@ def contract(self, i: int, j: int) -> Union[np.ndarray, tensor]: def double(self) -> np.ndarray: """ - Convert tensor to an array of doubles + Convert tensor to an :array of doubles. Returns ------- - Copy of tensor data + Copy of tensor data. - Example - ------- - >>> X = ttb.tensor(np.ones((2,2))) - >>> X.double() + Examples + -------- + >>> T = ttb.tensor(np.ones((2,2))) + >>> T.double() array([[1., 1.], [1., 1.]]) """ @@ -299,16 +369,17 @@ def double(self) -> np.ndarray: def exp(self) -> tensor: """ - Exponential of the elements of tensor + Exponential of the elements of tensor. Returns ------- - Copy of tensor data element-wise raised to exponential + Copy of tensor data wtih the exponential function applied to data + element-wise. 
Examples -------- - >>> tensor1 = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> tensor1.exp().data # doctest: +ELLIPSIS + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T.exp().data # doctest: +ELLIPSIS array([[ 2.7182..., 7.3890... ], [20.0855..., 54.5981...]]) """ return ttb.tensor(np.exp(self.data), copy=False) def find(self) -> Tuple[np.ndarray, np.ndarray]: """ - FIND Find subscripts of nonzero elements in a tensor. + Find subscripts of nonzero elements in a tensor. - S, V = FIND(X) returns the subscripts of the nonzero values in X and a column - vector of the values. + Returns + ------- + Array of subscripts of the nonzero values in the tensor and a column + vector of the corresponding values. Examples -------- - >>> X = ttb.tensor(np.zeros((3,4,2))) - >>> larger_entries = X > 0.5 - >>> subs, vals = larger_entries.find() - - See Also - -------- - TENSOR/SUBSREF, TENSOR/SUBSASGN - - Returns - ------- - Subscripts and values for non-zero entries + >>> T = ttb.tensor(np.array([[1,2],[3,4]])) + >>> print(T) + tensor of shape (2, 2) + data[:, :] = + [[1 2] + [3 4]] + >>> T_threshold = T > 2 + >>> subs, vals = T_threshold.find() + >>> subs + array([[1, 0], + [1, 1]]) + >>> vals + array([[ True], + [ True]]) """ idx = np.nonzero(np.ravel(self.data, order="F"))[0] subs = tt_ind2sub(self.shape, idx) @@ -342,15 +418,30 @@ def find(self) -> Tuple[np.ndarray, np.ndarray]: def to_sptensor(self) -> ttb.sptensor: """ - Contruct an :class:`pyttb.sptensor` from `pyttb.tensor` + Construct a :class:`pyttb.sptensor` from `pyttb.tensor` Returns ------- Generated Sparse Tensor + + Examples + -------- + >>> T = ttb.tensor(np.array([[0,2],[3,0]])) + >>> print(T) + tensor of shape (2, 2) + data[:, :] = + [[0 2] + [3 0]] + >>> S = T.to_sptensor() + >>> print(S) + Sparse tensor of shape (2, 2) with 2 nonzeros + [1, 0] = 3 + [0, 1] = 2 """ subs, vals = self.find() return ttb.sptensor(subs, vals, self.shape, copy=False) + # TODO: do we need this, now that we have copy() and
__deepcopy__()? def full(self) -> tensor: """ Convert dense tensor to dense tensor. @@ -361,20 +452,26 @@ def full(self) -> tensor: """ return ttb.tensor(self.data) - def innerprod(self, other: Union[tensor, ttb.sptensor, ttb.ktensor]) -> float: + def innerprod( + self, other: Union[tensor, ttb.sptensor, ttb.ktensor, ttb.ttensor] + ) -> float: """ - Efficient inner product with a tensor + Efficient inner product between a tensor and other `pyttb` tensor + (`tensor`, `sptensor`, `ktensor`, or `ttensor`). Parameters ---------- other: - Tensor type to take an innerproduct with + Tensor to take an innerproduct with. Examples -------- - >>> tensor1 = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> tensor1.innerprod(tensor1) - 30 + >>> T = ttb.tensor(np.array([[1, 0], [0, 4]])) + >>> T.innerprod(T) + 17 + >>> S = T.to_sptensor() + >>> T.innerprod(S) + 17 """ if isinstance(other, ttb.tensor): if self.shape != other.shape: @@ -389,18 +486,21 @@ def innerprod(self, other: Union[tensor, ttb.sptensor, ttb.ktensor]) -> float: def isequal(self, other: Union[tensor, ttb.sptensor]) -> bool: """ - Exact equality for tensors + Exact equality for tensors. Parameters ---------- other: - Tensor to compare against + Tensor to compare against. 
Examples -------- - >>> X = ttb.tensor(np.ones((2,2))) - >>> Y = ttb.tensor(np.zeros((2,2))) - >>> X.isequal(Y) + >>> T1 = ttb.tensor(2 * np.ones((2,2))) + >>> T2 = 2 * ttb.tensor(np.ones((2,2))) + >>> T1.isequal(T2) + True + >>> T2[0,0] = 1 + >>> T1.isequal(T2) False """ if isinstance(other, ttb.tensor): @@ -434,13 +534,13 @@ def issymmetric( # noqa: PLR0912 Examples -------- - >>> X = ttb.tensor(np.ones((2,2))) - >>> X.issymmetric() + >>> T = ttb.tensor(np.ones((2,2))) + >>> T.issymmetric() True - >>> X.issymmetric(grps=np.arange(X.ndims)) + >>> T.issymmetric(grps=np.arange(T.ndims)) True >>> is_sym, diffs, perms = \ - X.issymmetric(grps=np.arange(X.ndims), version=1, return_details=True) + T.issymmetric(grps=np.arange(T.ndims), version=1, return_details=True) >>> print(f"Tensor is symmetric: {is_sym}") Tensor is symmetric: True >>> print(f"Differences in modes: {diffs}") @@ -517,56 +617,52 @@ def issymmetric( # noqa: PLR0912 return bool((all_diffs == 0).all()) return bool((all_diffs == 0).all()), all_diffs, all_perms - def logical_and(self, B: Union[float, tensor]) -> tensor: + def logical_and(self, other: Union[float, tensor]) -> tensor: """ - Logical and for tensors + Logical and for tensors. Parameters ---------- - B: - Value to and against self + other: + Value to perform and against. Examples -------- - >>> X = ttb.tensor(np.ones((2,2), dtype=bool)) - >>> X.logical_and(X).collapse() # All true + >>> T = ttb.tensor(np.ones((2,2), dtype=bool)) + >>> T.logical_and(T).collapse() # All true 4 """ def logical_and(x, y): return np.logical_and(x, y) - return tt_tenfun(logical_and, self, B) + return tt_tenfun(logical_and, self, other) def logical_not(self) -> tensor: """ - Logical Not For Tensors - - Returns - ------- - Negated tensor + Logical not for tensors. 
Examples -------- - >>> X = ttb.tensor(np.ones((2,2), dtype=bool)) - >>> X.logical_not().collapse() # All false + >>> T = ttb.tensor(np.ones((2,2), dtype=bool)) + >>> T.logical_not().collapse() # All false 0 """ return ttb.tensor(np.logical_not(self.data), copy=False) def logical_or(self, other: Union[float, tensor]) -> tensor: """ - Logical or for tensors + Logical or for tensors. Parameters ---------- other: - Value to perform or against + Value to perform or against. Examples -------- - >>> X = ttb.tensor(np.ones((2,2), dtype=bool)) - >>> X.logical_or(X.logical_not()).collapse() # All true + >>> T = ttb.tensor(np.ones((2,2), dtype=bool)) + >>> T.logical_or(T.logical_not()).collapse() # All true 4 """ @@ -577,17 +673,17 @@ def tensor_or(x, y): def logical_xor(self, other: Union[float, tensor]) -> tensor: """ - Logical xor for tensors + Logical xor for tensors. Parameters ---------- other: - Value to perform xor against + Value to perform xor against. Examples -------- - >>> X = ttb.tensor(np.ones((2,2), dtype=bool)) - >>> X.logical_xor(X.logical_not()).collapse() # All true + >>> T = ttb.tensor(np.ones((2,2), dtype=bool)) + >>> T.logical_xor(T.logical_not()).collapse() # All true 4 """ @@ -598,22 +694,22 @@ def tensor_xor(x, y): def mask(self, W: tensor) -> np.ndarray: """ - Extract non-zero values at locations specified by mask tensor + Extract non-zero values at locations specified by mask tensor `W`. Parameters ---------- W: - Mask tensor + Mask tensor. Returns ------- - Extracted values + Array of extracted values. Examples -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) >>> W = ttb.tensor(np.ones((2,2))) - >>> tensor1 = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> tensor1.mask(W) + >>> T.mask(W) array([1, 3, 2, 4]) """ # Error checking @@ -630,24 +726,26 @@ def mttkrp( # noqa: PLR0912 self, U: Union[ttb.ktensor, List[np.ndarray]], n: int ) -> np.ndarray: """ - Matricized tensor times Khatri-Rao product + Matricized tensor times Khatri-Rao product. 
The matrices used in the + Khatri-Rao product are passed as a :class:`pyttb.ktensor` (where the + factor matrices are used) or as a list of :class:`numpy.ndarray` objects. Parameters ---------- U: - Matrices to create the Khatri-Rao product + Matrices to create the Khatri-Rao product. n: - Mode to matricize tensor in + Mode used to matricize tensor. Returns ------- - Matrix product + Array containing matrix product. - Example - ------- - >>> tensor1 = ttb.tensor(np.ones((2,2,2))) - >>> matrices = [np.ones((2,2))] * 3 - >>> tensor1.mttkrp(matrices, 2) + Examples + -------- + >>> T = ttb.tensor(np.ones((2,2,2))) + >>> U = [np.ones((2,2))] * 3 + >>> T.mttkrp(U, 2) array([[4., 4.], [4., 4.]]) """ @@ -712,9 +810,29 @@ def mttkrp( # noqa: PLR0912 return V def mttkrps(self, U: Union[ttb.ktensor, List[np.ndarray]]) -> List[np.ndarray]: - """Sequence of MTTKRP calculations for a tensor. + """ + Sequence of MTTKRP calculations for a tensor. + + Result is equivalent to [T.mttkrp(U, k) for k in range(T.ndims)]. + + Parameters + ---------- + U: + Matrices to create the Khatri-Rao product. - Result is equivalent to [X.mttkrp(U, k) for k in range(X.ndims)] + Returns + ------- + Array containing matrix product. + + Examples + -------- + >>> T = ttb.tensor(np.ones((2,2,2))) + >>> U = [np.ones((2,2))] * 3 + >>> T.mttkrps(U) + [array([[4., 4.], + [4., 4.]]), array([[4., 4.], + [4., 4.]]), array([[4., 4.], + [4., 4.]])] """ if isinstance(U, ttb.ktensor): U = U.factor_matrices @@ -739,12 +857,12 @@ def mttkrps(self, U: Union[ttb.ktensor, List[np.ndarray]]) -> List[np.ndarray]: @property def ndims(self) -> int: """ - Return the number of dimensions of a tensor + Number of dimensions of the tensor. 
Examples -------- - >>> X = ttb.tensor(np.ones((2,2))) - >>> X.ndims + >>> T = ttb.tensor(np.ones((2,2))) + >>> T.ndims 2 """ if self.shape == (0,): @@ -754,25 +872,26 @@ def ndims(self) -> int: @property def nnz(self) -> int: """ - Number of non-zero elements in tensor + Number of non-zero elements in the tensor. Examples -------- - >>> X = ttb.tensor(np.ones((2,2))) - >>> X.nnz - 4 + >>> T = ttb.tensor(np.ones((2,2,2))) + >>> T.nnz + 8 """ return np.count_nonzero(self.data) def norm(self) -> float: """ - Frobenius Norm of Tensor + Frobenius norm of the tensor, defined as the square root of the sum of the + squares of the elements of the tensor. Examples -------- - >>> X = ttb.tensor(np.ones((2,2))) - >>> X.norm() - 2.0 + >>> T = ttb.tensor(np.ones((2,2,2,2))) + >>> T.norm() + 4.0 """ # default of np.linalg.norm is to vectorize the data and compute the vector # norm, which is equivalent to the Frobenius norm for multidimensional arrays. @@ -781,24 +900,36 @@ def norm(self) -> float: def nvecs(self, n: int, r: int, flipsign: bool = True) -> np.ndarray: """ - Compute the leading mode-n eigenvectors for a tensor + Compute the leading mode-n vectors of the tensor. + + Computes the `r` leading eigenvectors of Tn*Tn.T (where Tn is the + mode-`n` matricization/unfolding of self), which provides information + about the mode-n fibers. In two-dimensions, the `r` leading mode-1 + vectors are the same as the `r` left singular vectors and the `r` + leading mode-2 vectors are the same as the `r` right singular + vectors. By default, this method computes the top `r` eigenvectors + of Tn*Tn.T. Parameters ---------- n: - Mode to unfold + Mode for tensor matricization. r: - Number of eigenvectors to compute + Number of eigenvectors to compute and use. flipsign: - Make each eigenvector's largest element positive + If True, make each column's largest element positive. + + Returns + ------- + Computed eigenvectors. 
Examples -------- - >>> tensor1 = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> tensor1.nvecs(0,1) # doctest: +ELLIPSIS + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T.nvecs(0,1) # doctest: +ELLIPSIS array([[0.4045...], [0.9145...]]) - >>> tensor1.nvecs(0,2) # doctest: +ELLIPSIS + >>> T.nvecs(0,2) # doctest: +ELLIPSIS array([[ 0.4045..., 0.9145...], [ 0.9145..., -0.4045...]]) """ @@ -827,23 +958,32 @@ def nvecs(self, n: int, r: int, flipsign: bool = True) -> np.ndarray: def permute(self, order: np.ndarray) -> tensor: """ - Permute tensor dimensions. + Permute tensor dimensions. The result is a tensor that has the + same values, but the order of the subscripts needed to access + any particular element are rearranged as specified by `order`. Parameters ---------- order: - New order of tensor dimensions + New order of tensor dimensions. Returns ------- - Updated tensor with shapeNew == shapePrevious[order] + New tensor with permuted dimensions. Examples -------- - >>> X = ttb.tensor(np.ones((2,2))) - >>> Y = X.permute(np.array((1,0))) - >>> X.isequal(Y) - True + >>> T1 = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T1 + tensor of shape (2, 2) + data[:, :] = + [[1 2] + [3 4]] + >>> T1.permute(np.array((1,0))) + tensor of shape (2, 2) + data[:, :] = + [[1 3] + [2 4]] """ if self.ndims != order.size: assert False, "Invalid permutation order" @@ -861,7 +1001,7 @@ def permute(self, order: np.ndarray) -> tensor: def reshape(self, shape: Tuple[int, ...]) -> tensor: """ - Reshapes a tensor + Reshape the tensor. 
Parameters ---------- @@ -870,12 +1010,13 @@ def reshape(self, shape: Tuple[int, ...]) -> tensor: Examples -------- - >>> X = ttb.tensor(np.ones((2,2))) - >>> Y = X.reshape((4,1)) - >>> Y.shape + >>> T1 = ttb.tensor(np.ones((2,2))) + >>> T1.shape + (2, 2) + >>> T2 = T1.reshape((4,1)) + >>> T2.shape (4, 1) """ - if np.prod(self.shape) != np.prod(shape): assert False, "Reshaping a tensor cannot change number of elements" @@ -883,21 +1024,20 @@ def reshape(self, shape: Tuple[int, ...]) -> tensor: def squeeze(self) -> Union[tensor, np.ndarray, float]: """ - Removes singleton dimensions from a tensor + Removes singleton dimensions from the tensor. Returns ------- - Tensor or scalar if all dims squeezed + Tensor or scalar if all dims squeezed. Examples -------- - >>> tensor1 = ttb.tensor(np.array([[[4]]])) - >>> tensor1.squeeze() + >>> T = ttb.tensor(np.array([[[4]]])) + >>> T.squeeze() 4 - >>> tensor2 = ttb.tensor(np.array([[1, 2, 3]])) - >>> tensor2.squeeze().data + >>> T = ttb.tensor(np.array([[1, 2, 3]])) + >>> T.squeeze().data array([1, 2, 3]) - """ shapeArray = np.array(self.shape) if np.all(shapeArray > 1): @@ -912,21 +1052,33 @@ def symmetrize( # noqa: PLR0912,PLR0915 self, grps: Optional[np.ndarray] = None, version: Optional[Any] = None ) -> tensor: """ - Symmetrize a tensor in the specified modes - Notes - ----- - It is *the same or less* work to just call X = symmetrize(X) then to first - check if X is symmetric and then symmetrize it, even if X is already symmetric. + Symmetrize a tensor in the specified modes. + + It is *the same or less* work to just call T = T.symmetrize() then to first + check if T is symmetric and then symmetrize it, even if T is already symmetric. Parameters ---------- grps: - Modes to check for symmetry + Modes to check for symmetry. version: - Any non-None value will call the non-default old version + Any non-None value will call the non-default old version. Returns ------- + Symmetrized tensor. 
+ + Examples + -------- + >>> T = ttb.tensor(np.ones((2,2,2))) + >>> T.symmetrize(np.array([0,2])) + tensor of shape (2, 2, 2) + data[0, :, :] = + [[1. 1.] + [1. 1.]] + data[1, :, :] = + [[1. 1.] + [1. 1.]] """ n = self.ndims sz = np.array(self.shape) @@ -1049,18 +1201,57 @@ def ttm( transpose: bool = False, ) -> tensor: """ - Tensor times matrix + Tensor times matrix. + + Computes the n-mode product of `self` with the matrix `matrix`; i.e., + `self x_n matrix`. The integer `n` specifies the dimension (or mode) + along which the matrix should be multiplied. If `matrix.shape = (J,I)`, + then the tensor must have `self.shape[n] = I`. The result will be the + same order and shape as `self` except that the size of dimension `n` + will be `J`. + + Multiplication with more than one matrix is provided using a list of + matrices and corresponding dimensions in the tensor to use. Multiplication + using the transpose of the matrix (or matrices) is also provided. + + The dimensions of the tensor with which to multiply can be provided as + `dims`, or the dimensions to exclude from `[0, ..., self.ndims]` can be + specified using `exclude_dims`. Parameters ---------- matrix: - Matrix or matrices to multiple by + Matrix or matrices to multiple by. dims: - Dimensions to multiply against + Dimensions to multiply against. exclude_dims: - Use all dimensions but these + Use all dimensions but these. transpose: - Transpose matrices during multiplication + Transpose matrices during multiplication. + + Returns + ------- + Tensor product. + + Examples + -------- + >>> T = ttb.tensor(np.ones((2,2,2,2))) + >>> A = 2*np.ones((2,1)) + >>> T.ttm([A,A], dims=[0,1], transpose=True) + tensor of shape (1, 1, 2, 2) + data[0, 0, :, :] = + [[16. 16.] + [16. 
16.]] + >>> T.ttm([A,A], exclude_dims=[0,1], transpose=True) + tensor of shape (2, 2, 1, 1) + data[0, 0, :, :] = + [[16.]] + data[1, 0, :, :] = + [[16.]] + data[0, 1, :, :] = + [[16.]] + data[1, 1, :, :] = + [[16.]] """ if dims is None and exclude_dims is None: dims = np.arange(self.ndims) @@ -1121,18 +1312,56 @@ def ttt( otherdims: Optional[Union[int, np.ndarray]] = None, ) -> tensor: """ - Tensor multiplication (tensor times tensor) + Tensor multiplication (tensor times tensor). + + Computes the contracted product of tensors, self and other, in the + dimensions specified by the `selfdims` and `otherdims`. The sizes of + the dimensions specified by `selfdims` and `otherdims` must match; + that is, `self.shape(selfdims)` must equal `other.shape(otherdims)`. + If only `selfdims` is provided as input, it is used to specify the + dimensions for both `self` and `other`. Parameters ---------- other: - Tensor to multiply by + Tensor to multiply by. selfdims: - Dimensions to contract this tensor by for multiplication + Dimensions to contract self by for multiplication. otherdims: - Dimensions to contract other tensor by for multiplication - """ + Dimensions to contract other tensor by for multiplication. + + Returns + ------- + Tensor product. + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T.ttt(T) + tensor of shape (2, 2, 2, 2) + data[0, 0, :, :] = + [[1 2] + [3 4]] + data[1, 0, :, :] = + [[ 3 6] + [ 9 12]] + data[0, 1, :, :] = + [[2 4] + [6 8]] + data[1, 1, :, :] = + [[ 4 8] + [12 16]] + >>> T.ttt(T, 0) + tensor of shape (2, 2) + data[:, :] = + [[10 14] + [14 20]] + >>> T.ttt(T, selfdims=0, otherdims=1) + tensor of shape (2, 2) + data[:, :] = + [[ 7 15] + [10 22]] + """ if not isinstance(other, tensor): assert False, "other must be of type tensor" @@ -1172,18 +1401,50 @@ def ttv( exclude_dims: Optional[Union[int, np.ndarray]] = None, ) -> tensor: """ - Tensor times vector + Tensor times vector. 
+ + Computes the n-mode product of `self` with the vector `vector`; i.e., + `self x_n vector`. The integer `n` specifies the dimension (or mode) + along which the vector should be multiplied. If `vector.shape = (I,)`, + then the tensor must have `self.shape[n] = I`. The result will be the + same order and shape as `self` except that the size of dimension `n` + will be `J`. The resulting tensor has one less dimension, as dimension + `n` is removed in the multiplication. + + Multiplication with more than one vector is provided using a list of + vectors and corresponding dimensions in the tensor to use. + + The dimensions of the tensor with which to multiply can be provided as + `dims`, or the dimensions to exclude from `[0, ..., self.ndims]` can be + specified using `exclude_dims`. Parameters ---------- vector: - Vector(s) to multiply against + Vector or vectors to multiple by. dims: - Dimensions to multiply with vector(s) + Dimensions to multiply against. exclude_dims: - Use all dimensions but these - """ + Use all dimensions but these. + + Returns + ------- + Tensor product. + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T.ttv(np.ones(2),0) + tensor of shape (2,) + data[:] = + [4. 6.] + >>> T.ttv(np.ones(2),1) + tensor of shape (2,) + data[:] = + [3. 7.] + >>> T.ttv([np.ones(2), np.ones(2)]) + 10.0 + """ if dims is None and exclude_dims is None: dims = np.array([]) elif isinstance(dims, (float, int)): @@ -1229,19 +1490,34 @@ def ttv( def ttsv( self, - vector: Union[np.ndarray, List[np.ndarray]], + vector: np.ndarray, skip_dim: Optional[int] = None, version: Optional[int] = None, ) -> Union[np.ndarray, tensor]: """ - Tensor times same vector in multiple modes + Tensor times same vector in multiple modes. + + See :meth:`ttv` for details on multiplication of a tensor with a + vector. When `skip_dim` is provided, multiply the vector by all but + dimensions except `[0, ..., skip_dim]`. 
Parameters ---------- vector: - Vector(s) to multiply against + Vector to multiply by. skip_dim: - Multiply tensor by vector in all dims except [0, skip_dim] + Initial dimensions of the tensor to skip when multiplying. + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T.ttsv(np.ones(2)) + 10.0 + >>> T.ttsv(np.ones(2),0) + array([3., 7.]) + >>> T.ttsv(np.ones(2),1) + array([[1, 2], + [3, 4]]) """ # Only two simple cases are supported if skip_dim is None: @@ -1279,39 +1555,44 @@ def ttsv( np.reshape(y, newshape=sz * np.ones(dnew, dtype=int), order="F"), copy=False, ) + + # extract scalar if needed + if len(y) == 1: + y = y.item() + return y assert False, "Invalid value for version; should be None, 1, or 2" def __setitem__(self, key, value): """ - SUBSASGN Subscripted assignment for a tensor. + Subscripted assignment for a tensor. We can assign elements to a tensor in three ways. - Case 1: X(R1,R2,...,RN) = Y, in which case we replace the + Case 1: `T[R1,R2,...,Rn] = Y`, in which case we replace the rectangular subtensor (or single element) specified by the ranges - R1,...,RN with Y. The right-hand-side can be a scalar, a tensor, or an - MDA. + `R1`,...,`Rn` with `Y`. The right-hand-side can be a scalar, a tensor, + or a :class:`numpy.ndarray`. - Case 2a: X(S) = V, where S is a p x n array of subscripts and V is - a scalar or a vector containing p values. + Case 2a: `T[S] = V`, where `S` is a `p` x `n` array of subscripts and `V` is + a scalar or a vector containing `p` values. - Case 2b: X(I) = V, where I is a set of p linear indices and V is a - scalar or a vector containing p values. Resize is not allowed in this + Case 2b: `T[I] = V`, where `I` is a set of `p` linear indices and `V` is a + scalar or a vector containing p values. Resizing is not allowed in this case. 
         Examples
         --------
-        >>> X = tensor(np.ones((3,4,2)))
+        >>> T = tensor(np.ones((3,4,2)))
         >>> # replaces subtensor
-        >>> X[0:2,0:2,0] = np.ones((2,2))
+        >>> T[0:2,0:2,0] = np.ones((2,2))
         >>> # replaces two elements
-        >>> X[np.array([[1, 1, 1], [1, 1, 2]])] = [5, 7]
+        >>> T[np.array([[1, 1, 1], [1, 1, 2]])] = [5, 7]
         >>> # replaces two elements with linear indices
-        >>> X[np.array([1, 13])] = [5, 7]
+        >>> T[np.array([1, 13])] = [5, 7]
         >>> # grows tensor to accept new element
-        >>> X[1,1,2:3] = 1
-        >>> X[1,1,4] = 1
+        >>> T[1,1,2:3] = 1
+        >>> T[1,1,4] = 1
         """
         access_type = get_index_variant(key)
@@ -1425,38 +1706,38 @@ def _set_subscripts(self, key, value):

     def __getitem__(self, item):  # noqa: PLR0912
         """
-        SUBSREF Subscripted reference for tensors.
+        Subscripted reference for tensors.

         We can extract elements or subtensors from a tensor in the
         following ways.

-        Case 1a: y = X(i1,i2,...,iN), where each in is an index, returns a
+        Case 1a: `y = T[I1,I2,...,In]`, where each `I` is an index, returns a
         scalar.

-        Case 1b: Y = X(R1,R2,...,RN), where one or more Rn is a range and
+        Case 1b: `Y = T[R1,R2,...,Rn]`, where one or more `R` is a range and
         the rest are indices, returns a tensor.

-        Case 2a: V = X(S) where S is a p x n array
-        of subscripts, returns a vector of p values.
+        Case 2a: `V = T[S]` where `S` is a `p` x `n` array
+        of subscripts, returns a vector of `p` values.

-        Case 2b: V = X(I) where I is a set of p
-        linear indices, returns a vector of p values.
+        Case 2b: `V = T[I]` where `I` is a set of `p`
+        linear indices, returns a vector of `p` values.

         Any ambiguity results in executing the first valid case. This
-        is particularly an issue if ndims(X)==1.
+        is particularly an issue if `self.ndims == 1`.
Examples -------- - >>> X = tensor(np.ones((3,4,2,1))) - >>> X[0,0,0,0] # produces a scalar + >>> T = tensor(np.ones((3,4,2,1))) + >>> T[0,0,0,0] # produces a scalar 1.0 >>> # produces a tensor of order 1 and size 1 - >>> X[1,1,1,:] # doctest: +NORMALIZE_WHITESPACE + >>> T[1,1,1,:] # doctest: +NORMALIZE_WHITESPACE tensor of shape (1,) data[:] = [1.] >>> # produces a tensor of size 2 x 2 x 1 - >>> X[0:2,[2, 3],1,:] # doctest: +NORMALIZE_WHITESPACE + >>> T[0:2,[2, 3],1,:] # doctest: +NORMALIZE_WHITESPACE tensor of shape (2, 2, 1) data[0, :, :] = [[1.] @@ -1466,14 +1747,10 @@ def __getitem__(self, item): # noqa: PLR0912 [1.]] >>> # returns a vector of length 2 >>> # Equivalent to selecting [0,0,0,0] and [1,1,1,0] separately - >>> X[np.array([[0, 0, 0, 0], [1, 1, 1, 0]])] + >>> T[np.array([[0, 0, 0, 0], [1, 1, 1, 0]])] array([1., 1.]) - >>> X[[0,1,2]] # extracts the first three linearized indices + >>> T[[0,1,2]] # extracts the first three linearized indices array([1., 1., 1.]) - - Returns - ------- - :class:`pyttb.tensor` or :class:`numpy.ndarray` """ # Case 0: Single Index Linear if isinstance(item, (int, float, np.generic, slice)): @@ -1550,7 +1827,7 @@ def __getitem__(self, item): # noqa: PLR0912 def __eq__(self, other): """ - Equal for tensors + Equal for tensors (element-wise). Parameters ---------- @@ -1558,7 +1835,21 @@ def __eq__(self, other): Returns ------- - :class:`pyttb.tensor` + :class:`pyttb.tensor` of `bool`. + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T == T + tensor of shape (2, 2) + data[:, :] = + [[ True True] + [ True True]] + >>> T == 1 + tensor of shape (2, 2) + data[:, :] = + [[ True False] + [False False]] """ def tensor_equality(x, y): @@ -1568,7 +1859,7 @@ def tensor_equality(x, y): def __ne__(self, other): """ - Not equal (!=) for tensors + Not equal (!=) for tensors (element-wise). 
Parameters ---------- @@ -1576,7 +1867,21 @@ def __ne__(self, other): Returns ------- - :class:`pyttb.tensor` + :class:`pyttb.tensor` of `bool`. + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T != T + tensor of shape (2, 2) + data[:, :] = + [[False False] + [False False]] + >>> T != 1 + tensor of shape (2, 2) + data[:, :] = + [[False True] + [ True True]] """ def tensor_not_equal(x, y): @@ -1586,7 +1891,7 @@ def tensor_not_equal(x, y): def __ge__(self, other): """ - Greater than or equal (>=) for tensors + Greater than or equal (>=) for tensors (element-wise). Parameters ---------- @@ -1594,7 +1899,21 @@ def __ge__(self, other): Returns ------- - :class:`pyttb.tensor` + :class:`pyttb.tensor` of `bool`. + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T >= T + tensor of shape (2, 2) + data[:, :] = + [[ True True] + [ True True]] + >>> T >= 1 + tensor of shape (2, 2) + data[:, :] = + [[ True True] + [ True True]] """ def greater_or_equal(x, y): @@ -1604,7 +1923,7 @@ def greater_or_equal(x, y): def __le__(self, other): """ - Less than or equal (<=) for tensors + Less than or equal (<=) for tensors (element-wise). Parameters ---------- @@ -1612,7 +1931,21 @@ def __le__(self, other): Returns ------- - :class:`pyttb.tensor` + :class:`pyttb.tensor` of `bool`. + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T <= T + tensor of shape (2, 2) + data[:, :] = + [[ True True] + [ True True]] + >>> T <= 1 + tensor of shape (2, 2) + data[:, :] = + [[ True False] + [False False]] """ def less_or_equal(x, y): @@ -1622,7 +1955,7 @@ def less_or_equal(x, y): def __gt__(self, other): """ - Greater than (>) for tensors + Greater than (>) for tensors (element-wise). Parameters ---------- @@ -1630,7 +1963,21 @@ def __gt__(self, other): Returns ------- - :class:`pyttb.tensor` + :class:`pyttb.tensor` of `bool`. 
+ + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T > T + tensor of shape (2, 2) + data[:, :] = + [[False False] + [False False]] + >>> T > 1 + tensor of shape (2, 2) + data[:, :] = + [[False True] + [ True True]] """ def greater(x, y): @@ -1640,7 +1987,7 @@ def greater(x, y): def __lt__(self, other): """ - Less than (<) for tensors + Less than (<) for tensors (element-wise). Parameters ---------- @@ -1648,7 +1995,21 @@ def __lt__(self, other): Returns ------- - :class:`pyttb.tensor` + :class:`pyttb.tensor` of `bool`. + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T < T + tensor of shape (2, 2) + data[:, :] = + [[False False] + [False False]] + >>> T < 1 + tensor of shape (2, 2) + data[:, :] = + [[False False] + [False False]] """ def less(x, y): @@ -1658,7 +2019,7 @@ def less(x, y): def __sub__(self, other): """ - Binary subtraction (-) for tensors + Binary subtraction (-) for tensors. Parameters ---------- @@ -1667,6 +2028,20 @@ def __sub__(self, other): Returns ------- :class:`pyttb.tensor` + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T - T + tensor of shape (2, 2) + data[:, :] = + [[0 0] + [0 0]] + >>> T - 1 + tensor of shape (2, 2) + data[:, :] = + [[0 1] + [2 3]] """ def minus(x, y): @@ -1676,7 +2051,7 @@ def minus(x, y): def __add__(self, other): """ - Binary addition (+) for tensors + Binary addition (+) for tensors. 
Parameters ---------- @@ -1685,8 +2060,21 @@ def __add__(self, other): Returns ------- :class:`pyttb.tensor` - """ + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T + T + tensor of shape (2, 2) + data[:, :] = + [[2 4] + [6 8]] + >>> T + 1 + tensor of shape (2, 2) + data[:, :] = + [[2 3] + [4 5]] + """ # If rhs is sumtensor, treat as such if isinstance(other, ttb.sumtensor): # pragma: no cover return other.__add__(self) @@ -1707,13 +2095,21 @@ def __radd__(self, other): Returns ------- :class:`pyttb.tensor` - """ + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> 1 + T + tensor of shape (2, 2) + data[:, :] = + [[2 3] + [4 5]] + """ return self.__add__(other) def __pow__(self, power): """ - Element Wise Power (**) for tensors + Element-wise Power (**) for tensors. Parameters ---------- @@ -1722,6 +2118,15 @@ def __pow__(self, power): Returns ------- :class:`pyttb.tensor` + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T**2 + tensor of shape (2, 2) + data[:, :] = + [[ 1 4] + [ 9 16]] """ def tensor_pow(x, y): @@ -1731,7 +2136,7 @@ def tensor_pow(x, y): def __mul__(self, other): """ - Element wise multiplication (*) for tensors, self*other + Element-wise multiplication (*) for tensors, self*other Parameters ---------- @@ -1740,6 +2145,20 @@ def __mul__(self, other): Returns ------- :class:`pyttb.tensor` + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T * T + tensor of shape (2, 2) + data[:, :] = + [[ 1 4] + [ 9 16]] + >>> T * 2 + tensor of shape (2, 2) + data[:, :] = + [[2 4] + [6 8]] """ def mul(x, y): @@ -1761,12 +2180,21 @@ def __rmul__(self, other): Returns ------- :class:`pyttb.tensor` + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> 2 * T + tensor of shape (2, 2) + data[:, :] = + [[2 4] + [6 8]] """ return self.__mul__(other) def __truediv__(self, other): """ - Element wise left division (/) for tensors, self/other + 
Element-wise left division (/) for tensors, self/other Parameters ---------- @@ -1775,6 +2203,20 @@ def __truediv__(self, other): Returns ------- :class:`pyttb.tensor` + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T / T + tensor of shape (2, 2) + data[:, :] = + [[1. 1.] + [1. 1.]] + >>> T / 2 + tensor of shape (2, 2) + data[:, :] = + [[0.5 1. ] + [1.5 2. ]] """ def div(x, y): @@ -1796,6 +2238,16 @@ def __rtruediv__(self, other): Returns ------- :class:`pyttb.tensor` + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> np.set_printoptions(precision=8) + >>> 2 / T + tensor of shape (2, 2) + data[:, :] = + [[2. 1. ] + [0.66666667 0.5 ]] """ def div(x, y): @@ -1808,69 +2260,88 @@ def div(x, y): def __pos__(self): """ - Unary plus (+) for tensors + Unary plus (+) for tensors. Returns ------- - :class:`pyttb.tensor` - copy of tensor - """ + Copy of tensor. + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> +T + tensor of shape (2, 2) + data[:, :] = + [[1 2] + [3 4]] + """ return ttb.tensor(self.data) def __neg__(self): """ - Unary minus (-) for tensors + Unary minus (-) for tensors. Returns ------- - :class:`pyttb.tensor` - copy of tensor + Copy of negated tensor. + + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> -T + tensor of shape (2, 2) + data[:, :] = + [[-1 -2] + [-3 -4]] """ return ttb.tensor(-1 * self.data) def __repr__(self): """ - String representation of a tensor. + String representation of the tensor. Returns ------- - str - Contains the shape and data as strings on different lines. + String displaying shape and data as strings on different lines. 
+ + Examples + -------- + >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) + >>> T + tensor of shape (2, 2) + data[:, :] = + [[1 2] + [3 4]] """ if self.ndims == 0: s = "" s += "empty tensor of shape " s += str(self.shape) - s += "\n" - s += "data = []" + s += "\ndata = []" return s s = "" s += f"tensor of shape {self.shape}" - s += "\n" if self.ndims == 1: - s += "data" + s += "\ndata" if self.ndims == 1: s += "[:]" - s += " = \n" + s += " =\n" s += str(self.data) - s += "\n" return s for i in np.arange(np.prod(self.shape[:-2])): - s += "data" + s += "\ndata" if self.ndims == 2: s += "[:, :]" - s += " = \n" + s += " =\n" s += str(self.data) - s += "\n" elif self.ndims > 2: idx = tt_ind2sub(self.shape[:-2], np.array([i])) s += str(idx[0].tolist())[0:-1] s += ", :, :]" - s += " = \n" + s += " =\n" s += str( self.data[ tuple( @@ -1880,7 +2351,6 @@ def __repr__(self): ) ] ) - s += "\n" # s += '\n' return s @@ -1889,60 +2359,89 @@ def __repr__(self): def tenones(shape: Tuple[int, ...]) -> tensor: """ - Creates a tensor of all ones + Creates a tensor of all ones. Parameters ---------- shape: - Shape of resulting tensor + Shape of resulting tensor. Returns ------- - Constructed tensor + Constructed tensor. - Example + Examples ------- - >>> X = ttb.tenones((2,2)) + >>> T = ttb.tenones((3,)) + >>> T + tensor of shape (3,) + data[:] = + [1. 1. 1.] + >>> T = ttb.tenones((3,3)) + >>> T + tensor of shape (3, 3) + data[:, :] = + [[1. 1. 1.] + [1. 1. 1.] + [1. 1. 1.]] """ return tensor.from_function(np.ones, shape) def tenzeros(shape: Tuple[int, ...]) -> tensor: """ - Creates a tensor of all zeros + Creates a tensor of all zeros. Parameters ---------- shape: - Shape of resulting tensor + Shape of resulting tensor. Returns ------- - Constructed tensor - - Example - ------- - >>> X = ttb.tenzeros((2,2)) + Constructed tensor. + + Examples + -------- + >>> T = ttb.tenzeros((3,)) + >>> T + tensor of shape (3,) + data[:] = + [0. 0. 0.] 
+    >>> T = ttb.tenzeros((3,3))
+    >>> T
+    tensor of shape (3, 3)
+    data[:, :] =
+    [[0. 0. 0.]
+     [0. 0. 0.]
+     [0. 0. 0.]]
     """
     return tensor.from_function(np.zeros, shape)


 def tenrand(shape: Tuple[int, ...]) -> tensor:
     """
-    Creates a tensor with entries drawn from a uniform distribution on the unit interval
+    Creates a tensor with entries drawn from a uniform
+    distribution on the unit interval.

     Parameters
     ----------
     shape:
-        Shape of resulting tensor
+        Shape of resulting tensor.

     Returns
     -------
-    Constructed tensor
-
-    Example
-    -------
-    >>> X = ttb.tenrand((2,2))
+    Constructed tensor.
+
+    Examples
+    --------
+    >>> np.random.seed(1)
+    >>> np.set_printoptions(precision=8)
+    >>> T = ttb.tenrand((3,))
+    >>> T
+    tensor of shape (3,)
+    data[:] =
+    [4.17022005e-01 7.20324493e-01 1.14374817e-04]
     """
     # Typing doesn't play nice with partial
@@ -1955,27 +2454,27 @@ def unit_uniform(pass_through_shape: Tuple[int, ...]) -> np.ndarray:

 def tendiag(elements: np.ndarray, shape: Optional[Tuple[int, ...]] = None) -> tensor:
     """
-    Creates a tensor with elements along super diagonal
-    If provided shape is too small the tensor will be enlarged to accomodate
+    Creates a tensor with elements along super diagonal. If provided shape is too
+    small the tensor will be enlarged to accommodate.

     Parameters
     ----------
     elements:
-        Elements to set along the diagonal
+        Elements to set along the diagonal.
     shape:
-        Shape of resulting tensor
+        Shape of resulting tensor.

     Returns
     -------
-    Constructed tensor
+    Constructed tensor.

-    Example
-    -------
-    >>> shape = (2,)
+    Examples
+    --------
+    >>> shape = (3,)
     >>> values = np.ones(shape)
-    >>> X = ttb.tendiag(values)
-    >>> Y = ttb.tendiag(values, (2, 2))
-    >>> X.isequal(Y)
+    >>> T1 = ttb.tendiag(values)
+    >>> T2 = ttb.tendiag(values, (3, 3, 3))
+    >>> T1.isequal(T2)
     True
     """
     # Flatten provided elements
@@ -2007,7 +2506,7 @@ def mttv_left(W_in: np.ndarray, U1: np.ndarray) -> np.ndarray:

     Returns
     -------
-    Matrix with modes (m2 x ...
x mN, C) + Matrix with modes (m2 x ... x mN, C). """ r = U1.shape[1] W_in = np.reshape(W_in, (U1.shape[0], -1, r), order="F") @@ -2034,7 +2533,7 @@ def mttv_mid(W_in: np.ndarray, U_mid: List[np.ndarray]) -> np.ndarray: Returns ------- - Matrix with modes (m1, C) + Matrix with modes (m1, C). """ if len(U_mid) == 0: return W_in diff --git a/tests/test_tensor.py b/tests/test_tensor.py index 2ef7265..ec61e81 100644 --- a/tests/test_tensor.py +++ b/tests/test_tensor.py @@ -1469,11 +1469,9 @@ def test_tensor__str__(sample_tensor_2way): tensorInstance = ttb.tensor(data) s = "" s += f"tensor of shape {tensorInstance.shape}" - s += "\n" - s += "data" - s += "[:] = \n" + s += "\ndata" + s += "[:] =\n" s += data.__str__() - s += "\n" assert s == tensorInstance.__str__() # Test 2D @@ -1481,11 +1479,9 @@ def test_tensor__str__(sample_tensor_2way): tensorInstance = ttb.tensor(data) s = "" s += f"tensor of shape {tensorInstance.shape}" - s += "\n" - s += "data" - s += "[:, :] = \n" + s += "\ndata" + s += "[:, :] =\n" s += data.__str__() - s += "\n" assert s == tensorInstance.__str__() # Test 3D,shape in decreasing and increasing order @@ -1493,24 +1489,20 @@ def test_tensor__str__(sample_tensor_2way): tensorInstance = ttb.tensor(data) s = "" s += f"tensor of shape {tensorInstance.shape}" - s += "\n" for i in range(data.shape[0]): - s += "data" - s += "[{}, :, :] = \n".format(i) + s += "\ndata" + s += "[{}, :, :] =\n".format(i) s += data[i, :, :].__str__() - s += "\n" assert s == tensorInstance.__str__() data = np.random.normal(size=(2, 3, 4)) tensorInstance = ttb.tensor(data) s = "" s += f"tensor of shape {tensorInstance.shape}" - s += "\n" for i in range(data.shape[0]): - s += "data" - s += "[{}, :, :] = \n".format(i) + s += "\ndata" + s += "[{}, :, :] =\n".format(i) s += data[i, :, :].__str__() - s += "\n" assert s == tensorInstance.__str__() # Test 4D @@ -1518,13 +1510,11 @@ def test_tensor__str__(sample_tensor_2way): tensorInstance = ttb.tensor(data) s = "" s += f"tensor of 
shape {tensorInstance.shape}" - s += "\n" for i in range(data.shape[0]): for j in range(data.shape[1]): - s += "data" - s += "[{}, {}, :, :] = \n".format(j, i) + s += "\ndata" + s += "[{}, {}, :, :] =\n".format(j, i) s += data[j, i, :, :].__str__() - s += "\n" assert s == tensorInstance.__str__() # Test 5D @@ -1532,14 +1522,12 @@ def test_tensor__str__(sample_tensor_2way): tensorInstance = ttb.tensor(data) s = "" s += f"tensor of shape {tensorInstance.shape}" - s += "\n" for i in range(data.shape[0]): for j in range(data.shape[1]): for k in range(data.shape[2]): - s += "data" - s += "[{}, {}, {}, :, :] = \n".format(k, j, i) + s += "\ndata" + s += "[{}, {}, {}, :, :] =\n".format(k, j, i) s += data[k, j, i, :, :].__str__() - s += "\n" assert s == tensorInstance.__str__()