diff --git a/examples/rlst_elementary_matrices.rs b/examples/rlst_elementary_matrices.rs
deleted file mode 100644
index 1d7610b..0000000
--- a/examples/rlst_elementary_matrices.rs
+++ /dev/null
@@ -1,69 +0,0 @@
-//! Demo the inverse of a matrix
-use rlst::dense::linalg::elementary_matrix::ElementaryOperations;
-pub use rlst::prelude::*;
-use rlst::dense::linalg::elementary_matrix::{OpType, RowOpType};
-
-pub fn main() {
-    //Example 1: use elementary matrices to perform one step of an LU block decomposition
-    //Here we use a 9x9 matrix and we partition it in 3x3 blocks
-    let mut arr = rlst_dynamic_array2!(f64, [9, 9]);
-    arr.fill_from_seed_equally_distributed(0);
-
-    //We use the block 11 to eliminate block 21 using the LU with elementary matrices
-    let mut block_11 = rlst_dynamic_array2!(f64, [3, 3]);
-    let mut block_21 = rlst_dynamic_array2!(f64, [3, 3]);
-    block_11.fill_from(arr.view().into_subview([0, 0], [3, 3]));
-    block_21.fill_from(arr.view().into_subview([3, 0], [3, 3]));
-    block_11.view_mut().into_inverse_alloc().unwrap();
-
-    //We define the elementary matrix using the coordinates of the square blocks block_11 and block_21 and the dimension of the elementary matrix
-    //(in this case it's a 9x9 matrix and we assume that elementary matrices are squared)
-    let mut el_mat_fact = empty_array().simple_mult_into_resize(block_21.view(), block_11.view());
-    let row_indices: Vec<usize> = (3..6).collect();
-    let col_indices: Vec<usize> = (0..3).collect();
-    let el_mat = el_mat_fact.view_mut().into_el_mat_alloc(9, row_indices, col_indices, OpType::Row, false).unwrap();
-    el_mat.el_mul(arr.view_mut(), Some(RowOpType::Sub), None).unwrap();
-
-    //As a result we se that the block 21 has been eliminated
-    let res = arr.view().into_subview([3, 0], [3, 3]);
-    println!("L2 of block 21 after LU elimination through elementary matrices {}", res.view_flat().norm_2());
-
-
-    //////////////////////////////////////////////////////////////////////////////////
-    //Example 2: Use elementary matrices for row scaling
-    //Using the matrix of the previous example, we can re-scale the rows corresponding to blocks 11, 12, 13:
-    let row_indices: Vec<usize> = (0..3).collect(); //The row and col indices must be the same, since the relevant dimension is the row_indices
-    let col_indices: Vec<usize> = (0..3).collect();
-
-    //We simply use a 1x1 matrix for this case, since the only relevant information is contained in the scaling factor
-    let mut el_mat_fact = rlst_dynamic_array2!(f64, [1, 1]);
-    let el_mat = el_mat_fact.view_mut().into_el_mat_alloc(9, row_indices, col_indices, OpType::Mul, false).unwrap();
-
-    //We extract the elements of the matrix before scaling:
-    let mut bef_scaling = rlst_dynamic_array2!(f64, [3, 9]);
-    bef_scaling.fill_from(arr.view().into_subview([0, 0], [3, 9]));
-    el_mat.el_mul(arr.view_mut(), None, Some(5.0)).unwrap();
-    let aft_scaling = arr.view().into_subview([0, 0], [3, 9]);
-    let norm_comp = aft_scaling.view_flat().norm_2()/bef_scaling.view_flat().norm_2();
-    println!("The norm of the rows corresponding to blocks 11, 12, 13 after scaling is {} times larger", norm_comp);
-
-
-    //////////////////////////////////////////////////////////////////////////////////
-    //Example 3: Use elementary matrices for row permutation
-    // The permutation is as follows col_indices->row_indices. In this case:
-    // row 0 moves into row 2, row 1 moves into row 0 and row 2 moves into row 1.
-    let col_indices: Vec<usize> = [0, 1, 2].to_vec();
-    let row_indices: Vec<usize> = [2, 0, 1].to_vec();
-
-    let mut el_mat_fact = rlst_dynamic_array2!(f64, [1, 1]);
-    let el_mat = el_mat_fact.view_mut().into_el_mat_alloc(9, row_indices, col_indices, OpType::Perm, false).unwrap();
-    let mut bef_perm = rlst_dynamic_array2!(f64, [3, 9]);
-    bef_perm.fill_from(arr.view().into_subview([0, 0], [3, 9]));
-    el_mat.el_mul(arr.view_mut(), None, None).unwrap();
-    let aft_perm = arr.view().into_subview([0, 0], [3, 9]);
-
-    //Here we check if row 0 moved into row 2:
-    let res = bef_perm.into_subview([0, 0], [1, 9]) - aft_perm.into_subview([2, 0], [1, 9]);
-
-    println!("The difference between the row 0 before permutation and the row 2 after the permutation is {}",res.view_flat().norm_2());
-}
diff --git a/examples/rlst_interpolative_decomposition.rs b/examples/rlst_interpolative_decomposition.rs
index 8756e3e..44a106f 100644
--- a/examples/rlst_interpolative_decomposition.rs
+++ b/examples/rlst_interpolative_decomposition.rs
@@ -1,6 +1,7 @@
 //! Demo the inverse of a matrix
 pub use rlst::prelude::*;
+use rlst::dense::linalg::interpolative_decomposition::{MatrixIdDecomposition, Accuracy};
 
 //Function that creates a low rank matrix by calculating a kernel given a random point distribution on an unit sphere.
 fn low_rank_matrix(n: usize, arr: &mut Array<f64, BaseArray<f64, VectorContainer<f64>, 2>, 2>){
@@ -54,22 +55,31 @@ pub fn main() {
     let slice: usize = 50;
     let n: usize = 100;
 
     //Tolerance given for the size of the decomposition
     let tol: f64 = 1e-5;
 
     //Create a low rank matrix
-    let mut arr = rlst_dynamic_array2!(f64, [slice, n]);
-    low_rank_matrix(n,&mut arr);
+    let mut arr: DynamicArray<f64, 2> = rlst_dynamic_array2!(f64, [slice, n]);
+    low_rank_matrix(n, &mut arr);
 
-    let mut res = arr.view_mut().into_id_alloc(tol, None).unwrap();
+    let res: IdDecomposition<f64> = arr.view_mut().into_id_alloc(Accuracy::Tol(tol)).unwrap();
 
-    println!("The permuted matrix is:");
-    res.arr.pretty_print();
+    println!("The skeleton of the matrix is given by");
+    res.skel.pretty_print();
 
     println!("The interpolative decomposition matrix is:");
     res.id_mat.pretty_print();
 
     println!("The rank of this matrix is {}\n", res.rank);
 
-    let a_rs_app = empty_array().simple_mult_into_resize(res.id_mat.view(), res.arr.view_mut().into_subview([0, 0], [res.rank, n]));
-    let a_rs = res.arr.view_mut().into_subview([res.rank, 0], [slice-res.rank, n]);
+    //We extract the residuals of the matrix
+    let mut perm_mat: DynamicArray<f64, 2> = rlst_dynamic_array2!(f64, [slice, slice]);
+    res.get_p(perm_mat.view_mut());
+    let perm_arr: DynamicArray<f64, 2> = empty_array::<f64, 2>()
+        .simple_mult_into_resize(perm_mat.view_mut(), arr.view());
+    let mut a_rs: DynamicArray<f64, 2> = rlst_dynamic_array2!(f64, [slice-res.rank, n]);
+    a_rs.fill_from(perm_arr.into_subview([res.rank, 0], [slice-res.rank, n]));
+
+    //We compute an approximation of the residual columns of the matrix
+    let a_rs_app: DynamicArray<f64, 2> = empty_array().simple_mult_into_resize(res.id_mat.view(), res.skel);
+
     let error: f64 = a_rs.view().sub(a_rs_app.view()).view_flat().norm_2();
     println!("Interpolative Decomposition L2 absolute error: {}", error);
 }
diff --git a/examples/rlst_null_space.rs b/examples/rlst_null_space.rs
index 12e29b1..5c0afd7 100644
--- a/examples/rlst_null_space.rs
+++ b/examples/rlst_null_space.rs
@@ -1,14 +1,13 @@
 //! Demo the inverse of a matrix
-use rlst::dense::linalg::null_space::NullSpaceType;
 pub use rlst::prelude::*;
 
 //Here we compute the null space (B) of the rowspace of a short matrix (A).
 pub fn main() {
     let mut arr = rlst_dynamic_array2!(f64, [3, 4]);
     arr.fill_from_seed_equally_distributed(0);
-
-    let null_res = arr.view_mut().into_null_alloc(NullSpaceType::Row).unwrap();
-    let res = empty_array().simple_mult_into_resize(arr.view_mut(), null_res.null_space_arr.view());
+    let tol = 1e-15;
+    let null_res = arr.view_mut().into_null_alloc(tol).unwrap();
+    let res: Array<f64, BaseArray<f64, VectorContainer<f64>, 2>, 2> = empty_array().simple_mult_into_resize(arr.view_mut(), null_res.null_space_arr.view());
 
     println!("Value of |A*B|_2, where B=null(A): {}", res.view_flat().norm_2());
diff --git a/src/dense/linalg.rs b/src/dense/linalg.rs
index 11f3979..b8327b1 100644
--- a/src/dense/linalg.rs
+++ b/src/dense/linalg.rs
@@ -1,7 +1,7 @@
 //! Linear algebra routines
 
-use crate::{MatrixInverse, MatrixPseudoInverse, MatrixQr, MatrixId, MatrixSvd, MatrixNull, RlstScalar, ElementaryMatrixData};
+use crate::{MatrixInverse, MatrixPseudoInverse, MatrixQr, MatrixId, MatrixSvd, MatrixNull, RlstScalar};
 
 use self::lu::MatrixLu;
 
@@ -11,7 +11,6 @@ pub mod pseudo_inverse;
 pub mod qr;
 pub mod svd;
 pub mod interpolative_decomposition;
-pub mod elementary_matrix;
 pub mod null_space;
 
 /// Return true if stride is column major as required by Lapack.
@@ -24,9 +23,9 @@ pub fn assert_lapack_stride(stride: [usize; 2]) {
 }
 
 /// Marker trait for objects that support Matrix decompositions.
-pub trait LinAlg: MatrixInverse + MatrixQr + MatrixSvd + MatrixLu + MatrixPseudoInverse + MatrixId + MatrixNull +ElementaryMatrixData{}
+pub trait LinAlg: MatrixInverse + MatrixQr + MatrixSvd + MatrixLu + MatrixPseudoInverse + MatrixId + MatrixNull{}
 
-impl<T: MatrixInverse + MatrixQr + MatrixSvd + MatrixLu + MatrixPseudoInverse + MatrixId + MatrixNull + ElementaryMatrixData> LinAlg for T {
-}
+impl<T: MatrixInverse + MatrixQr + MatrixSvd + MatrixLu + MatrixPseudoInverse + MatrixId + MatrixNull> LinAlg for T {
+}
diff --git a/src/dense/linalg/elementary_matrix.rs b/src/dense/linalg/elementary_matrix.rs
deleted file mode 100644
index 93b446c..0000000
--- a/src/dense/linalg/elementary_matrix.rs
+++ /dev/null
@@ -1,267 +0,0 @@
-//! Elementary matrices (row swapping, row multiplication and row addition)
-use crate::dense::array::Array;
-use crate::dense::traits::{RawAccessMut, Shape, Stride, UnsafeRandomAccessByValue, MultIntoResize};
-use crate::dense::types::{c32, c64, RlstResult, RlstScalar, RlstError};
-use crate::{empty_array, rlst_dynamic_array2};
-use crate::dense::traits::accessors::RandomAccessMut;
-
-///Allocate space for the elementary matrix
-pub trait ElementaryMatrixData: RlstScalar {
-    ///This is the method that allocates space for the elements of the elementary matrix
-    fn into_el_mat_alloc<
-        ArrayImpl: UnsafeRandomAccessByValue<2, Item = Self>
-            + Stride<2>
-            + Shape<2>
-            + RawAccessMut,
-    >(
-        arr: Array<Self, ArrayImpl, 2>, dim:usize, row_indices: Vec<usize>, col_indices: Vec<usize>, op_type: OpType, trans: bool
-    ) -> RlstResult<ElementaryMatrix<Self, ArrayImpl>>;
-}
-
-macro_rules! implement_into_el_mat {
-    ($scalar:ty) => {
-        impl ElementaryMatrixData for $scalar {
-            fn into_el_mat_alloc<
-                ArrayImpl: UnsafeRandomAccessByValue<2, Item = Self>
-                    + Stride<2>
-                    + Shape<2>
-                    + RawAccessMut,
-            >(
-                arr: Array<Self, ArrayImpl, 2>, dim:usize, row_indices: Vec<usize>, col_indices: Vec<usize>, op_type: OpType, trans: bool
-            ) -> RlstResult<ElementaryMatrix<Self, ArrayImpl>> {
-                ElementaryMatrix::<$scalar, ArrayImpl>::new(arr, dim, row_indices, col_indices, op_type, trans)
-            }
-        }
-    };
-}
-
-implement_into_el_mat!(f32);
-implement_into_el_mat!(f64);
-implement_into_el_mat!(c32);
-implement_into_el_mat!(c64);
-
-impl<
-        Item: RlstScalar + ElementaryMatrixData,
-        ArrayImpl: UnsafeRandomAccessByValue<2, Item = Item>
-            + Stride<2>
-            + RawAccessMut
-            + Shape<2>,
-    > Array<Item, ArrayImpl, 2>
-{
-    ///This computes an elementary (row addition/substraction, row scaling and row permutation) matrix given a series of parameters
-    pub fn into_el_mat_alloc(self, dim:usize, row_indices: Vec<usize>, col_indices: Vec<usize>, op_type: OpType, trans: bool) -> RlstResult<ElementaryMatrix<Item, ArrayImpl>> {
-        <Item as ElementaryMatrixData>::into_el_mat_alloc(self, dim, row_indices, col_indices, op_type, trans)
-    }
-}
-
-///These are the methods associated to elementary matrices
-pub trait ElementaryOperations: Sized {
-    /// Item type
-    type Item: RlstScalar;
-    /// Array implementaion
-    type ArrayImpl: UnsafeRandomAccessByValue<2, Item = Self::Item>
-        + Stride<2>
-        + RawAccessMut
-        + Shape<2>;
-
-    /// We create the Elementary matrix (E), so it can be applied to a matrix A. In other words, we implement E(A).
-    /// arr: is needed when row additions/substractions on A are performed.
-    /// dim: is the dimension of the elementary matrix, given by the row dimension of A.
-    /// row_indices and col_indices are respectively the domain and range of this application; i.e. we take the rows of A
-    /// corresponding to col_indices of A and we place the result of this operation in row_indices of A.
-    /// op_type: indicates if we are adding/substracting rows of A, scaling rows of A by a scalar or if we are permuting rows of A.
-    /// trans: indicates if we are applying E^T
-    fn new(arr: Array<Self::Item, Self::ArrayImpl, 2>, dim:usize, row_indices: Vec<usize>, col_indices: Vec<usize>, op_type: OpType, trans: bool) -> RlstResult<Self>;
-
-    ///This method performs E(A). Here:
-    /// right_arr: matrix A.
-    /// row_op_type: indicates substraction or addition of rows
-    /// alpha: is the scaling parameter of a scaling is applied
-    fn el_mul(self, right_arr: Array<Self::Item, Self::ArrayImpl, 2>, row_op_type: Option<RowOpType>, alpha: Option<Self::Item>)-> RlstResult<()> ;
-
-    ///This method implements the row addition/substraction
-    fn row_ops(self, right_arr: Array<Self::Item, Self::ArrayImpl, 2>, op_type: RowOpType);
-
-    ///This method implements the row scaling
-    fn row_mul(self, right_arr: Array<Self::Item, Self::ArrayImpl, 2>, alpha: Self::Item);
-
-    ///This method implements the row permutation
-    fn row_perm(self, right_arr: Array<Self::Item, Self::ArrayImpl, 2>);
-
-}
-
-/// Container for the LU Decomposition of a matrix.
-pub struct ElementaryMatrix<
-    Item: RlstScalar,
-    ArrayImpl: UnsafeRandomAccessByValue<2, Item = Item> + Stride<2> + Shape<2> + RawAccessMut,
-> {
-    dim: usize,
-    arr: Array<Item, ArrayImpl, 2>,
-    row_indices: Vec<usize>,
-    col_indices: Vec<usize>,
-    op_type: OpType,
-
-    ///Indicates if the operation is to be transposed or not
-    pub trans: bool
-
-}
-
-/// Type of row operation
-#[derive(Clone, Copy)]
-#[repr(u8)]
-pub enum RowOpType {
-    /// Row addition
-    Add = b'A',
-    /// Row substraction
-    Sub = b'S',
-}
-
-/// Type of Elementary matrix
-#[derive(Clone, Copy)]
-#[repr(u8)]
-pub enum OpType {
-    /// Row operation (addition or substraction)
-    Row = b'R',
-    /// Row scaling
-    Mul = b'M',
-    /// Row permutation
-    Perm = b'P',
-}
-
-
-macro_rules! impl_el_ops {
-    ($scalar:ty) => {
-        impl<
-                ArrayImpl: UnsafeRandomAccessByValue<2, Item = $scalar>
-                    + Stride<2>
-                    + Shape<2>
-                    + RawAccessMut,
-            > ElementaryOperations for ElementaryMatrix<$scalar, ArrayImpl>
-        {
-            type Item = $scalar;
-            type ArrayImpl = ArrayImpl;
-
-            fn new(arr: Array<$scalar, ArrayImpl, 2>, dim:usize, row_indices: Vec<usize>, col_indices: Vec<usize>, op_type: OpType, trans: bool) -> RlstResult<Self> {
-
-                match op_type{
-                    OpType::Row => {},
-                    OpType::Mul => {assert_eq!(row_indices.len(), col_indices.len())},
-                    OpType::Perm => {assert_eq!(row_indices.len(), col_indices.len())}
-                }
-
-                Ok(Self{arr, dim, row_indices, col_indices, op_type, trans})
-            }
-
-            fn el_mul(self, right_arr: Array<$scalar, ArrayImpl, 2>, row_op_type: Option<RowOpType>, alpha: Option<Self::Item>)-> RlstResult<()>
-            {
-                match self.op_type{
-                    OpType::Row => {
-                        match(row_op_type){
-                            Some(row_op_type)=> Ok(self.row_ops(right_arr, row_op_type)),
-                            None =>Err(RlstError::IoError("Missing parameter".to_string()))
-                    }},
-                    OpType::Mul => {
-                        match(alpha){
-                            Some(alpha) => Ok(self.row_mul(right_arr, alpha)),
-                            None =>Err(RlstError::IoError("Missing parameter".to_string()))
-                    }},
-                    OpType::Perm => {Ok(self.row_perm(right_arr))}
-                }
-            }
-
-            fn row_ops(self, mut right_arr: Array<$scalar, ArrayImpl, 2>, row_op_type: RowOpType){
-                let mut subarr_cols = rlst_dynamic_array2!($scalar, [self.col_indices.len(), self.dim]);
-                let mut subarr_rows = rlst_dynamic_array2!($scalar, [self.row_indices.len(), self.dim]);
-                let mut aux_arr = empty_array();
-                let col_indices :Vec<usize>;
-                let row_indices :Vec<usize>;
-
-                if self.trans{
-                    aux_arr.fill_from_resize(self.arr.view().conj().transpose());
-                    col_indices = self.row_indices;
-                    row_indices = self.col_indices;
-                }
-                else{
-                    aux_arr.fill_from_resize(self.arr.view());
-                    col_indices = self.col_indices;
-                    row_indices = self.row_indices;
-                }
-
-                let right_arr_shape = right_arr.view().shape();
-                let dim = self.dim;
-
-
-                for col in 0..dim{
-                    for row in 0..col_indices.len(){
-                        *subarr_cols.get_mut([row, col]).unwrap() = right_arr.data_mut()[col*right_arr_shape[0] + col_indices[row]];
-                    }
-                    for row in 0..row_indices.len(){
-                        *subarr_rows.get_mut([row, col]).unwrap() = right_arr.data_mut()[col*right_arr_shape[0] + row_indices[row]];
-                    }
-                }
-
-                let res_mul = empty_array::<$scalar, 2>().simple_mult_into_resize(aux_arr, subarr_cols.view_mut());
-                let mut add_res = rlst_dynamic_array2!($scalar, subarr_rows.shape());
-
-                match row_op_type{
-                    RowOpType::Add => {add_res.fill_from(subarr_rows.view() + res_mul.view());},
-                    RowOpType::Sub => {add_res.fill_from(subarr_rows.view() - res_mul.view());}
-                }
-
-                for col in 0..dim{
-                    for row in 0..row_indices.len(){
-                        right_arr.data_mut()[col*right_arr_shape[0] + row_indices[row]] = *add_res.get_mut([row, col]).unwrap();
-                    }
-                }
-            }
-
-            fn row_mul(self, mut right_arr: Array<$scalar, ArrayImpl, 2>, alpha: Self::Item){
-                let right_arr_shape = right_arr.view().shape();
-                let dim = self.dim;
-                let row_indices = self.row_indices;
-
-                for col in 0..dim{
-                    for row in 0..row_indices.len(){
-                        right_arr.data_mut()[col*right_arr_shape[0] + row_indices[row]] = alpha*right_arr.data_mut()[col*right_arr_shape[0] + row_indices[row]]
-                    }
-                }
-
-            }
-
-            fn row_perm(self, mut right_arr: Array<$scalar, ArrayImpl, 2>){
-                let dim = self.dim;
-                let col_indices :Vec<usize>;
-                let row_indices :Vec<usize>;
-
-                if self.trans{
-                    col_indices = self.row_indices;
-                    row_indices = self.col_indices;
-                }
-                else{
-                    col_indices = self.col_indices;
-                    row_indices = self.row_indices;
-                }
-
-                let right_arr_shape = right_arr.view().shape();
-                let mut subarr_cols = rlst_dynamic_array2!($scalar, [col_indices.len(), dim]);
-
-                for col in 0..dim{
-                    for row in 0..col_indices.len(){
-                        *subarr_cols.get_mut([row, col]).unwrap() = right_arr.data_mut()[col*right_arr_shape[0] + col_indices[row]];
-                    }
-                }
-
-                for col in 0..dim{
-                    for row in 0..row_indices.len(){
-                        right_arr.data_mut()[col*right_arr_shape[0] + row_indices[row]] = *subarr_cols.get_mut([row, col]).unwrap();
-                    }
-                }
-            }
-        }
-    };
-}
-
-impl_el_ops!(f64);
-impl_el_ops!(f32);
-impl_el_ops!(c64);
-impl_el_ops!(c32);
diff --git a/src/dense/linalg/interpolative_decomposition.rs b/src/dense/linalg/interpolative_decomposition.rs
index 5850b5f..7740eef 100644
--- a/src/dense/linalg/interpolative_decomposition.rs
+++ b/src/dense/linalg/interpolative_decomposition.rs
@@ -1,10 +1,9 @@
 use crate::dense::array::Array;
-use crate::dense::traits::{RawAccessMut, Shape, Stride, UnsafeRandomAccessByValue, UnsafeRandomAccessMut, UnsafeRandomAccessByRef, MultIntoResize, DefaultIterator};
-use crate::dense::types::{RlstResult, RlstScalar};
-use crate::dense::traits::accessors::RandomAccessMut;
+use crate::dense::traits::{RandomAccessMut, RawAccessMut, Shape, Stride, UnsafeRandomAccessByValue, UnsafeRandomAccessMut, UnsafeRandomAccessByRef, MultIntoResize, DefaultIterator};
+use crate::dense::types::{c32, c64, RlstResult, RlstScalar};
 use crate::{BaseArray, VectorContainer, rlst_dynamic_array1, empty_array, rlst_dynamic_array2};
-use crate::dense::types::{c32, c64};
-use num::One;
+use crate::DynamicArray;
+
 
 /// Compute the matrix interpolative decomposition, by providing a rank and an interpolation matrix.
 ///
@@ -31,8 +30,8 @@ pub trait MatrixId: RlstScalar {
             + Shape<2>
             + RawAccessMut
     >(
-        arr: Array<Self, ArrayImpl, 2>, tol: <Self as RlstScalar>::Real, k: Option<usize>
-    ) -> RlstResult<IdDecomposition<Self, ArrayImpl>>;
+        arr: Array<Self, ArrayImpl, 2>, rank_param: Accuracy<<Self as RlstScalar>::Real>
+    ) -> RlstResult<IdDecomposition<Self>>;
 }
 
 macro_rules! implement_into_id {
@@ -44,9 +43,9 @@ macro_rules! implement_into_id {
             + Shape<2>
             + RawAccessMut
         >(
-            arr: Array<Self, ArrayImpl, 2>, tol: <Self as RlstScalar>::Real, k: Option<usize>
-        ) -> RlstResult<IdDecomposition<Self, ArrayImpl>> {
-            IdDecomposition::<$scalar, ArrayImpl>::new(arr, tol, k)
+            arr: Array<Self, ArrayImpl, 2>, rank_param: Accuracy<<Self as RlstScalar>::Real>
+        ) -> RlstResult<IdDecomposition<Self>> {
+            IdDecomposition::<$scalar>::new(arr, rank_param)
            }
        }
    };
@@ -66,8 +65,8 @@ impl<
     > Array<Item, ArrayImpl, 2>
 {
     /// Compute the interpolative decomposition of a given 2-dimensional array.
-    pub fn into_id_alloc(self, tol: <Item as RlstScalar>::Real, k: Option<usize>) -> RlstResult<IdDecomposition<Item, ArrayImpl>> {
-        <Item as MatrixId>::into_id_alloc(self, tol, k)
+    pub fn into_id_alloc(self, rank_param: Accuracy<<Item as RlstScalar>::Real>) -> RlstResult<IdDecomposition<Item>> {
+        <Item as MatrixId>::into_id_alloc(self, rank_param)
     }
 }
 
@@ -75,15 +74,21 @@ pub trait MatrixIdDecomposition: Sized {
     /// Item type
     type Item: RlstScalar;
-    /// Array implementaion
-    type ArrayImpl: UnsafeRandomAccessByValue<2, Item = Self::Item>
-        + Stride<2>
-        + RawAccessMut
-        + Shape<2>;
-
     /// Create a new Interpolative Decomposition from a given array.
-    fn new(arr: Array<Self::Item, Self::ArrayImpl, 2>, tol: <Self::Item as RlstScalar>::Real, k: Option<usize>) -> RlstResult<Self>;
+    fn new<
+        ArrayImpl: UnsafeRandomAccessByValue<2, Item=Self::Item>
+        + Stride<2>
+        + Shape<2>
+        + RawAccessMut>(arr: Array<Self::Item, ArrayImpl, 2>, rank_param: Accuracy<<Self::Item as RlstScalar>::Real>) -> RlstResult<Self>;
 
+    /// Compute the rank of the decomposition from a given tolerance
+    fn rank_from_tolerance<
+        ArrayImplMut: UnsafeRandomAccessByValue<2, Item = Self::Item>
+            + Shape<2>
+            + UnsafeRandomAccessMut<2, Item = Self::Item>
+            + UnsafeRandomAccessByRef<2, Item = Self::Item>,
+    >(r_diag: Array<Self::Item, ArrayImplMut, 2>, tol: <Self::Item as RlstScalar>::Real)->usize;
+
     ///Compute the permutation matrix associated to the Interpolative Decomposition
     fn get_p<
         ArrayImplMut: UnsafeRandomAccessByValue<2, Item = Self::Item>
@@ -91,120 +96,136 @@ pub trait MatrixIdDecomposition: Sized {
         + UnsafeRandomAccessMut<2, Item = Self::Item>
         + UnsafeRandomAccessByRef<2, Item = Self::Item>,
     >(
-        arr: Array<Self::Item, ArrayImplMut, 2>,
-        perm: Vec<usize>
+        &self, arr: Array<Self::Item, ArrayImplMut, 2>
     );
+
 }
 
 ///Stores the relevant features regarding interpolative decomposition.
 pub struct IdDecomposition<
-    Item: RlstScalar,
-    ArrayImpl: UnsafeRandomAccessByValue<2, Item = Item> + Stride<2> + Shape<2> + RawAccessMut,
+    Item: RlstScalar
 > {
-    /// arr: permuted array
-    pub arr: Array<Item, ArrayImpl, 2>,
-    /// perm_mat: permutation matrix associated to the interpolative decomposition
-    pub perm_mat: Array<Item, BaseArray<Item, VectorContainer<Item>, 2>, 2>,
+    /// skel: skeleton of the interpolative decomposition
+    pub skel: DynamicArray<Item, 2>,
+    /// perm: permutation induced by the pivoting of the interpolative decomposition
+    pub perm: Vec<usize>,
     /// rank: rank of the matrix associated to the interpolative decomposition for a given tolerance
     pub rank: usize,
     ///id_mat: interpolative matrix calculated for a given tolerance
     pub id_mat: Array<Item, BaseArray<Item, VectorContainer<Item>, 2>, 2>,
 }
 
+///Options to decide the matrix rank
+pub enum Accuracy<T>{
+    /// Indicates that the rank of the decomposition will be computed from a given tolerance
+    Tol(T),
+    /// Indicates that the rank of the decomposition is given beforehand by the user
+    FixedRank(usize),
+    /// Computes the rank from the tolerance; if that rank is smaller than the user-given value, the user-given value is used instead
+    MaxRank(T, usize)
+}
+
 macro_rules! impl_id {
     ($scalar:ty) => {
-        impl<
-                ArrayImpl: UnsafeRandomAccessByValue<2, Item = $scalar>
-                    + Stride<2>
-                    + Shape<2>
-                    + RawAccessMut,
-            > MatrixIdDecomposition for IdDecomposition<$scalar, ArrayImpl>
+        impl MatrixIdDecomposition for IdDecomposition<$scalar>
         {
             type Item = $scalar;
-            type ArrayImpl = ArrayImpl;
 
-            fn new(mut arr: Array<$scalar, ArrayImpl, 2>, tol: <$scalar as RlstScalar>::Real, k: Option<usize>) -> RlstResult<Self>{
-                //We assume that for a matrix of m rows and n columns, n>m, so we apply ID the transpose
-                let mut arr_trans: Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 2>, 2> = rlst_dynamic_array2!($scalar, [arr.shape()[1], arr.shape()[0]]);
-                arr_trans.fill_from(arr.view().conj().transpose());
+            fn new<
+                ArrayImpl: UnsafeRandomAccessByValue<2, Item = $scalar>
+                    + Stride<2>
+                    + Shape<2>
+                    + RawAccessMut,
+            >(arr: Array<$scalar, ArrayImpl, 2>, rank_param: Accuracy<<$scalar as RlstScalar>::Real>) -> RlstResult<Self>{
 
                 //We compute the QR decomposition using rlst QR decomposition
-                let mut arr_qr: Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 2>, 2> = rlst_dynamic_array2!($scalar, [arr.shape()[1], arr.shape()[0]]);
+                let mut arr_qr: DynamicArray<$scalar, 2> = rlst_dynamic_array2!($scalar, [arr.shape()[1], arr.shape()[0]]);
                 arr_qr.fill_from(arr.view().conj().transpose());
                 let arr_qr_shape = arr_qr.shape();
                 let qr = arr_qr.view_mut().into_qr_alloc().unwrap();
-
+                //We obtain relevant parameters of the decomposition: the permutation induced by the pivoting and the R matrix
                 let perm = qr.get_perm();
                 let mut r_mat = rlst_dynamic_array2!($scalar, [arr_qr_shape[1], arr_qr_shape[1]]);
                 qr.get_r(r_mat.view_mut());
-
                 //The maximum rank is given by the number of columns of the transposed matrix
                 let dim: usize = arr_qr_shape[1];
                 let rank: usize;
 
                 //The rank can be given a priori, in which case, we do not need to compute the rank using the tolerance parameter.
-                match k {
-                    Some(k) => rank = k,
-                    None => {
-                        //We extract the diagonal to calculate the rank of the matrix
-                        let mut r_diag:Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 1>, 1> = rlst_dynamic_array1!($scalar, [dim]);
-                        r_mat.get_diag(r_diag.view_mut());
-                        let max: $scalar = r_diag.iter().max_by(|a, b| a.abs().total_cmp(&b.abs())).unwrap().abs().into();
-
-                        //We compute the rank of the matrix
-                        if max.re() > 0.0{
-                            let alpha: $scalar = (1.0/max) as $scalar;
-                            r_diag.scale_inplace(alpha);
-                            let aux_vec = r_diag.iter().filter(|el| el.abs() > tol.into() ).collect::<Vec<_>>();
-                            rank = aux_vec.len();
-                        }
-                        else{
-                            rank = dim;
-                        }
+
+                match rank_param{
+                    Accuracy::Tol(tol) =>{
+                        rank = Self::rank_from_tolerance(r_mat.view_mut(), tol);
+                    },
+                    Accuracy::FixedRank(k) => rank = k,
+                    Accuracy::MaxRank(tol, k) =>{
+                        rank = std::cmp::max(k, Self::rank_from_tolerance(r_mat.view_mut(), tol));
                     },
                 }
-
-                let mut perm_mat = rlst_dynamic_array2!($scalar, [dim, dim]);
-                Self::get_p(perm_mat.view_mut(), perm);
-
-
-                let mut perm_arr = empty_array::<$scalar, 2>()
-                    .simple_mult_into_resize(perm_mat.view_mut(), arr.view());
+                //We permute arr to extract the columns belonging to the skeleton
+                let mut permutation = rlst_dynamic_array2!($scalar, [arr_qr_shape[1], arr_qr_shape[1]]);
+                permutation.set_zero();
 
-                for col in 0..arr.shape()[1]{
-                    for row in 0..arr.shape()[0]{
-                        arr.data_mut()[col*arr_qr_shape[1] + row] = *perm_arr.get_mut([row, col]).unwrap()
-                    }
+                for (index, &elem) in perm.iter().enumerate() {
+                    *permutation.get_mut([index, elem]).unwrap() = <$scalar as num::One>::one();
                 }
+                let perm_arr = empty_array::<$scalar, 2>()
+                    .simple_mult_into_resize(permutation.view_mut(), arr.view());
+
+                let mut skel = empty_array();
+                skel.fill_from_resize(perm_arr.into_subview([0,0],[rank, arr_qr_shape[0]]));
 
                 //In the case the matrix is full rank or we get a matrix of rank 0, then return the identity matrix.
                 //If not, compute the Interpolative Decomposition matrix.
                 if rank == 0 || rank >= dim{
                     let mut id_mat = rlst_dynamic_array2!($scalar, [dim, dim]);
                     id_mat.set_identity();
-                    Ok(Self{arr, perm_mat, rank, id_mat})
+                    Ok(Self{skel, perm, rank, id_mat})
                 }
                 else{
-                    let shape: [usize; 2] = r_mat.shape();
-                    let mut id_mat: Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 2>, 2>= rlst_dynamic_array2!($scalar, [dim-rank, rank]);
+                    let shape: [usize; 2] = [arr_qr_shape[1], arr_qr_shape[1]];
+                    let mut id_mat: DynamicArray<$scalar, 2> = rlst_dynamic_array2!($scalar, [dim-rank, rank]);
 
-                    let mut k11: Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 2>, 2> = rlst_dynamic_array2!($scalar, [rank, rank]);
+                    let mut k11: DynamicArray<$scalar, 2> = rlst_dynamic_array2!($scalar, [rank, rank]);
                     k11.fill_from(r_mat.view_mut().into_subview([0, 0], [rank, rank]));
                     k11.view_mut().into_inverse_alloc().unwrap();
 
-                    let mut k12: Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 2>, 2> = rlst_dynamic_array2!($scalar, [rank, dim-rank]);
+                    let mut k12: DynamicArray<$scalar, 2> = rlst_dynamic_array2!($scalar, [rank, dim-rank]);
                     k12.fill_from(r_mat.view_mut().into_subview([0, rank], [rank, shape[1]-rank]));
 
                     let res = empty_array().simple_mult_into_resize(k11.view(), k12.view());
                     id_mat.fill_from(res.view().conj().transpose().view());
 
-                    Ok(Self{arr, perm_mat, rank, id_mat})
+                    Ok(Self{skel, perm, rank, id_mat})
+                }
+            }
+
+            fn rank_from_tolerance<
+                ArrayImplMut: UnsafeRandomAccessByValue<2, Item = $scalar>
+                    + Shape<2>
+                    + UnsafeRandomAccessMut<2, Item = $scalar>
+                    + UnsafeRandomAccessByRef<2, Item = $scalar>,
+            >(r_mat: Array<$scalar, ArrayImplMut, 2>, tol: <$scalar as RlstScalar>::Real)->usize{
+                let dim = r_mat.shape()[0];
+                let mut r_diag:Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 1>, 1> = rlst_dynamic_array1!($scalar, [dim]);
+                r_mat.get_diag(r_diag.view_mut());
+                let max: $scalar = r_diag.iter().max_by(|a, b| a.abs().total_cmp(&b.abs())).unwrap().abs().into();
+
+                //We compute the rank of the matrix
+                if max.re() > 0.0{
+                    let alpha: $scalar = (1.0/max) as $scalar;
+                    r_diag.scale_inplace(alpha);
+                    let aux_vec = r_diag.iter().filter(|el| el.abs() > tol.into() ).collect::<Vec<_>>();
+                    aux_vec.len()
+                }
+                else{
+                    dim
                 }
             }
 
@@ -214,14 +235,11 @@ macro_rules! impl_id {
                 + UnsafeRandomAccessMut<2, Item = $scalar>
                 + UnsafeRandomAccessByRef<2, Item = $scalar>,
             >(
-                mut arr: Array<$scalar, ArrayImplMut, 2>,
-                perm: Vec<usize>
+                &self, mut arr: Array<$scalar, ArrayImplMut, 2>
             ) {
-                let m = arr.shape()[0];
-                arr.set_zero();
 
-                for col in 0..m {
-                    arr[[col, perm[col]]] = <$scalar as One>::one();
+                for (index, &elem) in self.perm.iter().enumerate() {
+                    *arr.get_mut([index, elem]).unwrap() = <$scalar as num::One>::one();
                 }
             }
         }
diff --git a/src/dense/linalg/null_space.rs b/src/dense/linalg/null_space.rs
index 8a0b0d2..7d5d626 100644
--- a/src/dense/linalg/null_space.rs
+++ b/src/dense/linalg/null_space.rs
@@ -1,11 +1,7 @@
 use crate::dense::array::Array;
-use crate::dense::traits::{
-    RawAccessMut, Shape, Stride, UnsafeRandomAccessByValue,
-};
+use crate::dense::traits::{RawAccessMut, Shape, Stride, UnsafeRandomAccessByValue, DefaultIterator};
 use crate::dense::types::{c32, c64, RlstResult, RlstScalar};
-use crate::{BaseArray, VectorContainer, rlst_dynamic_array1, rlst_dynamic_array2};
-use crate::dense::traits::DefaultIterator;
-use crate::empty_array;
+use crate::{BaseArray, VectorContainer, rlst_dynamic_array1, rlst_dynamic_array2, empty_array};
 use itertools::min;
 
 /// Compute the matrix nullspace.
@@ -26,12 +22,6 @@ use itertools::min;
 /// ```
 /// This method allocates memory for the nullspace computation.
-
-// pub trait MatrixNull {
-//     /// Compute the matrix null space
-//     fn into_null_alloc(arr, null_space_type) -> RlstResult<NullSpace<Self>>;
-// }
-
 pub trait MatrixNull: RlstScalar {
     /// Compute the matrix null space
     fn into_null_alloc<
@@ -41,7 +31,7 @@ pub trait MatrixNull: RlstScalar {
             + RawAccessMut,
     >(
         arr: Array<Self, ArrayImpl, 2>,
-        null_space_type: NullSpaceType
+        tol: <Self as RlstScalar>::Real
    ) -> RlstResult<NullSpace<Self>>;
 }
 
@@ -55,9 +45,9 @@ macro_rules! implement_into_null {
                    + RawAccessMut,
            >(
                arr: Array<Self, ArrayImpl, 2>,
-                null_space_type: NullSpaceType
+                tol: <Self as RlstScalar>::Real
            ) -> RlstResult<NullSpace<Self>> {
-                NullSpace::<$scalar>::new(arr, null_space_type)
+                NullSpace::<$scalar>::new(arr, tol)
            }
        }
    };
@@ -77,27 +67,17 @@ impl<
     > Array<Item, ArrayImpl, 2>
 {
     /// Compute the Column or Row nullspace of a given 2-dimensional array.
-    pub fn into_null_alloc(self, null_space_type: NullSpaceType) -> RlstResult<NullSpace<Item>> {
-        <Item as MatrixNull>::into_null_alloc(self, null_space_type)
+    pub fn into_null_alloc(self, tol: <Item as RlstScalar>::Real) -> RlstResult<NullSpace<Item>> {
+        <Item as MatrixNull>::into_null_alloc(self, tol)
     }
 }
 
-///Null space
-#[derive(Clone, Copy)]
-#[repr(u8)]
-pub enum NullSpaceType {
-    /// Row Nullspace
-    Row = b'R',
-    /// Column Nullspace
-    Column = b'C',
-}
+type RealScalar<T> = <T as RlstScalar>::Real;
 
 /// QR decomposition
 pub struct NullSpace<
     Item: RlstScalar
 > {
-    ///Row or column nullspace
-    pub null_space_type: NullSpaceType,
     ///Computed null space
     pub null_space_arr: Array<Item, BaseArray<Item, VectorContainer<Item>, 2>, 2>,
 }
 
@@ -106,70 +86,47 @@ macro_rules! implement_null_space {
     ($scalar:ty) => {
         impl NullSpace<$scalar> {
+
             fn new<
             ArrayImpl: UnsafeRandomAccessByValue<2, Item = $scalar>
                 + Stride<2>
                 + Shape<2>
                 + RawAccessMut,
-            >(arr: Array<$scalar, ArrayImpl, 2>, null_space_type: NullSpaceType) -> RlstResult<Self> {
+            >(arr: Array<$scalar, ArrayImpl, 2>, tol: RealScalar<$scalar>) -> RlstResult<Self> {
 
                 let shape = arr.shape();
-
-                match null_space_type {
-                    NullSpaceType::Row => {
-                        let mut arr_qr: Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 2>, 2> = rlst_dynamic_array2!($scalar, [shape[1], shape[0]]);
-                        arr_qr.fill_from(arr.view().conj().transpose());
-                        let mut null_space_arr = empty_array();
-                        Self::find_null_space(arr_qr, &mut null_space_arr);
-                        Ok(Self {null_space_type, null_space_arr})
-
-                    },
-                    NullSpaceType::Column => {
-                        let mut null_space_arr = empty_array();
-                        Self::find_null_space(arr, &mut null_space_arr);
-                        Ok(Self {null_space_type, null_space_arr})
-                    },
-                }
-
-            }
-
-            fn find_null_space<
-                ArrayImpl: UnsafeRandomAccessByValue<2, Item = $scalar>
-                    + Stride<2>
-                    + Shape<2>
-                    + RawAccessMut,
-            >(arr: Array<$scalar, ArrayImpl, 2>, null_space_arr: &mut Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 2>, 2>)
-            {
-                let shape = arr.shape();
                 let dim: usize = min(shape).unwrap();
-                let qr = arr.into_qr_alloc().unwrap();
-
-                //We compute the QR decomposition to find a linearly independent basis of the space.
-                let mut q: Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 2>, 2> = rlst_dynamic_array2!($scalar, [shape[0], shape[0]]);
-                let _ = qr.get_q_alloc(q.view_mut());
+                let mut singular_values = rlst_dynamic_array1!(RealScalar<$scalar>, [dim]);
+                let mode = crate::dense::linalg::svd::SvdMode::Full;
+                let mut u = rlst_dynamic_array2!($scalar, [shape[0], shape[0]]);
+                let mut vt = rlst_dynamic_array2!($scalar, [shape[1], shape[1]]);
 
-                let mut r_mat = rlst_dynamic_array2!($scalar, [dim, dim]);
-                qr.get_r(r_mat.view_mut());
+                arr.into_svd_alloc(u.view_mut(), vt.view_mut(), singular_values.data_mut(), mode).unwrap();
 
                 //For a full rank rectangular matrix, then rank = dim.
                 //find_matrix_rank checks if the matrix is full rank and recomputes the rank.
-                let rank: usize = Self::find_matrix_rank(r_mat, dim);
+                let rank: usize = Self::find_matrix_rank(singular_values, dim, tol);
 
-                null_space_arr.fill_from_resize(q.into_subview([0, shape[1]], [shape[0], shape[0]-rank]));
+                //The null space is given by the last shape[1]-rank columns of V
+                let mut null_space_arr = empty_array();
+                null_space_arr.fill_from_resize(vt.conj().transpose().into_subview([0, rank], [shape[1], shape[1]-rank]));
+
+                Ok(Self {null_space_arr})
+
             }
 
-            fn find_matrix_rank(r_mat: Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 2>, 2>, dim: usize)->usize{
+            fn find_matrix_rank(singular_values: Array<RealScalar<$scalar>, BaseArray<RealScalar<$scalar>, VectorContainer<RealScalar<$scalar>>, 1>, 1>, dim: usize, tol:<$scalar as RlstScalar>::Real)->usize{
                 //We compute the rank of the matrix by expecting the values of the elements in the diagonal of R.
-                let mut r_diag:Array<$scalar, BaseArray<$scalar, VectorContainer<$scalar>, 1>, 1> = rlst_dynamic_array1!($scalar, [dim]);
-                r_mat.get_diag(r_diag.view_mut());
+                let mut singular_values_copy: Array<RealScalar<$scalar>, BaseArray<RealScalar<$scalar>, VectorContainer<RealScalar<$scalar>>, 1>, 1> = rlst_dynamic_array1!(RealScalar<$scalar>, [dim]);
+                singular_values_copy.fill_from(singular_values.view());
 
-                let max: $scalar = r_diag.iter().max_by(|a, b| a.abs().total_cmp(&b.abs())).unwrap().abs().into();
+                let max: RealScalar<$scalar> = singular_values.view().iter().max_by(|a, b| a.abs().total_cmp(&b.abs())).unwrap().abs().into();
                 let rank: usize;
 
                 if max.re() > 0.0{
-                    let alpha: $scalar = (1.0/max) as $scalar;
-                    r_diag.scale_inplace(alpha);
-                    let aux_vec = r_diag.iter().filter(|el| el.abs() > 1e-15 ).collect::<Vec<_>>();
+                    let alpha: RealScalar<$scalar> = (1.0/max);
+                    singular_values_copy.scale_inplace(alpha);
+                    let aux_vec = singular_values_copy.iter().filter(|el| el.abs() > tol ).collect::<Vec<_>>();
                     rank = aux_vec.len();
                 }
                 else{
diff --git a/src/prelude.rs b/src/prelude.rs
index b759a4c..f1da0c8 100644
--- a/src/prelude.rs
+++ b/src/prelude.rs
@@ -66,7 +66,6 @@ pub use crate::dense::linalg::qr::{MatrixQr, QrDecomposition};
 pub use crate::dense::linalg::svd::{MatrixSvd, SvdMode};
 pub use crate::dense::linalg::interpolative_decomposition::{MatrixId, IdDecomposition};
 pub use crate::dense::linalg::null_space::{MatrixNull, NullSpace};
-pub use crate::dense::linalg::elementary_matrix::{ElementaryMatrixData, ElementaryMatrix};
 
 pub use crate::dense::array::rank1_array::Rank1Array;
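Not part of the patch above: a minimal usage sketch of the reworked interfaces, assuming the crate builds with this change set applied. It mirrors the updated examples — `into_id_alloc` now takes an `Accuracy` variant (`Tol`, `FixedRank`, or `MaxRank`) instead of `(tol, Option<rank>)`, and `into_null_alloc` takes a singular-value tolerance instead of a `NullSpaceType`. The random matrices here are only placeholders; for a genuinely low-rank input see `examples/rlst_interpolative_decomposition.rs`.

```rust
//! Hypothetical snippet (not in the repository); names follow the diff above.
use rlst::dense::linalg::interpolative_decomposition::Accuracy;
use rlst::prelude::*;

pub fn main() {
    // Interpolative decomposition: choose the rank from a tolerance,
    // fix it a priori (Accuracy::FixedRank), or combine a tolerance with
    // a user-supplied rank (Accuracy::MaxRank(tol, k)).
    let mut a = rlst_dynamic_array2!(f64, [50, 100]);
    a.fill_from_seed_equally_distributed(0);
    let id = a.view_mut().into_id_alloc(Accuracy::Tol(1e-5)).unwrap();
    println!("Rank chosen from the tolerance: {}", id.rank);

    // Null space: the cutoff for small singular values is now passed explicitly.
    let mut b = rlst_dynamic_array2!(f64, [3, 4]);
    b.fill_from_seed_equally_distributed(1);
    let null_res = b.view_mut().into_null_alloc(1e-15).unwrap();
    println!("Null space dimension: {}", null_res.null_space_arr.shape()[1]);
}
```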