Timo/refactor #104

Merged: 9 commits, Dec 15, 2024
9 changes: 5 additions & 4 deletions Cargo.toml
@@ -26,10 +26,11 @@ blas = "0.22"
lapack = "0.19"
thiserror = "2"
serde = "1"
bempp-ghost = { git = "https://github.com/bempp/ghost.git" }
# bempp-distributed-tools = { path = "../distributed_tools", optional = true }
bempp-distributed-tools = { git = "https://github.com/bempp/distributed_tools.git", optional = true }

coe-rs = "0.1.2"
pulp = { version = "0.20" }
pulp = { version = "0.21" }
bytemuck = "1.16.0"

mpi = { version = "0.8.0", optional = true }
@@ -49,8 +50,8 @@ git2 = "0.19"

[features]
strict = []
default = []
mpi = ["dep:mpi"]
default = ["mpi"]
mpi = ["dep:mpi", "dep:bempp-distributed-tools"]
suitesparse = []
disable_system_blas_lapack = []
sleef = []
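The manifest changes bump pulp from 0.20 to 0.21, take bempp-distributed-tools as an optional git dependency, and enable the `mpi` feature by default, with `mpi` now also activating `dep:bempp-distributed-tools`. A minimal downstream manifest sketch, assuming a consumer crate and a placeholder path for rlst (neither appears in this diff):

```toml
[dependencies]
# Default features now include "mpi", which pulls in mpi and bempp-distributed-tools.
rlst = { path = "../rlst" }                             # placeholder source

# To build without MPI support, opt out of the default features:
# rlst = { path = "../rlst", default-features = false }
```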
2 changes: 1 addition & 1 deletion benches/exp.rs
@@ -59,7 +59,7 @@ macro_rules! impl_exp_bench {

c.bench_function(&format!("scalar_exp_{}", stringify!($scalar)), |b| {
b.iter(|| {
pulp::ScalarArch::new().dispatch(Impl::<'_, $scalar> {
pulp::Arch::Scalar.dispatch(Impl::<'_, $scalar> {
values: values.as_slice(),
})
})
2 changes: 1 addition & 1 deletion benches/sin_cos.rs
@@ -59,7 +59,7 @@ macro_rules! impl_sin_cos_bench {

c.bench_function(&format!("scalar_sin_cos_{}", stringify!($scalar)), |b| {
b.iter(|| {
pulp::ScalarArch::new().dispatch(Impl::<'_, $scalar> {
pulp::Arch::Scalar.dispatch(Impl::<'_, $scalar> {
values: values.as_slice(),
})
})
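Both benchmark files follow pulp 0.21, where the scalar fallback is selected with the `pulp::Arch::Scalar` variant rather than the removed `ScalarArch::new()`. A minimal sketch of that dispatch pattern, assuming a hypothetical `Sum` kernel in place of the benchmarks' `Impl` struct:

```rust
use pulp::{Simd, WithSimd};

// Hypothetical kernel that sums a slice; the benchmarks use an analogous `Impl` struct.
struct Sum<'a> {
    values: &'a [f64],
}

impl WithSimd for Sum<'_> {
    type Output = f64;

    #[inline(always)]
    fn with_simd<S: Simd>(self, _simd: S) -> Self::Output {
        self.values.iter().sum()
    }
}

fn main() {
    let values = vec![1.0f64; 1024];
    // pulp 0.21: select the scalar (non-SIMD) code path explicitly.
    let total = pulp::Arch::Scalar.dispatch(Sum { values: &values });
    assert_eq!(total, 1024.0);
}
```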
2 changes: 1 addition & 1 deletion examples/approx_inv_accuracy.rs
@@ -1,4 +1,4 @@
//! Test the accuracy of the inverse sqrt
//! Test the accuracy of the inverse

const NSAMPLES: usize = 10000;
use rand::prelude::*;
2 changes: 2 additions & 0 deletions examples/approx_inv_sqrt_accuracy.rs
@@ -1,5 +1,6 @@
//! Test the accuracy of the inverse sqrt

#[allow(dead_code)]
const NSAMPLES: usize = 10000;
use rand::prelude::*;
use rlst::SimdFor;
@@ -53,6 +54,7 @@ fn main() {
println!("Maximum relative error f64: {:.2E}", max_error_f64);
}

#[allow(dead_code)]
#[cfg(target_arch = "x86_64")]
fn main() {
fn rel_diff_sqrt_f32(a: f32, b: f32) -> f32 {
6 changes: 3 additions & 3 deletions examples/cg_distributed.rs
@@ -5,7 +5,7 @@ use rand::Rng;
use rlst::operator::interface::distributed_sparse_operator::DistributedCsrMatrixOperator;
use rlst::operator::interface::DistributedArrayVectorSpace;
use rlst::{
CgIteration, DefaultDistributedIndexLayout, DistributedCsrMatrix, Element, LinearSpace,
CgIteration, DistributedCsrMatrix, Element, EquiDistributedIndexLayout, LinearSpace,
NormedSpace,
};

@@ -22,9 +22,9 @@ pub fn main() {
let n = 500;
let tol = 1E-5;

let index_layout = DefaultDistributedIndexLayout::new(n, 1, &world);
let index_layout = EquiDistributedIndexLayout::new(n, 1, &world);

let space = DistributedArrayVectorSpace::<f64, _>::new(&index_layout);
let space = DistributedArrayVectorSpace::<_, f64>::new(&index_layout);
let mut residuals = Vec::<f64>::new();

let mut rng = rand::thread_rng();
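The example now builds its layout with `EquiDistributedIndexLayout` from bempp-distributed-tools and passes the layout type as the first generic parameter of the space. A minimal setup sketch, assuming an MPI environment and the prelude re-exports introduced in this PR (error handling elided):

```rust
use rlst::operator::interface::DistributedArrayVectorSpace;
use rlst::EquiDistributedIndexLayout;

fn main() {
    let universe = mpi::initialize().unwrap();
    let world = universe.world();

    // 500 global indices distributed in chunks of 1 across the communicator.
    let index_layout = EquiDistributedIndexLayout::new(500, 1, &world);

    // Layout type first (inferred here), scalar type second.
    let space = DistributedArrayVectorSpace::<_, f64>::new(&index_layout);
    let _ = space;
}
```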
14 changes: 7 additions & 7 deletions examples/distributed_csr_matmul.rs
@@ -13,24 +13,24 @@ fn main() {

let dist_mat;

let domain_layout = DefaultDistributedIndexLayout::new(313, 1, &world);
let domain_layout = EquiDistributedIndexLayout::new(313, 1, &world);

let mut dist_x = DistributedVector::<f64, _>::new(&domain_layout);
let mut dist_x = DistributedVector::<_, f64>::new(&domain_layout);

let range_layout = DefaultDistributedIndexLayout::new(507, 1, &world);
let range_layout = EquiDistributedIndexLayout::new(507, 1, &world);

let mut dist_y = DistributedVector::<f64, _>::new(&range_layout);
let mut dist_y = DistributedVector::<_, f64>::new(&range_layout);

if rank == 0 {
// Read the sparse matrix in matrix market format.
let sparse_mat = read_coordinate_mm::<f64>("mat_507_313.mm").unwrap();
let sparse_mat = read_coordinate_mm::<f64>("examples/mat_507_313.mm").unwrap();

// Read the vector x. Note that the matrix market format mandates two dimensions for arrays.
// So the vector is returned as two-dimensional array with the column dimension being 1.
let x = read_array_mm::<f64>("x_313.mm").unwrap();
let x = read_array_mm::<f64>("examples/x_313.mm").unwrap();

// Read the expected result vector in matrix market format.
let y_expected = read_array_mm::<f64>("y_507.mm").unwrap();
let y_expected = read_array_mm::<f64>("examples/y_507.mm").unwrap();

// Create a new vector to store the actual matrix-vector product.
let mut y_actual = rlst_dynamic_array1!(f64, [507]);
4 changes: 2 additions & 2 deletions examples/mpi_gather_to_all_vector.rs
@@ -16,9 +16,9 @@ pub fn main() {

let rank = world.rank() as usize;

let index_layout = DefaultDistributedIndexLayout::new(NDIM, 1, &world);
let index_layout = EquiDistributedIndexLayout::new(NDIM, 1, &world);

let vec = DistributedVector::<f64, _>::new(&index_layout);
let vec = DistributedVector::<_, f64>::new(&index_layout);

let local_index_range = vec.index_layout().index_range(rank).unwrap();

4 changes: 2 additions & 2 deletions examples/mpi_gather_vector.rs
@@ -15,9 +15,9 @@ pub fn main() {

let rank = world.rank() as usize;

let index_layout = DefaultDistributedIndexLayout::new(NDIM, 1, &world);
let index_layout = EquiDistributedIndexLayout::new(NDIM, 1, &world);

let vec = DistributedVector::<f64, _>::new(&index_layout);
let vec = DistributedVector::<_, f64>::new(&index_layout);

let local_index_range = vec.index_layout().index_range(rank).unwrap();

2 changes: 1 addition & 1 deletion examples/mpi_index_set.rs
@@ -11,7 +11,7 @@ pub fn main() {
let universe = mpi::initialize().unwrap();
let world = universe.world();

let index_layout = DefaultDistributedIndexLayout::new(NCHUNKS, CHUNK_SIZE, &world);
let index_layout = EquiDistributedIndexLayout::new(NCHUNKS, CHUNK_SIZE, &world);

if world.rank() == 0 {
println!("Local index range: {:#?}", index_layout.local_range());
4 changes: 2 additions & 2 deletions examples/mpi_scatter_vector.rs
@@ -15,9 +15,9 @@ pub fn main() {

let rank = world.rank() as usize;

let index_layout = DefaultDistributedIndexLayout::new(NDIM, 1, &world);
let index_layout = EquiDistributedIndexLayout::new(NDIM, 1, &world);

let mut vec = DistributedVector::<f64, _>::new(&index_layout);
let mut vec = DistributedVector::<_, f64>::new(&index_layout);

let local_index_range = vec.index_layout().index_range(rank).unwrap();

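The same signature change recurs through the MPI examples above: `DistributedVector` now takes the index layout as its first generic parameter and the scalar type second. A condensed sketch of the new construction, assuming an MPI environment and the prelude re-exports:

```rust
use rlst::{DistributedVector, EquiDistributedIndexLayout};

fn main() {
    let universe = mpi::initialize().unwrap();
    let world = universe.world();

    let index_layout = EquiDistributedIndexLayout::new(16, 1, &world);

    // Previously: DistributedVector::<f64, _>::new(&index_layout)
    let vec = DistributedVector::<_, f64>::new(&index_layout);
    let _ = vec;
}
```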
53 changes: 30 additions & 23 deletions src/operator/interface/distributed_array_vector_space.rs
@@ -2,56 +2,63 @@

use std::marker::PhantomData;

use mpi::traits::{Communicator, Equivalence};
use mpi::traits::Equivalence;

use crate::dense::types::RlstScalar;
use crate::operator::space::{Element, IndexableSpace, InnerProductSpace, LinearSpace};
use crate::{DefaultDistributedIndexLayout, DistributedVector, IndexLayout};
use crate::DistributedVector;
use bempp_distributed_tools::IndexLayout;

/// Array vector space
pub struct DistributedArrayVectorSpace<'a, Item: RlstScalar + Equivalence, C: Communicator> {
index_layout: &'a DefaultDistributedIndexLayout<'a, C>,
pub struct DistributedArrayVectorSpace<'a, Layout: IndexLayout, Item: RlstScalar + Equivalence> {
index_layout: &'a Layout,
_marker: PhantomData<Item>,
}

/// Element of an array vector space
pub struct DistributedArrayVectorSpaceElement<'a, Item: RlstScalar + Equivalence, C: Communicator> {
elem: DistributedVector<'a, Item, C>,
pub struct DistributedArrayVectorSpaceElement<
'a,
Layout: IndexLayout,
Item: RlstScalar + Equivalence,
> {
elem: DistributedVector<'a, Layout, Item>,
}

impl<'a, C: Communicator, Item: RlstScalar + Equivalence>
DistributedArrayVectorSpaceElement<'a, Item, C>
impl<'a, Layout: IndexLayout, Item: RlstScalar + Equivalence>
DistributedArrayVectorSpaceElement<'a, Layout, Item>
{
/// Create a new element
pub fn new(space: &DistributedArrayVectorSpace<'a, Item, C>) -> Self {
pub fn new(space: &DistributedArrayVectorSpace<'a, Layout, Item>) -> Self {
Self {
elem: DistributedVector::new(space.index_layout),
}
}
}

impl<'a, C: Communicator, Item: RlstScalar + Equivalence> DistributedArrayVectorSpace<'a, Item, C> {
impl<'a, Layout: IndexLayout, Item: RlstScalar + Equivalence>
DistributedArrayVectorSpace<'a, Layout, Item>
{
/// Create a new vector space
pub fn new(index_layout: &'a DefaultDistributedIndexLayout<'a, C>) -> Self {
pub fn new(index_layout: &'a Layout) -> Self {
Self {
index_layout,
_marker: PhantomData,
}
}
}

impl<C: Communicator, Item: RlstScalar + Equivalence> IndexableSpace
for DistributedArrayVectorSpace<'_, Item, C>
impl<Layout: IndexLayout, Item: RlstScalar + Equivalence> IndexableSpace
for DistributedArrayVectorSpace<'_, Layout, Item>
{
fn dimension(&self) -> usize {
self.index_layout.number_of_global_indices()
}
}

impl<'a, C: Communicator, Item: RlstScalar + Equivalence> LinearSpace
for DistributedArrayVectorSpace<'a, Item, C>
impl<'a, Layout: IndexLayout, Item: RlstScalar + Equivalence> LinearSpace
for DistributedArrayVectorSpace<'a, Layout, Item>
{
type E = DistributedArrayVectorSpaceElement<'a, Item, C>;
type E = DistributedArrayVectorSpaceElement<'a, Layout, Item>;

type F = Item;

@@ -60,27 +67,27 @@ impl<'a, C: Communicator, Item: RlstScalar + Equivalence> LinearSpace
}
}

impl<C: Communicator, Item: RlstScalar + Equivalence> InnerProductSpace
for DistributedArrayVectorSpace<'_, Item, C>
impl<Layout: IndexLayout, Item: RlstScalar + Equivalence> InnerProductSpace
for DistributedArrayVectorSpace<'_, Layout, Item>
{
fn inner(&self, x: &Self::E, other: &Self::E) -> Self::F {
x.view().inner(other.view())
}
}

impl<'a, C: Communicator, Item: RlstScalar + Equivalence> Element
for DistributedArrayVectorSpaceElement<'a, Item, C>
impl<'a, Layout: IndexLayout, Item: RlstScalar + Equivalence> Element
for DistributedArrayVectorSpaceElement<'a, Layout, Item>
{
type F = Item;
type Space = DistributedArrayVectorSpace<'a, Item, C>;
type Space = DistributedArrayVectorSpace<'a, Layout, Item>;

type View<'b>
= &'b DistributedVector<'a, Item, C>
= &'b DistributedVector<'a, Layout, Item>
where
Self: 'b;

type ViewMut<'b>
= &'b mut DistributedVector<'a, Item, C>
= &'b mut DistributedVector<'a, Layout, Item>
where
Self: 'b;

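The space is no longer tied to `DefaultDistributedIndexLayout` plus a `Communicator` parameter; it is generic over any `IndexLayout` implementation from bempp-distributed-tools. A sketch of code written against the new signature, assuming the import paths shown in the diff (`describe` is a hypothetical helper):

```rust
use bempp_distributed_tools::IndexLayout;
use mpi::traits::Equivalence;
use rlst::operator::interface::DistributedArrayVectorSpace;
use rlst::RlstScalar;

// Generic over any IndexLayout implementation rather than one concrete layout type.
#[allow(dead_code)]
fn describe<Layout: IndexLayout, Item: RlstScalar + Equivalence>(
    _space: &DistributedArrayVectorSpace<'_, Layout, Item>,
) {
    // Real code would go through the LinearSpace / IndexableSpace impls here.
}

fn main() {}
```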
59 changes: 42 additions & 17 deletions src/operator/interface/distributed_sparse_operator.rs
@@ -1,4 +1,5 @@
//! Distributed sparse operator
use bempp_distributed_tools::IndexLayout;
use mpi::traits::{Communicator, Equivalence};

use crate::dense::traits::Shape;
@@ -13,14 +14,24 @@ use crate::{
use super::DistributedArrayVectorSpace;

/// CSR matrix operator
pub struct DistributedCsrMatrixOperator<'a, Item: RlstScalar + Equivalence, C: Communicator> {
csr_mat: &'a DistributedCsrMatrix<'a, Item, C>,
domain: &'a DistributedArrayVectorSpace<'a, Item, C>,
range: &'a DistributedArrayVectorSpace<'a, Item, C>,
pub struct DistributedCsrMatrixOperator<
'a,
DomainLayout: IndexLayout<Comm = C>,
RangeLayout: IndexLayout<Comm = C>,
Item: RlstScalar + Equivalence,
C: Communicator,
> {
csr_mat: &'a DistributedCsrMatrix<'a, DomainLayout, RangeLayout, Item, C>,
domain: &'a DistributedArrayVectorSpace<'a, DomainLayout, Item>,
range: &'a DistributedArrayVectorSpace<'a, RangeLayout, Item>,
}

impl<Item: RlstScalar + Equivalence, C: Communicator> std::fmt::Debug
for DistributedCsrMatrixOperator<'_, Item, C>
impl<
DomainLayout: IndexLayout<Comm = C>,
RangeLayout: IndexLayout<Comm = C>,
Item: RlstScalar + Equivalence,
C: Communicator,
> std::fmt::Debug for DistributedCsrMatrixOperator<'_, DomainLayout, RangeLayout, Item, C>
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DistributedCsrMatrixOperator")
@@ -30,14 +41,19 @@ impl<Item: RlstScalar + Equivalence, C: Communicator> std::fmt::Debug
}
}

impl<'a, Item: RlstScalar + Equivalence, C: Communicator>
DistributedCsrMatrixOperator<'a, Item, C>
impl<
'a,
DomainLayout: IndexLayout<Comm = C>,
RangeLayout: IndexLayout<Comm = C>,
Item: RlstScalar + Equivalence,
C: Communicator,
> DistributedCsrMatrixOperator<'a, DomainLayout, RangeLayout, Item, C>
{
/// Create a new CSR matrix operator
pub fn new(
csr_mat: &'a DistributedCsrMatrix<'a, Item, C>,
domain: &'a DistributedArrayVectorSpace<'a, Item, C>,
range: &'a DistributedArrayVectorSpace<'a, Item, C>,
csr_mat: &'a DistributedCsrMatrix<'a, DomainLayout, RangeLayout, Item, C>,
domain: &'a DistributedArrayVectorSpace<'a, DomainLayout, Item>,
range: &'a DistributedArrayVectorSpace<'a, RangeLayout, Item>,
) -> Self {
let shape = csr_mat.shape();
assert_eq!(domain.dimension(), shape[1]);
@@ -50,12 +66,17 @@ impl<'a, Item: RlstScalar + Equivalence, C: Communicator>
}
}

impl<'a, Item: RlstScalar + Equivalence, C: Communicator> OperatorBase
for DistributedCsrMatrixOperator<'a, Item, C>
impl<
'a,
DomainLayout: IndexLayout<Comm = C>,
RangeLayout: IndexLayout<Comm = C>,
Item: RlstScalar + Equivalence,
C: Communicator,
> OperatorBase for DistributedCsrMatrixOperator<'a, DomainLayout, RangeLayout, Item, C>
{
type Domain = DistributedArrayVectorSpace<'a, Item, C>;
type Domain = DistributedArrayVectorSpace<'a, DomainLayout, Item>;

type Range = DistributedArrayVectorSpace<'a, Item, C>;
type Range = DistributedArrayVectorSpace<'a, RangeLayout, Item>;

fn domain(&self) -> &Self::Domain {
self.domain
@@ -66,8 +87,12 @@ impl<'a, Item: RlstScalar + Equivalence, C: Communicator> OperatorBase
}
}

impl<Item: RlstScalar + Equivalence, C: Communicator> AsApply
for DistributedCsrMatrixOperator<'_, Item, C>
impl<
DomainLayout: IndexLayout<Comm = C>,
RangeLayout: IndexLayout<Comm = C>,
Item: RlstScalar + Equivalence,
C: Communicator,
> AsApply for DistributedCsrMatrixOperator<'_, DomainLayout, RangeLayout, Item, C>
{
fn apply_extended(
&self,
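With separate `DomainLayout` and `RangeLayout` parameters, the operator can now connect spaces built over different layout types, provided they share the communicator type `C`. A sketch mirroring the new constructor signature, assuming the matrix and spaces are assembled elsewhere (`as_operator` is a hypothetical wrapper):

```rust
use bempp_distributed_tools::IndexLayout;
use mpi::traits::{Communicator, Equivalence};
use rlst::operator::interface::distributed_sparse_operator::DistributedCsrMatrixOperator;
use rlst::operator::interface::DistributedArrayVectorSpace;
use rlst::{DistributedCsrMatrix, RlstScalar};

// Hypothetical helper wrapping an assembled matrix as an operator. The bounds mirror
// the impl blocks in the diff: domain and range layouts may differ, but both must use
// the same communicator type C.
#[allow(dead_code)]
fn as_operator<'a, DomainLayout, RangeLayout, Item, C>(
    csr_mat: &'a DistributedCsrMatrix<'a, DomainLayout, RangeLayout, Item, C>,
    domain: &'a DistributedArrayVectorSpace<'a, DomainLayout, Item>,
    range: &'a DistributedArrayVectorSpace<'a, RangeLayout, Item>,
) -> DistributedCsrMatrixOperator<'a, DomainLayout, RangeLayout, Item, C>
where
    DomainLayout: IndexLayout<Comm = C>,
    RangeLayout: IndexLayout<Comm = C>,
    Item: RlstScalar + Equivalence,
    C: Communicator,
{
    DistributedCsrMatrixOperator::new(csr_mat, domain, range)
}

fn main() {}
```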
7 changes: 3 additions & 4 deletions src/prelude.rs
@@ -73,14 +73,13 @@ pub use crate::dense::simd::{RlstSimd, SimdFor};

#[cfg(feature = "mpi")]
pub use crate::sparse::{
distributed_vector::DistributedVector, index_layout::DefaultDistributedIndexLayout,
sparse_mat::distributed_csr_mat::DistributedCsrMatrix,
distributed_vector::DistributedVector, sparse_mat::distributed_csr_mat::DistributedCsrMatrix,
};
#[cfg(feature = "mpi")]
pub use bempp_distributed_tools::{EquiDistributedIndexLayout, IndexLayout};

pub use crate::sparse::index_layout::DefaultSerialIndexLayout;
pub use crate::sparse::sparse_mat::csc_mat::CscMatrix;
pub use crate::sparse::sparse_mat::csr_mat::CsrMatrix;
pub use crate::sparse::traits::index_layout::IndexLayout;

pub use crate::operator::interface::{
ArrayVectorSpace, ArrayVectorSpaceElement, CscMatrixOperator, CsrMatrixOperator,
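After the prelude change, `IndexLayout` and `EquiDistributedIndexLayout` come from bempp-distributed-tools and are re-exported behind the `mpi` feature, while `DefaultSerialIndexLayout` and the serial sparse types remain in rlst. A minimal import sketch, assuming rlst's default features (which now include `mpi`) and that the prelude is re-exported at the crate root:

```rust
#![allow(unused_imports)]

// Distributed types, re-exported from bempp-distributed-tools behind the "mpi" feature.
use rlst::{DistributedCsrMatrix, DistributedVector, EquiDistributedIndexLayout, IndexLayout};

// Serial sparse types are unchanged.
use rlst::{CscMatrix, CsrMatrix, DefaultSerialIndexLayout};

fn main() {}
```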