Remove most trait bounds in functions
by using nalgebra::SVector and SMatrix, which use const generics.
hhirtz committed Sep 17, 2021
1 parent 9cf541a commit cd58268
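
The gist of the change, as a minimal hypothetical sketch (centroid_old / centroid_new are made-up names, not functions from this repository; assumes a nalgebra release with const-generics support, 0.26 or later): with type-level dimensions, every dimension-generic function needed a DimName parameter plus DefaultAllocator bounds (and Send + Sync bounds on the buffer for use with rayon), whereas with const generics the SVector storage is a plain array and those bounds disappear.

use nalgebra::{allocator::Allocator, Const, DefaultAllocator, DimName, OVector, SVector};

// Before: the dimension is a type-level integer, so allocator bounds follow
// the function around (and Send + Sync buffer bounds for parallel code).
fn centroid_old<D>(points: &[OVector<f64, D>]) -> OVector<f64, D>
where
    D: DimName,
    DefaultAllocator: Allocator<f64, D>,
{
    // Assumes a non-empty slice.
    let mut sum = OVector::<f64, D>::zeros();
    for p in points {
        sum += p;
    }
    sum / points.len() as f64
}

// After: the dimension is a const generic and SVector<f64, D> is backed by a
// plain array, so no allocator (or extra Send + Sync) bounds are needed.
fn centroid_new<const D: usize>(points: &[SVector<f64, D>]) -> SVector<f64, D> {
    // Assumes a non-empty slice.
    let mut sum = SVector::<f64, D>::zeros();
    for p in points {
        sum += p;
    }
    sum / points.len() as f64
}

fn main() {
    let pts = [
        SVector::<f64, 2>::new(0.0, 0.0),
        SVector::<f64, 2>::new(2.0, 4.0),
    ];
    // Both versions compute the same centroid; only the signatures differ.
    assert_eq!(centroid_new(&pts), SVector::<f64, 2>::new(1.0, 2.0));
    assert_eq!(centroid_old::<Const<2>>(&pts), centroid_new(&pts));
}
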
Showing 9 changed files with 280 additions and 602 deletions.
9 changes: 2 additions & 7 deletions src/algorithms/fiduccia_mattheyses.rs
@@ -1,24 +1,19 @@
use itertools::Itertools;
-use nalgebra::{allocator::Allocator, DefaultAllocator, DimName};
use sprs::CsMatView;

use crate::partition::Partition;
use crate::PointND;
use crate::ProcessUniqueId;
use std::collections::HashMap;

-pub fn fiduccia_mattheyses<'a, D>(
+pub fn fiduccia_mattheyses<'a, const D: usize>(
initial_partition: &mut Partition<'a, PointND<D>, f64>,
adjacency: CsMatView<f64>,
max_passes: impl Into<Option<usize>>,
max_flips_per_pass: impl Into<Option<usize>>,
max_imbalance_per_flip: impl Into<Option<f64>>,
max_bad_move_in_a_row: usize,
-) where
-    D: DimName,
-    DefaultAllocator: Allocator<f64, D>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-{
+) {
let max_passes = max_passes.into();
let max_flips_per_pass = max_flips_per_pass.into();
let max_imbalance_per_flip = max_imbalance_per_flip.into();
98 changes: 16 additions & 82 deletions src/algorithms/k_means.rs
@@ -16,12 +16,6 @@ use super::z_curve;
use itertools::iproduct;
use itertools::Itertools;

-use nalgebra::allocator::Allocator;
-use nalgebra::base::dimension::{DimDiff, DimSub};
-use nalgebra::DefaultAllocator;
-use nalgebra::DimName;
-use nalgebra::U1;
-
/// A wrapper type for ProcessUniqueId
/// to enforce that it represents temporary ids
/// for the k-means algorithm and not a partition id
@@ -33,25 +27,15 @@ type ClusterId = ProcessUniqueId;
/// this version shows some noticeable oscillations when imposing a restrictive balance constraint.
/// It also skips the bounding boxes optimization which would slightly reduce the complexity of the
/// algorithm.
-pub fn simplified_k_means<D>(
+pub fn simplified_k_means<const D: usize>(
points: &[PointND<D>],
weights: &[f64],
num_partitions: usize,
imbalance_tol: f64,
mut n_iter: isize,
hilbert: bool,
-) -> Vec<ProcessUniqueId>
-where
-    D: DimName + DimSub<U1>,
-    DefaultAllocator: Allocator<f64, D, D>
-        + Allocator<f64, D>
-        + Allocator<f64, U1, D>
-        + Allocator<f64, U1, D>
-        + Allocator<f64, DimDiff<D, U1>>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-    <DefaultAllocator as Allocator<f64, D, D>>::Buffer: Send + Sync,
-{
-    let sfc_order = (f64::from(std::u32::MAX)).log(f64::from(2u32.pow(D::dim() as u32))) as u32;
+) -> Vec<ProcessUniqueId> {
+    let sfc_order = (f64::from(std::u32::MAX)).log(f64::from(2u32.pow(D as u32))) as u32;
let permu = if hilbert {
unimplemented!("hilbert curve currently not implemented for n-dimension");
// hilbert_curve::hilbert_curve_reorder(points, 15)
@@ -222,25 +206,15 @@ impl Default for BalancedKmeansSettings {
}
}

-pub fn balanced_k_means<D>(
+pub fn balanced_k_means<const D: usize>(
points: &[PointND<D>],
weights: &[f64],
settings: impl Into<Option<BalancedKmeansSettings>>,
-) -> Vec<ProcessUniqueId>
-where
-    D: DimName + DimSub<U1>,
-    DefaultAllocator: Allocator<f64, D, D>
-        + Allocator<f64, D>
-        + Allocator<f64, U1, D>
-        + Allocator<f64, U1, D>
-        + Allocator<f64, DimDiff<D, U1>>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-    <DefaultAllocator as Allocator<f64, D, D>>::Buffer: Send + Sync,
-{
+) -> Vec<ProcessUniqueId> {
let settings = settings.into().unwrap_or_default();

// sort points with space filling curve
-    let sfc_order = (f64::from(std::u32::MAX)).log(f64::from(2u32.pow(D::dim() as u32))) as u32;
+    let sfc_order = (f64::from(std::u32::MAX)).log(f64::from(2u32.pow(D as u32))) as u32;
let mut permu = if settings.hilbert {
unimplemented!("hilbert curve currently not implemented for n-dimension");
// hilbert_curve::hilbert_curve_reorder(points, 15)
@@ -309,21 +283,12 @@ where
assignments
}

-pub fn balanced_k_means_with_initial_partition<D>(
+pub fn balanced_k_means_with_initial_partition<const D: usize>(
points: &[PointND<D>],
weights: &[f64],
settings: impl Into<Option<BalancedKmeansSettings>>,
initial_partition: &mut [ProcessUniqueId],
-) where
-    D: DimName + DimSub<U1>,
-    DefaultAllocator: Allocator<f64, D, D>
-        + Allocator<f64, D>
-        + Allocator<f64, U1, D>
-        + Allocator<f64, U1, D>
-        + Allocator<f64, DimDiff<D, U1>>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-    <DefaultAllocator as Allocator<f64, D, D>>::Buffer: Send + Sync,
-{
+) {
let settings = settings.into().unwrap_or_default();

// validate partition soundness
@@ -387,11 +352,7 @@ pub fn balanced_k_means_with_initial_partition<D>(
}

#[derive(Clone, Copy)]
-struct Inputs<'a, D>
-where
-    D: DimName,
-    DefaultAllocator: Allocator<f64, D>,
-{
+struct Inputs<'a, const D: usize> {
points: &'a [PointND<D>],
weights: &'a [f64],
}
@@ -414,23 +375,14 @@ struct AlgorithmState<'a> {
// - moving each cluster after load balance
// - checking delta threshold
// - relaxing lower and upper bounds
-fn balanced_k_means_iter<D>(
+fn balanced_k_means_iter<const D: usize>(
inputs: Inputs<D>,
clusters: Clusters<Vec<PointND<D>>, &[ClusterId]>,
permutation: &mut [usize],
state: AlgorithmState,
settings: &BalancedKmeansSettings,
current_iter: usize,
-) where
-    D: DimName + DimSub<U1>,
-    DefaultAllocator: Allocator<f64, D, D>
-        + Allocator<f64, D>
-        + Allocator<f64, U1, D>
-        + Allocator<f64, U1, D>
-        + Allocator<f64, DimDiff<D, U1>>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-    <DefaultAllocator as Allocator<f64, D, D>>::Buffer: Send + Sync,
-{
+) {
let Inputs { points, weights } = inputs;
let Clusters {
centers,
@@ -538,23 +490,14 @@ fn balanced_k_means_iter<D>(
// - checking partitions imbalance
// - increasing of diminishing clusters influence based on their imbalance
// - relaxing upper and lower bounds
-fn assign_and_balance<D>(
+fn assign_and_balance<const D: usize>(
points: &[PointND<D>],
weights: &[f64],
permutation: &mut [usize],
state: AlgorithmState,
clusters: Clusters<&[PointND<D>], &[ClusterId]>,
settings: &BalancedKmeansSettings,
-) where
-    D: DimName + DimSub<U1>,
-    DefaultAllocator: Allocator<f64, D, D>
-        + Allocator<f64, D>
-        + Allocator<f64, U1, D>
-        + Allocator<f64, U1, D>
-        + Allocator<f64, DimDiff<D, U1>>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-    <DefaultAllocator as Allocator<f64, D, D>>::Buffer: Send + Sync,
-{
+) {
let AlgorithmState {
assignments,
influences,
@@ -715,7 +658,7 @@ fn relax_bounds(lbs: &mut [f64], ubs: &mut [f64], distances_moved: &[f64], influ

/// Most inner loop of the algorithm that aims to optimize
/// clusters assignments
-fn best_values<D>(
+fn best_values<const D: usize>(
point: &PointND<D>,
centers: &[PointND<D>],
center_ids: &[ClusterId],
@@ -726,12 +669,7 @@ fn best_values<D>(
f64, // new lb
f64, // new ub
Option<ClusterId>, // new cluster assignment for the current point (None if the same assignment is kept)
-)
-where
-    D: DimName,
-    DefaultAllocator: Allocator<f64, D>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-{
+) {
let mut best_value = std::f64::MAX;
let mut snd_best_value = std::f64::MAX;
let mut assignment = None;
@@ -766,11 +704,7 @@ fn erosion(distance_moved: f64, average_cluster_diameter: f64) -> f64 {
}

// computes the maximum distance between two points in the array
-fn max_distance<D>(points: &[PointND<D>]) -> f64
-where
-    D: DimName,
-    DefaultAllocator: Allocator<f64, D>,
-{
+fn max_distance<const D: usize>(points: &[PointND<D>]) -> f64 {
iproduct!(points, points)
.map(|(p1, p2)| (p1 - p2).norm())
.max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
21 changes: 5 additions & 16 deletions src/algorithms/kernighan_lin.rs
@@ -8,23 +8,16 @@ use crate::partition::Partition;
use crate::ProcessUniqueId;

use itertools::Itertools;
-use nalgebra::allocator::Allocator;
-use nalgebra::DefaultAllocator;
-use nalgebra::DimName;
use sprs::CsMatView;

-pub(crate) fn kernighan_lin<'a, D>(
+pub(crate) fn kernighan_lin<'a, const D: usize>(
initial_partition: &mut Partition<'a, PointND<D>, f64>,
adjacency: CsMatView<f64>,
max_passes: impl Into<Option<usize>>,
max_flips_per_pass: impl Into<Option<usize>>,
max_imbalance_per_flip: impl Into<Option<f64>>,
max_bad_move_in_a_row: usize,
-) where
-    D: DimName,
-    DefaultAllocator: Allocator<f64, D>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-{
+) {
// To adapt Kernighan-Lin to a partition of more than 2 parts,
// we apply the algorithm to each pair of adjacent parts (two parts
// are adjacent if there exists an element in one part that is linked to
@@ -35,7 +28,7 @@ pub(crate) fn kernighan_lin<'a, D>(
let max_imbalance_per_flip = max_imbalance_per_flip.into();
let (_points, weights, ids) = initial_partition.as_raw_mut();

-    kernighan_lin_2_impl(
+    kernighan_lin_2_impl::<D>(
weights,
adjacency.view(),
ids,
@@ -46,19 +39,15 @@ pub(crate) fn kernighan_lin<'a, D>(
);
}

-fn kernighan_lin_2_impl<D>(
+fn kernighan_lin_2_impl<const D: usize>(
weights: &[f64],
adjacency: CsMatView<f64>,
initial_partition: &mut [ProcessUniqueId],
max_passes: Option<usize>,
max_flips_per_pass: Option<usize>,
_max_imbalance_per_flip: Option<f64>,
max_bad_move_in_a_row: usize,
-) where
-    D: DimName,
-    DefaultAllocator: Allocator<f64, D>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-{
+) {
let unique_ids = initial_partition
.iter()
.cloned()
33 changes: 7 additions & 26 deletions src/algorithms/multi_jagged.rs
@@ -7,10 +7,6 @@

use approx::Ulps;

-use nalgebra::allocator::Allocator;
-use nalgebra::DefaultAllocator;
-use nalgebra::DimName;
-
use crate::geometry::*;
use rayon::prelude::*;
use snowflake::ProcessUniqueId;
@@ -150,31 +146,21 @@ fn compute_modifiers(
.collect()
}

-pub fn multi_jagged<D>(
+pub fn multi_jagged<const D: usize>(
points: &[PointND<D>],
weights: &[f64],
num_parts: usize,
max_iter: usize,
-) -> Vec<ProcessUniqueId>
-where
-    D: DimName,
-    DefaultAllocator: Allocator<f64, D>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-{
+) -> Vec<ProcessUniqueId> {
let partition_scheme = partition_scheme(num_parts, max_iter);
multi_jagged_with_scheme(points, weights, partition_scheme)
}

-fn multi_jagged_with_scheme<D>(
+fn multi_jagged_with_scheme<const D: usize>(
points: &[PointND<D>],
weights: &[f64],
partition_scheme: PartitionScheme,
-) -> Vec<ProcessUniqueId>
-where
-    D: DimName,
-    DefaultAllocator: Allocator<f64, D>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-{
+) -> Vec<ProcessUniqueId> {
let len = points.len();
let mut permutation = (0..len).into_par_iter().collect::<Vec<_>>();
let initial_id = ProcessUniqueId::new();
@@ -194,18 +180,14 @@ where
initial_partition
}

-fn multi_jagged_recurse<D>(
+fn multi_jagged_recurse<const D: usize>(
points: &[PointND<D>],
weights: &[f64],
permutation: &mut [usize],
partition: &AtomicPtr<ProcessUniqueId>,
current_coord: usize,
partition_scheme: PartitionScheme,
-) where
-    D: DimName,
-    DefaultAllocator: Allocator<f64, D>,
-    <DefaultAllocator as Allocator<f64, D>>::Buffer: Send + Sync,
-{
+) {
if partition_scheme.num_splits != 0 {
let num_splits = partition_scheme.num_splits;

@@ -219,7 +201,6 @@ fn multi_jagged_recurse<D>(
);
let mut sub_permutations = split_at_mut_many(permutation, &split_positions);

-        let dim = D::dim();
sub_permutations
.par_iter_mut()
.zip(partition_scheme.next.unwrap())
@@ -229,7 +210,7 @@ fn multi_jagged_recurse<D>(
weights,
permu,
partition,
-(current_coord + 1) % dim,
+(current_coord + 1) % D,
scheme,
)
});