Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add time_limit arg to the optimize api #28

Merged
merged 1 commit into from
Dec 26, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ All of the algorithms are parallelized with Rayon.
You need to implement your own model that implements the `OptModel` trait. The actual optimization is handled by each algorithm's functions. Here is a simple example that optimizes a quadratic function with the Hill Climbing algorithm.

```rust
use std::error::Error;
use std::{error::Error, time::Duration};

use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
use localsearch::{
Expand Down Expand Up @@ -98,6 +98,7 @@ fn main() {

println!("running Hill Climbing optimizer");
let n_iter = 10000;
let time_limit = Duration::from_secs(60);
let patiance = 1000;
let n_trials = 50;
let opt = HillClimbingOptimizer::new(patiance, n_trials);
Expand All @@ -107,10 +108,11 @@ fn main() {
pb.set_position(op.iter as u64);
};

let res = opt.optimize(&model, None, n_iter, Some(&callback), ());
let res = opt.optimize(&model, None, n_iter, time_limit, Some(&callback), ());
pb.finish();
dbg!(res);
}

```

Further details can be found in the API documentation, the examples, and the test code.
5 changes: 3 additions & 2 deletions examples/quadratic_model.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use std::error::Error;
use std::{error::Error, time::Duration};

use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle};
use localsearch::{
Expand Down Expand Up @@ -79,6 +79,7 @@ fn main() {

println!("running Hill Climbing optimizer");
let n_iter = 10000;
let time_limit = Duration::from_secs_f32(1.0);
let patiance = 1000;
let n_trials = 50;
let opt = HillClimbingOptimizer::new(patiance, n_trials);
Expand All @@ -88,7 +89,7 @@ fn main() {
pb.set_position(op.iter as u64);
};

let res = opt.optimize(&model, None, n_iter, Some(&callback), ());
let res = opt.optimize(&model, None, n_iter, time_limit, Some(&callback), ());
pb.finish();
dbg!(res);
}
16 changes: 14 additions & 2 deletions examples/tsp_model.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
use core::time::Duration;
use std::{
collections::{HashMap, HashSet},
error::Error,
Expand Down Expand Up @@ -243,6 +244,7 @@ fn main() {
let tsp_model = TSPModel::from_coords(&coords);

let n_iter: usize = 20000;
let time_limit = Duration::from_secs(60);
let patience = n_iter / 2;

let mut rng = rand::thread_rng();
Expand All @@ -266,6 +268,7 @@ fn main() {
&tsp_model,
initial_solution.clone(),
n_iter,
time_limit,
Some(&callback),
(),
);
Expand All @@ -284,6 +287,7 @@ fn main() {
&tsp_model,
initial_solution.clone(),
n_iter,
time_limit,
Some(&callback),
tabu_list,
);
Expand All @@ -301,6 +305,7 @@ fn main() {
&tsp_model,
initial_solution.clone(),
n_iter,
time_limit,
Some(&callback),
(200.0, 50.0),
);
Expand All @@ -318,6 +323,7 @@ fn main() {
&tsp_model,
initial_solution.clone(),
n_iter,
time_limit,
Some(&callback),
(),
);
Expand All @@ -331,8 +337,14 @@ fn main() {

println!("run relative annealing");
let optimizer = RelativeAnnealingOptimizer::new(patience, 200, 10, 1e1);
let (final_solution, final_score, _) =
optimizer.optimize(&tsp_model, initial_solution, n_iter, Some(&callback), ());
let (final_solution, final_score, _) = optimizer.optimize(
&tsp_model,
initial_solution,
n_iter,
time_limit,
Some(&callback),
(),
);
println!(
"final score = {}, num of cities {}",
final_score,
Expand Down
3 changes: 3 additions & 0 deletions src/optim/base.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
use std::time::Duration;

use auto_impl::auto_impl;
use trait_set::trait_set;

Expand All @@ -17,6 +19,7 @@ pub trait LocalSearchOptimizer<M: OptModel> {
model: &M,
initial_solution: Option<M::SolutionType>,
n_iter: usize,
time_limit: Duration,
callback: Option<&F>,
extra_in: Self::ExtraIn,
) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
Expand Down
12 changes: 11 additions & 1 deletion src/optim/epsilon_greedy.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
use std::time::Duration;

use crate::{callback::OptCallbackFn, OptModel};

use super::{base::LocalSearchOptimizer, GenericLocalSearchOptimizer};
Expand Down Expand Up @@ -53,6 +55,7 @@ impl<M: OptModel> LocalSearchOptimizer<M> for EpsilonGreedyOptimizer {
model: &M,
initial_solution: Option<M::SolutionType>,
n_iter: usize,
time_limit: Duration,
callback: Option<&F>,
_extra_in: Self::ExtraIn,
) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
Expand All @@ -66,6 +69,13 @@ impl<M: OptModel> LocalSearchOptimizer<M> for EpsilonGreedyOptimizer {
self.return_iter,
|current, trial| transition_prob(current, trial, self.epsilon),
);
optimizer.optimize(model, initial_solution, n_iter, callback, _extra_in)
optimizer.optimize(
model,
initial_solution,
n_iter,
time_limit,
callback,
_extra_in,
)
}
}
13 changes: 12 additions & 1 deletion src/optim/generic.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,9 @@
use std::{cell::RefCell, marker::PhantomData, rc::Rc};
use std::{
cell::RefCell,
marker::PhantomData,
rc::Rc,
time::{Duration, Instant},
};

use rand::Rng;
use rayon::prelude::*;
Expand Down Expand Up @@ -69,12 +74,14 @@ where
model: &M,
initial_solution: Option<M::SolutionType>,
n_iter: usize,
time_limit: Duration,
callback: Option<&F>,
_extra_in: Self::ExtraIn,
) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
where
F: OptCallbackFn<M::SolutionType, M::ScoreType>,
{
let start_time = Instant::now();
let mut rng = rand::thread_rng();
let mut current_solution = if let Some(s) = initial_solution {
s
Expand All @@ -88,6 +95,10 @@ where
let mut counter = 0;

for it in 0..n_iter {
let duration = Instant::now().duration_since(start_time);
if duration > time_limit {
break;
}
let (trial_solution, trial_score) = (0..self.n_trials)
.into_par_iter()
.map(|_| {
Expand Down
12 changes: 11 additions & 1 deletion src/optim/hill_climbing.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
use std::time::Duration;

use crate::{callback::OptCallbackFn, OptModel};

use super::{EpsilonGreedyOptimizer, LocalSearchOptimizer};
Expand Down Expand Up @@ -34,13 +36,21 @@ impl<M: OptModel> LocalSearchOptimizer<M> for HillClimbingOptimizer {
model: &M,
initial_solution: Option<M::SolutionType>,
n_iter: usize,
time_limit: Duration,
callback: Option<&F>,
_extra_in: Self::ExtraIn,
) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
where
F: OptCallbackFn<M::SolutionType, M::ScoreType>,
{
let optimizer = EpsilonGreedyOptimizer::new(self.patience, self.n_trials, usize::MAX, 0.0);
optimizer.optimize(model, initial_solution, n_iter, callback, _extra_in)
optimizer.optimize(
model,
initial_solution,
n_iter,
time_limit,
callback,
_extra_in,
)
}
}
12 changes: 11 additions & 1 deletion src/optim/logistic_annealing.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
use std::time::Duration;

use ordered_float::NotNan;

use crate::{callback::OptCallbackFn, OptModel};
Expand Down Expand Up @@ -58,6 +60,7 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for LogisticA
model: &M,
initial_solution: Option<M::SolutionType>,
n_iter: usize,
time_limit: Duration,
callback: Option<&F>,
_extra_in: Self::ExtraIn,
) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
Expand All @@ -71,7 +74,14 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for LogisticA
|current, trial| transition_prob(current, trial, self.w),
);

optimizer.optimize(model, initial_solution, n_iter, callback, _extra_in)
optimizer.optimize(
model,
initial_solution,
n_iter,
time_limit,
callback,
_extra_in,
)
}
}

Expand Down
12 changes: 11 additions & 1 deletion src/optim/relative_annealing.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
use std::time::Duration;

use ordered_float::NotNan;

use crate::{callback::OptCallbackFn, OptModel};
Expand Down Expand Up @@ -59,6 +61,7 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for RelativeA
model: &M,
initial_solution: Option<M::SolutionType>,
n_iter: usize,
time_limit: Duration,
callback: Option<&F>,
_extra_in: Self::ExtraIn,
) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
Expand All @@ -72,7 +75,14 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for RelativeA
|current, trial| transition_prob(current, trial, self.w),
);

optimizer.optimize(model, initial_solution, n_iter, callback, _extra_in)
optimizer.optimize(
model,
initial_solution,
n_iter,
time_limit,
callback,
_extra_in,
)
}
}

Expand Down
12 changes: 11 additions & 1 deletion src/optim/simulated_annealing.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,8 @@
use std::{cell::RefCell, rc::Rc};
use std::{
cell::RefCell,
rc::Rc,
time::{Duration, Instant},
};

use ordered_float::NotNan;
use rand::Rng;
Expand Down Expand Up @@ -46,12 +50,14 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for Simulated
model: &M,
initial_solution: Option<M::SolutionType>,
n_iter: usize,
time_limit: Duration,
callback: Option<&F>,
max_min_temperatures: Self::ExtraIn,
) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
where
F: OptCallbackFn<M::SolutionType, M::ScoreType>,
{
let start_time = Instant::now();
let (max_temperature, min_temperature) = max_min_temperatures;
let mut rng = rand::thread_rng();
let mut current_solution = if let Some(s) = initial_solution {
Expand All @@ -68,6 +74,10 @@ impl<M: OptModel<ScoreType = NotNan<f64>>> LocalSearchOptimizer<M> for Simulated
let mut counter = 0;

for it in 0..n_iter {
let duration = Instant::now().duration_since(start_time);
if duration > time_limit {
break;
}
let (trial_solution, trial_score) = (0..self.n_trials)
.into_par_iter()
.map(|_| {
Expand Down
13 changes: 12 additions & 1 deletion src/optim/tabu_search.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,9 @@
use std::{cell::RefCell, marker::PhantomData, rc::Rc};
use std::{
cell::RefCell,
marker::PhantomData,
rc::Rc,
time::{Duration, Instant},
};

use auto_impl::auto_impl;
use rayon::prelude::*;
Expand Down Expand Up @@ -91,12 +96,14 @@ impl<M: OptModel, T: TabuList<Item = (M::SolutionType, M::TransitionType)>> Loca
model: &M,
initial_solution: Option<M::SolutionType>,
n_iter: usize,
time_limit: Duration,
callback: Option<&F>,
mut tabu_list: Self::ExtraIn,
) -> (M::SolutionType, M::ScoreType, Self::ExtraOut)
where
F: OptCallbackFn<M::SolutionType, M::ScoreType>,
{
let start_time = Instant::now();
let mut rng = rand::thread_rng();
let mut current_solution = if let Some(s) = initial_solution {
s
Expand All @@ -110,6 +117,10 @@ impl<M: OptModel, T: TabuList<Item = (M::SolutionType, M::TransitionType)>> Loca
let mut accepted_counter = 0;

for it in 0..n_iter {
let duration = Instant::now().duration_since(start_time);
if duration > time_limit {
break;
}
let mut samples = vec![];
(0..self.n_trials)
.into_par_iter()
Expand Down
11 changes: 10 additions & 1 deletion src/tests/test_epsilon_greedy.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
use std::time::Duration;

use approx::assert_abs_diff_eq;

use crate::optim::{EpsilonGreedyOptimizer, LocalSearchOptimizer};
Expand All @@ -9,7 +11,14 @@ fn test() {
let model = QuadraticModel::new(3, vec![2.0, 0.0, -3.5], (-10.0, 10.0));
let opt = EpsilonGreedyOptimizer::new(1000, 10, 200, 0.1);
let null_closure = None::<&fn(_)>;
let (final_solution, final_score, _) = opt.optimize(&model, None, 10000, null_closure, ());
let (final_solution, final_score, _) = opt.optimize(
&model,
None,
10000,
Duration::from_secs(10),
null_closure,
(),
);
assert_abs_diff_eq!(2.0, final_solution[0], epsilon = 0.05);
assert_abs_diff_eq!(0.0, final_solution[1], epsilon = 0.05);
assert_abs_diff_eq!(-3.5, final_solution[2], epsilon = 0.05);
Expand Down
Loading