Skip to content

Commit

Permalink
adaptive PPP
Browse files · Browse the repository at this point in the history
  • Loading branch information
FelipeR888 committed Oct 12, 2020
1 parent 8a038d3 commit d5729cf
Show file tree
Hide file tree
Showing 4 changed files with 150 additions and 16 deletions.
4 changes: 3 additions & 1 deletion pyabc/population.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,8 @@ def __init__(self,
accepted_distances: List[float],
rejected_sum_stats: List[dict] = None,
rejected_distances: List[float] = None,
accepted: bool = True):
accepted: bool = True,
is_prel: bool = False):

self.m = m
self.parameter = parameter
Expand All @@ -94,6 +95,7 @@ def __init__(self,
rejected_distances = []
self.rejected_distances = rejected_distances
self.accepted = accepted
self.is_prel = is_prel


class Population:
Expand Down
7 changes: 4 additions & 3 deletions pyabc/sampler/redis_eps/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ def work_on_population(redis: StrictRedis,
sample = sample_factory()

# loop until no more particles required
while int(redis.get(idfy(N_ACC, t)).decode()) < n_req \
while int(redis.get(idfy(N_ACC, t)).decode()) < int(redis.get(idfy(N_REQ, t)).decode()) \
and (not all_accepted or
int(redis.get(idfy(N_EVAL, t)).decode()) < n_req):
if kill_handler.killed:
Expand Down Expand Up @@ -174,8 +174,9 @@ def work_on_population(redis: StrictRedis,
if len(accepted_samples) > 0:
# new pipeline
pipeline = redis.pipeline()
# update particles counter
pipeline.incr(idfy(N_ACC, t), len(accepted_samples))
# update particles counter if not preliminary
if not is_prel:
pipeline.incr(idfy(N_ACC, t), len(accepted_samples))
# note: samples are appended 1-by-1
pipeline.rpush(idfy(QUEUE, t), *accepted_samples)
# execute all commands
Expand Down
57 changes: 50 additions & 7 deletions pyabc/sampler/redis_eps/sampler.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,11 +109,54 @@ def sample_until_n_accepted(
# pop result from queue, block until one is available
dump = self.redis.blpop(idfy(QUEUE, t))[1]
# extract pickled object
particle_with_id = pickle.loads(dump)
sample_with_id = pickle.loads(dump)
# TODO check whether the acceptance criterion changed
# append to collected results
id_results.append(particle_with_id)
bar.inc()

any_particle_accepted = False
for result in sample_with_id[1]._particles:
if result.is_prel:

accepted_sum_stats = []
accepted_distances = []
accepted_weights = []
rejected_sum_stats = []
rejected_distances = []

for i_sum_stat, sum_stat in enumerate(result.accepted_sum_stats):
acc_res = kwargs['acceptor'](
distance_function=kwargs['distance_function'],
eps=kwargs['eps'],
x=sum_stat,
x_0=kwargs['x_0'],
t=t,
par=result.parameter)

if acc_res.accepted:
accepted_distances.append(acc_res.distance)
accepted_sum_stats.append(sum_stat)
accepted_weights.append(acc_res.weight)
else:
rejected_distances.append(acc_res.distance)
rejected_sum_stats.append(sum_stat)

result.accepted = len(accepted_distances) > 0

result.accepted_distances = accepted_distances
result.accepted_sum_stats = accepted_sum_stats
result.accepted_weights = accepted_weights
result.rejected_distances = rejected_distances
result.rejected_sum_stats = rejected_sum_stats

if result.accepted:
self.redis.incr(idfy(N_ACC, t), 1)
any_particle_accepted = True

# TODO Update rejected sumstats & distances

if any_particle_accepted:
# append to collected results
id_results.append(sample_with_id)
bar.inc()

# maybe head-start the next generation already
self.maybe_start_next_generation(
Expand Down Expand Up @@ -267,7 +310,7 @@ def maybe_start_next_generation(
return

# create a preliminary simulate_one function
simulate_one_prel = create_preliminary_simulate_one(
simulate_one_prel = _create_preliminary_simulate_one(
t=t+1, sample=sample,
eps=eps, **kwargs)

Expand All @@ -278,7 +321,7 @@ def maybe_start_next_generation(
all_accepted=False, is_prel=True)


def create_preliminary_simulate_one(
def _create_preliminary_simulate_one(
t, sample,
model_perturbation_kernel, transitions, model_prior, parameter_priors,
nr_samples_per_parameter, models, summary_statistics, x_0,
Expand Down Expand Up @@ -340,7 +383,7 @@ def create_preliminary_simulate_one(

# TODO fit distance, eps, acceptor

return create_simulate_function(
return create_prel_simulate_function(
t=t, model_probabilities=model_probabilities,
model_perturbation_kernel=model_perturbation_kernel,
transitions=transitions, model_prior=model_prior,
Expand Down
98 changes: 93 additions & 5 deletions pyabc/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -306,13 +306,19 @@ def weight_function(


def create_simulate_function(
t: int, model_probabilities: pd.DataFrame,
t: int,
model_probabilities: pd.DataFrame,
model_perturbation_kernel: ModelPerturbationKernel,
transitions: List[Transition],
model_prior: RV, parameter_priors: List[Distribution],
nr_samples_per_parameter: int, models: List[Model],
summary_statistics: Callable, x_0: dict,
distance_function: Distance, eps: Epsilon, acceptor: Acceptor,
model_prior: RV,
parameter_priors: List[Distribution],
nr_samples_per_parameter: int,
models: List[Model],
summary_statistics: Callable,
x_0: dict,
distance_function: Distance,
eps: Epsilon,
acceptor: Acceptor,
) -> Callable:
"""
Create a simulation function which performs the sampling of parameters,
Expand Down Expand Up @@ -386,6 +392,88 @@ def simulate_one():
return simulate_one


def prel_evaluate_no_distance(
        m_ss: int, theta_ss: Parameter, t: int, models: List[Model],
        summary_statistics: Callable, prior_pdf: Callable,
        transition_pdf: Callable,
        nr_samples_per_parameter: int = 1) -> Particle:
    """Simulate summary statistics for a preliminary particle, without
    evaluating any distance.

    The particle is provisionally marked accepted with distances set to
    ``np.inf``; the actual acceptance decision is made later by the sampler,
    once the next generation's acceptance criterion is available.

    Parameters
    ----------
    m_ss:
        Index of the sampled model.
    theta_ss:
        Sampled parameter.
    t:
        Time (generation) index.
    models:
        List of models; ``models[m_ss]`` is simulated.
    summary_statistics:
        Function computing summary statistics from raw model output.
    prior_pdf:
        Joint model/parameter prior density.
    transition_pdf:
        Joint model/parameter proposal density.
    nr_samples_per_parameter:
        Number of simulations performed for this parameter. Defaults to 1.
        Previously this was hard-coded to ``np.inf``, which made
        ``range(...)`` raise a ``TypeError``.

    Returns
    -------
    A preliminary (``is_prel=True``) ``Particle`` carrying the simulated
    summary statistics and the importance weight.
    """
    accepted_sum_stats = []
    # distances are unknown at this stage; they are filled in once the
    # sampler re-evaluates the particle against the real criterion
    accepted_distances = []

    for _ in range(nr_samples_per_parameter):
        sum_stats_one = models[m_ss].summary_statistics(
            t, theta_ss, summary_statistics)
        accepted_sum_stats.append(sum_stats_one.sum_stats)
        accepted_distances.append(np.inf)

    # importance weight: prior density over proposal density
    weight = prior_pdf(m_ss, theta_ss) / transition_pdf(m_ss, theta_ss)

    return Particle(
        m=m_ss,
        parameter=theta_ss,
        weight=weight,
        accepted_sum_stats=accepted_sum_stats,
        accepted_distances=accepted_distances,
        rejected_sum_stats=None,
        rejected_distances=None,
        accepted=True,
        is_prel=True)


def create_prel_simulate_function(
        t: int,
        model_probabilities: pd.DataFrame,
        model_perturbation_kernel: ModelPerturbationKernel,
        transitions: List[Transition],
        model_prior: RV,
        parameter_priors: List[Distribution],
        nr_samples_per_parameter: int,
        models: List[Model],
        summary_statistics: Callable,
        x_0: dict,
        distance_function: Distance,
        eps: Epsilon,
        acceptor: Acceptor,
        ) -> Callable:
    """Create a preliminary simulation function.

    The returned function proposes a parameter and simulates summary
    statistics, but performs no distance evaluation or acceptance check
    (see :func:`prel_evaluate_no_distance`); the particle is accepted
    provisionally and re-checked later by the sampler.

    NOTE(review): ``nr_samples_per_parameter``, ``x_0``,
    ``distance_function``, ``eps`` and ``acceptor`` are currently unused;
    the signature is kept in parity with ``create_simulate_function`` so
    the two can be used interchangeably.

    Returns
    -------
    A zero-argument function ``simulate_one()`` returning a preliminary
    ``Particle``.
    """
    # cache model probabilities to not query the database so often
    m = np.array(model_probabilities.index)
    p = np.array(model_probabilities.p)

    # create prior and transition densities for the importance weight
    prior_pdf = create_prior_pdf(
        model_prior=model_prior, parameter_priors=parameter_priors)
    if t == 0:
        # in the first generation the proposal equals the prior
        transition_pdf = prior_pdf
    else:
        transition_pdf = create_transition_pdf(
            transitions=transitions,
            model_probabilities=model_probabilities,
            model_perturbation_kernel=model_perturbation_kernel)

    # simulation function
    def simulate_one():
        parameter = generate_valid_proposal(
            t=t, m=m, p=p,
            model_prior=model_prior, parameter_priors=parameter_priors,
            model_perturbation_kernel=model_perturbation_kernel,
            transitions=transitions)
        particle = prel_evaluate_no_distance(
            *parameter, t=t,
            models=models, summary_statistics=summary_statistics,
            prior_pdf=prior_pdf, transition_pdf=transition_pdf)
        return particle

    return simulate_one



def termination_criteria_fulfilled(
current_eps: float, min_eps: float, stop_if_single_model_alive: bool,
nr_of_models_alive: int, acceptance_rate: float,
Expand Down

0 comments on commit d5729cf

Please sign in to comment.