
0.6.15 #247

Merged
merged 38 commits on Jul 22, 2024
Commits (38)

42f6d69 0.6.15 a1 (winedarksea, May 18, 2024)
c992c93 0.6.15 a2 (winedarksea, May 18, 2024)
40b7d29 0.6.15 a3 (winedarksea, May 18, 2024)
281c8d6 0.6.15 a4 (winedarksea, May 20, 2024)
e5e0fee 0.6.15 a5 (winedarksea, May 20, 2024)
f0fd3f2 0.6.15 a6 (winedarksea, May 25, 2024)
f02fa42 0.6.15 a7 (winedarksea, Jun 5, 2024)
18c38b1 0.6.15 a8 (winedarksea, Jun 6, 2024)
100bb55 0.6.15 a9 (winedarksea, Jun 11, 2024)
a32f26e 0.6.15 a10 (winedarksea, Jun 11, 2024)
fba104c 0.6.15 a11 (winedarksea, Jun 12, 2024)
30407fb 0.6.15 a12 (winedarksea, Jun 13, 2024)
91efa85 0.6.15 a13 (winedarksea, Jun 14, 2024)
85b1f1c 0.6.15 a14 (winedarksea, Jun 14, 2024)
d338f82 0.6.14 a15 (winedarksea, Jun 14, 2024)
fbd04ce 0.6.15 a16 (winedarksea, Jun 21, 2024)
13d911e 0.6.15 a17 (winedarksea, Jun 21, 2024)
76b2e9a 0.6.15 a18 (winedarksea, Jun 25, 2024)
ad73ec7 0.6.15 a19 (winedarksea, Jun 25, 2024)
89f4530 0.6.15 a20 (winedarksea, Jun 28, 2024)
68430d3 0.6.15 a21 (winedarksea, Jul 2, 2024)
d26ad24 0.6.15 a22 (winedarksea, Jul 3, 2024)
5d8ef24 0.6.15 a23 (winedarksea, Jul 3, 2024)
36eee9a 0.6.15 a24 (winedarksea, Jul 3, 2024)
c1679f1 0.6.15 a25 (winedarksea, Jul 3, 2024)
51582c9 0.6.15 a26 (winedarksea, Jul 5, 2024)
6f089b3 0.6.15 a27 (winedarksea, Jul 10, 2024)
8ae9c8d 0.6.15 a28 (winedarksea, Jul 11, 2024)
f218dec 0.6.15 a29 (winedarksea, Jul 13, 2024)
29548f1 0.6.15 a30 (winedarksea, Jul 13, 2024)
e1bff4d 0.6.15 a31 (winedarksea, Jul 14, 2024)
a2021b2 0.6.15 a32 (winedarksea, Jul 18, 2024)
f42d7a7 0.6.15 a33 (winedarksea, Jul 19, 2024)
e173e77 0.6.15 a34 (winedarksea, Jul 19, 2024)
cb044ff 0.6.15 a35 (winedarksea, Jul 19, 2024)
66a8d95 0.6.15 a36 (winedarksea, Jul 19, 2024)
4143312 0.6.15 a37 (winedarksea, Jul 21, 2024)
9297587 0.6.15 (winedarksea, Jul 22, 2024)
16 changes: 11 additions & 5 deletions TODO.md
@@ -13,11 +13,17 @@
* Forecasts are desired for the future immediately following the most recent data.
* trimmed_mean to AverageValueNaive

# 0.6.14 🇺🇦 🇺🇦 🇺🇦
* prevent excessive use of 'CenterSplit' and other macro_micro style transformers
* added ElasticNetwork as subsidiary regression model option
* KalmanSmoothing, BKBandpassFilter added on_inverse option
* add threshold arg to AlignLastValue
# 0.6.15 🇺🇦 🇺🇦 🇺🇦
* Constraint transformer added
* historical_growth constraint method added
* fft as multivariate_feature for Cassandra
* None trend_window now searched as part of Cassandra
* "quarterlydayofweek" method added for datepart
* threshold_method arg to AlignLastValue
* general template updated
* slight change to MATSE metric, now only abs values for scaling
* additional args to DatepartRegression
* bug fixes

### Unstable Upstream Packages (those that are frequently broken by maintainers)
* Pytorch-Forecasting
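As a quick illustration of the changelog's AlignLastValue additions (`threshold` in 0.6.14, `threshold_method` in 0.6.15), a minimal sketch, assuming both arguments ride along in `transformation_params` like the transformer's existing options; the concrete values (`threshold=10`, `threshold_method="max"`) are illustrative assumptions, not documented defaults:

```python
import numpy as np
import pandas as pd
from autots.tools.transform import GeneralTransformer

# toy daily data with two series
df = pd.DataFrame(
    np.random.rand(100, 2),
    index=pd.date_range("2024-01-01", periods=100, freq="D"),
    columns=["series_a", "series_b"],
)

transformer = GeneralTransformer(
    transformations={"0": "AlignLastValue"},
    transformation_params={
        "0": {
            "rows": 1,
            "lag": 1,
            "method": "additive",
            "strength": 1.0,
            "threshold": 10,            # added in 0.6.14
            "threshold_method": "max",  # new in 0.6.15; value is an assumption
        }
    },
)
transformed = transformer.fit_transform(df)
```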
2 changes: 1 addition & 1 deletion autots/__init__.py
@@ -27,7 +27,7 @@
from autots.models.cassandra import Cassandra


__version__ = '0.6.14'
__version__ = '0.6.15'

TransformTS = GeneralTransformer

2 changes: 1 addition & 1 deletion autots/evaluator/anomaly_detector.py
@@ -221,7 +221,7 @@ def get_new_params(method="random"):
method_choice, method_params, transform_dict = anomaly_new_params(method=method)
if transform_dict == "random":
transform_dict = RandomTransform(
transformer_list='fast', transformer_max_depth=2
transformer_list='scalable', transformer_max_depth=2
)
if method == "fast":
preforecast = False
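For context on where this changed default matters, a short usage sketch; it assumes, based on the surrounding code, that `get_new_params` returns a kwargs dict that can seed a detector:

```python
from autots.evaluator.anomaly_detector import AnomalyDetector

# Randomly sampled parameters now draw their preprocessing from the 'scalable'
# transformer list (previously 'fast') whenever transform_dict comes back "random".
params = AnomalyDetector.get_new_params(method="random")
detector = AnomalyDetector(**params)  # assumes the dict maps onto constructor kwargs
```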
38 changes: 35 additions & 3 deletions autots/evaluator/auto_model.py
@@ -616,6 +616,7 @@ def ModelMonster(
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
forecast_length=forecast_length,
**parameters,
)
elif model == "PreprocessingRegression":
@@ -810,6 +811,7 @@ def __init__(
holiday_country=self.holiday_country,
verbose=self.verbose,
random_seed=self.random_seed,
forecast_length=self.forecast_length,
)
self.name = "ModelPrediction"
self._fit_complete = False
@@ -912,7 +914,7 @@ def predict(self, forecast_length=None, future_regressor=None):

if df_forecast.forecast.shape[1] != self.df.shape[1]:
raise ValueError(
f"Model failed to return correct number of series. Returned {df_forecast.forecast.shape[1]} and requested: {self.df.shape[1]}"
f"{self.model.name} with {self.transformer_object.transformations} failed to return correct number of series. Returned {df_forecast.forecast.shape[1]} and requested: {self.df.shape[1]}"
)

df_forecast.transformation_parameters = self.transformation_dict
@@ -2489,7 +2491,10 @@ def validation_aggregation(


def generate_score(
model_results, metric_weighting: dict = {}, prediction_interval: float = 0.9
model_results,
metric_weighting: dict = {},
prediction_interval: float = 0.9,
return_score_dict: bool = False,
):
"""Generate score based on relative accuracies.

@@ -2528,6 +2533,8 @@
wasserstein_weighting = metric_weighting.get('wasserstein_weighting', 0)
dwd_weighting = metric_weighting.get('dwd_weighting', 0)
matse_weighting = metric_weighting.get('matse_weighting', 0)

score_dict = {"ID": model_results["ID"]}
# handle various runtime information records
if 'TotalRuntimeSeconds' in model_results.columns:
model_results['TotalRuntimeSeconds'] = np.where(
@@ -2556,23 +2563,27 @@
].min()
smape_score = model_results['smape_weighted'] / smape_scaler
overall_score = smape_score * smape_weighting
score_dict['smape'] = smape_score * smape_weighting
if mae_weighting != 0:
mae_scaler = model_results['mae_weighted'][
model_results['mae_weighted'] != 0
].min()
mae_score = model_results['mae_weighted'] / mae_scaler
score_dict['mae'] = mae_score * mae_weighting
overall_score = overall_score + (mae_score * mae_weighting)
if rmse_weighting != 0:
rmse_scaler = model_results['rmse_weighted'][
model_results['rmse_weighted'] != 0
].min()
rmse_score = model_results['rmse_weighted'] / rmse_scaler
score_dict['rmse'] = rmse_score * rmse_weighting
overall_score = overall_score + (rmse_score * rmse_weighting)
if made_weighting != 0:
made_scaler = model_results['made_weighted'][
model_results['made_weighted'] != 0
].min()
made_score = model_results['made_weighted'] / made_scaler
score_dict['made'] = made_score * made_weighting
# fillna, but only if all are nan (forecast_length = 1)
# if pd.isnull(made_score.max()):
# made_score.fillna(0, inplace=True)
@@ -2582,54 +2593,63 @@
model_results['mage_weighted'] != 0
].min()
mage_score = model_results['mage_weighted'] / mage_scaler
score_dict['mage'] = mage_score * mage_weighting
overall_score = overall_score + (mage_score * mage_weighting)
if mle_weighting != 0:
mle_scaler = model_results['mle_weighted'][
model_results['mle_weighted'] != 0
].min()
mle_score = model_results['mle_weighted'] / mle_scaler
score_dict['mle'] = mle_score * mle_weighting
overall_score = overall_score + (mle_score * mle_weighting)
if imle_weighting != 0:
imle_scaler = model_results['imle_weighted'][
model_results['imle_weighted'] != 0
].min()
imle_score = model_results['imle_weighted'] / imle_scaler
score_dict['imle'] = imle_score * imle_weighting
overall_score = overall_score + (imle_score * imle_weighting)
if maxe_weighting != 0:
maxe_scaler = model_results['maxe_weighted'][
model_results['maxe_weighted'] != 0
].min()
maxe_score = model_results['maxe_weighted'] / maxe_scaler
score_dict['maxe'] = maxe_score * maxe_weighting
overall_score = overall_score + (maxe_score * maxe_weighting)
if mqae_weighting != 0:
mqae_scaler = model_results['mqae_weighted'][
model_results['mqae_weighted'] != 0
].min()
mqae_score = model_results['mqae_weighted'] / mqae_scaler
score_dict['mqae'] = mqae_score * mqae_weighting
overall_score = overall_score + (mqae_score * mqae_weighting)
if dwae_weighting != 0:
dwae_scaler = model_results['dwae_weighted'][
model_results['dwae_weighted'] != 0
].min()
dwae_score = model_results['dwae_weighted'] / dwae_scaler
score_dict['dwae'] = dwae_score * dwae_weighting
overall_score = overall_score + (dwae_score * dwae_weighting)
if ewmae_weighting != 0:
ewmae_scaler = model_results['ewmae_weighted'][
model_results['ewmae_weighted'] != 0
].min()
ewmae_score = model_results['ewmae_weighted'] / ewmae_scaler
score_dict['ewmae'] = ewmae_score * ewmae_weighting
overall_score = overall_score + (ewmae_score * ewmae_weighting)
if uwmse_weighting != 0:
uwmse_scaler = model_results['uwmse_weighted'][
model_results['uwmse_weighted'] != 0
].min()
uwmse_score = model_results['uwmse_weighted'] / uwmse_scaler
score_dict['uwmse'] = uwmse_score * uwmse_weighting
overall_score = overall_score + (uwmse_score * uwmse_weighting)
if mate_weighting != 0:
mate_scaler = model_results['mate_weighted'][
model_results['mate_weighted'] != 0
].min()
mate_score = model_results['mate_weighted'] / mate_scaler
score_dict['mate'] = mate_score * mate_weighting
overall_score = overall_score + (mate_score * mate_weighting)
if wasserstein_weighting != 0:
wasserstein_scaler = model_results['wasserstein_weighted'][
@@ -2638,30 +2658,35 @@
wasserstein_score = (
model_results['wasserstein_weighted'] / wasserstein_scaler
)
score_dict['wasserstein'] = wasserstein_score * wasserstein_weighting
overall_score = overall_score + (wasserstein_score * wasserstein_weighting)
if dwd_weighting != 0:
dwd_scaler = model_results['dwd_weighted'][
model_results['dwd_weighted'] != 0
].min()
dwd_score = model_results['dwd_weighted'] / dwd_scaler
score_dict['dwd'] = dwd_score * dwd_weighting
overall_score = overall_score + (dwd_score * dwd_weighting)
if matse_weighting != 0:
matse_scaler = model_results['matse_weighted'][
model_results['matse_weighted'] != 0
].min()
matse_score = model_results['matse_weighted'] / matse_scaler
score_dict['matse'] = matse_score * matse_weighting
overall_score = overall_score + (matse_score * matse_weighting)
if smoothness_weighting != 0:
smoothness_scaler = model_results['smoothness_weighted'][
model_results['smoothness_weighted'] != 0
].mean()
smoothness_score = model_results['smoothness_weighted'] / smoothness_scaler
score_dict['smoothness'] = smoothness_score * smoothness_weighting
overall_score = overall_score + (smoothness_score * smoothness_weighting)
if spl_weighting != 0:
spl_scaler = model_results['spl_weighted'][
model_results['spl_weighted'] != 0
].min()
spl_score = model_results['spl_weighted'] / spl_scaler
score_dict['spl'] = spl_score * spl_weighting
overall_score = overall_score + (spl_score * spl_weighting)
smape_median = smape_score.median()
if runtime_weighting != 0:
@@ -2670,18 +2695,22 @@
runtime_score = runtime / runtime_scaler
# this scales it into a similar range as SMAPE
runtime_score = runtime_score * (smape_median / runtime_score.median())
score_dict['runtime'] = runtime_score * runtime_weighting
overall_score = overall_score + (runtime_score * runtime_weighting)
# these have values in the range 0 to 1
if contour_weighting != 0:
contour_score = (2 - model_results['contour_weighted']) * smape_median
score_dict['contour'] = contour_score * contour_weighting
overall_score = overall_score + (contour_score * contour_weighting)
if oda_weighting != 0:
oda_score = (2 - model_results['oda_weighted']) * smape_median
score_dict['oda'] = oda_score * oda_weighting
overall_score = overall_score + (oda_score * oda_weighting)
if containment_weighting != 0:
containment_score = (
1 + abs(prediction_interval - model_results['containment_weighted'])
) * smape_median
score_dict['containment'] = containment_score * containment_weighting
overall_score = overall_score + (containment_score * containment_weighting)

except Exception as e:
@@ -2693,7 +2722,10 @@
A new starting template may also help. {repr(e)}"""
)

return overall_score.astype(float) # need to handle complex values (!)
if return_score_dict:
return overall_score.astype(float), score_dict
else:
return overall_score.astype(float) # need to handle complex values (!)


def generate_score_per_series(
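A brief sketch of the new `return_score_dict` flag added above. The toy `model_results` frame carries only the columns this path appears to need under the chosen weights (`ID`, `smape_weighted`, `mae_weighted`); the real frame is AutoTS's internal results table, so treat the inputs here as assumptions:

```python
import pandas as pd
from autots.evaluator.auto_model import generate_score

# minimal stand-in for the internal results table (an assumption for illustration)
model_results = pd.DataFrame(
    {
        "ID": ["model_a", "model_b"],
        "smape_weighted": [12.0, 15.0],
        "mae_weighted": [1.1, 0.9],
    }
)

overall, score_dict = generate_score(
    model_results,
    metric_weighting={"smape_weighting": 5, "mae_weighting": 2},
    return_score_dict=True,  # new in 0.6.15; default False keeps the old return
)
print(overall)           # combined per-model score, a float Series
print(list(score_dict))  # 'ID' plus one weighted component per active metric
```

Returning the per-metric components alongside the combined score makes it possible to see which weighted metric dominates a model's ranking without recomputing the scaling by hand.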