
Remove to_be_fixed from inference tests on SpecialDaysTransform #1283

Merged
merged 2 commits on Jun 8, 2023
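For context on the change: `to_be_fixed` is a test helper imported from `tests/utils` that marks a test as expected to fail with a given exception until the underlying issue is fixed. Removing it here means the `SpecialDaysTransform` cases move into the regular `*_not_implemented` tests instead of the `*_failed_not_implemented` ones. A minimal sketch of what such a decorator might look like, assuming it simply wraps the test body in `pytest.raises` (the real implementation in `tests/utils.py` may differ):

```python
# Hypothetical sketch of a to_be_fixed helper: the decorated test passes only
# while the wrapped call still raises the expected exception.
import functools

import pytest


def to_be_fixed(raises, match=None):
    def decorator(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            with pytest.raises(raises, match=match):
                return func(*args, **kwargs)

        return wrapped

    return decorator
```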
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -19,6 +19,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Update requirements for `holidays` and `scipy`, change saving library from `pickle` to `dill` in `SaveMixin` ([#1268](https://github.com/tinkoff-ai/etna/pull/1268))
- Update requirement for `ruptures`, add requirement for `sqlalchemy` ([#1276](https://github.com/tinkoff-ai/etna/pull/1276))
- Optimize `make_samples` of `RNNNet` and `MLPNet` ([#1281](https://github.com/tinkoff-ai/etna/pull/1281))
- Remove `to_be_fixed` from inference tests on `SpecialDaysTransform` ([#1283](https://github.com/tinkoff-ai/etna/pull/1283))
Collaborator

Not really sure that we need to add it in the Changelog

-
-
-
### Fixed
- Fix `plot_backtest` and `plot_backtest_interactive` on one-step forecast ([1260](https://github.com/tinkoff-ai/etna/pull/1260))
32 changes: 4 additions & 28 deletions tests/test_transforms/test_inference/test_inverse_transform.py
@@ -691,6 +691,8 @@ def test_inverse_transform_train_new_segments(self, transform, dataset_name, exp
(DensityOutliersTransform(in_column="target"), "ts_with_outliers"),
(MedianOutliersTransform(in_column="target"), "ts_with_outliers"),
(PredictionIntervalOutliersTransform(in_column="target", model=ProphetModel), "ts_with_outliers"),
# timestamp
(SpecialDaysTransform(), "regular_ts"),
],
)
def test_inverse_transform_train_new_segments_not_implemented(self, transform, dataset_name, request):
@@ -700,20 +702,6 @@ def test_inverse_transform_train_new_segments_not_implemented(self, transform, d
ts, transform, train_segments=["segment_1", "segment_2"], expected_changes={}
)

@to_be_fixed(raises=NotImplementedError, match="Per-segment transforms can't work on new segments")
@pytest.mark.parametrize(
"transform, dataset_name",
[
# timestamp
(SpecialDaysTransform(), "regular_ts"),
],
)
def test_inverse_transform_train_new_segments_failed_not_implemented(self, transform, dataset_name, request):
ts = request.getfixturevalue(dataset_name)
self._test_inverse_transform_train_new_segments(
ts, transform, train_segments=["segment_1", "segment_2"], expected_changes={}
)


class TestInverseTransformFutureNewSegments:
"""Test inverse transform on future part of new segments.
@@ -1027,6 +1015,8 @@ def test_inverse_transform_future_new_segments(self, transform, dataset_name, ex
(DensityOutliersTransform(in_column="target"), "ts_with_outliers"),
(MedianOutliersTransform(in_column="target"), "ts_with_outliers"),
(PredictionIntervalOutliersTransform(in_column="target", model=ProphetModel), "ts_with_outliers"),
# timestamp
(SpecialDaysTransform(), "regular_ts"),
],
)
def test_inverse_transform_future_new_segments_not_implemented(self, transform, dataset_name, request):
@@ -1036,20 +1026,6 @@ def test_inverse_transform_future_new_segments_not_implemented(self, transform,
ts, transform, train_segments=["segment_1", "segment_2"], expected_changes={}
)

@to_be_fixed(raises=NotImplementedError, match="Per-segment transforms can't work on new segments")
@pytest.mark.parametrize(
"transform, dataset_name",
[
# timestamp
(SpecialDaysTransform(), "regular_ts"),
],
)
def test_inverse_transform_future_new_segments_failed_not_implemented(self, transform, dataset_name, request):
ts = request.getfixturevalue(dataset_name)
self._test_inverse_transform_future_new_segments(
ts, transform, train_segments=["segment_1", "segment_2"], expected_changes={}
)

@to_be_fixed(raises=Exception)
@pytest.mark.parametrize(
"transform, dataset_name, expected_changes",
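The diff above (and the one for `test_transform.py` below) moves `SpecialDaysTransform` out of the `@to_be_fixed`-decorated `*_failed_not_implemented` tests and into the parametrization of the plain `*_not_implemented` tests. A minimal, self-contained sketch of the pattern those tests are assumed to follow, with a hypothetical stand-in for the per-segment transform call (the error message is taken from the `match` string in the removed decorator):

```python
import pytest


def apply_per_segment_transform_to_new_segments():
    # Hypothetical stand-in for fitting a per-segment transform (e.g. SpecialDaysTransform)
    # on some segments and then applying it to a dataset with unseen segments.
    raise NotImplementedError("Per-segment transforms can't work on new segments")


def test_new_segments_not_implemented():
    # No expected-failure marker: the test asserts the NotImplementedError directly.
    with pytest.raises(NotImplementedError, match="Per-segment transforms can't work on new segments"):
        apply_per_segment_transform_to_new_segments()
```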
33 changes: 4 additions & 29 deletions tests/test_transforms/test_inference/test_transform.py
@@ -56,7 +56,6 @@
from etna.transforms.decomposition import RupturesChangePointsModel
from tests.test_transforms.test_inference.common import find_columns_diff
from tests.utils import select_segments_subset
from tests.utils import to_be_fixed

# TODO: figure out what happened to TrendTransform

@@ -652,6 +651,8 @@ def test_transform_train_new_segments(self, transform, dataset_name, expected_ch
(DensityOutliersTransform(in_column="target"), "ts_with_outliers"),
(MedianOutliersTransform(in_column="target"), "ts_with_outliers"),
(PredictionIntervalOutliersTransform(in_column="target", model=ProphetModel), "ts_with_outliers"),
# timestamp
(SpecialDaysTransform(), "regular_ts"),
],
)
def test_transform_train_new_segments_not_implemented(self, transform, dataset_name, request):
@@ -661,20 +662,6 @@ def test_transform_train_new_segments_not_implemented(self, transform, dataset_n
ts, transform, train_segments=["segment_1", "segment_2"], expected_changes={}
)

@to_be_fixed(raises=NotImplementedError, match="Per-segment transforms can't work on new segments")
@pytest.mark.parametrize(
"transform, dataset_name",
[
# timestamp
(SpecialDaysTransform(), "regular_ts"),
],
)
def test_transform_train_new_segments_failed_not_implemented(self, transform, dataset_name, request):
ts = request.getfixturevalue(dataset_name)
self._test_transform_train_new_segments(
ts, transform, train_segments=["segment_1", "segment_2"], expected_changes={}
)


class TestTransformFutureNewSegments:
"""Test transform on future part of new segments.
@@ -981,6 +968,8 @@ def test_transform_future_new_segments(self, transform, dataset_name, expected_c
(DensityOutliersTransform(in_column="target"), "ts_with_outliers"),
(MedianOutliersTransform(in_column="target"), "ts_with_outliers"),
(PredictionIntervalOutliersTransform(in_column="target", model=ProphetModel), "ts_with_outliers"),
# timestamp
(SpecialDaysTransform(), "regular_ts"),
],
)
def test_transform_future_new_segments_not_implemented(self, transform, dataset_name, request):
@@ -990,20 +979,6 @@ def test_transform_future_new_segments_not_implemented(self, transform, dataset_
ts, transform, train_segments=["segment_1", "segment_2"], expected_changes={}
)

@to_be_fixed(raises=NotImplementedError, match="Per-segment transforms can't work on new segments")
@pytest.mark.parametrize(
"transform, dataset_name",
[
# timestamp
(SpecialDaysTransform(), "regular_ts"),
],
)
def test_transform_future_new_segments_failed_not_implemented(self, transform, dataset_name, request):
ts = request.getfixturevalue(dataset_name)
self._test_transform_future_new_segments(
ts, transform, train_segments=["segment_1", "segment_2"], expected_changes={}
)


class TestTransformFutureWithTarget:
"""Test transform on future dataset with known target.