From 75b9b0ea6756a619db5bc28d685c5a171c7b5665 Mon Sep 17 00:00:00 2001
From: "Sharlon N. Regales"
Date: Thu, 2 May 2024 11:02:33 +0000
Subject: [PATCH] Set new parser standard

There has been talk of making this project into a library and letting each
forward model creator manage their own forward model. For this we need a
proper and clear standard that each project can inherit and abide by. What
better way than starting with the parser standard.

New parser standard:
- uses three main commands: run, lint, and schema
- retains backwards compatibility with the old ad hoc version
- library is plug-and-play
---
 pyproject.toml | 3 -
 src/everest_models/jobs/__init__.py | 2 +
 .../jobs/fm_well_swapping/__init__.py | 2 +-
 .../jobs/fm_well_swapping/cli.py | 53 +--
 .../{models/config.py => model_config.py} | 305 ++++++++++--------
 .../jobs/fm_well_swapping/models/__init__.py | 10 -
 .../jobs/fm_well_swapping/models/wells.py | 35 --
 .../jobs/fm_well_swapping/parser.py | 69 ++--
 .../jobs/fm_well_swapping/tasks.py | 100 +++++-
 src/everest_models/jobs/shared/__init__.py | 3 +
 src/everest_models/jobs/shared/arguments.py | 62 +---
 src/everest_models/jobs/shared/converters.py | 8 +
 src/everest_models/jobs/shared/io_utils.py | 3 +
 .../jobs/shared/models/__init__.py | 3 +-
 .../shared/models/base_config/__init__.py | 6 +-
 .../jobs/shared/models/base_config/base.py | 79 +++++
 .../models/base_config/introspective.py | 53 ++-
 .../jobs/shared/parsers/__init__.py | 4 +
 .../jobs/shared/parsers/action.py | 66 ++++
 .../jobs/shared/parsers/bootstrap.py | 141 ++++++++
 tests/jobs/shared/models/test_base_config.py | 14 +-
 .../jobs/shared/models/test_introspective.py | 12 +-
 .../well_swapping/test_well_swapping_cli.py | 45 ++-
 .../test_well_swapping_models.py | 4 +-
 .../well_swapping/well_swap_config.yml | 65 ++--
 .../well_swapping/well_swap_config.yml | 64 ++--
 .../workflows/test_well_swapping_workflow.py | 2 +-
 27 files changed, 755 insertions(+), 458 deletions(-)
 rename src/everest_models/jobs/fm_well_swapping/{models/config.py => model_config.py} (69%)
 delete mode 100644 src/everest_models/jobs/fm_well_swapping/models/__init__.py
 delete mode 100644 src/everest_models/jobs/fm_well_swapping/models/wells.py
 create mode 100644 src/everest_models/jobs/shared/parsers/__init__.py
 create mode 100644 src/everest_models/jobs/shared/parsers/action.py
 create mode 100644 src/everest_models/jobs/shared/parsers/bootstrap.py

diff --git a/pyproject.toml b/pyproject.toml
index 3eaf8ba4..caf45b4b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -84,9 +84,6 @@ fm_well_constraints = "everest_models.jobs.fm_well_constraints.cli:main_entry_po
 fm_well_filter = "everest_models.jobs.fm_well_filter.cli:main_entry_point"
 fm_well_trajectory = "everest_models.jobs.fm_well_trajectory.cli:main_entry_point"
 fm_well_swapping = "everest_models.jobs.fm_well_swapping.cli:main_entry_point"
-fm_track_well_switch = "everest_models.jobs.fm_track_well_switch.cli:main_entry_point"
-fm_update_dates = "everest_models.jobs.fm_update_dates.cli:main_entry_point"
-fm_adjust_well_status = "everest_models.jobs.fm_adjust_well_status.cli:main_entry_point"
 
 [tool.setuptools.packages.find]
 where = ["src"]
diff --git a/src/everest_models/jobs/__init__.py b/src/everest_models/jobs/__init__.py
index abd44ee6..75b5b18f 100644
--- a/src/everest_models/jobs/__init__.py
+++ b/src/everest_models/jobs/__init__.py
@@ -13,6 +13,7 @@
     fm_strip_dates,
     fm_well_constraints,
     fm_well_filter,
+    fm_well_swapping,
     fm_well_trajectory,
 )
 
@@ -32,4 +33,5 @@
     "fm_well_constraints",
     "fm_well_filter",
    
"fm_well_trajectory", + "fm_well_swapping", ] diff --git a/src/everest_models/jobs/fm_well_swapping/__init__.py b/src/everest_models/jobs/fm_well_swapping/__init__.py index 61376038..26b7651c 100644 --- a/src/everest_models/jobs/fm_well_swapping/__init__.py +++ b/src/everest_models/jobs/fm_well_swapping/__init__.py @@ -1,3 +1,3 @@ -from everest_models.jobs.fm_add_templates.cli import main_entry_point +from everest_models.jobs.fm_well_swapping.cli import main_entry_point __all__ = ["main_entry_point"] diff --git a/src/everest_models/jobs/fm_well_swapping/cli.py b/src/everest_models/jobs/fm_well_swapping/cli.py index ed738814..21ba89c4 100755 --- a/src/everest_models/jobs/fm_well_swapping/cli.py +++ b/src/everest_models/jobs/fm_well_swapping/cli.py @@ -1,73 +1,32 @@ #!/usr/bin/env python import logging -from argparse import Namespace -from typing import Dict, List, NamedTuple, Optional, Sequence, Tuple +from typing import Optional, Sequence -from .models import validate_priorities_and_state_initial_same_wells from .parser import build_argument_parser from .state_processor import StateProcessor from .tasks import ( + clean_parsed_data, determine_index_states, duration_to_dates, inject_well_operations, - sorted_well_priorities, ) logger = logging.getLogger(__name__) -class Data(NamedTuple): - priorities: Tuple[Tuple[str, ...], ...] - initial_states: Dict[str, str] - n_max_wells: Tuple[int, ...] - n_switch_states: Tuple[int, ...] - state_duration: Tuple[int, ...] - errors: List[str] - - -def clean_incoming_data(options: Namespace) -> Data: - errors: List[str] = [] - if not ( - priorities := sorted_well_priorities( - options.priorities or options.config.index_priorities - ) - ): - errors.append("no priorities") - try: - validate_priorities_and_state_initial_same_wells( - set(priorities[0]), set(options.config.state.wells) - ) - except ValueError as e: - errors.append(str(e)) - - if not ( - constraints := options.config.rescale_constraints(options.constraints) - if options.constraints - else options.config.constraints - ): - errors.append("no constraints") - - return Data( - priorities, - options.config.initial_states(priorities[0]), - **constraints, - errors=errors, - ) - - def main_entry_point(args: Optional[Sequence[str]] = None): args_parser = build_argument_parser() options = args_parser.parse_args(args) - data = clean_incoming_data(options) + data = clean_parsed_data(options) if data.errors: args_parser.error("\n".join(data.errors)) - if args and args[0] == "parse": + if data.lint_only: args_parser.exit() inject_well_operations( - options.input.to_dict(), + data.wells.to_dict(), zip( duration_to_dates( data.state_duration, @@ -86,7 +45,7 @@ def main_entry_point(args: Optional[Sequence[str]] = None): ), ), ) - options.input.json_dump(options.output) + data.wells.json_dump(options.output) if __name__ == "__main__": diff --git a/src/everest_models/jobs/fm_well_swapping/models/config.py b/src/everest_models/jobs/fm_well_swapping/model_config.py similarity index 69% rename from src/everest_models/jobs/fm_well_swapping/models/config.py rename to src/everest_models/jobs/fm_well_swapping/model_config.py index 1e0a75e4..eaf7d7d3 100644 --- a/src/everest_models/jobs/fm_well_swapping/models/config.py +++ b/src/everest_models/jobs/fm_well_swapping/model_config.py @@ -1,33 +1,37 @@ from datetime import date from functools import cached_property +from pathlib import Path +from textwrap import dedent from typing import Dict, List, NamedTuple, Optional, Sequence, Set, Tuple from pydantic import ( - 
AfterValidator, Field, + FilePath, ValidationInfo, field_validator, model_validator, ) from typing_extensions import Annotated, Self -from everest_models.jobs.shared.converters import rescale_value -from everest_models.jobs.shared.models import ModelConfig +from everest_models.jobs.shared import rescale_value +from everest_models.jobs.shared.models import ModelConfig, Wells +from everest_models.jobs.shared.validators import parse_file SINGLE_WORD = r"^[a-zA-Z][_\-a-zA-Z0-9]*$" -class Bound(NamedTuple): - min: float - max: float - - -def valid_bound(bound: Bound) -> Bound: - if bound.min > bound.max: - raise ValueError( - f"[min, max], where min cannot be greater than max: {list(bound)}" - ) - return bound +def file_path_description(argument: str) -> str: + message = f"Everest generated well {argument}" + if argument == "wells": + message = "Everest generated or forward model modified wells json file" + if argument == "output": + message = "where you wish to output the modified wells json file" + return dedent( + f""" + Relative or absolute path to {message}. + NOTE: --{argument.lower()} {argument.upper()} argument overrides this field + """ + ) def validate_priorities_and_state_initial_same_wells( @@ -43,25 +47,63 @@ def validate_priorities_and_state_initial_same_wells( ) +class Priorities(ModelConfig): + file_path: Annotated[ + FilePath, Field(None, description=file_path_description("priorities")) + ] + fallback_values: Annotated[ + Dict[str, List[float]], + Field( + default=None, + description="fallback priorities if priorities file is not given", + ), + ] + + @property + def wells(self) -> Tuple[str, ...]: + return tuple(self.fallback_values) if self.fallback_values else () + + @cached_property + def inverted(self) -> List[Dict[str, float]]: + if not (priorities := self.fallback_values): + return [] + + return [ + {well: priorities[well][index] for well in priorities} + for index in range(len(next(iter(priorities.values())))) + ] + + +class Bound(NamedTuple): + min: float + max: float + + class Scaling(ModelConfig): source: Annotated[ Bound, - AfterValidator(valid_bound), Field(description="[min, max] values for scaling source"), ] target: Annotated[ Bound, - AfterValidator(valid_bound), Field(description="[min, max] values for scaling target"), ] + @field_validator("*", mode="after") + def valid_bound(cls, bound: Bound) -> Bound: + if bound.min > bound.max: + raise ValueError( + f"[min, max], where min cannot be greater than max: {list(bound)}" + ) + return bound + -class Constraint(ModelConfig): +class _Constraint(ModelConfig): fallback_values: Annotated[ List[int], Field( default=None, - description="A list of values to fallback to if constraint json file is not present", + description="A list of values to fallback on if constraint json file is not present", ), ] scaling: Annotated[ @@ -73,6 +115,94 @@ class Constraint(ModelConfig): ] +class Constraints(ModelConfig): + file_path: Annotated[ + FilePath, + Field(None, description=file_path_description("constraints")), + ] + n_max_wells: Annotated[ + _Constraint, + Field(description="Constraint information for maximum number of wells"), + ] + n_switch_states: Annotated[ + _Constraint, + Field(description="Constraint information for number state switches allowed"), + ] + state_duration: Annotated[ + _Constraint, + Field( + description="Constraint information for the time duration of any given state" + ), + ] + + @model_validator(mode="after") + def same_fallback_length(self) -> Self: + if ( + len( + set( + fallbacks := { + field: 
len(values) + for field in ( + "n_max_wells", + "n_switch_states", + "state_duration", + ) + if (values := getattr(self, field).fallback_values) + }.values() + ) + ) + > 1 + ): + raise ValueError(f"Fallback values are not the same length: {fallbacks}") + return self + + def rescale( + self, constraints: Optional[Dict[str, Sequence[float]]] + ) -> Dict[str, Tuple[int, ...]]: + """ + Rescale the constraints based on the scaling parameters of the fields. + Each constraint is rescaled and rounded to an integer + + Parameters: + - constraints (Dict[str, Sequence[float]]): A dictionary where keys are one of the following n_max_wells and/or n_switch_states and state_duration and values are sequences of float to be rescaled. + + Returns: + - Dict[str, Tuple[int, ...]]: A dictionary where keys are field names and values are tuples of rescaled integer values. + + Example: + constraints = {'field1': [0.5, 0.75, 1.0], 'field2': [10.0, 15.0, 20.0]} + rescaled_constraints = rescale_constraints(constraints) + # Output: {'field1': (1, 2, 3), 'field2': (10, 15, 20)} + """ + + if not constraints: + return { + field: tuple(values) + for field in ("n_max_wells", "n_switch_states", "state_duration") + if (values := getattr(self, field).fallback_values) + } + + def rescale(field: str, values: Sequence[float]) -> Tuple[int, ...]: + scaling = getattr(self, field).scaling + return tuple( + round( + rescale_value( + value, + scaling.source.min, + scaling.source.max, + scaling.target.min, + scaling.target.max, + ) + ) + for value in values + ) + + return { + constraint: rescale(constraint, values) + for constraint, values in constraints.items() + } + + class DircetionalState(ModelConfig): source: Annotated[str, Field(pattern=SINGLE_WORD)] target: Annotated[str, Field(pattern=SINGLE_WORD)] @@ -147,120 +277,41 @@ def initial_states_are_viable( def wells(self) -> Tuple[str, ...]: return tuple(self.initial) if self.initial else () - def validate_initial_wells_alligned(self, priority_wells: Set[str]) -> None: - validate_priorities_and_state_initial_same_wells( - priority_wells, set(self.wells) - ) - class ConfigSchema(ModelConfig): - priorities: Annotated[ - Dict[str, List[float]], - Field( - default=None, - description="fallback priorities if priorities file is not given", - ), - ] - n_max_wells: Annotated[ - Constraint, - Field(description="Constraint information for maximum number of wells"), - ] - n_switch_states: Annotated[ - Constraint, - Field(description="Constraint information for number state switches allowed"), - ] - state_duration: Annotated[ - Constraint, + priorities: Priorities + constraints: Annotated[ + Constraints, Field( - description="Constraint information for the time duration of any given state" + description=dedent( + """ + Make sure the following are present in you Everest configuration file. 
+ + create a generic control where the control variables are: + 'n_max_wells', 'n_switch_states', and 'state_duration' + and the length of all initial_guesses are n+1, + where 'n' is the nth index in the initial_guess array + + controls: + - name: + type: generic_control + variables: + - { name: n_max_wells, initial_guess: [x0, x1, ..., xn] } + - { name: n_switch_states, initial_guess: [y0, y1, ..., yn ] } + - { name: state_duration, initial_guess: [z0, z1, ..., zn] } + """ + ) ), ] start_date: date state: State + output: Annotated[Path, Field(None, description=file_path_description("output"))] + wells: Annotated[Path, Field(None, description=file_path_description("wells"))] - @model_validator(mode="after") - def same_fallback_length(self) -> Self: - if ( - len( - set( - fallbacks := { - field: len(values) - for field in ( - "n_max_wells", - "n_switch_states", - "state_duration", - ) - if (values := getattr(self, field).fallback_values) - }.values() - ) - ) - > 1 - ): - raise ValueError(f"Fallback values are not the same length: {fallbacks}") - return self - - @property - def wells(self) -> Tuple[str, ...]: - if self.priorities: - return tuple(self.priorities) - return self.state.wells - - @cached_property - def constraints(self) -> Dict[str, List[int]]: - return { - field: values - for field in ("n_max_wells", "n_switch_states", "state_duration") - if (values := getattr(self, field).fallback_values) - } - - @cached_property - def index_priorities(self) -> List[Dict[str, float]]: - if not (priorities := self.priorities): - return [] - - return [ - {well: priorities[well][index] for well in priorities} - for index in range(len(next(iter(priorities.values())))) - ] - - def rescale_constraints( - self, constraints: Dict[str, Sequence[float]] - ) -> Dict[str, Tuple[int, ...]]: - """ - Rescale the constraints based on the scaling parameters of the fields. - Each constraint is rescaled and rounded to an integer - - Parameters: - - constraints (Dict[str, Sequence[float]]): A dictionary where keys are one of the following n_max_wells and/or n_switch_states and state_duration and values are sequences of float to be rescaled. - - Returns: - - Dict[str, Tuple[int, ...]]: A dictionary where keys are field names and values are tuples of rescaled integer values. - - Example: - constraints = {'field1': [0.5, 0.75, 1.0], 'field2': [10.0, 15.0, 20.0]} - rescaled_constraints = rescale_constraints(constraints) - # Output: {'field1': (1, 2, 3), 'field2': (10, 15, 20)} - """ - - def rescale(field: str, values: Sequence[float]) -> Tuple[int, ...]: - scaling = getattr(self, field).scaling - return tuple( - round( - rescale_value( - value, - scaling.source.min, - scaling.source.max, - scaling.target.min, - scaling.target.max, - ) - ) - for value in values - ) - - return { - constraint: rescale(constraint, values) - for constraint, values in constraints.items() - } + def wells_instance(self) -> Optional[Wells]: + if not self.wells: + return + return parse_file(str(self.wells), Wells) def initial_states(self, wells: Optional[Sequence[str]]) -> Dict[str, str]: """ @@ -277,13 +328,13 @@ def initial_states(self, wells: Optional[Sequence[str]]) -> Dict[str, str]: ValueError: If no wells are provided and there are no priorities to generate initial states. 
Example: - >>> initial_states(['A', 'B', 'C']) + > initial_states(['A', 'B', 'C']) {'A': 'initial_state_A', 'B': 'initial_state_B', 'C': 'initial_state_C'} """ initial = self.state.initial - if not (wells := wells or self.wells): + if not (wells := wells or self.priorities.wells or self.state.wells): raise ValueError("No wells to generate initial states") - if initial and wells == tuple(initial): + if initial and tuple(initial) == wells: return initial _initial = {well: self.state.viable[0] for well in wells} return _initial if not initial else {**_initial, **initial} diff --git a/src/everest_models/jobs/fm_well_swapping/models/__init__.py b/src/everest_models/jobs/fm_well_swapping/models/__init__.py deleted file mode 100644 index 0fa81f27..00000000 --- a/src/everest_models/jobs/fm_well_swapping/models/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .config import ConfigSchema, validate_priorities_and_state_initial_same_wells -from .wells import Operation, Well, Wells - -__all__ = [ - "Wells", - "Well", - "ConfigSchema", - "Operation", - "validate_priorities_and_state_initial_same_wells", -] diff --git a/src/everest_models/jobs/fm_well_swapping/models/wells.py b/src/everest_models/jobs/fm_well_swapping/models/wells.py deleted file mode 100644 index 94068393..00000000 --- a/src/everest_models/jobs/fm_well_swapping/models/wells.py +++ /dev/null @@ -1,35 +0,0 @@ -from typing import List, Sequence, Tuple - -from pydantic import Field, FilePath, computed_field -from typing_extensions import Annotated - -from everest_models.jobs.shared.models import Operation as _Operation -from everest_models.jobs.shared.models import Well as _Well -from everest_models.jobs.shared.models import Wells as _Wells - - -class Operation(_Operation): - template: Annotated[ - FilePath, - Field(default=None, description="File path to jinja template"), - ] - - -class Well(_Well): - operations: Annotated[ - List[Operation], - Field(default_factory=list, description="Sequence of operations", alias="ops"), - ] - - # TODO: create an pydantic issue - # NOTE: this is a pydantic workaround - # not sure if its a issue or feature - # but model_dump_json with both exclude_unset and by_alias set to true - # will omit operations since its not the alias and the alias is unset - @computed_field - def ops(self) -> Sequence[Operation]: - return self.operations - - -class Wells(_Wells): - root: Tuple[Well, ...] 
diff --git a/src/everest_models/jobs/fm_well_swapping/parser.py b/src/everest_models/jobs/fm_well_swapping/parser.py index dc62fef1..b42f44ca 100644 --- a/src/everest_models/jobs/fm_well_swapping/parser.py +++ b/src/everest_models/jobs/fm_well_swapping/parser.py @@ -1,19 +1,20 @@ -import argparse +from datetime import date from functools import partial from typing import Dict, Tuple from everest_models.jobs.shared.arguments import ( - SchemaAction, + Parser, add_output_argument, add_wells_input_argument, ) from everest_models.jobs.shared.io_utils import load_json +from everest_models.jobs.shared.parsers import bootstrap_parser from everest_models.jobs.shared.validators import ( parse_file, valid_optimizer, ) -from .models import ConfigSchema, Wells +from .model_config import ConfigSchema _CONFIG_ARGUMENT = "config" _PRIORITIES_ARGUMENT = "-p/--priorities" @@ -25,12 +26,17 @@ def _clean_constraint(value: str) -> Dict[str, Tuple[float, ...]]: return {key: tuple(value.values()) for key, value in load_json(value).items()} -def add_file_arguments( - parser: argparse.ArgumentParser, required: bool = True -) -> argparse._ArgumentGroup: - required_group = parser.add_argument_group("required named arguments") +@bootstrap_parser( + schemas=SCHEMAS, # type: ignore + deprication=date(2024, 5, 1), + prog="Well Swapping", + description="Swap well operation status over multiple time intervals.", +) +def build_argument_parser( + parser: Parser, legacy: bool = False, lint: bool = False +) -> None: parser.add_argument( - _CONFIG_ARGUMENT, + f"{'--' if legacy else ''}{_CONFIG_ARGUMENT}", type=partial(parse_file, schema=ConfigSchema), help="well swapping configuration file", ) @@ -44,45 +50,20 @@ def add_file_arguments( type=valid_optimizer, help="Everest generated optimized priorities", ) - add_wells_input_argument( - required_group if required else parser, - schema=Wells, - required=required, - help="Everest generated wells.json file", - ) - return required_group - - -def build_argument_parser() -> argparse.ArgumentParser: - SchemaAction.register_models(SCHEMAS) - parser = argparse.ArgumentParser(prog="Well Swapping", description="we swap wells") - sub_parser = parser.add_subparsers() - run = sub_parser.add_parser("run", help="run well swapping forward model") - add_output_argument( - add_file_arguments(run), help="Where do you wish to write this run too?" 
-    )
-    run.add_argument(
+    parser.add_argument(
         "--allow-reopen",
         action="store_true",
         help="ignores irreversible states if exist",
     )
-    schema = sub_parser.add_parser("schema", help="input files schematic specification")
-    schema.add_argument(
-        "--show",
-        nargs=0,
-        action=SchemaAction,
-        help="write all user defined input file schematics to stdout",
-    )
-    schema.add_argument(
-        "--init",
-        nargs=0,
-        action=SchemaAction,
-        help="Initialize all needed configuration files",
-    )
-    add_file_arguments(
-        parser=sub_parser.add_parser(
-            "parse", help="parse all files that would be used under run"
-        ),
+    add_wells_input_argument(
+        parser,
         required=False,
+        arg=("-w", "--wells"),
+        help="Everest generated wells.json file",
     )
-    return parser
+    if not lint:
+        add_output_argument(
+            parser,
+            required=False,
+            help="Where to write the wells operations json file",
+        )
diff --git a/src/everest_models/jobs/fm_well_swapping/tasks.py b/src/everest_models/jobs/fm_well_swapping/tasks.py
index 9245254b..ac25fe36 100644
--- a/src/everest_models/jobs/fm_well_swapping/tasks.py
+++ b/src/everest_models/jobs/fm_well_swapping/tasks.py
@@ -1,11 +1,97 @@
+from argparse import Namespace
+from collections import defaultdict
 from datetime import date, timedelta
 from itertools import accumulate
-from typing import Dict, Iterable, Iterator, List, Sequence, Tuple
-
-from .models import Operation, Well
+from pathlib import Path
+from typing import (
+    Any,
+    DefaultDict,
+    Dict,
+    Iterable,
+    Iterator,
+    List,
+    NamedTuple,
+    Sequence,
+    Tuple,
+)
+
+from everest_models.jobs.shared.models import Operation, Well, Wells
+
+from .model_config import validate_priorities_and_state_initial_same_wells
 from .state_processor import StateProcessor
 
 
+class Data(NamedTuple):
+    lint_only: bool
+    priorities: Tuple[Tuple[str, ...], ...]
+    initial_states: Dict[str, str]
+    wells: Wells
+    output: Path
+    n_max_wells: Tuple[int, ...]
+    n_switch_states: Tuple[int, ...]
+    state_duration: Tuple[int, ...]
+    errors: List[str]
+
+
+def clean_parsed_data(options: Namespace) -> Data:
+    """
+    Clean parsed command-line data and return it as a Data object.
+
+    If errors are triggered, they are collected in Data.errors.
+
+    Parameters:
+    options (Namespace): parsed command-line options
+
+    Returns:
+    Data: (cleaned data and/or errors).
+        
+ - lint_only + - priorities + - initial_states + - wells + - output + - n_max_wells + - n_switch_states + - state_duration + - errors + """ + + errors: List[str] = [] + lint_only = options.command == "lint" + + def validate_exist(value: Any, message: str): + if not (value or lint_only): + errors.append(message) + return value + + priorities = validate_exist( + sorted_well_priorities( + options.priorities or options.config.priorities.inverted + ), + "no priorities", + ) + + try: + validate_priorities_and_state_initial_same_wells( + set(priorities[0]), set(options.config.state.wells) + ) + except ValueError as e: + errors.append(str(e)) + + return Data( + lint_only, + priorities, + options.config.initial_states(priorities[0]), + wells=validate_exist( + options.wells or options.config.wells_instance(), "no wells" + ), + output=validate_exist(options.output or options.config.output, "no output"), + **validate_exist( + options.config.constraints.rescale(options.constraints), "no constraints" + ), + errors=errors, + ) + + def sorted_well_priorities( values: List[Dict[str, float]], ) -> Tuple[Tuple[str, ...], ...]: @@ -32,6 +118,7 @@ def inject_well_operations( This function iterates through the provided params and injects the corresponding operations into the respective wells. Each operation is validated using the Operation model before being added to the well's operations list. """ + wells_: DefaultDict[str, List[Operation]] = defaultdict(list) for _date, states in params: for well, state in states: if well not in wells: @@ -42,7 +129,10 @@ def inject_well_operations( raise ValueError( f"Invalid operation data: {_date}, {state}. {str(e)}" ) from e - wells[well].operations.append(operation) + wells_[well].append(operation) + + for well, operation in wells_.items(): + wells[well].operations = (*wells[well].operations, *operation) # type: ignore def duration_to_dates(durations: Sequence[int], start_date: date) -> Iterator[date]: @@ -67,7 +157,7 @@ def determine_index_states( state_processor: StateProcessor, ) -> Iterator[Iterator[Tuple[str, str]]]: """ - Iterate through state paramenteres (wells, n_max_wells, n_switch_states) and process the well states for each index. + Iterate through state parameters (wells, n_max_wells, n_switch_states) and process the well states for each index. 
Args: process_params(Iterable[Tuple[Tuple[str, ...], int, int]],): diff --git a/src/everest_models/jobs/shared/__init__.py b/src/everest_models/jobs/shared/__init__.py index e69de29b..1e517ede 100644 --- a/src/everest_models/jobs/shared/__init__.py +++ b/src/everest_models/jobs/shared/__init__.py @@ -0,0 +1,3 @@ +from .converters import is_related, rescale_value + +__all__ = ["rescale_value", "is_related"] diff --git a/src/everest_models/jobs/shared/arguments.py b/src/everest_models/jobs/shared/arguments.py index ca67eec0..3690818f 100644 --- a/src/everest_models/jobs/shared/arguments.py +++ b/src/everest_models/jobs/shared/arguments.py @@ -1,19 +1,13 @@ import argparse import functools from functools import partial -from pathlib import Path -from sys import stdout -from typing import Callable, Dict, Iterator, Optional, Tuple, Type, TypeVar, Union +from typing import Callable, Optional, Tuple, Type, TypeVar, Union from pydantic import BaseModel -from ruamel.yaml.comments import CommentedMap, CommentedSeq from typing_extensions import TypeAlias -from everest_models.jobs.shared.models.base_config.base import ModelConfig -from everest_models.jobs.shared.models.base_config.introspective import is_related - -from .io_utils import dump_yaml from .models import Wells +from .parsers import SchemaAction from .validators import ( is_writable_path, parse_file, @@ -25,56 +19,6 @@ Parser: TypeAlias = Union[argparse.ArgumentParser, argparse._ArgumentGroup] -class SchemaAction(argparse.Action): - _models = {} - - @classmethod - def register_models(cls, models: Dict[str, Type[T]]) -> None: - cls._models.update(models) - - @staticmethod - def _model_specsifactions( - argument: str, model: ModelConfig - ) -> Union[CommentedSeq, CommentedMap]: - data = model.commented_map() - data.yaml_set_start_comment( - f"{argument} specification:\n'...' are REQUIRED fields that needs replacing\n\n" - ) - return data - - def _specification_iterator( - self, - ) -> Iterator[Tuple[str, ModelConfig, Union[CommentedSeq, CommentedMap]]]: - return ( - ( - argument.split("/")[-1].lstrip("-"), - model, - self._model_specsifactions(argument, model), - ) - for argument, model in self._models.items() - ) - - def __call__(self, parser: argparse.ArgumentParser, *_): - for argument, model, data in self._specification_iterator(): - if self.dest in ("show", "schema"): - print("\n\n") - if is_related(model, Wells): - print(f"{argument} is Everest generated wells JSON file") - continue - dump_yaml(data, stdout, explicit=True, default_flow_style=False) - if self.dest == "init": - if is_related(model, Wells): - continue - path = Path( - f"{'_'.join(parser.prog.split()[:-1]).lower()}_{argument}.yml" - ) - with path.open(mode="w") as fd: - dump_yaml(data, fd, default_flow_style=False) - print(f"file `{path.resolve()}` created") - - parser.exit() - - class ArgumentDefaultsHelpFormatter(argparse.ArgumentDefaultsHelpFormatter): def _get_help_string(self, action): return ( @@ -158,6 +102,7 @@ def add_wells_input_argument( *, required: bool = True, schema: Type[T] = Wells, + arg: Tuple[str, str] = ("-i", "--input"), **kwargs, ) -> None: """Add wells argument to parser @@ -170,7 +115,6 @@ def add_wells_input_argument( schema (models.BaseConfig, optional): Parser and validation schema to use. Defaults to models.WellListModel. 
""" - arg = ["-i", "--input"] parser.add_argument( *arg, type=partial(parse_file, schema=schema), diff --git a/src/everest_models/jobs/shared/converters.py b/src/everest_models/jobs/shared/converters.py index 4b538d99..009d6110 100644 --- a/src/everest_models/jobs/shared/converters.py +++ b/src/everest_models/jobs/shared/converters.py @@ -1,4 +1,12 @@ import pathlib +from inspect import isclass +from typing import Any, Type + + +def is_related(value: Any, typ: Type) -> bool: + if isclass(value): + return issubclass(value, typ) + return isinstance(value, typ) def rescale_value(value, lower, upper, new_lower, new_upper): diff --git a/src/everest_models/jobs/shared/io_utils.py b/src/everest_models/jobs/shared/io_utils.py index b2bdecb8..c281cda6 100644 --- a/src/everest_models/jobs/shared/io_utils.py +++ b/src/everest_models/jobs/shared/io_utils.py @@ -52,5 +52,8 @@ def dump_yaml( _yaml.default_flow_style = default_flow_style _yaml.explicit_start = explicit _yaml.explicit_end = explicit + _yaml.representer.add_representer( + type(None), lambda x, _: x.represent_scalar("tag:yaml.org,2002:null", "null") + ) _yaml.indent(mapping=3, sequence=2, offset=0) _yaml.dump(data, fp) diff --git a/src/everest_models/jobs/shared/models/__init__.py b/src/everest_models/jobs/shared/models/__init__.py index 23c10adc..b9b4cc4a 100644 --- a/src/everest_models/jobs/shared/models/__init__.py +++ b/src/everest_models/jobs/shared/models/__init__.py @@ -1,9 +1,10 @@ -from .base_config import ModelConfig, RootModelConfig +from .base_config import Model, ModelConfig, RootModelConfig from .operation import Operation, Tokens from .phase import PhaseEnum from .wells import OPERATIONS_FIELD_ATTRIBUTE, Well, Wells __all__ = [ + "Model", "ModelConfig", "OPERATIONS_FIELD_ATTRIBUTE", "Wells", diff --git a/src/everest_models/jobs/shared/models/base_config/__init__.py b/src/everest_models/jobs/shared/models/base_config/__init__.py index f3e60321..c2a97696 100644 --- a/src/everest_models/jobs/shared/models/base_config/__init__.py +++ b/src/everest_models/jobs/shared/models/base_config/__init__.py @@ -1,3 +1,7 @@ +from typing import TypeVar + from .base import ModelConfig, RootModelConfig -__all__ = ["ModelConfig", "RootModelConfig"] +Model = TypeVar("Model", bound=ModelConfig) + +__all__ = ["ModelConfig", "RootModelConfig", "Model"] diff --git a/src/everest_models/jobs/shared/models/base_config/base.py b/src/everest_models/jobs/shared/models/base_config/base.py index 0efcdc44..60e72798 100644 --- a/src/everest_models/jobs/shared/models/base_config/base.py +++ b/src/everest_models/jobs/shared/models/base_config/base.py @@ -15,6 +15,37 @@ class ModelConfig(BaseModel): + """ + Introspective pydantic 2 BaseModel. + + Main use is for any model that you wish to expose the model's specification to a user. + + NOTE: If your not planning to access your model fields introspectivally + please stick to pydantic BaseModel. This base model can be expensive + + Attributes: + - model_config: + - str_strip_whitespace = True + - frozen = True + - extra = "forbid" + - ser_json_timedelta = "iso8601" + - regex_engine = "rust-regex" + + Methods: + - introspective_data() -> Dict[str, Any]: Returns introspective data about the model fields. + - commented_map() -> Union[CommentedMap, CommentedSeq]: Returns a commented map or sequence based on the introspective data. + + Raises: + - ValueError: If a field's value is set to '...' and the field is required and has no default. 
+ + Usage: + class MyModel(ModelConfig): + some_field: int + + data = MyModel.introspective_data() + map = MyModel.commented_map() + """ + model_config = ConfigDict( str_strip_whitespace=True, frozen=True, @@ -40,16 +71,64 @@ def any_ellipses(data: Any): @classmethod def introspective_data(cls) -> Dict[str, Any]: + """ + Returns introspective data about the model fields. + + This method returns a dictionary containing information about each field in the model. + The keys of the dictionary are the field names, and the values are dictionaries containing information about each field. + + Returns: + Dict[str, Any]: A dictionary containing introspective data about the model fields. + where Any is a nested CommentedObject + + Raises: + None + + Example: + { + 'field1': CommentedObject( + value=5, + comment='comment containing introspective information on value', + inline_comment=None + ), + 'field2': CommentedObject( + value={ + 'field2_a': CommentedObject( + value='...', + comment='more comment', + inline_comment='replace value' + ), + }, + comment='comment containing introspective information on value', + inline_comment=None + ), + ... + } + """ return { field: parse_field_info(info) for field, info in cls.model_fields.items() } @classmethod def commented_map(cls) -> Union[CommentedMap, CommentedSeq]: + """ + Recursively go through model fields and build an comment injected yaml object. + + where key is the field in the model + value is the default value of the field or `...` + and injected comment is the introspective information on the field and value + + Returns: + Union[CommentedMap, CommentedSeq]: A CommentedMap or CommentedSeq object based on the introspective data. + """ return build_yaml_structure(cls.introspective_data()) class RootModelConfig(ModelConfig, RootModel): + """ + Same as ModelConfig but for RootModel + """ + model_config = ConfigDict(extra=None) @override diff --git a/src/everest_models/jobs/shared/models/base_config/introspective.py b/src/everest_models/jobs/shared/models/base_config/introspective.py index c2b0719b..a0cf13ad 100644 --- a/src/everest_models/jobs/shared/models/base_config/introspective.py +++ b/src/everest_models/jobs/shared/models/base_config/introspective.py @@ -1,17 +1,18 @@ -from collections.abc import Collection, Mapping, Sequence +from collections.abc import Mapping, Sequence from dataclasses import dataclass from datetime import date, datetime from enum import Enum -from inspect import isclass from pathlib import Path -from typing import Any, Optional, Type, get_args, get_origin +from typing import Any, Optional, Union, get_args, get_origin from pydantic import BaseModel from pydantic.fields import FieldInfo from pydantic_core import PydanticUndefined from ruamel.yaml.comments import CommentedMap, CommentedSeq -__all__ = ["builtin_datatypes", "build_yaml_structure", "is_related"] +from everest_models.jobs.shared import is_related + +__all__ = ["builtin_datatypes", "build_yaml_structure"] INLINE_REPLACE = "← REPLACE" @@ -24,11 +25,9 @@ class CommentedObject: inline_comment: Optional[str] = None -def is_related(value: Any, typ: Type) -> bool: - return (isclass(value) and issubclass(value, typ)) or isinstance(value, typ) - - def builtin_datatypes(value: Any) -> str: + if value is None or value is type(None): + return "" if is_related(value, bool): return "boolean" if is_related(value, int): @@ -38,7 +37,7 @@ def builtin_datatypes(value: Any) -> str: if is_related(value, str): return "string" if is_related(value, BaseModel): - return f"{value.__name__} 
map" + return f"{value.__name__.lstrip('_')} map" if is_related(value, Enum): try: value = value.value @@ -49,24 +48,18 @@ def builtin_datatypes(value: Any) -> str: if is_related(value, Sequence) and hasattr(value, "_field_types"): return str([builtin_datatypes(type_) for type_ in value._field_types.values()]) if origin := get_origin(value): - string = ( - f"({items})" - if "," - in ( - items := ", ".join( - builtin_datatypes(arg) - for arg in get_args(value) - if arg is not Ellipsis - ) - ) - else items + string = ", ".join( + builtin_datatypes(arg) for arg in get_args(value) if arg is not Ellipsis ) + if origin is Union: + return string.rstrip(", ").replace(",", " or") + if is_related(origin, set): + return f"unique values [{string}]" if is_related(origin, Sequence): - return f"a array of {string}" + return f"[{string}]" if is_related(origin, Mapping): - return f"a mapping of {string}" - if is_related(origin, Collection): - return f"a collection of {string}" + return "{" + string.replace(",", ":") + "}" + return string return value.__name__ @@ -77,7 +70,7 @@ def _example_types(value: Any) -> str: if is_related(value, int): return f"{prefix}: 1, 1.34E5, 1.34e5" if is_related(value, float): - return f"{prefix}: .1, 1. 1 1.0, 1.34E-5, 1.34e-5" + return f"{prefix}: .1, 1., 1, 1.0, 1.34E-5, 1.34e-5" if is_related(value, str): return f"{prefix}: a string value" if is_related(value, Path): @@ -112,7 +105,8 @@ def _build_comment(info: FieldInfo) -> str: info.description, f"Datatype: {typ or '_'}", examples, - f"Default: {default}" if default else default, + f"Required: {info.is_required()}", + default if default is None else f"Default: {default}", ), ) ) @@ -134,7 +128,7 @@ def build_yaml_structure(data: Any, level: int = 0): else: result[key] = build_yaml_structure(value) return result - elif isinstance(data, Sequence) and not isinstance(data, str): + if isinstance(data, Sequence) and not isinstance(data, str): result = CommentedSeq() for item in data: if isinstance(item, CommentedObject): @@ -147,11 +141,10 @@ def build_yaml_structure(data: Any, level: int = 0): else: result.append(build_yaml_structure(item, level + 1)) return result - elif isinstance(data, CommentedObject): + if isinstance(data, CommentedObject): # For standalone CommentedObject not in a collection return build_yaml_structure(data.value, level) - else: - return "null" if data is None else data() if callable(data) else data + return data() if callable(data) else data def parse_field_info(info: FieldInfo) -> Any: diff --git a/src/everest_models/jobs/shared/parsers/__init__.py b/src/everest_models/jobs/shared/parsers/__init__.py new file mode 100644 index 00000000..5648ba6e --- /dev/null +++ b/src/everest_models/jobs/shared/parsers/__init__.py @@ -0,0 +1,4 @@ +from .action import SchemaAction +from .bootstrap import bootstrap_parser + +__all__ = ["bootstrap_parser", "SchemaAction"] diff --git a/src/everest_models/jobs/shared/parsers/action.py b/src/everest_models/jobs/shared/parsers/action.py new file mode 100644 index 00000000..29b0db5c --- /dev/null +++ b/src/everest_models/jobs/shared/parsers/action.py @@ -0,0 +1,66 @@ +from argparse import Action, ArgumentParser +from pathlib import Path +from sys import stdout +from typing import ( + Dict, + Iterator, + Tuple, + Type, + Union, +) + +from ruamel.yaml.comments import CommentedMap, CommentedSeq + +from .. 
import is_related
+from ..io_utils import dump_yaml
+from ..models import Model, ModelConfig, Wells
+
+
+class SchemaAction(Action):
+    _models = {}
+
+    @classmethod
+    def register_models(cls, models: Dict[str, Type[Model]]) -> None:
+        cls._models.update(models)
+
+    @staticmethod
+    def _model_specsifactions(
+        argument: str, model: ModelConfig
+    ) -> Union[CommentedSeq, CommentedMap]:
+        data = model.commented_map()
+        data.yaml_set_start_comment(
+            f"{argument} specification:\n'...' are REQUIRED fields that needs replacing\n\n"
+        )
+        return data
+
+    def _specification_iterator(
+        self,
+    ) -> Iterator[Tuple[str, ModelConfig, Union[CommentedSeq, CommentedMap]]]:
+        return (
+            (
+                argument.split("/")[-1].lstrip("-"),
+                model,
+                self._model_specsifactions(argument, model),
+            )
+            for argument, model in self._models.items()
+        )
+
+    def __call__(self, parser: ArgumentParser, *_):
+        for argument, model, data in self._specification_iterator():
+            if self.dest in ("show", "schema"):
+                print("\n\n")
+                if is_related(model, Wells):
+                    print(f"{argument} is Everest generated wells JSON file")
+                    continue
+                dump_yaml(data, stdout, explicit=True, default_flow_style=False)
+            if self.dest == "init":
+                if is_related(model, Wells):
+                    continue
+                path = Path(
+                    f"{'_'.join(parser.prog.split()[:-1]).lower()}_{argument}.yml"
+                )
+                with path.open(mode="w") as fd:
+                    dump_yaml(data, fd, default_flow_style=False)
+                print(f"file `{path.resolve()}` created")
+
+        parser.exit()
diff --git a/src/everest_models/jobs/shared/parsers/bootstrap.py b/src/everest_models/jobs/shared/parsers/bootstrap.py
new file mode 100644
index 00000000..82721f11
--- /dev/null
+++ b/src/everest_models/jobs/shared/parsers/bootstrap.py
@@ -0,0 +1,141 @@
+import re
+from argparse import (
+    ArgumentDefaultsHelpFormatter,
+    ArgumentParser,
+    _ArgumentGroup,
+    _SubParsersAction,
+)
+from datetime import date, timedelta
+from functools import wraps
+from typing import Callable, Dict, Optional, Protocol, Type, Union
+
+from typing_extensions import TypeAlias
+
+from ..models import Model
+from .action import SchemaAction
+
+Parser: TypeAlias = Union[ArgumentParser, _ArgumentGroup]
+
+
+class CustomFormatter(ArgumentDefaultsHelpFormatter):
+    def _get_help_string(self, action):
+        return (
+            action.help
+            if action.default is None or isinstance(action.default, bool)
+            else super()._get_help_string(action)
+        )
+
+    def _format_action(self, action):
+        parts = super(CustomFormatter, self)._format_action(action)
+        if isinstance(action, _SubParsersAction):
+            return re.sub(r"^\s+{.*}\n", "", parts) or parts
+        return parts
+
+
+class ParserBuilder(Protocol):
+    def __call__(
+        self, parser: Parser, legacy: bool = False, lint: bool = False
+    ) -> None:
+        ...
+
+
+def build_schema_sub_parser(schema: ArgumentParser) -> None:
+    schema.add_argument(
+        "--show",
+        nargs=0,
+        action=SchemaAction,
+        help="write all user defined input file schematics to stdout",
+    )
+    schema.add_argument(
+        "--init",
+        nargs=0,
+        action=SchemaAction,
+        help="Initialize all needed configuration files",
+    )
+
+
+def bootstrap_parser(
+    schemas: Optional[Dict[str, Type[Model]]] = None,
+    deprication: Optional[date] = None,
+    **argument_parser_params,
+) -> Callable[
+    [ParserBuilder],
+    Callable[[], ArgumentParser],
+]:
+    """
+    Decorator function for creating a command-line argument parser.
+
+    NOTE: By giving a deprication date, you admit that the decorated function
+    is either a legacy function or a derivative of one.
+    The deprication date should be the day the commit is expected in a komodo release.
+
+    Args:
+        schemas (Optional[Dict[str, Type[ModelConfig]]]): input file schematics to be registered
+            for the `schema` sub parser actions
+        deprication (Optional[date]): Date of feature deprecation.
+            This puts feature removal a year from the given date.
+        **argument_parser_params: ArgumentParser keyword arguments.
+
+    Returns:
+        Callable[[ParserBuilder], Callable[[], ArgumentParser]]
+
+    The decorator expects a callable that sets the arguments for the `run` and `lint` sub parser actions.
+
+    Example usage:
+    ```python
+    @bootstrap_parser(
+        schemas={"schema": MySchema},
+        prog="My Program",
+        description="does something",
+    )
+    def build_argument_parser(
+        parser: ArgumentParser,
+        legacy: bool = False,
+        lint: bool = False,
+    ) -> None:
+        # Add positional and optional arguments to the parser
+        if legacy:
+            # arguments that differ based on legacy
+        if not lint:
+            # Arguments to skip for linting
+
+    parser = build_argument_parser()
+    ```
+    """
+
+    def decorator(func: ParserBuilder) -> Callable[[], ArgumentParser]:
+        if schemas:
+            SchemaAction.register_models(schemas)
+
+        @wraps(func)
+        def wrapper() -> ArgumentParser:
+            argument_parser_params.setdefault("formatter_class", CustomFormatter)
+            main = ArgumentParser(**argument_parser_params)
+            if deprication:
+                func(
+                    main.add_argument_group(
+                        "legacy forward model usage",
+                        description=(
+                            f"This flat structure is deprecated since {deprication}, "
+                            f"and will be removed around {deprication + timedelta(weeks=52)} "
+                            "in favor of the command structure"
+                        ),
+                    ),
+                    legacy=True,
+                )
+            sub_parser = main.add_subparsers(
+                dest="command",
+                title="Commands",
+            )
+            build_schema_sub_parser(
+                sub_parser.add_parser(
+                    "schema", help="Schematic description of input data files"
+                )
+            )
+            func(sub_parser.add_parser("run", help="Forward model execution"))
+            func(sub_parser.add_parser("lint", help="Static files analysis"), lint=True)
+            return main
+
+        return wrapper
+
+    return decorator
diff --git a/tests/jobs/shared/models/test_base_config.py b/tests/jobs/shared/models/test_base_config.py
index c5a92a8d..993fdc10 100644
--- a/tests/jobs/shared/models/test_base_config.py
+++ b/tests/jobs/shared/models/test_base_config.py
@@ -48,32 +48,38 @@ class Wrapper(ModelConfig):
             """
             # User description. A relatively simple data.
             # Datatype: User map
+            # Required: True
             user:
               # The name of the test model
               # Datatype: string
               # Examples: a string value
+              # Required: False
               # Default: some_name
               name: some_name
 
               # Long live the test model
               # Datatype: integer
               # Examples: 5, 1.5e4
+              # Required: True
              age: '...' 
# ← REPLACE # Sex of the user # Datatype: string # Choices: male, female + # Required: False # Default: male sex: male """ @@ -115,17 +124,20 @@ class Wrapper(ModelConfig): # The name of the test model # Datatype: string # Examples: a string value + # Required: False # Default: some_name name: some_name # Long live the test model # Datatype: integer # Examples: 5, 1.5e4 + # Required: True age: '...' # ← REPLACE # Sex of the user # Datatype: string # Choices: male, female + # Required: False # Default: male sex: male """ @@ -178,5 +190,5 @@ def test_base_config_check_for_ellipses() -> None: ), ), ) -def test_base_config_model_config(model: ModelConfig, expected: Dict[str, Any]) -> None: +def test_base_config_model(model: ModelConfig, expected: Dict[str, Any]) -> None: assert model.model_config == expected diff --git a/tests/jobs/shared/models/test_introspective.py b/tests/jobs/shared/models/test_introspective.py index 77e732d6..114f3dc5 100644 --- a/tests/jobs/shared/models/test_introspective.py +++ b/tests/jobs/shared/models/test_introspective.py @@ -1,7 +1,7 @@ from enum import Enum from io import StringIO from textwrap import dedent -from typing import Any, List, Sequence, Set, Tuple, Type +from typing import Any, List, Optional, Sequence, Set, Tuple, Type, Union import pytest from everest_models.jobs.shared.models.base_config.introspective import ( @@ -153,10 +153,12 @@ class MyEnum(Enum): @pytest.mark.parametrize( "typ, expected", ( - pytest.param(Sequence[int], "a array of integer", id="sequence"), - pytest.param(Set[int], "a collection of integer", id="set"), - pytest.param(List[str], "a array of string", id="list"), - pytest.param(Tuple[int, int], "a array of (integer, integer)", id="tuple"), + pytest.param(Sequence[int], "[integer]", id="sequence"), + pytest.param(Union[int, float], "integer or number", id="union"), + pytest.param(Optional[int], "integer", id="optional"), + pytest.param(Set[int], "unique values [integer]", id="set"), + pytest.param(List[str], "[string]", id="list"), + pytest.param(Tuple[int, int], "[integer, integer]", id="tuple"), ), ) def test_builtin_datatypes_with_sequence(typ: Type, expected: str): diff --git a/tests/jobs/well_swapping/test_well_swapping_cli.py b/tests/jobs/well_swapping/test_well_swapping_cli.py index 8fa2b1d0..650e06a9 100644 --- a/tests/jobs/well_swapping/test_well_swapping_cli.py +++ b/tests/jobs/well_swapping/test_well_swapping_cli.py @@ -1,5 +1,6 @@ import json from pathlib import Path +from typing import Tuple import pytest from everest_models.jobs.fm_well_swapping.cli import main_entry_point @@ -7,45 +8,43 @@ from sub_testdata import WELL_SWAPPING as TEST_DATA -def test_well_swapping_main_entrypoint_run(copy_testdata_tmpdir) -> None: +@pytest.mark.parametrize( + "command", + ( + pytest.param(("run", "well_swap_config.yml"), id="command structure"), + pytest.param(("--config", "well_swap_config.yml"), id="legacy structure"), + ), +) +def test_well_swapping_main_entrypoint_run( + copy_testdata_tmpdir, command: Tuple[str] +) -> None: copy_testdata_tmpdir(TEST_DATA) output = "well_swap_output.json" main_entry_point( - [ - "run", + ( + *command, "-p", "priorities.json", "-c", "constraints.json", "-o", output, - "-i", + "-w", "wells.json", - "well_swap_config.yml", - ] + ) ) assert Path("expected_output.json").read_bytes() == Path(output).read_bytes() -def test_well_swapping_main_entrypoint_schema(switch_cwd_tmp_path) -> None: - with pytest.raises(SystemExit, match="0"): - main_entry_point(["schema", "--init"]) - - config = 
Path("well_swapping_config.yml").read_text() - assert ( - "# config specification:\n# '...' are REQUIRED fields that needs replacing\n" - ) in config - - def test_well_swapping_main_entrypoint_parse(copy_testdata_tmpdir) -> None: copy_testdata_tmpdir(TEST_DATA) files = tuple(Path().glob("*.*")) with pytest.raises(SystemExit, match="0"): main_entry_point( - [ - "parse", + ( + "lint", "well_swap_config.yml", - ] + ) ) assert files == tuple(Path().glob("*.*")) @@ -61,16 +60,16 @@ def test_well_swapping_main_entrypoint_parse_fault( files = tuple(Path().glob("*.*")) with pytest.raises(SystemExit, match="2"): main_entry_point( - [ - "parse", + ( + "lint", "-p", "priorities.json", "well_swap_config.yml", - ] + ) ) assert files == tuple(Path().glob("*.*")) _, err = capsys.readouterr() assert ( - "parse: error: argument -p/--priorities: All entries must contain the same amount of elements/indexes" + "lint: error: argument -p/--priorities: All entries must contain the same amount of elements/indexes" in err ) diff --git a/tests/jobs/well_swapping/test_well_swapping_models.py b/tests/jobs/well_swapping/test_well_swapping_models.py index ecb630db..bad05eaa 100644 --- a/tests/jobs/well_swapping/test_well_swapping_models.py +++ b/tests/jobs/well_swapping/test_well_swapping_models.py @@ -2,7 +2,7 @@ from typing import Any, Dict, Set import pytest -from everest_models.jobs.fm_well_swapping.models.config import ( +from everest_models.jobs.fm_well_swapping.model_config import ( SINGLE_WORD, ConfigSchema, DircetionalState, @@ -85,7 +85,7 @@ def test_well_swapping_config_constraints_bad( well_swap_config_data: Dict[str, Any], ) -> None: config = deepcopy(well_swap_config_data) - config[constraint]["fallback_values"].pop() + config["constraints"][constraint]["fallback_values"].pop() with pytest.raises( ValidationError, match="Fallback values are not the same length" ): diff --git a/tests/testdata/well_swapping/well_swap_config.yml b/tests/testdata/well_swapping/well_swap_config.yml index 9b52b341..0c0015a0 100644 --- a/tests/testdata/well_swapping/well_swap_config.yml +++ b/tests/testdata/well_swapping/well_swap_config.yml @@ -5,48 +5,49 @@ priorities: # fallback priorities if a priorities file is not given # Datatype: a mapping of (string, a array of number) # Default: null - WELL-1: [.5, .5, .5, .5] - WELL-2: [.5, .5, .5, .5] - WELL-3: [.5, .5, .5, .5] - WELL-4: [.5, .5, .5, .5] - WELL-5: [.5, .5, .5, .5] + fallback_values: + WELL-1: [.5, .5, .5, .5] + WELL-2: [.5, .5, .5, .5] + WELL-3: [.5, .5, .5, .5] + WELL-4: [.5, .5, .5, .5] + WELL-5: [.5, .5, .5, .5] # Datatype: Constraint map -n_max_wells: - fallback_values: [2, 2, 2, 2] +constraints: + n_max_wells: + fallback_values: [2, 2, 2, 2] - # Datatype: Scaling map - scaling: - # Datatype: Bound - source: [0.001, 1] + # Datatype: Scaling map + scaling: + # Datatype: Bound + source: [0.001, 1] - # Datatype: Bound - target: [2, 5] + # Datatype: Bound + target: [2, 5] -# Datatype: Constraint map -n_switch_states: - fallback_values: [300, 300, 300, 300] - - # Datatype: Scaling map - scaling: - # Datatype: Bound - source: [0.001, 1] + # Datatype: Constraint map + n_switch_states: + fallback_values: [300, 300, 300, 300] - # Datatype: Bound - target: [2, 5] + # Datatype: Scaling map + scaling: + # Datatype: Bound + source: [0.001, 1] -# Datatype: Constraint map -state_duration: - fallback_values: [300, 300, 300, 300] + # Datatype: Bound + target: [2, 5] - # Datatype: Scaling map - scaling: - # Datatype: Bound - source: [0.001, 1] + # Datatype: Constraint map + 
state_duration: + fallback_values: [300, 300, 300, 300] - # Datatype: Bound - target: [0, 500] + # Datatype: Scaling map + scaling: + # Datatype: Bound + source: [0.001, 1] + # Datatype: Bound + target: [0, 500] # Datatype: date # Examples: 2024-01-31, 2024-01-31T11:06 start_date: 2022-06-01 diff --git a/tests/testdata/workflows/well_swapping/well_swap_config.yml b/tests/testdata/workflows/well_swapping/well_swap_config.yml index 9b52b341..22f30048 100644 --- a/tests/testdata/workflows/well_swapping/well_swap_config.yml +++ b/tests/testdata/workflows/well_swapping/well_swap_config.yml @@ -5,47 +5,49 @@ priorities: # fallback priorities if a priorities file is not given # Datatype: a mapping of (string, a array of number) # Default: null - WELL-1: [.5, .5, .5, .5] - WELL-2: [.5, .5, .5, .5] - WELL-3: [.5, .5, .5, .5] - WELL-4: [.5, .5, .5, .5] - WELL-5: [.5, .5, .5, .5] + fallback_values: + WELL-1: [.5, .5, .5, .5] + WELL-2: [.5, .5, .5, .5] + WELL-3: [.5, .5, .5, .5] + WELL-4: [.5, .5, .5, .5] + WELL-5: [.5, .5, .5, .5] # Datatype: Constraint map -n_max_wells: - fallback_values: [2, 2, 2, 2] +constraints: + n_max_wells: + fallback_values: [2, 2, 2, 2] - # Datatype: Scaling map - scaling: - # Datatype: Bound - source: [0.001, 1] + # Datatype: Scaling map + scaling: + # Datatype: Bound + source: [0.001, 1] - # Datatype: Bound - target: [2, 5] + # Datatype: Bound + target: [2, 5] -# Datatype: Constraint map -n_switch_states: - fallback_values: [300, 300, 300, 300] + # Datatype: Constraint map + n_switch_states: + fallback_values: [300, 300, 300, 300] - # Datatype: Scaling map - scaling: - # Datatype: Bound - source: [0.001, 1] + # Datatype: Scaling map + scaling: + # Datatype: Bound + source: [0.001, 1] - # Datatype: Bound - target: [2, 5] + # Datatype: Bound + target: [2, 5] -# Datatype: Constraint map -state_duration: - fallback_values: [300, 300, 300, 300] + # Datatype: Constraint map + state_duration: + fallback_values: [300, 300, 300, 300] - # Datatype: Scaling map - scaling: - # Datatype: Bound - source: [0.001, 1] + # Datatype: Scaling map + scaling: + # Datatype: Bound + source: [0.001, 1] - # Datatype: Bound - target: [0, 500] + # Datatype: Bound + target: [0, 500] # Datatype: date # Examples: 2024-01-31, 2024-01-31T11:06 diff --git a/tests/workflows/test_well_swapping_workflow.py b/tests/workflows/test_well_swapping_workflow.py index 7dde3821..150505b3 100644 --- a/tests/workflows/test_well_swapping_workflow.py +++ b/tests/workflows/test_well_swapping_workflow.py @@ -17,7 +17,7 @@ def test_well_swapping_workflow(copy_testdata_tmpdir) -> None: "constraints.json", "-o", "well_swap_output.json", - "-i", + "-w", "wells.json", "well_swap_config.yml", ]