diff --git a/changelog/4280.improvement.md b/changelog/4280.improvement.md new file mode 100644 index 000000000000..542e9250bf69 --- /dev/null +++ b/changelog/4280.improvement.md @@ -0,0 +1 @@ +Improved CLI startup time. diff --git a/changelog/8141.misc.md b/changelog/8141.misc.md new file mode 100644 index 000000000000..c51cbfe1efe5 --- /dev/null +++ b/changelog/8141.misc.md @@ -0,0 +1,3 @@ +The following modules were renamed: +* `rasa.train` -> `rasa.model_training` +* `rasa.test` -> `rasa.model_testing` diff --git a/changelog/8141.removal.md b/changelog/8141.removal.md new file mode 100644 index 000000000000..5c4a2c581134 --- /dev/null +++ b/changelog/8141.removal.md @@ -0,0 +1,6 @@ +The following import abbreviations were removed: +* `rasa.core.train`: Please use `rasa.core.train.train` instead. +* `rasa.core.visualize`: Please use `rasa.core.visualize.visualize` instead. +* `rasa.nlu.train`: Please use `rasa.nlu.train.train` instead. +* `rasa.nlu.test`: Please use `rasa.nlu.test.run_evaluation` instead. +* `rasa.nlu.cross_validate`: Please use `rasa.nlu.test.cross_validate` instead. 
diff --git a/docs/docs/telemetry/telemetry.mdx b/docs/docs/telemetry/telemetry.mdx index 0b36a52dcb8e..f4d532dabd87 100644 --- a/docs/docs/telemetry/telemetry.mdx +++ b/docs/docs/telemetry/telemetry.mdx @@ -113,7 +113,6 @@ Here is an example report that shows the data reported to Rasa after running "project": "a0a7178e6e5f9e6484c5cfa3ea4497ffc0c96d0ad3f3ad8e9399a1edd88e3cf4", "python": "3.7.5", "rasa_open_source": "2.0.0", - "gpu": 0, "cpu": 16 } } diff --git a/rasa/__init__.py b/rasa/__init__.py index db331550b2c1..e10d5781c15d 100644 --- a/rasa/__init__.py +++ b/rasa/__init__.py @@ -1,12 +1,10 @@ import logging from rasa import version +from rasa.api import run, train, test # define the version before the other imports since these need it __version__ = version.__version__ -from rasa.run import run -from rasa.train import train -from rasa.test import test logging.getLogger(__name__).addHandler(logging.NullHandler()) diff --git a/rasa/api.py b/rasa/api.py new file mode 100644 index 000000000000..a9d9ae10e49b --- /dev/null +++ b/rasa/api.py @@ -0,0 +1,155 @@ +import rasa.shared.constants +import typing + +# WARNING: Be careful about adding any top level imports at this place! +# These functions are imported in `rasa.__init__` and any top level import +# added here will get executed as soon as someone runs `import rasa`. +# Some imports are very slow (e.g. `tensorflow`) and we don't want them to get +# imported when running `import rasa`. If you add more imports here, +# please check that in the chain you are importing, no slow packages +# are getting imported. + +if typing.TYPE_CHECKING: + from typing import Any, Text, Dict, Union, List, Optional, NoReturn + from rasa.model_training import TrainingResult + import asyncio + + +def run( + model: "Text", + endpoints: "Text", + connector: "Text" = None, + credentials: "Text" = None, + **kwargs: "Dict[Text, Any]", +) -> "NoReturn": + """Runs a Rasa model. + + Args: + model: Path to model archive. 
+ endpoints: Path to endpoints file. + connector: Connector which should be use (overwrites `credentials` + field). + credentials: Path to channel credentials file. + **kwargs: Additional arguments which are passed to + `rasa.core.run.serve_application`. + + """ + import rasa.core.run + from rasa.core.utils import AvailableEndpoints + from rasa.shared.utils.cli import print_warning + import rasa.shared.utils.common + from rasa.shared.constants import DOCS_BASE_URL + + _endpoints = AvailableEndpoints.read_endpoints(endpoints) + + if not connector and not credentials: + connector = "rest" + + print_warning( + f"No chat connector configured, falling back to the " + f"REST input channel. To connect your bot to another channel, " + f"read the docs here: {DOCS_BASE_URL}/messaging-and-voice-channels" + ) + + kwargs = rasa.shared.utils.common.minimal_kwargs( + kwargs, rasa.core.run.serve_application + ) + rasa.core.run.serve_application( + model, + channel=connector, + credentials=credentials, + endpoints=_endpoints, + **kwargs, + ) + + +def train( + domain: "Text", + config: "Text", + training_files: "Union[Text, List[Text]]", + output: "Text" = rasa.shared.constants.DEFAULT_MODELS_PATH, + dry_run: bool = False, + force_training: bool = False, + fixed_model_name: "Optional[Text]" = None, + persist_nlu_training_data: bool = False, + core_additional_arguments: "Optional[Dict]" = None, + nlu_additional_arguments: "Optional[Dict]" = None, + loop: "Optional[asyncio.AbstractEventLoop]" = None, + model_to_finetune: "Optional[Text]" = None, + finetuning_epoch_fraction: float = 1.0, +) -> "TrainingResult": + """Runs Rasa Core and NLU training in `async` loop. + + Args: + domain: Path to the domain file. + config: Path to the config for Core and NLU. + training_files: Paths to the training data for Core and NLU. + output: Output path. + dry_run: If `True` then no training will be done, and the information about + whether the training needs to be done will be printed. 
+ force_training: If `True` retrain model even if data has not changed. + fixed_model_name: Name of model to be stored. + persist_nlu_training_data: `True` if the NLU training data should be persisted + with the model. + core_additional_arguments: Additional training parameters for core training. + nlu_additional_arguments: Additional training parameters forwarded to training + method of each NLU component. + loop: Optional EventLoop for running coroutines. + model_to_finetune: Optional path to a model which should be finetuned or + a directory in case the latest trained model should be used. + finetuning_epoch_fraction: The fraction of currently specified training epochs + in the model configuration which should be used for finetuning. + + Returns: + An instance of `TrainingResult`. + """ + from rasa.model_training import train_async + import rasa.utils.common + + return rasa.utils.common.run_in_loop( + train_async( + domain=domain, + config=config, + training_files=training_files, + output=output, + dry_run=dry_run, + force_training=force_training, + fixed_model_name=fixed_model_name, + persist_nlu_training_data=persist_nlu_training_data, + core_additional_arguments=core_additional_arguments, + nlu_additional_arguments=nlu_additional_arguments, + model_to_finetune=model_to_finetune, + finetuning_epoch_fraction=finetuning_epoch_fraction, + ), + loop, + ) + + +def test( + model: "Text", + stories: "Text", + nlu_data: "Text", + output: "Text" = rasa.shared.constants.DEFAULT_RESULTS_PATH, + additional_arguments: "Optional[Dict]" = None, +) -> None: + """Test a Rasa model against a set of test data. 
+ + Args: + model: model to test + stories: path to the dialogue test data + nlu_data: path to the NLU test data + output: path to folder where all output will be stored + additional_arguments: additional arguments for the test call + """ + from rasa.model_testing import test_core + import rasa.utils.common + from rasa.model_testing import test_nlu + + if additional_arguments is None: + additional_arguments = {} + + test_core(model, stories, output, additional_arguments) + + rasa.utils.common.run_in_loop( + test_nlu(model, nlu_data, output, additional_arguments) + ) diff --git a/rasa/cli/data.py b/rasa/cli/data.py index 00f7752e2050..0fd3abb8f6c3 100644 --- a/rasa/cli/data.py +++ b/rasa/cli/data.py @@ -11,11 +11,6 @@ from rasa.cli.arguments import data as arguments from rasa.cli.arguments import default_arguments import rasa.cli.utils -from rasa.core.training.converters.responses_prefix_converter import ( - DomainResponsePrefixConverter, - StoryResponsePrefixConverter, -) -import rasa.nlu.convert from rasa.shared.constants import ( DEFAULT_DATA_PATH, DEFAULT_CONFIG_PATH, @@ -24,6 +19,10 @@ ) import rasa.shared.data from rasa.shared.core.constants import ( + POLICY_NAME_FALLBACK, + POLICY_NAME_FORM, + POLICY_NAME_MAPPING, + POLICY_NAME_TWO_STAGE_FALLBACK, USER_INTENT_OUT_OF_SCOPE, ACTION_DEFAULT_FALLBACK_NAME, ) @@ -38,18 +37,13 @@ import rasa.shared.nlu.training_data.util import rasa.shared.utils.cli import rasa.utils.common -from rasa.utils.converter import TrainingDataConverter -from rasa.validator import Validator from rasa.shared.core.domain import Domain, InvalidDomain import rasa.shared.utils.io -import rasa.core.config -from rasa.core.policies.form_policy import FormPolicy -from rasa.core.policies.fallback import FallbackPolicy -from rasa.core.policies.two_stage_fallback import TwoStageFallbackPolicy -from rasa.core.policies.mapping_policy import MappingPolicy if TYPE_CHECKING: from rasa.shared.core.training_data.structures import StoryStep + from 
rasa.validator import Validator + from rasa.utils.converter import TrainingDataConverter logger = logging.getLogger(__name__) @@ -242,6 +236,7 @@ def validate_files(args: argparse.Namespace, stories_only: bool = False) -> None args: Commandline arguments stories_only: If `True`, only the story structure is validated. """ + from rasa.validator import Validator config = rasa.cli.utils.get_validated_path( args.config, "config", DEFAULT_CONFIG_PATH, none_is_valid=True @@ -278,15 +273,15 @@ def validate_stories(args: argparse.Namespace) -> None: validate_files(args, stories_only=True) -def _validate_domain(validator: Validator) -> bool: +def _validate_domain(validator: "Validator") -> bool: return validator.verify_domain_validity() -def _validate_nlu(validator: Validator, args: argparse.Namespace) -> bool: +def _validate_nlu(validator: "Validator", args: argparse.Namespace) -> bool: return validator.verify_nlu(not args.fail_on_warnings) -def _validate_story_structure(validator: Validator, args: argparse.Namespace) -> bool: +def _validate_story_structure(validator: "Validator", args: argparse.Namespace) -> bool: # Check if a valid setting for `max_history` was given if isinstance(args.max_history, int) and args.max_history < 1: raise argparse.ArgumentTypeError( @@ -299,6 +294,8 @@ def _validate_story_structure(validator: Validator, args: argparse.Namespace) -> def _convert_nlu_data(args: argparse.Namespace) -> None: + import rasa.nlu.convert + from rasa.nlu.training_data.converters.nlu_markdown_to_yaml_converter import ( NLUMarkdownToYamlConverter, ) @@ -356,8 +353,14 @@ def _convert_nlg_data(args: argparse.Namespace) -> None: def _migrate_responses(args: argparse.Namespace) -> None: """Migrate retrieval intent responses to the new 2.0 format. + It does so modifying the stories and domain files. 
""" + from rasa.core.training.converters.responses_prefix_converter import ( + DomainResponsePrefixConverter, + StoryResponsePrefixConverter, + ) + if args.format == "yaml": rasa.utils.common.run_in_loop( _convert_to_yaml(args.out, args.domain, DomainResponsePrefixConverter()) @@ -374,7 +377,7 @@ def _migrate_responses(args: argparse.Namespace) -> None: async def _convert_to_yaml( - out_path: Text, data_path: Text, converter: TrainingDataConverter + out_path: Text, data_path: Text, converter: "TrainingDataConverter" ) -> None: output = Path(out_path) @@ -415,7 +418,7 @@ async def _convert_to_yaml( async def _convert_file_to_yaml( - source_file: Path, target_dir: Path, converter: TrainingDataConverter + source_file: Path, target_dir: Path, converter: "TrainingDataConverter" ) -> bool: """Converts a single training data file to `YAML` format. @@ -447,6 +450,8 @@ def _migrate_model_config(args: argparse.Namespace) -> None: Args: args: The commandline args with the required paths. """ + import rasa.core.config + configuration_file = Path(args.config) model_configuration = _get_configuration(configuration_file) @@ -499,7 +504,7 @@ def _get_configuration(path: Path) -> Dict: _assert_two_stage_fallback_policy_is_migratable(config) _assert_only_one_fallback_policy_present(policy_names) - if FormPolicy.__name__ in policy_names: + if POLICY_NAME_FORM in policy_names: _warn_about_manual_forms_migration() return config @@ -507,24 +512,24 @@ def _get_configuration(path: Path) -> Dict: def _assert_config_needs_migration(policies: List[Text]) -> None: migratable_policies = { - MappingPolicy.__name__, - FallbackPolicy.__name__, - TwoStageFallbackPolicy.__name__, + POLICY_NAME_MAPPING, + POLICY_NAME_FALLBACK, + POLICY_NAME_TWO_STAGE_FALLBACK, } if not migratable_policies.intersection((set(policies))): rasa.shared.utils.cli.print_error_and_exit( f"No policies were found which need migration. 
This command can migrate " - f"'{MappingPolicy.__name__}', '{FallbackPolicy.__name__}' and " - f"'{TwoStageFallbackPolicy.__name__}'." + f"'{POLICY_NAME_MAPPING}', '{POLICY_NAME_FALLBACK}' and " + f"'{POLICY_NAME_TWO_STAGE_FALLBACK}'." ) def _warn_about_manual_forms_migration() -> None: rasa.shared.utils.cli.print_warning( - f"Your model configuration contains the '{FormPolicy.__name__}'. " - f"Note that this command does not migrate the '{FormPolicy.__name__}' and " - f"you have to migrate the '{FormPolicy.__name__}' manually. " + f"Your model configuration contains the '{POLICY_NAME_FORM}'. " + f"Note that this command does not migrate the '{POLICY_NAME_FORM}' and " + f"you have to migrate the '{POLICY_NAME_FORM}' manually. " f"Please see the migration guide for further details: " f"{DOCS_URL_MIGRATION_GUIDE}" ) @@ -533,7 +538,7 @@ def _warn_about_manual_forms_migration() -> None: def _assert_nlu_pipeline_given(config: Dict, policy_names: List[Text]) -> None: if not config.get("pipeline") and any( policy in policy_names - for policy in [FallbackPolicy.__name__, TwoStageFallbackPolicy.__name__] + for policy in [POLICY_NAME_FALLBACK, POLICY_NAME_TWO_STAGE_FALLBACK] ): rasa.shared.utils.cli.print_error_and_exit( "The model configuration has to include an NLU pipeline. 
This is required " @@ -546,7 +551,7 @@ def _assert_two_stage_fallback_policy_is_migratable(config: Dict) -> None: ( policy_config for policy_config in config.get("policies", []) - if policy_config.get("name") == TwoStageFallbackPolicy.__name__ + if policy_config.get("name") == POLICY_NAME_TWO_STAGE_FALLBACK ), None, ) @@ -583,10 +588,7 @@ def _assert_two_stage_fallback_policy_is_migratable(config: Dict) -> None: def _assert_only_one_fallback_policy_present(policies: List[Text]) -> None: - if ( - FallbackPolicy.__name__ in policies - and TwoStageFallbackPolicy.__name__ in policies - ): + if POLICY_NAME_FALLBACK in policies and POLICY_NAME_TWO_STAGE_FALLBACK in policies: rasa.shared.utils.cli.print_error_and_exit( "Your policy configuration contains two configured policies for handling " "fallbacks. Please decide on one." diff --git a/rasa/cli/run.py b/rasa/cli/run.py index 99c785b1d607..aa964da88b0c 100644 --- a/rasa/cli/run.py +++ b/rasa/cli/run.py @@ -1,7 +1,7 @@ import argparse import logging import os -from typing import List, Text +from typing import List, Text, NoReturn from rasa.cli import SubParsersAction from rasa.cli.arguments import run as arguments @@ -74,8 +74,13 @@ def _validate_model_path(model_path: Text, parameter: Text, default: Text): return model_path -def run(args: argparse.Namespace): - import rasa.run +def run(args: argparse.Namespace) -> NoReturn: + """Entrypoint for `rasa run`. + + Args: + args: The CLI arguments. 
+ """ + import rasa args.endpoints = rasa.cli.utils.get_validated_path( args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True diff --git a/rasa/cli/scaffold.py b/rasa/cli/scaffold.py index 5357b8702463..fd5f30e9f673 100644 --- a/rasa/cli/scaffold.py +++ b/rasa/cli/scaffold.py @@ -5,7 +5,6 @@ from rasa import telemetry from rasa.cli import SubParsersAction -import rasa.train from rasa.cli.shell import shell from rasa.cli.utils import create_output_path from rasa.shared.utils.cli import print_success, print_error_and_exit @@ -48,7 +47,9 @@ def add_subparser( def print_train_or_instructions(args: argparse.Namespace, path: Text) -> None: + """Train a model if the user wants to.""" import questionary + import rasa print_success("Finished creating project structure.") diff --git a/rasa/cli/test.py b/rasa/cli/test.py index 4218ff3f81fd..a98847d9cfc1 100644 --- a/rasa/cli/test.py +++ b/rasa/cli/test.py @@ -17,7 +17,6 @@ DEFAULT_DATA_PATH, DEFAULT_RESULTS_PATH, ) -from rasa.core.test import FAILED_STORIES_FILE import rasa.shared.utils.validation as validation_utils import rasa.cli.utils import rasa.utils.common @@ -69,7 +68,12 @@ def add_subparser( def run_core_test(args: argparse.Namespace) -> None: """Run core tests.""" - from rasa.test import test_core_models_in_directory, test_core, test_core_models + from rasa.model_testing import ( + test_core_models_in_directory, + test_core, + test_core_models, + ) + from rasa.core.test import FAILED_STORIES_FILE stories = rasa.cli.utils.get_validated_path( args.stories, "stories", DEFAULT_DATA_PATH @@ -144,7 +148,11 @@ async def run_nlu_test_async( no_errors: indicates if incorrect predictions should be written to a file or not. 
""" - from rasa.test import compare_nlu_models, perform_nlu_cross_validation, test_nlu + from rasa.model_testing import ( + compare_nlu_models, + perform_nlu_cross_validation, + test_nlu, + ) nlu_data = rasa.cli.utils.get_validated_path(data_path, "nlu", DEFAULT_DATA_PATH) nlu_data = rasa.shared.data.get_nlu_directory(nlu_data) diff --git a/rasa/cli/train.py b/rasa/cli/train.py index 7f8cb4f8f2b8..c0c14d989810 100644 --- a/rasa/cli/train.py +++ b/rasa/cli/train.py @@ -130,7 +130,7 @@ def run_core_training( Returns: Path to a trained model or `None` if training was not successful. """ - from rasa.train import train_core + from rasa.model_training import train_core output = train_path or args.out @@ -179,7 +179,7 @@ def run_nlu_training( Returns: Path to a trained model or `None` if training was not successful. """ - from rasa.train import train_nlu + from rasa.model_training import train_nlu output = train_path or args.out diff --git a/rasa/cli/visualize.py b/rasa/cli/visualize.py index 17b2ebff7770..0a69fc8929eb 100644 --- a/rasa/cli/visualize.py +++ b/rasa/cli/visualize.py @@ -38,7 +38,7 @@ def visualize_stories(args: argparse.Namespace): args.nlu = rasa.shared.data.get_nlu_directory(DEFAULT_DATA_PATH) rasa.utils.common.run_in_loop( - rasa.core.visualize( + rasa.core.visualize.visualize( args.config, args.domain, args.stories, args.nlu, args.out, args.max_history ) ) diff --git a/rasa/core/__init__.py b/rasa/core/__init__.py index 0f76ff881aef..4defa9baa4c2 100644 --- a/rasa/core/__init__.py +++ b/rasa/core/__init__.py @@ -2,9 +2,6 @@ import rasa -from rasa.core.train import train -from rasa.core.visualize import visualize - logging.getLogger(__name__).addHandler(logging.NullHandler()) __version__ = rasa.__version__ diff --git a/rasa/core/agent.py b/rasa/core/agent.py index fa3fe0d17e00..ae3784a4101a 100644 --- a/rasa/core/agent.py +++ b/rasa/core/agent.py @@ -11,6 +11,7 @@ from aiohttp import ClientError import rasa +import rasa.utils from rasa.core import jobs, 
training from rasa.core.channels.channel import OutputChannel, UserMessage from rasa.core.constants import DEFAULT_REQUEST_TIMEOUT @@ -269,6 +270,33 @@ async def schedule_model_pulling( ) +def create_agent(model: Text, endpoints: Text = None) -> "Agent": + """Create an agent instance based on a stored model. + + Args: + model: file path to the stored model + endpoints: file path to the used endpoint configuration + """ + from rasa.core.tracker_store import TrackerStore + from rasa.core.utils import AvailableEndpoints + from rasa.core.brokers.broker import EventBroker + import rasa.utils.common + + _endpoints = AvailableEndpoints.read_endpoints(endpoints) + + _broker = rasa.utils.common.run_in_loop(EventBroker.create(_endpoints.event_broker)) + _tracker_store = TrackerStore.create(_endpoints.tracker_store, event_broker=_broker) + _lock_store = LockStore.create(_endpoints.lock_store) + + return Agent.load( + model, + generator=_endpoints.nlg, + tracker_store=_tracker_store, + lock_store=_lock_store, + action_endpoint=_endpoints.action, + ) + + async def load_agent( model_path: Optional[Text] = None, model_server: Optional[EndpointConfig] = None, @@ -278,7 +306,23 @@ async def load_agent( tracker_store: Optional[TrackerStore] = None, lock_store: Optional[LockStore] = None, action_endpoint: Optional[EndpointConfig] = None, -): +) -> Optional["Agent"]: + """Loads agent from server, remote storage or disk. + + Args: + model_path: Path to the model if it's on disk. + model_server: Configuration for a potential server which serves the model. + remote_storage: URL of remote storage for model. + interpreter: NLU interpreter to parse incoming messages. + generator: Optional response generator. + tracker_store: TrackerStore for persisting the conversation history. + lock_store: LockStore to avoid that a conversation is modified by concurrent + actors. + action_endpoint: Action server configuration for executing custom actions. + + Returns: + The instantiated `Agent` or `None`. 
+ """ try: if model_server is not None: return await load_from_server( diff --git a/rasa/core/config.py b/rasa/core/config.py index 2080a5dde667..a0936541a1c7 100644 --- a/rasa/core/config.py +++ b/rasa/core/config.py @@ -13,6 +13,11 @@ from rasa.shared.core.constants import ( ACTION_DEFAULT_FALLBACK_NAME, ACTION_TWO_STAGE_FALLBACK_NAME, + POLICY_NAME_RULE, + POLICY_NAME_FALLBACK, + POLICY_NAME_MAPPING, + POLICY_NAME_TWO_STAGE_FALLBACK, + CLASSIFIER_NAME_FALLBACK, ) import rasa.utils.io from rasa.shared.constants import ( @@ -25,11 +30,6 @@ import rasa.shared.utils.io import rasa.utils.io -from rasa.core.policies.mapping_policy import MappingPolicy -from rasa.core.policies.rule_policy import RulePolicy -from rasa.core.policies.fallback import FallbackPolicy -from rasa.core.policies.two_stage_fallback import TwoStageFallbackPolicy -from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier if TYPE_CHECKING: from rasa.core.policies.policy import Policy @@ -69,8 +69,8 @@ def migrate_fallback_policies(config: Dict) -> Tuple[Dict, Optional["StoryStep"] policies = new_config.get("policies", []) fallback_config = _get_config_for_name( - FallbackPolicy.__name__, policies - ) or _get_config_for_name(TwoStageFallbackPolicy.__name__, policies) + POLICY_NAME_FALLBACK, policies + ) or _get_config_for_name(POLICY_NAME_TWO_STAGE_FALLBACK, policies) if not fallback_config: return config, None @@ -83,7 +83,7 @@ def migrate_fallback_policies(config: Dict) -> Tuple[Dict, Optional["StoryStep"] # The triggered action is hardcoded for the Two-Stage Fallback` fallback_action_name = ACTION_TWO_STAGE_FALLBACK_NAME - if fallback_config.get("name") == FallbackPolicy.__name__: + if fallback_config.get("name") == POLICY_NAME_FALLBACK: fallback_action_name = fallback_config.get( "fallback_action_name", ACTION_DEFAULT_FALLBACK_NAME ) @@ -113,10 +113,10 @@ def _update_rule_policy_config_for_fallback( policies: The current list of configured policies. 
fallback_config: The configuration of the deprecated fallback configuration. """ - rule_policy_config = _get_config_for_name(RulePolicy.__name__, policies) + rule_policy_config = _get_config_for_name(POLICY_NAME_RULE, policies) if not rule_policy_config: - rule_policy_config = {"name": RulePolicy.__name__} + rule_policy_config = {"name": POLICY_NAME_RULE} policies.append(rule_policy_config) core_threshold = fallback_config.get( @@ -132,11 +132,11 @@ def _update_rule_policy_config_for_fallback( def _update_fallback_config(config: Dict, fallback_config: Dict) -> None: fallback_classifier_config = _get_config_for_name( - FallbackClassifier.__name__, config.get("pipeline", []) + CLASSIFIER_NAME_FALLBACK, config.get("pipeline", []) ) if not fallback_classifier_config: - fallback_classifier_config = {"name": FallbackClassifier.__name__} + fallback_classifier_config = {"name": CLASSIFIER_NAME_FALLBACK} config["pipeline"].append(fallback_classifier_config) nlu_threshold = fallback_config.get("nlu_threshold", DEFAULT_NLU_FALLBACK_THRESHOLD) @@ -188,15 +188,15 @@ def migrate_mapping_policy_to_rules( has_rule_policy = False for policy in policies: - if policy.get("name") == MappingPolicy.__name__: + if policy.get("name") == POLICY_NAME_MAPPING: has_mapping_policy = True - if policy.get("name") == RulePolicy.__name__: + if policy.get("name") == POLICY_NAME_RULE: has_rule_policy = True if not has_mapping_policy: return config, domain, [] - rasa.shared.utils.cli.print_info(f"Migrating the '{MappingPolicy.__name__}'.") + rasa.shared.utils.cli.print_info(f"Migrating the '{POLICY_NAME_MAPPING}'.") new_config = copy.deepcopy(config) new_domain = copy.deepcopy(domain) @@ -214,10 +214,10 @@ def migrate_mapping_policy_to_rules( new_rules.append(trigger_rule) # finally update the policies - policies = _drop_policy(MappingPolicy.__name__, policies) + policies = _drop_policy(POLICY_NAME_MAPPING, policies) if not has_rule_policy: - policies.append({"name": RulePolicy.__name__}) + 
policies.append({"name": POLICY_NAME_RULE}) new_config["policies"] = policies return new_config, new_domain, new_rules diff --git a/rasa/core/test.py b/rasa/core/test.py index efc60300fd50..adaa58160585 100644 --- a/rasa/core/test.py +++ b/rasa/core/test.py @@ -832,7 +832,7 @@ async def test( Returns: Evaluation summary. """ - from rasa.test import get_evaluation_metrics + from rasa.model_testing import get_evaluation_metrics generator = await _create_data_generator(stories, agent, max_stories, e2e) completed_trackers = generator.generate_story_trackers() diff --git a/rasa/core/training/interactive.py b/rasa/core/training/interactive.py index 2637fdaa17e2..e399cab3ba50 100644 --- a/rasa/core/training/interactive.py +++ b/rasa/core/training/interactive.py @@ -34,7 +34,8 @@ REQUESTED_SLOT, LOOP_INTERRUPTED, ) -from rasa.core import run, train, utils +from rasa.core import run, utils +import rasa.core.train from rasa.core.constants import DEFAULT_SERVER_FORMAT, DEFAULT_SERVER_PORT from rasa.shared.core.domain import Domain import rasa.shared.core.events @@ -1628,7 +1629,7 @@ async def train_agent_on_start( model_directory = args.get("out", tempfile.mkdtemp(suffix="_core_model")) - _agent = await train( + _agent = await rasa.core.train.train( args.get("domain"), args.get("stories"), model_directory, diff --git a/rasa/jupyter.py b/rasa/jupyter.py index e57bda6d0cee..a0d516bc4640 100644 --- a/rasa/jupyter.py +++ b/rasa/jupyter.py @@ -8,10 +8,11 @@ import rasa.utils.common if typing.TYPE_CHECKING: - from rasa.core.agent import Agent + from rasa.core.agent import Agent, create_agent -def pprint(obj: Any): +def pprint(obj: Any) -> None: + """Prints JSONs with indent.""" pretty_print.pprint(obj, indent=2) @@ -32,7 +33,6 @@ def chat( """ if model_path: - from rasa.run import create_agent agent = create_agent(model_path, endpoints) diff --git a/rasa/test.py b/rasa/model_testing.py similarity index 96% rename from rasa/test.py rename to rasa/model_testing.py index 
db3a8524ee10..ad954ed951a9 100644 --- a/rasa/test.py +++ b/rasa/model_testing.py @@ -90,20 +90,8 @@ def test_core_models(models: List[Text], stories: Text, output: Text): rasa.utils.common.run_in_loop(compare_models(models, stories, output)) -def test( - model: Text, - stories: Text, - nlu_data: Text, - output: Text = DEFAULT_RESULTS_PATH, - additional_arguments: Optional[Dict] = None, -): - if additional_arguments is None: - additional_arguments = {} - - test_core(model, stories, output, additional_arguments) - rasa.utils.common.run_in_loop( - test_nlu(model, nlu_data, output, additional_arguments) - ) +# backwards compatibility +test = rasa.test def test_core( diff --git a/rasa/train.py b/rasa/model_training.py similarity index 92% rename from rasa/train.py rename to rasa/model_training.py index e87136702c1c..b8106fe365df 100644 --- a/rasa/train.py +++ b/rasa/model_training.py @@ -51,63 +51,8 @@ class TrainingResult(NamedTuple): code: int = 0 -def train( - domain: Text, - config: Text, - training_files: Union[Text, List[Text]], - output: Text = DEFAULT_MODELS_PATH, - dry_run: bool = False, - force_training: bool = False, - fixed_model_name: Optional[Text] = None, - persist_nlu_training_data: bool = False, - core_additional_arguments: Optional[Dict] = None, - nlu_additional_arguments: Optional[Dict] = None, - loop: Optional[asyncio.AbstractEventLoop] = None, - model_to_finetune: Optional[Text] = None, - finetuning_epoch_fraction: float = 1.0, -) -> TrainingResult: - """Runs Rasa Core and NLU training in `async` loop. - - Args: - domain: Path to the domain file. - config: Path to the config for Core and NLU. - training_files: Paths to the training data for Core and NLU. - output: Output path. - dry_run: If `True` then no training will be done, and the information about - whether the training needs to be done will be printed. - force_training: If `True` retrain model even if data has not changed. - fixed_model_name: Name of model to be stored. 
- persist_nlu_training_data: `True` if the NLU training data should be persisted - with the model. - core_additional_arguments: Additional training parameters for core training. - nlu_additional_arguments: Additional training parameters forwarded to training - method of each NLU component. - loop: Optional EventLoop for running coroutines. - model_to_finetune: Optional path to a model which should be finetuned or - a directory in case the latest trained model should be used. - finetuning_epoch_fraction: The fraction currently specified training epochs - in the model configuration which should be used for finetuning. - - Returns: - An instance of `TrainingResult`. - """ - return rasa.utils.common.run_in_loop( - train_async( - domain=domain, - config=config, - training_files=training_files, - output=output, - dry_run=dry_run, - force_training=force_training, - fixed_model_name=fixed_model_name, - persist_nlu_training_data=persist_nlu_training_data, - core_additional_arguments=core_additional_arguments, - nlu_additional_arguments=nlu_additional_arguments, - model_to_finetune=model_to_finetune, - finetuning_epoch_fraction=finetuning_epoch_fraction, - ), - loop, - ) +# backwards compatibility +train = rasa.train async def train_async( @@ -600,7 +545,7 @@ async def _train_core_with_validated_data( model_type="core", is_finetuning=model_to_finetune is not None, ): - await rasa.core.train( + await rasa.core.train.train( domain_file=domain, training_resource=file_importer, output_path=os.path.join(_train_path, DEFAULT_CORE_SUBDIRECTORY_NAME), @@ -809,7 +754,7 @@ async def _train_nlu_with_validated_data( model_type="nlu", is_finetuning=model_to_finetune is not None, ): - await rasa.nlu.train( + await rasa.nlu.train.train( config, file_importer, _train_path, diff --git a/rasa/nlu/__init__.py b/rasa/nlu/__init__.py index c78df260380d..4defa9baa4c2 100644 --- a/rasa/nlu/__init__.py +++ b/rasa/nlu/__init__.py @@ -1,9 +1,6 @@ import logging import rasa -from rasa.nlu.train import 
train -from rasa.nlu.test import run_evaluation as test -from rasa.nlu.test import cross_validate logging.getLogger(__name__).addHandler(logging.NullHandler()) diff --git a/rasa/nlu/test.py b/rasa/nlu/test.py index 9da46081d065..5bf39964f858 100644 --- a/rasa/nlu/test.py +++ b/rasa/nlu/test.py @@ -645,7 +645,7 @@ def _calculate_report( report_as_dict: Optional[bool] = None, exclude_label: Optional[Text] = None, ) -> Tuple[Union[Text, Dict], float, float, float, np.ndarray, List[Text]]: - from rasa.test import get_evaluation_metrics + from rasa.model_testing import get_evaluation_metrics import sklearn.metrics import sklearn.utils.multiclass @@ -1843,7 +1843,7 @@ async def compare_nlu( Returns: training examples per run """ - from rasa.train import train_nlu_async + from rasa.model_training import train_nlu_async training_examples_per_run = [] @@ -1930,7 +1930,7 @@ def _compute_metrics( Returns: metrics """ - from rasa.test import get_evaluation_metrics + from rasa.model_testing import get_evaluation_metrics # compute fold metrics targets, predictions = _targets_predictions_from( @@ -1952,7 +1952,7 @@ def _compute_entity_metrics( Returns: entity metrics """ - from rasa.test import get_evaluation_metrics + from rasa.model_testing import get_evaluation_metrics entity_metric_results: EntityMetrics = defaultdict(lambda: defaultdict(list)) extractors = get_entity_extractors(interpreter) diff --git a/rasa/run.py b/rasa/run.py deleted file mode 100644 index 2a6c3c8ac55a..000000000000 --- a/rasa/run.py +++ /dev/null @@ -1,80 +0,0 @@ -import logging -import typing -from typing import Dict, Text - -import rasa.shared.utils.common -from rasa.shared.utils.cli import print_warning -from rasa.shared.constants import DOCS_BASE_URL -from rasa.core.lock_store import LockStore - -logger = logging.getLogger(__name__) - -if typing.TYPE_CHECKING: - from rasa.core.agent import Agent - - -def run( - model: Text, - endpoints: Text, - connector: Text = None, - credentials: Text = None, - 
**kwargs: Dict, -): - """Runs a Rasa model. - - Args: - model: Path to model archive. - endpoints: Path to endpoints file. - connector: Connector which should be use (overwrites `credentials` - field). - credentials: Path to channel credentials file. - **kwargs: Additional arguments which are passed to - `rasa.core.run.serve_application`. - - """ - import rasa.core.run - import rasa.nlu.run - from rasa.core.utils import AvailableEndpoints - - _endpoints = AvailableEndpoints.read_endpoints(endpoints) - - if not connector and not credentials: - connector = "rest" - print_warning( - f"No chat connector configured, falling back to the " - f"REST input channel. To connect your bot to another channel, " - f"read the docs here: {DOCS_BASE_URL}/messaging-and-voice-channels" - ) - - kwargs = rasa.shared.utils.common.minimal_kwargs( - kwargs, rasa.core.run.serve_application - ) - rasa.core.run.serve_application( - model, - channel=connector, - credentials=credentials, - endpoints=_endpoints, - **kwargs, - ) - - -def create_agent(model: Text, endpoints: Text = None) -> "Agent": - from rasa.core.tracker_store import TrackerStore - from rasa.core.utils import AvailableEndpoints - from rasa.core.agent import Agent - from rasa.core.brokers.broker import EventBroker - import rasa.utils.common - - _endpoints = AvailableEndpoints.read_endpoints(endpoints) - - _broker = rasa.utils.common.run_in_loop(EventBroker.create(_endpoints.event_broker)) - _tracker_store = TrackerStore.create(_endpoints.tracker_store, event_broker=_broker) - _lock_store = LockStore.create(_endpoints.lock_store) - - return Agent.load( - model, - generator=_endpoints.nlg, - tracker_store=_tracker_store, - lock_store=_lock_store, - action_endpoint=_endpoints.action, - ) diff --git a/rasa/server.py b/rasa/server.py index 6e9c08e69dc0..96aac1017768 100644 --- a/rasa/server.py +++ b/rasa/server.py @@ -7,7 +7,6 @@ import traceback from collections import defaultdict from functools import reduce, wraps -from http 
import HTTPStatus from inspect import isawaitable from pathlib import Path from http import HTTPStatus @@ -68,7 +67,8 @@ from rasa.shared.core.trackers import DialogueStateTracker, EventVerbosity from rasa.core.utils import AvailableEndpoints from rasa.nlu.emulators.no_emulator import NoEmulator -from rasa.nlu.test import run_evaluation, CVEvaluationResult +import rasa.nlu.test +from rasa.nlu.test import CVEvaluationResult from rasa.utils.endpoints import EndpointConfig if TYPE_CHECKING: @@ -1045,7 +1045,7 @@ async def train(request: Request, temporary_directory: Path) -> HTTPResponse: with app.active_training_processes.get_lock(): app.active_training_processes.value += 1 - from rasa.train import train_async + from rasa.model_training import train_async # pass `None` to run in default executor training_result = await train_async(**training_payload) @@ -1203,7 +1203,7 @@ async def _evaluate_model_using_test_set( HTTPStatus.CONFLICT, "Conflict", "Missing NLU model directory.", ) - return await run_evaluation( + return await rasa.nlu.test.run_evaluation( data_path, nlu_model, disable_plotting=True, report_as_dict=True ) @@ -1215,7 +1215,7 @@ async def _cross_validate(data_file: Text, config_file: Text, folds: int) -> Dic config = await importer.get_config() nlu_data = await importer.get_nlu_data() - evaluations = rasa.nlu.cross_validate( + evaluations = rasa.nlu.test.cross_validate( data=nlu_data, n_folds=folds, nlu_config=config, diff --git a/rasa/shared/core/constants.py b/rasa/shared/core/constants.py index 41f5ed9d4235..2713b0e1d73e 100644 --- a/rasa/shared/core/constants.py +++ b/rasa/shared/core/constants.py @@ -81,4 +81,14 @@ USE_TEXT_FOR_FEATURIZATION = "use_text_for_featurization" ENTITY_LABEL_SEPARATOR = "#" +# if you add more policy/classifier names, make sure to add a test as well to ensure +# that the name and the class stay in sync +POLICY_NAME_TWO_STAGE_FALLBACK = "TwoStageFallbackPolicy" +POLICY_NAME_MAPPING = "MappingPolicy" +POLICY_NAME_FALLBACK = 
"FallbackPolicy" +POLICY_NAME_FORM = "FormPolicy" +POLICY_NAME_RULE = "RulePolicy" + +CLASSIFIER_NAME_FALLBACK = "FallbackClassifier" + POLICIES_THAT_EXTRACT_ENTITIES = {"TEDPolicy"} diff --git a/rasa/telemetry.py b/rasa/telemetry.py index e7667bd5f8a0..0883d4b017e7 100644 --- a/rasa/telemetry.py +++ b/rasa/telemetry.py @@ -470,11 +470,12 @@ def _default_context_fields() -> Dict[Text, Any]: Return: A new context containing information about the runtime environment. """ - import tensorflow as tf global TELEMETRY_CONTEXT if not TELEMETRY_CONTEXT: + # Make sure to update the example in docs/docs/telemetry/telemetry.mdx + # if you change / add context TELEMETRY_CONTEXT = { "os": {"name": platform.system(), "version": platform.release()}, "ci": in_continuous_integration(), @@ -482,7 +483,6 @@ def _default_context_fields() -> Dict[Text, Any]: "directory": _hash_directory_path(os.getcwd()), "python": sys.version.split(" ")[0], "rasa_open_source": rasa.__version__, - "gpu": len(tf.config.list_physical_devices("GPU")), "cpu": multiprocessing.cpu_count(), "docker": _is_docker(), } @@ -702,6 +702,8 @@ async def track_model_training( training_id = uuid.uuid4().hex + # Make sure to update the example in docs/docs/telemetry/telemetry.mdx + # if you change / add any properties _track( TRAINING_STARTED_EVENT, { diff --git a/rasa/utils/common.py b/rasa/utils/common.py index 588c8c497615..75354943652b 100644 --- a/rasa/utils/common.py +++ b/rasa/utils/common.py @@ -6,7 +6,6 @@ from types import TracebackType from typing import Any, Coroutine, Dict, List, Optional, Text, Type, TypeVar, Union -import rasa.core.utils import rasa.utils.io from rasa.constants import DEFAULT_LOG_LEVEL_LIBRARIES, ENV_LOG_LEVEL_LIBRARIES from rasa.shared.constants import DEFAULT_LOG_LEVEL, ENV_LOG_LEVEL @@ -104,20 +103,12 @@ def update_tensorflow_log_level() -> None: # first import since some warnings are raised on the first import. 
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" - import tensorflow as tf - log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES) - if log_level == "DEBUG": - tf_log_level = tf.compat.v1.logging.DEBUG - elif log_level == "INFO": - tf_log_level = tf.compat.v1.logging.INFO - elif log_level == "WARNING": - tf_log_level = tf.compat.v1.logging.WARN - else: - tf_log_level = tf.compat.v1.logging.ERROR + if not log_level: + log_level = "ERROR" - tf.compat.v1.logging.set_verbosity(tf_log_level) + logging.getLogger("tensorflow").setLevel(log_level) logging.getLogger("tensorflow").propagate = False diff --git a/rasa/utils/train_utils.py b/rasa/utils/train_utils.py index 81d26b974940..b64b0b5a639f 100644 --- a/rasa/utils/train_utils.py +++ b/rasa/utils/train_utils.py @@ -1,5 +1,4 @@ from pathlib import Path -import tensorflow as tf import copy import numpy as np from typing import Optional, Text, Dict, Any, Union, List, Tuple, TYPE_CHECKING @@ -46,6 +45,7 @@ if TYPE_CHECKING: from rasa.nlu.extractors.extractor import EntityTagSpec from rasa.nlu.tokenizers.tokenizer import Token + from tensorflow.keras.callbacks import Callback def normalize(values: np.ndarray, ranking_length: Optional[int] = 0) -> np.ndarray: @@ -425,7 +425,7 @@ def create_common_callbacks( tensorboard_log_dir: Optional[Text] = None, tensorboard_log_level: Optional[Text] = None, checkpoint_dir: Optional[Path] = None, -) -> List[tf.keras.callbacks.Callback]: +) -> List["Callback"]: """Create common callbacks. The following callbacks are created: @@ -443,6 +443,8 @@ def create_common_callbacks( Returns: A list of callbacks. 
""" + import tensorflow as tf + callbacks = [RasaTrainingLogger(epochs, silent=False)] if tensorboard_log_dir: diff --git a/tests/cli/test_cli.py b/tests/cli/test_cli.py index de6c663051d6..57b3527489cf 100644 --- a/tests/cli/test_cli.py +++ b/tests/cli/test_cli.py @@ -1,24 +1,39 @@ +from pathlib import Path from typing import Callable -from _pytest.pytester import RunResult +from _pytest.pytester import RunResult, Testdir import pytest import sys -def test_cli_start(run: Callable[..., RunResult]): +def test_cli_start_is_fast(testdir: Testdir): """ - Checks that a call to ``rasa --help`` does not take longer than 7 seconds - (10 seconds on Windows). + Checks that a call to ``rasa --help`` does not import any slow imports. + + If this is failing this means, that a simple "rasa --help" commands imports + `tensorflow` which makes our CLI extremely slow. In case this test is failing + you've very likely added a global import of "tensorflow" which should be + avoided. Consider making this import (or the import of its parent module) + a local import. + + If you are clueless where that import happens, you can run + ``` + python -X importtime -m rasa.__main__ --help 2> import.log + tuna import.log + ``` + to get the import chain. + (make sure to run with python >= 3.7, and install tune (pip install tuna)) """ - import time - start = time.time() - run("--help") - end = time.time() + rasa_path = str( + (Path(__file__).parent / ".." / ".." 
/ "rasa" / "__main__.py").absolute() + ) + args = [sys.executable, "-X", "importtime", rasa_path, "--help"] + result = testdir.run(*args) - duration = end - start + assert result.ret == 0 - # it sometimes takes a bit more time to start it on Windows - assert duration <= 20 if sys.platform == "win32" else 10 + # tensorflow is slow -> can't get imported when running basic CLI commands + result.stderr.no_fnmatch_line("*tensorflow.python.eager") def test_data_convert_help(run: Callable[..., RunResult]): diff --git a/tests/cli/test_rasa_interactive.py b/tests/cli/test_rasa_interactive.py index b49d39c130c0..b87211ae636e 100644 --- a/tests/cli/test_rasa_interactive.py +++ b/tests/cli/test_rasa_interactive.py @@ -10,7 +10,7 @@ from rasa.core.train import do_interactive_learning from rasa.core.training import interactive as interactive_learning from rasa.cli import interactive, train -from rasa.train import TrainingResult +from rasa.model_training import TrainingResult from tests.conftest import DEFAULT_NLU_DATA diff --git a/tests/cli/test_rasa_train.py b/tests/cli/test_rasa_train.py index 114439370fac..4fac719fef47 100644 --- a/tests/cli/test_rasa_train.py +++ b/tests/cli/test_rasa_train.py @@ -11,7 +11,7 @@ from rasa import model from rasa.nlu.model import Metadata from rasa.shared.nlu.training_data import training_data -from rasa.train import ( +from rasa.model_training import ( CODE_CORE_NEEDS_TO_BE_RETRAINED, CODE_NLU_NEEDS_TO_BE_RETRAINED, CODE_NLG_NEEDS_TO_BE_RETRAINED, diff --git a/tests/conftest.py b/tests/conftest.py index b30d854caabd..d03216d85f71 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -35,7 +35,7 @@ import rasa.core.run from rasa.core.tracker_store import InMemoryTrackerStore, TrackerStore from rasa.model import get_model -from rasa.train import train_async, train_nlu_async +from rasa.model_training import train_async, train_nlu_async from rasa.utils.common import TempDirectoryPath from tests.core.conftest import ( 
DEFAULT_DOMAIN_PATH_WITH_SLOTS, @@ -79,7 +79,7 @@ Path("tests", "nlu", "selectors").absolute(), ], "category_full_model_training": [ - Path("tests", "test_train.py").absolute(), + Path("tests", "test_model_training.py").absolute(), Path("tests", "nlu", "test_train.py").absolute(), Path("tests", "core", "test_training.py").absolute(), Path("tests", "core", "test_examples.py").absolute(), diff --git a/tests/core/policies/test_ted_policy.py b/tests/core/policies/test_ted_policy.py index 62d5b72f75ed..e0e42c023c21 100644 --- a/tests/core/policies/test_ted_policy.py +++ b/tests/core/policies/test_ted_policy.py @@ -22,7 +22,7 @@ from rasa.utils.tensorflow.data_generator import RasaBatchDataGenerator from rasa.shared.core.trackers import DialogueStateTracker from rasa.shared.nlu.interpreter import RegexInterpreter -from rasa.train import train_core +from rasa.model_training import train_core from rasa.utils import train_utils from rasa.utils.tensorflow.constants import ( EVAL_NUM_EXAMPLES, diff --git a/tests/nlu/classifiers/test_diet_classifier.py b/tests/nlu/classifiers/test_diet_classifier.py index 47367c748aab..96141340a05c 100644 --- a/tests/nlu/classifiers/test_diet_classifier.py +++ b/tests/nlu/classifiers/test_diet_classifier.py @@ -1,5 +1,4 @@ from pathlib import Path -from typing import Text import numpy as np import pytest @@ -9,7 +8,7 @@ import rasa.model from rasa.shared.nlu.training_data.features import Features -from rasa.nlu import train +import rasa.nlu.train from rasa.nlu.classifiers import LABEL_RANKING_LENGTH from rasa.nlu.config import RasaNLUModelConfig from rasa.shared.nlu.constants import ( @@ -178,7 +177,7 @@ async def _train_persist_load_with_different_settings( ): _config = RasaNLUModelConfig({"pipeline": pipeline, "language": "en"}) - (trainer, trained, persisted_path) = await train( + (trainer, trained, persisted_path) = await rasa.nlu.train.train( _config, path=str(tmp_path), data="data/examples/rasa/demo-rasa-multi-intent.yml", @@ -287,7 
+286,7 @@ async def test_raise_error_on_incorrect_pipeline(component_builder, tmp_path: Pa ) with pytest.raises(Exception) as e: - await train( + await rasa.nlu.train.train( _config, path=str(tmp_path), data=DEFAULT_DATA_PATH, @@ -351,7 +350,7 @@ async def test_softmax_normalization( pipeline[2].update(classifier_params) _config = RasaNLUModelConfig({"pipeline": pipeline}) - (trained_model, _, persisted_path) = await train( + (trained_model, _, persisted_path) = await rasa.nlu.train.train( _config, path=str(tmp_path), data=data_path, component_builder=component_builder ) loaded = Interpreter.load(persisted_path, component_builder) @@ -399,7 +398,7 @@ async def test_inner_linear_normalization( pipeline[2].update(classifier_params) _config = RasaNLUModelConfig({"pipeline": pipeline}) - (trained_model, _, persisted_path) = await train( + (trained_model, _, persisted_path) = await rasa.nlu.train.train( _config, path=str(tmp_path), data=data_path, component_builder=component_builder ) loaded = Interpreter.load(persisted_path, component_builder) @@ -440,7 +439,7 @@ async def test_margin_loss_is_not_normalized( monkeypatch.setattr(train_utils, "normalize", mock.normalize) _config = RasaNLUModelConfig({"pipeline": pipeline}) - (trained_model, _, persisted_path) = await train( + (trained_model, _, persisted_path) = await rasa.nlu.train.train( _config, path=str(tmpdir), data="data/test/many_intents.yml", @@ -477,14 +476,14 @@ async def test_set_random_seed(component_builder, tmpdir): ) # first run - (trained_a, _, persisted_path_a) = await train( + (trained_a, _, persisted_path_a) = await rasa.nlu.train.train( _config, path=tmpdir.strpath + "_a", data=DEFAULT_DATA_PATH, component_builder=component_builder, ) # second run - (trained_b, _, persisted_path_b) = await train( + (trained_b, _, persisted_path_b) = await rasa.nlu.train.train( _config, path=tmpdir.strpath + "_b", data=DEFAULT_DATA_PATH, @@ -525,7 +524,7 @@ async def test_train_tensorboard_logging( } ) - await train( + 
await rasa.nlu.train.train( _config, path=str(tmpdir), data="data/examples/rasa/demo-rasa-multi-intent.yml", @@ -562,7 +561,7 @@ async def test_train_model_checkpointing( } ) - await train( + await rasa.nlu.train.train( _config, path=str(tmpdir), data="data/examples/rasa/demo-rasa.yml", @@ -602,7 +601,7 @@ async def test_train_persist_load_with_composite_entities( _config = RasaNLUModelConfig({"pipeline": pipeline, "language": "en"}) - (trainer, trained, persisted_path) = await train( + (trainer, trained, persisted_path) = await rasa.nlu.train.train( _config, path=tmpdir.strpath, data="data/test/demo-rasa-composite-entities.yml", diff --git a/tests/nlu/extractors/test_crf_entity_extractor.py b/tests/nlu/extractors/test_crf_entity_extractor.py index 84849f1046c8..358296c9aab7 100644 --- a/tests/nlu/extractors/test_crf_entity_extractor.py +++ b/tests/nlu/extractors/test_crf_entity_extractor.py @@ -5,7 +5,7 @@ import pytest from rasa.nlu.components import ComponentBuilder -from rasa.nlu import train +import rasa.nlu.train from rasa.nlu.config import RasaNLUModelConfig from rasa.nlu.model import Interpreter from rasa.nlu.featurizers.dense_featurizer.spacy_featurizer import SpacyFeaturizer @@ -27,7 +27,7 @@ async def test_train_persist_load_with_composite_entities( _config = RasaNLUModelConfig({"pipeline": pipeline, "language": "en"}) - (trainer, trained, persisted_path) = await train( + (trainer, trained, persisted_path) = await rasa.nlu.train.train( _config, path=str(tmp_path), data="data/test/demo-rasa-composite-entities.yml", @@ -98,7 +98,7 @@ async def test_train_persist_with_different_configurations( _config = RasaNLUModelConfig({"pipeline": pipeline, "language": "en"}) - (trainer, trained, persisted_path) = await train( + (trainer, trained, persisted_path) = await rasa.nlu.train.train( _config, path=str(tmp_path), data="data/examples/rasa", diff --git a/tests/nlu/selectors/test_selectors.py b/tests/nlu/selectors/test_selectors.py index 8dbcf1dd0a9b..35c753095334 
100644 --- a/tests/nlu/selectors/test_selectors.py +++ b/tests/nlu/selectors/test_selectors.py @@ -7,7 +7,7 @@ from _pytest.monkeypatch import MonkeyPatch import rasa.model -from rasa.nlu import train +import rasa.nlu.train from rasa.nlu.components import ComponentBuilder from rasa.shared.nlu.training_data import util from rasa.nlu.config import RasaNLUModelConfig @@ -228,7 +228,7 @@ async def test_train_model_checkpointing( } ) - await train( + await rasa.nlu.train.train( _config, path=str(tmpdir), data="data/test_selectors", @@ -258,7 +258,7 @@ async def _train_persist_load_with_different_settings( ): _config = RasaNLUModelConfig({"pipeline": pipeline, "language": "en"}) - (trainer, trained, persisted_path) = await train( + (trainer, trained, persisted_path) = await rasa.nlu.train.train( _config, path=str(tmp_path), data="data/examples/rasa/demo-rasa.yml", @@ -342,7 +342,7 @@ async def test_cross_entropy_with_linear_norm( pipeline[2].update(classifier_params) _config = RasaNLUModelConfig({"pipeline": pipeline}) - (trained_model, _, persisted_path) = await train( + (trained_model, _, persisted_path) = await rasa.nlu.train.train( _config, path=str(tmp_path), data="data/test_selectors", @@ -388,7 +388,7 @@ async def test_margin_loss_is_not_normalized( monkeypatch.setattr(train_utils, "normalize", mock.normalize) _config = RasaNLUModelConfig({"pipeline": pipeline}) - (trained_model, _, persisted_path) = await train( + (trained_model, _, persisted_path) = await rasa.nlu.train.train( _config, path=str(tmp_path), data="data/test_selectors", @@ -428,7 +428,7 @@ async def test_softmax_ranking( pipeline[2].update(classifier_params) _config = RasaNLUModelConfig({"pipeline": pipeline}) - (trained_model, _, persisted_path) = await train( + (trained_model, _, persisted_path) = await rasa.nlu.train.train( _config, path=str(tmp_path), data=data_path, component_builder=component_builder ) loaded = Interpreter.load(persisted_path, component_builder) diff --git 
a/tests/nlu/test_components.py b/tests/nlu/test_components.py index 4c32fb2844ef..e03d0edf4fd3 100644 --- a/tests/nlu/test_components.py +++ b/tests/nlu/test_components.py @@ -3,7 +3,8 @@ import pytest -from rasa.nlu import registry, train +from rasa.nlu import registry +import rasa.nlu.train from rasa.nlu.components import Component, ComponentBuilder, find_unavailable_packages from rasa.nlu.config import RasaNLUModelConfig from rasa.shared.exceptions import InvalidConfigException @@ -98,7 +99,7 @@ async def test_example_component(component_builder: ComponentBuilder, tmp_path: {"pipeline": [{"name": "tests.nlu.example_component.MyComponent"}]} ) - (trainer, trained, persisted_path) = await train( + (trainer, trained, persisted_path) = await rasa.nlu.train.train( _config, data=DEFAULT_DATA_PATH, path=str(tmp_path), @@ -199,7 +200,7 @@ async def test_validate_requirements_raises_exception_on_component_without_name( ) with pytest.raises(InvalidConfigException): - await train( + await rasa.nlu.train.train( _config, data=DEFAULT_DATA_PATH, path=str(tmp_path), ) @@ -211,7 +212,7 @@ async def test_validate_component_keys_raises_warning_on_invalid_key(tmp_path: P ) with pytest.warns(UserWarning) as record: - await train( + await rasa.nlu.train.train( _config, data=DEFAULT_DATA_PATH, path=str(tmp_path), ) diff --git a/tests/nlu/test_evaluation.py b/tests/nlu/test_evaluation.py index 1b7b42b323bf..e8d665d890d9 100644 --- a/tests/nlu/test_evaluation.py +++ b/tests/nlu/test_evaluation.py @@ -12,7 +12,7 @@ import rasa.shared.nlu.training_data.loading import rasa.shared.utils.io import rasa.utils.io -from rasa.nlu import train +import rasa.nlu.train from rasa.nlu.classifiers.diet_classifier import DIETClassifier from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier from rasa.nlu.components import ComponentBuilder, Component @@ -65,7 +65,7 @@ ) from rasa.shared.nlu.training_data.message import Message from rasa.shared.nlu.training_data.training_data import 
TrainingData -from rasa.test import compare_nlu_models +from rasa.model_testing import compare_nlu_models from rasa.utils.tensorflow.constants import EPOCHS, ENTITY_RECOGNITION from tests.nlu.conftest import DEFAULT_DATA_PATH @@ -394,7 +394,7 @@ async def test_eval_data( ], ) - (_, _, persisted_path) = await train( + (_, _, persisted_path) = await rasa.nlu.train.train( _config, path=str(tmp_path), data=data_importer, diff --git a/tests/nlu/test_persistor.py b/tests/nlu/test_persistor.py index baa0a2ea18d0..d7d086168abf 100644 --- a/tests/nlu/test_persistor.py +++ b/tests/nlu/test_persistor.py @@ -4,7 +4,8 @@ from moto import mock_s3 -from rasa.nlu import persistor, train +from rasa.nlu import persistor +import rasa.nlu.train from rasa.nlu.config import RasaNLUModelConfig @@ -24,7 +25,7 @@ async def test_list_method_method_in_AWS_persistor( # noinspection PyPep8Naming os.environ["BUCKET_NAME"] = "rasa-test" os.environ["AWS_DEFAULT_REGION"] = "us-west-1" - (trained, _, persisted_path) = await train( + (trained, _, persisted_path) = await rasa.nlu.train.train( _config, data="data/test/demo-rasa-small.json", path=str(tmp_path), diff --git a/tests/nlu/test_train.py b/tests/nlu/test_train.py index 93924ede2f9d..2fdefc196991 100644 --- a/tests/nlu/test_train.py +++ b/tests/nlu/test_train.py @@ -1,7 +1,8 @@ import os import pytest -from rasa.nlu import registry, train +from rasa.nlu import registry +import rasa.nlu.train from rasa.nlu.config import RasaNLUModelConfig from rasa.nlu.model import Interpreter, Trainer from rasa.shared.nlu.training_data.training_data import TrainingData @@ -138,7 +139,7 @@ def test_all_components_are_in_at_least_one_test_pipeline(): async def test_train_persist_load_parse(language, pipeline, component_builder, tmpdir): _config = RasaNLUModelConfig({"pipeline": pipeline, "language": language}) - (trained, _, persisted_path) = await train( + (trained, _, persisted_path) = await rasa.nlu.train.train( _config, path=tmpdir.strpath, 
data=DEFAULT_DATA_PATH, @@ -212,7 +213,7 @@ async def test_train_model_empty_pipeline(component_builder): _config = RasaNLUModelConfig({"pipeline": None, "language": "en"}) with pytest.raises(ValueError): - await train( + await rasa.nlu.train.train( _config, data=DEFAULT_DATA_PATH, component_builder=component_builder ) @@ -222,7 +223,7 @@ async def test_train_named_model(component_builder, tmpdir): {"pipeline": [{"name": "KeywordIntentClassifier"}], "language": "en"} ) - (trained, _, persisted_path) = await train( + (trained, _, persisted_path) = await rasa.nlu.train.train( _config, path=tmpdir.strpath, data=DEFAULT_DATA_PATH, @@ -242,7 +243,7 @@ async def test_handles_pipeline_with_non_existing_component( pretrained_embeddings_spacy_config.pipeline.append({"name": "my_made_up_component"}) with pytest.raises(Exception) as execinfo: - await train( + await rasa.nlu.train.train( pretrained_embeddings_spacy_config, data=DEFAULT_DATA_PATH, component_builder=component_builder, @@ -255,7 +256,7 @@ async def test_train_model_training_data_persisted(component_builder, tmpdir): {"pipeline": [{"name": "KeywordIntentClassifier"}], "language": "en"} ) - (trained, _, persisted_path) = await train( + (trained, _, persisted_path) = await rasa.nlu.train.train( _config, path=tmpdir.strpath, data=DEFAULT_DATA_PATH, @@ -276,7 +277,7 @@ async def test_train_model_no_training_data_persisted(component_builder, tmpdir) {"pipeline": [{"name": "KeywordIntentClassifier"}], "language": "en"} ) - (trained, _, persisted_path) = await train( + (trained, _, persisted_path) = await rasa.nlu.train.train( _config, path=tmpdir.strpath, data=DEFAULT_DATA_PATH, diff --git a/tests/shared/core/test_constants.py b/tests/shared/core/test_constants.py new file mode 100644 index 000000000000..1906d7a7c908 --- /dev/null +++ b/tests/shared/core/test_constants.py @@ -0,0 +1,41 @@ +from typing import Text, Type + +import pytest + +from rasa.core.policies.fallback import FallbackPolicy +from 
rasa.core.policies.form_policy import FormPolicy +from rasa.core.policies.mapping_policy import MappingPolicy +from rasa.core.policies.rule_policy import RulePolicy +from rasa.core.policies.two_stage_fallback import TwoStageFallbackPolicy +from rasa.nlu.classifiers.fallback_classifier import FallbackClassifier +from rasa.shared.core.constants import ( + CLASSIFIER_NAME_FALLBACK, + POLICY_NAME_FALLBACK, + POLICY_NAME_MAPPING, + POLICY_NAME_RULE, + POLICY_NAME_TWO_STAGE_FALLBACK, + POLICY_NAME_FORM, +) + + +@pytest.mark.parametrize( + "name_in_constant, policy_class", + [ + (POLICY_NAME_TWO_STAGE_FALLBACK, TwoStageFallbackPolicy), + (POLICY_NAME_FALLBACK, FallbackPolicy), + (POLICY_NAME_MAPPING, MappingPolicy), + (POLICY_NAME_FORM, FormPolicy), + (POLICY_NAME_RULE, RulePolicy), + (CLASSIFIER_NAME_FALLBACK, FallbackClassifier), + ], +) +def test_policy_names(name_in_constant: Text, policy_class: Type): + assert name_in_constant == policy_class.__name__ + + +@pytest.mark.parametrize( + "name_in_constant, classifier_class", + [(CLASSIFIER_NAME_FALLBACK, FallbackClassifier),], +) +def test_classifier_names(name_in_constant: Text, classifier_class: Type): + assert name_in_constant == classifier_class.__name__ diff --git a/tests/test_model.py b/tests/test_model.py index 52e97902cf95..25ba91aad9ee 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -51,7 +51,7 @@ FingerprintComparisonResult, ) from rasa.exceptions import ModelNotFound -from rasa.train import train_core, train_core_async +from rasa.model_training import train_core, train_core_async from tests.core.conftest import DEFAULT_DOMAIN_PATH_WITH_MAPPING, DEFAULT_STACK_CONFIG diff --git a/tests/test_test.py b/tests/test_model_testing.py similarity index 97% rename from tests/test_test.py rename to tests/test_model_testing.py index aea5aed8b157..a753218186ed 100644 --- a/tests/test_test.py +++ b/tests/test_model_testing.py @@ -35,7 +35,7 @@ def monkeypatch_get_latest_model(tmp_path: Path, monkeypatch: 
MonkeyPatch) -> No def test_get_sanitized_model_directory_when_not_passing_model( capsys: CaptureFixture, tmp_path: Path, monkeypatch: MonkeyPatch ): - from rasa.test import _get_sanitized_model_directory + from rasa.model_testing import _get_sanitized_model_directory monkeypatch_get_latest_model(tmp_path, monkeypatch) @@ -54,7 +54,7 @@ def test_get_sanitized_model_directory_when_not_passing_model( def test_get_sanitized_model_directory_when_passing_model_file_explicitly( capsys: CaptureFixture, tmp_path: Path, monkeypatch: MonkeyPatch ): - from rasa.test import _get_sanitized_model_directory + from rasa.model_testing import _get_sanitized_model_directory monkeypatch_get_latest_model(tmp_path, monkeypatch) @@ -73,7 +73,7 @@ def test_get_sanitized_model_directory_when_passing_model_file_explicitly( def test_get_sanitized_model_directory_when_passing_other_input( capsys: CaptureFixture, tmp_path: Path, monkeypatch: MonkeyPatch ): - from rasa.test import _get_sanitized_model_directory + from rasa.model_testing import _get_sanitized_model_directory monkeypatch_get_latest_model(tmp_path, monkeypatch) @@ -109,7 +109,7 @@ def test_get_sanitized_model_directory_when_passing_other_input( def test_get_evaluation_metrics( targets, predictions, expected_precision, expected_fscore, expected_accuracy ): - from rasa.test import get_evaluation_metrics + from rasa.model_testing import get_evaluation_metrics report, precision, f1, accuracy = get_evaluation_metrics( targets, predictions, True, exclude_label=NO_ENTITY @@ -140,7 +140,7 @@ def test_get_evaluation_metrics( ], ) def test_get_label_set(targets, exclude_label, expected): - from rasa.test import get_unique_labels + from rasa.model_testing import get_unique_labels actual = get_unique_labels(targets, exclude_label) assert set(expected) == set(actual) @@ -158,7 +158,7 @@ async def test_interpreter_passed_to_agent( def test_e2e_warning_if_no_nlu_model( monkeypatch: MonkeyPatch, trained_core_model: Text, capsys: CaptureFixture ): 
- from rasa.test import test_core + from rasa.model_testing import test_core # Patching is bit more complicated as we have a module `train` and function # with the same name 😬 diff --git a/tests/test_train.py b/tests/test_model_training.py similarity index 92% rename from tests/test_train.py rename to tests/test_model_training.py index 2d5145d9d8e6..0e0354e838d1 100644 --- a/tests/test_train.py +++ b/tests/test_model_training.py @@ -15,6 +15,7 @@ from rasa.core.policies.ted_policy import TEDPolicy import rasa.model import rasa.core +import rasa.core.train import rasa.nlu from rasa.nlu.classifiers.diet_classifier import DIETClassifier import rasa.shared.importers.autoconfig as autoconfig @@ -23,7 +24,7 @@ from rasa.core.interpreter import RasaNLUInterpreter from rasa.nlu.model import Interpreter -from rasa.train import train_core, train_nlu, train, dry_run_result +from rasa.model_training import train_core, train_nlu, train, dry_run_result from rasa.utils.tensorflow.constants import EPOCHS from tests.conftest import DEFAULT_CONFIG_PATH, DEFAULT_NLU_DATA, AsyncMock from tests.core.conftest import DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_STORIES_FILE @@ -233,14 +234,13 @@ def test_trained_interpreter_passed_to_core_training( # Patching is bit more complicated as we have a module `train` and function # with the same name 😬 monkeypatch.setattr( - sys.modules["rasa.train"], + rasa.model_training, "_train_nlu_with_validated_data", AsyncMock(return_value=unpacked_trained_rasa_model), ) # Mock the actual Core training - _train_core = AsyncMock() - monkeypatch.setattr(rasa.core, "train", _train_core) + _train_core = mock_core_training(monkeypatch) train( DEFAULT_DOMAIN_PATH_WITH_SLOTS, @@ -270,8 +270,7 @@ def test_interpreter_of_old_model_passed_to_core_training( ) # Mock the actual Core training - _train_core = AsyncMock() - monkeypatch.setattr(rasa.core, "train", _train_core) + _train_core = mock_core_training(monkeypatch) train( DEFAULT_DOMAIN_PATH_WITH_SLOTS, @@ -286,13 
+285,13 @@ def test_interpreter_of_old_model_passed_to_core_training( def test_load_interpreter_returns_none_for_none(): - from rasa.train import _load_interpreter + from rasa.model_training import _load_interpreter assert _load_interpreter(None) is None def test_interpreter_from_previous_model_returns_none_for_none(): - from rasa.train import _interpreter_from_previous_model + from rasa.model_training import _interpreter_from_previous_model assert _interpreter_from_previous_model(None) is None @@ -312,7 +311,7 @@ def test_train_core_autoconfig( # skip actual core training monkeypatch.setattr( - sys.modules["rasa.train"], "_train_core_with_validated_data", AsyncMock() + rasa.model_training, "_train_core_with_validated_data", AsyncMock() ) # do training @@ -341,7 +340,7 @@ def test_train_nlu_autoconfig( monkeypatch.setattr(autoconfig, "get_configuration", mocked_get_configuration) monkeypatch.setattr( - sys.modules["rasa.train"], "_train_nlu_with_validated_data", AsyncMock() + rasa.model_training, "_train_nlu_with_validated_data", AsyncMock() ) # do training @@ -367,11 +366,11 @@ async def mock_async_func(*args: Any, **kwargs: Any) -> None: def mock_core_training(monkeypatch: MonkeyPatch) -> Mock: - return mock_async(monkeypatch, rasa.core, rasa.core.train.__name__) + return mock_async(monkeypatch, rasa.core.train, rasa.core.train.train.__name__) def mock_nlu_training(monkeypatch: MonkeyPatch) -> Mock: - return mock_async(monkeypatch, rasa.nlu, rasa.nlu.train.__name__) + return mock_async(monkeypatch, rasa.nlu.train, rasa.nlu.train.train.__name__) def new_model_path_in_same_dir(old_model_path: Text) -> Text: @@ -620,11 +619,8 @@ def test_model_finetuning( trained_rasa_model: Text, use_latest_model: bool, ): - mocked_nlu_training = Mock(wraps=rasa.nlu.train) - monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training) - - mocked_core_training = Mock(wraps=rasa.core.train) - monkeypatch.setattr(rasa.core, rasa.core.train.__name__, 
mocked_core_training) + mocked_nlu_training = mock_nlu_training(monkeypatch) + mocked_core_training = mock_core_training(monkeypatch) (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -659,9 +655,7 @@ def test_model_finetuning_core( trained_moodbot_path: Text, use_latest_model: bool, ): - mocked_core_training = AsyncMock() - monkeypatch.setattr(rasa.core, rasa.core.train.__name__, mocked_core_training) - + mocked_core_training = mock_core_training(monkeypatch) mock_agent_load = Mock(wraps=Agent.load) monkeypatch.setattr(Agent, "load", mock_agent_load) @@ -710,9 +704,7 @@ def test_model_finetuning_core( def test_model_finetuning_core_with_default_epochs( tmp_path: Path, monkeypatch: MonkeyPatch, trained_moodbot_path: Text, ): - mocked_core_training = AsyncMock() - monkeypatch.setattr(rasa.core, rasa.core.train.__name__, mocked_core_training) - + mocked_core_training = mock_core_training(monkeypatch) (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -743,8 +735,7 @@ def test_model_finetuning_core_with_default_epochs( def test_model_finetuning_core_new_domain_label( tmp_path: Path, monkeypatch: MonkeyPatch, trained_moodbot_path: Text, ): - mocked_core_training = AsyncMock() - monkeypatch.setattr(rasa.core, rasa.core.train.__name__, mocked_core_training) + mocked_core_training = mock_core_training(monkeypatch) (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -770,10 +761,8 @@ def test_model_finetuning_core_new_domain_label( def test_model_finetuning_new_domain_label_stops_all_training( tmp_path: Path, monkeypatch: MonkeyPatch, trained_moodbot_path: Text, ): - mocked_core_training = AsyncMock() - mocked_nlu_training = AsyncMock() - monkeypatch.setattr(rasa.core, rasa.core.train.__name__, mocked_core_training) - monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training) + mocked_core_training = mock_core_training(monkeypatch) + mocked_nlu_training = mock_nlu_training(monkeypatch) (tmp_path / 
"models").mkdir() output = str(tmp_path / "models") @@ -807,8 +796,7 @@ def test_model_finetuning_nlu( trained_nlu_moodbot_path: Text, use_latest_model: bool, ): - mocked_nlu_training = AsyncMock(return_value="") - monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training) + mocked_nlu_training = mock_nlu_training(monkeypatch) mock_interpreter_create = Mock(wraps=Interpreter.create) monkeypatch.setattr(Interpreter, "create", mock_interpreter_create) @@ -862,8 +850,7 @@ def test_model_finetuning_nlu( def test_model_finetuning_nlu_new_label( tmp_path: Path, monkeypatch: MonkeyPatch, trained_nlu_moodbot_path: Text, ): - mocked_nlu_training = AsyncMock(return_value="") - monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training) + mocked_nlu_training = mock_nlu_training(monkeypatch) (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -888,8 +875,7 @@ def test_model_finetuning_nlu_new_label( def test_model_finetuning_nlu_new_entity( tmp_path: Path, monkeypatch: MonkeyPatch, trained_nlu_moodbot_path: Text, ): - mocked_nlu_training = AsyncMock(return_value="") - monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training) + mocked_nlu_training = mock_nlu_training(monkeypatch) (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -919,8 +905,7 @@ def test_model_finetuning_nlu_new_label_already_in_domain( default_config_path: Text, default_domain_path: Text, ): - mocked_nlu_training = AsyncMock(return_value="") - monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training) + mocked_nlu_training = mock_nlu_training(monkeypatch) (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -946,8 +931,7 @@ def test_model_finetuning_nlu_new_label_already_in_domain( def test_model_finetuning_nlu_new_label_to_domain_only( tmp_path: Path, monkeypatch: MonkeyPatch, trained_nlu_moodbot_path: Text, ): - mocked_nlu_training = AsyncMock(return_value="") - monkeypatch.setattr(rasa.nlu, 
rasa.nlu.train.__name__, mocked_nlu_training) + mocked_nlu_training = mock_nlu_training(monkeypatch) (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -972,8 +956,7 @@ def test_model_finetuning_nlu_new_label_to_domain_only( def test_model_finetuning_nlu_with_default_epochs( tmp_path: Path, monkeypatch: MonkeyPatch, trained_nlu_moodbot_path: Text, ): - mocked_nlu_training = AsyncMock(return_value="") - monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training) + mocked_nlu_training = mock_nlu_training(monkeypatch) (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -1012,12 +995,9 @@ def test_model_finetuning_with_invalid_model( model_to_fine_tune: Text, capsys: CaptureFixture, ): - mocked_nlu_training = AsyncMock(return_value="") - monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training) - - mocked_core_training = AsyncMock() - monkeypatch.setattr(rasa.core, rasa.core.train.__name__, mocked_core_training) + mocked_nlu_training = mock_nlu_training(monkeypatch) + mocked_core_training = mock_core_training(monkeypatch) (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -1048,9 +1028,7 @@ def test_model_finetuning_with_invalid_model_core( model_to_fine_tune: Text, capsys: CaptureFixture, ): - mocked_core_training = AsyncMock() - monkeypatch.setattr(rasa.core, rasa.core.train.__name__, mocked_core_training) - + mocked_core_training = mock_core_training(monkeypatch) (tmp_path / "models").mkdir() output = str(tmp_path / "models") @@ -1079,8 +1057,7 @@ def test_model_finetuning_with_invalid_model_nlu( model_to_fine_tune: Text, capsys: CaptureFixture, ): - mocked_nlu_training = AsyncMock(return_value="") - monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training) + mocked_nlu_training = mock_nlu_training(monkeypatch) (tmp_path / "models").mkdir() output = str(tmp_path / "models") diff --git a/tests/test_server.py b/tests/test_server.py index c6270b7c372a..5df96ca2218d 100644 
--- a/tests/test_server.py +++ b/tests/test_server.py @@ -43,6 +43,7 @@ from rasa.core.channels.slack import SlackBot from rasa.core.tracker_store import InMemoryTrackerStore from rasa.model import unpack_model +import rasa.nlu.test from rasa.nlu.test import CVEvaluationResult from rasa.shared.core import events from rasa.shared.core.constants import ( @@ -62,7 +63,7 @@ ) from rasa.shared.core.trackers import DialogueStateTracker from rasa.shared.nlu.constants import INTENT_NAME_KEY -from rasa.train import TrainingResult +from rasa.model_training import TrainingResult from rasa.utils.endpoints import EndpointConfig from tests.core.conftest import DEFAULT_STACK_CONFIG from tests.nlu.utilities import ResponseTest @@ -217,7 +218,7 @@ def run_server(monkeypatch: MonkeyPatch) -> NoReturn: import sys monkeypatch.setattr( - sys.modules["rasa.train"], "train_async", mocked_training_function, + sys.modules["rasa.model_training"], "train_async", mocked_training_function, ) from rasa import __main__ @@ -990,7 +991,9 @@ async def test_cross_validation_with_callback_success( ) ) monkeypatch.setattr( - rasa.nlu, rasa.nlu.cross_validate.__name__, mocked_cross_validation + rasa.nlu.test, + rasa.nlu.test.cross_validate.__name__, + mocked_cross_validation, ) _, response = await rasa_app_nlu.post( @@ -1033,7 +1036,9 @@ async def test_cross_validation_with_callback_error( payload = f"{nlu_data}\n{config}" monkeypatch.setattr( - rasa.nlu, rasa.nlu.cross_validate.__name__, Mock(side_effect=ValueError()) + rasa.nlu.test, + rasa.nlu.test.cross_validate.__name__, + Mock(side_effect=ValueError()), ) callback_url = "https://example.com/webhooks/actions"