From 0c69020432d09c81ee4b5e2bce784b8354a08279 Mon Sep 17 00:00:00 2001
From: SangBin Cho
Date: Tue, 24 Jan 2023 08:25:20 +0900
Subject: [PATCH] Revert "Simplify logging configuration. (#30863)" (#31858)

This reverts commit 608276bb96b5b49769cd8816414c280c5431d843.

It looks like the reverted commit breaks backward compatibility of RLlib's
logging: RLlib is supposed to print WARN-level logs by default, but INFO
logs are printed instead.
---
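Note (not part of the patch): a minimal standalone sketch of the logging
behavior this revert restores. The _setup_logger body mirrors the
rllib/__init__.py hunk below; the last three lines are illustrative only.

    import logging

    # Mirrors the rllib/__init__.py hunk below: RLlib gets a dedicated
    # handler on the "ray.rllib" logger and stops propagating records to
    # Ray's root logging configuration.
    def _setup_logger():
        logger = logging.getLogger("ray.rllib")
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter(
                "%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s"
            )
        )
        logger.addHandler(handler)
        logger.propagate = False

    _setup_logger()
    # Algorithm.setup() (see the algorithm.py hunk) then applies the
    # configured log_level, which defaults to "WARN" again:
    logging.getLogger("ray.rllib").setLevel("WARN")
    logging.getLogger("ray.rllib").warning("printed by default")
    logging.getLogger("ray.rllib").info("suppressed unless log_level is raised")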
 .../_private/runtime_env/_clonevirtualenv.py   |  2 +-
 .../_private/kuberay/run_autoscaler.py         | 18 +++---------------
 .../autoscaler/_private/local/node_provider.py |  3 ++-
 python/ray/experimental/raysort/main.py        |  1 +
 python/ray/tests/kuberay/utils.py              | 12 ------------
 python/ray/util/client/server/logservicer.py   |  3 ---
 release/ray_release/logger.py                  |  2 +-
 rllib/__init__.py                              | 16 ++++++++++++++++
 rllib/algorithms/algorithm.py                  |  9 +++++++++
 rllib/algorithms/algorithm_config.py           | 16 ++++------------
 rllib/evaluation/rollout_worker.py             |  3 +++
 rllib/examples/simulators/sumo/connector.py    |  1 +
 rllib/examples/simulators/sumo/utils.py        |  1 +
 rllib/examples/sumo_env_local.py               |  3 ++-
 rllib/examples/tune/framework.py               |  3 ++-
 15 files changed, 46 insertions(+), 47 deletions(-)

diff --git a/python/ray/_private/runtime_env/_clonevirtualenv.py b/python/ray/_private/runtime_env/_clonevirtualenv.py
index 269a0e448781..1f2eab3d1040 100644
--- a/python/ray/_private/runtime_env/_clonevirtualenv.py
+++ b/python/ray/_private/runtime_env/_clonevirtualenv.py
@@ -15,7 +15,7 @@
 
 __version__ = "0.5.7"
 
-logger = logging.getLogger(__name__)
+logger = logging.getLogger()
 
 env_bin_dir = "bin"
diff --git a/python/ray/autoscaler/_private/kuberay/run_autoscaler.py b/python/ray/autoscaler/_private/kuberay/run_autoscaler.py
index 1d610a655e97..657f5b2e3242 100644
--- a/python/ray/autoscaler/_private/kuberay/run_autoscaler.py
+++ b/python/ray/autoscaler/_private/kuberay/run_autoscaler.py
@@ -17,21 +17,7 @@
 
 
 def run_kuberay_autoscaler(cluster_name: str, cluster_namespace: str):
-    """Wait until the Ray head container is ready. Then start the autoscaler.
-
-    For kuberay's autoscaler integration, the autoscaler runs in a sidecar container
-    in the same pod as the main Ray container, which runs the rest of the Ray
-    processes.
-
-    The logging configuration here is for the sidecar container, but we need the
-    logs to go to the same place as the head node logs because the autoscaler is
-    allowed to send scaling events to Ray drivers' stdout. The implementation of
-    this feature involves the autoscaler communicating to another Ray process
-    (the log monitor) via logs in that directory.
-
-    However, the Ray head container sets up the log directory. Thus, we set up
-    logging only after the Ray head is ready.
-    """
+    """Wait until the Ray head container is ready. Then start the autoscaler."""
     head_ip = get_node_ip_address()
     ray_address = f"{head_ip}:6379"
     while True:
@@ -55,6 +41,8 @@ def run_kuberay_autoscaler(cluster_name: str, cluster_namespace: str):
             print(f"Will check again in {BACKOFF_S} seconds.")
             time.sleep(BACKOFF_S)
 
+    # The Ray head container sets up the log directory. Thus, we set up logging
+    # only after the Ray head is ready.
     _setup_logging()
 
     # autoscaling_config_producer reads the RayCluster CR from K8s and uses the CR
diff --git a/python/ray/autoscaler/_private/local/node_provider.py b/python/ray/autoscaler/_private/local/node_provider.py
index e9bb540b88fc..cd3b7b64166f 100644
--- a/python/ray/autoscaler/_private/local/node_provider.py
+++ b/python/ray/autoscaler/_private/local/node_provider.py
@@ -25,7 +25,8 @@
 
 logger = logging.getLogger(__name__)
 
-logging.getLogger("filelock").setLevel(logging.WARNING)
+filelock_logger = logging.getLogger("filelock")
+filelock_logger.setLevel(logging.WARNING)
 
 
 class ClusterState:
diff --git a/python/ray/experimental/raysort/main.py b/python/ray/experimental/raysort/main.py
index 44270da31803..608b7b304f4c 100644
--- a/python/ray/experimental/raysort/main.py
+++ b/python/ray/experimental/raysort/main.py
@@ -447,6 +447,7 @@ def init(args: Args):
         ray.init(resources={"worker": os.cpu_count()})
     else:
         ray.init(address=args.ray_address)
+    logging_utils.init()
     logging.info(args)
     os.makedirs(constants.WORK_DIR, exist_ok=True)
     resources = ray.cluster_resources()
diff --git a/python/ray/tests/kuberay/utils.py b/python/ray/tests/kuberay/utils.py
index 892530912051..345cc09bcf6d 100644
--- a/python/ray/tests/kuberay/utils.py
+++ b/python/ray/tests/kuberay/utils.py
@@ -33,18 +33,6 @@
 
 
 def setup_logging():
-    """Set up logging for kuberay.
-
-    For kuberay's autoscaler integration, the autoscaler runs in a sidecar container
-    in the same pod as the main Ray container, which runs the rest of the Ray
-    processes.
-
-    The logging configuration here is for the sidecar container, but we need the
-    logs to go to the same place as the head node logs because the autoscaler is
-    allowed to send scaling events to Ray drivers' stdout. The implementation of
-    this feature involves the autoscaler communicating to another Ray process
-    (the log monitor) via logs in that directory.
-    """
     logging.basicConfig(
         level=logging.INFO,
         format=LOG_FORMAT,
diff --git a/python/ray/util/client/server/logservicer.py b/python/ray/util/client/server/logservicer.py
index 83351f64ca76..764e6c82c653 100644
--- a/python/ray/util/client/server/logservicer.py
+++ b/python/ray/util/client/server/logservicer.py
@@ -54,9 +54,6 @@ def unregister_global(self):
 
 
 def log_status_change_thread(log_queue, request_iterator):
-    """This is run in a separate thread and therefore needs a separate logging
-    configuration outside of the default ray logging configuration.
-    """
     std_handler = StdStreamHandler(log_queue)
     current_handler = None
     root_logger = logging.getLogger("ray")
diff --git a/release/ray_release/logger.py b/release/ray_release/logger.py
index 11f7b90a9766..8d79a4368c5b 100644
--- a/release/ray_release/logger.py
+++ b/release/ray_release/logger.py
@@ -1,7 +1,7 @@
 import logging
 import sys
 
-logger = logging.getLogger(__name__)
+logger = logging.getLogger()
 logger.setLevel(logging.INFO)
diff --git a/rllib/__init__.py b/rllib/__init__.py
index c57d3094fb27..f63b8173d433 100644
--- a/rllib/__init__.py
+++ b/rllib/__init__.py
@@ -1,3 +1,5 @@
+import logging
+
 from ray._private.usage import usage_lib
 
 # Note: do not introduce unnecessary library dependencies here, e.g. gym.
@@ -14,6 +16,18 @@
 from ray.tune.registry import register_trainable
 
 
+def _setup_logger():
+    logger = logging.getLogger("ray.rllib")
+    handler = logging.StreamHandler()
+    handler.setFormatter(
+        logging.Formatter(
+            "%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s"
+        )
+    )
+    logger.addHandler(handler)
+    logger.propagate = False
+
+
 def _register_all():
     from ray.rllib.algorithms.registry import ALGORITHMS, _get_algorithm_class
 
@@ -24,6 +38,8 @@ def _register_all():
         register_trainable(key, _get_algorithm_class(key))
 
 
+_setup_logger()
+
 usage_lib.record_library_usage("rllib")
 
 __all__ = [
diff --git a/rllib/algorithms/algorithm.py b/rllib/algorithms/algorithm.py
index 47d50a12db8c..6b9921225fb2 100644
--- a/rllib/algorithms/algorithm.py
+++ b/rllib/algorithms/algorithm.py
@@ -494,6 +494,15 @@ def setup(self, config: AlgorithmConfig) -> None:
         self._record_usage(self.config)
 
         self.callbacks = self.config["callbacks"]()
+        log_level = self.config.get("log_level")
+        if log_level in ["WARN", "ERROR"]:
+            logger.info(
+                "Current log_level is {}. For more information, "
+                "set 'log_level': 'INFO' / 'DEBUG' or use the -v and "
+                "-vv flags.".format(log_level)
+            )
+        if self.config.get("log_level"):
+            logging.getLogger("ray.rllib").setLevel(self.config["log_level"])
 
         # Create local replay buffer if necessary.
         self.local_replay_buffer = self._create_local_replay_buffer_if_necessary(
diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py
index cd720babb0ca..2c657631897a 100644
--- a/rllib/algorithms/algorithm_config.py
+++ b/rllib/algorithms/algorithm_config.py
@@ -385,7 +385,7 @@ def __init__(self, algo_class=None):
         # `self.debugging()`
         self.logger_creator = None
         self.logger_config = None
-        self.log_level = DEPRECATED_VALUE
+        self.log_level = "WARN"
         self.log_sys_usage = True
         self.fake_sampler = False
         self.seed = None
@@ -2052,7 +2052,7 @@ def debugging(
         *,
         logger_creator: Optional[Callable[[], Logger]] = NotProvided,
         logger_config: Optional[dict] = NotProvided,
-        log_level: Optional[str] = DEPRECATED_VALUE,
+        log_level: Optional[str] = NotProvided,
         log_sys_usage: Optional[bool] = NotProvided,
         fake_sampler: Optional[bool] = NotProvided,
         seed: Optional[int] = NotProvided,
@@ -2086,16 +2086,8 @@
             self.logger_creator = logger_creator
         if logger_config is not NotProvided:
             self.logger_config = logger_config
-        if log_level != DEPRECATED_VALUE:
-            deprecation_warning(
-                old="config.log_level",
-                help=(
-                    "RLlib no longer has a separate logging configuration from the rest"
-                    " of Ray. Configure logging on the root logger; RLlib messages "
-                    "will be propagated up the logger hierarchy to be handled there."
-                ),
-                error=False,
-            )
+        if log_level is not NotProvided:
+            self.log_level = log_level
         if log_sys_usage is not NotProvided:
             self.log_sys_usage = log_sys_usage
         if fake_sampler is not NotProvided:
diff --git a/rllib/evaluation/rollout_worker.py b/rllib/evaluation/rollout_worker.py
index 16e2782c810c..6c876c0fdca5 100644
--- a/rllib/evaluation/rollout_worker.py
+++ b/rllib/evaluation/rollout_worker.py
@@ -521,6 +521,9 @@ def gen_rollouts():
         ):
             tf1.enable_eager_execution()
 
+        if self.config.log_level:
+            logging.getLogger("ray.rllib").setLevel(self.config.log_level)
+
         if self.worker_index > 1:
             disable_log_once_globally()  # only need 1 worker to log
         elif self.config.log_level == "DEBUG":
diff --git a/rllib/examples/simulators/sumo/connector.py b/rllib/examples/simulators/sumo/connector.py
index 2d30e14392dc..f74380e29bf5 100644
--- a/rllib/examples/simulators/sumo/connector.py
+++ b/rllib/examples/simulators/sumo/connector.py
@@ -20,6 +20,7 @@
 
 ###############################################################################
 
+logging.basicConfig()
 logger = logging.getLogger(__name__)
 
 ###############################################################################
diff --git a/rllib/examples/simulators/sumo/utils.py b/rllib/examples/simulators/sumo/utils.py
index a574635c0614..b9f75552351d 100644
--- a/rllib/examples/simulators/sumo/utils.py
+++ b/rllib/examples/simulators/sumo/utils.py
@@ -29,6 +29,7 @@
 
 ###############################################################################
 
+logging.basicConfig()
 logger = logging.getLogger(__name__)
 
 ###############################################################################
diff --git a/rllib/examples/sumo_env_local.py b/rllib/examples/sumo_env_local.py
index 4f437af98a94..47f888e631fd 100644
--- a/rllib/examples/sumo_env_local.py
+++ b/rllib/examples/sumo_env_local.py
@@ -23,7 +23,8 @@
 from ray.rllib.examples.simulators.sumo import marlenvironment
 from ray.rllib.utils.test_utils import check_learning_achieved
 
-logger = logging.getLogger(__name__)
+logging.basicConfig(level=logging.WARN)
+logger = logging.getLogger("ppotrain")
 
 parser = argparse.ArgumentParser()
 parser.add_argument(
diff --git a/rllib/examples/tune/framework.py b/rllib/examples/tune/framework.py
index 78a42208f509..304b549708e9 100644
--- a/rllib/examples/tune/framework.py
+++ b/rllib/examples/tune/framework.py
@@ -10,7 +10,8 @@
 from ray.rllib.algorithms.appo import APPOConfig
 from ray.tune import CLIReporter
 
-logger = logging.getLogger(__name__)
+logging.basicConfig(level=logging.WARN)
+logger = logging.getLogger("tune_framework")
 
 
 def run(smoke_test=False):
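
Postscript (not part of the patch): with log_level restored, per-Algorithm
verbosity is configured through AlgorithmConfig.debugging() again. A minimal
usage sketch, assuming the standard PPOConfig entry point:

    from ray.rllib.algorithms.ppo import PPOConfig

    # log_level defaults to "WARN"; Algorithm.setup() applies an override
    # to the "ray.rllib" logger (see the algorithm.py hunk above).
    config = PPOConfig().debugging(log_level="INFO")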