Revert "Simplify logging configuration. (#30863)" (#31858)
This reverts commit 608276b.

It looks like this breaks RLlib's backward compatibility (it is supposed to print a WARN-level message first, but it prints an INFO-level log instead).
rkooo567 authored Jan 23, 2023
1 parent 33d4b14 commit 0c69020
Showing 15 changed files with 46 additions and 47 deletions.
2 changes: 1 addition & 1 deletion python/ray/_private/runtime_env/_clonevirtualenv.py
@@ -15,7 +15,7 @@
__version__ = "0.5.7"


- logger = logging.getLogger(__name__)
+ logger = logging.getLogger()


env_bin_dir = "bin"
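This first hunk captures the theme of the revert: the module-specific logger (`logging.getLogger(__name__)`) introduced by #30863 goes back to being the root logger (`logging.getLogger()`). As a rough standalone illustration of the difference (not part of this commit): a named logger propagates to the root by default but can be tuned on its own, whereas configuring the root logger affects every module that propagates to it.

import logging

# Root logger: basicConfig() attaches a handler here and sets its level, which
# affects every module whose records propagate up to it.
logging.basicConfig(level=logging.INFO)
root = logging.getLogger()

# Named module logger: propagates to the root by default, but can be tuned
# independently without touching the rest of the logger hierarchy.
module = logging.getLogger("ray._private.runtime_env._clonevirtualenv")
module.info("visible: handled by the root handler at INFO")

module.setLevel(logging.WARNING)
module.info("dropped: below this logger's own threshold")
root.info("still visible: the root logger is unaffected")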
18 changes: 3 additions & 15 deletions python/ray/autoscaler/_private/kuberay/run_autoscaler.py
@@ -17,21 +17,7 @@


def run_kuberay_autoscaler(cluster_name: str, cluster_namespace: str):
"""Wait until the Ray head container is ready. Then start the autoscaler.
For kuberay's autoscaler integration, the autoscaler runs in a sidecar container
in the same pod as the main Ray container, which runs the rest of the Ray
processes.
The logging configuration here is for the sidecar container, but we need the
logs to go to the same place as the head node logs because the autoscaler is
allowed to send scaling events to Ray drivers' stdout. The implementation of
this feature involves the autoscaler communicating to another Ray process
(the log monitor) via logs in that directory.
However, the Ray head container sets up the log directory. Thus, we set up
logging only after the Ray head is ready.
"""
"""Wait until the Ray head container is ready. Then start the autoscaler."""
head_ip = get_node_ip_address()
ray_address = f"{head_ip}:6379"
while True:
@@ -55,6 +41,8 @@ def run_kuberay_autoscaler(cluster_name: str, cluster_namespace: str):
print(f"Will check again in {BACKOFF_S} seconds.")
time.sleep(BACKOFF_S)

+ # The Ray head container sets up the log directory. Thus, we set up logging
+ # only after the Ray head is ready.
_setup_logging()

# autoscaling_config_producer reads the RayCluster CR from K8s and uses the CR
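The restored comment spells out the ordering constraint: the Ray head container creates the log directory, so the autoscaler sidecar must not configure logging until the head is reachable. A minimal sketch of that wait-then-configure pattern follows; head_is_ready() is a hypothetical stand-in for the real readiness probe against the head's GCS address, and basicConfig() merely stands in for Ray's _setup_logging().

import logging
import time

BACKOFF_S = 5  # assumed retry interval, mirroring the loop above


def head_is_ready() -> bool:
    # Hypothetical probe; the real code checks the Ray head at {head_ip}:6379.
    return True  # placeholder so the sketch terminates


def main() -> None:
    # Defer logging setup until the head container has created the log directory.
    while not head_is_ready():
        print(f"Will check again in {BACKOFF_S} seconds.")
        time.sleep(BACKOFF_S)
    # Stand-in for _setup_logging(): point handlers at the shared log directory.
    logging.basicConfig(level=logging.INFO)


if __name__ == "__main__":
    main()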
3 changes: 2 additions & 1 deletion python/ray/autoscaler/_private/local/node_provider.py
@@ -25,7 +25,8 @@

logger = logging.getLogger(__name__)

logging.getLogger("filelock").setLevel(logging.WARNING)
filelock_logger = logging.getLogger("filelock")
filelock_logger.setLevel(logging.WARNING)


class ClusterState:
1 change: 1 addition & 0 deletions python/ray/experimental/raysort/main.py
@@ -447,6 +447,7 @@ def init(args: Args):
ray.init(resources={"worker": os.cpu_count()})
else:
ray.init(address=args.ray_address)
+ logging_utils.init()
logging.info(args)
os.makedirs(constants.WORK_DIR, exist_ok=True)
resources = ray.cluster_resources()
12 changes: 0 additions & 12 deletions python/ray/tests/kuberay/utils.py
@@ -33,18 +33,6 @@


def setup_logging():
"""Set up logging for kuberay.
For kuberay's autoscaler integration, the autoscaler runs in a sidecar container
in the same pod as the main Ray container, which runs the rest of the Ray
processes.
The logging configuration here is for the sidecar container, but we need the
logs to go to the same place as the head node logs because the autoscaler is
allowed to send scaling events to Ray drivers' stdout. The implementation of
this feature involves the autoscaler communicating to another Ray process
(the log monitor) via logs in that directory.
"""
logging.basicConfig(
level=logging.INFO,
format=LOG_FORMAT,
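The hunk is truncated right after the logging.basicConfig( call. For reference, a configuration like the one being restored typically amounts to the sketch below; the LOG_FORMAT value here is an assumption borrowed from the RLlib formatter later in this diff, not the constant actually defined in utils.py.

import logging

# Assumed format string; the real LOG_FORMAT constant is not shown in this hunk.
LOG_FORMAT = "%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s"


def setup_logging() -> None:
    # Configure the root logger once; every logging.getLogger(...) call in the
    # test utilities then inherits this level and format.
    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)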
3 changes: 0 additions & 3 deletions python/ray/util/client/server/logservicer.py
@@ -54,9 +54,6 @@ def unregister_global(self):


def log_status_change_thread(log_queue, request_iterator):
"""This is run in a separate thread and therefore needs a separate logging
configuration outside of the default ray logging configuration.
"""
std_handler = StdStreamHandler(log_queue)
current_handler = None
root_logger = logging.getLogger("ray")
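The deleted docstring explained why this thread manages its own handler on the "ray" logger instead of relying on whatever the main thread configured. The idea, sketched here with a queue-backed handler as a hypothetical stand-in for StdStreamHandler, is to attach a dedicated handler for the duration of a client session and detach it afterwards so the default Ray logging setup is left untouched.

import logging
import queue
from logging.handlers import QueueHandler

# Hypothetical stand-in for StdStreamHandler: forward records into a queue that
# another component (the client log streamer) can drain.
log_queue: "queue.Queue[logging.LogRecord]" = queue.Queue()


def stream_ray_logs() -> None:
    ray_logger = logging.getLogger("ray")
    handler = QueueHandler(log_queue)
    ray_logger.addHandler(handler)
    try:
        ray_logger.warning("this record also lands in the client's queue")
    finally:
        # Detach so the session-specific handler does not outlive the session.
        ray_logger.removeHandler(handler)


stream_ray_logs()
print(log_queue.get_nowait().getMessage())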
2 changes: 1 addition & 1 deletion release/ray_release/logger.py
@@ -1,7 +1,7 @@
import logging
import sys

- logger = logging.getLogger(__name__)
+ logger = logging.getLogger()
logger.setLevel(logging.INFO)


16 changes: 16 additions & 0 deletions rllib/__init__.py
@@ -1,3 +1,5 @@
+ import logging

from ray._private.usage import usage_lib

# Note: do not introduce unnecessary library dependencies here, e.g. gym.
@@ -14,6 +16,18 @@
from ray.tune.registry import register_trainable


+ def _setup_logger():
+ logger = logging.getLogger("ray.rllib")
+ handler = logging.StreamHandler()
+ handler.setFormatter(
+ logging.Formatter(
+ "%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s"
+ )
+ )
+ logger.addHandler(handler)
+ logger.propagate = False


def _register_all():
from ray.rllib.algorithms.registry import ALGORITHMS, _get_algorithm_class

@@ -24,6 +38,8 @@ def _register_all():
register_trainable(key, _get_algorithm_class(key))


+ _setup_logger()

usage_lib.record_library_usage("rllib")

__all__ = [
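Restoring _setup_logger() gives RLlib back a dedicated "ray.rllib" handler with propagate = False, which is what lets the library format its own messages independently of how the root logger is configured. A small usage sketch, assuming RLlib is installed:

import logging

import ray.rllib  # noqa: F401  (importing the package runs _setup_logger())

# Because "ray.rllib" no longer propagates, its verbosity can be tuned without
# reconfiguring the root logger, and vice versa.
logging.getLogger("ray.rllib").setLevel(logging.INFO)
logging.getLogger().setLevel(logging.ERROR)

logging.getLogger("ray.rllib").info("printed by RLlib's dedicated stream handler")
logging.getLogger(__name__).info("dropped: the root logger is at ERROR")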
9 changes: 9 additions & 0 deletions rllib/algorithms/algorithm.py
@@ -494,6 +494,15 @@ def setup(self, config: AlgorithmConfig) -> None:
self._record_usage(self.config)

self.callbacks = self.config["callbacks"]()
+ log_level = self.config.get("log_level")
+ if log_level in ["WARN", "ERROR"]:
+ logger.info(
+ "Current log_level is {}. For more information, "
+ "set 'log_level': 'INFO' / 'DEBUG' or use the -v and "
+ "-vv flags.".format(log_level)
+ )
+ if self.config.get("log_level"):
+ logging.getLogger("ray.rllib").setLevel(self.config["log_level"])

# Create local replay buffer if necessary.
self.local_replay_buffer = self._create_local_replay_buffer_if_necessary(
16 changes: 4 additions & 12 deletions rllib/algorithms/algorithm_config.py
@@ -385,7 +385,7 @@ def __init__(self, algo_class=None):
# `self.debugging()`
self.logger_creator = None
self.logger_config = None
- self.log_level = DEPRECATED_VALUE
+ self.log_level = "WARN"
self.log_sys_usage = True
self.fake_sampler = False
self.seed = None
@@ -2052,7 +2052,7 @@ def debugging(
*,
logger_creator: Optional[Callable[[], Logger]] = NotProvided,
logger_config: Optional[dict] = NotProvided,
- log_level: Optional[str] = DEPRECATED_VALUE,
+ log_level: Optional[str] = NotProvided,
log_sys_usage: Optional[bool] = NotProvided,
fake_sampler: Optional[bool] = NotProvided,
seed: Optional[int] = NotProvided,
@@ -2086,16 +2086,8 @@
self.logger_creator = logger_creator
if logger_config is not NotProvided:
self.logger_config = logger_config
- if log_level != DEPRECATED_VALUE:
- deprecation_warning(
- old="config.log_level",
- help=(
- "RLlib no longer has a separate logging configuration from the rest"
- " of Ray. Configure logging on the root logger; RLlib messages "
- "will be propagated up the logger hierarchy to be handled there."
- ),
- error=False,
- )
+ if log_level is not NotProvided:
+ self.log_level = log_level
if log_sys_usage is not NotProvided:
self.log_sys_usage = log_sys_usage
if fake_sampler is not NotProvided:
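With the deprecation shim removed, log_level is once again an ordinary debugging() option that defaults to "WARN", and Algorithm.setup() forwards it to the "ray.rllib" logger. An illustrative configuration, assuming RLlib and the CartPole environment are available:

from ray.rllib.algorithms.ppo import PPOConfig

# log_level is a plain debugging() argument again; "INFO" or "DEBUG" (or the
# -v / -vv CLI flags) turn verbose RLlib logging back on.
config = (
    PPOConfig()
    .environment("CartPole-v1")
    .debugging(log_level="INFO", seed=42)
)

# Building the algorithm runs Algorithm.setup(), which applies log_level to the
# "ray.rllib" logger as shown in the algorithm.py hunk above.
algo = config.build()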
3 changes: 3 additions & 0 deletions rllib/evaluation/rollout_worker.py
@@ -521,6 +521,9 @@ def gen_rollouts():
):
tf1.enable_eager_execution()

+ if self.config.log_level:
+ logging.getLogger("ray.rllib").setLevel(self.config.log_level)

if self.worker_index > 1:
disable_log_once_globally() # only need 1 worker to log
elif self.config.log_level == "DEBUG":
1 change: 1 addition & 0 deletions rllib/examples/simulators/sumo/connector.py
@@ -20,6 +20,7 @@

###############################################################################

+ logging.basicConfig()
logger = logging.getLogger(__name__)

###############################################################################
1 change: 1 addition & 0 deletions rllib/examples/simulators/sumo/utils.py
@@ -29,6 +29,7 @@

###############################################################################

+ logging.basicConfig()
logger = logging.getLogger(__name__)

###############################################################################
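Both SUMO example modules get their logging.basicConfig() call back. Without it, a bare module logger has no handler anywhere and falls through to Python's last-resort handler, which only emits WARNING and above, so the examples' INFO output would disappear. A brief standalone illustration (not taken from this commit):

import logging

# With no handler configured anywhere, INFO records are silently dropped by the
# last-resort handler (which only emits WARNING and above).
logging.getLogger("sumo_demo_before").info("lost")

# basicConfig() attaches a StreamHandler to the root logger; passing level= makes
# INFO records from the example scripts visible as well.
logging.basicConfig(level=logging.INFO)
logging.getLogger("sumo_demo_after").info("now visible on stderr")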
3 changes: 2 additions & 1 deletion rllib/examples/sumo_env_local.py
@@ -23,7 +23,8 @@
from ray.rllib.examples.simulators.sumo import marlenvironment
from ray.rllib.utils.test_utils import check_learning_achieved

- logger = logging.getLogger(__name__)
+ logging.basicConfig(level=logging.WARN)
+ logger = logging.getLogger("ppotrain")

parser = argparse.ArgumentParser()
parser.add_argument(
3 changes: 2 additions & 1 deletion rllib/examples/tune/framework.py
@@ -10,7 +10,8 @@
from ray.rllib.algorithms.appo import APPOConfig
from ray.tune import CLIReporter

- logger = logging.getLogger(__name__)
+ logging.basicConfig(level=logging.WARN)
+ logger = logging.getLogger("tune_framework")


def run(smoke_test=False):
