Unify the logger usage (#1581)
Signed-off-by: yiliu30 <[email protected]>
Signed-off-by: chensuyue <[email protected]>
yiliu30 authored Jan 29, 2024
1 parent 9a549c3 commit f50baf2
Showing 26 changed files with 95 additions and 111 deletions.
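
The same pattern repeats across the files below: instead of each module building its own handle with Logger().get_logger(), modules now import the shared logger instance that neural_compressor.common exports. A minimal before/after sketch of that pattern (the log message is illustrative, not taken from the diff):

    # Before this commit: each module created its own logger handle.
    from neural_compressor.common import Logger
    logger = Logger().get_logger()
    logger.info("calibration started")

    # After this commit: the shared instance is imported directly.
    from neural_compressor.common import logger
    logger.info("calibration started")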
2 changes: 1 addition & 1 deletion .azure-pipelines/scripts/ut/3x/collect_log_3x.sh
@@ -1,6 +1,6 @@
source /neural-compressor/.azure-pipelines/scripts/change_color.sh

set -xe
set -e
pip install coverage
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/3x/coverage.${1}
coverage_log="/neural-compressor/log_dir/coverage_log"
22 changes: 9 additions & 13 deletions neural_compressor/common/__init__.py
@@ -13,38 +13,34 @@
# limitations under the License.

from neural_compressor.common.utils import (
level,
log,
info,
DEBUG,
debug,
warn,
warning,
error,
fatal,
level,
logger,
Logger,
set_random_seed,
set_workspace,
set_resume_from,
set_tensorboard,
Logger,
logger,
)
from neural_compressor.common.base_config import options


__all__ = [
"level",
"log",
"info",
"DEBUG",
"debug",
"warn",
"warning",
"error",
"fatal",
"options",
"Logger",
"info",
"level",
"logger",
"log",
"warning",
"Logger",
"options",
"set_workspace",
"set_random_seed",
"set_resume_from",
4 changes: 1 addition & 3 deletions neural_compressor/common/base_config.py
@@ -24,7 +24,7 @@
from itertools import product
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union

from neural_compressor.common import Logger
from neural_compressor.common import logger
from neural_compressor.common.utils import (
BASE_CONFIG,
COMPOSABLE_CONFIG,
@@ -36,8 +36,6 @@
OP_NAME_OR_MODULE_TYPE,
)

logger = Logger().get_logger()

__all__ = [
"options",
"register_config",
4 changes: 1 addition & 3 deletions neural_compressor/common/base_tuning.py
@@ -18,11 +18,9 @@
import uuid
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union

from neural_compressor.common import Logger
from neural_compressor.common import logger
from neural_compressor.common.base_config import BaseConfig, ComposableConfig

logger = Logger().get_logger()

__all__ = [
"Evaluator",
"TuningConfig",
54 changes: 29 additions & 25 deletions neural_compressor/common/utils/logger.py
@@ -19,6 +19,18 @@
import logging
import os

__all__ = [
"debug",
"error",
"fatal",
"info",
"level",
"logger",
"log",
"warning",
"Logger",
]


class Logger(object):
"""Logger class."""
@@ -67,68 +79,60 @@ def _pretty_dict(value, indent=0):
return repr(value)


level = Logger().get_logger().level
DEBUG = logging.DEBUG


def log(level, msg, *args, **kwargs):
"""Output log with the level as a parameter."""
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split("\n")):
Logger().get_logger().log(level, line, *args, **kwargs)
Logger().get_logger().log(level, line, *args, **kwargs, stacklevel=2)
else:
Logger().get_logger().log(level, msg, *args, **kwargs)
Logger().get_logger().log(level, msg, *args, **kwargs, stacklevel=2)


def debug(msg, *args, **kwargs):
"""Output log with the debug level."""
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split("\n")):
Logger().get_logger().debug(line, *args, **kwargs)
Logger().get_logger().debug(line, *args, **kwargs, stacklevel=2)
else:
Logger().get_logger().debug(msg, *args, **kwargs)
Logger().get_logger().debug(msg, *args, **kwargs, stacklevel=2)


def error(msg, *args, **kwargs):
"""Output log with the error level."""
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split("\n")):
Logger().get_logger().error(line, *args, **kwargs)
Logger().get_logger().error(line, *args, **kwargs, stacklevel=2)
else:
Logger().get_logger().error(msg, *args, **kwargs)
Logger().get_logger().error(msg, *args, **kwargs, stacklevel=2)


def fatal(msg, *args, **kwargs):
"""Output log with the fatal level."""
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split("\n")):
Logger().get_logger().fatal(line, *args, **kwargs)
Logger().get_logger().fatal(line, *args, **kwargs, stacklevel=2)
else:
Logger().get_logger().fatal(msg, *args, **kwargs)
Logger().get_logger().fatal(msg, *args, **kwargs, stacklevel=2)


def info(msg, *args, **kwargs):
"""Output log with the info level."""
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split("\n")):
Logger().get_logger().info(line, *args, **kwargs)
Logger().get_logger().info(line, *args, **kwargs, stacklevel=2)
else:
Logger().get_logger().info(msg, *args, **kwargs)


def warn(msg, *args, **kwargs):
"""Output log with the warning level."""
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split("\n")):
Logger().get_logger().warning(line, *args, **kwargs)
else:
Logger().get_logger().warning(msg, *args, **kwargs)
Logger().get_logger().info(msg, *args, **kwargs, stacklevel=2)


def warning(msg, *args, **kwargs):
"""Output log with the warning level (Alias of the method warn)."""
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split("\n")):
Logger().get_logger().warning(line, *args, **kwargs)
Logger().get_logger().warning(line, *args, **kwargs, stacklevel=2)
else:
Logger().get_logger().warning(msg, *args, **kwargs)
Logger().get_logger().warning(msg, *args, **kwargs, stacklevel=2)


level = Logger().get_logger().level

logger = Logger().get_logger()
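
The wrappers above now forward stacklevel=2 to the underlying logging calls. In the standard library (Python 3.8+), stacklevel tells logging how many stack frames to skip when resolving %(filename)s, %(funcName)s and %(lineno)d, so a message routed through a thin wrapper is attributed to the wrapper's caller instead of the wrapper itself. A minimal sketch with only the standard logging module (logger name, format, and message are illustrative):

    import logging

    logging.basicConfig(format="%(filename)s:%(lineno)d %(levelname)s %(message)s", level=logging.INFO)
    _inner = logging.getLogger(__name__)

    def info(msg, *args, **kwargs):
        # stacklevel=2 skips this wrapper frame, so the record reports the caller's file and line.
        _inner.info(msg, *args, **kwargs, stacklevel=2)

    info("hello")  # logged location is this call site, not the line inside info()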
@@ -18,13 +18,11 @@

import onnx

from neural_compressor.common import Logger
from neural_compressor.common import logger
from neural_compressor.common.utils import RTN
from neural_compressor.onnxrt.quantization.config import RTNConfig
from neural_compressor.onnxrt.utils.utility import register_algo

logger = Logger().get_logger()


###################### RTN Algo Entry ##################################
@register_algo(name=RTN)
4 changes: 1 addition & 3 deletions neural_compressor/onnxrt/quantization/config.py
@@ -23,12 +23,10 @@

import onnx

from neural_compressor.common import Logger
from neural_compressor.common import logger
from neural_compressor.common.base_config import BaseConfig, register_config, register_supported_configs_for_fwk
from neural_compressor.common.utils import DEFAULT_WHITE_LIST, OP_NAME_OR_MODULE_TYPE, RTN

logger = Logger().get_logger()

FRAMEWORK_NAME = "onnxrt"


4 changes: 1 addition & 3 deletions neural_compressor/onnxrt/quantization/quantize.py
@@ -18,13 +18,11 @@
import onnx
from onnxruntime.quantization import CalibrationDataReader

from neural_compressor.common import Logger
from neural_compressor.common import logger
from neural_compressor.common.base_config import BaseConfig, ComposableConfig, config_registry
from neural_compressor.onnxrt.quantization.config import FRAMEWORK_NAME
from neural_compressor.onnxrt.utils.utility import algos_mapping

logger = Logger().get_logger()


def need_apply(quant_config: BaseConfig, algo_name):
return quant_config.name == algo_name if hasattr(quant_config, "name") else False
4 changes: 1 addition & 3 deletions neural_compressor/onnxrt/utils/onnx_model.py
@@ -22,11 +22,9 @@

import onnx

from neural_compressor.common import Logger
from neural_compressor.common import logger
from neural_compressor.onnxrt.utils.utility import MAXIMUM_PROTOBUF, find_by_name

logger = Logger().get_logger()


class ONNXModel:
"""Build ONNX model."""
4 changes: 1 addition & 3 deletions neural_compressor/onnxrt/utils/utility.py
@@ -18,9 +18,7 @@
import onnx
from packaging.version import Version

from neural_compressor.common import Logger

logger = Logger().get_logger()
from neural_compressor.common import logger

ONNXRT116_VERSION = Version("1.16.0")
ONNXRT1161_VERSION = Version("1.16.1")
@@ -26,11 +26,9 @@
import tensorflow as tf
import yaml

from neural_compressor.common import Logger
from neural_compressor.common import logger
from neural_compressor.tensorflow.utils import deep_get, dump_elapsed_time

logger = Logger().get_logger()


def _add_supported_quantized_objects(custom_objects):
"""Map all the quantized objects."""
4 changes: 1 addition & 3 deletions neural_compressor/tensorflow/quantization/quantize.py
@@ -16,14 +16,12 @@

import tensorflow as tf

from neural_compressor.common import Logger
from neural_compressor.common import logger
from neural_compressor.common.base_config import BaseConfig
from neural_compressor.common.utils import STATIC_QUANT
from neural_compressor.tensorflow.quantization.config import parse_config_from_dict
from neural_compressor.tensorflow.utils import algos_mapping

logger = Logger().get_logger()


def quantize_model(
model: tf.keras.Model, quant_config: BaseConfig, calib_dataloader: Callable = None, calib_iteration: int = 100
4 changes: 1 addition & 3 deletions neural_compressor/torch/algorithms/weight_only_algos.py
@@ -17,13 +17,11 @@

import torch

from neural_compressor.common import Logger
from neural_compressor.common import logger
from neural_compressor.common.utils import GPTQ, RTN
from neural_compressor.torch.quantization.config import GPTQConfig, RTNConfig
from neural_compressor.torch.utils.utility import fetch_module, register_algo, set_module

logger = Logger().get_logger()


###################### RTN Algo Entry ##################################
@register_algo(name=RTN)
5 changes: 1 addition & 4 deletions neural_compressor/torch/quantization/autotune.py
@@ -17,15 +17,12 @@

import torch

from neural_compressor.common import Logger
from neural_compressor.common import logger
from neural_compressor.common.base_config import BaseConfig, get_all_config_set_from_config_registry
from neural_compressor.common.base_tuning import TuningConfig, evaluator, init_tuning
from neural_compressor.torch import quantize
from neural_compressor.torch.quantization.config import FRAMEWORK_NAME

logger = Logger().get_logger()


__all__ = [
"autotune",
"get_all_config_set",
2 changes: 1 addition & 1 deletion neural_compressor/torch/quantization/layers.py
@@ -25,7 +25,7 @@
from torch.autograd import Function
from torch.nn import functional as F

from neural_compressor.common import DEBUG, level, logger
from neural_compressor.common import logger
from neural_compressor.torch.algorithms.weight_only import quant_tensor


4 changes: 1 addition & 3 deletions neural_compressor/torch/quantization/quantize.py
@@ -17,13 +17,11 @@

import torch

from neural_compressor.common import Logger
from neural_compressor.common import logger
from neural_compressor.common.base_config import BaseConfig, ComposableConfig, config_registry
from neural_compressor.torch.quantization.config import FRAMEWORK_NAME
from neural_compressor.torch.utils.utility import WHITE_MODULE_LIST, algos_mapping, get_model_info

logger = Logger().get_logger()


def need_apply(configs_mapping: Dict[Tuple[str, callable], BaseConfig], algo_name):
return any(config.name == algo_name for config in configs_mapping.values())
4 changes: 1 addition & 3 deletions neural_compressor/torch/utils/utility.py
@@ -15,9 +15,7 @@

from typing import Callable, Dict, List, Tuple

from neural_compressor.common import Logger

logger = Logger().get_logger()
from neural_compressor.common import logger

# Dictionary to store a mapping between algorithm names and corresponding algo implementation(function)
algos_mapping: Dict[str, Callable] = {}
4 changes: 1 addition & 3 deletions test/3x/onnxrt/quantization/weight_only/test_rtn.py
@@ -4,9 +4,7 @@

from optimum.exporters.onnx import main_export

from neural_compressor.common import Logger

logger = Logger().get_logger()
from neural_compressor.common import logger


def find_onnx_file(folder_path):
4 changes: 1 addition & 3 deletions test/3x/onnxrt/test_config.py
@@ -7,9 +7,7 @@
import onnx
from optimum.exporters.onnx import main_export

from neural_compressor.common import Logger

logger = Logger().get_logger()
from neural_compressor.common import logger


def find_onnx_file(folder_path):
4 changes: 1 addition & 3 deletions test/3x/tensorflow/test_config.py
@@ -25,9 +25,7 @@
import tensorflow as tf
from tensorflow import keras

from neural_compressor.common import Logger

logger = Logger().get_logger()
from neural_compressor.common import logger


def build_model():
4 changes: 1 addition & 3 deletions test/3x/torch/quantization/weight_only/test_gptq_algo.py
@@ -3,9 +3,7 @@

import torch

from neural_compressor.common import Logger

logger = Logger().get_logger()
from neural_compressor.common import logger


def get_gpt_j():