[CI] Try introducing isort. (vllm-project#3495)
rkooo567 authored and jimpang committed Mar 31, 2024
1 parent 0db6f02 commit 07ef5b6
Showing 143 changed files with 504 additions and 458 deletions.
7 changes: 5 additions & 2 deletions .github/workflows/ruff.yml
@@ -25,10 +25,13 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install ruff==0.1.5 codespell==2.2.6 tomli==2.0.1
pip install ruff==0.1.5 codespell==2.2.6 tomli==2.0.1 isort==5.13.2
- name: Analysing the code with ruff
run: |
ruff .
- name: Spelling check with codespell
run: |
codespell --toml pyproject.toml
codespell --toml pyproject.toml
- name: Run isort
run: |
isort . --check-only
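
For reference, the new `isort . --check-only` step fails whenever a module's imports do not follow isort's grouping rules: standard-library imports first, then third-party packages, then local (vllm) imports, with the groups separated by blank lines and sorted within each group. Below is a minimal sketch of an import block the check would accept, using modules that appear elsewhere in this commit; treating `vllm` as first-party is assumed from isort's defaults when run inside this repository.

```python
# Standard library, sorted alphabetically.
import argparse
import time

# Third-party packages form the second group.
import torch
from transformers import AutoTokenizer

# Local (first-party) vllm imports come last.
from vllm import LLM, SamplingParams
from vllm.transformers_utils.tokenizer import get_tokenizer
```
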
3 changes: 1 addition & 2 deletions benchmarks/benchmark_prefix_caching.py
@@ -1,8 +1,7 @@
import argparse
import time

from vllm import LLM
from vllm import SamplingParams
from vllm import LLM, SamplingParams

PROMPT = "You are a helpful assistant in recognizes the content of tables in markdown format. Here is a table as fellows. You need to answer my question about the table.\n# Table\n|Opening|Opening|Sl. No.|Film|Cast|Director|Music Director|Notes|\n|----|----|----|----|----|----|----|----|\n|J A N|9|1|Agni Pushpam|Jayabharathi, Kamalahasan|Jeassy|M. K. Arjunan||\n|J A N|16|2|Priyamvada|Mohan Sharma, Lakshmi, KPAC Lalitha|K. S. Sethumadhavan|V. Dakshinamoorthy||\n|J A N|23|3|Yakshagaanam|Madhu, Sheela|Sheela|M. S. Viswanathan||\n|J A N|30|4|Paalkkadal|Sheela, Sharada|T. K. Prasad|A. T. Ummer||\n|F E B|5|5|Amma|Madhu, Srividya|M. Krishnan Nair|M. K. Arjunan||\n|F E B|13|6|Appooppan|Thikkurissi Sukumaran Nair, Kamal Haasan|P. Bhaskaran|M. S. Baburaj||\n|F E B|20|7|Srishti|Chowalloor Krishnankutty, Ravi Alummoodu|K. T. Muhammad|M. S. Baburaj||\n|F E B|20|8|Vanadevatha|Prem Nazir, Madhubala|Yusufali Kechery|G. Devarajan||\n|F E B|27|9|Samasya|Madhu, Kamalahaasan|K. Thankappan|Shyam||\n|F E B|27|10|Yudhabhoomi|K. P. Ummer, Vidhubala|Crossbelt Mani|R. K. Shekhar||\n|M A R|5|11|Seemantha Puthran|Prem Nazir, Jayabharathi|A. B. Raj|M. K. Arjunan||\n|M A R|12|12|Swapnadanam|Rani Chandra, Dr. Mohandas|K. G. George|Bhaskar Chandavarkar||\n|M A R|19|13|Thulavarsham|Prem Nazir, sreedevi, Sudheer|N. Sankaran Nair|V. Dakshinamoorthy||\n|M A R|20|14|Aruthu|Kaviyoor Ponnamma, Kamalahasan|Ravi|G. Devarajan||\n|M A R|26|15|Swimming Pool|Kamal Haasan, M. G. Soman|J. Sasikumar|M. K. Arjunan||\n\n# Question\nWhat' s the content in the (1,1) cells\n" # noqa: E501

9 changes: 3 additions & 6 deletions benchmarks/benchmark_serving.py
@@ -25,15 +25,12 @@
from typing import AsyncGenerator, List, Tuple

import numpy as np
from backend_request_func import (ASYNC_REQUEST_FUNCS, RequestFuncInput,
RequestFuncOutput)
from tqdm.asyncio import tqdm
from transformers import PreTrainedTokenizerBase
from vllm.transformers_utils.tokenizer import get_tokenizer

from backend_request_func import (
ASYNC_REQUEST_FUNCS,
RequestFuncInput,
RequestFuncOutput,
)
from vllm.transformers_utils.tokenizer import get_tokenizer


@dataclass
2 changes: 1 addition & 1 deletion benchmarks/benchmark_throughput.py
@@ -6,9 +6,9 @@
from typing import List, Optional, Tuple

import torch
from tqdm import tqdm
from transformers import (AutoModelForCausalLM, AutoTokenizer,
PreTrainedTokenizerBase)
from tqdm import tqdm


def sample_requests(
4 changes: 3 additions & 1 deletion benchmarks/kernels/benchmark_mixtral_moe.py
@@ -2,11 +2,13 @@
import os
import sys

from vllm.model_executor.layers.fused_moe import fused_moe, get_config_file_name
import torch
import torch.nn.functional as F
import triton

from vllm.model_executor.layers.fused_moe import (fused_moe,
get_config_file_name)

os.environ['CUDA_VISIBLE_DEVICES'] = '0'


4 changes: 2 additions & 2 deletions benchmarks/kernels/benchmark_paged_attention.py
@@ -1,12 +1,12 @@
from typing import Optional
import argparse
import random
import time
from typing import Optional

import torch

from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, create_kv_caches_with_random
from vllm._C import ops
from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, create_kv_caches_with_random

NUM_BLOCKS = 1024
PARTITION_SIZE = 512
7 changes: 4 additions & 3 deletions benchmarks/kernels/benchmark_rope.py
@@ -1,9 +1,10 @@
import argparse
from itertools import accumulate
from typing import Optional

import argparse
import torch
import nvtx
from itertools import accumulate
import torch

from vllm.model_executor.layers.rotary_embedding import get_rope


2 changes: 1 addition & 1 deletion cmake/hipify.py
@@ -9,8 +9,8 @@
#

import argparse
import shutil
import os
import shutil

from torch.utils.hipify.hipify_python import hipify

2 changes: 1 addition & 1 deletion collect_env.py
@@ -6,10 +6,10 @@
# Run it with `python collect_env.py` or `python -m torch.utils.collect_env`
import datetime
import locale
import os
import re
import subprocess
import sys
import os
from collections import namedtuple

try:
3 changes: 2 additions & 1 deletion docs/source/conf.py
@@ -10,10 +10,11 @@
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.

import logging
import os
import sys

from sphinx.ext import autodoc
import logging

sys.path.insert(0, os.path.abspath(os.path.join('..', '..')))

3 changes: 2 additions & 1 deletion examples/gradio_openai_chatbot_webserver.py
@@ -1,6 +1,7 @@
import argparse
from openai import OpenAI

import gradio as gr
from openai import OpenAI

# Argument parser setup
parser = argparse.ArgumentParser(
2 changes: 1 addition & 1 deletion examples/llm_engine_example.py
@@ -1,7 +1,7 @@
import argparse
from typing import List, Tuple

from vllm import EngineArgs, LLMEngine, SamplingParams, RequestOutput
from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams


def create_test_prompts() -> List[Tuple[str, SamplingParams]]:
4 changes: 2 additions & 2 deletions examples/multilora_inference.py
@@ -5,11 +5,11 @@
Requires HuggingFace credentials for access to Llama2.
"""

from typing import Optional, List, Tuple
from typing import List, Optional, Tuple

from huggingface_hub import snapshot_download

from vllm import EngineArgs, LLMEngine, SamplingParams, RequestOutput
from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams
from vllm.lora.request import LoRARequest


4 changes: 3 additions & 1 deletion examples/offline_inference_distributed.py
@@ -5,11 +5,13 @@
Learn more about Ray Data in https://docs.ray.io/en/latest/data/data.html
"""

from vllm import LLM, SamplingParams
from typing import Dict

import numpy as np
import ray

from vllm import LLM, SamplingParams

# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

42 changes: 42 additions & 0 deletions format.sh
@@ -25,6 +25,7 @@ YAPF_VERSION=$(yapf --version | awk '{print $2}')
RUFF_VERSION=$(ruff --version | awk '{print $2}')
MYPY_VERSION=$(mypy --version | awk '{print $2}')
CODESPELL_VERSION=$(codespell --version)
ISORT_VERSION=$(isort --vn)

# # params: tool name, tool version, required version
tool_version_check() {
@@ -37,6 +38,7 @@ tool_version_check() {
tool_version_check "yapf" $YAPF_VERSION "$(grep yapf requirements-dev.txt | cut -d'=' -f3)"
tool_version_check "ruff" $RUFF_VERSION "$(grep "ruff==" requirements-dev.txt | cut -d'=' -f3)"
tool_version_check "mypy" "$MYPY_VERSION" "$(grep mypy requirements-dev.txt | cut -d'=' -f3)"
tool_version_check "isort" "$ISORT_VERSION" "$(grep isort requirements-dev.txt | cut -d'=' -f3)"
tool_version_check "codespell" "$CODESPELL_VERSION" "$(grep codespell requirements-dev.txt | cut -d'=' -f3)"

YAPF_FLAGS=(
@@ -178,6 +180,46 @@ else
lint_changed
fi

# Run isort on the specified files
isort_check() {
isort "$@"
}

isort_check_all(){
isort .
}

# Run isort on files that differ from the main branch.
isort_check_changed() {
# The `if` guard ensures that the list of filenames is not empty, which
# could cause isort to receive 0 positional arguments, making it hang
# waiting for STDIN.
#
# `diff-filter=ACM` and $MERGEBASE is to ensure we only lint files that
# exist on both branches.
MERGEBASE="$(git merge-base origin/main HEAD)"

if ! git diff --diff-filter=ACM --quiet --exit-code "$MERGEBASE" -- '*.py' '*.pyi' &>/dev/null; then
git diff --name-only --diff-filter=ACM "$MERGEBASE" -- '*.py' '*.pyi' | xargs \
isort
fi
}

# Run isort
# This flag runs isort only on individual files. --files *must* be the first command line
# arg to use this option.
if [[ "$1" == '--files' ]]; then
isort_check "${@:2}"
# If `--all` is passed, then any further arguments are ignored and the
# entire python directory is linted.
elif [[ "$1" == '--all' ]]; then
isort_check_all
else
# Check import ordering only of the files that changed against the main branch.
isort_check_changed
fi
echo 'vLLM isort: Done'

if ! git diff --quiet &>/dev/null; then
echo 'Reformatted files. Please review and stage the changes.'
echo 'Changes not staged for commit:'
4 changes: 4 additions & 0 deletions pyproject.toml
@@ -51,3 +51,7 @@ exclude = "vllm/model_executor/parallel_utils/|vllm/model_executor/models/"
[tool.codespell]
ignore-words-list = "dout, te, indicies"
skip = "./tests/prompts"

[tool.isort]
use_parentheses = true
skip_gitignore = true
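
These two settings keep isort's output consistent with the hunks above: `use_parentheses = true` wraps long `from ... import ...` statements in parentheses instead of backslash continuations, and `skip_gitignore = true` tells isort to skip files excluded by .gitignore. A small sketch of the wrapping behaviour, assuming isort's default grid-style line wrapping (the same layout seen in tests/core/test_block_manager.py above):

```python
# A from-import that exceeds the line-length limit is wrapped with
# parentheses and a hanging indent rather than a trailing backslash:
from vllm.core.block_manager import (AllocStatus, BlockSpaceManager,
                                     UncachedBlockAllocator)
```
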
1 change: 1 addition & 0 deletions requirements-dev.txt
@@ -4,6 +4,7 @@ toml==0.10.2
tomli==2.0.1
ruff==0.1.5
codespell==2.2.6
isort==5.13.2

# type checking
mypy==0.991
10 changes: 5 additions & 5 deletions setup.py
@@ -1,16 +1,16 @@
import io
import logging
import os
import re
import logging
import subprocess
import sys
from shutil import which
from typing import List

from packaging.version import parse, Version
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
from shutil import which
import torch
from packaging.version import Version, parse
from setuptools import Extension, find_packages, setup
from setuptools.command.build_ext import build_ext
from torch.utils.cpp_extension import CUDA_HOME

ROOT_DIR = os.path.dirname(__file__)
6 changes: 3 additions & 3 deletions tests/async_engine/test_chat_template.py
@@ -1,12 +1,12 @@
from dataclasses import dataclass
import os
import pathlib
from dataclasses import dataclass

import pytest

from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
from vllm.entrypoints.openai.protocol import ChatCompletionRequest
from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
from vllm.transformers_utils.tokenizer import get_tokenizer

chatml_jinja_path = pathlib.Path(os.path.dirname(os.path.abspath(
__file__))).parent.parent / "examples/template_chatml.jinja"
2 changes: 1 addition & 1 deletion tests/conftest.py
@@ -6,8 +6,8 @@
from transformers import AutoModelForCausalLM

from vllm import LLM, SamplingParams
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.config import TokenizerPoolConfig
from vllm.transformers_utils.tokenizer import get_tokenizer

_TEST_DIR = os.path.dirname(__file__)
_TEST_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "example.txt")]
9 changes: 5 additions & 4 deletions tests/core/test_block_manager.py
@@ -1,13 +1,14 @@
import pytest
import time
from typing import List

import pytest

from vllm import SamplingParams
from vllm.block import PhysicalTokenBlock
from vllm.core.block_manager import (UncachedBlockAllocator, BlockSpaceManager,
AllocStatus)
from vllm.core.block_manager import (AllocStatus, BlockSpaceManager,
UncachedBlockAllocator)
from vllm.sequence import Logprob, Sequence, SequenceGroup, SequenceStatus
from vllm.utils import Device
from vllm.sequence import Sequence, SequenceGroup, SequenceStatus, Logprob

from .utils import create_dummy_prompt

5 changes: 3 additions & 2 deletions tests/core/test_scheduler.py
@@ -1,10 +1,11 @@
import time
from typing import List

import pytest # noqa
import time

from vllm.config import CacheConfig, SchedulerConfig
from vllm.core.scheduler import Scheduler
from vllm.sequence import SequenceGroup, Logprob
from vllm.sequence import Logprob, SequenceGroup

from .utils import create_dummy_prompt

8 changes: 3 additions & 5 deletions tests/distributed/test_comm_ops.py
@@ -3,14 +3,12 @@
Run `pytest tests/distributed/test_comm_ops.py --forked`.
"""
import pytest
import torch
import ray
import torch

from vllm.model_executor.parallel_utils.communication_op import (
tensor_model_parallel_all_reduce,
tensor_model_parallel_all_gather,
broadcast_tensor_dict,
)
broadcast_tensor_dict, tensor_model_parallel_all_gather,
tensor_model_parallel_all_reduce)
from vllm.test_utils import (init_test_distributed_environment,
multi_process_tensor_parallel)

2 changes: 1 addition & 1 deletion tests/distributed/test_custom_all_reduce.py
@@ -1,6 +1,6 @@
import os
import random

import os
import pytest
import ray
import torch
6 changes: 3 additions & 3 deletions tests/entrypoints/test_guided_processors.py
@@ -1,11 +1,11 @@
# This unit test should be moved to a new
# tests/test_guided_decoding directory.

from transformers import AutoTokenizer
import torch
from transformers import AutoTokenizer

from vllm.model_executor.guided_logits_processors import (RegexLogitsProcessor,
JSONLogitsProcessor)
from vllm.model_executor.guided_logits_processors import (JSONLogitsProcessor,
RegexLogitsProcessor)

TEST_SCHEMA = {
"type": "object",