
Embed git commit hash into Python source (#319)
This PR embeds the current git commit hash into the Python source:

1. The hash itself is defined in the ~~`__githash__`~~ `__commit__` variable in `vllm/version.py`, alongside `__version__`.
2. `vllm/__init__.py` is updated to (a) import ~~`__githash__`~~ `__commit__` and (b) include it in the `__all__` list.

As a result, you can now do the following:

```python
import vllm
print(vllm.__version__)  # already present
print(vllm.__commit__)  # now available
```
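
One motivating use (a hypothetical snippet, not part of this PR) is surfacing the exact build in logs or bug reports:

```python
import vllm

# Hypothetical diagnostic line combining the release version with the exact commit.
print(f"vLLM {vllm.__version__} (commit {vllm.__commit__})")
```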

**Test Plan**
A new test is added that minimally verifies the new variable is defined and holds a value of at least a bare-minimum length.
dbarbuzzi authored Jun 27, 2024
1 parent 80701e4 commit 7c66172
Showing 4 changed files with 36 additions and 1 deletion.
setup.py (28 additions, 0 deletions)
```python
# @@ -6,6 +6,7 @@
import re
import subprocess
import sys
import warnings
from shutil import which
from typing import Dict, List
```

```python
# @@ -27,6 +28,33 @@ def load_module_from_path(module_name, path):
ROOT_DIR = os.path.dirname(__file__)
logger = logging.getLogger(__name__)


def embed_commit_hash():
    try:
        # Resolve the HEAD commit hash of the current checkout.
        commit_id = subprocess.check_output(["git", "rev-parse", "HEAD"],
                                            encoding="utf-8").strip()

        # Substitute the placeholder in vllm/version.py with the real hash.
        version_file = os.path.join(ROOT_DIR, "vllm", "version.py")
        with open(version_file, encoding="utf-8") as f:
            version_contents = f.read()

        version_contents = version_contents.replace("COMMIT_HASH_PLACEHOLDER",
                                                    f"{commit_id}")

        with open(version_file, "w", encoding="utf-8") as f:
            f.write(version_contents)
    except subprocess.CalledProcessError as e:
        # e.g. not building from a git checkout; the placeholder is left as-is.
        warnings.warn(f"failed to get commit hash:\n{e}",
                      RuntimeWarning,
                      stacklevel=2)
    except Exception as e:
        warnings.warn(f"failed to embed commit hash:\n{e}",
                      RuntimeWarning,
                      stacklevel=2)


embed_commit_hash()

# cannot import envs directly because it depends on vllm,
# which is not installed yet
envs = load_module_from_path('envs', os.path.join(ROOT_DIR, 'vllm', 'envs.py'))
```
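
As a quick sanity check (an illustrative snippet, not part of the diff), the embedded value can be compared against the checkout it was built from, assuming vllm was installed from that checkout and the snippet runs at the repository root:

```python
import subprocess

import vllm

# The hash recorded at build time should match the checkout's current HEAD.
head = subprocess.check_output(["git", "rev-parse", "HEAD"],
                               encoding="utf-8").strip()
assert vllm.__commit__ == head, (vllm.__commit__, head)
```
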
tests/test_embedded_commit.py (5 additions, 0 deletions)
```python
# @@ -0,0 +1,5 @@ (new file)
import vllm


def test_embedded_commit_defined():
    assert len(vllm.__commit__) > 7
```
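
To exercise just this test locally (assuming pytest is installed and vllm was built from this tree), something like the following sketch should work:

```python
# Illustrative: run only the new test file programmatically.
import pytest

raise SystemExit(pytest.main(["tests/test_embedded_commit.py", "-q"]))
```
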
vllm/__init__.py (2 additions, 1 deletion)
```python
# @@ -12,9 +12,10 @@
from vllm.pooling_params import PoolingParams
from vllm.sampling_params import SamplingParams

from .version import __commit__, __version__  # previously: from .version import __version__

__all__ = [
    "__commit__",
    "__version__",
    "LLM",
    "ModelRegistry",
    # ... (remainder of __all__ unchanged)
```
vllm/version.py (1 addition, 0 deletions)
```python
# @@ -1,2 +1,3 @@
# UPSTREAM SYNC: take downstream
__version__ = "0.5.1"
__commit__ = "COMMIT_HASH_PLACEHOLDER"
```
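
For illustration, after a build from a git checkout the installed `vllm/version.py` ends up with the placeholder rewritten (the hash below is made up):

```python
__version__ = "0.5.1"
__commit__ = "0123456789abcdef0123456789abcdef01234567"  # illustrative 40-character SHA
```

If the build runs outside a git checkout (for example, from a plain source tarball), `embed_commit_hash()` emits a warning and the placeholder string is left in place.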

1 comment on commit 7c66172

@github-actions

bigger_is_better

| Benchmark suite | Current: 7c66172 | Previous: 4ab3b8a | Ratio |
| --- | --- | --- | --- |
| `request_throughput`: VLLM Engine throughput, synthetic; model NousResearch/Llama-2-7b-chat-hf; max_model_len 4096; input-len 256, output-len 128, num-prompts 1000; GPU NVIDIA L4 x 1; vllm 0.5.1; Python 3.10.12; torch 2.3.0+cu121 | 2.438282361320215 prompts/s | 2.50032143674207 prompts/s | 1.03 |
| `token_throughput`: same configuration as above | 936.3004267469627 tokens/s | 960.1234317089549 tokens/s | 1.03 |

This comment was automatically generated by workflow using github-action-benchmark.
