[App] Scale out/in interval for autoscaler #16093

Merged on Dec 19, 2022 (42 commits)
Changes shown below are from 16 of the 42 commits.

Commits
4bafc59
wip clean up autoscaler ui
akihironitta Dec 15, 2022
1cefa05
Revert "wip clean up autoscaler ui"
akihironitta Dec 15, 2022
1e69092
Apply sherin's suggestion
akihironitta Dec 15, 2022
f9406cc
update example
akihironitta Dec 15, 2022
694627f
print endpoint in the log
akihironitta Dec 15, 2022
96b77ea
Fix import
akihironitta Dec 15, 2022
44cbec2
revert irrelevant change
akihironitta Dec 15, 2022
5d8af44
Merge branch 'master' into feat/autoscaler-ui
Dec 16, 2022
82fef89
Update src/lightning_app/components/auto_scaler.py
Dec 16, 2022
d8f4778
clean up
Dec 16, 2022
c7443b6
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 16, 2022
26f5f4b
test rename
Dec 16, 2022
1e7199c
Merge branch 'feat/autoscaler-ui' of github.com:Lightning-AI/lightnin…
Dec 16, 2022
4f3365c
Changelog
Dec 16, 2022
9e0cb74
adding up/down scale interval arguments
Dec 16, 2022
debb133
changelog
Dec 16, 2022
0781bd1
master
Dec 16, 2022
bdeb1f5
Update src/lightning_app/components/serve/__init__.py
Dec 16, 2022
3d3b592
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 16, 2022
c38768a
test
Dec 16, 2022
98ba462
[pre-commit.ci] auto fixes from pre-commit.com hooks
pre-commit-ci[bot] Dec 16, 2022
4c9436e
Merge branch 'master' into feat/autoscaler-ui
akihironitta Dec 19, 2022
2520758
fix imports
Dec 19, 2022
eb87ee8
mypy
Dec 19, 2022
bbceaef
revert
Dec 19, 2022
8b65680
testfix
Dec 19, 2022
a5ffaae
docs fix
Dec 19, 2022
70dafbc
Merge branch 'feat/autoscaler-ui' into feature/scaleup-down-interval
akihironitta Dec 19, 2022
cfb02a3
Update src/lightning_app/components/serve/auto_scaler.py
Dec 19, 2022
5c6e33f
Merge branch 'master' into feature/scaleup-down-interval
Dec 19, 2022
7f48487
arg change
Dec 19, 2022
2ca55d3
tests
Dec 19, 2022
f64fc70
review
Dec 19, 2022
b188ad9
review
Dec 19, 2022
7d6aba7
fixing comments
Dec 19, 2022
324a06b
Merge branch 'master' into feature/scaleup-down-interval
Dec 19, 2022
66396b1
name change
Dec 19, 2022
11058ca
Update src/lightning_app/components/serve/auto_scaler.py
Dec 19, 2022
778daa5
Update src/lightning_app/components/serve/auto_scaler.py
Dec 19, 2022
2a7e6a2
args change
Dec 19, 2022
541bce7
Merge branch 'feature/scaleup-down-interval' of github.com:Lightning-…
Dec 19, 2022
8177a83
Update src/lightning_app/CHANGELOG.md
Dec 19, 2022
14 changes: 5 additions & 9 deletions examples/app_server_with_auto_scaler/app.py
@@ -1,5 +1,5 @@
 # ! pip install torch torchvision
-from typing import Any, List
+from typing import List

 import torch
 import torchvision
@@ -8,16 +8,12 @@
 import lightning as L


-class RequestModel(BaseModel):
-    image: str  # bytecode
-
-
 class BatchRequestModel(BaseModel):
-    inputs: List[RequestModel]
+    inputs: List[L.app.components.Image]


 class BatchResponse(BaseModel):
-    outputs: List[Any]
+    outputs: List[L.app.components.Number]


 class PyTorchServer(L.app.components.PythonServer):
@@ -81,8 +77,8 @@ def scale(self, replicas: int, metrics: dict) -> int:
        max_replicas=4,
        autoscale_interval=10,
        endpoint="predict",
-       input_type=RequestModel,
-       output_type=Any,
+       input_type=L.app.components.Image,
+       output_type=L.app.components.Number,
        timeout_batching=1,
        max_batch_size=8,
    )
4 changes: 4 additions & 0 deletions src/lightning_app/CHANGELOG.md
@@ -15,6 +15,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

 - Added partial support for fastapi `Request` annotation in `configure_api` handlers ([#16047](https://github.com/Lightning-AI/lightning/pull/16047))

+- Added a nicer UI with URL and examples for the autoscaler component ([#16063](https://github.com/Lightning-AI/lightning/pull/16063))
+
+- Enabled users to have more control over scaling up/down interval ([#16093](https://github.com/Lightning-AI/lightning/pull/16093))
+

 ### Changed
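For context, a minimal usage sketch of the two new arguments follows. It is hypothetical, not part of this PR's diff: `PyTorchServer` stands in for the served work defined in the example app above (which, at this 16-commit point, still passes the old `autoscale_interval` name), and the `scale` policy is illustrative only.

# Hypothetical sketch of the new arguments; PyTorchServer is the work class
# from examples/app_server_with_auto_scaler/app.py above.
import lightning as L


class MyAutoScaler(L.app.components.AutoScaler):
    def scale(self, replicas: int, metrics: dict) -> int:
        # Illustrative policy: target one replica per 8 pending requests.
        return int(metrics["pending_requests"] / 8)


app = L.LightningApp(
    MyAutoScaler(
        PyTorchServer,
        min_replicas=1,
        max_replicas=4,
        autoscale_up_interval=10,  # seconds before an upscale pass is considered
        autoscale_down_interval=60,  # seconds before a downscale pass is considered
        endpoint="predict",
        input_type=L.app.components.Image,
        output_type=L.app.components.Number,
    )
)

Separating the two intervals lets an app scale out quickly under load while releasing replicas more conservatively.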
3 changes: 1 addition & 2 deletions src/lightning_app/components/__init__.py
@@ -1,4 +1,3 @@
-from lightning_app.components.auto_scaler import AutoScaler
 from lightning_app.components.database.client import DatabaseClient
 from lightning_app.components.database.server import Database
 from lightning_app.components.multi_node import (
@@ -10,7 +9,7 @@
 from lightning_app.components.python.popen import PopenPythonScript
 from lightning_app.components.python.tracer import Code, TracerPythonScript
 from lightning_app.components.serve.gradio import ServeGradio
-from lightning_app.components.serve.python_server import Image, Number, PythonServer
+from lightning_app.components.serve.python_server import AutoScaler, Image, Number, PythonServer
 from lightning_app.components.serve.serve import ModelInferenceAPI
 from lightning_app.components.serve.streamlit import ServeStreamlit
 from lightning_app.components.training import LightningTrainerScript, PyTorchLightningScriptRunner
4 changes: 2 additions & 2 deletions src/lightning_app/components/serve/__init__.py
@@ -1,5 +1,5 @@
from lightning_app.components.serve.gradio import ServeGradio
from lightning_app.components.serve.python_server import Image, Number, PythonServer
from lightning_app.components.serve.python_server import AutoScaler, Image, Number, PythonServer
from lightning_app.components.serve.streamlit import ServeStreamlit

__all__ = ["ServeGradio", "ServeStreamlit", "PythonServer", "Image", "Number"]
__all__ = ["ServeGradio", "ServeStreamlit", "PythonServer", "Image", "Number", "AutoScaler"]
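With the re-exports above, `AutoScaler` is importable both from the components package root and from the serve subpackage at this point in the branch. A quick smoke check, assuming this branch is installed:

# Hypothetical check, not part of the diff: both paths resolve to the same class.
from lightning_app.components import AutoScaler as A1
from lightning_app.components.serve import AutoScaler as A2

assert A1 is A2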
src/lightning_app/components/serve/auto_scaler.py
@@ -6,7 +6,7 @@
 import uuid
 from base64 import b64encode
 from itertools import cycle
-from typing import Any, Dict, List, Tuple, Type
+from typing import Any, Dict, List, Optional, Tuple, Type

 import requests
 import uvicorn
@@ -15,11 +15,13 @@
 from fastapi.responses import RedirectResponse
 from fastapi.security import HTTPBasic, HTTPBasicCredentials
 from pydantic import BaseModel
+from starlette.staticfiles import StaticFiles
 from starlette.status import HTTP_401_UNAUTHORIZED

 from lightning_app.core.flow import LightningFlow
 from lightning_app.core.work import LightningWork
 from lightning_app.utilities.app_helpers import Logger
+from lightning_app.utilities.cloud import is_running_in_cloud
 from lightning_app.utilities.imports import _is_aiohttp_available, requires
 from lightning_app.utilities.packaging.cloud_compute import CloudCompute
@@ -114,20 +116,21 @@ class _LoadBalancer(LightningWork):
            requests to be batched. In any case, requests are processed as soon as `max_batch_size` is reached.
        timeout_keep_alive: The number of seconds until it closes Keep-Alive connections if no new data is received.
        timeout_inference_request: The number of seconds to wait for inference.
-       \**kwargs: Arguments passed to :func:`LightningWork.init` like ``CloudCompute``, ``BuildConfig``, etc.
+       **kwargs: Arguments passed to :func:`LightningWork.init` like ``CloudCompute``, ``BuildConfig``, etc.
    """

    @requires(["aiohttp"])
    def __init__(
        self,
-       input_type: BaseModel,
-       output_type: BaseModel,
+       input_type: Type[BaseModel],
+       output_type: Type[BaseModel],
        endpoint: str,
        max_batch_size: int = 8,
        # all timeout args are in seconds
-       timeout_batching: int = 1,
+       timeout_batching: float = 1,
        timeout_keep_alive: int = 60,
        timeout_inference_request: int = 60,
+       work_name: Optional[str] = "API",  # used for displaying the name in the UI
        **kwargs: Any,
    ) -> None:
        super().__init__(cloud_compute=CloudCompute("default"), **kwargs)
@@ -142,6 +145,7 @@ def __init__(
        self._batch = []
        self._responses = {}  # {request_id: response}
        self._last_batch_sent = 0
+       self._work_name = work_name

        if not endpoint.startswith("/"):
            endpoint = "/" + endpoint
@@ -280,6 +284,14 @@ async def update_servers(servers: List[str], authenticated: bool = Depends(authe
        async def balance_api(inputs: self._input_type):
            return await self.process_request(inputs)

+       endpoint_info_page = self._get_endpoint_info_page()
+       if endpoint_info_page:
+           fastapi_app.mount(
+               "/endpoint-info", StaticFiles(directory=endpoint_info_page.serve_dir, html=True), name="static"
+           )
+
+       logger.info(f"Your load balancer has started. The endpoint is 'http://{self.host}:{self.port}{self.endpoint}'")
+
        uvicorn.run(
            fastapi_app,
            host=self.host,
@@ -332,6 +344,56 @@ def send_request_to_update_servers(self, servers: List[str]):
        response = requests.put(f"{self.url}/system/update-servers", json=servers, headers=headers, timeout=10)
        response.raise_for_status()

+   @staticmethod
+   def _get_sample_dict_from_datatype(datatype: Any) -> dict:
+       if hasattr(datatype, "_get_sample_data"):
+           return datatype._get_sample_data()
+
+       datatype_props = datatype.schema()["properties"]
+       out: Dict[str, Any] = {}
+       lut = {"string": "data string", "number": 0.0, "integer": 0, "boolean": False}
+       for k, v in datatype_props.items():
+           if v["type"] not in lut:
+               raise TypeError("Unsupported type")
+           out[k] = lut[v["type"]]
+       return out
+
+   def get_code_sample(self, url: str) -> Optional[str]:
+       input_type: Any = self._input_type
+       output_type: Any = self._output_type
+
+       if not (hasattr(input_type, "request_code_sample") and hasattr(output_type, "response_code_sample")):
+           return None
+       return f"{input_type.request_code_sample(url)}\n{output_type.response_code_sample()}"
+
+   def _get_endpoint_info_page(self) -> Optional["APIAccessFrontend"]:  # noqa: F821
+       try:
+           from lightning_api_access import APIAccessFrontend
+       except ModuleNotFoundError:
+           logger.warn("APIAccessFrontend not found. Please install lightning-api-access to enable the UI")
+           return
+
+       if is_running_in_cloud():
+           url = f"{self._future_url}{self.endpoint}"
+       else:
+           url = f"http://localhost:{self.port}{self.endpoint}"
+
+       frontend_objects = {"name": self._work_name, "url": url, "method": "POST", "request": None, "response": None}
+       code_samples = self.get_code_sample(url)
+       if code_samples:
+           frontend_objects["code_samples"] = code_samples
+           # TODO also set request/response for JS UI
+       else:
+           try:
+               request = self._get_sample_dict_from_datatype(self._input_type)
+               response = self._get_sample_dict_from_datatype(self._output_type)
+           except TypeError:
+               return None
+           else:
+               frontend_objects["request"] = request
+               frontend_objects["response"] = response
+       return APIAccessFrontend(apis=[frontend_objects])
+

 class AutoScaler(LightningFlow):
     """The ``AutoScaler`` can be used to automatically change the number of replicas of the given server in
@@ -341,7 +403,8 @@ class AutoScaler(LightningFlow):
    Args:
        min_replicas: The number of works to start when app initializes.
        max_replicas: The max number of works to spawn to handle the incoming requests.
-       autoscale_interval: The number of seconds to wait before checking whether to upscale or downscale the works.
+       autoscale_up_interval: The number of seconds to wait before checking whether to upscale the server
+       autoscale_down_interval: The number of seconds to wait before checking whether to downscale the server
        endpoint: Provide the REST API path.
        max_batch_size: (auto-batching) The number of requests to process at once.
        timeout_batching: (auto-batching) The number of seconds to wait before sending the requests to process.
@@ -399,12 +462,13 @@ def __init__(
        work_cls: Type[LightningWork],
        min_replicas: int = 1,
        max_replicas: int = 4,
-       autoscale_interval: int = 10,
+       autoscale_up_interval: int = 10,
+       autoscale_down_interval: int = 10,
        max_batch_size: int = 8,
        timeout_batching: float = 1,
        endpoint: str = "api/predict",
-       input_type: BaseModel = Dict,
-       output_type: BaseModel = Dict,
+       input_type: Type[BaseModel] = Dict,
+       output_type: Type[BaseModel] = Dict,
        *work_args: Any,
        **work_kwargs: Any,
    ) -> None:
@@ -418,7 +482,8 @@ def __init__(

        self._input_type = input_type
        self._output_type = output_type
-       self.autoscale_interval = autoscale_interval
+       self.autoscale_up_interval = autoscale_up_interval
+       self.autoscale_down_interval = autoscale_down_interval
        self.max_batch_size = max_batch_size

        if max_replicas < min_replicas:
@@ -438,6 +503,7 @@ def __init__(
            timeout_batching=timeout_batching,
            cache_calls=True,
            parallel=True,
+           work_name=self._work_cls.__name__,
        )
        for _ in range(min_replicas):
            work = self.create_work()
@@ -539,11 +605,6 @@ def num_pending_works(self) -> int:

    def autoscale(self) -> None:
        """Adjust the number of works based on the target number returned by ``self.scale``."""
-       if time.time() - self._last_autoscale < self.autoscale_interval:
-           return
-
-       self.load_balancer.update_servers(self.workers)
-
        metrics = {
            "pending_requests": self.num_pending_requests,
            "pending_works": self.num_pending_works,
@@ -556,23 +617,28 @@
        )

        # upscale
-       num_workers_to_add = num_target_workers - self.num_replicas
-       for _ in range(num_workers_to_add):
-           logger.info(f"Upscaling from {self.num_replicas} to {self.num_replicas + 1}")
-           work = self.create_work()
-           new_work_id = self.add_work(work)
-           logger.info(f"Work created: '{new_work_id}'")
+       if time.time() - self._last_autoscale > self.autoscale_up_interval:
+           num_workers_to_add = num_target_workers - self.num_replicas
+           for _ in range(num_workers_to_add):
+               logger.info(f"Upscaling from {self.num_replicas} to {self.num_replicas + 1}")
+               work = self.create_work()
+               new_work_id = self.add_work(work)
+               logger.info(f"Work created: '{new_work_id}'")

        # downscale
-       num_workers_to_remove = self.num_replicas - num_target_workers
-       for _ in range(num_workers_to_remove):
-           logger.info(f"Downscaling from {self.num_replicas} to {self.num_replicas - 1}")
-           removed_work_id = self.remove_work(self.num_replicas - 1)
-           logger.info(f"Work removed: '{removed_work_id}'")
+       if time.time() - self._last_autoscale > self.autoscale_down_interval:
+           num_workers_to_remove = self.num_replicas - num_target_workers
+           for _ in range(num_workers_to_remove):
+               logger.info(f"Downscaling from {self.num_replicas} to {self.num_replicas - 1}")
+               removed_work_id = self.remove_work(self.num_replicas - 1)
+               logger.info(f"Work removed: '{removed_work_id}'")

+       self.load_balancer.update_servers(self.workers)
        self._last_autoscale = time.time()

    def configure_layout(self):
-       tabs = [{"name": "Swagger", "content": self.load_balancer.url}]
+       tabs = [
+           {"name": "Endpoint Info", "content": f"{self.load_balancer}/endpoint-info"},
+           {"name": "Swagger", "content": self.load_balancer.url},
+       ]
        return tabs
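To make the behavioral change concrete: `autoscale` no longer returns early on a single shared interval. Instead, the upscale and downscale passes are gated independently while sharing one `_last_autoscale` timestamp that is refreshed on every pass. A framework-free sketch of that gating pattern follows; the names are illustrative, not the PR's API:

import time


class IntervalGate:
    """Mimics the gating in AutoScaler.autoscale() above (illustrative only)."""

    def __init__(self, up_interval: float = 10.0, down_interval: float = 60.0) -> None:
        self.up_interval = up_interval
        self.down_interval = down_interval
        self._last_autoscale = time.time()

    def tick(self, current: int, target: int) -> int:
        elapsed = time.time() - self._last_autoscale
        if elapsed > self.up_interval:
            current = max(current, target)  # upscale pass
        if elapsed > self.down_interval:
            current = min(current, target)  # downscale pass
        self._last_autoscale = time.time()  # refreshed on every pass, as in the diff
        return current

With `up_interval=10` and `down_interval=60`, upscaling can react to load more quickly than downscaling, which reclaims idle replicas more conservatively.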