Update Extension (openvinotoolkit#719)
* Update Extension

* Update Readme and vsix

* Update package-lock

---------

Co-authored-by: Ilya Lavrenov <[email protected]>
apaniukov and ilya-lavrenov authored Sep 8, 2023
1 parent 79dc880 commit 597662c
Showing 49 changed files with 790 additions and 398 deletions.
68 changes: 8 additions & 60 deletions modules/openvino_code/README.md
@@ -1,32 +1,16 @@
# OpenVINO Code - VSCode extension for AI code completion with OpenVINO™

VSCode extension that helps developers write code with an AI code assistant. OpenVINO Code works with a Large Language Model for Code (Code LLM) deployed on a local or remote server.
VSCode extension that helps developers write code with an AI code assistant.
OpenVINO Code works with a Large Language Model for Code (Code LLM) deployed on a local server
or on a remote server using [Remote Explorer](https://marketplace.visualstudio.com/items?itemName=ms-vscode.remote-explorer).

## Installing Extension
OpenVINO Code provides the following features:
- Inline Code Completion
- Summarization via docstring

The VSCode extension can be installed from a built `*.vsix` file:

1. Open the `Extensions` side bar in VSCode.
2. Click the menu icon (the three-dot "meatballs" icon) in the top right corner of the Extensions side panel.
3. Select the "Install from VSIX..." option and choose the extension file.

For instructions on how to build the extension `vsix` file, please refer to the [Build Extension](#build-extension) section.

## Extension Configuration

To work with the extension, configure the endpoint of the server hosting the Code LLM to which requests will be sent:

1. Open the extension settings.
2. Fill in the `Server URL` parameter with the server endpoint URL.

For instructions on how to start the server locally, please refer to the [server README.md](./server/README.md).

Special tokens can also be configured in the extension settings.

## Working with Extension

TBD

1. Create a new Python file.
2. Try typing `def main():`.
3. Press the shortcut buttons (TBD) for code completion.
@@ -35,41 +19,5 @@ TBD

You can see input to and output from the code generation API:

1. Open the VSCode `OUTPUT` panel.
2. Select the extension output source from the dropdown menu.

## Developing

> **Prerequisite:** You should have `Node.js` installed (v16 and above).
#### Install dependencies

To install dependencies, run the following command from the project root directory:

```
npm install
```

#### Run Extension from Source & Debugging

Open `Run and Debug` side bar in VSCode and click `Launch Extension` (or press `F5`).

#### Build Extension

To build the extension and generate a `*.vsix` file for installation in VSCode, run the following command:

```
npm run vsce:package
```

#### Linting

To perform linting with `ESLint`, execute the following command:

```
npm run lint
```

#### Testing

TBD
1. Open the VSCode Side Panel.
2. Click `Show Server Log` or `Show Extension Log`.
6 changes: 3 additions & 3 deletions modules/openvino_code/package-lock.json


17 changes: 15 additions & 2 deletions modules/openvino_code/package.json
@@ -1,13 +1,13 @@
{
"publisher": "OpenVINO",
"name": "openvino-code-completion",
"version": "0.0.1",
"version": "0.0.2",
"displayName": "OpenVINO Code Completion",
"description": "VSCode extension for AI code completion with OpenVINO",
"icon": "media/logo.png",
"author": "",
"contributors": [],
"license": "License at https://github.com/openvinotoolkit/openvino_contrib/tree/master/modules/openvino_code",
"license": "https://github.com/openvinotoolkit/openvino_contrib/blob/master/LICENSE",
"homepage": "https://docs.openvino.ai/",
"repository": {
"type": "git",
@@ -51,6 +51,7 @@
"lint": "eslint . --max-warnings 0",
"lint:fix": "eslint . --fix",
"lint:side-panel": "npm run lint -w side-panel-ui",
"lint:all": "npm run lint && npm run lint --workspaces",
"test": "node ./out/test/runTest.js",
"vsce:package": "vsce package",
"vsce:publish": "vsce publish",
@@ -164,6 +165,16 @@
{
"title": "OpenVINO Code",
"properties": {
"openvinoCode.model": {
"order": 0,
"type": "string",
"default": "codet5p-220m-py",
"enum": [
"codet5p-220m-py",
"decicoder-1b-openvino-int8"
],
"description": "Which model to use for code generation."
},
"openvinoCode.serverUrl": {
"order": 1,
"type": "string",
@@ -238,6 +249,7 @@
"description": "(Optional) Stop token."
},
"openvinoCode.quoteStyle": {
"order": 11,
"type": "string",
"default": "\"\"\"",
"enum": [
@@ -247,6 +259,7 @@
"description": "Style of quote used with generate docstring command"
},
"openvinoCode.docstringFormat": {
"order": 12,
"type": "string",
"default": "google_summary_only",
"enum": [
12 changes: 7 additions & 5 deletions modules/openvino_code/server/main.py
@@ -1,11 +1,13 @@
import uvicorn
from src.utils import get_parser, setup_logger

from src.app import app, get_generator_dummy
from src.generators import get_generator_dependency
from src.utils import get_logger, get_parser

# Logger should be set up before other imports to propagate logging config to other packages
setup_logger()

logger = get_logger(__name__)
import uvicorn # noqa: E402

from src.app import app, get_generator_dummy # noqa: E402
from src.generators import get_generator_dependency # noqa: E402


def main():
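The reordering in `main.py` above configures logging before `uvicorn` and the `src.*` modules are imported, so loggers created at import time in those packages inherit the configuration (hence the intentional late imports marked `# noqa: E402`). A minimal standalone sketch of the same pattern, using only the standard library (the commented-out imports are placeholders, not part of this repository):

```python
# Minimal sketch: configure the root logger before importing modules that
# create loggers (or log) at import time, so their records propagate to the
# handlers configured here rather than to the default "last resort" handler.
import logging

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(name)s %(levelname)s: %(message)s",
)

# Intentionally late imports, mirroring the `# noqa: E402` pattern above
# (placeholder module names):
# import uvicorn  # noqa: E402
# from src.app import app  # noqa: E402

logger = logging.getLogger(__name__)
logger.info("Logging configured before application imports")
```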
2 changes: 1 addition & 1 deletion modules/openvino_code/server/pyproject.toml
@@ -12,7 +12,7 @@ dependencies = [
'torch @ https://download.pytorch.org/whl/cpu-cxx11-abi/torch-2.0.1%2Bcpu.cxx11.abi-cp311-cp311-linux_x86_64.whl ; sys_platform=="linux" and python_version == "3.11"',
'torch ; sys_platform != "linux"',
'openvino==2023.1.0.dev20230811',
'optimum-intel[openvino]==1.10.1',
'optimum-intel[openvino]==1.11.0',
]

[project.optional-dependencies]
58 changes: 43 additions & 15 deletions modules/openvino_code/server/src/app.py
@@ -3,7 +3,7 @@

from fastapi import Depends, FastAPI
from fastapi.responses import RedirectResponse, StreamingResponse
from pydantic import BaseModel
from pydantic import BaseModel, Field

from src.generators import GeneratorFunctor
from src.utils import get_logger
@@ -26,6 +26,35 @@ class GenerationRequest(BaseModel):
    parameters: GenerationParameters


class GenerationDocStringRequest(BaseModel):
    inputs: str = Field(
        ...,
        description="Function or Class body",
        example=(
            "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n"
            " return 1\n else:\n return fibonacci(n-1) + fibonacci(n-2)"
        ),
    )
    template: str = Field(
        ...,
        description=(
            "Doc string template with tab stops in format ${tab_stop_number:value[type | int | str | description]}"
        ),
        example=(
            ' """\n ${1:}\n\n Parameters\n ----------\n n : ${2:int}\n'
            " ${3:[description]}\n\n Returns\n -------\n ${4:[type]}\n"
            ' ${5:[description]}\n """'
        ),
    )
    format: str = Field(
        ...,
        description="Doc string format passed from extension settings [google | numpy | sphinx | dockblockr | ...]",
        example="numpy",
    )
    definition: str = Field("", description="Function signature", example="def fibonacci(n):")
    parameters: GenerationParameters


class GenerationResponse(BaseModel):
    generated_text: str

@@ -40,7 +69,7 @@ def get_generator_dummy():
@app.on_event("startup")
async def startup_event():
    # This print is an anchor for the VS Code extension to track that the server is started
    SERVER_STARTED_STDOUT_ANCHOR = 'OpenVINO Code Server started'
    SERVER_STARTED_STDOUT_ANCHOR = "OpenVINO Code Server started"
    logger.info(SERVER_STARTED_STDOUT_ANCHOR)
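The anchor string above is what the extension watches for in the server's output to decide that the server is ready. A rough illustration of that handshake from the launching side, using only the standard library (the `server_command` is a placeholder, not the extension's actual launch command):

```python
# Sketch: start the server and wait for the startup anchor before sending requests.
import subprocess
import sys

SERVER_STARTED_STDOUT_ANCHOR = "OpenVINO Code Server started"

# Placeholder command; adjust to however you actually start the server.
server_command = [sys.executable, "main.py"]

proc = subprocess.Popen(
    server_command,
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,  # logging may write to stderr; merge the streams
    text=True,
)

# Read output line by line until the anchor appears, then treat the server as ready.
for line in proc.stdout:
    print(line, end="")
    if SERVER_STARTED_STDOUT_ANCHOR in line:
        print("Server is ready; requests can be sent.")
        break
```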


@@ -59,7 +88,7 @@ async def generate(
    request: GenerationRequest,
    generator: GeneratorFunctor = Depends(get_generator_dummy),
) -> Dict[str, Union[int, str]]:
    logger.info(request)
    logger.info(f"Request:\n{request}")

    start = perf_counter()
    generated_text: str = generator(request.inputs, request.parameters.model_dump())
@@ -70,7 +99,7 @@
    else:
        logger.info(f"Elapsed: {elapsed:.3f}s")

    logger.info(f"Response: {generated_text}")
    logger.info(f"Response:\n{generated_text}")
    return {"generated_text": generated_text}


@@ -85,16 +114,15 @@ async def generate_stream(

@app.post("/api/summarize", status_code=200, response_model=GenerationResponse)
async def summarize(
    request: GenerationRequest,
    request: GenerationDocStringRequest,
    generator: GeneratorFunctor = Depends(get_generator_dummy),
):
    logger.info(request)

    generation_params = request.parameters.model_dump()
    generation_params["repetition_penalty"] = 1.15

    start = perf_counter()
    generated_text: str = generator.summarize(request.inputs, generation_params)
    generated_text: str = generator.summarize(
        request.inputs, request.template, request.definition, request.format, request.parameters.model_dump()
    )
    stop = perf_counter()

    if (elapsed := stop - start) > 1.5:
@@ -108,12 +136,12 @@ async def summarize(

@app.post("/api/summarize_stream", status_code=200)
async def summarize_stream(
    request: GenerationRequest,
    request: GenerationDocStringRequest,
    generator: GeneratorFunctor = Depends(get_generator_dummy),
) -> StreamingResponse:
    logger.info(request)

    generation_params = request.parameters.model_dump()
    generation_params["repetition_penalty"] = 1.15

    return StreamingResponse(generator.summarize_stream(request.inputs, generation_params))
    return StreamingResponse(
        generator.summarize_stream(
            request.inputs, request.template, request.definition, request.format, request.parameters.model_dump()
        )
    )
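With `GenerationDocStringRequest` in place, `/api/summarize` and `/api/summarize_stream` now expect the docstring `template`, `format`, and `definition` alongside the code body. A minimal client sketch for `/api/summarize`, again standard-library only; the host/port and the empty `parameters` object are assumptions, and the field values are loosely based on the `example` values in the Pydantic model:

```python
# Sketch: POST a GenerationDocStringRequest body to /api/summarize and print the docstring.
import json
import urllib.request

SERVER_URL = "http://localhost:8000"  # assumed default; use the extension's Server URL setting

payload = {
    "inputs": (
        "def fibonacci(n):\n"
        "    if n == 0:\n        return 0\n"
        "    elif n == 1:\n        return 1\n"
        "    else:\n        return fibonacci(n-1) + fibonacci(n-2)"
    ),
    "template": '    """\n    ${1:}\n\n    Parameters\n    ----------\n    n : ${2:int}\n        ${3:[description]}\n    """',
    "format": "numpy",
    "definition": "def fibonacci(n):",
    "parameters": {},  # assumes all GenerationParameters fields have defaults
}

request = urllib.request.Request(
    f"{SERVER_URL}/api/summarize",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)

with urllib.request.urlopen(request) as response:
    body = json.loads(response.read().decode("utf-8"))

print(body["generated_text"])
```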