Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feat/unit tests #15

Merged
merged 17 commits into from
May 19, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 13 additions & 4 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,21 @@ OPENAI_API_KEY="sk-..."
# https://console.anthropic.com/account/keys
ANTHROPIC_API_KEY="sk-ant-..."

DEFAULT_SERVICE="openai/chat/gpt-3.5-turbo"
# when things aren't specified, these defaults will kick in:
#DEFAULT_SERVICE="anthropic/complete/claude-v1.3-100k"
#DEFAULT_SERVICE="openai/chat/gpt-4"
#DEFAULT_SERVICE="openai/chat/gpt-3.5-turbo"

# WARNING/TODO/FIXME: do not specify below options when running pytest
#
#DEFAULT_TEMPERATURE=0.6
#DEFAULT_MAX_TOKENS=1337
#DEFAULT_TOP_K=-1
#DEFAULT_TOP_P=-1

# your Google Cloud Project ID or number
# there is no environment default; leave unset unless needed
GOOGLE_PROJECT="project-name-id"
#GOOGLE_PROJECT="project-name-id"
# the Vertex AI region you will use
# defaults to us-central1
GOOGLE_LOCATION="us-central1"

#GOOGLE_LOCATION="us-central1"
6 changes: 6 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -468,6 +468,12 @@ We'd love your help in making Prr even better! To contribute, please follow thes
5. Push the branch to your fork
6. Create a new Pull Request

### Running unit tests

```sh
$ pytest
```

## License

**prr** - Prompt Runner is released under the [MIT License](/LICENSE).
File renamed without changes.
2 changes: 1 addition & 1 deletion examples/code/html_boilerplate.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
version: 1
prompt:
content_file: 'html_boilerplate'
content_file: '_html_boilerplate'
services:
gpt4_temp7:
model: 'openai/chat/gpt-4'
Expand Down
2 changes: 1 addition & 1 deletion examples/configured/chihuahua.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -52,4 +52,4 @@ expect:
min_response_length: 100
max_response_length: 200
match:
name: /independent/i
name: /independent/i
5 changes: 5 additions & 0 deletions examples/shebang/write_tests
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#!/usr/bin/env prr script

Write tests using pytest for the code below:

{% include prompt_args %}
42 changes: 36 additions & 6 deletions prr/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,10 @@
import os
import sys

from prr.config import load_config

from .utils.run import RunPromptCommand
from .utils.watch import WatchPromptCommand
from prr.commands.run import RunPromptCommand
from prr.commands.watch import WatchPromptCommand
from prr.prompt.model_options import ModelOptions
from prr.utils.config import load_config

config = load_config()

Expand Down Expand Up @@ -38,13 +38,43 @@ def add_common_args(_parser):
action="store_true",
default=False,
)

_parser.add_argument(
"--service",
"-s",
help="Service to use if none is configured (defaults to DEFAULT_SERVICE environment variable)",
default=config["DEFAULT_SERVICE"],
help="Service to use if none is configured (defaults to DEFAULT_SERVICE)",
default=config.get("DEFAULT_SERVICE"),
type=str,
)

_parser.add_argument(
"--temperature",
"-t",
help="Temperature (defaults to DEFAULT_TEMPERATURE)",
type=float,
)

_parser.add_argument(
"--max_tokens",
"-mt",
help="Max tokens to use (defaults to DEFAULT_MAX_TOKENS)",
type=int,
)

_parser.add_argument(
"--top_p",
"-tp",
help="Sets a cumulative probability threshold for selecting candidate tokens, where only tokens with a cumulative probability higher than the threshold are considered, allowing for flexible control over the diversity of the generated output (defaults to DEFAULT_TOP_P).",
type=int,
)

_parser.add_argument(
"--top_k",
"-tk",
help="Determines the number of top-scoring candidate tokens to consider at each decoding step, effectively limiting the diversity of the generated output (defaults to DEFAULT_TOP_K)",
type=int,
)

_parser.add_argument(
"--quiet",
"-q",
Expand Down
45 changes: 23 additions & 22 deletions prr/utils/run.py → prr/commands/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@
from rich.console import Console
from rich.panel import Panel

# from prr.config import config
from prr.prompt import Prompt
from prr.prompt.prompt_loader import PromptConfigLoader
from prr.runner import Runner

console = Console(log_time=False, log_path=False)
Expand All @@ -19,15 +19,15 @@ class RunPromptCommand:
def __init__(self, args, prompt_args=None):
self.args = args
self.prompt_args = prompt_args
self.prompt = None
self.prompt_config = None

if self.args["quiet"]:
self.console = Console(file=StringIO())
else:
self.console = Console(log_time=False, log_path=False)

self.load_prompt_for_path()
self.runner = Runner(self.prompt)
self.runner = Runner(self.prompt_config)

def print_run_results(self, result, run_save_directory):
request = result.request
Expand Down Expand Up @@ -57,19 +57,22 @@ def print_run_results(self, result, run_save_directory):
Panel("[green]" + response.response_content.strip() + "[/green]")
)

completion = f"[blue]Completion length[/blue]: {len(response.response_content)} bytes"
tokens_used = f"[blue]Tokens used[/blue]: {response.tokens_used()}"
elapsed_time = (
f"[blue]Elapsed time[/blue]: {round(result.elapsed_time, 2)}s"
)
completion = f"[blue]Completion length[/blue]: {len(response.response_content)} bytes"
tokens_used = f"[blue]Tokens used[/blue]: {response.tokens_used()}"
elapsed_time = (
f"[blue]Elapsed time[/blue]: {round(result.elapsed_time, 2)}s"
)

self.console.log(f"{completion} {tokens_used} {elapsed_time}")

if run_save_directory:
self.console.log(f"💾 {run_save_directory}")

def run_prompt_on_service(self, service_name, save=False):
service_config = self.prompt.config_for_service(service_name)
# TODO/FIXME: doing all this here just to get the actual options
# calculated after command line, defaults, config, etc
service_config = self.prompt_config.service_with_name(service_name)
service_config.process_option_overrides(self.args)
options = service_config.options

with self.console.status(
Expand All @@ -82,24 +85,22 @@ def run_prompt_on_service(self, service_name, save=False):

status.update(status="running model", spinner="dots8Bit")

result, run_save_directory = self.runner.run_service(service_name, save)
result, run_save_directory = self.runner.run_service(
service_name, self.args, save
)

self.print_run_results(result, run_save_directory)

def load_prompt_for_path(self):
prompt_path = self.args["prompt_path"]

if not os.path.exists(prompt_path) or not os.access(prompt_path, os.R_OK):
self.console.log(
f":x: Prompt file {prompt_path} is not accessible, giving up."
)
exit(-1)

self.console.log(f":magnifying_glass_tilted_left: Reading {prompt_path}")
self.prompt = Prompt(prompt_path, self.prompt_args)

loader = PromptConfigLoader()
self.prompt_config = loader.load_from_path(prompt_path)

def run_prompt(self):
services_to_run = self.prompt.configured_service_names()
services_to_run = self.prompt_config.configured_services()

if services_to_run == []:
if self.args["service"]:
Expand All @@ -109,14 +110,14 @@ def run_prompt(self):
)
else:
self.console.log(
f":x: No services configured for prompt {self.args['prompt_path']}, nor given in command-line. Not even in .env!"
f":x: No services configured for prompt {self.args['prompt_path']}, in ~/.prr_rc nor given in command-line."
)
exit(-1)
else:
self.console.log(f":racing_car: Running services: {services_to_run}")

if not self.args["abbrev"]:
self.console.log(Panel(self.prompt.text()))
self.console.log(Panel(self.prompt_config.template_text()))

for service_name in services_to_run:
self.run_prompt_on_service(service_name, self.args["log"])
for service_name in services_to_run:
self.run_prompt_on_service(service_name, self.args["log"])
10 changes: 6 additions & 4 deletions prr/utils/watch.py → prr/commands/watch.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,9 @@
import os
import time

from prr.commands.run import RunPromptCommand
from prr.prompt import Prompt
from prr.utils.run import RunPromptCommand
from prr.prompt.prompt_loader import PromptConfigLoader


def timestamp_for_file(path):
Expand Down Expand Up @@ -34,9 +35,10 @@ def update_timestamps(self, ready_timestamps=None):
self.file_timestamps = self.current_timestamps()

def setup_files_to_monitor(self):
prompt = Prompt(self.args["prompt_path"], self.prompt_args)
self.files = [prompt.path]
self.files.extend(prompt.dependency_files)
loader = PromptConfigLoader()
prompt_config = loader.load_from_path(self.args["prompt_path"])

self.files = loader.file_dependencies
self.update_timestamps()

def files_changed(self):
Expand Down
34 changes: 0 additions & 34 deletions prr/options.py

This file was deleted.

Loading