Flags Updates #1096

Merged
merged 10 commits on Apr 14, 2023
2 changes: 2 additions & 0 deletions .env.template
@@ -3,6 +3,8 @@
################################################################################
# EXECUTE_LOCAL_COMMANDS - Allow local command execution (Example: False)
EXECUTE_LOCAL_COMMANDS=False
# AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
AI_SETTINGS_FILE=ai_settings.yaml

################################################################################
### LLM PROVIDER
8 changes: 8 additions & 0 deletions README.md
@@ -134,6 +134,14 @@ To output debug logs:
```
python scripts/main.py --debug
```
### Command Line Arguments
Here are some common arguments you can use when running Auto-GPT:
> Replace anything in angle brackets (`<>`) with a value you want to specify.
* `python scripts/main.py --help` to see a list of all available command line arguments.
* `python scripts/main.py --ai-settings <filename>` to run Auto-GPT with a different AI Settings file.
* `python scripts/main.py --use-memory <memory-backend>` to specify one of 4 memory backends: `local`, `redis`, `pinecone`, or `no_memory`.

> **NOTE**: There are shorthands for some of these flags, for example `-m` for `--use-memory`. Use `python scripts/main.py --help` for more information.

## 🗣️ Speech Mode

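As an illustration of how the flags documented in the README section above combine (the filename `my_ai_settings.yaml` is only a placeholder):

```
python scripts/main.py --ai-settings my_ai_settings.yaml --use-memory local
python scripts/main.py -C my_ai_settings.yaml -m local -y
```

The two lines are equivalent apart from the explicit `-y`; as the `scripts/main.py` changes below show, passing `--ai-settings` already implies skipping the re-prompt.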
2 changes: 2 additions & 0 deletions scripts/config.py
@@ -38,7 +38,9 @@ def __init__(self):
        self.continuous_mode = False
        self.continuous_limit = 0
        self.speak_mode = False
        self.skip_reprompt = False

        self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
        self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
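Taken together, the `.env.template` entry and the `Config` change above mean the settings path can come from the environment before any flag is parsed. A minimal sketch of that lookup, assuming `.env` is loaded via `python-dotenv` as `scripts/config.py` does for its other variables:

```
import os

from dotenv import load_dotenv  # assumption: .env is loaded with python-dotenv

load_dotenv()  # makes AI_SETTINGS_FILE from .env visible to os.getenv

# Same precedence as the Config.__init__ line above:
# environment value first, otherwise the default ai_settings.yaml.
ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
print(f"AI settings will be read from: {ai_settings_file}")
```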
30 changes: 27 additions & 3 deletions scripts/main.py
@@ -185,8 +185,12 @@ def load_variables(config_file="config.yaml"):

def construct_prompt():
    """Construct the prompt for the AI to respond to"""
    config = AIConfig.load()
    if config.ai_name:
    config = AIConfig.load(cfg.ai_settings_file)
    if cfg.skip_reprompt and config.ai_name:
        logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
        logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
        logger.typewriter_log("Goals:", Fore.GREEN, config.ai_goals)
    elif config.ai_name:
        logger.typewriter_log(
            f"Welcome back! ",
            Fore.GREEN,
@@ -274,13 +278,15 @@ def parse_arguments():
    cfg.set_speak_mode(False)

    parser = argparse.ArgumentParser(description='Process arguments.')
    parser.add_argument('--continuous', action='store_true', help='Enable Continuous Mode')
    parser.add_argument('--continuous', '-c', action='store_true', help='Enable Continuous Mode')
    parser.add_argument('--continuous-limit', '-l', type=int, dest="continuous_limit", help='Defines the number of times to run in continuous mode')
    parser.add_argument('--speak', action='store_true', help='Enable Speak Mode')
    parser.add_argument('--debug', action='store_true', help='Enable Debug Mode')
    parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode')
    parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode')
    parser.add_argument('--use-memory', '-m', dest="memory_type", help='Defines which Memory backend to use')
    parser.add_argument('--skip-reprompt', '-y', dest='skip_reprompt', action='store_true', help='Skips the re-prompting messages at the beginning of the script')
    parser.add_argument('--ai-settings', '-C', dest='ai_settings_file', help="Specifies which ai_settings.yaml file to use; will also automatically skip the re-prompt.")
    args = parser.parse_args()

    if args.debug:
@@ -331,6 +337,24 @@ def parse_arguments():
        else:
            cfg.memory_backend = chosen

    if args.skip_reprompt:
        logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
        cfg.skip_reprompt = True

    if args.ai_settings_file:
        file = args.ai_settings_file

        # Validate file
        (validated, message) = utils.validate_yaml_file(file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
        cfg.ai_settings_file = file
        cfg.skip_reprompt = True


def main():
    global ai_name, memory
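Putting the `main.py` pieces together: the new flags are parsed, `--ai-settings` is validated before being accepted, and both paths end up toggling `cfg.skip_reprompt`, which `construct_prompt` then checks to skip the interactive confirmation. A condensed, standalone sketch of that flow (simplified names, not the actual Auto-GPT module):

```
import argparse
import sys

import yaml


def validate_yaml_file(path: str):
    """Mirrors scripts/utils.validate_yaml_file: returns (ok, message)."""
    try:
        with open(path) as fp:
            yaml.load(fp, Loader=yaml.FullLoader)
    except FileNotFoundError:
        return False, f"The file `{path}` wasn't found"
    except yaml.YAMLError as e:
        return False, f"There was an issue while trying to read your AI Settings file: {e}"
    return True, f"Successfully validated `{path}`!"


def parse_flags(argv):
    parser = argparse.ArgumentParser(description="Process arguments.")
    parser.add_argument("--skip-reprompt", "-y", dest="skip_reprompt", action="store_true")
    parser.add_argument("--ai-settings", "-C", dest="ai_settings_file")
    args = parser.parse_args(argv)

    ai_settings_file = "ai_settings.yaml"  # default, as in Config
    skip_reprompt = args.skip_reprompt

    if args.ai_settings_file:
        validated, message = validate_yaml_file(args.ai_settings_file)
        if not validated:
            print(f"FAILED FILE VALIDATION: {message}")
            sys.exit(1)
        ai_settings_file = args.ai_settings_file
        skip_reprompt = True  # --ai-settings implies skipping the re-prompt

    return ai_settings_file, skip_reprompt


if __name__ == "__main__":
    settings_file, skip = parse_flags(sys.argv[1:])
    print(f"settings file: {settings_file}, skip re-prompt: {skip}")
```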
2 changes: 1 addition & 1 deletion scripts/memory/__init__.py
@@ -3,7 +3,7 @@

# List of supported memory backends
# Add a backend to this list if the import attempt is successful
supported_memory = ['local']
supported_memory = ['local', 'no_memory']

try:
    from memory.redismem import RedisMemory
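The diff above only registers `no_memory` in the list of supported backends; the backend class itself is outside this change set. As a rough, purely hypothetical sketch of what a no-op backend of that kind could look like (method names are assumptions, not the project's actual implementation):

```
class NoMemory:
    """Hypothetical sketch: a backend that accepts writes and returns nothing on reads."""

    def __init__(self, cfg=None):
        pass  # nothing to connect to or index

    def add(self, data: str) -> str:
        return ""  # silently discard the text

    def get(self, data: str) -> list:
        return []  # never anything to recall

    def get_relevant(self, data: str, num_relevant: int = 5) -> list:
        return []  # relevance search over an empty store

    def clear(self) -> str:
        return "Memory cleared"

    def get_stats(self):
        return None
```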
15 changes: 15 additions & 0 deletions scripts/utils.py
@@ -1,3 +1,7 @@
import yaml
from colorama import Fore


def clean_input(prompt: str=''):
    try:
        return input(prompt)
@@ -6,3 +10,14 @@ def clean_input(prompt: str=''):
print("Quitting...")
exit(0)


def validate_yaml_file(file: str):
    try:
        with open(file) as fp:
            yaml.load(fp, Loader=yaml.FullLoader)
    except FileNotFoundError:
        return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found")
    except yaml.YAMLError as e:
        return (False, f"There was an issue while trying to read your AI Settings file: {e}")

    return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
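
A quick way to exercise `validate_yaml_file` on its own, run from the `scripts/` directory (the second filename is deliberately missing so the failure path is visible):

```
from utils import validate_yaml_file

ok, message = validate_yaml_file("ai_settings.yaml")
print(ok, message)  # True, plus a success message

ok, message = validate_yaml_file("does_not_exist.yaml")
print(ok, message)  # False, plus a message pointing at the missing file
```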