config.py
import os

from gpt_index import (
    SimpleDirectoryReader,
    GPTSimpleVectorIndex,
    LLMPredictor,
    PromptHelper,
    ServiceContext,
)
from langchain import OpenAI
class Config(object):
    """Base configuration shared by all environments."""
    DEBUG = True
    TESTING = False
class DevelopmentConfig(Config):
    """Configuration used for local development."""
    SECRET_KEY = "this-is-a-super-secret-key"
    # Load the OpenAI API key from the environment; it should not be
    # hard-coded in source control.
    OPENAI_KEY = os.environ.get("OPENAI_KEY", "")


# Expose the key under the variable name the OpenAI client library expects.
os.environ["OPENAI_API_KEY"] = DevelopmentConfig.OPENAI_KEY
# Map environment names to configuration classes. All three environments
# currently share the development configuration.
config = {
    'development': DevelopmentConfig,
    'testing': DevelopmentConfig,
    'production': DevelopmentConfig,
}
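A minimal sketch of how a Flask application factory could consume the `config` mapping above. The `create_app` helper and the `FLASK_ENV` lookup are illustrative assumptions and not part of this file; `app.config.from_object` is the standard Flask mechanism for loading upper-case attributes from a class.

```python
import os

from flask import Flask

from config import config


def create_app(env_name: str = "development") -> Flask:
    """Hypothetical application factory; the name is an assumption."""
    app = Flask(__name__)
    # from_object copies the upper-case attributes of the chosen class
    # (DEBUG, TESTING, SECRET_KEY, OPENAI_KEY) into app.config.
    app.config.from_object(config[env_name])
    return app


app = create_app(os.environ.get("FLASK_ENV", "development"))
```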
def construct_index(directory_path):
    """Build a simple vector index over the documents in ``directory_path``
    and persist it to disk."""
    # Prompt sizing parameters.
    max_input_size = 4096      # maximum input size for the model
    num_outputs = 2000         # number of output tokens
    max_chunk_overlap = 20     # maximum chunk overlap
    chunk_size_limit = 600     # chunk size limit

    # Define the prompt helper from the sizing parameters above.
    prompt_helper = PromptHelper(
        max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit
    )

    # Define the LLM used to answer queries against the index.
    llm_predictor = LLMPredictor(
        llm=OpenAI(temperature=0.5, model_name="text-davinci-003", max_tokens=num_outputs)
    )

    # Load every document in the directory and build the vector index.
    documents = SimpleDirectoryReader(directory_path).load_data()
    service_context = ServiceContext.from_defaults(
        llm_predictor=llm_predictor, prompt_helper=prompt_helper
    )
    index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)

    # Persist the index so it can be reloaded without rebuilding.
    index.save_to_disk('static/vector_index/index.json')
    return index
# Build the index from the bundled documents when this module is imported.
construct_index("static/data")
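For completeness, a sketch of how a Flask route elsewhere in this project might reload the persisted index and answer a question. `load_from_disk` and `query` belong to the same gpt_index API version whose `save_to_disk` is used above; the route name, request shape, and variable names are assumptions for illustration.

```python
from flask import Flask, request, jsonify
from gpt_index import GPTSimpleVectorIndex

app = Flask(__name__)

# Reload the index that construct_index() persisted to disk.
index = GPTSimpleVectorIndex.load_from_disk('static/vector_index/index.json')


@app.route("/ask", methods=["POST"])  # hypothetical route name
def ask():
    question = request.json.get("question", "")
    # query() retrieves relevant chunks from the vector index and asks the
    # configured LLM to compose an answer.
    response = index.query(question)
    return jsonify({"answer": response.response})
```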