diff --git a/.hadolint.yaml b/.hadolint.yaml index fafcf3056..7406fc2b2 100644 --- a/.hadolint.yaml +++ b/.hadolint.yaml @@ -1,5 +1,5 @@ failure-threshold: error -# TODO: slowly burn down these lower priority container issues +# TODO: slowly burn down these lower priority container warnings and errors, issue #984 ignored: - DL3007 # use of latest image - DL3042 # pip --no-cache-dir diff --git a/packages/ui/chart/values.yaml b/packages/ui/chart/values.yaml index 2eb63a3a0..e1550bcbd 100644 --- a/packages/ui/chart/values.yaml +++ b/packages/ui/chart/values.yaml @@ -32,7 +32,7 @@ env: - name: DEFAULT_MODEL value: "llama-cpp-python" - name: DEFAULT_SYSTEM_PROMPT - value: "You may be provided with a list of files and their content in the following structure: `[{\"filename\": \"test.pdf\", \"text\": \"some fake text\"}]``. Using the content of these files as context, you should refer to specific files by their filename when relevant and use the text content to provide detailed, accurate, and relevant information or answers. If the user asks questions that can be answered based on the content of the provided files, use the appropriate files text in your response. If the user requests clarification, further details, or specific information about a file, respond using the most relevant file or files. If necessary, combine information from multiple files to form a comprehensive response." + value: "You may be provided with a list of files and their content in the following structure: [{filename: test.pdf, text: some fake text}]. Using the content of these files as context, you should refer to specific files by their filename when relevant and use the text content to provide detailed, accurate, and relevant information or answers. If the user asks questions that can be answered based on the content of the provided files, use the appropriate files text in your response. If the user requests clarification, further details, or specific information about a file, respond using the most relevant file or files. If necessary, combine information from multiple files to form a comprehensive response." - name: DEFAULT_TEMPERATURE value: "0.1" - name: OPENAI_API_KEY diff --git a/packages/ui/zarf.yaml b/packages/ui/zarf.yaml index 27bd5e527..933985f50 100644 --- a/packages/ui/zarf.yaml +++ b/packages/ui/zarf.yaml @@ -35,7 +35,7 @@ variables: sensitive: false - name: SYSTEM_PROMPT description: The default system prompt to use for the LLM - default: "You may be provided with a list of files and their content in the following structure: `[{\"filename\": \"test.pdf\", \"text\": \"some fake text\"}]``. Using the content of these files as context, you should refer to specific files by their filename when relevant and use the text content to provide detailed, accurate, and relevant information or answers. If the user asks questions that can be answered based on the content of the provided files, use the appropriate files text in your response. If the user requests clarification, further details, or specific information about a file, respond using the most relevant file or files. If necessary, combine information from multiple files to form a comprehensive response." + default: "You may be provided with a list of files and their content in the following structure: [{filename: test.pdf, text: some fake text}]. Using the content of these files as context, you should refer to specific files by their filename when relevant and use the text content to provide detailed, accurate, and relevant information or answers. If the user asks questions that can be answered based on the content of the provided files, use the appropriate files text in your response. If the user requests clarification, further details, or specific information about a file, respond using the most relevant file or files. If necessary, combine information from multiple files to form a comprehensive response." prompt: true sensitive: false - name: TEMPERATURE