From 3d399fd3b1283990746bf757126ee34dedb14b8c Mon Sep 17 00:00:00 2001
From: dev <4052466+mattvr@users.noreply.github.com>
Date: Sat, 29 Apr 2023 10:50:06 -0700
Subject: [PATCH] Default to gpt-3.5, allow configuring api key from config
file, add --debug flag
---
README.md | 30 ++-
deno.lock | 7 +
lib/ai-types.ts | 72 ++++++
lib/ai.ts | 293 +++++++++++-------------
lib/data.ts | 320 ++++++++++++++------------
mod.ts | 591 ++++++++++++++++++++++++++++--------------------
6 files changed, 749 insertions(+), 564 deletions(-)
create mode 100644 deno.lock
create mode 100644 lib/ai-types.ts
diff --git a/README.md b/README.md
index 19418de..c5d7c7b 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,8 @@
#### [[ Introducing ShellGPT ]](https://twitter.com/matt_fvr/status/1645419221634125828)
-A command-line tool that allows you to interact with GPT-4 directly from your terminal.
+A command-line tool that allows you to interact with GPT-4 directly from your
+terminal.
https://user-images.githubusercontent.com/4052466/230909567-3569f34c-b145-4cd8-8e55-5445bba00ba8.mp4
@@ -39,7 +40,8 @@ gpt "Output a CSV of 10 notable cities in Japan with their name in English & Jap
## Installation
-1. Install the [Deno runtime](https://deno.land/manual/getting_started/installation).
+1. Install the
+ [Deno runtime](https://deno.land/manual/getting_started/installation).
2. Run the following command to install ShellGPT:
@@ -64,9 +66,11 @@ using a key obtained from https://platform.openai.com/account/api-keys:
export OPENAI_API_KEY=...
```
-To use the GPT-4 model (recommended as it produces much better results), you'll need to apply for access via [this link](https://openai.com/waitlist/gpt-4-api). Note that it is more expensive, however.
+To use the GPT-4 model (recommended as it produces much better results), you'll
+need to apply for access via [this link](https://openai.com/waitlist/gpt-4-api).
+Note that it is more expensive, however.
-To configure the specific ChatGPT model and system prompt used, you can type
+To configure the specific ChatGPT model, system prompt, and more, you can type
`gpt --config`
## Commands and Arguments
@@ -75,13 +79,12 @@ To configure the specific ChatGPT model and system prompt used, you can type
These commands are used for general ShellGPT-wide operations.
-| Argument | Alias | Description |
-| ------------- | ---------- | ---------------------------------------------------- |
-| --help | | Show help |
-| --config | --cfg | Configure the model and system prompt |
-| --update | | Update ShellGPT to the latest version |
-| --history | -h | List all past conversations |
-
+| Argument | Alias | Description |
+| --------- | ----- | ------------------------------------- |
+| --help | | Show help |
+| --config | --cfg | Configure the model and system prompt |
+| --update | | Update ShellGPT to the latest version |
+| --history | -h | List all past conversations |
### Chat Commands
@@ -107,13 +110,15 @@ These commands are for specific chats, either new or existing.
| --wpm | | Words per minute, control the speed of typing output |
| --max_tokens | --max | Maximum number of tokens to generate |
| --model | -m | Manually use a different OpenAI model |
+| --debug | | Print OpenAI API information |
## Features
Shell-GPT has some useful and unique features:
- Execute shell commands with a confirmation step (just pass `-x`).
-- Supports input/output piping for simple file creation and transformation (see [Basic Usage](#basic-usage)).
+- Supports input/output piping for simple file creation and transformation (see
+ [Basic Usage](#basic-usage)).
- Utility commands for convenient chat history viewing and editing.
- Smooth, streaming output, resembling human typing rather than delayed or
choppy responses.
@@ -168,6 +173,7 @@ gpt --fast --wpm 1500 "How can I improve my programming skills?"
```
Interactive coding session:
+
```sh
gpt --code --repl "Write a typescript function that prints the first 100 primes"
```
diff --git a/deno.lock b/deno.lock
new file mode 100644
index 0000000..4b45dd4
--- /dev/null
+++ b/deno.lock
@@ -0,0 +1,7 @@
+{
+ "version": "2",
+ "remote": {
+ "https://deno.land/std@0.181.0/_util/asserts.ts": "178dfc49a464aee693a7e285567b3d0b555dc805ff490505a8aae34f9cfb1462",
+ "https://deno.land/std@0.181.0/flags/mod.ts": "4d829c5bd1d657799cdeb487c6e418960efc6f4d8ce6cadc38a54b9ce266160a"
+ }
+}
diff --git a/lib/ai-types.ts b/lib/ai-types.ts
new file mode 100644
index 0000000..0e1ed43
--- /dev/null
+++ b/lib/ai-types.ts
@@ -0,0 +1,72 @@
+export type Role = "system" | "user" | "assistant";
+
+export interface Message {
+ role: Role;
+ content: string;
+}
+
+export interface ChatCompletionRequest {
+ model: 'gpt-3.5-turbo' | 'gpt-4' | string
+ messages: Message[];
+ temperature?: number;
+ top_p?: number;
+ n?: number;
+ stream?: boolean;
+ stop?: string | string[];
+ max_tokens?: number;
+ presence_penalty?: number;
+ frequency_penalty?: number;
+ logit_bias?: Record;
+ user?: string;
+}
+
+export interface Choice {
+ index: number;
+ message: Message;
+ finish_reason: "stop" | "length" | "content_filter" | "null";
+}
+
+export interface ChatCompletionResponse {
+ id: string;
+ object: "chat.completion";
+ created: number;
+ choices: Choice[];
+ usage: {
+ prompt_tokens: number;
+ completion_tokens: number;
+ total_tokens: number;
+ };
+}
+
+export interface SpeechToTextRequest {
+ file: File;
+ model: 'whisper-1';
+}
+
+export interface Delta {
+ role?: Role;
+ content?: string;
+}
+
+export interface StreamChoice {
+ delta: Delta;
+ index: number;
+ finish_reason: "stop" | "length" | "content_filter" | "null" | null;
+}
+
+export interface ChatCompletionStreamResponse {
+ id: string;
+ object: "chat.completion.chunk";
+ created: number;
+ model: string;
+ choices: StreamChoice[];
+}
+
+export interface ChatCompetionStreamError {
+ "error": {
+ "message": string | null,
+ "type": string | null
+ "param": string | null
+ "code": string | null
+ }
+}
\ No newline at end of file
diff --git a/lib/ai.ts b/lib/ai.ts
index 7cc7132..bd409a5 100644
--- a/lib/ai.ts
+++ b/lib/ai.ts
@@ -1,225 +1,204 @@
-const OPENAI_API_KEY = Deno.env.get('OPENAI_API_KEY')
-const OPENAI_CHAT_URL = "https://api.openai.com/v1/chat/completions"
-
-if (!OPENAI_API_KEY) {
- console.error('Please set the OPENAI_API_KEY environment variable')
- Deno.exit(1)
-}
-
-const config = {
- debug: false
-}
-
-type Role = "system" | "user" | "assistant";
-
-export interface Message {
- role: Role;
- content: string;
-}
-
-export interface ChatCompletionRequest {
- model: 'gpt-3.5-turbo' | 'gpt-4' | string
- messages: Message[];
- temperature?: number;
- top_p?: number;
- n?: number;
- stream?: boolean;
- stop?: string | string[];
- max_tokens?: number;
- presence_penalty?: number;
- frequency_penalty?: number;
- logit_bias?: Record;
- user?: string;
-}
-
-interface Choice {
- index: number;
- message: Message;
- finish_reason: "stop" | "length" | "content_filter" | "null";
-}
-
-interface ChatCompletionResponse {
- id: string;
- object: "chat.completion";
- created: number;
- choices: Choice[];
- usage: {
- prompt_tokens: number;
- completion_tokens: number;
- total_tokens: number;
- };
-}
-
-interface SpeechToTextRequest {
- file: File; // You'll need to replace the file path in your curl call with a File object
- model: 'whisper-1';
-}
-
-interface Delta {
- role?: Role;
- content?: string;
-}
-
-interface StreamChoice {
- delta: Delta;
- index: number;
- finish_reason: "stop" | "length" | "content_filter" | "null" | null;
-}
-
-interface ChatCompletionStreamResponse {
- id: string;
- object: "chat.completion.chunk";
- created: number;
- model: string;
- choices: StreamChoice[];
-}
-
-interface ChatCompetionStreamError {
- "error": {
- "message": string | null,
- "type": string | null
- "param": string | null
- "code": string | null
+import {
+ ChatCompetionStreamError,
+ ChatCompletionRequest,
+ ChatCompletionResponse,
+ ChatCompletionStreamResponse,
+} from "./ai-types.ts";
+import { loadConfig } from "./data.ts";
+
+let OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
+const OPENAI_CHAT_URL = "https://api.openai.com/v1/chat/completions";
+
+export const aiConfig = {
+ debug: false,
+};
+
+export const checkAPIKey = async () => {
+ if (OPENAI_API_KEY) {
+ return;
+ }
+ const config = await loadConfig();
+ if (config?.openAiApiKey) {
+ OPENAI_API_KEY = config.openAiApiKey;
+ return;
}
-}
-export const getChatResponse_withRetries = async (req: ChatCompletionRequest, retries = 3): Promise => {
- let response = null
+ console.error(
+ "Please set the OPENAI_API_KEY environment variable in your current shell, or configure using `gpt --config`",
+ );
+ Deno.exit(1);
+};
+
+export const getChatResponse_withRetries = async (
+ req: ChatCompletionRequest,
+ retries = 3,
+): Promise => {
+ let response = null;
for (let i = 0; i < retries; i++) {
- response = await getChatResponse(req)
+ response = await getChatResponse(req);
if (response) {
- break
+ break;
}
}
- return response
-}
-
-export const getChatResponse = async (req: ChatCompletionRequest): Promise => {
- if (config.debug) {
- console.log('Request to OpenAI', req)
+ return response;
+};
+
+export const getChatResponse = async (
+ req: ChatCompletionRequest,
+): Promise => {
+ if (aiConfig.debug) {
+ console.log("Request to OpenAI", req);
}
+ await checkAPIKey();
const newReq = {
...req,
- messages: [...req.messages]
- }
+ messages: [...req.messages],
+ };
const response = await fetch(OPENAI_CHAT_URL, {
- method: 'POST',
+ method: "POST",
headers: {
- 'Authorization': `Bearer ${OPENAI_API_KEY}`,
- 'Content-Type': 'application/json'
+ "Authorization": `Bearer ${OPENAI_API_KEY}`,
+ "Content-Type": "application/json",
},
- body: JSON.stringify({ ...newReq, stream: false })
- })
+ body: JSON.stringify({ ...newReq, stream: false }),
+ });
try {
- const data = await response.json() as ChatCompletionResponse
- if (config.debug) {
- console.log('Response from OpenAI', data)
+ const data = await response.json() as ChatCompletionResponse;
+ if (aiConfig.debug) {
+ console.log("Response from OpenAI", data);
}
- const content = data?.choices?.[0]?.message?.content
+ const content = data?.choices?.[0]?.message?.content;
if (!content) {
- console.error('Invalid response from OpenAI', data)
- return null
+ console.error("Invalid response from OpenAI", data);
+ return null;
}
- return content
+ return content;
} catch (e) {
- console.error('Failed to reply', e)
- return null
+ console.error("Failed to reply", e);
+ return null;
}
-}
+};
export type StreamResponse = {
done: boolean;
value: string | null;
delta: string | null;
-}
+};
-export const getChatResponse_stream = async (req: ChatCompletionRequest): Promise> => {
- if (config.debug) {
- console.log('Request to OpenAI', req)
+export const getChatResponse_stream = async (
+ req: ChatCompletionRequest,
+): Promise> => {
+ if (aiConfig.debug) {
+ console.log("Request to OpenAI", req);
}
+ await checkAPIKey();
- req.stream = true
+ req.stream = true;
const response = await fetch(OPENAI_CHAT_URL, {
- method: 'POST',
+ method: "POST",
headers: {
- 'Authorization': `Bearer ${OPENAI_API_KEY}`,
- 'Content-Type': 'application/json'
+ "Authorization": `Bearer ${OPENAI_API_KEY}`,
+ "Content-Type": "application/json",
},
- body: JSON.stringify(req)
- })
+ body: JSON.stringify(req),
+ });
- const decoder = new TextDecoder('utf-8')
- const reader = response.body!.getReader()
+ const decoder = new TextDecoder("utf-8");
+ const reader = response.body!.getReader();
- let fullMessage = ''
+ let fullMessage = "";
const iterator = {
async next(): Promise> {
try {
- const { done, value } = await reader.read()
+ const { done, value } = await reader.read();
if (done) {
// DONE by stream close
- return { done: true, value: { done: true, value: fullMessage, delta: null } }
+ return {
+ done: true,
+ value: { done: true, value: fullMessage, delta: null },
+ };
}
// TODO: handle multiple messages
- const rawData = decoder.decode(value)
+ const rawData = decoder.decode(value);
const chunks = rawData.split("\n\n");
- let newContent = ''
+ let newContent = "";
// accumulate the chunks
for (const chunk of chunks) {
- const data = chunk.replace('data: ', '')
+ const data = chunk.replace("data: ", "");
if (!data) {
- continue
+ continue;
}
- if (data === '[DONE]') {
+ if (data === "[DONE]") {
// DONE by final message
- return { done: true, value: { done: true, value: fullMessage, delta: null } }
+ return {
+ done: true,
+ value: { done: true, value: fullMessage, delta: null },
+ };
}
- let parsed = null
+ let parsed = null;
// remove the "data: " from the beginning of the message
try {
- parsed = JSON.parse(data) as ChatCompletionStreamResponse
+ parsed = JSON.parse(data) as ChatCompletionStreamResponse;
if (parsed.choices[0].finish_reason) {
// DONE by incoming final message
- return { done: true, value: { done: true, value: fullMessage, delta: null } }
+ return {
+ done: true,
+ value: { done: true, value: fullMessage, delta: null },
+ };
}
- newContent += parsed.choices[0]?.delta?.content ?? ''
- }
- catch (e: unknown) {
+ newContent += parsed.choices[0]?.delta?.content ?? "";
+ } catch (e: unknown) {
// throw with added context
- const error = (parsed as unknown as ChatCompetionStreamError | null)?.error
- if (error?.code === 'model_not_found') {
- console.error(`Failed to find selected OpenAI model ${req.model}. Select a valid model from: https://platform.openai.com/docs/models/model-endpoint-compatibility by using \`gpt --config\`
-
- You may need to apply for access to GPT-4 via https://openai.com/waitlist/gpt-4-api. Use "gpt-3.5-turbo" in the meantime.`)
+ const error = (parsed as unknown as ChatCompetionStreamError | null)
+ ?.error;
+ if (error?.code === "model_not_found") {
+ console.error(
+ `%cFailed to find selected OpenAI model: ${req.model}.\n\nSelect a valid model by using \`gpt --config\`%c
+
+You may need to apply for access to GPT-4 via https://openai.com/waitlist/gpt-4-api.\n\n%cUse "gpt-3.5-turbo"%c (or another model from https://platform.openai.com/docs/models/model-endpoint-compatibility) in the meantime.`,
+ "font-weight: bold;",
+ "font-weight: normal;",
+ "font-weight: bold;",
+ "font-weight: normal;",
+ );
+ Deno.exit(1);
+ } else {
+ console.error(data);
}
- else {
- console.error(data)
- }
- throw e
+ throw e;
}
}
- fullMessage = (fullMessage + newContent)
- return { value: { done: false, value: fullMessage, delta: newContent || null }, done: false }
- }
- catch (e: unknown) {
- console.error('Failed to parse message', e)
- return { value: { done: true, value: fullMessage + '[error]', delta: '[error]' }, done: true }
+ fullMessage = fullMessage + newContent;
+ return {
+ value: { done: false, value: fullMessage, delta: newContent || null },
+ done: false,
+ };
+ } catch (e: unknown) {
+ console.error("Failed to parse message", e);
+ return {
+ value: {
+ done: true,
+ value: fullMessage + "[error]",
+ delta: "[error]",
+ },
+ done: true,
+ };
}
},
[Symbol.asyncIterator]() {
- return this
- }
- }
+ return this;
+ },
+ };
- return iterator
-}
\ No newline at end of file
+ return iterator;
+};
diff --git a/lib/data.ts b/lib/data.ts
index 9898e27..e78f159 100644
--- a/lib/data.ts
+++ b/lib/data.ts
@@ -1,30 +1,34 @@
-import { ChatCompletionRequest } from "./ai.ts"
+import { ChatCompletionRequest } from "./ai-types.ts";
import { genDescriptiveNameForChat } from "./prompts.ts";
-export const VERSION = '0.2.8'
-export const AUTO_UPDATE_PROBABILITY = 0.1
+export const VERSION = "0.3.0";
+export const AUTO_UPDATE_PROBABILITY = 0.1;
export type Config = {
- lastUpdated: string,
- version: string | 'unknown',
- command?: string,
- autoUpdate: 'never' | 'prompt' | 'always',
- latestName?: string,
- hasDescriptiveName?: boolean,
- model?: string,
- systemPrompt?: string,
-}
+ lastUpdated: string;
+ version: string | "unknown";
+ command?: string;
+ autoUpdate: "never" | "prompt" | "always";
+ latestName?: string;
+ hasDescriptiveName?: boolean;
+ model?: string;
+ systemPrompt?: string;
+ openAiApiKey?: string;
+};
export const DEFAULT_CONFIG: Config = {
lastUpdated: new Date().toISOString(),
version: VERSION,
- autoUpdate: 'prompt',
- command: 'gpt',
+ autoUpdate: "prompt",
+ command: "gpt",
latestName: undefined,
hasDescriptiveName: undefined,
model: undefined,
systemPrompt: undefined,
-}
+ openAiApiKey: undefined,
+};
+
+let cachedConfig: Config | null = null;
// set to $HOME
const getBasePath = () => {
@@ -34,51 +38,52 @@ const getBasePath = () => {
throw new Error(`Environment variable ${homeEnvVar} not found.`);
}
return `${home}/.gpt`;
-}
+};
-const getOrCreatePath = async (path: string, isJsonFile = false): Promise => {
+const getOrCreatePath = async (
+ path: string,
+ isJsonFile = false,
+): Promise => {
try {
if (await Deno.stat(path)) {
return path;
}
- }
- catch (_) {
+ } catch (_) {
// pass
}
if (isJsonFile) {
- const dir = path.split('/').slice(0, -1).join('/')
+ const dir = path.split("/").slice(0, -1).join("/");
await Deno.mkdir(
dir,
{ recursive: true },
);
- await Deno.writeTextFile(path, '{}')
+ await Deno.writeTextFile(path, "{}");
return path;
- }
- else {
+ } else {
await Deno.mkdir(
path,
{ recursive: true },
);
return path;
}
-}
+};
const getOrCreateHistoryPath = async (): Promise => {
const path = `${getBasePath()}/history`;
return await getOrCreatePath(path);
-}
+};
const getOrCreateHistorySnippetsFile = async (): Promise => {
const path = `${getBasePath()}/history-snippets.json`;
return await getOrCreatePath(path, true);
-}
+};
export const getOrCreateConfigFile = async (): Promise => {
- const path = `${getBasePath()}/config.json`
+ const path = `${getBasePath()}/config.json`;
return await getOrCreatePath(path, true);
-}
+};
const getDatetimeString = () => {
const now = new Date();
@@ -88,78 +93,88 @@ const getDatetimeString = () => {
now.getHours().toString().padStart(2, "0") + "-" +
now.getMinutes().toString().padStart(2, "0") + "-" +
now.getSeconds().toString().padStart(2, "0");
- return formattedDate
-}
+ return formattedDate;
+};
-const meta_getLatest = async (): Promise<{ name: string, request: ChatCompletionRequest } | null> => {
+const meta_getLatest = async (): Promise<
+ { name: string; request: ChatCompletionRequest } | null
+> => {
try {
- const config = await loadConfig()
+ const config = await loadConfig();
if (!config?.latestName) {
- return null
+ return null;
}
- const latestFullPath = `${await getOrCreateHistoryPath()}/${config.latestName}.json`
+ const latestFullPath =
+ `${await getOrCreateHistoryPath()}/${config.latestName}.json`;
- const chatData = await Deno.readTextFile(latestFullPath)
- const chatJson = JSON.parse(chatData)
+ const chatData = await Deno.readTextFile(latestFullPath);
+ const chatJson = JSON.parse(chatData);
return {
name: config.latestName,
- request: chatJson
- }
- }
- catch {
- return null
+ request: chatJson,
+ };
+ } catch {
+ return null;
}
-}
+};
-const meta_getChat = async (chatName: string): Promise<{ name: string, request: ChatCompletionRequest } | null> => {
+const meta_getChat = async (
+ chatName: string,
+): Promise<{ name: string; request: ChatCompletionRequest } | null> => {
try {
- const fullPath = `${await getOrCreateHistoryPath()}/${chatName}.json`
+ const fullPath = `${await getOrCreateHistoryPath()}/${chatName}.json`;
- const chatData = await Deno.readTextFile(fullPath)
- const chatJson = JSON.parse(chatData)
+ const chatData = await Deno.readTextFile(fullPath);
+ const chatJson = JSON.parse(chatData);
return {
name: fullPath,
- request: chatJson
- }
- }
- catch {
- return null
+ request: chatJson,
+ };
+ } catch {
+ return null;
}
-}
+};
-const meta_write = async (req: ChatCompletionRequest, isNewOrName: boolean | string) => {
+const meta_write = async (
+ req: ChatCompletionRequest,
+ isNewOrName: boolean | string,
+) => {
try {
- const config = await loadConfig()
- let latestName = isNewOrName === true ? getDatetimeString() : typeof isNewOrName === 'string' ? isNewOrName : (config?.latestName ?? getDatetimeString())
-
- const latestFullPath = `${await getOrCreateHistoryPath()}/${latestName}.json`
- let finalFullPath = latestFullPath
-
- let hasDescriptiveName = !isNewOrName && config?.hasDescriptiveName
+ const config = await loadConfig();
+ let latestName = isNewOrName === true
+ ? getDatetimeString()
+ : typeof isNewOrName === "string"
+ ? isNewOrName
+ : (config?.latestName ?? getDatetimeString());
+
+ const latestFullPath =
+ `${await getOrCreateHistoryPath()}/${latestName}.json`;
+ let finalFullPath = latestFullPath;
+
+ let hasDescriptiveName = !isNewOrName && config?.hasDescriptiveName;
if (!hasDescriptiveName && req.messages.length >= 5) {
// Write out a descriptive name for continued chats of a certain length
- const descName = await genDescriptiveNameForChat(req)
+ const descName = await genDescriptiveNameForChat(req);
if (descName) {
- latestName = descName
- finalFullPath = `${await getOrCreateHistoryPath()}/${latestName}.json`
- hasDescriptiveName = true
+ latestName = descName;
+ finalFullPath = `${await getOrCreateHistoryPath()}/${latestName}.json`;
+ hasDescriptiveName = true;
}
}
- const chatJson: ChatCompletionRequest = { ...req }
+ const chatJson: ChatCompletionRequest = { ...req };
try {
- const chatData = await Deno.readTextFile(latestFullPath)
+ const chatData = await Deno.readTextFile(latestFullPath);
// merge messages
chatJson.messages = [
...JSON.parse(chatData),
- ...chatJson.messages
- ]
+ ...chatJson.messages,
+ ];
// Delete since we're about to rewrite it
- await Deno.remove(latestFullPath)
- }
- catch (_) {
+ await Deno.remove(latestFullPath);
+ } catch (_) {
// failed but its all good
}
@@ -167,125 +182,138 @@ const meta_write = async (req: ChatCompletionRequest, isNewOrName: boolean | str
saveConfig({
...config,
latestName,
- hasDescriptiveName
+ hasDescriptiveName,
}),
- Deno.writeTextFile(finalFullPath, JSON.stringify(chatJson))
- ])
-
- }
- catch (e) {
- console.error(e)
- return null
+ Deno.writeTextFile(finalFullPath, JSON.stringify(chatJson)),
+ ]);
+ } catch (e) {
+ console.error(e);
+ return null;
}
-}
-
-export const writeChat = async (req: ChatCompletionRequest, isNewOrName: boolean | string = true) => {
- await meta_write(req, isNewOrName)
-}
-
-export const getChat = async (name: string | undefined): Promise => {
+};
+
+export const writeChat = async (
+ req: ChatCompletionRequest,
+ isNewOrName: boolean | string = true,
+) => {
+ await meta_write(req, isNewOrName);
+};
+
+export const getChat = async (
+ name: string | undefined,
+): Promise => {
if (name) {
- return (await meta_getChat(name))?.request || null
+ return (await meta_getChat(name))?.request || null;
}
- return (await meta_getLatest())?.request || null
-}
+ return (await meta_getLatest())?.request || null;
+};
/**
* Get the history of chats
* @example [{ name: '2021-01-01_12-00-00', time: Date }, { name: '2021-01-01_12-00-00', time: Date }]
*/
export const getHistory = async (): Promise<{
- name: string,
- snippet?: string,
- time: Date
+ name: string;
+ snippet?: string;
+ time: Date;
}[]> => {
- const path = await getOrCreateHistoryPath()
- const files = await Deno.readDir(path)
+ const path = await getOrCreateHistoryPath();
+ const files = await Deno.readDir(path);
- const historySnippetsPath = await getOrCreateHistorySnippetsFile()
- let historySnippets: { [key: string]: string } = {}
+ const historySnippetsPath = await getOrCreateHistorySnippetsFile();
+ let historySnippets: { [key: string]: string } = {};
try {
- const historySnippetsData = await Deno.readTextFile(historySnippetsPath)
- historySnippets = JSON.parse(historySnippetsData)
- }
- catch {
+ const historySnippetsData = await Deno.readTextFile(historySnippetsPath);
+ historySnippets = JSON.parse(historySnippetsData);
+ } catch {
// ignore
}
// convert AsyncIterable to array of strings
const fileInfos: {
- name: string,
- snippet?: string,
- time: Date
- }[] = []
+ name: string;
+ snippet?: string;
+ time: Date;
+ }[] = [];
for await (const file of files) {
- if (!file.name.endsWith('.json')) continue
- if (file.name === 'meta.json') continue
- const stat = await Deno.stat(`${path}/${file.name}`)
+ if (!file.name.endsWith(".json")) continue;
+ if (file.name === "meta.json") continue;
+ const stat = await Deno.stat(`${path}/${file.name}`);
fileInfos.push({
name: file.name.slice(0, -5),
- time: stat.mtime!
- })
+ time: stat.mtime!,
+ });
}
- fileInfos.sort((a, b) => b.time.getTime() - a.time.getTime())
+ fileInfos.sort((a, b) => b.time.getTime() - a.time.getTime());
// add historySnippets
- let generatedSnippets = false
- const SNIPPET_MAX_LENGTH = 50
+ let generatedSnippets = false;
+ const SNIPPET_MAX_LENGTH = 50;
for (let i = 0; i < fileInfos.length; i++) {
- const fileInfo = fileInfos[i]
+ const fileInfo = fileInfos[i];
if (historySnippets[fileInfo.name]) {
- fileInfo.snippet = historySnippets[fileInfo.name]
- continue
+ fileInfo.snippet = historySnippets[fileInfo.name];
+ continue;
}
// Generate snippets for the first 10 chats
- const chat = await meta_getChat(fileInfo.name)
+ const chat = await meta_getChat(fileInfo.name);
if (chat) {
const fullText = chat.request.messages
- .filter(m => m.role !== 'system')
- .map(m => m.content)
+ .filter((m) => m.role !== "system")
+ .map((m) => m.content)
.slice(0, 5)
- .join(' ')
- .replaceAll('\n', ' ')
-
- const snippet = fullText.length > SNIPPET_MAX_LENGTH ? `${fullText.slice(0, SNIPPET_MAX_LENGTH)}...` : fullText
- fileInfo.snippet = snippet
- historySnippets[fileInfo.name] = snippet
- generatedSnippets = true
+ .join(" ")
+ .replaceAll("\n", " ");
+
+ const snippet = fullText.length > SNIPPET_MAX_LENGTH
+ ? `${fullText.slice(0, SNIPPET_MAX_LENGTH)}...`
+ : fullText;
+ fileInfo.snippet = snippet;
+ historySnippets[fileInfo.name] = snippet;
+ generatedSnippets = true;
}
}
if (generatedSnippets) {
- await Deno.writeTextFile(historySnippetsPath, JSON.stringify(historySnippets))
+ await Deno.writeTextFile(
+ historySnippetsPath,
+ JSON.stringify(historySnippets),
+ );
}
- return fileInfos
-}
+ return fileInfos;
+};
export const loadConfig = async (): Promise => {
- const configPath = await getOrCreateConfigFile()
- try {
- const configData = await Deno.readTextFile(configPath)
- const configJson = JSON.parse(configData)
- return configJson
+ if (cachedConfig) {
+ return cachedConfig;
}
- catch {
- console.warn(`Failed to load config at ${configPath}`)
- return null
+ const configPath = await getOrCreateConfigFile();
+ try {
+ const configData = await Deno.readTextFile(configPath);
+ const configJson = JSON.parse(configData);
+ cachedConfig = configJson;
+ return configJson;
+ } catch {
+ console.warn(`Failed to load config at ${configPath}`);
+ return null;
}
-}
+};
export const saveConfig = async (config: Partial) => {
- const configPath = await getOrCreateConfigFile()
-
- await Deno.writeTextFile(configPath, JSON.stringify({
- lastUpdated: new Date().toISOString(),
- version: VERSION,
- autoUpdate: 'prompt',
- ...config,
- }))
-}
+ const configPath = await getOrCreateConfigFile();
+
+ await Deno.writeTextFile(
+ configPath,
+ JSON.stringify({
+ lastUpdated: new Date().toISOString(),
+ version: VERSION,
+ autoUpdate: "prompt",
+ ...config,
+ }),
+ );
+};
diff --git a/mod.ts b/mod.ts
index d18a56b..b0fd98d 100644
--- a/mod.ts
+++ b/mod.ts
@@ -1,128 +1,168 @@
import { parse } from "https://deno.land/std@0.181.0/flags/mod.ts";
-import { ChatCompletionRequest, Message, StreamResponse, getChatResponse_stream } from "./lib/ai.ts";
-import { AUTO_UPDATE_PROBABILITY, Config, DEFAULT_CONFIG, getChat, getHistory, getOrCreateConfigFile, loadConfig, saveConfig, writeChat } from "./lib/data.ts";
+import { aiConfig, getChatResponse_stream, StreamResponse } from "./lib/ai.ts";
+import { ChatCompletionRequest, Message } from "./lib/ai-types.ts";
+import {
+ AUTO_UPDATE_PROBABILITY,
+ Config,
+ DEFAULT_CONFIG,
+ getChat,
+ getHistory,
+ getOrCreateConfigFile,
+ loadConfig,
+ saveConfig,
+ writeChat,
+} from "./lib/data.ts";
import { printCtrlSequence, pullCharacter } from "./lib/lib.ts";
-import { setCodeCmdParamsForChat, setExecutableCmdParamsForChat } from "./lib/prompts.ts";
+import {
+ setCodeCmdParamsForChat,
+ setExecutableCmdParamsForChat,
+} from "./lib/prompts.ts";
import { exec as shExec } from "./lib/shell.ts";
import { install, isLatestVersion } from "./lib/update.ts";
const args = parse(Deno.args, {
boolean: [
// Instructions for this script
- 'help',
+ "help",
// Runs through persistent configuration
- 'config',
+ "config",
// Continuation (continue last conversation)
- 'continue', 'cont', 'c',
+ "continue",
+ "cont",
+ "c",
// Exec (run as a shell command)
- 'exec', 'x',
+ "exec",
+ "x",
// Retry (re-generate the last assistant message)
- 'retry', 'r',
+ "retry",
+ "r",
// Rewrite (reword the last user message)
- 'rewrite', 'rw', 'w',
+ "rewrite",
+ "rw",
+ "w",
// Pop (remove the last message in the conversation)
- 'pop',
+ "pop",
// Print (print the last message in the conversation)
- 'print', 'p',
+ "print",
+ "p",
// Slice (remove the first message in the conversation)
- 'slice', 's',
+ "slice",
+ "s",
// History (list chat history)
- 'history', 'h',
+ "history",
+ "h",
// Dump (dump the entire chat history)
- 'dump', 'd',
+ "dump",
+ "d",
// Fast (use GPT-3.5-turbo)
- 'fast', 'f',
+ "fast",
+ "f",
// Update (update ShellGPT)
- 'update', 'u',
+ "update",
+ "u",
// REPL (continuous conversation)
- 'repl',
+ "repl",
// Code mode (output code instead of text)
- 'code',
+ "code",
+
+ // Debug (print debug info)
+ "debug",
],
string: [
// Name (select a conversation from history to use)
- 'name', 'n',
+ "name",
+ "n",
// System (set a system prompt / context)
- 'system', 'sys',
+ "system",
+ "sys",
// Temperature (creativity)
- 'temperature', 'temp', 't',
+ "temperature",
+ "temp",
+ "t",
// Limit max tokens to output
- 'max_tokens', 'max',
+ "max_tokens",
+ "max",
// WPM (words per minute, speed of typing output)
- 'wpm',
+ "wpm",
// Model (manually use a different OpenAI model)
- 'model', 'm',
+ "model",
+ "m",
],
-})
+});
// --- Parse Args ---
-const DEFAULT_MODEL = 'gpt-4'
-const DEFAULT_WPM = 800
-const DEFAULT_FAST_WPM = 1200
-const AVG_CHARS_PER_WORD = 4.8
-
-const help = args.help
-const name = args.name || args.n
-const fast = args.f || args.fast
-const updateConfig = args.config
-const update = args.update || args.u
-const model = fast ? 'gpt-3.5-turbo' as const : (args.model ?? args.m)
-const temp = args.t || args.temp || args.temperature
-const exec = args.x || args.exec
-const retry = args.r || args.retry
-const rewrite = args.w || args.rw || args.rewrite
-const pop = args.pop
-const print = args.p || args.print
-const slice = args.s || args.slice
-const dump = args.dump || args.d
-const cont = slice || pop || retry || rewrite || print || dump || (Boolean(args.c || args.cont || args.continue))
-const wpm = args.wpm ? Number(args.wpm) : fast ? DEFAULT_FAST_WPM : DEFAULT_WPM
-const history = args.h || args.history
-const system = args.sys || args.system
-const maxTokens = args.max || args.max_tokens
-const readStdin = args._.at(0) === '-' || args._.at(-1) === '-'
-const repl = args.repl
-const code = args.code
+const DEFAULT_MODEL = "gpt-3.5-turbo";
+const DEFAULT_WPM = 800;
+const DEFAULT_FAST_WPM = 1200;
+const AVG_CHARS_PER_WORD = 4.8;
+
+const help = args.help;
+const name = args.name || args.n;
+const fast = args.f || args.fast;
+const updateConfig = args.config;
+const update = args.update || args.u;
+const model = fast ? "gpt-3.5-turbo" as const : (args.model ?? args.m);
+const temp = args.t || args.temp || args.temperature;
+const exec = args.x || args.exec;
+const retry = args.r || args.retry;
+const rewrite = args.w || args.rw || args.rewrite;
+const pop = args.pop;
+const print = args.p || args.print;
+const slice = args.s || args.slice;
+const dump = args.dump || args.d;
+const cont = slice || pop || retry || rewrite || print || dump ||
+ (Boolean(args.c || args.cont || args.continue));
+const wpm = args.wpm ? Number(args.wpm) : fast ? DEFAULT_FAST_WPM : DEFAULT_WPM;
+const history = args.h || args.history;
+const system = args.sys || args.system;
+const maxTokens = args.max || args.max_tokens;
+const readStdin = args._.at(0) === "-" || args._.at(-1) === "-";
+const repl = args.repl;
+const code = args.code;
+const debug = args.debug;
// --- END Parse Args ---
-let config = await loadConfig()
-const gptCommand = config?.command ?? DEFAULT_CONFIG.command
-const configWasEmpty = Object.keys(config ?? {}).length === 0
-const messageWasEmpty = args._.length === 0
-const shouldAutoUpdate = config?.autoUpdate !== 'never' && Math.random() < AUTO_UPDATE_PROBABILITY
+let config = await loadConfig();
+const gptCommand = config?.command ?? DEFAULT_CONFIG.command;
+const configWasEmpty = Object.keys(config ?? {}).length === 0;
+const messageWasEmpty = args._.length === 0;
+const shouldAutoUpdate = config?.autoUpdate !== "never" &&
+ Math.random() < AUTO_UPDATE_PROBABILITY;
-const messageContent = args._.join(' ')
+const messageContent = args._.join(" ");
const message: Message = {
- role: 'user',
- content: messageContent
-}
+ role: "user",
+ content: messageContent,
+};
const stock: ChatCompletionRequest = {
model: model ?? config?.model ?? DEFAULT_MODEL,
- messages: []
-}
+ messages: [],
+};
-const req: ChatCompletionRequest = (cont || name) ? ((await getChat(name)) ?? stock) : stock
+const req: ChatCompletionRequest = (cont || name)
+ ? ((await getChat(name)) ?? stock)
+ : stock;
const helpMessage = `
Usage: ${gptCommand} [OPTIONS] [MESSAGE]
@@ -131,8 +171,8 @@ Options:
--help Show this help message
--config Runs configuration
--update Updates ShellGPT
+ --debug Output debug information with each request
-
- Read message from stdin
-c, --continue Continue the last conversation
-x, --exec Run the generated response as a shell command
@@ -159,355 +199,408 @@ Examples:
${gptCommand} -c "Tell me more about Paris."
${gptCommand} -x "Create a new file called 'test.txt' and write 'Hello World!' to it."
cat test.txt | ${gptCommand} - "Invert the capitalization of this text."
-`
+`;
// --- HANDLE ARGS ---
+if (debug) {
+ aiConfig.debug = true;
+}
if (pop) {
- const lastMessage = req.messages.pop()
+ const lastMessage = req.messages.pop();
if (lastMessage) {
- console.log(`(Removing last message from ${lastMessage!.role})`)
- await writeChat(req, false)
+ console.log(`(Removing last message from ${lastMessage!.role})`);
+ await writeChat(req, false);
+ } else {
+ console.log("(Found no messages)");
}
- else {
- console.log('(Found no messages)')
- }
- Deno.exit()
+ Deno.exit();
}
if (slice) {
if (req.messages.length > 1) {
- console.log(`(Removing first message)`)
- req.messages = req.messages.slice(1)
- await writeChat(req, false)
- }
- else {
- console.log("(Found no messages)")
+ console.log(`(Removing first message)`);
+ req.messages = req.messages.slice(1);
+ await writeChat(req, false);
+ } else {
+ console.log("(Found no messages)");
}
- Deno.exit()
+ Deno.exit();
}
if (update || shouldAutoUpdate) {
- const isLatest = await isLatestVersion()
+ const isLatest = await isLatestVersion();
if (shouldAutoUpdate && isLatest) {
// do nothing in this case
- }
- else {
- const newConfig = config ?? DEFAULT_CONFIG
- const updateInfo = await install(newConfig, true)
- saveConfig(newConfig)
- if (updateInfo.result === 'updated') {
- console.log('\n%c> Successfully updated!', 'font-weight: bold;')
- Deno.exit()
- }
- else if (updateInfo.result === 'error') {
- console.log('\n%c> Encountered error while updating.', 'font-weight: bold;')
- Deno.exit()
- }
- else {
- console.log('\n%c> No updates found.', 'font-weight: bold;')
- Deno.exit()
+ } else {
+ const newConfig = config ?? DEFAULT_CONFIG;
+ const updateInfo = await install(newConfig, true);
+ saveConfig(newConfig);
+ if (updateInfo.result === "updated") {
+ console.log("\n%c> Successfully updated!", "font-weight: bold;");
+ Deno.exit();
+ } else if (updateInfo.result === "error") {
+ console.log(
+ "\n%c> Encountered error while updating.",
+ "font-weight: bold;",
+ );
+ Deno.exit();
+ } else {
+ console.log("\n%c> No updates found.", "font-weight: bold;");
+ Deno.exit();
}
}
}
if (updateConfig || configWasEmpty) {
if (configWasEmpty) {
- console.log('(No config found. Running initial setup...)\n')
+ console.log("(No config found. Running initial setup...)\n");
}
const newConfig: Config = {
- ...(config ?? DEFAULT_CONFIG)
- }
+ ...(config ?? DEFAULT_CONFIG),
+ };
- const updateInfo = await install(newConfig, true)
+ const updateInfo = await install(newConfig, true);
- if (updateInfo.result === 'updated') {
- console.log('\n%c> Successfully updated! Please re-run `gpt --config`', 'font-weight: bold;')
- Deno.exit()
+ if (updateInfo.result === "updated") {
+ console.log(
+ "\n%c> Successfully updated! Please re-run `gpt --config`",
+ "font-weight: bold;",
+ );
+ Deno.exit();
}
- const currentModel = config?.model || DEFAULT_MODEL
+ const currentModel = config?.model || DEFAULT_MODEL;
- console.log('\n%c> Which OpenAI ChatGPT model would you like to use?', 'font-weight: bold;')
- console.log()
- const model = window.prompt(`You can enter "gpt-4" or "gpt-3.5-turbo". (Leave empty for ${currentModel}):`)
+ console.log(
+ "\n%c> Which OpenAI ChatGPT model would you like to use?",
+ "font-weight: bold;",
+ );
+ console.log();
+ const model = window.prompt(
+ `You can enter "gpt-4" or "gpt-3.5-turbo". (Leave empty for ${currentModel}):`,
+ );
if (model) {
- newConfig.model = model ?? currentModel
+ newConfig.model = model ?? currentModel;
}
- console.log('\n%c> Would you like to set a custom system prompt to attach to each session?', 'font-weight: bold;')
+ console.log(
+ "\n%c> Would you like to set a custom system prompt to attach to each session?",
+ "font-weight: bold;",
+ );
if (config?.systemPrompt) {
- console.log(`Current system prompt: ${config.systemPrompt}`)
- console.log()
- const newPrompt = window.prompt(`Enter new prompt (empty to keep, "clear" to remove)`)
- if (newPrompt === 'clear') {
- newConfig.systemPrompt = undefined
- }
- else if (newPrompt) {
- newConfig.systemPrompt = newPrompt
+ console.log(`Current system prompt: ${config.systemPrompt}`);
+ console.log();
+ const newPrompt = window.prompt(
+ `Enter new prompt (empty to keep, "clear" to remove)`,
+ );
+ if (newPrompt === "clear") {
+ newConfig.systemPrompt = undefined;
+ } else if (newPrompt) {
+ newConfig.systemPrompt = newPrompt;
+ } else {
+ newConfig.systemPrompt = config.systemPrompt;
}
- else {
- newConfig.systemPrompt = config.systemPrompt
+ } else {
+ console.log();
+ const newPrompt = window.prompt(
+ `Press enter to skip, or type a new prompt:`,
+ );
+ if (newPrompt) {
+ newConfig.systemPrompt = newPrompt;
}
}
- else {
- console.log()
- const newPrompt = window.prompt(`Press enter to skip, or type a new prompt:`)
- if (newPrompt) {
- newConfig.systemPrompt = newPrompt
+
+ if (!Deno.env.get("OPENAI_API_KEY")) {
+ console.log(
+ `\n%c> No API key detected. It's recommended to configure using the OPENAI_API_KEY environment variable, but you can also paste one here.`,
+ "font-weight: bold;",
+ );
+
+ console.log(
+ "\n%c> You can get an API key here: %chttps://platform.openai.com/account/api-keys",
+ "font-weight: bold;",
+ "font-weight: normal;",
+ );
+
+ if (config?.openAiApiKey) {
+ console.log(`Current API key: ${config.openAiApiKey}`);
+ }
+ console.log();
+
+ const newPrompt = window.prompt(
+ config?.openAiApiKey
+ ? `Enter new API key (empty to keep, "clear" to remove)`
+ : `Enter API key (empty to skip)`,
+ );
+ if (newPrompt === "clear") {
+ newConfig.openAiApiKey = undefined;
+ } else if (newPrompt) {
+ newConfig.openAiApiKey = String(newPrompt);
+ } else {
+ // do nothing
}
}
try {
- await saveConfig(newConfig)
- console.log(`%cUpdated config file: %c${await getOrCreateConfigFile()}`, 'color: green; font-weight: bold;', 'color: green;')
+ await saveConfig(newConfig);
+ console.log(
+ `%cUpdated config file: %c${await getOrCreateConfigFile()}`,
+ "color: green; font-weight: bold;",
+ "color: green;",
+ );
if (updateConfig) {
// Exit only if the user manually requested config update
- Deno.exit()
+ Deno.exit();
} else {
// Otherwise, continue with the rest of the script
- config = newConfig
- req.model = model ?? config.model ?? DEFAULT_MODEL
+ config = newConfig;
+ req.model = model ?? config.model ?? DEFAULT_MODEL;
}
- }
- catch (e) {
- console.error(`Failed to update config file at: ${await getOrCreateConfigFile()}`)
- console.error(e)
- Deno.exit(1)
+ } catch (e) {
+ console.error(
+ `Failed to update config file at: ${await getOrCreateConfigFile()}`,
+ );
+ console.error(e);
+ Deno.exit(1);
}
}
if (help) {
console.log(helpMessage);
- Deno.exit()
+ Deno.exit();
}
if (readStdin) {
- message.content += '\n'
+ message.content += "\n";
const decoder = new TextDecoder();
for await (const chunk of Deno.stdin.readable) {
const textChunk = decoder.decode(chunk);
- message.content += textChunk
+ message.content += textChunk;
}
}
if (print) {
// print last message
- const lastMessage = req.messages.pop()
+ const lastMessage = req.messages.pop();
if (lastMessage) {
- console.log(lastMessage.content)
+ console.log(lastMessage.content);
+ } else {
+ console.log("(Found no messages)");
}
- else {
- console.log('(Found no messages)')
- }
- Deno.exit()
+ Deno.exit();
}
if (dump) {
for (const message of req.messages) {
- if (message.role === 'user') {
- console.log('---\n')
- console.log(`${message.content}\n`)
- console.log('---\n')
- }
- else {
- console.log(`${message.content}\n`)
+ if (message.role === "user") {
+ console.log("---\n");
+ console.log(`${message.content}\n`);
+ console.log("---\n");
+ } else {
+ console.log(`${message.content}\n`);
}
}
- Deno.exit()
+ Deno.exit();
}
if (system || (config?.systemPrompt && !cont)) {
// Add system prompt if set for this message, or is a new conversation
req.messages.push({
- role: 'system',
- content: system ?? config!.systemPrompt!
- })
+ role: "system",
+ content: system ?? config!.systemPrompt!,
+ });
}
-let empty = false
+let empty = false;
if (!message.content && !retry && !pop && !history) {
- empty = true
- console.log('(No message passed)\n')
+ empty = true;
+ console.log("(No message passed)\n");
}
if (temp) {
- req.temperature = Number(temp)
+ req.temperature = Number(temp);
}
if (maxTokens) {
- req.max_tokens = Number(maxTokens)
+ req.max_tokens = Number(maxTokens);
}
if (model) {
// @ts-ignore Allow any string as model for now
- req.model = model
+ req.model = model;
}
if (retry) {
// remove last assistant message
- req.messages.pop()
+ req.messages.pop();
}
if (rewrite) {
// remove last assistant AND user messages
- req.messages.pop()
- req.messages.pop()
+ req.messages.pop();
+ req.messages.pop();
}
if (!retry && !empty) {
- req.messages.push(message)
+ req.messages.push(message);
}
if (exec) {
- setExecutableCmdParamsForChat(req)
+ setExecutableCmdParamsForChat(req);
} else if (code) {
- setCodeCmdParamsForChat(req)
+ setCodeCmdParamsForChat(req);
}
if (history) {
- const files = await getHistory()
+ const files = await getHistory();
for (const file of files) {
- const hasSnippet = file.snippet && file.snippet.length > 0
+ const hasSnippet = file.snippet && file.snippet.length > 0;
if (hasSnippet) {
- console.log(`%c${file.name}\t\t%c${file.snippet}`, 'color: blue', 'color: gray')
- }
- else {
- console.log(`%c${file.name}`, 'color: blue')
+ console.log(
+ `%c${file.name}\t\t%c${file.snippet}`,
+ "color: blue",
+ "color: gray",
+ );
+ } else {
+ console.log(`%c${file.name}`, "color: blue");
}
}
- Deno.exit()
+ Deno.exit();
}
// --- END HANDLE ARGS ---
-let streamResponse: AsyncIterableIterator | null = null
+let streamResponse: AsyncIterableIterator<StreamResponse> | null = null;
const doStreamResponse = async () => {
try {
streamResponse = await getChatResponse_stream(req);
+ } catch (e) {
+ console.error("Unhandled error", e);
+ Deno.exit();
}
- catch (e) {
- console.error('Unhandled error', e)
- Deno.exit()
- }
-}
+};
// STATE
-type DoneType = 'with_net' | 'with_write' | 'with_print' | 'none'
-let done: DoneType = 'none'
-let responseStr = ''
-let intermediateStr = ''
-let printStr = ''
+type DoneType = "with_net" | "with_write" | "with_print" | "none";
+let done: DoneType = "none";
+let responseStr = "";
+let intermediateStr = "";
+let printStr = "";
if (repl && messageWasEmpty) {
- done = 'with_net'
-}
-else {
- await doStreamResponse()
+ done = "with_net";
+} else {
+ await doStreamResponse();
}
// Done, write it out
const flush = async () => {
- streamResponse = null
- const text = new TextEncoder().encode('\n')
- await Deno.stdout.write(text)
+ streamResponse = null;
+ const text = new TextEncoder().encode("\n");
+ await Deno.stdout.write(text);
req.messages.push({
content: responseStr,
- role: 'assistant'
- })
+ role: "assistant",
+ });
- await writeChat(req, cont ? false : (name || true))
+ await writeChat(req, cont ? false : (name || true));
if (exec && !readStdin) {
- const promptValue = prompt(`\nAre you SURE you wish to run the above command? (y/N):`)
- if (['y', 'yes'].includes(promptValue?.toLowerCase() ?? '')) {
+ console.log(
+ `\n%cAre you SURE you wish to run the above command? (y/N):`,
+ "color: red; font-weight: bold;",
+    );
+    const promptValue = prompt("> ");
+ if (["y", "yes"].includes(promptValue?.toLowerCase() ?? "")) {
// do it
- await shExec(responseStr)
- }
- else {
- console.log('(will not exec command)')
+ await shExec(responseStr);
+ } else {
+ console.log("(will not exec command)");
}
} else if (exec && readStdin) {
- console.log('(exec not currently supported when reading from stdin)')
+ console.log("(exec not currently supported when reading from stdin)");
}
if (repl) {
- responseStr = ''
+ responseStr = "";
- await printCtrlSequence('blue')
- await printCtrlSequence('bold')
+ await printCtrlSequence("blue");
+ await printCtrlSequence("bold");
- const promptValue = prompt(`\n>`)
+ const promptValue = prompt(`\n>`);
// print reset ctrl sequence
- await printCtrlSequence('reset')
+ await printCtrlSequence("reset");
if (promptValue) {
// do it
req.messages.push({
content: promptValue,
- role: 'user'
- })
- done = 'none'
- await doStreamResponse()
+ role: "user",
+ });
+ done = "none";
+ await doStreamResponse();
}
+ } else {
+ Deno.exit();
}
- else {
- Deno.exit()
- }
-}
+};
// Push strings
{
(async () => {
while (true) {
- if (done !== 'none' || !streamResponse) {
+ if (done !== "none" || !streamResponse) {
// Spin wait
- await new Promise(resolve => setTimeout(resolve))
- continue
+ await new Promise((resolve) => setTimeout(resolve));
+ continue;
}
try {
- for await (const response of streamResponse as AsyncIterableIterator) {
+ for await (
+ const response of streamResponse as AsyncIterableIterator<
+ StreamResponse
+ >
+ ) {
if (response.delta) {
- responseStr += response.delta
- intermediateStr += response.delta
+ responseStr += response.delta;
+ intermediateStr += response.delta;
}
}
+ } catch (e) {
+ console.error("Unhandled error", e);
+ Deno.exit(1);
}
- catch (e) {
- console.error('Unhandled error', e)
- Deno.exit(1)
- }
- done = 'with_net';
+ done = "with_net";
}
- })()
+ })();
}
// Intermediate string
-let startTime = -1
-const targetCps = (AVG_CHARS_PER_WORD * wpm) / 60
+let startTime = -1;
+const targetCps = (AVG_CHARS_PER_WORD * wpm) / 60;
{
(async () => {
while (true) {
- if (done === 'with_write' || (done as DoneType) === 'with_print') {
+ if (done === "with_write" || (done as DoneType) === "with_print") {
// Spin wait
- await new Promise(resolve => setTimeout(resolve))
- continue
+ await new Promise((resolve) => setTimeout(resolve));
+ continue;
}
// Go through characters one-by-one and write
- while ((done as DoneType) !== 'with_net' || intermediateStr.length > 0) {
+ while ((done as DoneType) !== "with_net" || intermediateStr.length > 0) {
if (startTime < 0) {
- startTime = Date.now()
+ startTime = Date.now();
}
- const { char, str } = pullCharacter(intermediateStr)
+ const { char, str } = pullCharacter(intermediateStr);
- printStr += char
- intermediateStr = str
+ printStr += char;
+ intermediateStr = str;
await new Promise((resolve) => {
setTimeout(() => {
- resolve(true)
- }, 1000 / targetCps)
- })
+ resolve(true);
+ }, 1000 / targetCps);
+ });
}
- done = 'with_write'
+ done = "with_write";
}
})();
}
@@ -515,16 +608,16 @@ const targetCps = (AVG_CHARS_PER_WORD * wpm) / 60
// Pull strings
{
const consumeFn = async () => {
- const latest = printStr
- printStr = ''
- if (!latest && done === 'with_write') {
- await flush()
+ const latest = printStr;
+ printStr = "";
+ if (!latest && done === "with_write") {
+ await flush();
}
if (latest) {
- const text = new TextEncoder().encode(latest)
- await Deno.stdout.write(text)
+ const text = new TextEncoder().encode(latest);
+ await Deno.stdout.write(text);
}
- setTimeout(consumeFn)
- }
+ setTimeout(consumeFn);
+ };
setTimeout(consumeFn);
}