add embeddings, fix bug
mattvr committed Mar 13, 2024
1 parent 108f1ed commit 1e98512
Showing 4 changed files with 103 additions and 16 deletions.
9 changes: 8 additions & 1 deletion README.md
@@ -125,6 +125,7 @@ These commands are for specific chats, either new or existing.
| --model | -m | Manually use a different OpenAI model |
| --debug | | Print OpenAI API information |
| --lang | | Sets written or programming language of output |
| --dims | | Sets the dimensions of the embedding (--embed only) |

### Other Commands

@@ -133,6 +134,7 @@ These commands perform non-textual tasks.
| Argument | Alias | Description |
| ------------- | ---------- | ---------------------------------------------------- |
| --img | -i | Respond with a generated image URL instead of text |
| --embed | | Respond with a vector embedding of the input string |

## Features

@@ -144,7 +146,7 @@ Shell-GPT has some useful and unique features:
- Utility commands for convenient chat history viewing and editing.
- Smooth, streaming output, resembling human typing rather than delayed or
choppy responses.
- Supports image generation and custom language input/output.
- Supports generating images, embeddings, and configurable languages.
- Built in Deno for better performance, granular permissions, and easier script
modification.

@@ -201,6 +203,11 @@ Output an image:
gpt --img "a corgi wearing cashmere pants hacking into myspace.com"
```

Output a vector embedding:
```sh
gpt --embed "The quick brown fox jumped over the lazy dog." --dims=1024 --model=text-embedding-3-large
```
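
The `--embed` output is printed as a plain JSON array, so it can be captured and parsed downstream. A minimal Deno sketch, assuming the vector was redirected to a hypothetical `vec.json`:

```ts
// Sketch: parse the JSON array printed by `gpt --embed ... > vec.json`.
// The file name is hypothetical; the CLI writes the vector to stdout.
const vec: number[] = JSON.parse(await Deno.readTextFile("vec.json"));
console.log(`embedding with ${vec.length} dimensions`); // 1024 when --dims=1024 is passed
```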

Interactive coding session:

75 changes: 64 additions & 11 deletions lib/ai.ts
@@ -1,9 +1,10 @@
export type Role = "system" | "user" | "assistant";
type ResponseFormat = 'url' | 'b64_json';
type ImageSize = '256x256' | '512x512' | '1024x1024' | '1792x1024' | '1024x1792';
type ModelType = 'dall-e-2' | 'dall-e-3';
type ImageModelType = 'dall-e-2' | 'dall-e-3';
type ImageStyle = 'vivid' | 'natural';
type ImageQuality = 'standard' | 'hd';
export type EmbedModelType = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002'

export interface Message {
role: Role;
@@ -67,7 +68,7 @@ export interface ChatCompletionStreamResponse {
choices: StreamChoice[];
}

export interface ChatCompetionStreamError {
export interface ChatCompletionStreamError {
"error": {
"message": string | null,
"type": string | null
@@ -77,7 +78,7 @@ export interface ChatCompetionStreamError {
}
export interface CreateImageRequest {
prompt: string;
model?: ModelType;
model?: ImageModelType;
n?: number | null;
quality?: ImageQuality;
response_format?: ResponseFormat | null;
@@ -99,7 +100,7 @@ export interface CreateImageEditRequest {
image: File; // Assuming this is a file object
prompt: string;
mask?: File;
model?: ModelType;
model?: ImageModelType;
n?: number | null;
size?: ImageSize | null;
response_format?: ResponseFormat | null;
@@ -113,7 +114,7 @@ interface CreateImageEditResponse {

export interface CreateImageVariationRequest {
image: File;
model?: ModelType;
model?: ImageModelType;
n?: number | null;
response_format?: ResponseFormat | null;
size?: ImageSize | null;
@@ -131,10 +132,27 @@ export interface Image {
revised_prompt?: string;
}

export interface EmbeddingRequest {
model: EmbedModelType | string;
input: string;
dimensions?: number;
}

export interface EmbeddingResponse {
data: EmbeddingObject[]
}

export interface EmbeddingObject {
object: "embedding";
embedding: number[];
index: number
}
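
As an illustration of how the new types fit together, a request and the response shape they describe might look like the following sketch (values are made up; the snippet assumes the interfaces above are in scope):

```ts
// Illustrative only: example values for the embedding request/response types above.
const req: EmbeddingRequest = {
  model: "text-embedding-3-small",
  input: "The quick brown fox jumped over the lazy dog.",
  dimensions: 256, // optional; the text-embedding-3-* models accept a reduced size
};

const res: EmbeddingResponse = {
  data: [{ object: "embedding", embedding: [0.0123, -0.0456 /* … */], index: 0 }],
};
```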


const OPENAI_API_KEY = Deno.env.get("OPENAI_API_KEY");
const OPENAI_CHAT_URL = Deno.env.get("OPENAI_CHAT_URL") || "https://api.openai.com/v1/chat/completions";
const OPENAI_IMG_URL = Deno.env.get("OPENAI_IMG_URL") || "https://api.openai.com/v1/images/generations";
const OPENAI_EMBEDDING_URL = Deno.env.get("OPENAI_EMBEDDING_URL") || "https://api.openai.com/v1/embeddings";

export const aiConfig = {
debug: false,
@@ -251,11 +269,13 @@ export const getChatResponse_stream = async (
let frameEnd = buffer.indexOf("\n\n")
while (frameEnd === -1 && iters++ < MAX_ITERS) {
const result = await reader.read();
if (result.value) {
buffer += decoder.decode(result.value);
}
if (result.done) {
isDone = true;
break;
}
buffer += decoder.decode(result.value);
}

const chunks = []
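
The reordered read loop above is the "fix bug" half of the commit: the chunk returned by `reader.read()` is now decoded before the `done` check, so bytes delivered on the final read are no longer dropped. A self-contained sketch of the same pattern (not the project's code, just the general shape):

```ts
// Sketch: decode whatever the read returned before honoring `done`,
// so a value that arrives together with the final read is not lost.
async function readAll(stream: ReadableStream<Uint8Array>): Promise<string> {
  const reader = stream.getReader();
  const decoder = new TextDecoder();
  let text = "";
  while (true) {
    const { value, done } = await reader.read();
    if (value) text += decoder.decode(value, { stream: true });
    if (done) break;
  }
  return text + decoder.decode(); // flush any buffered multi-byte sequence
}
```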
@@ -283,20 +303,24 @@

let parsed = null;
try {
parsed = JSON.parse(data) as ChatCompletionStreamResponse;
if ((parsed as unknown as ChatCompetionStreamError).error) {
throw new Error("error found");
parsed = JSON.parse(data);

if (parsed.error) {
const error = (parsed as unknown as ChatCompletionStreamError).error
throw new Error(error.message ?? "Unknown error")
}

const response = parsed as ChatCompletionStreamResponse;

newContent += parsed.choices[0]?.delta?.content ?? "";
newContent += response.choices[0]?.delta?.content ?? "";

if (parsed.choices[0].finish_reason) {
isDone = true;
break;
}
} catch (e: unknown) {
// throw with added context
const error = (parsed as unknown as ChatCompetionStreamError | null)
const error = (parsed as unknown as ChatCompletionStreamError | null)
?.error;
if (error?.code === "model_not_found") {
console.error(
@@ -399,4 +423,33 @@ export const getImageResponse = async (
console.error("Failed to reply", e);
return null;
}
}

export const getEmbeddingResponse = async (
req: EmbeddingRequest
): Promise<number[] | null> => {
if (aiConfig.debug) {
console.log("Request to OpenAI", req);
}
await checkAPIKey();

const response = await fetch(OPENAI_EMBEDDING_URL, {
"method": "POST",
"headers": {
"Authorization": `Bearer ${OPENAI_API_KEY}`,
"Content-Type": "application/json",
},
"body": JSON.stringify(req),
})

try {
const data = await response.json() as EmbeddingResponse;
if (aiConfig.debug) {
console.log("Response from OpenAI", data);
}
return data.data[0].embedding
} catch (e) {
console.error("Failed to reply", e);
return null;
}
}
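
A possible way to call the new helper from another Deno script, assuming `OPENAI_API_KEY` is set and the import path matches the repo layout; the two input strings are only examples:

```ts
import { getEmbeddingResponse } from "./lib/ai.ts";

// Embed two strings and compare them with cosine similarity (illustrative inputs).
const model = "text-embedding-3-small";
const [a, b] = await Promise.all([
  getEmbeddingResponse({ model, input: "shell scripting tips" }),
  getEmbeddingResponse({ model, input: "command line tricks" }),
]);

if (a && b) {
  const dot = a.reduce((sum, v, i) => sum + v * b[i], 0);
  const mag = (v: number[]) => Math.sqrt(v.reduce((s, x) => s + x * x, 0));
  console.log("cosine similarity:", dot / (mag(a) * mag(b)));
}
```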
2 changes: 1 addition & 1 deletion lib/data.ts
@@ -1,7 +1,7 @@
import { ChatCompletionRequest } from "./ai.ts";
import { genDescriptiveNameForChat } from "./prompts.ts";

export const VERSION = "0.3.9";
export const VERSION = "0.4.0";
export const AUTO_UPDATE_PROBABILITY = 0.1;

export type Config = {
33 changes: 30 additions & 3 deletions mod.ts
@@ -1,5 +1,5 @@
import { parse } from "https://deno.land/[email protected]/flags/mod.ts";
import { aiConfig, getChatResponse_stream, getImageResponse, StreamResponse, ChatCompletionRequest, CreateImageRequest, Message } from "./lib/ai.ts";
import { aiConfig, getChatResponse_stream, getImageResponse, StreamResponse, ChatCompletionRequest, CreateImageRequest, Message, EmbeddingRequest, getEmbeddingResponse } from "./lib/ai.ts";
import {
AUTO_UPDATE_PROBABILITY,
Config,
@@ -90,6 +90,9 @@ const args = parse(Deno.args, {
"image",
"img",
"i",

// Embedding
"embed"
],
string: [
// Name (select a conversation from history to use)
Expand Down Expand Up @@ -117,13 +120,16 @@ const args = parse(Deno.args, {
"m",

// Language to use for programming or text generation
'lang'
'lang',

// Dimensions for embedding
"dims",
],
});

// --- Parse Args ---
const DEFAULT_MODEL = "gpt-4-turbo-preview";
const DEFAULT_WPM = 800;
const DEFAULT_WPM = 1200;
const AVG_CHARS_PER_WORD = 4.8;
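
Assuming these constants still pace the simulated typing of streamed output, the change from 800 to 1200 WPM raises the rate from roughly 800 × 4.8 / 60 ≈ 64 to roughly 1200 × 4.8 / 60 ≈ 96 characters per second.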

const help = args.help;
@@ -153,6 +159,8 @@ const asSys = args["as-sys"] || args["as-system"];
const bumpSys = args["as-sys"] || args["as-system"];
const lang = args.lang
const img = args.img || args.i
const embed = args.embed
const dims = args.dims
// --- END Parse Args ---

let config = await loadConfig();
@@ -201,8 +209,12 @@ Options:
-f, --fast Use GPT-3.5-turbo model (faster)
--repl Start a continuous conversation
--code Output code instead of chat text
--lang LANG Set the language for programming or text generation
--bump-sys Bump the most recent system prompt/context to front
-i, --img Output an image instead of text
--embed Output a vector embedding of the input
--dims Set the dimensions of the embedding
-n, --name NAME Select a conversation from history to use
--sys[tem] Set a system prompt/context
Examples:
if (debug) {
aiConfig.debug = true;
}
if (embed) {
const newReq: EmbeddingRequest = {
input: messageContent,
model: model ?? 'text-embedding-3-small',
...(dims ? {dimensions: Number(dims)} : {})
};

const response = await getEmbeddingResponse(newReq);
if (response) {
console.log(JSON.stringify(response));
} else {
console.log("(Failed to generate embedding)");
}
Deno.exit();
}
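
Note that the embedding branch prints the vector with `JSON.stringify` and exits via `Deno.exit()` before the image and chat paths run, so `--embed` output is a single machine-readable JSON array rather than streamed chat text.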
if (img) {
const newReq: CreateImageRequest = {
quality: "hd",
