Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: move utils #4

Merged
merged 15 commits into from
May 8, 2023
8 changes: 6 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,19 @@ Hyv is a modular software development library centered on AI collaboration that
>
> Please look at the [examples](examples)
## Lingo

Hyv works great with [Lingo](https://github.com/failfa-st/lingo/), a pseudo language for large language models (LLMs)

```shell
npm install "@hyv/core" "@hyv/openai" "@hyv/store"
npm install "@hyv/core" "@hyv/openai"
```

```typescript
import { Agent, sequence } from "@hyv/core";
import { GPTModelAdapter } from "@hyv/openai";

const agent = new Agent(new GPTModelAdapter());
const agent = new Agent(new GPTModelAdapter(), {verbosity: 1});

try {
await sequence({ question: "What is life?" }, [agent]);
Expand Down
27 changes: 14 additions & 13 deletions examples/auto-book.ts
Original file line number Diff line number Diff line change
@@ -1,17 +1,19 @@
import path from "node:path";

import type { FileContentWithPath, SideEffect, ModelMessage } from "@hyv/core";
import { Agent, createInstruction, minify, sequence, createFileWriter, writeFile } from "@hyv/core";
import type { GPT4Options } from "@hyv/openai";
import { GPTModelAdapter } from "@hyv/openai";
import type { ModelMessage } from "@hyv/core";
import { Agent, sequence } from "@hyv/core";
import { createInstruction, GPTModelAdapter } from "@hyv/openai";
import type { FilesMessage } from "@hyv/stable-diffusion";
import { Automatic1111ModelAdapter } from "@hyv/stable-diffusion";
import { minify, createFileWriter, writeFile } from "@hyv/utils";
import type { FileContentWithPath, SideEffect } from "@hyv/utils";

/**
* Creates a file writer for writing output files and adds reading time.
*
* @param {string} dir - The directory where the output files should be written.
* @param {BufferEncoding} [encoding="utf-8"] - the encoding that should be used when writing files
* @returns {SideEffect} - The file writer instance.
* @param dir - The directory where the output files should be written.
* @param [encoding="utf-8"] - the encoding that should be used when writing files
* @returns - The file writer instance.
*/
export function createFileWriterWithReadingTime(
dir: string,
Expand Down Expand Up @@ -40,7 +42,7 @@ const fileWriter = createFileWriterWithReadingTime(dir);
const imageWriter = createFileWriter(dir, "base64");

const bookAgent = new Agent(
new GPTModelAdapter<GPT4Options>({
new GPTModelAdapter({
model: "gpt-4",
maxTokens: 1024,
systemInstruction: createInstruction(
Expand Down Expand Up @@ -78,8 +80,8 @@ const bookAgent = new Agent(
* alternating between left and right alignment.
* Additionally, it ensures the headings are separated from the images.
*
* @param {string} inputText - The input markdown text.
* @returns {string} - The modified Markdown text with floating images and separated headings.
* @param inputText - The input markdown text.
* @returns - The modified Markdown text with floating images and separated headings.
*/
function makeFloatingImages(inputText: string) {
let count = 0;
Expand Down Expand Up @@ -122,7 +124,7 @@ function getWordCount(text: string) {
}

const author = new Agent(
new GPTModelAdapter<GPT4Options>({
new GPTModelAdapter({
model: "gpt-4",
maxTokens: 4096,
systemInstruction: createInstruction(
Expand Down Expand Up @@ -171,7 +173,7 @@ const author = new Agent(
},
};
},
async after(message) {
async after(message: FilesMessage) {
return {
...message,
files: message.files.map(file => ({
Expand All @@ -188,7 +190,6 @@ const author = new Agent(
const illustrator = new Agent(
new Automatic1111ModelAdapter({
seed: Math.floor(Math.random() * 1_000_000) + 1,
model: "Undertone_v1.safetensors",
}),
{
sideEffects: [imageWriter],
Expand Down
19 changes: 11 additions & 8 deletions examples/auto-tweet.ts
Original file line number Diff line number Diff line change
@@ -1,16 +1,16 @@
import type { ModelMessage } from "@hyv/core";
import { Agent, createInstruction, minify, sequence, createFileWriter } from "@hyv/core";
import type { GPT4Options } from "@hyv/openai";
import { GPTModelAdapter } from "@hyv/openai";
import { Agent, sequence } from "@hyv/core";
import { createInstruction, GPTModelAdapter } from "@hyv/openai";
import type { ImageMessage } from "@hyv/stable-diffusion";
import { Automatic1111ModelAdapter } from "@hyv/stable-diffusion";
import { minify, createFileWriter } from "@hyv/utils";

const dir = `out/auto-tweet/${Date.now()}`;
const fileWriter = createFileWriter(dir);
const imageWriter = createFileWriter(dir, "base64");

const termAgent = new Agent(
new GPTModelAdapter<GPT4Options>({
new GPTModelAdapter({
model: "gpt-4",
maxTokens: 1024,
temperature: 0.8,
Expand Down Expand Up @@ -39,11 +39,14 @@ const termAgent = new Agent(
},
}
),
})
}),
{
verbosity: 1,
}
);

const tweeter = new Agent(
new GPTModelAdapter<GPT4Options>({
new GPTModelAdapter({
model: "gpt-4",
maxTokens: 1024,
systemInstruction: createInstruction(
Expand Down Expand Up @@ -89,6 +92,7 @@ const tweeter = new Agent(
),
}),
{
verbosity: 1,
sideEffects: [fileWriter],
async before(message: ModelMessage & { instructions: Record<string, unknown> }) {
return {
Expand All @@ -109,8 +113,7 @@ const tweeter = new Agent(

const illustrator = new Agent(new Automatic1111ModelAdapter(), {
sideEffects: [imageWriter],

async before(message: ModelMessage & ImageMessage) {
async before(message: ImageMessage): Promise<ImageMessage> {
return {
...message,
images: message.images.map(image => ({
Expand Down
8 changes: 5 additions & 3 deletions examples/book.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import { Agent, createFileWriter, createInstruction, minify, sequence } from "@hyv/core";
import { DallEModelAdapter, GPTModelAdapter } from "@hyv/openai";
import { Agent, sequence } from "@hyv/core";
import type { FilesMessage } from "@hyv/openai";
import { createInstruction, DallEModelAdapter, GPTModelAdapter } from "@hyv/openai";
import { createFileWriter, minify } from "@hyv/utils";
import slugify from "@sindresorhus/slugify";

const title = "Utopia";
Expand Down Expand Up @@ -39,7 +41,7 @@ function getWordCount(text: string) {
}

// Give the agent some tools
author.after = async message => ({
author.after = async (message: FilesMessage) => ({
...message,
files: message.files.map(file => ({
...file,
Expand Down
50 changes: 50 additions & 0 deletions examples/chaining.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
import type { ModelMessage } from "@hyv/core";
import { Agent, sequence } from "@hyv/core";
import { createInstruction, GPTModelAdapter } from "@hyv/openai";
import { minify } from "@hyv/utils";

/**
 * System instruction for a chain-of-thought style agent: the model is told to
 * think, reason, reflect, debate, decide, and finally emit a new task for the
 * next agent in the chain.
 *
 * The trailing object is the response schema `createInstruction` asks the
 * model to fill in; "={{mainGoal}}" presumably carries the original goal
 * through every hop unchanged — TODO confirm against createInstruction docs.
 */
const systemInstruction = createInstruction(
	"AI",
	// Fixed typo in the prompt: "decide base on" -> "decide based on".
	minify`
	You have deep thoughts,
	reason your thoughts,
	reflect your reasoning,
	debate your reflection,
	decide based on your debate.
	You then create a new task based on your decision!
	`,
	{
		mainGoal: "={{mainGoal}}",
		thoughts: "detailed string",
		reasoning: "detailed string",
		reflection: "detailed string",
		debate: "detailed string",
		decision: "detailed string",
		task: "full and detailed task without references",
	}
);

/**
 * Pre-processing hook shared by every agent in the chain: keeps only the
 * `task` and `mainGoal` properties of the incoming message, dropping the
 * previous agent's thoughts/reasoning/etc. before the next call.
 *
 * @param message - The message produced by the previous agent.
 * @returns - A message containing only `task` and `mainGoal`.
 */
async function before(message: ModelMessage) {
	const { task, mainGoal } = message;
	return { task, mainGoal };
}

/**
 * Three identical GPT-4 agents, each sharing the same system instruction and
 * the same `before` hook; `sequence` runs them one after another.
 */
const agents = [0, 1, 2].map(
	() =>
		new Agent(new GPTModelAdapter({ model: "gpt-4", systemInstruction }), {
			before,
			verbosity: 1,
		})
);

// Kick off the chain. The initial task doubles as the main goal so every
// agent's `before` hook can keep re-injecting it. Errors from any agent in
// the sequence are reported rather than crashing the process.
const mainGoal = "Make the world a better place!";

try {
	await sequence({ task: mainGoal, mainGoal }, agents);
} catch (error) {
	console.error("Error:", error);
}
3 changes: 2 additions & 1 deletion examples/dall-e.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import { Agent, createFileWriter, sequence } from "@hyv/core";
import { Agent, sequence } from "@hyv/core";
import { DallEModelAdapter } from "@hyv/openai";
import { createFileWriter } from "@hyv/utils";

const imageWriter = createFileWriter(`out/dall-e/${Date.now()}`, "base64");

Expand Down
2 changes: 1 addition & 1 deletion examples/simple.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import { Agent, sequence } from "@hyv/core";
import { GPTModelAdapter } from "@hyv/openai";

const agent = new Agent(new GPTModelAdapter());
const agent = new Agent(new GPTModelAdapter(), { verbosity: 2 });

try {
await sequence({ question: "What is life?" }, [agent]);
Expand Down
3 changes: 2 additions & 1 deletion examples/stable-diffusion.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import { Agent, createFileWriter, sequence } from "@hyv/core";
import { Agent, sequence } from "@hyv/core";
import { Automatic1111ModelAdapter } from "@hyv/stable-diffusion";
import { createFileWriter } from "@hyv/utils";

const imageWriter = createFileWriter(`out/stable-diffusion/${Date.now()}`, "base64");

Expand Down
50 changes: 50 additions & 0 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
"demo:auto-tweet": "ts-node-esm examples/auto-tweet.ts",
"demo:book": "ts-node-esm examples/book.ts",
"demo:simple": "ts-node-esm examples/simple.ts",
"demo:chaining": "ts-node-esm examples/chaining.ts",
"demo:stable-diffusion": "ts-node-esm examples/stable-diffusion.ts",
"demo:dall-e": "ts-node-esm examples/dall-e.ts",
"prebuild": "npm run clean",
Expand Down
3 changes: 3 additions & 0 deletions packages/core/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,9 @@
"clean": "npx rimraf dist"
},
"dependencies": {
"@hyv/utils": "^0.3.0",
"chalk": "5.2.0",
"humanize-string": "3.0.0",
"lru-cache": "9.1.1",
"nanoid": "4.0.2"
},
Expand Down
Loading