
add gpt-4-turbo #97

Merged 3 commits on Nov 23, 2023

Changes from all commits
2 changes: 1 addition & 1 deletion lerna.json
@@ -1,4 +1,4 @@
{
"$schema": "node_modules/lerna/schemas/lerna-schema.json",
"version": "0.22.1"
"version": "0.23.0"
}
30 changes: 15 additions & 15 deletions package-lock.json

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions packages/@pufflig/ps-chains/package.json
@@ -1,6 +1,6 @@
{
"name": "@pufflig/ps-chains",
"version": "0.22.0",
"version": "0.23.0",
"license": "MIT",
"main": "./dist/ps-chains.umd.js",
"module": "./dist/ps-chains.es.js",
@@ -16,7 +16,7 @@
"test": "jest"
},
"devDependencies": {
"@pufflig/ps-types": "^0.22.0",
"@pufflig/ps-types": "^0.23.0",
"@types/react-dom": "^18.2.7",
"immer": "^10.0.2",
"prop-types": "^15.8.1",
4 changes: 2 additions & 2 deletions packages/@pufflig/ps-models/package.json
@@ -1,7 +1,7 @@
{
"name": "@pufflig/ps-models",
"private": false,
"version": "0.22.0",
"version": "0.23.0",
"description": "Configuration of models used in Prompt Studio",
"files": [
"dist"
@@ -16,7 +16,7 @@
"author": "Pufflig AB",
"license": "MIT",
"devDependencies": {
"@pufflig/ps-types": "^0.22.0",
"@pufflig/ps-types": "^0.23.0",
"typescript": "^5.2.2",
"vite": "^4.3.9",
"vite-plugin-dts": "^2.3.0"
65 changes: 65 additions & 0 deletions packages/@pufflig/ps-models/src/models/openai.ts
@@ -207,6 +207,71 @@ export const openai_completion: ModelDefinition = {
};

export const openai_chat: ModelDefinition = {
"gpt-4-1106-preview": {
modelId: "gpt-4-1106-preview",
description: "More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat.",
settings: openai_settings,
streaming: true,
contextLength: 4096,
parameters: [
{
id: "temperature",
type: "number",
name: "Temperature",
max: 2,
min: 0,
step: 0.1,
defaultValue: 0.4,
description:
"What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
},
{
id: "max_tokens",
type: "number",
name: "Max Tokens",
// despite the larger documented context window, this model caps generated output at 4096 tokens
max: 4096,
min: 1,
step: 20,
defaultValue: 1024,
description:
"The maximum number of tokens to generate in the completion. The total length of input tokens and generated tokens is limited by the model's context length.",
},
{
id: "top_p",
type: "number",
name: "Top P",
max: 1,
min: 0,
step: 0.1,
defaultValue: 1,
description:
"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
},
{
id: "frequency_penalty",
type: "number",
name: "Frequency penalty",
max: 2,
min: -2,
step: 0.1,
defaultValue: 0,
description:
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
},
{
id: "presence_penalty",
type: "number",
name: "Presence penalty",
max: 2,
min: -2,
step: 0.1,
defaultValue: 0,
description:
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
},
],
},
"gpt-4": {
modelId: "gpt-4",
description: "More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat.",
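Review note (not part of the diff): the new gpt-4-1106-preview entry only declares parameter metadata; a consumer still has to collapse it into request options. A minimal TypeScript sketch of that step, assuming an entry shaped like the one above; ParameterDef, ModelEntry and defaultRequestOptions are illustrative names, not exports of @pufflig/ps-types or @pufflig/ps-models.

// Sketch: collapse a model entry's parameter list into default request options.
interface ParameterDef {
  id: string;
  defaultValue: number;
}

interface ModelEntry {
  modelId: string;
  contextLength: number;
  parameters: ParameterDef[];
}

function defaultRequestOptions(model: ModelEntry): Record<string, number> {
  // For the entry above this yields { temperature: 0.4, max_tokens: 1024, top_p: 1, frequency_penalty: 0, presence_penalty: 0 }.
  return model.parameters.reduce<Record<string, number>>((acc, p) => {
    acc[p.id] = p.defaultValue;
    return acc;
  }, {});
}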
6 changes: 3 additions & 3 deletions packages/@pufflig/ps-nodes-config/package.json
@@ -1,7 +1,7 @@
{
"name": "@pufflig/ps-nodes-config",
"private": false,
"version": "0.22.0",
"version": "0.23.0",
"description": "Configuration files for nodes used in prompt studio.",
"files": [
"dist"
@@ -16,10 +16,10 @@
"author": "Pufflig AB",
"license": "MIT",
"dependencies": {
"@pufflig/ps-models": "^0.22.0"
"@pufflig/ps-models": "^0.23.0"
},
"devDependencies": {
"@pufflig/ps-types": "^0.22.0",
"@pufflig/ps-types": "^0.23.0",
"@types/jest": "^29.5.8",
"jest": "^29.7.0",
"ts-jest": "^29.1.0",
@@ -1,11 +1,11 @@
-import { chat_models, completion_models, default_completion_model } from "@pufflig/ps-models";
import { NodeConfig } from "@pufflig/ps-types";
+import { default_model, models } from "../../constants";

export const documentCheckNodeType = "modifier/document_check" as const;

export const documentCheck: NodeConfig = {
name: "Document Check",
description: "Run a checklist or extract information from a document.",
name: "Document Checklist",
description: "Run a checklist on a document.",
tags: ["adapter", "document", "text"],
status: "stable",
execution: {
@@ -33,12 +33,12 @@ export const documentCheck: NodeConfig = {
inputs: [
{
id: "model",
name: "Model",
name: "AI Settings",
description: "The model to use",
type: "model",
definition: { ...completion_models, ...chat_models },
definition: models,
defaultValue: {
modelId: default_completion_model,
modelId: default_model,
parameters: {},
},
},
@@ -1,5 +1,6 @@
-import { default_completion_model, completion_models } from "@pufflig/ps-models";
+import { models } from "@pufflig/ps-models";
import { NodeConfig } from "@pufflig/ps-types";
+import { default_model } from "../../constants";

export const llmCompletionNodeType = "adapter/llm_completion" as const;

@@ -34,12 +35,12 @@ export const llmCompletionConfig: NodeConfig = {
inputs: [
{
id: "model",
name: "Model",
name: "AI Settings",
description: "The model to use",
type: "model",
definition: completion_models,
definition: models,
defaultValue: {
modelId: default_completion_model,
modelId: default_model,
parameters: {},
},
},
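Both node configs now read the shared models record and default_model constant introduced in src/constants.ts (next file). A small sketch, assuming those two exports, of a sanity check a test could run to confirm the default model survives the allow-list filter; assertDefaultModelAvailable is a hypothetical helper, not part of this package.

// Sketch; assumes: import { default_model, models } from "../../constants";
// If default_model is ever dropped from available_models, every node's
// "AI Settings" default would point at a missing entry.
export function assertDefaultModelAvailable(): void {
  if (!(default_model in models)) {
    throw new Error(`default_model "${default_model}" is not in the filtered models record`);
  }
}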
16 changes: 16 additions & 0 deletions packages/@pufflig/ps-nodes-config/src/constants.ts
@@ -1 +1,17 @@
import { chat_models, completion_models } from "@pufflig/ps-models";

export const default_model = "gpt-3.5-turbo-instruct";

export const available_models = [
"gpt-3.5-turbo-instruct",
"gpt-4-1106-preview",
"anthropic/claude-2",
"meta-llama/llama-2-13b-chat",
];

export const models = Object.values({ ...completion_models, ...chat_models })
.filter((model) => available_models.includes(model.modelId))
.reduce((acc, model) => {
acc[model.modelId] = model;
return acc;
}, {} as Record<string, any>);
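The filter/reduce above re-keys the allow-listed entries by modelId. A rough equivalent using Object.fromEntries, shown only to illustrate the resulting shape (same imports as the file; models_alt is a hypothetical name, not a proposed change):

// Illustrative equivalent of the models record built above.
const models_alt: Record<string, any> = Object.fromEntries(
  Object.values({ ...completion_models, ...chat_models })
    .filter((model) => available_models.includes(model.modelId))
    .map((model) => [model.modelId, model])
);

// With the allow-list above, Object.keys(models) should be:
// "gpt-3.5-turbo-instruct", "gpt-4-1106-preview", "anthropic/claude-2", "meta-llama/llama-2-13b-chat"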
10 changes: 5 additions & 5 deletions packages/@pufflig/ps-nodes/package.json
@@ -1,7 +1,7 @@
{
"name": "@pufflig/ps-nodes",
"private": false,
"version": "0.22.1",
"version": "0.23.0",
"description": "Collection of nodes used in Prompt Studio",
"files": [
"dist"
@@ -17,7 +17,7 @@
"author": "Pufflig AB",
"license": "MIT",
"devDependencies": {
"@pufflig/ps-types": "^0.22.0",
"@pufflig/ps-types": "^0.23.0",
"@types/jest": "^29.5.8",
"@types/lodash": "^4.14.196",
"@types/mustache": "^4.2.2",
@@ -33,9 +33,9 @@
},
"dependencies": {
"@dqbd/tiktoken": "^1.0.7",
"@pufflig/ps-models": "^0.22.0",
"@pufflig/ps-nodes-config": "^0.22.0",
"@pufflig/ps-sdk": "^0.22.0",
"@pufflig/ps-models": "^0.23.0",
"@pufflig/ps-nodes-config": "^0.23.0",
"@pufflig/ps-sdk": "^0.23.0",
"axios": "^1.6.2",
"langchain": "^0.0.193",
"lodash": "^4.17.21",