2488 Novita AI LLM integration #2524

Open
wants to merge 4 commits into base: master
1 change: 1 addition & 0 deletions .vscode/settings.json
@@ -33,6 +33,7 @@
"Mintplex",
"mixtral",
"moderations",
"novita",
"numpages",
"Ollama",
"Oobabooga",
1 change: 1 addition & 0 deletions README.md
@@ -96,6 +96,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [Text Generation Web UI](https://github.com/oobabooga/text-generation-webui)
- [Apipie](https://apipie.ai/)
- [xAI](https://x.ai/)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)

**Embedder models:**

4 changes: 4 additions & 0 deletions docker/.env.example
@@ -90,6 +90,10 @@ GID='1000'
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
# LITE_LLM_API_KEY='sk-123abc'

# LLM_PROVIDER='novita'
# NOVITA_API_KEY='your-novita-api-key-here' # get your key at https://novita.ai/settings#key-management
# NOVITA_MODEL_PREF='gryphe/mythomax-l2-13b'

# LLM_PROVIDER='cohere'
# COHERE_API_KEY=
# COHERE_MODEL_PREF='command-r'
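The server-side dispatch that consumes LLM_PROVIDER='novita' is not shown in this diff. A minimal sketch of the assumed flow follows; the class name, require path, and helper name are illustrative only, not code from this PR.

// Sketch only: assumed provider dispatch, all names below are illustrative.
const { NovitaLLM } = require("../AiProviders/novita"); // hypothetical path

function getLLMProviderSketch(embedder = null) {
  switch (process.env.LLM_PROVIDER) {
    case "novita":
      // Pass through the NOVITA_* settings declared above.
      return new NovitaLLM(embedder, process.env.NOVITA_MODEL_PREF);
    // ...other providers elided
    default:
      throw new Error(`Unknown LLM_PROVIDER: ${process.env.LLM_PROVIDER}`);
  }
}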
142 changes: 142 additions & 0 deletions frontend/src/components/LLMSelection/NovitaLLMOptions/index.jsx
@@ -0,0 +1,142 @@
import System from "@/models/system";
import { CaretDown, CaretUp } from "@phosphor-icons/react";
import { useState, useEffect } from "react";

export default function NovitaLLMOptions({ settings }) {
  return (
    <div className="flex flex-col gap-y-4 mt-1.5">
      <div className="flex gap-[36px]">
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-3">
            Novita API Key
          </label>
          <input
            type="password"
            name="NovitaApiKey"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
            placeholder="Novita API Key"
            defaultValue={settings?.NovitaApiKey ? "*".repeat(20) : ""}
            required={true}
            autoComplete="off"
            spellCheck={false}
          />
        </div>
        {!settings?.credentialsOnly && (
          <NovitaModelSelection settings={settings} />
        )}
      </div>
      <AdvancedControls settings={settings} />
    </div>
  );
}

function AdvancedControls({ settings }) {
  const [showAdvancedControls, setShowAdvancedControls] = useState(false);

  return (
    <div className="flex flex-col gap-y-4">
      <button
        type="button"
        onClick={() => setShowAdvancedControls(!showAdvancedControls)}
        className="text-white hover:text-white/70 flex items-center text-sm"
      >
        {showAdvancedControls ? "Hide" : "Show"} advanced controls
        {showAdvancedControls ? (
          <CaretUp size={14} className="ml-1" />
        ) : (
          <CaretDown size={14} className="ml-1" />
        )}
      </button>
      <div hidden={!showAdvancedControls}>
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-3">
            Stream Timeout (ms)
          </label>
          <input
            type="number"
            name="NovitaTimeout"
            className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
            placeholder="Timeout value between token responses to auto-timeout the stream"
            defaultValue={settings?.NovitaTimeout ?? 500}
            autoComplete="off"
            onScroll={(e) => e.target.blur()}
            min={500}
            step={1}
          />
        </div>
      </div>
    </div>
  );
}

function NovitaModelSelection({ settings }) {
  const [groupedModels, setGroupedModels] = useState({});
  const [loading, setLoading] = useState(true);

  useEffect(() => {
    async function findCustomModels() {
      setLoading(true);
      const { models } = await System.customModels("novita");
      if (models?.length > 0) {
        const modelsByOrganization = models.reduce((acc, model) => {
          acc[model.organization] = acc[model.organization] || [];
          acc[model.organization].push(model);
          return acc;
        }, {});

        setGroupedModels(modelsByOrganization);
      }

      setLoading(false);
    }
    findCustomModels();
  }, []);

  if (loading || Object.keys(groupedModels).length === 0) {
    return (
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-3">
          Chat Model Selection
        </label>
        <select
          name="NovitaModelPref"
          disabled={true}
          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
        >
          <option disabled={true} selected={true}>
            -- loading available models --
          </option>
        </select>
      </div>
    );
  }

  return (
    <div className="flex flex-col w-60">
      <label className="text-white text-sm font-semibold block mb-3">
        Chat Model Selection
      </label>
      <select
        name="NovitaModelPref"
        required={true}
        className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
      >
        {Object.keys(groupedModels)
          .sort()
          .map((organization) => (
            <optgroup key={organization} label={organization}>
              {groupedModels[organization].map((model) => (
                <option
                  key={model.id}
                  value={model.id}
                  selected={settings?.NovitaModelPref === model.id}
                >
                  {model.name}
                </option>
              ))}
            </optgroup>
          ))}
      </select>
    </div>
  );
}
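A hedged sketch of the payload NovitaModelSelection expects back from System.customModels("novita"): the field names come from the component above, while the concrete model entries are illustrative examples rather than an authoritative list.

// Illustrative response shape only (ids and display names are examples):
const exampleResponse = {
  models: [
    { id: "gryphe/mythomax-l2-13b", name: "MythoMax L2 13B", organization: "gryphe" },
    { id: "meta-llama/llama-3.1-8b-instruct", name: "Llama 3.1 8B Instruct", organization: "meta-llama" },
  ],
};
// The component reduces over `organization` to build one <optgroup> per vendor.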
8 changes: 7 additions & 1 deletion frontend/src/hooks/useGetProvidersModels.js
@@ -63,7 +63,13 @@ function groupModels(models) {
  }, {});
}

const groupedProviders = ["togetherai", "fireworksai", "openai", "openrouter"];
const groupedProviders = [
  "togetherai",
  "fireworksai",
  "openai",
  "novita",
  "openrouter",
];
export default function useGetProviderModels(provider = null) {
  const [defaultModels, setDefaultModels] = useState([]);
  const [customModels, setCustomModels] = useState([]);
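For context, a hedged sketch of how the hook presumably uses this list (the hook body is not shown here, so the two lines below are an assumption, not the actual implementation): providers in groupedProviders get their custom models grouped by organization, everything else stays a flat list.

// Assumed usage inside useGetProviderModels (not taken from this PR):
const shouldGroup = groupedProviders.includes(provider);
const models = shouldGroup ? groupModels(customModels) : customModels;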
Binary file added frontend/src/media/llmprovider/novita.png
11 changes: 11 additions & 0 deletions frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -11,6 +11,7 @@ import AzureOpenAiLogo from "@/media/llmprovider/azure.png";
import AnthropicLogo from "@/media/llmprovider/anthropic.png";
import GeminiLogo from "@/media/llmprovider/gemini.png";
import OllamaLogo from "@/media/llmprovider/ollama.png";
import NovitaLogo from "@/media/llmprovider/novita.png";
import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
@@ -39,6 +40,7 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import NovitaLLMOptions from "@/components/LLMSelection/NovitaLLMOptions";
import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
import FireworksAiOptions from "@/components/LLMSelection/FireworksAiOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
@@ -113,6 +115,15 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run LLMs locally on your own machine.",
requiredConfig: ["OllamaLLMBasePath"],
},
{
name: "Novita AI",
value: "novita",
logo: NovitaLogo,
options: (settings) => <NovitaLLMOptions settings={settings} />,
description:
"Reliable, Scalable, and Cost-Effective for LLMs from Novita AI",
requiredConfig: ["NovitaAiApiKey"],
},
{
name: "LM Studio",
value: "lmstudio",
@@ -15,6 +15,7 @@ import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
import PerplexityLogo from "@/media/llmprovider/perplexity.png";
import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
import NovitaLogo from "@/media/llmprovider/novita.png";
import GroqLogo from "@/media/llmprovider/groq.png";
import KoboldCPPLogo from "@/media/llmprovider/koboldcpp.png";
import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
@@ -149,6 +150,14 @@ export const LLM_SELECTION_PRIVACY = {
    ],
    logo: OpenRouterLogo,
  },
  novita: {
    name: "Novita AI",
    description: [
      "Your chats will not be used for training",
      "Your prompts and document text used in response creation are visible to Novita AI",
    ],
    logo: NovitaLogo,
  },
  groq: {
    name: "Groq",
    description: [
10 changes: 10 additions & 0 deletions frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -22,6 +22,7 @@ import LiteLLMLogo from "@/media/llmprovider/litellm.png";
import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
import APIPieLogo from "@/media/llmprovider/apipie.png";
import NovitaLogo from "@/media/llmprovider/novita.png";
import XAILogo from "@/media/llmprovider/xai.png";

import CohereLogo from "@/media/llmprovider/cohere.png";
@@ -48,6 +49,7 @@ import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
import NovitaLLMOptions from "@/components/LLMSelection/NovitaLLMOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";

import LLMItem from "@/components/LLMSelection/LLMItem";
@@ -104,6 +106,14 @@ const LLMS = [
    options: (settings) => <OllamaLLMOptions settings={settings} />,
    description: "Run LLMs locally on your own machine.",
  },
  {
    name: "Novita AI",
    value: "novita",
    logo: NovitaLogo,
    options: (settings) => <NovitaLLMOptions settings={settings} />,
    description:
      "Reliable, scalable, and cost-effective LLM inference from Novita AI.",
  },
  {
    name: "LM Studio",
    value: "lmstudio",
@@ -17,6 +17,7 @@ const ENABLED_PROVIDERS = [
"koboldcpp",
"togetherai",
"openrouter",
"novita",
"mistral",
"perplexity",
"textgenwebui",
@@ -40,6 +41,7 @@ const WARN_PERFORMANCE = [
"ollama",
"localai",
"openrouter",
"novita",
"generic-openai",
"textgenwebui",
];
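A hedged guess at what these two lists gate, since the surrounding component is not included in this excerpt: ENABLED_PROVIDERS appears to whitelist providers selectable for agent use, and WARN_PERFORMANCE appears to flag providers that should surface a performance caveat. The snippet below is illustrative only; the variable names are not from the PR.

// Illustrative only; not PR code.
const selectable = ENABLED_PROVIDERS.includes(provider);
const showPerformanceWarning = WARN_PERFORMANCE.includes(provider);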
1 change: 1 addition & 0 deletions locales/README.ja-JP.md
@@ -85,6 +85,7 @@ AnythingLLMのいくつかのクールな機能
- [Fireworks AI (チャットモデル)](https://fireworks.ai/)
- [Perplexity (チャットモデル)](https://www.perplexity.ai/)
- [OpenRouter (チャットモデル)](https://openrouter.ai/)
- [Novita AI (チャットモデル)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [Mistral](https://mistral.ai/)
- [Groq](https://groq.com/)
- [Cohere](https://cohere.com/)
1 change: 1 addition & 0 deletions locales/README.zh-CN.md
@@ -81,6 +81,7 @@ AnythingLLM的一些酷炫特性
- [Fireworks AI (聊天模型)](https://fireworks.ai/)
- [Perplexity (聊天模型)](https://www.perplexity.ai/)
- [OpenRouter (聊天模型)](https://openrouter.ai/)
- [Novita AI (聊天模型)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [Mistral](https://mistral.ai/)
- [Groq](https://groq.com/)
- [Cohere](https://cohere.com/)
4 changes: 4 additions & 0 deletions server/.env.example
@@ -91,6 +91,10 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
# LITE_LLM_API_KEY='sk-123abc'

# LLM_PROVIDER='novita'
# NOVITA_API_KEY='your-novita-api-key-here' # get your key at https://novita.ai/settings#key-management
# NOVITA_MODEL_PREF='gryphe/mythomax-l2-13b'

# LLM_PROVIDER='cohere'
# COHERE_API_KEY=
# COHERE_MODEL_PREF='command-r'
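The server-side Novita provider wrapper itself is outside this excerpt. As a hedged sketch, Novita exposes an OpenAI-compatible API, so these variables could be consumed roughly as follows; the base URL and wrapper shape are assumptions, not code from this PR.

// Sketch only: assumes the `openai` npm package and Novita's OpenAI-compatible endpoint.
const OpenAI = require("openai");

const client = new OpenAI({
  baseURL: "https://api.novita.ai/v3/openai", // assumed endpoint
  apiKey: process.env.NOVITA_API_KEY,
});

async function chat(messages) {
  // messages e.g. [{ role: "user", content: "Hello" }]
  const response = await client.chat.completions.create({
    model: process.env.NOVITA_MODEL_PREF ?? "gryphe/mythomax-l2-13b",
    messages,
  });
  return response.choices[0].message.content;
}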
5 changes: 5 additions & 0 deletions server/models/systemSettings.js
@@ -448,6 +448,11 @@
      OllamaLLMKeepAliveSeconds: process.env.OLLAMA_KEEP_ALIVE_TIMEOUT ?? 300,
      OllamaLLMPerformanceMode: process.env.OLLAMA_PERFORMANCE_MODE ?? "base",

      // Novita LLM Keys
      NovitaApiKey: !!process.env.NOVITA_API_KEY,
      NovitaModelPref: process.env.NOVITA_MODEL_PREF,
      NovitaTimeout: process.env.NOVITA_TIMEOUT_MS,

      // TogetherAI Keys
      TogetherAiApiKey: !!process.env.TOGETHER_AI_API_KEY,
      TogetherAiModelPref: process.env.TOGETHER_AI_MODEL_PREF,
3 changes: 2 additions & 1 deletion server/storage/models/.gitignore
@@ -2,4 +2,5 @@ Xenova
downloaded/*
!downloaded/.placeholder
openrouter
apipie
apipie
novita