diff --git a/.github/workflows/swift.yml b/.github/workflows/swift.yml
index 5b80bbf..f8b34ed 100644
--- a/.github/workflows/swift.yml
+++ b/.github/workflows/swift.yml
@@ -12,7 +12,7 @@ on:
jobs:
build:
- runs-on: macos-13
+ runs-on: macOS-latest
steps:
- uses: actions/checkout@v4
diff --git a/Package.resolved b/Package.resolved
index b5382dd..c7e0485 100644
--- a/Package.resolved
+++ b/Package.resolved
@@ -63,6 +63,15 @@
"version" : "4.2.2"
}
},
+ {
+ "identity" : "llmfarm_core.swift",
+ "kind" : "remoteSourceControl",
+ "location" : "https://github.com/buhe/llmfarm_core.swift",
+ "state" : {
+ "branch" : "langchain",
+ "revision" : "927d670751bc8aebbc5eb845afd36fe1eeef4f5a"
+ }
+ },
{
"identity" : "openai-kit",
"kind" : "remoteSourceControl",
diff --git a/Package.swift b/Package.swift
index e0def42..be04c96 100644
--- a/Package.swift
+++ b/Package.swift
@@ -26,6 +26,7 @@ let package = Package(
.package(url: "https://github.com/juyan/swift-filestore", .upToNextMajor(from: "0.5.0")),
.package(url: "https://github.com/ZachNagengast/similarity-search-kit.git", from: "0.0.11"),
.package(url: "https://github.com/google/generative-ai-swift", .upToNextMajor(from: "0.4.4")),
+ .package(url: "https://github.com/buhe/llmfarm_core.swift", .branch("langchain")),
],
targets: [
// Targets are the basic building blocks of a package, defining a module or a test suite.
@@ -41,6 +42,7 @@ let package = Package(
.product(name: "SwiftFileStore", package: "swift-filestore"),
.product(name: "SimilaritySearchKit", package: "similarity-search-kit", condition: .when(platforms: [.macOS, .iOS, .visionOS])),
.product(name: "GoogleGenerativeAI", package: "generative-ai-swift"),
+ .product(name: "llmfarm_core", package: "llmfarm_core.swift"),
]
),
diff --git a/README.md b/README.md
index e2d6633..c968773 100644
--- a/README.md
+++ b/README.md
@@ -30,6 +30,25 @@ LMSTUDIO_URL=xxx
```
## Get stated
+
+
+🔥 Local Model
+
+Code
+
+```swift
+ Task {
+ if let envPath = Bundle.main.path(forResource: "stablelm-3b-4e1t-Q4_K_M", ofType: "txt") {
+ let local = Local(inference: .GPTNeox_gguf, modelPath: envPath, useMetal: true)
+ let r = await local.generate(text: "hi")
+ print("🥰\(r!.llm_output!)")
+ } else {
+ print("⚠️ lost model")
+ }
+
+ }
+```
+
💬 Chatbots
@@ -364,7 +383,8 @@ Task(priority: .background) {
- [x] Baidu
- [x] Llama 2
- [x] Gemini
- - [x] LMStudio API
+ - [x] LMStudio API
+ - [x] Local Model
- Vectorstore
- [x] Supabase
- [x] SimilaritySearchKit
diff --git a/Sources/LangChain/document_loaders/BilibiliLoader.swift b/Sources/LangChain/document_loaders/BilibiliLoader.swift
index e2c9001..34efac2 100644
--- a/Sources/LangChain/document_loaders/BilibiliLoader.swift
+++ b/Sources/LangChain/document_loaders/BilibiliLoader.swift
@@ -16,7 +16,7 @@ public class BilibiliLoader: BaseLoader {
}
public override func _load() async throws -> [Document] {
- let client = BilibiliClient(credential: BilibiliCredential(sessin: "6376fa3e%2C1705926902%2C0b561%2A71gvy_TPyZMWhUweKjYGT502_5FVZdcv8bfjvwtqdlqm8UjyEiUrkPq1AodolcSjIgBXatNwAAEgA", jct: "330aaac577464e453ea1b070fd1281ea"))
+ let client = BilibiliClient(credential: BilibiliCredential(sessin: "f3fd16e8%2C1721810527%2C531af%2A12CjAJT4BNVd9Zbd9uAhx99W1XsX6gbi1Js0uHqHKZPGWzS8xLuGdX7kE5x2-DQUFacJYSVnJlYXVwbm9KY2dfS1BENHN5SzZ2NE1wQ2dKLWxqUEMwQVBVN1JiQnh3NjltTDVoM3FURjUwRWt5TDNkdXVQUzFraWVLSnJUdVdMMVhyc25ZcXhFZW53IIEC", jct: "20601a72b51f0448ada0babc5740dc90"))
let info = await client.fetchVideoInfo(bvid: videoId)
if info == nil {
throw LangChainError.LoaderError("Subtitle not exist")
diff --git a/Sources/LangChain/llms/Local.swift b/Sources/LangChain/llms/Local.swift
new file mode 100644
index 0000000..3e55cef
--- /dev/null
+++ b/Sources/LangChain/llms/Local.swift
@@ -0,0 +1,46 @@
+//
+// File.swift
+//
+//
+// Created by ι‘Ύθ³ε on 1/22/24.
+//
+import llmfarm_core
+import Foundation
+
+public class Local: LLM {
+ let modelPath: String
+ let useMetal: Bool
+ let inference: ModelInference
+
+ public init(inference: ModelInference, modelPath: String, useMetal: Bool = false, callbacks: [BaseCallbackHandler] = [], cache: BaseCache? = nil) {
+ self.inference = inference
+ self.modelPath = modelPath
+ self.useMetal = useMetal
+ super.init(callbacks: callbacks, cache: cache)
+ }
+ public override func _send(text: String, stops: [String] = []) async throws -> LLMResult {
+ let ai = AI(_modelPath: self.modelPath, _chatName: "chat")
+ var params:ModelAndContextParams = .default
+ params.use_metal = useMetal
+ params.promptFormat = .Custom
+ params.custom_prompt_format = "{{prompt}}"
+ try? ai.loadModel(inference, contextParams: params)
+ let output = try? ai.model.predict(text, mainCallback)
+// print("π\(output)")
+ total_output = 0
+ return LLMResult(llm_output: output)
+ }
+
+ let maxOutputLength = 256
+ var total_output = 0
+
+ func mainCallback(_ str: String, _ time: Double) -> Bool {
+ print("\(str)",terminator: "")
+ total_output += str.count
+ if(total_output>maxOutputLength){
+ return true
+ }
+ return false
+ }
+}
+
diff --git a/Tests/LangChainTests/langchain_swiftTests.swift b/Tests/LangChainTests/langchain_swiftTests.swift
index d3efe80..470248b 100644
--- a/Tests/LangChainTests/langchain_swiftTests.swift
+++ b/Tests/LangChainTests/langchain_swiftTests.swift
@@ -74,7 +74,7 @@ Thought: dog
}
func testBilibilClient() async throws {
- let client = BilibiliClient(credential: BilibiliCredential(sessin: "6376fa3e%2C1705926902%2C0b561%2A71gvy_TPyZMWhUweKjYGT502_5FVZdcv8bfjvwtqdlqm8UjyEiUrkPq1AodolcSjIgBXatNwAAEgA", jct: "330aaac577464e453ea1b070fd1281ea"))
+ let client = BilibiliClient(credential: BilibiliCredential(sessin: "f3fd16e8%2C1721810527%2C531af%2A12CjAJT4BNVd9Zbd9uAhx99W1XsX6gbi1Js0uHqHKZPGWzS8xLuGdX7kE5x2-DQUFacJYSVnJlYXVwbm9KY2dfS1BENHN5SzZ2NE1wQ2dKLWxqUEMwQVBVN1JiQnh3NjltTDVoM3FURjUwRWt5TDNkdXVQUzFraWVLSnJUdVdMMVhyc25ZcXhFZW53IIEC", jct: "20601a72b51f0448ada0babc5740dc90"))
let info = await client.fetchVideoInfo(bvid: "BV1iP411y7Vs")
XCTAssertNotNil(info)
XCTAssertNotEqual(info?.subtitle, "")