From 62888e9e8f3a343b6826b809cbd3d1e5edc112b1 Mon Sep 17 00:00:00 2001 From: omagdy7 Date: Wed, 29 Nov 2023 20:45:30 +0200 Subject: [PATCH] Added some refinements and quality of life changes --- README.md | 12 +++++++++--- package.json | 2 +- src/App.tsx | 20 +++++++++++--------- src/ollama.tsx | 14 +++++++------- 4 files changed, 28 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index 9f2d8b7..7fab0d2 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,3 @@ ->If this plugin helps you, I'd really appreciate your support. You can [buy me a coffee here. ](https://www.buymeacoffee.com/omagdy) # 🦙 ollama-logseq plugin A plugin to integrate [ollama](https://github.com/jmorganca/ollama) with [logseq](https://github.com/logseq/logseq) @@ -18,10 +17,15 @@ A plugin to integrate [ollama](https://github.com/jmorganca/ollama) with [logseq - Create a flash card - Divide a todo task into subtasks - Respects theming -- Context menu commands(Summarize Block, Make a flash card, Divide task into subtasks) +- Context menu commands + - Summarize Block + - Make a flash card + - Divide task into subtasks + - Prompt from block + - Expand block - A slash command via /ollama - Button in tool bar -- Settings for changing the host of the model, the model itself and a shortcut to open the plugin command pallete +- Settings for changing the host of the model, the model itself and a shortcut to open the plugin command palette # Demo @@ -30,5 +34,7 @@ A plugin to integrate [ollama](https://github.com/jmorganca/ollama) with [logseq ![context](./docs/context.gif) +# Contribution If you have any features suggestions feel free to open an issue +>If this plugin helps you, I'd really appreciate your support. You can [buy me a coffee here. 
](https://www.buymeacoffee.com/omagdy) diff --git a/package.json b/package.json index 2569640..ae103fd 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "ollama-logseq", - "version": "1.0.4", + "version": "1.0.7", "main": "dist/index.html", "scripts": { "dev": "vite", diff --git a/src/App.tsx b/src/App.tsx index b902629..6ab84a8 100644 --- a/src/App.tsx +++ b/src/App.tsx @@ -1,11 +1,13 @@ import React, { useEffect, useRef, useState } from "react"; import { OllamaCommandPallete } from "./components/OllamaCommandPallete"; -import { convertToFlashCardFromEvent, - DivideTaskIntoSubTasksFromEvent, - ollamaUI, - summarizeBlockFromEvent, - promptFromBlockEvent, - expandBlockEvent } from "./ollama"; +import { + convertToFlashCardFromEvent, + DivideTaskIntoSubTasksFromEvent, + ollamaUI, + summarizeBlockFromEvent, + promptFromBlockEvent, + expandBlockEvent +} from "./ollama"; import { useAppVisible } from "./utils"; const options = [ @@ -45,9 +47,9 @@ function App() { return } logseq.Editor.registerSlashCommand("ollama", ollamaUI) - logseq.Editor.registerBlockContextMenuItem("Create a flash card", convertToFlashCardFromEvent) - logseq.Editor.registerBlockContextMenuItem("Summarize block", summarizeBlockFromEvent) - logseq.Editor.registerBlockContextMenuItem("Divide into subtasks", DivideTaskIntoSubTasksFromEvent) + logseq.Editor.registerBlockContextMenuItem("Ollama: Create a flash card", convertToFlashCardFromEvent) + logseq.Editor.registerBlockContextMenuItem("Ollama: Summarize block", summarizeBlockFromEvent) + logseq.Editor.registerBlockContextMenuItem("Ollama: Divide into subtasks", DivideTaskIntoSubTasksFromEvent) logseq.Editor.registerBlockContextMenuItem("Ollama: Prompt from Block", promptFromBlockEvent) logseq.Editor.registerBlockContextMenuItem("Ollama: Expand Block", expandBlockEvent) logseq.App.registerCommandShortcut( diff --git a/src/ollama.tsx b/src/ollama.tsx index 945d2cb..b99513c 100644 --- a/src/ollama.tsx +++ b/src/ollama.tsx @@ -85,7 
+85,7 @@ async function promptLLM(prompt: string) { }) if (!response.ok) { console.log("Error: couldn't fulfill request") - logseq.App.showMsg("Couldn't fulfill request make sure you don't have a typo in the name of the model or the host url") + logseq.App.showMsg("Couldn't fulfill request make sure that ollama service is running and make sure there is no typo in host or model name") throw new Error('Network response was not ok'); } const data = await response.json(); @@ -93,7 +93,7 @@ async function promptLLM(prompt: string) { return data.response; } catch (e: any) { console.error("ERROR: ", e) - logseq.App.showMsg("Couldn't fulfill request make sure you don't have a typo in the name of the model or the host url") + logseq.App.showMsg("Couldn't fulfill request make sure that ollama service is running and make sure there is no typo in host or model name") } } @@ -167,7 +167,7 @@ export async function expandBlockEvent(b: IHookEvent) { const answerBlock = await logseq.Editor.insertBlock(currentBlock!.uuid, '⌛Generating ...', { before: false }) const response = await promptLLM(`Expand: ${currentBlock!.content}`); await logseq.Editor.updateBlock(answerBlock!.uuid, `${response}`) - } catch(e: any) { + } catch (e: any) { logseq.UI.showMsg(e.toString(), 'warning') console.error(e) } @@ -177,7 +177,7 @@ export async function askAI(prompt: string, context: string) { await delay(300) try { const currentBlock = await logseq.Editor.getCurrentBlock() - const block = await logseq.Editor.insertBlock(currentBlock!.uuid, 'Generating....', { before: true }) + const block = await logseq.Editor.insertBlock(currentBlock!.uuid, '⌛Generating....', { before: true }) let response = ""; if (context == "") { response = await promptLLM(prompt) @@ -205,8 +205,8 @@ export async function summarizeBlockFromEvent(b: IHookEvent) { export async function convertToFlashCard(uuid: string, blockContent: string) { try { - const questionBlock = await logseq.Editor.insertBlock(uuid, "Genearting 
question....", { before: false }) - const answerBlock = await logseq.Editor.insertBlock(questionBlock!.uuid, "Genearting answer....", { before: false }) + const questionBlock = await logseq.Editor.insertBlock(uuid, "⌛Generating question....", { before: false }) + const answerBlock = await logseq.Editor.insertBlock(questionBlock!.uuid, "⌛Generating answer....", { before: false }) const question = await promptLLM(`Create a question about this that would fit in a flashcard:\n ${blockContent}`) const answer = await promptLLM(`Given the question ${question} and the context of ${blockContent} What is the answer? be as brief as possible and provide the answer only.`) await logseq.Editor.updateBlock(questionBlock!.uuid, `${question} #card`) @@ -230,7 +230,7 @@ export async function convertToFlashCardCurrentBlock() { export async function DivideTaskIntoSubTasks(uuid: string, content: string) { try { - const block = await logseq.Editor.insertBlock(uuid, "✅ Generating todos ...", { before: false }) let i = 0; const response = await promptLLM(`Divide this task into subtasks with numbers: ${content} `) for (const todo of response.split("\n")) {