Showing that the bot is writing (answering) #5

Merged · 2 commits · Oct 22, 2024
2 changes: 1 addition & 1 deletion packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py
@@ -76,7 +76,7 @@ async def process_message(self, message: HumanChatMessage, chat: Optional[YChat]
         self.get_llm_chain()

         try:
-            with self.pending("Searching learned documents", message):
+            with self.pending("Searching learned documents", message, chat=chat):
                 assert self.llm_chain
                 result = await self.llm_chain.acall({"question": query})
                 response = result["answer"]
21 changes: 16 additions & 5 deletions packages/jupyter-ai/jupyter_ai/chat_handlers/base.py
@@ -329,6 +329,7 @@ def start_pending(
         text: str,
         human_msg: Optional[HumanChatMessage] = None,
         *,
+        chat: Optional[YChat] = None,
         ellipsis: bool = True,
     ) -> PendingMessage:
         """
@@ -347,10 +348,13 @@ def start_pending(
             ellipsis=ellipsis,
         )

-        self.broadcast_message(pending_msg)
+        if chat is not None:
+            chat.awareness.set_local_state_field("isWriting", True)
+        else:
+            self.broadcast_message(pending_msg)
         return pending_msg

-    def close_pending(self, pending_msg: PendingMessage):
+    def close_pending(self, pending_msg: PendingMessage, chat: Optional[YChat] = None):
         """
         Closes a pending message.
         """
@@ -361,7 +365,10 @@ def close_pending(self, pending_msg: PendingMessage, chat: Optional[YChat] = None):
             id=pending_msg.id,
         )

-        self.broadcast_message(close_pending_msg)
+        if chat is not None:
+            chat.awareness.set_local_state_field("isWriting", False)
+        else:
+            self.broadcast_message(close_pending_msg)
         pending_msg.closed = True

     @contextlib.contextmanager
@@ -370,18 +377,22 @@ def pending(
         text: str,
         human_msg: Optional[HumanChatMessage] = None,
         *,
+        chat: Optional[YChat] = None,
         ellipsis: bool = True,
     ):
         """
         Context manager that sends a pending message to the client, and closes
         it after the block is executed.
+
+        TODO: Simplify this to only modify the awareness once the collaborative
+        chat is the only chat in use.
         """
-        pending_msg = self.start_pending(text, human_msg=human_msg, ellipsis=ellipsis)
+        pending_msg = self.start_pending(text, human_msg=human_msg, chat=chat, ellipsis=ellipsis)
        try:
             yield pending_msg
         finally:
             if not pending_msg.closed:
-                self.close_pending(pending_msg)
+                self.close_pending(pending_msg, chat=chat)

     def get_llm_chain(self):
         lm_provider = self.config_manager.lm_provider
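In short: when a collaborative YChat is passed, the handlers signal activity by toggling an "isWriting" flag on the chat document's awareness, and fall back to broadcasting pending messages otherwise. Below is a minimal, self-contained sketch of that pattern; FakeAwareness and FakeChat are stand-ins for the real pycrdt awareness and YChat objects, not part of this PR.

import contextlib

class FakeAwareness:
    """Stand-in for the chat document's awareness (pycrdt); only the one
    method used by this PR is mimicked."""
    def __init__(self):
        self.state = {}

    def set_local_state_field(self, field, value):
        self.state[field] = value
        print(f"awareness: {field} = {value}")

class FakeChat:
    """Stand-in for YChat, exposing only the awareness attribute."""
    def __init__(self):
        self.awareness = FakeAwareness()

@contextlib.contextmanager
def pending(chat=None):
    # Collaborative chat: toggle the "isWriting" awareness flag so every
    # connected client can render a typing indicator.
    if chat is not None:
        chat.awareness.set_local_state_field("isWriting", True)
    try:
        yield
    finally:
        # The legacy path (chat is None) would broadcast PendingMessage /
        # ClosePendingMessage objects instead; omitted here.
        if chat is not None:
            chat.awareness.set_local_state_field("isWriting", False)

with pending(chat=FakeChat()):
    pass  # the LLM call would run here; the flag stays True meanwhile

Awareness state is ephemeral (it is not persisted in the Y document), which is presumably why it suits a transient typing indicator better than a pending message stored and then retracted through the message channel.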
4 changes: 2 additions & 2 deletions packages/jupyter-ai/jupyter_ai/chat_handlers/default.py
@@ -142,7 +142,7 @@ async def process_message(self, message: HumanChatMessage, chat: Optional[YChat]
         inputs["input"] = self.replace_prompt(inputs["input"])

         # start with a pending message
-        with self.pending("Generating response", message) as pending_message:
+        with self.pending("Generating response", message, chat=chat) as pending_message:
             # stream response in chunks. this works even if a provider does not
             # implement streaming, as `astream()` defaults to yielding `_call()`
             # when `_stream()` is not implemented on the LLM class.
@@ -159,7 +159,7 @@ async def process_message(self, message: HumanChatMessage, chat: Optional[YChat]
                 if not received_first_chunk:
                     # when receiving the first chunk, close the pending message and
                     # start the stream.
-                    self.close_pending(pending_message)
+                    self.close_pending(pending_message, chat=chat)
                     stream_id = self._start_stream(message, chat)
                     received_first_chunk = True
                     self.message_interrupted[stream_id] = asyncio.Event()
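The streaming handler keeps the indicator only until the first token arrives: the pending state is closed as soon as the first chunk is received, then the actual message stream starts. A toy reconstruction of that control flow, with fake_stream standing in for self.llm_chain.astream() (nothing here is the real jupyter-ai API):

import asyncio

async def fake_stream():
    # Stand-in for the LLM chain's astream(); yields response chunks.
    for chunk in ("Hello", ", ", "world"):
        await asyncio.sleep(0.1)
        yield chunk

async def respond():
    received_first_chunk = False
    print("pending: Generating response ...")  # indicator shown
    async for chunk in fake_stream():
        if not received_first_chunk:
            # First chunk: drop the indicator and open the message stream.
            print("pending closed, stream started")
            received_first_chunk = True
        print("chunk:", repr(chunk))

asyncio.run(respond())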
2 changes: 1 addition & 1 deletion packages/jupyter-ai/jupyter_ai/chat_handlers/fix.py
@@ -98,7 +98,7 @@ async def process_message(self, message: HumanChatMessage, chat: Optional[YChat]
         extra_instructions = message.prompt[4:].strip() or "None."

         self.get_llm_chain()
-        with self.pending("Analyzing error", message):
+        with self.pending("Analyzing error", message, chat=chat):
             assert self.llm_chain
             response = await self.llm_chain.apredict(
                 extra_instructions=extra_instructions,
2 changes: 1 addition & 1 deletion packages/jupyter-ai/jupyter_ai/chat_handlers/learn.py
@@ -214,7 +214,7 @@ async def process_message(self, message: HumanChatMessage, chat: Optional[YChat]
         # delete and relearn index if embedding model was changed
         await self.delete_and_relearn(chat)

-        with self.pending(f"Loading and splitting files for {load_path}", message):
+        with self.pending(f"Loading and splitting files for {load_path}", message, chat=chat):
             try:
                 await self.learn_dir(
                     load_path, args.chunk_size, args.chunk_overlap, args.all_files
19 changes: 12 additions & 7 deletions packages/jupyter-ai/jupyter_ai/extension.py
@@ -57,10 +57,13 @@
 else:
     COLLAB_VERSION = 2

+# The bot currently has a fixed username: because this username is used as a key
+# in chats, it needs to be constant. Do we need to change it?
 BOT = {
-    "username": str(uuid.uuid4()),
+    "username": '5f6a7570-7974-6572-6e61-75742d626f74',
     "name": "Jupyternaut",
+    "display_name": "Jupyternaut",
+    "initials": "J"
 }

 DEFAULT_HELP_MESSAGE_TEMPLATE = """Hi there! I'm {persona_name}, your programming assistant.
@@ -246,6 +249,13 @@ async def connect_chat(

         self.log.info(f"Collaborative chat server is listening for {data['room']}")
         chat = await self.get_chat(data["room"])
+
+        # Add the bot user to the chat document awareness.
+        BOT["avatar_url"] = url_path_join(
+            self.settings.get("base_url", "/"), "api/ai/static/jupyternaut.svg"
+        )
+        chat.awareness.set_local_state_field("user", BOT)
+
         callback = partial(self.on_change, chat)
         chat.ymessages.observe(callback)

@@ -309,14 +319,9 @@ async def _route(self, message: HumanChatMessage, chat: YChat):
         self.log.info(f"{command_readable} chat handler resolved in {latency_ms} ms.")

     def write_message(self, chat: YChat, body: str, id: str | None = None) -> str:
-        BOT["avatar_url"] = url_path_join(
-            self.settings.get("base_url", "/"), "api/ai/static/jupyternaut.svg"
-        )
-        bot = chat.get_user_by_name(BOT["name"])
+        bot = chat.get_user(BOT["username"])
         if not bot:
             chat.set_user(BOT)
-        else:
-            BOT["username"] = bot["username"]

         index = self.messages_indexes[id] if id else None
         id = id if id else str(uuid.uuid4())
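With a constant username, write_message can look the bot up by its key directly (get_user) instead of by display name, and no longer needs to mutate BOT["username"] to match a previously registered user. One detail worth noting: the constant is shaped like a UUID but appears to simply be the hex encoding of the ASCII string "_jupyternaut-bot", which makes the key both stable across sessions and recognizable. A quick check (my own decoding, not something stated in the diff):

uuid_like = "5f6a7570-7974-6572-6e61-75742d626f74"
print(bytes.fromhex(uuid_like.replace("-", "")).decode("ascii"))
# prints: _jupyternaut-bot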
2 changes: 1 addition & 1 deletion packages/jupyter-ai/package.json
@@ -61,7 +61,7 @@
   "dependencies": {
     "@emotion/react": "^11.10.5",
     "@emotion/styled": "^11.10.5",
-    "@jupyter/chat": "^0.4.0",
+    "@jupyter/chat": "^0.5.0",
     "@jupyter/collaboration": "^1",
     "@jupyterlab/application": "^4.2.0",
     "@jupyterlab/apputils": "^4.2.0",
2 changes: 1 addition & 1 deletion packages/jupyter-ai/pyproject.toml
@@ -33,7 +33,7 @@ dependencies = [
     "typing_extensions>=4.5.0",
     "traitlets>=5.0",
     "deepmerge>=2.0,<3",
-    "jupyterlab-collaborative-chat",
+    "jupyterlab-collaborative-chat>=0.5.0",
 ]

 dynamic = ["version", "description", "authors", "urls", "keywords"]
20 changes: 10 additions & 10 deletions packages/jupyter-ai/src/index.ts
@@ -1,10 +1,10 @@
 import { IAutocompletionRegistry } from '@jupyter/chat';
+import { IGlobalAwareness } from '@jupyter/collaboration';
 import {
   JupyterFrontEnd,
   JupyterFrontEndPlugin,
   ILayoutRestorer
 } from '@jupyterlab/application';
-
 import {
   IWidgetTracker,
   ReactWidget,
@@ -13,25 +13,25 @@ import {
   ICommandPalette
 } from '@jupyterlab/apputils';
 import { IDocumentWidget } from '@jupyterlab/docregistry';
-import { IGlobalAwareness } from '@jupyter/collaboration';
+import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
+import { Signal } from '@lumino/signaling';
 import type { Awareness } from 'y-protocols/awareness';
-import { buildChatSidebar } from './widgets/chat-sidebar';
-import { SelectionWatcher } from './selection-watcher';

 import { ChatHandler } from './chat_handler';
-import { buildErrorWidget } from './widgets/chat-error';
 import { completionPlugin } from './completions';
+import { ActiveCellManager } from './contexts/active-cell-context';
+import { SelectionWatcher } from './selection-watcher';
+import { menuPlugin } from './plugins/menu-plugin';
+import { autocompletion } from './slash-autocompletion';
 import { statusItemPlugin } from './status';
 import {
   IJaiCompletionProvider,
   IJaiCore,
   IJaiMessageFooter,
   IJaiTelemetryHandler
 } from './tokens';
-import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
-import { ActiveCellManager } from './contexts/active-cell-context';
-import { autocompletion } from './slash-autocompletion';
-import { Signal } from '@lumino/signaling';
-import { menuPlugin } from './plugins/menu-plugin';
+import { buildErrorWidget } from './widgets/chat-error';
+import { buildChatSidebar } from './widgets/chat-sidebar';
 import { buildAiSettings } from './widgets/settings-widget';
11 changes: 6 additions & 5 deletions yarn.lock
@@ -2220,7 +2220,7 @@ __metadata:
     "@babel/preset-env": ^7.0.0
     "@emotion/react": ^11.10.5
     "@emotion/styled": ^11.10.5
-    "@jupyter/chat": ^0.4.0
+    "@jupyter/chat": ^0.5.0
     "@jupyter/collaboration": ^1
     "@jupyterlab/application": ^4.2.0
     "@jupyterlab/apputils": ^4.2.0
@@ -2286,15 +2286,16 @@
   languageName: unknown
   linkType: soft

-"@jupyter/chat@npm:^0.4.0":
-  version: 0.4.0
-  resolution: "@jupyter/chat@npm:0.4.0"
+"@jupyter/chat@npm:^0.5.0":
+  version: 0.5.0
+  resolution: "@jupyter/chat@npm:0.5.0"
   dependencies:
     "@emotion/react": ^11.10.5
     "@emotion/styled": ^11.10.5
     "@jupyter/react-components": ^0.15.2
     "@jupyterlab/application": ^4.2.0
     "@jupyterlab/apputils": ^4.3.0
+    "@jupyterlab/fileeditor": ^4.2.0
     "@jupyterlab/notebook": ^4.2.0
     "@jupyterlab/rendermime": ^4.2.0
     "@jupyterlab/ui-components": ^4.2.0
@@ -2306,7 +2307,7 @@
     clsx: ^2.1.0
     react: ^18.2.0
     react-dom: ^18.2.0
-  checksum: 6e309c8e70cf480103eb26f3109da417c58d2e6844d5e56e63feabf71926f9dba6f9bc85caff765dfc574a8fd7ed803a8c03e5d812c28568dcf6ec918bbd2e66
+  checksum: 0c935344e3cf8b60ded1df83c1b608f77becd7ea8c82c175a8588606c6c56a46525130b429b5acfaab441d9ccac9d478b25646bdb8aa3dfd1af82c8f372e07cf
   languageName: node
   linkType: hard