From 9630742af21d51ddad6d66e5250415f778dac61b Mon Sep 17 00:00:00 2001 From: michaelchia Date: Mon, 21 Oct 2024 23:50:46 +0800 Subject: [PATCH 1/5] Remove `cleared_message_ids` (#1042) --- packages/jupyter-ai/jupyter_ai/handlers.py | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/packages/jupyter-ai/jupyter_ai/handlers.py b/packages/jupyter-ai/jupyter_ai/handlers.py index e22d2ae3b..28b169c00 100644 --- a/packages/jupyter-ai/jupyter_ai/handlers.py +++ b/packages/jupyter-ai/jupyter_ai/handlers.py @@ -125,13 +125,6 @@ def pending_messages(self) -> List[PendingMessage]: def pending_messages(self, new_pending_messages): self.settings["pending_messages"] = new_pending_messages - @property - def cleared_message_ids(self) -> Set[str]: - """Set of `HumanChatMessage.id` that were cleared via `ClearRequest`.""" - if "cleared_message_ids" not in self.settings: - self.settings["cleared_message_ids"] = set() - return self.settings["cleared_message_ids"] - def initialize(self): self.log.debug("Initializing websocket connection %s", self.request.path) @@ -233,7 +226,9 @@ def broadcast_message(self, message: Message): # do not broadcast agent messages that are replying to cleared human message if ( isinstance(message, (AgentChatMessage, AgentStreamMessage)) - and message.reply_to in self.cleared_message_ids + and message.reply_to + and message.reply_to + not in [m.id for m in self.chat_history if isinstance(m, HumanChatMessage)] ): return @@ -325,10 +320,6 @@ def on_clear_request(self, request: ClearRequest): # if no target, clear all messages if not target: - for msg in self.chat_history: - if msg.type == "human": - self.cleared_message_ids.add(msg.id) - self.chat_history.clear() self.pending_messages.clear() self.llm_chat_memory.clear() @@ -337,7 +328,6 @@ def on_clear_request(self, request: ClearRequest): return # otherwise, clear a single message - self.cleared_message_ids.add(target) for msg in self.chat_history[::-1]: # interrupt the single message if msg.type == "agent-stream" and getattr(msg, "reply_to", None) == target: From 2545fd10590e7ee54b9e494257b3a1053ac0d7c7 Mon Sep 17 00:00:00 2001 From: david qiu Date: Mon, 21 Oct 2024 14:33:36 -0700 Subject: [PATCH 2/5] Migrate streaming logic to `BaseChatHandler` (#1039) * migrate streaming logic to `BaseChatHandler` * add ignore comments to resolve `mypy` errors * pre-commit * revert change to LLMChain import module * drop mutable default argument --- .../jupyter_ai/chat_handlers/ask.py | 6 +- .../jupyter_ai/chat_handlers/base.py | 146 +++++++++++++++++- .../jupyter_ai/chat_handlers/default.py | 118 +------------- .../jupyter_ai/chat_handlers/fix.py | 10 +- 4 files changed, 157 insertions(+), 123 deletions(-) diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py index 2da303e5a..b5c4fa38b 100644 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py +++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/ask.py @@ -73,7 +73,11 @@ async def process_message(self, message: HumanChatMessage): try: with self.pending("Searching learned documents", message): assert self.llm_chain - result = await self.llm_chain.acall({"question": query}) + # TODO: migrate this class to use a LCEL `Runnable` instead of + # `Chain`, then remove the below ignore comment. 
+ result = await self.llm_chain.acall( # type:ignore[attr-defined] + {"question": query} + ) response = result["answer"] self.reply(response, message) except AssertionError as e: diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py index fc09d8f19..107b5a000 100644 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py +++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/base.py @@ -1,11 +1,12 @@ import argparse +import asyncio import contextlib import os import time import traceback -from asyncio import Event from typing import ( TYPE_CHECKING, + Any, Awaitable, ClassVar, Dict, @@ -20,10 +21,13 @@ from uuid import uuid4 from dask.distributed import Client as DaskClient +from jupyter_ai.callback_handlers import MetadataCallbackHandler from jupyter_ai.config_manager import ConfigManager, Logger from jupyter_ai.history import WrappedBoundedChatHistory from jupyter_ai.models import ( AgentChatMessage, + AgentStreamChunkMessage, + AgentStreamMessage, ChatMessage, ClosePendingMessage, HumanChatMessage, @@ -32,8 +36,12 @@ ) from jupyter_ai_magics import Persona from jupyter_ai_magics.providers import BaseProvider -from langchain.chains import LLMChain from langchain.pydantic_v1 import BaseModel +from langchain_core.messages import AIMessageChunk +from langchain_core.runnables import Runnable +from langchain_core.runnables.config import RunnableConfig +from langchain_core.runnables.config import merge_configs as merge_runnable_configs +from langchain_core.runnables.utils import Input if TYPE_CHECKING: from jupyter_ai.context_providers import BaseCommandContextProvider @@ -129,7 +137,7 @@ class BaseChatHandler: """Dictionary of context providers. Allows chat handlers to reference context providers, which can be used to provide context to the LLM.""" - message_interrupted: Dict[str, Event] + message_interrupted: Dict[str, asyncio.Event] """Dictionary mapping an agent message identifier to an asyncio Event which indicates if the message generation/streaming was interrupted.""" @@ -147,7 +155,7 @@ def __init__( help_message_template: str, chat_handlers: Dict[str, "BaseChatHandler"], context_providers: Dict[str, "BaseCommandContextProvider"], - message_interrupted: Dict[str, Event], + message_interrupted: Dict[str, asyncio.Event], ): self.log = log self.config_manager = config_manager @@ -173,7 +181,7 @@ def __init__( self.llm: Optional[BaseProvider] = None self.llm_params: Optional[dict] = None - self.llm_chain: Optional[LLMChain] = None + self.llm_chain: Optional[Runnable] = None async def on_message(self, message: HumanChatMessage): """ @@ -471,3 +479,131 @@ def send_help_message(self, human_msg: Optional[HumanChatMessage] = None) -> Non ) self.broadcast_message(help_message) + + def _start_stream(self, human_msg: HumanChatMessage) -> str: + """ + Sends an `agent-stream` message to indicate the start of a response + stream. Returns the ID of the message, denoted as the `stream_id`. + """ + stream_id = uuid4().hex + stream_msg = AgentStreamMessage( + id=stream_id, + time=time.time(), + body="", + reply_to=human_msg.id, + persona=self.persona, + complete=False, + ) + + self.broadcast_message(stream_msg) + return stream_id + + def _send_stream_chunk( + self, + stream_id: str, + content: str, + complete: bool = False, + metadata: Optional[Dict[str, Any]] = None, + ) -> None: + """ + Sends an `agent-stream-chunk` message containing content that should be + appended to an existing `agent-stream` message with ID `stream_id`. 
+ """ + if not metadata: + metadata = {} + + stream_chunk_msg = AgentStreamChunkMessage( + id=stream_id, content=content, stream_complete=complete, metadata=metadata + ) + self.broadcast_message(stream_chunk_msg) + + async def stream_reply( + self, + input: Input, + human_msg: HumanChatMessage, + config: Optional[RunnableConfig] = None, + ): + """ + Streams a reply to a human message by invoking + `self.llm_chain.astream()`. A LangChain `Runnable` instance must be + bound to `self.llm_chain` before invoking this method. + + Arguments + --------- + - `input`: The input to your runnable. The type of `input` depends on + the runnable in `self.llm_chain`, but is usually a dictionary whose keys + refer to input variables in your prompt template. + + - `human_msg`: The `HumanChatMessage` being replied to. + + - `config` (optional): A `RunnableConfig` object that specifies + additional configuration when streaming from the runnable. + """ + assert self.llm_chain + assert isinstance(self.llm_chain, Runnable) + + received_first_chunk = False + metadata_handler = MetadataCallbackHandler() + base_config: RunnableConfig = { + "configurable": {"last_human_msg": human_msg}, + "callbacks": [metadata_handler], + } + merged_config: RunnableConfig = merge_runnable_configs(base_config, config) + + # start with a pending message + with self.pending("Generating response", human_msg) as pending_message: + # stream response in chunks. this works even if a provider does not + # implement streaming, as `astream()` defaults to yielding `_call()` + # when `_stream()` is not implemented on the LLM class. + chunk_generator = self.llm_chain.astream(input, config=merged_config) + stream_interrupted = False + async for chunk in chunk_generator: + if not received_first_chunk: + # when receiving the first chunk, close the pending message and + # start the stream. + self.close_pending(pending_message) + stream_id = self._start_stream(human_msg=human_msg) + received_first_chunk = True + self.message_interrupted[stream_id] = asyncio.Event() + + if self.message_interrupted[stream_id].is_set(): + try: + # notify the model provider that streaming was interrupted + # (this is essential to allow the model to stop generating) + # + # note: `mypy` flags this line, claiming that `athrow` is + # not defined on `AsyncIterator`. This is why an ignore + # comment is placed here. 
+ await chunk_generator.athrow( # type:ignore[attr-defined] + GenerationInterrupted() + ) + except GenerationInterrupted: + # do not let the exception bubble up in case if + # the provider did not handle it + pass + stream_interrupted = True + break + + if isinstance(chunk, AIMessageChunk) and isinstance(chunk.content, str): + self._send_stream_chunk(stream_id, chunk.content) + elif isinstance(chunk, str): + self._send_stream_chunk(stream_id, chunk) + else: + self.log.error(f"Unrecognized type of chunk yielded: {type(chunk)}") + break + + # complete stream after all chunks have been streamed + stream_tombstone = ( + "\n\n(AI response stopped by user)" if stream_interrupted else "" + ) + self._send_stream_chunk( + stream_id, + stream_tombstone, + complete=True, + metadata=metadata_handler.jai_metadata, + ) + del self.message_interrupted[stream_id] + + +class GenerationInterrupted(asyncio.CancelledError): + """Exception raised when streaming is cancelled by the user""" diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py index 46606d994..266ad73ad 100644 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py +++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/default.py @@ -1,16 +1,8 @@ import asyncio -import time -from typing import Any, Dict, Type -from uuid import uuid4 +from typing import Dict, Type -from jupyter_ai.callback_handlers import MetadataCallbackHandler -from jupyter_ai.models import ( - AgentStreamChunkMessage, - AgentStreamMessage, - HumanChatMessage, -) +from jupyter_ai.models import HumanChatMessage from jupyter_ai_magics.providers import BaseProvider -from langchain_core.messages import AIMessageChunk from langchain_core.runnables import ConfigurableFieldSpec from langchain_core.runnables.history import RunnableWithMessageHistory @@ -18,10 +10,6 @@ from .base import BaseChatHandler, SlashCommandRoutingType -class GenerationInterrupted(asyncio.CancelledError): - """Exception raised when streaming is cancelled by the user""" - - class DefaultChatHandler(BaseChatHandler): id = "default" name = "Default" @@ -65,55 +53,8 @@ def create_llm_chain( ) self.llm_chain = runnable - def _start_stream(self, human_msg: HumanChatMessage) -> str: - """ - Sends an `agent-stream` message to indicate the start of a response - stream. Returns the ID of the message, denoted as the `stream_id`. - """ - stream_id = uuid4().hex - stream_msg = AgentStreamMessage( - id=stream_id, - time=time.time(), - body="", - reply_to=human_msg.id, - persona=self.persona, - complete=False, - ) - - for handler in self._root_chat_handlers.values(): - if not handler: - continue - - handler.broadcast_message(stream_msg) - break - - return stream_id - - def _send_stream_chunk( - self, - stream_id: str, - content: str, - complete: bool = False, - metadata: Dict[str, Any] = {}, - ): - """ - Sends an `agent-stream-chunk` message containing content that should be - appended to an existing `agent-stream` message with ID `stream_id`. 
- """ - stream_chunk_msg = AgentStreamChunkMessage( - id=stream_id, content=content, stream_complete=complete, metadata=metadata - ) - - for handler in self._root_chat_handlers.values(): - if not handler: - continue - - handler.broadcast_message(stream_chunk_msg) - break - async def process_message(self, message: HumanChatMessage): self.get_llm_chain() - received_first_chunk = False assert self.llm_chain inputs = {"input": message.body} @@ -127,60 +68,7 @@ async def process_message(self, message: HumanChatMessage): inputs["context"] = context_prompt inputs["input"] = self.replace_prompt(inputs["input"]) - # start with a pending message - with self.pending("Generating response", message) as pending_message: - # stream response in chunks. this works even if a provider does not - # implement streaming, as `astream()` defaults to yielding `_call()` - # when `_stream()` is not implemented on the LLM class. - metadata_handler = MetadataCallbackHandler() - chunk_generator = self.llm_chain.astream( - inputs, - config={ - "configurable": {"last_human_msg": message}, - "callbacks": [metadata_handler], - }, - ) - stream_interrupted = False - async for chunk in chunk_generator: - if not received_first_chunk: - # when receiving the first chunk, close the pending message and - # start the stream. - self.close_pending(pending_message) - stream_id = self._start_stream(human_msg=message) - received_first_chunk = True - self.message_interrupted[stream_id] = asyncio.Event() - - if self.message_interrupted[stream_id].is_set(): - try: - # notify the model provider that streaming was interrupted - # (this is essential to allow the model to stop generating) - await chunk_generator.athrow(GenerationInterrupted()) - except GenerationInterrupted: - # do not let the exception bubble up in case if - # the provider did not handle it - pass - stream_interrupted = True - break - - if isinstance(chunk, AIMessageChunk) and isinstance(chunk.content, str): - self._send_stream_chunk(stream_id, chunk.content) - elif isinstance(chunk, str): - self._send_stream_chunk(stream_id, chunk) - else: - self.log.error(f"Unrecognized type of chunk yielded: {type(chunk)}") - break - - # complete stream after all chunks have been streamed - stream_tombstone = ( - "\n\n(AI response stopped by user)" if stream_interrupted else "" - ) - self._send_stream_chunk( - stream_id, - stream_tombstone, - complete=True, - metadata=metadata_handler.jai_metadata, - ) - del self.message_interrupted[stream_id] + await self.stream_reply(inputs, message) async def make_context_prompt(self, human_msg: HumanChatMessage) -> str: return "\n\n".join( diff --git a/packages/jupyter-ai/jupyter_ai/chat_handlers/fix.py b/packages/jupyter-ai/jupyter_ai/chat_handlers/fix.py index 1056e592c..4daf70e03 100644 --- a/packages/jupyter-ai/jupyter_ai/chat_handlers/fix.py +++ b/packages/jupyter-ai/jupyter_ai/chat_handlers/fix.py @@ -75,7 +75,11 @@ def create_llm_chain( llm = provider(**unified_parameters) self.llm = llm - self.llm_chain = LLMChain(llm=llm, prompt=FIX_PROMPT_TEMPLATE, verbose=True) + # TODO: migrate this class to use a LCEL `Runnable` instead of + # `Chain`, then remove the below ignore comment. 
+ self.llm_chain = LLMChain( # type:ignore[arg-type] + llm=llm, prompt=FIX_PROMPT_TEMPLATE, verbose=True + ) async def process_message(self, message: HumanChatMessage): if not (message.selection and message.selection.type == "cell-with-error"): @@ -94,7 +98,9 @@ async def process_message(self, message: HumanChatMessage): self.get_llm_chain() with self.pending("Analyzing error", message): assert self.llm_chain - response = await self.llm_chain.apredict( + # TODO: migrate this class to use a LCEL `Runnable` instead of + # `Chain`, then remove the below ignore comment. + response = await self.llm_chain.apredict( # type:ignore[attr-defined] extra_instructions=extra_instructions, stop=["\nHuman:"], cell_content=selection.source, From ca3f2c92281fd8fb579ca2f2a740e5cdb188db0d Mon Sep 17 00:00:00 2001 From: Jason Weill <93281816+JasonWeill@users.noreply.github.com> Date: Mon, 21 Oct 2024 15:24:36 -0700 Subject: [PATCH 3/5] Update contributors doc (#1045) Copy edits, relaxes recommendation to use Node 18, which is now in maintenance mode, with an EOL of 2025-04-30. --- docs/source/contributors/index.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/contributors/index.md b/docs/source/contributors/index.md index 52d1a5458..c6fa74803 100644 --- a/docs/source/contributors/index.md +++ b/docs/source/contributors/index.md @@ -20,11 +20,11 @@ Issues and pull requests that violate the above principles may be declined. If y You can develop Jupyter AI on any system that can run a supported Python version up to and including 3.12, including recent Windows, macOS, and Linux versions. -Each Jupyter AI major version works with only one major version of JupyterLab. Jupyter AI 1.x supports JupyterLab 3.x, and Jupyter AI 2.x supports JupyterLab 4.x. +You should have the newest supported version of JupyterLab installed. -We highly recommend that you install [conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) to start developing on Jupyter AI, especially if you are developing on macOS on an Apple Silicon-based Mac (M1, M1 Pro, M2, etc.). +We highly recommend that you install [conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) to start contributing to Jupyter AI, especially if you are developing on macOS on an Apple Silicon-based Mac (M1, M1 Pro, M2, etc.). -You will need Node.js 18 to use Jupyter AI. Node.js 18.16.0 is known to work. +You will need [a supported version of node.js](https://github.com/nodejs/release#release-schedule) to use Jupyter AI. 
:::{warning} :name: node-18-15 From 838dfa9cdcbd8fc0373e3c056c677b016531a68c Mon Sep 17 00:00:00 2001 From: david qiu Date: Mon, 21 Oct 2024 15:46:28 -0700 Subject: [PATCH 4/5] support Quarto Markdown in `/learn` (#1047) --- packages/jupyter-ai/jupyter_ai/document_loaders/directory.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/jupyter-ai/jupyter_ai/document_loaders/directory.py b/packages/jupyter-ai/jupyter_ai/document_loaders/directory.py index f182cfb0f..e2840a901 100644 --- a/packages/jupyter-ai/jupyter_ai/document_loaders/directory.py +++ b/packages/jupyter-ai/jupyter_ai/document_loaders/directory.py @@ -86,6 +86,7 @@ def path_to_doc(path): SUPPORTED_EXTS = { ".py", ".md", + ".qmd", ".R", ".Rmd", ".jl", From c2333d920aadb6886d373887e325bebe6324bed9 Mon Sep 17 00:00:00 2001 From: dlqqq Date: Mon, 21 Oct 2024 23:01:49 +0000 Subject: [PATCH 5/5] Publish 2.26.0 SHA256 hashes: jupyter-ai-core-2.26.0.tgz: ad4bcdb47bb52c1fb9d59cb2301e8e4ab203c2c7bbdf70ecf42ed93151395398 jupyter_ai-2.26.0-py3-none-any.whl: a306ad593a78445853be3c7f5cf8a045dbd070ef97cde08b9c246705f9e8c62a jupyter_ai-2.26.0.tar.gz: 36230a9530a9c4b97bbf89b701e378c027686811c12e24849aa43925d4716848 jupyter_ai_magics-2.26.0-py3-none-any.whl: 020d374552f15bee9fcdc2c1b497f82f9cdb5e5e38ce5e9c36f4114dfa59571e jupyter_ai_magics-2.26.0.tar.gz: c5feac17616296aad6da8065182fd84320286ea4c38aec0227e5daf2fe02d5aa --- CHANGELOG.md | 39 +++++++++++++++++++++++-- lerna.json | 2 +- package.json | 2 +- packages/jupyter-ai-magics/package.json | 2 +- packages/jupyter-ai-test/package.json | 2 +- packages/jupyter-ai/package.json | 2 +- 6 files changed, 42 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e8fc9c598..75a93055b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,43 @@ +## 2.26.0 + +This release notably includes the addition of a "Stop streaming" button, which takes over the "Send" button when a reply is streaming and the chat input is empty. While Jupyternaut is streaming a reply to a user, the user has the option to click the "Stop streaming" button to interrupt Jupyternaut and stop it from streaming further. Thank you @krassowski for contributing this feature! 
🎉 + +([Full Changelog](https://github.com/jupyterlab/jupyter-ai/compare/@jupyter-ai/core@2.25.0...838dfa9cdcbd8fc0373e3c056c677b016531a68c)) + +### Enhancements made + +- Support Quarto Markdown in `/learn` [#1047](https://github.com/jupyterlab/jupyter-ai/pull/1047) ([@dlqqq](https://github.com/dlqqq)) +- Update requirements contributors doc [#1045](https://github.com/jupyterlab/jupyter-ai/pull/1045) ([@JasonWeill](https://github.com/JasonWeill)) +- Remove clear_message_ids from RootChatHandler [#1042](https://github.com/jupyterlab/jupyter-ai/pull/1042) ([@michaelchia](https://github.com/michaelchia)) +- Migrate streaming logic to `BaseChatHandler` [#1039](https://github.com/jupyterlab/jupyter-ai/pull/1039) ([@dlqqq](https://github.com/dlqqq)) +- Unify message clearing & broadcast logic [#1038](https://github.com/jupyterlab/jupyter-ai/pull/1038) ([@dlqqq](https://github.com/dlqqq)) +- Learn from JSON files [#1024](https://github.com/jupyterlab/jupyter-ai/pull/1024) ([@jlsajfj](https://github.com/jlsajfj)) +- Allow users to stop message streaming [#1022](https://github.com/jupyterlab/jupyter-ai/pull/1022) ([@krassowski](https://github.com/krassowski)) + +### Bugs fixed + +- Always use `username` from `IdentityProvider` [#1034](https://github.com/jupyterlab/jupyter-ai/pull/1034) ([@krassowski](https://github.com/krassowski)) + +### Maintenance and upkeep improvements + +- Support `jupyter-collaboration` v3 [#1035](https://github.com/jupyterlab/jupyter-ai/pull/1035) ([@krassowski](https://github.com/krassowski)) +- Test Python 3.9 and 3.12 on CI, test minimum dependencies [#1029](https://github.com/jupyterlab/jupyter-ai/pull/1029) ([@krassowski](https://github.com/krassowski)) + +### Documentation improvements + +- Update requirements contributors doc [#1045](https://github.com/jupyterlab/jupyter-ai/pull/1045) ([@JasonWeill](https://github.com/JasonWeill)) + +### Contributors to this release + +([GitHub contributors page for this release](https://github.com/jupyterlab/jupyter-ai/graphs/contributors?from=2024-10-07&to=2024-10-21&type=c)) + +[@dlqqq](https://github.com/search?q=repo%3Ajupyterlab%2Fjupyter-ai+involves%3Adlqqq+updated%3A2024-10-07..2024-10-21&type=Issues) | [@JasonWeill](https://github.com/search?q=repo%3Ajupyterlab%2Fjupyter-ai+involves%3AJasonWeill+updated%3A2024-10-07..2024-10-21&type=Issues) | [@jlsajfj](https://github.com/search?q=repo%3Ajupyterlab%2Fjupyter-ai+involves%3Ajlsajfj+updated%3A2024-10-07..2024-10-21&type=Issues) | [@krassowski](https://github.com/search?q=repo%3Ajupyterlab%2Fjupyter-ai+involves%3Akrassowski+updated%3A2024-10-07..2024-10-21&type=Issues) | [@michaelchia](https://github.com/search?q=repo%3Ajupyterlab%2Fjupyter-ai+involves%3Amichaelchia+updated%3A2024-10-07..2024-10-21&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Ajupyterlab%2Fjupyter-ai+involves%3Apre-commit-ci+updated%3A2024-10-07..2024-10-21&type=Issues) + + + ## 2.25.0 ([Full Changelog](https://github.com/jupyterlab/jupyter-ai/compare/@jupyter-ai/core@2.24.1...097dbe48722e255173c6504e6de835c297c553ab)) @@ -20,8 +57,6 @@ [@akaihola](https://github.com/search?q=repo%3Ajupyterlab%2Fjupyter-ai+involves%3Aakaihola+updated%3A2024-10-04..2024-10-07&type=Issues) | [@dlqqq](https://github.com/search?q=repo%3Ajupyterlab%2Fjupyter-ai+involves%3Adlqqq+updated%3A2024-10-04..2024-10-07&type=Issues) | [@jtpio](https://github.com/search?q=repo%3Ajupyterlab%2Fjupyter-ai+involves%3Ajtpio+updated%3A2024-10-04..2024-10-07&type=Issues) | 
[@pre-commit-ci](https://github.com/search?q=repo%3Ajupyterlab%2Fjupyter-ai+involves%3Apre-commit-ci+updated%3A2024-10-04..2024-10-07&type=Issues) - - ## 2.24.1 ([Full Changelog](https://github.com/jupyterlab/jupyter-ai/compare/@jupyter-ai/core@2.24.0...f3692d94dfbb4837714888d0e69f6c7ca3ba547c)) diff --git a/lerna.json b/lerna.json index 12818f665..fd933c898 100644 --- a/lerna.json +++ b/lerna.json @@ -1,7 +1,7 @@ { "$schema": "node_modules/lerna/schemas/lerna-schema.json", "useWorkspaces": true, - "version": "2.25.0", + "version": "2.26.0", "npmClient": "yarn", "useNx": true } diff --git a/package.json b/package.json index 5bbb14364..b1ea69ec7 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@jupyter-ai/monorepo", - "version": "2.25.0", + "version": "2.26.0", "description": "A generative AI extension for JupyterLab", "private": true, "keywords": [ diff --git a/packages/jupyter-ai-magics/package.json b/packages/jupyter-ai-magics/package.json index 8259d481b..d29c80830 100644 --- a/packages/jupyter-ai-magics/package.json +++ b/packages/jupyter-ai-magics/package.json @@ -1,6 +1,6 @@ { "name": "@jupyter-ai/magics", - "version": "2.25.0", + "version": "2.26.0", "description": "Jupyter AI magics Python package. Not published on NPM.", "private": true, "homepage": "https://github.com/jupyterlab/jupyter-ai", diff --git a/packages/jupyter-ai-test/package.json b/packages/jupyter-ai-test/package.json index 168ce3246..04b7cc81b 100644 --- a/packages/jupyter-ai-test/package.json +++ b/packages/jupyter-ai-test/package.json @@ -1,6 +1,6 @@ { "name": "@jupyter-ai/test", - "version": "2.25.0", + "version": "2.26.0", "description": "Jupyter AI test package. Not published on NPM or PyPI.", "private": true, "homepage": "https://github.com/jupyterlab/jupyter-ai", diff --git a/packages/jupyter-ai/package.json b/packages/jupyter-ai/package.json index c06e36350..db71f52ae 100644 --- a/packages/jupyter-ai/package.json +++ b/packages/jupyter-ai/package.json @@ -1,6 +1,6 @@ { "name": "@jupyter-ai/core", - "version": "2.25.0", + "version": "2.26.0", "description": "A generative AI extension for JupyterLab", "keywords": [ "jupyter",
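
For reference, the `stream_reply()` helper introduced in PATCH 2/5 replaces the streaming loop previously duplicated in `DefaultChatHandler`. The sketch below is illustrative only and is not part of the patch series: it shows how a hypothetical custom chat handler might bind an LCEL `Runnable` to `self.llm_chain` and delegate streaming to the new helper. The handler name, slash command, and prompt template are invented for the example; only the `create_llm_chain()` / `process_message()` / `stream_reply()` contract comes from the patches above.

```python
# Hypothetical handler, sketched against the API added in PATCH 2/5.
from typing import Dict, Type

from jupyter_ai.chat_handlers.base import BaseChatHandler, SlashCommandRoutingType
from jupyter_ai.models import HumanChatMessage
from jupyter_ai_magics.providers import BaseProvider
from langchain_core.prompts import ChatPromptTemplate


class PoemChatHandler(BaseChatHandler):
    """Streams a short poem about the user's message (example only)."""

    id = "poem"
    name = "Poem"
    help = "Streams a short poem about your message"
    routing_type = SlashCommandRoutingType(slash_id="poem")

    def create_llm_chain(
        self, provider: Type[BaseProvider], provider_params: Dict[str, str]
    ):
        # Bind an LCEL `Runnable` to `self.llm_chain`. `stream_reply()` calls
        # `astream()` on it, so anything composed with LCEL works here.
        # (Provider parameter unification is omitted for brevity.)
        llm = provider(**provider_params)
        prompt = ChatPromptTemplate.from_template(
            "Write a two-line poem about: {input}"
        )
        self.llm = llm
        self.llm_chain = prompt | llm

    async def process_message(self, message: HumanChatMessage):
        self.get_llm_chain()
        assert self.llm_chain
        # `stream_reply()` opens a pending message, broadcasts the
        # `agent-stream` / `agent-stream-chunk` messages, and handles user
        # interruption via `GenerationInterrupted`, as implemented in base.py.
        await self.stream_reply({"input": message.body}, message)
```

Handlers that still build an `LLMChain` (such as `/ask` and `/fix`) keep working unchanged; the `type:ignore` comments added in PATCH 2/5 only silence `mypy` until those handlers are migrated to LCEL runnables.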