From 30182ba7e5fe476a04337b04e36b19d80cbc86fb Mon Sep 17 00:00:00 2001
From: Simonas <20096648+simjak@users.noreply.github.com>
Date: Fri, 19 Jul 2024 10:07:44 +0300
Subject: [PATCH 1/7] feat: update regex splitter

---
 semantic_chunkers/__init__.py               |  2 +-
 semantic_chunkers/chunkers/consecutive.py   |  2 +-
 semantic_chunkers/chunkers/cumulative.py    |  2 +-
 semantic_chunkers/chunkers/statistical.py   |  2 +-
 semantic_chunkers/splitters/__init__.py     |  2 +-
 .../splitters/{sentence.py => regex.py}     | 16 ++++++-
 .../{test_splitters.py => test_chunkers.py} |  0
 tests/unit/test_regex_splitter.py           | 43 +++++++++++++++++++
 8 files changed, 62 insertions(+), 7 deletions(-)
 rename semantic_chunkers/splitters/{sentence.py => regex.py} (75%)
 rename tests/unit/{test_splitters.py => test_chunkers.py} (100%)
 create mode 100644 tests/unit/test_regex_splitter.py

diff --git a/semantic_chunkers/__init__.py b/semantic_chunkers/__init__.py
index aed6e71..b9a89b5 100644
--- a/semantic_chunkers/__init__.py
+++ b/semantic_chunkers/__init__.py
@@ -11,8 +11,8 @@
     "ConsecutiveChunker",
     "CumulativeChunker",
     "StatisticalChunker",
-    "BaseSplitter",
     "RegexSplitter",
+    "BaseSplitter",
 ]
 
 __version__ = "0.0.8"
diff --git a/semantic_chunkers/chunkers/consecutive.py b/semantic_chunkers/chunkers/consecutive.py
index c8c8541..921a73b 100644
--- a/semantic_chunkers/chunkers/consecutive.py
+++ b/semantic_chunkers/chunkers/consecutive.py
@@ -7,7 +7,7 @@
 from semantic_chunkers.chunkers.base import BaseChunker
 from semantic_chunkers.schema import Chunk
 from semantic_chunkers.splitters.base import BaseSplitter
-from semantic_chunkers.splitters.sentence import RegexSplitter
+from semantic_chunkers.splitters.regex import RegexSplitter
 
 
 class ConsecutiveChunker(BaseChunker):
diff --git a/semantic_chunkers/chunkers/cumulative.py b/semantic_chunkers/chunkers/cumulative.py
index b538b14..7e2235c 100644
--- a/semantic_chunkers/chunkers/cumulative.py
+++ b/semantic_chunkers/chunkers/cumulative.py
@@ -7,7 +7,7 @@
 from semantic_chunkers.chunkers.base import BaseChunker
 from semantic_chunkers.schema import Chunk
 from semantic_chunkers.splitters.base import BaseSplitter
-from semantic_chunkers.splitters.sentence import RegexSplitter
+from semantic_chunkers.splitters.regex import RegexSplitter
 
 
 class CumulativeChunker(BaseChunker):
diff --git a/semantic_chunkers/chunkers/statistical.py b/semantic_chunkers/chunkers/statistical.py
index 922d8ce..73793f1 100644
--- a/semantic_chunkers/chunkers/statistical.py
+++ b/semantic_chunkers/chunkers/statistical.py
@@ -9,7 +9,7 @@
 from semantic_chunkers.chunkers.base import BaseChunker
 from semantic_chunkers.schema import Chunk
 from semantic_chunkers.splitters.base import BaseSplitter
-from semantic_chunkers.splitters.sentence import RegexSplitter
+from semantic_chunkers.splitters.regex import RegexSplitter
 from semantic_chunkers.utils.logger import logger
 from semantic_chunkers.utils.text import (
     async_retry_with_timeout,
diff --git a/semantic_chunkers/splitters/__init__.py b/semantic_chunkers/splitters/__init__.py
index 5b3d258..53e46c4 100644
--- a/semantic_chunkers/splitters/__init__.py
+++ b/semantic_chunkers/splitters/__init__.py
@@ -1,5 +1,5 @@
 from semantic_chunkers.splitters.base import BaseSplitter
-from semantic_chunkers.splitters.sentence import RegexSplitter
+from semantic_chunkers.splitters.regex import RegexSplitter
 
 __all__ = [
     "BaseSplitter",
diff --git a/semantic_chunkers/splitters/sentence.py b/semantic_chunkers/splitters/regex.py
similarity index 75%
rename from semantic_chunkers/splitters/sentence.py
rename to semantic_chunkers/splitters/regex.py
index 667cc91..e9ba8cc 100644
--- a/semantic_chunkers/splitters/sentence.py
+++ b/semantic_chunkers/splitters/regex.py
@@ -52,6 +52,18 @@ class RegexSplitter(BaseSplitter):
     """
 
     def __call__(self, doc: str) -> List[str]:
-        sentences = regex.split(self.regex_pattern, doc, flags=regex.VERBOSE)
-        sentences = [sentence.strip() for sentence in sentences if sentence.strip()]
+        # Step 1: Split by \n\n
+        chunks = doc.split("\n\n")
+        sentences = []
+        for chunk in chunks:
+            # Step 2: Split by \n within each chunk
+            sub_chunks = chunk.split("\n")
+            for sub_chunk in sub_chunks:
+                # Step 3: Split by regex pattern within each sub_chunk
+                sub_sentences = regex.split(
+                    self.regex_pattern, sub_chunk, flags=regex.VERBOSE
+                )
+                for sentence in sub_sentences:
+                    if sentence.strip():
+                        sentences.append(sentence.strip())
         return sentences
diff --git a/tests/unit/test_splitters.py b/tests/unit/test_chunkers.py
similarity index 100%
rename from tests/unit/test_splitters.py
rename to tests/unit/test_chunkers.py
diff --git a/tests/unit/test_regex_splitter.py b/tests/unit/test_regex_splitter.py
new file mode 100644
index 0000000..41ab9e0
--- /dev/null
+++ b/tests/unit/test_regex_splitter.py
@@ -0,0 +1,43 @@
+import unittest
+
+from semantic_chunkers.splitters.regex import RegexSplitter
+
+
+class TestRegexSplitter(unittest.TestCase):
+    def setUp(self):
+        self.splitter = RegexSplitter()
+
+    def test_split_by_double_newline(self):
+        doc = "This is the first paragraph.\n\nThis is the second paragraph."
+        expected = ["This is the first paragraph.", "This is the second paragraph."]
+        result = self.splitter(doc)
+        self.assertEqual(result, expected)
+
+    def test_split_by_single_newline(self):
+        doc = "This is the first line.\nThis is the second line."
+        expected = ["This is the first line.", "This is the second line."]
+        result = self.splitter(doc)
+        self.assertEqual(result, expected)
+
+    def test_split_by_period(self):
+        doc = "This is the first sentence. This is the second sentence."
+        expected = ["This is the first sentence.", "This is the second sentence."]
+        result = self.splitter(doc)
+        self.assertEqual(result, expected)
+
+    def test_complex_split(self):
+        doc = """
+        First paragraph.\n\nSecond paragraph.\nThird line in second paragraph.
Fourth line.\n\nFifth paragraph.""" + expected = [ + "First paragraph.", + "Second paragraph.", + "Third line in second paragraph.", + "Fourth line.", + "Fifth paragraph.", + ] + result = self.splitter(doc) + self.assertEqual(result, expected) + + +if __name__ == "__main__": + unittest.main() From 54a65ff320747b5cccf81e53b106948dfb09ae65 Mon Sep 17 00:00:00 2001 From: Simonas <20096648+simjak@users.noreply.github.com> Date: Fri, 19 Jul 2024 11:07:40 +0300 Subject: [PATCH 2/7] chore: regex chunker --- docs/00-chunkers-intro.ipynb | 148 ++++++++++++++++++++-- docs/02-chunkers-async.ipynb | 148 ++++++++++++++++++++-- semantic_chunkers/__init__.py | 4 +- semantic_chunkers/chunkers/__init__.py | 2 + semantic_chunkers/chunkers/base.py | 2 +- semantic_chunkers/chunkers/consecutive.py | 2 + semantic_chunkers/chunkers/cumulative.py | 2 + semantic_chunkers/chunkers/regex.py | 50 ++++++++ semantic_chunkers/chunkers/statistical.py | 2 + tests/unit/test_regex_chunker.py | 48 +++++++ 10 files changed, 390 insertions(+), 18 deletions(-) create mode 100644 semantic_chunkers/chunkers/regex.py create mode 100644 tests/unit/test_regex_chunker.py diff --git a/docs/00-chunkers-intro.ipynb b/docs/00-chunkers-intro.ipynb index be81a7e..0608e7d 100644 --- a/docs/00-chunkers-intro.ipynb +++ b/docs/00-chunkers-intro.ipynb @@ -9,7 +9,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -17,7 +17,17 @@ "id": "iFgZNmSH2Dee", "outputId": "45754137-cb9c-4e85-9dbc-e139c8a2c9bb" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.2\u001b[0m\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n" + ] + } + ], "source": [ "!pip install -qU \\\n", " semantic-chunkers \\\n", @@ -37,7 +47,7 @@ "source": [ "Semantic chunkers allow us to build more context aware chunks of information. We can use this for RAG, splitting video, audio, and much more.\n", "\n", - "In this example, we will stick with a simple RAG-focused example. We will learn about three different types of chunkers available to us; `StatisticalChunker`, `ConsecutiveChunker`, and `CumulativeChunker`. To begin, we need some data." + "In this example, we will stick with a simple RAG-focused example. We will learn about three different types of chunkers available to us; `StatisticalChunker`, `ConsecutiveChunker`, `CumulativeChunker`, and `RegexChunker`. To begin, we need some data." ] }, { @@ -49,9 +59,17 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 2, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/jakit/customers/aurelio/semantic-chunkers/.venv_312/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + }, { "data": { "text/plain": [ @@ -61,7 +79,7 @@ "})" ] }, - "execution_count": 22, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } @@ -75,7 +93,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 3, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -110,7 +128,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -1755,6 +1773,120 @@ "chunker.print(chunks[0])" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Regex Chunking" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Split 1, tokens 300, triggered by: token limit\n", + "\u001b[31m# Mamba: Linear-Time Sequence Modeling with Selective State Spaces # Albert Gu*1 and Tri Dao*2 1Machine Learning Department, Carnegie Mellon University 2Department of Computer Science, Princeton University agu@cs.cmu.edu, tri@tridao.me # Abstract Foundation models, now powering most of the exciting applications in deep learning, are almost universally based on the Transformer architecture and its core attention module. Many subquadratic-time architectures such as linear attention, gated convolution and recurrent models, and structured state space models (SSMs) have been developed to address Transformersâ computational ineï¬ ciency on long sequences, but they have not performed as well as attention on important modalities such as language. We identify that a key weakness of such models is their inability to perform content-based reasoning, and make several improvements. First, simply letting the SSM parameters be functions of the input addresses their weakness with discrete modalities, allowing the model to selectively propagate or forget information along the sequence length dimension depending on the current token. Second, even though this change prevents the use of eï¬ cient convolutions, we design a hardware-aware parallel algorithm in recurrent mode. We integrate these selective SSMs into a simpliï¬ ed end-to-end neural network architecture without attention or even MLP blocks (Mamba). Mamba enjoys fast inference (5à higher throughput than Transformers) and linear scaling in sequence length, and its performance improves on real data up to million-length sequences.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 2, tokens 300, triggered by: token limit\n", + "\u001b[32mAs a general sequence model backbone, Mamba achieves state-of-the-art performance across several modalities such as language, audio, and genomics. On language modeling, our Mamba-3B model outperforms Transformers of the same size and matches Transformers twice its size, both in pretraining and downstream evaluation. # 1 Introduction Foundation models (FMs), or large models pretrained on massive data then adapted for downstream tasks, have emerged as an eï¬ ective paradigm in modern machine learning. The backbone of these FMs are often sequence models, operating on arbitrary sequences of inputs from a wide variety of domains such as language, images, speech, audio, time series, and genomics (Brown et al. 2020; Dosovitskiy et al. 2020; Ismail Fawaz et al. 2019; Oord et al. 2016; Poli et al. 
2023; Sutskever, Vinyals, and Quoc V Le 2014). While this concept is agnostic to a particular choice of model architecture, modern FMs are predominantly based on a single type of sequence model: the Transformer (Vaswani et al. 2017) and its core attention layer (Bahdanau, Cho, and Bengio 2015) The eï¬ cacy of self-attention is attributed to its ability to route information densely within a context window, allowing it to model complex data.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 3, tokens 298, triggered by: token limit\n", + "\u001b[34mHowever, this property brings fundamental drawbacks: an inability to model anything outside of a ï¬ nite window, and quadratic scaling with respect to the window length. An enormous body of research has appeared on more eï¬ cient variants of attention to overcome these drawbacks (Tay, Dehghani, Bahri, et al. 2022), but often at the expense of the very properties that makes it eï¬ ective. As of yet, none of these variants have been shown to be empirically eï¬ ective at scale across domains. Recently, structured state space sequence models (SSMs) (Gu, Goel, and Ré 2022; Gu, Johnson, Goel, et al. 2021) have emerged as a promising class of architectures for sequence modeling. These models can be interpreted as a combination of recurrent neural networks (RNNs) and convolutional neural networks (CNNs), with inspiration from classical state space models (Kalman 1960). This class of models can be computed very eï¬ ciently as either a recurrence or convolution, with linear or near-linear scaling in sequence length. Additionally, they have principled Equal contribution. 1 mechanisms for modeling long-range dependencies (Gu, Dao, et al. 2020) in certain data modalities, and have dominated benchmarks such as the Long Range Arena (Tay, Dehghani, Abnar, et al. 2021).\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 4, tokens 293, triggered by: token limit\n", + "\u001b[35mMany ï¬ avors of SSMs (Gu, Goel, and Ré 2022; Gu, Gupta, et al. 2022; Gupta, Gu, and Berant 2022; Y. Li et al. 2023; Ma et al. 2023; Orvieto et al. 2023; Smith, Warrington, and Linderman 2023) have been successful in domains involving continuous signal data such as audio and vision (Goel et al. 2022; Nguyen, Goel, et al. 2022; Saon, Gupta, and Cui 2023). However, they have been less eï¬ ective at modeling discrete and information-dense data such as text. We propose a new class of selective state space models, that improves on prior work on several axes to achieve the modeling power of Transformers while scaling linearly in sequence length. Selection Mechanism. First, we identify a key limitation of prior models: the ability to eï¬ ciently select data in an input-dependent manner (i.e. focus on or ignore particular inputs). Building on intuition based on important synthetic tasks such as selective copy and induction heads, we design a simple selection mechanism by parameterizing the SSM parameters based on the input. This allows the model to ï¬ lter out irrelevant information and remember relevant information indeï¬ nitely. 
Hardware-aware Algorithm.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 5, tokens 200, triggered by: token limit\n", + "\u001b[31mThis simple change poses a technical challenge for the computation of the model; in fact, all prior SSMs models must be time- and input-invariant in order to be computationally eï¬ cient. We overcome this with a hardware-aware algorithm that computes the model recurrently with a scan instead of convolution, but does not materialize the expanded state in order to avoid IO access between diï¬ erent levels of the GPU memory hierarchy. The resulting implementation is faster than previous methods both in theory (scaling linearly in sequence length, compared to pseudo-linear for all convolution-based SSMs) and on modern hardware (up to 3à faster on A100 GPUs). Architecture. We simplify prior deep sequence model architectures by combining the design of prior SSM architectures (Dao, Fu, Saab, et al. 2023) with the MLP block of Transformers into a single block, leading to a simple and homogenous architecture design (Mamba) incorporating selective state spaces.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 6, tokens 292, triggered by: token limit\n", + "\u001b[32mSelective SSMs, and by extension the Mamba architecture, are fully recurrent models with key properties that make them suitable as the backbone of general foundation models operating on sequences. (i) High quality: selectivity brings strong performance on dense modalities such as language and genomics. (ii) Fast training and inference: computation and memory scales linearly in sequence length during training, and unrolling the model autoregressively during inference requires only constant time per step since it does not require a cache of previous elements. (iii) Long context: the quality and eï¬ ciency together yield performance improvements on real data up to sequence length 1M. We empirically validate Mambaâ s potential as a general sequence FM backbone, in both pretraining quality and domain-speciï¬ c task performance, on several types of modalities and settings: â ¢ Synthetics. On important synthetic tasks such as copying and induction heads that have been proposed as being key to large language models, Mamba not only solves them easily but can extrapolate solutions indeï¬ nitely long (>1M tokens). â ¢ Audio and Genomics. Mamba out-performs prior state-of-the-art models such as SaShiMi, Hyena, and Transform- ers on modeling audio waveforms and DNA sequences, both in pretraining quality and downstream metrics (e.g. reducing FID on a challenging speech generation dataset by more than half).\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 7, tokens 295, triggered by: token limit\n", + "\u001b[34mIn both settings, its performance improves with longer context up to million-length sequences. â ¢ Language Modeling. Mamba is the ï¬ rst linear-time sequence model that truly achieves Transformer-quality performance, both in pretraining perplexity and downstream evaluations. With scaling laws up to 1B parameters, we show that Mamba exceeds the performance of a large range of baselines, including very strong modern Transformer training recipes based on LLaMa (Touvron et al. 2023). 
Our Mamba language model has 5à generation throughput compared to Transformers of similar size, and Mamba-3Bâ s quality matches that of Transformers twice its size (e.g. 4 points higher avg. on common sense reasoning compared to Pythia-3B and even exceeding Pythia-7B). Model code and pre-trained checkpoints are open-sourced at https://github.com/state-spaces/mamba. 2 # Selective State Space Model # with Hardware-aware State Expansion # A vuvy GPU SRAM Selection Mechanism es Selection Mechanism Figure 1: (Overview.) Structured SSMs independently map each channel (e.g. ð · = 5) of an input ð ¥ to output ð ¦ through a higher dimensional latent state â (e.g. ð = 4). Prior SSMs avoid materializing this large effective state (ð ·ð , times batch size ð µ and sequence length ð\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 8, tokens 297, triggered by: token limit\n", + "\u001b[35m¿) through clever alternate computation paths requiring time-invariance: the (â , A, B, C) parameters are constant across time. Our selection mechanism adds back input-dependent dynamics, which also requires a careful hardware-aware algorithm to only materialize the expanded states in more efficient levels of the GPU memory hierarchy. # 2 State Space Models Structured state space sequence models (S4) are a recent class of sequence models for deep learning that are broadly related to RNNs, and CNNs, and classical state space models. They are inspired by a particular continuous system (1) that maps a 1-dimensional function or sequence ð ¥(ð ¡) â â â ¦ ð ¦(ð ¡) â â through an implicit latent state â (ð ¡) â â ð . Concretely, S4 models are deï¬ ned with four parameters (â , A, B, C), which deï¬ ne a sequence-to-sequence trans- formation in two stages. â â ²(ð ¡) = Aâ (ð ¡) + Bð ¥(ð ¡) ð ¦(ð ¡) = Câ (ð ¡) (1a) (1b) â ð ¡ = Aâ ð ¡â 1 + Bð ¥ð ¡ ð ¦ð ¡ = Câ ð ¡ (2a) (2b) ð ð ² = (Cð ©, Cð ¨ð ©, â\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 9, tokens 290, triggered by: token limit\n", + "\u001b[31m¦ , Cð ¨ ð ¦ = ð ¥ â ð ² ð ©, â ¦ ) (3a) (3b) Discretization. The ï¬ rst stage transforms the â continuous parametersâ (â , A, B) to â discrete parametersâ (A, B) through ï¬ xed formulas A = ð ð ´(â , A) and B = ð ð µ(â , A, B), where the pair (ð ð ´, ð ð µ) is called a discretization rule. Various rules can be used such as the zero-order hold (ZOH) deï¬ ned in equation (4). A = exp(â A) B = (â A)â 1(exp(â A) â I) â â B (4) Discretization has deep connections to continuous-time systems which can endow them with additional properties such as resolution invariance (Nguyen, Goel, et al. 2022) and automatically ensuring that the model is properly normalized (Gu, Johnson, Timalsina, et al. 2023; Orvieto et al. 2023). It also has connections to gating mechanisms of RNNs (Gu, Gulcehre, et al. 2020; Tallec and Ollivier 2018) which we will revisit in Section 3.5.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 10, tokens 281, triggered by: token limit\n", + "\u001b[32mHowever, from a mechanical point of view discretization can simply be viewed as the ï¬ rst step of the computation graph in the forward pass of an SSM. Alternate ï¬ avors of SSMs can bypass the discretization step and parameterize (A, B) directly instead (Zhang et al. 2023), which may be easier to reason about. Computation. 
After the parameters have been transformed from (â , A, B, C) â ¦ (A, B, C), the model can be computed in two ways, either as a linear recurrence (2) or a global convolution (3). 3 Commonly, the model uses the convolutional mode (3) for eï¬ cient parallelizable training (where the whole input sequence is seen ahead of time), and switched into recurrent mode (2) for eï¬ cient autoregressive inference (where the inputs are seen one timestep at a time). Linear Time Invariance (LTI). An important property of equations (1) to (3) is that the modelâ s dynamics are constant through time. In other words (â , A, B, C), and consequently (A, B) as well, are ï¬ xed for all time-steps. This property is called linear time invariance (LTI), which is deeply connected to recurrence and convolutions.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 11, tokens 296, triggered by: token limit\n", + "\u001b[34mInformally, we think of LTI SSMs as being equivalent to any linear recurrence (2a) or convolution (3b), and use LTI as an umbrella term for these classes of models. Thus far, all structured SSMs have been LTI (e.g. computed as convolutions) because of fundamental eï¬ ciency constraints, discussed in Section 3.3. However, a core insight of this work is that LTI models have fundamental limitations in modeling certain types of data, and our technical contributions involve removing the LTI constraint while overcoming the eï¬ ciency bottlenecks. Structure and Dimensions. Finally, we note that structured SSMs are so named because computing them eï¬ ciently also requires imposing structure on the A matrix. The most popular form of structure is diagonal (Gu, Gupta, et al. 2022; Gupta, Gu, and Berant 2022; Smith, Warrington, and Linderman 2023), which we also use. In this case, the A â â ð à ð , B â â ð à 1, C â â 1à ð matrices can all be represented by ð numbers. To operate over an input sequence ð ¥ of batch size ð µ and length ð ¿ with ð · channels, the SSM is applied independently to each channel. Note that in this case, the total hidden state has dimension ð ·ð\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 12, tokens 298, triggered by: token limit\n", + "\u001b[35mper input, and computing it over the sequence length requires ð (ð µð ¿ð ·ð ) time and memory; this is the root of the fundamental eï¬ ciency bottleneck addressed in Section 3.3. General State Space Models. We note that the term state space model has a very broad meaning which simply represents the notion of any recurrent process with a latent state. It has been used to refer to many disparate concepts in diï¬ erent disciplines, including Markov decision processes (MDP) (reinforcement learning (Hafner et al. 2020)), dynamic causal modeling (DCM) (computational neuroscience (Friston, Harrison, and Penny 2003)), Kalman ï¬ lters (controls (Kalman 1960)), hidden Markov models (HMM) and linear dynamical systems (LDS) (machine learning), and recurrent (and sometimes convolutional) models at large (deep learning). Throughout this entire paper we use the term â SSMâ to refer exclusively to the class of structured SSMs or S4 models (Gu, Goel, and Ré 2022; Gu, Gupta, et al. 2022; Gupta, Gu, and Berant 2022; Hasani et al. 2023; Ma et al. 
2023; Smith, Warrington, and Linderman 2023) and use these terms interchangeably.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 13, tokens 296, triggered by: token limit\n", + "\u001b[31mFor convenience we may also include derivatives of such models, such as those focusing on either the linear-recurrence or global-convolution viewpoints (Y. Li et al. 2023; Orvieto et al. 2023; Poli et al. 2023), and clarify nuances when necessary. SSM Architectures. SSMs are standalone sequence transformations that can be incorporated into end-to-end neural network architectures. (We also sometimes call SSM architectures SSNNs, which are to SSM layers as CNNs are to linear convolution layers.) We discuss some of the most well-known SSM architectures, many of which will also serve as our primary baselines. â ¢ Linear attention (Katharopoulos et al. 2020) is an approximation of self-attention involving a recurrence which can be viewed as a degenerate linear SSM. â ¢ H3 (Dao, Fu, Saab, et al. 2023) generalized this recurrence to use S4; it can be viewed as an architecture with an SSM sandwiched by two gated connections (Figure 3). H3 also inserts a standard local convolution, which they frame as a shift-SSM, before the main SSM layer. â ¢ Hyena (Poli et al. 2023) uses the same architecture as H3 but replaces the S4 layer with an MLP-parameterized global convolution (Romero et al. 2021). â\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 14, tokens 281, triggered by: token limit\n", + "\u001b[32m¢ RetNet (Y. Sun et al. 2023) adds an additional gate to the architecture and uses a simpler SSM, allowing an alternative parallelizable computation path, using a variant of multi-head attention (MHA) instead of convolutions. 4 â ¢ RWKV (B. Peng et al. 2023) is a recent RNN designed for language modeling based on another linear attention approximation (attention-free Transformer (S. Zhai et al. 2021)). Its main â WKVâ mechanism involves LTI recurrences and can be viewed as the ratio of two SSMs. Other closely related SSMs and architectures are discussed further in an extended related work (Appendix B). We highlight in particular S5 (Smith, Warrington, and Linderman 2023), QRNN (Bradbury et al. 2016), and SRU (Lei et al. 2017), which we view as the most closely related methods to our core selective SSM. # 3 Selective State Space Models We motivate our selection mechanism using intuition from synthetic tasks (Section 3.1), then explain how to incorporate this mechanism into state space models (Section 3.2). The resulting time-varying SSMs cannot use convolutions, presenting a technical challenge of how to compute them eï¬ ciently.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 15, tokens 294, triggered by: token limit\n", + "\u001b[34mWe overcome this with a hardware-aware algorithm that exploits the memory hierarchy on modern hardware (Section 3.3). We then describe a simple SSM architecture without attention or even MLP blocks (Section 3.4). Finally, we discuss some additional properties of selection mechanisms (Section 3.5). # 3.1 Motivation: Selection as a Means of Compression We argue that a fundamental problem of sequence modeling is compressing context into a smaller state. 
In fact, we can view the tradeoï¬ s of popular sequence models from this point of view. For example, attention is both eï¬ ective and ineï¬ cient because it explicitly does not compress context at all. This can be seen from the fact that autoregressive inference requires explicitly storing the entire context (i.e. the KV cache), which directly causes the slow linear-time inference and quadratic-time training of Transformers. On the other hand, recurrent models are eï¬ cient because they have a ï¬ nite state, implying constant-time inference and linear-time training. However, their eï¬ ectiveness is limited by how well this state has compressed the context. To understand this principle, we focus on two running examples of synthetic tasks (Figure 2). â ¢ The Selective Copying task modiï¬ es the popular Copying task (Arjovsky, Shah, and Bengio 2016) by varying the position of the tokens to memorize.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 16, tokens 288, triggered by: token limit\n", + "\u001b[35mIt requires content-aware reasoning to be able to memorize the relevant tokens (colored) and ï¬ lter out the irrelevant ones (white). â ¢ The Induction Heads task is a well-known mechanism hypothesized to explain the majority of in-context learning abilities of LLMs (Olsson et al. 2022). It requires context-aware reasoning to know when to produce the correct output in the appropriate context (black). These tasks reveal the failure mode of LTI models. From the recurrent view, their constant dynamics (e.g. the (A, B) transitions in (2)) cannot let them select the correct information from their context, or aï¬ ect the hidden state passed along the sequence an in input-dependent way. From the convolutional view, it is known that global convolutions can solve the vanilla Copying task (Romero et al. 2021) because it only requires time-awareness, but that they have diï¬ culty with the Selective Copying task because of lack of content-awareness (Figure 2). More concretely, the spacing between inputs-to-outputs is varying and cannot be modeled by static convolution kernels. In summary, the eï¬ ciency vs. eï¬ ectiveness tradeoï¬ of sequence models is characterized by how well they compress their state: eï¬ cient models must have a small state, while eï¬\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 17, tokens 126, triggered by: final split\n", + "\u001b[31mective models must have a state that contains all necessary information from the context. In turn, we propose that a fundamental principle for building sequence models is selectivity: or the context-aware ability to focus on or ï¬ lter out inputs into a sequential state. In particular, a selection mechanism controls how information propagates or interacts along the sequence dimension (see Section 3.5 for more discussion). # Improving SSMs with Selection One method of incorporating a selection mechanism into models is by letting their parameters that aï¬ ect interactions along the sequence (e.g. 
the recurrent dynamics of an RNN or the c\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n" + ] + } + ], + "source": [ + "from typing import List\n", + "from semantic_chunkers import RegexChunker\n", + "from semantic_chunkers.schema import Chunk\n", + "\n", + "chunker = RegexChunker()\n", + "chunks: List[List[Chunk]] = chunker(docs=[content])\n", + "chunker.print(chunks[0])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1781,7 +1913,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.12.4" }, "widgets": { "application/vnd.jupyter.widget-state+json": { diff --git a/docs/02-chunkers-async.ipynb b/docs/02-chunkers-async.ipynb index 3a5dd9f..457730b 100644 --- a/docs/02-chunkers-async.ipynb +++ b/docs/02-chunkers-async.ipynb @@ -9,7 +9,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -17,7 +17,17 @@ "id": "iFgZNmSH2Dee", "outputId": "45754137-cb9c-4e85-9dbc-e139c8a2c9bb" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.2\u001b[0m\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n" + ] + } + ], "source": [ "!pip install -qU \\\n", " semantic-chunkers \\\n", @@ -37,12 +47,12 @@ "source": [ "Semantic chunkers allow us to build more context aware chunks of information. We can use this for RAG, splitting video, audio, and much more.\n", "\n", - "In this example, we will stick with a simple RAG-focused example. We will learn about three different types of chunkers available to us; `StatisticalChunker`, `ConsecutiveChunker`, and `CumulativeChunker`. To begin, we need some data." + "In this example, we will stick with a simple RAG-focused example. We will learn about three different types of chunkers available to us; `StatisticalChunker`, `ConsecutiveChunker`, `CumulativeChunker`, and `RegexChunker`. To begin, we need some data." ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -87,6 +97,14 @@ "outputId": "bd606fad-8214-4fd4-cad1-54bb86234575" }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/jakit/customers/aurelio/semantic-chunkers/.venv_312/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + }, { "data": { "text/plain": [ @@ -96,7 +114,7 @@ "})" ] }, - "execution_count": 14, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } @@ -110,7 +128,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 3, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -145,7 +163,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -558,6 +576,120 @@ "chunker.print(chunks[0])" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Regex Chunking" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Split 1, tokens 300, triggered by: token limit\n", + "\u001b[31m# Mamba: Linear-Time Sequence Modeling with Selective State Spaces # Albert Gu*1 and Tri Dao*2 1Machine Learning Department, Carnegie Mellon University 2Department of Computer Science, Princeton University agu@cs.cmu.edu, tri@tridao.me # Abstract Foundation models, now powering most of the exciting applications in deep learning, are almost universally based on the Transformer architecture and its core attention module. Many subquadratic-time architectures such as linear attention, gated convolution and recurrent models, and structured state space models (SSMs) have been developed to address Transformersâ computational ineï¬ ciency on long sequences, but they have not performed as well as attention on important modalities such as language. We identify that a key weakness of such models is their inability to perform content-based reasoning, and make several improvements. First, simply letting the SSM parameters be functions of the input addresses their weakness with discrete modalities, allowing the model to selectively propagate or forget information along the sequence length dimension depending on the current token. Second, even though this change prevents the use of eï¬ cient convolutions, we design a hardware-aware parallel algorithm in recurrent mode. We integrate these selective SSMs into a simpliï¬ ed end-to-end neural network architecture without attention or even MLP blocks (Mamba). Mamba enjoys fast inference (5à higher throughput than Transformers) and linear scaling in sequence length, and its performance improves on real data up to million-length sequences.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 2, tokens 300, triggered by: token limit\n", + "\u001b[32mAs a general sequence model backbone, Mamba achieves state-of-the-art performance across several modalities such as language, audio, and genomics. On language modeling, our Mamba-3B model outperforms Transformers of the same size and matches Transformers twice its size, both in pretraining and downstream evaluation. # 1 Introduction Foundation models (FMs), or large models pretrained on massive data then adapted for downstream tasks, have emerged as an eï¬ ective paradigm in modern machine learning. The backbone of these FMs are often sequence models, operating on arbitrary sequences of inputs from a wide variety of domains such as language, images, speech, audio, time series, and genomics (Brown et al. 2020; Dosovitskiy et al. 2020; Ismail Fawaz et al. 2019; Oord et al. 2016; Poli et al. 
2023; Sutskever, Vinyals, and Quoc V Le 2014). While this concept is agnostic to a particular choice of model architecture, modern FMs are predominantly based on a single type of sequence model: the Transformer (Vaswani et al. 2017) and its core attention layer (Bahdanau, Cho, and Bengio 2015) The eï¬ cacy of self-attention is attributed to its ability to route information densely within a context window, allowing it to model complex data.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 3, tokens 298, triggered by: token limit\n", + "\u001b[34mHowever, this property brings fundamental drawbacks: an inability to model anything outside of a ï¬ nite window, and quadratic scaling with respect to the window length. An enormous body of research has appeared on more eï¬ cient variants of attention to overcome these drawbacks (Tay, Dehghani, Bahri, et al. 2022), but often at the expense of the very properties that makes it eï¬ ective. As of yet, none of these variants have been shown to be empirically eï¬ ective at scale across domains. Recently, structured state space sequence models (SSMs) (Gu, Goel, and Ré 2022; Gu, Johnson, Goel, et al. 2021) have emerged as a promising class of architectures for sequence modeling. These models can be interpreted as a combination of recurrent neural networks (RNNs) and convolutional neural networks (CNNs), with inspiration from classical state space models (Kalman 1960). This class of models can be computed very eï¬ ciently as either a recurrence or convolution, with linear or near-linear scaling in sequence length. Additionally, they have principled Equal contribution. 1 mechanisms for modeling long-range dependencies (Gu, Dao, et al. 2020) in certain data modalities, and have dominated benchmarks such as the Long Range Arena (Tay, Dehghani, Abnar, et al. 2021).\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 4, tokens 293, triggered by: token limit\n", + "\u001b[35mMany ï¬ avors of SSMs (Gu, Goel, and Ré 2022; Gu, Gupta, et al. 2022; Gupta, Gu, and Berant 2022; Y. Li et al. 2023; Ma et al. 2023; Orvieto et al. 2023; Smith, Warrington, and Linderman 2023) have been successful in domains involving continuous signal data such as audio and vision (Goel et al. 2022; Nguyen, Goel, et al. 2022; Saon, Gupta, and Cui 2023). However, they have been less eï¬ ective at modeling discrete and information-dense data such as text. We propose a new class of selective state space models, that improves on prior work on several axes to achieve the modeling power of Transformers while scaling linearly in sequence length. Selection Mechanism. First, we identify a key limitation of prior models: the ability to eï¬ ciently select data in an input-dependent manner (i.e. focus on or ignore particular inputs). Building on intuition based on important synthetic tasks such as selective copy and induction heads, we design a simple selection mechanism by parameterizing the SSM parameters based on the input. This allows the model to ï¬ lter out irrelevant information and remember relevant information indeï¬ nitely. 
Hardware-aware Algorithm.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 5, tokens 200, triggered by: token limit\n", + "\u001b[31mThis simple change poses a technical challenge for the computation of the model; in fact, all prior SSMs models must be time- and input-invariant in order to be computationally eï¬ cient. We overcome this with a hardware-aware algorithm that computes the model recurrently with a scan instead of convolution, but does not materialize the expanded state in order to avoid IO access between diï¬ erent levels of the GPU memory hierarchy. The resulting implementation is faster than previous methods both in theory (scaling linearly in sequence length, compared to pseudo-linear for all convolution-based SSMs) and on modern hardware (up to 3à faster on A100 GPUs). Architecture. We simplify prior deep sequence model architectures by combining the design of prior SSM architectures (Dao, Fu, Saab, et al. 2023) with the MLP block of Transformers into a single block, leading to a simple and homogenous architecture design (Mamba) incorporating selective state spaces.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 6, tokens 292, triggered by: token limit\n", + "\u001b[32mSelective SSMs, and by extension the Mamba architecture, are fully recurrent models with key properties that make them suitable as the backbone of general foundation models operating on sequences. (i) High quality: selectivity brings strong performance on dense modalities such as language and genomics. (ii) Fast training and inference: computation and memory scales linearly in sequence length during training, and unrolling the model autoregressively during inference requires only constant time per step since it does not require a cache of previous elements. (iii) Long context: the quality and eï¬ ciency together yield performance improvements on real data up to sequence length 1M. We empirically validate Mambaâ s potential as a general sequence FM backbone, in both pretraining quality and domain-speciï¬ c task performance, on several types of modalities and settings: â ¢ Synthetics. On important synthetic tasks such as copying and induction heads that have been proposed as being key to large language models, Mamba not only solves them easily but can extrapolate solutions indeï¬ nitely long (>1M tokens). â ¢ Audio and Genomics. Mamba out-performs prior state-of-the-art models such as SaShiMi, Hyena, and Transform- ers on modeling audio waveforms and DNA sequences, both in pretraining quality and downstream metrics (e.g. reducing FID on a challenging speech generation dataset by more than half).\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 7, tokens 295, triggered by: token limit\n", + "\u001b[34mIn both settings, its performance improves with longer context up to million-length sequences. â ¢ Language Modeling. Mamba is the ï¬ rst linear-time sequence model that truly achieves Transformer-quality performance, both in pretraining perplexity and downstream evaluations. With scaling laws up to 1B parameters, we show that Mamba exceeds the performance of a large range of baselines, including very strong modern Transformer training recipes based on LLaMa (Touvron et al. 2023). 
Our Mamba language model has 5à generation throughput compared to Transformers of similar size, and Mamba-3Bâ s quality matches that of Transformers twice its size (e.g. 4 points higher avg. on common sense reasoning compared to Pythia-3B and even exceeding Pythia-7B). Model code and pre-trained checkpoints are open-sourced at https://github.com/state-spaces/mamba. 2 # Selective State Space Model # with Hardware-aware State Expansion # A vuvy GPU SRAM Selection Mechanism es Selection Mechanism Figure 1: (Overview.) Structured SSMs independently map each channel (e.g. ð · = 5) of an input ð ¥ to output ð ¦ through a higher dimensional latent state â (e.g. ð = 4). Prior SSMs avoid materializing this large effective state (ð ·ð , times batch size ð µ and sequence length ð\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 8, tokens 297, triggered by: token limit\n", + "\u001b[35m¿) through clever alternate computation paths requiring time-invariance: the (â , A, B, C) parameters are constant across time. Our selection mechanism adds back input-dependent dynamics, which also requires a careful hardware-aware algorithm to only materialize the expanded states in more efficient levels of the GPU memory hierarchy. # 2 State Space Models Structured state space sequence models (S4) are a recent class of sequence models for deep learning that are broadly related to RNNs, and CNNs, and classical state space models. They are inspired by a particular continuous system (1) that maps a 1-dimensional function or sequence ð ¥(ð ¡) â â â ¦ ð ¦(ð ¡) â â through an implicit latent state â (ð ¡) â â ð . Concretely, S4 models are deï¬ ned with four parameters (â , A, B, C), which deï¬ ne a sequence-to-sequence trans- formation in two stages. â â ²(ð ¡) = Aâ (ð ¡) + Bð ¥(ð ¡) ð ¦(ð ¡) = Câ (ð ¡) (1a) (1b) â ð ¡ = Aâ ð ¡â 1 + Bð ¥ð ¡ ð ¦ð ¡ = Câ ð ¡ (2a) (2b) ð ð ² = (Cð ©, Cð ¨ð ©, â\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 9, tokens 290, triggered by: token limit\n", + "\u001b[31m¦ , Cð ¨ ð ¦ = ð ¥ â ð ² ð ©, â ¦ ) (3a) (3b) Discretization. The ï¬ rst stage transforms the â continuous parametersâ (â , A, B) to â discrete parametersâ (A, B) through ï¬ xed formulas A = ð ð ´(â , A) and B = ð ð µ(â , A, B), where the pair (ð ð ´, ð ð µ) is called a discretization rule. Various rules can be used such as the zero-order hold (ZOH) deï¬ ned in equation (4). A = exp(â A) B = (â A)â 1(exp(â A) â I) â â B (4) Discretization has deep connections to continuous-time systems which can endow them with additional properties such as resolution invariance (Nguyen, Goel, et al. 2022) and automatically ensuring that the model is properly normalized (Gu, Johnson, Timalsina, et al. 2023; Orvieto et al. 2023). It also has connections to gating mechanisms of RNNs (Gu, Gulcehre, et al. 2020; Tallec and Ollivier 2018) which we will revisit in Section 3.5.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 10, tokens 281, triggered by: token limit\n", + "\u001b[32mHowever, from a mechanical point of view discretization can simply be viewed as the ï¬ rst step of the computation graph in the forward pass of an SSM. Alternate ï¬ avors of SSMs can bypass the discretization step and parameterize (A, B) directly instead (Zhang et al. 2023), which may be easier to reason about. Computation. 
After the parameters have been transformed from (â , A, B, C) â ¦ (A, B, C), the model can be computed in two ways, either as a linear recurrence (2) or a global convolution (3). 3 Commonly, the model uses the convolutional mode (3) for eï¬ cient parallelizable training (where the whole input sequence is seen ahead of time), and switched into recurrent mode (2) for eï¬ cient autoregressive inference (where the inputs are seen one timestep at a time). Linear Time Invariance (LTI). An important property of equations (1) to (3) is that the modelâ s dynamics are constant through time. In other words (â , A, B, C), and consequently (A, B) as well, are ï¬ xed for all time-steps. This property is called linear time invariance (LTI), which is deeply connected to recurrence and convolutions.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 11, tokens 296, triggered by: token limit\n", + "\u001b[34mInformally, we think of LTI SSMs as being equivalent to any linear recurrence (2a) or convolution (3b), and use LTI as an umbrella term for these classes of models. Thus far, all structured SSMs have been LTI (e.g. computed as convolutions) because of fundamental eï¬ ciency constraints, discussed in Section 3.3. However, a core insight of this work is that LTI models have fundamental limitations in modeling certain types of data, and our technical contributions involve removing the LTI constraint while overcoming the eï¬ ciency bottlenecks. Structure and Dimensions. Finally, we note that structured SSMs are so named because computing them eï¬ ciently also requires imposing structure on the A matrix. The most popular form of structure is diagonal (Gu, Gupta, et al. 2022; Gupta, Gu, and Berant 2022; Smith, Warrington, and Linderman 2023), which we also use. In this case, the A â â ð à ð , B â â ð à 1, C â â 1à ð matrices can all be represented by ð numbers. To operate over an input sequence ð ¥ of batch size ð µ and length ð ¿ with ð · channels, the SSM is applied independently to each channel. Note that in this case, the total hidden state has dimension ð ·ð\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 12, tokens 298, triggered by: token limit\n", + "\u001b[35mper input, and computing it over the sequence length requires ð (ð µð ¿ð ·ð ) time and memory; this is the root of the fundamental eï¬ ciency bottleneck addressed in Section 3.3. General State Space Models. We note that the term state space model has a very broad meaning which simply represents the notion of any recurrent process with a latent state. It has been used to refer to many disparate concepts in diï¬ erent disciplines, including Markov decision processes (MDP) (reinforcement learning (Hafner et al. 2020)), dynamic causal modeling (DCM) (computational neuroscience (Friston, Harrison, and Penny 2003)), Kalman ï¬ lters (controls (Kalman 1960)), hidden Markov models (HMM) and linear dynamical systems (LDS) (machine learning), and recurrent (and sometimes convolutional) models at large (deep learning). Throughout this entire paper we use the term â SSMâ to refer exclusively to the class of structured SSMs or S4 models (Gu, Goel, and Ré 2022; Gu, Gupta, et al. 2022; Gupta, Gu, and Berant 2022; Hasani et al. 2023; Ma et al. 
2023; Smith, Warrington, and Linderman 2023) and use these terms interchangeably.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 13, tokens 296, triggered by: token limit\n", + "\u001b[31mFor convenience we may also include derivatives of such models, such as those focusing on either the linear-recurrence or global-convolution viewpoints (Y. Li et al. 2023; Orvieto et al. 2023; Poli et al. 2023), and clarify nuances when necessary. SSM Architectures. SSMs are standalone sequence transformations that can be incorporated into end-to-end neural network architectures. (We also sometimes call SSM architectures SSNNs, which are to SSM layers as CNNs are to linear convolution layers.) We discuss some of the most well-known SSM architectures, many of which will also serve as our primary baselines. â ¢ Linear attention (Katharopoulos et al. 2020) is an approximation of self-attention involving a recurrence which can be viewed as a degenerate linear SSM. â ¢ H3 (Dao, Fu, Saab, et al. 2023) generalized this recurrence to use S4; it can be viewed as an architecture with an SSM sandwiched by two gated connections (Figure 3). H3 also inserts a standard local convolution, which they frame as a shift-SSM, before the main SSM layer. â ¢ Hyena (Poli et al. 2023) uses the same architecture as H3 but replaces the S4 layer with an MLP-parameterized global convolution (Romero et al. 2021). â\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 14, tokens 281, triggered by: token limit\n", + "\u001b[32m¢ RetNet (Y. Sun et al. 2023) adds an additional gate to the architecture and uses a simpler SSM, allowing an alternative parallelizable computation path, using a variant of multi-head attention (MHA) instead of convolutions. 4 â ¢ RWKV (B. Peng et al. 2023) is a recent RNN designed for language modeling based on another linear attention approximation (attention-free Transformer (S. Zhai et al. 2021)). Its main â WKVâ mechanism involves LTI recurrences and can be viewed as the ratio of two SSMs. Other closely related SSMs and architectures are discussed further in an extended related work (Appendix B). We highlight in particular S5 (Smith, Warrington, and Linderman 2023), QRNN (Bradbury et al. 2016), and SRU (Lei et al. 2017), which we view as the most closely related methods to our core selective SSM. # 3 Selective State Space Models We motivate our selection mechanism using intuition from synthetic tasks (Section 3.1), then explain how to incorporate this mechanism into state space models (Section 3.2). The resulting time-varying SSMs cannot use convolutions, presenting a technical challenge of how to compute them eï¬ ciently.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 15, tokens 294, triggered by: token limit\n", + "\u001b[34mWe overcome this with a hardware-aware algorithm that exploits the memory hierarchy on modern hardware (Section 3.3). We then describe a simple SSM architecture without attention or even MLP blocks (Section 3.4). Finally, we discuss some additional properties of selection mechanisms (Section 3.5). # 3.1 Motivation: Selection as a Means of Compression We argue that a fundamental problem of sequence modeling is compressing context into a smaller state. 
In fact, we can view the tradeoï¬ s of popular sequence models from this point of view. For example, attention is both eï¬ ective and ineï¬ cient because it explicitly does not compress context at all. This can be seen from the fact that autoregressive inference requires explicitly storing the entire context (i.e. the KV cache), which directly causes the slow linear-time inference and quadratic-time training of Transformers. On the other hand, recurrent models are eï¬ cient because they have a ï¬ nite state, implying constant-time inference and linear-time training. However, their eï¬ ectiveness is limited by how well this state has compressed the context. To understand this principle, we focus on two running examples of synthetic tasks (Figure 2). â ¢ The Selective Copying task modiï¬ es the popular Copying task (Arjovsky, Shah, and Bengio 2016) by varying the position of the tokens to memorize.\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 16, tokens 288, triggered by: token limit\n", + "\u001b[35mIt requires content-aware reasoning to be able to memorize the relevant tokens (colored) and ï¬ lter out the irrelevant ones (white). â ¢ The Induction Heads task is a well-known mechanism hypothesized to explain the majority of in-context learning abilities of LLMs (Olsson et al. 2022). It requires context-aware reasoning to know when to produce the correct output in the appropriate context (black). These tasks reveal the failure mode of LTI models. From the recurrent view, their constant dynamics (e.g. the (A, B) transitions in (2)) cannot let them select the correct information from their context, or aï¬ ect the hidden state passed along the sequence an in input-dependent way. From the convolutional view, it is known that global convolutions can solve the vanilla Copying task (Romero et al. 2021) because it only requires time-awareness, but that they have diï¬ culty with the Selective Copying task because of lack of content-awareness (Figure 2). More concretely, the spacing between inputs-to-outputs is varying and cannot be modeled by static convolution kernels. In summary, the eï¬ ciency vs. eï¬ ectiveness tradeoï¬ of sequence models is characterized by how well they compress their state: eï¬ cient models must have a small state, while eï¬\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n", + "Split 17, tokens 126, triggered by: final split\n", + "\u001b[31mective models must have a state that contains all necessary information from the context. In turn, we propose that a fundamental principle for building sequence models is selectivity: or the context-aware ability to focus on or ï¬ lter out inputs into a sequential state. In particular, a selection mechanism controls how information propagates or interacts along the sequence dimension (see Section 3.5 for more discussion). # Improving SSMs with Selection One method of incorporating a selection mechanism into models is by letting their parameters that aï¬ ect interactions along the sequence (e.g. 
the recurrent dynamics of an RNN or the c\u001b[0m\n", + "----------------------------------------------------------------------------------------\n", + "\n", + "\n" + ] + } + ], + "source": [ + "from typing import List\n", + "from semantic_chunkers import RegexChunker\n", + "from semantic_chunkers.schema import Chunk\n", + "\n", + "chunker = RegexChunker()\n", + "chunks: List[List[Chunk]] = await chunker.acall(docs=[content])\n", + "chunker.print(chunks[0])" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -584,7 +716,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.12.4" }, "widgets": { "application/vnd.jupyter.widget-state+json": { diff --git a/semantic_chunkers/__init__.py b/semantic_chunkers/__init__.py index b9a89b5..25a29f5 100644 --- a/semantic_chunkers/__init__.py +++ b/semantic_chunkers/__init__.py @@ -2,6 +2,7 @@ BaseChunker, ConsecutiveChunker, CumulativeChunker, + RegexChunker, StatisticalChunker, ) from semantic_chunkers.splitters import BaseSplitter, RegexSplitter @@ -13,6 +14,7 @@ "StatisticalChunker", "RegexSplitter", "BaseSplitter", + "RegexChunker", ] -__version__ = "0.0.8" +__version__ = "0.0.9" diff --git a/semantic_chunkers/chunkers/__init__.py b/semantic_chunkers/chunkers/__init__.py index 20484e2..613e8ab 100644 --- a/semantic_chunkers/chunkers/__init__.py +++ b/semantic_chunkers/chunkers/__init__.py @@ -1,6 +1,7 @@ from semantic_chunkers.chunkers.base import BaseChunker from semantic_chunkers.chunkers.consecutive import ConsecutiveChunker from semantic_chunkers.chunkers.cumulative import CumulativeChunker +from semantic_chunkers.chunkers.regex import RegexChunker from semantic_chunkers.chunkers.statistical import StatisticalChunker __all__ = [ @@ -8,4 +9,5 @@ "ConsecutiveChunker", "CumulativeChunker", "StatisticalChunker", + "RegexChunker", ] diff --git a/semantic_chunkers/chunkers/base.py b/semantic_chunkers/chunkers/base.py index 2e55e5c..5b51c21 100644 --- a/semantic_chunkers/chunkers/base.py +++ b/semantic_chunkers/chunkers/base.py @@ -10,7 +10,7 @@ class BaseChunker(BaseModel): name: str - encoder: BaseEncoder + encoder: BaseEncoder | None splitter: BaseSplitter class Config: diff --git a/semantic_chunkers/chunkers/consecutive.py b/semantic_chunkers/chunkers/consecutive.py index 921a73b..f09e611 100644 --- a/semantic_chunkers/chunkers/consecutive.py +++ b/semantic_chunkers/chunkers/consecutive.py @@ -15,6 +15,8 @@ class ConsecutiveChunker(BaseChunker): Called "consecutive sim chunker" because we check the similarities of consecutive document embeddings (compare ith to i+1th document embedding). """ + encoder: BaseEncoder + def __init__( self, encoder: BaseEncoder, diff --git a/semantic_chunkers/chunkers/cumulative.py b/semantic_chunkers/chunkers/cumulative.py index 7e2235c..bdf5ef0 100644 --- a/semantic_chunkers/chunkers/cumulative.py +++ b/semantic_chunkers/chunkers/cumulative.py @@ -16,6 +16,8 @@ class CumulativeChunker(BaseChunker): embeddings of cumulative concatenated documents with the next document. 
""" + encoder: BaseEncoder + def __init__( self, encoder: BaseEncoder, diff --git a/semantic_chunkers/chunkers/regex.py b/semantic_chunkers/chunkers/regex.py new file mode 100644 index 0000000..8752d6c --- /dev/null +++ b/semantic_chunkers/chunkers/regex.py @@ -0,0 +1,50 @@ +import asyncio +from typing import List + +from semantic_chunkers.chunkers.base import BaseChunker +from semantic_chunkers.schema import Chunk +from semantic_chunkers.splitters import RegexSplitter +from semantic_chunkers.utils import text + + +class RegexChunker(BaseChunker): + def __init__(self, max_chunk_tokens: int = 300): + super().__init__(name="regex_chunker", encoder=None, splitter=RegexSplitter()) + self.max_chunk_tokens = max_chunk_tokens + + def __call__(self, docs: list[str]) -> List[List[Chunk]]: + chunks = [] + current_chunk = Chunk( + splits=[], + metadata={}, + ) + current_chunk.token_count = 0 + + for doc in docs: + regex_splitter = RegexSplitter() + sentences = regex_splitter(doc) + for sentence in sentences: + sentence_token_count = text.tiktoken_length(sentence) + + if ( + current_chunk.token_count + sentence_token_count + > self.max_chunk_tokens + ): + chunks.append(current_chunk) + current_chunk = Chunk(splits=[]) + current_chunk.token_count = 0 + + current_chunk.splits.append(sentence) + if current_chunk.token_count is None: + current_chunk.token_count = 0 + current_chunk.token_count += sentence_token_count + + # Last chunk + if current_chunk.splits: + chunks.append(current_chunk) + + return [chunks] + + async def acall(self, docs: list[str]) -> List[List[Chunk]]: + chunks = await asyncio.to_thread(self.__call__, docs) + return chunks diff --git a/semantic_chunkers/chunkers/statistical.py b/semantic_chunkers/chunkers/statistical.py index 73793f1..dabb107 100644 --- a/semantic_chunkers/chunkers/statistical.py +++ b/semantic_chunkers/chunkers/statistical.py @@ -44,6 +44,8 @@ def __str__(self): class StatisticalChunker(BaseChunker): + encoder: BaseEncoder + def __init__( self, encoder: BaseEncoder, diff --git a/tests/unit/test_regex_chunker.py b/tests/unit/test_regex_chunker.py new file mode 100644 index 0000000..59f3652 --- /dev/null +++ b/tests/unit/test_regex_chunker.py @@ -0,0 +1,48 @@ +import asyncio +import unittest + +from semantic_chunkers.chunkers.regex import RegexChunker +from semantic_chunkers.schema import Chunk +from semantic_chunkers.utils import text + + +class TestRegexChunker(unittest.TestCase): + def setUp(self): + self.chunker = RegexChunker(max_chunk_tokens=10) + + def test_call(self): + docs = ["This is a test. This is only a test."] + chunks_list = self.chunker(docs) + chunks = chunks_list[0] + + self.assertIsInstance(chunks, list) + self.assertTrue(all(isinstance(chunk, Chunk) for chunk in chunks)) + self.assertGreater(len(chunks), 0) + self.assertTrue( + all( + text.tiktoken_length(chunk.content) <= self.chunker.max_chunk_tokens + for chunk in chunks + ) + ) + + def test_acall(self): + docs = ["This is a test. 
This is only a test."] + + async def run_test(): + chunks_list = await self.chunker.acall(docs) + chunks = chunks_list[0] + self.assertIsInstance(chunks, list) + self.assertTrue(all(isinstance(chunk, Chunk) for chunk in chunks)) + self.assertGreater(len(chunks), 0) + self.assertTrue( + all( + text.tiktoken_length(chunk.content) <= self.chunker.max_chunk_tokens + for chunk in chunks + ) + ) + + asyncio.run(run_test()) + + +if __name__ == "__main__": + unittest.main() From dfb27136d43d7c34dde81add801b35958031be4e Mon Sep 17 00:00:00 2001 From: Simonas <20096648+simjak@users.noreply.github.com> Date: Fri, 19 Jul 2024 11:13:50 +0300 Subject: [PATCH 3/7] chore: Python 3.9 support --- semantic_chunkers/chunkers/base.py | 4 ++-- semantic_chunkers/chunkers/statistical.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/semantic_chunkers/chunkers/base.py b/semantic_chunkers/chunkers/base.py index 5b51c21..dd26d8e 100644 --- a/semantic_chunkers/chunkers/base.py +++ b/semantic_chunkers/chunkers/base.py @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import Any, List, Optional from colorama import Fore, Style from pydantic.v1 import BaseModel, Extra @@ -10,7 +10,7 @@ class BaseChunker(BaseModel): name: str - encoder: BaseEncoder | None + encoder: Optional[BaseEncoder] splitter: BaseSplitter class Config: diff --git a/semantic_chunkers/chunkers/statistical.py b/semantic_chunkers/chunkers/statistical.py index dabb107..1932e68 100644 --- a/semantic_chunkers/chunkers/statistical.py +++ b/semantic_chunkers/chunkers/statistical.py @@ -1,6 +1,6 @@ import asyncio from dataclasses import dataclass -from typing import Any, List +from typing import Any, List, Optional import numpy as np from semantic_router.encoders.base import BaseEncoder @@ -106,7 +106,7 @@ def _chunk( splits = [split for split in new_splits if split and split.strip()] chunks = [] - last_chunk: Chunk | None = None + last_chunk: Optional[Chunk] = None for i in tqdm(range(0, len(splits), batch_size)): batch_splits = splits[i : i + batch_size] if last_chunk is not None: From fe63881cf165a8d9e8923ee350179d796b5c70dc Mon Sep 17 00:00:00 2001 From: Simonas <20096648+simjak@users.noreply.github.com> Date: Fri, 19 Jul 2024 13:42:40 +0300 Subject: [PATCH 4/7] feat: Regex delimiters --- semantic_chunkers/chunkers/regex.py | 13 +++++-- semantic_chunkers/splitters/regex.py | 54 ++++++++++++++++------------ tests/unit/test_regex_splitter.py | 18 ++++++++-- 3 files changed, 56 insertions(+), 29 deletions(-) diff --git a/semantic_chunkers/chunkers/regex.py b/semantic_chunkers/chunkers/regex.py index 8752d6c..95fb38e 100644 --- a/semantic_chunkers/chunkers/regex.py +++ b/semantic_chunkers/chunkers/regex.py @@ -1,5 +1,7 @@ import asyncio -from typing import List +from typing import List, Union + +import regex from semantic_chunkers.chunkers.base import BaseChunker from semantic_chunkers.schema import Chunk @@ -8,9 +10,14 @@ class RegexChunker(BaseChunker): - def __init__(self, max_chunk_tokens: int = 300): + def __init__( + self, + max_chunk_tokens: int = 300, + delimiters: List[Union[str, regex.Pattern]] = [], + ): super().__init__(name="regex_chunker", encoder=None, splitter=RegexSplitter()) self.max_chunk_tokens = max_chunk_tokens + self.delimiters = delimiters def __call__(self, docs: list[str]) -> List[List[Chunk]]: chunks = [] @@ -22,7 +29,7 @@ def __call__(self, docs: list[str]) -> List[List[Chunk]]: for doc in docs: regex_splitter = RegexSplitter() - sentences = regex_splitter(doc) + sentences = regex_splitter(doc, 
delimiters=self.delimiters) for sentence in sentences: sentence_token_count = text.tiktoken_length(sentence) diff --git a/semantic_chunkers/splitters/regex.py b/semantic_chunkers/splitters/regex.py index e9ba8cc..b9c3e75 100644 --- a/semantic_chunkers/splitters/regex.py +++ b/semantic_chunkers/splitters/regex.py @@ -1,4 +1,4 @@ -from typing import List +from typing import List, Union import regex @@ -8,13 +8,6 @@ class RegexSplitter(BaseSplitter): """ Enhanced regex pattern to split a given text into sentences more accurately. - - The enhanced regex pattern includes handling for: - - Direct speech and quotations. - - Abbreviations, initials, and acronyms. - - Decimal numbers and dates. - - Ellipses and other punctuation marks used in informal text. - - Removing control characters and format characters. """ regex_pattern = r""" @@ -49,21 +42,36 @@ class RegexSplitter(BaseSplitter): | # Matches and removes control characters and format characters [\p{Cc}\p{Cf}]+ + # OR + | + # Splits after punctuation marks followed by another punctuation mark + (?<=[\.!?])(?=[\.!?]) + # OR + | + # Splits after exclamation or question marks followed by whitespace or end of string + (?<=[!?])(?=\s|$) """ - def __call__(self, doc: str) -> List[str]: - # Step 1: Split by \n\n - chunks = doc.split("\n\n") - sentences = [] - for chunk in chunks: - # Step 2: Split by \n within each chunk - sub_chunks = chunk.split("\n") - for sub_chunk in sub_chunks: - # Step 3: Split by regex pattern within each sub_chunk - sub_sentences = regex.split( - self.regex_pattern, sub_chunk, flags=regex.VERBOSE - ) - for sentence in sub_sentences: - if sentence.strip(): - sentences.append(sentence.strip()) + def __call__( + self, doc: str, delimiters: List[Union[str, regex.Pattern]] = [] + ) -> List[str]: + # Ensure the regex pattern is applied last + delimiters.append(regex.compile(self.regex_pattern, flags=regex.VERBOSE)) + + sentences = [doc] + for delimiter in delimiters: + sentences_for_next_delimiter = [] + for sentence in sentences: + if isinstance(delimiter, regex.Pattern): + sub_sentences = delimiter.split(sentence) + split_char = "" # No single character to append for regex pattern + else: + sub_sentences = sentence.split(delimiter) + split_char = delimiter + for i, sub_sentence in enumerate(sub_sentences): + if i < len(sub_sentences) - 1: + sub_sentence += split_char + if sub_sentence.strip(): + sentences_for_next_delimiter.append(sub_sentence.strip()) + sentences = sentences_for_next_delimiter return sentences diff --git a/tests/unit/test_regex_splitter.py b/tests/unit/test_regex_splitter.py index 41ab9e0..62b256f 100644 --- a/tests/unit/test_regex_splitter.py +++ b/tests/unit/test_regex_splitter.py @@ -10,19 +10,19 @@ def setUp(self): def test_split_by_double_newline(self): doc = "This is the first paragraph.\n\nThis is the second paragraph." expected = ["This is the first paragraph.", "This is the second paragraph."] - result = self.splitter(doc) + result = self.splitter(doc, delimiters=["\n\n"]) self.assertEqual(result, expected) def test_split_by_single_newline(self): doc = "This is the first line.\nThis is the second line." expected = ["This is the first line.", "This is the second line."] - result = self.splitter(doc) + result = self.splitter(doc, delimiters=["\n"]) self.assertEqual(result, expected) def test_split_by_period(self): doc = "This is the first sentence. This is the second sentence." 
expected = ["This is the first sentence.", "This is the second sentence."] - result = self.splitter(doc) + result = self.splitter(doc, delimiters=["."]) self.assertEqual(result, expected) def test_complex_split(self): @@ -35,6 +35,18 @@ def test_complex_split(self): "Fourth line.", "Fifth paragraph.", ] + result = self.splitter(doc, delimiters=["\n\n", "\n", "."]) + self.assertEqual(result, expected) + + def test_custom_delimiters(self): + doc = "First part|Second part|Third part" + expected = ["First part|", "Second part|", "Third part"] + result = self.splitter(doc, delimiters=["|"]) + self.assertEqual(result, expected) + + def test_regex_split(self): + doc = "This is a sentence. And another one! Yet another?" + expected = ["This is a sentence.", "And another one!", "Yet another?"] result = self.splitter(doc) self.assertEqual(result, expected) From 6817a74cb1b500589ac30e4c07c0436c010cbf52 Mon Sep 17 00:00:00 2001 From: Simonas <20096648+simjak@users.noreply.github.com> Date: Fri, 19 Jul 2024 15:27:09 +0300 Subject: [PATCH 5/7] chore: regex chunker updates --- docs/02-chunkers-async.ipynb | 350 ++------------------------- semantic_chunkers/chunkers/regex.py | 13 +- semantic_chunkers/splitters/regex.py | 12 +- 3 files changed, 34 insertions(+), 341 deletions(-) diff --git a/docs/02-chunkers-async.ipynb b/docs/02-chunkers-async.ipynb index 457730b..0fb440f 100644 --- a/docs/02-chunkers-async.ipynb +++ b/docs/02-chunkers-async.ipynb @@ -9,7 +9,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -17,17 +17,7 @@ "id": "iFgZNmSH2Dee", "outputId": "45754137-cb9c-4e85-9dbc-e139c8a2c9bb" }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.2\u001b[0m\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n" - ] - } - ], + "outputs": [], "source": [ "!pip install -qU \\\n", " semantic-chunkers \\\n", @@ -52,7 +42,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -96,29 +86,7 @@ "id": "aTN4gsdl2WBQ", "outputId": "bd606fad-8214-4fd4-cad1-54bb86234575" }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/jakit/customers/aurelio/semantic-chunkers/.venv_312/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - }, - { - "data": { - "text/plain": [ - "Dataset({\n", - " features: ['id', 'title', 'summary', 'source', 'authors', 'categories', 'comment', 'journal_ref', 'primary_category', 'published', 'updated', 'content', 'references'],\n", - " num_rows: 2673\n", - "})" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "from datasets import load_dataset\n", "\n", @@ -128,7 +96,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -136,19 +104,7 @@ "id": "nNZSP8iL2dDB", "outputId": "9615cc01-27f5-4bdd-9cc8-54f7308bea72" }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "# Mamba: Linear-Time Sequence Modeling with Selective State Spaces\n", - "# Albert Gu*1 and Tri Dao*2\n", - "1Machine Learning Department, Carnegie Mellon University 2Department of Computer Science, Princeton University agu@cs.cmu.edu, tri@tridao.me\n", - "# Abstract\n", - "Foundation models, now powering most of the exciting applications in deep learning, are almost universally based on the Transformer architecture and its core attention module. Many subquadratic-time architectures such as linear attention, gated convolution and recurrent models, and structured state space models (SSMs) have been developed to address Transformersâ computational ineï¬ciency on long sequences, but they have not performed as well as attention on important modalities such as language. We identify that a key weakness of such models is their inability to perform content-based reasoning, and make several improvements. First, simply letting the SSM parameters be functions of the input addresses their weakness with discrete modalities\n" - ] - } - ], + "outputs": [], "source": [ "content = data[3][\"content\"]\n", "print(content[:1000])" @@ -163,7 +119,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -183,7 +139,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": { "id": "Mqnc35w85A8L" }, @@ -218,7 +174,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -229,19 +185,9 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[32m2024-07-03 16:41:12 INFO semantic_chunkers.utils.logger Single document exceeds the maximum token limit of 300. Splitting to sentences before semantically merging.\u001b[0m\n", - "100%|██████████| 6/6 [00:05<00:00, 1.08it/s]\n", - "\u001b[32m2024-07-03 16:41:17 INFO semantic_chunkers.utils.logger Single document exceeds the maximum token limit of 300. Splitting to sentences before semantically merging.\u001b[0m\n" - ] - } - ], + "outputs": [], "source": [ "chunks_async = await chunker.acall(docs=[content])" ] @@ -265,22 +211,9 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
# Mamba:Linear-Time Sequence Modeling with Selective State Spaces# Albert Gu*1 and Tri Dao*21Machine Learning Department, Carnegie Mellon University 2Department of Computer Science, Princeton University agu@cs.cmu.edu, tri@tridao.me# AbstractFoundation models, now powering most of the exciting applications in deep learning, are almost universally based on the Transformer architecture and its core attention module.Many subquadratic-time architectures such as linear attention, gated convolution and recurrent models, and structured state space models (SSMs) have been developed to address Transformersâcomputational ineï¬ciency on long sequences, but they have not performed as well as attention on important modalities such as language.We identify that a key weakness of such models is their inability to perform content-based reasoning, and make several improvements.First, simply letting the SSM parameters be functions of the input addresses their weakness with discrete modalities, allowing the model to selectively propagate or forget information along the sequence length dimension depending on the current token.Second, even though this change prevents the use of eï¬cient convolutions, we design a hardware-aware parallel algorithm in recurrent mode.We integrate these selective SSMs into a simpliï¬ed end-to-end neural network architecture without attention or even MLP blocks (Mamba).Mamba enjoys fast inference (5Ãhigher throughput than Transformers) and linear scaling in sequence length, and its performance improves on real data up to million-length sequences.
As a general sequence model backbone, Mamba achieves state-of-the-art performance across several modalities such as language, audio, and genomics.On language modeling, our Mamba-3B model outperforms Transformers of the same size and matches Transformers twice its size, both in pretraining and downstream evaluation.# 1 IntroductionFoundation models (FMs), or large models pretrained on massive data then adapted for downstream tasks, have emerged as an eï¬ective paradigm in modern machine learning.The backbone of these FMs are often sequence models, operating on arbitrary sequences of inputs from a wide variety of domains such as language, images, speech, audio, time series, and genomics (Brown et al. 2020; Dosovitskiy et al. 2020; Ismail Fawaz et al. 2019; Oord et al. 2016; Poli et al. 2023; Sutskever, Vinyals, and Quoc V Le 2014).While this concept is agnostic to a particular choice of model architecture, modern FMs are predominantly based on a single type of sequence model: the Transformer (Vaswani et al. 2017) and its core attention layer (Bahdanau, Cho, and Bengio 2015) The eï¬cacy of self-attention is attributed to its ability to route information densely within a context window, allowing it to model complex data.
However, this property brings fundamental drawbacks: an inability to model anything outside of a ï¬nite window, and quadratic scaling with respect to the window length.An enormous body of research has appeared on more eï¬cient variants of attention to overcome these drawbacks (Tay, Dehghani, Bahri, et al. 2022), but often at the expense of the very properties that makes it eï¬ective.As of yet, none of these variants have been shown to be empirically eï¬ective at scale across domains.
Recently, structured state space sequence models (SSMs) (Gu, Goel, and Ré 2022; Gu, Johnson, Goel, et al. 2021) have emerged as a promising class of architectures for sequence modeling.These models can be interpreted as a combination of recurrent neural networks (RNNs) and convolutional neural networks (CNNs), with inspiration from classical state space models (Kalman 1960).This class of models can be computed very eï¬ciently as either a recurrence or convolution, with linear or near-linear scaling in sequence length.
Additionally, they have principledEqual contribution.1mechanisms for modeling long-range dependencies (Gu, Dao, et al. 2020) in certain data modalities, and have dominated benchmarks such as the Long Range Arena (Tay, Dehghani, Abnar, et al. 2021).Many ï¬avors of SSMs (Gu, Goel, and Ré 2022; Gu, Gupta, et al. 2022; Gupta, Gu, and Berant 2022; Y.Li et al. 2023; Ma et al. 2023; Orvieto et al. 2023; Smith, Warrington, and Linderman 2023) have been successful in domains involving continuous signal data such as audio and vision (Goel et al. 2022; Nguyen, Goel, et al. 2022; Saon, Gupta, and Cui 2023).However, they have been less eï¬ective at modeling discrete and information-dense data such as text.We propose a new class of selective state space models, that improves on prior work on several axes to achieve the modeling power of Transformers while scaling linearly in sequence length.
Selection Mechanism.First, we identify a key limitation of prior models: the ability to eï¬ciently select data in an input-dependent manner (i.e. focus on or ignore particular inputs).Building on intuition based on important synthetic tasks such as selective copy and induction heads, we design a simple selection mechanism by parameterizing the SSM parameters based on the input.This allows the model to ï¬lter out irrelevant information and remember relevant information indeï¬nitely.Hardware-aware Algorithm.This simple change poses a technical challenge for the computation of the model; in fact, all prior SSMs models must be time- and input-invariant in order to be computationally eï¬cient.We overcome this with a hardware-aware algorithm that computes the model recurrently with a scan instead of convolution, but does not materialize the expanded state in order to avoid IO access between diï¬erent levels of the GPU memory hierarchy.The resulting implementation is faster than previous methods both in theory (scaling linearly in sequence length, compared to pseudo-linear for all convolution-based SSMs) and on modern hardware (up to 3Ãfaster on A100 GPUs).
Architecture.We simplify prior deep sequence model architectures by combining the design of prior SSM architectures (Dao, Fu, Saab, et al. 2023) with the MLP block of Transformers into a single block, leading to a simple and homogenous architecture design (Mamba) incorporating selective state spaces.Selective SSMs, and by extension the Mamba architecture, are fully recurrent models with key properties that make them suitable as the backbone of general foundation models operating on sequences. (i) High quality: selectivity brings strong performance on dense modalities such as language and genomics. (ii) Fast training and inference: computation and memory scales linearly in sequence length during training, and unrolling the model autoregressively during inference requires only constant time per step since it does not require a cache of previous elements. (iii) Long context: the quality and eï¬ciency together yield performance improvements on real data up to sequence length 1M.We empirically validate Mambaâs potential as a general sequence FM backbone, in both pretraining quality and domain-speciï¬c task performance, on several types of modalities and settings:
⢠Synthetics.On important synthetic tasks such as copying and induction heads that have been proposed as being key to large language models, Mamba not only solves them easily but can extrapolate solutions indeï¬nitely long (>1M tokens).⢠Audio and Genomics.Mamba out-performs prior state-of-the-art models such as SaShiMi, Hyena, and Transform- ers on modeling audio waveforms and DNA sequences, both in pretraining quality and downstream metrics (e.g. reducing FID on a challenging speech generation dataset by more than half).In both settings, its performance improves with longer context up to million-length sequences.⢠Language Modeling.
Mamba is the ï¬rst linear-time sequence model that truly achieves Transformer-quality performance, both in pretraining perplexity and downstream evaluations.With scaling laws up to 1B parameters, we show that Mamba exceeds the performance of a large range of baselines, including very strong modern Transformer training recipes based on LLaMa (Touvron et al. 2023).Our Mamba language model has 5Ãgeneration throughput compared to Transformers of similar size, and Mamba-3Bâs quality matches that of Transformers twice its size (e.g. 4 points higher avg. on common sense reasoning compared to Pythia-3B and even exceeding Pythia-7B).Model code and pre-trained checkpoints are open-sourced at https://github.com/state-spaces/mamba.
# Selective State Space Model with Hardware-aware State Expansion. Figure 1: (Overview.) Structured SSMs independently map each channel (e.g. D = 5) of an input x to output y through a higher-dimensional latent state h (e.g. N = 4). Prior SSMs avoid materializing this large effective state (DN, times batch size B and sequence length L) through clever alternate computation paths requiring time-invariance: the (Δ, A, B, C) parameters are constant across time. Our selection mechanism adds back input-dependent dynamics, which also requires a careful hardware-aware algorithm to only materialize the expanded states in more efficient levels of the GPU memory hierarchy. # 2 State Space Models. Structured state space sequence models (S4) are a recent class of sequence models for deep learning that are broadly related to RNNs, CNNs, and classical state space models. They are inspired by a particular continuous system (1) that maps a 1-dimensional function or sequence x(t) ∈ ℝ ↦ y(t) ∈ ℝ through an implicit latent state h(t) ∈ ℝ^N. Concretely, S4 models are defined with four parameters (Δ, A, B, C), which define a sequence-to-sequence transformation in two stages: h′(t) = A h(t) + B x(t), y(t) = C h(t) (1a, 1b); h_t = Ā h_{t−1} + B̄ x_t, y_t = C h_t (2a, 2b); K̄ = (C B̄, C Ā B̄, …, C Ā^k B̄, …), y = x ∗ K̄ (3a, 3b). Discretization. The first stage transforms the “continuous parameters” (Δ, A, B) to “discrete parameters” (Ā, B̄) through fixed formulas Ā = f_A(Δ, A) and B̄ = f_B(Δ, A, B), where the pair (f_A, f_B) is called a discretization rule. Various rules can be used, such as the zero-order hold (ZOH) defined in equation (4): Ā = exp(ΔA), B̄ = (ΔA)^{−1} (exp(ΔA) − I) · ΔB (4). Discretization has deep connections to continuous-time systems which can endow them with additional properties such as resolution invariance (Nguyen, Goel, et al. 2022) and automatically ensuring that the model is properly normalized (Gu, Johnson, Timalsina, et al. 2023; Orvieto et al. 2023).
It also has connections to gating mechanisms of RNNs (Gu, Gulcehre, et al. 2020; Tallec and Ollivier 2018) which we will revisit in Section 3.5.However, from a mechanical point of view discretization can simply be viewed as the ï¬rst step of the computation graph in the forward pass of an SSM.Alternate ï¬avors of SSMs can bypass the discretization step and parameterize (A, B) directly instead (Zhang et al. 2023), which may be easier to reason about.Computation.After the parameters have been transformed from (â, A, B, C) ⦠(A, B, C), the model can be computed in two ways, either as a linear recurrence (2) or a global convolution (3).3Commonly, the model uses the convolutional mode (3) for eï¬cient parallelizable training (where the whole input sequence is seen ahead of time), and switched into recurrent mode (2) for eï¬cient autoregressive inference (where the inputs are seen one timestep at a time).Linear Time Invariance (LTI).An important property of equations (1) to (3) is that the modelâs dynamics are constant through time.
In other words (â, A, B, C), and consequently (A, B) as well, are ï¬xed for all time-steps.This property is called linear time invariance (LTI), which is deeply connected to recurrence and convolutions.Informally, we think of LTI SSMs as being equivalent to any linear recurrence (2a) or convolution (3b), and use LTI as an umbrella term for these classes of models.Thus far, all structured SSMs have been LTI (e.g. computed as convolutions) because of fundamental eï¬ciency constraints, discussed in Section 3.3.However, a core insight of this work is that LTI models have fundamental limitations in modeling certain types of data, and our technical contributions involve removing the LTI constraint while overcoming the eï¬ciency bottlenecks.
Structure and Dimensions. Finally, we note that structured SSMs are so named because computing them efficiently also requires imposing structure on the A matrix. The most popular form of structure is diagonal (Gu, Gupta, et al. 2022; Gupta, Gu, and Berant 2022; Smith, Warrington, and Linderman 2023), which we also use. In this case, the A ∈ ℝ^{N×N}, B ∈ ℝ^{N×1}, C ∈ ℝ^{1×N} matrices can all be represented by N numbers. To operate over an input sequence x of batch size B and length L with D channels, the SSM is applied independently to each channel. Note that in this case, the total hidden state has dimension DN per input, and computing it over the sequence length requires O(BLDN) time and memory; this is the root of the fundamental efficiency bottleneck addressed in Section 3.3. General State Space Models. We note that the term state space model has a very broad meaning which simply represents the notion of any recurrent process with a latent state. It has been used to refer to many disparate concepts in different disciplines, including Markov decision processes (MDP) (reinforcement learning (Hafner et al. 2020)), dynamic causal modeling (DCM) (computational neuroscience (Friston, Harrison, and Penny 2003)), Kalman filters (controls (Kalman 1960)), hidden Markov models (HMM) and linear dynamical systems (LDS) (machine learning), and recurrent (and sometimes convolutional) models at large (deep learning). Throughout this entire paper we use the term “
SSMâto refer exclusively to the class of structured SSMs or S4 models (Gu, Goel, and Ré 2022; Gu, Gupta, et al. 2022; Gupta, Gu, and Berant 2022; Hasani et al. 2023; Ma et al. 2023; Smith, Warrington, and Linderman 2023) and use these terms interchangeably.For convenience we may also include derivatives of such models, such as those focusing on either the linear-recurrence or global-convolution viewpoints (Y.Li et al. 2023; Orvieto et al. 2023; Poli et al. 2023), and clarify nuances when necessary.SSM Architectures.SSMs are standalone sequence transformations that can be incorporated into end-to-end neural network architectures. (We also sometimes call SSM architectures SSNNs, which are to SSM layers as CNNs are to linear convolution layers.) We discuss some of the most well-known SSM architectures, many of which will also serve as our primary baselines.
⢠Linear attention (Katharopoulos et al. 2020) is an approximation of self-attention involving a recurrence which can be viewed as a degenerate linear SSM.⢠H3 (Dao, Fu, Saab, et al. 2023) generalized this recurrence to use S4; it can be viewed as an architecture with an SSM sandwiched by two gated connections (Figure 3).H3 also inserts a standard local convolution, which they frame as a shift-SSM, before the main SSM layer.⢠Hyena (Poli et al. 2023) uses the same architecture as H3 but replaces the S4 layer with an MLP-parameterized global convolution (Romero et al. 2021).â
¢ RetNet (Y.Sun et al. 2023) adds an additional gate to the architecture and uses a simpler SSM, allowing an alternative parallelizable computation path, using a variant of multi-head attention (MHA) instead of convolutions.4⢠RWKV (B.Peng et al. 2023) is a recent RNN designed for language modeling based on another linear attention approximation (attention-free Transformer (S.Zhai et al. 2021)).Its main âWKVâ
mechanism involves LTI recurrences and can be viewed as the ratio of two SSMs.Other closely related SSMs and architectures are discussed further in an extended related work (Appendix B).We highlight in particular S5 (Smith, Warrington, and Linderman 2023), QRNN (Bradbury et al. 2016), and SRU (Lei et al. 2017), which we view as the most closely related methods to our core selective SSM.# 3 Selective State Space ModelsWe motivate our selection mechanism using intuition from synthetic tasks (Section 3.1), then explain how to incorporate this mechanism into state space models (Section 3.2).The resulting time-varying SSMs cannot use convolutions, presenting a technical challenge of how to compute them eï¬
ciently.We overcome this with a hardware-aware algorithm that exploits the memory hierarchy on modern hardware (Section 3.3).We then describe a simple SSM architecture without attention or even MLP blocks (Section 3.4).Finally, we discuss some additional properties of selection mechanisms (Section 3.5).# 3.1 Motivation:Selection as a Means of CompressionWe argue that a fundamental problem of sequence modeling is compressing context into a smaller state.In fact, we can view the tradeoï¬s of popular sequence models from this point of view.For example, attention is both eï¬ective and ineï¬cient because it explicitly does not compress context at all.This can be seen from the fact that autoregressive inference requires explicitly storing the entire context (i.e. the KV cache), which directly causes the slow linear-time inference and quadratic-time training of Transformers.On the other hand, recurrent models are eï¬cient because they have a ï¬nite state, implying constant-time inference and linear-time training.However, their eï¬ectiveness is limited by how well this state has compressed the context.To understand this principle, we focus on two running examples of synthetic tasks (Figure 2).
⢠The Selective Copying task modiï¬es the popular Copying task (Arjovsky, Shah, and Bengio 2016) by varying the position of the tokens to memorize.It requires content-aware reasoning to be able to memorize the relevant tokens (colored) and ï¬lter out the irrelevant ones (white).⢠The Induction Heads task is a well-known mechanism hypothesized to explain the majority of in-context learning abilities of LLMs (Olsson et al. 2022).It requires context-aware reasoning to know when to produce the correct output in the appropriate context (black).These tasks reveal the failure mode of LTI models.From the recurrent view, their constant dynamics (e.g. the (A, B) transitions in (2)) cannot let them select the correct information from their context, or aï¬ect the hidden state passed along the sequence an in input-dependent way.From the convolutional view, it is known that global convolutions can solve the vanilla Copying task (Romero et al. 2021) because it only requires time-awareness, but that they have diï¬culty with the Selective Copying task because of lack of content-awareness (Figure 2).More concretely, the spacing between inputs-to-outputs is varying and cannot be modeled by static convolution kernels.
In summary, the eï¬ciency vs. eï¬ectiveness tradeoï¬of sequence models is characterized by how well they compress their state: eï¬cient models must have a small state, while eï¬ective models must have a state that contains all necessary information from the context.In turn, we propose that a fundamental principle for building sequence models is selectivity: or the context-aware ability to focus on or ï¬lter out inputs into a sequential state.In particular, a selection mechanism controls how information propagates or interacts along the sequence dimension (see Section 3.5 for more discussion).
# Improving SSMs with SelectionOne method of incorporating a selection mechanism into models is by letting their parameters that aï¬ect interactions along the sequence (e.g. the recurrent dynamics of an RNN or the c
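As a quick aside on the splitter behaviour introduced in this series (a minimal sketch based on the RegexSplitter diff and its unit tests above; the sample string is made up for illustration), custom delimiters are applied in the order given, and the built-in sentence regex is always applied last:

from semantic_chunkers.splitters.regex import RegexSplitter

splitter = RegexSplitter()
# "\n\n" is applied first, then "\n", then the sentence-level regex pattern.
sentences = splitter(
    "First paragraph.\n\nSecond paragraph.\nThird line. Fourth sentence.",
    delimiters=["\n\n", "\n"],
)
# Per the accompanying tests, each paragraph, line, and sentence becomes its own
# entry with surrounding whitespace stripped.
print(sentences)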
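The RegexChunker added alongside it then packs those sentences into chunks under a token budget. A minimal usage sketch (mirroring the new test_regex_chunker.py; the sample document is made up, and the budget check assumes no single sentence exceeds max_chunk_tokens):

from semantic_chunkers import RegexChunker
from semantic_chunkers.utils import text

chunker = RegexChunker(max_chunk_tokens=50)
docs = ["First sentence. Second sentence. Third sentence."]
chunks = chunker(docs)[0]  # __call__ wraps the resulting chunk list in an outer list

# Each chunk should stay within the configured token budget.
for chunk in chunks:
    assert text.tiktoken_length(chunk.content) <= chunker.max_chunk_tokens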