From 631d059a70b76137743bf6d89c6e67a1747c647a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 26 Aug 2024 13:25:44 +0000 Subject: [PATCH] Deployed 04e9828 with MkDocs version: 1.6.0 --- .nojekyll | 0 404.html | 1174 +++ Inference/docker/index.html | 1342 ++++ Inference/index.html | 1205 +++ Inference/inference/index.html | 3580 +++++++++ Ollama server/index.html | 1216 +++ Query processing LLM/api_reference/index.html | 1572 ++++ Query processing LLM/index.html | 1382 ++++ .../change data input/index.html | 1485 ++++ .../change model/index.html | 1475 ++++ .../Developer Tutorials/change_model.py | 65 + .../create vectordb/index.html | 1333 ++++ Rag Pipeline/Developer Tutorials/index.html | 1279 ++++ .../load vectordb and get results/index.html | 1743 +++++ Rag Pipeline/configuration/index.html | 1296 ++++ Rag Pipeline/general_utils/index.html | 1425 ++++ Rag Pipeline/index.html | 1307 ++++ Rag Pipeline/llm_module/index.html | 1787 +++++ Rag Pipeline/metadata_module/index.html | 3238 ++++++++ Rag Pipeline/result_gen/index.html | 2219 ++++++ Rag Pipeline/training/index.html | 1288 ++++ Rag Pipeline/vector_store/index.html | 2567 +++++++ UI/api_reference/index.html | 5544 ++++++++++++++ UI/frontend/index.html | 1395 ++++ assets/_mkdocstrings.css | 119 + assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.af256bd8.min.js | 29 + assets/javascripts/bundle.af256bd8.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + 
assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.b8dbb3d2.min.js | 42 + .../workers/search.b8dbb3d2.min.js.map | 7 + assets/stylesheets/main.3cba04c6.min.css | 1 + assets/stylesheets/main.3cba04c6.min.css.map | 1 + assets/stylesheets/palette.06af60db.min.css | 1 + .../stylesheets/palette.06af60db.min.css.map | 1 + css/ansi-colours.css | 174 + css/jupyter-cells.css | 10 + css/pandas-dataframe.css | 36 + evaluation/api_reference/index.html | 4564 +++++++++++ evaluation/evaluation/index.html | 1339 ++++ evaluation/index.html | 1375 ++++ evaluation/labelling_tool/index.html | 1338 ++++ evaluation/merging_labels/index.html | 1277 ++++ evaluation/testing/index.html | 1316 ++++ images/search_ui.png | Bin 0 -> 170010 bytes images/work.jpg | Bin 0 -> 44925 bytes index.html | 1602 ++++ objects.inv | Bin 0 -> 1661 bytes 
search/search_index.json | 1 + sitemap.xml | 3 + sitemap.xml.gz | Bin 0 -> 127 bytes 84 files changed, 61361 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 Inference/docker/index.html create mode 100644 Inference/index.html create mode 100644 Inference/inference/index.html create mode 100644 Ollama server/index.html create mode 100644 Query processing LLM/api_reference/index.html create mode 100644 Query processing LLM/index.html create mode 100644 Rag Pipeline/Developer Tutorials/change data input/index.html create mode 100644 Rag Pipeline/Developer Tutorials/change model/index.html create mode 100644 Rag Pipeline/Developer Tutorials/change_model.py create mode 100644 Rag Pipeline/Developer Tutorials/create vectordb/index.html create mode 100644 Rag Pipeline/Developer Tutorials/index.html create mode 100644 Rag Pipeline/Developer Tutorials/load vectordb and get results/index.html create mode 100644 Rag Pipeline/configuration/index.html create mode 100644 Rag Pipeline/general_utils/index.html create mode 100644 Rag Pipeline/index.html create mode 100644 Rag Pipeline/llm_module/index.html create mode 100644 Rag Pipeline/metadata_module/index.html create mode 100644 Rag Pipeline/result_gen/index.html create mode 100644 Rag Pipeline/training/index.html create mode 100644 Rag Pipeline/vector_store/index.html create mode 100644 UI/api_reference/index.html create mode 100644 UI/frontend/index.html create mode 100644 assets/_mkdocstrings.css create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.af256bd8.min.js create mode 100644 assets/javascripts/bundle.af256bd8.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 
100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.b8dbb3d2.min.js create mode 100644 assets/javascripts/workers/search.b8dbb3d2.min.js.map create mode 100644 assets/stylesheets/main.3cba04c6.min.css create mode 
100644 assets/stylesheets/main.3cba04c6.min.css.map create mode 100644 assets/stylesheets/palette.06af60db.min.css create mode 100644 assets/stylesheets/palette.06af60db.min.css.map create mode 100644 css/ansi-colours.css create mode 100644 css/jupyter-cells.css create mode 100644 css/pandas-dataframe.css create mode 100644 evaluation/api_reference/index.html create mode 100644 evaluation/evaluation/index.html create mode 100644 evaluation/index.html create mode 100644 evaluation/labelling_tool/index.html create mode 100644 evaluation/merging_labels/index.html create mode 100644 evaluation/testing/index.html create mode 100644 images/search_ui.png create mode 100644 images/work.jpg create mode 100644 index.html create mode 100644 objects.inv create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..c7db662 --- /dev/null +++ b/404.html @@ -0,0 +1,1174 @@ + + + + + + + + + + + + + + + + + + + OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Inference/docker/index.html b/Inference/docker/index.html new file mode 100644 index 0000000..a6f2c36 --- /dev/null +++ b/Inference/docker/index.html @@ -0,0 +1,1342 @@ + + + + + + + + + + + + + + + + + + + + + + + Docker container - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Docker container

+

Building

+
    +
  • Run docker compose build --progress=plain
  • +
+

Running

+
    +
  • Run ./start_docker.sh
  • +
  • This uses the docker compose file to run the docker process in the background.
  • +
  • The required LLM model is also pulled from the docker hub and the container is started.
  • +
+

Stopping

+
    +
  • Run ./stop_docker.sh
  • +
+

Potential Errors

+
    +
  • Permission errors : Run chmod +x *.sh
  • +
  • If you get a memory error you can run docker system prune. Please be careful with this command as it will remove all stopped containers, all dangling images, and all unused networks. So ensure you have no important data in any of the containers before running this command.
  • +
  • On docker desktop for Mac, increase memory limits to as much as your system can handle.
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Inference/index.html b/Inference/index.html new file mode 100644 index 0000000..7068ed1 --- /dev/null +++ b/Inference/index.html @@ -0,0 +1,1205 @@ + + + + + + + + + + + + + + + + + + + + + + + Index - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Index

+ + + + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Inference/inference/index.html b/Inference/inference/index.html new file mode 100644 index 0000000..f7f32ca --- /dev/null +++ b/Inference/inference/index.html @@ -0,0 +1,3580 @@ + + + + + + + + + + + + + + + + + + + + + + + Inference - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Inference

+
    +
  • Just run ./start_local.sh and it will take care of everything.
  • +
  • The UI should either pop up or you can navigate to http://localhost:8501/ in your browser.
  • +
  • Note that it takes a decent bit of time to load everything.
  • +
+

Stopping

+
    +
  • Run ./stop_local.sh
  • +
  • ./start_local.sh stores the PIDs of all the processes it starts in files in all the directories it starts them in. stop_local.sh reads these files and kills the processes.
  • +
+

CLI access to the API

+
    +
  • We all are lazy sometimes and don't want to use the interface sometimes. Or just want to test out different parts of the API without any hassle. To that end, you can either test out the individual components like so:
  • +
  • Note that the %20 are spaces in the URL.
  • +
+

Ollama

+
    +
  • This is the server that runs an Ollama server (This is basically an optimized version of a local LLM. It does not do anything of itself but runs as a background service so you can use the LLM).
  • +
  • You can start it by running cd ollama && ./get_ollama.sh &
  • +
+

LLM Service

+
    +
  • This component is the one that runs the query processing using LLMs module. It uses the Ollama server, runs queries and processes them.
  • +
  • You can start it by running cd llm_service && uvicorn llm_service:app --host 0.0.0.0 --port 8081 &
  • +
  • Curl Example : curl http://0.0.0.0:8081/llmquery/find%20me%20a%20mushroom%20dataset%20with%20less%20than%203000%20classes
  • +
+

Backend

+
    +
  • This component runs the RAG pipeline. It returns a JSON with dataset ids of the OpenML datasets that match the query.
  • +
  • You can start it by running cd backend && uvicorn backend:app --host 0.0.0.0 --port 8000 &
  • +
  • Curl Example : curl http://0.0.0.0:8000/dataset/find%20me%20a%20mushroom%20dataset
  • +
+

Frontend

+
    +
  • This component runs the Streamlit frontend. It is the UI that you see when you navigate to http://localhost:8501.
  • +
  • You can start it by running cd frontend && streamlit run ui.py &
  • +
+

Errors

+
    +
  • If you get an error about file permissions, run chmod +x start_local.sh and chmod +x stop_local.sh to make them executable.
  • +
+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ LLMResponseParser + + +

+ + +
+ + +

Description: Parse the response from the LLM service and update the columns based on the response.

+ +
+ Source code in frontend/ui_utils.py +
14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
class LLMResponseParser:
+    """
+    Description: Parse the response from the LLM service and update the columns based on the response.
+    """
+
+    def __init__(self, llm_response):
+        self.llm_response = llm_response
+        self.subset_cols = ["did", "name"]
+        self.size_sort = None
+        self.classification_type = None
+        self.uploader_name = None
+
+    def process_size_attribute(self, attr_size: str):
+        size, sort = attr_size.split(",") if "," in attr_size else (attr_size, None)
+        if size == "yes":
+            self.subset_cols.append("NumberOfInstances")
+        if sort:
+            self.size_sort = sort
+
+    def missing_values_attribute(self, attr_missing: str):
+        if attr_missing == "yes":
+            self.subset_cols.append("NumberOfMissingValues")
+
+    def classification_type_attribute(self, attr_classification: str):
+        if attr_classification != "none":
+            self.subset_cols.append("NumberOfClasses")
+            self.classification_type = attr_classification
+
+    def uploader_attribute(self, attr_uploader: str):
+        if attr_uploader != "none":
+            self.subset_cols.append("uploader")
+            self.uploader_name = attr_uploader.split("=")[1].strip()
+
+    def get_attributes_from_response(self):
+        attribute_processors = {
+            "size_of_dataset": self.process_size_attribute,
+            "missing_values": self.missing_values_attribute,
+            "classification_type": self.classification_type_attribute,
+            "uploader": self.uploader_attribute,
+        }
+
+        for attribute, value in self.llm_response.items():
+            if attribute in attribute_processors:
+                attribute_processors[attribute](value)
+
+    def update_subset_cols(self, metadata: pd.DataFrame):
+        """
+        Description: Filter the metadata based on the updated subset columns and extra conditions
+        """
+        if self.classification_type is not None:
+            if "multi" in self.classification_type:
+                metadata = metadata[metadata["NumberOfClasses"] > 2]
+            elif "binary" in self.classification_type:
+                metadata = metadata[metadata["NumberOfClasses"] == 2]
+        if self.uploader_name is not None:
+            try:
+                uploader = int(self.uploader_name)
+                metadata = metadata[metadata["uploader"] == uploader]
+            except:
+                pass
+
+        return metadata[self.subset_cols]
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ update_subset_cols(metadata) + +

+ + +
+ +

Description: Filter the metadata based on the updated subset columns and extra conditions

+ +
+ Source code in frontend/ui_utils.py +
59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
def update_subset_cols(self, metadata: pd.DataFrame):
+    """
+    Description: Filter the metadata based on the updated subset columns and extra conditions
+    """
+    if self.classification_type is not None:
+        if "multi" in self.classification_type:
+            metadata = metadata[metadata["NumberOfClasses"] > 2]
+        elif "binary" in self.classification_type:
+            metadata = metadata[metadata["NumberOfClasses"] == 2]
+    if self.uploader_name is not None:
+        try:
+            uploader = int(self.uploader_name)
+            metadata = metadata[metadata["uploader"] == uploader]
+        except:
+            pass
+
+    return metadata[self.subset_cols]
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ ResponseParser + + +

+ + +
+ + +

Description : This classe is used to decide the order of operations and run the response parsing. +It loads the paths, fetches the Query parsing LLM response, the rag response, loads the metadatas and then based on the config, decides the order in which to apply each of them.

+ +
+ Source code in frontend/ui_utils.py +
 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
class ResponseParser:
+    """
+    Description : This classe is used to decide the order of operations and run the response parsing.
+    It loads the paths, fetches the Query parsing LLM response, the rag response, loads the metadatas and then based on the config, decides the order in which to apply each of them.
+    """
+
+    def __init__(self, query_type: str, apply_llm_before_rag: bool = False):
+        self.query_type = query_type
+        self.paths = self.load_paths()
+        self.rag_response = None
+        self.llm_response = None
+        self.apply_llm_before_rag = apply_llm_before_rag
+        self.database_filtered = None
+        self.structured_query_response = None
+
+    def load_paths(self):
+        """
+        Description: Load paths from paths.json
+        """
+        with open("paths.json", "r") as file:
+            return json.load(file)
+
+    def fetch_llm_response(self, query: str):
+        """
+        Description: Fetch the response from the query parsing LLM service as a json
+        """
+        llm_response_path = self.paths["llm_response"]
+        try:
+            self.llm_response = requests.get(
+                f"{llm_response_path['docker']}{query}"
+            ).json()
+        except:
+            self.llm_response = requests.get(
+                f"{llm_response_path['local']}{query}"
+            ).json()
+        return self.llm_response
+
+    def fetch_structured_query(self, query_type: str, query: str):
+        """
+        Description: Fetch the response for a structured query from the LLM service as a JSON
+        """
+        structured_response_path = self.paths["structured_query"]
+        try:
+            self.structured_query_response = requests.get(
+                f"{structured_response_path['docker']}{query}",
+                json={"query": query},
+            ).json()
+        except (requests.exceptions.RequestException, json.JSONDecodeError) as e:
+            # Print the error for debugging purposes
+            print(f"Error occurred: {e}")
+            # Set structured_query_response to None on error
+            self.structured_query_response = None
+        try:
+            self.structured_query_response = requests.get(
+                f"{structured_response_path['local']}{query}",
+                json={"query": query},
+            ).json()
+        except Exception as e:
+            # Print the error for debugging purposes
+            print(f"Error occurred while fetching from local endpoint: {e}")
+            # Set structured_query_response to None if the local request also fails
+            self.structured_query_response = None
+
+        return self.structured_query_response
+
+    def database_filter(self, filter_condition, collec):
+        """
+        Apply database filter on the rag_response
+        """
+        ids = list(map(str, self.rag_response["initial_response"]))
+        self.database_filtered = collec.get(ids=ids, where=filter_condition)["ids"]
+        self.database_filtered = list(map(int, self.database_filtered))
+        # print(self.database_filtered)
+        return self.database_filtered
+
+    def fetch_rag_response(self, query_type, query):
+        """
+        Description: Fetch the response from RAG pipeline
+
+        """
+        rag_response_path = self.paths["rag_response"]
+        try:
+            self.rag_response = requests.get(
+                f"{rag_response_path['docker']}{query_type.lower()}/{query}",
+                json={"query": query, "type": query_type.lower()},
+            ).json()
+        except:
+            self.rag_response = requests.get(
+                f"{rag_response_path['local']}{query_type.lower()}/{query}",
+                json={"query": query, "type": query_type.lower()},
+            ).json()
+        ordered_set = self._order_results()
+        self.rag_response["initial_response"] = ordered_set
+
+        return self.rag_response
+
+    def _order_results(self):
+        doc_set = set()
+        ordered_set = []
+        for docid in self.rag_response["initial_response"]:
+            if docid not in doc_set:
+                ordered_set.append(docid)
+            doc_set.add(docid)
+        return ordered_set
+
+    def parse_and_update_response(self, metadata: pd.DataFrame):
+        """
+         Description: Parse the response from the RAG and LLM services and update the metadata based on the response.
+         Decide which order to apply them
+         -  self.apply_llm_before_rag == False
+             - Metadata is filtered based on the rag response first and then by the Query parsing LLM
+        -  self.apply_llm_before_rag == False
+             - Metadata is filtered based by the Query parsing LLM first and the rag response second
+        - in case structured_query == true, take results are applying data filters.
+        """
+        if self.apply_llm_before_rag is None or self.llm_response is None:
+            print("No LLM filter.")
+            # print(self.rag_response, flush=True)
+            filtered_metadata = self._no_filter(metadata)
+
+            # print(filtered_metadata)
+            # if no llm response is required, return the initial response
+            return filtered_metadata
+
+        elif (
+            self.rag_response is not None and self.llm_response is not None
+        ) and not config["structured_query"]:
+            if not self.apply_llm_before_rag:
+                filtered_metadata, llm_parser = self._rag_before_llm(metadata)
+
+                if self.query_type.lower() == "dataset":
+                    llm_parser.get_attributes_from_response()
+                    return llm_parser.update_subset_cols(filtered_metadata)
+
+            elif self.apply_llm_before_rag:
+                filtered_metadata = self._filter_before_rag(metadata)
+                return filtered_metadata
+
+        elif (
+            self.rag_response is not None and self.structured_query_response is not None
+        ):
+            col_name = [
+                "status",
+                "NumberOfClasses",
+                "NumberOfFeatures",
+                "NumberOfInstances",
+            ]
+            # print(self.structured_query_response)  # Only for debugging. Comment later.
+            if self.structured_query_response[0] is not None and isinstance(
+                self.structured_query_response[1], dict
+            ):
+                # Safely attempt to access the "filter" key in the first element
+
+                self._structured_query_on_success(metadata)
+
+            else:
+                filtered_metadata = self._structured_query_on_fail(metadata)
+                # print("Showing only rag response")
+            return filtered_metadata[["did", "name", *col_name]]
+
+    def _structured_query_on_fail(self, metadata):
+        filtered_metadata = metadata[
+            metadata["did"].isin(self.rag_response["initial_response"])
+        ]
+        filtered_metadata["did"] = pd.Categorical(
+            filtered_metadata["did"],
+            categories=self.rag_response["initial_response"],
+            ordered=True,
+        )
+        filtered_metadata = filtered_metadata.sort_values("did").reset_index(drop=True)
+
+        return filtered_metadata
+
+    def _structured_query_on_success(self, metadata):
+        if (
+            self.structured_query_response[0].get("filter", None)
+            and self.database_filtered
+        ):
+            filtered_metadata = metadata[metadata["did"].isin(self.database_filtered)]
+            # print("Showing database filtered data")
+        else:
+            filtered_metadata = metadata[
+                metadata["did"].isin(self.rag_response["initial_response"])
+            ]
+            # print(
+            #     "Showing only rag response as filter is empty or none of the rag data satisfies filter conditions."
+            # )
+        filtered_metadata["did"] = pd.Categorical(
+            filtered_metadata["did"],
+            categories=self.rag_response["initial_response"],
+            ordered=True,
+        )
+        filtered_metadata = filtered_metadata.sort_values("did").reset_index(drop=True)
+
+    def _filter_before_rag(self, metadata):
+        print("LLM filter before RAG")
+        llm_parser = LLMResponseParser(self.llm_response)
+        llm_parser.get_attributes_from_response()
+        filtered_metadata = llm_parser.update_subset_cols(metadata)
+        filtered_metadata = filtered_metadata[
+            metadata["did"].isin(self.rag_response["initial_response"])
+        ]
+        filtered_metadata["did"] = pd.Categorical(
+            filtered_metadata["did"],
+            categories=self.rag_response["initial_response"],
+            ordered=True,
+        )
+        filtered_metadata = filtered_metadata.sort_values("did").reset_index(drop=True)
+
+        return filtered_metadata
+
+    def _rag_before_llm(self, metadata):
+        print("RAG before LLM filter.")
+        filtered_metadata = metadata[
+            metadata["did"].isin(self.rag_response["initial_response"])
+        ]
+        filtered_metadata["did"] = pd.Categorical(
+            filtered_metadata["did"],
+            categories=self.rag_response["initial_response"],
+            ordered=True,
+        )
+        filtered_metadata = filtered_metadata.sort_values("did").reset_index(drop=True)
+        llm_parser = LLMResponseParser(self.llm_response)
+        return filtered_metadata, llm_parser
+
+    def _no_filter(self, metadata):
+        filtered_metadata = metadata[
+            metadata["did"].isin(self.rag_response["initial_response"])
+        ]
+        filtered_metadata["did"] = pd.Categorical(
+            filtered_metadata["did"],
+            categories=self.rag_response["initial_response"],
+            ordered=True,
+        )
+        filtered_metadata = filtered_metadata.sort_values("did").reset_index(drop=True)
+
+        return filtered_metadata
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ database_filter(filter_condition, collec) + +

+ + +
+ +

Apply database filter on the rag_response

+ +
+ Source code in frontend/ui_utils.py +
143
+144
+145
+146
+147
+148
+149
+150
+151
def database_filter(self, filter_condition, collec):
+    """
+    Apply database filter on the rag_response
+    """
+    ids = list(map(str, self.rag_response["initial_response"]))
+    self.database_filtered = collec.get(ids=ids, where=filter_condition)["ids"]
+    self.database_filtered = list(map(int, self.database_filtered))
+    # print(self.database_filtered)
+    return self.database_filtered
+
+
+
+ +
+ +
+ + +

+ fetch_llm_response(query) + +

+ + +
+ +

Description: Fetch the response from the query parsing LLM service as a json

+ +
+ Source code in frontend/ui_utils.py +
100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
def fetch_llm_response(self, query: str):
+    """
+    Description: Fetch the response from the query parsing LLM service as a json
+    """
+    llm_response_path = self.paths["llm_response"]
+    try:
+        self.llm_response = requests.get(
+            f"{llm_response_path['docker']}{query}"
+        ).json()
+    except:
+        self.llm_response = requests.get(
+            f"{llm_response_path['local']}{query}"
+        ).json()
+    return self.llm_response
+
+
+
+ +
+ +
+ + +

+ fetch_rag_response(query_type, query) + +

+ + +
+ +

Description: Fetch the response from RAG pipeline

+ +
+ Source code in frontend/ui_utils.py +
153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
def fetch_rag_response(self, query_type, query):
+    """
+    Description: Fetch the response from RAG pipeline
+
+    """
+    rag_response_path = self.paths["rag_response"]
+    try:
+        self.rag_response = requests.get(
+            f"{rag_response_path['docker']}{query_type.lower()}/{query}",
+            json={"query": query, "type": query_type.lower()},
+        ).json()
+    except:
+        self.rag_response = requests.get(
+            f"{rag_response_path['local']}{query_type.lower()}/{query}",
+            json={"query": query, "type": query_type.lower()},
+        ).json()
+    ordered_set = self._order_results()
+    self.rag_response["initial_response"] = ordered_set
+
+    return self.rag_response
+
+
+
+ +
+ +
+ + +

+ fetch_structured_query(query_type, query) + +

+ + +
+ +

Description: Fetch the response for a structured query from the LLM service as a JSON

+ +
+ Source code in frontend/ui_utils.py +
115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
def fetch_structured_query(self, query_type: str, query: str):
+    """
+    Description: Fetch the response for a structured query from the LLM service as a JSON
+    """
+    structured_response_path = self.paths["structured_query"]
+    try:
+        self.structured_query_response = requests.get(
+            f"{structured_response_path['docker']}{query}",
+            json={"query": query},
+        ).json()
+    except (requests.exceptions.RequestException, json.JSONDecodeError) as e:
+        # Print the error for debugging purposes
+        print(f"Error occurred: {e}")
+        # Set structured_query_response to None on error
+        self.structured_query_response = None
+    try:
+        self.structured_query_response = requests.get(
+            f"{structured_response_path['local']}{query}",
+            json={"query": query},
+        ).json()
+    except Exception as e:
+        # Print the error for debugging purposes
+        print(f"Error occurred while fetching from local endpoint: {e}")
+        # Set structured_query_response to None if the local request also fails
+        self.structured_query_response = None
+
+    return self.structured_query_response
+
+
+
+ +
+ +
+ + +

+ load_paths() + +

+ + +
+ +

Description: Load paths from paths.json

+ +
+ Source code in frontend/ui_utils.py +
93
+94
+95
+96
+97
+98
def load_paths(self):
+    """
+    Description: Load paths from paths.json
+    """
+    with open("paths.json", "r") as file:
+        return json.load(file)
+
+
+
+ +
+ +
+ + +

+ parse_and_update_response(metadata) + +

+ + +
+ +

Description: Parse the response from the RAG and LLM services and update the metadata based on the response. + Decide which order to apply them + - self.apply_llm_before_rag == False + - Metadata is filtered based on the rag response first and then by the Query parsing LLM +- self.apply_llm_before_rag == False + - Metadata is filtered based by the Query parsing LLM first and the rag response second +- in case structured_query == true, take results are applying data filters.

+ +
+ Source code in frontend/ui_utils.py +
183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
def parse_and_update_response(self, metadata: pd.DataFrame):
+    """
+     Description: Parse the response from the RAG and LLM services and update the metadata based on the response.
+     Decide which order to apply them
+     -  self.apply_llm_before_rag == False
+         - Metadata is filtered based on the rag response first and then by the Query parsing LLM
+    -  self.apply_llm_before_rag == False
+         - Metadata is filtered based by the Query parsing LLM first and the rag response second
+    - in case structured_query == true, take results are applying data filters.
+    """
+    if self.apply_llm_before_rag is None or self.llm_response is None:
+        print("No LLM filter.")
+        # print(self.rag_response, flush=True)
+        filtered_metadata = self._no_filter(metadata)
+
+        # print(filtered_metadata)
+        # if no llm response is required, return the initial response
+        return filtered_metadata
+
+    elif (
+        self.rag_response is not None and self.llm_response is not None
+    ) and not config["structured_query"]:
+        if not self.apply_llm_before_rag:
+            filtered_metadata, llm_parser = self._rag_before_llm(metadata)
+
+            if self.query_type.lower() == "dataset":
+                llm_parser.get_attributes_from_response()
+                return llm_parser.update_subset_cols(filtered_metadata)
+
+        elif self.apply_llm_before_rag:
+            filtered_metadata = self._filter_before_rag(metadata)
+            return filtered_metadata
+
+    elif (
+        self.rag_response is not None and self.structured_query_response is not None
+    ):
+        col_name = [
+            "status",
+            "NumberOfClasses",
+            "NumberOfFeatures",
+            "NumberOfInstances",
+        ]
+        # print(self.structured_query_response)  # Only for debugging. Comment later.
+        if self.structured_query_response[0] is not None and isinstance(
+            self.structured_query_response[1], dict
+        ):
+            # Safely attempt to access the "filter" key in the first element
+
+            self._structured_query_on_success(metadata)
+
+        else:
+            filtered_metadata = self._structured_query_on_fail(metadata)
+            # print("Showing only rag response")
+        return filtered_metadata[["did", "name", *col_name]]
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ UILoader + + +

+ + +
+ + +

Description : Create the chat interface

+ +
+ Source code in frontend/ui_utils.py +
317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
+413
+414
+415
+416
+417
+418
+419
+420
+421
+422
+423
+424
+425
+426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
+439
+440
+441
+442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
+461
+462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
+475
+476
+477
+478
+479
+480
+481
+482
+483
+484
+485
+486
+487
+488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
+510
+511
+512
+513
+514
+515
+516
+517
+518
+519
+520
+521
+522
+523
+524
+525
+526
+527
+528
+529
+530
+531
+532
+533
+534
+535
+536
+537
+538
+539
+540
+541
+542
+543
+544
+545
+546
+547
class UILoader:
+    """
+    Description : Create the chat interface
+    """
+
+    def __init__(self, config_path):
+        with open(config_path, "r") as file:
+            # Load config
+            self.config = json.load(file)
+        # Paths and display information
+
+        # Load metadata chroma database for structured query
+        self.collec = load_chroma_metadata()
+
+        # Metadata paths
+        self.data_metadata_path = (
+            Path(config["data_dir"]) / "all_dataset_description.csv"
+        )
+        self.flow_metadata_path = Path(config["data_dir"]) / "all_flow_description.csv"
+
+        # Read metadata
+        self.data_metadata = pd.read_csv(self.data_metadata_path)
+        self.flow_metadata = pd.read_csv(self.flow_metadata_path)
+
+        # defaults
+        self.query_type = "Dataset"
+        self.llm_filter = False
+        self.paths = self.load_paths()
+        self.info = """
+        <p style='text-align: center; color: white;'>Machine learning research should be easily accessible and reusable. <a href = "https://openml.org/">OpenML</a> is an open platform for sharing datasets, algorithms, and experiments - to learn how to learn better, together. </p>
+        """
+        self.logo = "images/favicon.ico"
+        self.chatbot_display = "How do I do X using OpenML? / Find me a dataset about Y"
+
+        if "messages" not in st.session_state:
+            st.session_state.messages = []
+
+    # container for company description and logo
+    def generate_logo_header(
+        self,
+    ):
+
+        col1, col2 = st.columns([1, 4])
+        with col1:
+            st.image(self.logo, width=100)
+        with col2:
+            st.markdown(
+                self.info,
+                unsafe_allow_html=True,
+            )
+
+    def generate_complete_ui(self):
+
+        self.generate_logo_header()
+        chat_container = st.container()
+        with chat_container:
+            with st.form(key="chat_form"):
+                user_input = st.text_input(
+                    label="Query", placeholder=self.chatbot_display
+                )
+                query_type = st.selectbox(
+                    "Select Query Type",
+                    ["General Query", "Dataset", "Flow"],
+                    help="Are you looking for a dataset or a flow or just have a general query?",
+                )
+                ai_filter = st.toggle(
+                    "Use AI powered filtering",
+                    value=True,
+                    help="Uses an AI model to identify what columns might be useful to you.",
+                )
+                st.form_submit_button(label="Search")
+
+            self.create_chat_interface(user_input=None)
+            if user_input:
+                self.create_chat_interface(
+                    user_input, query_type=query_type, ai_filter=ai_filter
+                )
+
+    def create_chat_interface(self, user_input, query_type=None, ai_filter=False):
+        """
+        Description: Create the chat interface and display the chat history and results. Show the user input and the response from the OpenML Agent.
+
+        """
+        self.query_type = query_type
+        self.ai_filter = ai_filter
+
+        if user_input is None:
+            with st.chat_message(name="ai"):
+                st.write("OpenML Agent: ", "Hello! How can I help you today?")
+                st.write(
+                    "Note that results are powered by local LLM models and may not be accurate. Please refer to the official OpenML website for accurate information."
+                )
+
+        # Handle user input
+        if user_input:
+            self._handle_user_input(user_input, query_type)
+
+    def _handle_user_input(self, user_input, query_type):
+        st.session_state.messages.append({"role": "user", "content": user_input})
+        with st.spinner("Waiting for results..."):
+            results = self.process_query_chat(user_input)
+
+        if not self.query_type == "General Query":
+            st.session_state.messages.append(
+                    {"role": "OpenML Agent", "content": results}
+                )
+        else:
+            self._stream_results(results)
+
+            # reverse messages to show the latest message at the top
+        reversed_messages = self._reverse_session_history()
+
+            # Display chat history
+        self._display_chat_history(query_type, reversed_messages)
+        self.create_download_button()
+
+    def _display_chat_history(self, query_type, reversed_messages):
+        for message in reversed_messages:
+            if query_type == "General Query":
+                pass
+            if message["role"] == "user":
+                with st.chat_message(name="user"):
+                    self.display_results(message["content"], "user")
+            else:
+                with st.chat_message(name="ai"):
+                    self.display_results(message["content"], "ai")
+
+    def _reverse_session_history(self):
+        reversed_messages = []
+        for index in range(0, len(st.session_state.messages), 2):
+            reversed_messages.insert(0, st.session_state.messages[index])
+            reversed_messages.insert(1, st.session_state.messages[index + 1])
+        return reversed_messages
+
+    def _stream_results(self, results):
+        with st.spinner("Fetching results..."):
+            with requests.get(results, stream=True) as r:
+                resp_contain = st.empty()
+                streamed_response = ""
+                for chunk in r.iter_content(chunk_size=1024):
+                    if chunk:
+                        streamed_response += chunk.decode("utf-8")
+                        resp_contain.markdown(streamed_response)
+                resp_contain.empty()
+            st.session_state.messages.append(
+                {"role": "OpenML Agent", "content": streamed_response}
+            )
+
+    @st.experimental_fragment()
+    def create_download_button(self):
+        data = "\n".join(
+            [str(message["content"]) for message in st.session_state.messages]
+        )
+        st.download_button(
+            label="Download chat history",
+            data=data,
+            file_name="chat_history.txt",
+        )
+
+    def display_results(self, initial_response, role):
+        """
+        Description: Display the results in a DataFrame
+        """
+        # st.write("OpenML Agent: ")
+
+        try:
+            st.dataframe(initial_response)
+        except:
+            st.write(initial_response)
+
+    # Function to handle query processing
+    def process_query_chat(self, query):
+        """
+        Description: Process the query and return the results based on the query type and the LLM filter.
+
+        """
+        apply_llm_before_rag = None if not self.llm_filter else False
+        response_parser = ResponseParser(
+            self.query_type, apply_llm_before_rag=apply_llm_before_rag
+        )
+
+        if self.query_type == "Dataset" or self.query_type == "Flow":
+            if not self.ai_filter:
+                response_parser.fetch_rag_response(self.query_type, query)
+                return response_parser.parse_and_update_response(self.data_metadata)
+            else:
+                # get structured query
+                self._display_structured_query_results(query, response_parser)
+
+            results = response_parser.parse_and_update_response(self.data_metadata)
+            return results
+
+        elif self.query_type == "General Query":
+            # Return documentation response path
+            return self.paths["documentation_query"]["local"] + query
+
+    def _display_structured_query_results(self, query, response_parser):
+        response_parser.fetch_structured_query(self.query_type, query)
+        try:
+            # get rag response
+            # using original query instead of extracted topics.
+            response_parser.fetch_rag_response(
+                self.query_type,
+                response_parser.structured_query_response[0]["query"],
+            )
+
+            if response_parser.structured_query_response:
+                st.write(
+                    "Detected Filter(s): ",
+                    json.dumps(
+                        response_parser.structured_query_response[0].get("filter", None)
+                    ),
+                )
+            else:
+                st.write("Detected Filter(s): ", None)
+            if response_parser.structured_query_response[1].get("filter"):
+                with st.spinner("Applying LLM Detected Filter(s)..."):
+                    response_parser.database_filter(
+                        response_parser.structured_query_response[1]["filter"],
+                        collec,
+                    )
+        except:
+            # fallback to RAG response
+            response_parser.fetch_rag_response(self.query_type, query)
+
+    def load_paths(self):
+        """
+        Description: Load paths from paths.json
+        """
+        with open("paths.json", "r") as file:
+            return json.load(file)
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ create_chat_interface(user_input, query_type=None, ai_filter=False) + +

+ + +
+ +

Description: Create the chat interface and display the chat history and results. Show the user input and the response from the OpenML Agent.

+ +
+ Source code in frontend/ui_utils.py +
395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
def create_chat_interface(self, user_input, query_type=None, ai_filter=False):
+    """
+    Description: Create the chat interface and display the chat history and results. Show the user input and the response from the OpenML Agent.
+
+    """
+    self.query_type = query_type
+    self.ai_filter = ai_filter
+
+    if user_input is None:
+        with st.chat_message(name="ai"):
+            st.write("OpenML Agent: ", "Hello! How can I help you today?")
+            st.write(
+                "Note that results are powered by local LLM models and may not be accurate. Please refer to the official OpenML website for accurate information."
+            )
+
+    # Handle user input
+    if user_input:
+        self._handle_user_input(user_input, query_type)
+
+
+
+ +
+ +
+ + +

+ display_results(initial_response, role) + +

+ + +
+ +

Description: Display the results in a DataFrame

+ +
+ Source code in frontend/ui_utils.py +
476
+477
+478
+479
+480
+481
+482
+483
+484
+485
def display_results(self, initial_response, role):
+    """
+    Description: Display the results in a DataFrame
+    """
+    # st.write("OpenML Agent: ")
+
+    try:
+        st.dataframe(initial_response)
+    except:
+        st.write(initial_response)
+
+
+
+ +
+ +
+ + +

+ load_paths() + +

+ + +
+ +

Description: Load paths from paths.json

+ +
+ Source code in frontend/ui_utils.py +
542
+543
+544
+545
+546
+547
def load_paths(self):
+    """
+    Description: Load paths from paths.json
+    """
+    with open("paths.json", "r") as file:
+        return json.load(file)
+
+
+
+ +
+ +
+ + +

+ process_query_chat(query) + +

+ + +
+ +

Description: Process the query and return the results based on the query type and the LLM filter.

+ +
+ Source code in frontend/ui_utils.py +
488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
+510
+511
def process_query_chat(self, query):
+    """
+    Description: Process the query and return the results based on the query type and the LLM filter.
+
+    """
+    apply_llm_before_rag = None if not self.llm_filter else False
+    response_parser = ResponseParser(
+        self.query_type, apply_llm_before_rag=apply_llm_before_rag
+    )
+
+    if self.query_type == "Dataset" or self.query_type == "Flow":
+        if not self.ai_filter:
+            response_parser.fetch_rag_response(self.query_type, query)
+            return response_parser.parse_and_update_response(self.data_metadata)
+        else:
+            # get structured query
+            self._display_structured_query_results(query, response_parser)
+
+        results = response_parser.parse_and_update_response(self.data_metadata)
+        return results
+
+    elif self.query_type == "General Query":
+        # Return documentation response path
+        return self.paths["documentation_query"]["local"] + query
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Ollama server/index.html b/Ollama server/index.html new file mode 100644 index 0000000..a867631 --- /dev/null +++ b/Ollama server/index.html @@ -0,0 +1,1216 @@ + + + + + + + + + + + + + + + + + + + + + + + Ollama Server - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Ollama Server

+
    +
  • This is the server that runs an Ollama server (This is basically an optimized version of a local LLM. It does not do anything of itself but runs as a background service so you can use the LLM).
  • +
  • You can start it by running cd ollama && ./get_ollama.sh &
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Query processing LLM/api_reference/index.html b/Query processing LLM/api_reference/index.html new file mode 100644 index 0000000..ec00eef --- /dev/null +++ b/Query processing LLM/api_reference/index.html @@ -0,0 +1,1572 @@ + + + + + + + + + + + + + + + + + + + + + + + Api reference - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Api reference

+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ get_llm_query(query) + + + async + + +

+ + +
+ +

Description: Get the query, replace %20 (url spacing) with space and invoke the chain to get the answers based on the prompt

+ +
+ Source code in llm_service/llm_service.py +
35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
@app.get("/llmquery/{query}", response_class=JSONResponse)
+@retry(stop=stop_after_attempt(3), retry=retry_if_exception_type(ConnectTimeout))
+async def get_llm_query(query: str):
+    """
+    Description: Get the query, replace %20 (url spacing) with space and invoke the chain to get the answers based on the prompt
+    """
+    query = query.replace("%20", " ")
+    print(f"Query: {query}")
+    try:
+        response = chain_docker.invoke({"query": query})
+    except:
+        response = chain.invoke({"query": query})
+    answers = parse_answers_initial(response, patterns, prompt_dict)
+    return JSONResponse(content=answers)
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ create_chain(prompt, model='llama3', temperature=0, base_url='http://localhost:11434') + +

+ + +
+ +

Description: Create a langchain chain with the given prompt and model and the temperature. +The lower the temperature, the less "creative" the model will be.

+ +
+ Source code in llm_service/llm_service_utils.py +
 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
def create_chain(
+    prompt,
+    model: str = "llama3",
+    temperature: int = 0,
+    base_url: str = "http://localhost:11434",
+):
+    """
+    Description: Create a langchain chain with the given prompt and model and the temperature.
+    The lower the temperature, the less "creative" the model will be.
+    """
+    llm = ChatOllama(model=model, temperature=temperature, base_url=base_url)
+    prompt = ChatPromptTemplate.from_template(prompt)
+
+    return prompt | llm | StrOutputParser()
+
+
+
+ +
+ +
+ + +

+ parse_answers_initial(response, patterns, prompt_dict) + +

+ + +
+ +

Description: Parse the answers from the initial response +- if the response contains a ? and a new line then join the next line with it (sometimes the LLM adds a new line after the ? instead of just printing it on the same line)

+ +
+ Source code in llm_service/llm_service_utils.py +
23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
def parse_answers_initial(response: str, patterns: list, prompt_dict: dict) -> dict:
+    """
+    Description: Parse the answers from the initial response
+    - if the response contains a ? and a new line then join the next line with it (sometimes the LLM adds a new line after the ? instead of just printing it on the same line)
+    """
+
+    answers = []
+    response = response.replace("?\n", "?")
+
+    # convert the response to lowercase and split it into lines
+    lines = response.lower().split("\n")
+
+    for line in lines:
+        if "?" in line:
+            # Extract the part of the line after the question mark
+            potential_answer = line.split("?")[1].strip()
+        else:
+            potential_answer = line.strip()
+
+        # Check if the potential answer matches any of the patterns
+        for pattern in patterns:
+            if re.match(pattern, potential_answer):
+                answers.append(potential_answer)
+                break  # Stop checking other patterns if a match is found
+
+    # return answers as a dict using the prompt_dict keys
+    answers_dict = {}
+    for i, key in enumerate(prompt_dict.keys()):
+        answers_dict[key] = answers[i]
+
+    return answers_dict
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Query processing LLM/index.html b/Query processing LLM/index.html new file mode 100644 index 0000000..7be7539 --- /dev/null +++ b/Query processing LLM/index.html @@ -0,0 +1,1382 @@ + + + + + + + + + + + + + + + + + + + + + + + LLM Query parsing - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

LLM Query parsing

+
    +
  • This page is only an overview. Please refer to the api reference for more detailed information.
  • +
  • The query parsing LLM reads the query and parses it into a list of filters based on a prompt. The expected result is a JSON with a list of filters to be applied to the metadata and the query.
  • +
  • This is done by providing a prompt to the RAG and telling it to extract the filters/etc and either structure it or not.
  • +
  • This implementation is served as a FastAPI service that can be queried quite easily.
  • +
+

Unstructured Implementation

+
    +
  • This implementation is independent of langchain, and takes a more manual approach to parsing the filters. At the moment, this does not separate the query from the filters either. (The structured query implementation attempts to do that.)
  • +
  • The response of the the LLM parser does not take into account how to apply the filters, it just provides a list of the ones that the LLM considered relevant to the UI.
  • +
  • This component is the one that runs the query processing using LLMs module. It uses the Ollama server, runs queries and processes them.
  • +
  • You can start it by running cd llm_service && uvicorn llm_service:app --host 0.0.0.0 --port 8081 &
  • +
  • Curl Example : curl http://0.0.0.0:8081/llmquery/find%20me%20a%20mushroom%20dataset%20with%20less%20than%203000%20classes
  • +
+

llm_service.py

+
    +
  • A prompt template is used to tell the RAG what to do.
  • +
  • The prompt_dict defines a list of filters and their respective prompts for the LLM. This is concatenated with the prompt template.
  • +
  • The response is parsed quite simply. Since the LLM is asked to provide it's answers line by line, each line is parsed for the required information according to a list of patterns provided.
  • +
  • Thus, if you want to add a new type of answer, add it to the patterns list and it should be taken care of.
  • +
+

llm_service_utils.py

+
    +
  • The main logic of the above is defined here.
  • +
+

Structured Query Implementation

+

Additional information

+
    +
  • In the process of testing this implementation, a blog was written about how the temperature parameter affects the results of the model. This can be found here.
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/Developer Tutorials/change data input/index.html b/Rag Pipeline/Developer Tutorials/change data input/index.html new file mode 100644 index 0000000..79dad06 --- /dev/null +++ b/Rag Pipeline/Developer Tutorials/change data input/index.html @@ -0,0 +1,1485 @@ + + + + + + + + + + + + + + + + + + + + + + + Change data input - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Change data input

+ + + +
+
+ +
1
+2
+3
+4
+5
+6
from __future__ import annotations
+from langchain.globals import set_llm_cache
+from langchain_community.cache import SQLiteCache
+import os
+import sys
+import chromadb
+
+ +
+
+
+
+ +
1
+2
+3
from backend.modules.utils import *
+from backend.modules.rag_llm import *
+from backend.modules.results_gen import *
+
+ +
+
+
+
+
+
+/Users/smukherjee/.pyenv/versions/3.10.14/envs/openml/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
+  from .autonotebook import tqdm as notebook_tqdm
+
+
+
+
+
+
+
+
+
+ +
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
config = load_config_and_device("../../../backend/config.json")
+config["persist_dir"] = "../../data/doc_examples/chroma_db/"
+config["data_dir"] = "../../data/doc_examples/"
+config["type_of_data"] = "dataset"
+config["training"] = False
+config["testing_flag"] = True  # set this to false while training, this is for demo
+config["test_subset"] = True  # set this to false while training, this is for demo
+
+# load the persistent database using ChromaDB
+client = chromadb.PersistentClient(path=config["persist_dir"])
+print(config)
+
+ +
+
+
+
+
+
+[INFO] Finding device.
+[INFO] Device found: mps
+{'rqa_prompt_template': 'This database is a list of metadata. Use the following pieces of context to find the relevant document. Answer only from the context given using the {question} given. If you do not know the answer, say you do not know. {context}', 'llm_prompt_template': 'The following is a set of documents {docs}. Based on these docs, please summarize the content concisely. Also give a list of main concepts found in the documents. Do not add any new information. Helpful Answer: ', 'num_return_documents': 30, 'embedding_model': 'BAAI/bge-large-en-v1.5', 'llm_model': 'llama3', 'num_documents_for_llm': 30, 'data_dir': '../../data/doc_examples/', 'persist_dir': '../../data/doc_examples/chroma_db/', 'testing_flag': True, 'ignore_downloading_data': False, 'test_subset': True, 'data_download_n_jobs': 20, 'training': False, 'temperature': 0.95, 'top_p': 0.95, 'search_type': 'similarity', 'reranking': False, 'long_context_reorder': False, 'structure_query': False, 'use_chroma_for_saving_metadata': False, 'device': 'mps', 'type_of_data': 'dataset'}
+
+
+
+
+
+
+
+
+
+
+

Change the way the data is combined

+
    +
  • To pass to the RAG, all the metadata is combined into a single string. This is done by concatenating all the metadata fields with a space separator.
  • +
  • We can change the way the data in whatever way we want. For example, we can concatenate all the metadata fields with a "~" separator.
  • +
+
+
+
+
+
+ +
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
def join_attributes(attribute: object, attr_name: str) -> str:
    """
    Join the key/value pairs of the named attribute on an OpenML object into
    a single string: each pair is rendered as "key : value," and pairs are
    separated by " ~ ".

    Returns an empty string when the object has no attribute of that name.
    """
    if not hasattr(attribute, attr_name):
        return ""
    # NOTE(review): the default {} can never be used once hasattr passed,
    # but it is kept so behavior matches the original exactly.
    pairs = getattr(attribute, attr_name, {})
    return " ~ ".join(f"{key} : {value}," for key, value in pairs.items())
+
+
def combine_metadata(
    self, all_dataset_metadata: pd.DataFrame, all_data_description_df: pd.DataFrame
) -> pd.DataFrame:
    """
    Merge the description table into the metadata table (inner join on the
    "did" column) and add a "Combined_information" column produced by
    applying self.merge_all_columns_to_string to every merged row.
    """
    merged = all_dataset_metadata.merge(
        all_data_description_df, on="did", how="inner"
    )
    # Row-wise apply builds one combined string per dataset entry.
    merged["Combined_information"] = merged.apply(
        self.merge_all_columns_to_string, axis=1
    )
    return merged
+
+ +
+
+
+
+ +
1
+2
# Monkey-patch the handler class so the pipeline uses the custom "~"-separated
# join and the combined-metadata builder instead of the stock implementations.
# NOTE(review): presumably setup_vector_db_and_qa() calls these methods when
# building documents — confirm against OpenMLObjectHandler's source.
OpenMLObjectHandler.join_attributes = join_attributes
+OpenMLObjectHandler.combine_metadata = combine_metadata
+
+ +
+
+
+
+ +
1
+2
+3
+4
+5
+6
# Setup llm chain, initialize the retriever and llm, and setup Retrieval QA
# QASetup bundles the config dict, the OpenML object type ("dataset" here,
# taken from config["type_of_data"]) and the persistent ChromaDB client;
# the vector DB and QA chain are built later by setup_vector_db_and_qa().
+qa_dataset_handler = QASetup(
+    config=config,
+    data_type=config["type_of_data"],
+    client=client,
+)
+
+ +
+
+
+
+ +
1
qa_dataset, _ = qa_dataset_handler.setup_vector_db_and_qa()
+
+ +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/Developer Tutorials/change model/index.html b/Rag Pipeline/Developer Tutorials/change model/index.html new file mode 100644 index 0000000..7a5cfee --- /dev/null +++ b/Rag Pipeline/Developer Tutorials/change model/index.html @@ -0,0 +1,1475 @@ + + + + + + + + + + + + + + + + + + + + + + + Change model - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + +
+
+
+

Tutorial on changing models

+
    +
  • How would you use a different embedding and llm model?
  • +
+
+
+
+
+
+ +
1
+2
+3
+4
+5
from __future__ import annotations
+from langchain_community.cache import SQLiteCache
+import os
+import sys
+import chromadb
+
+ +
+
+
+
+ +
1
+2
from backend.modules.utils import load_config_and_device
+from backend.modules.rag_llm import QASetup
+
+ +
+
+
+
+
+

Initial config

+
+
+
+
+
+ +
1
+2
+3
+4
+5
+6
+7
+8
+9
# Load the base config (also detects the compute device) and override the
# paths/flags for this demo run.
config = load_config_and_device("../../../backend/config.json")
# Where the ChromaDB vector store is persisted and where metadata lives.
+config["persist_dir"] = "../../data/doc_examples/chroma_db/"
+config["data_dir"] = "../../data/doc_examples/"
+config["type_of_data"] = "dataset"
+config["training"] = True
+config["test_subset"] = True  # set this to false while training, this is for demo
+# load the persistent database using ChromaDB
+client = chromadb.PersistentClient(path=config["persist_dir"])
+print(config)
+
+ +
+
+
+
+
+

Embedding model

+
    +
  • Pick a model from HF
  • +
+
+
+
+
+
+ +
1
config["embedding_model"] = "BAAI/bge-large-en-v1.5"
+
+ +
+
+
+
+
+

LLM model

+
+
+
+
+
+
+
    +
  • Pick a model from Ollama - https://ollama.com/library?sort=popular
  • +
  • eg : mistral
  • +
+
+
+
+
+
+ +
1
config["llm_model"] = "mistral"
+
+ +
+
+
+
+ +
1
+2
+3
+4
+5
+6
+7
# Wire the (model-overridden) config, object type and ChromaDB client into
# QASetup, then build the vector DB and the Retrieval-QA chain.
qa_dataset_handler = QASetup(
+    config=config,
+    data_type=config["type_of_data"],
+    client=client,
+)
+
+qa_dataset, _ = qa_dataset_handler.setup_vector_db_and_qa()
+
+ +
+
+
+
+
+

IMPORTANT

+
    +
  • Do NOT forget to change the model to the best model in ollama/get_ollama.sh
  • +
+
+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/Developer Tutorials/change_model.py b/Rag Pipeline/Developer Tutorials/change_model.py new file mode 100644 index 0000000..fd8cc45 --- /dev/null +++ b/Rag Pipeline/Developer Tutorials/change_model.py @@ -0,0 +1,65 @@ +# --- +# jupyter: +# jupytext: +# text_representation: +# extension: .py +# format_name: light +# format_version: '1.5' +# jupytext_version: 1.16.3 +# kernelspec: +# display_name: openml +# language: python +# name: python3 +# --- + +# # Tutorial on changing models +# - How would you use a different embedding and llm model? + +from __future__ import annotations + +import os +import sys + +import chromadb +from langchain_community.cache import SQLiteCache + +from backend.modules.rag_llm import QASetup +from backend.modules.utils import load_config_and_device + +# ## Initial config + +config = load_config_and_device("../../../backend/config.json") +config["persist_dir"] = "../../data/doc_examples/chroma_db/" +config["data_dir"] = "../../data/doc_examples/" +config["type_of_data"] = "dataset" +config["training"] = True +config["test_subset"] = True # set this to false while training, this is for demo +# load the persistent database using ChromaDB +client = chromadb.PersistentClient(path=config["persist_dir"]) +print(config) + +# ## Embedding model +# - Pick a model from HF + +config["embedding_model"] = "BAAI/bge-large-en-v1.5" + +# ## LLM model + +# - Pick a model from Ollama - https://ollama.com/library?sort=popular +# - eg : mistral +# + +config["llm_model"] = "mistral" + +# + +qa_dataset_handler = QASetup( + config=config, + data_type=config["type_of_data"], + client=client, +) + +qa_dataset, _ = qa_dataset_handler.setup_vector_db_and_qa() +# - + +# # IMPORTANT +# - Do NOT forget to change the model to the best model in ollama/get_ollama.sh diff --git a/Rag Pipeline/Developer Tutorials/create vectordb/index.html b/Rag Pipeline/Developer Tutorials/create vectordb/index.html new 
file mode 100644 index 0000000..bc3c1a8 --- /dev/null +++ b/Rag Pipeline/Developer Tutorials/create vectordb/index.html @@ -0,0 +1,1333 @@ + + + + + + + + + + + + + + + + + + + + + + + Create vectordb - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + +
+
+
+

Tutorial on creating a vector database with openml objects

+
    +
  • How would you use the API to create a vector database with openml objects (datasets, flows etc)
  • +
+
+
+
+
+
+ +
1
+2
+3
+4
+5
+6
from __future__ import annotations
+from langchain.globals import set_llm_cache
+from langchain_community.cache import SQLiteCache
+import os
+import sys
+import chromadb
+
+ +
+
+
+
+ +
1
+2
from backend.modules.utils import *
+from backend.modules.rag_llm import *
+
+ +
+
+
+
+ +
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
+11
config = load_config_and_device("../../../backend/config.json")
+config["persist_dir"] = "../../data/doc_examples/chroma_db/"
+config["data_dir"] = "../../data/doc_examples/"
+config["type_of_data"] = "dataset"
+config["training"] = False
+config["testing_flag"] = True  # set this to false while training, this is for demo
+config["test_subset"] = True  # set this to false while training, this is for demo
+
+# load the persistent database using ChromaDB
+client = chromadb.PersistentClient(path=config["persist_dir"])
+print(config)
+
+ +
+
+
+
+ +
1
+2
+3
+4
+5
qa_dataset_handler = QASetup(
+    config=config,
+    data_type="dataset",
+    client=client,
+)
+
+ +
+
+
+
+ +
1
qa_dataset, _ = qa_dataset_handler.setup_vector_db_and_qa()
+
+ +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/Developer Tutorials/index.html b/Rag Pipeline/Developer Tutorials/index.html new file mode 100644 index 0000000..27378fd --- /dev/null +++ b/Rag Pipeline/Developer Tutorials/index.html @@ -0,0 +1,1279 @@ + + + + + + + + + + + + + + + + + + + + + + + Developer Tutorials - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Developer Tutorials

+
    +
  • Hello there, future OpenML contributor! It is nice meeting you here. This page is a collection of tutorials that will help you get started with contributing to the OpenML RAG pipeline.
  • +
  • The tutorials show you how to perform common tasks and should make it a lot easier to get started with contributing to this project.
  • +
  • Note that you need to have set up the project before you begin. If you missed this step, please refer to the setup guide.
  • +
+

How to use them

+
    +
  • Once you have set up the project, just navigate to the tutorial you are interested in and open it in your IDE.
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/Developer Tutorials/load vectordb and get results/index.html b/Rag Pipeline/Developer Tutorials/load vectordb and get results/index.html new file mode 100644 index 0000000..74f56f6 --- /dev/null +++ b/Rag Pipeline/Developer Tutorials/load vectordb and get results/index.html @@ -0,0 +1,1743 @@ + + + + + + + + + + + + + + + + + + + + + + + Load vectordb and get results - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + +
+
+
+

Load the Chroma Db and get retrieval results for a given query

+
    +
  • How would you load the Chroma Db and get retrieval results for a given query?
  • +
+
+
+
+
+
+ +
1
+2
+3
+4
+5
+6
from __future__ import annotations
+from langchain.globals import set_llm_cache
+from langchain_community.cache import SQLiteCache
+import os
+import sys
+import chromadb
+
+ +
+
+
+
+ +
1
+2
+3
from backend.modules.utils import *
+from backend.modules.rag_llm import *
+from backend.modules.results_gen import *
+
+ +
+
+
+
+
+
+/Users/smukherjee/.pyenv/versions/3.10.14/envs/openml/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
+  from .autonotebook import tqdm as notebook_tqdm
+
+
+
+
+
+
+
+
+
+ +
 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+10
config = load_config_and_device("../../../backend/config.json")
+config["persist_dir"] = "../../data/doc_examples/chroma_db/"
+config["data_dir"] = "../../data/doc_examples/"
+config["type_of_data"] = "dataset"
+config["training"] = False
+config["testing_flag"] = True  # set this to false while training, this is for demo
+config["test_subset"] = True  # set this to false while training, this is for demo
+# load the persistent database using ChromaDB
+client = chromadb.PersistentClient(path=config["persist_dir"])
+print(config)
+
+ +
+
+
+
+
+
+[INFO] Finding device.
+[INFO] Device found: mps
+{'rqa_prompt_template': 'This database is a list of metadata. Use the following pieces of context to find the relevant document. Answer only from the context given using the {question} given. If you do not know the answer, say you do not know. {context}', 'llm_prompt_template': 'The following is a set of documents {docs}. Based on these docs, please summarize the content concisely. Also give a list of main concepts found in the documents. Do not add any new information. Helpful Answer: ', 'num_return_documents': 30, 'embedding_model': 'BAAI/bge-large-en-v1.5', 'llm_model': 'llama3', 'num_documents_for_llm': 30, 'data_dir': '../../data/doc_examples/', 'persist_dir': '../../data/doc_examples/chroma_db/', 'testing_flag': True, 'ignore_downloading_data': False, 'test_subset': True, 'data_download_n_jobs': 20, 'training': False, 'temperature': 0.95, 'top_p': 0.95, 'search_type': 'similarity', 'reranking': False, 'long_context_reorder': False, 'structure_query': False, 'use_chroma_for_saving_metadata': False, 'device': 'mps', 'type_of_data': 'dataset'}
+
+
+
+
+
+
+
+
+
+ +
1
+2
+3
+4
+5
+6
+7
+8
# Setup llm chain, initialize the retriever and llm, and setup Retrieval QA
+qa_dataset_handler = QASetup(
+    config=config,
+    data_type=config["type_of_data"],
+    client=client,
+)
+
+qa_dataset, _ = qa_dataset_handler.setup_vector_db_and_qa()
+
+ +
+
+
+
+
+
+[INFO] Loading metadata from file.
+[INFO] Loading model...
+[INFO] Model loaded.
+[INFO] Subsetting the data.
+[INFO] Generating unique documents. Total documents: 500
+Number of unique documents: 0 vs Total documents: 500
+No new documents to add.
+
+
+
+
+
+
+
+
+
+ +
1
+2
+3
+4
# get the llm chain and set the cache
# NOTE(review): local=True presumably selects the locally served LLM
# (Ollama, per config["llm_model"]) — confirm in LLMChainCreator.
# enable_cache() likely uses the SQLiteCache imported above so repeated
# prompts are not re-sent to the model — confirm.
+llm_chain_handler = LLMChainCreator(config=config, local=True)
+llm_chain_handler.enable_cache()
+llm_chain = llm_chain_handler.get_llm_chain()
+
+ +
+
+
+
+
+

Just get documents

+
+
+
+
+
+ +
1
query = "give me datasets about mushrooms"
+
+ +
+
+
+
+ +
1
+2
res = qa_dataset.invoke(input=query, top_k=5)[:10]
+res
+
+ +
+
+
+
+
+
+[Document(metadata={'MajorityClassSize': 4208.0, 'MaxNominalAttDistinctValues': 12.0, 'MinorityClassSize': 3916.0, 'NumberOfClasses': 2.0, 'NumberOfFeatures': 23.0, 'NumberOfInstances': 8124.0, 'NumberOfInstancesWithMissingValues': 2480.0, 'NumberOfMissingValues': 2480.0, 'NumberOfNumericFeatures': 0.0, 'NumberOfSymbolicFeatures': 23.0, 'Unnamed: 0': 19, 'description': "**Author**: [Jeff Schlimmer](Jeffrey.Schlimmer@a.gp.cs.cmu.edu)  \n**Source**: [UCI](https://archive.ics.uci.edu/ml/datasets/mushroom) - 1981     \n**Please cite**:  The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf \n\n\n### Description\n\nThis dataset describes mushrooms in terms of their physical characteristics. They are classified into: poisonous or edible.\n\n### Source\n```\n(a) Origin: \nMushroom records are drawn from The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf \n\n(b) Donor: \nJeff Schlimmer (Jeffrey.Schlimmer '@' a.gp.cs.cmu.edu)\n```\n\n### Dataset description\n\nThis dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family. Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like ``leaflets three, let it be'' for Poisonous Oak and Ivy.\n\n### Attributes Information\n```\n1. cap-shape: bell=b,conical=c,convex=x,flat=f, knobbed=k,sunken=s \n2. cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s \n3. cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r, pink=p,purple=u,red=e,white=w,yellow=y \n4. bruises?: bruises=t,no=f \n5. odor: almond=a,anise=l,creosote=c,fishy=y,foul=f, musty=m,none=n,pungent=p,spicy=s \n6. 
gill-attachment: attached=a,descending=d,free=f,notched=n \n7. gill-spacing: close=c,crowded=w,distant=d \n8. gill-size: broad=b,narrow=n \n9. gill-color: black=k,brown=n,buff=b,chocolate=h,gray=g, green=r,orange=o,pink=p,purple=u,red=e, white=w,yellow=y \n10. stalk-shape: enlarging=e,tapering=t \n11. stalk-root: bulbous=b,club=c,cup=u,equal=e, rhizomorphs=z,rooted=r,missing=? \n12. stalk-surface-above-ring: fibrous=f,scaly=y,silky=k,smooth=s \n13. stalk-surface-below-ring: fibrous=f,scaly=y,silky=k,smooth=s \n14. stalk-color-above-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, pink=p,red=e,white=w,yellow=y \n15. stalk-color-below-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, pink=p,red=e,white=w,yellow=y \n16. veil-type: partial=p,universal=u \n17. veil-color: brown=n,orange=o,white=w,yellow=y \n18. ring-number: none=n,one=o,two=t \n19. ring-type: cobwebby=c,evanescent=e,flaring=f,large=l, none=n,pendant=p,sheathing=s,zone=z \n20. spore-print-color: black=k,brown=n,buff=b,chocolate=h,green=r, orange=o,purple=u,white=w,yellow=y \n21. population: abundant=a,clustered=c,numerous=n, scattered=s,several=v,solitary=y \n22. habitat: grasses=g,leaves=l,meadows=m,paths=p, urban=u,waste=w,woods=d\n```\n\n### Relevant papers\n\nSchlimmer,J.S. (1987). Concept Acquisition Through Representational Adjustment (Technical Report 87-19). Doctoral disseration, Department of Information and Computer Science, University of California, Irvine. \n\nIba,W., Wogulis,J., & Langley,P. (1988). Trading off Simplicity and Coverage in Incremental Concept Learning. In Proceedings of the 5th International Conference on Machine Learning, 73-79. Ann Arbor, Michigan: Morgan Kaufmann. \n\nDuch W, Adamczak R, Grabczewski K (1996) Extraction of logical rules from training data using backpropagation networks, in: Proc. of the The 1st Online Workshop on Soft Computing, 19-30.Aug.1996, pp. 
25-30, [Web Link] \n\nDuch W, Adamczak R, Grabczewski K, Ishikawa M, Ueda H, Extraction of crisp logical rules using constrained backpropagation networks - comparison of two new approaches, in: Proc. of the European Symposium on Artificial Neural Networks (ESANN'97), Bruge, Belgium 16-18.4.1997.", 'did': 24, 'features': '0 : [0 - cap-shape (nominal)], 1 : [1 - cap-surface (nominal)], 2 : [2 - cap-color (nominal)], 3 : [3 - bruises%3F (nominal)], 4 : [4 - odor (nominal)], 5 : [5 - gill-attachment (nominal)], 6 : [6 - gill-spacing (nominal)], 7 : [7 - gill-size (nominal)], 8 : [8 - gill-color (nominal)], 9 : [9 - stalk-shape (nominal)], 10 : [10 - stalk-root (nominal)], 11 : [11 - stalk-surface-above-ring (nominal)], 12 : [12 - stalk-surface-below-ring (nominal)], 13 : [13 - stalk-color-above-ring (nominal)], 14 : [14 - stalk-color-below-ring (nominal)], 15 : [15 - veil-type (nominal)], 16 : [16 - veil-color (nominal)], 17 : [17 - ring-number (nominal)], 18 : [18 - ring-type (nominal)], 19 : [19 - spore-print-color (nominal)], 20 : [20 - population (nominal)], 21 : [21 - habitat (nominal)], 22 : [22 - class (nominal)],', 'format': 'ARFF', 'name': 'mushroom', 'qualities': 'AutoCorrelation : 0.726332635725717, CfsSubsetEval_DecisionStumpAUC : 0.9910519616800724, CfsSubsetEval_DecisionStumpErrRate : 0.013047759724273756, CfsSubsetEval_DecisionStumpKappa : 0.9738461616958994, CfsSubsetEval_NaiveBayesAUC : 0.9910519616800724, CfsSubsetEval_NaiveBayesErrRate : 0.013047759724273756, CfsSubsetEval_NaiveBayesKappa : 0.9738461616958994, CfsSubsetEval_kNN1NAUC : 0.9910519616800724, CfsSubsetEval_kNN1NErrRate : 0.013047759724273756, CfsSubsetEval_kNN1NKappa : 0.9738461616958994, ClassEntropy : 0.9990678968724604, DecisionStumpAUC : 0.8894935275772204, DecisionStumpErrRate : 0.11324470704086657, DecisionStumpKappa : 0.77457574608175, Dimensionality : 0.002831117676021664, EquivalentNumberOfAtts : 5.0393135801657, J48.00001.AUC : 1.0, J48.00001.ErrRate : 0.0, J48.00001.Kappa : 
1.0, J48.0001.AUC : 1.0, J48.0001.ErrRate : 0.0, J48.0001.Kappa : 1.0, J48.001.AUC : 1.0, J48.001.ErrRate : 0.0, J48.001.Kappa : 1.0, MajorityClassPercentage : 51.7971442639094, MajorityClassSize : 4208.0, MaxAttributeEntropy : 3.030432883772633, MaxKurtosisOfNumericAtts : nan, MaxMeansOfNumericAtts : nan, MaxMutualInformation : 0.906074977384, MaxNominalAttDistinctValues : 12.0, MaxSkewnessOfNumericAtts : nan, MaxStdDevOfNumericAtts : nan, MeanAttributeEntropy : 1.4092554739602103, MeanKurtosisOfNumericAtts : nan, MeanMeansOfNumericAtts : nan, MeanMutualInformation : 0.19825475850613955, MeanNoiseToSignalRatio : 6.108305922031972, MeanNominalAttDistinctValues : 5.130434782608695, MeanSkewnessOfNumericAtts : nan, MeanStdDevOfNumericAtts : nan, MinAttributeEntropy : -0.0, MinKurtosisOfNumericAtts : nan, MinMeansOfNumericAtts : nan, MinMutualInformation : 0.0, MinNominalAttDistinctValues : 1.0, MinSkewnessOfNumericAtts : nan, MinStdDevOfNumericAtts : nan, MinorityClassPercentage : 48.20285573609059, MinorityClassSize : 3916.0, NaiveBayesAUC : 0.9976229672941662, NaiveBayesErrRate : 0.04899064500246184, NaiveBayesKappa : 0.9015972799616292, NumberOfBinaryFeatures : 5.0, NumberOfClasses : 2.0, NumberOfFeatures : 23.0, NumberOfInstances : 8124.0, NumberOfInstancesWithMissingValues : 2480.0, NumberOfMissingValues : 2480.0, NumberOfNumericFeatures : 0.0, NumberOfSymbolicFeatures : 23.0, PercentageOfBinaryFeatures : 21.73913043478261, PercentageOfInstancesWithMissingValues : 30.526834071885773, PercentageOfMissingValues : 1.3272536552993814, PercentageOfNumericFeatures : 0.0, PercentageOfSymbolicFeatures : 100.0, Quartile1AttributeEntropy : 0.8286618104993447, Quartile1KurtosisOfNumericAtts : nan, Quartile1MeansOfNumericAtts : nan, Quartile1MutualInformation : 0.034184520425602494, Quartile1SkewnessOfNumericAtts : nan, Quartile1StdDevOfNumericAtts : nan, Quartile2AttributeEntropy : 1.467128011861462, Quartile2KurtosisOfNumericAtts : nan, Quartile2MeansOfNumericAtts : nan, 
Quartile2MutualInformation : 0.174606545183155, Quartile2SkewnessOfNumericAtts : nan, Quartile2StdDevOfNumericAtts : nan, Quartile3AttributeEntropy : 2.0533554351937426, Quartile3KurtosisOfNumericAtts : nan, Quartile3MeansOfNumericAtts : nan, Quartile3MutualInformation : 0.27510225484918505, Quartile3SkewnessOfNumericAtts : nan, Quartile3StdDevOfNumericAtts : nan, REPTreeDepth1AUC : 0.9999987256143267, REPTreeDepth1ErrRate : 0.00036927621861152144, REPTreeDepth1Kappa : 0.9992605118549308, REPTreeDepth2AUC : 0.9999987256143267, REPTreeDepth2ErrRate : 0.00036927621861152144, REPTreeDepth2Kappa : 0.9992605118549308, REPTreeDepth3AUC : 0.9999987256143267, REPTreeDepth3ErrRate : 0.00036927621861152144, REPTreeDepth3Kappa : 0.9992605118549308, RandomTreeDepth1AUC : 0.9995247148288974, RandomTreeDepth1ErrRate : 0.0004923682914820286, RandomTreeDepth1Kappa : 0.9990140245420991, RandomTreeDepth2AUC : 0.9995247148288974, RandomTreeDepth2ErrRate : 0.0004923682914820286, RandomTreeDepth2Kappa : 0.9990140245420991, RandomTreeDepth3AUC : 0.9995247148288974, RandomTreeDepth3ErrRate : 0.0004923682914820286, RandomTreeDepth3Kappa : 0.9990140245420991, StdvNominalAttDistinctValues : 3.1809710899501766, kNN1NAUC : 1.0, kNN1NErrRate : 0.0, kNN1NKappa : 1.0,', 'status': 'active', 'uploader': 1, 'version': 1}, page_content="### Description\n\nThis dataset describes mushrooms in terms of their physical characteristics. They are classified into: poisonous or edible.\n\n### Source\n```\n(a) Origin: \nMushroom records are drawn from The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf \n\n(b) Donor: \nJeff Schlimmer (Jeffrey.Schlimmer '@' a.gp.cs.cmu.edu)\n```\n\n### Dataset description\n\nThis dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family. 
Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like ``leaflets three, let it be'' for Poisonous Oak and Ivy."),
+ Document(metadata={'MajorityClassSize': 4208.0, 'MaxNominalAttDistinctValues': 12.0, 'MinorityClassSize': 3916.0, 'NumberOfClasses': 2.0, 'NumberOfFeatures': 23.0, 'NumberOfInstances': 8124.0, 'NumberOfInstancesWithMissingValues': 2480.0, 'NumberOfMissingValues': 2480.0, 'NumberOfNumericFeatures': 0.0, 'NumberOfSymbolicFeatures': 23.0, 'Unnamed: 0': 19, 'description': "**Author**: [Jeff Schlimmer](Jeffrey.Schlimmer@a.gp.cs.cmu.edu)  \n**Source**: [UCI](https://archive.ics.uci.edu/ml/datasets/mushroom) - 1981     \n**Please cite**:  The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf \n\n\n### Description\n\nThis dataset describes mushrooms in terms of their physical characteristics. They are classified into: poisonous or edible.\n\n### Source\n```\n(a) Origin: \nMushroom records are drawn from The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf \n\n(b) Donor: \nJeff Schlimmer (Jeffrey.Schlimmer '@' a.gp.cs.cmu.edu)\n```\n\n### Dataset description\n\nThis dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family. Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like ``leaflets three, let it be'' for Poisonous Oak and Ivy.\n\n### Attributes Information\n```\n1. cap-shape: bell=b,conical=c,convex=x,flat=f, knobbed=k,sunken=s \n2. cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s \n3. cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r, pink=p,purple=u,red=e,white=w,yellow=y \n4. bruises?: bruises=t,no=f \n5. odor: almond=a,anise=l,creosote=c,fishy=y,foul=f, musty=m,none=n,pungent=p,spicy=s \n6. 
gill-attachment: attached=a,descending=d,free=f,notched=n \n7. gill-spacing: close=c,crowded=w,distant=d \n8. gill-size: broad=b,narrow=n \n9. gill-color: black=k,brown=n,buff=b,chocolate=h,gray=g, green=r,orange=o,pink=p,purple=u,red=e, white=w,yellow=y \n10. stalk-shape: enlarging=e,tapering=t \n11. stalk-root: bulbous=b,club=c,cup=u,equal=e, rhizomorphs=z,rooted=r,missing=? \n12. stalk-surface-above-ring: fibrous=f,scaly=y,silky=k,smooth=s \n13. stalk-surface-below-ring: fibrous=f,scaly=y,silky=k,smooth=s \n14. stalk-color-above-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, pink=p,red=e,white=w,yellow=y \n15. stalk-color-below-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, pink=p,red=e,white=w,yellow=y \n16. veil-type: partial=p,universal=u \n17. veil-color: brown=n,orange=o,white=w,yellow=y \n18. ring-number: none=n,one=o,two=t \n19. ring-type: cobwebby=c,evanescent=e,flaring=f,large=l, none=n,pendant=p,sheathing=s,zone=z \n20. spore-print-color: black=k,brown=n,buff=b,chocolate=h,green=r, orange=o,purple=u,white=w,yellow=y \n21. population: abundant=a,clustered=c,numerous=n, scattered=s,several=v,solitary=y \n22. habitat: grasses=g,leaves=l,meadows=m,paths=p, urban=u,waste=w,woods=d\n```\n\n### Relevant papers\n\nSchlimmer,J.S. (1987). Concept Acquisition Through Representational Adjustment (Technical Report 87-19). Doctoral disseration, Department of Information and Computer Science, University of California, Irvine. \n\nIba,W., Wogulis,J., & Langley,P. (1988). Trading off Simplicity and Coverage in Incremental Concept Learning. In Proceedings of the 5th International Conference on Machine Learning, 73-79. Ann Arbor, Michigan: Morgan Kaufmann. \n\nDuch W, Adamczak R, Grabczewski K (1996) Extraction of logical rules from training data using backpropagation networks, in: Proc. of the The 1st Online Workshop on Soft Computing, 19-30.Aug.1996, pp. 
25-30, [Web Link] \n\nDuch W, Adamczak R, Grabczewski K, Ishikawa M, Ueda H, Extraction of crisp logical rules using constrained backpropagation networks - comparison of two new approaches, in: Proc. of the European Symposium on Artificial Neural Networks (ESANN'97), Bruge, Belgium 16-18.4.1997.", 'did': 24, 'features': '0 : [0 - cap-shape (nominal)], 1 : [1 - cap-surface (nominal)], 2 : [2 - cap-color (nominal)], 3 : [3 - bruises%3F (nominal)], 4 : [4 - odor (nominal)], 5 : [5 - gill-attachment (nominal)], 6 : [6 - gill-spacing (nominal)], 7 : [7 - gill-size (nominal)], 8 : [8 - gill-color (nominal)], 9 : [9 - stalk-shape (nominal)], 10 : [10 - stalk-root (nominal)], 11 : [11 - stalk-surface-above-ring (nominal)], 12 : [12 - stalk-surface-below-ring (nominal)], 13 : [13 - stalk-color-above-ring (nominal)], 14 : [14 - stalk-color-below-ring (nominal)], 15 : [15 - veil-type (nominal)], 16 : [16 - veil-color (nominal)], 17 : [17 - ring-number (nominal)], 18 : [18 - ring-type (nominal)], 19 : [19 - spore-print-color (nominal)], 20 : [20 - population (nominal)], 21 : [21 - habitat (nominal)], 22 : [22 - class (nominal)],', 'format': 'ARFF', 'name': 'mushroom', 'qualities': 'AutoCorrelation : 0.726332635725717, CfsSubsetEval_DecisionStumpAUC : 0.9910519616800724, CfsSubsetEval_DecisionStumpErrRate : 0.013047759724273756, CfsSubsetEval_DecisionStumpKappa : 0.9738461616958994, CfsSubsetEval_NaiveBayesAUC : 0.9910519616800724, CfsSubsetEval_NaiveBayesErrRate : 0.013047759724273756, CfsSubsetEval_NaiveBayesKappa : 0.9738461616958994, CfsSubsetEval_kNN1NAUC : 0.9910519616800724, CfsSubsetEval_kNN1NErrRate : 0.013047759724273756, CfsSubsetEval_kNN1NKappa : 0.9738461616958994, ClassEntropy : 0.9990678968724604, DecisionStumpAUC : 0.8894935275772204, DecisionStumpErrRate : 0.11324470704086657, DecisionStumpKappa : 0.77457574608175, Dimensionality : 0.002831117676021664, EquivalentNumberOfAtts : 5.0393135801657, J48.00001.AUC : 1.0, J48.00001.ErrRate : 0.0, J48.00001.Kappa : 
1.0, J48.0001.AUC : 1.0, J48.0001.ErrRate : 0.0, J48.0001.Kappa : 1.0, J48.001.AUC : 1.0, J48.001.ErrRate : 0.0, J48.001.Kappa : 1.0, MajorityClassPercentage : 51.7971442639094, MajorityClassSize : 4208.0, MaxAttributeEntropy : 3.030432883772633, MaxKurtosisOfNumericAtts : nan, MaxMeansOfNumericAtts : nan, MaxMutualInformation : 0.906074977384, MaxNominalAttDistinctValues : 12.0, MaxSkewnessOfNumericAtts : nan, MaxStdDevOfNumericAtts : nan, MeanAttributeEntropy : 1.4092554739602103, MeanKurtosisOfNumericAtts : nan, MeanMeansOfNumericAtts : nan, MeanMutualInformation : 0.19825475850613955, MeanNoiseToSignalRatio : 6.108305922031972, MeanNominalAttDistinctValues : 5.130434782608695, MeanSkewnessOfNumericAtts : nan, MeanStdDevOfNumericAtts : nan, MinAttributeEntropy : -0.0, MinKurtosisOfNumericAtts : nan, MinMeansOfNumericAtts : nan, MinMutualInformation : 0.0, MinNominalAttDistinctValues : 1.0, MinSkewnessOfNumericAtts : nan, MinStdDevOfNumericAtts : nan, MinorityClassPercentage : 48.20285573609059, MinorityClassSize : 3916.0, NaiveBayesAUC : 0.9976229672941662, NaiveBayesErrRate : 0.04899064500246184, NaiveBayesKappa : 0.9015972799616292, NumberOfBinaryFeatures : 5.0, NumberOfClasses : 2.0, NumberOfFeatures : 23.0, NumberOfInstances : 8124.0, NumberOfInstancesWithMissingValues : 2480.0, NumberOfMissingValues : 2480.0, NumberOfNumericFeatures : 0.0, NumberOfSymbolicFeatures : 23.0, PercentageOfBinaryFeatures : 21.73913043478261, PercentageOfInstancesWithMissingValues : 30.526834071885773, PercentageOfMissingValues : 1.3272536552993814, PercentageOfNumericFeatures : 0.0, PercentageOfSymbolicFeatures : 100.0, Quartile1AttributeEntropy : 0.8286618104993447, Quartile1KurtosisOfNumericAtts : nan, Quartile1MeansOfNumericAtts : nan, Quartile1MutualInformation : 0.034184520425602494, Quartile1SkewnessOfNumericAtts : nan, Quartile1StdDevOfNumericAtts : nan, Quartile2AttributeEntropy : 1.467128011861462, Quartile2KurtosisOfNumericAtts : nan, Quartile2MeansOfNumericAtts : nan, 
Quartile2MutualInformation : 0.174606545183155, Quartile2SkewnessOfNumericAtts : nan, Quartile2StdDevOfNumericAtts : nan, Quartile3AttributeEntropy : 2.0533554351937426, Quartile3KurtosisOfNumericAtts : nan, Quartile3MeansOfNumericAtts : nan, Quartile3MutualInformation : 0.27510225484918505, Quartile3SkewnessOfNumericAtts : nan, Quartile3StdDevOfNumericAtts : nan, REPTreeDepth1AUC : 0.9999987256143267, REPTreeDepth1ErrRate : 0.00036927621861152144, REPTreeDepth1Kappa : 0.9992605118549308, REPTreeDepth2AUC : 0.9999987256143267, REPTreeDepth2ErrRate : 0.00036927621861152144, REPTreeDepth2Kappa : 0.9992605118549308, REPTreeDepth3AUC : 0.9999987256143267, REPTreeDepth3ErrRate : 0.00036927621861152144, REPTreeDepth3Kappa : 0.9992605118549308, RandomTreeDepth1AUC : 0.9995247148288974, RandomTreeDepth1ErrRate : 0.0004923682914820286, RandomTreeDepth1Kappa : 0.9990140245420991, RandomTreeDepth2AUC : 0.9995247148288974, RandomTreeDepth2ErrRate : 0.0004923682914820286, RandomTreeDepth2Kappa : 0.9990140245420991, RandomTreeDepth3AUC : 0.9995247148288974, RandomTreeDepth3ErrRate : 0.0004923682914820286, RandomTreeDepth3Kappa : 0.9990140245420991, StdvNominalAttDistinctValues : 3.1809710899501766, kNN1NAUC : 1.0, kNN1NErrRate : 0.0, kNN1NKappa : 1.0,', 'status': 'active', 'uploader': 1, 'version': 1}, page_content='did - 24, name - mushroom, version - 1, uploader - 1, status - active, format - ARFF, MajorityClassSize - 4208.0, MaxNominalAttDistinctValues - 12.0, MinorityClassSize - 3916.0, NumberOfClasses - 2.0, NumberOfFeatures - 23.0, NumberOfInstances - 8124.0, NumberOfInstancesWithMissingValues - 2480.0, NumberOfMissingValues - 2480.0, NumberOfNumericFeatures - 0.0, NumberOfSymbolicFeatures - 23.0, description - **Author**: [Jeff Schlimmer](Jeffrey.Schlimmer@a.gp.cs.cmu.edu)  \n**Source**: [UCI](https://archive.ics.uci.edu/ml/datasets/mushroom) - 1981     \n**Please cite**:  The Audubon Society Field Guide to North American Mushrooms (1981). G. H. 
Lincoff (Pres.), New York: Alfred A. Knopf \n\n\n### Description\n\nThis dataset describes mushrooms in terms of their physical characteristics. They are classified into: poisonous or edible.'),
+ Document(metadata={'NumberOfClasses': 0.0, 'NumberOfFeatures': 37.0, 'NumberOfInstances': 6435.0, 'NumberOfInstancesWithMissingValues': 0.0, 'NumberOfMissingValues': 0.0, 'NumberOfNumericFeatures': 37.0, 'NumberOfSymbolicFeatures': 0.0, 'Unnamed: 0': 203, 'description': "**Author**:   \n**Source**: Unknown - 1993  \n**Please cite**:   \n\nSource:\nAshwin Srinivasan\nDepartment of Statistics and Data Modeling\nUniversity of Strathclyde\nGlasgow\nScotland\nUK\nross '@' uk.ac.turing\n\nThe original Landsat data for this database was generated from data purchased from NASA by the Australian Centre for Remote Sensing, and used for research at: \nThe Centre for Remote Sensing\nUniversity of New South Wales\nKensington, PO Box 1\nNSW 2033\nAustralia.\n\nThe sample database was generated taking a small section (82 rows and 100 columns) from the original data. The binary values were converted to their present ASCII form by Ashwin Srinivasan. The classification for each pixel was performed on the basis of an actual site visit by Ms. Karen Hall, when working for Professor John A. Richards, at the Centre for Remote Sensing at the University of New South Wales, Australia. Conversion to 3x3 neighbourhoods and splitting into test and training sets was done by Alistair Sutherland.\n\nData Set Information:\nThe database consists of the multi-spectral values of pixels in 3x3 neighbourhoods in a satellite image, and the classification associated with the central pixel in each neighbourhood. The aim is to predict this classification, given the multi-spectral values. In the sample database, the class of a pixel is coded as a number. The Landsat satellite data is one of the many sources of information available for a scene. The interpretation of a scene by  integrating spatial data of diverse types and resolutions including multispectral and radar data, maps indicating topography, land use etc. 
is expected to assume significant importance with the onset of an era characterised by integrative approaches to remote sensing (for example, NASA's Earth Observing System commencing this decade). Existing statistical methods are ill-equipped for handling such diverse data types. Note that this is not true for Landsat MSS data considered in isolation (as in this sample database). This data satisfies the important requirements of being numerical and at a single resolution, and standard maximum-likelihood classification performs very well. Consequently, for this data, it should be interesting to compare the performance of other methods against the statistical approach. One frame of Landsat MSS imagery consists of four digital images of the same scene in different spectral bands. Two of these are in the visible region (corresponding approximately to green and red regions of the visible spectrum) and two are in the (near) infra-red. Each pixel is a 8-bit binary word, with 0 corresponding to black and 255 to white. The spatial resolution of a pixel is about 80m x 80m. Each image contains 2340 x 3380 such pixels. The database is a (tiny) sub-area of a scene, consisting of 82 x 100 pixels. Each line of data corresponds to a 3x3 square neighbourhood of pixels completely contained within the 82x100 sub-area. Each line contains the pixel values in the four spectral bands (converted to ASCII) of each of the 9 pixels in the 3x3 neighbourhood and a number indicating the classification label of the central pixel. The number is a code for the following classes:\n\nNumber Class\n1 red soil\n2 cotton crop\n3 grey soil\n4 damp grey soil\n5 soil with vegetation stubble\n6 mixture class (all types present)\n7 very damp grey soil\nNB. There are no examples with class 6 in this dataset.\n \nThe data is given in random order and certain lines of data have been removed so you cannot reconstruct the original image from this dataset. 
In each line of data the four spectral values for the top-left pixel are given first followed by the four spectral values for the top-middle pixel and then those for the top-right pixel, and so on with the pixels read out in sequence left-to-right and top-to-bottom. Thus, the four spectral values for the central pixel are given by attributes 17,18,19 and 20. If you like you can use only these four attributes, while ignoring the others. This avoids the problem which arises when a 3x3 neighbourhood straddles a boundary.\n\nAttribute Information:\nThe attributes are numerical, in the range 0 to 255.\n\nUCI: http://archive.ics.uci.edu/ml/datasets/Statlog+(Landsat+Satellite)", 'did': 294, 'features': '0 : [0 - attr1 (numeric)], 1 : [1 - attr2 (numeric)], 2 : [2 - attr3 (numeric)], 3 : [3 - attr4 (numeric)], 4 : [4 - attr5 (numeric)], 5 : [5 - attr6 (numeric)], 6 : [6 - attr7 (numeric)], 7 : [7 - attr8 (numeric)], 8 : [8 - attr9 (numeric)], 9 : [9 - attr10 (numeric)], 10 : [10 - attr11 (numeric)], 11 : [11 - attr12 (numeric)], 12 : [12 - attr13 (numeric)], 13 : [13 - attr14 (numeric)], 14 : [14 - attr15 (numeric)], 15 : [15 - attr16 (numeric)], 16 : [16 - attr17 (numeric)], 17 : [17 - attr18 (numeric)], 18 : [18 - attr19 (numeric)], 19 : [19 - attr20 (numeric)], 20 : [20 - attr21 (numeric)], 21 : [21 - attr22 (numeric)], 22 : [22 - attr23 (numeric)], 23 : [23 - attr24 (numeric)], 24 : [24 - attr25 (numeric)], 25 : [25 - attr26 (numeric)], 26 : [26 - attr27 (numeric)], 27 : [27 - attr28 (numeric)], 28 : [28 - attr29 (numeric)], 29 : [29 - attr30 (numeric)], 30 : [30 - attr31 (numeric)], 31 : [31 - attr32 (numeric)], 32 : [32 - attr33 (numeric)], 33 : [33 - attr34 (numeric)], 34 : [34 - attr35 (numeric)], 35 : [35 - attr36 (numeric)], 36 : [36 - class (numeric)],', 'format': 'ARFF', 'name': 'satellite_image', 'qualities': 'AutoCorrelation : 0.5853279452906435, CfsSubsetEval_DecisionStumpAUC : nan, CfsSubsetEval_DecisionStumpErrRate : nan, CfsSubsetEval_DecisionStumpKappa : 
nan, CfsSubsetEval_NaiveBayesAUC : nan, CfsSubsetEval_NaiveBayesErrRate : nan, CfsSubsetEval_NaiveBayesKappa : nan, CfsSubsetEval_kNN1NAUC : nan, CfsSubsetEval_kNN1NErrRate : nan, CfsSubsetEval_kNN1NKappa : nan, ClassEntropy : nan, DecisionStumpAUC : nan, DecisionStumpErrRate : nan, DecisionStumpKappa : nan, Dimensionality : 0.00574980574980575, EquivalentNumberOfAtts : nan, J48.00001.AUC : nan, J48.00001.ErrRate : nan, J48.00001.Kappa : nan, J48.0001.AUC : nan, J48.0001.ErrRate : nan, J48.0001.Kappa : nan, J48.001.AUC : nan, J48.001.ErrRate : nan, J48.001.Kappa : nan, MajorityClassPercentage : nan, MajorityClassSize : nan, MaxAttributeEntropy : nan, MaxKurtosisOfNumericAtts : 1.2773432544146832, MaxMeansOfNumericAtts : 99.31126651126642, MaxMutualInformation : nan, MaxNominalAttDistinctValues : nan, MaxSkewnessOfNumericAtts : 0.9187090836988436, MaxStdDevOfNumericAtts : 22.90506492772991, MeanAttributeEntropy : nan, MeanKurtosisOfNumericAtts : -0.18345361023395665, MeanMeansOfNumericAtts : 81.3149961149961, MeanMutualInformation : nan, MeanNoiseToSignalRatio : nan, MeanNominalAttDistinctValues : nan, MeanSkewnessOfNumericAtts : 0.04831449741968043, MeanStdDevOfNumericAtts : 17.586070075450067, MinAttributeEntropy : nan, MinKurtosisOfNumericAtts : -1.2441720904806828, MinMeansOfNumericAtts : 3.6686868686868834, MinMutualInformation : nan, MinNominalAttDistinctValues : nan, MinSkewnessOfNumericAtts : -0.6747275074215006, MinStdDevOfNumericAtts : 2.214052121287819, MinorityClassPercentage : nan, MinorityClassSize : nan, NaiveBayesAUC : nan, NaiveBayesErrRate : nan, NaiveBayesKappa : nan, NumberOfBinaryFeatures : 0.0, NumberOfClasses : 0.0, NumberOfFeatures : 37.0, NumberOfInstances : 6435.0, NumberOfInstancesWithMissingValues : 0.0, NumberOfMissingValues : 0.0, NumberOfNumericFeatures : 37.0, NumberOfSymbolicFeatures : 0.0, PercentageOfBinaryFeatures : 0.0, PercentageOfInstancesWithMissingValues : 0.0, PercentageOfMissingValues : 0.0, PercentageOfNumericFeatures : 
100.0, PercentageOfSymbolicFeatures : 0.0, Quartile1AttributeEntropy : nan, Quartile1KurtosisOfNumericAtts : -0.8829551820521702, Quartile1MeansOfNumericAtts : 69.34483294483297, Quartile1MutualInformation : nan, Quartile1SkewnessOfNumericAtts : -0.3859749826493584, Quartile1StdDevOfNumericAtts : 13.604282494809674, Quartile2AttributeEntropy : nan, Quartile2KurtosisOfNumericAtts : -0.6732423440004554, Quartile2MeansOfNumericAtts : 82.66060606060603, Quartile2MutualInformation : nan, Quartile2SkewnessOfNumericAtts : 0.02239958092752799, Quartile2StdDevOfNumericAtts : 16.729622667298376, Quartile3AttributeEntropy : nan, Quartile3KurtosisOfNumericAtts : 0.5035049254688353, Quartile3MeansOfNumericAtts : 91.22408702408694, Quartile3MutualInformation : nan, Quartile3SkewnessOfNumericAtts : 0.6162940189640502, Quartile3StdDevOfNumericAtts : 20.936744304390697, REPTreeDepth1AUC : nan, REPTreeDepth1ErrRate : nan, REPTreeDepth1Kappa : nan, REPTreeDepth2AUC : nan, REPTreeDepth2ErrRate : nan, REPTreeDepth2Kappa : nan, REPTreeDepth3AUC : nan, REPTreeDepth3ErrRate : nan, REPTreeDepth3Kappa : nan, RandomTreeDepth1AUC : nan, RandomTreeDepth1ErrRate : nan, RandomTreeDepth1Kappa : nan, RandomTreeDepth2AUC : nan, RandomTreeDepth2ErrRate : nan, RandomTreeDepth2Kappa : nan, RandomTreeDepth3AUC : nan, RandomTreeDepth3ErrRate : nan, RandomTreeDepth3Kappa : nan, StdvNominalAttDistinctValues : nan, kNN1NAUC : nan, kNN1NErrRate : nan, kNN1NKappa : nan,', 'status': 'active', 'uploader': 94, 'version': 1}, page_content='Data Set Information:'),
+ Document(metadata={'MajorityClassSize': 518298.0, 'MaxNominalAttDistinctValues': 12.0, 'MinorityClassSize': 481702.0, 'NumberOfClasses': 2.0, 'NumberOfFeatures': 23.0, 'NumberOfInstances': 1000000.0, 'NumberOfInstancesWithMissingValues': 0.0, 'NumberOfMissingValues': 0.0, 'NumberOfNumericFeatures': 0.0, 'NumberOfSymbolicFeatures': 23.0, 'Unnamed: 0': 68, 'did': 120, 'features': '0 : [0 - cap-shape (nominal)], 1 : [1 - cap-surface (nominal)], 2 : [2 - cap-color (nominal)], 3 : [3 - bruises%3F (nominal)], 4 : [4 - odor (nominal)], 5 : [5 - gill-attachment (nominal)], 6 : [6 - gill-spacing (nominal)], 7 : [7 - gill-size (nominal)], 8 : [8 - gill-color (nominal)], 9 : [9 - stalk-shape (nominal)], 10 : [10 - stalk-root (nominal)], 11 : [11 - stalk-surface-above-ring (nominal)], 12 : [12 - stalk-surface-below-ring (nominal)], 13 : [13 - stalk-color-above-ring (nominal)], 14 : [14 - stalk-color-below-ring (nominal)], 15 : [15 - veil-type (nominal)], 16 : [16 - veil-color (nominal)], 17 : [17 - ring-number (nominal)], 18 : [18 - ring-type (nominal)], 19 : [19 - spore-print-color (nominal)], 20 : [20 - population (nominal)], 21 : [21 - habitat (nominal)], 22 : [22 - class (nominal)],', 'format': 'ARFF', 'name': 'BNG(mushroom)', 'qualities': 'AutoCorrelation : 0.5011905011905012, CfsSubsetEval_DecisionStumpAUC : 0.9847860299226502, CfsSubsetEval_DecisionStumpErrRate : 0.021824, CfsSubsetEval_DecisionStumpKappa : 0.9562780842181652, CfsSubsetEval_NaiveBayesAUC : 0.9847860299226502, CfsSubsetEval_NaiveBayesErrRate : 0.021824, CfsSubsetEval_NaiveBayesKappa : 0.9562780842181652, CfsSubsetEval_kNN1NAUC : 0.9847860299226502, CfsSubsetEval_kNN1NErrRate : 0.021824, CfsSubsetEval_kNN1NKappa : 0.9562780842181652, ClassEntropy : 0.9990337071596953, DecisionStumpAUC : 0.8815512935166292, DecisionStumpErrRate : 0.121245, DecisionStumpKappa : 0.7587911383829151, Dimensionality : 2.3e-05, EquivalentNumberOfAtts : 6.097271107545528, J48.00001.AUC : 0.9962742048687271, J48.00001.ErrRate : 
0.007847, J48.00001.Kappa : 0.9842850236101645, J48.0001.AUC : 0.9962742048687271, J48.0001.ErrRate : 0.007847, J48.0001.Kappa : 0.9842850236101645, J48.001.AUC : 0.9962742048687271, J48.001.ErrRate : 0.007847, J48.001.Kappa : 0.9842850236101645, MajorityClassPercentage : 51.829800000000006, MajorityClassSize : 518298.0, MaxAttributeEntropy : 3.0845637992777144, MaxKurtosisOfNumericAtts : nan, MaxMeansOfNumericAtts : nan, MaxMutualInformation : 0.84128137803192, MaxNominalAttDistinctValues : 12.0, MaxSkewnessOfNumericAtts : nan, MaxStdDevOfNumericAtts : nan, MeanAttributeEntropy : 1.5385002552906082, MeanKurtosisOfNumericAtts : nan, MeanMeansOfNumericAtts : nan, MeanMutualInformation : 0.16384931710242728, MeanNoiseToSignalRatio : 8.389726380909137, MeanNominalAttDistinctValues : 5.521739130434782, MeanSkewnessOfNumericAtts : nan, MeanStdDevOfNumericAtts : nan, MinAttributeEntropy : 0.0016183542115170931, MinKurtosisOfNumericAtts : nan, MinMeansOfNumericAtts : nan, MinMutualInformation : 1.10978079e-06, MinNominalAttDistinctValues : 2.0, MinSkewnessOfNumericAtts : nan, MinStdDevOfNumericAtts : nan, MinorityClassPercentage : 48.1702, MinorityClassSize : 481702.0, NaiveBayesAUC : 0.989456054908011, NaiveBayesErrRate : 0.072603, NaiveBayesKappa : 0.8540047016650592, NumberOfBinaryFeatures : 5.0, NumberOfClasses : 2.0, NumberOfFeatures : 23.0, NumberOfInstances : 1000000.0, NumberOfInstancesWithMissingValues : 0.0, NumberOfMissingValues : 0.0, NumberOfNumericFeatures : 0.0, NumberOfSymbolicFeatures : 23.0, PercentageOfBinaryFeatures : 21.73913043478261, PercentageOfInstancesWithMissingValues : 0.0, PercentageOfMissingValues : 0.0, PercentageOfNumericFeatures : 0.0, PercentageOfSymbolicFeatures : 100.0, Quartile1AttributeEntropy : 0.8684476271925594, Quartile1KurtosisOfNumericAtts : nan, Quartile1MeansOfNumericAtts : nan, Quartile1MutualInformation : 0.0261470876211225, Quartile1SkewnessOfNumericAtts : nan, Quartile1StdDevOfNumericAtts : nan, Quartile2AttributeEntropy : 
1.5540739508863595, Quartile2KurtosisOfNumericAtts : nan, Quartile2MeansOfNumericAtts : nan, Quartile2MutualInformation : 0.13087075005855, Quartile2SkewnessOfNumericAtts : nan, Quartile2StdDevOfNumericAtts : nan, Quartile3AttributeEntropy : 2.281038681528015, Quartile3KurtosisOfNumericAtts : nan, Quartile3MeansOfNumericAtts : nan, Quartile3MutualInformation : 0.2240629781340025, Quartile3SkewnessOfNumericAtts : nan, Quartile3StdDevOfNumericAtts : nan, REPTreeDepth1AUC : 0.9971920115811678, REPTreeDepth1ErrRate : 0.01052, REPTreeDepth1Kappa : 0.9789309934238616, REPTreeDepth2AUC : 0.9971920115811678, REPTreeDepth2ErrRate : 0.01052, REPTreeDepth2Kappa : 0.9789309934238616, REPTreeDepth3AUC : 0.9971920115811678, REPTreeDepth3ErrRate : 0.01052, REPTreeDepth3Kappa : 0.9789309934238616, RandomTreeDepth1AUC : 0.9815888004820784, RandomTreeDepth1ErrRate : 0.024243, RandomTreeDepth1Kappa : 0.9514421122524949, RandomTreeDepth2AUC : 0.9815888004820784, RandomTreeDepth2ErrRate : 0.024243, RandomTreeDepth2Kappa : 0.9514421122524949, RandomTreeDepth3AUC : 0.9815888004820784, RandomTreeDepth3ErrRate : 0.024243, RandomTreeDepth3Kappa : 0.9514421122524949, StdvNominalAttDistinctValues : 3.0580677978302706, kNN1NAUC : 0.9989058041456409, kNN1NErrRate : 0.011358, kNN1NKappa : 0.977249584712958,', 'status': 'active', 'uploader': 1, 'version': 1}, page_content='did - 120, name - BNG(mushroom), version - 1, uploader - 1, status - active, format - ARFF, MajorityClassSize - 518298.0, MaxNominalAttDistinctValues - 12.0, MinorityClassSize - 481702.0, NumberOfClasses - 2.0, NumberOfFeatures - 23.0, NumberOfInstances - 1000000.0, NumberOfInstancesWithMissingValues - 0.0, NumberOfMissingValues - 0.0, NumberOfNumericFeatures - 0.0, NumberOfSymbolicFeatures - 23.0, description - None, qualities - AutoCorrelation : 0.5011905011905012, CfsSubsetEval_DecisionStumpAUC : 0.9847860299226502, CfsSubsetEval_DecisionStumpErrRate : 0.021824, CfsSubsetEval_DecisionStumpKappa : 0.9562780842181652, 
CfsSubsetEval_NaiveBayesAUC : 0.9847860299226502, CfsSubsetEval_NaiveBayesErrRate : 0.021824, CfsSubsetEval_NaiveBayesKappa : 0.9562780842181652, CfsSubsetEval_kNN1NAUC : 0.9847860299226502, CfsSubsetEval_kNN1NErrRate : 0.021824, CfsSubsetEval_kNN1NKappa : 0.9562780842181652, ClassEntropy : 0.9990337071596953, DecisionStumpAUC : 0.8815512935166292, DecisionStumpErrRate :'),
+ Document(metadata={'MajorityClassSize': 518298.0, 'MaxNominalAttDistinctValues': 12.0, 'MinorityClassSize': 481702.0, 'NumberOfClasses': 2.0, 'NumberOfFeatures': 23.0, 'NumberOfInstances': 1000000.0, 'NumberOfInstancesWithMissingValues': 0.0, 'NumberOfMissingValues': 0.0, 'NumberOfNumericFeatures': 0.0, 'NumberOfSymbolicFeatures': 23.0, 'Unnamed: 0': 68, 'did': 120, 'features': '0 : [0 - cap-shape (nominal)], 1 : [1 - cap-surface (nominal)], 2 : [2 - cap-color (nominal)], 3 : [3 - bruises%3F (nominal)], 4 : [4 - odor (nominal)], 5 : [5 - gill-attachment (nominal)], 6 : [6 - gill-spacing (nominal)], 7 : [7 - gill-size (nominal)], 8 : [8 - gill-color (nominal)], 9 : [9 - stalk-shape (nominal)], 10 : [10 - stalk-root (nominal)], 11 : [11 - stalk-surface-above-ring (nominal)], 12 : [12 - stalk-surface-below-ring (nominal)], 13 : [13 - stalk-color-above-ring (nominal)], 14 : [14 - stalk-color-below-ring (nominal)], 15 : [15 - veil-type (nominal)], 16 : [16 - veil-color (nominal)], 17 : [17 - ring-number (nominal)], 18 : [18 - ring-type (nominal)], 19 : [19 - spore-print-color (nominal)], 20 : [20 - population (nominal)], 21 : [21 - habitat (nominal)], 22 : [22 - class (nominal)],', 'format': 'ARFF', 'name': 'BNG(mushroom)', 'qualities': 'AutoCorrelation : 0.5011905011905012, CfsSubsetEval_DecisionStumpAUC : 0.9847860299226502, CfsSubsetEval_DecisionStumpErrRate : 0.021824, CfsSubsetEval_DecisionStumpKappa : 0.9562780842181652, CfsSubsetEval_NaiveBayesAUC : 0.9847860299226502, CfsSubsetEval_NaiveBayesErrRate : 0.021824, CfsSubsetEval_NaiveBayesKappa : 0.9562780842181652, CfsSubsetEval_kNN1NAUC : 0.9847860299226502, CfsSubsetEval_kNN1NErrRate : 0.021824, CfsSubsetEval_kNN1NKappa : 0.9562780842181652, ClassEntropy : 0.9990337071596953, DecisionStumpAUC : 0.8815512935166292, DecisionStumpErrRate : 0.121245, DecisionStumpKappa : 0.7587911383829151, Dimensionality : 2.3e-05, EquivalentNumberOfAtts : 6.097271107545528, J48.00001.AUC : 0.9962742048687271, J48.00001.ErrRate : 
0.007847, J48.00001.Kappa : 0.9842850236101645, J48.0001.AUC : 0.9962742048687271, J48.0001.ErrRate : 0.007847, J48.0001.Kappa : 0.9842850236101645, J48.001.AUC : 0.9962742048687271, J48.001.ErrRate : 0.007847, J48.001.Kappa : 0.9842850236101645, MajorityClassPercentage : 51.829800000000006, MajorityClassSize : 518298.0, MaxAttributeEntropy : 3.0845637992777144, MaxKurtosisOfNumericAtts : nan, MaxMeansOfNumericAtts : nan, MaxMutualInformation : 0.84128137803192, MaxNominalAttDistinctValues : 12.0, MaxSkewnessOfNumericAtts : nan, MaxStdDevOfNumericAtts : nan, MeanAttributeEntropy : 1.5385002552906082, MeanKurtosisOfNumericAtts : nan, MeanMeansOfNumericAtts : nan, MeanMutualInformation : 0.16384931710242728, MeanNoiseToSignalRatio : 8.389726380909137, MeanNominalAttDistinctValues : 5.521739130434782, MeanSkewnessOfNumericAtts : nan, MeanStdDevOfNumericAtts : nan, MinAttributeEntropy : 0.0016183542115170931, MinKurtosisOfNumericAtts : nan, MinMeansOfNumericAtts : nan, MinMutualInformation : 1.10978079e-06, MinNominalAttDistinctValues : 2.0, MinSkewnessOfNumericAtts : nan, MinStdDevOfNumericAtts : nan, MinorityClassPercentage : 48.1702, MinorityClassSize : 481702.0, NaiveBayesAUC : 0.989456054908011, NaiveBayesErrRate : 0.072603, NaiveBayesKappa : 0.8540047016650592, NumberOfBinaryFeatures : 5.0, NumberOfClasses : 2.0, NumberOfFeatures : 23.0, NumberOfInstances : 1000000.0, NumberOfInstancesWithMissingValues : 0.0, NumberOfMissingValues : 0.0, NumberOfNumericFeatures : 0.0, NumberOfSymbolicFeatures : 23.0, PercentageOfBinaryFeatures : 21.73913043478261, PercentageOfInstancesWithMissingValues : 0.0, PercentageOfMissingValues : 0.0, PercentageOfNumericFeatures : 0.0, PercentageOfSymbolicFeatures : 100.0, Quartile1AttributeEntropy : 0.8684476271925594, Quartile1KurtosisOfNumericAtts : nan, Quartile1MeansOfNumericAtts : nan, Quartile1MutualInformation : 0.0261470876211225, Quartile1SkewnessOfNumericAtts : nan, Quartile1StdDevOfNumericAtts : nan, Quartile2AttributeEntropy : 
1.5540739508863595, Quartile2KurtosisOfNumericAtts : nan, Quartile2MeansOfNumericAtts : nan, Quartile2MutualInformation : 0.13087075005855, Quartile2SkewnessOfNumericAtts : nan, Quartile2StdDevOfNumericAtts : nan, Quartile3AttributeEntropy : 2.281038681528015, Quartile3KurtosisOfNumericAtts : nan, Quartile3MeansOfNumericAtts : nan, Quartile3MutualInformation : 0.2240629781340025, Quartile3SkewnessOfNumericAtts : nan, Quartile3StdDevOfNumericAtts : nan, REPTreeDepth1AUC : 0.9971920115811678, REPTreeDepth1ErrRate : 0.01052, REPTreeDepth1Kappa : 0.9789309934238616, REPTreeDepth2AUC : 0.9971920115811678, REPTreeDepth2ErrRate : 0.01052, REPTreeDepth2Kappa : 0.9789309934238616, REPTreeDepth3AUC : 0.9971920115811678, REPTreeDepth3ErrRate : 0.01052, REPTreeDepth3Kappa : 0.9789309934238616, RandomTreeDepth1AUC : 0.9815888004820784, RandomTreeDepth1ErrRate : 0.024243, RandomTreeDepth1Kappa : 0.9514421122524949, RandomTreeDepth2AUC : 0.9815888004820784, RandomTreeDepth2ErrRate : 0.024243, RandomTreeDepth2Kappa : 0.9514421122524949, RandomTreeDepth3AUC : 0.9815888004820784, RandomTreeDepth3ErrRate : 0.024243, RandomTreeDepth3Kappa : 0.9514421122524949, StdvNominalAttDistinctValues : 3.0580677978302706, kNN1NAUC : 0.9989058041456409, kNN1NErrRate : 0.011358, kNN1NKappa : 0.977249584712958,', 'status': 'active', 'uploader': 1, 'version': 1}, page_content='RandomTreeDepth3ErrRate : 0.024243, RandomTreeDepth3Kappa : 0.9514421122524949, StdvNominalAttDistinctValues : 3.0580677978302706, kNN1NAUC : 0.9989058041456409, kNN1NErrRate : 0.011358, kNN1NKappa : 0.977249584712958,, features - 0 : [0 - cap-shape (nominal)], 1 : [1 - cap-surface (nominal)], 2 : [2 - cap-color (nominal)], 3 : [3 - bruises%3F (nominal)], 4 : [4 - odor (nominal)], 5 : [5 - gill-attachment (nominal)], 6 : [6 - gill-spacing (nominal)], 7 : [7 - gill-size (nominal)], 8 : [8 - gill-color (nominal)], 9 : [9 - stalk-shape (nominal)], 10 : [10 - stalk-root (nominal)], 11 : [11 - stalk-surface-above-ring (nominal)], 12 
: [12 - stalk-surface-below-ring (nominal)], 13 : [13 - stalk-color-above-ring (nominal)], 14 : [14 - stalk-color-below-ring (nominal)], 15 : [15 - veil-type (nominal)], 16 : [16 - veil-color (nominal)], 17 : [17 - ring-number (nominal)], 18 : [18 - ring-type (nominal)], 19 : [19 - spore-print-color (nominal)], 20 : [20 - population (nominal)], 21 : [21 -'),
+ Document(metadata={'MajorityClassSize': 92.0, 'MaxNominalAttDistinctValues': 19.0, 'MinorityClassSize': 8.0, 'NumberOfClasses': 19.0, 'NumberOfFeatures': 36.0, 'NumberOfInstances': 683.0, 'NumberOfInstancesWithMissingValues': 121.0, 'NumberOfMissingValues': 2337.0, 'NumberOfNumericFeatures': 0.0, 'NumberOfSymbolicFeatures': 36.0, 'Unnamed: 0': 36, 'description': '**Author**: R.S. Michalski and R.L. Chilausky (Donors: Ming Tan & Jeff Schlimmer)  \n**Source**: [UCI](https://archive.ics.uci.edu/ml/datasets/Soybean+(Large)) - 1988  \n**Please cite**: R.S. Michalski and R.L. Chilausky "Learning by Being Told and Learning from Examples: An Experimental Comparison of the Two Methods of Knowledge Acquisition in the Context of Developing an Expert System for Soybean Disease Diagnosis", International Journal of Policy Analysis and Information Systems, Vol. 4, No. 2, 1980.  \n\n**Large Soybean Database**  \nThis is the large soybean database from the UCI repository, with its training and test database combined into a single file. \n\nThere are 19 classes, only the first 15 of which have been used in prior work. The folklore seems to be that the last four classes are unjustified by the data since they have so few examples. There are 35 categorical attributes, some nominal and some ordered. The value \'dna\' means does not apply. The values for attributes are encoded numerically, with the first value encoded as "0,\'\' the second as "1,\'\' and so forth. An unknown value is encoded as "?\'\'.\n\n### Attribute Information\n\n1. date: april,may,june,july,august,september,october,?. \n2. plant-stand: normal,lt-normal,?. \n3. precip: lt-norm,norm,gt-norm,?. \n4. temp: lt-norm,norm,gt-norm,?. \n5. hail: yes,no,?. \n6. crop-hist: diff-lst-year,same-lst-yr,same-lst-two-yrs, \nsame-lst-sev-yrs,?. \n7. area-damaged: scattered,low-areas,upper-areas,whole-field,?. \n8. severity: minor,pot-severe,severe,?. \n9. seed-tmt: none,fungicide,other,?. \n10. germination: 90-100%,80-89%,lt-80%,?. 
\n11. plant-growth: norm,abnorm,?. \n12. leaves: norm,abnorm. \n13. leafspots-halo: absent,yellow-halos,no-yellow-halos,?. \n14. leafspots-marg: w-s-marg,no-w-s-marg,dna,?. \n15. leafspot-size: lt-1/8,gt-1/8,dna,?. \n16. leaf-shread: absent,present,?. \n17. leaf-malf: absent,present,?. \n18. leaf-mild: absent,upper-surf,lower-surf,?. \n19. stem: norm,abnorm,?. \n20. lodging: yes,no,?. \n21. stem-cankers: absent,below-soil,above-soil,above-sec-nde,?. \n22. canker-lesion: dna,brown,dk-brown-blk,tan,?. \n23. fruiting-bodies: absent,present,?. \n24. external decay: absent,firm-and-dry,watery,?. \n25. mycelium: absent,present,?. \n26. int-discolor: none,brown,black,?. \n27. sclerotia: absent,present,?. \n28. fruit-pods: norm,diseased,few-present,dna,?. \n29. fruit spots: absent,colored,brown-w/blk-specks,distort,dna,?. \n30. seed: norm,abnorm,?. \n31. mold-growth: absent,present,?. \n32. seed-discolor: absent,present,?. \n33. seed-size: norm,lt-norm,?. \n34. shriveling: absent,present,?. \n35. roots: norm,rotted,galls-cysts,?.\n\n### Classes \n\n-- 19 Classes = {diaporthe-stem-canker, charcoal-rot, rhizoctonia-root-rot, phytophthora-rot, brown-stem-rot, powdery-mildew, downy-mildew, brown-spot, bacterial-blight, bacterial-pustule, purple-seed-stain, anthracnose, phyllosticta-leaf-spot, alternarialeaf-spot, frog-eye-leaf-spot, diaporthe-pod-&-stem-blight, cyst-nematode, 2-4-d-injury, herbicide-injury} \n\n### Revelant papers\n\nTan, M., & Eshelman, L. (1988). Using weighted networks to represent classification knowledge in noisy domains. Proceedings of the Fifth International Conference on Machine Learning (pp. 121-134). Ann Arbor, Michigan: Morgan Kaufmann. \n\nFisher,D.H. & Schlimmer,J.C. (1988). Concept Simplification and Predictive Accuracy. Proceedings of the Fifth International Conference on Machine Learning (pp. 22-28). 
Ann Arbor, Michigan: Morgan Kaufmann.', 'did': 42, 'features': '0 : [0 - date (nominal)], 1 : [1 - plant-stand (nominal)], 2 : [2 - precip (nominal)], 3 : [3 - temp (nominal)], 4 : [4 - hail (nominal)], 5 : [5 - crop-hist (nominal)], 6 : [6 - area-damaged (nominal)], 7 : [7 - severity (nominal)], 8 : [8 - seed-tmt (nominal)], 9 : [9 - germination (nominal)], 10 : [10 - plant-growth (nominal)], 11 : [11 - leaves (nominal)], 12 : [12 - leafspots-halo (nominal)], 13 : [13 - leafspots-marg (nominal)], 14 : [14 - leafspot-size (nominal)], 15 : [15 - leaf-shread (nominal)], 16 : [16 - leaf-malf (nominal)], 17 : [17 - leaf-mild (nominal)], 18 : [18 - stem (nominal)], 19 : [19 - lodging (nominal)], 20 : [20 - stem-cankers (nominal)], 21 : [21 - canker-lesion (nominal)], 22 : [22 - fruiting-bodies (nominal)], 23 : [23 - external-decay (nominal)], 24 : [24 - mycelium (nominal)], 25 : [25 - int-discolor (nominal)], 26 : [26 - sclerotia (nominal)], 27 : [27 - fruit-pods (nominal)], 28 : [28 - fruit-spots (nominal)], 29 : [29 - seed (nominal)], 30 : [30 - mold-growth (nominal)], 31 : [31 - seed-discolor (nominal)], 32 : [32 - seed-size (nominal)], 33 : [33 - shriveling (nominal)], 34 : [34 - roots (nominal)], 35 : [35 - class (nominal)],', 'format': 'ARFF', 'name': 'soybean', 'qualities': 'AutoCorrelation : 0.9457478005865103, CfsSubsetEval_DecisionStumpAUC : 0.9620422408823379, CfsSubsetEval_DecisionStumpErrRate : 0.13323572474377746, CfsSubsetEval_DecisionStumpKappa : 0.8534752853145238, CfsSubsetEval_NaiveBayesAUC : 0.9620422408823379, CfsSubsetEval_NaiveBayesErrRate : 0.13323572474377746, CfsSubsetEval_NaiveBayesKappa : 0.8534752853145238, CfsSubsetEval_kNN1NAUC : 0.9620422408823379, CfsSubsetEval_kNN1NErrRate : 0.13323572474377746, CfsSubsetEval_kNN1NKappa : 0.8534752853145238, ClassEntropy : 3.83550798457672, DecisionStumpAUC : 0.8099631489104341, DecisionStumpErrRate : 0.7203513909224012, DecisionStumpKappa : 0.19424522533539545, Dimensionality : 0.0527086383601757, 
EquivalentNumberOfAtts : 7.508591767241043, J48.00001.AUC : 0.9739047068470593, J48.00001.ErrRate : 0.12152269399707175, J48.00001.Kappa : 0.8663370421980624, J48.0001.AUC : 0.9739047068470593, J48.0001.ErrRate : 0.12152269399707175, J48.0001.Kappa : 0.8663370421980624, J48.001.AUC : 0.9739047068470593, J48.001.ErrRate : 0.12152269399707175, J48.001.Kappa : 0.8663370421980624, MajorityClassPercentage : 13.469985358711567, MajorityClassSize : 92.0, MaxAttributeEntropy : 2.6849389644492594, MaxKurtosisOfNumericAtts : nan, MaxMeansOfNumericAtts : nan, MaxMutualInformation : 1.28692474762189, MaxNominalAttDistinctValues : 19.0, MaxSkewnessOfNumericAtts : nan, MaxStdDevOfNumericAtts : nan, MeanAttributeEntropy : 0.9655890619117928, MeanKurtosisOfNumericAtts : nan, MeanMeansOfNumericAtts : nan, MeanMutualInformation : 0.5108158897798274, MeanNoiseToSignalRatio : 0.8902878340922058, MeanNominalAttDistinctValues : 3.2777777777777777, MeanSkewnessOfNumericAtts : nan, MeanStdDevOfNumericAtts : nan, MinAttributeEntropy : 0.07262476248540556, MinKurtosisOfNumericAtts : nan, MinMeansOfNumericAtts : nan, MinMutualInformation : 0.0468182939867, MinNominalAttDistinctValues : 2.0, MinSkewnessOfNumericAtts : nan, MinStdDevOfNumericAtts : nan, MinorityClassPercentage : 1.171303074670571, MinorityClassSize : 8.0, NaiveBayesAUC : 0.9921587580230303, NaiveBayesErrRate : 0.08931185944363104, NaiveBayesKappa : 0.9019654903843212, NumberOfBinaryFeatures : 16.0, NumberOfClasses : 19.0, NumberOfFeatures : 36.0, NumberOfInstances : 683.0, NumberOfInstancesWithMissingValues : 121.0, NumberOfMissingValues : 2337.0, NumberOfNumericFeatures : 0.0, NumberOfSymbolicFeatures : 36.0, PercentageOfBinaryFeatures : 44.44444444444444, PercentageOfInstancesWithMissingValues : 17.71595900439239, PercentageOfMissingValues : 9.504636408003904, PercentageOfNumericFeatures : 0.0, PercentageOfSymbolicFeatures : 100.0, Quartile1AttributeEntropy : 0.4629328593168401, Quartile1KurtosisOfNumericAtts : nan, 
Quartile1MeansOfNumericAtts : nan, Quartile1MutualInformation : 0.26369905545327, Quartile1SkewnessOfNumericAtts : nan, Quartile1StdDevOfNumericAtts : nan, Quartile2AttributeEntropy : 0.9158362664344971, Quartile2KurtosisOfNumericAtts : nan, Quartile2MeansOfNumericAtts : nan, Quartile2MutualInformation : 0.45996721558355, Quartile2SkewnessOfNumericAtts : nan, Quartile2StdDevOfNumericAtts : nan, Quartile3AttributeEntropy : 1.408326420019514, Quartile3KurtosisOfNumericAtts : nan, Quartile3MeansOfNumericAtts : nan, Quartile3MutualInformation : 0.71879499353135, Quartile3SkewnessOfNumericAtts : nan, Quartile3StdDevOfNumericAtts : nan, REPTreeDepth1AUC : 0.9436075624852911, REPTreeDepth1ErrRate : 0.26500732064421667, REPTreeDepth1Kappa : 0.7052208643815201, REPTreeDepth2AUC : 0.9436075624852911, REPTreeDepth2ErrRate : 0.26500732064421667, REPTreeDepth2Kappa : 0.7052208643815201, REPTreeDepth3AUC : 0.9436075624852911, REPTreeDepth3ErrRate : 0.26500732064421667, REPTreeDepth3Kappa : 0.7052208643815201, RandomTreeDepth1AUC : 0.9035959879652148, RandomTreeDepth1ErrRate : 0.18594436310395315, RandomTreeDepth1Kappa : 0.7960191985250715, RandomTreeDepth2AUC : 0.9035959879652148, RandomTreeDepth2ErrRate : 0.18594436310395315, RandomTreeDepth2Kappa : 0.7960191985250715, RandomTreeDepth3AUC : 0.9035959879652148, RandomTreeDepth3ErrRate : 0.18594436310395315, RandomTreeDepth3Kappa : 0.7960191985250715, StdvNominalAttDistinctValues : 2.884551077834282, kNN1NAUC : 0.9616161058225481, kNN1NErrRate : 0.1171303074670571, kNN1NKappa : 0.871344781387376,', 'status': 'active', 'uploader': 1, 'version': 1}, page_content='### Classes \n\n-- 19 Classes = {diaporthe-stem-canker, charcoal-rot, rhizoctonia-root-rot, phytophthora-rot, brown-stem-rot, powdery-mildew, downy-mildew, brown-spot, bacterial-blight, bacterial-pustule, purple-seed-stain, anthracnose, phyllosticta-leaf-spot, alternarialeaf-spot, frog-eye-leaf-spot, diaporthe-pod-&-stem-blight, cyst-nematode, 2-4-d-injury, 
herbicide-injury} \n\n### Revelant papers\n\nTan, M., & Eshelman, L. (1988). Using weighted networks to represent classification knowledge in noisy domains. Proceedings of the Fifth International Conference on Machine Learning (pp. 121-134). Ann Arbor, Michigan: Morgan Kaufmann.'),
+ Document(metadata={'MajorityClassSize': 214.0, 'MaxNominalAttDistinctValues': 27.0, 'MinorityClassSize': 105.0, 'NumberOfClasses': 5.0, 'NumberOfFeatures': 20.0, 'NumberOfInstances': 736.0, 'NumberOfInstancesWithMissingValues': 95.0, 'NumberOfMissingValues': 448.0, 'NumberOfNumericFeatures': 14.0, 'NumberOfSymbolicFeatures': 6.0, 'Unnamed: 0': 123, 'description': "**Author**: Bruce Bulloch    \n**Source**: [WEKA Dataset Collection](http://www.cs.waikato.ac.nz/ml/weka/datasets.html) - part of the agridatasets archive. [This is the true source](http://tunedit.org/repo/Data/Agricultural/eucalyptus.arff)  \n**Please cite**: None  \n\n**Eucalyptus Soil Conservation**  \nThe objective was to determine which seedlots in a species are best for soil conservation in seasonally dry hill country. Determination is found by measurement of height, diameter by height, survival, and other contributing factors. \n \nIt is important to note that eucalypt trial methods changed over time; earlier trials included mostly 15 - 30cm tall seedling grown in peat plots and the later trials have included mostly three replications of eight trees grown. This change may contribute to less significant results.\n\nExperimental data recording procedures which require noting include:\n - instances with no data recorded due to experimental recording procedures\n   require that the absence of a species from one replicate at a site was\n   treated as a missing value, but if absent from two or more replicates at a\n   site the species was excluded from the site's analyses.\n - missing data for survival, vigour, insect resistance, stem form, crown form\n   and utility especially for the data recorded at the Morea Station; this \n   could indicate the death of species in these areas or a lack in collection\n   of data.  \n\n### Attribute Information  \n \n  1.  Abbrev - site abbreviation - enumerated\n  2.  Rep - site rep - integer\n  3.  Locality - site locality in the North Island - enumerated\n  4.  
Map_Ref - map location in the North Island - enumerated\n  5.  Latitude - latitude approximation - enumerated\n  6.  Altitude - altitude approximation - integer\n  7.  Rainfall - rainfall (mm pa) - integer\n  8.  Frosts - frosts (deg. c) - integer\n  9.  Year - year of planting - integer\n  10. Sp - species code - enumerated\n  11. PMCno - seedlot number - integer\n  12. DBH - best diameter base height (cm) - real\n  13. Ht - height (m) - real\n  14. Surv - survival - integer\n  15. Vig - vigour - real\n  16. Ins_res - insect resistance - real\n  17. Stem_Fm - stem form - real\n  18. Crown_Fm - crown form - real\n  19. Brnch_Fm - branch form - real\n  Class:\n  20. Utility - utility rating - enumerated\n\n### Relevant papers\n\nBulluch B. T., (1992) Eucalyptus Species Selection for Soil Conservation in Seasonally Dry Hill Country - Twelfth Year Assessment  New Zealand Journal of Forestry Science 21(1): 10 - 31 (1991)  \n\nKirsten Thomson and Robert J. McQueen (1996) Machine Learning Applied to Fourteen Agricultural Datasets. 
University of Waikato Research Report  \nhttps://www.cs.waikato.ac.nz/ml/publications/1996/Thomson-McQueen-96.pdf + the original publication:", 'did': 188, 'features': '0 : [0 - Abbrev (nominal)], 1 : [1 - Rep (numeric)], 2 : [2 - Locality (nominal)], 3 : [3 - Map_Ref (nominal)], 4 : [4 - Latitude (nominal)], 5 : [5 - Altitude (numeric)], 6 : [6 - Rainfall (numeric)], 7 : [7 - Frosts (numeric)], 8 : [8 - Year (numeric)], 9 : [9 - Sp (nominal)], 10 : [10 - PMCno (numeric)], 11 : [11 - DBH (numeric)], 12 : [12 - Ht (numeric)], 13 : [13 - Surv (numeric)], 14 : [14 - Vig (numeric)], 15 : [15 - Ins_res (numeric)], 16 : [16 - Stem_Fm (numeric)], 17 : [17 - Crown_Fm (numeric)], 18 : [18 - Brnch_Fm (numeric)], 19 : [19 - Utility (nominal)],', 'format': 'ARFF', 'name': 'eucalyptus', 'qualities': 'AutoCorrelation : 0.39319727891156464, CfsSubsetEval_DecisionStumpAUC : 0.8239493966657213, CfsSubsetEval_DecisionStumpErrRate : 0.41847826086956524, CfsSubsetEval_DecisionStumpKappa : 0.4637307109078737, CfsSubsetEval_NaiveBayesAUC : 0.8239493966657213, CfsSubsetEval_NaiveBayesErrRate : 0.41847826086956524, CfsSubsetEval_NaiveBayesKappa : 0.4637307109078737, CfsSubsetEval_kNN1NAUC : 0.8239493966657213, CfsSubsetEval_kNN1NErrRate : 0.41847826086956524, CfsSubsetEval_kNN1NKappa : 0.4637307109078737, ClassEntropy : 2.262083620428274, DecisionStumpAUC : 0.7519401667350958, DecisionStumpErrRate : 0.5054347826086957, DecisionStumpKappa : 0.30247986100142155, Dimensionality : 0.02717391304347826, EquivalentNumberOfAtts : 5.9334684401020565, J48.00001.AUC : 0.8184137151683228, J48.00001.ErrRate : 0.3967391304347826, J48.00001.Kappa : 0.49336985707179887, J48.0001.AUC : 0.8184137151683228, J48.0001.ErrRate : 0.3967391304347826, J48.0001.Kappa : 0.49336985707179887, J48.001.AUC : 0.8184137151683228, J48.001.ErrRate : 0.3967391304347826, J48.001.Kappa : 0.49336985707179887, MajorityClassPercentage : 29.076086956521742, MajorityClassSize : 214.0, MaxAttributeEntropy : 4.2373637557635595, 
MaxKurtosisOfNumericAtts : 734.9416211795777, MaxMeansOfNumericAtts : 2054.7393689986247, MaxMutualInformation : 0.42753276429854, MaxNominalAttDistinctValues : 27.0, MaxSkewnessOfNumericAtts : 27.109270846229688, MaxStdDevOfNumericAtts : 1551.7798185802085, MeanAttributeEntropy : 3.4626363060529055, MeanKurtosisOfNumericAtts : 62.86596625813314, MeanMeansOfNumericAtts : 390.0868288072735, MeanMutualInformation : 0.381241367214446, MeanNoiseToSignalRatio : 8.082530396301912, MeanNominalAttDistinctValues : 13.666666666666666, MeanSkewnessOfNumericAtts : 2.551453016115177, MeanStdDevOfNumericAtts : 172.61081562461396, MinAttributeEntropy : 2.5810641739409617, MinKurtosisOfNumericAtts : -1.887802596870339, MinMeansOfNumericAtts : -2.5842391304347836, MinMutualInformation : 0.24650313929826, MinNominalAttDistinctValues : 5.0, MinSkewnessOfNumericAtts : -0.6970908724266737, MinStdDevOfNumericAtts : 0.49318784476285216, MinorityClassPercentage : 14.266304347826086, MinorityClassSize : 105.0, NaiveBayesAUC : 0.8520788174118736, NaiveBayesErrRate : 0.45108695652173914, NaiveBayesKappa : 0.42741183362624485, NumberOfBinaryFeatures : 0.0, NumberOfClasses : 5.0, NumberOfFeatures : 20.0, NumberOfInstances : 736.0, NumberOfInstancesWithMissingValues : 95.0, NumberOfMissingValues : 448.0, NumberOfNumericFeatures : 14.0, NumberOfSymbolicFeatures : 6.0, PercentageOfBinaryFeatures : 0.0, PercentageOfInstancesWithMissingValues : 12.907608695652172, PercentageOfMissingValues : 3.0434782608695654, PercentageOfNumericFeatures : 70.0, PercentageOfSymbolicFeatures : 30.0, Quartile1AttributeEntropy : 2.908861461974274, Quartile1KurtosisOfNumericAtts : -0.4961422376730956, Quartile1MeansOfNumericAtts : 2.882908545727137, Quartile1MutualInformation : 0.323312362530555, Quartile1SkewnessOfNumericAtts : -0.3960800165047112, Quartile1StdDevOfNumericAtts : 0.778502789181291, Quartile2AttributeEntropy : 3.4759821137655975, Quartile2KurtosisOfNumericAtts : 0.4289115082721384, 
Quartile2MeansOfNumericAtts : 6.249602617058818, Quartile2MutualInformation : 0.40765214345173, Quartile2SkewnessOfNumericAtts : 0.11119478923130877, Quartile2StdDevOfNumericAtts : 1.3465996573398586, Quartile3AttributeEntropy : 4.009738246275191, Quartile3KurtosisOfNumericAtts : 1.3641239688052669, Quartile3MeansOfNumericAtts : 403.0027173913041, Quartile3MutualInformation : 0.425964983779695, Quartile3SkewnessOfNumericAtts : 0.9548948008878528, Quartile3StdDevOfNumericAtts : 80.61760056258042, REPTreeDepth1AUC : 0.7171370640805235, REPTreeDepth1ErrRate : 0.5557065217391305, REPTreeDepth1Kappa : 0.2672017371533179, REPTreeDepth2AUC : 0.7171370640805235, REPTreeDepth2ErrRate : 0.5557065217391305, REPTreeDepth2Kappa : 0.2672017371533179, REPTreeDepth3AUC : 0.7171370640805235, REPTreeDepth3ErrRate : 0.5557065217391305, REPTreeDepth3Kappa : 0.2672017371533179, RandomTreeDepth1AUC : 0.7219508813313532, RandomTreeDepth1ErrRate : 0.47690217391304346, RandomTreeDepth1Kappa : 0.3915134670419616, RandomTreeDepth2AUC : 0.7219508813313532, RandomTreeDepth2ErrRate : 0.47690217391304346, RandomTreeDepth2Kappa : 0.3915134670419616, RandomTreeDepth3AUC : 0.7219508813313532, RandomTreeDepth3ErrRate : 0.47690217391304346, RandomTreeDepth3Kappa : 0.3915134670419616, StdvNominalAttDistinctValues : 7.659416862050705, kNN1NAUC : 0.7018152602695222, kNN1NErrRate : 0.46603260869565216, kNN1NKappa : 0.40228622299671357,', 'status': 'active', 'uploader': 1, 'version': 1}, page_content='Kirsten Thomson and Robert J. McQueen (1996) Machine Learning Applied to Fourteen Agricultural Datasets. University of Waikato Research Report'),
+ Document(metadata={'MajorityClassSize': 92.0, 'MaxNominalAttDistinctValues': 19.0, 'MinorityClassSize': 8.0, 'NumberOfClasses': 19.0, 'NumberOfFeatures': 36.0, 'NumberOfInstances': 683.0, 'NumberOfInstancesWithMissingValues': 121.0, 'NumberOfMissingValues': 2337.0, 'NumberOfNumericFeatures': 0.0, 'NumberOfSymbolicFeatures': 36.0, 'Unnamed: 0': 36, 'description': '**Author**: R.S. Michalski and R.L. Chilausky (Donors: Ming Tan & Jeff Schlimmer)  \n**Source**: [UCI](https://archive.ics.uci.edu/ml/datasets/Soybean+(Large)) - 1988  \n**Please cite**: R.S. Michalski and R.L. Chilausky "Learning by Being Told and Learning from Examples: An Experimental Comparison of the Two Methods of Knowledge Acquisition in the Context of Developing an Expert System for Soybean Disease Diagnosis", International Journal of Policy Analysis and Information Systems, Vol. 4, No. 2, 1980.  \n\n**Large Soybean Database**  \nThis is the large soybean database from the UCI repository, with its training and test database combined into a single file. \n\nThere are 19 classes, only the first 15 of which have been used in prior work. The folklore seems to be that the last four classes are unjustified by the data since they have so few examples. There are 35 categorical attributes, some nominal and some ordered. The value \'dna\' means does not apply. The values for attributes are encoded numerically, with the first value encoded as "0,\'\' the second as "1,\'\' and so forth. An unknown value is encoded as "?\'\'.\n\n### Attribute Information\n\n1. date: april,may,june,july,august,september,october,?. \n2. plant-stand: normal,lt-normal,?. \n3. precip: lt-norm,norm,gt-norm,?. \n4. temp: lt-norm,norm,gt-norm,?. \n5. hail: yes,no,?. \n6. crop-hist: diff-lst-year,same-lst-yr,same-lst-two-yrs, \nsame-lst-sev-yrs,?. \n7. area-damaged: scattered,low-areas,upper-areas,whole-field,?. \n8. severity: minor,pot-severe,severe,?. \n9. seed-tmt: none,fungicide,other,?. \n10. germination: 90-100%,80-89%,lt-80%,?. 
\n11. plant-growth: norm,abnorm,?. \n12. leaves: norm,abnorm. \n13. leafspots-halo: absent,yellow-halos,no-yellow-halos,?. \n14. leafspots-marg: w-s-marg,no-w-s-marg,dna,?. \n15. leafspot-size: lt-1/8,gt-1/8,dna,?. \n16. leaf-shread: absent,present,?. \n17. leaf-malf: absent,present,?. \n18. leaf-mild: absent,upper-surf,lower-surf,?. \n19. stem: norm,abnorm,?. \n20. lodging: yes,no,?. \n21. stem-cankers: absent,below-soil,above-soil,above-sec-nde,?. \n22. canker-lesion: dna,brown,dk-brown-blk,tan,?. \n23. fruiting-bodies: absent,present,?. \n24. external decay: absent,firm-and-dry,watery,?. \n25. mycelium: absent,present,?. \n26. int-discolor: none,brown,black,?. \n27. sclerotia: absent,present,?. \n28. fruit-pods: norm,diseased,few-present,dna,?. \n29. fruit spots: absent,colored,brown-w/blk-specks,distort,dna,?. \n30. seed: norm,abnorm,?. \n31. mold-growth: absent,present,?. \n32. seed-discolor: absent,present,?. \n33. seed-size: norm,lt-norm,?. \n34. shriveling: absent,present,?. \n35. roots: norm,rotted,galls-cysts,?.\n\n### Classes \n\n-- 19 Classes = {diaporthe-stem-canker, charcoal-rot, rhizoctonia-root-rot, phytophthora-rot, brown-stem-rot, powdery-mildew, downy-mildew, brown-spot, bacterial-blight, bacterial-pustule, purple-seed-stain, anthracnose, phyllosticta-leaf-spot, alternarialeaf-spot, frog-eye-leaf-spot, diaporthe-pod-&-stem-blight, cyst-nematode, 2-4-d-injury, herbicide-injury} \n\n### Revelant papers\n\nTan, M., & Eshelman, L. (1988). Using weighted networks to represent classification knowledge in noisy domains. Proceedings of the Fifth International Conference on Machine Learning (pp. 121-134). Ann Arbor, Michigan: Morgan Kaufmann. \n\nFisher,D.H. & Schlimmer,J.C. (1988). Concept Simplification and Predictive Accuracy. Proceedings of the Fifth International Conference on Machine Learning (pp. 22-28). 
Ann Arbor, Michigan: Morgan Kaufmann.', 'did': 42, 'features': '0 : [0 - date (nominal)], 1 : [1 - plant-stand (nominal)], 2 : [2 - precip (nominal)], 3 : [3 - temp (nominal)], 4 : [4 - hail (nominal)], 5 : [5 - crop-hist (nominal)], 6 : [6 - area-damaged (nominal)], 7 : [7 - severity (nominal)], 8 : [8 - seed-tmt (nominal)], 9 : [9 - germination (nominal)], 10 : [10 - plant-growth (nominal)], 11 : [11 - leaves (nominal)], 12 : [12 - leafspots-halo (nominal)], 13 : [13 - leafspots-marg (nominal)], 14 : [14 - leafspot-size (nominal)], 15 : [15 - leaf-shread (nominal)], 16 : [16 - leaf-malf (nominal)], 17 : [17 - leaf-mild (nominal)], 18 : [18 - stem (nominal)], 19 : [19 - lodging (nominal)], 20 : [20 - stem-cankers (nominal)], 21 : [21 - canker-lesion (nominal)], 22 : [22 - fruiting-bodies (nominal)], 23 : [23 - external-decay (nominal)], 24 : [24 - mycelium (nominal)], 25 : [25 - int-discolor (nominal)], 26 : [26 - sclerotia (nominal)], 27 : [27 - fruit-pods (nominal)], 28 : [28 - fruit-spots (nominal)], 29 : [29 - seed (nominal)], 30 : [30 - mold-growth (nominal)], 31 : [31 - seed-discolor (nominal)], 32 : [32 - seed-size (nominal)], 33 : [33 - shriveling (nominal)], 34 : [34 - roots (nominal)], 35 : [35 - class (nominal)],', 'format': 'ARFF', 'name': 'soybean', 'qualities': 'AutoCorrelation : 0.9457478005865103, CfsSubsetEval_DecisionStumpAUC : 0.9620422408823379, CfsSubsetEval_DecisionStumpErrRate : 0.13323572474377746, CfsSubsetEval_DecisionStumpKappa : 0.8534752853145238, CfsSubsetEval_NaiveBayesAUC : 0.9620422408823379, CfsSubsetEval_NaiveBayesErrRate : 0.13323572474377746, CfsSubsetEval_NaiveBayesKappa : 0.8534752853145238, CfsSubsetEval_kNN1NAUC : 0.9620422408823379, CfsSubsetEval_kNN1NErrRate : 0.13323572474377746, CfsSubsetEval_kNN1NKappa : 0.8534752853145238, ClassEntropy : 3.83550798457672, DecisionStumpAUC : 0.8099631489104341, DecisionStumpErrRate : 0.7203513909224012, DecisionStumpKappa : 0.19424522533539545, Dimensionality : 0.0527086383601757, 
EquivalentNumberOfAtts : 7.508591767241043, J48.00001.AUC : 0.9739047068470593, J48.00001.ErrRate : 0.12152269399707175, J48.00001.Kappa : 0.8663370421980624, J48.0001.AUC : 0.9739047068470593, J48.0001.ErrRate : 0.12152269399707175, J48.0001.Kappa : 0.8663370421980624, J48.001.AUC : 0.9739047068470593, J48.001.ErrRate : 0.12152269399707175, J48.001.Kappa : 0.8663370421980624, MajorityClassPercentage : 13.469985358711567, MajorityClassSize : 92.0, MaxAttributeEntropy : 2.6849389644492594, MaxKurtosisOfNumericAtts : nan, MaxMeansOfNumericAtts : nan, MaxMutualInformation : 1.28692474762189, MaxNominalAttDistinctValues : 19.0, MaxSkewnessOfNumericAtts : nan, MaxStdDevOfNumericAtts : nan, MeanAttributeEntropy : 0.9655890619117928, MeanKurtosisOfNumericAtts : nan, MeanMeansOfNumericAtts : nan, MeanMutualInformation : 0.5108158897798274, MeanNoiseToSignalRatio : 0.8902878340922058, MeanNominalAttDistinctValues : 3.2777777777777777, MeanSkewnessOfNumericAtts : nan, MeanStdDevOfNumericAtts : nan, MinAttributeEntropy : 0.07262476248540556, MinKurtosisOfNumericAtts : nan, MinMeansOfNumericAtts : nan, MinMutualInformation : 0.0468182939867, MinNominalAttDistinctValues : 2.0, MinSkewnessOfNumericAtts : nan, MinStdDevOfNumericAtts : nan, MinorityClassPercentage : 1.171303074670571, MinorityClassSize : 8.0, NaiveBayesAUC : 0.9921587580230303, NaiveBayesErrRate : 0.08931185944363104, NaiveBayesKappa : 0.9019654903843212, NumberOfBinaryFeatures : 16.0, NumberOfClasses : 19.0, NumberOfFeatures : 36.0, NumberOfInstances : 683.0, NumberOfInstancesWithMissingValues : 121.0, NumberOfMissingValues : 2337.0, NumberOfNumericFeatures : 0.0, NumberOfSymbolicFeatures : 36.0, PercentageOfBinaryFeatures : 44.44444444444444, PercentageOfInstancesWithMissingValues : 17.71595900439239, PercentageOfMissingValues : 9.504636408003904, PercentageOfNumericFeatures : 0.0, PercentageOfSymbolicFeatures : 100.0, Quartile1AttributeEntropy : 0.4629328593168401, Quartile1KurtosisOfNumericAtts : nan, 
Quartile1MeansOfNumericAtts : nan, Quartile1MutualInformation : 0.26369905545327, Quartile1SkewnessOfNumericAtts : nan, Quartile1StdDevOfNumericAtts : nan, Quartile2AttributeEntropy : 0.9158362664344971, Quartile2KurtosisOfNumericAtts : nan, Quartile2MeansOfNumericAtts : nan, Quartile2MutualInformation : 0.45996721558355, Quartile2SkewnessOfNumericAtts : nan, Quartile2StdDevOfNumericAtts : nan, Quartile3AttributeEntropy : 1.408326420019514, Quartile3KurtosisOfNumericAtts : nan, Quartile3MeansOfNumericAtts : nan, Quartile3MutualInformation : 0.71879499353135, Quartile3SkewnessOfNumericAtts : nan, Quartile3StdDevOfNumericAtts : nan, REPTreeDepth1AUC : 0.9436075624852911, REPTreeDepth1ErrRate : 0.26500732064421667, REPTreeDepth1Kappa : 0.7052208643815201, REPTreeDepth2AUC : 0.9436075624852911, REPTreeDepth2ErrRate : 0.26500732064421667, REPTreeDepth2Kappa : 0.7052208643815201, REPTreeDepth3AUC : 0.9436075624852911, REPTreeDepth3ErrRate : 0.26500732064421667, REPTreeDepth3Kappa : 0.7052208643815201, RandomTreeDepth1AUC : 0.9035959879652148, RandomTreeDepth1ErrRate : 0.18594436310395315, RandomTreeDepth1Kappa : 0.7960191985250715, RandomTreeDepth2AUC : 0.9035959879652148, RandomTreeDepth2ErrRate : 0.18594436310395315, RandomTreeDepth2Kappa : 0.7960191985250715, RandomTreeDepth3AUC : 0.9035959879652148, RandomTreeDepth3ErrRate : 0.18594436310395315, RandomTreeDepth3Kappa : 0.7960191985250715, StdvNominalAttDistinctValues : 2.884551077834282, kNN1NAUC : 0.9616161058225481, kNN1NErrRate : 0.1171303074670571, kNN1NKappa : 0.871344781387376,', 'status': 'active', 'uploader': 1, 'version': 1}, page_content='RandomTreeDepth3Kappa : 0.7960191985250715, StdvNominalAttDistinctValues : 2.884551077834282, kNN1NAUC : 0.9616161058225481, kNN1NErrRate : 0.1171303074670571, kNN1NKappa : 0.871344781387376,, features - 0 : [0 - date (nominal)], 1 : [1 - plant-stand (nominal)], 2 : [2 - precip (nominal)], 3 : [3 - temp (nominal)], 4 : [4 - hail (nominal)], 5 : [5 - crop-hist (nominal)], 6 
: [6 - area-damaged (nominal)], 7 : [7 - severity (nominal)], 8 : [8 - seed-tmt (nominal)], 9 : [9 - germination (nominal)], 10 : [10 - plant-growth (nominal)], 11 : [11 - leaves (nominal)], 12 : [12 - leafspots-halo (nominal)], 13 : [13 - leafspots-marg (nominal)], 14 : [14 - leafspot-size (nominal)], 15 : [15 - leaf-shread (nominal)], 16 : [16 - leaf-malf (nominal)], 17 : [17 - leaf-mild (nominal)], 18 : [18 - stem (nominal)], 19 : [19 - lodging (nominal)], 20 : [20 - stem-cankers (nominal)], 21 : [21 - canker-lesion (nominal)], 22 : [22 - fruiting-bodies (nominal)], 23 : [23 - external-decay (nominal)], 24 : [24'),
+ Document(metadata={'MajorityClassSize': 71.0, 'MaxNominalAttDistinctValues': 3.0, 'MinorityClassSize': 48.0, 'NumberOfClasses': 3.0, 'NumberOfFeatures': 14.0, 'NumberOfInstances': 178.0, 'NumberOfInstancesWithMissingValues': 0.0, 'NumberOfMissingValues': 0.0, 'NumberOfNumericFeatures': 13.0, 'NumberOfSymbolicFeatures': 1.0, 'Unnamed: 0': 122, 'description': '**Author**:   \n**Source**: Unknown -   \n**Please cite**:   \n\n1. Title of Database: Wine recognition data\n \tUpdated Sept 21, 1998 by C.Blake : Added attribute information\n \n 2. Sources:\n    (a) Forina, M. et al, PARVUS - An Extendible Package for Data\n        Exploration, Classification and Correlation. Institute of Pharmaceutical\n        and Food Analysis and Technologies, Via Brigata Salerno, \n        16147 Genoa, Italy.\n \n    (b) Stefan Aeberhard, email: stefan@coral.cs.jcu.edu.au\n    (c) July 1991\n 3. Past Usage:\n \n    (1)\n    S. Aeberhard, D. Coomans and O. de Vel,\n    Comparison of Classifiers in High Dimensional Settings,\n    Tech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of\n    Mathematics and Statistics, James Cook University of North Queensland.\n    (Also submitted to Technometrics).\n \n    The data was used with many others for comparing various \n    classifiers. The classes are separable, though only RDA \n    has achieved 100% correct classification.\n    (RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data))\n    (All results using the leave-one-out technique)\n \n    In a classification context, this is a well posed problem \n    with "well behaved" class structures. A good data set \n    for first testing of a new classifier, but not very \n    challenging.\n \n    (2) \n    S. Aeberhard, D. Coomans and O. de Vel,\n    "THE CLASSIFICATION PERFORMANCE OF RDA"\n    Tech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. 
of\n    Mathematics and Statistics, James Cook University of North Queensland.\n    (Also submitted to Journal of Chemometrics).\n \n    Here, the data was used to illustrate the superior performance of\n    the use of a new appreciation function with RDA. \n \n 4. Relevant Information:\n \n    -- These data are the results of a chemical analysis of\n       wines grown in the same region in Italy but derived from three\n       different cultivars.\n       The analysis determined the quantities of 13 constituents\n       found in each of the three types of wines. \n \n    -- I think that the initial data set had around 30 variables, but \n       for some reason I only have the 13 dimensional version. \n       I had a list of what the 30 or so variables were, but a.) \n       I lost it, and b.), I would not know which 13 variables\n       are included in the set.\n \n    -- The attributes are (dontated by Riccardo Leardi, \n \triclea@anchem.unige.it )\n  \t1) Alcohol\n  \t2) Malic acid\n  \t3) Ash\n \t4) Alcalinity of ash  \n  \t5) Magnesium\n \t6) Total phenols\n  \t7) Flavanoids\n  \t8) Nonflavanoid phenols\n  \t9) Proanthocyanins\n \t10)Color intensity\n  \t11)Hue\n  \t12)OD280/OD315 of diluted wines\n  \t13)Proline            \n \n 5. Number of Instances\n \n       \tclass 1 59\n \tclass 2 71\n \tclass 3 48\n \n 6. Number of Attributes \n \t\n \t13\n \n 7. For Each Attribute:\n \n \tAll attributes are continuous\n \t\n \tNo statistics available, but suggest to standardise\n \tvariables for certain uses (e.g. for us with classifiers\n \twhich are NOT scale invariant)\n \n \tNOTE: 1st attribute is class identifier (1-3)\n \n 8. Missing Attribute Values:\n \n \tNone\n \n 9. 
Class Distribution: number of instances per class\n \n       \tclass 1 59\n \tclass 2 71\n \tclass 3 48\n\n Information about the dataset\n CLASSTYPE: nominal\n CLASSINDEX: first', 'did': 187, 'features': '0 : [0 - class (nominal)], 1 : [1 - Alcohol (numeric)], 2 : [2 - Malic_acid (numeric)], 3 : [3 - Ash (numeric)], 4 : [4 - Alcalinity_of_ash (numeric)], 5 : [5 - Magnesium (numeric)], 6 : [6 - Total_phenols (numeric)], 7 : [7 - Flavanoids (numeric)], 8 : [8 - Nonflavanoid_phenols (numeric)], 9 : [9 - Proanthocyanins (numeric)], 10 : [10 - Color_intensity (numeric)], 11 : [11 - Hue (numeric)], 12 : [12 - OD280%2FOD315_of_diluted_wines (numeric)], 13 : [13 - Proline (numeric)],', 'format': 'ARFF', 'name': 'wine', 'qualities': 'AutoCorrelation : 0.9887005649717514, CfsSubsetEval_DecisionStumpAUC : 0.934807485785613, CfsSubsetEval_DecisionStumpErrRate : 0.0898876404494382, CfsSubsetEval_DecisionStumpKappa : 0.8636080647478569, CfsSubsetEval_NaiveBayesAUC : 0.934807485785613, CfsSubsetEval_NaiveBayesErrRate : 0.0898876404494382, CfsSubsetEval_NaiveBayesKappa : 0.8636080647478569, CfsSubsetEval_kNN1NAUC : 0.934807485785613, CfsSubsetEval_kNN1NErrRate : 0.0898876404494382, CfsSubsetEval_kNN1NKappa : 0.8636080647478569, ClassEntropy : 1.5668222768551812, DecisionStumpAUC : 0.7973435168459908, DecisionStumpErrRate : 0.37640449438202245, DecisionStumpKappa : 0.4058981767460396, Dimensionality : 0.07865168539325842, EquivalentNumberOfAtts : nan, J48.00001.AUC : 0.934807485785613, J48.00001.ErrRate : 0.0898876404494382, J48.00001.Kappa : 0.8636080647478569, J48.0001.AUC : 0.934807485785613, J48.0001.ErrRate : 0.0898876404494382, J48.0001.Kappa : 0.8636080647478569, J48.001.AUC : 0.934807485785613, J48.001.ErrRate : 0.0898876404494382, J48.001.Kappa : 0.8636080647478569, MajorityClassPercentage : 39.8876404494382, MajorityClassSize : 71.0, MaxAttributeEntropy : nan, MaxKurtosisOfNumericAtts : 2.1049913235905877, MaxMeansOfNumericAtts : 746.8932584269661, MaxMutualInformation : 
nan, MaxNominalAttDistinctValues : 3.0, MaxSkewnessOfNumericAtts : 1.0981910547551612, MaxStdDevOfNumericAtts : 314.90747427684903, MeanAttributeEntropy : nan, MeanKurtosisOfNumericAtts : 0.006742802303924433, MeanMeansOfNumericAtts : 69.13366292091614, MeanMutualInformation : nan, MeanNoiseToSignalRatio : nan, MeanNominalAttDistinctValues : 3.0, MeanSkewnessOfNumericAtts : 0.3501684984202117, MeanStdDevOfNumericAtts : 26.17778523132608, MinAttributeEntropy : nan, MinKurtosisOfNumericAtts : -1.0864345274098706, MinMeansOfNumericAtts : 0.3618539325842697, MinMutualInformation : nan, MinNominalAttDistinctValues : 3.0, MinSkewnessOfNumericAtts : -0.30728549895848073, MinStdDevOfNumericAtts : 0.12445334029667939, MinorityClassPercentage : 26.96629213483146, MinorityClassSize : 48.0, NaiveBayesAUC : 0.983140867878747, NaiveBayesErrRate : 0.0449438202247191, NaiveBayesKappa : 0.9319148936170213, NumberOfBinaryFeatures : 0.0, NumberOfClasses : 3.0, NumberOfFeatures : 14.0, NumberOfInstances : 178.0, NumberOfInstancesWithMissingValues : 0.0, NumberOfMissingValues : 0.0, NumberOfNumericFeatures : 13.0, NumberOfSymbolicFeatures : 1.0, PercentageOfBinaryFeatures : 0.0, PercentageOfInstancesWithMissingValues : 0.0, PercentageOfMissingValues : 0.0, PercentageOfNumericFeatures : 92.85714285714286, PercentageOfSymbolicFeatures : 7.142857142857142, Quartile1AttributeEntropy : nan, Quartile1KurtosisOfNumericAtts : -0.8440630459414751, Quartile1MeansOfNumericAtts : 1.8100842696629211, Quartile1MutualInformation : nan, Quartile1SkewnessOfNumericAtts : -0.0151955294387136, Quartile1StdDevOfNumericAtts : 0.4233514358677881, Quartile2AttributeEntropy : nan, Quartile2KurtosisOfNumericAtts : -0.24840310614613204, Quartile2MeansOfNumericAtts : 2.366516853932584, Quartile2MutualInformation : nan, Quartile2SkewnessOfNumericAtts : 0.2130468864264532, Quartile2StdDevOfNumericAtts : 0.8118265380058574, Quartile3AttributeEntropy : nan, Quartile3KurtosisOfNumericAtts : 0.5212950315345126, 
Quartile3MeansOfNumericAtts : 16.247780898876407, Quartile3MutualInformation : nan, Quartile3SkewnessOfNumericAtts : 0.8182032861734947, Quartile3StdDevOfNumericAtts : 2.828924819497959, REPTreeDepth1AUC : 0.9038527890255288, REPTreeDepth1ErrRate : 0.15168539325842698, REPTreeDepth1Kappa : 0.7710010959165197, REPTreeDepth2AUC : 0.9038527890255288, REPTreeDepth2ErrRate : 0.15168539325842698, REPTreeDepth2Kappa : 0.7710010959165197, REPTreeDepth3AUC : 0.9038527890255288, REPTreeDepth3ErrRate : 0.15168539325842698, REPTreeDepth3Kappa : 0.7710010959165197, RandomTreeDepth1AUC : 0.9363963414265778, RandomTreeDepth1ErrRate : 0.08426966292134831, RandomTreeDepth1Kappa : 0.872212118311477, RandomTreeDepth2AUC : 0.9363963414265778, RandomTreeDepth2ErrRate : 0.08426966292134831, RandomTreeDepth2Kappa : 0.872212118311477, RandomTreeDepth3AUC : 0.9363963414265778, RandomTreeDepth3ErrRate : 0.08426966292134831, RandomTreeDepth3Kappa : 0.872212118311477, StdvNominalAttDistinctValues : 0.0, kNN1NAUC : 0.9552036199095022, kNN1NErrRate : 0.06179775280898876, kNN1NKappa : 0.9069126176666349,', 'status': 'active', 'uploader': 1, 'version': 1}, page_content='the use of a new appreciation function with RDA. \n \n 4. Relevant Information:\n \n    -- These data are the results of a chemical analysis of\n       wines grown in the same region in Italy but derived from three\n       different cultivars.\n       The analysis determined the quantities of 13 constituents\n       found in each of the three types of wines. \n \n    -- I think that the initial data set had around 30 variables, but \n       for some reason I only have the 13 dimensional version. \n       I had a list of what the 30 or so variables were, but a.) 
\n       I lost it, and b.), I would not know which 13 variables\n       are included in the set.\n \n    -- The attributes are (dontated by Riccardo Leardi, \n \triclea@anchem.unige.it )\n  \t1) Alcohol\n  \t2) Malic acid\n  \t3) Ash\n \t4) Alcalinity of ash  \n  \t5) Magnesium\n \t6) Total phenols\n  \t7) Flavanoids\n  \t8) Nonflavanoid phenols\n  \t9) Proanthocyanins\n \t10)Color intensity\n  \t11)Hue\n  \t12)OD280/OD315 of diluted wines'),
+ Document(metadata={'MaxNominalAttDistinctValues': 3.0, 'NumberOfClasses': 0.0, 'NumberOfFeatures': 5.0, 'NumberOfInstances': 125.0, 'NumberOfInstancesWithMissingValues': 0.0, 'NumberOfMissingValues': 0.0, 'NumberOfNumericFeatures': 3.0, 'NumberOfSymbolicFeatures': 2.0, 'Unnamed: 0': 134, 'description': '**Author**:   \n**Source**: Unknown -   \n**Please cite**:   \n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n Identifier attribute deleted.\n\n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n NAME:  Sexual activity and the lifespan of male fruitflies\n TYPE:  Designed (almost factorial) experiment\n SIZE:  125 observations, 5 variables\n \n DESCRIPTIVE ABSTRACT:\n A cost of increased reproduction in terms of reduced longevity has been\n shown for female fruitflies, but not for males.  The flies used were an\n outbred stock.  Sexual activity was manipulated by supplying individual\n males with one or eight receptive virgin females per day.  The\n longevity of these males was compared with that of two control types.\n The first control consisted of two sets of individual males kept with\n one or eight newly inseminated females.  Newly inseminated females will\n not usually remate for at least two days, and thus served as a control\n for any effect of competition with the male for food or space.  The\n second control was a set of individual males kept with no females.\n There were 25 males in each of the five groups, which were treated\n identically in number of anaesthetizations (using CO2) and provision of\n fresh food medium.\n \n SOURCE:\n Figure 2 in the article "Sexual Activity and the Lifespan of Male\n Fruitflies" by Linda Partridge and Marion Farquhar.  _Nature_, 294,\n 580-581, 1981.\n \n VARIABLE DESCRIPTIONS:\n Columns  Variable    Description\n -------  --------    -----------\n  1- 2    ID          Serial No. 
(1-25) within each group of 25\n                      (the order in which data points were abstracted)\n \n  4       PARTNERS    Number of companions (0, 1 or 8)\n \n  6       TYPE        Type of companion\n                        0: newly pregnant female\n                        1: virgin female\n                        9: not applicable (when PARTNERS=0)\n \n  8- 9    LONGEVITY   Lifespan, in days\n \n 11-14    THORAX      Length of thorax, in mm (x.xx)\n \n 16-17    SLEEP       Percentage of each day spent sleeping\n \n \n SPECIAL NOTES:\n `Compliance\' of the males in the two experimental groups was documented\n as follows:  On two days per week throughout the life of each\n experimental male, the females that had been supplied as virgins to\n that male were kept and examined for fertile eggs.  The insemination\n rate declined from approximately 7 females/day at age one week to just\n under 2/day at age eight weeks in the males supplied with eight virgin\n females per day, and from just under 1/day at age one week to\n approximately 0.6/day at age eight weeks in the males supplied with one\n virgin female per day.  These `compliance\' data were not supplied for\n individual males, but the authors say that "There were no significant\n differences between the individual males within each experimental\n group."\n \n STORY BEHIND THE DATA:\n James Hanley found this dataset in _Nature_ and was attracted by the\n way the raw data were presented in classical analysis of covariance\n style in Figure 2.  He read the data points from the graphs and brought\n them to the attention of a colleague with whom he was teaching the\n applied statistics course.  Dr. Liddell thought that with only three\n explanatory variables (THORAX, plus PARTNERS and TYPE to describe the\n five groups), it would not be challenging enough as a data-analysis\n project.  He suggested adding another variable.  James Hanley added\n SLEEP, a variable not mentioned in the published article.  
Teachers can\n contact us about the construction of this variable.  (We prefer to\n divulge the details at the end of the data-analysis project.)\n \n Further discussion of the background and pedagogical use of this\n dataset can be found in Hanley (1983) and in Hanley and Shapiro\n (1994).  To obtain the Hanley and Shapiro article, send the one-line\n e-mail message:\n send jse/v2n1/datasets.hanley\n to the address archive@jse.stat.ncsu.edu\n \n PEDAGOGICAL NOTES:\n This has been the most successful and the most memorable dataset we\n have used in an "applications of statistics" course, which we have\n taught for ten years.  The most common analysis techniques have been\n analysis of variance, classical analysis of covariance, and multiple\n regression.  Because the variable THORAX is so strong (it explains\n about 1/3 of the variance in LONGEVITY), it is important to consider it\n to increase the precision of between-group contrasts.  When students\n first check and find that the distributions of thorax length, and in\n particular, the mean thorax length, are very similar in the different\n groups, many of them are willing to say (in epidemiological\n terminology) that THORAX is not a confounding variable, and that it can\n be omitted from the analysis.\n \n There is usually lively discussion about the primary contrast.  The\n five groups and their special structure allow opportunities for\n students to understand and verbalize what we mean by the term\n "statistical interaction."\n \n There is also much debate as to whether one should take the SLEEP\n variable into account.  Some students say that it is an `intermediate\'\n variable.  Some students formally test the mean level of SLEEP across\n groups, find one pair where there is a statistically significant\n difference, and want to treat it as a confounding variable.  
A few\n students muse about how it was measured.\n \n There is heteroscedasticity in the LONGEVITY variable.\n \n One very observant student (now a professor) argued that THORAX cannot\n be used as a predictor or explanatory variable for the LONGEVITY\n outcome since fruitflies who die young may not be fully grown, i.e., it\n is also an intermediate variable.  One Ph.D. student who had studied\n entomology assured us that fruitflies do not grow longer after birth;\n therefore, the THORAX length is not time-dependent!\n \n Curiously, the dataset has seldom been analyzed using techniques from\n survival analysis.  The fact that there are no censored observations is\n not really an excuse, and one could easily devise a way to introduce\n censoring of LONGEVITY.\n \n REFERENCES:\n Hanley, J. A. (1983), "Appropriate Uses of Multivariate Analysis,"\n _Annual Review of Public Health_, 4, 155-180.\n \n Hanley, J. A., and Shapiro, S. H. (1994), "Sexual Activity and the\n Lifespan of Male Fruitflies:  A Dataset That Gets Attention," _Journal\n of Statistics Education_, Volume 2, Number 1.\n \n SUBMITTED BY:\n James A. Hanley and Stanley H. 
Shapiro\n Department of Epidemiology and Biostatistics\n McGill University\n 1020 Pine Avenue West\n Montreal, Quebec, H3A 1A2\n Canada\n tel: +1 (514) 398-6270 (JH) \n      +1 (514) 398-6272 (SS)\n fax: +1 (514) 398-4503\n INJH@musicb.mcgill.ca, StanS@epid.lan.mcgill.ca', 'did': 199, 'features': '0 : [0 - PARTNERS (nominal)], 1 : [1 - TYPE (nominal)], 2 : [2 - THORAX (numeric)], 3 : [3 - SLEEP (numeric)], 4 : [4 - class (numeric)],', 'format': 'ARFF', 'name': 'fruitfly', 'qualities': 'AutoCorrelation : -16.653225806451612, CfsSubsetEval_DecisionStumpAUC : nan, CfsSubsetEval_DecisionStumpErrRate : nan, CfsSubsetEval_DecisionStumpKappa : nan, CfsSubsetEval_NaiveBayesAUC : nan, CfsSubsetEval_NaiveBayesErrRate : nan, CfsSubsetEval_NaiveBayesKappa : nan, CfsSubsetEval_kNN1NAUC : nan, CfsSubsetEval_kNN1NErrRate : nan, CfsSubsetEval_kNN1NKappa : nan, ClassEntropy : nan, DecisionStumpAUC : nan, DecisionStumpErrRate : nan, DecisionStumpKappa : nan, Dimensionality : 0.04, EquivalentNumberOfAtts : nan, J48.00001.AUC : nan, J48.00001.ErrRate : nan, J48.00001.Kappa : nan, J48.0001.AUC : nan, J48.0001.ErrRate : nan, J48.0001.Kappa : nan, J48.001.AUC : nan, J48.001.ErrRate : nan, J48.001.Kappa : nan, MajorityClassPercentage : nan, MajorityClassSize : nan, MaxAttributeEntropy : nan, MaxKurtosisOfNumericAtts : 3.1484095157236704, MaxMeansOfNumericAtts : 57.44, MaxMutualInformation : nan, MaxNominalAttDistinctValues : 3.0, MaxSkewnessOfNumericAtts : 1.5903052309118162, MaxStdDevOfNumericAtts : 17.563892580537072, MeanAttributeEntropy : nan, MeanKurtosisOfNumericAtts : 0.7789944524450039, MeanMeansOfNumericAtts : 27.241653333333332, MeanMutualInformation : nan, MeanNoiseToSignalRatio : nan, MeanNominalAttDistinctValues : 3.0, MeanSkewnessOfNumericAtts : 0.3135430433813126, MeanStdDevOfNumericAtts : 11.173398006246252, MinAttributeEntropy : nan, MinKurtosisOfNumericAtts : -0.410404642598019, MinMeansOfNumericAtts : 0.82096, MinMutualInformation : nan, MinNominalAttDistinctValues : 
3.0, MinSkewnessOfNumericAtts : -0.6380573853536728, MinStdDevOfNumericAtts : 0.07745366981455389, MinorityClassPercentage : nan, MinorityClassSize : nan, NaiveBayesAUC : nan, NaiveBayesErrRate : nan, NaiveBayesKappa : nan, NumberOfBinaryFeatures : 0.0, NumberOfClasses : 0.0, NumberOfFeatures : 5.0, NumberOfInstances : 125.0, NumberOfInstancesWithMissingValues : 0.0, NumberOfMissingValues : 0.0, NumberOfNumericFeatures : 3.0, NumberOfSymbolicFeatures : 2.0, PercentageOfBinaryFeatures : 0.0, PercentageOfInstancesWithMissingValues : 0.0, PercentageOfMissingValues : 0.0, PercentageOfNumericFeatures : 60.0, PercentageOfSymbolicFeatures : 40.0, Quartile1AttributeEntropy : nan, Quartile1KurtosisOfNumericAtts : -0.410404642598019, Quartile1MeansOfNumericAtts : 0.82096, Quartile1MutualInformation : nan, Quartile1SkewnessOfNumericAtts : -0.6380573853536728, Quartile1StdDevOfNumericAtts : 0.07745366981455389, Quartile2AttributeEntropy : nan, Quartile2KurtosisOfNumericAtts : -0.4010215157906396, Quartile2MeansOfNumericAtts : 23.464, Quartile2MutualInformation : nan, Quartile2SkewnessOfNumericAtts : -0.011618715414205413, Quartile2StdDevOfNumericAtts : 15.878847768387132, Quartile3AttributeEntropy : nan, Quartile3KurtosisOfNumericAtts : 3.1484095157236704, Quartile3MeansOfNumericAtts : 57.44, Quartile3MutualInformation : nan, Quartile3SkewnessOfNumericAtts : 1.5903052309118162, Quartile3StdDevOfNumericAtts : 17.563892580537072, REPTreeDepth1AUC : nan, REPTreeDepth1ErrRate : nan, REPTreeDepth1Kappa : nan, REPTreeDepth2AUC : nan, REPTreeDepth2ErrRate : nan, REPTreeDepth2Kappa : nan, REPTreeDepth3AUC : nan, REPTreeDepth3ErrRate : nan, REPTreeDepth3Kappa : nan, RandomTreeDepth1AUC : nan, RandomTreeDepth1ErrRate : nan, RandomTreeDepth1Kappa : nan, RandomTreeDepth2AUC : nan, RandomTreeDepth2ErrRate : nan, RandomTreeDepth2Kappa : nan, RandomTreeDepth3AUC : nan, RandomTreeDepth3ErrRate : nan, RandomTreeDepth3Kappa : nan, StdvNominalAttDistinctValues : 0.0, kNN1NAUC : nan, kNN1NErrRate 
: nan, kNN1NKappa : nan,', 'status': 'active', 'uploader': 1, 'version': 1}, page_content='_Annual Review of Public Health_, 4, 155-180.\n \n Hanley, J. A., and Shapiro, S. H. (1994), "Sexual Activity and the\n Lifespan of Male Fruitflies:  A Dataset That Gets Attention," _Journal\n of Statistics Education_, Volume 2, Number 1.\n \n SUBMITTED BY:\n James A. Hanley and Stanley H. Shapiro\n Department of Epidemiology and Biostatistics\n McGill University\n 1020 Pine Avenue West\n Montreal, Quebec, H3A 1A2\n Canada\n tel: +1 (514) 398-6270 (JH) \n      +1 (514) 398-6272 (SS)\n fax: +1 (514) 398-4503')]
+
+
+
+
+
+
+
+
+ +
1
res[0].metadata
+
+ +
+
+
+
+
+
+{'MajorityClassSize': 4208.0,
+ 'MaxNominalAttDistinctValues': 12.0,
+ 'MinorityClassSize': 3916.0,
+ 'NumberOfClasses': 2.0,
+ 'NumberOfFeatures': 23.0,
+ 'NumberOfInstances': 8124.0,
+ 'NumberOfInstancesWithMissingValues': 2480.0,
+ 'NumberOfMissingValues': 2480.0,
+ 'NumberOfNumericFeatures': 0.0,
+ 'NumberOfSymbolicFeatures': 23.0,
+ 'Unnamed: 0': 19,
+ 'description': "**Author**: [Jeff Schlimmer](Jeffrey.Schlimmer@a.gp.cs.cmu.edu)  \n**Source**: [UCI](https://archive.ics.uci.edu/ml/datasets/mushroom) - 1981     \n**Please cite**:  The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf \n\n\n### Description\n\nThis dataset describes mushrooms in terms of their physical characteristics. They are classified into: poisonous or edible.\n\n### Source\n```\n(a) Origin: \nMushroom records are drawn from The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf \n\n(b) Donor: \nJeff Schlimmer (Jeffrey.Schlimmer '@' a.gp.cs.cmu.edu)\n```\n\n### Dataset description\n\nThis dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family. Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like ``leaflets three, let it be'' for Poisonous Oak and Ivy.\n\n### Attributes Information\n```\n1. cap-shape: bell=b,conical=c,convex=x,flat=f, knobbed=k,sunken=s \n2. cap-surface: fibrous=f,grooves=g,scaly=y,smooth=s \n3. cap-color: brown=n,buff=b,cinnamon=c,gray=g,green=r, pink=p,purple=u,red=e,white=w,yellow=y \n4. bruises?: bruises=t,no=f \n5. odor: almond=a,anise=l,creosote=c,fishy=y,foul=f, musty=m,none=n,pungent=p,spicy=s \n6. gill-attachment: attached=a,descending=d,free=f,notched=n \n7. gill-spacing: close=c,crowded=w,distant=d \n8. gill-size: broad=b,narrow=n \n9. gill-color: black=k,brown=n,buff=b,chocolate=h,gray=g, green=r,orange=o,pink=p,purple=u,red=e, white=w,yellow=y \n10. stalk-shape: enlarging=e,tapering=t \n11. stalk-root: bulbous=b,club=c,cup=u,equal=e, rhizomorphs=z,rooted=r,missing=? \n12. 
stalk-surface-above-ring: fibrous=f,scaly=y,silky=k,smooth=s \n13. stalk-surface-below-ring: fibrous=f,scaly=y,silky=k,smooth=s \n14. stalk-color-above-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, pink=p,red=e,white=w,yellow=y \n15. stalk-color-below-ring: brown=n,buff=b,cinnamon=c,gray=g,orange=o, pink=p,red=e,white=w,yellow=y \n16. veil-type: partial=p,universal=u \n17. veil-color: brown=n,orange=o,white=w,yellow=y \n18. ring-number: none=n,one=o,two=t \n19. ring-type: cobwebby=c,evanescent=e,flaring=f,large=l, none=n,pendant=p,sheathing=s,zone=z \n20. spore-print-color: black=k,brown=n,buff=b,chocolate=h,green=r, orange=o,purple=u,white=w,yellow=y \n21. population: abundant=a,clustered=c,numerous=n, scattered=s,several=v,solitary=y \n22. habitat: grasses=g,leaves=l,meadows=m,paths=p, urban=u,waste=w,woods=d\n```\n\n### Relevant papers\n\nSchlimmer,J.S. (1987). Concept Acquisition Through Representational Adjustment (Technical Report 87-19). Doctoral disseration, Department of Information and Computer Science, University of California, Irvine. \n\nIba,W., Wogulis,J., & Langley,P. (1988). Trading off Simplicity and Coverage in Incremental Concept Learning. In Proceedings of the 5th International Conference on Machine Learning, 73-79. Ann Arbor, Michigan: Morgan Kaufmann. \n\nDuch W, Adamczak R, Grabczewski K (1996) Extraction of logical rules from training data using backpropagation networks, in: Proc. of the The 1st Online Workshop on Soft Computing, 19-30.Aug.1996, pp. 25-30, [Web Link] \n\nDuch W, Adamczak R, Grabczewski K, Ishikawa M, Ueda H, Extraction of crisp logical rules using constrained backpropagation networks - comparison of two new approaches, in: Proc. of the European Symposium on Artificial Neural Networks (ESANN'97), Bruge, Belgium 16-18.4.1997.",
+ 'did': 24,
+ 'features': '0 : [0 - cap-shape (nominal)], 1 : [1 - cap-surface (nominal)], 2 : [2 - cap-color (nominal)], 3 : [3 - bruises%3F (nominal)], 4 : [4 - odor (nominal)], 5 : [5 - gill-attachment (nominal)], 6 : [6 - gill-spacing (nominal)], 7 : [7 - gill-size (nominal)], 8 : [8 - gill-color (nominal)], 9 : [9 - stalk-shape (nominal)], 10 : [10 - stalk-root (nominal)], 11 : [11 - stalk-surface-above-ring (nominal)], 12 : [12 - stalk-surface-below-ring (nominal)], 13 : [13 - stalk-color-above-ring (nominal)], 14 : [14 - stalk-color-below-ring (nominal)], 15 : [15 - veil-type (nominal)], 16 : [16 - veil-color (nominal)], 17 : [17 - ring-number (nominal)], 18 : [18 - ring-type (nominal)], 19 : [19 - spore-print-color (nominal)], 20 : [20 - population (nominal)], 21 : [21 - habitat (nominal)], 22 : [22 - class (nominal)],',
+ 'format': 'ARFF',
+ 'name': 'mushroom',
+ 'qualities': 'AutoCorrelation : 0.726332635725717, CfsSubsetEval_DecisionStumpAUC : 0.9910519616800724, CfsSubsetEval_DecisionStumpErrRate : 0.013047759724273756, CfsSubsetEval_DecisionStumpKappa : 0.9738461616958994, CfsSubsetEval_NaiveBayesAUC : 0.9910519616800724, CfsSubsetEval_NaiveBayesErrRate : 0.013047759724273756, CfsSubsetEval_NaiveBayesKappa : 0.9738461616958994, CfsSubsetEval_kNN1NAUC : 0.9910519616800724, CfsSubsetEval_kNN1NErrRate : 0.013047759724273756, CfsSubsetEval_kNN1NKappa : 0.9738461616958994, ClassEntropy : 0.9990678968724604, DecisionStumpAUC : 0.8894935275772204, DecisionStumpErrRate : 0.11324470704086657, DecisionStumpKappa : 0.77457574608175, Dimensionality : 0.002831117676021664, EquivalentNumberOfAtts : 5.0393135801657, J48.00001.AUC : 1.0, J48.00001.ErrRate : 0.0, J48.00001.Kappa : 1.0, J48.0001.AUC : 1.0, J48.0001.ErrRate : 0.0, J48.0001.Kappa : 1.0, J48.001.AUC : 1.0, J48.001.ErrRate : 0.0, J48.001.Kappa : 1.0, MajorityClassPercentage : 51.7971442639094, MajorityClassSize : 4208.0, MaxAttributeEntropy : 3.030432883772633, MaxKurtosisOfNumericAtts : nan, MaxMeansOfNumericAtts : nan, MaxMutualInformation : 0.906074977384, MaxNominalAttDistinctValues : 12.0, MaxSkewnessOfNumericAtts : nan, MaxStdDevOfNumericAtts : nan, MeanAttributeEntropy : 1.4092554739602103, MeanKurtosisOfNumericAtts : nan, MeanMeansOfNumericAtts : nan, MeanMutualInformation : 0.19825475850613955, MeanNoiseToSignalRatio : 6.108305922031972, MeanNominalAttDistinctValues : 5.130434782608695, MeanSkewnessOfNumericAtts : nan, MeanStdDevOfNumericAtts : nan, MinAttributeEntropy : -0.0, MinKurtosisOfNumericAtts : nan, MinMeansOfNumericAtts : nan, MinMutualInformation : 0.0, MinNominalAttDistinctValues : 1.0, MinSkewnessOfNumericAtts : nan, MinStdDevOfNumericAtts : nan, MinorityClassPercentage : 48.20285573609059, MinorityClassSize : 3916.0, NaiveBayesAUC : 0.9976229672941662, NaiveBayesErrRate : 0.04899064500246184, NaiveBayesKappa : 0.9015972799616292, 
NumberOfBinaryFeatures : 5.0, NumberOfClasses : 2.0, NumberOfFeatures : 23.0, NumberOfInstances : 8124.0, NumberOfInstancesWithMissingValues : 2480.0, NumberOfMissingValues : 2480.0, NumberOfNumericFeatures : 0.0, NumberOfSymbolicFeatures : 23.0, PercentageOfBinaryFeatures : 21.73913043478261, PercentageOfInstancesWithMissingValues : 30.526834071885773, PercentageOfMissingValues : 1.3272536552993814, PercentageOfNumericFeatures : 0.0, PercentageOfSymbolicFeatures : 100.0, Quartile1AttributeEntropy : 0.8286618104993447, Quartile1KurtosisOfNumericAtts : nan, Quartile1MeansOfNumericAtts : nan, Quartile1MutualInformation : 0.034184520425602494, Quartile1SkewnessOfNumericAtts : nan, Quartile1StdDevOfNumericAtts : nan, Quartile2AttributeEntropy : 1.467128011861462, Quartile2KurtosisOfNumericAtts : nan, Quartile2MeansOfNumericAtts : nan, Quartile2MutualInformation : 0.174606545183155, Quartile2SkewnessOfNumericAtts : nan, Quartile2StdDevOfNumericAtts : nan, Quartile3AttributeEntropy : 2.0533554351937426, Quartile3KurtosisOfNumericAtts : nan, Quartile3MeansOfNumericAtts : nan, Quartile3MutualInformation : 0.27510225484918505, Quartile3SkewnessOfNumericAtts : nan, Quartile3StdDevOfNumericAtts : nan, REPTreeDepth1AUC : 0.9999987256143267, REPTreeDepth1ErrRate : 0.00036927621861152144, REPTreeDepth1Kappa : 0.9992605118549308, REPTreeDepth2AUC : 0.9999987256143267, REPTreeDepth2ErrRate : 0.00036927621861152144, REPTreeDepth2Kappa : 0.9992605118549308, REPTreeDepth3AUC : 0.9999987256143267, REPTreeDepth3ErrRate : 0.00036927621861152144, REPTreeDepth3Kappa : 0.9992605118549308, RandomTreeDepth1AUC : 0.9995247148288974, RandomTreeDepth1ErrRate : 0.0004923682914820286, RandomTreeDepth1Kappa : 0.9990140245420991, RandomTreeDepth2AUC : 0.9995247148288974, RandomTreeDepth2ErrRate : 0.0004923682914820286, RandomTreeDepth2Kappa : 0.9990140245420991, RandomTreeDepth3AUC : 0.9995247148288974, RandomTreeDepth3ErrRate : 0.0004923682914820286, RandomTreeDepth3Kappa : 0.9990140245420991, 
StdvNominalAttDistinctValues : 3.1809710899501766, kNN1NAUC : 1.0, kNN1NErrRate : 0.0, kNN1NKappa : 1.0,',
+ 'status': 'active',
+ 'uploader': 1,
+ 'version': 1}
+
+
+
+
+
+
+
+
+ +
1
print(res[0].page_content)
+
+ +
+
+
+
+
+
+### Description
+
+This dataset describes mushrooms in terms of their physical characteristics. They are classified into: poisonous or edible.
+
+### Source
+
1
+2
+3
+4
+5
(a) Origin: 
+Mushroom records are drawn from The Audubon Society Field Guide to North American Mushrooms (1981). G. H. Lincoff (Pres.), New York: Alfred A. Knopf 
+
+(b) Donor: 
+Jeff Schlimmer (Jeffrey.Schlimmer '@' a.gp.cs.cmu.edu)
+
+ +### Dataset description + +This dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family. Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like ``leaflets three, let it be'' for Poisonous Oak and Ivy. +
+
+
+
+
+
+
+
+
+
+

Process the results and return a dataframe instead

+
+
+
+
+
+ +
1
+2
+3
+4
+5
+6
output_df, ids_order = QueryProcessor(
+    query=query,
+    qa=qa_dataset,
+    type_of_query=config["type_of_data"],
+    config=config,
+).get_result_from_query()
+
+ +
+
+
+
+ +
1
ids_order
+
+ +
+
+
+
+
+
+[24,
+ 24,
+ 294,
+ 120,
+ 120,
+ 42,
+ 188,
+ 42,
+ 187,
+ 199,
+ 183,
+ 134,
+ 23,
+ 134,
+ 287,
+ 334,
+ 335,
+ 333,
+ 42,
+ 42,
+ 287,
+ 343,
+ 8,
+ 334,
+ 24,
+ 333,
+ 179,
+ 335,
+ 61,
+ 13]
+
+
+
+
+
+
+
+
+ +
1
output_df.head()
+
+ +
+
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
idnameDescriptionOpenML URLCommand
024mushroomStdvNominalAttDistinctValues : 3.1809710899501...<a href="https://www.openml.org/search?type=da...dataset = openml.datasets.get_dataset(24)
2294satellite_imageData Set Information:<a href="https://www.openml.org/search?type=da...dataset = openml.datasets.get_dataset(294)
3120BNG(mushroom)RandomTreeDepth3ErrRate : 0.024243, RandomTree...<a href="https://www.openml.org/search?type=da...dataset = openml.datasets.get_dataset(120)
542soybeandid - 42, name - soybean, version - 1, uploade...<a href="https://www.openml.org/search?type=da...dataset = openml.datasets.get_dataset(42)
6188eucalyptusKirsten Thomson and Robert J. McQueen (1996) M...<a href="https://www.openml.org/search?type=da...dataset = openml.datasets.get_dataset(188)
+
+
+
+
+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/configuration/index.html b/Rag Pipeline/configuration/index.html new file mode 100644 index 0000000..52acb38 --- /dev/null +++ b/Rag Pipeline/configuration/index.html @@ -0,0 +1,1296 @@ + + + + + + + + + + + + + + + + + + + + + + + Configuration - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Configuration

+
    +
  • The main config file is config.json. Since this is loaded in every training/evaluation script, you can use this to modify the behavior inline.
  • +
+

Possible options

+
    +
  • rqa_prompt_template: The template for the RAG pipeline search prompt. This is used by the model to query the database.
  • +
  • llm_prompt_template: The template for the summary generator LLM prompt.
  • +
  • num_return_documents: Number of documents to return for a query. Too high a number can lead to Out of Memory errors. (Defaults to 50)
  • +
  • embedding_model: THIS IS FROM HUGGINGFACE. The model to use for generating embeddings. This is used to generate embeddings for the documents as a means of comparison using the LLM's embeddings. (Defaults to BAAI/bge-large-en-v1.5)
      +
    • Other possible tested models
        +
      • BAAI/bge-base-en-v1.5
      • +
      • BAAI/bge-large-en-v1.5
      • +
      +
    • +
    +
  • +
  • llm_model: THIS IS FROM OLLAMA. The model used for generating the result summary. (Defaults to qwen2:1.5b)
  • +
  • data_dir: The directory to store the intermediate data like tables/databases etc. (Defaults to ./data/)
  • +
  • persist_dir: The directory to store the cached data. Defaults to ./data/chroma_db/ and stores the embeddings for the documents with a unique hash. (Defaults to ./data/chroma_db/)
  • +
  • testing_flag: Enables testing mode by using subsets of the data for quick debugging. This is used to test the pipeline and is not recommended for normal use. (Defaults to False)
  • +
  • test_subset: Uses a tiny subset of the data for testing.
  • +
  • data_download_n_jobs: Number of jobs to run in parallel for downloading data. (Defaults to 20)
  • +
  • training: Whether to train the model or not. (Defaults to False) this is automatically set to True when when running the training.py script. Do NOT set this to True manually.
  • +
  • search_type : The type of vector comparison to use. (Defaults to "similarity")
  • +
  • reraanking: Whether to rerank the results using the FlashRank algorithm. (Defaults to False)
  • +
  • long_context_reordering: Whether to reorder the results using the Long Context Reordering algorithm. (Defaults to False)
  • +
  • chunk_size: Size of the chunks for the RAG document chunking
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/general_utils/index.html b/Rag Pipeline/general_utils/index.html new file mode 100644 index 0000000..16f5145 --- /dev/null +++ b/Rag Pipeline/general_utils/index.html @@ -0,0 +1,1425 @@ + + + + + + + + + + + + + + + + + + + + + + + General utilities - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

General utilities

+
    +
  • The logic for which device to use. At the moment, this does not support multiple GPUs, but it should be pretty easy to add in if required.
  • +
+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ find_device(training=False) + +

+ + +
+ +

Description: Find the device to use for the pipeline. If cuda is available, use it. If not, check if MPS is available and use it. If not, use CPU.

+ +
+ Source code in backend/modules/general_utils.py +
 9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
def find_device(training: bool = False) -> str:
+    """
+    Description: Find the device to use for the pipeline. If cuda is available, use it. If not, check if MPS is available and use it. If not, use CPU.
+    """
+    print("[INFO] Finding device.")
+    if torch.cuda.is_available():
+        return "cuda"
+    elif torch.backends.mps.is_available():
+        return "mps"
+    else:
+        return "cpu"
+
+
+
+ +
+ +
+ + +

+ load_config_and_device(config_file, training=False) + +

+ + +
+ +

Description: Load the config file and find the device to use for the pipeline.

+ +
+ Source code in backend/modules/general_utils.py +
22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
def load_config_and_device(config_file: str, training: bool = False) -> dict:
+    """
+    Description: Load the config file and find the device to use for the pipeline.
+    """
+    # Check if the config file exists and load it
+    if not os.path.exists(config_file):
+        raise Exception("Config file does not exist.")
+    with open(config_file, "r") as f:
+        config = json.load(f)
+
+    # Find device and set it in the config between cpu and cuda and mps if available
+    config["device"] = find_device(training)
+    print(f"[INFO] Device found: {config['device']}")
+    return config
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/index.html b/Rag Pipeline/index.html new file mode 100644 index 0000000..8b19532 --- /dev/null +++ b/Rag Pipeline/index.html @@ -0,0 +1,1307 @@ + + + + + + + + + + + + + + + + + + + + + + + RAG Pipeline - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

RAG Pipeline

+
    +
  • The RAG pipeline is the main service of the AI search. At the moment though, we are not doing the Generation part and only using the RAG to find relevant datasets for the given query.
  • +
  • The RAG pipeline code here is divided into two parts - training (training.py) and inference (backend.py). The first is used to gather data from the OpenML API and then preprocess it and store it in a vector database, the second is used for inference.
  • +
+

Training

+
    +
  • All the modules you are looking for are in backend/modules. To modify/understand any of the behavior, you should look at the corresponding documentation for each of the ones that you want to modify.
  • +
  • config.json : JSON with the main config used for training and inference - documentation
  • +
  • results_gen.py : Code for creating the output and running parts of the other modules during inference - documentation
  • +
  • general_utils.py : Code for device configuration (gpu/cpu/mps) - documentation
  • +
  • metadata_utils.py : Getting/formatting/loading metadata from OpenML - documentation
  • +
  • rag_llm.py : Langchain code for the RAG pipeline - documentation
  • +
  • utils.py : Just imports all the utility files
  • +
  • vector_store_utils.py : Code for loading data into the vector store. - documentation
  • +
+

Inference

+
    +
  • This component runs the RAG pipeline. It returns a JSON with dataset ids of the OpenML datasets that match the query.
  • +
  • You can start it by running cd backend && uvicorn backend:app --host 0.0.0.0 --port 8000 &
  • +
  • Curl Example : curl http://0.0.0.0:8000/dataset/find%20me%20a%20mushroom%20dataset
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/llm_module/index.html b/Rag Pipeline/llm_module/index.html new file mode 100644 index 0000000..206205c --- /dev/null +++ b/Rag Pipeline/llm_module/index.html @@ -0,0 +1,1787 @@ + + + + + + + + + + + + + + + + + + + + + + + RAG LLM - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

RAG LLM

+
    +
  • Setting up the retrival and using Lanchain APIs
  • +
+

Modify LLM Chain

+
    +
  • At the moment the LLM chain is a retriver, if you want to add functionality, you will need to modify the LLMChainInitializer function.
  • +
  • To change the way vectorstore is used, modify the QASetup function.
  • +
  • To change the way Ollama works, caching works and add generation and stuff, modify the LLMChainCreator function.
  • +
+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ LLMChainCreator + + +

+ + +
+ + +

Description: Gets Ollama, sends query, enables query caching

+ +
+ Source code in backend/modules/rag_llm.py +
 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
class LLMChainCreator:
+    """
+    Description: Gets Ollama, sends query, enables query caching
+    """
+
+    def __init__(self, config: dict, local: bool = False):
+        self.config = config
+        self.local = local
+
+    def get_llm_chain(self) -> LLMChain | bool:
+        """
+        Description: Send a query to Ollama using the paths.
+        """
+        base_url = "http://127.0.0.1:11434" if self.local else "http://ollama:11434"
+        llm = Ollama(model=self.config["llm_model"], base_url=base_url)
+        map_template = self.config["llm_prompt_template"]
+        map_prompt = PromptTemplate.from_template(map_template)
+        return map_prompt | llm | StrOutputParser()
+
+    def enable_cache(self):
+        """
+        Description: Enable a cache for queries to prevent running the same query again for no reason.
+        """
+        set_llm_cache(
+            SQLiteCache(
+                database_path=os.path.join(self.config["data_dir"], ".langchain.db")
+            )
+        )
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ enable_cache() + +

+ + +
+ +

Description: Enable a cache for queries to prevent running the same query again for no reason.

+ +
+ Source code in backend/modules/rag_llm.py +
103
+104
+105
+106
+107
+108
+109
+110
+111
def enable_cache(self):
+    """
+    Description: Enable a cache for queries to prevent running the same query again for no reason.
+    """
+    set_llm_cache(
+        SQLiteCache(
+            database_path=os.path.join(self.config["data_dir"], ".langchain.db")
+        )
+    )
+
+
+
+ +
+ +
+ + +

+ get_llm_chain() + +

+ + +
+ +

Description: Send a query to Ollama using the paths.

+ +
+ Source code in backend/modules/rag_llm.py +
 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
def get_llm_chain(self) -> LLMChain | bool:
+    """
+    Description: Send a query to Ollama using the paths.
+    """
+    base_url = "http://127.0.0.1:11434" if self.local else "http://ollama:11434"
+    llm = Ollama(model=self.config["llm_model"], base_url=base_url)
+    map_template = self.config["llm_prompt_template"]
+    map_prompt = PromptTemplate.from_template(map_template)
+    return map_prompt | llm | StrOutputParser()
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ LLMChainInitializer + + +

+ + +
+ + +

Description: Setup the vectordb (Chroma) as a retriever with parameters

+ +
+ Source code in backend/modules/rag_llm.py +
25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
class LLMChainInitializer:
+    """
+    Description: Setup the vectordb (Chroma) as a retriever with parameters
+    """
+
+    @staticmethod
+    def initialize_llm_chain(
+        vectordb: Chroma, config: dict
+    ) -> langchain.chains.retrieval_qa.base.RetrievalQA:
+        if config["search_type"] == "similarity_score_threshold":
+            return vectordb.as_retriever(
+                search_type=config["search_type"],
+                search_kwargs={
+                    "k": config["num_return_documents"],
+                    "score_threshold": 0.5,
+                },
+            )
+        else:
+            return vectordb.as_retriever(
+                search_type=config["search_type"],
+                search_kwargs={"k": config["num_return_documents"]},
+            )
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ QASetup + + +

+ + +
+ + +

Description: Setup the VectorDB, QA and initalize the LLM for each type of data

+ +
+ Source code in backend/modules/rag_llm.py +
49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
class QASetup:
+    """
+    Description: Setup the VectorDB, QA and initalize the LLM for each type of data
+    """
+
+    def __init__(
+        self, config: dict, data_type: str, client: ClientAPI, subset_ids: list = None
+    ):
+        self.config = config
+        self.data_type = data_type
+        self.client = client
+        self.subset_ids = subset_ids
+
+    def setup_vector_db_and_qa(self):
+        self.config["type_of_data"] = self.data_type
+
+        metadata_processor = OpenMLMetadataProcessor(config=self.config)
+        openml_data_object, data_id, all_metadata, handler = (
+            metadata_processor.get_all_metadata_from_openml()
+        )
+        metadata_df, all_metadata = metadata_processor.create_metadata_dataframe(
+            handler,
+            openml_data_object,
+            data_id,
+            all_metadata,
+            subset_ids=self.subset_ids,
+        )
+
+        vector_store_manager = VectorStoreManager(self.client, self.config)
+        vectordb = vector_store_manager.create_vector_store(metadata_df)
+        qa = LLMChainInitializer.initialize_llm_chain(vectordb, self.config)
+
+        return qa, all_metadata
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/metadata_module/index.html b/Rag Pipeline/metadata_module/index.html new file mode 100644 index 0000000..d0d0027 --- /dev/null +++ b/Rag Pipeline/metadata_module/index.html @@ -0,0 +1,3238 @@ + + + + + + + + + + + + + + + + + + + + + + + Metadata Module - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Metadata Module

+
    +
  • Logic for Getting/formatting/loading metadata from OpenML.
  • +
  • If you want to modify the logic for the data ingestion pipeline : Refer to OpenMLObjectHandler , OpenMLDatasetHandler
  • +
  • If you want to chane the pipeline itself, refer to OpenMLMetadataProcessor
  • +
+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ OpenMLDatasetHandler + + +

+ + +
+

+ Bases: OpenMLObjectHandler

+ + +

Description: The class for handling OpenML dataset objects.

+ +
+ Source code in backend/modules/metadata_utils.py +
149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
class OpenMLDatasetHandler(OpenMLObjectHandler):
+    """
+    Description: The class for handling OpenML dataset objects.
+    """
+
+    def get_description(self, data_id: int):
+        return openml.datasets.get_dataset(
+            dataset_id=data_id,
+            download_data=False,
+            download_qualities=True,
+            download_features_meta_data=True,
+        )
+
+    def get_openml_objects(self):
+        return openml.datasets.list_datasets(output_format="dataframe")
+
+    def process_metadata(
+        self,
+        openml_data_object: Sequence[openml.datasets.dataset.OpenMLDataset],
+        data_id: Sequence[int],
+        all_dataset_metadata: pd.DataFrame,
+        file_path: str,
+        subset_ids=None,
+    ):
+        """
+        Description: Combine the metadata attributes into a single string and save it to a CSV / ChromaDB file. Subset the data if given a list of IDs to subset by.
+        """
+
+        # Metadata
+        descriptions = [
+            self.extract_attribute(attr, "description") for attr in openml_data_object
+        ]
+        joined_qualities = [
+            self.join_attributes(attr, "qualities") for attr in openml_data_object
+        ]
+        joined_features = [
+            self.join_attributes(attr, "features") for attr in openml_data_object
+        ]
+
+        # Combine them
+
+        all_data_description_df = self.create_combined_information_df_for_datasets(
+            data_id, descriptions, joined_qualities, joined_features
+        )
+        all_dataset_metadata = self.combine_metadata(
+            all_dataset_metadata, all_data_description_df
+        )
+
+        # subset the metadata if subset_ids is not None
+        all_dataset_metadata = self.subset_metadata(subset_ids, all_dataset_metadata)
+
+        # Save to a CSV
+        all_dataset_metadata.to_csv(file_path)
+
+        # Save to chroma if needed
+        if self.config.get("use_chroma_for_saving_metadata"):
+            client = chromadb.PersistentClient(
+                path=self.config["persist_dir"] + "metadata_db"
+            )
+            vecmanager = VectorStoreManager(client, self.config)
+            vecmanager.add_df_chunks_to_db(all_dataset_metadata)
+
+        return (
+            all_dataset_metadata[["did", "name", "Combined_information"]],
+            all_dataset_metadata,
+        )
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ process_metadata(openml_data_object, data_id, all_dataset_metadata, file_path, subset_ids=None) + +

+ + +
+ +

Description: Combine the metadata attributes into a single string and save it to a CSV / ChromaDB file. Subset the data if given a list of IDs to subset by.

+ +
+ Source code in backend/modules/metadata_utils.py +
165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
def process_metadata(
+    self,
+    openml_data_object: Sequence[openml.datasets.dataset.OpenMLDataset],
+    data_id: Sequence[int],
+    all_dataset_metadata: pd.DataFrame,
+    file_path: str,
+    subset_ids=None,
+):
+    """
+    Description: Combine the metadata attributes into a single string and save it to a CSV / ChromaDB file. Subset the data if given a list of IDs to subset by.
+    """
+
+    # Metadata
+    descriptions = [
+        self.extract_attribute(attr, "description") for attr in openml_data_object
+    ]
+    joined_qualities = [
+        self.join_attributes(attr, "qualities") for attr in openml_data_object
+    ]
+    joined_features = [
+        self.join_attributes(attr, "features") for attr in openml_data_object
+    ]
+
+    # Combine them
+
+    all_data_description_df = self.create_combined_information_df_for_datasets(
+        data_id, descriptions, joined_qualities, joined_features
+    )
+    all_dataset_metadata = self.combine_metadata(
+        all_dataset_metadata, all_data_description_df
+    )
+
+    # subset the metadata if subset_ids is not None
+    all_dataset_metadata = self.subset_metadata(subset_ids, all_dataset_metadata)
+
+    # Save to a CSV
+    all_dataset_metadata.to_csv(file_path)
+
+    # Save to chroma if needed
+    if self.config.get("use_chroma_for_saving_metadata"):
+        client = chromadb.PersistentClient(
+            path=self.config["persist_dir"] + "metadata_db"
+        )
+        vecmanager = VectorStoreManager(client, self.config)
+        vecmanager.add_df_chunks_to_db(all_dataset_metadata)
+
+    return (
+        all_dataset_metadata[["did", "name", "Combined_information"]],
+        all_dataset_metadata,
+    )
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ OpenMLFlowHandler + + +

+ + +
+

+ Bases: OpenMLObjectHandler

+ + +

Description: The class for handling OpenML flow objects.

+ +
+ Source code in backend/modules/metadata_utils.py +
217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
class OpenMLFlowHandler(OpenMLObjectHandler):
+    """
+    Description: The class for handling OpenML flow objects.
+    """
+
+    def get_description(self, data_id: int):
+        return openml.flows.get_flow(flow_id=data_id)
+
+    def get_openml_objects(self):
+        all_objects = openml.flows.list_flows(output_format="dataframe")
+        return all_objects.rename(columns={"id": "did"})
+
+    def process_metadata(
+        self,
+        openml_data_object: Sequence[openml.flows.flow.OpenMLFlow],
+        data_id: Sequence[int],
+        all_dataset_metadata: pd.DataFrame,
+        file_path: str,
+        subset_ids=None,
+    ):
+        descriptions = [
+            self.extract_attribute(attr, "description") for attr in openml_data_object
+        ]
+        names = [self.extract_attribute(attr, "name") for attr in openml_data_object]
+        tags = [self.extract_attribute(attr, "tags") for attr in openml_data_object]
+
+        all_data_description_df = pd.DataFrame(
+            {
+                "did": data_id,
+                "description": descriptions,
+                "name": names,
+                "tags": tags,
+            }
+        )
+
+        all_data_description_df["Combined_information"] = all_data_description_df.apply(
+            self.merge_all_columns_to_string, axis=1
+        )
+        # subset the metadata if subset_ids is not None
+
+        all_dataset_metadata = self.subset_metadata(subset_ids, all_dataset_metadata)
+
+        all_data_description_df.to_csv(file_path)
+
+        return (
+            all_data_description_df[["did", "name", "Combined_information"]],
+            all_data_description_df,
+        )
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ +
+ + + +

+ OpenMLMetadataProcessor + + +

+ + +
+ + +

Description: Process metadata using the OpenMLHandlers

+ +
+ Source code in backend/modules/metadata_utils.py +
267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
class OpenMLMetadataProcessor:
+    """
+    Description: Process metadata using the OpenMLHandlers
+    """
+
+    def __init__(self, config: dict):
+        self.config = config
+        self.save_filename = os.path.join(
+            config["data_dir"], f"all_{config['type_of_data']}_metadata.pkl"
+        )
+        self.description_filename = os.path.join(
+            config["data_dir"], f"all_{config['type_of_data']}_description.csv"
+        )
+
+    def get_all_metadata_from_openml(self):
+        """
+        Description: Gets all the metadata from OpenML for the type of data specified in the config.
+        If training is set to False, it loads the metadata from the files. If training is set to True, it gets the metadata from OpenML.
+
+        This uses parallel threads (pqdm) and so to ensure thread safety, install the package oslo.concurrency.
+        """
+        if not self.config.get("training", False) or self.config.get(
+            "ignore_downloading_data", False
+        ):
+            if not os.path.exists(self.save_filename):
+                raise Exception(
+                    "Metadata files do not exist. Please run the training pipeline first."
+                )
+            print("[INFO] Loading metadata from file.")
+            return load_metadata_from_file(self.save_filename)
+
+        print("[INFO] Training is set to True.")
+        handler = (
+            OpenMLDatasetHandler(self.config)
+            if self.config["type_of_data"] == "dataset"
+            else OpenMLFlowHandler(self.config)
+        )
+
+        all_objects = handler.get_openml_objects()
+
+        if self.config.get("test_subset", False):
+            print("[INFO] Subsetting the data.")
+            all_objects = all_objects[:500]
+
+        data_id = [int(all_objects.iloc[i]["did"]) for i in range(len(all_objects))]
+
+        print("[INFO] Initializing cache.")
+        handler.initialize_cache(data_id)
+
+        print(f"[INFO] Getting {self.config['type_of_data']} metadata from OpenML.")
+        openml_data_object = handler.get_metadata(data_id)
+
+        print("[INFO] Saving metadata to file.")
+        save_metadata_to_file(
+            (openml_data_object, data_id, all_objects, handler), self.save_filename
+        )
+
+        return openml_data_object, data_id, all_objects, handler
+
+    def create_metadata_dataframe(
+        self,
+        handler: Union["OpenMLDatasetHandler", "OpenMLFlowHandler"],
+        openml_data_object: Sequence[
+            Union[openml.datasets.dataset.OpenMLDataset, openml.flows.flow.OpenMLFlow]
+        ],
+        data_id: Sequence[int],
+        all_dataset_metadata: pd.DataFrame,
+        subset_ids=None,
+    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
+        """
+        Description: Creates a dataframe with all the metadata, joined columns with all information
+        for the type of data specified in the config. If training is set to False,
+        the dataframes are loaded from the files. If training is set to True, the
+        dataframes are created and then saved to the files.
+        """
+        if not self.config.get("training", False):
+            return (
+                handler.load_metadata(self.description_filename),
+                all_dataset_metadata,
+            )
+
+        return handler.process_metadata(
+            openml_data_object,
+            data_id,
+            all_dataset_metadata,
+            self.description_filename,
+            subset_ids,
+        )
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ create_metadata_dataframe(handler, openml_data_object, data_id, all_dataset_metadata, subset_ids=None) + +

+ + +
+ +

Description: Creates a dataframe with all the metadata, joined columns with all information +for the type of data specified in the config. If training is set to False, +the dataframes are loaded from the files. If training is set to True, the +dataframes are created and then saved to the files.

+ +
+ Source code in backend/modules/metadata_utils.py +
326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
def create_metadata_dataframe(
+    self,
+    handler: Union["OpenMLDatasetHandler", "OpenMLFlowHandler"],
+    openml_data_object: Sequence[
+        Union[openml.datasets.dataset.OpenMLDataset, openml.flows.flow.OpenMLFlow]
+    ],
+    data_id: Sequence[int],
+    all_dataset_metadata: pd.DataFrame,
+    subset_ids=None,
+) -> Tuple[pd.DataFrame, pd.DataFrame]:
+    """
+    Description: Creates a dataframe with all the metadata, joined columns with all information
+    for the type of data specified in the config. If training is set to False,
+    the dataframes are loaded from the files. If training is set to True, the
+    dataframes are created and then saved to the files.
+    """
+    if not self.config.get("training", False):
+        return (
+            handler.load_metadata(self.description_filename),
+            all_dataset_metadata,
+        )
+
+    return handler.process_metadata(
+        openml_data_object,
+        data_id,
+        all_dataset_metadata,
+        self.description_filename,
+        subset_ids,
+    )
+
+
+
+ +
+ +
+ + +

+ get_all_metadata_from_openml() + +

+ + +
+ +

Description: Gets all the metadata from OpenML for the type of data specified in the config. +If training is set to False, it loads the metadata from the files. If training is set to True, it gets the metadata from OpenML.

+

This uses parallel threads (pqdm) and so to ensure thread safety, install the package oslo.concurrency.

+ +
+ Source code in backend/modules/metadata_utils.py +
281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
def get_all_metadata_from_openml(self):
+    """
+    Description: Gets all the metadata from OpenML for the type of data specified in the config.
+    If training is set to False, it loads the metadata from the files. If training is set to True, it gets the metadata from OpenML.
+
+    This uses parallel threads (pqdm) and so to ensure thread safety, install the package oslo.concurrency.
+    """
+    if not self.config.get("training", False) or self.config.get(
+        "ignore_downloading_data", False
+    ):
+        if not os.path.exists(self.save_filename):
+            raise Exception(
+                "Metadata files do not exist. Please run the training pipeline first."
+            )
+        print("[INFO] Loading metadata from file.")
+        return load_metadata_from_file(self.save_filename)
+
+    print("[INFO] Training is set to True.")
+    handler = (
+        OpenMLDatasetHandler(self.config)
+        if self.config["type_of_data"] == "dataset"
+        else OpenMLFlowHandler(self.config)
+    )
+
+    all_objects = handler.get_openml_objects()
+
+    if self.config.get("test_subset", False):
+        print("[INFO] Subsetting the data.")
+        all_objects = all_objects[:500]
+
+    data_id = [int(all_objects.iloc[i]["did"]) for i in range(len(all_objects))]
+
+    print("[INFO] Initializing cache.")
+    handler.initialize_cache(data_id)
+
+    print(f"[INFO] Getting {self.config['type_of_data']} metadata from OpenML.")
+    openml_data_object = handler.get_metadata(data_id)
+
+    print("[INFO] Saving metadata to file.")
+    save_metadata_to_file(
+        (openml_data_object, data_id, all_objects, handler), self.save_filename
+    )
+
+    return openml_data_object, data_id, all_objects, handler
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ OpenMLObjectHandler + + +

+ + +
+ + +

Description: The base class for handling OpenML objects. The logic for handling datasets/flows are subclasses from this.

+ +
+ Source code in backend/modules/metadata_utils.py +
 18
+ 19
+ 20
+ 21
+ 22
+ 23
+ 24
+ 25
+ 26
+ 27
+ 28
+ 29
+ 30
+ 31
+ 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
class OpenMLObjectHandler:
+    """
+    Description: The base class for handling OpenML objects. The logic for handling datasets/flows are subclasses from this.
+    """
+
+    def __init__(self, config):
+        self.config = config
+
+    def get_description(self, data_id: int):
+        """
+        Description: Get the description of the OpenML object.
+        """
+        raise NotImplementedError
+
+    def get_openml_objects(self):
+        """
+        Description: Get the OpenML objects.
+        """
+        raise NotImplementedError
+
+    def initialize_cache(self, data_id: Sequence[int]) -> None:
+        """
+        Description: Initialize the cache for the OpenML objects.
+        """
+        self.get_description(data_id[0])
+
+    def get_metadata(self, data_id: Sequence[int]):
+        """
+        Description: Get metadata from OpenML using parallel processing.
+        """
+        return pqdm(
+            data_id, self.get_description, n_jobs=self.config["data_download_n_jobs"]
+        )
+
+    def process_metadata(
+        self,
+        openml_data_object,
+        data_id: Sequence[int],
+        all_dataset_metadata: pd.DataFrame,
+        file_path: str,
+        subset_ids=None,
+    ):
+        """
+        Description: Process the metadata.
+        """
+        raise NotImplementedError
+
+    @staticmethod
+    def load_metadata(file_path: str):
+        """
+        Description: Load metadata from a file.
+        """
+        try:
+            return pd.read_csv(file_path)
+        except FileNotFoundError:
+            raise Exception(
+                "Metadata files do not exist. Please run the training pipeline first."
+            )
+
+    @staticmethod
+    def extract_attribute(attribute: object, attr_name: str) -> str:
+        """
+        Description: Extract an attribute from the OpenML object.
+        """
+        return getattr(attribute, attr_name, "")
+
+    @staticmethod
+    def join_attributes(attribute: object, attr_name: str) -> str:
+        """
+        Description: Join the attributes of the OpenML object.
+        """
+        return (
+            " ".join(
+                [f"{k} : {v}," for k, v in getattr(attribute, attr_name, {}).items()]
+            )
+            if hasattr(attribute, attr_name)
+            else ""
+        )
+
+    @staticmethod
+    def create_combined_information_df_for_datasets(
+        data_id: int | Sequence[int],
+        descriptions: Sequence[str],
+        joined_qualities: Sequence[str],
+        joined_features: Sequence[str],
+    ) -> pd.DataFrame:
+        """
+        Description: Create a dataframe with the combined information of the OpenML object.
+        """
+        return pd.DataFrame(
+            {
+                "did": data_id,
+                "description": descriptions,
+                "qualities": joined_qualities,
+                "features": joined_features,
+            }
+        )
+
+    @staticmethod
+    def merge_all_columns_to_string(row: pd.Series) -> str:
+        """
+        Description: Create a single column that has a combined string of all the metadata and the description in the form of "column - value, column - value, ... description"
+        """
+        return " ".join([f"{col} - {val}," for col, val in zip(row.index, row.values)])
+
+    def combine_metadata(
+        self, all_dataset_metadata: pd.DataFrame, all_data_description_df: pd.DataFrame
+    ) -> pd.DataFrame:
+        """
+        Description: Combine the descriptions with the metadata table.
+        """
+        all_dataset_metadata = pd.merge(
+            all_dataset_metadata, all_data_description_df, on="did", how="inner"
+        )
+        all_dataset_metadata["Combined_information"] = all_dataset_metadata.apply(
+            self.merge_all_columns_to_string, axis=1
+        )
+        return all_dataset_metadata
+
+    @staticmethod
+    def subset_metadata(
+        subset_ids: Sequence[int] | None, all_dataset_metadata: pd.DataFrame
+    ):
+        if subset_ids is not None:
+            subset_ids = [int(x) for x in subset_ids]
+            all_dataset_metadata = all_dataset_metadata[
+                all_dataset_metadata["did"].isin(subset_ids)
+            ]
+        return all_dataset_metadata
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ combine_metadata(all_dataset_metadata, all_data_description_df) + +

+ + +
+ +

Description: Combine the descriptions with the metadata table.

+ +
+ Source code in backend/modules/metadata_utils.py +
123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
def combine_metadata(
+    self, all_dataset_metadata: pd.DataFrame, all_data_description_df: pd.DataFrame
+) -> pd.DataFrame:
+    """
+    Description: Combine the descriptions with the metadata table.
+    """
+    all_dataset_metadata = pd.merge(
+        all_dataset_metadata, all_data_description_df, on="did", how="inner"
+    )
+    all_dataset_metadata["Combined_information"] = all_dataset_metadata.apply(
+        self.merge_all_columns_to_string, axis=1
+    )
+    return all_dataset_metadata
+
+
+
+ +
+ +
+ + +

+ create_combined_information_df_for_datasets(data_id, descriptions, joined_qualities, joined_features) + + + staticmethod + + +

+ + +
+ +

Description: Create a dataframe with the combined information of the OpenML object.

+ +
+ Source code in backend/modules/metadata_utils.py +
 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
@staticmethod
+def create_combined_information_df_for_datasets(
+    data_id: int | Sequence[int],
+    descriptions: Sequence[str],
+    joined_qualities: Sequence[str],
+    joined_features: Sequence[str],
+) -> pd.DataFrame:
+    """
+    Description: Create a dataframe with the combined information of the OpenML object.
+    """
+    return pd.DataFrame(
+        {
+            "did": data_id,
+            "description": descriptions,
+            "qualities": joined_qualities,
+            "features": joined_features,
+        }
+    )
+
+
+
+ +
+ +
+ + +

+ extract_attribute(attribute, attr_name) + + + staticmethod + + +

+ + +
+ +

Description: Extract an attribute from the OpenML object.

+ +
+ Source code in backend/modules/metadata_utils.py +
77
+78
+79
+80
+81
+82
@staticmethod
+def extract_attribute(attribute: object, attr_name: str) -> str:
+    """
+    Description: Extract an attribute from the OpenML object.
+    """
+    return getattr(attribute, attr_name, "")
+
+
+
+ +
+ +
+ + +

+ get_description(data_id) + +

+ + +
+ +

Description: Get the description of the OpenML object.

+ +
+ Source code in backend/modules/metadata_utils.py +
26
+27
+28
+29
+30
def get_description(self, data_id: int):
+    """
+    Description: Get the description of the OpenML object.
+    """
+    raise NotImplementedError
+
+
+
+ +
+ +
+ + +

+ get_metadata(data_id) + +

+ + +
+ +

Description: Get metadata from OpenML using parallel processing.

+ +
+ Source code in backend/modules/metadata_utils.py +
44
+45
+46
+47
+48
+49
+50
def get_metadata(self, data_id: Sequence[int]):
+    """
+    Description: Get metadata from OpenML using parallel processing.
+    """
+    return pqdm(
+        data_id, self.get_description, n_jobs=self.config["data_download_n_jobs"]
+    )
+
+
+
+ +
+ +
+ + +

+ get_openml_objects() + +

+ + +
+ +

Description: Get the OpenML objects.

+ +
+ Source code in backend/modules/metadata_utils.py +
32
+33
+34
+35
+36
def get_openml_objects(self):
+    """
+    Description: Get the OpenML objects.
+    """
+    raise NotImplementedError
+
+
+
+ +
+ +
+ + +

+ initialize_cache(data_id) + +

+ + +
+ +

Description: Initialize the cache for the OpenML objects.

+ +
+ Source code in backend/modules/metadata_utils.py +
38
+39
+40
+41
+42
def initialize_cache(self, data_id: Sequence[int]) -> None:
+    """
+    Description: Initialize the cache for the OpenML objects.
+    """
+    self.get_description(data_id[0])
+
+
+
+ +
+ +
+ + +

+ join_attributes(attribute, attr_name) + + + staticmethod + + +

+ + +
+ +

Description: Join the attributes of the OpenML object.

+ +
+ Source code in backend/modules/metadata_utils.py +
84
+85
+86
+87
+88
+89
+90
+91
+92
+93
+94
+95
@staticmethod
+def join_attributes(attribute: object, attr_name: str) -> str:
+    """
+    Description: Join the attributes of the OpenML object.
+    """
+    return (
+        " ".join(
+            [f"{k} : {v}," for k, v in getattr(attribute, attr_name, {}).items()]
+        )
+        if hasattr(attribute, attr_name)
+        else ""
+    )
+
+
+
+ +
+ +
+ + +

+ load_metadata(file_path) + + + staticmethod + + +

+ + +
+ +

Description: Load metadata from a file.

+ +
+ Source code in backend/modules/metadata_utils.py +
65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
@staticmethod
+def load_metadata(file_path: str):
+    """
+    Description: Load metadata from a file.
+    """
+    try:
+        return pd.read_csv(file_path)
+    except FileNotFoundError:
+        raise Exception(
+            "Metadata files do not exist. Please run the training pipeline first."
+        )
+
+
+
+ +
+ +
+ + +

+ merge_all_columns_to_string(row) + + + staticmethod + + +

+ + +
+ +

Description: Create a single column that has a combined string of all the metadata and the description in the form of "column - value, column - value, ... description"

+ +
+ Source code in backend/modules/metadata_utils.py +
116
+117
+118
+119
+120
+121
@staticmethod
+def merge_all_columns_to_string(row: pd.Series) -> str:
+    """
+    Description: Create a single column that has a combined string of all the metadata and the description in the form of "column - value, column - value, ... description"
+    """
+    return " ".join([f"{col} - {val}," for col, val in zip(row.index, row.values)])
+
+
+
+ +
+ +
+ + +

+ process_metadata(openml_data_object, data_id, all_dataset_metadata, file_path, subset_ids=None) + +

+ + +
+ +

Description: Process the metadata.

+ +
+ Source code in backend/modules/metadata_utils.py +
52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
def process_metadata(
+    self,
+    openml_data_object,
+    data_id: Sequence[int],
+    all_dataset_metadata: pd.DataFrame,
+    file_path: str,
+    subset_ids=None,
+):
+    """
+    Description: Process the metadata.
+    """
+    raise NotImplementedError
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/result_gen/index.html b/Rag Pipeline/result_gen/index.html new file mode 100644 index 0000000..b6e7ebd --- /dev/null +++ b/Rag Pipeline/result_gen/index.html @@ -0,0 +1,2219 @@ + + + + + + + + + + + + + + + + + + + + + + + Result gen - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Result gen

+ +

Results Generation

+ + + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ QueryProcessor + + +

+ + +
+ + +
+ Source code in backend/modules/results_gen.py +
 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
class QueryProcessor:
+    def __init__(self, query: str, qa: RetrievalQA, type_of_query: str, config: dict):
+        self.query = query
+        self.qa = qa
+        self.type_of_query = type_of_query
+        self.config = config
+
+    def fetch_results(self):
+        """
+        Fetch results for the query using the QA chain.
+        """
+        results = self.qa.invoke(
+            input=self.query,
+            config={
+                "temperature": self.config["temperature"],
+                "top-p": self.config["top_p"],
+            },
+        )
+        if self.config["long_context_reorder"]:
+            results = long_context_reorder(results)
+        id_column = {"dataset": "did", "flow": "id", "data": "did"}[self.type_of_query]
+
+        if self.config["reranking"]:
+            try:
+                print("[INFO] Reranking results...")
+                ranker = Ranker(model_name="ms-marco-MiniLM-L-12-v2", cache_dir="/tmp/")
+                rerankrequest = RerankRequest(
+                    query=self.query,
+                    passages=[
+                        {"id": result.metadata[id_column], "text": result.page_content}
+                        for result in results
+                    ],
+                )
+                ranking = ranker.rerank(rerankrequest)
+                ids = [result["id"] for result in ranking]
+                ranked_results = [
+                    result for result in results if result.metadata[id_column] in ids
+                ]
+                print("[INFO] Reranking complete.")
+                return ranked_results
+            except Exception as e:
+                print(f"[ERROR] Reranking failed: {e}")
+                return results
+        else:
+            return results
+
+    @staticmethod
+    def process_documents(
+        source_documents: Sequence[Document],
+    ) -> Tuple[OrderedDict, list]:
+        """
+        Process the source documents and create a dictionary with the key_name as the key and the name and page content as the values.
+        """
+        dict_results = OrderedDict()
+        for result in source_documents:
+            dict_results[result.metadata["did"]] = {
+                "name": result.metadata["name"],
+                "page_content": result.page_content,
+            }
+        ids = [result.metadata["did"] for result in source_documents]
+        return dict_results, ids
+
+    @staticmethod
+    def make_clickable(val: str) -> str:
+        """
+        Make the URL clickable in the dataframe.
+        """
+        return '<a href="{}">{}</a>'.format(val, val)
+
+    def create_output_dataframe(
+        self, dict_results: dict, type_of_data: str, ids_order: list
+    ) -> pd.DataFrame:
+        """
+        Create an output dataframe with the results. The URLs are API calls to the OpenML API for the specific type of data.
+        """
+        output_df = pd.DataFrame(dict_results).T.reset_index()
+        output_df["index"] = output_df["index"].astype(int)
+        output_df = output_df.set_index("index").loc[ids_order].reset_index()
+        output_df["urls"] = output_df["index"].apply(
+            lambda x: f"https://www.openml.org/search?type={type_of_data}&id={x}"
+        )
+        output_df["urls"] = output_df["urls"].apply(self.make_clickable)
+
+        if type_of_data == "data":
+            output_df["command"] = output_df["index"].apply(
+                lambda x: f"dataset = openml.datasets.get_dataset({x})"
+            )
+        elif type_of_data == "flow":
+            output_df["command"] = output_df["index"].apply(
+                lambda x: f"flow = openml.flows.get_flow({x})"
+            )
+        output_df = output_df.drop_duplicates(subset=["name"])
+        replace_dict = {
+            "index": "id",
+            "command": "Command",
+            "urls": "OpenML URL",
+            "page_content": "Description",
+        }
+        for col in ["index", "command", "urls", "page_content"]:
+            if col in output_df.columns:
+                output_df = output_df.rename(columns={col: replace_dict[col]})
+        return output_df
+
+    @staticmethod
+    def check_query(query: str) -> str:
+        """
+        Performs checks on the query:
+        - Replaces %20 with space character (browsers do this automatically when spaces are in the URL)
+        - Removes leading and trailing spaces
+        - Limits the query to 200 characters
+        """
+        if query == "":
+            raise ValueError("Query cannot be empty.")
+        query = query.replace("%20", " ")
+        query = query.strip()
+        query = query[:200]
+        return query
+
+    def get_result_from_query(self) -> Tuple[pd.DataFrame, Sequence[Document]]:
+        """
+        Get the result from the query using the QA chain and return the results in a dataframe that is then sent to the frontend.
+        """
+        if self.type_of_query == "dataset":
+            type_of_query = "data"
+        elif self.type_of_query == "flow":
+            type_of_query = "flow"
+        else:
+            raise ValueError(f"Unsupported type_of_data: {self.type_of_query}")
+
+        query = self.check_query(self.query)
+        if query == "":
+            return pd.DataFrame(), []
+
+        source_documents = self.fetch_results()
+        dict_results, ids_order = self.process_documents(source_documents)
+        output_df = self.create_output_dataframe(dict_results, type_of_query, ids_order)
+
+        return output_df, ids_order
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ check_query(query) + + + staticmethod + + +

+ + +
+ +

Performs checks on the query: +- Replaces %20 with space character (browsers do this automatically when spaces are in the URL) +- Removes leading and trailing spaces +- Limits the query to 200 characters

+ +
+ Source code in backend/modules/results_gen.py +
138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
@staticmethod
+def check_query(query: str) -> str:
+    """
+    Performs checks on the query:
+    - Replaces %20 with space character (browsers do this automatically when spaces are in the URL)
+    - Removes leading and trailing spaces
+    - Limits the query to 200 characters
+    """
+    if query == "":
+        raise ValueError("Query cannot be empty.")
+    query = query.replace("%20", " ")
+    query = query.strip()
+    query = query[:200]
+    return query
+
+
+
+ +
+ +
+ + +

+ create_output_dataframe(dict_results, type_of_data, ids_order) + +

+ + +
+ +

Create an output dataframe with the results. The URLs are API calls to the OpenML API for the specific type of data.

+ +
+ Source code in backend/modules/results_gen.py +
104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
def create_output_dataframe(
+    self, dict_results: dict, type_of_data: str, ids_order: list
+) -> pd.DataFrame:
+    """
+    Create an output dataframe with the results. The URLs are API calls to the OpenML API for the specific type of data.
+    """
+    output_df = pd.DataFrame(dict_results).T.reset_index()
+    output_df["index"] = output_df["index"].astype(int)
+    output_df = output_df.set_index("index").loc[ids_order].reset_index()
+    output_df["urls"] = output_df["index"].apply(
+        lambda x: f"https://www.openml.org/search?type={type_of_data}&id={x}"
+    )
+    output_df["urls"] = output_df["urls"].apply(self.make_clickable)
+
+    if type_of_data == "data":
+        output_df["command"] = output_df["index"].apply(
+            lambda x: f"dataset = openml.datasets.get_dataset({x})"
+        )
+    elif type_of_data == "flow":
+        output_df["command"] = output_df["index"].apply(
+            lambda x: f"flow = openml.flows.get_flow({x})"
+        )
+    output_df = output_df.drop_duplicates(subset=["name"])
+    replace_dict = {
+        "index": "id",
+        "command": "Command",
+        "urls": "OpenML URL",
+        "page_content": "Description",
+    }
+    for col in ["index", "command", "urls", "page_content"]:
+        if col in output_df.columns:
+            output_df = output_df.rename(columns={col: replace_dict[col]})
+    return output_df
+
+
+
+ +
+ +
+ + +

+ fetch_results() + +

+ + +
+ +

Fetch results for the query using the QA chain.

+ +
+ Source code in backend/modules/results_gen.py +
42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
def fetch_results(self):
+    """
+    Fetch results for the query using the QA chain.
+    """
+    results = self.qa.invoke(
+        input=self.query,
+        config={
+            "temperature": self.config["temperature"],
+            "top-p": self.config["top_p"],
+        },
+    )
+    if self.config["long_context_reorder"]:
+        results = long_context_reorder(results)
+    id_column = {"dataset": "did", "flow": "id", "data": "did"}[self.type_of_query]
+
+    if self.config["reranking"]:
+        try:
+            print("[INFO] Reranking results...")
+            ranker = Ranker(model_name="ms-marco-MiniLM-L-12-v2", cache_dir="/tmp/")
+            rerankrequest = RerankRequest(
+                query=self.query,
+                passages=[
+                    {"id": result.metadata[id_column], "text": result.page_content}
+                    for result in results
+                ],
+            )
+            ranking = ranker.rerank(rerankrequest)
+            ids = [result["id"] for result in ranking]
+            ranked_results = [
+                result for result in results if result.metadata[id_column] in ids
+            ]
+            print("[INFO] Reranking complete.")
+            return ranked_results
+        except Exception as e:
+            print(f"[ERROR] Reranking failed: {e}")
+            return results
+    else:
+        return results
+
+
+
+ +
+ +
+ + +

+ get_result_from_query() + +

+ + +
+ +

Get the result from the query using the QA chain and return the results in a dataframe that is then sent to the frontend.

+ +
+ Source code in backend/modules/results_gen.py +
153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
def get_result_from_query(self) -> Tuple[pd.DataFrame, Sequence[Document]]:
+    """
+    Get the result from the query using the QA chain and return the results in a dataframe that is then sent to the frontend.
+    """
+    if self.type_of_query == "dataset":
+        type_of_query = "data"
+    elif self.type_of_query == "flow":
+        type_of_query = "flow"
+    else:
+        raise ValueError(f"Unsupported type_of_data: {self.type_of_query}")
+
+    query = self.check_query(self.query)
+    if query == "":
+        return pd.DataFrame(), []
+
+    source_documents = self.fetch_results()
+    dict_results, ids_order = self.process_documents(source_documents)
+    output_df = self.create_output_dataframe(dict_results, type_of_query, ids_order)
+
+    return output_df, ids_order
+
+
+
+ +
+ +
+ + +

+ make_clickable(val) + + + staticmethod + + +

+ + +
+ +

Make the URL clickable in the dataframe.

+ +
+ Source code in backend/modules/results_gen.py +
 97
+ 98
+ 99
+100
+101
+102
@staticmethod
+def make_clickable(val: str) -> str:
+    """
+    Make the URL clickable in the dataframe.
+    """
+    return '<a href="{}">{}</a>'.format(val, val)
+
+
+
+ +
+ +
+ + +

+ process_documents(source_documents) + + + staticmethod + + +

+ + +
+ +

Process the source documents and create a dictionary with the key_name as the key and the name and page content as the values.

+ +
+ Source code in backend/modules/results_gen.py +
81
+82
+83
+84
+85
+86
+87
+88
+89
+90
+91
+92
+93
+94
+95
@staticmethod
+def process_documents(
+    source_documents: Sequence[Document],
+) -> Tuple[OrderedDict, list]:
+    """
+    Process the source documents and create a dictionary with the key_name as the key and the name and page content as the values.
+    """
+    dict_results = OrderedDict()
+    for result in source_documents:
+        dict_results[result.metadata["did"]] = {
+            "name": result.metadata["name"],
+            "page_content": result.page_content,
+        }
+    ids = [result.metadata["did"] for result in source_documents]
+    return dict_results, ids
+
+
+
+ +
+ + + +
+ +
+ +
+ + +
+ + +

+ long_context_reorder(results) + +

+ + +
+ +

Description: Lost in the middle reorder: the less relevant documents will be at the +middle of the list and more relevant elements at beginning / end. +See: https://arxiv.org/abs//2307.03172

+ +
+ Source code in backend/modules/results_gen.py +
20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
def long_context_reorder(results):
+    """
+    Description: Lost in the middle reorder: the less relevant documents will be at the
+    middle of the list and more relevant elements at beginning / end.
+    See: https://arxiv.org/abs//2307.03172
+
+
+    """
+    print("[INFO] Reordering results...")
+    reordering = LongContextReorder()
+    results = reordering.transform_documents(results)
+    print("[INFO] Reordering complete.")
+    return results
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/training/index.html b/Rag Pipeline/training/index.html new file mode 100644 index 0000000..2b12c0d --- /dev/null +++ b/Rag Pipeline/training/index.html @@ -0,0 +1,1288 @@ + + + + + + + + + + + + + + + + + + + + + + + Training - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Training

+
    +
  • While we are not creating a new model, we are using the existing model to create embeddings. The name might be misleading but this was chosen as an attempt to keep the naming consistent with other codebases.
  • +
  • (Perhaps we might fine tune the model in the future)
  • +
  • The training script is present in training.py. Running this script will take care of everything.
  • +
+

What does the training script do?

+
    +
  • Load the config file and set the necessary variables
  • +
  • If testing_flag is set to True, the script will use a subset of the data for quick debugging
  • +
  • testing_flag is set to True
  • +
  • persist_dir is set to ./data/chroma_db_testing
  • +
  • test_subset is set to True
  • +
  • data_dir is set to ./data/testing_data/
  • +
  • If testing_flag is set to False, the script will use the entire dataset
  • +
  • For all datasets in the OpenML dataset list:
  • +
  • Download the dataset
  • +
  • Create the vector dataset with computed embeddings
  • +
  • Create a vectordb retriever
  • +
  • Run some test queries
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/Rag Pipeline/vector_store/index.html b/Rag Pipeline/vector_store/index.html new file mode 100644 index 0000000..fbeb959 --- /dev/null +++ b/Rag Pipeline/vector_store/index.html @@ -0,0 +1,2567 @@ + + + + + + + + + + + + + + + + + + + + + + + Vector Store Utilities - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Vector Store Utilities

+
    +
  • Code for loading data into the vector store.
  • +
+

What to look for

+
    +
  • DataLoader: If you want to modify chunking
  • +
  • DocumentProcessor: If you want to modify how the unique documents are obtained and/or add other methods
  • +
  • VectorStoreManager: If you want to modify how the documents are embedded and loaded to the vector store
  • +
+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ DataLoader + + +

+ + +
+ + +

Description: Used to chunk data

+ +
+ Source code in backend/modules/vector_store_utils.py +
13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
class DataLoader:
+    """
+    Description: Used to chunk data
+    """
+
+    def __init__(
+        self,
+        metadata_df: pd.DataFrame,
+        page_content_column: str,
+        chunk_size: int = 1000,
+        chunk_overlap: int = 150,
+    ):
+        self.metadata_df = metadata_df
+        self.page_content_column = page_content_column
+        self.chunk_size = chunk_size
+        self.chunk_overlap = (
+            chunk_overlap if self.chunk_size > chunk_overlap else self.chunk_size
+        )
+
+    def load_and_process_data(self) -> list:
+        """
+        Description: Recursively chunk data before embedding
+        """
+        loader = DataFrameLoader(
+            self.metadata_df, page_content_column=self.page_content_column
+        )
+        documents = loader.load()
+
+        text_splitter = RecursiveCharacterTextSplitter(
+            chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap
+        )
+        documents = text_splitter.split_documents(documents)
+
+        return documents
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ load_and_process_data() + +

+ + +
+ +

Description: Recursively chunk data before embedding

+ +
+ Source code in backend/modules/vector_store_utils.py +
32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
def load_and_process_data(self) -> list:
+    """
+    Description: Recursively chunk data before embedding
+    """
+    loader = DataFrameLoader(
+        self.metadata_df, page_content_column=self.page_content_column
+    )
+    documents = loader.load()
+
+    text_splitter = RecursiveCharacterTextSplitter(
+        chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap
+    )
+    documents = text_splitter.split_documents(documents)
+
+    return documents
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ DocumentProcessor + + +

+ + +
+ + +

Description: Used to generate unique documents based on text content to prevent duplicates during embedding

+ +
+ Source code in backend/modules/vector_store_utils.py +
49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
class DocumentProcessor:
+    """
+    Description: Used to generate unique documents based on text content to prevent duplicates during embedding
+    """
+
+    @staticmethod
+    def generate_unique_documents(documents: list, db: Chroma) -> tuple:
+        """
+        Description: Sometimes the text content of the data is the same, this ensures that does not happen by computing a string matching
+        """
+        new_document_ids = set([str(x.metadata["did"]) for x in documents])
+        print(f"[INFO] Generating unique documents. Total documents: {len(documents)}")
+        try:
+            old_dids = set([str(x["did"]) for x in db.get()["metadatas"]])
+        except KeyError:
+            old_dids = set([str(x["id"]) for x in db.get()["metadatas"]])
+
+        new_dids = new_document_ids - old_dids
+        documents = [x for x in documents if str(x.metadata["did"]) in new_dids]
+        ids = [
+            str(uuid.uuid5(uuid.NAMESPACE_DNS, doc.page_content)) for doc in documents
+        ]
+
+        unique_ids = list(set(ids))
+        seen_ids = set()
+        unique_docs = [
+            doc
+            for doc, id in zip(documents, ids)
+            if id not in seen_ids and (seen_ids.add(id) or True)
+        ]
+
+        return unique_docs, unique_ids
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ generate_unique_documents(documents, db) + + + staticmethod + + +

+ + +
+ +

Description: Sometimes the text content of the data is the same, this ensures that does not happen by computing a string matching

+ +
+ Source code in backend/modules/vector_store_utils.py +
54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
@staticmethod
+def generate_unique_documents(documents: list, db: Chroma) -> tuple:
+    """
+    Description: Sometimes the text content of the data is the same, this ensures that does not happen by computing a string matching
+    """
+    new_document_ids = set([str(x.metadata["did"]) for x in documents])
+    print(f"[INFO] Generating unique documents. Total documents: {len(documents)}")
+    try:
+        old_dids = set([str(x["did"]) for x in db.get()["metadatas"]])
+    except KeyError:
+        old_dids = set([str(x["id"]) for x in db.get()["metadatas"]])
+
+    new_dids = new_document_ids - old_dids
+    documents = [x for x in documents if str(x.metadata["did"]) in new_dids]
+    ids = [
+        str(uuid.uuid5(uuid.NAMESPACE_DNS, doc.page_content)) for doc in documents
+    ]
+
+    unique_ids = list(set(ids))
+    seen_ids = set()
+    unique_docs = [
+        doc
+        for doc, id in zip(documents, ids)
+        if id not in seen_ids and (seen_ids.add(id) or True)
+    ]
+
+    return unique_docs, unique_ids
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ VectorStoreManager + + +

+ + +
+ + +

Description: Manages the Vector store (chromadb) and takes care of data ingestion, loading the embedding model and embedding the data before adding it to the vector store

+ +
+ Source code in backend/modules/vector_store_utils.py +
 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
class VectorStoreManager:
+    """
+    Description: Manages the Vector store (chromadb) and takes care of data ingestion, loading the embedding model and embedding the data before adding it to the vector store
+    """
+
+    def __init__(self, chroma_client: ClientAPI, config: dict):
+        self.chroma_client = chroma_client
+        self.config = config
+        self.chunk_size = 100
+
+    def chunk_dataframe(self, df, chunk_size):
+        """
+        Description: Chunk dataframe for use with chroma metadata saving
+        """
+        for i in range(0, df.shape[0], self.chunk_size):
+            yield df.iloc[i : i + self.chunk_size]
+
+    def add_df_chunks_to_db(self, metadata_df):
+        """
+        Description: Add chunks from a dataframe for use with chroma metadata saving
+        """
+        collec = self.chroma_client.get_or_create_collection("metadata")
+        for chunk in tqdm(
+            self.chunk_dataframe(metadata_df, self.chunk_size),
+            total=(len(metadata_df) // self.chunk_size) + 1,
+        ):
+            ids = chunk["did"].astype(str).tolist()
+            documents = chunk["description"].astype(str).tolist()
+            metadatas = chunk.to_dict(orient="records")
+
+            # Add to ChromaDB collection
+            collec.add(ids=ids, documents=documents, metadatas=metadatas)
+
+    def load_model(self) -> HuggingFaceEmbeddings | None:
+        """
+        Description: Load a model from Hugging face for embedding
+        """
+        print("[INFO] Loading model...")
+        model_kwargs = {"device": self.config["device"], "trust_remote_code": True}
+        encode_kwargs = {"normalize_embeddings": True}
+        embeddings = HuggingFaceEmbeddings(
+            model_name=self.config["embedding_model"],
+            model_kwargs=model_kwargs,
+            encode_kwargs=encode_kwargs,
+            show_progress=False,
+        )
+        print("[INFO] Model loaded.")
+        return embeddings
+
+    def get_collection_name(self) -> str:
+        """
+        Description: Fixes some collection names. (workaround from OpenML API)
+        """
+        return {"dataset": "datasets", "flow": "flows"}.get(
+            self.config["type_of_data"], "default"
+        )
+
+    def load_vector_store(
+        self, embeddings: HuggingFaceEmbeddings, collection_name: str
+    ) -> Chroma:
+        """
+        Description: Persist directory. If does not exist, cannot be served
+        """
+        if not os.path.exists(self.config["persist_dir"]):
+            raise Exception(
+                "Persist directory does not exist. Please run the training pipeline first."
+            )
+
+        return Chroma(
+            client=self.chroma_client,
+            persist_directory=self.config["persist_dir"],
+            embedding_function=embeddings,
+            collection_name=collection_name,
+        )
+
+    @staticmethod
+    def add_documents_to_db(db, unique_docs, unique_ids, bs=512):
+        """
+        Description: Add documents to Chroma DB in batches of bs
+        """
+        if len(unique_docs) < bs:
+            db.add_documents(unique_docs, ids=unique_ids)
+        else:
+            for i in range(0, len(unique_docs), bs):
+                db.add_documents(unique_docs[i : i + bs], ids=unique_ids[i : i + bs])
+
+    def create_vector_store(self, metadata_df: pd.DataFrame) -> Chroma:
+        """
+        Description: Load embeddings, get chunked data, subset if needed , find unique, and then finally add to ChromaDB
+        """
+        embeddings = self.load_model()
+        collection_name = self.get_collection_name()
+
+        db = Chroma(
+            client=self.chroma_client,
+            embedding_function=embeddings,
+            persist_directory=self.config["persist_dir"],
+            collection_name=collection_name,
+        )
+
+        data_loader = DataLoader(
+            metadata_df,
+            page_content_column="Combined_information",
+            chunk_size=self.config["chunk_size"],
+        )
+        documents = data_loader.load_and_process_data()
+
+        if self.config["testing_flag"]:
+            if self.config["test_subset"]:
+                print("[INFO] Subsetting the data.")
+                documents = documents[:500]
+
+        unique_docs, unique_ids = DocumentProcessor.generate_unique_documents(
+            documents, db
+        )
+        print(
+            f"Number of unique documents: {len(unique_docs)} vs Total documents: {len(documents)}"
+        )
+
+        if len(unique_docs) == 0:
+            print("No new documents to add.")
+        else:
+            self.add_documents_to_db(db, unique_docs, unique_ids)
+
+        return db
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ add_df_chunks_to_db(metadata_df) + +

+ + +
+ +

Description: Add chunks from a dataframe for use with chroma metadata saving

+ +
+ Source code in backend/modules/vector_store_utils.py +
100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
def add_df_chunks_to_db(self, metadata_df):
+    """
+    Description: Add chunks from a dataframe for use with chroma metadata saving
+    """
+    collec = self.chroma_client.get_or_create_collection("metadata")
+    for chunk in tqdm(
+        self.chunk_dataframe(metadata_df, self.chunk_size),
+        total=(len(metadata_df) // self.chunk_size) + 1,
+    ):
+        ids = chunk["did"].astype(str).tolist()
+        documents = chunk["description"].astype(str).tolist()
+        metadatas = chunk.to_dict(orient="records")
+
+        # Add to ChromaDB collection
+        collec.add(ids=ids, documents=documents, metadatas=metadatas)
+
+
+
+ +
+ +
+ + +

+ add_documents_to_db(db, unique_docs, unique_ids, bs=512) + + + staticmethod + + +

+ + +
+ +

Description: Add documents to Chroma DB in batches of bs

+ +
+ Source code in backend/modules/vector_store_utils.py +
158
+159
+160
+161
+162
+163
+164
+165
+166
+167
@staticmethod
+def add_documents_to_db(db, unique_docs, unique_ids, bs=512):
+    """
+    Description: Add documents to Chroma DB in batches of bs
+    """
+    if len(unique_docs) < bs:
+        db.add_documents(unique_docs, ids=unique_ids)
+    else:
+        for i in range(0, len(unique_docs), bs):
+            db.add_documents(unique_docs[i : i + bs], ids=unique_ids[i : i + bs])
+
+
+
+ +
+ +
+ + +

+ chunk_dataframe(df, chunk_size) + +

+ + +
+ +

Description: Chunk dataframe for use with chroma metadata saving

+ +
+ Source code in backend/modules/vector_store_utils.py +
93
+94
+95
+96
+97
+98
def chunk_dataframe(self, df, chunk_size):
+    """
+    Description: Chunk dataframe for use with chroma metadata saving
+    """
+    for i in range(0, df.shape[0], self.chunk_size):
+        yield df.iloc[i : i + self.chunk_size]
+
+
+
+ +
+ +
+ + +

+ create_vector_store(metadata_df) + +

+ + +
+ +

Description: Load embeddings, get chunked data, subset if needed , find unique, and then finally add to ChromaDB

+ +
+ Source code in backend/modules/vector_store_utils.py +
169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
def create_vector_store(self, metadata_df: pd.DataFrame) -> Chroma:
+    """
+    Description: Load embeddings, get chunked data, subset if needed , find unique, and then finally add to ChromaDB
+    """
+    embeddings = self.load_model()
+    collection_name = self.get_collection_name()
+
+    db = Chroma(
+        client=self.chroma_client,
+        embedding_function=embeddings,
+        persist_directory=self.config["persist_dir"],
+        collection_name=collection_name,
+    )
+
+    data_loader = DataLoader(
+        metadata_df,
+        page_content_column="Combined_information",
+        chunk_size=self.config["chunk_size"],
+    )
+    documents = data_loader.load_and_process_data()
+
+    if self.config["testing_flag"]:
+        if self.config["test_subset"]:
+            print("[INFO] Subsetting the data.")
+            documents = documents[:500]
+
+    unique_docs, unique_ids = DocumentProcessor.generate_unique_documents(
+        documents, db
+    )
+    print(
+        f"Number of unique documents: {len(unique_docs)} vs Total documents: {len(documents)}"
+    )
+
+    if len(unique_docs) == 0:
+        print("No new documents to add.")
+    else:
+        self.add_documents_to_db(db, unique_docs, unique_ids)
+
+    return db
+
+
+
+ +
+ +
+ + +

+ get_collection_name() + +

+ + +
+ +

Description: Fixes some collection names. (workaround from OpenML API)

+ +
+ Source code in backend/modules/vector_store_utils.py +
132
+133
+134
+135
+136
+137
+138
def get_collection_name(self) -> str:
+    """
+    Description: Fixes some collection names. (workaround from OpenML API)
+    """
+    return {"dataset": "datasets", "flow": "flows"}.get(
+        self.config["type_of_data"], "default"
+    )
+
+
+
+ +
+ +
+ + +

+ load_model() + +

+ + +
+ +

Description: Load a model from Hugging face for embedding

+ +
+ Source code in backend/modules/vector_store_utils.py +
116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
def load_model(self) -> HuggingFaceEmbeddings | None:
    """
    Description: Load a model from Hugging face for embedding

    Instantiates a HuggingFaceEmbeddings model on the configured device with
    embedding normalisation enabled and progress output disabled.
    """
    print("[INFO] Loading model...")
    model = HuggingFaceEmbeddings(
        model_name=self.config["embedding_model"],
        model_kwargs={"device": self.config["device"], "trust_remote_code": True},
        encode_kwargs={"normalize_embeddings": True},
        show_progress=False,
    )
    print("[INFO] Model loaded.")
    return model
+
+
+
+ +
+ +
+ + +

+ load_vector_store(embeddings, collection_name) + +

+ + +
+ +

Description: Persist directory. If does not exist, cannot be served

+ +
+ Source code in backend/modules/vector_store_utils.py +
140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
def load_vector_store(
    self, embeddings: HuggingFaceEmbeddings, collection_name: str
) -> Chroma:
    """
    Description: Persist directory. If does not exist, cannot be served

    Opens the persisted Chroma collection; raises when the persist directory
    has not been created by the training pipeline yet.
    """
    persist_dir = self.config["persist_dir"]
    if not os.path.exists(persist_dir):
        raise Exception(
            "Persist directory does not exist. Please run the training pipeline first."
        )

    return Chroma(
        client=self.chroma_client,
        persist_directory=persist_dir,
        embedding_function=embeddings,
        collection_name=collection_name,
    )
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/UI/api_reference/index.html b/UI/api_reference/index.html new file mode 100644 index 0000000..be60eb7 --- /dev/null +++ b/UI/api_reference/index.html @@ -0,0 +1,5544 @@ + + + + + + + + + + + + + + + + + + + + + + + Api reference - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + +

Api reference

+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ LLMResponseParser + + +

+ + +
+ + +

Description: Parse the response from the LLM service and update the columns based on the response.

+ +
+ Source code in frontend/ui_utils.py +
14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
class LLMResponseParser:
    """
    Description: Parse the response from the LLM service and update the columns based on the response.
    """

    def __init__(self, llm_response):
        # Raw attribute dict returned by the query-parsing LLM service.
        self.llm_response = llm_response
        # Columns to display; the dataset id and name are always included.
        self.subset_cols = ["did", "name"]
        self.size_sort = None
        self.classification_type = None
        self.uploader_name = None

    def process_size_attribute(self, attr_size: str):
        """Handle "size_of_dataset" ("yes" optionally followed by ",<sort order>")."""
        size, sort = attr_size.split(",") if "," in attr_size else (attr_size, None)
        if size == "yes":
            self.subset_cols.append("NumberOfInstances")
        if sort:
            self.size_sort = sort

    def missing_values_attribute(self, attr_missing: str):
        """Show the missing-value count column when requested."""
        if attr_missing == "yes":
            self.subset_cols.append("NumberOfMissingValues")

    def classification_type_attribute(self, attr_classification: str):
        """Record the classification type (e.g. "binary"/"multi-class")."""
        if attr_classification != "none":
            self.subset_cols.append("NumberOfClasses")
            self.classification_type = attr_classification

    def uploader_attribute(self, attr_uploader: str):
        """Record the uploader filter; expects the form "uploader = <id>"."""
        if attr_uploader != "none":
            self.subset_cols.append("uploader")
            self.uploader_name = attr_uploader.split("=")[1].strip()

    def get_attributes_from_response(self):
        """Dispatch every recognised attribute of the LLM response to its processor."""
        attribute_processors = {
            "size_of_dataset": self.process_size_attribute,
            "missing_values": self.missing_values_attribute,
            "classification_type": self.classification_type_attribute,
            "uploader": self.uploader_attribute,
        }

        for attribute, value in self.llm_response.items():
            if attribute in attribute_processors:
                attribute_processors[attribute](value)

    def update_subset_cols(self, metadata: pd.DataFrame):
        """
        Description: Filter the metadata based on the updated subset columns and extra conditions
        """
        if self.classification_type is not None:
            if "multi" in self.classification_type:
                metadata = metadata[metadata["NumberOfClasses"] > 2]
            elif "binary" in self.classification_type:
                metadata = metadata[metadata["NumberOfClasses"] == 2]
        if self.uploader_name is not None:
            # Only ignore non-numeric uploader ids; the previous bare
            # `except:` also hid unrelated errors (e.g. a missing column).
            try:
                uploader = int(self.uploader_name)
                metadata = metadata[metadata["uploader"] == uploader]
            except (ValueError, TypeError):
                pass

        return metadata[self.subset_cols]
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ update_subset_cols(metadata) + +

+ + +
+ +

Description: Filter the metadata based on the updated subset columns and extra conditions

+ +
+ Source code in frontend/ui_utils.py +
59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
def update_subset_cols(self, metadata: pd.DataFrame):
    """
    Description: Filter the metadata based on the updated subset columns and extra conditions

    Applies the classification-type and uploader filters collected from the
    LLM response, then projects the metadata onto the selected columns.
    """
    if self.classification_type is not None:
        if "multi" in self.classification_type:
            metadata = metadata[metadata["NumberOfClasses"] > 2]
        elif "binary" in self.classification_type:
            metadata = metadata[metadata["NumberOfClasses"] == 2]
    if self.uploader_name is not None:
        # Only ignore non-numeric uploader ids; a bare `except` would also
        # hide unrelated failures such as a missing "uploader" column.
        try:
            uploader = int(self.uploader_name)
            metadata = metadata[metadata["uploader"] == uploader]
        except (ValueError, TypeError):
            pass

    return metadata[self.subset_cols]
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ ResponseParser + + +

+ + +
+ + +

Description : This class is used to decide the order of operations and run the response parsing. +It loads the paths, fetches the Query parsing LLM response, the rag response, loads the metadata and then based on the config, decides the order in which to apply each of them.

+ +
+ Source code in frontend/ui_utils.py +
 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
class ResponseParser:
    """
    Description : This class is used to decide the order of operations and run the response parsing.
    It loads the paths, fetches the Query parsing LLM response, the rag response, loads the metadata
    and then based on the config, decides the order in which to apply each of them.
    """

    def __init__(self, query_type: str, apply_llm_before_rag: bool = False):
        self.query_type = query_type
        self.paths = self.load_paths()
        self.rag_response = None
        self.llm_response = None
        self.apply_llm_before_rag = apply_llm_before_rag
        self.database_filtered = None
        self.structured_query_response = None

    def load_paths(self):
        """
        Description: Load paths from paths.json
        """
        with open("paths.json", "r") as file:
            return json.load(file)

    def fetch_llm_response(self, query: str):
        """
        Description: Fetch the response from the query parsing LLM service as a json

        Tries the docker endpoint first and falls back to the local endpoint.
        """
        llm_response_path = self.paths["llm_response"]
        try:
            self.llm_response = requests.get(
                f"{llm_response_path['docker']}{query}"
            ).json()
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C is not swallowed.
            self.llm_response = requests.get(
                f"{llm_response_path['local']}{query}"
            ).json()
        return self.llm_response

    def fetch_structured_query(self, query_type: str, query: str):
        """
        Description: Fetch the response for a structured query from the LLM service as a JSON

        Tries the docker endpoint first; the local endpoint is only queried as a
        fallback. (Previously the local request always ran and overwrote a
        successful docker response.)
        """
        structured_response_path = self.paths["structured_query"]
        try:
            self.structured_query_response = requests.get(
                f"{structured_response_path['docker']}{query}",
                json={"query": query},
            ).json()
        except (requests.exceptions.RequestException, json.JSONDecodeError) as e:
            # Print the error for debugging purposes
            print(f"Error occurred: {e}")
            self.structured_query_response = None
        if self.structured_query_response is None:
            try:
                self.structured_query_response = requests.get(
                    f"{structured_response_path['local']}{query}",
                    json={"query": query},
                ).json()
            except Exception as e:
                # Print the error for debugging purposes
                print(f"Error occurred while fetching from local endpoint: {e}")
                self.structured_query_response = None

        return self.structured_query_response

    def database_filter(self, filter_condition, collec):
        """
        Apply database filter on the rag_response
        """
        ids = list(map(str, self.rag_response["initial_response"]))
        self.database_filtered = collec.get(ids=ids, where=filter_condition)["ids"]
        self.database_filtered = list(map(int, self.database_filtered))
        return self.database_filtered

    def fetch_rag_response(self, query_type, query):
        """
        Description: Fetch the response from RAG pipeline

        Tries the docker endpoint first, falls back to local, then
        de-duplicates the returned ids while preserving order.
        """
        rag_response_path = self.paths["rag_response"]
        try:
            self.rag_response = requests.get(
                f"{rag_response_path['docker']}{query_type.lower()}/{query}",
                json={"query": query, "type": query_type.lower()},
            ).json()
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C is not swallowed.
            self.rag_response = requests.get(
                f"{rag_response_path['local']}{query_type.lower()}/{query}",
                json={"query": query, "type": query_type.lower()},
            ).json()
        self.rag_response["initial_response"] = self._order_results()
        return self.rag_response

    def _order_results(self):
        """De-duplicate the RAG ids while preserving first-seen order."""
        doc_set = set()
        ordered_set = []
        for docid in self.rag_response["initial_response"]:
            if docid not in doc_set:
                ordered_set.append(docid)
            doc_set.add(docid)
        return ordered_set

    def parse_and_update_response(self, metadata: pd.DataFrame):
        """
         Description: Parse the response from the RAG and LLM services and update the metadata based on the response.
         Decide which order to apply them
         -  self.apply_llm_before_rag == False
             - Metadata is filtered based on the rag response first and then by the Query parsing LLM
        -  self.apply_llm_before_rag == True
             - Metadata is filtered by the Query parsing LLM first and the rag response second
        - in case structured_query == true, take results after applying data filters.
        """
        if self.apply_llm_before_rag is None or self.llm_response is None:
            print("No LLM filter.")
            # if no llm response is required, return the initial response
            return self._no_filter(metadata)

        elif (
            self.rag_response is not None and self.llm_response is not None
        ) and not config["structured_query"]:
            if not self.apply_llm_before_rag:
                filtered_metadata, llm_parser = self._rag_before_llm(metadata)

                if self.query_type.lower() == "dataset":
                    llm_parser.get_attributes_from_response()
                    return llm_parser.update_subset_cols(filtered_metadata)

            elif self.apply_llm_before_rag:
                return self._filter_before_rag(metadata)

        elif (
            self.rag_response is not None and self.structured_query_response is not None
        ):
            col_name = [
                "status",
                "NumberOfClasses",
                "NumberOfFeatures",
                "NumberOfInstances",
            ]
            if self.structured_query_response[0] is not None and isinstance(
                self.structured_query_response[1], dict
            ):
                # BUG FIX: the helper's result was previously discarded,
                # leaving `filtered_metadata` unbound on this branch.
                filtered_metadata = self._structured_query_on_success(metadata)
            else:
                filtered_metadata = self._structured_query_on_fail(metadata)
            return filtered_metadata[["did", "name", *col_name]]

    def _order_by_initial_response(self, filtered_metadata):
        """Sort rows to match the relevance order of the RAG response ids.

        Works on a copy to avoid mutating a view of the caller's frame
        (previously triggered pandas' SettingWithCopy behaviour).
        """
        filtered_metadata = filtered_metadata.copy()
        filtered_metadata["did"] = pd.Categorical(
            filtered_metadata["did"],
            categories=self.rag_response["initial_response"],
            ordered=True,
        )
        return filtered_metadata.sort_values("did").reset_index(drop=True)

    def _structured_query_on_fail(self, metadata):
        """Fall back to the plain RAG result when the structured query failed."""
        filtered_metadata = metadata[
            metadata["did"].isin(self.rag_response["initial_response"])
        ]
        return self._order_by_initial_response(filtered_metadata)

    def _structured_query_on_success(self, metadata):
        """Apply the database filter (when present) on top of the RAG result."""
        if (
            self.structured_query_response[0].get("filter", None)
            and self.database_filtered
        ):
            filtered_metadata = metadata[metadata["did"].isin(self.database_filtered)]
        else:
            # Filter is empty or none of the RAG data satisfies it.
            filtered_metadata = metadata[
                metadata["did"].isin(self.rag_response["initial_response"])
            ]
        # BUG FIX: this method previously returned None.
        return self._order_by_initial_response(filtered_metadata)

    def _filter_before_rag(self, metadata):
        """Apply the LLM column filter first, then restrict to the RAG ids."""
        print("LLM filter before RAG")
        llm_parser = LLMResponseParser(self.llm_response)
        llm_parser.get_attributes_from_response()
        filtered_metadata = llm_parser.update_subset_cols(metadata)
        filtered_metadata = filtered_metadata[
            metadata["did"].isin(self.rag_response["initial_response"])
        ]
        return self._order_by_initial_response(filtered_metadata)

    def _rag_before_llm(self, metadata):
        """Restrict to the RAG ids first; the caller applies the LLM filter."""
        print("RAG before LLM filter.")
        filtered_metadata = metadata[
            metadata["did"].isin(self.rag_response["initial_response"])
        ]
        llm_parser = LLMResponseParser(self.llm_response)
        return self._order_by_initial_response(filtered_metadata), llm_parser

    def _no_filter(self, metadata):
        """Return the RAG result in relevance order with no LLM filtering."""
        filtered_metadata = metadata[
            metadata["did"].isin(self.rag_response["initial_response"])
        ]
        return self._order_by_initial_response(filtered_metadata)
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ database_filter(filter_condition, collec) + +

+ + +
+ +

Apply database filter on the rag_response

+ +
+ Source code in frontend/ui_utils.py +
143
+144
+145
+146
+147
+148
+149
+150
+151
def database_filter(self, filter_condition, collec):
    """
    Apply database filter on the rag_response

    Restricts the RAG response ids to those matching `filter_condition` in
    the metadata collection, returning them as integers.
    """
    rag_ids = [str(docid) for docid in self.rag_response["initial_response"]]
    matching = collec.get(ids=rag_ids, where=filter_condition)["ids"]
    self.database_filtered = [int(docid) for docid in matching]
    return self.database_filtered
+
+
+
+ +
+ +
+ + +

+ fetch_llm_response(query) + +

+ + +
+ +

Description: Fetch the response from the query parsing LLM service as a json

+ +
+ Source code in frontend/ui_utils.py +
100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
def fetch_llm_response(self, query: str):
    """
    Description: Fetch the response from the query parsing LLM service as a json

    Tries the docker endpoint first and falls back to the local endpoint if
    the docker request fails.
    """
    llm_response_path = self.paths["llm_response"]
    try:
        self.llm_response = requests.get(
            f"{llm_response_path['docker']}{query}"
        ).json()
    except Exception:
        # A bare `except:` would also trap SystemExit/KeyboardInterrupt;
        # catch Exception so the fallback only handles real request errors.
        self.llm_response = requests.get(
            f"{llm_response_path['local']}{query}"
        ).json()
    return self.llm_response
+
+
+
+ +
+ +
+ + +

+ fetch_rag_response(query_type, query) + +

+ + +
+ +

Description: Fetch the response from RAG pipeline

+ +
+ Source code in frontend/ui_utils.py +
153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
def fetch_rag_response(self, query_type, query):
    """
    Description: Fetch the response from RAG pipeline

    Tries the docker endpoint first, falling back to the local endpoint, then
    de-duplicates the returned ids while preserving their order.
    """
    rag_response_path = self.paths["rag_response"]
    try:
        self.rag_response = requests.get(
            f"{rag_response_path['docker']}{query_type.lower()}/{query}",
            json={"query": query, "type": query_type.lower()},
        ).json()
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C is not swallowed.
        self.rag_response = requests.get(
            f"{rag_response_path['local']}{query_type.lower()}/{query}",
            json={"query": query, "type": query_type.lower()},
        ).json()
    self.rag_response["initial_response"] = self._order_results()

    return self.rag_response
+
+
+
+ +
+ +
+ + +

+ fetch_structured_query(query_type, query) + +

+ + +
+ +

Description: Fetch the response for a structured query from the LLM service as a JSON

+ +
+ Source code in frontend/ui_utils.py +
115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
def fetch_structured_query(self, query_type: str, query: str):
    """
    Description: Fetch the response for a structured query from the LLM service as a JSON

    Tries the docker endpoint first. BUG FIX: the local endpoint is now only
    queried as a fallback — previously the local request always ran and
    overwrote a successful docker response.
    """
    structured_response_path = self.paths["structured_query"]
    try:
        self.structured_query_response = requests.get(
            f"{structured_response_path['docker']}{query}",
            json={"query": query},
        ).json()
    except (requests.exceptions.RequestException, json.JSONDecodeError) as e:
        # Print the error for debugging purposes
        print(f"Error occurred: {e}")
        # Set structured_query_response to None on error
        self.structured_query_response = None
    if self.structured_query_response is None:
        try:
            self.structured_query_response = requests.get(
                f"{structured_response_path['local']}{query}",
                json={"query": query},
            ).json()
        except Exception as e:
            # Print the error for debugging purposes
            print(f"Error occurred while fetching from local endpoint: {e}")
            # Set structured_query_response to None if the local request also fails
            self.structured_query_response = None

    return self.structured_query_response
+
+
+
+ +
+ +
+ + +

+ load_paths() + +

+ + +
+ +

Description: Load paths from paths.json

+ +
+ Source code in frontend/ui_utils.py +
93
+94
+95
+96
+97
+98
def load_paths(self):
    """
    Description: Load paths from paths.json

    Reads the endpoint-path configuration from `paths.json` in the working
    directory and returns it as a dict.
    """
    with open("paths.json", "r") as paths_file:
        paths = json.load(paths_file)
    return paths
+
+
+
+ +
+ +
+ + +

+ parse_and_update_response(metadata) + +

+ + +
+ +

Description: Parse the response from the RAG and LLM services and update the metadata based on the response. + Decide which order to apply them + - self.apply_llm_before_rag == False + - Metadata is filtered based on the rag response first and then by the Query parsing LLM +- self.apply_llm_before_rag == True + - Metadata is filtered by the Query parsing LLM first and the rag response second +- in case structured_query == true, take results after applying data filters.

+ +
+ Source code in frontend/ui_utils.py +
183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
def parse_and_update_response(self, metadata: pd.DataFrame):
    """
     Description: Parse the response from the RAG and LLM services and update the metadata based on the response.
     Decide which order to apply them
     -  self.apply_llm_before_rag == False
         - Metadata is filtered based on the rag response first and then by the Query parsing LLM
    -  self.apply_llm_before_rag == True
         - Metadata is filtered by the Query parsing LLM first and the rag response second
    - in case structured_query == true, take results after applying data filters.
    """
    if self.apply_llm_before_rag is None or self.llm_response is None:
        print("No LLM filter.")
        # if no llm response is required, return the initial response
        return self._no_filter(metadata)

    elif (
        self.rag_response is not None and self.llm_response is not None
    ) and not config["structured_query"]:
        if not self.apply_llm_before_rag:
            filtered_metadata, llm_parser = self._rag_before_llm(metadata)

            if self.query_type.lower() == "dataset":
                llm_parser.get_attributes_from_response()
                return llm_parser.update_subset_cols(filtered_metadata)

        elif self.apply_llm_before_rag:
            return self._filter_before_rag(metadata)

    elif (
        self.rag_response is not None and self.structured_query_response is not None
    ):
        col_name = [
            "status",
            "NumberOfClasses",
            "NumberOfFeatures",
            "NumberOfInstances",
        ]
        if self.structured_query_response[0] is not None and isinstance(
            self.structured_query_response[1], dict
        ):
            # BUG FIX: the helper's result was previously discarded, which
            # left `filtered_metadata` unbound and raised UnboundLocalError
            # on the return below.
            filtered_metadata = self._structured_query_on_success(metadata)
            if filtered_metadata is None:
                # Guard against helper versions that do not return a value.
                filtered_metadata = self._structured_query_on_fail(metadata)
        else:
            filtered_metadata = self._structured_query_on_fail(metadata)
        return filtered_metadata[["did", "name", *col_name]]
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ UILoader + + +

+ + +
+ + +

Description : Create the chat interface

+ +
+ Source code in frontend/ui_utils.py +
317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
+413
+414
+415
+416
+417
+418
+419
+420
+421
+422
+423
+424
+425
+426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
+439
+440
+441
+442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
+461
+462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
+475
+476
+477
+478
+479
+480
+481
+482
+483
+484
+485
+486
+487
+488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
+510
+511
+512
+513
+514
+515
+516
+517
+518
+519
+520
+521
+522
+523
+524
+525
+526
+527
+528
+529
+530
+531
+532
+533
+534
+535
+536
+537
+538
+539
+540
+541
+542
+543
+544
+545
+546
+547
class UILoader:
+    """
+    Description : Create the chat interface
+    """
+
+    def __init__(self, config_path):
+        with open(config_path, "r") as file:
+            # Load config
+            self.config = json.load(file)
+        # Paths and display information
+
+        # Load metadata chroma database for structured query
+        self.collec = load_chroma_metadata()
+
+        # Metadata paths
+        self.data_metadata_path = (
+            Path(config["data_dir"]) / "all_dataset_description.csv"
+        )
+        self.flow_metadata_path = Path(config["data_dir"]) / "all_flow_description.csv"
+
+        # Read metadata
+        self.data_metadata = pd.read_csv(self.data_metadata_path)
+        self.flow_metadata = pd.read_csv(self.flow_metadata_path)
+
+        # defaults
+        self.query_type = "Dataset"
+        self.llm_filter = False
+        self.paths = self.load_paths()
+        self.info = """
+        <p style='text-align: center; color: white;'>Machine learning research should be easily accessible and reusable. <a href = "https://openml.org/">OpenML</a> is an open platform for sharing datasets, algorithms, and experiments - to learn how to learn better, together. </p>
+        """
+        self.logo = "images/favicon.ico"
+        self.chatbot_display = "How do I do X using OpenML? / Find me a dataset about Y"
+
+        if "messages" not in st.session_state:
+            st.session_state.messages = []
+
+    # container for company description and logo
+    def generate_logo_header(
+        self,
+    ):
+
+        col1, col2 = st.columns([1, 4])
+        with col1:
+            st.image(self.logo, width=100)
+        with col2:
+            st.markdown(
+                self.info,
+                unsafe_allow_html=True,
+            )
+
+    def generate_complete_ui(self):
+
+        self.generate_logo_header()
+        chat_container = st.container()
+        with chat_container:
+            with st.form(key="chat_form"):
+                user_input = st.text_input(
+                    label="Query", placeholder=self.chatbot_display
+                )
+                query_type = st.selectbox(
+                    "Select Query Type",
+                    ["General Query", "Dataset", "Flow"],
+                    help="Are you looking for a dataset or a flow or just have a general query?",
+                )
+                ai_filter = st.toggle(
+                    "Use AI powered filtering",
+                    value=True,
+                    help="Uses an AI model to identify what columns might be useful to you.",
+                )
+                st.form_submit_button(label="Search")
+
+            self.create_chat_interface(user_input=None)
+            if user_input:
+                self.create_chat_interface(
+                    user_input, query_type=query_type, ai_filter=ai_filter
+                )
+
+    def create_chat_interface(self, user_input, query_type=None, ai_filter=False):
+        """
+        Description: Create the chat interface and display the chat history and results. Show the user input and the response from the OpenML Agent.
+
+        """
+        self.query_type = query_type
+        self.ai_filter = ai_filter
+
+        if user_input is None:
+            with st.chat_message(name="ai"):
+                st.write("OpenML Agent: ", "Hello! How can I help you today?")
+                st.write(
+                    "Note that results are powered by local LLM models and may not be accurate. Please refer to the official OpenML website for accurate information."
+                )
+
+        # Handle user input
+        if user_input:
+            self._handle_user_input(user_input, query_type)
+
+    def _handle_user_input(self, user_input, query_type):
+        st.session_state.messages.append({"role": "user", "content": user_input})
+        with st.spinner("Waiting for results..."):
+            results = self.process_query_chat(user_input)
+
+        if not self.query_type == "General Query":
+            st.session_state.messages.append(
+                    {"role": "OpenML Agent", "content": results}
+                )
+        else:
+            self._stream_results(results)
+
+            # reverse messages to show the latest message at the top
+        reversed_messages = self._reverse_session_history()
+
+            # Display chat history
+        self._display_chat_history(query_type, reversed_messages)
+        self.create_download_button()
+
+    def _display_chat_history(self, query_type, reversed_messages):
+        for message in reversed_messages:
+            if query_type == "General Query":
+                pass
+            if message["role"] == "user":
+                with st.chat_message(name="user"):
+                    self.display_results(message["content"], "user")
+            else:
+                with st.chat_message(name="ai"):
+                    self.display_results(message["content"], "ai")
+
+    def _reverse_session_history(self):
+        reversed_messages = []
+        for index in range(0, len(st.session_state.messages), 2):
+            reversed_messages.insert(0, st.session_state.messages[index])
+            reversed_messages.insert(1, st.session_state.messages[index + 1])
+        return reversed_messages
+
+    def _stream_results(self, results):
+        with st.spinner("Fetching results..."):
+            with requests.get(results, stream=True) as r:
+                resp_contain = st.empty()
+                streamed_response = ""
+                for chunk in r.iter_content(chunk_size=1024):
+                    if chunk:
+                        streamed_response += chunk.decode("utf-8")
+                        resp_contain.markdown(streamed_response)
+                resp_contain.empty()
+            st.session_state.messages.append(
+                {"role": "OpenML Agent", "content": streamed_response}
+            )
+
+    @st.experimental_fragment()
+    def create_download_button(self):
+        data = "\n".join(
+            [str(message["content"]) for message in st.session_state.messages]
+        )
+        st.download_button(
+            label="Download chat history",
+            data=data,
+            file_name="chat_history.txt",
+        )
+
+    def display_results(self, initial_response, role):
+        """
+        Description: Display the results in a DataFrame
+        """
+        # st.write("OpenML Agent: ")
+
+        try:
+            st.dataframe(initial_response)
+        except:
+            st.write(initial_response)
+
+    # Function to handle query processing
+    def process_query_chat(self, query):
+        """
+        Description: Process the query and return the results based on the query type and the LLM filter.
+
+        """
+        apply_llm_before_rag = None if not self.llm_filter else False
+        response_parser = ResponseParser(
+            self.query_type, apply_llm_before_rag=apply_llm_before_rag
+        )
+
+        if self.query_type == "Dataset" or self.query_type == "Flow":
+            if not self.ai_filter:
+                response_parser.fetch_rag_response(self.query_type, query)
+                return response_parser.parse_and_update_response(self.data_metadata)
+            else:
+                # get structured query
+                self._display_structured_query_results(query, response_parser)
+
+            results = response_parser.parse_and_update_response(self.data_metadata)
+            return results
+
+        elif self.query_type == "General Query":
+            # Return documentation response path
+            return self.paths["documentation_query"]["local"] + query
+
+    def _display_structured_query_results(self, query, response_parser):
+        response_parser.fetch_structured_query(self.query_type, query)
+        try:
+            # get rag response
+            # using original query instead of extracted topics.
+            response_parser.fetch_rag_response(
+                self.query_type,
+                response_parser.structured_query_response[0]["query"],
+            )
+
+            if response_parser.structured_query_response:
+                st.write(
+                    "Detected Filter(s): ",
+                    json.dumps(
+                        response_parser.structured_query_response[0].get("filter", None)
+                    ),
+                )
+            else:
+                st.write("Detected Filter(s): ", None)
+            if response_parser.structured_query_response[1].get("filter"):
+                with st.spinner("Applying LLM Detected Filter(s)..."):
+                    response_parser.database_filter(
+                        response_parser.structured_query_response[1]["filter"],
+                        collec,
+                    )
+        except:
+            # fallback to RAG response
+            response_parser.fetch_rag_response(self.query_type, query)
+
+    def load_paths(self):
+        """
+        Description: Load paths from paths.json
+        """
+        with open("paths.json", "r") as file:
+            return json.load(file)
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ create_chat_interface(user_input, query_type=None, ai_filter=False) + +

+ + +
+ +

Description: Create the chat interface and display the chat history and results. Show the user input and the response from the OpenML Agent.

+ +
+ Source code in frontend/ui_utils.py +
395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
def create_chat_interface(self, user_input, query_type=None, ai_filter=False):
+    """
+    Description: Create the chat interface and display the chat history and results. Show the user input and the response from the OpenML Agent.
+
+    """
+    self.query_type = query_type
+    self.ai_filter = ai_filter
+
+    if user_input is None:
+        with st.chat_message(name="ai"):
+            st.write("OpenML Agent: ", "Hello! How can I help you today?")
+            st.write(
+                "Note that results are powered by local LLM models and may not be accurate. Please refer to the official OpenML website for accurate information."
+            )
+
+    # Handle user input
+    if user_input:
+        self._handle_user_input(user_input, query_type)
+
+
+
+ +
+ +
+ + +

+ display_results(initial_response, role) + +

+ + +
+ +

Description: Display the results in a DataFrame

+ +
+ Source code in frontend/ui_utils.py +
476
+477
+478
+479
+480
+481
+482
+483
+484
+485
def display_results(self, initial_response, role):
+    """
+    Description: Display the results in a DataFrame
+    """
+    # st.write("OpenML Agent: ")
+
+    try:
+        st.dataframe(initial_response)
+    except:
+        st.write(initial_response)
+
+
+
+ +
+ +
+ + +

+ load_paths() + +

+ + +
+ +

Description: Load paths from paths.json

+ +
+ Source code in frontend/ui_utils.py +
542
+543
+544
+545
+546
+547
def load_paths(self):
+    """
+    Description: Load paths from paths.json
+    """
+    with open("paths.json", "r") as file:
+        return json.load(file)
+
+
+
+ +
+ +
+ + +

+ process_query_chat(query) + +

+ + +
+ +

Description: Process the query and return the results based on the query type and the LLM filter.

+ +
+ Source code in frontend/ui_utils.py +
488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
+510
+511
def process_query_chat(self, query):
+    """
+    Description: Process the query and return the results based on the query type and the LLM filter.
+
+    """
+    apply_llm_before_rag = None if not self.llm_filter else False
+    response_parser = ResponseParser(
+        self.query_type, apply_llm_before_rag=apply_llm_before_rag
+    )
+
+    if self.query_type == "Dataset" or self.query_type == "Flow":
+        if not self.ai_filter:
+            response_parser.fetch_rag_response(self.query_type, query)
+            return response_parser.parse_and_update_response(self.data_metadata)
+        else:
+            # get structured query
+            self._display_structured_query_results(query, response_parser)
+
+        results = response_parser.parse_and_update_response(self.data_metadata)
+        return results
+
+    elif self.query_type == "General Query":
+        # Return documentation response path
+        return self.paths["documentation_query"]["local"] + query
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ LLMResponseParser + + +

+ + +
+ + +

Description: Parse the response from the LLM service and update the columns based on the response.

+ +
+ Source code in frontend/ui_utils.py +
14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
class LLMResponseParser:
+    """
+    Description: Parse the response from the LLM service and update the columns based on the response.
+    """
+
+    def __init__(self, llm_response):
+        self.llm_response = llm_response
+        self.subset_cols = ["did", "name"]
+        self.size_sort = None
+        self.classification_type = None
+        self.uploader_name = None
+
+    def process_size_attribute(self, attr_size: str):
+        size, sort = attr_size.split(",") if "," in attr_size else (attr_size, None)
+        if size == "yes":
+            self.subset_cols.append("NumberOfInstances")
+        if sort:
+            self.size_sort = sort
+
+    def missing_values_attribute(self, attr_missing: str):
+        if attr_missing == "yes":
+            self.subset_cols.append("NumberOfMissingValues")
+
+    def classification_type_attribute(self, attr_classification: str):
+        if attr_classification != "none":
+            self.subset_cols.append("NumberOfClasses")
+            self.classification_type = attr_classification
+
+    def uploader_attribute(self, attr_uploader: str):
+        if attr_uploader != "none":
+            self.subset_cols.append("uploader")
+            self.uploader_name = attr_uploader.split("=")[1].strip()
+
+    def get_attributes_from_response(self):
+        attribute_processors = {
+            "size_of_dataset": self.process_size_attribute,
+            "missing_values": self.missing_values_attribute,
+            "classification_type": self.classification_type_attribute,
+            "uploader": self.uploader_attribute,
+        }
+
+        for attribute, value in self.llm_response.items():
+            if attribute in attribute_processors:
+                attribute_processors[attribute](value)
+
+    def update_subset_cols(self, metadata: pd.DataFrame):
+        """
+        Description: Filter the metadata based on the updated subset columns and extra conditions
+        """
+        if self.classification_type is not None:
+            if "multi" in self.classification_type:
+                metadata = metadata[metadata["NumberOfClasses"] > 2]
+            elif "binary" in self.classification_type:
+                metadata = metadata[metadata["NumberOfClasses"] == 2]
+        if self.uploader_name is not None:
+            try:
+                uploader = int(self.uploader_name)
+                metadata = metadata[metadata["uploader"] == uploader]
+            except:
+                pass
+
+        return metadata[self.subset_cols]
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ update_subset_cols(metadata) + +

+ + +
+ +

Description: Filter the metadata based on the updated subset columns and extra conditions

+ +
+ Source code in frontend/ui_utils.py +
59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
def update_subset_cols(self, metadata: pd.DataFrame):
+    """
+    Description: Filter the metadata based on the updated subset columns and extra conditions
+    """
+    if self.classification_type is not None:
+        if "multi" in self.classification_type:
+            metadata = metadata[metadata["NumberOfClasses"] > 2]
+        elif "binary" in self.classification_type:
+            metadata = metadata[metadata["NumberOfClasses"] == 2]
+    if self.uploader_name is not None:
+        try:
+            uploader = int(self.uploader_name)
+            metadata = metadata[metadata["uploader"] == uploader]
+        except:
+            pass
+
+    return metadata[self.subset_cols]
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ ResponseParser + + +

+ + +
+ + +

Description : This classe is used to decide the order of operations and run the response parsing. +It loads the paths, fetches the Query parsing LLM response, the rag response, loads the metadatas and then based on the config, decides the order in which to apply each of them.

+ +
+ Source code in frontend/ui_utils.py +
 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
class ResponseParser:
+    """
+    Description : This classe is used to decide the order of operations and run the response parsing.
+    It loads the paths, fetches the Query parsing LLM response, the rag response, loads the metadatas and then based on the config, decides the order in which to apply each of them.
+    """
+
+    def __init__(self, query_type: str, apply_llm_before_rag: bool = False):
+        self.query_type = query_type
+        self.paths = self.load_paths()
+        self.rag_response = None
+        self.llm_response = None
+        self.apply_llm_before_rag = apply_llm_before_rag
+        self.database_filtered = None
+        self.structured_query_response = None
+
+    def load_paths(self):
+        """
+        Description: Load paths from paths.json
+        """
+        with open("paths.json", "r") as file:
+            return json.load(file)
+
+    def fetch_llm_response(self, query: str):
+        """
+        Description: Fetch the response from the query parsing LLM service as a json
+        """
+        llm_response_path = self.paths["llm_response"]
+        try:
+            self.llm_response = requests.get(
+                f"{llm_response_path['docker']}{query}"
+            ).json()
+        except:
+            self.llm_response = requests.get(
+                f"{llm_response_path['local']}{query}"
+            ).json()
+        return self.llm_response
+
+    def fetch_structured_query(self, query_type: str, query: str):
+        """
+        Description: Fetch the response for a structured query from the LLM service as a JSON
+        """
+        structured_response_path = self.paths["structured_query"]
+        try:
+            self.structured_query_response = requests.get(
+                f"{structured_response_path['docker']}{query}",
+                json={"query": query},
+            ).json()
+        except (requests.exceptions.RequestException, json.JSONDecodeError) as e:
+            # Print the error for debugging purposes
+            print(f"Error occurred: {e}")
+            # Set structured_query_response to None on error
+            self.structured_query_response = None
+        try:
+            self.structured_query_response = requests.get(
+                f"{structured_response_path['local']}{query}",
+                json={"query": query},
+            ).json()
+        except Exception as e:
+            # Print the error for debugging purposes
+            print(f"Error occurred while fetching from local endpoint: {e}")
+            # Set structured_query_response to None if the local request also fails
+            self.structured_query_response = None
+
+        return self.structured_query_response
+
+    def database_filter(self, filter_condition, collec):
+        """
+        Apply database filter on the rag_response
+        """
+        ids = list(map(str, self.rag_response["initial_response"]))
+        self.database_filtered = collec.get(ids=ids, where=filter_condition)["ids"]
+        self.database_filtered = list(map(int, self.database_filtered))
+        # print(self.database_filtered)
+        return self.database_filtered
+
+    def fetch_rag_response(self, query_type, query):
+        """
+        Description: Fetch the response from RAG pipeline
+
+        """
+        rag_response_path = self.paths["rag_response"]
+        try:
+            self.rag_response = requests.get(
+                f"{rag_response_path['docker']}{query_type.lower()}/{query}",
+                json={"query": query, "type": query_type.lower()},
+            ).json()
+        except:
+            self.rag_response = requests.get(
+                f"{rag_response_path['local']}{query_type.lower()}/{query}",
+                json={"query": query, "type": query_type.lower()},
+            ).json()
+        ordered_set = self._order_results()
+        self.rag_response["initial_response"] = ordered_set
+
+        return self.rag_response
+
+    def _order_results(self):
+        doc_set = set()
+        ordered_set = []
+        for docid in self.rag_response["initial_response"]:
+            if docid not in doc_set:
+                ordered_set.append(docid)
+            doc_set.add(docid)
+        return ordered_set
+
+    def parse_and_update_response(self, metadata: pd.DataFrame):
+        """
+         Description: Parse the response from the RAG and LLM services and update the metadata based on the response.
+         Decide which order to apply them
+         -  self.apply_llm_before_rag == False
+             - Metadata is filtered based on the rag response first and then by the Query parsing LLM
+        -  self.apply_llm_before_rag == False
+             - Metadata is filtered based by the Query parsing LLM first and the rag response second
+        - in case structured_query == true, take results are applying data filters.
+        """
+        if self.apply_llm_before_rag is None or self.llm_response is None:
+            print("No LLM filter.")
+            # print(self.rag_response, flush=True)
+            filtered_metadata = self._no_filter(metadata)
+
+            # print(filtered_metadata)
+            # if no llm response is required, return the initial response
+            return filtered_metadata
+
+        elif (
+            self.rag_response is not None and self.llm_response is not None
+        ) and not config["structured_query"]:
+            if not self.apply_llm_before_rag:
+                filtered_metadata, llm_parser = self._rag_before_llm(metadata)
+
+                if self.query_type.lower() == "dataset":
+                    llm_parser.get_attributes_from_response()
+                    return llm_parser.update_subset_cols(filtered_metadata)
+
+            elif self.apply_llm_before_rag:
+                filtered_metadata = self._filter_before_rag(metadata)
+                return filtered_metadata
+
+        elif (
+            self.rag_response is not None and self.structured_query_response is not None
+        ):
+            col_name = [
+                "status",
+                "NumberOfClasses",
+                "NumberOfFeatures",
+                "NumberOfInstances",
+            ]
+            # print(self.structured_query_response)  # Only for debugging. Comment later.
+            if self.structured_query_response[0] is not None and isinstance(
+                self.structured_query_response[1], dict
+            ):
+                # Safely attempt to access the "filter" key in the first element
+
+                self._structured_query_on_success(metadata)
+
+            else:
+                filtered_metadata = self._structured_query_on_fail(metadata)
+                # print("Showing only rag response")
+            return filtered_metadata[["did", "name", *col_name]]
+
+    def _structured_query_on_fail(self, metadata):
+        filtered_metadata = metadata[
+            metadata["did"].isin(self.rag_response["initial_response"])
+        ]
+        filtered_metadata["did"] = pd.Categorical(
+            filtered_metadata["did"],
+            categories=self.rag_response["initial_response"],
+            ordered=True,
+        )
+        filtered_metadata = filtered_metadata.sort_values("did").reset_index(drop=True)
+
+        return filtered_metadata
+
+    def _structured_query_on_success(self, metadata):
+        if (
+            self.structured_query_response[0].get("filter", None)
+            and self.database_filtered
+        ):
+            filtered_metadata = metadata[metadata["did"].isin(self.database_filtered)]
+            # print("Showing database filtered data")
+        else:
+            filtered_metadata = metadata[
+                metadata["did"].isin(self.rag_response["initial_response"])
+            ]
+            # print(
+            #     "Showing only rag response as filter is empty or none of the rag data satisfies filter conditions."
+            # )
+        filtered_metadata["did"] = pd.Categorical(
+            filtered_metadata["did"],
+            categories=self.rag_response["initial_response"],
+            ordered=True,
+        )
+        filtered_metadata = filtered_metadata.sort_values("did").reset_index(drop=True)
+
+    def _filter_before_rag(self, metadata):
+        print("LLM filter before RAG")
+        llm_parser = LLMResponseParser(self.llm_response)
+        llm_parser.get_attributes_from_response()
+        filtered_metadata = llm_parser.update_subset_cols(metadata)
+        filtered_metadata = filtered_metadata[
+            metadata["did"].isin(self.rag_response["initial_response"])
+        ]
+        filtered_metadata["did"] = pd.Categorical(
+            filtered_metadata["did"],
+            categories=self.rag_response["initial_response"],
+            ordered=True,
+        )
+        filtered_metadata = filtered_metadata.sort_values("did").reset_index(drop=True)
+
+        return filtered_metadata
+
+    def _rag_before_llm(self, metadata):
+        print("RAG before LLM filter.")
+        filtered_metadata = metadata[
+            metadata["did"].isin(self.rag_response["initial_response"])
+        ]
+        filtered_metadata["did"] = pd.Categorical(
+            filtered_metadata["did"],
+            categories=self.rag_response["initial_response"],
+            ordered=True,
+        )
+        filtered_metadata = filtered_metadata.sort_values("did").reset_index(drop=True)
+        llm_parser = LLMResponseParser(self.llm_response)
+        return filtered_metadata, llm_parser
+
+    def _no_filter(self, metadata):
+        filtered_metadata = metadata[
+            metadata["did"].isin(self.rag_response["initial_response"])
+        ]
+        filtered_metadata["did"] = pd.Categorical(
+            filtered_metadata["did"],
+            categories=self.rag_response["initial_response"],
+            ordered=True,
+        )
+        filtered_metadata = filtered_metadata.sort_values("did").reset_index(drop=True)
+
+        return filtered_metadata
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ database_filter(filter_condition, collec) + +

+ + +
+ +

Apply database filter on the rag_response

+ +
+ Source code in frontend/ui_utils.py +
143
+144
+145
+146
+147
+148
+149
+150
+151
def database_filter(self, filter_condition, collec):
+    """
+    Apply database filter on the rag_response
+    """
+    ids = list(map(str, self.rag_response["initial_response"]))
+    self.database_filtered = collec.get(ids=ids, where=filter_condition)["ids"]
+    self.database_filtered = list(map(int, self.database_filtered))
+    # print(self.database_filtered)
+    return self.database_filtered
+
+
+
+ +
+ +
+ + +

+ fetch_llm_response(query) + +

+ + +
+ +

Description: Fetch the response from the query parsing LLM service as a json

+ +
+ Source code in frontend/ui_utils.py +
100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
def fetch_llm_response(self, query: str):
+    """
+    Description: Fetch the response from the query parsing LLM service as a json
+    """
+    llm_response_path = self.paths["llm_response"]
+    try:
+        self.llm_response = requests.get(
+            f"{llm_response_path['docker']}{query}"
+        ).json()
+    except:
+        self.llm_response = requests.get(
+            f"{llm_response_path['local']}{query}"
+        ).json()
+    return self.llm_response
+
+
+
+ +
+ +
+ + +

+ fetch_rag_response(query_type, query) + +

+ + +
+ +

Description: Fetch the response from RAG pipeline

+ +
+ Source code in frontend/ui_utils.py +
153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
def fetch_rag_response(self, query_type, query):
+    """
+    Description: Fetch the response from RAG pipeline
+
+    """
+    rag_response_path = self.paths["rag_response"]
+    try:
+        self.rag_response = requests.get(
+            f"{rag_response_path['docker']}{query_type.lower()}/{query}",
+            json={"query": query, "type": query_type.lower()},
+        ).json()
+    except:
+        self.rag_response = requests.get(
+            f"{rag_response_path['local']}{query_type.lower()}/{query}",
+            json={"query": query, "type": query_type.lower()},
+        ).json()
+    ordered_set = self._order_results()
+    self.rag_response["initial_response"] = ordered_set
+
+    return self.rag_response
+
+
+
+ +
+ +
+ + +

+ fetch_structured_query(query_type, query) + +

+ + +
+ +

Description: Fetch the response for a structured query from the LLM service as a JSON

+ +
+ Source code in frontend/ui_utils.py +
115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
def fetch_structured_query(self, query_type: str, query: str):
+    """
+    Description: Fetch the response for a structured query from the LLM service as a JSON
+    """
+    structured_response_path = self.paths["structured_query"]
+    try:
+        self.structured_query_response = requests.get(
+            f"{structured_response_path['docker']}{query}",
+            json={"query": query},
+        ).json()
+    except (requests.exceptions.RequestException, json.JSONDecodeError) as e:
+        # Print the error for debugging purposes
+        print(f"Error occurred: {e}")
+        # Set structured_query_response to None on error
+        self.structured_query_response = None
+    try:
+        self.structured_query_response = requests.get(
+            f"{structured_response_path['local']}{query}",
+            json={"query": query},
+        ).json()
+    except Exception as e:
+        # Print the error for debugging purposes
+        print(f"Error occurred while fetching from local endpoint: {e}")
+        # Set structured_query_response to None if the local request also fails
+        self.structured_query_response = None
+
+    return self.structured_query_response
+
+
+
+ +
+ +
+ + +

+ load_paths() + +

+ + +
+ +

Description: Load paths from paths.json

+ +
+ Source code in frontend/ui_utils.py +
93
+94
+95
+96
+97
+98
def load_paths(self):
+    """
+    Description: Load paths from paths.json
+    """
+    with open("paths.json", "r") as file:
+        return json.load(file)
+
+
+
+ +
+ +
+ + +

+ parse_and_update_response(metadata) + +

+ + +
+ +

Description: Parse the response from the RAG and LLM services and update the metadata based on the response. + Decide which order to apply them + - self.apply_llm_before_rag == False + - Metadata is filtered based on the rag response first and then by the Query parsing LLM +- self.apply_llm_before_rag == False + - Metadata is filtered based by the Query parsing LLM first and the rag response second +- in case structured_query == true, take results are applying data filters.

+ +
+ Source code in frontend/ui_utils.py +
183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
def parse_and_update_response(self, metadata: pd.DataFrame):
+    """
+     Description: Parse the response from the RAG and LLM services and update the metadata based on the response.
+     Decide which order to apply them
+     -  self.apply_llm_before_rag == False
+         - Metadata is filtered based on the rag response first and then by the Query parsing LLM
+    -  self.apply_llm_before_rag == False
+         - Metadata is filtered based by the Query parsing LLM first and the rag response second
+    - in case structured_query == true, take results are applying data filters.
+    """
+    if self.apply_llm_before_rag is None or self.llm_response is None:
+        print("No LLM filter.")
+        # print(self.rag_response, flush=True)
+        filtered_metadata = self._no_filter(metadata)
+
+        # print(filtered_metadata)
+        # if no llm response is required, return the initial response
+        return filtered_metadata
+
+    elif (
+        self.rag_response is not None and self.llm_response is not None
+    ) and not config["structured_query"]:
+        if not self.apply_llm_before_rag:
+            filtered_metadata, llm_parser = self._rag_before_llm(metadata)
+
+            if self.query_type.lower() == "dataset":
+                llm_parser.get_attributes_from_response()
+                return llm_parser.update_subset_cols(filtered_metadata)
+
+        elif self.apply_llm_before_rag:
+            filtered_metadata = self._filter_before_rag(metadata)
+            return filtered_metadata
+
+    elif (
+        self.rag_response is not None and self.structured_query_response is not None
+    ):
+        col_name = [
+            "status",
+            "NumberOfClasses",
+            "NumberOfFeatures",
+            "NumberOfInstances",
+        ]
+        # print(self.structured_query_response)  # Only for debugging. Comment later.
+        if self.structured_query_response[0] is not None and isinstance(
+            self.structured_query_response[1], dict
+        ):
+            # Safely attempt to access the "filter" key in the first element
+
+            self._structured_query_on_success(metadata)
+
+        else:
+            filtered_metadata = self._structured_query_on_fail(metadata)
+            # print("Showing only rag response")
+        return filtered_metadata[["did", "name", *col_name]]
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ UILoader + + +

+ + +
+ + +

Description : Create the chat interface

+ +
+ Source code in frontend/ui_utils.py +
317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
+338
+339
+340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
+413
+414
+415
+416
+417
+418
+419
+420
+421
+422
+423
+424
+425
+426
+427
+428
+429
+430
+431
+432
+433
+434
+435
+436
+437
+438
+439
+440
+441
+442
+443
+444
+445
+446
+447
+448
+449
+450
+451
+452
+453
+454
+455
+456
+457
+458
+459
+460
+461
+462
+463
+464
+465
+466
+467
+468
+469
+470
+471
+472
+473
+474
+475
+476
+477
+478
+479
+480
+481
+482
+483
+484
+485
+486
+487
+488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
+510
+511
+512
+513
+514
+515
+516
+517
+518
+519
+520
+521
+522
+523
+524
+525
+526
+527
+528
+529
+530
+531
+532
+533
+534
+535
+536
+537
+538
+539
+540
+541
+542
+543
+544
+545
+546
+547
class UILoader:
+    """
+    Description : Create the chat interface
+    """
+
+    def __init__(self, config_path):
+        with open(config_path, "r") as file:
+            # Load config
+            self.config = json.load(file)
+        # Paths and display information
+
+        # Load metadata chroma database for structured query
+        self.collec = load_chroma_metadata()
+
+        # Metadata paths
+        self.data_metadata_path = (
+            Path(config["data_dir"]) / "all_dataset_description.csv"
+        )
+        self.flow_metadata_path = Path(config["data_dir"]) / "all_flow_description.csv"
+
+        # Read metadata
+        self.data_metadata = pd.read_csv(self.data_metadata_path)
+        self.flow_metadata = pd.read_csv(self.flow_metadata_path)
+
+        # defaults
+        self.query_type = "Dataset"
+        self.llm_filter = False
+        self.paths = self.load_paths()
+        self.info = """
+        <p style='text-align: center; color: white;'>Machine learning research should be easily accessible and reusable. <a href = "https://openml.org/">OpenML</a> is an open platform for sharing datasets, algorithms, and experiments - to learn how to learn better, together. </p>
+        """
+        self.logo = "images/favicon.ico"
+        self.chatbot_display = "How do I do X using OpenML? / Find me a dataset about Y"
+
+        if "messages" not in st.session_state:
+            st.session_state.messages = []
+
+    # container for company description and logo
+    def generate_logo_header(
+        self,
+    ):
+
+        col1, col2 = st.columns([1, 4])
+        with col1:
+            st.image(self.logo, width=100)
+        with col2:
+            st.markdown(
+                self.info,
+                unsafe_allow_html=True,
+            )
+
+    def generate_complete_ui(self):
+
+        self.generate_logo_header()
+        chat_container = st.container()
+        with chat_container:
+            with st.form(key="chat_form"):
+                user_input = st.text_input(
+                    label="Query", placeholder=self.chatbot_display
+                )
+                query_type = st.selectbox(
+                    "Select Query Type",
+                    ["General Query", "Dataset", "Flow"],
+                    help="Are you looking for a dataset or a flow or just have a general query?",
+                )
+                ai_filter = st.toggle(
+                    "Use AI powered filtering",
+                    value=True,
+                    help="Uses an AI model to identify what columns might be useful to you.",
+                )
+                st.form_submit_button(label="Search")
+
+            self.create_chat_interface(user_input=None)
+            if user_input:
+                self.create_chat_interface(
+                    user_input, query_type=query_type, ai_filter=ai_filter
+                )
+
+    def create_chat_interface(self, user_input, query_type=None, ai_filter=False):
+        """
+        Description: Create the chat interface and display the chat history and results. Show the user input and the response from the OpenML Agent.
+
+        """
+        self.query_type = query_type
+        self.ai_filter = ai_filter
+
+        if user_input is None:
+            with st.chat_message(name="ai"):
+                st.write("OpenML Agent: ", "Hello! How can I help you today?")
+                st.write(
+                    "Note that results are powered by local LLM models and may not be accurate. Please refer to the official OpenML website for accurate information."
+                )
+
+        # Handle user input
+        if user_input:
+            self._handle_user_input(user_input, query_type)
+
+    def _handle_user_input(self, user_input, query_type):
+        st.session_state.messages.append({"role": "user", "content": user_input})
+        with st.spinner("Waiting for results..."):
+            results = self.process_query_chat(user_input)
+
+        if not self.query_type == "General Query":
+            st.session_state.messages.append(
+                    {"role": "OpenML Agent", "content": results}
+                )
+        else:
+            self._stream_results(results)
+
+            # reverse messages to show the latest message at the top
+        reversed_messages = self._reverse_session_history()
+
+            # Display chat history
+        self._display_chat_history(query_type, reversed_messages)
+        self.create_download_button()
+
+    def _display_chat_history(self, query_type, reversed_messages):
+        for message in reversed_messages:
+            if query_type == "General Query":
+                pass
+            if message["role"] == "user":
+                with st.chat_message(name="user"):
+                    self.display_results(message["content"], "user")
+            else:
+                with st.chat_message(name="ai"):
+                    self.display_results(message["content"], "ai")
+
+    def _reverse_session_history(self):
+        reversed_messages = []
+        for index in range(0, len(st.session_state.messages), 2):
+            reversed_messages.insert(0, st.session_state.messages[index])
+            reversed_messages.insert(1, st.session_state.messages[index + 1])
+        return reversed_messages
+
+    def _stream_results(self, results):
+        with st.spinner("Fetching results..."):
+            with requests.get(results, stream=True) as r:
+                resp_contain = st.empty()
+                streamed_response = ""
+                for chunk in r.iter_content(chunk_size=1024):
+                    if chunk:
+                        streamed_response += chunk.decode("utf-8")
+                        resp_contain.markdown(streamed_response)
+                resp_contain.empty()
+            st.session_state.messages.append(
+                {"role": "OpenML Agent", "content": streamed_response}
+            )
+
+    @st.experimental_fragment()
+    def create_download_button(self):
+        data = "\n".join(
+            [str(message["content"]) for message in st.session_state.messages]
+        )
+        st.download_button(
+            label="Download chat history",
+            data=data,
+            file_name="chat_history.txt",
+        )
+
+    def display_results(self, initial_response, role):
+        """
+        Description: Display the results in a DataFrame
+        """
+        # st.write("OpenML Agent: ")
+
+        try:
+            st.dataframe(initial_response)
+        except:
+            st.write(initial_response)
+
+    # Function to handle query processing
+    def process_query_chat(self, query):
+        """
+        Description: Process the query and return the results based on the query type and the LLM filter.
+
+        """
+        apply_llm_before_rag = None if not self.llm_filter else False
+        response_parser = ResponseParser(
+            self.query_type, apply_llm_before_rag=apply_llm_before_rag
+        )
+
+        if self.query_type == "Dataset" or self.query_type == "Flow":
+            if not self.ai_filter:
+                response_parser.fetch_rag_response(self.query_type, query)
+                return response_parser.parse_and_update_response(self.data_metadata)
+            else:
+                # get structured query
+                self._display_structured_query_results(query, response_parser)
+
+            results = response_parser.parse_and_update_response(self.data_metadata)
+            return results
+
+        elif self.query_type == "General Query":
+            # Return documentation response path
+            return self.paths["documentation_query"]["local"] + query
+
+    def _display_structured_query_results(self, query, response_parser):
+        response_parser.fetch_structured_query(self.query_type, query)
+        try:
+            # get rag response
+            # using original query instead of extracted topics.
+            response_parser.fetch_rag_response(
+                self.query_type,
+                response_parser.structured_query_response[0]["query"],
+            )
+
+            if response_parser.structured_query_response:
+                st.write(
+                    "Detected Filter(s): ",
+                    json.dumps(
+                        response_parser.structured_query_response[0].get("filter", None)
+                    ),
+                )
+            else:
+                st.write("Detected Filter(s): ", None)
+            if response_parser.structured_query_response[1].get("filter"):
+                with st.spinner("Applying LLM Detected Filter(s)..."):
+                    response_parser.database_filter(
+                        response_parser.structured_query_response[1]["filter"],
+                        collec,
+                    )
+        except:
+            # fallback to RAG response
+            response_parser.fetch_rag_response(self.query_type, query)
+
+    def load_paths(self):
+        """
+        Description: Load paths from paths.json
+        """
+        with open("paths.json", "r") as file:
+            return json.load(file)
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ create_chat_interface(user_input, query_type=None, ai_filter=False) + +

+ + +
+ +

Description: Create the chat interface and display the chat history and results. Show the user input and the response from the OpenML Agent.

+ +
+ Source code in frontend/ui_utils.py +
395
+396
+397
+398
+399
+400
+401
+402
+403
+404
+405
+406
+407
+408
+409
+410
+411
+412
def create_chat_interface(self, user_input, query_type=None, ai_filter=False):
+    """
+    Description: Create the chat interface and display the chat history and results. Show the user input and the response from the OpenML Agent.
+
+    """
+    self.query_type = query_type
+    self.ai_filter = ai_filter
+
+    if user_input is None:
+        with st.chat_message(name="ai"):
+            st.write("OpenML Agent: ", "Hello! How can I help you today?")
+            st.write(
+                "Note that results are powered by local LLM models and may not be accurate. Please refer to the official OpenML website for accurate information."
+            )
+
+    # Handle user input
+    if user_input:
+        self._handle_user_input(user_input, query_type)
+
+
+
+ +
+ +
+ + +

+ display_results(initial_response, role) + +

+ + +
+ +

Description: Display the results in a DataFrame

+ +
+ Source code in frontend/ui_utils.py +
476
+477
+478
+479
+480
+481
+482
+483
+484
+485
def display_results(self, initial_response, role):
+    """
+    Description: Display the results in a DataFrame
+    """
+    # st.write("OpenML Agent: ")
+
+    try:
+        st.dataframe(initial_response)
+    except:
+        st.write(initial_response)
+
+
+
+ +
+ +
+ + +

+ load_paths() + +

+ + +
+ +

Description: Load paths from paths.json

+ +
+ Source code in frontend/ui_utils.py +
542
+543
+544
+545
+546
+547
def load_paths(self):
+    """
+    Description: Load paths from paths.json
+    """
+    with open("paths.json", "r") as file:
+        return json.load(file)
+
+
+
+ +
+ +
+ + +

+ process_query_chat(query) + +

+ + +
+ +

Description: Process the query and return the results based on the query type and the LLM filter.

+ +
+ Source code in frontend/ui_utils.py +
488
+489
+490
+491
+492
+493
+494
+495
+496
+497
+498
+499
+500
+501
+502
+503
+504
+505
+506
+507
+508
+509
+510
+511
def process_query_chat(self, query):
+    """
+    Description: Process the query and return the results based on the query type and the LLM filter.
+
+    """
+    apply_llm_before_rag = None if not self.llm_filter else False
+    response_parser = ResponseParser(
+        self.query_type, apply_llm_before_rag=apply_llm_before_rag
+    )
+
+    if self.query_type == "Dataset" or self.query_type == "Flow":
+        if not self.ai_filter:
+            response_parser.fetch_rag_response(self.query_type, query)
+            return response_parser.parse_and_update_response(self.data_metadata)
+        else:
+            # get structured query
+            self._display_structured_query_results(query, response_parser)
+
+        results = response_parser.parse_and_update_response(self.data_metadata)
+        return results
+
+    elif self.query_type == "General Query":
+        # Return documentation response path
+        return self.paths["documentation_query"]["local"] + query
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/UI/frontend/index.html b/UI/frontend/index.html new file mode 100644 index 0000000..70e99f9 --- /dev/null +++ b/UI/frontend/index.html @@ -0,0 +1,1395 @@ + + + + + + + + + + + + + + + + + + + + + + + Frontend Overview - OpenML RAG Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + +

Frontend Overview

+
    +
  • This page is only an overview. Please refer to the api reference for more detailed information.
  • +
  • Currently the frontend is based on Streamlit. The hope is to integrate it with the OpenML website in the future.
  • +
  • This is what it looks like at the moment :
  • +
  • This component runs the Streamlit frontend. It is the UI that you see when you navigate to http://localhost:8501.
  • +
  • You can start it by running cd frontend && streamlit run ui.py &
  • +
+

Design Methodology

+
    +
  • The main point to note here is that the UI is responsible for all the post-processing of the results, including the displayed metadata information etc.
  • +
  • The RAG pipeline only returns IDs of the relevant datasets and then it is upto the frontend to decide what to do with it. This was a conscious choice as the final objective was to let elasticsearch handle the results.
  • +
  • This includes the logic for filtering the metadata, applying the filters obtained from the query parsing LLM and also what to do with the output of the RAG pipeline.
  • +
+

Main logic

+
    +
  • Streamlit is used for displaying the results. A selectbox is used for the user to select what kind of data they want and then a text_input box is used for them to enter their query.
  • +
  • To make it easier to see what is happening, a spinning indicator with text (eg; Waiting for LLM results) was also added.
  • +
  • Once the query is entered, the RAG pipeline is sent the query as a get request.
  • +
  • Once the results of the RAG pipeline are obtained, the resulting list of IDs is queried from the metadata files (to be replaced with elasticsearch later) and then the relevant data is displayed.
  • +
  • Now it is possible for the query parsing LLM to read the query and infer the columns that the user finds relevant. (eg: "find me a dataset with multiple classes" would enable the filters where num_classes >=2).
  • +
+

paths.json

+
    +
  • Configure this file if any of the endpoints change.
  • +
+

ui.py

+
    +
  • This is where all the above logic is executed and displayed using Streamlit.
  • +
+

ui_utils.py

+
    +
  • This is where all the logic is defined.
  • +
  • Query filtering
      +
    • During the processing of the RAG pipeline data ingestion pipeline, the metadata for all the datasets are saved as a csv file Path(config["data_dir"]) / "all_dataset_description.csv". This file contains information like number of classes, number of instances, authors etc.
    • +
    • Using this file, it is possible to "filter" out what is needed and decide which columns to show.
    • +
    • The ResponseParser and LLMResponseParser classes are probably what you are looking for if you want to modify the behavior of how the filters are created and used.
    • +
    +
  • +
  • RAG pipeline
  • +
  • The RAG pipeline is used to get the relevant IDs for the query.
  • +
  • Feedback
  • +
  • For now feedback is collected in a feedback.json file. This can be changed to something more meaningful later on.
  • +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/assets/_mkdocstrings.css b/assets/_mkdocstrings.css new file mode 100644 index 0000000..85449ec --- /dev/null +++ b/assets/_mkdocstrings.css @@ -0,0 +1,119 @@ + +/* Avoid breaking parameter names, etc. in table cells. */ +.doc-contents td code { + word-break: normal !important; +} + +/* No line break before first paragraph of descriptions. */ +.doc-md-description, +.doc-md-description>p:first-child { + display: inline; +} + +/* Max width for docstring sections tables. */ +.doc .md-typeset__table, +.doc .md-typeset__table table { + display: table !important; + width: 100%; +} + +.doc .md-typeset__table tr { + display: table-row; +} + +/* Defaults in Spacy table style. */ +.doc-param-default { + float: right; +} + +/* Backward-compatibility: docstring section titles in bold. */ +.doc-section-title { + font-weight: bold; +} + +/* Symbols in Navigation and ToC. */ +:root, +[data-md-color-scheme="default"] { + --doc-symbol-attribute-fg-color: #953800; + --doc-symbol-function-fg-color: #8250df; + --doc-symbol-method-fg-color: #8250df; + --doc-symbol-class-fg-color: #0550ae; + --doc-symbol-module-fg-color: #5cad0f; + + --doc-symbol-attribute-bg-color: #9538001a; + --doc-symbol-function-bg-color: #8250df1a; + --doc-symbol-method-bg-color: #8250df1a; + --doc-symbol-class-bg-color: #0550ae1a; + --doc-symbol-module-bg-color: #5cad0f1a; +} + +[data-md-color-scheme="slate"] { + --doc-symbol-attribute-fg-color: #ffa657; + --doc-symbol-function-fg-color: #d2a8ff; + --doc-symbol-method-fg-color: #d2a8ff; + --doc-symbol-class-fg-color: #79c0ff; + --doc-symbol-module-fg-color: #baff79; + + --doc-symbol-attribute-bg-color: #ffa6571a; + --doc-symbol-function-bg-color: #d2a8ff1a; + --doc-symbol-method-bg-color: #d2a8ff1a; + --doc-symbol-class-bg-color: #79c0ff1a; + --doc-symbol-module-bg-color: #baff791a; +} + +code.doc-symbol { + border-radius: .1rem; + font-size: .85em; + padding: 0 .3em; + font-weight: bold; +} + 
+code.doc-symbol-attribute { + color: var(--doc-symbol-attribute-fg-color); + background-color: var(--doc-symbol-attribute-bg-color); +} + +code.doc-symbol-attribute::after { + content: "attr"; +} + +code.doc-symbol-function { + color: var(--doc-symbol-function-fg-color); + background-color: var(--doc-symbol-function-bg-color); +} + +code.doc-symbol-function::after { + content: "func"; +} + +code.doc-symbol-method { + color: var(--doc-symbol-method-fg-color); + background-color: var(--doc-symbol-method-bg-color); +} + +code.doc-symbol-method::after { + content: "meth"; +} + +code.doc-symbol-class { + color: var(--doc-symbol-class-fg-color); + background-color: var(--doc-symbol-class-bg-color); +} + +code.doc-symbol-class::after { + content: "class"; +} + +code.doc-symbol-module { + color: var(--doc-symbol-module-fg-color); + background-color: var(--doc-symbol-module-bg-color); +} + +code.doc-symbol-module::after { + content: "mod"; +} + +.doc-signature .autorefs { + color: inherit; + border-bottom: 1px dotted currentcolor; +} diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..1cf13b9f9d978896599290a74f77d5dbe7d1655c GIT binary patch literal 1870 zcmV-U2eJ5xP)Gc)JR9QMau)O=X#!i9;T z37kk-upj^(fsR36MHs_+1RCI)NNu9}lD0S{B^g8PN?Ww(5|~L#Ng*g{WsqleV}|#l zz8@ri&cTzw_h33bHI+12+kK6WN$h#n5cD8OQt`5kw6p~9H3()bUQ8OS4Q4HTQ=1Ol z_JAocz`fLbT2^{`8n~UAo=#AUOf=SOq4pYkt;XbC&f#7lb$*7=$na!mWCQ`dBQsO0 zLFBSPj*N?#u5&pf2t4XjEGH|=pPQ8xh7tpx;US5Cx_Ju;!O`ya-yF`)b%TEt5>eP1ZX~}sjjA%FJF?h7cX8=b!DZl<6%Cv z*G0uvvU+vmnpLZ2paivG-(cd*y3$hCIcsZcYOGh{$&)A6*XX&kXZd3G8m)G$Zz-LV z^GF3VAW^Mdv!)4OM8EgqRiz~*Cji;uzl2uC9^=8I84vNp;ltJ|q-*uQwGp2ma6cY7 z;`%`!9UXO@fr&Ebapfs34OmS9^u6$)bJxrucutf>`dKPKT%%*d3XlFVKunp9 zasduxjrjs>f8V=D|J=XNZp;_Zy^WgQ$9WDjgY=z@stwiEBm9u5*|34&1Na8BMjjgf3+SHcr`5~>oz1Y?SW^=K z^bTyO6>Gar#P_W2gEMwq)ot3; zREHn~U&Dp0l6YT0&k-wLwYjb?5zGK`W6S2v+K>AM(95m2C20L|3m~rN8dprPr@t)5lsk9Hu*W 
z?pS990s;Ez=+Rj{x7p``4>+c0G5^pYnB1^!TL=(?HLHZ+HicG{~4F1d^5Awl_2!1jICM-!9eoLhbbT^;yHcefyTAaqRcY zmuctDopPT!%k+}x%lZRKnzykr2}}XfG_ne?nRQO~?%hkzo;@RN{P6o`&mMUWBYMTe z6i8ChtjX&gXl`nvrU>jah)2iNM%JdjqoaeaU%yVn!^70x-flljp6Q5tK}5}&X8&&G zX3fpb3E(!rH=zVI_9Gjl45w@{(ITqngWFe7@9{mX;tO25Z_8 zQHEpI+FkTU#4xu>RkN>b3Tnc3UpWzPXWm#o55GKF09j^Mh~)K7{QqbO_~(@CVq! zS<8954|P8mXN2MRs86xZ&Q4EfM@JB94b=(YGuk)s&^jiSF=t3*oNK3`rD{H`yQ?d; ztE=laAUoZx5?RC8*WKOj`%LXEkgDd>&^Q4M^z`%u0rg-It=hLCVsq!Z%^6eB-OvOT zFZ28TN&cRmgU}Elrnk43)!>Z1FCPL2K$7}gwzIc48NX}#!A1BpJP?#v5wkNprhV** z?Cpalt1oH&{r!o3eSKc&ap)iz2BTn_VV`4>9M^b3;(YY}4>#ML6{~(4mH+?%07*qo IM6N<$f(jP3KmY&$ literal 0 HcmV?d00001 diff --git a/assets/javascripts/bundle.af256bd8.min.js b/assets/javascripts/bundle.af256bd8.min.js new file mode 100644 index 0000000..27355d2 --- /dev/null +++ b/assets/javascripts/bundle.af256bd8.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var ji=Object.create;var gr=Object.defineProperty;var Wi=Object.getOwnPropertyDescriptor;var Ui=Object.getOwnPropertyNames,Vt=Object.getOwnPropertySymbols,Di=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,io=Object.prototype.propertyIsEnumerable;var no=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,$=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&no(e,r,t[r]);if(Vt)for(var r of Vt(t))io.call(t,r)&&no(e,r,t[r]);return e};var ao=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Vt)for(var o of Vt(e))t.indexOf(o)<0&&io.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Vi=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Ui(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=Wi(t,n))||o.enumerable});return e};var Lt=(e,t,r)=>(r=e!=null?ji(Di(e)):{},Vi(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var so=(e,t,r)=>new Promise((o,n)=>{var 
i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var po=yr((Er,co)=>{(function(e,t){typeof Er=="object"&&typeof co!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var ft=H.type,qe=H.tagName;return!!(qe==="INPUT"&&a[ft]&&!H.readOnly||qe==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function 
ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var qr=yr((lx,Sn)=>{"use strict";/*! + * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var Ha=/["'&<>]/;Sn.exports=ka;function ka(e){var t=""+e,r=Ha.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof It=="object"&&typeof Yr=="object"?Yr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof It=="object"?It.ClipboardJS=r():t.ClipboardJS=r()})(It,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Fi}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var M=f()(_);return u("cut"),M},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",M=document.createElement("textarea");M.style.fontSize="12pt",M.style.border="0",M.style.padding="0",M.style.margin="0",M.style.position="absolute",M.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return M.style.top="".concat(j,"px"),M.setAttribute("readonly",""),M.value=V,M}var te=function(_,M){var j=A(_);M.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var M=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,M):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,M):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(M){return typeof M}:H=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},H(V)}var ft=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},M=_.action,j=M===void 0?"copy":M,D=_.container,Y=_.target,$e=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if($e)return J($e,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},qe=ft;function je(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?je=function(M){return typeof M}:je=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},je(V)}function Ai(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function oo(V,_){for(var M=0;M<_.length;M++){var j=_[M];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ci(V,_,M){return _&&oo(V.prototype,_),M&&oo(V,M),V}function Hi(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function ki(V){var _=Ri();return function(){var j=Ut(V),D;if(_){var Y=Ut(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return $i(this,D)}}function $i(V,_){return _&&(je(_)==="object"||typeof _=="function")?_:Pi(V)}function Pi(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Ri(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Ut(V){return 
Ut=Object.setPrototypeOf?Object.getPrototypeOf:function(M){return M.__proto__||Object.getPrototypeOf(M)},Ut(V)}function vr(V,_){var M="data-clipboard-".concat(V);if(_.hasAttribute(M))return _.getAttribute(M)}var Ii=function(V){Hi(M,V);var _=ki(M);function M(j,D){var Y;return Ai(this,M),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ci(M,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=je(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function($e){return Y.onClick($e)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,$e=this.action(Y)||"copy",Dt=qe({action:$e,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Dt?"success":"error",{action:$e,text:Dt,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof D=="string"?[D]:D,$e=!!document.queryCommandSupported;return Y.forEach(function(Dt){$e=$e&&!!document.queryCommandSupported(Dt)}),$e}}]),M}(s()),Fi=Ii},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var 
i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return 
Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function fo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is 
not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof he=="function"?he(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ut(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ut(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + `):"",this.name="UnsubscriptionError",this.errors=r}});function Qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var We=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=he(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=he(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{uo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)uo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 
0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&Qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&Qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=We.EMPTY;function qt(e){return e instanceof We||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function uo(e){k(e)?e():e.unsubscribe()}var Pe={onUnhandledError:null,onStoppedNotification:null,Promise:void 0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var dt={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new We(function(){o.currentObservers=null,Qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new wo(r,o)},t}(F);var wo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var 
r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var At={now:function(){return(At.delegate||Date).now()},delegate:void 0};var Ct=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=At);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(gt);var Oo=function(e){re(t,e);function t(){return 
e!==null&&e.apply(this,arguments)||this}return t}(xt);var Hr=new Oo(So);var Mo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=vt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==o&&(vt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(gt);var Lo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(xt);var me=new Lo(Mo);var S=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var yt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[bt])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Ji(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Ji();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return mo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Xi(e);if(yt(e))return Zi(e);if(Gt(e))return ea(e);if(Xt(e))return _o(e);if(tr(e))return ta(e);if(or(e))return ra(e)}throw Zt(e)}function Xi(e){return new F(function(t){var r=e[bt]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Zi(e){return new F(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?De(t):qo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return S}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var H=l;ie(),H==null||H.unsubscribe()};return y(function(H,ft){h++,!A&&!w&&te();var qe=u=u!=null?u:r();ft.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),qe.subscribe(ft),!l&&h>0&&(l=new at({next:function(je){return 
qe.next(je)},error:function(je){A=!0,te(),f=Wr(ie,n,je),qe.error(je)},complete:function(){w=!0,te(),f=Wr(ie,a),qe.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function P(e,t=document){return Array.from(t.querySelectorAll(e))}function R(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Ie(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var ya=O(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Ie()||document.body),G(1));function et(e){return ya.pipe(m(t=>e.contains(t)),K())}function $t(e,t){return C(()=>O(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?kt(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Go(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Go(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Go(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function Tt(e){let t=x("script",{src:e});return C(()=>(document.head.appendChild(t),O(d(t,"load"),d(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Jo=new g,Ea=C(()=>typeof ResizeObserver=="undefined"?Tt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Jo.next(t)))),v(e=>O(Ye,I(e)).pipe(L(()=>e.disconnect()))),G(1));function 
ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return Ea.pipe(E(r=>r.observe(t)),v(r=>Jo.pipe(b(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function St(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Xo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ve(e){return{x:e.offsetLeft,y:e.offsetTop}}function Zo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function en(e){return O(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ve(e)),Q(Ve(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function Ne(e){return O(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var tn=new g,wa=C(()=>I(new IntersectionObserver(e=>{for(let t of e)tn.next(t)},{threshold:0}))).pipe(v(e=>O(Ye,I(e)).pipe(L(()=>e.disconnect()))),G(1));function tt(e){return wa.pipe(E(t=>t.observe(e)),v(t=>tn.pipe(b(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function rn(e,t=16){return Ne(e).pipe(m(({y:r})=>{let o=ce(e),n=St(e);return r>=n.height-o.height-t}),K())}var lr={drawer:R("[data-md-toggle=drawer]"),search:R("[data-md-toggle=search]")};function on(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function ze(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function Ta(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Sa(){return 
O(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function nn(){let e=d(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:on("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Ie();if(typeof o!="undefined")return!Ta(o,r)}return!0}),pe());return Sa().pipe(v(t=>t?S:e))}function xe(){return new URL(location.href)}function lt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function an(){return new g}function sn(){return location.hash.slice(1)}function cn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Oa(e){return O(d(window,"hashchange"),e).pipe(m(sn),Q(sn()),b(t=>t.length>0),G(1))}function pn(e){return Oa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function Pt(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function ln(){let e=matchMedia("print");return O(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():S))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Fe(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function 
mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function fn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function un(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function dn(){return O(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(un),Q(un()))}function hn(){return{width:innerWidth,height:innerHeight}}function bn(){return d(window,"resize",{passive:!0}).pipe(m(hn),Q(hn()))}function vn(){return z([dn(),bn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ve(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Ma(e){return d(e,"message",t=>t.data)}function La(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function gn(e,t=new Worker(e)){let r=Ma(t),o=La(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Re(r.pipe(U(i))),pe())}var _a=R("#__config"),Ot=JSON.parse(_a.textContent);Ot.base=`${new URL(Ot.base,xe())}`;function ye(){return Ot}function B(e){return Ot.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?Ot.translations[e].replace("#",t.toString()):Ot.translations[e]}function Se(e,t=document){return R(`[data-md-component=${e}]`,t)}function ae(e,t=document){return P(`[data-md-component=${e}]`,t)}function Aa(e){let t=R(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>R(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function xn(e){if(!B("announce.dismiss")||!e.childElementCount)return S;if(!e.hidden){let t=R(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),Aa(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>$({ref:e},r)))})}function Ca(e,{target$:t}){return 
t.pipe(m(r=>({hidden:r!==e})))}function yn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Ca(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>$({ref:e},o)))}function Rt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function En(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function wn(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Rt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function Tn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}var On=Lt(qr());function Qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,(0,On.default)(c))," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Mn(e){let 
t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreQr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>Qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Ln(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Kr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function _n(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function $a(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function An(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map($a)))}var Pa=0;function Ra(e){let t=z([et(e),$t(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Xo(e)).pipe(oe(Ne),pt(1),ke(t),m(()=>Zo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function Ia(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Pa++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let 
s=a.pipe(kt(c=>Me(+!c*250,Hr)),K(),v(c=>c?r:S),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>$t(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let p=a.pipe(b(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),ee(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(R(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),ve(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),Ra(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>$({ref:e},c)))})}function mt(e,{viewport$:t},r=document.body){return Ia(e,{content$:new F(o=>{let n=e.title,i=En(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Fa(e,t){let r=C(()=>z([en(e),Ne(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Cn(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return 
i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),O(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Ie())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Fa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>$({ref:e},s)))})}function ja(e){return e.tagName==="CODE"?P(".c, .c1, .cm",e):[e]}function Wa(e){let t=[];for(let r of ja(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function Hn(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Wa(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > 
li:nth-child(${p})`,e)&&(a.set(p,wn(p,i)),s.replaceWith(a.get(p)))}return a.size===0?S:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([R(".md-typeset",f),R(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?Hn(f,u):Hn(u,f)}),O(...[...a].map(([,l])=>Cn(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function $n(e,t){return C(()=>{let r=kn(e);return typeof r!="undefined"?fr(r,e,t):S})}var Pn=Lt(Br());var Ua=0;function Rn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return Rn(t)}}function Da(e){return ge(e).pipe(m(({width:t})=>({scrollable:St(e).width>t})),Z("scrollable"))}function In(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Pn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Ua++}`;let l=Tn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(mt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=Rn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:S)))}}return P(":scope > span[id]",e).length&&e.classList.add("md-code__content"),Da(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>$({ref:e},c)),Re(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function Va(e,{target$:t,print$:r}){let o=!0;return 
O(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Fn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Va(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>$({ref:e},o)))})}var jn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node 
.divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs 
#statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead 
path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Gr,za=0;function qa(){return typeof mermaid=="undefined"||mermaid instanceof Element?Tt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function Wn(e){return e.classList.remove("mermaid"),Gr||(Gr=qa().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:jn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Gr.subscribe(()=>so(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${za++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Gr.pipe(m(()=>({ref:e})))}var Un=x("table");function Dn(e){return e.replaceWith(Un),Un.replaceWith(_n(e)),I({ref:e})}function Qa(e){let t=e.find(r=>r.checked)||e[0];return O(...e.map(r=>d(r,"change").pipe(m(()=>R(`label[for="${r.id}"]`))))).pipe(Q(R(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Vn(e,{viewport$:t,target$:r}){let o=R(".tabbed-labels",e),n=P(":scope > input",e),i=Kr("prev");e.append(i);let a=Kr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ve(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let 
h=pr(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([Ne(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=St(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),O(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=R(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of P("[data-tabs]"))for(let A of P(":scope > input",w)){let te=R(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of P("audio, video",e))c.pause()}),Qa(n).pipe(E(c=>s.next(c)),L(()=>s.complete()),m(c=>$({ref:e},c)))}).pipe(Ke(se))}function Nn(e,{viewport$:t,target$:r,print$:o}){return O(...P(".annotate:not(.highlight)",e).map(n=>$n(n,{target$:r,print$:o})),...P("pre:not(.mermaid) > 
code",e).map(n=>In(n,{target$:r,print$:o})),...P("pre.mermaid",e).map(n=>Wn(n)),...P("table:not([class])",e).map(n=>Dn(n)),...P("details",e).map(n=>Fn(n,{target$:r,print$:o})),...P("[data-tabs]",e).map(n=>Vn(n,{viewport$:t,target$:r})),...P("[title]",e).filter(()=>B("content.tooltips")).map(n=>mt(n,{viewport$:t})))}function Ka(e,{alert$:t}){return t.pipe(v(r=>O(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function zn(e,t){let r=R(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),Ka(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>$({ref:e},n)))})}var Ya=0;function Ba(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?Ne(o):I({x:0,y:0}),i=O(et(t),$t(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ve(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function qn(e){let t=e.title;if(!t.length)return S;let r=`__tooltip_${Ya++}`,o=Rt(r,"inline"),n=R(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return 
i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),O(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(pt(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Ba(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>$({ref:e},a)))}).pipe(Ke(se))}function Ga({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Be(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=ze("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Qn(e,t){return C(()=>z([ge(e),Ga(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function Kn(e,{header$:t,main$:r}){return C(()=>{let o=new g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),ke(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue(P("[title]",e)).pipe(b(()=>B("content.tooltips")),oe(a=>qn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>$({ref:e},a)),Re(i.pipe(U(n))))})}function Ja(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function Yn(e,t){return C(()=>{let r=new 
g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?S:Ja(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>$({ref:e},n)))})}function Bn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Xa(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Gn(e){let t=P("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=Pt("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let 
s=0;sa.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(ve(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Xa(t).pipe(U(n.pipe(Ce(1))),ct(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>$({ref:e},a)))})}function Jn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Jr=Lt(Br());function Za(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Xn({alert$:e}){Jr.default.isSupported()&&new F(t=>{new Jr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Za(R(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Zn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function es(e,t){let r=new Map;for(let o of P("url",e)){let n=R("loc",o),i=[Zn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of P("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Zn(new URL(s),t))}}return r}function ur(e){return fn(new URL("sitemap.xml",e)).pipe(m(t=>es(t,new URL(e))),de(()=>I(new Map)))}function ts(e,t){if(!(e.target instanceof Element))return S;let r=e.target.closest("a");if(r===null)return S;if(r.target||e.metaKey||e.ctrlKey)return S;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):S}function ei(e){let t=new Map;for(let r of P(":scope > *",e.head))t.set(r.outerHTML,r);return 
t}function ti(e){for(let t of P("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function rs(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=ei(document);for(let[o,n]of ei(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return Ue(P("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),S}),X(),ne(document))}function ri({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return S;let n=ur(o.base);I(document).subscribe(ti);let i=d(document.body,"click").pipe(ke(n),v(([p,c])=>ts(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),O(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>mn(p,{progress$:r}).pipe(de(()=>(lt(p,!0),S)))),v(ti),v(rs),pe());return O(s.pipe(ee(e,(p,c)=>c)),s.pipe(v(()=>e),Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 
0:c.y)!=null?l:0):(history.scrollRestoration="auto",cn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var oi=Lt(qr());function ni(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,oi.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function Ft(e){return e.type===1}function dr(e){return e.type===3}function ii(e,t){let r=gn(e);return O(I(location.protocol!=="file:"),ze("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function ai({document$:e}){let t=ye(),r=Fe(new URL("../versions.json",t.base)).pipe(de(()=>S)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?S:(i.preventDefault(),I(p))}}return S}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>lt(n,!0)),z([r,o]).subscribe(([n,i])=>{R(".md-header__topic").appendChild(An(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p 
of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function is(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),ze("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=O(t.pipe(Ae(Ft)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function si(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(Ft)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=R("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),is(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>$({ref:e},i)),G(1))}function ci(e,{worker$:t,query$:r}){let o=new g,n=rn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=R(":scope > :first-child",e),s=R(":scope > :last-child",e);ze("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(Ft)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>O(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Be(4),Vr(n),v(([f])=>f)))),m(Mn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?S:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>$({ref:e},l)))}function 
as(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function pi(e,t){let r=new g,o=r.pipe(X(),ne(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),as(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>$({ref:e},n)))}function li(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=O(d(n,"keydown"),d(n,"focus")).pipe(ve(se),m(()=>n.value),K());return o.pipe(ke(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function mi(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ii(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Ie();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of P(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...P(":not(details) > [href], summary, details[open] 
[href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Ie()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=si(i,{worker$:n});return O(s,ci(a,{worker$:n,query$:s})).pipe(Re(...ae("search-share",e).map(p=>pi(p,{query$:s})),...ae("search-suggest",e).map(p=>li(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ye}}function fi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>ni(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function ss(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Xr(e,o){var n=o,{header$:t}=n,r=ao(n,["header$"]);let i=R(".md-sidebar__scrollwrap",e),{y:a}=Ve(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of P(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue(P("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(ve(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=R(`[id="${l.htmlFor}"]`);R(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),ss(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>$({ref:e},l)))})}function ui(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return st(Fe(`${r}/releases/latest`).pipe(de(()=>S),m(o=>({version:o.tag_name})),De({})),Fe(r).pipe(de(()=>S),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Fe(r).pipe(m(o=>({repositories:o.public_repos})),De({}))}}function di(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return st(Fe(`${r}/releases/permalink/latest`).pipe(de(()=>S),m(({tag_name:o})=>({version:o})),De({})),Fe(r).pipe(de(()=>S),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}function hi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return ui(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return di(r,o)}return S}var cs;function ps(e){return cs||(cs=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return S}return hi(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>S),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function bi(e){let t=R(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Ln(o)),t.classList.add("md-source__repository--active")}),ps(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>$({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function vi(e,t){return C(()=>{let r=new g;return 
r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):ls(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>$({ref:e},o)))})}function ms(e,{viewport$:t,header$:r}){let o=new Map,n=P(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=R(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),ke(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Be(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=O(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),ke(o.pipe(ve(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return 
B("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),ct({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ms(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>$({ref:e},s)))})}function fs(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Be(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),ct({delay:250}),m(a=>({hidden:a})))}function xi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),fs(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>$({ref:e},s)))}function yi({document$:e,viewport$:t}){e.pipe(v(()=>P(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?mt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title"))):S})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>P(".md-status")),oe(r=>mt(r,{viewport$:t}))).subscribe()}function Ei({document$:e,tablet$:t}){e.pipe(v(()=>P(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function us(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function 
wi({document$:e}){e.pipe(v(()=>P("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(us),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Ti({viewport$:e,tablet$:t}){z([ze("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ds(){return location.protocol==="file:"?Tt(`${new URL("search/search_index.js",Zr.base)}`).pipe(m(()=>__index),G(1)):Fe(new URL("search/search_index.json",Zr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Bo(),Wt=an(),Mt=pn(Wt),eo=nn(),Oe=vn(),hr=Pt("(min-width: 960px)"),Oi=Pt("(min-width: 1220px)"),Mi=ln(),Zr=ye(),Li=document.forms.namedItem("search")?ds():Ye,to=new g;Xn({alert$:to});var ro=new g;B("navigation.instant")&&ri({location$:Wt,viewport$:Oe,progress$:ro}).subscribe(ot);var 
Si;((Si=Zr.version)==null?void 0:Si.provider)==="mike"&&ai({document$:ot});O(Wt,Mt).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});eo.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&<(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&<(r);break;case"Enter":let o=Ie();o instanceof HTMLLabelElement&&o.click()}});yi({viewport$:Oe,document$:ot});Ei({document$:ot,tablet$:hr});wi({document$:ot});Ti({viewport$:Oe,tablet$:hr});var rt=Qn(Se("header"),{viewport$:Oe}),jt=ot.pipe(m(()=>Se("main")),v(e=>Bn(e,{viewport$:Oe,header$:rt})),G(1)),hs=O(...ae("consent").map(e=>yn(e,{target$:Mt})),...ae("dialog").map(e=>zn(e,{alert$:to})),...ae("header").map(e=>Kn(e,{viewport$:Oe,header$:rt,main$:jt})),...ae("palette").map(e=>Gn(e)),...ae("progress").map(e=>Jn(e,{progress$:ro})),...ae("search").map(e=>mi(e,{index$:Li,keyboard$:eo})),...ae("source").map(e=>bi(e))),bs=C(()=>O(...ae("announce").map(e=>xn(e)),...ae("content").map(e=>Nn(e,{viewport$:Oe,target$:Mt,print$:Mi})),...ae("content").map(e=>B("search.highlight")?fi(e,{index$:Li,location$:Wt}):S),...ae("header-title").map(e=>Yn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Oi,()=>Xr(e,{viewport$:Oe,header$:rt,main$:jt})):Nr(hr,()=>Xr(e,{viewport$:Oe,header$:rt,main$:jt}))),...ae("tabs").map(e=>vi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:jt,target$:Mt})),...ae("top").map(e=>xi(e,{viewport$:Oe,header$:rt,main$:jt,target$:Mt})))),_i=ot.pipe(v(()=>bs),Re(hs),G(1));_i.subscribe();window.document$=ot;window.location$=Wt;window.target$=Mt;window.keyboard$=eo;window.viewport$=Oe;window.tablet$=hr;window.screen$=Oi;window.print$=Mi;window.alert$=to;window.progress$=ro;window.component$=_i;})(); +//# sourceMappingURL=bundle.af256bd8.min.js.map + diff --git a/assets/javascripts/bundle.af256bd8.min.js.map 
b/assets/javascripts/bundle.af256bd8.min.js.map new file mode 100644 index 0000000..0501d11 --- /dev/null +++ b/assets/javascripts/bundle.af256bd8.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", "node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", 
"node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", 
"node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", 
"node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", 
"src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", 
"src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", 
"src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? 
define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n 
document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. 
For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. 
So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to 
escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = 
/*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. 
https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. 
Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if 
(self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} 
useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || 
exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener 
to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName === 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from 
https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) 
{\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n 
)\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n 
if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? 
mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! 
*****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? 
value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? 
y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? 
(this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? 
f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. 
Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. 
A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. 
Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? []).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? 
[_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. 
It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. 
This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. 
Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { 
timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. 
Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { 
reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. 
Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | 
null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". 
Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): 
UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. 
This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. 
See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). 
In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. 
Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * 
}\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. 
To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: 
OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? 
Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. 
Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. 
This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. (DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? 
function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. 
Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected 
_checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. 
When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. `ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. 
This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 
1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. 
May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is 
not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? 
this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... 
*/\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. 
Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the 
Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? 
super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. 
Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. 
cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? 
of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! 
: defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an