diff --git a/docs/integrations/index.md b/docs/integrations/index.md
index b1cd315ac..34da66778 100644
--- a/docs/integrations/index.md
+++ b/docs/integrations/index.md
@@ -42,6 +42,8 @@ Flytekit functionality. These plugins can be anything and for comparison can be
   - Run analytical queries using DuckDB.
 * - {doc}`Weights and Biases `
   - `wandb`: Machine learning platform to build better models faster.
+* - {doc}`Neptune `
+  - `neptune`: Neptune is the MLOps stack component for experiment tracking.
 * - {doc}`NIM `
   - Serve optimized model containers with NIM.
 * - {doc}`Ollama `
@@ -214,6 +216,7 @@ constructs natively within other orchestration tools.
 /auto_examples/mmcloud_agent/index
 /auto_examples/modin_plugin/index
 /auto_examples/kfmpi_plugin/index
+/auto_examples/neptune_plugin/index
 /auto_examples/nim_plugin/index
 /auto_examples/ollama_plugin/index
 /auto_examples/onnx_plugin/index
diff --git a/examples/neptune_plugin/Dockerfile b/examples/neptune_plugin/Dockerfile
new file mode 100644
index 000000000..80482086b
--- /dev/null
+++ b/examples/neptune_plugin/Dockerfile
@@ -0,0 +1,24 @@
+FROM python:3.11-slim-bookworm
+LABEL org.opencontainers.image.source https://github.com/flyteorg/flytesnacks
+
+WORKDIR /root
+ENV VENV /opt/venv
+ENV LANG C.UTF-8
+ENV LC_ALL C.UTF-8
+ENV PYTHONPATH /root
+
+# Virtual environment
+RUN python3 -m venv ${VENV}
+ENV PATH="${VENV}/bin:$PATH"
+
+# Install Python dependencies
+COPY requirements.in /root
+RUN pip install --no-cache-dir -r /root/requirements.in
+
+# Copy the actual code
+COPY . /root
+
+# This tag is supplied by the build script and will be used to determine the version
+# when registering tasks, workflows, and launch plans
+ARG tag
+ENV FLYTE_INTERNAL_IMAGE $tag
diff --git a/examples/neptune_plugin/README.md b/examples/neptune_plugin/README.md
new file mode 100644
index 000000000..5e6e321e3
--- /dev/null
+++ b/examples/neptune_plugin/README.md
@@ -0,0 +1,47 @@
+(neptune)=
+
+# Neptune
+
+```{eval-rst}
+.. tags:: Integration, Data, Metrics, Intermediate
+```
+
+[Neptune](https://neptune.ai/) is the MLOps stack component for experiment tracking. It offers a single place to log, compare, store, and collaborate on experiments and models. This plugin enables seamless use of Neptune within Flyte by configuring links between the two platforms. You can find more information about how to use Neptune in their [documentation](https://docs.neptune.ai/).
+
+## Installation
+
+To install the Flyte Neptune plugin, run the following command:
+
+```bash
+pip install flytekitplugins-neptune
+```
+
+## Example usage
+
+For a usage example, see the {doc}`Neptune example `.
+
+## Local testing
+
+To run the {doc}`Neptune example ` locally:
+
+1. Create an account on [Neptune](https://neptune.ai/).
+2. Create a project on Neptune.
+3. In the example, set `NEPTUNE_PROJECT` to your project name.
+4. Add a secret using [Flyte's Secrets manager](https://docs.flyte.org/en/latest/user_guide/productionizing/secrets.html) with `key="neptune-api-token"` and `group="neptune-api-group"`. One way to create this secret is sketched after this list.
+5. If you want to see the dynamic log links in the UI, add the configuration described in the next section.
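+
+For step 4, one way to provide the token on a Flyte cluster is to store it as a Kubernetes secret whose name matches the secret group and whose data key matches the secret key. The sketch below assumes the default `flytesnacks-development` project-domain namespace and uses a placeholder token; adjust both for your deployment:
+
+```bash
+# Store the Neptune API token where Flyte's Kubernetes secrets support can find it:
+# secret name = group ("neptune-api-group"), data key = key ("neptune-api-token").
+kubectl create secret generic neptune-api-group \
+    --from-literal=neptune-api-token=<YOUR_NEPTUNE_API_TOKEN> \
+    -n flytesnacks-development
+```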
+
+## Flyte deployment configuration
+
+To enable dynamic log links, add the plugin to Flyte's configuration file:
+
+```yaml
+plugins:
+  logs:
+    dynamic-log-links:
+      - neptune-run-id:
+          displayName: Neptune
+          templateUris: "{{ .taskConfig.host }}/{{ .taskConfig.project }}?query=(%60flyte%2Fexecution_id%60%3Astring%20%3D%20%22{{ .executionName }}-{{ .nodeId }}-{{ .taskRetryAttempt }}%22)&lbViewUnpacked=true"
+```
+
+```{auto-examples-toc}
+neptune_example
+```
diff --git a/examples/neptune_plugin/neptune_plugin/__init__.py b/examples/neptune_plugin/neptune_plugin/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/examples/neptune_plugin/neptune_plugin/neptune_example.py b/examples/neptune_plugin/neptune_plugin/neptune_example.py
new file mode 100644
index 000000000..84638cfe5
--- /dev/null
+++ b/examples/neptune_plugin/neptune_plugin/neptune_example.py
@@ -0,0 +1,153 @@
+# %% [markdown]
+# (neptune_example)=
+#
+# # Neptune example
+# Neptune is the MLOps stack component for experiment tracking. It offers a single place
+# to log, compare, store, and collaborate on experiments and models. This plugin
+# enables seamless use of Neptune within Flyte by configuring links between the
+# two platforms. In this example, we learn how to scale up training of multiple
+# XGBoost models and use Neptune for tracking.
+# %%
+from typing import List, Tuple
+
+import numpy as np
+from flytekit import (
+    ImageSpec,
+    Resources,
+    Secret,
+    current_context,
+    dynamic,
+    task,
+    workflow,
+)
+from flytekitplugins.neptune import neptune_init_run
+
+# %% [markdown]
+# First, we specify the Neptune project that was created on Neptune's platform.
+# Please update `NEPTUNE_PROJECT` to the value associated with your account.
+# %%
+NEPTUNE_PROJECT = "username/project"
+
+# %% [markdown]
+# Neptune requires an API key to authenticate with its service. Here, we declare the
+# secret, which is created with
+# [Flyte's Secrets manager](https://docs.flyte.org/en/latest/user_guide/productionizing/secrets.html).
+# %%
+api_key = Secret(key="neptune-api-token", group="neptune-api-group")
+
+# %% [markdown]
+# Next, we use `ImageSpec` to construct a container with the dependencies for our
+# XGBoost training task. Please set `REGISTRY` to a registry that your cluster can access.
+# %%
+REGISTRY = "localhost:30000"
+
+image = ImageSpec(
+    name="flytekit-xgboost",
+    packages=[
+        "neptune",
+        "neptune-xgboost",
+        "flytekitplugins-neptune",
+        "scikit-learn==1.5.1",
+        "numpy==1.26.1",
+        "matplotlib==3.9.2",
+    ],
+    builder="default",
+    registry=REGISTRY,
+)
+
+
+# %% [markdown]
+# Then, we use a task to download the dataset and cache the data in Flyte:
+# %%
+@task(
+    container_image=image,
+    cache=True,
+    cache_version="v2",
+    requests=Resources(cpu="2", mem="2Gi"),
+)
+def get_dataset() -> Tuple[np.ndarray, np.ndarray]:
+    from sklearn.datasets import fetch_california_housing
+
+    X, y = fetch_california_housing(return_X_y=True, as_frame=False)
+    return X, y
+
+
+# %% [markdown]
+# Next, we use the `neptune_init_run` decorator to configure Neptune tracking for the
+# task that trains an XGBoost model. The decorator requires an `api_key` secret to
+# authenticate with Neptune, and the task definition needs to request the same
+# `api_key` secret. In the training function, the
+# [Neptune run object](https://docs.neptune.ai/api/run/) is accessible through
+# `current_context().neptune_run`, which is frequently used in Neptune's integrations.
+# In this example, we pass the `Run` object into Neptune's XGBoost callback.
+# %%
+@task(
+    container_image=image,
+    secret_requests=[api_key],
+    requests=Resources(cpu="2", mem="4Gi"),
+)
+@neptune_init_run(project=NEPTUNE_PROJECT, secret=api_key)
+def train_model(max_depth: int, X: np.ndarray, y: np.ndarray):
+    import xgboost as xgb
+    from neptune.integrations.xgboost import NeptuneCallback
+    from sklearn.model_selection import train_test_split
+
+    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
+    dtrain = xgb.DMatrix(X_train, label=y_train)
+    dval = xgb.DMatrix(X_test, label=y_test)
+
+    ctx = current_context()
+    run = ctx.neptune_run
+    neptune_callback = NeptuneCallback(run=run)
+
+    model_params = {
+        "tree_method": "hist",
+        "eta": 0.7,
+        "gamma": 0.001,
+        "max_depth": max_depth,
+        "objective": "reg:squarederror",
+        "eval_metric": ["mae", "rmse"],
+    }
+    evals = [(dtrain, "train"), (dval, "valid")]
+
+    # Train the model and log metadata to the run in Neptune
+    xgb.train(
+        params=model_params,
+        dtrain=dtrain,
+        num_boost_round=57,
+        evals=evals,
+        callbacks=[
+            neptune_callback,
+            xgb.callback.LearningRateScheduler(lambda epoch: 0.99**epoch),
+            xgb.callback.EarlyStopping(rounds=30),
+        ],
+    )
+
+
+# %% [markdown]
+# With Flyte's dynamic workflows, we can scale up training by launching multiple
+# jobs with different `max_depth` values:
+# %%
+@dynamic(container_image=image)
+def train_multiple_models(max_depths: List[int], X: np.ndarray, y: np.ndarray):
+    for max_depth in max_depths:
+        train_model(max_depth=max_depth, X=X, y=y)
+
+
+@workflow
+def train_wf(max_depths: List[int] = [2, 4, 10]):
+    X, y = get_dataset()
+    train_multiple_models(max_depths=max_depths, X=X, y=y)
+
+
+# %% [markdown]
+# To run this workflow on a remote Flyte cluster, run:
+# ```bash
+# pyflyte run --remote neptune_example.py train_wf
+# ```
+
+
+# %% [markdown]
+# To enable dynamic log links, add the plugin to Flyte's configuration file:
+# ```yaml
+# plugins:
+#   logs:
+#     dynamic-log-links:
+#       - neptune-run-id:
+#           displayName: Neptune
+#           templateUris: "{{ .taskConfig.host }}/{{ .taskConfig.project }}?query=(%60flyte%2Fexecution_id%60%3Astring%20%3D%20%22{{ .executionName }}-{{ .nodeId }}-{{ .taskRetryAttempt }}%22)&lbViewUnpacked=true"
+# ```
diff --git a/examples/neptune_plugin/requirements.in b/examples/neptune_plugin/requirements.in
new file mode 100644
index 000000000..878f4ef4e
--- /dev/null
+++ b/examples/neptune_plugin/requirements.in
@@ -0,0 +1,7 @@
+flytekitplugins-neptune
+xgboost
+neptune
+neptune-xgboost
+scikit-learn==1.5.1
+numpy==1.26.1
+matplotlib==3.9.2