diff --git a/Dockerfile b/Dockerfile
index fa9b5ff92..12a12bd87 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,9 +12,9 @@ RUN nix-env -i bash git nano sudo procps
 
 # Copy all nix files from the repo so that we can use them to install
 # mayastor dependencies
-COPY ci.nix $NIX_EXPR_DIR/
+COPY shell.nix $NIX_EXPR_DIR/
 COPY nix $NIX_EXPR_DIR/nix
 
 RUN cd $NIX_EXPR_DIR && \
-    nix-shell --argstr channel nightly --command "echo Debug dependencies done" ci.nix && \
-    nix-shell --argstr channel stable --command "echo Release dependencies done" ci.nix
+    nix-shell --argstr channel nightly --command "echo Debug dependencies done" && \
+    nix-shell --argstr channel stable --command "echo Release dependencies done"
diff --git a/Jenkinsfile b/Jenkinsfile
index 2a210044a..6d4575a0f 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -138,13 +138,13 @@ pipeline {
       steps {
         cleanWs()
         unstash 'source'
-        sh 'nix-shell --run "FMT_OPTS=--check ./scripts/rust-style.sh" ci.nix'
-        sh 'nix-shell --run "./scripts/rust-linter.sh" ci.nix'
-        sh 'nix-shell --run "./scripts/js-check.sh" ci.nix'
-        sh 'nix-shell --run "nixpkgs-fmt --check ." ci.nix'
+        sh 'nix-shell --run "FMT_OPTS=--check ./scripts/rust-style.sh"'
+        sh 'nix-shell --run "./scripts/rust-linter.sh"'
+        sh 'nix-shell --run "./scripts/js-check.sh"'
+        sh 'nix-shell --run "nixpkgs-fmt --check ."'
         script {
           if (env.BRANCH_NAME != "trying") {
-            sh 'nix-shell --run "./scripts/check-submodule-branches.sh" ci.nix'
+            sh 'nix-shell --run "./scripts/check-submodule-branches.sh"'
           }
         }
       }
@@ -173,12 +173,12 @@ pipeline {
         cleanWs()
         unstash 'source'
         sh 'printenv'
-        sh 'nix-shell --run "cargo build --bins --features=io-engine-testing" ci.nix'
-        sh 'nix-shell --run "./scripts/cargo-test.sh" ci.nix'
+        sh 'nix-shell --run "cargo build --bins --features=io-engine-testing"'
+        sh 'nix-shell --run "./scripts/cargo-test.sh"'
       }
       post {
        always {
-          sh 'nix-shell --run "./scripts/clean-cargo-tests.sh" ci.nix'
+          sh 'nix-shell --run "./scripts/clean-cargo-tests.sh"'
           sh 'sudo ./scripts/check-coredumps.sh --since "${START_DATE}"'
        }
      }
@@ -208,12 +208,12 @@ pipeline {
         cleanWs()
         unstash 'source'
         sh 'printenv'
-        sh 'nix-shell --run "cargo build --bins --features=io-engine-testing" ci.nix'
-        sh 'nix-shell --run "./scripts/grpc-test.sh" ci.nix'
+        sh 'nix-shell --run "cargo build --bins --features=io-engine-testing"'
+        sh 'nix-shell --run "./scripts/grpc-test.sh"'
       }
       post {
        always {
-          sh 'nix-shell --run "./scripts/clean-cargo-tests.sh" ci.nix'
+          sh 'nix-shell --run "./scripts/clean-cargo-tests.sh"'
           junit '*-xunit-report.xml'
           sh 'sudo ./scripts/check-coredumps.sh --since "${START_DATE}"'
        }
      }
@@ -254,12 +254,12 @@ pipeline {
         stage('build') {
           steps {
             sh 'printenv'
-            sh 'nix-shell --run "cargo build --bins --features=io-engine-testing" ci.nix'
+            sh 'nix-shell --run "cargo build --bins --features=io-engine-testing"'
           }
         }
         stage('python setup') {
           steps {
-            sh 'nix-shell --run "./test/python/setup.sh" ci.nix'
+            sh 'nix-shell --run "./test/python/setup.sh"'
           }
         }
         stage('run tests') {
           steps {
             // Cleanup any existing containers.
             // They could be lingering if there were previous test failures.
             sh 'docker system prune -f'
-            sh 'nix-shell --run "./scripts/pytest-tests.sh" ci.nix'
+            sh 'nix-shell --run "./scripts/pytest-tests.sh"'
           }
           post {
             always {
               junit 'test/python/reports/**/*xunit-report.xml'
-              sh 'nix-shell --run "./scripts/pytest-tests.sh --clean-all-exit" ci.nix'
+              sh 'nix-shell --run "./scripts/pytest-tests.sh --clean-all-exit"'
             }
           }
         }
diff --git a/ci.nix b/ci.nix
deleted file mode 100644
index d95eb6a0d..000000000
--- a/ci.nix
+++ /dev/null
@@ -1,75 +0,0 @@
-{ rust ? "stable"
-, spdk ? "develop"
-, spdk-path ? null
-} @ args:
-let
-  sources = import ./nix/sources.nix;
-
-  pkgs = import sources.nixpkgs {
-    overlays = [
-      (_: _: { inherit sources; })
-      (import ./nix/overlay.nix { })
-    ];
-  };
-
-  # python environment for test/python
-  pytest_inputs = with pkgs; python3.withPackages
-    (ps: with ps; [ virtualenv grpcio grpcio-tools asyncssh black ]);
-
-  shellAttrs = import ./spdk-rs/nix/shell {
-    inherit rust;
-    inherit spdk;
-    inherit spdk-path;
-    inherit sources;
-    inherit pkgs;
-
-    cfg = {
-      buildInputs = with pkgs; [
-        docker
-        docker-compose
-        e2fsprogs
-        etcd
-        gdb
-        git
-        gnuplot
-        kubernetes-helm
-        nodejs-18_x
-        numactl
-        pytest_inputs
-        udev
-        libnvme
-        nvme-cli
-        xfsprogs
-        nixpkgs-fmt
-      ];
-
-      shellEnv = with pkgs; {
-        PROTOC = io-engine.PROTOC;
-        PROTOC_INCLUDE = io-engine.PROTOC_INCLUDE;
-        ETCD_BIN = "${etcd}/bin/etcd";
-        LVM_BINS = "${lvm2.bin}/bin";
-      };
-
-      shellHook = ''
-        # SRCDIR is needed by docker-compose files as it requires absolute paths
-        export SRCDIR=`pwd`
-
-        export PATH="$PATH:$(pwd)/scripts/nix-sudo"
-
-        export IO_ENGINE_DIR="$RUST_TARGET_DEBUG"
-      '';
-
-      shellInfoHook = ''
-        echo
-        echo "PROTOC          : $PROTOC"
-        echo "PROTOC_INCLUDE  : $PROTOC_INCLUDE"
-        echo "ETCD_BIN        : $ETCD_BIN"
-        echo "LVM path        : $LVM_BINS"
-        echo "I/O engine dir  : $IO_ENGINE_DIR"
-      '';
-    };
-  };
-in
-pkgs.mkShell shellAttrs // {
-  name = "io-engine-dev-shell";
-}
diff --git a/doc/build.md b/doc/build.md
index 079be5f02..14a534944 100644
--- a/doc/build.md
+++ b/doc/build.md
@@ -58,45 +58,12 @@ curl -L https://nixos.org/nix/install | sh
 
 - Some of our team uses [NixOS][nixos] which has `nix` baked in, but you don't need to.
 - Some of our team uses [`direnv`][direnv], but you don't need to.
 
-For some tasks, we use features from `nixUnstable`. You can use `nixos-unstable`
-**(or `nixpkgs-unstable` for `nix` users)** by [changing your channel][nix-channel].
-
-First, setting the following:
-
-```nix
-{ pkgs, ... }: {
-  nix.extraOptions = ''
-    experimental-features = nix-command flakes
-  '';
-  nix.package = pkgs.nixUnstable;
-}
-```
-
-Then, updating the channel:
-
-```bash
-$ sudo nix-channel --list
-nixos https://nixos.org/channels/nixos-22.11
-$ sudo nix-channel --remove nixos
-$ sudo nix-channel --add https://nixos.org/channels/nixos-unstable nixos
-$ sudo nixos-rebuild switch --update
-```
-
-> If you don't want, you can drop into a
-> `nixUnstable` supporting shell with:
->
-> ```bash
-> nix-shell -I nixpkgs=channel:nixpkgs-unstable -p nixUnstable --command "nix --experimental-features 'nix-command flakes' develop -f . mayastor"
-> ```
->
-> Don't want to use `nixUnstable`? **That's ok!** Use `nix-shell` and `nix-build` as you normally would.
-
 **Want to run or hack on Mayastor?** _You need more configuration!_
 See [running][doc-run], then [testing][doc-test].
 
 You can use a tool like [`direnv`][direnv] to automate `nix shell` entry.
 
 If you are unable to use the Nix provided Rust for some reason, there are `rust` and
-`spdk` arguments to Nix shell. `nix-shell --arg rust none`
+`spdk-path` arguments to Nix shell. `nix-shell --arg rust none`
 
 After cloning the repository don't forget to run a:
 
@@ -109,11 +76,9 @@ to initialize the submodules.
 
 ## Iterative Builds
 
 Contributors often build Mayastor repeatedly during the development process.
-Using [`nix develop`][nix-develop] to enter a more persistent development shell can help improve
-iteration time:
 
 ```bash
-nix develop -f . io-engine
+nix-shell
 ```
 
 Once entered, you can start any tooling (eg `code .`) to ensure the correct resources are available.
@@ -129,20 +94,10 @@ cargo build --release
 ```
 
 **Want to run or hack on Mayastor?** _You need more configuration!_
 See [running][doc-run], then [testing][doc-test].
 
-Whilst the nix develop will allow you to build mayastor exactly as the image build, it might not have all the necessary
-components required for testing.
-For that you might want to use the explicit shell configuration file: ci.nix:
-
-```bash
-nix-shell
-```
-
-To ensure you are aware of this, we greet you with a nice cow.
-
 ## Artifacts
 
 There are a few ways to build Mayastor! If you're hacking on Mayastor, it's best to use
-[`nix develop`][nix-develop] (above) then turn to traditional Rust tools. If you're looking for releases,
+[`nix-shell`][nix-shell] (above) then turn to traditional Rust tools. If you're looking for releases,
 use [`nix build`][nix-build] or [`nix bundle`][nix-bundle] depending on your needs.
 
 > **Why is the build process this way?**
@@ -157,10 +112,9 @@ use [`nix build`][nix-build] or [`nix bundle`][nix-bundle] depending on your nee
 You can build release binaries of Mayastor with [`nix build`][nix-build]:
 
 ```bash
-for PKG in io-engine; do
-  echo "Building ${PKG} to artifacts/pkgs/${PKG}"; \
-  nix build -f . -o artifacts/pkgs/${PKG} ${PKG};
-done
+nix build -f . -o artifacts/pkgs io-engine
+ls artifacts/pkgs/bin
+casperf  io-engine  io-engine-client
 ```
 
 Try them as if they were installed:
@@ -177,9 +131,9 @@ In order to make an artifact which can be distributed, we use [`nix bundle`][nix
 > `io-engine-client`. This is coming.
 
 ```bash
-for BUNDLE in io-engine; do
-  echo "Bundling ${BUNDLE} to artifacts/bundle/${BUNDLE}"; \
-  nix bundle -f . -o artifacts/bundles/${BUNDLE} ${BUNDLE};
+for BUNDLE in io-engine io-engine-cli casperf; do
+  echo "Bundling ${BUNDLE} to artifacts/bundle/${BUNDLE}"
+  nix bundle -f . -o artifacts/bundles/${BUNDLE} units.release.${BUNDLE} --extra-experimental-features flakes
 done
 ```
 
@@ -200,21 +154,29 @@ Build the Docker images with the CI build script:
 ❯ ./scripts/release.sh --help
 Usage: release.sh [OPTIONS]
-
   Options:
     -d, --dry-run              Output actions that would be taken, but don't run them.
     -h, --help                 Display this text.
     --registry                 Push the built images to the provided registry.
+                               To also replace the image org provide the full repository path, example: docker.io/org
     --debug                    Build debug version of images where possible.
     --skip-build               Don't perform nix-build.
     --skip-publish             Don't publish built images.
-    --image                    Specify what image to build.
+    --image                    Specify what image to build and/or upload.
+    --tar                      Decompress and load images as tar rather than tar.gz.
+    --skip-images              Don't build nor upload any images.
     --alias-tag                Explicit alias for short commit hash tag.
     --tag                      Explicit tag (overrides the git tag).
+    --incremental              Builds components in two stages allowing for faster rebuilds during development.
+    --skopeo-copy              Don't load containers into host, simply copy them to registry with skopeo.
+    --skip-cargo-deps          Don't prefetch the cargo build dependencies.
+
+  Environment Variables:
+    RUSTFLAGS                  Set Rust compiler options when building binaries.
 
   Examples:
     release.sh --registry 127.0.0.1:5000
 
-  ❯ ./scripts/release.sh --registry localhost:5000 --image "mayastor-io-engine"
+  ❯ ./scripts/release.sh --registry localhost:5000 --image "io-engine"
 ```
 
 The container images are packaged and pushed using either docker or podman - whichever is run successfully with
@@ -224,14 +186,14 @@ If you want to specifically test one of these first, please set DOCKER env variable.
 Build the Docker images with [`nix build`][nix-build]:
 
 ```bash
-  nix-build --out-link artifacts/docker/mayastor-io-engine-image -A images.mayastor-io-engine
+  nix-build --out-link artifacts/docker/mayastor-io-engine-image -A images.io-engine
 ```
 
 **Optionally,** the generated Docker images will **not** tag to the `latest`. You may wish to do that if
 you want to run them locally:
 
 ```bash
-  ./scripts/release.sh --registry docker.io/your-registry --image "mayastor-io-engine --alias-tag latest"
+  ./scripts/release.sh --registry $registry --image "io-engine" --alias-tag latest
 ```
 
 ### Building KVM images
diff --git a/doc/mayastor-client.md b/doc/mayastor-client.md
index 3357329a3..e3fafb76d 100644
--- a/doc/mayastor-client.md
+++ b/doc/mayastor-client.md
@@ -3,9 +3,9 @@ This section shows a couple of examples of what you already can do with Mayastor today:
 
 - [Overview](#overview)
-  - [io-engine-client](#io-engine-client)
-  - [Local Storage](#local-storage)
-  - [Use Case: Mirroring over NVMF](#use-case-mirroring-over-nvmf)
+    - [io-engine-client](#io-engine-client)
+    - [Local Storage](#local-storage)
+    - [Use Case: Mirroring over NVMF](#use-case-mirroring-over-nvmf)
 
 ## io-engine-client
@@ -15,31 +15,41 @@ added support for sharing the Nexus over NBD and NVMf.
 
 ```bash
 > io-engine-client --help
-Mayastor CLI 0.1
 CLI utility for Mayastor
-USAGE:
-    io-engine-client [FLAGS] [OPTIONS]
-
-FLAGS:
-    -h, --help       Prints help information
-    -q, --quiet      Do not print any output except for list records
-    -V, --version    Prints version information
-    -v, --verbose    Verbose output
-
-OPTIONS:
-    -a, --address    IP address of mayastor instance [default: 127.0.0.1]
-    -p, --port       Port number of mayastor server [default: 10124]
-    -u, --units
-            Output with large units: i for kiB, etc. or d for kB, etc.
-
-
-SUBCOMMANDS:
-    bdev       Block device management
-    help       Prints this message or the help of the given subcommand(s)
-    nexus      Nexus device management
-    pool       Storage pool management
-    replica    Replica management
+
+Usage: io-engine-client [OPTIONS]
+
+Commands:
+  pool              Storage pool management
+  nexus             Nexus device management
+  replica           Replica management
+  bdev              Block device management
+  device            Host devices
+  perf              Performance statistics
+  rebuild           Rebuild management
+  snapshot-rebuild  Snapshot Rebuild Management
+  snapshot          Snapshot management
+  jsonrpc           Call a json-rpc method with a raw JSON payload
+  controller        NVMe controllers
+  test              Test management
+  stats             Resource IOStats
+  help              Print this message or the help of the given subcommand(s)
+
+Options:
+  -b, --bind
+          The URI of mayastor instance [env: MY_POD_IP=] [default: http://127.0.0.1:10124]
+  -q, --quiet
+          Do not print any output except for list records
+  -v, --verbose...
+          Verbose output
+  -u, --units
+          Output with large units: i for kiB, etc. or d for kB, etc.
+  -o, --output
+          Output format. [default: default] [possible values: default, json]
+  -h, --help
+          Print help
+  -V, --version
+          Print version
 ```
 
 To get more information specific to a subcommand, just execute the subcommand without any additional parameters,
 or by using the `-h` flag, for example:
 
 ```bash
 > io-engine-client nexus -h
-io-engine-client-nexus
 Nexus device management
-USAGE:
-    io-engine-client nexus
-
-FLAGS:
-    -h, --help       Prints help information
-    -V, --version    Prints version information
-
-SUBCOMMANDS:
-    add          add a child
-    children     list nexus children
-    create       Create a new nexus device
-    destroy      destroy the nexus with given name
-    help         Prints this message or the help of the given subcommand(s)
-    list         list all nexus devices
-    publish      publish the nexus
-    remove       remove a child
-    unpublish    unpublish the nexus
+
+Usage: io-engine-client nexus [OPTIONS]
+
+Commands:
+  create     Create a new nexus device
+  destroy    destroy the nexus with given name
+  shutdown   shutdown the nexus with given name
+  publish    publish the nexus
+  add        add a child
+  remove     remove a child
+  unpublish  unpublish the nexus
+  ana_state  get or set the NVMe ANA state of the nexus
+  list       list all nexus devices
+  children   list nexus children
+  resize     Resize nexus
+  child      Nexus child management
+  help       Print this message or the help of the given subcommand(s)
+
+Options:
+  -b, --bind        The URI of mayastor instance [env: MY_POD_IP=] [default: http://127.0.0.1:10124]
+  -q, --quiet       Do not print any output except for list records
+  -v, --verbose...  Verbose output
+  -o, --output      Output format. [default: default] [possible values: default, json]
+  -h, --help        Print help
 ```
 
 ## Local Storage
@@ -76,7 +91,7 @@ it is configured on. This makes certain things more simple, but at the same time
 of freedom as well. With Mayastor, we attempt to solve this transparently and determine based on declarative
 intent what is best to do. Let us start with an example.
-Let's assume we have a local disk `/dev/sdb` and we want to make use of it. 
+Let's assume we have a local disk `/dev/sdb` and we want to make use of it.
 By making use of the `io-engine-client` we can specify a URI to the resource and we can start using it.
@@ -172,7 +187,7 @@ the working of the Nexus.
 ```bash
 uname -r
 sudo modprobe nvme_tcp
-sudo nvme discover -t tcp -a 192.168.1.2 -s 4420
+sudo nvme discover -t tcp -a 192.168.1.2 -s 8420
 
 Discovery Log Number of Records 2, Generation counter 7
 =====Discovery Log Entry 0======
 trtype: tcp
@@ -181,7 +196,7 @@ adrfam: ipv4
 subtype: nvme subsystem
 treq: not specified
 portid: 0
-trsvcid: 4420
+trsvcid: 8420
 subnqn: nqn.2019-05.io.openebs:cnode1
 traddr: 192.168.1.2
 sectype: none
@@ -191,7 +206,7 @@ adrfam: ipv4
 subtype: nvme subsystem
 treq: not specified
 portid: 1
-trsvcid: 4420
+trsvcid: 8420
 subnqn: nqn.2019-05.io.openebs:cnode2
 traddr: 192.168.1.2
 sectype: none
@@ -200,20 +215,20 @@ sectype: none
 
 Now that we can see the block devices, we will connect to them and perform some IO to one of the devices.
 
 ```bash
-sudo nvme connect-all -t tcp -a 192.168.1.2 -s 4420
+sudo nvme connect-all -t tcp -a 192.168.1.2 -s 8420
 ```
 
 We can verify the connection has been made by looking at dmesg for some output:
 
 ```bash
-[17251.205183] nvme nvme1: new ctrl: NQN "nqn.2014-08.org.nvmexpress.discovery", addr 192.168.1.2:4420
+[17251.205183] nvme nvme1: new ctrl: NQN "nqn.2014-08.org.nvmexpress.discovery", addr 192.168.1.2:8420
 [17251.206576] nvme nvme1: Removing ctrl: NQN "nqn.2014-08.org.nvmexpress.discovery"
 [17251.245350] nvme nvme1: creating 4 I/O queues.
 [17251.281562] nvme nvme1: mapped 4/0 default/read queues.
-[17251.284471] nvme nvme1: new ctrl: NQN "nqn.2019-05.io.openebs:cnode1", addr 192.168.1.2:4420
+[17251.284471] nvme nvme1: new ctrl: NQN "nqn.2019-05.io.openebs:cnode1", addr 192.168.1.2:8420
 [17251.297755] nvme nvme2: creating 4 I/O queues.
 [17251.332165] nvme nvme2: mapped 4/0 default/read queues.
-[17251.341883] nvme nvme2: new ctrl: NQN "nqn.2019-05.io.openebs:cnode2", addr 192.168.1.2:4420
+[17251.341883] nvme nvme2: new ctrl: NQN "nqn.2019-05.io.openebs:cnode2", addr 192.168.1.2:8420
 ```
 
 Using the following fio config:
@@ -392,7 +407,7 @@ We will attach the devices directly to the host without the Nexus in between.
 We will have the same data on its filesystem, and have the same content including a matching md5.
 
 ```bash
-sudo nvme connect-all -t tcp -a 192.168.1.2 -s 4420
+sudo nvme connect-all -t tcp -a 192.168.1.2 -s 8420
 sudo mkdir /{disk1,disk2}
 sudo mount /dev/nvme1n1 /disk1
 sudo mount /dev/nvme2n1 /disk2
@@ -412,5 +427,6 @@ md5sum nexus
 
 What this demonstrates is that indeed -- we write the data twice. If you were to add a third child, we would write to
-that device all the same. What this also shows, is how we are transparent to the actual block devices. When we are removed
+that device all the same. What this also shows, is how we are transparent to the actual block devices. When we are
+removed
 from the data path, the data is still accessible without any special purpose tools or software.
diff --git a/doc/run.md b/doc/run.md
index b2c5ca37f..fc9439cc8 100644
--- a/doc/run.md
+++ b/doc/run.md
@@ -70,13 +70,13 @@ In order to use the full feature set of Mayastor, some or all of the following c
 - A Linux Kernel 5.1+ (with [`io-uring`][io_uring-intro] support)
 - The following kernel modules loaded:
-  - `nbd`: Network Block Device support
-  - `nvmet`: NVMe Target support
-  - `nvmet_rdma`: NVMe Target (rDMA) support
-  - `nvme_fabrics`: NVMe over Fabric support
-  - `nvme_tcp`: NVMe over TCP support
-  - `nvme_rdma`: NVMe (rDMA) support
-  - `nvme_loop`: NVMe Loop Device support
+    - `nbd`: Network Block Device support
+    - `nvmet`: NVMe Target support
+    - `nvmet_rdma`: NVMe Target (rDMA) support
+    - `nvme_fabrics`: NVMe over Fabric support
+    - `nvme_tcp`: NVMe over TCP support
+    - `nvme_rdma`: NVMe (rDMA) support
+    - `nvme_loop`: NVMe Loop Device support
 
   To load these on NixOS:
 
@@ -95,7 +95,7 @@ In order to use the full feature set of Mayastor, some or all of the following c
 
 - For Asymmetric Namespace Access (ANA) support (early preview), the following kernel build configuration enabled:
 
-  - `CONFIG_NVME_MULTIPATH`: enables support for multipath access to NVMe subsystems
+    - `CONFIG_NVME_MULTIPATH`: enables support for multipath access to NVMe subsystems
 
   This is usually already enabled in distribution kernels, at least for RHEL/CentOS 8.2,
 Ubuntu 20.04 LTS, and SUSE Linux Enterprise 15.2.
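Not part of the patch, but a quick way to verify the `CONFIG_NVME_MULTIPATH` requirement from the hunk above on a running system; this sketch assumes your distribution ships the kernel build config under `/boot` or exposes `/proc/config.gz` (requires `CONFIG_IKCONFIG_PROC`):

```bash
# Look for CONFIG_NVME_MULTIPATH=y in the running kernel's build config.
# /boot/config-$(uname -r) is common on RHEL/Ubuntu/SUSE; /proc/config.gz is a fallback.
grep CONFIG_NVME_MULTIPATH "/boot/config-$(uname -r)" 2>/dev/null \
  || zgrep CONFIG_NVME_MULTIPATH /proc/config.gz
```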
@@ -174,8 +174,8 @@ retag these to `latest` if required. If you forget, check `docker images`.
 **`io-engine-client`** and **`mayastor-csi`**:
 
 ```bash
-docker run --interactive --rm mayadata/io-engine-client:${TAG}
-docker run --interactive --rm mayadata/mayastor-csi:${TAG}
+docker run --interactive --rm openebs/mayastor-io-engine-client:${TAG}
+docker run --interactive --rm openebs/mayastor-io-engine:${TAG}
 ```
 
 **`mayastor`** requires some special parameters:
@@ -189,7 +189,7 @@ docker run \
   --volume /dev/shm:/dev/shm:rw \
   --volume /dev/hugepages:/dev/hugepages:rw \
   --network host \
-  mayadata/mayastor:${TAG}
+  openebs/mayastor-io-engine:${TAG}
 ```
 
 Why these parameters?
diff --git a/doc/test.md b/doc/test.md
index a50f77e83..ce5861914 100644
--- a/doc/test.md
+++ b/doc/test.md
@@ -23,10 +23,12 @@ Mayastor's unit tests, integration tests, and documentation tests via the conven
 
 > **An important note**: Mayastor tests need to run on the host with [`SYS_ADMIN` capabilities][sys-admin-capabilities].
 >
-> You can see in `mayastor/.cargo/config` we override the test runner to execute as root, take this capability,
+> You can see in `io-engine/.cargo/config` we override the test runner to execute as root, take this capability,
 > then drop privileges.
 
-Mayastor uses [spdk][spdk] which is quite senistive to threading. This means tests need to run one at a time:
+> **Remember to enter the nix-shell before running any of the commands herein**
+
+Mayastor uses [spdk][spdk] which is quite sensitive to threading. This means tests need to run one at a time:
 
 ```bash
 cd io-engine
 RUST_LOG=TRACE cargo test -- --test-threads 1 --nocapture
 ```
 
 ## Testing your own SPDK version
 To test your custom SPDK version please refer to the [spdk-rs documentation](https://github.com/openebs/spdk-rs/blob/develop/README.md#custom-spdk)
 
@@ -36,15 +38,38 @@ RUST_LOG=TRACE cargo test -- --test-threads 1 --nocapture
+## Using PCIe NVMe devices in cargo tests while developing
+
+When developing new features, testing those with real PCIe devices in the process might come in handy.
+In order to do so, the PCIe device first needs to be bound to the vfio driver:
+
+```bash
+sudo PCI_ALLOWED="" ./spdk-rs/spdk/scripts/setup.sh
+```
+
+The bdev name in the cargo test case can then follow the PCIe URI pattern:
+
+```rust
+static BDEVNAME1: &str = "pcie:///";
+```
+
+After testing the device may be rebound to the NVMe driver:
+
+```bash
+sudo PCI_ALLOWED="" ./spdk-rs/spdk/scripts/setup.sh reset
+```
+
+Please do not submit pull requests with active cargo test cases that require PCIe devices to be present.
+
 ## Running the end-to-end test suite
 
-Mayastor does more complete, end-to-end testing testing with [`mocha`][mocha]. It requires some extra setup.
+Mayastor does more complete, end-to-end testing. It requires some extra setup.
 
 > **TODO:** We're still writing this! Sorry! Let us know if you want us to prioritize this!
 
-## Running the gRPC test suite
+## Running the gRPC [`mocha`][mocha] test suite
 
-There is a bit of extra setup to the gRPC tests, you need to set up the node modules.
+There is a bit of extra setup to the [`mocha`][mocha] tests; you need to set up the node modules.
 
 To prepare:
 
@@ -59,30 +84,28 @@ Then, to run the tests:
 ./node_modules/mocha/bin/mocha test_csi.js
 ```
 
-## Using PCIe NVMe devices in cargo tests while developing
+## Running the BDD test suite
 
-When developing new features, testing those with real PCIe devices in the process might come in handy.
-In order to do so, the PCIe device first needs to be bound to the vfio driver:
+> **TODO:** We're still writing this! Sorry! Let us know if you want us to prioritize this!
 
-```bash
-sudo PCI_ALLOWED="" ./spdk-rs/spdk/scripts/setup.sh
-```
+There is a bit of extra setup to the BDD tests; you need to set up the Python venv.
 
-The bdev name in the cargo test case can then follow the PCIe URI pattern:
+To prepare:
 
-```rust
-static BDEVNAME1: &str = "pcie:///";
+```bash
+./test/python/setup.sh
 ```
 
-After testing the device may be rebound to the NVMe driver:
+Then, to run the tests:
 
 ```bash
-sudo PCI_ALLOWED="" ./spdk-rs/spdk/scripts/setup.sh reset
+./scripts/pytest-tests.sh
 ```
 
-Please do not submit pull requests with active cargo test cases that require PCIe devices to be present.
-
 [spdk]: https://spdk.io/
+
 [doc-run]: ./run.md
+
 [mocha]: https://mochajs.org/
+
 [sys-admin-capabilities]: https://man7.org/linux/man-pages/man7/capabilities.7.html
diff --git a/ide.nix b/ide.nix
index 13e0e1555..39acebd17 100644
--- a/ide.nix
+++ b/ide.nix
@@ -2,7 +2,7 @@
 , spdk ? "develop"
 , spdk-path ? null
 } @ args:
-import ./ci.nix {
+import ./shell.nix {
   inherit rust;
   inherit spdk;
   inherit spdk-path;
diff --git a/nix/pkgs/io-engine/cargo-package.nix b/nix/pkgs/io-engine/cargo-package.nix
index e3da5a597..bf9b8c12d 100644
--- a/nix/pkgs/io-engine/cargo-package.nix
+++ b/nix/pkgs/io-engine/cargo-package.nix
@@ -29,6 +29,7 @@
 , systemdMinimal
 , rdma-core
 , cargoBuildFlags ? [ ]
+, pname ? "io-engine"
 , rustFlags
 }:
 let
@@ -59,8 +60,8 @@ let
     "utils"
   ];
   buildProps = rec {
-    name = "io-engine";
-    inherit version cargoBuildFlags;
+    inherit version cargoBuildFlags pname;
+
     src = whitelistSource ../../../. src_list;
     LIBCLANG_PATH = "${llvmPackages.libclang.lib}/lib";
     PROTOC = "${protobuf}/bin/protoc";
diff --git a/nix/pkgs/io-engine/units.nix b/nix/pkgs/io-engine/units.nix
index 57cbc9eca..0eb848e92 100644
--- a/nix/pkgs/io-engine/units.nix
+++ b/nix/pkgs/io-engine/units.nix
@@ -13,16 +13,16 @@ let
     "long" = builtins.readFile "${versionDrv.long}";
     "tag_or_long" = builtins.readFile "${versionDrv.tag_or_long}";
   };
-  project-builder = { cargoBuildFlags ? [ ] }: pkgs.callPackage ./cargo-package.nix { inherit versions cargoBuildFlags rustFlags; };
+  project-builder = { cargoBuildFlags ? [ ], pname }: pkgs.callPackage ./cargo-package.nix { inherit versions cargoBuildFlags rustFlags pname; };
   components = { build }: {
-    io-engine = (project-builder { cargoBuildFlags = [ "--bin io-engine" ]; }).${build};
-    io-engine-cli = (project-builder { cargoBuildFlags = [ "--bin io-engine-cli" ]; }).${build};
-    casperf = (project-builder { cargoBuildFlags = [ "--bin casperf" ]; }).${build};
-    custom = { cargoBuildFlags }: (project-builder { cargoBuildFlags = [ cargoBuildFlags ]; }).${build};
+    io-engine = (project-builder { cargoBuildFlags = [ "--bin io-engine" ]; pname = "io-engine"; }).${build};
+    io-engine-cli = (project-builder { cargoBuildFlags = [ "--bin io-engine-cli" ]; pname = "io-engine-client"; }).${build};
+    casperf = (project-builder { cargoBuildFlags = [ "--bin casperf" ]; pname = "casperf"; }).${build};
+    custom = { cargoBuildFlags }: (project-builder { cargoBuildFlags = [ cargoBuildFlags ]; pname = "io-engine"; }).${build};
   };
 in
 {
-  cargoDeps = (project-builder { }).cargoDeps;
+  cargoDeps = (project-builder { pname = ""; }).cargoDeps;
   release = components { build = "release"; };
   debug = components { build = "debug"; };
 }
diff --git a/shell.nix b/shell.nix
index 612b4fcb3..d95eb6a0d 100644
--- a/shell.nix
+++ b/shell.nix
@@ -2,8 +2,74 @@
 , spdk ? "develop"
 , spdk-path ? null
 } @ args:
-import ./ci.nix {
-  inherit rust;
-  inherit spdk;
-  inherit spdk-path;
+let
+  sources = import ./nix/sources.nix;
+
+  pkgs = import sources.nixpkgs {
+    overlays = [
+      (_: _: { inherit sources; })
+      (import ./nix/overlay.nix { })
+    ];
+  };
+
+  # python environment for test/python
+  pytest_inputs = with pkgs; python3.withPackages
+    (ps: with ps; [ virtualenv grpcio grpcio-tools asyncssh black ]);
+
+  shellAttrs = import ./spdk-rs/nix/shell {
+    inherit rust;
+    inherit spdk;
+    inherit spdk-path;
+    inherit sources;
+    inherit pkgs;
+
+    cfg = {
+      buildInputs = with pkgs; [
+        docker
+        docker-compose
+        e2fsprogs
+        etcd
+        gdb
+        git
+        gnuplot
+        kubernetes-helm
+        nodejs-18_x
+        numactl
+        pytest_inputs
+        udev
+        libnvme
+        nvme-cli
+        xfsprogs
+        nixpkgs-fmt
+      ];
+
+      shellEnv = with pkgs; {
+        PROTOC = io-engine.PROTOC;
+        PROTOC_INCLUDE = io-engine.PROTOC_INCLUDE;
+        ETCD_BIN = "${etcd}/bin/etcd";
+        LVM_BINS = "${lvm2.bin}/bin";
+      };
+
+      shellHook = ''
+        # SRCDIR is needed by docker-compose files as it requires absolute paths
+        export SRCDIR=`pwd`
+
+        export PATH="$PATH:$(pwd)/scripts/nix-sudo"
+
+        export IO_ENGINE_DIR="$RUST_TARGET_DEBUG"
+      '';
+
+      shellInfoHook = ''
+        echo
+        echo "PROTOC          : $PROTOC"
+        echo "PROTOC_INCLUDE  : $PROTOC_INCLUDE"
+        echo "ETCD_BIN        : $ETCD_BIN"
+        echo "LVM path        : $LVM_BINS"
+        echo "I/O engine dir  : $IO_ENGINE_DIR"
+      '';
+    };
+  };
+in
+pkgs.mkShell shellAttrs // {
+  name = "io-engine-dev-shell";
 }
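With `ci.nix` folded into `shell.nix`, the development shell is now entered with a plain `nix-shell`. The sketch below exercises the arguments this change keeps (`rust`, `spdk`, and `spdk-path` are the actual parameters); the nightly channel and the SPDK checkout path are illustrative values, not part of the patch:

```bash
# Default development shell: rust = "stable", spdk = "develop".
nix-shell

# Select a different Rust channel (value is passed through to spdk-rs/nix/shell).
nix-shell --argstr rust nightly

# Build against an out-of-tree SPDK checkout (path is hypothetical).
nix-shell --argstr spdk-path $HOME/src/spdk
```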