diff --git a/.cargo/audit.toml b/.cargo/audit.toml deleted file mode 100644 index e303cfbe..00000000 --- a/.cargo/audit.toml +++ /dev/null @@ -1,2 +0,0 @@ -[advisories] -ignore = ["RUSTSEC-2020-0159"] diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..ea8c4bf7 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +/target diff --git a/.github/actions/cargo-cache/action.yaml b/.github/actions/cargo-cache/action.yaml deleted file mode 100644 index 17076609..00000000 --- a/.github/actions/cargo-cache/action.yaml +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2021 The Engula Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: 'Cargo Cache' -description: 'Cache cargo installation' -outputs: - cache-hit: - description: "Whether this cache hit or not" - value: ${{ steps.cache.outputs.cache-hit }} -runs: - using: "composite" - steps: - - name: Cache cargo registry - id: cache - uses: actions/cache@v2 - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} diff --git a/.github/actions/skip/action.yml b/.github/actions/skip/action.yml new file mode 100644 index 00000000..7429f261 --- /dev/null +++ b/.github/actions/skip/action.yml @@ -0,0 +1,117 @@ +# Copyright 2021 The Engula Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: "Set Skip Env Var" +description: "Action to check whether we should skip CI jobs" +inputs: + paths-ignore: + description: >- + Output skip=true when and only when all of the changed files located in one of the path, + the paths is shell-style pattern. + required: false + default: >- + "*.md" + "*.svg" + paths: + description: >- + Output skip=true when and only when none of the changed files located in one of the path, + the paths is shell-style pattern. + required: false + default: '' +outputs: + skip: + description: "whether we should skip CI jobs" + value: "${{ steps.check.outputs.skip }}" + +runs: + using: "composite" + steps: + - name: Check Changed Files And Set Env Var + id: check + shell: bash + run: | + echo '::set-output name=skip::false' + + if [[ "${{ github.event_name }}" != "pull_request" ]]; then + exit 0 + fi + + BASE_SHA=$(jq -r '.pull_request.base.sha' $GITHUB_EVENT_PATH) + echo "Base sha is $BASE_SHA, head sha is $GITHUB_SHA" + + git fetch --no-tags --progress --recurse-submodules --depth=1 origin ${BASE_SHA}:origin/${BASE_SHA} + BASE_SHA=origin/${BASE_SHA} + echo "Base sha is $BASE_SHA, head sha is $GITHUB_SHA" + + if ! files=$(git --no-pager diff --name-only ${GITHUB_SHA} ${BASE_SHA}); then + exit 1 + fi + + echo "Paths Ignore pattern:" + for pattern in $(echo '${{ inputs.paths-ignore }}'); do + echo $pattern + done + + echo "Paths pattern:" + for pattern in $(echo '${{ inputs.paths }}'); do + echo $pattern + done + + echo "Changed files:" + for file in ${files}; do + echo $file + done + + echo "Checking paths-ignore..." 
+ ALL_IGNORE=1 + for file in ${files}; do + matched=0 + for pattern in $(echo '${{ inputs.paths-ignore }}'); do + pattern=$(echo "$pattern" | sed 's/"//g') + if eval "[[ '$file' == $pattern ]]"; then + matched=1 + break + fi + done + if [[ "$matched" == "0" ]]; then + echo "$file doesn't match pattern $(echo '${{ inputs.paths-ignore }}'), stop checking" + ALL_IGNORE=0 + break + fi + done + + echo "Checking paths..." + NONE_MATCH=1 + for file in ${files}; do + matched=0 + for pattern in $(echo '${{ inputs.paths }}'); do + pattern=$(echo "$pattern" | sed 's/"//g') + if eval "[[ '$file' == $pattern ]]"; then + matched=1 + break + fi + done + if [[ "$matched" == "1" ]]; then + echo "$file match pattern $(echo '${{ inputs.paths }}'), stop checking" + NONE_MATCH=0 + break + fi + done + + echo "ALL_IGNORE: $ALL_IGNORE, NONE_MATCH: $NONE_MATCH" + if [[ "$ALL_IGNORE" == "1" && "$NONE_MATCH" == "1" ]]; then + echo "Set skip to true because all changed files are in paths-ignore and all changed files are not in paths." + echo '::set-output name=skip::true' + exit 0 + fi diff --git a/.github/workflows/audit-security.yml b/.github/workflows/audit-security.yml deleted file mode 100644 index 6161d88a..00000000 --- a/.github/workflows/audit-security.yml +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2021 The Engula Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: Audit Security - -on: - push: - paths: - - '**/Cargo.lock' - pull_request: - paths: - - '**/Cargo.lock' - schedule: - - cron: '25 4 * * *' - -concurrency: - group: ${{ github.ref }}-${{ github.workflow }} - cancel-in-progress: true - -jobs: - audit-security: - name: Audit Security - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Load cargo cache - id: cache - uses: ./.github/actions/cargo-cache - - - name: Install cargo-audit - uses: actions-rs/cargo@v1 - with: - command: install - args: cargo-audit - - - name: Audit dependencies - uses: actions-rs/cargo@v1 - with: - command: audit diff --git a/.github/workflows/check-code.yml b/.github/workflows/check-code.yml deleted file mode 100644 index 18116293..00000000 --- a/.github/workflows/check-code.yml +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2021 The Engula Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: Check Code - -on: - push: - paths-ignore: - - 'docs/' - - '*.md' - pull_request: - paths-ignore: - - 'docs/' - - '*.md' - -concurrency: - group: ${{ github.ref }}-${{ github.workflow }} - cancel-in-progress: true - -jobs: - test: - name: Run tests - - strategy: - matrix: - os: [ubuntu-latest, macos-latest] - rust: [nightly] - - runs-on: ${{ matrix.os }} - - steps: - - name: Checkout Repository - uses: actions/checkout@v2 - - - name: Install Rust toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: ${{ matrix.rust }} - components: rustfmt - override: true - - - name: Load cargo cache - id: cache - uses: ./.github/actions/cargo-cache - - - name: Cargo build - uses: actions-rs/cargo@v1 - with: - command: build - - - name: Cargo test - uses: actions-rs/cargo@v1 - with: - command: test - args: --workspace - - check: - name: Check style - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - - name: Install Rust toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: nightly - components: clippy, rustfmt - override: true - - - name: Load cargo cache - id: cache - uses: ./.github/actions/cargo-cache - - - name: Check clippy - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --workspace --tests --all-features -- -D warnings - - - name: Check format - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all -- --check - - tidy: - name: Check tidy - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - - name: Install Rust toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: nightly - components: rustfmt - override: true - - - name: Load cargo cache - id: cache - uses: ./.github/actions/cargo-cache - - - name: Cargo build - uses: actions-rs/cargo@v1 - with: - command: build - - - name: Verify tidy - run: git diff --exit-code diff --git a/.github/workflows/check-dependency.yml 
b/.github/workflows/check-dependency.yml deleted file mode 100644 index e1f2673a..00000000 --- a/.github/workflows/check-dependency.yml +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright 2021 The Engula Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: Check Dependency - -on: - push: - paths: - - '**/Cargo.toml' - pull_request: - paths: - - '**/Cargo.toml' - -concurrency: - group: ${{ github.ref }}-${{ github.workflow }} - cancel-in-progress: true - -jobs: - udpes: - name: Cargo udeps - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - - name: Install Rust toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: nightly - components: rustfmt - override: true - - - name: Load cargo cache - id: cache - uses: ./.github/actions/cargo-cache - - - name: Install cargo-udeps - uses: actions-rs/cargo@v1 - with: - command: install - args: cargo-udeps --locked - - - name: Check udeps - uses: actions-rs/cargo@v1 - with: - command: udeps - args: --workspace diff --git a/.github/workflows/check-editorconfig.yml b/.github/workflows/check-editorconfig.yml deleted file mode 100644 index f740ee26..00000000 --- a/.github/workflows/check-editorconfig.yml +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2021 The Engula Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -name: Check EditorConfig - -on: [push, pull_request] - -concurrency: - group: ${{ github.ref }}-${{ github.workflow }} - cancel-in-progress: true - -jobs: - editorconfig: - name: Check - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/checkout@v2 - with: - repository: editorconfig-checker/editorconfig-checker - path: editorconfig-checker - - uses: actions/setup-go@v2 - with: - go-version: 1.16 - - run: | - pushd editorconfig-checker && make build && popd && \ - editorconfig-checker/bin/ec -exclude editorconfig-checker diff --git a/.github/workflows/check-toml.yml b/.github/workflows/check-toml.yml deleted file mode 100644 index 3ff7df22..00000000 --- a/.github/workflows/check-toml.yml +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 2021 The Engula Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -name: Check TOML - -on: - push: - paths: - - '**/*.toml' - pull_request: - paths: - - '**/*.toml' - -concurrency: - group: ${{ github.ref }}-${{ github.workflow }} - cancel-in-progress: true - -jobs: - taplo: - name: Check TOML - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - - name: Install Rust toolchain - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - override: true - - - name: Load cargo cache - id: cache - uses: ./.github/actions/cargo-cache - - - name: Install taplo-cli - uses: actions-rs/cargo@v1 - with: - command: install - args: taplo-cli - - - name: Check TOML - run: taplo format --check diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..34459e2f --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,200 @@ +# Copyright 2021 The Engula Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name: CI + +on: + push: + pull_request: + schedule: + - cron: '25 4 * * *' + +concurrency: + group: ${{ github.ref }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + license: + if: (github.event_name == 'schedule' && github.repository == 'engula/engula') || (github.event_name != 'schedule') + name: Check license header + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: apache/skywalking-eyes@main + with: + config: tools/ci/licenserc.yml + + editorconfig: + if: (github.event_name == 'schedule' && github.repository == 'engula/engula') || (github.event_name != 'schedule') + name: Check editorconfig + needs: [license] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: editorconfig-checker/action-editorconfig-checker@v1 + + toml: + if: (github.event_name == 'schedule' && github.repository == 'engula/engula') || (github.event_name != 'schedule') + name: Check TOML + needs: [editorconfig] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Check Skip CI + id: skip-ci + uses: ./.github/actions/skip + with: + paths-ignore: >- + "*" + paths: >- + "*.toml" + - uses: Swatinem/rust-cache@v1 + if: steps.skip-ci.outputs.skip != 'true' + - name: Install taplo-cli + if: steps.skip-ci.outputs.skip != 'true' + uses: actions-rs/cargo@v1 + with: + command: install + args: taplo-cli + - name: Check TOML + if: steps.skip-ci.outputs.skip != 'true' + run: taplo format --check + + dependency: + if: (github.event_name == 'schedule' && github.repository == 'engula/engula') || (github.event_name != 'schedule') + name: Check dependency + needs: [editorconfig] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + - name: Check Skip CI + id: skip-ci + uses: ./.github/actions/skip + with: + paths-ignore: >- + "*" + paths: >- + "*.rs" + "Cargo.lock" + - uses: Swatinem/rust-cache@v1 + if: steps.skip-ci.outputs.skip != 'true' + - name: Install cargo-udeps + if: 
steps.skip-ci.outputs.skip != 'true' + uses: actions-rs/cargo@v1 + with: + command: install + args: cargo-udeps --locked + - name: Check udeps + if: steps.skip-ci.outputs.skip != 'true' + uses: actions-rs/cargo@v1 + with: + command: udeps + args: --workspace + + security: + if: (github.event_name == 'schedule' && github.repository == 'engula/engula') || (github.event_name != 'schedule') + name: Audit security + needs: [editorconfig] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Check Skip CI + id: skip-ci + uses: ./.github/actions/skip + with: + paths-ignore: >- + "*" + paths: >- + "Cargo.lock" + - uses: Swatinem/rust-cache@v1 + if: steps.skip-ci.outputs.skip != 'true' + - name: Install cargo-aduit + if: steps.skip-ci.outputs.skip != 'true' + uses: actions-rs/cargo@v1 + with: + command: install + args: cargo-audit + - name: Audit dependencies + if: steps.skip-ci.outputs.skip != 'true' + uses: actions-rs/cargo@v1 + with: + command: audit + + test: + if: (github.event_name == 'schedule' && github.repository == 'engula/engula') || (github.event_name != 'schedule') + name: Run tests + needs: [editorconfig] + strategy: + matrix: + os: [ubuntu-latest, macos-latest] + rust: [nightly] + + runs-on: ${{ matrix.os }} + + steps: + - name: Checkout Repository + uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Check Skip CI + id: skip-ci + uses: ./.github/actions/skip + + - uses: Swatinem/rust-cache@v1 + if: steps.skip-ci.outputs.skip != 'true' + + - name: Cargo build + if: steps.skip-ci.outputs.skip != 'true' + uses: actions-rs/cargo@v1 + with: + command: build + + - name: Verify tidy + if: steps.skip-ci.outputs.skip != 'true' + run: git diff --exit-code + + - name: Check clippy + if: steps.skip-ci.outputs.skip != 'true' + uses: actions-rs/cargo@v1 + with: + command: clippy + args: --workspace --tests --all-features -- -D warnings + + - name: Check format + if: steps.skip-ci.outputs.skip != 'true' + uses: actions-rs/cargo@v1 + 
with: + command: fmt + args: --all -- --check + + - name: Cargo test + if: steps.skip-ci.outputs.skip != 'true' + uses: actions-rs/cargo@v1 + with: + command: test + args: --workspace + + pass: + name: All tests passed + runs-on: ubuntu-latest + needs: + - toml + - dependency + - security + - test + steps: + - run: exit 0 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 00000000..6b976479 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,61 @@ +# Copyright 2022 The Engula Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: Build Image + +on: + push: + branches: ['main'] + tags: ['v*'] + pull_request: + workflow_dispatch: + +concurrency: + group: ${{ github.ref }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + build-and-push-image: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout Repository + uses: actions/checkout@v2 + with: + submodules: recursive + - uses: docker/login-action@v1 + if: github.event_name != 'pull_request' + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - id: meta + uses: docker/metadata-action@v3 + with: + images: ghcr.io/${{ github.repository }} + flavor: latest=false + tags: | + type=ref,event=tag + type=sha,format=long + type=raw,value=latest,enable=${{ !startsWith(github.ref, 'refs/tags/') }} + - uses: docker/build-push-action@v2 + with: + context: . 
+ push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..c9ce0515 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "src/apis"] + path = src/apis + url = https://github.com/engula/engula-apis diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 42ed807a..0c6244af 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -22,7 +22,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh Clone the repository: ```sh -git clone https://github.com/engula/engula.git +git clone --recursive https://github.com/engula/engula.git cd engula ``` @@ -38,7 +38,6 @@ Now that you have the `engula` binary, execute it for exploring: ```sh cargo run -p engula -## OR: ./target/debug/engula ``` ## Contribute diff --git a/Cargo.lock b/Cargo.lock index c89857e5..d80a896c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11,11 +11,20 @@ dependencies = [ "memchr", ] +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + [[package]] name = "anyhow" -version = "1.0.51" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b26702f315f53b6071259e15dd9d64528213b44d61de1ec926eca7715d62203" +checksum = "159bb86af3a200e19a068f4224eae4c8bb2d0fa054c7e5d1cacd5cef95e684cd" [[package]] name = "async-stream" @@ -62,9 +71,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base64" @@ -92,9 +101,9 @@ checksum = 
"baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" -version = "3.0.0-beta.5" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feff3878564edb93745d58cf63e17b63f24142506e7a20c87a5521ed7bfb1d63" +checksum = "5177fac1ab67102d8989464efd043c6ff44191b1557ec1ddd489b4f7e1447e77" dependencies = [ "atty", "bitflags", @@ -105,22 +114,32 @@ dependencies = [ "strsim", "termcolor", "textwrap", - "unicase", ] [[package]] name = "clap_derive" -version = "3.0.0-beta.5" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b15c6b4f786ffb6192ffe65a36855bc1fc2444bcd0945ae16748dcd6ed7d0d3" +checksum = "01d42c94ce7c2252681b5fed4d3627cc807b13dfc033246bd05d5b252399000e" dependencies = [ - "heck", + "heck 0.4.0", "proc-macro-error", "proc-macro2", "quote", "syn", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "either" version = "1.6.1" @@ -129,71 +148,90 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "engula" -version = "0.2.0" +version = "0.3.0" dependencies = [ + "anyhow", "clap", - "engula-journal", - "engula-kernel", - "engula-storage", - "hash-engine", + "engula-transactor", + "object-engine-master", + "prost", + "stream-engine-master", "tokio", + "tokio-stream", "tonic", + "tracing", + "tracing-subscriber", ] [[package]] -name = "engula-journal" -version = "0.2.0" +name = "engula-apis" +version = "0.3.0" dependencies = [ - "async-trait", - "futures", "prost", - "tempfile", - "thiserror", - "tokio", - "tokio-stream", "tonic", "tonic-build", ] [[package]] -name = "engula-kernel" -version = "0.2.0" +name = "engula-client" +version = "0.3.0" dependencies = [ - "async-trait", - 
"engula-journal", - "engula-storage", - "futures", + "anyhow", + "engula-apis", + "prost", + "tokio", + "tonic", +] + +[[package]] +name = "engula-cooperator" +version = "0.1.0" +dependencies = [ + "engula-apis", + "engula-supervisor", "prost", - "tempfile", - "thiserror", "tokio", - "tokio-stream", "tonic", "tonic-build", ] [[package]] -name = "engula-storage" -version = "0.2.0" +name = "engula-supervisor" +version = "0.1.0" dependencies = [ - "async-trait", - "bytes", - "futures", + "engula-apis", "prost", - "tempfile", - "thiserror", "tokio", - "tokio-stream", - "tokio-util", "tonic", "tonic-build", ] +[[package]] +name = "engula-transactor" +version = "0.1.0" +dependencies = [ + "engula-apis", + "engula-cooperator", + "engula-supervisor", + "prost", + "tokio", + "tonic", +] + +[[package]] +name = "fastrand" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +dependencies = [ + "instant", +] + [[package]] name = "fixedbitset" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" +checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" [[package]] name = "fnv" @@ -203,9 +241,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "futures" -version = "0.3.17" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" +checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" dependencies = [ "futures-channel", "futures-core", @@ -218,9 +256,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.17" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" +checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" dependencies = [ "futures-core", "futures-sink", @@ -228,15 +266,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.17" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" +checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" [[package]] name = "futures-executor" -version = "0.3.17" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" +checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" dependencies = [ "futures-core", "futures-task", @@ -245,18 +283,16 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.17" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" +checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" [[package]] name = "futures-macro" -version = "0.3.17" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" +checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" dependencies = [ - "autocfg", - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -264,23 +300,22 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.17" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" +checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" [[package]] name = "futures-task" -version = "0.3.17" +version = 
"0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" dependencies = [ - "autocfg", "futures-channel", "futures-core", "futures-io", @@ -290,16 +325,14 @@ dependencies = [ "memchr", "pin-project-lite", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "d39cd93900197114fa1fcb7ae84ca742095eed9442088988ae74fa744e930e77" dependencies = [ "cfg-if", "libc", @@ -308,9 +341,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f072413d126e57991455e0a922b31e4c8ba7c2ffbebf6b78b4f8521397d65cd" +checksum = "d9f1f717ddc7b2ba36df7e871fd88db79326551d3d6f1fc406fbfd28b582ff8e" dependencies = [ "bytes", "fnv", @@ -321,23 +354,10 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util", + "tokio-util 0.6.9", "tracing", ] -[[package]] -name = "hash-engine" -version = "0.2.0" -dependencies = [ - "bytes", - "engula-journal", - "engula-kernel", - "engula-storage", - "futures", - "thiserror", - "tokio", -] - [[package]] name = "hashbrown" version = "0.11.2" @@ -353,6 +373,12 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "heck" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" + [[package]] name = "hermit-abi" version = "0.1.19" @@ -364,9 +390,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", @@ -386,9 +412,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" [[package]] name = "httpdate" @@ -398,9 +424,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.16" +version = "0.14.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" +checksum = "043f0e083e9901b6cc658a77d1eb86f4fc650bbb977a4337dd63192826aa85dd" dependencies = [ "bytes", "futures-channel", @@ -434,9 +460,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg", "hashbrown", @@ -462,9 +488,9 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.8" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" [[package]] name = "lazy_static" @@ 
-474,15 +500,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.112" +version = "0.2.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" +checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4" [[package]] name = "lock_api" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" +checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" dependencies = [ "scopeguard", ] @@ -504,9 +530,9 @@ checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "mio" -version = "0.7.14" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" +checksum = "ba272f85fa0b41fc91872be579b3bbe0f56b792aa361a380eb669469f68dafb2" dependencies = [ "libc", "log", @@ -532,23 +558,74 @@ checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "ntapi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" dependencies = [ "winapi", ] [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", ] +[[package]] +name = "object-engine-client" +version = "0.1.0" +dependencies = [ + "anyhow", + "engula-apis", + 
"object-engine-proto", + "prost", + "thiserror", + "tokio", + "tonic", +] + +[[package]] +name = "object-engine-common" +version = "0.1.0" +dependencies = [ + "async-trait", + "thiserror", +] + +[[package]] +name = "object-engine-filestore" +version = "0.1.0" +dependencies = [ + "async-trait", + "object-engine-common", + "thiserror", + "tokio", +] + +[[package]] +name = "object-engine-master" +version = "0.1.0" +dependencies = [ + "object-engine-proto", + "prost", + "thiserror", + "tokio", + "tonic", +] + +[[package]] +name = "object-engine-proto" +version = "0.1.0" +dependencies = [ + "prost", + "tonic", + "tonic-build", +] + [[package]] name = "once_cell" version = "1.9.0" @@ -557,36 +634,34 @@ checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" [[package]] name = "os_str_bytes" -version = "4.2.0" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "addaa943333a514159c80c97ff4a93306530d965d27e139188283cd13e06a799" +checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" dependencies = [ "memchr", ] [[package]] name = "parking_lot" -version = "0.11.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" dependencies = [ - "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" dependencies = [ "cfg-if", - "instant", "libc", "redox_syscall", "smallvec", - "winapi", + "windows-sys", ] [[package]] @@ -607,18 +682,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.8" +version = "1.0.10" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", @@ -627,9 +702,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pin-utils" @@ -639,9 +714,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "ppv-lite86" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-error" @@ -667,23 +742,11 @@ dependencies = [ "version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" -version = "1.0.34" +version = "1.0.36" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f84e92c0f7c9d58328b85a78557813e4bd845130db68d7184635344399423b1" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ "unicode-xid", ] @@ -705,7 +768,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" dependencies = [ "bytes", - "heck", + "heck 0.3.3", "itertools", "lazy_static", "log", @@ -743,23 +806,22 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.10" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" +checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" dependencies = [ "proc-macro2", ] [[package]] name = "rand" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", - "rand_hc", ] [[package]] @@ -781,15 +843,6 @@ dependencies = [ "getrandom", ] -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core", -] - [[package]] name = "redox_syscall" version = "0.2.10" @@ -831,6 +884,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] + 
[[package]] name = "signal-hook-registry" version = "1.4.0" @@ -848,20 +910,85 @@ checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "smallvec" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" [[package]] name = "socket2" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" dependencies = [ "libc", "winapi", ] +[[package]] +name = "stream-engine-client" +version = "0.1.0" +dependencies = [ + "anyhow", + "futures", + "prost", + "stream-engine-common", + "stream-engine-master", + "stream-engine-proto", + "stream-engine-store", + "thiserror", + "tokio", + "tonic", +] + +[[package]] +name = "stream-engine-common" +version = "0.1.0" +dependencies = [ + "derivative", + "futures", + "thiserror", + "tonic", +] + +[[package]] +name = "stream-engine-master" +version = "0.1.0" +dependencies = [ + "log", + "prost", + "stream-engine-common", + "stream-engine-proto", + "thiserror", + "tokio", + "tokio-stream", + "tonic", +] + +[[package]] +name = "stream-engine-proto" +version = "0.1.0" +dependencies = [ + "prost", + "stream-engine-common", + "tonic", + "tonic-build", +] + +[[package]] +name = "stream-engine-store" +version = "0.1.0" +dependencies = [ + "futures", + "log", + "prost", + "stream-engine-common", + "stream-engine-proto", + "thiserror", + "tokio", + "tokio-stream", + "tonic", +] + [[package]] name = "strsim" version = "0.10.0" @@ -870,9 +997,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "1.0.82" +version = "1.0.86" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8daf5dd0bb60cbd4137b1b587d2fc0ae729bc07cf01cd70b36a1ed5ade3b9d59" +checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" dependencies = [ "proc-macro2", "quote", @@ -881,13 +1008,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ "cfg-if", + "fastrand", "libc", - "rand", "redox_syscall", "remove_dir_all", "winapi", @@ -907,9 +1034,6 @@ name = "textwrap" version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0066c8d12af8b5acd21e00547c3797fde4e8677254a7ee429176ccebbe93dd80" -dependencies = [ - "unicode-width", -] [[package]] name = "thiserror" @@ -931,11 +1055,20 @@ dependencies = [ "syn", ] +[[package]] +name = "thread_local" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +dependencies = [ + "once_cell", +] + [[package]] name = "tokio" -version = "1.15.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbbf1c778ec206785635ce8ad57fe52b3009ae9e0c9f574a728f3049d3e55838" +checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" dependencies = [ "bytes", "libc", @@ -946,15 +1079,16 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", + "socket2", "tokio-macros", "winapi", ] [[package]] name = "tokio-io-timeout" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90c49f106be240de154571dd31fbe48acb10ba6c6dd6f6517ad603abffa42de9" +checksum = 
"30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite", "tokio", @@ -980,7 +1114,6 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util", ] [[package]] @@ -997,6 +1130,20 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-util" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64910e1b9c1901aaf5375561e35b9c057d95ff41a44ede043a03e09279eabaf1" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", +] + [[package]] name = "tonic" version = "0.6.2" @@ -1020,7 +1167,7 @@ dependencies = [ "prost-derive", "tokio", "tokio-stream", - "tokio-util", + "tokio-util 0.6.9", "tower", "tower-layer", "tower-service", @@ -1042,9 +1189,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5651b5f6860a99bd1adb59dbfe1db8beb433e73709d9032b413a77e2fb7c066a" +checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" dependencies = [ "futures-core", "futures-util", @@ -1054,8 +1201,7 @@ dependencies = [ "rand", "slab", "tokio", - "tokio-stream", - "tokio-util", + "tokio-util 0.7.0", "tower-layer", "tower-service", "tracing", @@ -1075,9 +1221,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.29" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" +checksum = "f6c650a8ef0cd2dd93736f033d21cbd1224c5a967aa0c258d00fcf7dafef9b9f" dependencies = [ "cfg-if", "log", @@ -1088,9 +1234,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" +checksum = "8276d9a4a3a558d7b7ad5303ad50b53d58264641b82914b7ada36bd762e7a716" dependencies = [ "proc-macro2", "quote", @@ -1099,11 +1245,12 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" +checksum = "03cfcb51380632a72d3111cb8d3447a8d908e577d31beeac006f836383d29a23" dependencies = [ "lazy_static", + "valuable", ] [[package]] @@ -1117,31 +1264,41 @@ dependencies = [ ] [[package]] -name = "try-lock" -version = "0.2.3" +name = "tracing-log" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] [[package]] -name = "unicase" -version = "2.6.0" +name = "tracing-subscriber" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +checksum = "9e0ab7bdc962035a87fba73f3acca9b8a8d0034c2e6f60b84aeaaddddc155dce" dependencies = [ - "version_check", + "ansi_term", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", ] [[package]] -name = "unicode-segmentation" -version = "1.8.0" +name = "try-lock" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] -name = "unicode-width" -version = "0.1.9" +name = "unicode-segmentation" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" [[package]] name = "unicode-xid" @@ -1149,11 +1306,17 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "version_check" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "want" @@ -1173,9 +1336,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "which" -version = "4.2.2" +version = "4.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea187a8ef279bc014ec368c27a920da2024d2a711109bfbe3440585d5cf27ad9" +checksum = "2a5a7e487e921cf220206864a94a89b6c6905bfc19f1057fa26a4cb360e5c1d2" dependencies = [ "either", "lazy_static", @@ -1212,3 +1375,46 @@ name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3df6e476185f92a12c072be4a189a0210dcdcf512a1891d6dff9edb874deadc6" +dependencies = [ + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_msvc" +version = "0.32.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" + +[[package]] +name = "windows_i686_gnu" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" + +[[package]] +name = "windows_i686_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" diff --git a/Cargo.toml b/Cargo.toml index 64286660..2e66eaab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,32 +1,19 @@ -[package] -name = "engula" -version = "0.2.0" -edition = "2021" -license = "Apache-2.0" -homepage = "https://engula.io" -repository = "https://github.com/engula/engula" -keywords = ["database", "serverless", "storage-engine"] -categories = [ - "asynchronous", - "data-structures", - "database", - "database-implementations", -] -description = "A serverless storage engine that empowers engineers to build reliable and cost-effective databases." 
- -[[bin]] -name = "engula" -path = "bin/engula.rs" - [workspace] -members = ["src/engine/hash", "src/kernel", "src/journal", "src/storage"] - -[dependencies] -hash-engine = { version = "0.2", path = "src/engine/hash" } -engula-kernel = { version = "0.2", path = "src/kernel" } -engula-journal = { version = "0.2", path = "src/journal" } -engula-storage = { version = "0.2", path = "src/storage" } - -clap = "=3.0.0-beta.5" -tonic = "0.6" -tokio = { version = "1", features = ["full"] } +members = [ + "src/apis", + "src/client", + "src/engula", + "src/supervisor", + "src/transactor", + "src/cooperator", + "src/object-engine/proto", + "src/object-engine/client", + "src/object-engine/common", + "src/object-engine/master", + "src/object-engine/filestore", + "src/stream-engine/proto", + "src/stream-engine/client", + "src/stream-engine/common", + "src/stream-engine/master", + "src/stream-engine/store", +] diff --git a/.github/workflows/audit-license.yml b/Dockerfile similarity index 58% rename from .github/workflows/audit-license.yml rename to Dockerfile index fb4dc55a..12aa79dc 100644 --- a/.github/workflows/audit-license.yml +++ b/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2021 The Engula Authors. +# Copyright 2022 The Engula Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,20 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -name: Audit License +FROM rust:1.58 as build +WORKDIR /build +COPY . 
/build/ +RUN cargo build --locked --release --package=engula -on: [push, pull_request] - -concurrency: - group: ${{ github.ref }}-${{ github.workflow }} - cancel-in-progress: true - -jobs: - audit: - name: Audit License - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: apache/skywalking-eyes@main - with: - config: tools/ci/licenserc.yml +FROM gcr.io/distroless/cc +COPY --from=build /build/target/release/engula /bin/ +ENTRYPOINT ["/bin/engula"] diff --git a/README.md b/README.md index 5707c204..c43d5f10 100644 --- a/README.md +++ b/README.md @@ -8,34 +8,39 @@ [twitter-badge]: https://img.shields.io/twitter/follow/engulaio?style=flat-square&logo=twitter&color=brightgreen [twitter-url]: https://twitter.com/intent/follow?screen_name=engulaio -Engula is a serverless storage engine that empowers engineers to build reliable and cost-effective databases. +Engula is a persistent data structure store, used as a database, cache, and storage engine. -Engula's design goals are as follows: +Features: -- **Elastic**: takes advantage of elastic resources on the cloud -- **Adaptive**: adapts to dynamic workloads and diverse applications -- **Extensible**: provides pluggable APIs and modules for customization -- **Platform independent**: allows flexible deployments on local hosts, on-premise servers, and cloud platforms +- Provide data structures such as numbers, strings, maps, and lists. +- Support ACID transactions with different isolation and consistency levels. +- Provide built-in cache to speed up reads, resist hotspots and traffic bursts. +- Implement a cloud-native, multi-tenant architecture to deliver a cost-effective service. ## Status -We are working on version 0.2. Please check the [roadmap][roadmap] for more details. +We are working on v0.3. Please check the [roadmap][roadmap] for more details. -[roadmap]: https://github.com/engula/engula/issues/57 +[roadmap]: https://github.com/engula/engula/issues/359 -We have released Demo 1 in Oct 2021. 
-You can check [the branch](https://github.com/engula/engula/tree/demo-1) and [the report](https://engula.com/posts/demo-1/) for more details. +We released demo 1 in Oct 2021 and v0.2 in Dec 2021. You can check the [demo 1 report](https://engula.com/posts/demo-1/) and [v0.2 release post](https://engula.io/posts/release-0.2/) for more details. -## Design +## Examples -![Architecture](docs/images/architecture.drawio.svg) +You can check some usages in [examples](src/client/examples). -For internal designs of Engula, please see the [docs](docs). +To run the examples: -## Discussion - -For discussions about ideas, designs, and roadmaps, please see the [discussions](https://github.com/engula/engula/discussions). +``` +cargo run -p engula -- server start +cargo run -p engula-client --example database +cargo run -p engula-client --example {example file name} +``` ## Contributing -Thanks for your help improving the project! We have a [contributing guide](CONTRIBUTING.md) to help you get involved in the Engula project. +Thanks for your help in improving the project! We have a [contributing guide](CONTRIBUTING.md) to help you get involved in the Engula project. + +## More information + +For internal designs, please see the [docs](docs). For informal discussions, please go to the [forum](https://github.com/engula/engula/discussions). diff --git a/bin/engula.rs b/bin/engula.rs deleted file mode 100644 index eca5e953..00000000 --- a/bin/engula.rs +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -use std::{net::SocketAddr, path::PathBuf}; - -use clap::{crate_description, crate_version, Parser, Subcommand}; -use engula_journal::{ - file::Journal as FileJournal, grpc::Server as JournalServer, mem::Journal as MemJournal, -}; -use engula_kernel::grpc::{FileKernel, MemKernel, Server as KernelServer}; -use engula_storage::{ - file::Storage as FileStorage, grpc::Server as StorageServer, mem::Storage as MemStorage, -}; - -pub type Result = std::result::Result>; - -macro_rules! run_until_asked_to_quit { - ($addr:expr, $server:expr) => {{ - let cloned_addr = $addr.clone(); - tonic::transport::Server::builder() - .add_service($server.into_service()) - .serve(cloned_addr) - .await?; - }}; -} - -#[derive(Subcommand)] -enum RunMode { - #[clap(name = "--mem", about = "Stores data in memory")] - Mem, - #[clap(name = "--file", about = "Stores data in local files")] - File { - #[clap(parse(from_os_str), about = "Path to store data")] - path: PathBuf, - }, -} - -#[derive(Subcommand)] -#[clap(about = "Commands to operate Storage")] -enum StorageCommand { - #[clap(about = "Run a storage server")] - Run { - #[clap(about = "Socket address to listen")] - addr: SocketAddr, - - #[clap(subcommand)] - cmd: RunMode, - }, -} - -impl StorageCommand { - async fn run(&self) -> Result<()> { - match self { - StorageCommand::Run { addr, cmd } => match cmd { - RunMode::File { path } => { - let storage = FileStorage::new(&path).await?; - let server = StorageServer::new(storage); - run_until_asked_to_quit!(addr, server); - } - RunMode::Mem => { - let server = StorageServer::new(MemStorage::default()); - run_until_asked_to_quit!(addr, server); - } - }, - } - Ok(()) - } -} - -#[derive(Subcommand)] -#[clap(about = "Commands to operate Journal")] -enum JournalCommand { - #[clap(about = "Run a journal server")] - Run { - #[clap(about = "Socket address to listen")] - addr: SocketAddr, - - 
#[clap(subcommand)] - cmd: RunMode, - - #[clap( - long, - default_value = "67108864", - about = "The size of segments in bytes, only taking effects for a file instance" - )] - segment_size: usize, - }, -} - -impl JournalCommand { - async fn run(&self) -> Result<()> { - match self { - JournalCommand::Run { - addr, - cmd, - segment_size, - } => match cmd { - RunMode::File { path } => { - let journal = FileJournal::open(path, *segment_size).await?; - let server = JournalServer::new(journal); - run_until_asked_to_quit!(addr, server); - } - RunMode::Mem => { - let server = JournalServer::new(MemJournal::default()); - run_until_asked_to_quit!(addr, server); - } - }, - } - Ok(()) - } -} - -#[derive(Subcommand)] -#[clap(about = "Commands to operate Kernel")] -enum KernelCommand { - #[clap(about = "Run a kernel server")] - Run { - #[clap(about = "Socket address to listen")] - addr: SocketAddr, - - #[clap(subcommand)] - mode: RunMode, - - #[clap(long, about = "The address of journal server")] - journal: SocketAddr, - - #[clap(long, about = "The address of storage server")] - storage: SocketAddr, - }, -} - -impl KernelCommand { - async fn run(&self) -> Result<()> { - match self { - KernelCommand::Run { - addr, - mode: cmd, - journal, - storage, - } => match cmd { - RunMode::Mem => { - let kernel = - MemKernel::open(&journal.to_string(), &storage.to_string()).await?; - let server = - KernelServer::new(&journal.to_string(), &storage.to_string(), kernel); - run_until_asked_to_quit!(addr, server); - } - RunMode::File { path } => { - let kernel = - FileKernel::open(&journal.to_string(), &storage.to_string(), &path).await?; - let server = - KernelServer::new(&journal.to_string(), &storage.to_string(), kernel); - run_until_asked_to_quit!(addr, server); - } - }, - } - Ok(()) - } -} - -#[derive(Parser)] -enum SubCommand { - #[clap(subcommand)] - Storage(StorageCommand), - #[clap(subcommand)] - Journal(JournalCommand), - #[clap(subcommand)] - Kernel(KernelCommand), -} - 
-#[derive(Parser)] -#[clap( - version = crate_version!(), - about = crate_description!(), -)] -struct Command { - #[clap(subcommand)] - subcmd: SubCommand, -} - -impl Command { - async fn run(&self) -> Result<()> { - match &self.subcmd { - SubCommand::Storage(cmd) => cmd.run().await?, - SubCommand::Journal(cmd) => cmd.run().await?, - SubCommand::Kernel(cmd) => cmd.run().await?, - } - - Ok(()) - } -} - -#[tokio::main] -async fn main() -> Result<()> { - let cmd: Command = Command::parse(); - cmd.run().await -} diff --git a/docs/README.md b/docs/README.md index 2fa60465..f71f5de5 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,5 +1,5 @@ # Engula Development Documentation -This is a collaborative effort to build a guide that explains how Engula works. The aim of the guide is to help new contributors get oriented to Engula, as well as to help more experienced folks in figuring out some new part of the storage engine that they haven't worked on before. +This is a collaborative effort to build a guide that explains how Engula works. The aim of the guide is to help new contributors get oriented to Engula, as well as to help more experienced folks in figuring out some new part of the project that they haven't worked on before. Please check the top-level [design document](design.md) to get started. diff --git a/docs/design.md b/docs/design.md index 32f7e2b1..de0e63d7 100644 --- a/docs/design.md +++ b/docs/design.md @@ -1,162 +1,86 @@ -# About +# Engula -This document describes the top-level design of Engula. +Engula is a persistent data structure store, used as a database and storage engine. Engula aims to the standard collections for stateful applications. Engula provides a set of persistent data structures that optimize for specific use cases to serve diverse applications in one system. New data structures can be added for new use cases to extend Engula's capabilities instead of reinventing the wheel from scratch. 
-**The current design is still in progress.** You can also check [the previous design][demo-design] for more information. +Engula provides clients for different programming languages. The most important client interface is a set of typed collections. A typed collection contains a set of objects of the same type. Each type defines the meaning of the data and the operations that can be done on the data. A client converts data operations into internal expressions and then communicates with Engula to execute the expressions. -[demo-design]: https://github.com/engula/engula/blob/demo-1/docs/design.md +Engula provides ACID transactions to extend its range of applications further. Engula supports different isolation and consistency levels. The default is read committed (RC) isolation with causal consistency level. This combination meets the requirements of most OLTP applications with high performance. To provide causally consistent reads and writes, Engula uses hybrid logical clocks (HLC) to order events throughout the system. -# Overview +Engula implements a cloud-native architecture to deliver a cost-effective, highly-scalable, and highly-available service on the cloud. Engula disaggregates compute and storage to allow scaling different kinds of resources on-demand. The compute tier consists of a set of cooperators, each serving a portion of data. The storage tier consists of a stream engine and an object engine. Cooperators use the stream engine to elect leaders and store transaction logs. Committed transactions are first accumulated in cooperators and then flushed to the object engine in batches. On failures, cooperators read logs from the stream engine to recover unflushed transactions. Since all cooperators share the same stream engine and object engine, it is lightweight to scale cooperators without data movement. Cooperators also serve as a built-in cache tier to speed up reads, enabling Engula to resist traffic bursts and hotspots. 
-Engula is a serverless storage engine that empowers engineers to build reliable and cost-effective databases. +## Data model -Engula's design goals are as follows: +An Engula deployment is called a universe. A universe consists of multiple databases, each of which consists of multiple collections. A collection contains a set of objects of the same type. Each object has an object identifier, which is a sequence of bytes and is unique within a collection. Each object provides a set of methods associated with its type to manipulate its state. Engula supports common data types in programming languages. For example, numbers, strings, sets, maps, and lists. -- Elastic: takes advantage of elastic resources on the cloud -- Adaptive: adapts to dynamic workloads and diverse applications -- Extensible: provides pluggable module interfaces and implementations for customization -- Platform independent: allows flexible deployments on local hosts, on-premise servers, and cloud platforms +A collection can be partitioned (hash or range) into shards, each of which contains a portion of objects in the collection. Shards are the unit of data movement, and objects are the unit of data partition. -It is important to note that Engula is not a full-functional database. Engula is more like a framework that allows users to build their databases or storage engines. However, for users that don't need customization, Engula can also be used as an out-of-the-box data service for typical applications. +Engula supports transactions across collections of the same database. However, different databases are independent, which means that transactions across databases are not supported. -# Architecture +## Architecture ![Architecture](images/architecture.drawio.svg) -Engula employs *a modular and serverless architecture*. +Engula implements a tiered architecture. 
A universe has four tiers: -Engula unbundles the storage engine into the following modules: +- The control tier consists of a supervisor and an orchestrator +- The service tier consists of a set of transactors +- The compute tier consists of a set of cooperators +- The storage tier consists of a stream engine and an object engine -- **Engine** provides storage engines for different database workloads. -- **Kernel** provides stateful environments to implement upper-level storage engines. -- **Journal** provides abstractions and implementations to store data streams. For example, transaction logs. -- **Storage** provides abstractions and implementations to store data objects. For example, SSTables or Parquet tables. -- **Background** provides abstractions and implementations to run background jobs. For example, compactions or garbage collections. +The control tier manages the service tier and the compute tier. The supervisor scales transactors and cooperators on-demand through the orchestrator. The supervisor assigns data to cooperators and handles automated data movement among them, either to meet replication constraints or to balance load. -These modules have varied resource requirements, which allows Engula to take advantage of different kinds of resources. Engula intends to shift most foreground computation to `Engine`, background computation to `Background`, and then make the cost of stateful modules (`Kernel`, `Journal`, and `Storage`) as low as possible. +Transactors are stateless. Transactors retrieve metadata and location information from the supervisor. Each transactor can handle transactions of all databases in the universe. To execute a transaction, a transactor inspects the data involved in the transaction and coordinates one or more cooperators to complete the transaction. -Engula modules also expose extensible interfaces that allow different implementations. Engula provides some built-in implementations for common use cases. 
For example, `Storage` offers a local implementation based on the local file system and a remote implementation based on gRPC services. For a specific application, users can choose the appropriate implementations or build their own ones. +![Cooperator Architecture](images/cooperator-architecture.drawio.svg) -As for deployment, unlike traditional databases and storage engines, Engula modules are not aware of nodes or servers. From the perspective of Engula, all modules run on a unified resource pool with unlimited resources. The resource pool divided resources into resource groups, each of which consists of multiple resource units that can be scaled on-demand. Each kernel instance runs on a dedicated resource group to provide an isolated stateful environment for upper-level storage engines. +Cooperators are divided into replicated groups. Each group can serve multiple shards of different collections in the same database. In each group, one of the cooperators is elected as a leader, and the others are followers. Each group has at least one leader and zero or more followers. Followers can be added on-demand for failover or load balance. Leader cooperators serve all writes, while all cooperators with up-to-date data can serve reads to share traffic. -# Engine +Cooperators use the same stream engine and object engine for data persistence. The stream engine and the object engine are two independent storage systems. The stream engine manages a set of single-producer, multi-consumer (SPMC) streams. A stream stores a sequence of events and supports leader election to choose a single producer. The object engine provides functionalities to get, scan, and update objects. In addition, each cooperator has two cache components: a read cache and a write cache. The read cache contains the results of recent reads. The write cache contains the changes of recent writes. -A well-optimized storage engine needs to make a lot of assumptions about its applications. 
While these assumptions allow targeted optimizations, they also limit the range of applications. That's why we need different kinds of storage engines. However, despite the varied assumptions, the storage requirements of these engines are surprisingly similar. This observation motivates Engula to be a ubiquitous framework that empowers all kinds of storage engines. +A cooperator group uses a dedicated stream from the stream engine to elect a leader and store transaction logs. On writes, the leader cooperator appends logs to the stream first and then accumulates the changes to the write cache. When the write cache size reaches a threshold, the leader cooperator flushes the changes to the object engine. The object engine applies the changes to update its data. Once the leader cooperator confirms that the changes have been persisted to the object engine, it truncates the corresponding logs in the stream. On reads, a cooperator queries from the read cache first. On cache misses, the cooperator reads from the object engine and updates the read cache. -In Engula, `Engine` interacts with `Kernel` to accomplish various kinds of storage operations. The interaction between `Engine` and `Kernel` can be generalized as follows: +## Orchestrator -For writes: +Orchestrator provides an abstraction on the running platform to manage a set of server instances. Orchestrator follows the [operator pattern](https://github.com/cncf/tag-app-delivery/blob/main/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md) and can be implemented as an operator in Kubernetes. 
-- `Engine` converts client requests into transaction logs -- `Engine` persists the logs in `Kernel` and then applies them to a memory table -- `Engine` flushes the memory table to `Kernel` and then deletes the obsoleted logs -- `Engine` submits some background jobs to `Kernel` periodically to re-organize persisted data +An orchestrator implementation should provide the following functionalities: -For queries: +- Provide information of instances +- Provide health checks on instances +- Allow users to provision and de-provision instances -- `Engine` converts client requests into low-level data queries -- `Engine` merges data in the memory table and data from `Kernel` to serve these queries -- `Engine` optionally caches data from `Kernel` to optimize read performance +Engula employs orchestrators to build an autonomous system. From this point of view, the architecture of Engula can be regarded as the composition of three autonomous sub-systems. The control, service, and compute tiers form one sub-system, and the stream engine and the object engine are the other two sub-systems. -# Kernel +## Storage -![Kernel Implementation](images/kernel-implementation.drawio.svg) +The storage of Engula consists of a stream engine and an object engine. The stream engine and the object engine are multi-tenant, highly scalable, and highly available storage systems. Both of them can serve a lot of tenants in one deployment. A stream engine tenant manages multiple streams, and an object engine tenant manages multiple buckets. Each database uses one dedicated tenant in the stream engine to store logs and one dedicated tenant in the object engine to store objects. -`Kernel` is a stateful and pluggable environment for storage engines. A `Kernel` implementation integrates different modules to provide a specific set of storage capabilities. For example, a memory kernel stores everything in memory, and a file kernel stores everything in local files. 
+### StreamEngine -Specifically, `Kernel` stores streams in `Journal`, objects in `Storage`, metadata in `Manifest`, and runs background jobs in `Background`. In addition, `Kernel` supports versioned metadata and atomic metadata operations to meet the following requirements: +A StreamEngine deployment manages a lot of tenants, each of which consists of multiple streams. A stream stores a sequence of events proposed by users. -- Commit metadata across objects and modules atomically -- Access consistent metadata snapshots of the storage engine -- Make sure that the required data remains valid during requests processing +![Stream Engine Architecture](images/stream-engine-architecture.drawio.svg) -To achieve that, `Kernel` maintains multiple versions of metadata. Each version represents the state of `Kernel` at a specific time. Each metadata transaction creates a version update that transforms the last version into a new one. When an engine connects to `Kernel`, it gets the last version from `Kernel` as its base version and subscribes to future version updates. When a version update arrives, the engine applies it to its base version to catch up with `Kernel`. The engine maintains a list of live versions for ongoing queries and releases a version once it is no longer used. `Kernel` needs to guarantee that objects in all engine versions remain valid until the corresponding versions are released. +A StreamEngine deployment consists of a master, an orchestrator, and a set of segment stores. -`Manifest` provides a single point of truth for `Kernel`. To add objects, `Kernel` uploads objects to `Storage` first and then commits the uploaded objects to `Manifest`. To delete objects, `Kernel` commits the to be deleted objects to `Manifest` before deleting those objects. It is possible that `Kernel` fails to upload or delete some objects. In this case, the corresponding objects are obsoleted and left in `Storage`. 
So `Kernel` implements garbage collection to purge deleted and obsoleted objects eventually. +The events of a stream are divided into multiple segments, according to a certain strategy, since its capacity might exceed the hardware limitation. For fault tolerance and durability, each segment is replicated and persisted in multiple segment stores. The master records the segment placements of streams, and it assigns the segment's replica to the segment store and balances load among them. -# Journal +![Stream Engine Election](images/stream-engine-election.drawio.svg) -`Journal` divides data into streams. A stream stores a sequence of events. Each stream has a unique identifier called the stream name. Events within a stream are ordered by timestamps. Users are responsible for assigning increasing timestamps to events when appending to a stream. However, timestamps within a stream are not required to be continuous, which allows users to dispatch events to multiple streams. +Only one client as a leader can write events into a stream at the same time. For fault tolerance, multiple clients will try to elect a leader at the same time. The master is responsible for choosing one of these clients as the leader. -## Semantic +### ObjectEngine -`Journal` provides the following interfaces to manipulate streams: +An ObjectEngine deployment manages a lot of tenants, each of which consists of multiple buckets. ObjectEngine stores data as files. Files in a bucket are organized as an LSM-Tree. -- List streams -- Create a stream with a unique name -- Delete a stream +![Object Engine Architecture](images/object-engine-architecture.drawio.svg) -`Journal` provides the following interfaces to manipulate events in a stream: +An ObjectEngine deployment consists of a manifest, an orchestrator, a background scheduler, a file store, and a set of cache stores. All files are persisted in the file store. The file store is highly durable and serves as the single point of truth for data. 
Each cache store caches a portion of files from the file store to speed up reads. The manifest assigns data to the cache stores and balances load among them. It is important to note that the data distribution in cache stores has nothing to do with the data partition of collections described in the data model section. Multiple cache stores can serve overlapped data to share traffic. -- Read events since a timestamp -- Append events with a timestamp -- Release events up to a timestamp +ObjectEngine provides interfaces for users to generate and ingest files. ObjectEngine supports atomic ingestion across buckets in the same tenant. The manifest needs to decide the layout of files in each bucket to maintain the LSM-Tree structure. As file ingestions go on, the manifest needs to re-organize LSM-Tree structures to reduce read and space amplification. The manifest can submit background jobs (e.g., compaction, garbage collection) to the scheduler, which provisions resources on-demand to run the jobs. -It is also possible to support stream subscriptions. We leave the exploration of this feature to future work. +TODO: We should provide high-level design here and leave the detailed design to [a separated document](object-engine.md). -Released events can be archived or garbage collected. Whether released events are accessible depends on the implementation. For example, if events are archived, it should allow users to recover data from archives. Nevertheless, implementations should guarantee to return continuous events. That is, the returned events must be a sub-sequence of a stream. +## Deployment -## Implementation - -![Journal Implementation](images/journal-implementation.drawio.svg) - -`Journal` can be implemented in the following forms: - -- `Local Journal`: stores data in memory or local file system. -- `Remote Journal`: stores data in multiple remote services with some kind of consensus. 
-- `External Journal`: stores data in various third-party services like Kafka or LogDevice. - -`Journal` doesn't assume how data should be persisted. It is up to the implementer to decide what guarantees it provides. - -# Storage - -`Storage` divides data into buckets. A bucket stores a set of data objects. Each bucket has a unique identifier called the bucket name. Each object has an object name that is unique within a bucket. Objects are immutable once created. - -## Semantic - -`Storage` provides the following interfaces to manipulate buckets: - -- List buckets -- Create a bucket with a unique name -- Delete a bucket - -`Storage` provides the following interfaces to manipulate objects in a bucket: - -- List objects -- Upload an object -- Delete an object -- Read some bytes from an object at a specific position - -It is also possible to support object-level expression evaluation for some object formats (e.g., JSON, Parquet), which is important to analytical workloads. We leave the exploration of this feature to future work. - -`Storage` is a low-level abstraction to manipulate individual objects. It doesn't support atomic operations across multiple objects. See [`Kernel`](#kernel) for more advanced semantics. - -## Implementation - -![Storage Implementation](images/storage-implementation.drawio.svg) - -`Storage` can be implemented in the following forms: - -- `Local Storage`: stores data in memory or local file system. -- `Remote Storage`: stores data in multiple remote services with some kind of replication or load balance. -- `External Storage`: stores data in various third-party services, for example, S3 or MinIO. - -It is a good idea to combine different implementations into a more powerful one. For example, we can create a hybrid storage that persists data to a slow but highly-durable storage and then reads data from a fast and highly-available storage. - -`Storage` doesn't assume how data should be persisted. 
It is up to the implementer to decide what guarantees it provides. - -# Discussions - -Casual discussions about the design can proceed in the following discussions: - -- [Architecture][architecture-discussion] -- [Engine][engine-discussion] -- [Journal][journal-discussion] -- [Storage][storage-discussion] - -[architecture-discussion]: https://github.com/engula/engula/discussions/41 -[engine-discussion]: https://github.com/engula/engula/discussions/55 -[journal-discussion]: https://github.com/engula/engula/discussions/70 -[storage-discussion]: https://github.com/engula/engula/discussions/79 - -Formal discussions about the design of a specific implementation should proceed with an [RFC](rfcs). +While the architecture introduces different components, it doesn't mean that each component must be deployed as an independent node. The separation of concerns allows Engula to provide flexible deployment. For example, it is possible to run all components together as a standalone server for convenience. However, to maximize resource utilization, components with different resource requirements should be separated apart. diff --git a/docs/dev/release-guide.md b/docs/dev/release-guide.md new file mode 100644 index 00000000..2882fba8 --- /dev/null +++ b/docs/dev/release-guide.md @@ -0,0 +1,84 @@ +# Release Guide + +## Prerequisites + +The release process requires the following privileges: + +- [Cargo owner][cargo-owner] of the Engula crates +- Commit permissions to the main and release branch of the Engula project + +Engula consists of multiple cargo crates. It is tedious and error-prone to update and publish all crates manually. We can use [cargo-workspaces][cargo-workspaces] to do the job instead. 
+ +[cargo-owner]: https://doc.rust-lang.org/cargo/reference/publishing.html#cargo-owner +[cargo-workspaces]: https://github.com/pksunkara/cargo-workspaces + +## Branch management + +We have two major git branches for now: + +- main: all new commits are merged into this branch first +- release: commits for the current release are picked from the main branch to this branch + +## Release process + +When the current release is almost ready, we can start a release process. By default, the release day will be a Friday. + +### Prepare the release post (4 days before announce) + +Every release comes with a post announcing the release. Writing and reviewing the release post may take more time than expected. So it's a good idea to prepare it a few days before the release. + +### Resolve issues and prepare the branches (1 day before announce) + +Ensure all issues are resolved, and all commits are landed on the release branch. + +Bump the version number of the main and release branches: + +- The main branch should use the version number for the next release. +- The release branch should use the version number for the current release. + +To bump the version number of all crates: + +```sh +cargo workspaces version --no-git-tag +``` + +Note that we don't tag here because we will do that along with the release on Github. + +Then send a pull request with the generated commit to the main and release branches, respectively. After the commits have been merged, close the tracking issues and the milestone for the current release. + +### Release day (Friday) + +The following steps assume that you are on the release branch with `upstream` pointing to `github.com/engula/engula`. + +- **30 minutes before announce** - Publish to crates.io and release on Github + - Publish all crates: + + ```sh + cargo workspaces publish --from-git + ``` + + Check crates.io to see if everything works. 
+ + - Tag the release commit and push it to Github: + + ```sh + git tag vx.y.z + git push vx.y.z -u upstream + ``` + + - Create a release with the tag on Github. + +- **The announcement** + - First, merge the release post. + - Tweet on [Twitter](https://twitter.com/engulaio). + - Post on [Reddit](https://www.reddit.com/r/rust). + - Share on [Hacker News](https://news.ycombinator.com/) + - Send a message to everyone on [Zulip](https://engula.zulipchat.com/). + +- Take a break to celebrate with all the contributors! + +## References + +- [Releasing on Github](https://docs.github.com/en/repositories/releasing-projects-on-github/about-releases) +- [Publishing on crates.io](https://doc.rust-lang.org/cargo/reference/publishing.html) +- [The Rust Release Process](https://forge.rust-lang.org/release/process.html) diff --git a/docs/images/architecture.drawio.svg b/docs/images/architecture.drawio.svg index fdfe68d4..8733a49a 100644 --- a/docs/images/architecture.drawio.svg +++ b/docs/images/architecture.drawio.svg @@ -1,1114 +1,253 @@ - + - - - - - - + + - -
-
-
- Write + +
+
+
+ Transactor
- - Write + + Transactor - - - + - -
-
-
- Map Engine -
-
-
-
+ +
+
+
+ Cooperator
- - Map Engine... + + Cooperator - - - - - - - + - -
-
-
- Unit -
-
-
-
+ +
+
+
+ Stream Engine
- - Unit... + + Stream Engine - - - + - -
-
-
- Storage Service + +
+
+
+ Object Engine
- - Storage Service + + Object Engine - - - + - -
-
-
- Storage Group + +
+
+
+ Transactor
- - Storage Group + + Transactor - - - + - -
-
-
- Journal Group + +
+
+
+ Transactor
- - Journal Group + + Transactor - - - + - -
-
-
- Background Group + +
+
+
+ + Service +
- - Background Group + + Service - - - + - -
-
-
- Local Journal + +
+
+
+ + Compute +
- - Local Journal + + Compute - - - + - -
-
-
- Local Storage + +
+
+
+ + Storage +
- - Local Storage - - - - - - - - - -
-
-
- Local Background -
-
-
-
- - Local Background - -
-
- - - - - - - - - - - -
-
-
- Remote Journal -
-
-
-
- - Remote Journal - -
-
- - - - - - - - - - - -
-
-
- Remote Storage -
-
-
-
- - Remote Storage - -
-
- - - - - - - - - - - -
-
-
- Remote Background -
-
-
-
- - Remote Background - -
-
- - - - - - -
-
-
- Journal -
-
-
-
- - Journal - -
-
- - - - - - -
-
-
- Storage -
-
-
-
- + Storage
- - - - - - - - -
-
-
- Streams -
-
-
-
- - Streams - -
-
- - - - - - - - -
-
-
- Objects -
-
-
-
- - Objects - -
-
- - - - - - - - -
-
-
- Background jobs -
-
-
-
- - Background jobs - -
-
- - - - - - - - - - - - - -
-
-
- Read -
-
-
-
- - Read - -
-
- - - - - - - - -
-
-
- Read -
-
-
-
- - Read - -
-
- - - - - - -
-
-
- Kernel -
-
-
-
- - Kernel - -
-
- - - - - - - - - - - - - - -
-
-
- Background -
-
-
-
- - Background - -
-
- - - - - - - - - - - - - - -
-
-
- Storage Group -
-
-
-
- - Storage Group - -
-
- - - - - - -
-
-
- Background Group -
-
-
-
- - Background Group - -
-
- - - - - - -
-
-
- Kernel Group -
-
-
-
- - Kernel Group - -
-
- - - - - - -
-
-
- Unit -
-
-
-
-
-
-
-
- - Unit... - -
-
- - - - - - - - -
-
-
- Backup -
-
-
-
- - Backup - -
-
- - - - - - -
-
-
- Storage Service -
-
-
-
- - Storage Service - -
-
- - - - - - -
-
-
- Unit -
-
-
-
-
-
-
-
- - Unit... - -
-
- - - - - - -
-
-
- Storage Service -
-
-
-
- - Storage Service - -
-
- - - - - - - - - - - - - -
-
-
- Flush -
-
-
-
- - Flush - -
-
- - - - - - -
-
-
- Memtable -
-
-
-
- - Memtable - -
-
- - - - - - -
-
-
- Cache -
-
-
-
- - Cache - -
-
- - - - - - -
-
-
- Engine Group -
-
-
-
- - Engine Group - -
-
- - - - - - - - - - - - -
-
-
- Journal Group -
-
-
-
- - Journal Group - -
-
- - - - - - -
-
-
- Object Storage -
-
-
-
- - Object Storage - -
-
- - - - - - -
-
-
- Unit -
-
-
-
-
-
-
-
- - Unit... - -
-
- - - - - - -
-
-
- Journal Service -
-
-
-
- - Journal Service - -
-
- - - - - - -
-
-
- Unit -
-
-
-
-
-
-
-
- - Unit... - -
-
- - - - - - - - -
-
-
- Backup -
-
-
-
- - Backup - -
-
- - - - - - -
-
-
- Journal Service -
-
-
-
- - Journal Service - -
-
- - - - - - -
-
-
- Unit -
-
-
-
-
-
-
-
- - Unit... - -
-
- - - - - - -
-
-
- Journal Service -
-
-
-
- - Journal Service - -
-
- - - - - - - - -
-
-
- Kernel -
-
-
-
- - Kernel - -
-
- - - - -
-
-
- Resource -
-
-
-
- - Resource - -
-
- - - - -
-
-
- Engine -
-
-
-
- - Engine - -
-
- - - - - - -
-
-
- Kernel Group -
-
-
-
- - Kernel Group - -
-
- - - - - - - - -
-
-
- Write -
-
-
-
- - Write - -
-
- - - + + + - -
-
-
- Hash Engine -
-
-
-
+ +
+
+
+ Supervisor
- - Hash Engine... + + Supervisor - - - - - + - -
-
-
- Flush + +
+
+
+ Cooperator
- - Flush + + Cooperator - - - + - -
-
-
- Memtable + +
+
+
+ Cooperator
- - Memtable + + Cooperator - - - + + - -
-
-
- Cache + +
+
+
+ Orchestrator
- - Cache + + Orchestrator - - - + - -
-
-
- Engine Group + +
+
+
+ + Control +
- - Engine Group + + Control diff --git a/docs/images/cooperator-architecture.drawio.svg b/docs/images/cooperator-architecture.drawio.svg new file mode 100644 index 00000000..64437623 --- /dev/null +++ b/docs/images/cooperator-architecture.drawio.svg @@ -0,0 +1,444 @@ + + + + + + + + +
+
+
+ Cooperator Group +
+
+
+
+
+
+
+
+
+
+
+
+
+
+ + Cooperator Group... + +
+
+ + + + + +
+
+
+ Forward +
+
+
+
+ + Forward + +
+
+ + + + +
+
+
+ Stream Engine +
+
+
+
+ + Stream Engine + +
+
+ + + + +
+
+
+ Object Engine +
+
+
+
+ + Object Engine + +
+
+ + + + + +
+
+
+ Forward +
+
+
+
+ + Forward + +
+
+ + + + + +
+
+
+ Flush +
+
+
+
+ + Flush + +
+
+ + + + + +
+
+
+ Read +
+
+
+
+ + Read + +
+
+ + + + + +
+
+
+ Write +
+
+
+
+ + Write + +
+
+ + + + +
+
+
+ Cooperator (Leader) +
+
+
+
+
+
+
+
+
+
+ + Cooperator (Leader)... + +
+
+ + + + + +
+
+
+ Read +
+
+
+
+ + Read + +
+
+ + + + +
+
+
+ Read Cache +
+
+
+
+ + Read Cache + +
+
+ + + + + +
+
+
+ Write +
+
+
+
+ + Write + +
+
+ + + + +
+
+
+ Write Cache +
+
+
+
+ + Write Cache + +
+
+ + + + +
+
+
+ Cooperator (Follower) +
+
+
+
+
+
+
+
+
+
+ + Cooperator (Follower)... + +
+
+ + + + + +
+
+
+ Read +
+
+
+
+ + Read + +
+
+ + + + +
+
+
+ Read Cache +
+
+
+
+ + Read Cache + +
+
+ + + + + +
+
+
+ Write +
+
+
+
+ + Write + +
+
+ + + + +
+
+
+ Write Cache +
+
+
+
+ + Write Cache + +
+
+ + + + +
+
+
+ Cooperator (Follower) +
+
+
+
+
+
+
+
+
+
+ + Cooperator (Follower)... + +
+
+ + + + + +
+
+
+ Read +
+
+
+
+ + Read + +
+
+ + + + +
+
+
+ Read Cache +
+
+
+
+ + Read Cache + +
+
+ + + + + +
+
+
+ Write +
+
+
+
+ + Write + +
+
+ + + + +
+
+
+ Write Cache +
+
+
+
+ + Write Cache + +
+
+
+ + + + + Viewer does not support full SVG 1.1 + + + +
diff --git a/docs/images/journal-implementation.drawio.svg b/docs/images/journal-implementation.drawio.svg deleted file mode 100644 index c786f35e..00000000 --- a/docs/images/journal-implementation.drawio.svg +++ /dev/null @@ -1,249 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-
- Journal -
-
-
-
- - Journal - -
-
- - - - - - - - - - - - - - - - -
-
-
- Remote Journal -
-
-
-
- - Remote Journal - -
-
- - - - - - -
-
-
- Journal Service -
-
-
-
- - Journal Service - -
-
- - - - - - -
-
-
- Journal Service -
-
-
-
- - Journal Service - -
-
- - - - - - - - - - - - - - - - -
-
-
- External Journal -
-
-
-
- - External Journal - -
-
- - - - - - -
-
-
- Kafka -
-
-
-
- - Kafka - -
-
- - - - - - -
-
-
- LogDevice -
-
-
-
- - LogDevice - -
-
- - - - - - - - - - - - - - - - -
-
-
- Local Journal -
-
-
-
- - Local Journal - -
-
- - - - - - -
-
-
- Memory -
-
-
-
- - Memory - -
-
- - - - - - -
-
-
- File System -
-
-
-
- - File System - -
-
-
- - - - - Viewer does not support full SVG 1.1 - - - -
\ No newline at end of file diff --git a/docs/images/kernel-implementation.drawio.svg b/docs/images/kernel-implementation.drawio.svg deleted file mode 100644 index 93b78d29..00000000 --- a/docs/images/kernel-implementation.drawio.svg +++ /dev/null @@ -1,309 +0,0 @@ - - - - - - - - - - - - - - - - -
-
-
- Streams -
-
-
-
- - Streams - -
-
- - - - - - - - - - - -
-
-
- Objects -
-
-
-
- - Objects - -
-
- - - - - - - - - - - -
-
-
- Background jobs -
-
-
-
- - Background jobs - -
-
- - - - - - - - - - - -
-
-
- Metadata -
-
-
-
- - Metadata - -
-
- - - - - - -
-
-
- Kernel -
-
-
-
- - Kernel - -
-
- - - - - - -
-
-
- Storage -
-
-
-
- - Storage - -
-
- - - - - - - - -
-
-
- Streams -
-
-
-
- - Streams - -
-
- - - - - - - - -
-
-
- Metadata -
-
-
-
- - Metadata - -
-
- - - - - - - - -
-
-
- Objects -
-
-
-
- - Objects - -
-
- - - - - - - - -
-
-
- Background jobs -
-
-
-
- - Background jobs - -
-
- - - - - - -
-
-
- Engine -
-
-
-
- - Engine - -
-
- - - - - - -
-
-
- Journal -
-
-
-
- - Journal - -
-
- - - - - - -
-
-
- Background -
-
-
-
- - Background - -
-
- - - - - - -
-
-
- Manifest -
-
-
-
- - Manifest - -
-
-
- - - - - Viewer does not support full SVG 1.1 - - - -
diff --git a/docs/images/object-engine-architecture.drawio.svg b/docs/images/object-engine-architecture.drawio.svg new file mode 100644 index 00000000..af369dba --- /dev/null +++ b/docs/images/object-engine-architecture.drawio.svg @@ -0,0 +1,368 @@ + + + + + + + + + + + + +
+
+
+ File Store +
+
+
+
+ + File Store + +
+
+ + + + + + +
+
+
+ LSM Store +
+
+ Range [A, B) +
+
+
+
+ + LSM Store... + +
+
+ + + + + +
+
+
+ Submit +
+
+
+
+ + Submit + +
+
+ + + + + +
+
+
+ Apply +
+
+
+
+ + Apply + +
+
+ + + + +
+
+
+ Master +
+
+
+
+ + Master + +
+
+ + + + + +
+
+
+ Provision +
+
+
+
+ + Provision + +
+
+ + + + + +
+
+
+ Watch +
+
+
+
+ + Watch + +
+
+ + + + +
+
+
+ Orchestrator +
+
+
+
+ + Orchestrator + +
+
+ + + + +
+
+
+ LSM Store +
+
+ Range [B, C) +
+
+
+
+ + LSM Store... + +
+
+ + + + + + +
+
+
+ LSM Store +
+
+ Range [C, D) +
+
+
+
+ + LSM Store... + +
+
+ + + + + +
+
+
+ De-provision +
+
+
+
+ + De-provision + +
+
+ + + + +
+
+
+ LSM Store +
+
+ Range [D, E) +
+
+
+
+ + LSM Store... + +
+
+ + + + + +
+
+
+ Load +
+
+
+
+ + Load + +
+
+ + + + + +
+
+
+ Pull +
+
+
+
+ + Pull + +
+
+ + + + + +
+
+
+ Commit +
+
+
+
+ + Commit + +
+
+ + + + + +
+
+
+ Run +
+
+
+
+ + Run + +
+
+ + + + +
+
+
+ Scheduler +
+
+
+
+ + Scheduler + +
+
+ + + + + + +
+
+
+ Read / Write +
+
+
+
+ + Read / Write + +
+
+ + + + +
+
+
+ Background jobs +
+
+
+
+ + Background jobs + +
+
+
+ + + + + Viewer does not support full SVG 1.1 + + + +
diff --git a/docs/images/object-engine-ingestion.drawio.svg b/docs/images/object-engine-ingestion.drawio.svg new file mode 100644 index 00000000..5b7b0a05 --- /dev/null +++ b/docs/images/object-engine-ingestion.drawio.svg @@ -0,0 +1,155 @@ + + + + + + + + + +
+
+
+ 1. Begin +
+
+
+
+ + 1. Begin + +
+
+ + + + + +
+
+
+ 2. Allocate +
+
+
+
+ + 2. Allocate + +
+
+ + + + + +
+
+
+ 3. Upload +
+
+
+
+ + 3. Upload + +
+
+ + + + + +
+
+
+ 4. Commit +
+
+
+
+ + 4. Commit + +
+
+ + + + +
+
+
+ Client +
+
+
+
+ + Client + +
+
+ + + + + +
+
+
+ 5. Garbage collect +
+
+
+
+ + 5. Garbage collect + +
+
+ + + + +
+
+
+ Master +
+
+
+
+ + Master + +
+
+ + + + +
+
+
+ File Store +
+
+
+
+ + File Store + +
+
+
+ + + + + Viewer does not support full SVG 1.1 + + + +
diff --git a/docs/images/storage-implementation.drawio.svg b/docs/images/storage-implementation.drawio.svg deleted file mode 100644 index 55428a2f..00000000 --- a/docs/images/storage-implementation.drawio.svg +++ /dev/null @@ -1,250 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
-
- Storage -
-
-
-
- - Storage - -
-
- - - - - - - - - - - - - - - - -
-
-
- Local Storage -
-
-
-
- - Local Storage - -
-
- - - - - - -
-
-
- Memory -
-
-
-
- - Memory - -
-
- - - - - - -
-
-
- File System -
-
-
-
- - File System - -
-
- - - - - - -
-
-
- Storage Service -
-
-
-
- - Storage Service - -
-
- - - - - - -
-
-
- Storage Service -
-
-
-
- - Storage Service - -
-
- - - - - - -
-
-
- S3 -
-
-
-
- - S3 - -
-
- - - - - - -
-
-
- MinIO -
-
-
-
- - MinIO - -
-
- - - - - - - - - - - - - - - - -
-
-
- Remote Storage -
-
-
-
- - Remote Storage - -
-
- - - - - - - - - - - - - - - - -
-
-
- External Storage -
-
-
-
- - External Storage - -
-
-
- - - - - Viewer does not support full SVG 1.1 - - - -
\ No newline at end of file diff --git a/docs/images/stream-engine-architecture.drawio.svg b/docs/images/stream-engine-architecture.drawio.svg new file mode 100644 index 00000000..e1d2f034 --- /dev/null +++ b/docs/images/stream-engine-architecture.drawio.svg @@ -0,0 +1,273 @@ + + + + + + + +
+
+
+ Master +
+
+
+
+ + Master + +
+
+ + + + + + + + + + + +
+
+
+ Segment Store +
+
+ Range [1, 100) +
+ [100, 200) +
+
+
+
+ + Segment Store... + +
+
+ + + + + +
+
+
+ Apply +
+
+
+
+ + Apply + +
+
+ + + + +
+
+
+ Master +
+
+
+
+ + Master + +
+
+ + + + + +
+
+
+ Provision +
+
+
+
+ + Provision + +
+
+ + + + + +
+
+
+ Watch +
+
+
+
+ + Watch + +
+
+ + + + +
+
+
+ Orchestrator +
+
+
+
+ + Orchestrator + +
+
+ + + + + + +
+
+
+ Segment Store +
+
+ Range [100, 200) +
+ [200, 300) +
+
+
+
+ + Segment Store... + +
+
+ + + + + + +
+
+
+ Segment Store +
+
+ Range [1, 100) +
+ [200, 300) +
+
+
+
+ + Segment Store... + +
+
+ + + + + +
+
+
+ De-provision +
+
+
+
+ + De-provision + +
+
+ + + + +
+
+
+ Segment Store +
+
+ Range [1, 100) +
+ [100, 200) +
+ [200, 300) +
+
+
+
+ + Segment Store... + +
+
+ + + + + +
+
+
+ Read/Write +
+
+
+
+ + Read/Write + +
+
+ + + + +
+
+
+ Collect +
+
+
+
+ + Collect + +
+
+ +
+ + + + + Viewer does not support full SVG 1.1 + + + +
\ No newline at end of file diff --git a/docs/images/stream-engine-election.drawio.svg b/docs/images/stream-engine-election.drawio.svg new file mode 100644 index 00000000..7734fbd9 --- /dev/null +++ b/docs/images/stream-engine-election.drawio.svg @@ -0,0 +1,219 @@ + + + + + + + + +
+
+
+ Segment Stores +
+
+
+
+ + Segment Stores + +
+
+ + + + + + + + +
+
+
+ Client +
+
+
+
+ + Client + +
+
+ + + + + + + + +
+
+
+ Client +
+
+
+
+ + Client + +
+
+ + + + + + + + +
+
+
+ Client +
+
+
+
+ + Client + +
+
+ + + + +
+
+
+ Master +
+
+
+
+ + Master + +
+
+ + + + +
+
+
+ Read/Write +
+
+
+
+ + Read/Write + +
+
+ + + + + +
+
+
+ Read +
+
+
+
+ + Read + +
+
+ + + + + +
+
+
+ Read +
+
+
+
+ + Read + +
+
+ + + + + +
+
+
+ Observe +
+
+
+
+ + Observe + +
+
+ + + + + +
+
+
+ Observe +
+
+
+
+ + Observe + +
+
+ + + + + +
+
+
+ Observe +
+
+
+
+ + Observe + +
+
+ +
+ + + + + Viewer does not support full SVG 1.1 + + + +
\ No newline at end of file diff --git a/docs/object-engine.md b/docs/object-engine.md new file mode 100644 index 00000000..7d874bb3 --- /dev/null +++ b/docs/object-engine.md @@ -0,0 +1,45 @@ +# Object Engine + +This document describes the detailed design of Object Engine. + +## Architecture + +![Architecture](images/object-engine-architecture.drawio.svg) + +Object Engine consists of a Master, a set of LSM Stores, and a File Store. + +Master, LSM Store, and File Store organize files into tenants and buckets. A tenant consists of multiple buckets, and a bucket contains a lot of files. + +## Master + +### Manifest + +Master organizes files in each bucket as an LSM-Tree. The file layout of buckets in a tenant is called a version. Master records the file layout of each tenant in a manifest file. A manifest file is a sequence of version edits ordered by an increasing sequence number. Each version edit contains a bunch of changes (e.g., add or remove files) to one or more buckets based on the previous version. Master reads the manifest file sequentially and applies all version edits to reconstruct the current version of each tenant when it restarts. + +### Ingestion + +![Ingestion](images/object-engine-ingestion.drawio.svg) + +Master supports atomic file ingestions to multiple buckets in the same tenant. To ingest files, a client asks Master to begin an ingestion first. The client allocates unique file names from Master and then uploads files to File Store. When all files are uploaded, the client asks Master to commit the ingestion. During the ingestion, Master tracks the ingestion state and keeps the allocated files alive until the client commits or times out. The ingestion state stays in memory and will be lost when Master restarts. So when an ingestion commits, if its state is missing in Master, it will be simply aborted. When an ingestion commits, Master decides the file layout of the ingested files and appends a new version edit to the manifest file. 
Finally, due to various failure cases (e.g., client timeouts), some files may be left uncommitted in File Store. So Master runs garbage collection periodically to clean up obsolete files. + +### Load balance + +Master divides a bucket into multiple partitions with similar sizes. Each partition represents a partial LSM-Tree of the bucket. Master scatters partitions to LSM Stores for load balance. When Master assigns a partition to an LSM Store, it sends a version edit containing the file layout of the partition to the LSM Store. The file layout of each partition changes from time to time due to file ingestions or background jobs. So every time a transaction commits, Master sends a version edit with the corresponding changes to each affected LSM Store. Master doesn't need to persist the distribution of partitions at run time. It pulls information from all LSM Stores to reconstruct the current distribution of partitions when it restarts. + +### Background jobs + +TODO + +## LSM Store + +LSM Store is an LSM-Tree storage system built on File Store. LSM Store uses an external File Store as a data source and an optional local File Store as a cache. LSM Store is responsible for managing files in the local store (if present) but not the external store. + +LSM Store organizes files in each bucket as an LSM-Tree similar to Master. At run time, LSM Store maintains a list of live versions for each tenant. The current version is at the end of the list. For reads, LSM Store provides key-value APIs for point gets and range scans. To prevent the required files from being deleted during reads, LSM Store acquires a reference of the current version before reading. LSM Store releases the version once the read is done. When the reference count of a version drops to zero, obsoleted files in the version will be deleted. For writes, LSM Store accepts version edits from Master. 
When a version edit arrives, if a local store is used, LSM Store loads the required files from the external store to the local store and deletes obsoleted files from the local store. Then LSM Store appends the version edit to a local manifest file. + +## File Store + +File Store is a file storage system. File Store provides file APIs for random reads and sequential writes. There are two kinds of File Store implementations: local and external. + +A local store is built on a local file system. It provides low-latency IO with limited storage capacity. A local store is suitable for real-time workloads. + +An external store is built on an external storage system (e.g., S3, HDFS). It provides low-cost and highly-durable storage with unlimited capacity. But its IO latency is potentially high and unstable. An external store is suitable for backups and offline workloads. diff --git a/docs/rfcs/00000000-template.md b/docs/rfcs/00000000-template.md index 808dcbcc..f441b084 100644 --- a/docs/rfcs/00000000-template.md +++ b/docs/rfcs/00000000-template.md @@ -1,9 +1,40 @@ ---- -status: '[accepted|released|discarded]' -discussion: https://github.com/engula/engula/discussions/0 -issue: https://github.com/engula/engula/issues/0 ---- - - +# Feature Name + +- Status: '[accepted|completed|obsoleted]' +- Discussion: https://github.com/engula/engula/discussions/0 +- Pull Request: https://github.com/engula/engula/pull/0 +- Tracking Issue: https://github.com/engula/engula/issues/0 + +## Summary + +One paragraph explanation of the feature. + +## Motivation + +Why are we doing this? What use cases does it support? What is the expected outcome? + +## Detailed design + +This is the technical portion of the RFC. Explain the design in sufficient detail that: + +- Its interaction with other features is clear. +- It is reasonably clear how the feature would be implemented. +- Corner cases are dissected by example. 
+ +The section should return to the examples given in the previous section, and explain more fully how the detailed proposal makes those examples work. + +## Drawbacks + +Why should we *not* do this? + +## Alternatives + +- Why is this design the best in the space of possible designs? +- What other designs have been considered and what is the rationale for not choosing them? +- What is the impact of not doing this? + +## Unresolved questions + +- What parts of the design do you expect to resolve through the RFC process before this gets merged? +- What parts of the design do you expect to resolve through the implementation of this feature before stabilization? +- What related issues do you consider out of scope for this RFC that could be addressed in the future independently of the solution that comes out of this RFC? diff --git a/examples/hash_engine.rs b/examples/hash_engine.rs deleted file mode 100644 index 6d44a04c..00000000 --- a/examples/hash_engine.rs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::net::SocketAddr; - -use clap::Parser; -use engula::{ - engine::hash::{Engine, Result}, - kernel::{grpc::Kernel as KernelClient, mem::Kernel as MemKernel, Kernel}, -}; - -#[derive(Parser)] -struct Args { - #[clap( - long, - about = "The address of a Kernel server, a memory kernel instance is run if not specified" - )] - kernel: Option, -} - -async fn run(kernel: K) -> Result<()> { - let engine = Engine::open(kernel).await?; - let key = vec![1]; - let value = vec![2]; - engine.put(key.clone(), value.clone()).await?; - let got = engine.get(&key).await?; - assert_eq!(got, Some(value)); - engine.delete(key.clone()).await?; - let got = engine.get(&key).await?; - assert_eq!(got, None); - Ok(()) -} - -#[tokio::main] -async fn main() -> Result<()> { - let arg: Args = Args::parse(); - if let Some(addr) = arg.kernel { - let kernel = KernelClient::connect(&addr.to_string()).await?; - run(kernel).await - } else { - let kernel = MemKernel::open().await?; - run(kernel).await - } -} diff --git a/rust-toolchain.toml b/rust-toolchain.toml index e3866e13..7744a765 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2021-11-16" +channel = "nightly-2022-02-01" components = ["rustfmt", "clippy"] diff --git a/src/apis b/src/apis new file mode 160000 index 00000000..166c35bd --- /dev/null +++ b/src/apis @@ -0,0 +1 @@ +Subproject commit 166c35bdcef1d3dd18e39ff51c13f059e3193e55 diff --git a/src/client/Cargo.toml b/src/client/Cargo.toml new file mode 100644 index 00000000..260d2d4f --- /dev/null +++ b/src/client/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "engula-client" +version = "0.3.0" +edition = "2021" +license = "Apache-2.0" +homepage = "https://engula.io" +repository = "https://github.com/engula/engula" +description = "The Rust client for Engula." 
+ +[dependencies] +engula-apis = { version = "0.3", path = "../apis" } + +prost = "0.9" +tokio = { version = "1.15", features = ["full"] } +tonic = "0.6" + +[dev-dependencies] +anyhow = "1.0" diff --git a/src/client/examples/blob.rs b/src/client/examples/blob.rs new file mode 100644 index 00000000..1cf4ae54 --- /dev/null +++ b/src/client/examples/blob.rs @@ -0,0 +1,37 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::Result; +use engula_client::{Blob, Universe}; + +#[tokio::main] +async fn main() -> Result<()> { + let url = "http://localhost:21716"; + let uv = Universe::connect(url).await?; + let db = uv.create_database("blob").await?; + let co = db.create_collection::("blob").await?; + + co.set("o", vec![1, 2]).await?; + println!("o = {:?}", co.get("o").await?); + co.object("o").append(vec![3, 4]).await?; + println!("o = {:?}", co.get("o").await?); + println!("o.len = {:?}", co.object("o").len().await?); + + let mut txn = co.object("o").begin(); + txn.append(vec![5, 6]).append(vec![7, 8]); + txn.commit().await?; + println!("o = {:?}", co.object("o").load().await?); + + Ok(()) +} diff --git a/src/client/examples/collection.rs b/src/client/examples/collection.rs new file mode 100644 index 00000000..d9b749c1 --- /dev/null +++ b/src/client/examples/collection.rs @@ -0,0 +1,41 @@ +// Copyright 2022 The Engula Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::Result; +use engula_client::{Any, Universe}; + +#[tokio::main] +async fn main() -> Result<()> { + let url = "http://localhost:21716"; + let uv = Universe::connect(url).await?; + let db = uv.create_database("co").await?; + let co = db.create_collection::("co").await?; + + co.set("o", 1).await?; + println!("o = {:?}", co.get("o").await?); + co.set("o", "o").await?; + println!("o = {:?}", co.get("o").await?); + + let mut txn = co.begin(); + txn.delete("o"); + txn.set("a", "a"); + txn.set("b", "b"); + txn.commit().await?; + + println!("o = {:?}", co.get("o").await?); + println!("a = {:?}", co.get("a").await?); + println!("b = {:?}", co.get("b").await?); + + Ok(()) +} diff --git a/src/client/examples/database.rs b/src/client/examples/database.rs new file mode 100644 index 00000000..844da8d0 --- /dev/null +++ b/src/client/examples/database.rs @@ -0,0 +1,47 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::Result; +use engula_client::{Any, Universe}; + +#[tokio::main] +async fn main() -> Result<()> { + let url = "http://localhost:21716"; + let uv = Universe::connect(url).await?; + let db = uv.create_database("db").await?; + let ca = db.create_collection::("ca").await?; + let cb = db.create_collection::("cb").await?; + + let txn = db.begin(); + { + let mut t = ca.begin_with(txn.clone()); + t.set("a1", 1); + t.set("a2", 2); + t.commit().await?; + } + { + let mut t = cb.begin_with(txn.clone()); + t.set("b1", "b1"); + t.set("b2", "b2"); + t.commit().await?; + } + txn.commit().await?; + + println!("a1 = {:?}", ca.get("a1").await?); + println!("a2 = {:?}", ca.get("a2").await?); + println!("b1 = {:?}", cb.get("b1").await?); + println!("b2 = {:?}", cb.get("b2").await?); + + Ok(()) +} diff --git a/src/client/examples/i64.rs b/src/client/examples/i64.rs new file mode 100644 index 00000000..d15ffe53 --- /dev/null +++ b/src/client/examples/i64.rs @@ -0,0 +1,38 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use anyhow::Result; +use engula_client::{Universe, I64}; + +#[tokio::main] +async fn main() -> Result<()> { + let url = "http://localhost:21716"; + let uv = Universe::connect(url).await?; + let db = uv.create_database("i64").await?; + let co = db.create_collection::("i64").await?; + + co.set("o", 1).await?; + println!("o = {:?}", co.get("o").await?); + co.object("o").add(2).await?; + println!("o = {:?}", co.get("o").await?); + co.object("o").sub(3).await?; + println!("o = {:?}", co.get("o").await?); + + let mut txn = co.object("o").begin(); + txn.add(1).add(2).sub(1); + txn.commit().await?; + println!("o = {:?}", co.object("o").load().await?); + + Ok(()) +} diff --git a/src/client/examples/list.rs b/src/client/examples/list.rs new file mode 100644 index 00000000..0b3b7eae --- /dev/null +++ b/src/client/examples/list.rs @@ -0,0 +1,59 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use anyhow::Result; +use engula_client::{Any, Blob, List, Universe, I64}; + +#[tokio::main] +async fn main() -> Result<()> { + let url = "http://localhost:21716"; + let uv = Universe::connect(url).await?; + let db = uv.create_database("list").await?; + + { + let c = db.create_collection::>("list").await?; + println!("{}", c.name()); + let mut txn = c.object("o").begin(); + txn.store(vec![1.into(), 2.into()]) + .push_back("3") + .push_front("0"); + txn.commit().await?; + println!("o = {:?}", c.object("o").load().await?); + println!("o.len = {:?}", c.object("o").len().await?); + } + + { + let c = db.create_collection::>("list").await?; + println!("{}", c.name()); + c.set("o", [1, 2]).await?; + println!("o = {:?}", c.get("o").await?); + c.object("o").push_back(3).await?; + c.object("o").push_front(0).await?; + println!("o = {:?}", c.object("o").load().await?); + println!("o.len = {:?}", c.object("o").len().await?); + } + + { + let c = db.create_collection::>("list").await?; + println!("{}", c.name()); + c.set("o", [vec![1, 2], vec![3, 4]]).await?; + println!("o = {:?}", c.get("o").await?); + c.object("o").push_back(vec![5, 6]).await?; + c.object("o").push_front(vec![0]).await?; + println!("o = {:?}", c.object("o").load().await?); + println!("o.len = {:?}", c.object("o").len().await?); + } + + Ok(()) +} diff --git a/src/client/examples/map.rs b/src/client/examples/map.rs new file mode 100644 index 00000000..6f11810d --- /dev/null +++ b/src/client/examples/map.rs @@ -0,0 +1,59 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::Result; +use engula_client::{Any, Blob, Map, Universe, I64}; + +#[tokio::main] +async fn main() -> Result<()> { + let url = "http://localhost:21716"; + let uv = Universe::connect(url).await?; + let db = uv.create_database("map").await?; + + let (k1, k2, k3) = (vec![1], vec![2], vec![3]); + + { + let c = db.create_collection::>("map").await?; + println!("{}", c.name()); + let mut txn = c.object("o").begin(); + txn.store([(k1.clone(), 1.into()), (k2.clone(), "2".into())]); + txn.set(k3.clone(), 3).delete(k2.clone()); + txn.commit().await?; + println!("o = {:?}", c.get("o").await?); + } + + { + let c = db.create_collection::>("map").await?; + println!("{}", c.name()); + c.set("o", [(k1.clone(), 1), (k2.clone(), 2)]).await?; + println!("o = {:?}", c.get("o").await?); + println!("o.len = {:?}", c.object("o").len().await?); + c.object("o").set(k3.clone(), 3).await?; + c.object("o").delete(k2.clone()).await?; + println!("o = {:?}", c.object("o").load().await?); + } + + { + let c = db.create_collection::>("map").await?; + println!("{}", c.name()); + c.set("o", [(k1.clone(), k1.clone()), (k2.clone(), k2.clone())]) + .await?; + println!("o = {:?}", c.get("o").await?); + println!("o.len = {:?}", c.object("o").len().await?); + println!("o[k1] = {:?}", c.object("o").get(k1.clone()).await?); + println!("o[k2] = {:?}", c.object("o").get(k2.clone()).await?); + } + + Ok(()) +} diff --git a/src/client/examples/tutorial.rs b/src/client/examples/tutorial.rs new file mode 100644 index 00000000..b76ac983 --- /dev/null +++ b/src/client/examples/tutorial.rs 
@@ -0,0 +1,156 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::Result; +use engula_client::{Any, Blob, List, Map, Universe, I64}; + +#[tokio::main] +async fn main() -> Result<()> { + // The address of the server you started above. + let url = "http://localhost:21716"; + let uv = Universe::connect(url).await?; + let db = uv.create_database("db").await?; + let c1 = db.create_collection::("c1").await?; + let c2 = db.create_collection::("c2").await?; + let c3 = db.create_collection::("c3").await?; + let c4 = db.create_collection::>("c4").await?; + let c5 = db.create_collection::>("c5").await?; + let c6 = db.create_collection::>("c6").await?; + let c7 = db.create_collection::>("c7").await?; + let c8 = db.create_collection::>("c8").await?; + let c9 = db.create_collection::>("c9").await?; + + // Manipulate collections + { + // Sets the Any object with i64 (I64) + c1.set("o", 1).await?; + // Sets the Any object with Vec (Blob) + c1.set("o", vec![1u8, 2u8]).await?; + // Sets the Any object with Vec (List) + c1.set("o", vec![1i64, 2i64]).await?; + // Gets and prints the object + println!("c1.o = {:?}", c1.get("o").await?); + // Deletes the object + c1.delete("o").await?; + // Sets the I64 object with i64 + c2.set("o", 1).await?; + println!("c2.o = {:?}", c2.get("o").await?); + // Sets the Blob object with Vec + c3.set("o", vec![1, 2]).await?; + println!("c3.o = {:?}", c3.get("o").await?); + // Sets the List 
object with Vec + c4.set("o", vec![1.into(), 2.into()]).await?; + println!("c4.o = {:?}", c4.get("o").await?); + // Sets the List object with Vec + c5.set("o", vec![1, 2]).await?; + println!("c5.o = {:?}", c5.get("o").await?); + // Sets the List object with Vec> + c6.set("o", vec![vec![1], vec![2]]).await?; + println!("c6.o = {:?}", c6.get("o").await?); + // Sets the Map object with HashMap, Value> + c7.set("o", [(vec![1], 1.into()), (vec![2], vec![2u8].into())]) + .await?; + println!("c7.o = {:?}", c7.get("o").await?); + // Sets the Map object with HashMap, i64> + c8.set("o", [(vec![1], 1), (vec![2], 2)]).await?; + println!("c8.o = {:?}", c8.get("o").await?); + // Sets the Map object with HashMap, Vec> + c9.set("o", [(vec![1], vec![1]), (vec![2], vec![2])]) + .await?; + println!("c9.o = {:?}", c9.get("o").await?); + } + + // Manipulate individual objects + { + // Any object + c1.set("o", 1).await?; + c1.object("o").add(1).await?; + println!("c1.o = {:?}", c1.get("o").await?); + // I64 object + c2.object("o").add(2).await?; + println!("c2.o = {:?}", c2.get("o").await?); + // Blob object + c3.object("o").append(vec![3u8, 4u8]).await?; + println!("c3.o = {:?}", c3.get("o").await?); + println!("c3.o.len = {:?}", c3.object("o").len().await?); + // List object + c5.object("o").push_back(3).await?; + c5.object("o").push_front(0).await?; + println!("c5.o = {:?}", c5.get("o").await?); + println!("c5.o.len = {:?}", c5.object("o").len().await?); + // Map object + c9.object("o").set(vec![3], vec![3]).await?; + c9.object("o").delete(vec![1]).await?; + println!("c9.o = {:?}", c9.get("o").await?); + println!("c9.o.len = {:?}", c9.object("o").len().await?); + println!("c9.o.[3] = {:?}", c9.object("o").get(vec![3]).await?); + } + + // Object-level transactions + { + // Updates a List object in a transaction. 
+ let mut txn = c5.object("txn").begin(); + txn.push_back(1).push_front(0); + txn.commit().await?; + println!("c5.txn = {:?}", c5.get("txn").await?); + // Updates a Map object in a transaction. + let mut txn = c9.object("txn").begin(); + txn.set(vec![1], vec![1]) + .set(vec![2], vec![2]) + .delete(vec![3]); + txn.commit().await?; + println!("c9.txn = {:?}", c9.get("txn").await?); + } + + // Collection-level transactions + { + // Updates multiple I64 objects in a transaction. + let mut txn = c2.begin(); + txn.set("a", 1); + txn.object("b").add(1).sub(2); + txn.commit().await?; + // Updates multiple List objects in a transaction. + let mut txn = c5.begin(); + txn.set("a", vec![1, 2]); + txn.object("b").push_back(3).push_front(0); + txn.commit().await?; + } + + // Database-level transactions + { + // Begins a database transaction + let txn = db.begin(); + { + // Begins a collection sub-transaction + let mut t = c5.begin_with(txn.clone()); + t.set("a", vec![1, 2]); + t.object("b").push_back(3); + // Commits the sub-transaction. + // Note that the operations will not be executed yet. + t.commit().await?; + } + { + // Begins another collection sub-transaction + let mut t = c9.begin_with(txn.clone()); + t.set("a", [(vec![1], vec![1]), (vec![2], vec![2])]); + t.object("b").set(vec![3], vec![3]); + t.commit().await?; + } + // Commits the database transaction and executes all the sub-transactions. + // This will fail if there is any uncommitted sub-transaction. + txn.commit().await?; + } + + Ok(()) +} diff --git a/src/client/examples/txn.rs b/src/client/examples/txn.rs new file mode 100644 index 00000000..ea3ef2b7 --- /dev/null +++ b/src/client/examples/txn.rs @@ -0,0 +1,80 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::Result; +use engula_client::{Any, Blob, List, Map, Universe, I64}; + +#[tokio::main] +async fn main() -> Result<()> { + let url = "http://localhost:21716"; + let uv = Universe::connect(url).await?; + let db = uv.create_database("txn").await?; + let c0 = db.create_collection::("any").await?; + let c1 = db.create_collection::("i64").await?; + let c2 = db.create_collection::("blob").await?; + let c3 = db.create_collection::>("list").await?; + let c4 = db.create_collection::>("map").await?; + + let txn = db.begin(); + { + println!("{}", c0.name()); + let mut t = c0.begin_with(txn.clone()); + t.object("a").add(1).sub(2); + t.object("b").store(vec![1u8, 2u8]); + t.commit().await?; + } + { + println!("{}", c1.name()); + let mut t = c1.begin_with(txn.clone()); + t.object("a").add(1).sub(2); + t.object("b").sub(1).add(2); + t.commit().await?; + } + { + println!("{}", c2.name()); + let mut t = c2.begin_with(txn.clone()); + t.object("a").append(vec![1, 2]).append(vec![3, 4]); + t.object("b").append(vec![5, 6]).append(vec![7, 8]); + t.commit().await?; + } + { + println!("{}", c3.name()); + let mut t = c3.begin_with(txn.clone()); + t.object("a").push_back(1).push_back(2); + t.object("b").push_front(1).push_front(2); + t.commit().await?; + } + { + println!("{}", c4.name()); + let c = c4.begin_with(txn.clone()); + // TODO + c.commit().await?; + } + txn.commit().await?; + + { + println!("c0[a] = {:?}", c0.get("a").await?); + println!("c0[b] = {:?}", c0.get("b").await?); + println!("c1[a] = {:?}", c1.get("a").await?); + println!("c1[b] = {:?}", 
c1.get("b").await?); + println!("c2[a] = {:?}", c2.get("a").await?); + println!("c2[b] = {:?}", c2.get("b").await?); + println!("c3[a] = {:?}", c3.get("a").await?); + println!("c3[b] = {:?}", c3.get("b").await?); + println!("c4[a] = {:?}", c4.get("a").await?); + println!("c4[b] = {:?}", c4.get("b").await?); + } + + Ok(()) +} diff --git a/src/client/src/any.rs b/src/client/src/any.rs new file mode 100644 index 00000000..134d5482 --- /dev/null +++ b/src/client/src/any.rs @@ -0,0 +1,112 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use engula_apis::*; + +use crate::{expr::call, Client, ObjectValue, Result, Txn}; + +pub struct Any { + id: Vec, + index: Option, + dbname: String, + coname: String, + client: Client, +} + +impl Any { + pub(crate) fn new(id: Vec, dbname: String, coname: String, client: Client) -> Self { + Self { + id, + index: None, + dbname, + coname, + client, + } + } + + pub(crate) fn index(mut self, index: impl Into) -> Self { + self.index = Some(index.into()); + self + } + + pub fn begin(self) -> Txn { + Txn::new(self.id, self.dbname, self.coname, self.client) + } + + pub async fn load(self) -> Result> { + self.call(call::load()).await + } + + pub async fn store(self, value: impl Into) -> Result<()> { + self.call(call::store(value)).await?; + Ok(()) + } + + pub async fn reset(self) -> Result<()> { + self.call(call::reset()).await?; + Ok(()) + } + + pub async fn add(self, value: impl Into) -> Result<()> { + self.call(call::add(value)).await?; + Ok(()) + } + + pub async fn sub(self, value: impl Into) -> Result<()> { + self.call(call::sub(value)).await?; + Ok(()) + } + + pub async fn len(self) -> Result> { + let value = self.call(call::len()).await?; + i64::cast_from_option(value) + } + + pub(crate) async fn append(self, value: impl Into) -> Result<()> { + self.call(call::append(value)).await?; + Ok(()) + } + + pub(crate) async fn push_back(self, value: impl Into) -> Result<()> { + self.call(call::push_back(value)).await?; + Ok(()) + } + + pub(crate) async fn push_front(self, value: impl Into) -> Result<()> { + self.call(call::push_front(value)).await?; + Ok(()) + } + + async fn call(self, call: CallExpr) -> Result> { + let mut expr = Expr { + from: Some(expr::From::Id(self.id)), + ..Default::default() + }; + if let Some(index) = self.index { + let subexpr = Expr { + from: Some(expr::From::Index(index.into())), + call: Some(call), + ..Default::default() + }; + expr.subexprs.push(subexpr); + } else { + expr.call = Some(call); + } + let mut result = self + .client + 
.collection_expr(self.dbname, self.coname, expr) + .await?; + Ok(result.values.pop().and_then(|v| v.into())) + } +} diff --git a/src/client/src/client.rs b/src/client/src/client.rs new file mode 100644 index 00000000..66bdc54f --- /dev/null +++ b/src/client/src/client.rs @@ -0,0 +1,118 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use engula_apis::*; +use tonic::transport::Channel; + +use crate::{Error, Result}; + +#[derive(Clone)] +pub struct Client { + client: engula_client::EngulaClient, +} + +impl Client { + pub async fn connect(url: String) -> Result { + let client = engula_client::EngulaClient::connect(url) + .await + .map_err(|e| Error::internal(e.to_string()))?; + Ok(Self { client }) + } + + pub async fn txn(&self, req: TxnRequest) -> Result { + let res = self.client.clone().txn(req).await?; + Ok(res.into_inner()) + } + + pub async fn database_txn(&self, req: DatabaseTxnRequest) -> Result { + let req = TxnRequest { + requests: vec![req], + }; + let mut res = self.txn(req).await?; + res.responses + .pop() + .ok_or_else(|| Error::internal("missing database response")) + } + + pub async fn collection_txn( + &self, + dbname: String, + req: CollectionTxnRequest, + ) -> Result { + let req = DatabaseTxnRequest { + name: dbname, + requests: vec![req], + }; + let mut res = self.database_txn(req).await?; + res.responses + .pop() + .ok_or_else(|| Error::internal("missing collection response")) + } + + pub async fn 
collection_expr( + &self, + dbname: String, + coname: String, + expr: Expr, + ) -> Result { + let req = CollectionTxnRequest { + name: coname, + exprs: vec![expr], + }; + let mut res = self.collection_txn(dbname, req).await?; + res.results + .pop() + .ok_or_else(|| Error::internal("missing expression result")) + } + + pub async fn database(&self, req: DatabaseRequest) -> Result { + let res = self.client.clone().database(req).await?; + Ok(res.into_inner()) + } + + pub async fn database_union( + &self, + req: database_request_union::Request, + ) -> Result { + let req = DatabaseRequest { + requests: vec![DatabaseRequestUnion { request: Some(req) }], + }; + let mut res = self.database(req).await?; + res.responses + .pop() + .and_then(|x| x.response) + .ok_or_else(|| Error::internal("missing database response")) + } + + pub async fn collection(&self, req: CollectionRequest) -> Result { + let res = self.client.clone().collection(req).await?; + Ok(res.into_inner()) + } + + pub async fn collection_union( + &self, + dbname: String, + req: collection_request_union::Request, + ) -> Result { + let req = CollectionRequest { + dbname, + requests: vec![CollectionRequestUnion { request: Some(req) }], + }; + let mut res = self.collection(req).await?; + res.responses + .pop() + .and_then(|x| x.response) + .ok_or_else(|| Error::internal("missing collection response")) + } +} diff --git a/src/client/src/collection.rs b/src/client/src/collection.rs new file mode 100644 index 00000000..e737519d --- /dev/null +++ b/src/client/src/collection.rs @@ -0,0 +1,122 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{marker::PhantomData, sync::Arc}; + +use engula_apis::*; + +use crate::{Any, Client, CollectionTxn, DatabaseTxn, Error, Object, ObjectValue, Result}; + +#[derive(Clone)] +pub struct Collection { + inner: Arc, + _marker: PhantomData, +} + +impl Collection { + pub(crate) fn new(coname: String, dbname: String, client: Client) -> Self { + let inner = CollectionInner { + dbname, + coname, + client, + }; + Self { + inner: Arc::new(inner), + _marker: PhantomData, + } + } + + pub fn name(&self) -> &str { + &self.inner.coname + } + + pub async fn desc(&self) -> Result { + let req = DescribeCollectionRequest { + name: self.inner.coname.clone(), + }; + let req = collection_request_union::Request::DescribeCollection(req); + let res = self.inner.collection_union_call(req).await?; + let desc = if let collection_response_union::Response::DescribeCollection(res) = res { + res.desc + } else { + None + }; + desc.ok_or_else(|| Error::internal("missing collection description")) + } + + pub fn begin(&self) -> CollectionTxn { + self.inner.new_txn() + } + + pub fn begin_with(&self, parent: DatabaseTxn) -> CollectionTxn { + parent.collection(self.inner.coname.clone()) + } + + pub fn object(&self, id: impl Into>) -> T { + self.inner.new_object(id.into()) + } +} + +// Provides common interfaces for convenience. 
+impl Collection { + fn any(&self, id: impl Into>) -> Any { + self.inner.new_object(id.into()) + } + + pub async fn get(&self, id: impl Into>) -> Result> { + let value = self.any(id).load().await?; + T::Value::cast_from_option(value) + } + + pub async fn set(&self, id: impl Into>, value: impl Into) -> Result<()> { + self.any(id).store(value.into()).await + } + + pub async fn delete(&self, id: impl Into>) -> Result<()> { + self.any(id).reset().await + } +} + +pub struct CollectionInner { + dbname: String, + coname: String, + client: Client, +} + +impl CollectionInner { + fn new_txn(&self) -> CollectionTxn { + CollectionTxn::new( + self.dbname.clone(), + self.coname.clone(), + self.client.clone(), + ) + } + + fn new_object(&self, id: Vec) -> T { + Any::new( + id, + self.dbname.clone(), + self.coname.clone(), + self.client.clone(), + ) + .into() + } + + async fn collection_union_call( + &self, + req: collection_request_union::Request, + ) -> Result { + self.client.collection_union(self.dbname.clone(), req).await + } +} diff --git a/src/client/src/database.rs b/src/client/src/database.rs new file mode 100644 index 00000000..18cfa9ab --- /dev/null +++ b/src/client/src/database.rs @@ -0,0 +1,104 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::sync::Arc; + +use engula_apis::*; + +use crate::{Client, Collection, DatabaseTxn, Error, Object, Result}; + +#[derive(Clone)] +pub struct Database { + inner: Arc, +} + +impl Database { + pub fn new(name: String, client: Client) -> Self { + let inner = DatabaseInner { name, client }; + Self { + inner: Arc::new(inner), + } + } + + pub async fn desc(&self) -> Result { + let req = DescribeDatabaseRequest { + name: self.inner.name.clone(), + }; + let req = database_request_union::Request::DescribeDatabase(req); + let res = self.inner.database_union_call(req).await?; + let desc = if let database_response_union::Response::DescribeDatabase(res) = res { + res.desc + } else { + None + }; + desc.ok_or_else(|| Error::internal("missing database description")) + } + + pub fn begin(&self) -> DatabaseTxn { + self.inner.new_txn() + } + + pub fn collection(&self, name: &str) -> Collection { + self.inner.new_collection(name.to_owned()) + } + + pub async fn create_collection(&self, name: &str) -> Result> { + let desc = CollectionDesc { + name: name.to_owned(), + ..Default::default() + }; + let req = CreateCollectionRequest { desc: Some(desc) }; + let req = collection_request_union::Request::CreateCollection(req); + self.inner.collection_union_call(req).await?; + Ok(self.collection(name)) + } + + pub async fn delete_collection(&self, name: &str) -> Result<()> { + let req = DeleteCollectionRequest { + name: name.to_owned(), + }; + let req = collection_request_union::Request::DeleteCollection(req); + self.inner.collection_union_call(req).await?; + Ok(()) + } +} + +struct DatabaseInner { + name: String, + client: Client, +} + +impl DatabaseInner { + fn new_txn(&self) -> DatabaseTxn { + DatabaseTxn::new(self.name.clone(), self.client.clone()) + } + + fn new_collection(&self, name: String) -> Collection { + Collection::new(name, self.name.clone(), self.client.clone()) + } + + async fn database_union_call( + &self, + req: database_request_union::Request, + ) -> Result { + 
self.client.database_union(req).await + } + + async fn collection_union_call( + &self, + req: collection_request_union::Request, + ) -> Result { + self.client.collection_union(self.name.clone(), req).await + } +} diff --git a/src/client/src/error.rs b/src/client/src/error.rs new file mode 100644 index 00000000..be559104 --- /dev/null +++ b/src/client/src/error.rs @@ -0,0 +1,16 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub type Error = tonic::Status; +pub type Result = std::result::Result; diff --git a/src/client/src/expr/call.rs b/src/client/src/expr/call.rs new file mode 100644 index 00000000..ee7ddc8e --- /dev/null +++ b/src/client/src/expr/call.rs @@ -0,0 +1,66 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use engula_apis::*; + +macro_rules! 
call_expr { + ($func:expr) => { + CallExpr { + func: $func as i32, + args: vec![], + } + }; + ($func:expr, $arg0:expr) => { + CallExpr { + func: $func as i32, + args: vec![$arg0.into()], + } + }; +} + +pub fn load() -> CallExpr { + call_expr!(Function::Load) +} + +pub fn store(value: impl Into) -> CallExpr { + call_expr!(Function::Store, value.into()) +} + +pub fn reset() -> CallExpr { + call_expr!(Function::Reset) +} + +pub fn add(value: impl Into) -> CallExpr { + call_expr!(Function::Add, value.into()) +} + +pub fn sub(value: impl Into) -> CallExpr { + call_expr!(Function::Sub, value.into()) +} + +pub fn len() -> CallExpr { + call_expr!(Function::Len) +} + +pub fn append(value: impl Into) -> CallExpr { + call_expr!(Function::Append, value.into()) +} + +pub fn push_back(value: impl Into) -> CallExpr { + call_expr!(Function::PushBack, value.into()) +} + +pub fn push_front(value: impl Into) -> CallExpr { + call_expr!(Function::PushFront, value.into()) +} diff --git a/src/journal/src/grpc/proto.rs b/src/client/src/expr/mod.rs similarity index 87% rename from src/journal/src/grpc/proto.rs rename to src/client/src/expr/mod.rs index 962cec13..f47ae079 100644 --- a/src/journal/src/grpc/proto.rs +++ b/src/client/src/expr/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,4 +12,4 @@ // See the License for the specific language governing permissions and // limitations under the License. -tonic::include_proto!("engula.journal.v1"); +pub mod call; diff --git a/src/client/src/lib.rs b/src/client/src/lib.rs new file mode 100644 index 00000000..49b26e56 --- /dev/null +++ b/src/client/src/lib.rs @@ -0,0 +1,38 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod any; +mod client; +mod collection; +mod database; +mod error; +mod expr; +mod object; +mod txn; +mod types; +mod universe; + +pub use self::{ + any::Any, + collection::Collection, + database::Database, + error::{Error, Result}, + txn::{CollectionTxn, DatabaseTxn, Txn}, + types::{Blob, List, Map, I64}, + universe::Universe, +}; +pub(crate) use self::{ + client::Client, + object::{Object, ObjectValue}, +}; diff --git a/src/client/src/object.rs b/src/client/src/object.rs new file mode 100644 index 00000000..530569d9 --- /dev/null +++ b/src/client/src/object.rs @@ -0,0 +1,126 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::{collections::HashMap, hash::Hash}; + +use engula_apis::*; + +use crate::{Any, Error, Result, Txn}; + +pub trait Object: From { + type Txn: From; + type Value: ObjectValue; +} + +impl Object for Any { + type Txn = Txn; + type Value = Value; +} + +pub trait ObjectValue: Into { + fn cast_from(v: Value) -> Result; + + fn cast_from_option(v: Option) -> Result> { + if let Some(v) = v { + Ok(Some(Self::cast_from(v)?)) + } else { + Ok(None) + } + } +} + +impl ObjectValue for Value { + fn cast_from(v: Value) -> Result { + Ok(v) + } +} + +impl ObjectValue for i64 { + fn cast_from(v: Value) -> Result { + if let Value::I64Value(v) = v { + Ok(v) + } else { + Err(Error::invalid_argument(format!("{:?} to i64", v))) + } + } +} + +impl ObjectValue for Vec { + fn cast_from(v: Value) -> Result { + if let Value::BlobValue(v) = v { + Ok(v) + } else { + Err(Error::invalid_argument(format!("{:?} to Vec", v))) + } + } +} + +impl ObjectValue for String { + fn cast_from(v: Value) -> Result { + if let Value::TextValue(v) = v { + Ok(v) + } else { + Err(Error::invalid_argument(format!("{:?} to String", v,))) + } + } +} + +impl ObjectValue for HashMap +where + K: ObjectValue + Ord + Hash, + V: ObjectValue, +{ + fn cast_from(v: Value) -> Result { + if let Value::MappingValue(v) = v { + let keys: Result> = v + .keys + .into_iter() + .map(|x| { + x.value + .ok_or_else(|| Error::invalid_argument("missing value")) + .and_then(K::cast_from) + }) + .collect(); + let values: Result> = v + .values + .into_iter() + .map(|x| { + x.value + .ok_or_else(|| Error::invalid_argument("missing value")) + .and_then(V::cast_from) + }) + .collect(); + Ok(keys?.into_iter().zip(values?).collect()) + } else { + Err(Error::invalid_argument(format!("{:?} to Map", v,))) + } + } +} + +impl ObjectValue for Vec { + fn cast_from(v: Value) -> Result { + if let Value::RepeatedValue(v) = v { + v.values + .into_iter() + .map(|x| { + x.value + .ok_or_else(|| Error::invalid_argument("missing value")) + 
.and_then(T::cast_from) + }) + .collect() + } else { + Err(Error::invalid_argument(format!("{:?} to Vec", v,))) + } + } +} diff --git a/src/client/src/txn.rs b/src/client/src/txn.rs new file mode 100644 index 00000000..e3216ed5 --- /dev/null +++ b/src/client/src/txn.rs @@ -0,0 +1,262 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{ + marker::PhantomData, + sync::{Arc, Mutex}, +}; + +use engula_apis::*; + +use crate::{expr::call, Client, Error, Object, Result}; + +#[derive(Clone)] +pub struct DatabaseTxn { + inner: Arc, +} + +struct DatabaseTxnInner { + handle: DatabaseTxnHandle, + requests: Mutex>, +} + +struct DatabaseTxnHandle { + dbname: String, + client: Client, +} + +impl DatabaseTxn { + pub(crate) fn new(dbname: String, client: Client) -> Self { + let inner = DatabaseTxnInner { + handle: DatabaseTxnHandle { dbname, client }, + requests: Mutex::new(Vec::new()), + }; + Self { + inner: Arc::new(inner), + } + } + + pub(crate) fn collection(&self, coname: String) -> CollectionTxn { + CollectionTxn::new_with(coname, self.inner.clone()) + } + + pub async fn commit(self) -> Result<()> { + let inner = + Arc::try_unwrap(self.inner).map_err(|_| Error::aborted("pending transaction"))?; + let handle = inner.handle; + let req = DatabaseTxnRequest { + name: handle.dbname, + requests: inner.requests.into_inner().unwrap(), + }; + handle.client.database_txn(req).await?; + Ok(()) + } +} + +pub struct CollectionTxn { 
+ inner: Arc, + subtxn: Option, + _marker: PhantomData, +} + +struct CollectionTxnInner { + coname: String, + handle: Option, + parent: Option>, + exprs: Mutex>, +} + +struct CollectionTxnHandle { + dbname: String, + coname: String, + client: Client, +} + +impl CollectionTxn { + pub(crate) fn new(dbname: String, coname: String, client: Client) -> Self { + let handle = DatabaseTxnHandle { dbname, client }; + Self::new_inner(coname, Some(handle), None) + } + + fn new_with(coname: String, parent: Arc) -> Self { + Self::new_inner(coname, None, Some(parent)) + } + + fn new_inner( + coname: String, + handle: Option, + parent: Option>, + ) -> Self { + let inner = CollectionTxnInner { + coname, + handle, + parent, + exprs: Mutex::new(Vec::new()), + }; + Self { + inner: Arc::new(inner), + subtxn: None, + _marker: PhantomData, + } + } + + pub fn object(&mut self, id: impl Into>) -> &mut T::Txn { + self.subtxn = Some(self.txn(id).into()); + self.subtxn.as_mut().unwrap() + } + + pub async fn commit(mut self) -> Result<()> { + // Consumes the pending transaction. + self.subtxn.take(); + let inner = + Arc::try_unwrap(self.inner).map_err(|_| Error::aborted("pending transaction"))?; + let req = CollectionTxnRequest { + name: inner.coname, + exprs: inner.exprs.into_inner().unwrap(), + }; + if let Some(handle) = inner.handle { + handle.client.collection_txn(handle.dbname, req).await?; + } else { + let parent = inner.parent.unwrap(); + parent.requests.lock().unwrap().push(req); + } + Ok(()) + } +} + +// Provides common interfaces for convenience. 
+impl CollectionTxn { + fn txn(&self, id: impl Into>) -> Txn { + Txn::new_with(id.into(), self.inner.clone()) + } + + pub fn set(&mut self, id: impl Into>, value: impl Into) { + self.txn(id).store(value.into()); + } + + pub fn delete(&mut self, id: impl Into>) { + self.txn(id).reset(); + } +} + +pub struct Txn { + handle: Option, + parent: Option>, + expr: Expr, +} + +impl Txn { + pub(crate) fn new(id: Vec, dbname: String, coname: String, client: Client) -> Self { + let handle = CollectionTxnHandle { + dbname, + coname, + client, + }; + Self::new_inner(id, Some(handle), None) + } + + fn new_with(id: Vec, parent: Arc) -> Self { + Self::new_inner(id, None, Some(parent)) + } + + fn new_inner( + id: Vec, + handle: Option, + parent: Option>, + ) -> Self { + Self { + handle, + parent, + expr: Expr { + from: Some(expr::From::Id(id)), + ..Default::default() + }, + } + } + + fn add_call(&mut self, call: CallExpr) -> &mut Self { + let expr = Expr { + call: Some(call), + ..Default::default() + }; + self.expr.subexprs.push(expr); + self + } + + fn add_index_call(&mut self, index: impl Into, call: CallExpr) -> &mut Self { + let expr = Expr { + from: Some(expr::From::Index(index.into().into())), + call: Some(call), + ..Default::default() + }; + self.expr.subexprs.push(expr); + self + } + + pub fn store(&mut self, value: impl Into) -> &mut Self { + self.add_call(call::store(value)) + } + + pub fn reset(&mut self) -> &mut Self { + self.add_call(call::reset()) + } + + pub fn add(&mut self, value: impl Into) -> &mut Self { + self.add_call(call::add(value)) + } + + pub fn sub(&mut self, value: impl Into) -> &mut Self { + self.add_call(call::sub(value)) + } + + pub(crate) fn append(&mut self, value: impl Into) -> &mut Self { + self.add_call(call::append(value)) + } + + pub(crate) fn push_back(&mut self, value: impl Into) -> &mut Self { + self.add_call(call::push_back(value)) + } + + pub(crate) fn push_front(&mut self, value: impl Into) -> &mut Self { + 
self.add_call(call::push_front(value)) + } + + pub(crate) fn set(&mut self, index: impl Into, value: impl Into) -> &mut Self { + self.add_index_call(index, call::store(value)) + } + + pub(crate) fn delete(&mut self, index: impl Into) -> &mut Self { + self.add_index_call(index, call::reset()) + } + + pub async fn commit(mut self) -> Result<()> { + if let Some(handle) = self.handle.take() { + let expr = std::mem::take(&mut self.expr); + handle + .client + .collection_expr(handle.dbname, handle.coname, expr) + .await?; + } + Ok(()) + } +} + +impl Drop for Txn { + fn drop(&mut self) { + if let Some(parent) = self.parent.take() { + let expr = std::mem::take(&mut self.expr); + parent.exprs.lock().unwrap().push(expr); + } + } +} diff --git a/src/client/src/types/blob.rs b/src/client/src/types/blob.rs new file mode 100644 index 00000000..b583ff1e --- /dev/null +++ b/src/client/src/types/blob.rs @@ -0,0 +1,84 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::{Any, Object, ObjectValue, Result, Txn}; + +pub struct Blob(Any); + +impl Object for Blob { + type Txn = BlobTxn; + type Value = Vec; +} + +impl From for Blob { + fn from(ob: Any) -> Self { + Self(ob) + } +} + +impl Blob { + pub fn begin(self) -> BlobTxn { + self.0.begin().into() + } + + pub async fn load(self) -> Result>> { + let value = self.0.load().await?; + Vec::cast_from_option(value) + } + + pub async fn store(self, value: impl Into>) -> Result<()> { + self.0.store(value.into()).await + } + + pub async fn reset(self) -> Result<()> { + self.0.reset().await + } + + pub async fn len(self) -> Result> { + self.0.len().await + } + + pub async fn append(self, value: Vec) -> Result<()> { + self.0.append(value).await + } +} + +pub struct BlobTxn(Txn); + +impl From for BlobTxn { + fn from(txn: Txn) -> Self { + Self(txn) + } +} + +impl BlobTxn { + pub fn store(&mut self, value: impl Into>) -> &mut Self { + self.0.store(value.into()); + self + } + + pub fn reset(&mut self) -> &mut Self { + self.0.reset(); + self + } + + pub fn append(&mut self, value: impl Into>) -> &mut Self { + self.0.append(value.into()); + self + } + + pub async fn commit(self) -> Result<()> { + self.0.commit().await + } +} diff --git a/src/client/src/types/i64.rs b/src/client/src/types/i64.rs new file mode 100644 index 00000000..b5c2bbec --- /dev/null +++ b/src/client/src/types/i64.rs @@ -0,0 +1,89 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{Any, Object, ObjectValue, Result, Txn}; + +pub struct I64(Any); + +impl Object for I64 { + type Txn = I64Txn; + type Value = i64; +} + +impl From for I64 { + fn from(ob: Any) -> Self { + Self(ob) + } +} + +impl I64 { + pub fn begin(self) -> I64Txn { + self.0.begin().into() + } + + pub async fn load(self) -> Result> { + let value = self.0.load().await?; + i64::cast_from_option(value) + } + + pub async fn store(self, value: i64) -> Result<()> { + self.0.store(value).await + } + + pub async fn reset(self) -> Result<()> { + self.0.reset().await + } + + pub async fn add(self, value: i64) -> Result<()> { + self.0.add(value).await + } + + pub async fn sub(self, value: i64) -> Result<()> { + self.0.sub(value).await + } +} + +pub struct I64Txn(Txn); + +impl From for I64Txn { + fn from(txn: Txn) -> Self { + Self(txn) + } +} + +impl I64Txn { + pub fn store(&mut self, value: i64) -> &mut Self { + self.0.store(value); + self + } + + pub fn reset(&mut self) -> &mut Self { + self.0.reset(); + self + } + + pub fn add(&mut self, value: i64) -> &mut Self { + self.0.add(value); + self + } + + pub fn sub(&mut self, value: i64) -> &mut Self { + self.0.sub(value); + self + } + + pub async fn commit(self) -> Result<()> { + self.0.commit().await + } +} diff --git a/src/client/src/types/list.rs b/src/client/src/types/list.rs new file mode 100644 index 00000000..11a2aef8 --- /dev/null +++ b/src/client/src/types/list.rs @@ -0,0 +1,128 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::marker::PhantomData; + +use crate::{Any, Object, ObjectValue, Result, Txn}; + +pub struct List { + ob: Any, + _marker: PhantomData, +} + +impl From for List { + fn from(ob: Any) -> Self { + Self { + ob, + _marker: PhantomData, + } + } +} + +impl Object for List +where + T: Object, + Vec: ObjectValue, +{ + type Txn = ListTxn; + type Value = Vec; +} + +impl List +where + T: Object, + Vec: ObjectValue, +{ + pub fn begin(self) -> ListTxn { + self.ob.begin().into() + } + + pub async fn load(self) -> Result>> { + let value = self.ob.load().await?; + Vec::cast_from_option(value) + } + + pub async fn store(self, value: impl Into>) -> Result<()> { + self.ob.store(value.into()).await + } + + pub async fn reset(self) -> Result<()> { + self.ob.reset().await + } + + pub async fn len(self) -> Result> { + self.ob.len().await + } + + pub async fn append(self, value: impl Into>) -> Result<()> { + self.ob.append(value.into()).await + } + + pub async fn push_back(self, value: impl Into) -> Result<()> { + self.ob.push_back(value.into()).await + } + + pub async fn push_front(self, value: impl Into) -> Result<()> { + self.ob.push_front(value.into()).await + } +} + +pub struct ListTxn { + txn: Txn, + _marker: PhantomData, +} + +impl From for ListTxn { + fn from(txn: Txn) -> Self { + Self { + txn, + _marker: PhantomData, + } + } +} + +impl ListTxn +where + T: Object, + Vec: ObjectValue, +{ + pub fn store(&mut self, value: impl Into>) -> &mut Self { + self.txn.store(value.into()); + self + } + + pub fn reset(&mut self) -> &mut Self { + self.txn.reset(); + self 
+ } + + pub fn append(&mut self, value: impl Into) -> &mut Self { + self.txn.append(value.into()); + self + } + + pub fn push_back(&mut self, value: impl Into) -> &mut Self { + self.txn.push_back(value.into()); + self + } + + pub fn push_front(&mut self, value: impl Into) -> &mut Self { + self.txn.push_front(value.into()); + self + } + + pub async fn commit(self) -> Result<()> { + self.txn.commit().await + } +} diff --git a/src/client/src/types/map.rs b/src/client/src/types/map.rs new file mode 100644 index 00000000..9865157e --- /dev/null +++ b/src/client/src/types/map.rs @@ -0,0 +1,120 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::{collections::HashMap, marker::PhantomData}; + +use crate::{Any, Object, ObjectValue, Result, Txn}; + +pub struct Map { + ob: Any, + _marker: PhantomData, +} + +impl From for Map { + fn from(ob: Any) -> Self { + Self { + ob, + _marker: PhantomData, + } + } +} + +impl Object for Map +where + T: Object, + HashMap, T::Value>: ObjectValue, +{ + type Txn = MapTxn; + type Value = HashMap, T::Value>; +} + +impl Map +where + T: Object, + HashMap, T::Value>: ObjectValue, +{ + pub fn begin(self) -> MapTxn { + self.ob.begin().into() + } + + pub async fn load(self) -> Result, T::Value>>> { + let value = self.ob.load().await?; + HashMap::cast_from_option(value) + } + + pub async fn store(self, value: impl Into, T::Value>>) -> Result<()> { + self.ob.store(value.into()).await + } + + pub async fn reset(self) -> Result<()> { + self.ob.reset().await + } + + pub async fn len(self) -> Result> { + self.ob.len().await + } + + pub async fn get(self, key: impl Into>) -> Result> { + let value = self.ob.index(key.into()).load().await?; + T::Value::cast_from_option(value) + } + + pub async fn set(self, key: impl Into>, value: impl Into) -> Result<()> { + self.ob.index(key.into()).store(value.into()).await + } + + pub async fn delete(self, key: impl Into>) -> Result<()> { + self.ob.index(key.into()).reset().await + } +} + +pub struct MapTxn { + txn: Txn, + _marker: PhantomData, +} + +impl From for MapTxn { + fn from(txn: Txn) -> Self { + Self { + txn, + _marker: PhantomData, + } + } +} + +impl MapTxn { + pub fn store(&mut self, value: impl Into, T::Value>>) -> &mut Self { + self.txn.store(value.into()); + self + } + + pub fn reset(&mut self) -> &mut Self { + self.txn.reset(); + self + } + + pub fn set(&mut self, key: impl Into>, value: impl Into) -> &mut Self { + self.txn.set(key.into(), value.into()); + self + } + + pub fn delete(&mut self, key: impl Into>) -> &mut Self { + self.txn.delete(key.into()); + self + } + + pub async fn commit(self) -> Result<()> { + 
self.txn.commit().await + } +} diff --git a/src/client/src/types/mod.rs b/src/client/src/types/mod.rs new file mode 100644 index 00000000..8241e54d --- /dev/null +++ b/src/client/src/types/mod.rs @@ -0,0 +1,20 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod blob; +mod i64; +mod list; +mod map; + +pub use self::{blob::Blob, i64::I64, list::List, map::Map}; diff --git a/src/client/src/universe.rs b/src/client/src/universe.rs new file mode 100644 index 00000000..1bbae917 --- /dev/null +++ b/src/client/src/universe.rs @@ -0,0 +1,75 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::sync::Arc; + +use engula_apis::*; + +use crate::{Client, Database, Result}; + +#[derive(Clone)] +pub struct Universe { + inner: Arc, +} + +impl Universe { + pub async fn connect(url: impl Into) -> Result { + let client = Client::connect(url.into()).await?; + let inner = UniverseInner { client }; + Ok(Universe { + inner: Arc::new(inner), + }) + } + + pub fn database(&self, name: &str) -> Database { + self.inner.new_database(name.to_owned()) + } + + pub async fn create_database(&self, name: &str) -> Result { + let desc = DatabaseDesc { + name: name.to_owned(), + ..Default::default() + }; + let req = CreateDatabaseRequest { desc: Some(desc) }; + let req = database_request_union::Request::CreateDatabase(req); + self.inner.database_union_call(req).await?; + Ok(self.database(name)) + } + + pub async fn delete_database(&self, name: &str) -> Result<()> { + let req = DeleteDatabaseRequest { + name: name.to_owned(), + }; + let req = database_request_union::Request::DeleteDatabase(req); + self.inner.database_union_call(req).await?; + Ok(()) + } +} + +struct UniverseInner { + client: Client, +} + +impl UniverseInner { + fn new_database(&self, name: String) -> Database { + Database::new(name, self.client.clone()) + } + + async fn database_union_call( + &self, + req: database_request_union::Request, + ) -> Result { + self.client.database_union(req).await + } +} diff --git a/src/cooperator/Cargo.toml b/src/cooperator/Cargo.toml new file mode 100644 index 00000000..dc045ed7 --- /dev/null +++ b/src/cooperator/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "engula-cooperator" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +engula-apis = { version = "0.3", path = "../apis" } +engula-supervisor = { path = "../supervisor" } + +prost = "0.9" +tokio = { version = "1.15", features = ["full"] } +tonic = "0.6" + +[build-dependencies] +tonic-build = "0.6" diff --git a/src/kernel/build.rs b/src/cooperator/build.rs similarity index 72% rename from 
src/kernel/build.rs rename to src/cooperator/build.rs index d13361a7..31fc8b1e 100644 --- a/src/kernel/build.rs +++ b/src/cooperator/build.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,8 +13,11 @@ // limitations under the License. fn main() -> Result<(), Box> { - tonic_build::compile_protos("src/metadata.proto")?; - tonic_build::configure().compile(&["src/grpc/kernel.proto"], &["src"])?; - + tonic_build::configure() + .extern_path(".engula.v1", "::engula_apis") + .compile( + &["engula/cooperator/v1/cooperator.proto"], + &[".", "../apis"], + )?; Ok(()) } diff --git a/src/cooperator/engula/cooperator/v1/cooperator.proto b/src/cooperator/engula/cooperator/v1/cooperator.proto new file mode 100644 index 00000000..45c2c051 --- /dev/null +++ b/src/cooperator/engula/cooperator/v1/cooperator.proto @@ -0,0 +1,23 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package engula.cooperator.v1; + +import "engula/v1/txn.proto"; + +service Cooperator { + rpc txn(engula.v1.TxnRequest) returns (engula.v1.TxnResponse) {} +} diff --git a/src/kernel/src/grpc/proto.rs b/src/cooperator/src/apis.rs similarity index 82% rename from src/kernel/src/grpc/proto.rs rename to src/cooperator/src/apis.rs index 5a60bb44..9d3583ec 100644 --- a/src/kernel/src/grpc/proto.rs +++ b/src/cooperator/src/apis.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,6 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub use crate::{Version, VersionUpdate}; +#![allow(clippy::all)] -tonic::include_proto!("engula.kernel.v1"); +tonic::include_proto!("engula.cooperator.v1"); diff --git a/src/cooperator/src/args.rs b/src/cooperator/src/args.rs new file mode 100644 index 00000000..fa5bf2d0 --- /dev/null +++ b/src/cooperator/src/args.rs @@ -0,0 +1,80 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::collections::VecDeque; + +use engula_apis::*; + +use crate::{Error, Result}; + +pub struct Args(VecDeque); + +impl Args { + pub fn new(args: Vec) -> Self { + Self(args.into()) + } + + pub fn take(&mut self) -> Result { + self.0 + .pop_front() + .and_then(|v| v.value) + .ok_or_else(|| Error::invalid_argument("missing argument")) + } + + pub fn take_i64(&mut self) -> Result { + match self.take()? { + Value::I64Value(v) => Ok(v), + _ => Err(Error::invalid_argument("require i64")), + } + } + + pub fn take_numeric(&mut self) -> Result { + let v = self.take()?; + match v { + Value::I64Value(_) => Ok(v), + _ => Err(Error::invalid_argument("require numeric")), + } + } + + pub fn take_blob(&mut self) -> Result> { + match self.take()? { + Value::BlobValue(v) => Ok(v), + _ => Err(Error::invalid_argument("require blob")), + } + } + + pub fn take_text(&mut self) -> Result { + match self.take()? { + Value::TextValue(v) => Ok(v), + _ => Err(Error::invalid_argument("require text")), + } + } + + pub fn take_repeated(&mut self) -> Result { + match self.take()? { + Value::RepeatedValue(v) => Ok(v), + _ => Err(Error::invalid_argument("require repeated")), + } + } + + pub fn take_sequence(&mut self) -> Result { + let v = self.take()?; + match v { + Value::BlobValue(_) => Ok(v), + Value::TextValue(_) => Ok(v), + Value::RepeatedValue(_) => Ok(v), + _ => Err(Error::invalid_argument("require sequence")), + } + } +} diff --git a/src/cooperator/src/collection.rs b/src/cooperator/src/collection.rs new file mode 100644 index 00000000..1d614f60 --- /dev/null +++ b/src/cooperator/src/collection.rs @@ -0,0 +1,307 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{collections::BTreeMap, sync::Arc}; + +use engula_apis::*; +use tokio::sync::Mutex; + +use crate::{Args, Error, Result}; + +#[derive(Clone)] +pub struct Collection { + inner: Arc>, +} + +impl Collection { + pub fn new() -> Self { + Self { + inner: Arc::new(Mutex::new(Inner::new())), + } + } + + pub async fn execute(&self, req: CollectionTxnRequest) -> Result { + let mut inner = self.inner.lock().await; + let mut res = CollectionTxnResponse::default(); + for expr in req.exprs { + let result = inner.handle_expr(expr)?; + res.results.push(result); + } + Ok(res) + } +} + +struct Inner { + read_cache: BTreeMap, Value>, + _write_cache: BTreeMap, Vec>, +} + +impl Inner { + fn new() -> Self { + Self { + read_cache: BTreeMap::new(), + _write_cache: BTreeMap::new(), + } + } + + fn handle_expr(&mut self, expr: Expr) -> Result { + let id = if let Some(expr::From::Id(id)) = expr.from { + id + } else { + return Err(Error::invalid_argument("missing object id")); + }; + let mut result = ExprResult::default(); + if let Some(call) = expr.call { + self.handle_object_call(&id, call, &mut result)?; + } else { + let mut res = self.handle_object_exprs(&id, expr.subexprs)?; + let value = if res.values.len() <= 1 { + res.values.pop().unwrap_or_default() + } else { + RepeatedValue { values: res.values }.into() + }; + result.values.push(value); + } + Ok(result) + } + + fn handle_object_exprs(&mut self, id: &[u8], exprs: Vec) -> Result { + let mut result = ExprResult::default(); + for expr in exprs { + let call = expr + .call + .ok_or_else(|| 
Error::invalid_argument("missing call expr"))?; + if let Some(expr::From::Index(index)) = expr.from { + self.handle_member_call(id, call, index, &mut result)?; + } else { + self.handle_object_call(id, call, &mut result)?; + } + } + Ok(result) + } + + fn handle_object_call( + &mut self, + id: &[u8], + call: CallExpr, + result: &mut ExprResult, + ) -> Result<()> { + let func = Function::from_i32(call.func) + .ok_or_else(|| Error::invalid_argument("invalid function"))?; + let mut args = Args::new(call.args); + match func { + Function::Nop => {} + Function::Load => { + let value = self.read_cache.get(id).cloned(); + result.values.push(value.into()); + } + Function::Store => { + let value = args.take()?; + self.read_cache.insert(id.to_owned(), value); + } + Function::Reset => { + self.read_cache.remove(id); + } + Function::Add | Function::Sub => { + if let Some(value) = self.read_cache.get_mut(id) { + if let Value::I64Value(v) = value { + let operand = args.take_i64()?; + if func == Function::Add { + *v += operand; + } else { + *v -= operand; + } + } else { + return Err(Error::invalid_argument("require numeric object")); + } + } else { + let value = args.take_numeric()?; + self.read_cache.insert(id.to_owned(), value); + } + } + Function::Len => { + let len = if let Some(value) = self.read_cache.get(id) { + match value { + Value::BlobValue(v) => v.len(), + Value::TextValue(v) => v.len(), + Value::MappingValue(v) => v.keys.len(), + Value::RepeatedValue(v) => v.values.len(), + _ => return Err(Error::invalid_argument("require container object")), + } + } else { + 0 + }; + result.values.push(Value::I64Value(len as i64).into()); + } + Function::Append => { + if let Some(value) = self.read_cache.get_mut(id) { + match value { + Value::BlobValue(v) => { + let mut operand = args.take_blob()?; + v.append(&mut operand); + } + Value::TextValue(v) => { + let operand = args.take_text()?; + v.push_str(&operand); + } + Value::RepeatedValue(v) => { + let mut operand = 
args.take_repeated()?; + v.values.append(&mut operand.values); + } + _ => return Err(Error::invalid_argument("require sequence object")), + } + } else { + let value = args.take_sequence()?; + self.read_cache.insert(id.to_owned(), value); + } + } + Function::PushBack => { + let operand = args.take()?; + if let Some(value) = self.read_cache.get_mut(id) { + match value { + Value::RepeatedValue(v) => { + v.values.push(operand.into()); + } + _ => return Err(Error::invalid_argument("require sequence object")), + } + } else { + let value = RepeatedValue { + values: vec![operand.into()], + }; + self.read_cache.insert(id.to_owned(), value.into()); + } + } + Function::PushFront => { + let operand = args.take()?; + if let Some(value) = self.read_cache.get_mut(id) { + match value { + Value::RepeatedValue(v) => { + v.values.insert(0, operand.into()); + } + _ => return Err(Error::invalid_argument("require sequence object")), + } + } else { + let value = RepeatedValue { + values: vec![operand.into()], + }; + self.read_cache.insert(id.to_owned(), value.into()); + } + } + } + Ok(()) + } + + fn handle_member_call( + &mut self, + id: &[u8], + call: CallExpr, + index: GenericValue, + result: &mut ExprResult, + ) -> Result<()> { + let func = Function::from_i32(call.func) + .ok_or_else(|| Error::invalid_argument("invalid function"))?; + let mut args = Args::new(call.args); + match func { + Function::Nop => {} + Function::Load => { + if let Some(value) = self.read_cache.get(id) { + match value { + Value::MappingValue(v) => { + if let Some(pos) = v.keys.iter().position(|x| x == &index) { + result.values.push(v.values[pos].clone()); + } + } + Value::RepeatedValue(v) => { + if let Some(Value::I64Value(mut pos)) = index.value { + // TODO: do more checks here + let len = v.values.len() as i64; + pos += len; + if pos >= 0 && pos < len { + result.values.push(v.values[pos as usize].clone()); + } else { + return Err(Error::out_of_range("index out of range")); + } + } + } + _ => return 
Err(Error::invalid_argument("require container object")), + } + } + } + Function::Store => { + let operand = args.take()?; + if let Some(value) = self.read_cache.get_mut(id) { + match value { + Value::MappingValue(v) => { + if let Some(pos) = v.keys.iter().position(|x| x == &index) { + v.values[pos] = operand.into(); + } else { + v.keys.push(index); + v.values.push(operand.into()); + } + } + Value::RepeatedValue(v) => { + if let Some(Value::I64Value(mut pos)) = index.value { + let len = v.values.len() as i64; + pos += len; + if pos >= 0 && pos < len { + v.values[pos as usize] = operand.into(); + } else { + return Err(Error::out_of_range("index out of range")); + } + } + } + _ => return Err(Error::invalid_argument("require container object")), + } + } else { + match index.value { + Some(Value::BlobValue(_)) => { + let value = MappingValue { + keys: vec![index], + values: vec![operand.into()], + }; + self.read_cache.insert(id.to_owned(), value.into()); + } + _ => return Err(Error::invalid_argument("require blob index")), + } + } + } + Function::Reset => { + if let Some(value) = self.read_cache.get_mut(id) { + match value { + Value::MappingValue(v) => { + if let Some(pos) = v.keys.iter().position(|x| x == &index) { + v.keys.remove(pos); + v.values.remove(pos); + } + } + Value::RepeatedValue(v) => { + if let Some(Value::I64Value(mut pos)) = index.value { + let len = v.values.len() as i64; + pos += len; + if pos >= 0 && pos < len { + v.values.remove(pos as usize); + } else { + return Err(Error::out_of_range("index out of range")); + } + } + } + _ => return Err(Error::invalid_argument("require container object")), + } + } + } + _ => return Err(Error::invalid_argument("invalid member function")), + } + Ok(()) + } +} diff --git a/src/cooperator/src/cooperator.rs b/src/cooperator/src/cooperator.rs new file mode 100644 index 00000000..a53805ae --- /dev/null +++ b/src/cooperator/src/cooperator.rs @@ -0,0 +1,38 @@ +// Copyright 2022 The Engula Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use engula_apis::*; +use engula_supervisor::Supervisor; +use tonic::Request; + +use crate::{apis::cooperator_server::Cooperator as _, Result, Server}; + +#[derive(Clone)] +pub struct Cooperator { + server: Server, +} + +impl Cooperator { + pub fn new(supervisor: Supervisor) -> Self { + Self { + server: Server::new(supervisor), + } + } + + pub async fn txn(&self, req: TxnRequest) -> Result { + let req = Request::new(req); + let res = self.server.txn(req).await?; + Ok(res.into_inner()) + } +} diff --git a/src/cooperator/src/database.rs b/src/cooperator/src/database.rs new file mode 100644 index 00000000..3e5eb370 --- /dev/null +++ b/src/cooperator/src/database.rs @@ -0,0 +1,75 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::{collections::BTreeMap, sync::Arc}; + +use engula_apis::*; +use engula_supervisor::Supervisor; +use tokio::sync::Mutex; + +use crate::{Collection, Result}; + +#[derive(Clone)] +pub struct Database { + inner: Arc>, +} + +impl Database { + pub fn new(desc: DatabaseDesc, supervisor: Supervisor) -> Self { + let inner = Inner::new(desc, supervisor); + Self { + inner: Arc::new(Mutex::new(inner)), + } + } + + pub async fn execute(&self, req: DatabaseTxnRequest) -> Result { + let mut inner = self.inner.lock().await; + let mut res = DatabaseTxnResponse::default(); + for coreq in req.requests { + let co = inner.collection(&coreq.name).await?; + let cores = co.execute(coreq).await?; + res.responses.push(cores); + } + Ok(res) + } +} + +struct Inner { + sp: Supervisor, + desc: DatabaseDesc, + collections: BTreeMap, +} + +impl Inner { + fn new(desc: DatabaseDesc, supervisor: Supervisor) -> Self { + Self { + sp: supervisor, + desc, + collections: BTreeMap::new(), + } + } + + async fn collection(&mut self, name: &str) -> Result { + let desc = self + .sp + .describe_collection(self.desc.name.clone(), name.to_owned()) + .await?; + let co = self + .collections + .entry(desc.id) + .or_insert_with(Collection::new) + .clone(); + Ok(co) + } +} diff --git a/src/cooperator/src/error.rs b/src/cooperator/src/error.rs new file mode 100644 index 00000000..be559104 --- /dev/null +++ b/src/cooperator/src/error.rs @@ -0,0 +1,16 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +pub type Error = tonic::Status; +pub type Result = std::result::Result; diff --git a/src/cooperator/src/lib.rs b/src/cooperator/src/lib.rs new file mode 100644 index 00000000..577fb874 --- /dev/null +++ b/src/cooperator/src/lib.rs @@ -0,0 +1,29 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod apis; +mod args; +mod collection; +mod cooperator; +mod database; +mod error; +mod server; +mod universe; + +pub(crate) use self::{args::Args, collection::Collection, database::Database, universe::Universe}; +pub use self::{ + cooperator::Cooperator, + error::{Error, Result}, + server::Server, +}; diff --git a/src/cooperator/src/server.rs b/src/cooperator/src/server.rs new file mode 100644 index 00000000..82499bc1 --- /dev/null +++ b/src/cooperator/src/server.rs @@ -0,0 +1,45 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use engula_apis::*;
+use engula_supervisor::Supervisor;
+use tonic::{Request, Response, Status};
+
+use crate::{apis::*, Universe};
+
+#[derive(Clone)]
+pub struct Server {
+    uv: Universe,
+}
+
+impl Server {
+    pub fn new(supervisor: Supervisor) -> Self {
+        Self {
+            uv: Universe::new(supervisor),
+        }
+    }
+
+    pub fn into_service(self) -> cooperator_server::CooperatorServer<Self> {
+        cooperator_server::CooperatorServer::new(self)
+    }
+}
+
+#[tonic::async_trait]
+impl cooperator_server::Cooperator for Server {
+    async fn txn(&self, req: Request<TxnRequest>) -> Result<Response<TxnResponse>, Status> {
+        let req = req.into_inner();
+        let res = self.uv.execute(req).await?;
+        Ok(Response::new(res))
+    }
+}
diff --git a/src/cooperator/src/universe.rs b/src/cooperator/src/universe.rs
new file mode 100644
index 00000000..2bf5527d
--- /dev/null
+++ b/src/cooperator/src/universe.rs
@@ -0,0 +1,70 @@
+// Copyright 2022 The Engula Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::{collections::BTreeMap, sync::Arc};
+
+use engula_apis::*;
+use engula_supervisor::Supervisor;
+use tokio::sync::Mutex;
+
+use crate::{Database, Result};
+
+#[derive(Clone)]
+pub struct Universe {
+    inner: Arc<Mutex<Inner>>,
+}
+
+impl Universe {
+    pub fn new(supervisor: Supervisor) -> Self {
+        let inner = Inner::new(supervisor);
+        Self {
+            inner: Arc::new(Mutex::new(inner)),
+        }
+    }
+
+    pub async fn execute(&self, req: TxnRequest) -> Result<TxnResponse> {
+        let mut inner = self.inner.lock().await;
+        let mut res = TxnResponse::default();
+        for dbreq in req.requests {
+            let db = inner.database(&dbreq.name).await?;
+            let dbres = db.execute(dbreq).await?;
+            res.responses.push(dbres);
+        }
+        Ok(res)
+    }
+}
+
+struct Inner {
+    sp: Supervisor,
+    databases: BTreeMap<u64, Database>,
+}
+
+impl Inner {
+    fn new(supervisor: Supervisor) -> Self {
+        Self {
+            sp: supervisor,
+            databases: BTreeMap::new(),
+        }
+    }
+
+    async fn database(&mut self, name: &str) -> Result<Database> {
+        let desc = self.sp.describe_database(name.to_owned()).await?;
+        let db = self
+            .databases
+            .entry(desc.id)
+            .or_insert_with(|| Database::new(desc, self.sp.clone()))
+            .clone();
+        Ok(db)
+    }
+}
diff --git a/src/engine/hash/Cargo.toml b/src/engine/hash/Cargo.toml
deleted file mode 100644
index abaf860f..00000000
--- a/src/engine/hash/Cargo.toml
+++ /dev/null
@@ -1,18 +0,0 @@
-[package]
-name = "hash-engine"
-version = "0.2.0"
-edition = "2021"
-license = "Apache-2.0"
-homepage = "https://engula.io"
-repository = "https://github.com/engula/engula"
-description = "An Engula engine that provides simple key-value storage."
- -[dependencies] -engula-kernel = { version = "0.2", path = "../../kernel" } -engula-journal = { version = "0.2", path = "../../journal" } -engula-storage = { version = "0.2", path = "../../storage" } - -thiserror = "1.0" -tokio = { version = "1.14", features = ["full"] } -bytes = "1.1" -futures = "0.3" diff --git a/src/engine/hash/src/codec.rs b/src/engine/hash/src/codec.rs deleted file mode 100644 index b7b9cc86..00000000 --- a/src/engine/hash/src/codec.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-use std::io::{Error as IoError, ErrorKind};
-
-use bytes::{Buf, BufMut};
-use tokio::io::{AsyncRead, AsyncReadExt};
-
-use crate::{Error, Result};
-
-pub type Timestamp = u64;
-pub type Value = Option<Vec<u8>>;
-
-#[repr(u8)]
-enum ValueKind {
-    Some = 0,
-    None = 1,
-    Unknown = 255,
-}
-
-impl From<ValueKind> for u8 {
-    fn from(v: ValueKind) -> Self {
-        v as u8
-    }
-}
-
-impl From<u8> for ValueKind {
-    fn from(v: u8) -> Self {
-        match v {
-            0 => ValueKind::Some,
-            1 => ValueKind::None,
-            _ => ValueKind::Unknown,
-        }
-    }
-}
-
-pub fn record_size(key: &[u8], value: &Value) -> usize {
-    let klen = 8 + key.len();
-    let vlen = match value {
-        Some(value) => 8 + value.len(),
-        None => 0,
-    };
-    1 + klen + vlen
-}
-
-pub fn put_record(buf: &mut impl BufMut, key: &[u8], value: &Value) {
-    match value {
-        Some(value) => {
-            buf.put_u8(ValueKind::Some.into());
-            buf.put_u64(key.len() as u64);
-            buf.put_u64(value.len() as u64);
-            buf.put_slice(key);
-            buf.put_slice(value);
-        }
-        None => {
-            buf.put_u8(ValueKind::None.into());
-            buf.put_u64(key.len() as u64);
-            buf.put_slice(key);
-        }
-    }
-}
-
-pub fn encode_record(key: &[u8], value: &Value) -> Vec<u8> {
-    let cap = record_size(key, value);
-    let mut buf = Vec::with_capacity(cap);
-    put_record(&mut buf, key, value);
-    buf
-}
-
-pub fn decode_record(mut buf: &[u8]) -> Result<(Vec<u8>, Value)> {
-    if buf.len() < 9 {
-        return Err(Error::corrupted("record size too small"));
-    }
-    let kind = buf.get_u8();
-    let klen = buf.get_u64() as usize;
-    match ValueKind::from(kind) {
-        ValueKind::Some => {
-            let vlen = buf.get_u64() as usize;
-            if buf.len() < klen + vlen {
-                Err(Error::corrupted("record size too small"))
-            } else {
-                Ok((
-                    buf[0..klen].to_vec(),
-                    Some(buf[klen..(klen + vlen)].to_vec()),
-                ))
-            }
-        }
-        ValueKind::None => {
-            if buf.len() < klen {
-                Err(Error::corrupted("record size too small"))
-            } else {
-                Ok((buf[0..klen].to_vec(), None))
-            }
-        }
-        ValueKind::Unknown => Err(Error::corrupted(format!("invalid value kind {}", kind))),
-    }
-}
-
-type IoResult<T> = std::result::Result<T, IoError>;
-
-pub async fn read_record<R: AsyncRead + Unpin>(r: &mut R) -> IoResult<(Vec<u8>, Value)> {
-    let kind = r.read_u8().await?;
-    let klen = r.read_u64().await?;
-
-    match ValueKind::from(kind) {
-        ValueKind::Some => {
-            let vlen = r.read_u64().await?;
-            let mut key = vec![0; klen as usize];
-            r.read_exact(&mut key).await?;
-            let mut value = vec![0; vlen as usize];
-            r.read_exact(&mut value).await?;
-            Ok((key, Some(value)))
-        }
-        ValueKind::None => {
-            let mut key = vec![0; klen as usize];
-            r.read_exact(&mut key).await?;
-            Ok((key, None))
-        }
-        ValueKind::Unknown => Err(IoError::new(
-            ErrorKind::InvalidData,
-            format!("invalid value kind {}", kind),
-        )),
-    }
-}
diff --git a/src/engine/hash/src/engine.rs b/src/engine/hash/src/engine.rs
deleted file mode 100644
index b23aa8c5..00000000
--- a/src/engine/hash/src/engine.rs
+++ /dev/null
@@ -1,302 +0,0 @@
-// Copyright 2021 The Engula Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::{
-    collections::{HashMap, VecDeque},
-    sync::Arc,
-};
-
-use engula_kernel::{
-    Bucket, Event, Kernel, KernelUpdate, ResultStream, Sequence, Stream, Version, VersionUpdate,
-};
-use futures::TryStreamExt;
-use tokio::sync::Mutex;
-
-use crate::{
-    codec::{self, Timestamp, Value},
-    memtable::Memtable,
-    table_builder::TableBuilder,
-    table_reader::TableReader,
-    Error, Result,
-};
-
-/// A hash engine that provides simple key-value storage.
-#[derive(Clone)]
-pub struct Engine<K: Kernel> {
-    kernel: K,
-    stream: K::Stream,
-    bucket: K::Bucket,
-    current: Arc<Mutex<Arc<EngineVersion<K>>>>,
-    last_timestamp: Arc<Mutex<Timestamp>>,
-    last_object_number: Arc<Mutex<u64>>,
-}
-
-impl<K: Kernel> Engine<K> {
-    /// Opens an engine that runs on the given kernel.
-    ///
-    /// See [engula_kernel](engula_kernel) for available kernels.
-    pub async fn open(kernel: K) -> Result<Self> {
-        let stream = kernel.stream().await?;
-        let bucket = kernel.bucket().await?;
-        let version = kernel.current_version().await?;
-        let version_updates = kernel.version_updates(version.sequence).await;
-
-        let current = EngineVersion::open(bucket.clone(), version).await?;
-        let engine = Self {
-            kernel,
-            stream,
-            bucket,
-            current: Arc::new(Mutex::new(Arc::new(current))),
-            last_timestamp: Arc::new(Mutex::new(0)),
-            last_object_number: Arc::new(Mutex::new(0)),
-        };
-        engine.recover().await?;
-
-        // Starts a task to update versions.
-        {
-            let engine = engine.clone();
-            tokio::spawn(async move {
-                Self::handle_version_updates(engine, version_updates)
-                    .await
-                    .unwrap();
-            });
-        }
-
-        Ok(engine)
-    }
-
-    async fn recover(&self) -> Result<()> {
-        let current = self.current_version().await;
-        let mut ts = current.last_timestamp;
-        let mut stream = self.stream.read_events((ts + 1).into()).await;
-        while let Some(events) = stream.try_next().await? {
-            for event in events {
-                ts += 1;
-                let (key, value) = codec::decode_record(&event.data)?;
-                current.insert(ts, key, value).await;
-            }
-        }
-        *self.last_timestamp.lock().await = ts;
-        Ok(())
-    }
-
-    /// Returns the value corresponding to the key.
-    ///
-    /// - Returns `Ok(Some(<value>))` if the value is found.
-    /// - Returns `Ok(None)` if the value is not found.
-    /// - Returns `Err(<error>)` if any error occurs.
-    pub async fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
-        let current = self.current_version().await;
-        match current.get(key).await? {
-            Some(Some(value)) => Ok(Some(value)),
-            _ => Ok(None),
-        }
-    }
-
-    /// Updates or inserts a key-value pair.
-    ///
-    /// - If the key exists, the entry will be updated.
-    /// - If the key doesn't exist, a new entry will be inserted.
-    pub async fn put(&self, key: Vec<u8>, value: Vec<u8>) -> Result<()> {
-        self.write(key, Some(value)).await
-    }
-
-    /// Deletes an entry.
-    ///
-    /// - If the key exists, the entry will be removed.
-    /// - If the key doesn't exists, it will not be regarded as an error.
-    pub async fn delete(&self, key: Vec<u8>) -> Result<()> {
-        self.write(key, None).await
-    }
-
-    async fn write(&self, key: Vec<u8>, value: Value) -> Result<()> {
-        let mut ts = self.last_timestamp.lock().await;
-        *ts += 1;
-
-        let event = Event {
-            ts: (*ts).into(),
-            data: codec::encode_record(&key, &value),
-        };
-        self.stream.append_event(event).await?;
-
-        let current = self.current_version().await;
-        current.insert(*ts, key, value).await;
-
-        if let Some((imm, version)) = current.should_flush().await {
-            self.install_version(Arc::new(version)).await;
-            let engine = self.clone();
-            tokio::spawn(async move {
-                Self::flush(engine, imm).await.unwrap();
-            });
-        }
-        Ok(())
-    }
-
-    async fn flush(self, imm: Arc<Memtable>) -> Result<()> {
-        let mut number = self.last_object_number.lock().await;
-        (*number) += 1;
-
-        let object = format!("{}", number);
-        let writer = self.bucket.new_sequential_writer(&object).await?;
-
-        let mut table_builder = TableBuilder::new(writer);
-        for (key, value) in imm.iter().await {
-            table_builder.add(&key, &value).await?;
-        }
-        table_builder.finish().await?;
-
-        let mut update = KernelUpdate::default();
-        let last_ts = encode_u64_meta(imm.last_update_timestamp().await);
-        update.add_meta(LAST_TIMESTAMP, last_ts);
-        let last_number = encode_u64_meta(*number);
-        update.add_meta(LAST_OBJECT_NUMBER, last_number);
-        update.add_object(object);
-        self.kernel.apply_update(update).await?;
-        Ok(())
-    }
-
-    async fn current_version(&self) -> Arc<EngineVersion<K>> {
-        self.current.lock().await.clone()
-    }
-
-    async fn install_version(&self, version: Arc<EngineVersion<K>>) {
-        *self.current.lock().await = version;
-    }
-
-    async fn handle_version_updates(
-        self,
-        mut updates: ResultStream<Arc<VersionUpdate>>,
-    ) -> Result<()> {
-        while let Some(update) = updates.try_next().await? {
-            let current = self.current.lock().await.clone();
-            let version = current.install_update(update).await?;
-            let version = Arc::new(version);
-            self.install_version(version.clone()).await;
-            self.stream
-                .release_events(version.last_timestamp.into())
-                .await?;
-        }
-        Ok(())
-    }
-}
-
-#[derive(Clone)]
-struct EngineVersion<K: Kernel> {
-    bucket: K::Bucket,
-    last_sequence: Sequence,
-    last_timestamp: Timestamp,
-    last_object_number: u64,
-    mem: Arc<Memtable>,
-    imm: VecDeque<Arc<Memtable>>,
-    tables: Vec<Arc<TableReader>>,
-}
-
-impl<K: Kernel> EngineVersion<K> {
-    async fn open(bucket: K::Bucket, version: Arc<Version>) -> Result<Self> {
-        let mut tables = Vec::new();
-        for object in &version.objects {
-            let reader = bucket.new_sequential_reader(object).await?;
-            let table_reader = TableReader::new(reader).await?;
-            tables.push(Arc::new(table_reader));
-        }
-        let last_timestamp = decode_u64_meta(&version.meta, LAST_TIMESTAMP)?.unwrap_or(0);
-        let last_object_number = decode_u64_meta(&version.meta, LAST_OBJECT_NUMBER)?.unwrap_or(0);
-        Ok(Self {
-            bucket,
-            last_sequence: version.sequence,
-            last_timestamp,
-            last_object_number,
-            mem: Arc::new(Memtable::new(0)),
-            imm: VecDeque::new(),
-            tables,
-        })
-    }
-
-    async fn get(&self, key: &[u8]) -> Result<Option<Value>> {
-        if let Some(value) = self.mem.get(key).await {
-            return Ok(Some(value));
-        }
-        for imm in self.imm.iter().rev() {
-            if let Some(value) = imm.get(key).await {
-                return Ok(Some(value));
-            }
-        }
-        for table in self.tables.iter().rev() {
-            match table.get(key).await {
-                Ok(Some(value)) => return Ok(Some(value)),
-                Ok(None) => continue,
-                Err(err) => return Err(err),
-            }
-        }
-        Ok(None)
-    }
-
-    async fn insert(&self, ts: Timestamp, key: Vec<u8>, value: Value) {
-        self.mem.insert(ts, key, value).await;
-    }
-
-    async fn should_flush(&self) -> Option<(Arc<Memtable>, EngineVersion<K>)> {
-        if self.mem.approximate_size().await > MEMTABLE_SIZE {
-            let mut version = self.clone();
-            let mem = version.mem.clone();
-            let last_ts = mem.last_update_timestamp().await;
-            version.imm.push_back(mem.clone());
-            version.mem = Arc::new(Memtable::new(last_ts));
-            Some((mem, version))
-        } else {
-            None
-        }
-    }
-
-    async fn install_update(&self, update: Arc<VersionUpdate>) -> Result<EngineVersion<K>> {
-        // Makes sure we don't miss updates.
-        assert_eq!(self.last_sequence + 1, update.sequence);
-
-        let mut version = self.clone();
-        version.last_sequence = update.sequence;
-        if let Some(value) = decode_u64_meta(&update.add_meta, LAST_TIMESTAMP)? {
-            version.last_timestamp = value;
-        }
-        if let Some(value) = decode_u64_meta(&update.add_meta, LAST_OBJECT_NUMBER)? {
-            version.last_object_number = value;
-        }
-
-        for object in &update.add_objects {
-            // We assume that objects are flushed from the oldest immtable to the newest.
-            let reader = version.bucket.new_sequential_reader(object).await?;
-            let table_reader = TableReader::new(reader).await?;
-            version.tables.push(Arc::new(table_reader));
-            version.imm.pop_front();
-        }
-
-        Ok(version)
-    }
-}
-
-const MEMTABLE_SIZE: usize = 4 * 1024;
-const LAST_TIMESTAMP: &str = "last_timestamp";
-const LAST_OBJECT_NUMBER: &str = "last_object_number";
-
-fn encode_u64_meta(value: u64) -> Vec<u8> {
-    value.to_be_bytes().to_vec()
-}
-
-fn decode_u64_meta(map: &HashMap<String, Vec<u8>>, name: &str) -> Result<Option<u64>> {
-    if let Some(buf) = map.get(name) {
-        let buf = buf.as_slice().try_into().map_err(Error::corrupted)?;
-        Ok(Some(u64::from_be_bytes(buf)))
-    } else {
-        Ok(None)
-    }
-}
diff --git a/src/engine/hash/src/lib.rs b/src/engine/hash/src/lib.rs
deleted file mode 100644
index 77a04527..00000000
--- a/src/engine/hash/src/lib.rs
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2021 The Engula Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! An Engula engine that provides simple key-value storage. - -mod codec; -mod engine; -mod error; -mod memtable; -mod table_builder; -mod table_reader; - -pub use self::{ - engine::Engine, - error::{Error, Result}, -}; - -#[cfg(test)] -mod tests { - use engula_kernel::mem::Kernel; - use tokio::fs::OpenOptions; - - use crate::{table_builder::TableBuilder, table_reader::TableReader, *}; - - #[tokio::test] - async fn engine() -> Result<()> { - const N: u32 = 4096; - - let kernel = Kernel::open().await?; - let engine = Engine::open(kernel.clone()).await?; - for i in 0..N { - let k = i.to_be_bytes().to_vec(); - engine.put(k.clone(), k.clone()).await?; - let got = engine.get(&k).await?; - assert_eq!(got, Some(k.clone())); - if i % 2 == 0 { - engine.delete(k.clone()).await?; - let got = engine.get(&k).await?; - assert_eq!(got, None); - } - } - - // Re-open - let engine = Engine::open(kernel.clone()).await?; - for i in 0..N { - let k = i.to_be_bytes().to_vec(); - if i % 2 == 0 { - let got = engine.get(&k).await?; - assert_eq!(got, None); - } else { - let got = engine.get(&k).await?; - assert_eq!(got, Some(k)) - } - } - - Ok(()) - } - - #[tokio::test] - async fn table() -> Result<()> { - let records = vec![ - (vec![1], Some(vec![1])), - (vec![2], Some(vec![2])), - (vec![3], None), - ]; - - let path = std::env::temp_dir().join("table"); - let file = OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&path) - .await?; - let mut builder = TableBuilder::new(file); - for record in &records { - builder.add(&record.0, 
&record.1).await?; - } - builder.finish().await?; - - let file = OpenOptions::new().read(true).open(&path).await?; - let reader = TableReader::new(file).await?; - for record in &records { - let got = reader.get(&record.0).await?; - assert_eq!(got.as_ref(), Some(&record.1)); - } - - Ok(()) - } -} diff --git a/src/engine/hash/src/memtable.rs b/src/engine/hash/src/memtable.rs deleted file mode 100644 index 3d647489..00000000 --- a/src/engine/hash/src/memtable.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::collections::BTreeMap; - -use tokio::sync::Mutex; - -use crate::codec::{self, Timestamp, Value}; - -pub struct Memtable { - inner: Mutex, -} - -struct Inner { - map: BTreeMap, Value>, - size: usize, - last_ts: Timestamp, -} - -impl Memtable { - pub fn new(ts: Timestamp) -> Self { - let inner = Inner { - map: BTreeMap::new(), - size: 0, - last_ts: ts, - }; - Memtable { - inner: Mutex::new(inner), - } - } - - pub async fn get(&self, key: &[u8]) -> Option { - let inner = self.inner.lock().await; - inner.map.get(key).cloned() - } - - pub async fn iter(&self) -> BTreeMap, Value> { - let inner = self.inner.lock().await; - inner.map.clone() - } - - pub async fn insert(&self, ts: Timestamp, key: Vec, value: Value) { - let mut inner = self.inner.lock().await; - inner.size += codec::record_size(&key, &value); - assert!(ts > inner.last_ts); - inner.last_ts = ts; - inner.map.insert(key, value); - } - - pub async fn approximate_size(&self) -> usize { - self.inner.lock().await.size - } - - pub async fn last_update_timestamp(&self) -> Timestamp { - self.inner.lock().await.last_ts - } -} diff --git a/src/engine/hash/src/table_builder.rs b/src/engine/hash/src/table_builder.rs deleted file mode 100644 index ba0a7045..00000000 --- a/src/engine/hash/src/table_builder.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use tokio::io::{AsyncWrite, AsyncWriteExt}; - -use crate::{codec, codec::Value, Result}; - -pub struct TableBuilder { - write: W, - block: BlockBuilder, -} - -impl TableBuilder { - pub fn new(write: W) -> Self { - Self { - write, - block: BlockBuilder::new(), - } - } - - pub async fn add(&mut self, key: &[u8], value: &Value) -> Result<()> { - self.block.add(key, value); - if self.block.size() >= BLOCK_SIZE { - self.flush().await?; - } - Ok(()) - } - - async fn flush(&mut self) -> Result<()> { - self.write.write_all(self.block.data()).await?; - self.block.reset(); - Ok(()) - } - - pub async fn finish(mut self) -> Result<()> { - if self.block.size() > 0 { - self.flush().await?; - } - self.write.shutdown().await?; - Ok(()) - } -} - -const BLOCK_SIZE: usize = 8 * 1024; - -struct BlockBuilder { - buf: Vec, -} - -impl BlockBuilder { - fn new() -> Self { - Self { - buf: Vec::with_capacity(BLOCK_SIZE), - } - } - - fn add(&mut self, key: &[u8], value: &Value) { - codec::put_record(&mut self.buf, key, value); - } - - fn data(&self) -> &[u8] { - &self.buf - } - - fn size(&self) -> usize { - self.buf.len() - } - - fn reset(&mut self) { - self.buf.clear() - } -} diff --git a/src/engine/hash/src/table_reader.rs b/src/engine/hash/src/table_reader.rs deleted file mode 100644 index 606ba7f3..00000000 --- a/src/engine/hash/src/table_reader.rs +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::{collections::HashMap, io::ErrorKind}; - -use tokio::io::AsyncRead; - -use crate::{codec, codec::Value, Result}; - -pub struct TableReader { - map: HashMap, Value>, -} - -impl TableReader { - pub async fn new(mut r: R) -> Result { - let map = read_all(&mut r).await?; - Ok(TableReader { map }) - } - - pub async fn get(&self, key: &[u8]) -> Result> { - Ok(self.map.get(key).cloned()) - } -} - -type IoResult = std::result::Result; - -async fn read_all(r: &mut R) -> IoResult, Value>> { - let mut map = HashMap::new(); - loop { - match codec::read_record(r).await { - Ok(record) => { - assert!(map.insert(record.0, record.1).is_none()); - } - Err(err) => { - if err.kind() == ErrorKind::UnexpectedEof { - return Ok(map); - } else { - return Err(err); - } - } - } - } -} diff --git a/src/engula/Cargo.toml b/src/engula/Cargo.toml new file mode 100644 index 00000000..67fe9250 --- /dev/null +++ b/src/engula/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "engula" +version = "0.3.0" +edition = "2021" +license = "Apache-2.0" +homepage = "https://engula.io" +repository = "https://github.com/engula/engula" +keywords = ["database", "storage-engine"] +categories = [ + "asynchronous", + "data-structures", + "database", + "database-implementations", +] +description = "The Engula command line tool." + +[dependencies] +engula-transactor = { path = "../transactor" } +object-engine-master = { path = "../object-engine/master" } +stream-engine-master = { path = "../stream-engine/master" } + +anyhow = "1.0" +clap = { version = "3.0", features = ["derive"] } +prost = "0.9" +tokio = { version = "1.15", features = ["full"] } +tokio-stream = { version = "0.1.8", features = ["net"] } +tonic = "0.6" +tracing = "0.1.31" +tracing-subscriber = "0.3.9" diff --git a/src/engula/src/main.rs b/src/engula/src/main.rs new file mode 100644 index 00000000..e851dbd8 --- /dev/null +++ b/src/engula/src/main.rs @@ -0,0 +1,52 @@ +// Copyright 2022 The Engula Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::Result; +use clap::Parser; + +mod server; + +#[derive(Parser)] +struct Command { + #[clap(subcommand)] + subcmd: SubCommand, +} + +impl Command { + async fn run(self) -> Result<()> { + self.subcmd.run().await?; + Ok(()) + } +} + +#[derive(Parser)] +enum SubCommand { + Server(server::Command), +} + +impl SubCommand { + async fn run(self) -> Result<()> { + match self { + SubCommand::Server(cmd) => cmd.run().await, + } + } +} + +#[tokio::main] +async fn main() -> Result<()> { + let cmd: Command = Command::parse(); + tracing_subscriber::fmt::init(); + cmd.run().await?; + Ok(()) +} diff --git a/src/engula/src/server.rs b/src/engula/src/server.rs new file mode 100644 index 00000000..a7c964f2 --- /dev/null +++ b/src/engula/src/server.rs @@ -0,0 +1,83 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use anyhow::Result; +use clap::Parser; +use tokio::net::TcpListener; +use tokio_stream::wrappers::TcpListenerStream; +use tracing::{error, info}; + +#[derive(Parser)] +pub struct Command { + #[clap(subcommand)] + subcmd: SubCommand, +} + +impl Command { + pub async fn run(self) -> Result<()> { + self.subcmd.run().await?; + Ok(()) + } +} + +#[derive(Parser)] +enum SubCommand { + Start(StartCommand), +} + +impl SubCommand { + async fn run(self) -> Result<()> { + match self { + SubCommand::Start(cmd) => cmd.run().await, + } + } +} + +#[derive(Parser)] +struct StartCommand { + #[clap(long, default_value = "0.0.0.0:21716")] + addr: String, +} + +impl StartCommand { + async fn run(self) -> Result<()> { + let listener = TcpListener::bind(self.addr).await?; + let addr = listener.local_addr()?; + + let transactor = engula_transactor::Server::new().into_service(); + let object_engine_master = object_engine_master::Server::new().into_service(); + let stream_engine_master = stream_engine_master::Server::new().into_service(); + + let serve = tonic::transport::Server::builder() + .add_service(transactor) + .add_service(object_engine_master) + .add_service(stream_engine_master) + .serve_with_incoming(TcpListenerStream::new(listener)); + + info!(message = "Starting Engula server...", %addr); + + tokio::select! { + res = serve => { + if let Err(err) = res { + error!(cause = %err, "Fatal error occurs!"); + } + } + _ = tokio::signal::ctrl_c() => { + info!("Shutting down..."); + } + } + + Ok(()) + } +} diff --git a/src/journal/Cargo.toml b/src/journal/Cargo.toml deleted file mode 100644 index 26a3057d..00000000 --- a/src/journal/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "engula-journal" -version = "0.2.0" -edition = "2021" -license = "Apache-2.0" -homepage = "https://engula.io" -repository = "https://github.com/engula/engula" -description = "An Engula module that provides stream storage abstractions and implementations." 
- -[dependencies] -thiserror = "1.0" -async-trait = "0.1" -futures = "0.3" -tokio = { version = "1.13", features = ["full"] } -tokio-stream = { version = "0.1", features = ["net"] } -tonic = "0.6" -prost = "0.9" - -[dev-dependencies] -tempfile = "3" - -[build-dependencies] -tonic-build = "0.6" diff --git a/src/journal/src/file/codec.rs b/src/journal/src/file/codec.rs deleted file mode 100644 index d36d29f1..00000000 --- a/src/journal/src/file/codec.rs +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use tokio::io::{ - AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt, SeekFrom, -}; - -use crate::{Error, Event, Result, Timestamp}; - -// Event format: -// -// | ts_len (u32) | data_len (u32) | ts_bytes (ts_len) | data_bytes (data_len) | -// -// Footer format: -// -// | ts_bytes (ts_len) | ts_len (u32) | - -pub async fn write_event(w: &mut W, event: &Event) -> Result { - let ts_bytes = event.ts.serialize(); - w.write_u32(ts_bytes.len() as u32).await?; - w.write_u32(event.data.len() as u32).await?; - w.write_buf(&mut ts_bytes.as_ref()).await?; - w.write_buf(&mut event.data.as_ref()).await?; - Ok(8 + ts_bytes.len() + event.data.len()) -} - -pub async fn write_footer(w: &mut W, ts: Timestamp) -> Result { - let ts_bytes = ts.serialize(); - w.write_buf(&mut ts_bytes.as_ref()).await?; - w.write_u32(ts_bytes.len() as u32).await?; - Ok(ts_bytes.len() + 4) -} - -pub async fn read_event_at( - r: &mut R, - mut offset: usize, - max_offset: usize, -) -> Result> { - if offset == max_offset { - return Ok(None); - } - offset += 8; - if offset > max_offset { - return Err(Error::Corrupted(format!( - "offset {} > max_offset {}", - offset, max_offset - ))); - } - let ts_len = r.read_u32().await?; - let data_len = r.read_u32().await?; - offset += (ts_len + data_len) as usize; - if offset > max_offset { - return Err(Error::Corrupted(format!( - "offset {} > max_offset {}", - offset, max_offset - ))); - } - let mut ts_buf = vec![0; ts_len as usize]; - r.read_exact(&mut ts_buf).await?; - let ts = Timestamp::deserialize(ts_buf)?; - let mut data = vec![0; data_len as usize]; - r.read_exact(&mut data).await?; - Ok(Some((Event { ts, data }, offset))) -} - -pub async fn read_footer_from( - r: &mut R, - mut max_offset: usize, -) -> Result<(Timestamp, usize)> { - if max_offset < 4 { - return Err(Error::Corrupted("file size too small".to_owned())); - } - max_offset -= 4; - r.seek(SeekFrom::Start(max_offset as u64)).await?; - let ts_len = r.read_u32().await?; - if 
max_offset < ts_len as usize { - return Err(Error::Corrupted("file size too small".to_owned())); - } - max_offset -= ts_len as usize; - r.seek(SeekFrom::Start(max_offset as u64)).await?; - let mut ts_buf = vec![0; ts_len as usize]; - r.read_exact(&mut ts_buf).await?; - let ts = Timestamp::deserialize(ts_buf)?; - Ok((ts, max_offset)) -} diff --git a/src/journal/src/file/journal.rs b/src/journal/src/file/journal.rs deleted file mode 100644 index cb2eb685..00000000 --- a/src/journal/src/file/journal.rs +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::{path::PathBuf, sync::Arc}; - -use tokio::{fs, sync::Mutex}; - -use super::stream::Stream; -use crate::{async_trait, Error, Result}; - -#[derive(Clone)] -pub struct Journal { - root: Arc>, - segment_size: usize, -} - -impl Journal { - pub async fn open(root: impl Into, segment_size: usize) -> Result { - let root = root.into(); - Ok(Self { - root: Arc::new(Mutex::new(root)), - segment_size, - }) - } -} - -#[async_trait] -impl crate::Journal for Journal { - type Stream = Stream; - - async fn stream(&self, name: &str) -> Result { - let root = self.root.lock().await; - let path = root.join(name); - if !path.exists() { - return Err(Error::NotFound(format!("stream '{:?}'", path))); - } - Stream::open(path, self.segment_size).await - } - - async fn create_stream(&self, name: &str) -> Result { - let root = self.root.lock().await; - let path = root.join(name); - if path.exists() { - return Err(Error::AlreadyExists(format!("stream '{:?}'", path))); - } - fs::create_dir_all(&path).await?; - Stream::open(path, self.segment_size).await - } - - async fn delete_stream(&self, name: &str) -> Result<()> { - let root = self.root.lock().await; - let path = root.join(name); - if !path.exists() { - return Err(Error::NotFound(format!("stream '{:?}'", path))); - } - fs::remove_dir_all(path).await?; - Ok(()) - } -} diff --git a/src/journal/src/file/mod.rs b/src/journal/src/file/mod.rs deleted file mode 100644 index a79b16b4..00000000 --- a/src/journal/src/file/mod.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A [`Journal`] implementation that stores data in local files. -//! -//! [`Journal`]: crate::Journal - -mod codec; -mod journal; -mod segment; -mod segment_reader; -mod segment_stream; -mod stream; - -pub use self::{journal::Journal, stream::Stream}; - -#[cfg(test)] -mod tests { - use futures::TryStreamExt; - - use crate::*; - - #[tokio::test] - async fn test() -> Result<()> { - let tmp = tempfile::tempdir()?; - let root = tmp.path(); - let stream_name = "stream"; - let segment_size = 1024; - - // Creates a stream - let j = super::Journal::open(root, segment_size).await?; - assert!(matches!( - j.stream(stream_name).await, - Err(Error::NotFound(_)) - )); - let stream = j.create_stream(stream_name).await?; - test_stream(&stream, 1, 256).await?; - - // Reopen - let j = super::Journal::open(root, segment_size).await?; - assert!(matches!( - j.create_stream(stream_name).await, - Err(Error::AlreadyExists(_)) - )); - let stream = j.stream(stream_name).await?; - // This will conflict with the last timestamp. 
- assert!(matches!( - test_stream(&stream, 255, 256).await, - Err(Error::InvalidArgument(_)) - )); - test_stream(&stream, 256, 512).await?; - - // Deletes a stream - j.delete_stream(stream_name).await?; - assert!(matches!( - j.stream(stream_name).await, - Err(Error::NotFound(_)) - )); - - Ok(()) - } - - async fn test_stream(stream: &super::Stream, start: u64, limit: u64) -> Result<()> { - let mut released_ts = start; - for ts in start..limit { - let event = Event { - ts: ts.into(), - data: ts.to_be_bytes().to_vec(), - }; - stream.append_event(event).await?; - check_stream(stream, released_ts, ts + 1).await?; - if ts % 29 == 0 { - released_ts = ts - 17; - stream.release_events(released_ts.into()).await?; - check_stream(stream, released_ts, ts + 1).await?; - } - } - Ok(()) - } - - async fn check_stream(stream: &super::Stream, start: u64, limit: u64) -> Result<()> { - let mut events = stream.read_events(start.into()).await; - for i in start..limit { - for event in events.try_next().await?.unwrap() { - assert_eq!(event.ts, i.into()); - } - } - Ok(()) - } -} diff --git a/src/journal/src/file/segment.rs b/src/journal/src/file/segment.rs deleted file mode 100644 index b7dbf581..00000000 --- a/src/journal/src/file/segment.rs +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::path::PathBuf; - -use futures::TryStreamExt; -use tokio::{ - fs::{File, OpenOptions}, - io::AsyncWriteExt, -}; - -use super::{codec, segment_stream::SegmentStream}; -use crate::{Error, Event, Result, ResultStream, Timestamp}; - -pub struct Segment { - path: PathBuf, - file: File, - offset: usize, - last_timestamp: Option, -} - -impl Segment { - pub async fn open( - path: impl Into, - mut last_timestamp: Option, - ) -> Result { - let path = path.into(); - let file = OpenOptions::new() - .create(true) - .append(true) - .open(&path) - .await?; - let offset = file.metadata().await?.len() as usize; - - // Recovers the last timestamp. - let mut stream = SegmentStream::open(&path, offset, None).await?; - while let Some(events) = stream.try_next().await? { - for event in events { - last_timestamp = Some(event.ts); - } - } - - Ok(Self { - path, - file, - offset, - last_timestamp, - }) - } - - pub async fn seal(mut self) -> Result { - let ts = self.last_timestamp.ok_or_else(|| { - Error::Unknown("should not seal a segment with no timestamp".to_owned()) - })?; - // Records the last timestamp at the file footer. 
- codec::write_footer(&mut self.file, ts).await?; - self.file.sync_data().await?; - Ok(ts) - } - - pub async fn read_events(&self, ts: Timestamp) -> Result>> { - SegmentStream::open(&self.path, self.offset, Some(ts)).await - } - - pub async fn append_event(&mut self, event: Event) -> Result { - if let Some(last_ts) = self.last_timestamp { - if event.ts <= last_ts { - return Err(Error::InvalidArgument(format!( - "event timestamp {:?} <= last event timestamp {:?}", - event.ts, last_ts, - ))); - } - } - let size = codec::write_event(&mut self.file, &event).await?; - self.file.flush().await?; - self.offset += size; - self.last_timestamp = Some(event.ts); - Ok(self.offset) - } -} diff --git a/src/journal/src/file/segment_reader.rs b/src/journal/src/file/segment_reader.rs deleted file mode 100644 index f9ce7dca..00000000 --- a/src/journal/src/file/segment_reader.rs +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::path::{Path, PathBuf}; - -use tokio::fs::File; - -use super::{codec, segment_stream::SegmentStream}; -use crate::{Event, Result, ResultStream, Timestamp}; - -pub struct SegmentReader { - path: PathBuf, - max_offset: usize, - max_timestamp: Timestamp, -} - -impl SegmentReader { - pub async fn open(path: impl Into) -> Result { - let path = path.into(); - let mut file = File::open(&path).await?; - let max_offset = file.metadata().await?.len() as usize; - // Reads the max timestamp from the file footer. - let (max_timestamp, max_offset) = codec::read_footer_from(&mut file, max_offset).await?; - Ok(Self { - path, - max_offset, - max_timestamp, - }) - } - - pub fn path(&self) -> &Path { - self.path.as_path() - } - - pub fn max_timestamp(&self) -> Timestamp { - self.max_timestamp - } - - pub async fn read_events(&self, ts: Timestamp) -> Result>> { - SegmentStream::open(&self.path, self.max_offset, Some(ts)).await - } -} diff --git a/src/journal/src/file/segment_stream.rs b/src/journal/src/file/segment_stream.rs deleted file mode 100644 index f9cce4b4..00000000 --- a/src/journal/src/file/segment_stream.rs +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::path::Path; - -use futures::stream; -use tokio::fs::File; - -use super::codec; -use crate::{Event, Result, ResultStream, Timestamp}; - -pub struct SegmentStream { - file: File, - offset: usize, - max_offset: usize, - start_event: Option, -} - -impl SegmentStream { - pub async fn open( - path: impl AsRef, - limit: usize, - start_ts: Option, - ) -> Result>> { - let file = File::open(path).await?; - let mut stream = Self { - file, - offset: 0, - max_offset: limit, - start_event: None, - }; - - // Seeks to the start event. - if let Some(ts) = start_ts { - while let Some(event) = stream.read_event().await? { - if event.ts >= ts { - stream.start_event = Some(event); - break; - } - } - } - - let stream = stream::unfold(stream, |mut stream| async move { - stream.next_events().await.map(|events| (events, stream)) - }); - Ok(Box::pin(stream)) - } - - async fn read_event(&mut self) -> Result> { - if let Some((event, offset)) = - codec::read_event_at(&mut self.file, self.offset, self.max_offset).await? - { - self.offset = offset; - Ok(Some(event)) - } else { - Ok(None) - } - } - - async fn next_event(&mut self) -> Result> { - if let Some(event) = self.start_event.take() { - Ok(Some(event)) - } else { - self.read_event().await - } - } - - async fn next_events(&mut self) -> Option>> { - match self.next_event().await { - Ok(Some(event)) => Some(Ok(vec![event])), - Ok(None) => None, - Err(err) => Some(Err(err)), - } - } -} diff --git a/src/journal/src/file/stream.rs b/src/journal/src/file/stream.rs deleted file mode 100644 index b21dc11b..00000000 --- a/src/journal/src/file/stream.rs +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::{path::PathBuf, sync::Arc}; - -use futures::{future, stream, StreamExt}; -use tokio::{fs, sync::Mutex}; - -use super::{segment::Segment, segment_reader::SegmentReader}; -use crate::{async_trait, Error, Event, Result, ResultStream, Timestamp}; - -#[derive(Clone)] -pub struct Stream { - inner: Arc>, -} - -#[derive(Default)] -pub struct Inner { - path: PathBuf, - segment_size: usize, - active_segment: Option, - sealed_segments: Vec, -} - -// Journal file layout: -// -// - journal -// - stream_1 -// - active_segment (CURRENT) -// - sealed_segment_1 -// - sealed_segment_2 -// - ... -// - stream_2 -const ACTIVE_SEGMENT: &str = "CURRENT"; - -impl Inner { - fn active_segment_path(&self) -> PathBuf { - self.path.join(ACTIVE_SEGMENT) - } - - fn sealed_segment_path(&self, ts: Timestamp) -> PathBuf { - self.path.join(format!("{:?}", ts)) - } -} - -impl Stream { - pub async fn open(path: PathBuf, segment_size: usize) -> Result { - let mut inner = Inner { - path, - segment_size, - active_segment: None, - sealed_segments: Vec::new(), - }; - - // Opens all sealed segments and sorts them by timestamp. - let mut entries = fs::read_dir(&inner.path).await?; - while let Some(ent) = entries.next_entry().await? 
{ - if ent.file_name() != ACTIVE_SEGMENT { - let segment = SegmentReader::open(ent.path()).await?; - inner.sealed_segments.push(segment); - } - } - inner.sealed_segments.sort_by_key(|x| x.max_timestamp()); - - let last_timestamp = inner.sealed_segments.last().map(|x| x.max_timestamp()); - let active = Segment::open(inner.active_segment_path(), last_timestamp).await?; - inner.active_segment = Some(active); - - Ok(Stream { - inner: Arc::new(Mutex::new(inner)), - }) - } - - async fn read_segments(&self, ts: Timestamp) -> Result>>> { - let inner = self.inner.lock().await; - let index = inner - .sealed_segments - .partition_point(|x| x.max_timestamp() < ts); - let mut streams = Vec::new(); - for segment in &inner.sealed_segments[index..] { - streams.push(segment.read_events(ts).await?); - } - if let Some(segment) = &inner.active_segment { - streams.push(segment.read_events(ts).await?); - } - Ok(streams) - } -} - -#[async_trait] -impl crate::Stream for Stream { - async fn read_events(&self, ts: Timestamp) -> ResultStream> { - match self.read_segments(ts).await { - Ok(streams) => Box::pin(stream::iter(streams).flatten()), - Err(err) => Box::pin(stream::once(future::err(err))), - } - } - - async fn append_event(&self, event: Event) -> Result<()> { - let mut inner = self.inner.lock().await; - let size = if let Some(active) = inner.active_segment.as_mut() { - active.append_event(event).await? - } else { - return Err(Error::Unknown( - "active segment is closed due to previous errors".to_owned(), - )); - }; - - if size >= inner.segment_size { - // Seals the active segment. - let active = inner.active_segment.take().unwrap(); - let last_timestamp = active.seal().await?; - - // Renames the active segment to a sealed segment. 
- let active_segment_path = inner.active_segment_path(); - let sealed_segment_path = inner.sealed_segment_path(last_timestamp); - fs::rename(&active_segment_path, &sealed_segment_path).await?; - let sealed = SegmentReader::open(sealed_segment_path).await?; - inner.sealed_segments.push(sealed); - - // Opens a new active segment. - let active = Segment::open(&active_segment_path, Some(last_timestamp)).await?; - inner.active_segment = Some(active); - } - Ok(()) - } - - async fn release_events(&self, ts: Timestamp) -> Result<()> { - let mut inner = self.inner.lock().await; - let index = inner - .sealed_segments - .partition_point(|x| x.max_timestamp() < ts); - for segment in inner.sealed_segments.drain(..index) { - fs::remove_file(segment.path()).await?; - } - Ok(()) - } -} diff --git a/src/journal/src/grpc/client.rs b/src/journal/src/grpc/client.rs deleted file mode 100644 index ae13b923..00000000 --- a/src/journal/src/grpc/client.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use tonic::{transport::Channel, Streaming}; - -use super::proto::*; -use crate::Result; - -type JournalClient = journal_client::JournalClient; - -#[derive(Clone)] -pub struct Client { - client: JournalClient, -} - -impl Client { - pub async fn connect(addr: &str) -> Result { - let client = JournalClient::connect(addr.to_owned()).await?; - Ok(Client { client }) - } - - pub async fn create_stream(&self, input: CreateStreamRequest) -> Result { - let mut client = self.client.clone(); - let response = client.create_stream(input).await?; - Ok(response.into_inner()) - } - - pub async fn delete_stream(&self, input: DeleteStreamRequest) -> Result { - let mut client = self.client.clone(); - let response = client.delete_stream(input).await?; - Ok(response.into_inner()) - } - - pub async fn append_event(&self, input: AppendEventRequest) -> Result { - let mut client = self.client.clone(); - let response = client.append_event(input).await?; - Ok(response.into_inner()) - } - - pub async fn release_events( - &self, - input: ReleaseEventsRequest, - ) -> Result { - let mut client = self.client.clone(); - let response = client.release_events(input).await?; - Ok(response.into_inner()) - } - - pub async fn read_events( - &self, - input: ReadEventsRequest, - ) -> Result> { - let mut client = self.client.clone(); - let response = client.read_events(input).await?; - Ok(response.into_inner()) - } -} diff --git a/src/journal/src/grpc/error.rs b/src/journal/src/grpc/error.rs deleted file mode 100644 index f287af3a..00000000 --- a/src/journal/src/grpc/error.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::Error; - -impl From for Error { - fn from(s: tonic::Status) -> Self { - match s.code() { - tonic::Code::NotFound => Error::NotFound(s.message().into()), - tonic::Code::AlreadyExists => Error::AlreadyExists(s.message().into()), - tonic::Code::InvalidArgument => Error::InvalidArgument(s.message().into()), - _ => Error::Unknown(s.to_string()), - } - } -} - -impl From for Error { - fn from(e: tonic::transport::Error) -> Self { - Error::Unknown(e.to_string()) - } -} - -impl From for tonic::Status { - fn from(err: Error) -> Self { - let (code, message) = match err { - Error::NotFound(s) => (tonic::Code::NotFound, s), - Error::AlreadyExists(s) => (tonic::Code::AlreadyExists, s), - Error::InvalidArgument(s) => (tonic::Code::InvalidArgument, s), - Error::Io(err) => (tonic::Code::Unknown, err.to_string()), - Error::Corrupted(s) => (tonic::Code::Unknown, s), - Error::Unknown(s) => (tonic::Code::Unknown, s), - }; - tonic::Status::new(code, message) - } -} diff --git a/src/journal/src/grpc/journal.proto b/src/journal/src/grpc/journal.proto deleted file mode 100644 index a26164be..00000000 --- a/src/journal/src/grpc/journal.proto +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package engula.journal.v1; - -service Journal { - // APIs to manipulate a journal. - - rpc CreateStream(CreateStreamRequest) returns (CreateStreamResponse) {} - - rpc DeleteStream(DeleteStreamRequest) returns (DeleteStreamResponse) {} - - // APIs to manipulate a stream. - - rpc AppendEvent(AppendEventRequest) returns (AppendEventResponse) {} - - rpc ReleaseEvents(ReleaseEventsRequest) returns (ReleaseEventsResponse) {} - - rpc ReadEvents(ReadEventsRequest) returns (stream ReadEventsResponse) {} -} - -message CreateStreamRequest { - string stream = 1; -} - -message CreateStreamResponse {} - -message DeleteStreamRequest { - string stream = 1; -} - -message DeleteStreamResponse {} - -message AppendEventRequest { - string stream = 1; - bytes ts = 2; - bytes data = 3; -} - -message AppendEventResponse {} - -message ReleaseEventsRequest { - string stream = 1; - bytes ts = 2; -} - -message ReleaseEventsResponse {} - -message ReadEventsRequest { - string stream = 1; - bytes ts = 2; -} - -message ReadEventsResponse { - repeated Event events = 1; -} - -message Event { - bytes ts = 1; - bytes data = 2; -} diff --git a/src/journal/src/grpc/journal.rs b/src/journal/src/grpc/journal.rs deleted file mode 100644 index 9616c1cf..00000000 --- a/src/journal/src/grpc/journal.rs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{ - client::Client, - proto::{CreateStreamRequest, DeleteStreamRequest}, - stream::Stream, -}; -use crate::{async_trait, Result}; - -#[derive(Clone)] -pub struct Journal { - client: Client, -} - -impl Journal { - pub async fn connect(addr: &str) -> Result { - let endpoint = format!("http://{}", addr); - let client = Client::connect(&endpoint).await?; - Ok(Journal { client }) - } -} - -#[async_trait] -impl crate::Journal for Journal { - type Stream = Stream; - - async fn stream(&self, name: &str) -> Result { - Ok(Stream::new(self.client.clone(), name.to_owned())) - } - - async fn create_stream(&self, name: &str) -> Result { - let input = CreateStreamRequest { - stream: name.to_owned(), - }; - self.client.create_stream(input).await?; - self.stream(name).await - } - - async fn delete_stream(&self, name: &str) -> Result<()> { - let input = DeleteStreamRequest { - stream: name.to_owned(), - }; - self.client.delete_stream(input).await?; - Ok(()) - } -} diff --git a/src/journal/src/grpc/mod.rs b/src/journal/src/grpc/mod.rs deleted file mode 100644 index b4d18905..00000000 --- a/src/journal/src/grpc/mod.rs +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A [`Journal`] implementation that interacts with gRPC journal service. -//! -//! [`Journal`]: crate::Journal - -mod client; -mod error; -mod journal; -mod proto; -mod server; -mod stream; - -pub use self::{client::Client, journal::Journal, server::Server, stream::Stream}; - -#[cfg(test)] -mod tests { - use futures::TryStreamExt; - use tokio::net::TcpListener; - use tokio_stream::wrappers::TcpListenerStream; - - use super::Server; - use crate::*; - - #[tokio::test(flavor = "multi_thread")] - async fn test() -> std::result::Result<(), Box> { - let listener = TcpListener::bind("127.0.0.1:0").await?; - let local_addr = listener.local_addr()?; - tokio::task::spawn(async move { - let j = mem::Journal::default(); - let server = Server::new(j); - tonic::transport::Server::builder() - .add_service(server.into_service()) - .serve_with_incoming(TcpListenerStream::new(listener)) - .await - .unwrap(); - }); - - let journal = grpc::Journal::connect(&local_addr.to_string()).await?; - let stream = journal.create_stream("s").await?; - let ts = 31340128116183; - let event = Event { - ts: ts.into(), - data: vec![0, 1, 2], - }; - stream.append_event(event.clone()).await?; - { - let mut events = stream.read_events(0.into()).await; - let got = events.try_next().await?.unwrap(); - assert_eq!(got, vec![event]); - } - stream.release_events((ts + 1).into()).await?; - { - let mut events = stream.read_events(0.into()).await; - let got = events.try_next().await?.unwrap(); - assert_eq!(got, vec![]); - } - let _ = journal.delete_stream("s").await?; - Ok(()) - } -} diff 
--git a/src/journal/src/grpc/server.rs b/src/journal/src/grpc/server.rs deleted file mode 100644 index 46e909d7..00000000 --- a/src/journal/src/grpc/server.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use futures::StreamExt; -use tonic::{Request, Response, Status}; - -use super::{proto, proto::*}; -use crate::{Event, Journal, Stream, Timestamp}; - -pub struct Server { - journal: J, -} - -impl Server { - pub fn new(journal: J) -> Self { - Server { journal } - } - - pub fn into_service(self) -> journal_server::JournalServer> { - journal_server::JournalServer::new(self) - } -} - -#[tonic::async_trait] -impl journal_server::Journal for Server { - type ReadEventsStream = - Box> + Send + Unpin>; - - async fn create_stream( - &self, - request: Request, - ) -> Result, Status> { - let input = request.into_inner(); - self.journal.create_stream(&input.stream).await?; - Ok(Response::new(CreateStreamResponse {})) - } - - async fn delete_stream( - &self, - request: Request, - ) -> Result, Status> { - let input = request.into_inner(); - self.journal.delete_stream(&input.stream).await?; - Ok(Response::new(DeleteStreamResponse {})) - } - - async fn append_event( - &self, - request: Request, - ) -> Result, Status> { - let input = request.into_inner(); - let stream = self.journal.stream(&input.stream).await?; - stream - .append_event(Event { - ts: Timestamp::deserialize(input.ts)?, - data: 
input.data, - }) - .await?; - Ok(Response::new(AppendEventResponse {})) - } - - async fn release_events( - &self, - request: Request, - ) -> Result, Status> { - let input = request.into_inner(); - let stream = self.journal.stream(&input.stream).await?; - let ts = Timestamp::deserialize(input.ts)?; - stream.release_events(ts).await?; - Ok(Response::new(ReleaseEventsResponse {})) - } - - async fn read_events( - &self, - request: Request, - ) -> Result, Status> { - let input = request.into_inner(); - let stream = self.journal.stream(&input.stream).await?; - let ts = Timestamp::deserialize(input.ts)?; - let event_stream = stream.read_events(ts).await; - Ok(Response::new(Box::new(event_stream.map( - |result| match result { - Ok(es) => { - let events = es - .into_iter() - .map(|e| proto::Event { - ts: e.ts.serialize(), - data: e.data, - }) - .collect(); - Ok(ReadEventsResponse { events }) - } - Err(e) => Err(Status::from(e)), - }, - )))) - } -} diff --git a/src/journal/src/grpc/stream.rs b/src/journal/src/grpc/stream.rs deleted file mode 100644 index d19e9d08..00000000 --- a/src/journal/src/grpc/stream.rs +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use futures::StreamExt; -use tonic::Streaming; - -use super::{client::Client, proto::*}; -use crate::{async_trait, Error, Event, Result, ResultStream, Timestamp}; - -#[derive(Clone)] -pub struct Stream { - client: Client, - stream: String, -} - -impl Stream { - pub fn new(client: Client, stream: String) -> Stream { - Stream { client, stream } - } - - async fn read_events_internal(&self, ts: Timestamp) -> Result> { - let input = ReadEventsRequest { - stream: self.stream.clone(), - ts: ts.serialize(), - }; - self.client.read_events(input).await - } -} - -#[async_trait] -impl crate::Stream for Stream { - async fn read_events(&self, ts: Timestamp) -> ResultStream> { - let output = self.read_events_internal(ts).await; - match output { - Ok(output) => Box::pin(output.map(|result| match result { - Ok(resp) => { - let events: Result> = resp - .events - .into_iter() - .map(|e| { - Ok(Event { - ts: Timestamp::deserialize(e.ts)?, - data: e.data, - }) - }) - .collect(); - Ok(events?) - } - Err(status) => Err(Error::from(status)), - })), - Err(e) => Box::pin(futures::stream::once(futures::future::err(e))), - } - } - - async fn append_event(&self, event: Event) -> Result<()> { - let input = AppendEventRequest { - stream: self.stream.clone(), - ts: event.ts.serialize(), - data: event.data, - }; - self.client.append_event(input).await?; - Ok(()) - } - - async fn release_events(&self, ts: Timestamp) -> Result<()> { - let input = ReleaseEventsRequest { - stream: self.stream.clone(), - ts: ts.serialize(), - }; - self.client.release_events(input).await?; - Ok(()) - } -} diff --git a/src/journal/src/journal.rs b/src/journal/src/journal.rs deleted file mode 100644 index db022b0d..00000000 --- a/src/journal/src/journal.rs +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{async_trait, Result, Stream}; - -/// An interface to manipulate a journal. -#[async_trait] -pub trait Journal: Clone + Send + Sync + 'static { - type Stream: Stream; - - /// Returns a stream. - async fn stream(&self, name: &str) -> Result; - - /// Creates a stream. - /// - /// # Errors - /// - /// Returns `Error::AlreadyExists` if the stream already exists. - async fn create_stream(&self, name: &str) -> Result; - - /// Deletes a stream. - /// - /// Using a deleted stream is an undefined behavior. - /// - /// # Errors - /// - /// Returns `Error::NotFound` if the stream doesn't exist. - async fn delete_stream(&self, name: &str) -> Result<()>; -} diff --git a/src/journal/src/lib.rs b/src/journal/src/lib.rs deleted file mode 100644 index 6d080c1b..00000000 --- a/src/journal/src/lib.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! An Engula module that provides stream storage abstractions and -//! implementations. -//! -//! # Abstraction -//! -//! 
[`Journal`] is an abstraction to store data streams. -//! -//! # Implementation -//! -//! Some built-in implementations of [`Journal`]: -//! -//! - [`mem`](crate::mem) -//! - [`file`](crate::file) -//! - [`grpc`](crate::grpc) -//! -//! [`Journal`]: crate::Journal - -mod error; -mod journal; -mod stream; - -pub mod file; -pub mod grpc; -pub mod mem; - -pub use async_trait::async_trait; - -pub type ResultStream = futures::stream::BoxStream<'static, Result>; - -pub use self::{ - error::{Error, Result}, - journal::Journal, - stream::{Event, Stream, Timestamp}, -}; diff --git a/src/journal/src/mem/journal.rs b/src/journal/src/mem/journal.rs deleted file mode 100644 index 93f4fa79..00000000 --- a/src/journal/src/mem/journal.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::{ - collections::{hash_map, HashMap}, - sync::Arc, -}; - -use tokio::sync::Mutex; - -use super::stream::Stream; -use crate::{async_trait, Error, Result}; - -#[derive(Clone)] -pub struct Journal { - streams: Arc>>, -} - -impl Default for Journal { - fn default() -> Self { - Self { - streams: Arc::new(Mutex::new(HashMap::new())), - } - } -} - -#[async_trait] -impl crate::Journal for Journal { - type Stream = Stream; - - async fn stream(&self, name: &str) -> Result { - let streams = self.streams.lock().await; - match streams.get(name) { - Some(stream) => Ok(stream.clone()), - None => Err(Error::NotFound(format!("stream '{}'", name))), - } - } - - async fn create_stream(&self, name: &str) -> Result { - let stream = Stream::default(); - let mut streams = self.streams.lock().await; - match streams.entry(name.to_owned()) { - hash_map::Entry::Vacant(ent) => { - ent.insert(stream.clone()); - Ok(stream) - } - hash_map::Entry::Occupied(ent) => { - Err(Error::AlreadyExists(format!("stream '{}'", ent.key()))) - } - } - } - - async fn delete_stream(&self, name: &str) -> Result<()> { - let mut streams = self.streams.lock().await; - match streams.remove(name) { - Some(_) => Ok(()), - None => Err(Error::NotFound(format!("stream '{}'", name))), - } - } -} diff --git a/src/journal/src/mem/mod.rs b/src/journal/src/mem/mod.rs deleted file mode 100644 index 82b90423..00000000 --- a/src/journal/src/mem/mod.rs +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! A [`Journal`] implementation that stores data in memory. -//! -//! [`Journal`]: crate::Journal - -mod journal; -mod stream; - -pub use self::{journal::Journal, stream::Stream}; - -#[cfg(test)] -mod tests { - use futures::TryStreamExt; - - use crate::*; - - #[tokio::test] - async fn test() -> Result<()> { - let j = mem::Journal::default(); - let stream = j.create_stream("a").await?; - let event = Event { - ts: 0.into(), - data: vec![1, 2, 3], - }; - stream.append_event(event.clone()).await?; - let mut events = stream.read_events(0.into()).await; - let got = events.try_next().await?; - assert_eq!(got, Some(vec![event])); - Ok(()) - } -} diff --git a/src/journal/src/mem/stream.rs b/src/journal/src/mem/stream.rs deleted file mode 100644 index c457fbb8..00000000 --- a/src/journal/src/mem/stream.rs +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::{collections::VecDeque, sync::Arc}; - -use futures::{future, stream}; -use tokio::sync::Mutex; - -use crate::{async_trait, Error, Event, Result, ResultStream, Timestamp}; - -#[derive(Clone)] -pub struct Stream { - events: Arc>>, -} - -impl Default for Stream { - fn default() -> Self { - Self { - events: Arc::new(Mutex::new(VecDeque::new())), - } - } -} - -#[async_trait] -impl crate::Stream for Stream { - async fn read_events(&self, ts: Timestamp) -> ResultStream> { - let events = self.events.lock().await; - let offset = events.partition_point(|x| x.ts < ts); - Box::pin(stream::once(future::ok( - events.range(offset..).cloned().collect(), - ))) - } - - async fn append_event(&self, event: Event) -> Result<()> { - let mut events = self.events.lock().await; - if let Some(last_ts) = events.back().map(|x| x.ts) { - if event.ts <= last_ts { - return Err(Error::InvalidArgument(format!( - "timestamp {:?} <= last timestamp {:?}", - event.ts, last_ts - ))); - } - } - events.push_back(event); - Ok(()) - } - - async fn release_events(&self, ts: Timestamp) -> Result<()> { - let mut events = self.events.lock().await; - let index = events.partition_point(|x| x.ts < ts); - events.drain(..index); - Ok(()) - } -} diff --git a/src/journal/src/stream.rs b/src/journal/src/stream.rs deleted file mode 100644 index 0a275d08..00000000 --- a/src/journal/src/stream.rs +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -use std::convert::TryInto; - -use crate::{async_trait, Error, Result, ResultStream}; - -/// A generic timestamp to order events. -#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] -pub struct Timestamp(u64); - -impl Timestamp { - pub fn serialize(&self) -> Vec { - self.0.to_be_bytes().to_vec() - } - - pub fn deserialize(bytes: Vec) -> Result { - let bytes: [u8; 8] = bytes - .try_into() - .map_err(|v| Error::Unknown(format!("malformed bytes: {:?}", v)))?; - Ok(Self(u64::from_be_bytes(bytes))) - } -} - -impl From for Timestamp { - fn from(v: u64) -> Self { - Self(v) - } -} - -#[derive(Clone, Debug, PartialEq)] -pub struct Event { - pub ts: Timestamp, - pub data: Vec, -} - -/// An interface to manipulate a stream. -#[async_trait] -pub trait Stream: Clone + Send + Sync + 'static { - /// Reads events since a timestamp (inclusive). - async fn read_events(&self, ts: Timestamp) -> ResultStream>; - - /// Appends an event. - async fn append_event(&self, event: Event) -> Result<()>; - - /// Releases events up to a timestamp (exclusive). - async fn release_events(&self, ts: Timestamp) -> Result<()>; -} diff --git a/src/kernel/Cargo.toml b/src/kernel/Cargo.toml deleted file mode 100644 index 511a8730..00000000 --- a/src/kernel/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "engula-kernel" -version = "0.2.0" -edition = "2021" -license = "Apache-2.0" -homepage = "https://engula.io" -repository = "https://github.com/engula/engula" -description = "An Engula module that provides stateful environment abstractions and implementations." 
- -[dependencies] -engula-journal = { version = "0.2", path = "../journal" } -engula-storage = { version = "0.2", path = "../storage" } - -thiserror = "1.0" -async-trait = "0.1" -futures = "0.3" -tokio = { version = "1.14", features = ["full"] } -tokio-stream = { version = "0.1", features = ["sync"] } -tonic = "0.6" -prost = "0.9" - -[dev-dependencies] -tempfile = "3" - -[build-dependencies] -tonic-build = "0.6" diff --git a/src/kernel/src/error.rs b/src/kernel/src/error.rs deleted file mode 100644 index d7054c31..00000000 --- a/src/kernel/src/error.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use engula_journal::Error as JournalError; -use engula_storage::Error as StorageError; -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum Error { - #[error("{0} is not found")] - NotFound(String), - #[error("{0} already exists")] - AlreadyExists(String), - #[error("{0}")] - InvalidArgument(String), - #[error("{0}")] - Internal(String), - #[error(transparent)] - Io(#[from] std::io::Error), - #[error("{0}")] - Corrupted(String), - #[error(transparent)] - Unknown(Box), -} - -impl Error { - pub fn unknown(err: impl std::error::Error + Send + 'static) -> Self { - Self::Unknown(Box::new(err)) - } -} - -impl From for Error { - fn from(err: JournalError) -> Self { - match err { - JournalError::NotFound(s) => Self::NotFound(s), - JournalError::AlreadyExists(s) => Self::AlreadyExists(s), - JournalError::InvalidArgument(s) => Self::InvalidArgument(s), - JournalError::Io(err) => Self::Io(err), - JournalError::Corrupted(s) => Self::Corrupted(s), - err @ JournalError::Unknown(_) => Self::Unknown(Box::new(err)), - } - } -} - -impl From for Error { - fn from(err: StorageError) -> Self { - match err { - StorageError::NotFound(s) => Self::NotFound(s), - StorageError::AlreadyExists(s) => Self::AlreadyExists(s), - StorageError::InvalidArgument(s) => Self::InvalidArgument(s), - StorageError::Io(err) => Self::Io(err), - StorageError::Unknown(err) => Self::Unknown(err), - } - } -} - -pub type Result = std::result::Result; diff --git a/src/kernel/src/file/kernel.rs b/src/kernel/src/file/kernel.rs deleted file mode 100644 index 9cc883b4..00000000 --- a/src/kernel/src/file/kernel.rs +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::path::Path; - -use super::{manifest::Manifest, Journal, Storage}; -use crate::Result; - -pub type Kernel = crate::local::Kernel; - -const SEGMENT_SIZE: usize = 64 * 1024 * 1024; - -impl Kernel { - pub async fn open(path: impl AsRef) -> Result { - let path = path.as_ref(); - let journal_path = path.join("journal"); - let storage_path = path.join("storage"); - let manifest_path = path.join("MANIFEST"); - let journal = Journal::open(journal_path, SEGMENT_SIZE).await?; - let storage = Storage::new(storage_path).await?; - let manifest = Manifest::open(manifest_path).await; - Self::init(journal, storage, manifest).await - } -} diff --git a/src/kernel/src/file/manifest.rs b/src/kernel/src/file/manifest.rs deleted file mode 100644 index 4f12c9af..00000000 --- a/src/kernel/src/file/manifest.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::{io::ErrorKind, path::PathBuf}; - -use prost::Message; -use tokio::{ - fs::{File, OpenOptions}, - io::{AsyncReadExt, AsyncWriteExt}, -}; - -use crate::{async_trait, Error, Result, Version}; - -#[derive(Clone)] -pub struct Manifest { - path: PathBuf, -} - -impl Manifest { - pub async fn open(path: impl Into) -> Self { - Self { path: path.into() } - } -} - -#[async_trait] -impl crate::manifest::Manifest for Manifest { - async fn load_version(&self) -> Result { - let mut buf = Vec::new(); - match File::open(&self.path).await { - Ok(mut file) => { - file.read_to_end(&mut buf).await?; - Version::decode(buf.as_ref()).map_err(|err| Error::Corrupted(err.to_string())) - } - Err(err) => { - if err.kind() == ErrorKind::NotFound { - Ok(Version::default()) - } else { - Err(err.into()) - } - } - } - } - - async fn save_version(&self, version: &Version) -> Result<()> { - let buf = version.encode_to_vec(); - let mut file = OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&self.path) - .await?; - file.write_buf(&mut buf.as_ref()).await?; - file.sync_data().await?; - Ok(()) - } -} diff --git a/src/kernel/src/file/mod.rs b/src/kernel/src/file/mod.rs deleted file mode 100644 index 7663519e..00000000 --- a/src/kernel/src/file/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A [`Kernel`] implementation that stores everything in local files. -//! -//! 
[`Kernel`]: crate::Kernel - -mod kernel; -mod manifest; - -pub use engula_journal::file::Journal; -pub use engula_storage::file::Storage; - -pub use self::{kernel::Kernel, manifest::Manifest}; diff --git a/src/kernel/src/grpc/client.rs b/src/kernel/src/grpc/client.rs deleted file mode 100644 index 1b799417..00000000 --- a/src/kernel/src/grpc/client.rs +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use tonic::{transport::Channel, Streaming}; - -use super::proto::*; -use crate::Result; - -type KernelClient = kernel_client::KernelClient; - -#[derive(Clone)] -pub struct Client { - client: KernelClient, -} - -impl Client { - pub async fn apply_update(&self, input: ApplyUpdateRequest) -> Result { - let mut client = self.client.clone(); - let resp = client.apply_update(input).await?; - Ok(resp.into_inner()) - } - - pub async fn current_version( - &self, - input: CurrentVersionRequest, - ) -> Result { - let mut client = self.client.clone(); - let resp = client.current_version(input).await?; - Ok(resp.into_inner()) - } - - pub async fn version_updates( - &self, - input: VersionUpdatesRequest, - ) -> Result> { - let mut client = self.client.clone(); - let resp = client.version_updates(input).await?; - Ok(resp.into_inner()) - } - - pub async fn place_lookup(&self, input: PlaceLookupRequest) -> Result { - let mut client = self.client.clone(); - let resp = client.place_lookup(input).await?; - 
Ok(resp.into_inner()) - } - - pub async fn connect(addr: &str) -> Result { - let client = KernelClient::connect(addr.to_owned()).await?; - Ok(Client { client }) - } -} diff --git a/src/kernel/src/grpc/compose.rs b/src/kernel/src/grpc/compose.rs deleted file mode 100644 index 40e466f9..00000000 --- a/src/kernel/src/grpc/compose.rs +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::path::Path; - -use engula_journal::{grpc as grpc_journal, Error as JournalError, Journal}; -use engula_storage::{grpc as grpc_storage, Error as StorageError, Storage}; - -use crate::{ - file::Manifest as FileManifest, - local::{Kernel as LocalKernel, DEFAULT_NAME}, - manifest::Manifest, - mem::Manifest as MemManifest, - Result, -}; - -pub type Kernel = LocalKernel; - -async fn create_default_stream(journal: &impl Journal) -> Result<()> { - match journal.create_stream(DEFAULT_NAME).await { - Err(JournalError::AlreadyExists(_)) => Ok(()), - Ok(_) => Ok(()), - Err(e) => Err(e.into()), - } -} - -async fn create_default_bucket(storage: &impl Storage) -> Result<()> { - match storage.create_bucket(DEFAULT_NAME).await { - Err(StorageError::AlreadyExists(_)) => Ok(()), - Ok(_) => Ok(()), - Err(e) => Err(e.into()), - } -} - -async fn create_kernel( - journal_addr: &str, - storage_addr: &str, - manifest: M, -) -> Result> { - let journal = grpc_journal::Journal::connect(journal_addr).await?; - let storage = 
grpc_storage::Storage::connect(storage_addr).await?; - - // HACK: Create default stream & bucket here to avoid manipulating the stream or - // bucket from `Kernel::stream` or `Kernel::bucket` result not found. - // See https://github.com/engula/engula/issues/194 for details. - create_default_stream(&journal).await?; - create_default_bucket(&storage).await?; - Kernel::init(journal, storage, manifest).await -} - -pub type MemKernel = Kernel; - -impl MemKernel { - pub async fn open(journal_addr: &str, storage_addr: &str) -> Result { - create_kernel(journal_addr, storage_addr, MemManifest::default()).await - } -} - -pub type FileKernel = Kernel; - -impl FileKernel { - pub async fn open>( - journal_addr: &str, - storage_addr: &str, - path: P, - ) -> Result { - let manifest = FileManifest::open(path.as_ref()).await; - create_kernel(journal_addr, storage_addr, manifest).await - } -} diff --git a/src/kernel/src/grpc/error.rs b/src/kernel/src/grpc/error.rs deleted file mode 100644 index 235a686e..00000000 --- a/src/kernel/src/grpc/error.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::Error; - -impl From for Error { - fn from(s: tonic::Status) -> Self { - match s.code() { - tonic::Code::NotFound => Error::NotFound(s.message().into()), - tonic::Code::AlreadyExists => Error::AlreadyExists(s.message().into()), - tonic::Code::InvalidArgument => Error::InvalidArgument(s.message().into()), - _ => Error::Unknown(Box::new(s)), - } - } -} - -impl From for Error { - fn from(e: tonic::transport::Error) -> Self { - Error::Unknown(Box::new(e)) - } -} - -impl From for tonic::Status { - fn from(err: Error) -> Self { - let (code, message) = match err { - Error::NotFound(s) => (tonic::Code::NotFound, s), - Error::AlreadyExists(s) => (tonic::Code::AlreadyExists, s), - Error::InvalidArgument(s) => (tonic::Code::InvalidArgument, s), - Error::Internal(s) => (tonic::Code::Internal, s), - Error::Io(inner) => (tonic::Code::Internal, inner.to_string()), - Error::Corrupted(s) => (tonic::Code::DataLoss, s), - Error::Unknown(s) => (tonic::Code::Unknown, s.to_string()), - }; - tonic::Status::new(code, message) - } -} diff --git a/src/kernel/src/grpc/kernel.proto b/src/kernel/src/grpc/kernel.proto deleted file mode 100644 index 0d70cffb..00000000 --- a/src/kernel/src/grpc/kernel.proto +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package engula.kernel.v1; - -import "metadata.proto"; - -service Kernel { - rpc ApplyUpdate(ApplyUpdateRequest) returns (ApplyUpdateResponse) {} - - rpc CurrentVersion(CurrentVersionRequest) returns (CurrentVersionResponse) {} - - rpc VersionUpdates(VersionUpdatesRequest) returns (stream VersionUpdatesResponse) {} - - rpc PlaceLookup(PlaceLookupRequest) returns (PlaceLookupResponse) {} -} - -message ApplyUpdateRequest { - metadata.v1.VersionUpdate version_update = 1; -} - -message ApplyUpdateResponse {} - -message CurrentVersionRequest {} - -message CurrentVersionResponse { - metadata.v1.Version version = 1; -} - -message VersionUpdatesRequest { - uint64 sequence = 1; -} - -message VersionUpdatesResponse { - metadata.v1.VersionUpdate version_update = 1; -} - -message PlaceLookupRequest {} - -message PlaceLookupResponse { - string journal_address = 1; - string storage_address = 2; -} diff --git a/src/kernel/src/grpc/kernel.rs b/src/kernel/src/grpc/kernel.rs deleted file mode 100644 index 0c68ed8b..00000000 --- a/src/kernel/src/grpc/kernel.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::sync::Arc; - -use engula_journal::{grpc as grpc_journal, Error as JournalError, Journal}; -use engula_storage::{grpc as grpc_storage, Error as StorageError, Storage}; -use futures::StreamExt; - -use super::{client::Client, proto::*}; -use crate::{async_trait, Error, KernelUpdate, Result, ResultStream, Sequence}; - -pub(crate) const DEFAULT_STREAM: &str = "DEFAULT"; -pub(crate) const DEFAULT_BUCKET: &str = "DEFAULT"; - -#[derive(Clone)] -pub struct Kernel { - client: Client, - journal: grpc_journal::Journal, - storage: grpc_storage::Storage, -} - -impl Kernel { - pub async fn connect(addr: &str) -> Result { - let endpoint = format!("http://{}", addr); - let client = Client::connect(&endpoint).await?; - let resp = client.place_lookup(PlaceLookupRequest {}).await?; - let journal = grpc_journal::Journal::connect(&resp.journal_address).await?; - let storage = grpc_storage::Storage::connect(&resp.storage_address).await?; - Ok(Kernel { - client, - journal, - storage, - }) - } -} - -#[async_trait] -impl crate::Kernel for Kernel { - type Bucket = grpc_storage::Bucket; - type Stream = grpc_journal::Stream; - - /// Returns a journal stream. - async fn stream(&self) -> Result { - match self.journal.stream(DEFAULT_STREAM).await { - Ok(stream) => Ok(stream), - Err(JournalError::NotFound(_)) => { - Ok(self.journal.create_stream(DEFAULT_STREAM).await?) - } - Err(e) => Err(e.into()), - } - } - - /// Returns a storage bucket. - async fn bucket(&self) -> Result { - match self.storage.bucket(DEFAULT_BUCKET).await { - Ok(bucket) => Ok(bucket), - Err(StorageError::NotFound(_)) => { - Ok(self.storage.create_bucket(DEFAULT_BUCKET).await?) - } - Err(e) => Err(e.into()), - } - } - - /// Applies a kernel update. - async fn apply_update(&self, update: KernelUpdate) -> Result<()> { - let input = ApplyUpdateRequest { - version_update: Some(update.update), - }; - self.client.apply_update(input).await?; - Ok(()) - } - - /// Returns the current version. 
- async fn current_version(&self) -> Result> { - let input = CurrentVersionRequest {}; - let resp = self.client.current_version(input).await?; - let version = resp - .version - .ok_or_else(|| Error::Internal("CurrentVersionResponse::version is none".into()))?; - Ok(Arc::new(version)) - } - - /// Returns a stream of version updates since a given sequence (inclusive). - async fn version_updates(&self, sequence: Sequence) -> ResultStream> { - let input = VersionUpdatesRequest { sequence }; - match self.client.version_updates(input).await { - Ok(output) => Box::new(output.map(|result| match result { - Ok(resp) => Ok(Arc::new(resp.version_update.ok_or_else(|| { - Error::Internal("VersionUpdatesResponse::version_update is none".into()) - })?)), - Err(status) => Err(status.into()), - })), - Err(e) => Box::new(futures::stream::once(futures::future::err(e))), - } - } -} diff --git a/src/kernel/src/grpc/mod.rs b/src/kernel/src/grpc/mod.rs deleted file mode 100644 index 4be1f580..00000000 --- a/src/kernel/src/grpc/mod.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -//! A [`Kernel`] implementation that interacts with gRPC kernel service. -//! -//! 
[`Kernel`]: crate::Kernel - -mod client; -mod compose; -mod error; -mod kernel; -mod proto; -mod server; - -pub use self::{ - client::Client, - compose::{FileKernel, Kernel as ComposeKernel, MemKernel}, - kernel::Kernel, - server::Server, -}; - -#[cfg(test)] -mod tests { - use futures::TryStreamExt; - use tokio::net::TcpListener; - use tokio_stream::wrappers::TcpListenerStream; - - use super::{MemKernel, Server}; - use crate::*; - - async fn mock_journal_and_storage_server( - ) -> std::result::Result> { - let listener = TcpListener::bind("127.0.0.1:0").await?; - let local_addr = listener.local_addr()?; - tokio::task::spawn(async move { - let journal = mem::Journal::default(); - let storage = mem::Storage::default(); - tonic::transport::Server::builder() - .add_service(engula_journal::grpc::Server::new(journal).into_service()) - .add_service(engula_storage::grpc::Server::new(storage).into_service()) - .serve_with_incoming(TcpListenerStream::new(listener)) - .await - .unwrap(); - }); - - Ok(local_addr.to_string()) - } - - #[tokio::test(flavor = "multi_thread")] - async fn test() -> std::result::Result<(), Box> { - let address = mock_journal_and_storage_server().await?; - let listener = TcpListener::bind("127.0.0.1:0").await?; - let local_addr = listener.local_addr()?; - tokio::task::spawn(async move { - let kernel = MemKernel::open(&address, &address).await.unwrap(); - let server = Server::new(&address, &address, kernel); - tonic::transport::Server::builder() - .add_service(server.into_service()) - .serve_with_incoming(TcpListenerStream::new(listener)) - .await - .unwrap(); - }); - - let kernel = grpc::Kernel::connect(&local_addr.to_string()).await?; - let version = kernel.current_version().await?; - assert_eq!(version.sequence, 0); - assert_eq!(version.meta.len(), 0); - assert_eq!(version.objects.len(), 0); - - let handle = { - let mut expect = VersionUpdate { - sequence: 1, - ..Default::default() - }; - expect.add_meta.insert("a".to_owned(), b"b".to_vec()); - 
expect.remove_meta.push("b".to_owned()); - expect.add_objects.push("a".to_owned()); - expect.remove_objects.push("b".to_owned()); - let mut version_updates = kernel.version_updates(0).await; - tokio::spawn(async move { - let update = version_updates.try_next().await.unwrap().unwrap(); - assert_eq!(*update, expect); - }) - }; - - let mut update = KernelUpdate::default(); - update.add_meta("a", "b"); - update.remove_meta("b"); - update.add_object("a"); - update.remove_object("b"); - kernel.apply_update(update).await?; - - handle.await.unwrap(); - - let new_version = kernel.current_version().await?; - assert_eq!(new_version.sequence, 1); - assert_eq!(new_version.meta.len(), 1); - assert_eq!(new_version.objects.len(), 1); - - Ok(()) - } -} diff --git a/src/kernel/src/grpc/server.rs b/src/kernel/src/grpc/server.rs deleted file mode 100644 index b534a0db..00000000 --- a/src/kernel/src/grpc/server.rs +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use futures::StreamExt; -use tonic::{Request, Response, Status}; - -use super::{compose::Kernel as ComposeKernel, proto::*}; -use crate::{manifest::Manifest, Kernel, KernelUpdate}; - -pub struct Server { - journal_address: String, - storage_address: String, - kernel: ComposeKernel, -} - -impl Server { - pub fn new(journal_addr: &str, storage_addr: &str, kernel: ComposeKernel) -> Self { - Server { - journal_address: journal_addr.to_owned(), - storage_address: storage_addr.to_owned(), - kernel, - } - } - - pub fn into_service(self) -> kernel_server::KernelServer { - kernel_server::KernelServer::new(self) - } -} - -#[tonic::async_trait] -impl kernel_server::Kernel for Server { - type VersionUpdatesStream = - Box> + Send + Unpin>; - - async fn apply_update( - &self, - request: Request, - ) -> Result, Status> { - let input = request.into_inner(); - if let Some(update) = input.version_update { - self.kernel.apply_update(KernelUpdate { update }).await?; - } - Ok(Response::new(ApplyUpdateResponse {})) - } - - async fn current_version( - &self, - _request: Request, - ) -> Result, Status> { - let version = self.kernel.current_version().await?; - Ok(Response::new(CurrentVersionResponse { - version: Some((*version).clone()), - })) - } - - async fn version_updates( - &self, - request: Request, - ) -> Result, Status> { - let input = request.into_inner(); - let updates_stream = self.kernel.version_updates(input.sequence).await; - Ok(Response::new(Box::new(updates_stream.map( - |result| match result { - Ok(version_update) => Ok(VersionUpdatesResponse { - version_update: Some((*version_update).clone()), - }), - Err(e) => Err(e.into()), - }, - )))) - } - - async fn place_lookup( - &self, - _request: Request, - ) -> Result, Status> { - Ok(Response::new(PlaceLookupResponse { - journal_address: self.journal_address.clone(), - storage_address: self.storage_address.clone(), - })) - } -} diff --git a/src/kernel/src/kernel.rs b/src/kernel/src/kernel.rs deleted file mode 100644 index 
ad42590b..00000000 --- a/src/kernel/src/kernel.rs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::sync::Arc; - -use crate::{async_trait, Bucket, Result, ResultStream, Sequence, Stream, Version, VersionUpdate}; - -/// An interface to interact with a kernel. -#[async_trait] -pub trait Kernel: Clone + Send + Sync + 'static { - type Stream: Stream; - type Bucket: Bucket; - - /// Returns a journal stream. - async fn stream(&self) -> Result; - - /// Returns a storage bucket. - async fn bucket(&self) -> Result; - - /// Applies a kernel update. - async fn apply_update(&self, update: KernelUpdate) -> Result<()>; - - /// Returns the current version. - async fn current_version(&self) -> Result>; - - /// Returns a stream of version updates since a given sequence (inclusive). 
- async fn version_updates(&self, sequence: Sequence) -> ResultStream>; -} - -#[derive(Default)] -pub struct KernelUpdate { - pub(crate) update: VersionUpdate, -} - -impl KernelUpdate { - pub fn add_meta(&mut self, key: impl Into, value: impl Into>) -> &mut Self { - self.update.add_meta.insert(key.into(), value.into()); - self - } - - pub fn remove_meta(&mut self, key: impl Into) -> &mut Self { - self.update.remove_meta.push(key.into()); - self - } - - pub fn add_object(&mut self, name: impl Into) -> &mut Self { - self.update.add_objects.push(name.into()); - self - } - - pub fn remove_object(&mut self, name: impl Into) -> &mut Self { - self.update.remove_objects.push(name.into()); - self - } -} diff --git a/src/kernel/src/lib.rs b/src/kernel/src/lib.rs deleted file mode 100644 index ff2a2b46..00000000 --- a/src/kernel/src/lib.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! An Engula module that provides stateful environment abstractions and -//! implementations. -//! -//! # Abstraction -//! -//! [`Kernel`] is an abstraction to provide a stateful environment to storage -//! engines. -//! -//! # Implementation -//! -//! Some built-in implementations of [`Kernel`]: -//! -//! - [`mem`](crate::mem) -//! - [`file`](crate::file) -//! - [`grpc`](crate::grpc) -//! -//! 
[`Kernel`]: crate::Kernel - -mod error; -mod kernel; -mod manifest; -mod metadata; - -pub mod file; -pub mod grpc; -mod local; -pub mod mem; - -pub use async_trait::async_trait; -pub use engula_journal::{Event, Journal, Stream, Timestamp}; -pub use engula_storage::{Bucket, Storage}; - -pub type ResultStream = Box> + Send + Unpin>; - -pub use self::{ - error::{Error, Result}, - kernel::{Kernel, KernelUpdate}, - metadata::{Sequence, Version, VersionUpdate}, -}; - -#[cfg(test)] -mod tests { - use futures::TryStreamExt; - - use crate::*; - - #[tokio::test] - async fn kernel() -> Result<()> { - let tmp = tempfile::tempdir()?; - - let kernel = mem::Kernel::open().await?; - test_kernel(kernel).await?; - - let kernel = file::Kernel::open(tmp.path()).await?; - test_kernel(kernel).await?; - - Ok(()) - } - - async fn test_kernel(kernel: impl Kernel) -> Result<()> { - let handle = { - let mut expect = VersionUpdate { - sequence: 1, - ..Default::default() - }; - expect.add_meta.insert("a".to_owned(), b"b".to_vec()); - expect.remove_meta.push("b".to_owned()); - expect.add_objects.push("a".to_owned()); - expect.remove_objects.push("b".to_owned()); - let mut version_updates = kernel.version_updates(0).await; - tokio::spawn(async move { - let update = version_updates.try_next().await.unwrap().unwrap(); - assert_eq!(*update, expect); - }) - }; - - let mut update = KernelUpdate::default(); - update.add_meta("a", "b"); - update.remove_meta("b"); - update.add_object("a"); - update.remove_object("b"); - kernel.apply_update(update).await?; - - handle.await.unwrap(); - Ok(()) - } -} diff --git a/src/kernel/src/local/kernel.rs b/src/kernel/src/local/kernel.rs deleted file mode 100644 index 374de32e..00000000 --- a/src/kernel/src/local/kernel.rs +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::sync::Arc; - -use engula_journal::Error as JournalError; -use engula_storage::Error as StorageError; -use futures::TryStreamExt; -use tokio::sync::{broadcast, Mutex}; -use tokio_stream::wrappers::BroadcastStream; - -use crate::{ - async_trait, manifest::Manifest, Error, Journal, KernelUpdate, Result, ResultStream, Sequence, - Storage, Version, VersionUpdate, -}; - -#[derive(Clone)] -pub struct Kernel { - inner: Arc>, - journal: J, - storage: S, - manifest: M, -} - -struct Inner { - current: Arc, - updates: broadcast::Sender>, -} - -impl Kernel -where - J: Journal, - S: Storage, - M: Manifest, -{ - pub async fn init(journal: J, storage: S, manifest: M) -> Result { - let version = manifest.load_version().await?; - let (updates, _) = broadcast::channel(1024); - let inner = Inner { - current: Arc::new(version), - updates, - }; - Ok(Self { - inner: Arc::new(Mutex::new(inner)), - journal, - storage, - manifest, - }) - } -} - -pub(crate) const DEFAULT_NAME: &str = "DEFAULT"; - -#[async_trait] -impl crate::Kernel for Kernel -where - J: Journal, - S: Storage, - M: Manifest, -{ - type Bucket = S::Bucket; - type Stream = J::Stream; - - async fn stream(&self) -> Result { - match self.journal.stream(DEFAULT_NAME).await { - Ok(stream) => Ok(stream), - Err(JournalError::NotFound(_)) => { - let stream = self.journal.create_stream(DEFAULT_NAME).await?; - Ok(stream) - } - Err(err) => Err(err.into()), - } - } - - async fn bucket(&self) -> Result { - match self.storage.bucket(DEFAULT_NAME).await { - Ok(bucket) => Ok(bucket), - 
Err(StorageError::NotFound(_)) => { - let bucket = self.storage.create_bucket(DEFAULT_NAME).await?; - Ok(bucket) - } - Err(err) => Err(err.into()), - } - } - - async fn apply_update(&self, update: KernelUpdate) -> Result<()> { - let mut inner = self.inner.lock().await; - - let mut version = (*inner.current).clone(); - let mut version_update = update.update; - version_update.sequence = version.sequence + 1; - version.update(&version_update); - self.manifest.save_version(&version).await?; - - inner.current = Arc::new(version); - inner - .updates - .send(Arc::new(version_update)) - .map_err(Error::unknown)?; - Ok(()) - } - - async fn current_version(&self) -> Result> { - let inner = self.inner.lock().await; - Ok(inner.current.clone()) - } - - async fn version_updates(&self, _: Sequence) -> ResultStream> { - // TODO: handle sequence - let inner = self.inner.lock().await; - let stream = BroadcastStream::new(inner.updates.subscribe()); - Box::new(stream.map_err(Error::unknown)) - } -} diff --git a/src/kernel/src/mem/manifest.rs b/src/kernel/src/mem/manifest.rs deleted file mode 100644 index e3635ad9..00000000 --- a/src/kernel/src/mem/manifest.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::sync::Arc; - -use tokio::sync::Mutex; - -use crate::{async_trait, Result, Version}; - -#[derive(Clone)] -pub struct Manifest { - current: Arc>, -} - -impl Default for Manifest { - fn default() -> Self { - Self { - current: Arc::new(Mutex::new(Version::default())), - } - } -} - -#[async_trait] -impl crate::manifest::Manifest for Manifest { - async fn load_version(&self) -> Result { - let current = self.current.lock().await; - Ok(current.clone()) - } - - async fn save_version(&self, version: &Version) -> Result<()> { - let mut current = self.current.lock().await; - (*current) = version.clone(); - Ok(()) - } -} diff --git a/src/kernel/src/mem/mod.rs b/src/kernel/src/mem/mod.rs deleted file mode 100644 index 53669447..00000000 --- a/src/kernel/src/mem/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A [`Kernel`] implementation that stores everything in memory. -//! -//! [`Kernel`]: crate::Kernel - -mod kernel; -mod manifest; - -pub use engula_journal::mem::Journal; -pub use engula_storage::mem::Storage; - -pub use self::{kernel::Kernel, manifest::Manifest}; diff --git a/src/kernel/src/metadata.rs b/src/kernel/src/metadata.rs deleted file mode 100644 index 0b45c106..00000000 --- a/src/kernel/src/metadata.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2021 The Engula Authors. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod v1 { - tonic::include_proto!("engula.metadata.v1"); -} - -pub use v1::{Version, VersionUpdate}; - -/// An increasing number to order versions. -pub type Sequence = u64; - -impl Version { - pub fn update(&mut self, update: &VersionUpdate) { - self.sequence = update.sequence; - for meta in &update.add_meta { - self.meta.insert(meta.0.clone(), meta.1.clone()); - } - for name in &update.remove_meta { - self.meta.remove(name); - } - for desc in &update.add_objects { - self.objects.push(desc.clone()); - } - for name in &update.remove_objects { - if let Some(index) = self.objects.iter().position(|x| x == name) { - self.objects.remove(index); - } - } - } -} diff --git a/src/object-engine/client/Cargo.toml b/src/object-engine/client/Cargo.toml new file mode 100644 index 00000000..b1801acc --- /dev/null +++ b/src/object-engine/client/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "object-engine-client" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +engula-apis = { version = "0.3", path = "../../apis" } +object-engine-proto = { version = "0.1", path = "../proto" } + +prost = "0.9" +thiserror = "1.0" +tokio = { version = "1.15", features = ["full"] } +tonic = "0.6" + +[dev-dependencies] +anyhow = "1.0" diff --git a/src/object-engine/client/examples/engine.rs b/src/object-engine/client/examples/engine.rs new file mode 100644 index 00000000..1cfe4bf5 --- /dev/null +++ 
b/src/object-engine/client/examples/engine.rs @@ -0,0 +1,27 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::Result; +use object_engine_client::Engine; + +#[tokio::main] +async fn main() -> Result<()> { + let url = "http://localhost:21716"; + let engine = Engine::connect(url).await?; + let tenant = engine.create_tenant("tenant").await?; + println!("created {:?}", tenant.desc().await?); + let bucket = tenant.create_bucket("bucket").await?; + println!("created {:?}", bucket.desc().await?); + Ok(()) +} diff --git a/src/object-engine/client/src/bucket.rs b/src/object-engine/client/src/bucket.rs new file mode 100644 index 00000000..ac9a529c --- /dev/null +++ b/src/object-engine/client/src/bucket.rs @@ -0,0 +1,79 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::sync::Arc; + +use engula_apis::*; +use object_engine_proto::*; + +use crate::{master::Master, BucketTxn, Error, Result}; + +#[derive(Clone)] +pub struct Bucket { + inner: Arc, +} + +impl Bucket { + pub(crate) fn new(bucket: String, tenant: String, master: Master) -> Self { + let inner = BucketInner { + tenant, + bucket, + master, + }; + Self { + inner: Arc::new(inner), + } + } + + pub async fn desc(&self) -> Result { + let req = DescribeBucketRequest { + name: self.inner.bucket.clone(), + }; + let req = bucket_request_union::Request::DescribeBucket(req); + let res = self.inner.bucket_union_call(req).await?; + let desc = if let bucket_response_union::Response::DescribeBucket(res) = res { + res.desc + } else { + None + }; + desc.ok_or(Error::InvalidResponse) + } + + pub async fn eval(&self, _expr: Expr) -> Result { + todo!(); + } + + pub async fn metadata(&self, _key: &[u8]) -> Result>> { + todo!(); + } + + pub fn begin(&self) -> BucketTxn { + todo!(); + } +} + +struct BucketInner { + tenant: String, + bucket: String, + master: Master, +} + +impl BucketInner { + async fn bucket_union_call( + &self, + req: bucket_request_union::Request, + ) -> Result { + self.master.bucket_union(self.tenant.clone(), req).await + } +} diff --git a/src/kernel/src/mem/kernel.rs b/src/object-engine/client/src/bucket_txn.rs similarity index 58% rename from src/kernel/src/mem/kernel.rs rename to src/object-engine/client/src/bucket_txn.rs index 1ccb7b1f..9846e243 100644 --- a/src/kernel/src/mem/kernel.rs +++ b/src/object-engine/client/src/bucket_txn.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,16 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::{manifest::Manifest, Journal, Storage}; +use engula_apis::Expr; + use crate::Result; -pub type Kernel = crate::local::Kernel; +pub struct BucketTxn {} + +impl BucketTxn { + pub fn add_expr(&mut self, _expr: Expr) { + todo!(); + } + + pub fn add_metadata(&mut self, _key: Vec, _value: Vec) { + todo!(); + } + + pub fn remove_metadata(&mut self, _key: Vec) { + todo!(); + } -impl Kernel { - pub async fn open() -> Result { - let journal = Journal::default(); - let storage = Storage::default(); - let manifest = Manifest::default(); - Self::init(journal, storage, manifest).await + pub async fn commit(self) -> Result<()> { + todo!(); } } diff --git a/src/object-engine/client/src/engine.rs b/src/object-engine/client/src/engine.rs new file mode 100644 index 00000000..2b293d5f --- /dev/null +++ b/src/object-engine/client/src/engine.rs @@ -0,0 +1,82 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use object_engine_proto::*; +use tonic::transport::Endpoint; + +use crate::{master::Master, Error, Result, Tenant}; + +#[derive(Clone)] +pub struct Engine { + inner: Arc, +} + +impl Engine { + pub async fn connect(url: impl Into) -> Result { + let chan = Endpoint::new(url.into()) + .map_err(Error::unknown)? 
+ .connect() + .await + .map_err(Error::unknown)?; + let inner = EngineInner { + master: Master::new(chan), + }; + Ok(Self { + inner: Arc::new(inner), + }) + } + + pub fn tenant(&self, name: &str) -> Tenant { + self.inner.new_tenant(name.to_owned()) + } + + pub async fn create_tenant(&self, name: &str) -> Result { + let desc = TenantDesc { + name: name.to_owned(), + ..Default::default() + }; + let req = CreateTenantRequest { desc: Some(desc) }; + let req = tenant_request_union::Request::CreateTenant(req); + self.inner.tenant_union_call(req).await?; + Ok(self.tenant(name)) + } + + pub async fn delete_tenant(&self, name: &str) -> Result<()> { + let req = DeleteTenantRequest { + name: name.to_owned(), + }; + let req = tenant_request_union::Request::DeleteTenant(req); + self.inner.tenant_union_call(req).await?; + Ok(()) + } +} + +struct EngineInner { + master: Master, +} + +impl EngineInner { + fn new_tenant(&self, name: String) -> Tenant { + Tenant::new(name, self.master.clone()) + } + + async fn tenant_union_call( + &self, + req: tenant_request_union::Request, + ) -> Result { + self.master.tenant_union(req).await + } +} diff --git a/src/object-engine/client/src/error.rs b/src/object-engine/client/src/error.rs new file mode 100644 index 00000000..ba675e61 --- /dev/null +++ b/src/object-engine/client/src/error.rs @@ -0,0 +1,49 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use thiserror::Error; +use tonic::{Code, Status}; + +#[derive(Error, Debug)] +pub enum Error { + #[error("{0} is not found")] + NotFound(String), + #[error("{0} already exists")] + AlreadyExists(String), + #[error("invalid argument: {0}")] + InvalidArgument(String), + #[error("invalid response")] + InvalidResponse, + #[error("unknown error: {0}")] + Unknown(String), +} + +impl Error { + pub fn unknown(s: impl ToString) -> Self { + Self::Unknown(s.to_string()) + } +} + +impl From for Error { + fn from(s: Status) -> Self { + match s.code() { + Code::NotFound => Error::NotFound(s.message().to_owned()), + Code::AlreadyExists => Error::AlreadyExists(s.message().to_owned()), + Code::InvalidArgument => Error::InvalidArgument(s.message().to_owned()), + _ => Error::Unknown(s.to_string()), + } + } +} + +pub type Result = std::result::Result; diff --git a/src/object-engine/client/src/lib.rs b/src/object-engine/client/src/lib.rs new file mode 100644 index 00000000..87a4c360 --- /dev/null +++ b/src/object-engine/client/src/lib.rs @@ -0,0 +1,28 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod bucket; +mod bucket_txn; +mod engine; +mod error; +mod master; +mod tenant; + +pub use self::{ + bucket::Bucket, + bucket_txn::BucketTxn, + engine::Engine, + error::{Error, Result}, + tenant::Tenant, +}; diff --git a/src/object-engine/client/src/master.rs b/src/object-engine/client/src/master.rs new file mode 100644 index 00000000..1a726e94 --- /dev/null +++ b/src/object-engine/client/src/master.rs @@ -0,0 +1,70 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use object_engine_proto::*; +use tonic::transport::Channel; + +use crate::{Error, Result}; + +#[derive(Clone)] +pub struct Master { + client: master_client::MasterClient, +} + +impl Master { + pub fn new(chan: Channel) -> Self { + let client = master_client::MasterClient::new(chan); + Self { client } + } + + pub async fn tenant(&self, req: TenantRequest) -> Result { + let res = self.client.clone().tenant(req).await?; + Ok(res.into_inner()) + } + + pub async fn tenant_union( + &self, + req: tenant_request_union::Request, + ) -> Result { + let req = TenantRequest { + requests: vec![TenantRequestUnion { request: Some(req) }], + }; + let mut res = self.tenant(req).await?; + res.responses + .pop() + .and_then(|x| x.response) + .ok_or(Error::InvalidResponse) + } + + pub async fn bucket(&self, req: BucketRequest) -> Result { + let res = self.client.clone().bucket(req).await?; + Ok(res.into_inner()) + } + + pub async fn bucket_union( + &self, + tenant: String, + req: bucket_request_union::Request, + ) -> Result { + let req = BucketRequest { + tenant, + requests: vec![BucketRequestUnion { request: Some(req) }], + }; + let mut res = self.bucket(req).await?; + res.responses + .pop() + .and_then(|x| x.response) + .ok_or(Error::InvalidResponse) + } +} diff --git a/src/object-engine/client/src/tenant.rs b/src/object-engine/client/src/tenant.rs new file mode 100644 index 00000000..5c1711f2 --- /dev/null +++ b/src/object-engine/client/src/tenant.rs @@ -0,0 +1,96 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use object_engine_proto::*; + +use crate::{master::Master, Bucket, Error, Result}; + +#[derive(Clone)] +pub struct Tenant { + inner: Arc, +} + +impl Tenant { + pub fn new(name: String, master: Master) -> Self { + let inner = TenantInner { name, master }; + Self { + inner: Arc::new(inner), + } + } + + pub async fn desc(&self) -> Result { + let req = DescribeTenantRequest { + name: self.inner.name.clone(), + }; + let req = tenant_request_union::Request::DescribeTenant(req); + let res = self.inner.tenant_union_call(req).await?; + let desc = if let tenant_response_union::Response::DescribeTenant(res) = res { + res.desc + } else { + None + }; + desc.ok_or(Error::InvalidResponse) + } + + pub fn bucket(&self, name: &str) -> Bucket { + self.inner.new_bucket(name.to_owned()) + } + + pub async fn create_bucket(&self, name: &str) -> Result { + let desc = BucketDesc { + name: name.to_owned(), + ..Default::default() + }; + let req = CreateBucketRequest { desc: Some(desc) }; + let req = bucket_request_union::Request::CreateBucket(req); + self.inner.bucket_union_call(req).await?; + Ok(self.bucket(name)) + } + + pub async fn delete_bucket(&self, name: &str) -> Result<()> { + let req = DeleteBucketRequest { + name: name.to_owned(), + }; + let req = bucket_request_union::Request::DeleteBucket(req); + self.inner.bucket_union_call(req).await?; + Ok(()) + } +} + +struct TenantInner { + name: String, + master: Master, +} + +impl TenantInner { + fn new_bucket(&self, name: String) -> Bucket { + Bucket::new(name, self.name.clone(), self.master.clone()) + } + + async fn tenant_union_call( + &self, + req: tenant_request_union::Request, + ) -> Result { + self.master.tenant_union(req).await + } + + async fn bucket_union_call( + &self, + req: bucket_request_union::Request, + ) -> Result { + self.master.bucket_union(self.name.clone(), req).await + } +} diff --git 
a/src/object-engine/common/Cargo.toml b/src/object-engine/common/Cargo.toml new file mode 100644 index 00000000..cc3b771e --- /dev/null +++ b/src/object-engine/common/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "object-engine-common" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +async-trait = "0.1" +thiserror = "1.0" diff --git a/src/storage/src/error.rs b/src/object-engine/common/src/error.rs similarity index 90% rename from src/storage/src/error.rs rename to src/object-engine/common/src/error.rs index 1c196790..ca82f5d3 100644 --- a/src/storage/src/error.rs +++ b/src/object-engine/common/src/error.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,14 +14,13 @@ use thiserror::Error; -/// Errors for all storage operations. #[derive(Error, Debug)] pub enum Error { #[error("{0} is not found")] NotFound(String), #[error("{0} already exists")] AlreadyExists(String), - #[error("{0}")] + #[error("invalid argument: {0}")] InvalidArgument(String), #[error(transparent)] Io(#[from] std::io::Error), diff --git a/src/object-engine/common/src/lib.rs b/src/object-engine/common/src/lib.rs new file mode 100644 index 00000000..82874650 --- /dev/null +++ b/src/object-engine/common/src/lib.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +mod error; + +pub use async_trait::async_trait; + +pub use self::error::{Error, Result}; diff --git a/src/object-engine/filestore/Cargo.toml b/src/object-engine/filestore/Cargo.toml new file mode 100644 index 00000000..05b27a24 --- /dev/null +++ b/src/object-engine/filestore/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "object-engine-filestore" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +object-engine-common = { path = "../common" } + +async-trait = "0.1" +thiserror = "1.0" +tokio = { version = "1.15", features = ["full"] } diff --git a/src/object-engine/filestore/src/lib.rs b/src/object-engine/filestore/src/lib.rs new file mode 100644 index 00000000..45912873 --- /dev/null +++ b/src/object-engine/filestore/src/lib.rs @@ -0,0 +1,21 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod store; + +pub mod local; + +use object_engine_common::{async_trait, Error, Result}; + +pub use self::store::{Bucket, RandomRead, SequentialWrite, Store, Tenant}; diff --git a/src/object-engine/filestore/src/local/bucket.rs b/src/object-engine/filestore/src/local/bucket.rs new file mode 100644 index 00000000..ddd6b9a9 --- /dev/null +++ b/src/object-engine/filestore/src/local/bucket.rs @@ -0,0 +1,83 @@ +// Copyright 2022 The Engula Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{os::unix::fs::FileExt, path::PathBuf}; + +use tokio::io::AsyncWriteExt; + +use crate::{async_trait, Result}; + +pub struct Bucket { + path: PathBuf, +} + +impl Bucket { + pub(crate) fn new(path: PathBuf) -> Self { + Self { path } + } +} + +#[async_trait] +impl crate::Bucket for Bucket { + type RandomReader = RandomReader; + type SequentialWriter = SequentialWriter; + + async fn new_random_reader(&self, name: &str) -> Result { + let path = self.path.join(name); + let file = tokio::fs::File::open(&path).await?; + Ok(RandomReader { + file: file.into_std().await, + }) + } + + async fn new_sequential_writer(&self, name: &str) -> Result { + let path = self.path.join(name); + let file = tokio::fs::OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(&path) + .await?; + Ok(SequentialWriter { file }) + } +} + +pub struct RandomReader { + file: std::fs::File, +} + +#[async_trait] +impl crate::RandomRead for RandomReader { + async fn read_at(&self, buf: &mut [u8], offset: u64) -> Result { + let size = self.file.read_at(buf, offset)?; + Ok(size) + } +} + +pub struct SequentialWriter { + file: tokio::fs::File, +} + +#[async_trait] +impl crate::SequentialWrite for SequentialWriter { + async fn write(&mut self, buf: &[u8]) -> Result<()> { + self.file.write_all(buf).await?; + Ok(()) + } + + async fn flush(&mut self) -> Result<()> { + self.file.sync_all().await?; + Ok(()) + } +} diff --git 
a/src/object-engine/filestore/src/local/mod.rs b/src/object-engine/filestore/src/local/mod.rs new file mode 100644 index 00000000..5de91735 --- /dev/null +++ b/src/object-engine/filestore/src/local/mod.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod bucket; +mod store; +mod tenant; + +pub use self::{bucket::Bucket, store::Store, tenant::Tenant}; diff --git a/src/object-engine/filestore/src/local/store.rs b/src/object-engine/filestore/src/local/store.rs new file mode 100644 index 00000000..62d2d68f --- /dev/null +++ b/src/object-engine/filestore/src/local/store.rs @@ -0,0 +1,48 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::{fs, path::PathBuf}; + +use super::Tenant; +use crate::{async_trait, Error, Result}; + +pub struct Store { + path: PathBuf, +} + +impl Store { + pub fn open(path: impl Into) -> Result { + let path = path.into(); + fs::create_dir_all(&path)?; + Ok(Self { path }) + } +} + +#[async_trait] +impl crate::Store for Store { + type Tenant = Tenant; + + fn tenant(&self, name: &str) -> Self::Tenant { + Tenant::new(self.path.join(name)) + } + + async fn create_tenant(&self, name: &str) -> Result { + let path = self.path.join(name); + if path.exists() { + return Err(Error::AlreadyExists(format!("tenant {}", name))); + } + fs::create_dir_all(&path)?; + Ok(self.tenant(name)) + } +} diff --git a/src/object-engine/filestore/src/local/tenant.rs b/src/object-engine/filestore/src/local/tenant.rs new file mode 100644 index 00000000..7060ccd3 --- /dev/null +++ b/src/object-engine/filestore/src/local/tenant.rs @@ -0,0 +1,46 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::{fs, path::PathBuf}; + +use super::Bucket; +use crate::{async_trait, Error, Result}; + +pub struct Tenant { + path: PathBuf, +} + +impl Tenant { + pub(crate) fn new(path: PathBuf) -> Self { + Self { path } + } +} + +#[async_trait] +impl crate::Tenant for Tenant { + type Bucket = Bucket; + + fn bucket(&self, name: &str) -> Bucket { + Bucket::new(self.path.join(name)) + } + + async fn create_bucket(&self, name: &str) -> Result { + let path = self.path.join(name); + if path.exists() { + return Err(Error::AlreadyExists(format!("bucket {}", name))); + } + fs::create_dir_all(&path)?; + Ok(self.bucket(name)) + } +} diff --git a/src/object-engine/filestore/src/store.rs b/src/object-engine/filestore/src/store.rs new file mode 100644 index 00000000..0e473c32 --- /dev/null +++ b/src/object-engine/filestore/src/store.rs @@ -0,0 +1,55 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::{async_trait, Result}; + +#[async_trait] +pub trait Store { + type Tenant: Tenant; + + fn tenant(&self, name: &str) -> Self::Tenant; + + async fn create_tenant(&self, name: &str) -> Result; +} + +#[async_trait] +pub trait Tenant { + type Bucket: Bucket; + + fn bucket(&self, name: &str) -> Self::Bucket; + + async fn create_bucket(&self, name: &str) -> Result; +} + +#[async_trait] +pub trait Bucket { + type RandomReader: RandomRead; + type SequentialWriter: SequentialWrite; + + async fn new_random_reader(&self, name: &str) -> Result; + + async fn new_sequential_writer(&self, name: &str) -> Result; +} + +#[async_trait] +pub trait RandomRead { + async fn read_at(&self, buf: &mut [u8], offset: u64) -> Result; +} + +#[async_trait] +pub trait SequentialWrite { + async fn write(&mut self, buf: &[u8]) -> Result<()>; + + async fn flush(&mut self) -> Result<()>; +} diff --git a/src/object-engine/master/Cargo.toml b/src/object-engine/master/Cargo.toml new file mode 100644 index 00000000..e639db27 --- /dev/null +++ b/src/object-engine/master/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "object-engine-master" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +object-engine-proto = { version = "0.1", path = "../proto" } + +prost = "0.9" +thiserror = "1.0" +tokio = { version = "1.15", features = ["full"] } +tonic = "0.6" diff --git a/src/engine/hash/src/error.rs b/src/object-engine/master/src/error.rs similarity index 53% rename from src/engine/hash/src/error.rs rename to src/object-engine/master/src/error.rs index 7a78e48d..cec0ebde 100644 --- a/src/engine/hash/src/error.rs +++ b/src/object-engine/master/src/error.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -12,28 +12,27 @@ // See the License for the specific language governing permissions and // limitations under the License. -use engula_journal::Error as JournalError; -use engula_kernel::Error as KernelError; -use engula_storage::Error as StorageError; use thiserror::Error; +use tonic::{Code, Status}; #[derive(Error, Debug)] pub enum Error { - #[error(transparent)] - Io(#[from] std::io::Error), - #[error("corrupted: {0}")] - Corrupted(String), - #[error(transparent)] - Kernel(#[from] KernelError), - #[error(transparent)] - Journal(#[from] JournalError), - #[error(transparent)] - Storage(#[from] StorageError), + #[error("{0} is not found")] + NotFound(String), + #[error("{0} already exists")] + AlreadyExists(String), + #[error("invalid request")] + InvalidRequest, } -impl Error { - pub fn corrupted(err: E) -> Self { - Self::Corrupted(err.to_string()) +impl From for Status { + fn from(err: Error) -> Status { + let (code, message) = match err { + Error::NotFound(m) => (Code::NotFound, m), + Error::AlreadyExists(m) => (Code::AlreadyExists, m), + Error::InvalidRequest => (Code::InvalidArgument, "invalid request".to_owned()), + }; + Status::new(code, message) } } diff --git a/src/object-engine/master/src/lib.rs b/src/object-engine/master/src/lib.rs new file mode 100644 index 00000000..832a3f3b --- /dev/null +++ b/src/object-engine/master/src/lib.rs @@ -0,0 +1,19 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod error; +mod master; +mod server; + +pub use self::server::Server; diff --git a/src/object-engine/master/src/master.rs b/src/object-engine/master/src/master.rs new file mode 100644 index 00000000..fa490076 --- /dev/null +++ b/src/object-engine/master/src/master.rs @@ -0,0 +1,112 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{collections::HashMap, sync::Arc}; + +use object_engine_proto::*; +use tokio::sync::Mutex; + +use crate::error::{Error, Result}; + +#[derive(Clone)] +pub struct Master { + inner: Arc>, +} + +struct MasterInner { + next_id: u64, + tenants: HashMap, +} + +impl Master { + pub fn new() -> Self { + let inner = MasterInner { + next_id: 1, + tenants: HashMap::new(), + }; + Self { + inner: Arc::new(Mutex::new(inner)), + } + } + + pub async fn tenant(&self, name: &str) -> Result { + let inner = self.inner.lock().await; + inner + .tenants + .get(name) + .cloned() + .ok_or_else(|| Error::NotFound(format!("tenant {}", name))) + } + + pub async fn create_tenant(&self, mut desc: TenantDesc) -> Result { + let mut inner = self.inner.lock().await; + if inner.tenants.contains_key(&desc.name) { + return Err(Error::AlreadyExists(format!("tenant {}", desc.name))); + } + desc.id = inner.next_id; + inner.next_id += 1; + let db = Tenant::new(desc.clone()); + inner.tenants.insert(desc.name.clone(), db); + Ok(desc) + } +} + +#[derive(Clone)] +pub struct Tenant { + inner: Arc>, +} + +struct TenantInner 
{ + desc: TenantDesc, + next_id: u64, + buckets: HashMap, +} + +impl Tenant { + fn new(desc: TenantDesc) -> Self { + let inner = TenantInner { + desc, + next_id: 1, + buckets: HashMap::new(), + }; + Self { + inner: Arc::new(Mutex::new(inner)), + } + } + + pub async fn desc(&self) -> TenantDesc { + self.inner.lock().await.desc.clone() + } + + pub async fn bucket(&self, name: &str) -> Result { + let inner = self.inner.lock().await; + inner + .buckets + .get(name) + .cloned() + .ok_or_else(|| Error::NotFound(format!("bucket {}", name))) + } + + pub async fn create_bucket(&self, mut desc: BucketDesc) -> Result { + let mut inner = self.inner.lock().await; + if inner.buckets.contains_key(&desc.name) { + return Err(Error::AlreadyExists(format!("bucket {}", desc.name))); + } + desc.id = inner.next_id; + inner.next_id += 1; + desc.parent_id = inner.desc.id; + inner.buckets.insert(desc.name.clone(), desc.clone()); + Ok(desc) + } +} diff --git a/src/object-engine/master/src/server.rs b/src/object-engine/master/src/server.rs new file mode 100644 index 00000000..7eb784cd --- /dev/null +++ b/src/object-engine/master/src/server.rs @@ -0,0 +1,176 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use object_engine_proto::*; +use tonic::{Request, Response, Status}; + +use crate::{ + error::{Error, Result}, + master::{Master, Tenant}, +}; + +type TonicResult = std::result::Result; + +pub struct Server { + master: Master, +} + +impl Default for Server { + fn default() -> Self { + Self::new() + } +} + +impl Server { + pub fn new() -> Self { + Self { + master: Master::new(), + } + } + + pub fn into_service(self) -> master_server::MasterServer { + master_server::MasterServer::new(self) + } +} + +#[tonic::async_trait] +impl master_server::Master for Server { + async fn tenant(&self, req: Request) -> TonicResult> { + let req = req.into_inner(); + let res = self.handle_tenant(req).await?; + Ok(Response::new(res)) + } + + async fn bucket(&self, req: Request) -> TonicResult> { + let req = req.into_inner(); + let res = self.handle_bucket(req).await?; + Ok(Response::new(res)) + } +} + +impl Server { + pub(crate) async fn handle_tenant(&self, req: TenantRequest) -> Result { + let mut res = TenantResponse::default(); + for req_union in req.requests { + let res_union = self.handle_tenant_union(req_union).await?; + res.responses.push(res_union); + } + Ok(res) + } + + pub(crate) async fn handle_tenant_union( + &self, + req: TenantRequestUnion, + ) -> Result { + let req = req.request.ok_or(Error::InvalidRequest)?; + let res = match req { + tenant_request_union::Request::ListTenants(_req) => { + todo!(); + } + tenant_request_union::Request::CreateTenant(req) => { + let res = self.handle_create_tenant(req).await?; + tenant_response_union::Response::CreateTenant(res) + } + tenant_request_union::Request::UpdateTenant(_req) => { + todo!(); + } + tenant_request_union::Request::DeleteTenant(_req) => { + todo!(); + } + tenant_request_union::Request::DescribeTenant(req) => { + let res = self.handle_describe_tenant(req).await?; + tenant_response_union::Response::DescribeTenant(res) + } + }; + Ok(TenantResponseUnion { + response: Some(res), + }) + } + + async fn 
handle_create_tenant(&self, req: CreateTenantRequest) -> Result { + let desc = req.desc.ok_or(Error::InvalidRequest)?; + let desc = self.master.create_tenant(desc).await?; + Ok(CreateTenantResponse { desc: Some(desc) }) + } + + async fn handle_describe_tenant( + &self, + req: DescribeTenantRequest, + ) -> Result { + let db = self.master.tenant(&req.name).await?; + let desc = db.desc().await; + Ok(DescribeTenantResponse { desc: Some(desc) }) + } +} + +impl Server { + async fn handle_bucket(&self, req: BucketRequest) -> Result { + let tenant = self.master.tenant(&req.tenant).await?; + let mut res = BucketResponse::default(); + for req_union in req.requests { + let res_union = self.handle_bucket_union(tenant.clone(), req_union).await?; + res.responses.push(res_union); + } + Ok(res) + } + + async fn handle_bucket_union( + &self, + tenant: Tenant, + req: BucketRequestUnion, + ) -> Result { + let req = req.request.ok_or(Error::InvalidRequest)?; + let res = match req { + bucket_request_union::Request::ListBuckets(_req) => { + todo!(); + } + bucket_request_union::Request::CreateBucket(req) => { + let res = self.handle_create_bucket(tenant, req).await?; + bucket_response_union::Response::CreateBucket(res) + } + bucket_request_union::Request::UpdateBucket(_req) => { + todo!(); + } + bucket_request_union::Request::DeleteBucket(_req) => { + todo!(); + } + bucket_request_union::Request::DescribeBucket(req) => { + let res = self.handle_describe_bucket(tenant, req).await?; + bucket_response_union::Response::DescribeBucket(res) + } + }; + Ok(BucketResponseUnion { + response: Some(res), + }) + } + + async fn handle_create_bucket( + &self, + tenant: Tenant, + req: CreateBucketRequest, + ) -> Result { + let desc = req.desc.ok_or(Error::InvalidRequest)?; + let desc = tenant.create_bucket(desc).await?; + Ok(CreateBucketResponse { desc: Some(desc) }) + } + + async fn handle_describe_bucket( + &self, + tenant: Tenant, + req: DescribeBucketRequest, + ) -> Result { + let desc = 
tenant.bucket(&req.name).await?; + Ok(DescribeBucketResponse { desc: Some(desc) }) + } +} diff --git a/src/object-engine/proto/Cargo.toml b/src/object-engine/proto/Cargo.toml new file mode 100644 index 00000000..3c5a6182 --- /dev/null +++ b/src/object-engine/proto/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "object-engine-proto" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +prost = "0.9" +tonic = "0.6" + +[build-dependencies] +tonic-build = "0.6" diff --git a/src/storage/build.rs b/src/object-engine/proto/build.rs similarity index 84% rename from src/storage/build.rs rename to src/object-engine/proto/build.rs index 10baa283..82339482 100644 --- a/src/storage/build.rs +++ b/src/object-engine/proto/build.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,6 +13,6 @@ // limitations under the License. fn main() -> Result<(), Box> { - tonic_build::compile_protos("src/grpc/storage.proto")?; + tonic_build::configure().compile(&["proto/master.proto"], &["proto"])?; Ok(()) } diff --git a/src/object-engine/proto/proto/master.proto b/src/object-engine/proto/proto/master.proto new file mode 100644 index 00000000..773ea0ad --- /dev/null +++ b/src/object-engine/proto/proto/master.proto @@ -0,0 +1,125 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package objectengine.master.v1; + +service Master { + rpc Tenant(TenantRequest) returns (TenantResponse) {} + + rpc Bucket(BucketRequest) returns (BucketResponse) {} +} + +message TenantRequest { repeated TenantRequestUnion requests = 1; } + +message TenantResponse { repeated TenantResponseUnion responses = 1; } + +message TenantRequestUnion { + oneof request { + ListTenantsRequest list_tenants = 1; + CreateTenantRequest create_tenant = 2; + UpdateTenantRequest update_tenant = 3; + DeleteTenantRequest delete_tenant = 4; + DescribeTenantRequest describe_tenant = 5; + } +} + +message TenantResponseUnion { + oneof response { + ListTenantsResponse list_tenants = 1; + CreateTenantResponse create_tenant = 2; + UpdateTenantResponse update_tenant = 3; + DeleteTenantResponse delete_tenant = 4; + DescribeTenantResponse describe_tenant = 5; + } +} + +message ListTenantsRequest {} + +message ListTenantsResponse { repeated TenantDesc descs = 1; } + +message CreateTenantRequest { TenantDesc desc = 1; } + +message CreateTenantResponse { TenantDesc desc = 1; } + +message UpdateTenantRequest { TenantDesc desc = 1; } + +message UpdateTenantResponse {} + +message DeleteTenantRequest { string name = 1; } + +message DeleteTenantResponse {} + +message DescribeTenantRequest { string name = 1; } + +message DescribeTenantResponse { TenantDesc desc = 1; } + +message BucketRequest { + string tenant = 1; + repeated BucketRequestUnion requests = 2; +} + +message BucketResponse { repeated BucketResponseUnion responses = 1; } + +message BucketRequestUnion { + oneof request { + ListBucketsRequest list_buckets = 1; + CreateBucketRequest create_bucket = 2; + UpdateBucketRequest update_bucket = 3; + DeleteBucketRequest delete_bucket = 4; + DescribeBucketRequest describe_bucket = 5; + } +} + +message BucketResponseUnion { + oneof response { + ListBucketsResponse 
list_buckets = 1; + CreateBucketResponse create_bucket = 2; + UpdateBucketResponse update_bucket = 3; + DeleteBucketResponse delete_bucket = 4; + DescribeBucketResponse describe_bucket = 5; + } +} + +message ListBucketsRequest {} + +message ListBucketsResponse { repeated BucketDesc descs = 1; } + +message CreateBucketRequest { BucketDesc desc = 1; } + +message CreateBucketResponse { BucketDesc desc = 1; } + +message UpdateBucketRequest { BucketDesc desc = 1; } + +message UpdateBucketResponse {} + +message DeleteBucketRequest { string name = 1; } + +message DeleteBucketResponse {} + +message DescribeBucketRequest { string name = 1; } + +message DescribeBucketResponse { BucketDesc desc = 1; } + +message TenantDesc { + uint64 id = 1; + string name = 2; +} + +message BucketDesc { + uint64 id = 1; + string name = 2; + uint64 parent_id = 3; +} diff --git a/src/object-engine/proto/src/lib.rs b/src/object-engine/proto/src/lib.rs new file mode 100644 index 00000000..d4b7bbab --- /dev/null +++ b/src/object-engine/proto/src/lib.rs @@ -0,0 +1,17 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![allow(clippy::all)] + +tonic::include_proto!("objectengine.master.v1"); diff --git a/src/storage/Cargo.toml b/src/storage/Cargo.toml deleted file mode 100644 index 6717fd4d..00000000 --- a/src/storage/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "engula-storage" -version = "0.2.0" -edition = "2021" -license = "Apache-2.0" -homepage = "https://engula.io" -repository = "https://github.com/engula/engula" -description = "An Engula module that provides object storage abstractions and implementations." - -[dependencies] -thiserror = "1.0" -async-trait = "0.1" -bytes = "1.1.0" -tokio = { version = "1.13", features = ["full"] } -tonic = "0.6" -prost = "0.9" -futures = "0.3" -tokio-stream = { version = "0.1", features = ["net"] } -tokio-util = { version = "0.6", features = ["io"] } - -[dev-dependencies] -tempfile = "3" - -[build-dependencies] -tonic-build = "0.6" diff --git a/src/storage/src/bucket.rs b/src/storage/src/bucket.rs deleted file mode 100644 index 52daff4b..00000000 --- a/src/storage/src/bucket.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use tokio::io::{AsyncRead, AsyncWrite}; - -use crate::{async_trait, Result}; - -/// An interface to manipulate a bucket. -#[async_trait] -pub trait Bucket: Clone + Send + Sync + 'static { - type SequentialReader: AsyncRead + Send + Unpin; - type SequentialWriter: AsyncWrite + Send + Unpin; - - /// Deletes an object. 
- /// - /// # Errors - /// - /// Returns `Error::NotFound` if the object doesn't exist. - async fn delete_object(&self, name: &str) -> Result<()>; - - /// Returns a reader that reads sequentially from the object. - async fn new_sequential_reader(&self, name: &str) -> Result; - - /// Returns a writer that writes sequentially to the object. - async fn new_sequential_writer(&self, name: &str) -> Result; -} diff --git a/src/storage/src/file/bucket.rs b/src/storage/src/file/bucket.rs deleted file mode 100644 index 1ec417c6..00000000 --- a/src/storage/src/file/bucket.rs +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::path::{Path, PathBuf}; - -use tokio::{fs, io}; - -use crate::{async_trait, Error, Result}; - -#[derive(Clone)] -pub struct Bucket { - path: PathBuf, -} - -impl Bucket { - pub fn new(path: impl Into) -> Self { - Self { path: path.into() } - } - - fn object_path(&self, name: impl AsRef) -> PathBuf { - self.path.join(name) - } -} - -#[async_trait] -impl crate::Bucket for Bucket { - type SequentialReader = fs::File; - type SequentialWriter = fs::File; - - async fn delete_object(&self, name: &str) -> Result<()> { - let path = self.object_path(name); - check_io_result(fs::remove_file(&path).await, &path).await?; - Ok(()) - } - - async fn new_sequential_reader(&self, name: &str) -> Result { - let path = self.object_path(name); - let f = check_io_result(fs::OpenOptions::new().read(true).open(&path).await, &path).await?; - Ok(f) - } - - async fn new_sequential_writer(&self, name: &str) -> Result { - let path = self.object_path(name); - let f = check_io_result( - fs::OpenOptions::new() - .write(true) - .create(true) - .truncate(true) - .open(&path) - .await, - &path, - ) - .await?; - Ok(f) - } -} - -async fn check_io_result(r: io::Result, obj_path: impl AsRef) -> Result { - match r { - Ok(t) => Ok(t), - Err(err) => { - if err.kind() == io::ErrorKind::NotFound { - let parent = obj_path.as_ref().parent().unwrap(); - if !try_exists(parent).await? { - return Err(Error::NotFound(format!( - "bucket '{}'", - parent.file_name().unwrap().to_str().unwrap(), - ))); - } - return Err(Error::NotFound(format!( - "object '{}'", - obj_path.as_ref().file_name().unwrap().to_str().unwrap(), - ))); - } - Err(err.into()) - } - } -} - -// async version for `std:fs:try_exist`, remove me after https://github.com/tokio-rs/tokio/pull/3375 addressed. 
-pub async fn try_exists(path: impl AsRef) -> io::Result { - match fs::metadata(path).await { - Ok(_) => Ok(true), - Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(false), - Err(e) => Err(e), - } -} diff --git a/src/storage/src/file/mod.rs b/src/storage/src/file/mod.rs deleted file mode 100644 index 10baf054..00000000 --- a/src/storage/src/file/mod.rs +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A [`Storage`] implementation that stores data in local files. -//! -//! 
[`Storage`]: crate::Storage - -mod bucket; -mod storage; - -pub use self::storage::Storage; - -#[cfg(test)] -mod tests { - use tokio::io::{AsyncReadExt, AsyncWriteExt}; - - use crate::*; - - #[tokio::test] - async fn test_bucket_manage() -> Result<()> { - const BUCKET_NAME: &str = "test_bucket"; - let tmp = tempfile::tempdir()?; - - let s = super::Storage::new(tmp.path()).await?; - s.create_bucket(BUCKET_NAME).await?; - assert!(s.create_bucket(BUCKET_NAME).await.is_err()); - s.delete_bucket(BUCKET_NAME).await?; - Ok(()) - } - - #[tokio::test] - async fn test_object_manage() -> Result<()> { - const BUCKET_NAME: &str = "test_object"; - let tmp = tempfile::tempdir()?; - let s = super::Storage::new(tmp.path()).await?; - s.create_bucket(BUCKET_NAME).await?; - - let b = s.bucket(BUCKET_NAME).await?; - - let mut w = b.new_sequential_writer("obj-1").await?; - w.write(b"abc").await?; - w.write(b"123").await?; - w.shutdown().await?; - - let mut r = b.new_sequential_reader("obj-1").await?; - let mut got = Vec::new(); - r.read_to_end(&mut got).await?; - assert_eq!(got, b"abc123"); - Ok(()) - } - - #[tokio::test] - async fn test_duplicate_bucket() -> Result<()> { - const BUCKET_NAME: &str = "test_bucket_dup"; - let tmp = tempfile::tempdir()?; - let s = super::Storage::new(tmp.path()).await?; - s.create_bucket(BUCKET_NAME).await?; - let r = s.create_bucket(BUCKET_NAME).await; - assert!(r.is_err()); - assert!(matches!(r, Err(Error::AlreadyExists(_)))); - Ok(()) - } - - #[tokio::test] - async fn test_clear_non_empty_bucket() -> Result<()> { - const BUCKET_NAME: &str = "test_non_empty_delete"; - let tmp = tempfile::tempdir()?; - let s = super::Storage::new(tmp.path()).await?; - s.create_bucket(BUCKET_NAME).await?; - let b = s.bucket(BUCKET_NAME).await?; - let mut w = b.new_sequential_writer("obj-1").await?; - w.write(b"abcd").await?; - w.shutdown().await?; - let r = s.delete_bucket(BUCKET_NAME).await; - assert!(matches!(r, Err(Error::Io(_)))); - Ok(()) - } - - #[tokio::test] - 
async fn test_put_duplicate_obj() -> Result<()> { - const BUCKET_NAME: &str = "test_put_dup_obj"; - let tmp = tempfile::tempdir()?; - let s = super::Storage::new(tmp.path()).await?; - s.create_bucket(BUCKET_NAME).await?; - let b = s.bucket(BUCKET_NAME).await?; - - let mut w = b.new_sequential_writer("obj-1").await?; - w.write(b"abcdefg").await?; - w.shutdown().await?; - - let mut w = b.new_sequential_writer("obj-1").await?; - w.write(b"123").await?; - w.shutdown().await?; - - let mut r = b.new_sequential_reader("obj-1").await?; - let mut got = Vec::new(); - r.read_to_end(&mut got).await?; - assert_eq!(got, b"123"); - - Ok(()) - } - - #[tokio::test] - async fn test_not_exist_bucket() -> Result<()> { - const BUCKET_NAME: &str = "test_not_exist_bucket"; - let tmp = tempfile::tempdir()?; - let s = super::Storage::new(tmp.path()).await?; - let b = s.bucket(BUCKET_NAME).await?; - - let r = b.delete_object("obj-1").await; - assert!(matches!(r, Err(Error::NotFound(_)))); - - let r = b.new_sequential_reader("obj").await; - assert!(matches!(r, Err(Error::NotFound(_)))); - - let w = b.new_sequential_writer("obj").await; - assert!(matches!(w, Err(Error::NotFound(_)))); - - Ok(()) - } -} diff --git a/src/storage/src/file/storage.rs b/src/storage/src/file/storage.rs deleted file mode 100644 index a4b7d53b..00000000 --- a/src/storage/src/file/storage.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::path::{Path, PathBuf}; - -use tokio::fs; - -use super::bucket::{try_exists, Bucket}; -use crate::{async_trait, Error, Result}; - -#[derive(Clone)] -pub struct Storage { - root: PathBuf, -} - -impl Storage { - pub async fn new(root: impl Into) -> Result { - let path = root.into(); - fs::DirBuilder::new().recursive(true).create(&path).await?; - Ok(Self { root: path }) - } - - fn bucket_path(&self, name: impl AsRef) -> PathBuf { - self.root.join(name) - } -} - -#[async_trait] -impl crate::Storage for Storage { - type Bucket = Bucket; - - async fn bucket(&self, name: &str) -> Result { - let path = self.bucket_path(name); - Ok(Bucket::new(path)) - } - - async fn create_bucket(&self, name: &str) -> Result { - let path = self.bucket_path(name); - - if try_exists(&path).await? { - return Err(Error::AlreadyExists(format!("bucket '{}'", name))); - } - - fs::create_dir_all(&path).await?; - - self.bucket(name).await - } - - async fn delete_bucket(&self, name: &str) -> Result<()> { - let path = self.bucket_path(name); - - fs::remove_dir(path).await?; - - Ok(()) - } -} diff --git a/src/storage/src/grpc/bucket.rs b/src/storage/src/grpc/bucket.rs deleted file mode 100644 index a9918af8..00000000 --- a/src/storage/src/grpc/bucket.rs +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::{ - io::{Error as IoError, ErrorKind}, - pin::Pin, - task::{Context, Poll}, -}; - -use bytes::Bytes; -use futures::{stream::StreamExt, FutureExt}; -use tokio::{ - io::{AsyncRead, AsyncWrite}, - sync::mpsc, - task::JoinHandle, -}; -use tokio_stream::wrappers::ReceiverStream; -use tokio_util::{io::StreamReader, sync::PollSender}; - -use super::{ - client::Client, - proto::{DeleteObjectRequest, ReadObjectRequest, UploadObjectRequest, UploadObjectResponse}, -}; -use crate::{async_trait, Result}; - -#[derive(Clone)] -pub struct Bucket { - client: Client, - bucket_name: String, -} - -impl Bucket { - pub fn new(client: Client, bucket_name: impl Into) -> Self { - Self { - client, - bucket_name: bucket_name.into(), - } - } -} - -#[async_trait] -impl crate::Bucket for Bucket { - type SequentialWriter = SequentialWriter; - - type SequentialReader = impl AsyncRead + Send + Unpin; - - async fn delete_object(&self, name: &str) -> Result<()> { - let input = DeleteObjectRequest { - bucket: self.bucket_name.to_owned(), - object: name.to_owned(), - }; - self.client.delete_object(input).await?; - Ok(()) - } - - async fn new_sequential_reader(&self, name: &str) -> Result { - let input = ReadObjectRequest { - bucket: self.bucket_name.to_owned(), - object: name.to_owned(), - }; - let stream = self.client.read_object(input).await?; - - let byte_stream = stream.map(|res| { - res.map(|resp| Bytes::from(resp.content)) - .map_err(|s| IoError::new(ErrorKind::Other, format!("{:?}", s))) - }); - Ok(StreamReader::new(byte_stream)) - } - - async fn new_sequential_writer(&self, name: &str) -> Result { - Ok(SequentialWriter::new( - self.client.clone(), - self.bucket_name.to_owned(), - name.to_owned(), - )) - } -} - -type IoResult = std::result::Result; - -pub struct SequentialWriter { - tx: PollSender, - upload: JoinHandle>, - bucket_name: String, - object_name: String, -} - -impl SequentialWriter { - fn new(client: Client, bucket_name: String, object_name: String) -> Self { - let (tx, 
rx) = mpsc::channel(16); - let tx = PollSender::new(tx); - let rx = ReceiverStream::new(rx); - let upload = tokio::spawn(async move { client.upload_object(rx).await }); - Self { - tx, - upload, - bucket_name, - object_name, - } - } -} - -impl AsyncWrite for SequentialWriter { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - self.tx.poll_send_done(cx).map(|ready| { - ready - .and_then(|_| { - let req = UploadObjectRequest { - bucket: self.bucket_name.clone(), - object: self.object_name.clone(), - content: buf.to_owned(), - }; - self.tx.start_send(req) - }) - .map(|_| buf.len()) - .map_err(|err| IoError::new(ErrorKind::Other, err.to_string())) - }) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { - // Not sure what guarantee we should provide here yet. - Poll::Ready(Ok(())) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.tx.poll_send_done(cx) { - Poll::Ready(ready) => match ready { - Ok(()) => { - self.tx.close_this_sender(); - self.upload.poll_unpin(cx).map(|ready| { - ready - .map(|_| ()) - .map_err(|err| IoError::new(ErrorKind::Other, err.to_string())) - }) - } - Err(err) => Poll::Ready(Err(IoError::new(ErrorKind::Other, err.to_string()))), - }, - Poll::Pending => Poll::Pending, - } - } -} diff --git a/src/storage/src/grpc/client.rs b/src/storage/src/grpc/client.rs deleted file mode 100644 index 544af8ef..00000000 --- a/src/storage/src/grpc/client.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use tonic::{transport::Channel, IntoStreamingRequest, Streaming}; - -use super::proto::*; -use crate::Result; - -type StorageClient = storage_client::StorageClient; - -#[derive(Clone)] -pub struct Client { - client: StorageClient, -} - -impl Client { - pub async fn connect(addr: &str) -> Result { - let client = StorageClient::connect(addr.to_owned()).await?; - Ok(Client { client }) - } - - pub async fn create_bucket(&self, input: CreateBucketRequest) -> Result { - let mut client = self.client.clone(); - let response = client.create_bucket(input).await?; - Ok(response.into_inner()) - } - - pub async fn delete_bucket(&self, input: DeleteBucketRequest) -> Result { - let mut client = self.client.clone(); - let response = client.delete_bucket(input).await?; - Ok(response.into_inner()) - } - - pub async fn delete_object(&self, input: DeleteObjectRequest) -> Result { - let mut client = self.client.clone(); - let response = client.delete_object(input).await?; - Ok(response.into_inner()) - } - - pub async fn upload_object( - &self, - input: impl IntoStreamingRequest, - ) -> Result { - let mut client = self.client.clone(); - let response = client.upload_object(input).await?; - Ok(response.into_inner()) - } - - pub async fn read_object( - &self, - input: ReadObjectRequest, - ) -> Result> { - let mut client = self.client.clone(); - let response = client.read_object(input).await?; - Ok(response.into_inner()) - } -} diff --git a/src/storage/src/grpc/mod.rs b/src/storage/src/grpc/mod.rs deleted file mode 100644 index 639e6af6..00000000 --- 
a/src/storage/src/grpc/mod.rs +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A [`Storage`] implementation that interacts with gRPC storage service. -//! -//! [`Storage`]: crate::Storage - -mod bucket; -mod client; -mod error; -mod proto; -mod server; -mod storage; - -pub use self::{bucket::Bucket, client::Client, server::Server, storage::Storage}; - -#[cfg(test)] -mod tests { - - use tokio::{ - io::{AsyncReadExt, AsyncWriteExt}, - net::TcpListener, - }; - use tokio_stream::wrappers::TcpListenerStream; - - use crate::*; - - #[tokio::test(flavor = "multi_thread")] - async fn test() -> std::result::Result<(), Box> { - let listener = TcpListener::bind("127.0.0.1:0").await?; - let local_addr = listener.local_addr()?; - tokio::task::spawn(async move { - let server = super::Server::new(mem::Storage::default()); - tonic::transport::Server::builder() - .add_service(server.into_service()) - .serve_with_incoming(TcpListenerStream::new(listener)) - .await - .unwrap(); - }); - let storage = super::Storage::connect(&local_addr.to_string()).await?; - storage.create_bucket("bucket").await?; - let b = storage.bucket("bucket").await?; - let mut w = b.new_sequential_writer("object").await?; - let buf = vec![0, 1, 2]; - w.write(&buf).await?; - w.flush().await?; - w.shutdown().await?; - let mut r = b.new_sequential_reader("object").await?; - let mut got = Vec::new(); - r.read_to_end(&mut got).await?; 
- assert_eq!(got, buf); - Ok(()) - } -} diff --git a/src/storage/src/grpc/server.rs b/src/storage/src/grpc/server.rs deleted file mode 100644 index af765559..00000000 --- a/src/storage/src/grpc/server.rs +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use futures::{Stream, StreamExt, TryStreamExt}; -use tokio::io::AsyncWriteExt; -use tokio_util::io::ReaderStream; -use tonic::{Request, Response, Status, Streaming}; - -use super::proto::*; -use crate::{Bucket, Storage}; - -pub struct Server { - storage: S, -} - -impl Server { - pub fn new(storage: S) -> Self { - Self { storage } - } - - pub fn into_service(self) -> storage_server::StorageServer> { - storage_server::StorageServer::new(self) - } -} - -#[tonic::async_trait] -impl storage_server::Storage for Server { - type ReadObjectStream = impl Stream>; - - async fn create_bucket( - &self, - request: Request, - ) -> Result, Status> { - let input = request.into_inner(); - self.storage.create_bucket(&input.bucket).await?; - Ok(Response::new(CreateBucketResponse {})) - } - - async fn delete_bucket( - &self, - request: Request, - ) -> Result, Status> { - let input = request.into_inner(); - self.storage.delete_bucket(&input.bucket).await?; - Ok(Response::new(DeleteBucketResponse {})) - } - - async fn upload_object( - &self, - request: Request>, - ) -> Result, Status> { - let mut stream = request.into_inner(); - let mut cw = None; - while let 
Some(req) = stream.try_next().await? { - if cw.is_none() { - let b = self.storage.bucket(&req.bucket).await?; - let w = b.new_sequential_writer(&req.object).await?; - cw = Some(w); - } - if let Some(w) = &mut cw { - w.write_all(&req.content).await?; - } - } - if let Some(w) = &mut cw { - w.shutdown().await?; - } - Ok(Response::new(UploadObjectResponse {})) - } - - async fn delete_object( - &self, - request: Request, - ) -> Result, Status> { - let input = request.into_inner(); - let b = self.storage.bucket(&input.bucket).await?; - b.delete_object(&input.object).await?; - Ok(Response::new(DeleteObjectResponse {})) - } - - async fn read_object( - &self, - request: Request, - ) -> Result, Status> { - let input = request.into_inner(); - let b = self.storage.bucket(&input.bucket).await?; - let r = b.new_sequential_reader(&input.object).await?; - let byte_stream = ReaderStream::new(r); - let resp_stream = byte_stream.map(move |res| { - res.map(|b| ReadObjectResponse { - content: b.to_vec(), - }) - .map_err(|e| e.into()) - }); - Ok(Response::new(resp_stream)) - } -} diff --git a/src/storage/src/grpc/storage.proto b/src/storage/src/grpc/storage.proto deleted file mode 100644 index 262d44a3..00000000 --- a/src/storage/src/grpc/storage.proto +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package engula.storage.v1; - -service Storage { - // APIs to manipulate a storage. 
- - rpc CreateBucket(CreateBucketRequest) returns (CreateBucketResponse) {} - - rpc DeleteBucket(DeleteBucketRequest) returns (DeleteBucketResponse) {} - - // APIs to manipulate a bucket. - - rpc UploadObject(stream UploadObjectRequest) returns (UploadObjectResponse) {} - - rpc DeleteObject(DeleteObjectRequest) returns (DeleteObjectResponse) {} - - // APIs to manipulate an object. - - rpc ReadObject(ReadObjectRequest) returns (stream ReadObjectResponse) {} -} - -message CreateBucketRequest { string bucket = 1; } - -message CreateBucketResponse {} - -message DeleteBucketRequest { string bucket = 1; } - -message DeleteBucketResponse {} - -message UploadObjectRequest { - string bucket = 1; - string object = 2; - bytes content = 3; -} - -message UploadObjectResponse {} - -message DeleteObjectRequest { - string bucket = 1; - string object = 2; -} - -message DeleteObjectResponse {} - -message ReadObjectRequest { - string bucket = 1; - string object = 2; -} - -message ReadObjectResponse { bytes content = 1; } diff --git a/src/storage/src/grpc/storage.rs b/src/storage/src/grpc/storage.rs deleted file mode 100644 index 237cfeee..00000000 --- a/src/storage/src/grpc/storage.rs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use super::{ - bucket::Bucket, - client::Client, - proto::{CreateBucketRequest, DeleteBucketRequest}, -}; -use crate::{async_trait, Result}; - -#[derive(Clone)] -pub struct Storage { - client: Client, -} - -impl Storage { - pub async fn connect(addr: &str) -> Result { - let endpoint = format!("http://{}", addr); - let client = Client::connect(&endpoint).await?; - Ok(Storage { client }) - } -} - -#[async_trait] -impl crate::Storage for Storage { - type Bucket = Bucket; - - async fn bucket(&self, name: &str) -> Result { - Ok(Bucket::new(self.client.clone(), name)) - } - - async fn create_bucket(&self, name: &str) -> Result { - let input = CreateBucketRequest { - bucket: name.to_owned(), - }; - self.client.create_bucket(input).await?; - self.bucket(name).await - } - - async fn delete_bucket(&self, name: &str) -> Result<()> { - let input = DeleteBucketRequest { - bucket: name.to_owned(), - }; - self.client.delete_bucket(input).await?; - Ok(()) - } -} diff --git a/src/storage/src/lib.rs b/src/storage/src/lib.rs deleted file mode 100644 index 618be24a..00000000 --- a/src/storage/src/lib.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! An Engula module that provides object storage abstractions and -//! implementations. -//! -//! # Abstraction -//! -//! [`Storage`] is an abstraction to store data objects. -//! -//! # Implementation -//! -//! 
Some built-in implementations of [`Storage`]: -//! -//! - [`mem`](crate::mem) -//! - [`file`](crate::file) -//! - [`grpc`](crate::grpc) -//! -//! [`Storage`]: crate::Storage - -#![feature(type_alias_impl_trait)] - -mod bucket; -mod error; -mod storage; - -pub mod file; -pub mod grpc; -pub mod mem; - -pub use async_trait::async_trait; - -pub use self::{ - bucket::Bucket, - error::{Error, Result}, - storage::Storage, -}; diff --git a/src/storage/src/mem/bucket.rs b/src/storage/src/mem/bucket.rs deleted file mode 100644 index c1ce829e..00000000 --- a/src/storage/src/mem/bucket.rs +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::{ - collections::{hash_map, HashMap}, - future::Future, - io::{Error as IoError, ErrorKind, IoSlice}, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -use tokio::{ - io::{AsyncRead, AsyncWrite, ReadBuf}, - sync::Mutex, -}; - -use crate::{async_trait, Error, Result}; - -type Object = Arc>; -type Objects = Arc>>; - -#[derive(Clone)] -pub struct Bucket { - objects: Objects, -} - -impl Default for Bucket { - fn default() -> Self { - Self { - objects: Arc::new(Mutex::new(HashMap::new())), - } - } -} - -/// An interface to manipulate a bucket. 
-#[async_trait] -impl crate::Bucket for Bucket { - type SequentialReader = SequentialReader; - type SequentialWriter = SequentialWriter; - - async fn delete_object(&self, name: &str) -> Result<()> { - let mut objects = self.objects.lock().await; - match objects.remove(name) { - Some(_) => Ok(()), - None => Err(Error::NotFound(format!("object '{}'", name))), - } - } - - async fn new_sequential_reader(&self, name: &str) -> Result { - let objects = self.objects.lock().await; - match objects.get(name) { - Some(object) => Ok(SequentialReader::new(object.clone())), - None => Err(Error::NotFound(format!("object '{}'", name))), - } - } - - async fn new_sequential_writer(&self, name: &str) -> Result { - Ok(SequentialWriter::new(name.to_owned(), self.objects.clone())) - } -} - -pub struct SequentialReader { - object: Object, - offset: usize, -} - -impl SequentialReader { - fn new(object: Object) -> Self { - Self { object, offset: 0 } - } -} - -impl AsyncRead for SequentialReader { - fn poll_read( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - let start = self.offset; - self.offset = std::cmp::min(self.object.len(), start + buf.remaining()); - buf.put_slice(&self.object[start..self.offset]); - Poll::Ready(Ok(())) - } -} - -pub struct SequentialWriter { - name: String, - data: Vec, - objects: Objects, -} - -impl SequentialWriter { - fn new(name: String, objects: Objects) -> Self { - Self { - name, - data: Vec::new(), - objects, - } - } -} - -type IoResult = std::result::Result; - -impl AsyncWrite for SequentialWriter { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.data).poll_write(cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.data).poll_flush(cx) - } - - fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if Pin::new(&mut self.data).poll_shutdown(cx).is_ready() { - let 
data = self.data.split_off(0); - let mut lock = Box::pin(self.objects.lock()); - if let Poll::Ready(mut objects) = lock.as_mut().poll(cx) { - match objects.entry(self.name.clone()) { - hash_map::Entry::Vacant(ent) => { - ent.insert(Arc::new(data)); - return Poll::Ready(Ok(())); - } - hash_map::Entry::Occupied(ent) => { - let err = IoError::new( - ErrorKind::AlreadyExists, - format!("object '{}'", ent.key()), - ); - return Poll::Ready(Err(err)); - } - } - } - } - Poll::Pending - } - - fn poll_write_vectored( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - bufs: &[IoSlice<'_>], - ) -> Poll> { - Pin::new(&mut self.data).poll_write_vectored(cx, bufs) - } - - fn is_write_vectored(&self) -> bool { - self.data.is_write_vectored() - } -} diff --git a/src/storage/src/mem/mod.rs b/src/storage/src/mem/mod.rs deleted file mode 100644 index e999c330..00000000 --- a/src/storage/src/mem/mod.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A [`Storage`] implementation that stores data in memory. -//! -//! 
[`Storage`]: crate::Storage - -mod bucket; -mod storage; - -pub use self::{bucket::Bucket, storage::Storage}; - -#[cfg(test)] -mod tests { - use tokio::io::{AsyncReadExt, AsyncWriteExt}; - - use crate::*; - - #[tokio::test] - async fn test() -> Result<()> { - let s = super::Storage::default(); - let bucket = s.create_bucket("a").await?; - - let name = "abc"; - let data = vec![0, 1, 2]; - let mut writer = bucket.new_sequential_writer(name).await?; - writer.write_all(&data).await?; - writer.shutdown().await?; - - let mut reader = bucket.new_sequential_reader(name).await?; - let mut got = Vec::new(); - reader.read_to_end(&mut got).await?; - assert_eq!(got, data); - - Ok(()) - } -} diff --git a/src/storage/src/mem/storage.rs b/src/storage/src/mem/storage.rs deleted file mode 100644 index 25ab688f..00000000 --- a/src/storage/src/mem/storage.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use std::{ - collections::{hash_map, HashMap}, - sync::Arc, -}; - -use tokio::sync::Mutex; - -use super::bucket::Bucket; -use crate::{async_trait, Error, Result}; - -#[derive(Clone)] -pub struct Storage { - buckets: Arc>>, -} - -impl Default for Storage { - fn default() -> Self { - Self { - buckets: Arc::new(Mutex::new(HashMap::new())), - } - } -} - -#[async_trait] -impl crate::Storage for Storage { - type Bucket = Bucket; - - async fn bucket(&self, name: &str) -> Result { - let buckets = self.buckets.lock().await; - match buckets.get(name) { - Some(bucket) => Ok(bucket.clone()), - None => Err(Error::NotFound(format!("bucket '{}'", name))), - } - } - - async fn create_bucket(&self, name: &str) -> Result { - let bucket = Bucket::default(); - let mut buckets = self.buckets.lock().await; - match buckets.entry(name.to_owned()) { - hash_map::Entry::Vacant(ent) => { - ent.insert(bucket.clone()); - Ok(bucket) - } - hash_map::Entry::Occupied(ent) => { - Err(Error::AlreadyExists(format!("bucket '{}'", ent.key()))) - } - } - } - - async fn delete_bucket(&self, name: &str) -> Result<()> { - let mut buckets = self.buckets.lock().await; - match buckets.remove(name) { - Some(_) => Ok(()), - None => Err(Error::NotFound(format!("bucket '{}'", name))), - } - } -} diff --git a/src/storage/src/storage.rs b/src/storage/src/storage.rs deleted file mode 100644 index e0ac89b4..00000000 --- a/src/storage/src/storage.rs +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2021 The Engula Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{async_trait, Bucket, Result}; - -/// An interface to manipulate a storage. -#[async_trait] -pub trait Storage: Clone + Send + Sync + 'static { - type Bucket: Bucket; - - /// Returns a bucket. - async fn bucket(&self, name: &str) -> Result; - - /// Creates a bucket. - /// - /// # Errors - /// - /// Returns `Error::AlreadyExists` if the bucket already exists. - async fn create_bucket(&self, name: &str) -> Result; - - /// Deletes a bucket. - /// - /// Using a deleted bucket is an undefined behavior. - /// - /// # Errors - /// - /// Returns `Error::NotFound` if the bucket doesn't exist. - async fn delete_bucket(&self, name: &str) -> Result<()>; -} diff --git a/src/stream-engine/client/Cargo.toml b/src/stream-engine/client/Cargo.toml new file mode 100644 index 00000000..097231a8 --- /dev/null +++ b/src/stream-engine/client/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "stream-engine-client" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +stream-engine-common = { version = "0.1", path = "../common" } +stream-engine-proto = { version = "0.1", path = "../proto" } + +futures = "0.3" +prost = "0.9" +thiserror = "1.0" +tokio = { version = "1.15", features = ["full"] } +tonic = "0.6" + +[dev-dependencies] +stream-engine-master = { version = "0.1", path = "../master" } +stream-engine-store = { version = "0.1", path = "../store" } + +anyhow = "1.0" diff --git a/src/stream-engine/client/examples/engine.rs b/src/stream-engine/client/examples/engine.rs new file mode 100644 index 00000000..7ae68eba --- /dev/null +++ b/src/stream-engine/client/examples/engine.rs @@ -0,0 +1,35 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use futures::StreamExt; +use stream_engine_client::{Engine, Result}; + +#[tokio::main] +async fn main() -> Result<()> { + let url = "http://localhost:21716"; + let engine = Engine::connect(url).await?; + let tenant = engine.create_tenant("tenant").await?; + println!("created {:?}", tenant.desc().await?); + let stream = tenant.create_stream("stream").await?; + println!("created {:?}", stream.desc()); + let mut state_stream = stream.subscribe_state().await?; + println!("current state {:?}", state_stream.next().await); + let seq = stream.append(Box::new([0u8])).await?; + let mut reader = stream.new_reader().await?; + reader.seek(seq).await?; + let event = reader.wait_next().await?; + println!("append and read event {:?}", event); + println!("try read {:?}", reader.try_next().await?); + Ok(()) +} diff --git a/src/kernel/src/local/mod.rs b/src/stream-engine/client/src/core/mod.rs similarity index 81% rename from src/kernel/src/local/mod.rs rename to src/stream-engine/client/src/core/mod.rs index 99dfa22e..61a87240 100644 --- a/src/kernel/src/local/mod.rs +++ b/src/stream-engine/client/src/core/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod kernel; +mod progress; -pub use self::kernel::Kernel; -pub(crate) use self::kernel::DEFAULT_NAME; +pub(crate) use progress::Progress; diff --git a/src/kernel/src/manifest.rs b/src/stream-engine/client/src/core/progress.rs similarity index 64% rename from src/kernel/src/manifest.rs rename to src/stream-engine/client/src/core/progress.rs index 45ff20da..360064fa 100644 --- a/src/kernel/src/manifest.rs +++ b/src/stream-engine/client/src/core/progress.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,11 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{async_trait, Result, Version}; - -#[async_trait] -pub trait Manifest: Clone + Send + Sync + 'static { - async fn load_version(&self) -> Result; +/// An abstraction for describing the state of a segment store, that receives +/// and persists entries. +pub(crate) struct Progress { + matched_index: u32, +} - async fn save_version(&self, version: &Version) -> Result<()>; +impl Progress { + #[inline(always)] + pub fn matched_index(&self) -> u32 { + self.matched_index + } } diff --git a/src/stream-engine/client/src/engine.rs b/src/stream-engine/client/src/engine.rs new file mode 100644 index 00000000..7fe5e9bf --- /dev/null +++ b/src/stream-engine/client/src/engine.rs @@ -0,0 +1,46 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{ + master::{Master, Tenant}, + Result, +}; + +#[derive(Clone)] +pub struct Engine { + master: Master, +} + +impl Engine { + pub async fn connect(url: impl Into) -> Result { + Ok(Engine { + master: Master::new(url).await?, + }) + } + + #[inline(always)] + pub fn tenant(&self, name: &str) -> Tenant { + self.master.tenant(name) + } + + #[inline(always)] + pub async fn create_tenant(&self, name: &str) -> Result { + self.master.create_tenant(name).await + } + + #[inline(always)] + pub async fn delete_tenant(&self, name: &str) -> Result<()> { + self.master.delete_tenant(name).await + } +} diff --git a/src/storage/src/grpc/proto.rs b/src/stream-engine/client/src/error.rs similarity index 86% rename from src/storage/src/grpc/proto.rs rename to src/stream-engine/client/src/error.rs index 16cbf11b..67be9e19 100644 --- a/src/storage/src/grpc/proto.rs +++ b/src/stream-engine/client/src/error.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,5 +11,3 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
- -tonic::include_proto!("engula.storage.v1"); diff --git a/src/lib.rs b/src/stream-engine/client/src/lib.rs similarity index 65% rename from src/lib.rs rename to src/stream-engine/client/src/lib.rs index c35b60d6..36cfdd94 100644 --- a/src/lib.rs +++ b/src/stream-engine/client/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,20 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -pub mod engine { - pub mod hash { - pub use hash_engine::*; - } -} +mod core; +mod engine; +mod master; +mod policy; +mod store; +mod stream; -pub mod kernel { - pub use engula_kernel::*; -} +pub use stream_engine_common::{ + error::{Error, Result}, + Entry, Sequence, +}; -pub mod journal { - pub use engula_journal::*; -} - -pub mod storage { - pub use engula_storage::*; -} +pub use self::{ + engine::Engine, + master::Tenant, + stream::{EpochState, Role, Stream}, +}; diff --git a/src/stream-engine/client/src/master/client.rs b/src/stream-engine/client/src/master/client.rs new file mode 100644 index 00000000..2a58aa92 --- /dev/null +++ b/src/stream-engine/client/src/master/client.rs @@ -0,0 +1,110 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use stream_engine_proto::*; +use tonic::transport::Channel; + +use crate::{Error, Result}; + +#[derive(Clone)] +pub struct MasterClient { + client: master_client::MasterClient, +} + +impl MasterClient { + pub fn new(chan: Channel) -> Self { + let client = master_client::MasterClient::new(chan); + Self { client } + } +} + +impl MasterClient { + pub async fn tenant(&self, req: TenantRequest) -> Result { + let mut client = self.client.clone(); + let resp = client.tenant(req).await?; + Ok(resp.into_inner()) + } + + pub async fn tenant_union( + &self, + req: tenant_request_union::Request, + ) -> Result { + let req = TenantRequest { + requests: vec![TenantRequestUnion { request: Some(req) }], + }; + let mut res = self.tenant(req).await?; + res.responses + .pop() + .and_then(|x| x.response) + .ok_or(Error::InvalidResponse) + } +} + +impl MasterClient { + pub async fn stream(&self, req: StreamRequest) -> Result { + let mut client = self.client.clone(); + let resp = client.stream(req).await?; + Ok(resp.into_inner()) + } + + pub async fn stream_union( + &self, + tenant: String, + req: stream_request_union::Request, + ) -> Result { + let req = StreamRequest { + tenant, + requests: vec![StreamRequestUnion { request: Some(req) }], + }; + let mut res = self.stream(req).await?; + res.responses + .pop() + .and_then(|x| x.response) + .ok_or(Error::InvalidResponse) + } +} + +impl MasterClient { + pub async fn segment(&self, req: SegmentRequest) -> Result { + let mut client = self.client.clone(); + let resp = client.segment(req).await?; + Ok(resp.into_inner()) + } + + pub async fn segment_union( + &self, + tenant: String, + stream_id: u64, + req: segment_request_union::Request, + ) -> Result { + let req = SegmentRequest { + tenant, + stream_id, + requests: vec![SegmentRequestUnion { request: Some(req) }], + }; + let mut res = self.segment(req).await?; + res.responses + .pop() + .and_then(|x| x.response) + .ok_or(Error::InvalidResponse) + } +} + +impl MasterClient { + pub async fn 
heartbeat(&self, req: HeartbeatRequest) -> Result { + let mut client = self.client.clone(); + let resp = client.heartbeat(req).await?; + Ok(resp.into_inner()) + } +} diff --git a/src/stream-engine/client/src/master/mod.rs b/src/stream-engine/client/src/master/mod.rs new file mode 100644 index 00000000..27a275f8 --- /dev/null +++ b/src/stream-engine/client/src/master/mod.rs @@ -0,0 +1,62 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod client; +mod stream; +mod tenant; + +use stream_engine_proto::*; +use tonic::transport::Endpoint; + +use self::client::MasterClient; +pub use self::{stream::Stream, tenant::Tenant}; +use crate::Result; + +#[derive(Clone)] +pub struct Master { + master_client: MasterClient, +} + +impl Master { + pub async fn new(url: impl Into) -> Result { + let chan = Endpoint::new(url.into())?.connect().await?; + Ok(Master { + master_client: MasterClient::new(chan), + }) + } + + pub fn tenant(&self, name: &str) -> Tenant { + Tenant::new(name.to_owned(), self.master_client.clone()) + } + + pub async fn create_tenant(&self, name: &str) -> Result { + let desc = TenantDesc { + name: name.to_owned(), + ..Default::default() + }; + let req = CreateTenantRequest { desc: Some(desc) }; + let req = tenant_request_union::Request::CreateTenant(req); + self.master_client.tenant_union(req).await?; + Ok(self.tenant(name)) + } + + pub async fn delete_tenant(&self, name: &str) -> Result<()> { + let req = 
DeleteTenantRequest { + name: name.to_owned(), + }; + let req = tenant_request_union::Request::DeleteTenant(req); + self.master_client.tenant_union(req).await?; + Ok(()) + } +} diff --git a/src/stream-engine/client/src/master/stream.rs b/src/stream-engine/client/src/master/stream.rs new file mode 100644 index 00000000..e56555f5 --- /dev/null +++ b/src/stream-engine/client/src/master/stream.rs @@ -0,0 +1,313 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use stream_engine_proto::*; + +use super::MasterClient; +use crate::{Error, Result, Sequence}; + +#[derive(Debug, Clone)] +pub struct ObserverMeta { + pub observer_id: String, + pub state: ObserverState, + + /// The value of epoch in observer's memory. + pub writer_epoch: u32, + + /// The acked sequence of entries it has already known. It might less than + /// (epoch << 32). + pub acked_seq: Sequence, +} + +#[derive(Clone)] +pub struct Stream { + tenant: String, + stream_desc: StreamDesc, + master_client: MasterClient, +} + +#[allow(unused)] +impl Stream { + pub(super) fn new( + tenant: String, + stream_desc: StreamDesc, + master_client: MasterClient, + ) -> Self { + Stream { + tenant, + stream_desc, + master_client, + } + } + + #[inline(always)] + pub fn desc(&self) -> StreamDesc { + self.stream_desc.clone() + } + + /// Sends the state of a stream observer to master, and receives commands. 
+ pub async fn heartbeat(&self, observer_meta: ObserverMeta) -> Result> { + let role: Role = observer_meta.state.into(); + let req = HeartbeatRequest { + tenant: self.tenant.clone(), + writer_epoch: observer_meta.writer_epoch, + observer_id: observer_meta.observer_id, + stream_id: self.stream_desc.id, + role: role as i32, + observer_state: observer_meta.state as i32, + acked_seq: observer_meta.acked_seq.into(), + }; + + let resp = self.master_client.heartbeat(req).await?; + Ok(resp.commands.into_iter().map(Into::into).collect()) + } + + /// Get segment desc of the specified epoch of a stream. + pub async fn get_segment(&self, epoch: u32) -> Result> { + self.get_segments(vec![epoch]) + .await? + .first() + .cloned() + .ok_or(Error::InvalidResponse) + } + + pub async fn get_segments(&self, epochs: Vec) -> Result>> { + type Request = segment_request_union::Request; + type Response = segment_response_union::Response; + + let sub_requests = epochs + .into_iter() + .map(|epoch| SegmentRequestUnion { + request: Some(Request::GetSegment(GetSegmentRequest { + segment_epoch: epoch, + })), + }) + .collect(); + + let req = SegmentRequest { + tenant: self.tenant.clone(), + stream_id: self.stream_desc.id, + requests: sub_requests, + }; + let resp = self.master_client.segment(req).await?; + resp.responses + .into_iter() + .map(|resp| match resp.response { + Some(Response::GetSegment(resp)) => Ok(resp.desc), + _ => Err(Error::InvalidResponse), + }) + .collect() + } + + /// Mark the corresponding segment as sealed. The request is ignored if the + /// target segment is already sealed. 
+ pub async fn seal_segment(&self, epoch: u32) -> Result<()> { + let req = segment_request_union::Request::SealSegment(SealSegmentRequest { + segment_epoch: epoch, + }); + self.master_client + .segment_union(self.tenant.clone(), self.stream_desc.id, req) + .await?; + Ok(()) + } +} + +#[cfg(test)] +pub(crate) mod tests { + use stream_engine_master::build_master; + + use super::*; + use crate::{master::Master, Sequence}; + + #[tokio::test(flavor = "multi_thread")] + async fn heartbeat() -> Result<()> { + let replicas = vec!["a", "b", "c"]; + let local_addr = build_master(&replicas).await?; + let master = Master::new(&local_addr.to_string()).await?; + let tenant = master.create_tenant("tenant").await?; + let stream = tenant.create_stream_client("stream").await?; + + let observer_meta = ObserverMeta { + observer_id: "1".to_owned(), + writer_epoch: 1, + state: ObserverState::Leading, + acked_seq: Sequence::new(1, 0), + }; + stream.heartbeat(observer_meta).await?; + + Ok(()) + } + + async fn default_heartbeat(stream: &crate::master::stream::Stream) -> Result<()> { + let observer_meta = ObserverMeta { + observer_id: "1".to_owned(), + writer_epoch: 0, + state: ObserverState::Following, + acked_seq: Sequence::new(0, 0), + }; + stream.heartbeat(observer_meta).await?; + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn get_segment() -> Result<()> { + let replicas = vec!["a", "b", "c"]; + let local_addr = build_master(&replicas).await?; + let master = Master::new(&local_addr.to_string()).await?; + let tenant = master.create_tenant("tenant").await?; + let stream = tenant.create_stream_client("stream").await?; + default_heartbeat(&stream).await?; + + let resp = stream.get_segment(1).await?; + assert!( + matches!(resp, Some(segment_desc) if segment_desc == SegmentDesc { + stream_id: 1, + epoch: 1, + copy_set: replicas.iter().map(ToString::to_string).collect(), + state: SegmentState::Appending as i32, + }) + ); + + let resp = stream.get_segment(2).await?; + 
assert!(matches!(resp, None)); + + Ok(()) + } + + fn expect_promote_command(cmd: &Command, target_role: Role, target_epoch: u32) -> bool { + matches!(cmd, Command { command_type, role, epoch, .. } + if *command_type == CommandType::Promote as i32 && + *role == target_role as i32 && + *epoch == target_epoch) + } + + #[tokio::test(flavor = "multi_thread")] + async fn heartbeat_with_threshold_switching() -> Result<()> { + let replicas = vec!["a", "b", "c"]; + let local_addr = build_master(&replicas).await?; + let master = Master::new(&local_addr.to_string()).await?; + let tenant = master.create_tenant("tenant").await?; + let stream = tenant.create_stream_client("stream").await?; + + let observer_meta = ObserverMeta { + observer_id: "1".to_owned(), + writer_epoch: 0, + state: ObserverState::Leading, + acked_seq: u64::MAX.into(), + }; + + let commands = stream.heartbeat(observer_meta).await?; + assert_eq!(commands.len(), 1); + assert!(expect_promote_command(&commands[0], Role::Leader, 1)); + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn heartbeat_promote_leader_or_followers() -> Result<()> { + let replicas = vec!["a", "b", "c"]; + let local_addr = build_master(&replicas).await?; + let master = Master::new(&local_addr.to_string()).await?; + let tenant = master.create_tenant("tenant").await?; + let stream = tenant.create_stream_client("stream").await?; + + let observer_meta = ObserverMeta { + observer_id: "1".to_owned(), + writer_epoch: 0, + state: ObserverState::Following, + acked_seq: Sequence::new(1, 0), + }; + let commands = stream.heartbeat(observer_meta).await?; + println!("commands {:?}", commands); + let promote = commands + .iter() + .find(|cmd| expect_promote_command(cmd, Role::Leader, 1)); + assert!(promote.is_some()); + + // Now a follower send heartbeat request and receive promote request. 
+ let observer_meta = ObserverMeta { + observer_id: "2".to_owned(), + writer_epoch: 0, + state: ObserverState::Following, + acked_seq: Sequence::new(1, 0), + }; + let commands = stream.heartbeat(observer_meta).await?; + let promote = commands + .iter() + .find(|cmd| expect_promote_command(cmd, Role::Follower, 1)); + assert!(promote.is_some()); + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn seal_segment() -> Result<()> { + let master_addr = build_master(&[]).await?; + let master = Master::new(&master_addr).await?; + let tenant = master.create_tenant("tenant").await?; + let stream = tenant.create_stream_client("stream").await?; + default_heartbeat(&stream).await?; + let meta = stream.get_segment(1).await?.unwrap(); + assert_eq!(meta.state, SegmentState::Appending as i32); + + stream.seal_segment(meta.epoch).await?; + let meta = stream.get_segment(1).await?.unwrap(); + assert_eq!(meta.state, SegmentState::Sealed as i32); + + Ok(()) + } + + /// If observer lost the heartbeat response, it should receive and continue + /// the previous promote request. 
+ #[tokio::test(flavor = "multi_thread")] + async fn heartbeat_idempotent() -> std::result::Result<(), Box> { + let replicas = vec!["a", "b", "c"]; + let local_addr = build_master(&replicas).await?; + let master = Master::new(&local_addr.to_string()).await?; + let tenant = master.create_tenant("tenant").await?; + let stream = tenant.create_stream_client("stream").await?; + let observer_meta = ObserverMeta { + observer_id: "1".to_owned(), + writer_epoch: 0, + state: ObserverState::Following, + acked_seq: Sequence::new(1, 0), + }; + let commands = stream.heartbeat(observer_meta).await?; + let prev_promote = commands + .iter() + .find(|cmd| expect_promote_command(cmd, Role::Leader, 1)) + .unwrap(); + let observer_meta = ObserverMeta { + observer_id: "1".to_owned(), + writer_epoch: 0, + state: ObserverState::Following, + acked_seq: Sequence::new(1, 0), + }; + let commands = stream.heartbeat(observer_meta).await?; + let new_promote = commands + .iter() + .find(|cmd| expect_promote_command(cmd, Role::Leader, 1)) + .unwrap(); + match (prev_promote, new_promote) { + ( + Command { + epoch: prev_epoch, .. + }, + Command { + epoch: new_epoch, .. + }, + ) if prev_epoch == new_epoch => {} + _ => panic!("shouldn't happen"), + } + Ok(()) + } +} diff --git a/src/stream-engine/client/src/master/tenant.rs b/src/stream-engine/client/src/master/tenant.rs new file mode 100644 index 00000000..5b150aaf --- /dev/null +++ b/src/stream-engine/client/src/master/tenant.rs @@ -0,0 +1,166 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use stream_engine_proto::*; + +use super::{client::MasterClient, stream::Stream as MasterStreamClient}; +use crate::{Error, Result, Stream}; + +#[derive(Clone)] +pub struct Tenant { + inner: Arc, +} + +impl Tenant { + pub fn new(name: String, master_client: MasterClient) -> Self { + let inner = TenantInner { + name, + master_client, + }; + Self { + inner: Arc::new(inner), + } + } + + pub async fn desc(&self) -> Result { + let req = DescribeTenantRequest { + name: self.inner.name.clone(), + }; + let req = tenant_request_union::Request::DescribeTenant(req); + let res = self.inner.tenant_union_call(req).await?; + let desc = if let tenant_response_union::Response::DescribeTenant(res) = res { + res.desc + } else { + None + }; + desc.ok_or(Error::InvalidResponse) + } + + pub async fn stream(&self, name: &str) -> Result { + let req = DescribeStreamRequest { + name: name.to_owned(), + }; + let req = stream_request_union::Request::DescribeStream(req); + let res = self.inner.stream_union_call(req).await?; + let desc = if let stream_response_union::Response::DescribeStream(res) = res { + res.desc + } else { + None + }; + let stream_desc = desc.ok_or(Error::InvalidResponse)?; + let client = self.inner.new_stream_client(stream_desc); + Ok(Stream::new(client)) + } + + pub async fn create_stream(&self, name: &str) -> Result { + Ok(Stream::new(self.create_stream_client(name).await?)) + } + + pub async fn delete_stream(&self, name: &str) -> Result<()> { + let req = DeleteStreamRequest { + name: name.to_owned(), + }; + let req = stream_request_union::Request::DeleteStream(req); + self.inner.stream_union_call(req).await?; + Ok(()) + } + + pub(super) async fn create_stream_client(&self, name: &str) -> Result { + let desc = StreamDesc { + name: name.to_owned(), + ..Default::default() + }; + let req = CreateStreamRequest { desc: Some(desc) }; + let req = 
stream_request_union::Request::CreateStream(req); + let res = self.inner.stream_union_call(req).await?; + let desc = if let stream_response_union::Response::CreateStream(res) = res { + res.desc + } else { + None + }; + let stream_desc = desc.ok_or(Error::InvalidResponse)?; + Ok(self.inner.new_stream_client(stream_desc)) + } +} + +struct TenantInner { + name: String, + master_client: MasterClient, +} + +impl TenantInner { + fn new_stream_client(&self, stream_desc: StreamDesc) -> MasterStreamClient { + MasterStreamClient::new(self.name.clone(), stream_desc, self.master_client.clone()) + } + + async fn tenant_union_call( + &self, + req: tenant_request_union::Request, + ) -> Result { + self.master_client.tenant_union(req).await + } + + async fn stream_union_call( + &self, + req: stream_request_union::Request, + ) -> Result { + self.master_client + .stream_union(self.name.clone(), req) + .await + } +} + +#[cfg(test)] +mod tests { + use stream_engine_master::build_master; + + use crate::{master::Master, Error, Result}; + + #[tokio::test(flavor = "multi_thread")] + async fn create_tenant() -> Result<()> { + let master_addr = build_master(&[]).await?; + + let master = Master::new(&master_addr).await?; + master.create_tenant("tenant").await?; + match master.create_tenant("tenant").await { + Err(Error::AlreadyExists(_)) => {} + _ => panic!("create same tenant must fail"), + } + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn create_stream() -> Result<()> { + let master_addr = build_master(&[]).await?; + + let master = Master::new(&master_addr).await?; + master.create_tenant("tenant").await?; + + let tenant = master.tenant("tenant"); + match tenant.stream("stream").await { + Err(Error::NotFound(_)) => {} + _ => panic!("no such stream exists"), + } + + tenant.create_stream_client("stream").await?; + match tenant.create_stream_client("stream").await { + Err(Error::AlreadyExists(_)) => {} + _ => panic!("create same stream must fail"), + } + + Ok(()) + } +} diff 
--git a/src/stream-engine/client/src/policy/group.rs b/src/stream-engine/client/src/policy/group.rs new file mode 100644 index 00000000..20f6b3d2 --- /dev/null +++ b/src/stream-engine/client/src/policy/group.rs @@ -0,0 +1,354 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::Policy; +use crate::Entry; + +#[derive(Debug, Clone)] +pub(crate) enum ReaderState { + Polling, + Ready { index: u32, entry: Entry }, + Done, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub(crate) enum GroupPolicy { + /// A simple strategy that accept entries as long as one copy holds the + /// dataset. + /// + /// This strategy is mainly used for testing. + Simple, + + /// A strategy that accept a majority set of entries are ready. + /// + /// This strategy is mainly used for testing. 
+ #[allow(unused)] + Majority, +} + +impl From for GroupPolicy { + fn from(policy: Policy) -> Self { + match policy { + Policy::Simple => GroupPolicy::Simple, + } + } +} + +#[allow(unused)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] +pub(crate) enum GroupState { + Pending, + Active, + Done, +} + +#[allow(unused)] +#[derive(Debug)] +pub(crate) struct GroupReader { + num_ready: usize, + num_done: usize, + num_copies: usize, + next_index: u32, + policy: GroupPolicy, +} + +#[allow(dead_code)] +impl GroupReader { + pub(super) fn new(policy: GroupPolicy, next_index: u32, num_copies: usize) -> Self { + GroupReader { + num_ready: 0, + num_done: 0, + num_copies, + next_index, + policy, + } + } + + fn majority(&self) -> usize { + (self.num_copies / 2) + 1 + } + + pub(super) fn state(&self) -> GroupState { + let majority = self.majority(); + match self.policy { + GroupPolicy::Simple if self.num_ready >= 1 => GroupState::Active, + GroupPolicy::Majority if self.num_ready >= majority => GroupState::Active, + GroupPolicy::Majority if self.num_done >= majority => GroupState::Done, + GroupPolicy::Majority if self.num_ready + self.num_done >= majority => { + GroupState::Active + } + _ if self.num_done == self.num_copies => GroupState::Done, + _ => GroupState::Pending, + } + } + + #[inline(always)] + pub(super) fn next_index(&self) -> u32 { + self.next_index + } + + pub(super) fn transform( + &mut self, + reader_state: &mut ReaderState, + input: Option<(u32, Entry)>, + ) { + match input { + Some((index, entry)) if index >= self.next_index => { + self.num_ready += 1; + *reader_state = ReaderState::Ready { index, entry }; + } + Some(_) => { + // Ignore staled entries. 
+ } + None => { + self.num_done += 1; + *reader_state = ReaderState::Done; + } + } + } + + /// Read next entry of group state, panic if this isn't active + pub(super) fn next_entry<'a, I>(&mut self, i: I) -> Option + where + I: IntoIterator, + { + // Found matched index + let mut fresh_entry: Option = None; + for state in i.into_iter() { + if let ReaderState::Ready { index, entry } = state { + if *index == self.next_index { + self.num_ready -= 1; + + if !fresh_entry + .as_ref() + .map(|e| e.epoch() >= entry.epoch()) + .unwrap_or_default() + { + fresh_entry = Some(std::mem::replace(entry, Entry::Hole)); + } + *state = ReaderState::Polling; + } + } + } + + // skip to next + self.next_index += 1; + fresh_entry + } +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Group reader must reject staled (index less than next_index) + /// transforming request. + #[test] + fn group_reader_ignore_staled_request() { + let mut reader = GroupReader::new(GroupPolicy::Simple, 123, 3); + let mut state = ReaderState::Polling; + + reader.transform(&mut state, Some((122, Entry::Hole))); + assert!(matches!(state, ReaderState::Polling)); + assert_eq!(reader.num_ready, 0); + assert_eq!(reader.num_done, 0); + } + + fn ee(index: u32, epoch: u32) -> Entry { + let event: Vec = index.to_le_bytes().as_slice().into(); + Entry::Event { + epoch, + event: event.into(), + } + } + + #[test] + fn group_reader_next_entry_basic() { + struct TestCase { + desc: &'static str, + states: Vec, + expects: Vec>, + } + + let cases = vec![ + TestCase { + desc: "1. return largest entry", + states: vec![ + ReaderState::Ready { + index: 1, + entry: ee(2, 2), + }, + ReaderState::Ready { + index: 1, + entry: ee(2, 1), + }, + ReaderState::Ready { + index: 1, + entry: ee(2, 3), + }, + ], + expects: vec![Some(ee(2, 3))], + }, + TestCase { + desc: "2. 
allow pending state", + states: vec![ + ReaderState::Polling, + ReaderState::Ready { + index: 1, + entry: ee(2, 2), + }, + ReaderState::Ready { + index: 1, + entry: ee(2, 1), + }, + ], + expects: vec![Some(ee(2, 2))], + }, + TestCase { + desc: "3. returns hole if no such index entry exists", + states: vec![ + ReaderState::Ready { + index: 2, + entry: ee(2, 2), + }, + ReaderState::Ready { + index: 4, + entry: ee(4, 2), + }, + ReaderState::Ready { + index: 6, + entry: ee(6, 8), + }, + ], + expects: vec![ + None, + Some(ee(2, 2)), + None, + Some(ee(4, 2)), + None, + Some(ee(6, 8)), + ], + }, + ]; + + for mut case in cases { + let mut reader = GroupReader::new(GroupPolicy::Simple, 1, 3); + reader.num_ready = 3; + for expect in case.expects { + let entry = reader.next_entry(case.states.iter_mut()); + match expect { + Some(e) => { + assert!(entry.is_some(), "case: {}", case.desc); + assert_eq!(entry.unwrap(), e, "case: {}", case.desc); + } + None => { + assert!(entry.is_none(), "case: {}", case.desc); + } + } + } + } + } + + #[test] + fn group_reader_state() { + #[derive(Debug)] + struct TestCase { + num_copies: usize, + num_ready: usize, + num_done: usize, + group_policy: GroupPolicy, + expect_state: GroupState, + } + let cases = vec![ + // 1. simple policy pending + TestCase { + num_copies: 1, + num_ready: 0, + num_done: 0, + group_policy: GroupPolicy::Simple, + expect_state: GroupState::Pending, + }, + // 2. simple policy active + TestCase { + num_copies: 1, + num_ready: 1, + num_done: 0, + group_policy: GroupPolicy::Simple, + expect_state: GroupState::Active, + }, + // 3. simple policy done + TestCase { + num_copies: 1, + num_ready: 0, + num_done: 1, + group_policy: GroupPolicy::Simple, + expect_state: GroupState::Done, + }, + // 4. simple policy active but some copies already done. + TestCase { + num_copies: 2, + num_ready: 1, + num_done: 1, + group_policy: GroupPolicy::Simple, + expect_state: GroupState::Active, + }, + // 5. 
majority policy pending + TestCase { + num_copies: 3, + num_ready: 1, + num_done: 0, + group_policy: GroupPolicy::Majority, + expect_state: GroupState::Pending, + }, + // 6. majority policy active + TestCase { + num_copies: 3, + num_ready: 2, + num_done: 0, + group_policy: GroupPolicy::Majority, + expect_state: GroupState::Active, + }, + // 7. majority policy active but partial done + TestCase { + num_copies: 3, + num_ready: 1, + num_done: 1, + group_policy: GroupPolicy::Majority, + expect_state: GroupState::Active, + }, + // 8. majority policy done + TestCase { + num_copies: 3, + num_ready: 0, + num_done: 2, + group_policy: GroupPolicy::Majority, + expect_state: GroupState::Done, + }, + // 9. majority policy active although majority done, this is expected for recovering. + TestCase { + num_copies: 3, + num_ready: 1, + num_done: 2, + group_policy: GroupPolicy::Majority, + expect_state: GroupState::Done, + }, + ]; + for case in cases { + let mut reader = GroupReader::new(case.group_policy, 1, case.num_copies); + reader.num_ready = case.num_ready; + reader.num_done = case.num_done; + assert_eq!(reader.state(), case.expect_state, "{:?}", case); + } + } +} diff --git a/src/stream-engine/client/src/policy/mod.rs b/src/stream-engine/client/src/policy/mod.rs new file mode 100644 index 00000000..d22368ee --- /dev/null +++ b/src/stream-engine/client/src/policy/mod.rs @@ -0,0 +1,62 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +mod group; +mod simple; + +use std::collections::HashMap; + +pub(crate) use group::GroupReader; + +use crate::{core::Progress, Sequence}; + +#[allow(unused)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum Policy { + /// A simple strategy that allows ack entries as long as one copy holds the + /// dataset. + /// + /// This strategy is mainly used for testing. + Simple, +} + +#[allow(dead_code)] +impl Policy { + /// Calculate the `acked_sequence` based on the matched indexes. + pub(super) fn advance_acked_sequence( + self, + epoch: u32, + progresses: &HashMap, + ) -> Sequence { + match self { + Policy::Simple => simple::advance_acked_sequence(epoch, progresses), + } + } + + /// Return the actual acked index, `None` if the indexes aren't enough. + #[inline(always)] + pub(super) fn actual_acked_index( + self, + num_copies: usize, + acked_indexes: &[u32], + ) -> Option { + match self { + Policy::Simple => simple::actual_acked_index(num_copies, acked_indexes), + } + } + + pub(super) fn new_group_reader(self, next_index: u32, num_copies: usize) -> GroupReader { + GroupReader::new(self.into(), next_index, num_copies) + } +} diff --git a/src/stream-engine/client/src/policy/simple.rs b/src/stream-engine/client/src/policy/simple.rs new file mode 100644 index 00000000..5eae5c41 --- /dev/null +++ b/src/stream-engine/client/src/policy/simple.rs @@ -0,0 +1,41 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::HashMap; + +use crate::{core::Progress, Sequence}; + +#[inline(always)] +pub(super) fn advance_acked_sequence( + epoch: u32, + progresses: &HashMap, +) -> Sequence { + progresses + .iter() + .filter_map(|(_, p)| { + let matched_index = p.matched_index(); + if matched_index == 0 { + None + } else { + Some(Sequence::new(epoch, matched_index)) + } + }) + .max() + .unwrap_or_default() +} + +#[inline(always)] +pub(super) fn actual_acked_index(_num_copies: usize, acked_indexes: &[u32]) -> Option { + acked_indexes.iter().max().cloned() +} diff --git a/src/stream-engine/client/src/store/client.rs b/src/stream-engine/client/src/store/client.rs new file mode 100644 index 00000000..a7dfbcf5 --- /dev/null +++ b/src/stream-engine/client/src/store/client.rs @@ -0,0 +1,479 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
use stream_engine_proto::*;
use tonic::{transport::Channel, Streaming};

use crate::{Error, Result};

/// A thin wrapper over the generated store gRPC client.
#[derive(Clone)]
pub struct StoreClient {
    // The generated client is cloned per call because its methods take
    // `&mut self`; this keeps the wrapper's methods `&self`.
    client: store_client::StoreClient<Channel>,
}

impl StoreClient {
    /// Wrap an already-established channel.
    pub fn new(channel: Channel) -> Self {
        StoreClient {
            client: store_client::StoreClient::new(channel),
        }
    }
}

impl StoreClient {
    /// Send a raw `MutateRequest` and return the raw response.
    pub async fn mutate(&self, input: MutateRequest) -> Result<MutateResponse> {
        let mut client = self.client.clone();
        let resp = client.mutate(input).await?;
        Ok(resp.into_inner())
    }

    /// Issue a write as a mutate call and unwrap the `Write` response arm.
    ///
    /// Returns `Error::InvalidResponse` if the server answers with a missing
    /// or mismatched response variant.
    pub async fn write(
        &self,
        stream_id: u64,
        writer_epoch: u32,
        input: WriteRequest,
    ) -> Result<WriteResponse> {
        type Request = mutate_request_union::Request;
        type Response = mutate_response_union::Response;

        let req = MutateRequest {
            stream_id,
            writer_epoch,
            request: Some(MutateRequestUnion {
                request: Some(Request::Write(input)),
            }),
        };
        let resp = self.mutate(req).await?;
        if let Some(Response::Write(resp)) = resp.response.and_then(|r| r.response) {
            Ok(resp)
        } else {
            Err(Error::InvalidResponse)
        }
    }

    /// Issue a seal as a mutate call and unwrap the `Seal` response arm.
    ///
    /// Returns `Error::InvalidResponse` on a missing or mismatched variant.
    pub async fn seal(
        &self,
        stream_id: u64,
        writer_epoch: u32,
        input: SealRequest,
    ) -> Result<SealResponse> {
        type Request = mutate_request_union::Request;
        type Response = mutate_response_union::Response;

        let req = MutateRequest {
            stream_id,
            writer_epoch,
            request: Some(MutateRequestUnion {
                request: Some(Request::Seal(input)),
            }),
        };
        let resp = self.mutate(req).await?;
        if let Some(Response::Seal(resp)) = resp.response.and_then(|r| r.response) {
            Ok(resp)
        } else {
            Err(Error::InvalidResponse)
        }
    }
}

impl StoreClient {
    /// Open a server-streaming read of entries.
    #[allow(dead_code)]
    pub async fn read(&self, input: ReadRequest) -> crate::Result<Streaming<ReadResponse>> {
        let mut client = self.client.clone();
        let resp = client.read(input).await?;
        Ok(resp.into_inner())
    }
}
SealRequest, WriteRequest}; + use stream_engine_store::build_store; + use tonic::transport::Endpoint; + + use super::{Result, StoreClient}; + + fn entry(event: Vec) -> proto::Entry { + proto::Entry { + entry_type: proto::EntryType::Event as i32, + epoch: 1, + event, + } + } + + async fn build_store_client() -> Result { + let addr = build_store().await?; + let endpoint = Endpoint::new(addr)?.connect().await?; + Ok(StoreClient::new(endpoint)) + } + + #[tokio::test(flavor = "multi_thread")] + async fn basic_write_and_read_acked() -> crate::Result<()> { + let writes = vec![ + WriteRequest { + segment_epoch: 1, + acked_seq: 0, + first_index: 1, + entries: vec![entry(vec![0u8]), entry(vec![2u8]), entry(vec![4u8])], + }, + WriteRequest { + segment_epoch: 1, + acked_seq: Sequence::new(1, 3).into(), + first_index: 4, + entries: vec![entry(vec![6u8]), entry(vec![8u8])], + }, + WriteRequest { + acked_seq: Sequence::new(1, 5).into(), + segment_epoch: 1, + first_index: 6, + entries: vec![], + }, + ]; + + let entries = vec![ + Entry::Event { + epoch: 1, + event: vec![0u8].into(), + }, + Entry::Event { + epoch: 1, + event: vec![2u8].into(), + }, + Entry::Event { + epoch: 1, + event: vec![4u8].into(), + }, + Entry::Event { + epoch: 1, + event: vec![6u8].into(), + }, + Entry::Event { + epoch: 1, + event: vec![8u8].into(), + }, + ]; + + #[derive(Debug)] + struct Test<'a> { + from: u32, + limit: u32, + expect: &'a [Entry], + } + + let tests = vec![ + Test { + from: 1, + limit: 1, + expect: &entries[0..1], + }, + Test { + from: 4, + limit: 2, + expect: &entries[3..], + }, + Test { + from: 1, + limit: 5, + expect: &entries[..], + }, + ]; + + let stream_id: u64 = 1; + let writer_epoch: u32 = 1; + let client = build_store_client().await?; + for w in writes { + client.write(stream_id, writer_epoch, w).await?; + } + + for test in tests { + let req = ReadRequest { + stream_id: 1, + seg_epoch: 1, + start_index: test.from, + limit: test.limit, + include_pending_entries: false, + }; + let mut 
stream = client.read(req).await?; + let mut got = Vec::::new(); + while let Some(resp) = stream.next().await { + got.push(resp?.entry.unwrap().into()); + } + assert_eq!(got.len(), test.expect.len()); + assert!(got.iter().zip(test.expect.iter()).all(|(l, r)| l == r)); + } + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn basic_write_and_read_including_pending_entries() -> crate::Result<()> { + let writes = vec![ + WriteRequest { + segment_epoch: 1, + acked_seq: 0, + first_index: 1, + entries: vec![entry(vec![0u8]), entry(vec![2u8]), entry(vec![4u8])], + }, + WriteRequest { + acked_seq: 0, + segment_epoch: 1, + first_index: 4, + entries: vec![entry(vec![6u8]), entry(vec![8u8])], + }, + WriteRequest { + acked_seq: 0, + segment_epoch: 1, + first_index: 6, + entries: vec![], + }, + ]; + + let stream_id: u64 = 1; + let writer_epoch: u32 = 1; + let entries = vec![ + Entry::Event { + epoch: 1, + event: vec![0u8].into(), + }, + Entry::Event { + epoch: 1, + event: vec![2u8].into(), + }, + Entry::Event { + epoch: 1, + event: vec![4u8].into(), + }, + Entry::Event { + epoch: 1, + event: vec![6u8].into(), + }, + Entry::Event { + epoch: 1, + event: vec![8u8].into(), + }, + ]; + + struct Test<'a> { + from: u32, + limit: u32, + expect: &'a [Entry], + } + + let tests = vec![ + Test { + from: 1, + limit: 1, + expect: &entries[0..1], + }, + Test { + from: 4, + limit: 2, + expect: &entries[3..], + }, + Test { + from: 1, + limit: 5, + expect: &entries[..], + }, + // include_pending_entries don't wait any entries + Test { + from: 1, + limit: u32::MAX, + expect: &entries[..], + }, + ]; + + let client = build_store_client().await?; + for w in writes { + client.write(stream_id, writer_epoch, w).await?; + } + + for test in tests { + let req = ReadRequest { + stream_id: 1, + seg_epoch: 1, + start_index: test.from, + limit: test.limit, + include_pending_entries: true, + }; + let mut stream = client.read(req).await?; + let mut got = Vec::::new(); + while let Some(resp) = 
stream.next().await { + got.push(resp?.entry.unwrap().into()); + } + assert_eq!(got.len(), test.expect.len()); + assert!(got.iter().zip(test.expect.iter()).all(|(l, r)| l == r)); + } + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn reject_staled_sealing_request() -> crate::Result<()> { + let client = build_store_client().await?; + client.seal(1, 3, SealRequest { segment_epoch: 1 }).await?; + + match client.seal(1, 2, SealRequest { segment_epoch: 1 }).await { + Err(crate::Error::Staled(_)) => {} + _ => { + panic!("should reject staled sealing request"); + } + }; + + client.seal(1, 4, SealRequest { segment_epoch: 1 }).await?; + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn reject_staled_writing_if_sealed() -> crate::Result<()> { + let client = build_store_client().await?; + let write_req = WriteRequest { + segment_epoch: 1, + acked_seq: 0, + first_index: 0, + entries: vec![entry(vec![0u8]), entry(vec![2u8]), entry(vec![4u8])], + }; + client.write(1, 1, write_req).await?; + + client.seal(1, 3, SealRequest { segment_epoch: 1 }).await?; + + let write_req = WriteRequest { + segment_epoch: 1, + acked_seq: Sequence::new(1, 2).into(), + first_index: 3, + entries: vec![entry(vec![6u8]), entry(vec![8u8])], + }; + match client.write(1, 1, write_req).await { + Err(crate::Error::Staled(_)) => {} + _ => { + panic!("should reject staled store request"); + } + }; + + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn remove_entries_once_receiving_bridge_entry() -> crate::Result<()> { + let client = build_store_client().await?; + let write_req = WriteRequest { + segment_epoch: 1, + acked_seq: 0, + first_index: 1, + entries: vec![entry(vec![1u8]), entry(vec![2u8]), entry(vec![3u8])], + }; + client.write(1, 1, write_req).await?; + + let write_req = WriteRequest { + segment_epoch: 1, + acked_seq: 0, + first_index: 5, + entries: vec![entry(vec![5u8])], + }; + client.write(1, 1, write_req).await?; + + client.seal(1, 1, SealRequest { 
segment_epoch: 1 }).await?; + + let read_expect: Vec = vec![ + entry(vec![1u8]).into(), + entry(vec![2u8]).into(), + entry(vec![3u8]).into(), + entry(vec![5u8]).into(), + ]; + let req = ReadRequest { + stream_id: 1, + seg_epoch: 1, + start_index: 1, + limit: u32::MAX, + include_pending_entries: true, + }; + let mut stream = client.read(req).await?; + let mut got = Vec::::new(); + while let Some(resp) = stream.next().await { + got.push(resp?.entry.unwrap().into()); + } + assert_eq!(got.len(), read_expect.len()); + assert!(got.iter().zip(read_expect.iter()).all(|(l, r)| l == r)); + + // send bridge record + let bridge = proto::Entry { + entry_type: proto::EntryType::Bridge as i32, + epoch: 3, + event: Vec::default(), + }; + let write_req = WriteRequest { + segment_epoch: 1, + acked_seq: 0, + first_index: 4, + entries: vec![bridge.clone()], + }; + client.write(1, 1, write_req).await?; + + let read_expect: Vec = vec![ + entry(vec![1u8]).into(), + entry(vec![2u8]).into(), + entry(vec![3u8]).into(), + bridge.into(), + ]; + let req = ReadRequest { + stream_id: 1, + seg_epoch: 1, + start_index: 1, + limit: u32::MAX, + include_pending_entries: true, + }; + let mut stream = client.read(req).await?; + let mut got = Vec::::new(); + while let Some(resp) = stream.next().await { + got.push(resp?.entry.unwrap().into()); + } + assert_eq!(got.len(), read_expect.len()); + assert!(got.iter().zip(read_expect.iter()).all(|(l, r)| l == r)); + Ok(()) + } + + #[tokio::test(flavor = "multi_thread")] + async fn write_returns_continuously_persisted_index() -> crate::Result<()> { + let client = build_store_client().await?; + let write_req = WriteRequest { + segment_epoch: 1, + acked_seq: 0, + first_index: 1, + entries: vec![entry(vec![1u8]), entry(vec![2u8]), entry(vec![3u8])], + }; + let resp = client.write(1, 1, write_req).await?; + assert_eq!(resp.persisted_index, 3); + + let write_req = WriteRequest { + segment_epoch: 1, + acked_seq: 0, + first_index: 5, + entries: vec![entry(vec![5u8])], 
+ }; + let resp = client.write(1, 1, write_req).await?; + assert_eq!(resp.persisted_index, 3); + + let write_req = WriteRequest { + segment_epoch: 1, + acked_seq: 0, + first_index: 4, + entries: vec![entry(vec![4u8])], + }; + let resp = client.write(1, 1, write_req).await?; + assert_eq!(resp.persisted_index, 5); + + Ok(()) + } +} diff --git a/src/stream-engine/client/src/store/mod.rs b/src/stream-engine/client/src/store/mod.rs new file mode 100644 index 00000000..ff34bd7e --- /dev/null +++ b/src/stream-engine/client/src/store/mod.rs @@ -0,0 +1,18 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod client; +mod transport; + +pub use transport::Transport; diff --git a/src/stream-engine/client/src/store/transport.rs b/src/stream-engine/client/src/store/transport.rs new file mode 100644 index 00000000..98add3db --- /dev/null +++ b/src/stream-engine/client/src/store/transport.rs @@ -0,0 +1,95 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{collections::HashMap, sync::Arc}; + +use stream_engine_proto::{SealRequest, WriteRequest}; +use tokio::sync::Mutex; +use tonic::transport::Endpoint; + +use super::client::StoreClient; +use crate::Result; + +pub struct Transport { + inner: Arc>, +} + +struct TransportInner { + clients: HashMap, + stream_set: HashMap, +} + +#[allow(dead_code)] +impl Transport { + pub async fn new() -> Self { + Transport { + inner: Arc::new(Mutex::new(TransportInner { + clients: HashMap::new(), + stream_set: HashMap::new(), + })), + } + } + + pub async fn register(&self, stream_id: u64) { + let mut inner = self.inner.lock().await; + inner.stream_set.insert(stream_id, ()); + } + + pub async fn unregister(&self, stream_id: u64) { + let mut inner = self.inner.lock().await; + inner.stream_set.remove(&stream_id); + } + + pub async fn write( + &self, + target: String, + stream_id: u64, + writer_epoch: u32, + write: WriteRequest, + ) -> Result<()> { + let client = self.get_client(target).await?; + client.write(stream_id, writer_epoch, write).await?; + Ok(()) + } + + pub async fn seal( + &self, + target: String, + stream_id: u64, + writer_epoch: u32, + segment_epoch: u32, + ) -> Result<()> { + let client = self.get_client(target).await?; + client + .seal(stream_id, writer_epoch, SealRequest { segment_epoch }) + .await?; + Ok(()) + } + + async fn get_client(&self, target: String) -> Result { + let inner = self.inner.lock().await; + if let Some(client) = inner.clients.get(&target) { + Ok(client.clone()) + } else { + drop(inner); + + // FIXME(w41ter) too many concurrent connections. 
+ let channel = Endpoint::new(target.clone())?.connect().await?; + let client = StoreClient::new(channel); + let mut inner = self.inner.lock().await; + inner.clients.insert(target, client.clone()); + Ok(client) + } + } +} diff --git a/src/stream-engine/client/src/stream.rs b/src/stream-engine/client/src/stream.rs new file mode 100644 index 00000000..ca17506a --- /dev/null +++ b/src/stream-engine/client/src/stream.rs @@ -0,0 +1,250 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{ + collections::VecDeque, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; + +use futures::Stream as FutureStream; +use stream_engine_proto::*; +use tokio::sync::{Mutex, Notify}; + +use crate::{master::Stream as MasterStreamClient, Error, Result}; + +/// The role of a stream. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Role { + /// A leader manipulate a stream. + Leader, + /// A follower subscribes a stream. + Follower, +} + +impl std::fmt::Display for Role { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + match self { + Role::Leader => "LEADER", + Role::Follower => "FOLLOWER", + } + ) + } +} + +/// The role and leader's address of current epoch. +#[derive(Debug, Clone)] +pub struct EpochState { + pub epoch: u64, + + /// The role of the associated stream. + pub role: Role, + + /// The leader of the associated stream. 
    /// Address of the current leader, when one is known.
    pub leader: Option<String>,
}

/// A handle to a stream; clones share the same in-memory state.
#[derive(Clone)]
pub struct Stream {
    inner: Arc<StreamInner>,
}

impl Stream {
    pub(crate) fn new(stream_client: MasterStreamClient) -> Self {
        let inner = StreamInner {
            stream_client,
            core: Mutex::new(StreamCore::new()),
        };
        Self {
            inner: Arc::new(inner),
        }
    }

    /// The descriptor of this stream, as reported by the master client.
    #[inline(always)]
    pub fn desc(&self) -> StreamDesc {
        self.inner.stream_client.desc()
    }

    /// Return an endless stream which returns a new epoch state once the
    /// associated stream enters a new epoch.
    pub async fn subscribe_state(&self) -> Result<EpochStateStream> {
        Ok(EpochStateStream::new())
    }

    /// Returns a stream reader.
    pub async fn new_reader(&self) -> Result<StreamReader> {
        Ok(StreamReader::new(self.clone()))
    }

    /// Append an event, returns the sequence.
    // `end` is the next sequence to assign, so the freshly appended event
    // gets `end - 1`; waiting readers are woken afterwards.
    pub async fn append(&self, event: Box<[u8]>) -> Result<u64> {
        let mut stream = self.inner.core.lock().await;
        stream.events.push_back(event);
        stream.end += 1;
        stream.waiter.notify_waiters();
        Ok(stream.end - 1)
    }

    /// Truncates events up to a sequence (exclusive).
    ///
    /// A `sequence` at or below `start` drops nothing (idempotent); a
    /// `sequence` beyond `end` is rejected.
    pub async fn truncate(&self, sequence: u64) -> Result<()> {
        let mut stream = self.inner.core.lock().await;
        // `checked_sub` is only used as a `sequence <= end` test.
        if stream.end.checked_sub(sequence).is_some() {
            let offset = sequence.saturating_sub(stream.start);
            stream.events.drain(..offset as usize);
            stream.start += offset;
            Ok(())
        } else {
            Err(Error::InvalidArgument(format!(
                "truncate sequence (is {}) should be <= end (is {})",
                sequence, stream.end
            )))
        }
    }
}

struct StreamInner {
    stream_client: MasterStreamClient,
    core: Mutex<StreamCore>,
}

/// In-memory event log: `events[i]` holds the event with sequence `start + i`.
struct StreamCore {
    /// Sequence of the first retained event.
    start: u64,
    /// Next sequence to assign (one past the last event).
    end: u64,
    events: VecDeque<Box<[u8]>>,
    /// Wakes pending readers whenever an event is appended.
    waiter: Arc<Notify>,
}

impl StreamCore {
    fn new() -> Self {
        StreamCore {
            start: 0,
            end: 0,
            events: VecDeque::new(),
            waiter: Arc::new(Notify::new()),
        }
    }
}

impl StreamCore {
    /// Clone out every retained event with sequence >= `from`; empty when
    /// `from` is past `end`.
    fn read_all(&self, from: u64) -> VecDeque<Box<[u8]>> {
        if self.end.checked_sub(from).is_some() {
            self.events
                .range(from.saturating_sub(self.start) as usize..)
                .cloned()
                .collect()
        } else {
            VecDeque::default()
        }
    }
}

/// A cursor over a `Stream`, buffering batches of events locally.
pub struct StreamReader {
    /// Next sequence to fetch from the stream; already-fetched events are
    /// served from the local `events` buffer first.
    cursor: u64,
    stream: Stream,
    events: VecDeque<Box<[u8]>>,
}

impl StreamReader {
    fn new(stream: Stream) -> Self {
        Self {
            cursor: 0,
            stream,
            events: VecDeque::new(),
        }
    }

    /// Pop the next buffered event, refilling the buffer from the stream when
    /// empty. With `wait`, parks on the stream's notifier and then returns
    /// `None` so the caller can retry.
    ///
    /// NOTE(review): a tokio `Notified` future registers for wakeups on first
    /// poll, not on creation, so a `notify_waiters` call between `drop(stream)`
    /// and the `.await` below can be missed, delaying the wakeup until the
    /// next append. `wait_next`'s retry loop masks this — confirm the extra
    /// latency is acceptable.
    async fn next(&mut self, wait: bool) -> Result<Option<Box<[u8]>>> {
        if let Some(event) = self.events.pop_front() {
            return Ok(Some(event));
        }

        let stream = self.stream.inner.core.lock().await;
        let events = stream.read_all(self.cursor);
        if events.is_empty() {
            if wait {
                let waiter = stream.waiter.clone();
                let notified = waiter.notified();
                drop(stream);
                notified.await;
            }
            Ok(None)
        } else {
            self.events = events;
            self.cursor = stream.end;
            Ok(self.events.pop_front())
        }
    }

    /// Seeks to the given sequence.
    ///
    /// Fails when `sequence` has already been truncated away; any locally
    /// buffered events are discarded.
    pub async fn seek(&mut self, sequence: u64) -> Result<()> {
        let stream = self.stream.inner.core.lock().await;
        if sequence < stream.start {
            Err(Error::InvalidArgument(format!(
                "seek sequence (is {}) should be >= start (is {})",
                sequence, stream.start
            )))
        } else {
            self.cursor = sequence;
            self.events.clear();
            Ok(())
        }
    }

    /// Returns the next entry or `None` if no available entries.
    pub async fn try_next(&mut self) -> Result<Option<Box<[u8]>>> {
        self.next(false).await
    }

    /// Returns the next entry or wait until it is available.
    pub async fn wait_next(&mut self) -> Result<Box<[u8]>> {
        loop {
            if let Some(next) = self.next(true).await? {
                return Ok(next);
            }
        }
    }
}

/// Placeholder epoch-state stream: yields one hard-coded leader state.
pub struct EpochStateStream {
    // Set once the single state has been handed out.
    returned: bool,
}

impl EpochStateStream {
    fn new() -> Self {
        EpochStateStream { returned: false }
    }
}

impl FutureStream for EpochStateStream {
    type Item = EpochState;

    // NOTE(review): after the first item this returns `Pending` without
    // stashing a waker, so the consuming task is never woken again —
    // presumably intentional for this stub; confirm before relying on it.
    #[allow(unused)]
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        if this.returned {
            Poll::Pending
        } else {
            this.returned = true;
            Poll::Ready(Some(EpochState {
                epoch: 1,
                role: Role::Leader,
                leader: Some("".into()),
            }))
        }
    }
}
#[derive(Derivative, Clone, PartialEq, Eq)]
#[derivative(Debug)]
pub enum Entry {
    /// A placeholder, used in recovery phase.
    Hole,
    /// A user event carried by the segment of `epoch`.
    Event {
        epoch: u32,
        // Excluded from `Debug` output: payloads may be large or binary.
        #[derivative(Debug = "ignore")]
        event: Box<[u8]>,
    },
    /// A bridge record, which identify the end of a segment.
    Bridge { epoch: u32 },
}

impl Entry {
    // FIXME(w41ter) a better implementation.
    /// The epoch this entry belongs to.
    ///
    /// # Panics
    ///
    /// Panics on `Entry::Hole`, which carries no epoch.
    /// NOTE(review): `set_epoch` silently ignores `Hole` instead of panicking
    /// — confirm the asymmetry is intended.
    pub fn epoch(&self) -> u32 {
        match self {
            Entry::Event { epoch, event: _ } => *epoch,
            Entry::Bridge { epoch } => *epoch,
            _ => panic!("Entry::Hole no epoch field"),
        }
    }

    /// Overwrite the epoch; a no-op for `Entry::Hole`.
    pub fn set_epoch(&mut self, target: u32) {
        match self {
            Entry::Event { epoch, event: _ } => *epoch = target,
            Entry::Bridge { epoch } => *epoch = target,
            _ => {}
        }
    }

    /// Approximate in-memory footprint: the enum itself plus any payload.
    pub fn size(&self) -> usize {
        if let Entry::Event { event, .. } = self {
            core::mem::size_of::<Entry>() + event.len()
        } else {
            core::mem::size_of::<Entry>()
        }
    }
}
#[derive(Error, Debug)]
pub enum Error {
    #[error("{0} is not found")]
    NotFound(String),
    /// This node is no longer the leader; the payload names the new leader.
    #[error("not leader, new leader is {0}")]
    NotLeader(String),
    #[error("{0} already exists")]
    AlreadyExists(String),
    #[error("{0}")]
    InvalidArgument(String),
    /// A gRPC peer answered with an unexpected response variant.
    #[error("invalid response")]
    InvalidResponse,
    #[error(transparent)]
    Io(#[from] std::io::Error),
    /// The request carried an out-of-date epoch (maps to `FailedPrecondition`).
    #[error("{0} is staled")]
    Staled(String),
    #[error(transparent)]
    Unknown(Box<dyn std::error::Error + Send + Sync + 'static>),
}

#[must_use = "this `Result` may be an `Err` variant, which should be handled"]
pub type Result<T> = std::result::Result<T, Error>;

impl From<oneshot::Canceled> for Error {
    fn from(_: oneshot::Canceled) -> Self {
        use std::io;

        // Because we cannot determine whether a canceled proposal acked, it is
        // processed according to the third state of distributed system.
        Error::Io(io::Error::new(
            io::ErrorKind::TimedOut,
            "task has been canceled",
        ))
    }
}

impl From<tonic::Status> for Error {
    // Map well-known gRPC status codes onto concrete variants; everything
    // else is kept opaque as `Unknown`.
    fn from(s: tonic::Status) -> Self {
        match s.code() {
            tonic::Code::NotFound => Error::NotFound(s.message().into()),
            tonic::Code::AlreadyExists => Error::AlreadyExists(s.message().into()),
            tonic::Code::InvalidArgument => Error::InvalidArgument(s.message().into()),
            tonic::Code::FailedPrecondition => Error::Staled(s.message().into()),
            _ => Error::Unknown(Box::new(s)),
        }
    }
}

impl From<Error> for tonic::Status {
    fn from(err: Error) -> Self {
        let (code, message) = match err {
            // NOTE(review): converting a `NotLeader` error into a gRPC status
            // aborts via `unreachable!` — confirm no server path can reach it.
            Error::NotLeader(_) => unreachable!(),
            Error::NotFound(s) => (tonic::Code::NotFound, s),
            Error::AlreadyExists(s) => (tonic::Code::AlreadyExists, s),
            Error::InvalidArgument(s) => (tonic::Code::InvalidArgument, s),
            Error::InvalidResponse => (tonic::Code::InvalidArgument, "invalid response".into()),
            Error::Io(s) => (tonic::Code::Unknown, s.to_string()),
            Error::Unknown(s) => (tonic::Code::Unknown, s.to_string()),
            Error::Staled(s) => (tonic::Code::FailedPrecondition, s),
        };
        tonic::Status::new(code, message)
    }
}
a/src/stream-engine/common/src/lib.rs b/src/stream-engine/common/src/lib.rs new file mode 100644 index 00000000..eef9796e --- /dev/null +++ b/src/stream-engine/common/src/lib.rs @@ -0,0 +1,25 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Contains some common definitions for stream engine client, master and store. + +#[macro_use] +extern crate derivative; + +mod entry; +pub mod error; +mod sequence; + +pub use entry::Entry; +pub use sequence::Sequence; diff --git a/src/stream-engine/common/src/sequence.rs b/src/stream-engine/common/src/sequence.rs new file mode 100644 index 00000000..cd12e32d --- /dev/null +++ b/src/stream-engine/common/src/sequence.rs @@ -0,0 +1,48 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// An increasing number to order events. 
/// Packs an `epoch` (high half) and an `index` (low half); the derived
/// ordering therefore sorts by epoch first, then index.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(C)]
pub struct Sequence {
    pub epoch: u32,
    pub index: u32,
}

impl Sequence {
    /// Build a sequence from its two halves.
    pub fn new(epoch: u32, index: u32) -> Self {
        Sequence { epoch, index }
    }
}

impl From<u64> for Sequence {
    /// Split a packed `u64` into `(epoch, index)`.
    fn from(v: u64) -> Self {
        let epoch = (v >> 32) as u32;
        let index = v as u32;
        Sequence { epoch, index }
    }
}

impl From<Sequence> for u64 {
    /// Pack `(epoch, index)` back into a single `u64`.
    fn from(seq: Sequence) -> Self {
        ((seq.epoch as u64) << 32) | (seq.index as u64)
    }
}

impl std::fmt::Display for Sequence {
    /// Render as the packed `u64` value.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", u64::from(*self))
    }
}
mod master;
mod server;
mod stream;

pub use stream_engine_common::{
    error::{Error, Result},
    Sequence,
};
#[cfg(debug_assertions)]
pub use tests::build_master;

pub use self::server::Server;

/// Test helpers, compiled only in debug builds so other crates' tests can
/// spin up an in-process master.
#[cfg(debug_assertions)]
pub mod tests {
    use tokio::net::TcpListener;
    use tokio_stream::wrappers::TcpListenerStream;

    use super::*;
    use crate::Result;

    /// Start an in-process master bound to an ephemeral localhost port and
    /// return its `http://` address.
    ///
    /// The server runs on a detached task; it lives until the runtime (or
    /// process) shuts down.
    pub async fn build_master(replicas: &[&str]) -> Result<String> {
        let replicas: Vec<String> = replicas.iter().map(ToString::to_string).collect();
        let listener = TcpListener::bind("127.0.0.1:0").await?;
        let local_addr = listener.local_addr()?;
        tokio::task::spawn(async {
            let server = Server::test_new(replicas);
            tonic::transport::Server::builder()
                .add_service(server.into_service())
                .serve_with_incoming(TcpListenerStream::new(listener))
                .await
                .unwrap();
        });

        Ok(format!("http://{}", local_addr))
    }
}
use std::{collections::HashMap, sync::Arc, time::Duration};

use stream_engine_proto::*;
use tokio::sync::Mutex;

use crate::{stream::StreamInfo, Error, Result};

#[derive(Debug, Clone)]
pub struct Config {
    /// How many tick before an observer's lease is timeout.
    ///
    /// Default: 3
    pub heartbeat_timeout_tick: u64,

    /// Observer heartbeat intervals in ms.
    ///
    /// Default: 500ms
    pub heartbeat_interval_ms: u64,
}

impl Config {
    /// The lease duration: `heartbeat_interval_ms * heartbeat_timeout_tick`.
    pub fn heartbeat_timeout(&self) -> Duration {
        Duration::from_millis(self.heartbeat_interval_ms * self.heartbeat_timeout_tick)
    }
}

impl Default for Config {
    fn default() -> Self {
        Config {
            heartbeat_timeout_tick: 3,
            heartbeat_interval_ms: 500,
        }
    }
}

/// The in-memory master: owns tenants and the list of store addresses.
///
/// Cheap to clone; all clones share one `MasterInner` behind the mutex.
#[derive(Clone)]
pub struct Master {
    pub config: Config,

    // FIXME(w41ter)
    // This is a temporary implementation, which needs to be
    // supported on orchestrator.
    pub stores: Vec<String>,
    inner: Arc<Mutex<MasterInner>>,
}

struct MasterInner {
    /// Next tenant id to assign; ids start at 1.
    next_id: u64,
    /// Tenants keyed by name.
    tenants: HashMap<String, Tenant>,
}

impl Master {
    pub fn new(config: Config, stores: Vec<String>) -> Self {
        let inner = MasterInner {
            next_id: 1,
            tenants: HashMap::new(),
        };
        Self {
            config,
            stores,
            inner: Arc::new(Mutex::new(inner)),
        }
    }

    /// Look up a tenant by name.
    pub async fn tenant(&self, name: &str) -> Result<Tenant> {
        let inner = self.inner.lock().await;
        inner
            .tenants
            .get(name)
            .cloned()
            .ok_or_else(|| Error::NotFound(format!("tenant {}", name)))
    }

    /// Create a tenant, assigning the next id; fails with `AlreadyExists` on
    /// a duplicate name. Returns the descriptor with its id filled in.
    pub async fn create_tenant(&self, mut desc: TenantDesc) -> Result<TenantDesc> {
        let mut inner = self.inner.lock().await;
        if inner.tenants.contains_key(&desc.name) {
            return Err(Error::AlreadyExists(format!("tenant {}", desc.name)));
        }
        desc.id = inner.next_id;
        inner.next_id += 1;
        let db = Tenant::new(desc.clone());
        inner.tenants.insert(desc.name.clone(), db);
        Ok(desc)
    }
}

/// A tenant: a namespace of streams. Clones share state.
#[derive(Clone)]
pub struct Tenant {
    inner: Arc<Mutex<TenantInner>>,
}

struct TenantInner {
    desc: TenantDesc,
    /// Next stream id to assign; ids start at 1.
    next_id: u64,
    /// Streams keyed by stream id.
    streams: HashMap<u64, StreamInfo>,
}

impl Tenant {
    fn new(desc: TenantDesc) -> Self {
        let inner = TenantInner {
            desc,
            next_id: 1,
            streams: HashMap::new(),
        };
        Self {
            inner: Arc::new(Mutex::new(inner)),
        }
    }

    pub async fn desc(&self) -> TenantDesc {
        self.inner.lock().await.desc.clone()
    }

    /// Find a stream's descriptor by name (linear scan over the streams).
    pub async fn stream_desc(&self, name: &str) -> Result<StreamDesc> {
        let inner = self.inner.lock().await;
        inner
            .streams
            .values()
            .find(|info| info.stream_name == name)
            .map(StreamInfo::stream_desc)
            .ok_or_else(|| Error::NotFound(format!("stream {}", name)))
    }

    /// Look up a stream by id.
    pub async fn stream(&self, stream_id: u64) -> Result<StreamInfo> {
        let inner = self.inner.lock().await;
        inner
            .streams
            .get(&stream_id)
            .cloned()
            .ok_or_else(|| Error::NotFound(format!("stream id {}", stream_id)))
    }

    /// Create a stream under this tenant; names must be unique within the
    /// tenant. Returns the descriptor with `id` and `parent_id` filled in.
    pub async fn create_stream(&self, mut desc: StreamDesc) -> Result<StreamDesc> {
        let mut inner = self.inner.lock().await;
        if inner
            .streams
            .values()
            .any(|info| info.stream_name == desc.name)
        {
            return Err(Error::AlreadyExists(format!("stream {}", desc.name)));
        }

        desc.id = inner.next_id;
        inner.next_id += 1;
        desc.parent_id = inner.desc.id;
        inner.streams.insert(
            desc.id,
            StreamInfo::new(desc.parent_id, desc.id, desc.name.clone()),
        );
        Ok(desc)
    }
}
+// See the License for the specific language governing permissions and +// limitations under the License. + +use stream_engine_proto::*; +use tonic::{Request, Response, Status}; + +use crate::{ + master::{Config, Master, Tenant}, + stream::{ObserverMeta, StreamInfo}, + Error, Result, +}; + +type TonicResult = std::result::Result; + +pub struct Server { + master: Master, +} + +impl Default for Server { + fn default() -> Self { + Self::new() + } +} + +impl Server { + pub fn new() -> Self { + // FIXME(w41ter) add store addresses. + let stores = vec![]; + Self { + master: Master::new(Config::default(), stores), + } + } + + #[cfg(debug_assertions)] + pub fn test_new(stores: Vec) -> Self { + Self { + master: Master::new(Config::default(), stores), + } + } + + pub fn into_service(self) -> master_server::MasterServer { + master_server::MasterServer::new(self) + } +} + +#[tonic::async_trait] +impl master_server::Master for Server { + async fn tenant(&self, req: Request) -> TonicResult> { + let req = req.into_inner(); + let res = self.handle_tenant(req).await?; + Ok(Response::new(res)) + } + + async fn stream(&self, req: Request) -> TonicResult> { + let req = req.into_inner(); + let res = self.handle_stream(req).await?; + Ok(Response::new(res)) + } + + async fn segment( + &self, + req: Request, + ) -> TonicResult> { + let req = req.into_inner(); + let res = self.handle_segment(req).await?; + Ok(Response::new(res)) + } + + async fn heartbeat( + &self, + req: Request, + ) -> TonicResult> { + let req = req.into_inner(); + let tenant = self.master.tenant(&req.tenant).await?; + let stream = tenant.stream(req.stream_id).await?; + + let observer_meta = ObserverMeta { + stream_name: stream.stream_name.clone(), + observer_id: req.observer_id, + state: req.observer_state.into(), + epoch: req.writer_epoch, + acked_seq: req.acked_seq.into(), + }; + + let commands = stream + .heartbeat( + &self.master.config, + &self.master.stores, + observer_meta, + req.role.into(), + ) + .await?; + + 
Ok(Response::new(HeartbeatResponse { commands })) + } +} + +impl Server { + pub(crate) async fn handle_tenant(&self, req: TenantRequest) -> Result { + let mut res = TenantResponse::default(); + for req_union in req.requests { + let res_union = self.handle_tenant_union(req_union).await?; + res.responses.push(res_union); + } + Ok(res) + } + + pub(crate) async fn handle_tenant_union( + &self, + req: TenantRequestUnion, + ) -> Result { + type Request = tenant_request_union::Request; + type Response = tenant_response_union::Response; + + let req = req + .request + .ok_or_else(|| Error::InvalidArgument("tenant request".into()))?; + let res = match req { + Request::ListTenants(_req) => { + todo!(); + } + Request::CreateTenant(req) => { + let res = self.handle_create_tenant(req).await?; + Response::CreateTenant(res) + } + Request::UpdateTenant(_req) => { + todo!(); + } + Request::DeleteTenant(_req) => { + todo!(); + } + Request::DescribeTenant(req) => { + let res = self.handle_describe_tenant(req).await?; + Response::DescribeTenant(res) + } + }; + Ok(TenantResponseUnion { + response: Some(res), + }) + } + + async fn handle_create_tenant(&self, req: CreateTenantRequest) -> Result { + let desc = req + .desc + .ok_or_else(|| Error::InvalidArgument("tenant request".into()))?; + let desc = self.master.create_tenant(desc).await?; + Ok(CreateTenantResponse { desc: Some(desc) }) + } + + async fn handle_describe_tenant( + &self, + req: DescribeTenantRequest, + ) -> Result { + let db = self.master.tenant(&req.name).await?; + let desc = db.desc().await; + Ok(DescribeTenantResponse { desc: Some(desc) }) + } +} + +impl Server { + async fn handle_stream(&self, req: StreamRequest) -> Result { + let tenant = self.master.tenant(&req.tenant).await?; + let mut res = StreamResponse::default(); + for req_union in req.requests { + let res_union = self.handle_stream_union(tenant.clone(), req_union).await?; + res.responses.push(res_union); + } + Ok(res) + } + + async fn handle_stream_union( + 
&self, + tenant: Tenant, + req: StreamRequestUnion, + ) -> Result { + type Request = stream_request_union::Request; + type Response = stream_response_union::Response; + + let req = req + .request + .ok_or_else(|| Error::InvalidArgument("stream request".into()))?; + let res = match req { + Request::ListStreams(_req) => { + todo!(); + } + Request::CreateStream(req) => { + let res = self.handle_create_stream(tenant, req).await?; + Response::CreateStream(res) + } + Request::UpdateStream(_req) => { + todo!(); + } + Request::DeleteStream(_req) => { + todo!(); + } + Request::DescribeStream(req) => { + let res = self.handle_describe_stream(tenant, req).await?; + Response::DescribeStream(res) + } + }; + Ok(StreamResponseUnion { + response: Some(res), + }) + } + + async fn handle_create_stream( + &self, + tenant: Tenant, + req: CreateStreamRequest, + ) -> Result { + let desc = req + .desc + .ok_or_else(|| Error::InvalidArgument("stream request".into()))?; + let desc = tenant.create_stream(desc).await?; + Ok(CreateStreamResponse { desc: Some(desc) }) + } + + async fn handle_describe_stream( + &self, + tenant: Tenant, + req: DescribeStreamRequest, + ) -> Result { + let desc = tenant.stream_desc(&req.name).await?; + Ok(DescribeStreamResponse { desc: Some(desc) }) + } +} + +impl Server { + async fn handle_segment(&self, req: SegmentRequest) -> Result { + let tenant = self.master.tenant(&req.tenant).await?; + let stream = tenant.stream(req.stream_id).await?; + + let mut res = SegmentResponse::default(); + for req_union in req.requests { + let res_union = self.handle_segment_union(&stream, req_union).await?; + res.responses.push(res_union); + } + Ok(res) + } + + async fn handle_segment_union( + &self, + stream: &StreamInfo, + req: SegmentRequestUnion, + ) -> Result { + type Request = segment_request_union::Request; + type Response = segment_response_union::Response; + + let res = match req + .request + .ok_or_else(|| Error::InvalidArgument("segment request".into()))? 
+ { + Request::GetSegment(req) => { + Response::GetSegment(self.handle_get_segment(stream, req).await?) + } + Request::SealSegment(req) => { + Response::SealSegment(self.handle_seal_segment(stream, req).await?) + } + }; + Ok(SegmentResponseUnion { + response: Some(res), + }) + } + + async fn handle_get_segment( + &self, + stream: &StreamInfo, + req: GetSegmentRequest, + ) -> Result { + let segment = stream.segment(req.segment_epoch).await; + Ok(GetSegmentResponse { desc: segment }) + } + + async fn handle_seal_segment( + &self, + stream: &StreamInfo, + req: SealSegmentRequest, + ) -> Result { + stream.seal(req.segment_epoch).await?; + Ok(SealSegmentResponse {}) + } +} diff --git a/src/stream-engine/master/src/stream.rs b/src/stream-engine/master/src/stream.rs new file mode 100644 index 00000000..83c60305 --- /dev/null +++ b/src/stream-engine/master/src/stream.rs @@ -0,0 +1,332 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::{collections::HashMap, ops::DerefMut, sync::Arc, time::Instant}; + +use log::{debug, info}; +use stream_engine_proto::{ + Command, CommandType, ObserverState, Role, SegmentDesc, SegmentState, StreamDesc, +}; +use tokio::sync::Mutex; + +use crate::{master::Config, Error, Result, Sequence}; + +const INITIAL_EPOCH: u32 = 0; + +#[derive(Debug)] +struct PolicyApplicant<'a> { + role: Role, + epoch: u32, + observer_id: String, + stores: &'a [String], +} + +pub(crate) const DEFAULT_NUM_THRESHOLD: u32 = 1024; + +#[derive(Debug, Clone, Copy)] +pub struct ThresholdSwitching {} + +impl ThresholdSwitching { + fn new() -> Self { + // TODO(w41ter) support size option. + ThresholdSwitching {} + } + + fn apply(&self, applicant: &PolicyApplicant, stream_info: &mut StreamInner) -> Option { + if let Role::Leader = applicant.role { + if let Some(segment_info) = stream_info.segments.get(&stream_info.epoch) { + if segment_info.acked_index > DEFAULT_NUM_THRESHOLD { + return Some(stream_info.elect_leader(applicant)); + } + } + } + None + } +} + +#[derive(Debug, Clone, Copy)] +enum SwitchPolicy { + Threshold(ThresholdSwitching), +} + +impl SwitchPolicy { + fn apply(&self, applicant: &PolicyApplicant, stream_info: &mut StreamInner) -> Option { + match self { + SwitchPolicy::Threshold(policy) => policy.apply(applicant, stream_info), + } + } +} + +#[derive(Debug, Clone)] +#[allow(unused)] +pub struct ObserverMeta { + pub observer_id: String, + + /// Which stream is observing? + pub stream_name: String, + + /// The value of epoch in observer's memory. + pub epoch: u32, + + pub state: ObserverState, + + /// The acked sequence of entries it has already known. It might less than + /// (epoch << 32). 
+ pub acked_seq: Sequence, +} + +#[derive(Debug)] +struct ObserverInfo { + meta: ObserverMeta, + #[allow(dead_code)] + role: Role, + last_heartbeat: Instant, +} + +#[derive(Debug)] +pub(crate) struct SegmentInfo { + epoch: u32, + acked_index: u32, + state: SegmentState, + copy_set: Vec, +} + +impl SegmentInfo { + fn new(epoch: u32, copy_set: Vec) -> Self { + SegmentInfo { + epoch, + acked_index: 0, + state: SegmentState::Appending, + copy_set, + } + } + + fn segment_desc(&self, stream_id: u64) -> SegmentDesc { + SegmentDesc { + stream_id, + epoch: self.epoch, + state: self.state as i32, + copy_set: self.copy_set.clone(), + } + } +} + +#[derive(Clone)] +pub struct StreamInfo { + pub parent_id: u64, + pub stream_id: u64, + pub stream_name: String, + + inner: Arc>, +} + +struct StreamInner { + /// The latest allocated epoch of this stream. + epoch: u32, + + switch_policy: Option, + + segments: HashMap, + leader: Option, + observers: HashMap, +} + +impl StreamInfo { + pub fn new(parent_id: u64, stream_id: u64, stream_name: String) -> Self { + // TODO(w41ter) support configuring switch policy. 
+ StreamInfo { + parent_id, + stream_id, + stream_name, + + inner: Arc::new(Mutex::new(StreamInner { + epoch: INITIAL_EPOCH, + switch_policy: Some(SwitchPolicy::Threshold(ThresholdSwitching::new())), + segments: HashMap::new(), + leader: None, + observers: HashMap::new(), + })), + } + } + + pub fn stream_desc(&self) -> StreamDesc { + StreamDesc { + id: self.stream_id, + name: self.stream_name.clone(), + parent_id: self.parent_id, + } + } + + pub async fn segment(&self, segment_epoch: u32) -> Option { + let inner = self.inner.lock().await; + inner + .segments + .get(&segment_epoch) + .map(|s| s.segment_desc(self.stream_id)) + } + + pub async fn seal(&self, segment_epoch: u32) -> Result<()> { + let mut inner = self.inner.lock().await; + if let Some(segment) = inner.segments.get_mut(&segment_epoch) { + if segment.state != SegmentState::Sealed { + segment.state = SegmentState::Sealed; + } + Ok(()) + } else { + Err(Error::NotFound("no such segment".into())) + } + } + + pub async fn heartbeat( + &self, + config: &Config, + stores: &[String], + observer_meta: ObserverMeta, + role: Role, + ) -> Result> { + let writer_epoch = observer_meta.epoch; + let observer_id = observer_meta.observer_id.clone(); + let observer_info = ObserverInfo { + meta: observer_meta, + role, + last_heartbeat: Instant::now(), + }; + + let mut stream = self.inner.lock().await; + let stream = stream.deref_mut(); + if stream.epoch < writer_epoch && stream.epoch != INITIAL_EPOCH { + return Err(Error::InvalidArgument("too large epoch".into())); + } + + stream.observe(&observer_id, observer_info); + + let applicant = PolicyApplicant { + epoch: writer_epoch, + role, + observer_id, + stores, + }; + Ok(apply_strategies(self.stream_id, config, &applicant, stream)) + } +} + +impl StreamInner { + pub fn leader_desc(&self) -> &str { + if let Some(leader) = self.leader.as_ref() { + leader + } else { + "" + } + } + + pub fn observe(&mut self, observer_id: &str, observer_info: ObserverInfo) -> bool { + if let 
Some(prev_info) = self.observers.get(observer_id) { + if prev_info.last_heartbeat > observer_info.last_heartbeat { + return false; + } + } + + let acked_seq = observer_info.meta.acked_seq; + let acked_epoch = acked_seq.epoch; + let acked_index = acked_seq.index; + + debug!( + "{:?} acked epoch: {}, acked index {}", + observer_info, acked_epoch, acked_index + ); + + self.observers.insert(observer_id.into(), observer_info); + if let Some(segment_info) = self.segments.get_mut(&acked_epoch) { + // TODO(w41ter) update all previous epochs. + segment_info.epoch = acked_epoch; + segment_info.acked_index = acked_index; + } + + true + } + + pub fn elect_leader(&mut self, applicant: &PolicyApplicant) -> Command { + self.epoch += 1; + self.leader = Some(applicant.observer_id.clone()); + self.segments.insert( + self.epoch, + SegmentInfo::new(self.epoch, applicant.stores.into()), + ); + self.gen_promote_cmd(&applicant.observer_id) + } + + pub fn gen_promote_cmd(&self, observer_id: &str) -> Command { + // TODO(w41ter) set pending epochs. 
+ if let Some(leader) = &self.leader { + if leader == observer_id { + return Command { + command_type: CommandType::Promote as i32, + epoch: self.epoch, + role: Role::Leader as i32, + leader: observer_id.to_string(), + pending_epochs: vec![], + }; + } + } + Command { + command_type: CommandType::Promote as i32, + role: Role::Follower as i32, + epoch: self.epoch, + leader: self.leader.as_ref().cloned().unwrap_or_default(), + pending_epochs: vec![], + } + } +} + +fn apply_strategies( + stream_id: u64, + config: &Config, + applicant: &PolicyApplicant, + stream_info: &mut StreamInner, +) -> Vec { + if let Some(policy) = stream_info.switch_policy { + if let Some(cmd) = policy.apply(applicant, stream_info) { + return vec![cmd]; + } + } + + // stale request, promote it + if applicant.epoch < stream_info.epoch { + // The observer might lost heartbeat response, so here check and accept + // the staled heartbeat request from current leader, and continue to promote + // it for idempotent. + info!("stream {} epoch {} leader {} promote observer {}, epoch: {}, by receiving staled heartbeat", + stream_id, stream_info.epoch, stream_info.leader_desc(), + applicant.observer_id, applicant.epoch); + return vec![stream_info.gen_promote_cmd(&applicant.observer_id)]; + } + + // check leader + let now = Instant::now(); + let select_new_leader = match &stream_info.leader { + Some(observer_id) => { + let observer_info = stream_info + .observers + .get(observer_id) + .expect("stream must exists if it is a leader"); + // Leader might lost, need select new leader + observer_info.last_heartbeat + config.heartbeat_timeout() <= now + } + None => true, + }; + if select_new_leader { + return vec![stream_info.elect_leader(applicant)]; + } + vec![] +} diff --git a/src/stream-engine/proto/Cargo.toml b/src/stream-engine/proto/Cargo.toml new file mode 100644 index 00000000..de256d74 --- /dev/null +++ b/src/stream-engine/proto/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "stream-engine-proto" +version 
= "0.1.0" +edition = "2021" +publish = false + +[dependencies] +stream-engine-common = { version = "0.1", path = "../common" } + +prost = "0.9" +tonic = "0.6" + +[build-dependencies] +tonic-build = "0.6" diff --git a/src/journal/build.rs b/src/stream-engine/proto/build.rs similarity index 82% rename from src/journal/build.rs rename to src/stream-engine/proto/build.rs index 6260629d..f505299f 100644 --- a/src/journal/build.rs +++ b/src/stream-engine/proto/build.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,6 +13,6 @@ // limitations under the License. fn main() -> Result<(), Box> { - tonic_build::compile_protos("src/grpc/journal.proto")?; + tonic_build::configure().compile(&["proto/master.proto", "proto/store.proto"], &["proto"])?; Ok(()) } diff --git a/src/stream-engine/proto/proto/master.proto b/src/stream-engine/proto/proto/master.proto new file mode 100644 index 00000000..9d7ad905 --- /dev/null +++ b/src/stream-engine/proto/proto/master.proto @@ -0,0 +1,233 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package streamengine.master.v1; + +service Master { + rpc Tenant(TenantRequest) returns (TenantResponse) {} + + rpc Stream(StreamRequest) returns (StreamResponse) {} + + rpc Segment(SegmentRequest) returns (SegmentResponse) {} + + // Sent heartbeat to master and receives commands which must complete by + // stream observers + rpc Heartbeat(HeartbeatRequest) returns (HeartbeatResponse) {} +} + +message TenantRequest { repeated TenantRequestUnion requests = 1; } + +message TenantResponse { repeated TenantResponseUnion responses = 1; } + +message TenantRequestUnion { + oneof request { + ListTenantsRequest list_tenants = 1; + CreateTenantRequest create_tenant = 2; + UpdateTenantRequest update_tenant = 3; + DeleteTenantRequest delete_tenant = 4; + DescribeTenantRequest describe_tenant = 5; + } +} + +message TenantResponseUnion { + oneof response { + ListTenantsResponse list_tenants = 1; + CreateTenantResponse create_tenant = 2; + UpdateTenantResponse update_tenant = 3; + DeleteTenantResponse delete_tenant = 4; + DescribeTenantResponse describe_tenant = 5; + } +} + +message ListTenantsRequest {} + +message ListTenantsResponse { repeated TenantDesc descs = 1; } + +message CreateTenantRequest { TenantDesc desc = 1; } + +message CreateTenantResponse { TenantDesc desc = 1; } + +message UpdateTenantRequest { TenantDesc desc = 1; } + +message UpdateTenantResponse {} + +message DeleteTenantRequest { string name = 1; } + +message DeleteTenantResponse {} + +message DescribeTenantRequest { string name = 1; } + +message DescribeTenantResponse { TenantDesc desc = 1; } + +message StreamRequest { + string tenant = 1; + repeated StreamRequestUnion requests = 2; +} + +message StreamResponse { repeated StreamResponseUnion responses = 1; } + +message StreamRequestUnion { + oneof request { + ListStreamsRequest list_streams = 1; + CreateStreamRequest create_stream = 2; + UpdateStreamRequest update_stream = 3; + DeleteStreamRequest delete_stream = 4; + 
DescribeStreamRequest describe_stream = 5; + } +} + +message StreamResponseUnion { + oneof response { + ListStreamsResponse list_streams = 1; + CreateStreamResponse create_stream = 2; + UpdateStreamResponse update_stream = 3; + DeleteStreamResponse delete_stream = 4; + DescribeStreamResponse describe_stream = 5; + } +} + +message ListStreamsRequest {} + +message ListStreamsResponse { repeated StreamDesc descs = 1; } + +message CreateStreamRequest { StreamDesc desc = 1; } + +message CreateStreamResponse { StreamDesc desc = 1; } + +message UpdateStreamRequest { StreamDesc desc = 1; } + +message UpdateStreamResponse {} + +message DeleteStreamRequest { string name = 1; } + +message DeleteStreamResponse {} + +message DescribeStreamRequest { string name = 1; } + +message DescribeStreamResponse { StreamDesc desc = 1; } + +message TenantDesc { + uint64 id = 1; + string name = 2; +} + +message StreamDesc { + uint64 id = 1; + string name = 2; + uint64 parent_id = 3; +} + +enum SegmentState { + APPENDING = 0; + SEALED = 1; +} + +message SegmentDesc { + uint64 stream_id = 1; + uint32 epoch = 2; + SegmentState state = 3; + repeated string copy_set = 4; +} + +message SegmentRequest { + string tenant = 1; + uint64 stream_id = 2; + + repeated SegmentRequestUnion requests = 4; +} + +message SegmentResponse { + repeated SegmentResponseUnion responses = 1; +} + +message SegmentRequestUnion { + oneof request { + GetSegmentRequest get_segment = 1; + SealSegmentRequest seal_segment = 2; + } +} + +message SegmentResponseUnion { + oneof response { + GetSegmentResponse get_segment = 1; + SealSegmentResponse seal_segment = 2; + } +} + +message GetSegmentRequest { + uint32 segment_epoch = 1; +} + +message GetSegmentResponse { + SegmentDesc desc = 1; +} + +message SealSegmentRequest { + uint32 segment_epoch = 1; +} + +message SealSegmentResponse {} + +enum Role { + FOLLOWER = 0; + LEADER = 1; +} + +// Commands is supplied by master and complete by stream observers. 
+enum CommandType { + NOP = 0; + PROMOTE = 1; +} + +message Command { + CommandType command_type = 1; + + uint32 epoch = 2; + string leader = 3; + Role role = 4; + repeated uint32 pending_epochs = 5; +} + +// The state of an stream's observer. The transition of states is: +// +// Following -> Recovering -> Leading +// ^ | +// +---------------------------+ +enum ObserverState { + // A follower is prepared to follow and subscribe a stream. + FOLLOWING = 0; + // A leader must seals the former epochs and recovery all unfinished + // replications before starting to lead a stream. + RECOVERING = 1; + // A leader is prepared to receive incoming events. + LEADING = 2; +} + +message HeartbeatRequest { + string tenant = 1; + uint64 stream_id = 2; + uint32 writer_epoch = 3; + Role role = 4; + + string observer_id = 5; + ObserverState observer_state = 6; + + uint64 acked_seq = 7; +} + +message HeartbeatResponse { + repeated Command commands = 1; +} diff --git a/src/stream-engine/proto/proto/store.proto b/src/stream-engine/proto/proto/store.proto new file mode 100644 index 00000000..de55fff0 --- /dev/null +++ b/src/stream-engine/proto/proto/store.proto @@ -0,0 +1,108 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package streamengine.store.v1; + +enum EntryType { + HOLE = 0; + EVENT = 1; + BRIDGE = 2; +} + +message Entry { + EntryType entry_type = 1; + uint32 epoch = 2; + bytes event = 3; +} + +service Store { + // Read some entries from a segment. + rpc Read(ReadRequest) returns (stream ReadResponse); + + rpc Mutate(MutateRequest) returns (MutateResponse); +} + +message MutateRequest { + uint64 stream_id = 1; + uint32 writer_epoch = 2; + + MutateRequestUnion request = 3; +} + +message MutateResponse { + MutateResponseUnion response = 1; +} + +message MutateRequestUnion { + oneof request { + // Store some continuous entries to a segment. This function will create + // a new segment replica if it not exists. + WriteRequest write = 1; + + // Seal the corresponding segment so that any request with small epoch will be rejected. + SealRequest seal = 2; + } +} + +message MutateResponseUnion { + oneof response { + WriteResponse write = 1; + SealResponse seal = 2; + } +} + +message WriteRequest { + uint32 segment_epoch = 1; + + // The entries before this sequence have been acked. This value might + // euquals to `(seg_epoch << 32) | (first_index + events.len())`, so + // a server would forwards acked once the event all persisted. + uint64 acked_seq = 2; + + uint32 first_index = 3; + + repeated Entry entries = 4; +} + +message WriteResponse { + // The store only returns continouesly index. + uint32 persisted_index = 1; +} + +message ReadRequest { + uint64 stream_id = 1; + uint32 seg_epoch = 2; + + uint32 start_index = 3; + uint32 limit = 4; + + bool include_pending_entries = 5; +} + +message ReadResponse { + uint32 index = 1; + Entry entry = 2; +} + +message SealRequest { + uint32 segment_epoch = 1; +} + +message SealResponse { + // The acked index this replica already known. This field is used to help + // determine the starting position of log entries that need recovery. 
+ uint32 acked_index = 1; +} diff --git a/src/stream-engine/proto/src/cast.rs b/src/stream-engine/proto/src/cast.rs new file mode 100644 index 00000000..8d91e737 --- /dev/null +++ b/src/stream-engine/proto/src/cast.rs @@ -0,0 +1,73 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use stream_engine_common::Entry; + +use super::{ObserverState, Role}; + +impl From for ObserverState { + fn from(state: i32) -> Self { + ObserverState::from_i32(state).unwrap_or(ObserverState::Following) + } +} + +impl From for Role { + fn from(role: i32) -> Self { + Role::from_i32(role).unwrap_or(Role::Follower) + } +} + +impl From for Role { + fn from(observer_state: ObserverState) -> Self { + match observer_state { + ObserverState::Following => Role::Follower, + ObserverState::Recovering | ObserverState::Leading => Role::Leader, + } + } +} + +impl From for crate::Entry { + fn from(e: Entry) -> Self { + match e { + Entry::Hole => crate::Entry { + entry_type: crate::EntryType::Hole as i32, + epoch: 0, + event: vec![], + }, + Entry::Event { epoch, event } => crate::Entry { + entry_type: crate::EntryType::Event as i32, + epoch, + event: event.into(), + }, + Entry::Bridge { epoch } => crate::Entry { + entry_type: crate::EntryType::Bridge as i32, + epoch, + event: vec![], + }, + } + } +} + +impl From for Entry { + fn from(e: crate::Entry) -> Self { + match crate::EntryType::from_i32(e.entry_type) { + Some(crate::EntryType::Event) => 
Entry::Event { + event: e.event.into(), + epoch: e.epoch, + }, + Some(crate::EntryType::Bridge) => Entry::Bridge { epoch: e.epoch }, + _ => Entry::Hole, + } + } +} diff --git a/src/stream-engine/proto/src/lib.rs b/src/stream-engine/proto/src/lib.rs new file mode 100644 index 00000000..4d31e6ab --- /dev/null +++ b/src/stream-engine/proto/src/lib.rs @@ -0,0 +1,20 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![allow(clippy::all)] + +mod cast; + +tonic::include_proto!("streamengine.master.v1"); +tonic::include_proto!("streamengine.store.v1"); diff --git a/src/stream-engine/store/Cargo.toml b/src/stream-engine/store/Cargo.toml new file mode 100644 index 00000000..bbe1b7fd --- /dev/null +++ b/src/stream-engine/store/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "stream-engine-store" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +stream-engine-common = { version = "0.1", path = "../common" } +stream-engine-proto = { version = "0.1", path = "../proto" } + +futures = "0.3.21" +log = "0.4.14" +prost = "0.9" +thiserror = "1.0" +tokio = { version = "1.15", features = ["full"] } +tokio-stream = { version = "0.1", features = ["net"] } +tonic = "0.6" diff --git a/src/journal/src/error.rs b/src/stream-engine/store/src/error.rs similarity index 62% rename from src/journal/src/error.rs rename to src/stream-engine/store/src/error.rs index 37f4ecc0..dac51559 100644 --- a/src/journal/src/error.rs +++ 
b/src/stream-engine/store/src/error.rs @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,22 +13,27 @@ // limitations under the License. use thiserror::Error; +use tonic::{Code, Status}; -/// Errors for all journal operations. #[derive(Error, Debug)] pub enum Error { #[error("{0} is not found")] NotFound(String), #[error("{0} already exists")] AlreadyExists(String), - #[error("{0}")] - InvalidArgument(String), - #[error(transparent)] - Io(#[from] std::io::Error), - #[error("corrupted: {0}")] - Corrupted(String), - #[error("unknown: {0}")] - Unknown(String), + #[error("invalid {0}")] + InvalidRequest(String), +} + +impl From for Status { + fn from(err: Error) -> Status { + let (code, message) = match err { + Error::NotFound(m) => (Code::NotFound, m), + Error::AlreadyExists(m) => (Code::AlreadyExists, m), + Error::InvalidRequest(m) => (Code::InvalidArgument, m), + }; + Status::new(code, message) + } } pub type Result = std::result::Result; diff --git a/src/stream-engine/store/src/lib.rs b/src/stream-engine/store/src/lib.rs new file mode 100644 index 00000000..f4da2bfe --- /dev/null +++ b/src/stream-engine/store/src/lib.rs @@ -0,0 +1,42 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +mod server; + +pub use server::Server; +use stream_engine_common::{error::Result, Entry, Sequence}; +#[cfg(debug_assertions)] +pub use tests::build_store; + +#[cfg(debug_assertions)] +mod tests { + use tokio::net::TcpListener; + use tokio_stream::wrappers::TcpListenerStream; + + use super::*; + + pub async fn build_store() -> Result { + let listener = TcpListener::bind("127.0.0.1:0").await?; + let local_addr = listener.local_addr()?; + tokio::task::spawn(async move { + let server = Server::new(); + tonic::transport::Server::builder() + .add_service(server.into_service()) + .serve_with_incoming(TcpListenerStream::new(listener)) + .await + .unwrap(); + }); + Ok(format!("http://{}", local_addr)) + } +} diff --git a/src/stream-engine/store/src/server.rs b/src/stream-engine/store/src/server.rs new file mode 100644 index 00000000..f9e60beb --- /dev/null +++ b/src/stream-engine/store/src/server.rs @@ -0,0 +1,406 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::{ + collections::{BTreeMap, BTreeSet, HashMap}, + pin::Pin, + sync::{Arc, Mutex}, + task::{Context, Poll, Waker}, +}; + +use futures::Stream; +use log::warn; +use stream_engine_proto::*; +use tokio::sync::Mutex as TokioMutex; +use tonic::{async_trait, Request, Response, Status}; + +use crate::{Entry, Sequence}; + +type Result = std::result::Result; + +#[derive(Debug)] +struct Replica { + bridge: Option, + acked_index: Option, + wakers: Vec, + entries: BTreeMap, + sealed: Option, + expect_index: u32, +} + +impl Replica { + fn new() -> Self { + Replica { + bridge: None, + acked_index: None, + wakers: Vec::new(), + entries: BTreeMap::new(), + sealed: None, + expect_index: 1, + } + } + + fn store(&mut self, first_index: u32, entries: Vec) -> Result<()> { + for (off, entry) in entries.into_iter().enumerate() { + let index = first_index + (off as u32); + if self.bridge.map(|idx| index > idx).unwrap_or_default() { + return Err(Status::invalid_argument( + "try to append a record after a bridge record", + )); + } + if let Entry::Bridge { epoch: _ } = &entry { + self.bridge = Some(index); + self.entries.split_off(&index); + } + self.entries.insert(index, entry); + } + + for (idx, _) in self.entries.range(self.expect_index..) { + if *idx != self.expect_index { + break; + } + self.expect_index += 1; + } + + Ok(()) + } + + fn advance(&mut self, acked_index: u32) -> bool { + if let Some(index) = &self.acked_index { + if *index < acked_index { + self.acked_index = Some(acked_index); + true + } else { + false + } + } else { + self.acked_index = Some(acked_index); + true + } + } + + fn broadcast(&mut self) { + // It's not efficient, but sufficient for verifying. + std::mem::take(&mut self.wakers) + .into_iter() + .for_each(Waker::wake); + } + + fn is_index_acked(&self, index: u32) -> bool { + self.acked_index.map(|i| i >= index).unwrap_or_default() + } + + /// Returns the last index of continuously entries. 
+ fn continuously_persisted_index(&self) -> u32 { + self.expect_index.saturating_sub(1) + } +} + +type SharedReplica = Arc>; + +#[derive(Debug)] +struct PartialStream { + epochs: BTreeSet, + replicas: HashMap, +} + +impl PartialStream { + fn new() -> Self { + PartialStream { + epochs: BTreeSet::new(), + replicas: HashMap::new(), + } + } +} + +#[derive(Debug)] +pub struct ReplicaReader { + next_index: u32, + limit: usize, + finished: bool, + include_pending_entries: bool, + + replica: SharedReplica, +} + +impl Stream for ReplicaReader { + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + if this.finished { + return Poll::Ready(None); + } + + let mut replica = this.replica.lock().unwrap(); + if let Some((index, entry)) = replica.entries.range(this.next_index..).next() { + if this.include_pending_entries + || (*index == this.next_index && replica.is_index_acked(*index)) + { + // End of segment. + if let Entry::Bridge { epoch: _ } = entry { + this.finished = true; + } + this.next_index = *index + 1; + this.limit -= 1; + if this.limit == 0 { + this.finished = true; + } + + let resp = ReadResponse { + index: *index, + entry: Some(entry.clone().into()), + }; + + return Poll::Ready(Some(Ok(resp))); + } + } else if this.include_pending_entries { + return Poll::Ready(None); + } + + replica.wakers.push(cx.waker().clone()); + + Poll::Pending + } +} + +#[derive(Debug)] +pub(super) struct Store { + streams: HashMap>, +} + +impl Store { + pub fn new() -> Self { + Store { + streams: HashMap::new(), + } + } + + pub fn write( + &mut self, + stream_id: u64, + seg_epoch: u32, + writer_epoch: u32, + acked_seq: Sequence, + first_index: u32, + entries: Vec, + ) -> Result { + let stream = self + .streams + .entry(stream_id) + .or_insert_with(|| Box::new(PartialStream::new())); + + let replica = stream.replicas.entry(seg_epoch).or_insert_with(|| { + stream.epochs.insert(seg_epoch); + 
Arc::new(Mutex::new(Replica::new())) + }); + + let mut replica = replica.lock().unwrap(); + if let Some(epoch) = replica.sealed { + if writer_epoch < epoch { + warn!( + "stream {} seg {} reject staled store request, writer epoch is {}, sealed epoch is {}", + stream_id, seg_epoch, writer_epoch, epoch + ); + return Err(Status::failed_precondition("epoch is staled")); + } + } + + let mut updated = false; + if !entries.is_empty() { + updated = true; + replica.store(first_index, entries)?; + } + + if acked_seq.epoch >= seg_epoch { + updated = true; + replica.advance(acked_seq.index); + } + + if updated { + replica.broadcast(); + } + + Ok(replica.continuously_persisted_index()) + } + + pub fn read( + &mut self, + stream_id: u64, + seg_epoch: u32, + start_index: u32, + limit: usize, + include_pending_entries: bool, + ) -> Result { + let stream = match self.streams.get_mut(&stream_id) { + Some(s) => s, + None => return Err(Status::not_found("no such stream")), + }; + + let replica = match stream.replicas.get_mut(&seg_epoch) { + Some(r) => r, + None => return Err(Status::not_found("no such segment replica exists")), + }; + + Ok(ReplicaReader { + next_index: start_index, + limit, + finished: limit == 0, + replica: replica.clone(), + include_pending_entries, + }) + } + + pub fn seal(&mut self, stream_id: u64, seg_epoch: u32, writer_epoch: u32) -> Result { + let stream = self + .streams + .entry(stream_id) + .or_insert_with(|| Box::new(PartialStream::new())); + + let replica = stream.replicas.entry(seg_epoch).or_insert_with(|| { + stream.epochs.insert(seg_epoch); + Arc::new(Mutex::new(Replica::new())) + }); + + let mut replica = replica.lock().unwrap(); + if let Some(epoch) = replica.sealed { + if epoch > writer_epoch { + warn!( + "stream {} seg {} reject staled sealing request, writer epoch is {}, sealed epoch is {}", + stream_id, seg_epoch, writer_epoch, epoch + ); + return Err(Status::failed_precondition("epoch is sealed")); + } + } + + replica.sealed = Some(writer_epoch); 
+ Ok(replica.acked_index.unwrap_or_default()) + } +} + +#[derive(Debug)] +pub struct Server { + store: Arc>, +} + +#[allow(unused, dead_code)] +impl Server { + pub fn new() -> Self { + Server { + store: Arc::new(TokioMutex::new(Store::new())), + } + } + + pub fn into_service(self) -> store_server::StoreServer { + store_server::StoreServer::new(self) + } +} + +impl Default for Server { + fn default() -> Self { + Self::new() + } +} + +#[allow(unused)] +#[async_trait] +impl store_server::Store for Server { + type ReadStream = ReplicaReader; + + async fn mutate(&self, input: Request) -> Result> { + Ok(Response::new(self.handle_mutate(input.into_inner()).await?)) + } + + async fn read(&self, input: Request) -> Result> { + let req = input.into_inner(); + let mut store = self.store.lock().await; + let stream = store.read( + req.stream_id, + req.seg_epoch, + req.start_index, + req.limit as usize, + req.include_pending_entries, + )?; + Ok(Response::new(stream)) + } +} + +#[allow(unused, dead_code)] +impl Server { + async fn handle_mutate(&self, req: MutateRequest) -> Result { + let mut resp = MutateResponse::default(); + if let Some(union_req) = req.request { + resp.response = Some( + self.handle_mutate_union(req.stream_id, req.writer_epoch, union_req) + .await?, + ); + } + Ok(resp) + } + + async fn handle_mutate_union( + &self, + stream_id: u64, + writer_epoch: u32, + req: MutateRequestUnion, + ) -> Result { + type Request = mutate_request_union::Request; + type Response = mutate_response_union::Response; + + let req = req + .request + .ok_or_else(|| Status::invalid_argument("mutate request"))?; + let res = match req { + Request::Write(req) => { + Response::Write(self.handle_write(stream_id, writer_epoch, req).await?) + } + Request::Seal(req) => { + Response::Seal(self.handle_seal(stream_id, writer_epoch, req).await?) 
+ } + }; + Ok(MutateResponseUnion { + response: Some(res), + }) + } + + async fn handle_write( + &self, + stream_id: u64, + writer_epoch: u32, + req: WriteRequest, + ) -> Result { + let mut store = self.store.lock().await; + let persisted_index = store.write( + stream_id, + req.segment_epoch, + writer_epoch, + req.acked_seq.into(), + req.first_index, + req.entries.into_iter().map(Into::into).collect(), + )?; + + Ok(WriteResponse { persisted_index }) + } + + async fn handle_seal( + &self, + stream_id: u64, + writer_epoch: u32, + req: SealRequest, + ) -> Result { + let mut store = self.store.lock().await; + let acked_index = store.seal(stream_id, req.segment_epoch, writer_epoch)?; + Ok(SealResponse { acked_index }) + } +} diff --git a/src/supervisor/Cargo.toml b/src/supervisor/Cargo.toml new file mode 100644 index 00000000..5a21517f --- /dev/null +++ b/src/supervisor/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "engula-supervisor" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +engula-apis = { version = "0.3", path = "../apis" } + +prost = "0.9" +tokio = { version = "1.15", features = ["full"] } +tonic = "0.6" + +[build-dependencies] +tonic-build = "0.6" diff --git a/src/supervisor/build.rs b/src/supervisor/build.rs new file mode 100644 index 00000000..4a6e30a8 --- /dev/null +++ b/src/supervisor/build.rs @@ -0,0 +1,23 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +fn main() -> Result<(), Box> { + tonic_build::configure() + .extern_path(".engula.v1", "::engula_apis") + .compile( + &["engula/supervisor/v1/supervisor.proto"], + &[".", "../apis"], + )?; + Ok(()) +} diff --git a/src/kernel/src/metadata.proto b/src/supervisor/engula/supervisor/v1/supervisor.proto similarity index 61% rename from src/kernel/src/metadata.proto rename to src/supervisor/engula/supervisor/v1/supervisor.proto index d05febd0..3936cacd 100644 --- a/src/kernel/src/metadata.proto +++ b/src/supervisor/engula/supervisor/v1/supervisor.proto @@ -1,4 +1,4 @@ -// Copyright 2021 The Engula Authors. +// Copyright 2022 The Engula Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,18 +14,15 @@ syntax = "proto3"; -package engula.metadata.v1; +package engula.supervisor.v1; -message Version { - uint64 sequence = 1; - map meta = 2; - repeated string objects = 3; -} +import "engula/v1/database.proto"; +import "engula/v1/collection.proto"; + +service Supervisor { + rpc database(engula.v1.DatabaseRequest) returns (engula.v1.DatabaseResponse) { + } -message VersionUpdate { - uint64 sequence = 1; - map add_meta = 2; - repeated string remove_meta = 3; - repeated string add_objects = 4; - repeated string remove_objects = 5; + rpc collection(engula.v1.CollectionRequest) + returns (engula.v1.CollectionResponse) {} } diff --git a/src/supervisor/src/apis.rs b/src/supervisor/src/apis.rs new file mode 100644 index 00000000..be2bfdc8 --- /dev/null +++ b/src/supervisor/src/apis.rs @@ -0,0 +1,17 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![allow(clippy::all)] + +tonic::include_proto!("engula.supervisor.v1"); diff --git a/src/supervisor/src/error.rs b/src/supervisor/src/error.rs new file mode 100644 index 00000000..be559104 --- /dev/null +++ b/src/supervisor/src/error.rs @@ -0,0 +1,16 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub type Error = tonic::Status; +pub type Result = std::result::Result; diff --git a/src/supervisor/src/lib.rs b/src/supervisor/src/lib.rs new file mode 100644 index 00000000..dbff328e --- /dev/null +++ b/src/supervisor/src/lib.rs @@ -0,0 +1,26 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod apis; +mod error; +mod server; +mod supervisor; +mod universe; + +pub(crate) use self::universe::{Database, Universe}; +pub use self::{ + error::{Error, Result}, + server::Server, + supervisor::Supervisor, +}; diff --git a/src/supervisor/src/server.rs b/src/supervisor/src/server.rs new file mode 100644 index 00000000..e1890f1c --- /dev/null +++ b/src/supervisor/src/server.rs @@ -0,0 +1,185 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use engula_apis::*; +use tonic::{Request, Response}; + +use crate::{apis::*, Database, Error, Result, Universe}; + +#[derive(Clone)] +pub struct Server { + uv: Universe, +} + +impl Default for Server { + fn default() -> Self { + Self::new() + } +} + +impl Server { + pub fn new() -> Self { + Self { + uv: Universe::new(), + } + } + + pub fn into_service(self) -> supervisor_server::SupervisorServer { + supervisor_server::SupervisorServer::new(self) + } +} + +impl Server { + async fn handle_database(&self, req: DatabaseRequest) -> Result { + let mut res = DatabaseResponse::default(); + for req_union in req.requests { + let res_union = self.handle_database_union(req_union).await?; + res.responses.push(res_union); + } + Ok(res) + } + + async fn handle_database_union( + &self, + req: DatabaseRequestUnion, + ) -> Result { + let req = req + .request + .ok_or_else(|| Error::invalid_argument("missing database request"))?; + let res = match req { + database_request_union::Request::ListDatabases(_) => { + todo!(); + } + database_request_union::Request::CreateDatabase(req) => { + let res = self.handle_create_database(req).await?; + database_response_union::Response::CreateDatabase(res) + } + database_request_union::Request::UpdateDatabase(_) => { + todo!(); + } + database_request_union::Request::DeleteDatabase(_) => { + todo!(); + } + database_request_union::Request::DescribeDatabase(req) => { + let res = self.handle_describe_database(req).await?; + database_response_union::Response::DescribeDatabase(res) + } + }; + Ok(DatabaseResponseUnion { + response: Some(res), + }) + } + + async fn handle_create_database( + &self, + req: CreateDatabaseRequest, + ) -> Result { + let desc = req + .desc + .ok_or_else(|| Error::invalid_argument("missing database description"))?; + let desc = self.uv.create_database(desc).await?; + Ok(CreateDatabaseResponse { desc: Some(desc) }) + } + + async fn handle_describe_database( + &self, + req: DescribeDatabaseRequest, + ) -> Result { + let db = 
self.uv.database(&req.name).await?; + let desc = db.desc().await; + Ok(DescribeDatabaseResponse { desc: Some(desc) }) + } + + async fn handle_collection(&self, req: CollectionRequest) -> Result { + let db = self.uv.database(&req.dbname).await?; + let mut res = CollectionResponse::default(); + for req_union in req.requests { + let res_union = self.handle_collection_union(db.clone(), req_union).await?; + res.responses.push(res_union); + } + Ok(res) + } + + async fn handle_collection_union( + &self, + db: Database, + req: CollectionRequestUnion, + ) -> Result { + let req = req + .request + .ok_or_else(|| Error::invalid_argument("missing collection request"))?; + let res = match req { + collection_request_union::Request::ListCollections(_) => { + todo!(); + } + collection_request_union::Request::CreateCollection(req) => { + let res = self.handle_create_collection(db, req).await?; + collection_response_union::Response::CreateCollection(res) + } + collection_request_union::Request::UpdateCollection(_) => { + todo!(); + } + collection_request_union::Request::DeleteCollection(_) => { + todo!(); + } + collection_request_union::Request::DescribeCollection(req) => { + let res = self.handle_describe_collection(db, req).await?; + collection_response_union::Response::DescribeCollection(res) + } + }; + Ok(CollectionResponseUnion { + response: Some(res), + }) + } + + async fn handle_create_collection( + &self, + db: Database, + req: CreateCollectionRequest, + ) -> Result { + let desc = req + .desc + .ok_or_else(|| Error::invalid_argument("missing collection description"))?; + let desc = db.create_collection(desc).await?; + Ok(CreateCollectionResponse { desc: Some(desc) }) + } + + async fn handle_describe_collection( + &self, + db: Database, + req: DescribeCollectionRequest, + ) -> Result { + let co = db.collection(&req.name).await?; + let desc = co.desc().await; + Ok(DescribeCollectionResponse { desc: Some(desc) }) + } +} + +#[tonic::async_trait] +impl 
supervisor_server::Supervisor for Server { + async fn database(&self, req: Request) -> Result> { + let req = req.into_inner(); + let res = self.handle_database(req).await?; + Ok(Response::new(res)) + } + + async fn collection( + &self, + req: Request, + ) -> Result> { + let req = req.into_inner(); + let res = self.handle_collection(req).await?; + Ok(Response::new(res)) + } +} diff --git a/src/supervisor/src/supervisor.rs b/src/supervisor/src/supervisor.rs new file mode 100644 index 00000000..1959064e --- /dev/null +++ b/src/supervisor/src/supervisor.rs @@ -0,0 +1,107 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use engula_apis::*; +use tonic::Request; + +use crate::{apis::supervisor_server::Supervisor as _, Error, Result, Server}; + +#[derive(Clone)] +pub struct Supervisor { + server: Server, +} + +impl Default for Supervisor { + fn default() -> Self { + Self::new() + } +} + +impl Supervisor { + pub fn new() -> Self { + Self { + server: Server::new(), + } + } + + pub async fn database(&self, req: DatabaseRequest) -> Result { + let req = Request::new(req); + let res = self.server.database(req).await?; + Ok(res.into_inner()) + } + + async fn database_union( + &self, + req: database_request_union::Request, + ) -> Result { + let req = DatabaseRequest { + requests: vec![DatabaseRequestUnion { request: Some(req) }], + }; + let mut res = self.database(req).await?; + res.responses + .pop() + .and_then(|x| x.response) + .ok_or_else(|| Error::internal("missing database response")) + } + + pub async fn describe_database(&self, name: String) -> Result { + let req = DescribeDatabaseRequest { name }; + let req = database_request_union::Request::DescribeDatabase(req); + let res = self.database_union(req).await?; + let desc = if let database_response_union::Response::DescribeDatabase(res) = res { + res.desc + } else { + None + }; + desc.ok_or_else(|| Error::internal("missing database description")) + } + + pub async fn collection(&self, req: CollectionRequest) -> Result { + let req = Request::new(req); + let res = self.server.collection(req).await?; + Ok(res.into_inner()) + } + + async fn collection_union( + &self, + dbname: String, + req: collection_request_union::Request, + ) -> Result { + let req = CollectionRequest { + dbname, + requests: vec![CollectionRequestUnion { request: Some(req) }], + }; + let mut res = self.collection(req).await?; + res.responses + .pop() + .and_then(|x| x.response) + .ok_or_else(|| Error::internal("missing collection response")) + } + + pub async fn describe_collection( + &self, + dbname: String, + coname: String, + ) -> Result { + let req = 
DescribeCollectionRequest { name: coname }; + let req = collection_request_union::Request::DescribeCollection(req); + let res = self.collection_union(dbname, req).await?; + let desc = if let collection_response_union::Response::DescribeCollection(res) = res { + res.desc + } else { + None + }; + desc.ok_or_else(|| Error::internal("missing collection description")) + } +} diff --git a/src/supervisor/src/universe.rs b/src/supervisor/src/universe.rs new file mode 100644 index 00000000..4e175f75 --- /dev/null +++ b/src/supervisor/src/universe.rs @@ -0,0 +1,135 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::{collections::HashMap, sync::Arc}; + +use engula_apis::*; +use tokio::sync::Mutex; + +use crate::error::{Error, Result}; + +#[derive(Clone)] +pub struct Universe { + inner: Arc>, +} + +struct UniverseInner { + next_id: u64, + databases: HashMap, +} + +impl Universe { + pub fn new() -> Self { + let inner = UniverseInner { + next_id: 1, + databases: HashMap::new(), + }; + Self { + inner: Arc::new(Mutex::new(inner)), + } + } + + pub async fn database(&self, name: &str) -> Result { + let inner = self.inner.lock().await; + inner + .databases + .get(name) + .cloned() + .ok_or_else(|| Error::not_found(format!("database {}", name))) + } + + pub async fn create_database(&self, mut desc: DatabaseDesc) -> Result { + let mut inner = self.inner.lock().await; + if inner.databases.contains_key(&desc.name) { + return Err(Error::already_exists(format!("database {}", desc.name))); + } + desc.id = inner.next_id; + inner.next_id += 1; + let db = Database::new(desc.clone()); + inner.databases.insert(desc.name.clone(), db); + Ok(desc) + } +} + +#[derive(Clone)] +pub struct Database { + inner: Arc>, +} + +struct DatabaseInner { + desc: DatabaseDesc, + next_id: u64, + collections: HashMap, +} + +impl Database { + fn new(desc: DatabaseDesc) -> Self { + let inner = DatabaseInner { + desc, + next_id: 1, + collections: HashMap::new(), + }; + Self { + inner: Arc::new(Mutex::new(inner)), + } + } + + pub async fn desc(&self) -> DatabaseDesc { + self.inner.lock().await.desc.clone() + } + + pub async fn collection(&self, name: &str) -> Result { + let inner = self.inner.lock().await; + inner + .collections + .get(name) + .cloned() + .ok_or_else(|| Error::not_found(format!("collection {}", name))) + } + + pub async fn create_collection(&self, mut desc: CollectionDesc) -> Result { + let mut inner = self.inner.lock().await; + if inner.collections.contains_key(&desc.name) { + return Err(Error::already_exists(format!("collection {}", desc.name))); + } + desc.id = inner.next_id; + 
inner.next_id += 1; + desc.parent_id = inner.desc.id; + let co = Collection::new(desc.clone()); + inner.collections.insert(desc.name.clone(), co); + Ok(desc) + } +} + +#[derive(Clone)] +pub struct Collection { + inner: Arc>, +} + +struct CollectionInner { + desc: CollectionDesc, +} + +impl Collection { + fn new(desc: CollectionDesc) -> Self { + let inner = CollectionInner { desc }; + Self { + inner: Arc::new(Mutex::new(inner)), + } + } + + pub async fn desc(&self) -> CollectionDesc { + self.inner.lock().await.desc.clone() + } +} diff --git a/src/transactor/Cargo.toml b/src/transactor/Cargo.toml new file mode 100644 index 00000000..210ecb2c --- /dev/null +++ b/src/transactor/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "engula-transactor" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +engula-apis = { version = "0.3", path = "../apis" } +engula-cooperator = { path = "../cooperator" } +engula-supervisor = { path = "../supervisor" } + +prost = "0.9" +tokio = { version = "1.15", features = ["full"] } +tonic = "0.6" diff --git a/src/transactor/src/error.rs b/src/transactor/src/error.rs new file mode 100644 index 00000000..be559104 --- /dev/null +++ b/src/transactor/src/error.rs @@ -0,0 +1,16 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +pub type Error = tonic::Status; +pub type Result = std::result::Result; diff --git a/src/transactor/src/lib.rs b/src/transactor/src/lib.rs new file mode 100644 index 00000000..7d9c9ab9 --- /dev/null +++ b/src/transactor/src/lib.rs @@ -0,0 +1,21 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod error; +mod server; + +pub use self::{ + error::{Error, Result}, + server::Server, +}; diff --git a/src/transactor/src/server.rs b/src/transactor/src/server.rs new file mode 100644 index 00000000..b60ad239 --- /dev/null +++ b/src/transactor/src/server.rs @@ -0,0 +1,70 @@ +// Copyright 2022 The Engula Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use engula_apis::*; +use engula_cooperator::Cooperator; +use engula_supervisor::Supervisor; +use tonic::{Request, Response}; + +use crate::Result; + +pub struct Server { + supervisor: Supervisor, + cooperator: Cooperator, +} + +impl Default for Server { + fn default() -> Self { + Self::new() + } +} + +impl Server { + pub fn new() -> Self { + let supervisor = Supervisor::new(); + let cooperator = Cooperator::new(supervisor.clone()); + Self { + supervisor, + cooperator, + } + } + + pub fn into_service(self) -> engula_server::EngulaServer { + engula_server::EngulaServer::new(self) + } +} + +#[tonic::async_trait] +impl engula_server::Engula for Server { + async fn txn(&self, req: Request) -> Result> { + let req = req.into_inner(); + let res = self.cooperator.txn(req).await?; + Ok(Response::new(res)) + } + + async fn database(&self, req: Request) -> Result> { + let req = req.into_inner(); + let res = self.supervisor.database(req).await?; + Ok(Response::new(res)) + } + + async fn collection( + &self, + req: Request, + ) -> Result> { + let req = req.into_inner(); + let res = self.supervisor.collection(req).await?; + Ok(Response::new(res)) + } +} diff --git a/taplo.toml b/taplo.toml new file mode 100644 index 00000000..98ab3517 --- /dev/null +++ b/taplo.toml @@ -0,0 +1,11 @@ +include = ["**/Cargo.toml"] + +[formatting] +reorder_keys = true + +# Do not sort key names within [package] section. +[[rule]] +keys = ["package"] + +[rule.formatting] +reorder_keys = false diff --git a/tools/ci/licenserc.yml b/tools/ci/licenserc.yml index 8696b5b4..ee2e7a52 100644 --- a/tools/ci/licenserc.yml +++ b/tools/ci/licenserc.yml @@ -19,10 +19,13 @@ header: paths-ignore: - 'docs' - '**/*.md' + - '.cargo/audit.toml' + - '.dockerignore' - '.gitignore' + - '.gitmodules' - 'Cargo.lock' - 'Cargo.toml' - 'LICENSE' - 'rust-toolchain.toml' - 'rustfmt.toml' - - '.cargo/audit.toml' + - 'taplo.toml'